[Dataset rows below; columns: licenses | version | tree_hash | path | type | size | text | package_name | repo]
==== TreeTools v0.6.13 (MIT) | code | https://github.com/PierreBarrat/TreeTools.jl.git ====
# println("##### methods #####")
using Test
using TreeTools
using Logging
## Testing equality operator
root_1 = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/methods/tree1.nwk")
root_2 = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/methods/tree1_reordered.nwk")
@testset "Equality `==`" begin
@test root_1 == root_2
end
@testset "node2tree" begin
@test typeof(node2tree(root_1)) <: Tree
@test typeof(node2tree(root_2)) <: Tree
end
# Testing ancestors
@testset "Ancestors" begin
root_1 = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/methods/tree1.nwk")
@test TreeTools.is_ancestor(root_1, root_1.child[1])
@test TreeTools.is_ancestor(root_1, root_1.child[1].child[1])
@test !TreeTools.is_ancestor(root_1.child[1],root_1.child[2])
root_2 = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/methods/tree2.nwk")
@test lca((root_2.child[1].child[1], root_2.child[1].child[2])).label == "ABC"
@test lca((root_2.child[1].child[1].child[1], root_2.child[1].child[1].child[2], root_2.child[1].child[2])).label == "ABC"
@test lca((root_2.child[1].child[1], root_2.child[2])).label == "ABCD"
end
@testset "Count" begin
t1 = node2tree(root_1)
@test count(isleaf, t1) == 4
@test count(n -> n.label[1] == 'A', t1) == 3
@test count(isleaf, t1.lnodes["AB"]) == 2
@test count(n -> n.label[1] == 'A', t1.lnodes["AB"]) == 2
end
@testset "Copy type" begin
t1 = Tree(TreeNode(MiscData(Dict(1=>2))))
tc = copy(t1)
@test tc.root.data[1] == 2
t1.root.data[1] = 3
@test tc.root.data[1] == 2
end
@testset "Copy" begin
t1 = node2tree(root_1)
t2 = copy(t1)
t3 = copy(t1, force_new_tree_label=true)
t4 = copy(t1, label="tree_4")
@test typeof(t1) == typeof(t2)
prunesubtree!(t2, ["A"])
@test haskey(t1.lnodes, "A")
@test !haskey(t2.lnodes, "A")
@test t1.label == t2.label
@test t1.label != t3.label
@test t1.label != t4.label
@test t4.label == "tree_4"
end
@testset "Convert" begin
t1 = Tree(TreeNode(MiscData(Dict(1=>2))))
# No op
@test convert(Tree{MiscData}, t1) === t1
@test convert(MiscData, t1) === t1
@test convert(Tree{MiscData}, t1).root.data === t1.root.data
@test convert(MiscData, t1).root.data === t1.root.data
# Converting to EmptyData and back
t2 = convert(Tree{TreeTools.EmptyData}, t1)
t3 = convert(MiscData, t2)
@test typeof(t2) == Tree{TreeTools.EmptyData}
@test t2.root.data == TreeTools.EmptyData()
@test typeof(t3) == Tree{TreeTools.MiscData}
@test !haskey(t3.root.data, 1)
###
t1 = Tree(TreeNode(TreeTools.EmptyData()))
# No op
@test convert(TreeTools.EmptyData, t1) === t1
# Converting to MiscData and back
t2 = convert(Tree{TreeTools.MiscData}, t1)
@test typeof(t2) == Tree{TreeTools.MiscData}
@test isempty(t2.root.data)
@test typeof(convert(Tree{TreeTools.EmptyData}, t2)) == Tree{TreeTools.EmptyData}
##check convert will keep tree labels by default
t3 = Tree(TreeNode(TreeTools.EmptyData()))
t3.label = "tree3"
#while converting to MiscData and back
@test convert(TreeTools.MiscData, t3).label === "tree3"
@test convert(Tree{TreeTools.EmptyData}, t3).label === "tree3"
##check label can be changed if specified
t3 = Tree(TreeNode(TreeTools.EmptyData()))
t3.label = "tree3"
@test convert(Tree{TreeTools.MiscData}, t3; label="tree4").label === "tree4"
end
nwk = "(A:3,(B:1,C:1):2);"
@testset "Distance" begin
t = parse_newick_string(nwk)
# Branch length
@test distance(t, "A", "B") == 6
@test distance(t.lnodes["A"], t.lnodes["B"]) == 6
@test distance(t, "A", "B") == distance(t, "A", "C")
@test distance(t.root, t.lnodes["A"]) == 3
# Topological
@test distance(t.root, t.lnodes["A"]; topological=true) == 1
@test distance(t, "A", "B"; topological=true) == distance(t, "A", "C"; topological=true)
@test distance(t, "A", "B"; topological=true) == 3
for n in nodes(t)
@test distance(t[n.label], t[n.label]; topological=true) == 0
@test distance(t[n.label], t[n.label]; topological=false) == 0
end
# tests below can be removed when `divtime` is removed
@test divtime(t.lnodes["A"], t.lnodes["B"]) == 6
@test divtime(t.root, t.lnodes["A"]) == 3
end
## The tests below depend on the way internal nodes are labelled
## They may need to be rewritten
nwk = "(A,(B,C));"
@testset "Spanning tree 1" begin
t = parse_newick_string(nwk)
@test isempty(TreeTools.branches_in_spanning_tree(t, "A"))
@test sort(TreeTools.branches_in_spanning_tree(t, "A", "B")) == sort(["A", "B", "NODE_2"])
@test sort(TreeTools.branches_in_spanning_tree(t, "B", "C")) == sort(["B", "C"])
end
nwk = "((A,B),(D,(E,F,G)));"
@testset "Spanning tree 2" begin
t = parse_newick_string(nwk)
tmp = sort(TreeTools.branches_in_spanning_tree(t, "A", "E", "F"))
@test tmp == sort(["A", "NODE_2", "E", "F", "NODE_4", "NODE_3"])
@test isempty(TreeTools.branches_in_spanning_tree(t, "E"))
end
@testset "ladderize alphabetically" begin
t1 = node2tree(TreeTools.parse_newick("((D,A,B),C)"; node_data_type=TreeTools.MiscData); label="t1")
TreeTools.ladderize!(t1)
@test write_newick(t1) == "(C,(A,B,D)NODE_2)NODE_1:0;"
end
@testset "Binarize" begin
bl(t) = sum(skipmissing(map(x -> x.tau, nodes(t)))) # branch length should stay unchanged
nwk = "(A,(B,C,D,E,(F,G,H)));"
@testset "1" begin
t = parse_newick_string(nwk)
TreeTools.rand_times!(t)
L = bl(t)
z = TreeTools.binarize!(t; mode=:balanced)
@test z == 4
@test length(SplitList(t)) == 7
@test isapprox(bl(t), L; rtol = 1e-14)
end
nwk = "(8:571.0,(((10:0.0,17:0.0)internal_1:12.8,(12:0.0,19:0.0)internal_2:12.5)internal_11:80.7,((6:26.3,(4:0.0,5:0.0)internal_7:22.0)internal_14:22.4,(1:12.5,3:12.5)internal_10:36.1,((11:0.0,20:0.0)internal_5:16.5,7:11.2,16:11.2,9:0.0,13:0.0,18:0.0,15:0.0)internal_13:23.1,(2:0.0,14:0.0)internal_4:42.1)internal_17:43.0)internal_18:477.0)internal_19:0;"
@testset "2" begin
t = parse_newick_string(nwk)
L = bl(t)
z = TreeTools.binarize!(t; mode=:balanced)
@test length(nodes(t)) == 2*length(leaves(t)) - 1
for n in nodes(t)
@test length(children(n)) == 2 || isleaf(n)
end
end
end
@testset "Simple rooting" begin
nwk = "(A:1,(B:1,(C:1,D:1):1):1);"
tree = parse_newick_string(nwk)
leaf_labels = map(label, leaves(tree))
pw_distances = [distance(tree, x, y) for x in leaf_labels for y in leaf_labels]
TreeTools.root!(tree, lca(tree, "C", "D").label)
@test isroot(lca(tree, "C", "D"))
@test !isroot(lca(tree, "A", "B"))
@test pw_distances == [distance(tree, x, y) for x in leaf_labels for y in leaf_labels]
tree = parse_newick_string(nwk)
TreeTools.root!(tree, "B"; time = .5)
@test tree["B"] |> ancestor |> isroot
@test pw_distances == [distance(tree, x, y) for x in leaf_labels for y in leaf_labels]
@test_throws ErrorException redirect_stderr(
() -> TreeTools.root!(tree, "B"; time = 5), devnull
)
end
@testset "Midpoint rooting" begin
@testset "1" begin
nwk = "(A,(B,(C,(D,(E,F)))));"
t = parse_newick_string(nwk)
TreeTools.rand_times!(t)
TreeTools.root!(t, method=:midpoint, topological=true)
for n in nodes(t)
@test (n.isroot && ismissing(branch_length(n))) || (!n.isroot && !ismissing(branch_length(n)))
end
@test length(children(t.root)) == 2
d1 = TreeTools.distance_to_deepest_leaf(t.root.child[1]; topological=true) + 1
d2 = TreeTools.distance_to_deepest_leaf(t.root.child[2]; topological=true) + 1
@test d1 == d2 || abs(d1-d2) == 1
end
@testset "2" begin
nwk = "(A,(B,(C,(D,E))));"
t = parse_newick_string(nwk)
TreeTools.rand_times!(t)
TreeTools.root!(t, method=:midpoint, topological=true)
for n in nodes(t)
@test (n.isroot && ismissing(branch_length(n))) || (!n.isroot && !ismissing(branch_length(n)))
end
@test length(children(t.root)) == 2
d1 = TreeTools.distance_to_deepest_leaf(t.root.child[1]; topological=true) + 1
d2 = TreeTools.distance_to_deepest_leaf(t.root.child[2]; topological=true) + 1
@test d1 == d2 || abs(d1-d2) == 1
end
@testset "3" begin
nwk = "(A,((B,(C,D)),E,F,(G,(H,I))));"
t = parse_newick_string(nwk);
TreeTools.rand_times!(t)
TreeTools.root!(t, method = :midpoint)
@test length(children(t.root)) == 2
d1 = TreeTools.distance_to_deepest_leaf(t.root.child[1]; topological=false) + t.root.child[1].tau
d2 = TreeTools.distance_to_deepest_leaf(t.root.child[2]; topological=false) + t.root.child[2].tau
@test isapprox(d1, d2, rtol = 1e-10)
end
# Some Kingman tree
nwk = "((3:42.39239447896236,9:42.39239447896236)internal_7:184.59454190028205,(((7:5.386265198881789,(4:3.4161799796066714,6:3.4161799796066714)internal_1:1.970085219275118)internal_3:13.350057070009068,(2:5.857739627778067,5:5.857739627778067)internal_4:12.878582641112791)internal_5:27.712331677710498,(10:33.43880444968331,(1:4.740041795143892,8:4.740041795143892)internal_2:28.69876265453942)internal_6:13.009849496918044)internal_8:180.53828243264306)internal_9:0;"
@testset "4" begin
t = parse_newick_string(nwk)
TreeTools.root!(t; method=:midpoint)
for n in nodes(t)
@test isroot(n) || !ismissing(branch_length(n))
end
@test length(children(t.root)) == 2
d1 = TreeTools.distance_to_deepest_leaf(t.root.child[1]; topological=false) + t.root.child[1].tau
d2 = TreeTools.distance_to_deepest_leaf(t.root.child[2]; topological=false) + t.root.child[2].tau
@test isapprox(d1, d2, rtol = 1e-10)
end
# Some sick tree by Marco
@testset "5" begin
nwk = "(A:0.0,B:0.0,C:0.0,D:0.0,E:0.0,F:0.0,G:0.0,H:0.0,I:0.0,J:0.0)ROOT;"
t = parse_newick_string(nwk)
@test_logs (:warn, r"") match_mode=:any TreeTools.root!(t; method=:midpoint) # should warn and do nothing
@test label(t.root) == "ROOT"
TreeTools.root!(t; method=:midpoint, topological=true) # should do nothing since root is already midpoint
@test label(t.root) == "ROOT"
branch_length!(t["A"], 1.)
TreeTools.root!(t; method=:midpoint)
@test length(children(t.root)) == 2
@test in(t["A"], children(t.root))
end
end
@testset "Model rooting" begin
model_nwk = "((A:1,B:1):1,(C:2,(D:3,E:1):2):2);"
model = parse_newick_string(model_nwk)
tree = copy(model)
TreeTools.root!(tree, lca(tree, "D", "E").label, time = 1)
@test_logs min_level=Logging.Warn TreeTools.root!(tree; method=:model, model)
tree = copy(model)
TreeTools.root!(tree, "A", time = .5)
@test_logs min_level=Logging.Warn TreeTools.root!(tree; method=:model, model)
nwk = "((A:1,B:1):1,(D:2,(C:3,E:1):2):2);"
tree = parse_newick_string(nwk)
@test_logs (:warn,) TreeTools.root!(tree; method=:model, model)
nwk = "(A:1,((D:2,(C:3,E:1):2):2,B:0.5):1);"
tree = parse_newick_string(nwk)
@test_logs (:warn,) TreeTools.root!(tree; method=:model, model)
nwk = "((A:1,C:1):1,(B:2,(C:3,E:1):2):2);"
@test_logs (:warn,) TreeTools.root!(tree; method=:model, model)
# Test with missing branch lengths
model_nwk = "((A,B),(C,(D,E)));"
model = parse_newick_string(model_nwk)
tree = copy(model)
TreeTools.root!(tree, lca(tree, "D", "E").label, time = missing)
@test_logs min_level=Logging.Warn TreeTools.root!(tree; method=:model, model)
end
@testset "Tree measures" begin
nwk1 = "((A,B),C);"
nwk2 = "(A,(B,C));"
nwk3 = "((A,B,D),C);"
nwk4 = "(((A,B),D),C);"
nwk5 = "(A,B,C,D);"
t1 = node2tree(TreeTools.parse_newick(nwk1), label = "a")
t2 = node2tree(TreeTools.parse_newick(nwk2), label = "b")
t3 = node2tree(TreeTools.parse_newick(nwk3), label = "c")
t4 = node2tree(TreeTools.parse_newick(nwk4), label = "d")
t5 = node2tree(TreeTools.parse_newick(nwk5), label = "e")
@testset "RF distance" begin
@test TreeTools.RF_distance(t1, t2) == 2
@test TreeTools.RF_distance(t3, t4) == 1
@test TreeTools.RF_distance(t1, t2; normalize=true) == 1
@test TreeTools.RF_distance(t3, t4; normalize=true) == 1/3
@test_throws AssertionError TreeTools.RF_distance(t1, t3)
end
@testset "resolution value" begin
@test TreeTools.resolution_value(t3) == (1/2)
@test TreeTools.resolution_value(t4) == 1
@test TreeTools.resolution_value(t5) == 0
end
end
==== TreeTools v0.6.13 (MIT) | code | https://github.com/PierreBarrat/TreeTools.jl.git ====
# println("##### objects #####")
using TreeTools
using Test
@testset "Node relabel" begin
nwk = "(A,(B,C));"
t = parse_newick_string(nwk)
label!(t, t["A"], "D")
@test check_tree(t)
@test !in("A", t)
@test in("D", t)
@test length(nodes(t)) == 5
labels = map(label, POT(t))
@test !in("A", labels)
@test in("D", labels)
@test sort(labels) == sort(collect(keys(t.lnodes)))
@test_throws AssertionError label!(t, t["D"], "B")
@test_throws AssertionError label!(t, "D", "B")
end
==== TreeTools v0.6.13 (MIT) | code | https://github.com/PierreBarrat/TreeTools.jl.git ====
using Test
using TreeTools
using Chain
@testset "TreeNode level functions" begin
root_1 = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/prunegraft/tree1.nwk")
@testset "Pruning (node)" begin
# Pruning with modification
root = deepcopy(root_1)
global A = TreeTools.prunenode!(root.child[1].child[1])[1]
root_ref = TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/prunegraft/tree1_Apruned.nwk")
@test root == root_ref
# Pruning with copy
root = deepcopy(root_1)
global A2 = TreeTools.prunenode(root.child[1].child[1])[1]
@test root == root_1 && A == A2
end
@testset "Grafting (node)" begin
root = deepcopy(root_1)
A = TreeTools.prunenode!(root.child[1].child[1])[1]
TreeTools.graftnode!(root.child[2].child[1], A);
@test root == TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/prunegraft/tree_grafttest1.nwk")
end
@testset "Deleting" begin
root = deepcopy(root_1)
temp = TreeTools.delete_node!(root.child[1])
@test temp == root && root == TreeTools.read_newick("$(dirname(pathof(TreeTools)))/../test/prunegraft/tree_del1.nwk")
end
end
@testset "Grafting new node onto tree" begin
nwk = "((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;"
t = parse_newick_string(nwk; node_data_type = TreeTools.EmptyData)
# 1
E = TreeNode(label = "E", tau = 4.)
tc = copy(t)
graft!(tc, E, "AB")
@test sort(map(label, children(tc["AB"]))) == ["A","B","E"]
@test ancestor(E) == tc["AB"]
@test in(E, tc)
@test in(E, children(tc["AB"]))
@test branch_length(E) == 4
@test_throws ErrorException graft!(tc, E, "CD") # E is not a root anymore
# 2
E = TreeNode(label = "E", tau = 5.)
tc = copy(t)
@test_throws ErrorException graft!(tc, E, tc["A"])
graft!(tc, E, tc["A"], graft_on_leaf=true, time = 1.)
@test !isleaf(tc["A"])
@test ancestor(E) == tc["A"]
@test in(E, tc)
@test in(E, children(tc["A"]))
@test branch_length(E) == 1
# 3
E = node2tree(TreeNode(label = "E", tau = 5.))
tc = copy(t)
graft!(tc, E, "A", graft_on_leaf=true) # will copy E
@test sort(map(label, children(tc["A"]))) == ["E"]
@test isroot(E.root)
@test check_tree(E)
@test in("E", tc)
@test_throws ErrorException graft!(tc, E, "CD")
# 4
E = TreeNode(label = "E", tau = 4., data = MiscData())
tc = copy(t)
@test_throws ErrorException graft!(tc, E, "AB")
tc = convert(Tree{MiscData}, t)
E = TreeNode(label = "E", tau = 4.)
@test_throws ErrorException graft!(tc, E, "AB")
end
@testset "Pruning" begin
nwk = "((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;"
t = parse_newick_string(nwk; node_data_type = TreeTools.EmptyData)
# 1
tc = copy(t)
r, a = prunesubtree!(tc, "AB"; remove_singletons = true)
@test !in("AB", tc)
@test !in("A", tc)
@test isroot(a)
@test isroot(r)
@test check_tree(tc)
@test tc.root.label == "CD"
@test label(a) == "R"
@test sort(map(label, children(r))) == ["A", "B"]
# 2
tc = copy(t)
@test_throws ErrorException prunesubtree!(tc, "R")
@test_throws KeyError prunesubtree!(tc, "X")
@test_throws ErrorException prune!(tc, ["A", "C"])
prunesubtree!(tc, ["A", "B"])
@test_throws KeyError prunesubtree!(tc, "A")
# 3
tc = copy(t)
tp, _ = prune!(tc, ["A","B"]; remove_singletons = false)
@test !in("AB", tc)
@test !in("A", tc)
@test in("AB", tp)
@test in("A", tp)
@test check_tree(tp) # tc has singletons so check_tree will fail
@test sort(map(label, children(tc.root))) == ["CD"]
# 4
t = parse_newick_string("(A,(B,(C,D)));")
tp, _ = prune!(t, ["B","D"], clade_only=false)
@test length(leaves(t)) == 1
@test length(leaves(tp)) == 3
@test sort(map(label, leaves(tp))) == ["B","C","D"]
end
@testset "Insert" begin
nwk = "((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;"
t = parse_newick_string(nwk; node_data_type = TreeTools.EmptyData)
# 1
tc = copy(t)
@test_throws ErrorException insert!(tc, "A"; time = 2.)
@test_throws ErrorException insert!(tc, "A"; time = missing)
@test_throws ErrorException insert!(tc, "A"; name = "B")
@test_throws ErrorException insert!(tc, "R"; time = 1.)
# 2
tc = convert(Tree{MiscData}, t)
s = insert!(tc, "A"; time = 0.25)
@test in(label(s), tc)
@test ancestor(tc["A"]) == s
@test map(label, children(s)) == ["A"]
@test ancestor(s) == t["AB"]
@test branch_length(s) == 0.75
@test branch_length(s) + branch_length(tc["A"]) == 1.
# 3
tc = convert(Tree{MiscData}, t)
s = insert!(tc, "A"; time = 0.25)
@test typeof(s) == TreeNode{MiscData}
@test in(label(s), tc)
@test ancestor(tc["A"]) == s
@test map(label, children(s)) == ["A"]
@test ancestor(s) == t["AB"]
@test branch_length(s) == 0.75
@test branch_length(s) + branch_length(tc["A"]) == 1.
s.data["Hello"] = " World!"
@test tc[s.label].data["Hello"] == " World!"
end
@testset "Delete" begin
nwk = "((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;"
t = parse_newick_string(nwk; node_data_type = TreeTools.EmptyData)
# 1
tc = copy(t)
@test_throws ErrorException delete!(tc, "R")
delete!(tc, "AB")
@test sort(map(label, children(tc["R"]))) == ["A", "B", "CD"]
@test branch_length(tc["A"]) == 3
@test ancestor(tc["A"]) == tc["R"]
end
@testset "Remove internal singletons" begin
nwk = "(((C:1,((D2:1,D3:1)D1:1)D:1)B:1)A:1)R:1;"
t = parse_newick_string(nwk, strict_check=false)
dmat = Dict(
(n1.label, n2.label) => distance(n1, n2) for n1 in leaves(t) for n2 in leaves(t)
)
tc = copy(t)
TreeTools.remove_internal_singletons!(tc)
@test isempty(Iterators.filter(x -> length(children(x)) == 1, nodes(tc)))
dmat2 = Dict(
(n1.label, n2.label) => distance(n1, n2) for n1 in leaves(tc) for n2 in leaves(tc)
)
@test dmat == dmat2
end
@testset "delete_branches!" begin
# Constructing a binary tree with branch length 1/k for layer k
K = 5
tree = let
tree = Tree()
label!(tree, tree.root, "root")
nodes = [tree.root]
new_nodes = []
for k in 1:K
for n in nodes, _ in 1:2
c = TreeNode(;
label = TreeTools.make_random_label("$k"),
tau = 1/k,
)
graft!(tree, c, n; graft_on_leaf=true)
push!(new_nodes, c)
end
nodes = new_nodes
new_nodes = []
end
tree
end
# Remove branches with cutoff 1/k for all k.
# Check that we removed the correct amount and that the tree remains ultrametric.
for cutoff in [1/k for k in reverse(1:K)]
tc = copy(tree)
n_removed = count(
n -> !isroot(n) && branch_length(n) < cutoff,
internals(tree)
)
TreeTools.delete_branches!(n -> branch_length(n) < cutoff, tc)
@test length(collect(internals(tc))) == 2^(K) - 1 - n_removed
@test length(@chain leaves(tc) map(n->distance(n, tc.root), _) unique) == 1
end
end
==== TreeTools v0.6.13 (MIT) | code | https://github.com/PierreBarrat/TreeTools.jl.git ====
using Test
using TreeTools
using Chain
@testset "star tree" begin
n = 16
times = rand(n)
tree = star_tree(n, times)
@test length(nodes(tree)) == n+1
for node in leaves(tree)
@test branch_length(node) == times[parse(Int, label(node))]
end
tree = star_tree(n)
for node in leaves(tree)
@test ismissing(branch_length(node))
end
@test_throws AssertionError star_tree(3, [1,2])
end
==== TreeTools v0.6.13 (MIT) | code | https://github.com/PierreBarrat/TreeTools.jl.git ====
using Test
using TreeTools
# println("##### splits #####")
nwk1 = "(((A1,A2),(B1,B2),(C1,C2)),D,E)"
t = node2tree(TreeTools.parse_newick(nwk1))
S = SplitList(t)
@testset "1" begin
expected = [
["A1", "A2"],
["B1", "B2"],
["C1", "C2"],
["A1", "A2", "B1", "B2", "C1", "C2"],
["A1", "A2", "B1", "B2", "C1", "C2", "D", "E"]
]
@test [leaves(S,i) for i in eachindex(S)] == expected
@test isequal(S, expected)
@test S == expected
end
s4 = S.splitmap["NODE_4"] # ["B1", "B2"] ~ [3,4]
s5 = S.splitmap["NODE_5"] # ["C1", "C2"] ~ [5,6]
s45 = TreeTools.joinsplits(s4,s5)
@testset "2" begin
@test s4 == Split([3,4])
@test s4 != Split([3,4,5])
@test in(s4, S)
@test in(s5, S)
@test !in(s45, S)
@test s45.dat == Int[3,4,5,6]
@test !isnothing(findfirst(==(s5), S.splits))
@test isnothing(findfirst(==(s45), S.splits))
end
@testset "3" begin
@test !TreeTools.isroot(S.splitmap["NODE_4"], S.mask)
@test !TreeTools.isroot(S.splitmap["NODE_2"], S.mask)
@test TreeTools.isroot(S.splitmap["NODE_1"], S.mask)
@testset for n in internals(t)
@test !TreeTools.isleaf(S.splitmap[n.label])
end
end
@testset "4" begin
@testset for s in S, t in S
@test arecompatible(s,t)
@test arecompatible(s,t,S.mask)
@test arecompatible(s,t,rand(Bool,8))
end
@test arecompatible(s4, s45)
@test arecompatible(s5, s45)
end
@testset "5" begin
t1 = Split([1,3])
t2 = Split([1,2,3])
t3 = Split([1,3,7,8])
u = Split([7,8])
@test !iscompatible(t1, S)
@test !iscompatible(t2, S)
@test !iscompatible(t3, S)
@test iscompatible(u, S)
end
# Unions
Smcc = SplitList(S.leaves)
append!(Smcc.splits, [Split([1,2,3,4]), Split([5,6,7])])
U = union(S, Smcc)
Sc = deepcopy(S)
union!(Sc, Smcc)
@testset "7" begin
@test in(Smcc.splits[1], U)
@test in(Smcc.splits[2], U)
@test length(U) == length(S) + 2
@test U == Sc
end
println()
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
[Documentation](https://pierrebarrat.github.io/TreeTools.jl)
# TreeTools
Simple tools for handling *rooted* phylogenetic or genealogic trees with Julia.
I used this package for all of my work related to trees and found it useful. I hope you will too!
A very brief list of what it can do:
- read and write Newick files
- attach arbitrary data to tree nodes
- compute distance between nodes in a tree, their most recent common ancestor, etc...
- traverse the tree (post-order or arbitrary)
- prune nodes, insert nodes, remove singletons
- re-root the tree (docs not written yet)
- decompose the tree into [splits](https://en.wikipedia.org/wiki/Split_(phylogenetics)) and perform operations on them (docs not written yet)
Please read the [documentation](https://pierrebarrat.github.io/TreeTools.jl) to discover what you can do with the package.
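As a minimal sketch of the basics listed above (the Newick string and file name are arbitrary examples):
```julia
using TreeTools

tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;")
map(label, leaves(tree))  # labels of the leaves: "A", "B" and "C"
lca(tree, "A", "B")       # most recent common ancestor, here the internal node "AB"
distance(tree, "A", "C")  # sum of branch lengths on the path: 6.0
write("mytree.nwk", tree) # write back to a Newick file
```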
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# Reading and writing
For now, TreeTools only handles the [Newick format](https://en.wikipedia.org/wiki/Newick_format).
Functions are quite basic at this stage.
## Reading
To read from a Newick file, use `read_tree`.
Here is an example with the `examples/tree_10.nwk` file:
```@example
using TreeTools # hide
tree = read_tree("../../examples/tree_10.nwk")
```
The documentation reproduced below gives more information:
```@docs
TreeTools.read_tree
```
`read_tree` will also read files containing several Newick strings, provided they are on separate lines.
It then returns an array of `Tree` objects.
If you have a variable containing a Newick string, simply call `parse_newick_string` to return a tree:
```@example
using TreeTools # hide
nwk = "(A:3,(B:1,C:1)BC:1);"
tree = parse_newick_string(nwk)
```
If internal nodes of a Newick string do not have names, TreeTools will by default give them names of the form `NODE_i` with `i::Int`.
This happens during parsing of the Newick string, in the `parse_newick!` function.
This label is technically not guaranteed to be unique: the Newick string may also contain nodes with the same name.
In some cases, it is thus necessary to create a unique identifier for a node.
This is done by creating a random string obtained with the call `Random.randstring(8)`, and happens at a later stage, when calling the `node2tree` function (see the section about [Tree](@ref)).
This happens when:
- the node label is found to be a bootstrap value (see `?TreeTools.isbootstrap`).
- the option `force_new_labels` is used when calling `read_tree`. This is useful if some internal nodes of the Newick string have redundant names.
- for some reason, the node does not yet have a label.
There are about $2\cdot 10^{14}$ strings of length 8 (alphabetic + numeric characters), so this should be fine for most problems. A quick calculation shows that for a tree of 1000 leaves, the probability of obtaining two equal identifiers for different nodes is $\sim 2 \cdot 10^{-9}$, which is probably acceptable for most applications. If you think it's not enough, I can add a handle to let user create longer strings, or solve this in a more elegant way.
## Writing
To write `t::Tree` to a Newick file, simply call `write(filename, t)`.
If you want to append to a file, call `write(filename, t, "a")`.
Note that `write(filename, t)` adds a newline `'\n'` character at the end of the Newick string.
This is done in case other trees have to be added to the file.
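Putting reading and writing together, here is a minimal sketch of a full round trip (the file name is an arbitrary example):
```julia
using TreeTools

tree = parse_newick_string("(A:3,(B:1,C:1)BC:1);")
write("trees.nwk", tree)        # write a first tree; a trailing newline is added
write("trees.nwk", tree, "a")   # append a second Newick string to the same file
trees = read_tree("trees.nwk")  # several Newick strings on separate lines: returns an array of `Tree`s
```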
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# Iteration: going through a tree
TreeTools offers two different ways to iterate through nodes: post-order traversal, and arbitrary iteration.
## Post-order traversal
The exact definition can be found [here](https://en.wikipedia.org/wiki/Tree_traversal#Post-order,_LRN).
This order of iteration guarantees that:
1. children nodes are always visited before parent nodes
2. the order in which children of a node are visited is the same as that given in `children(node)`.
```@repl iteration_1
using TreeTools # hide
tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;")
for n in POT(tree)
println(n)
end
```
If you want to access only leaves, you can use `POTleaves`, or simply filter the results:
```@repl iteration_1
[label(n) for n in POTleaves(tree["AB"])]
for n in Iterators.filter(isleaf, POT(tree))
println("$(label(n)) is a leaf")
end
```
Note that `POT` can also be called on `TreeNode` objects.
In this case, it will only iterate through the clade below the input node, including its root:
```@repl iteration_1
let
node = tree["AB"]
X = map(label, POT(node))
println("The nodes in the clade defined by $(label(node)) are $(X).")
end
```
### `map`, `count`, etc...
In some cases, one wants to do something like count the number of nodes that have a given property, or apply some function `f` to each node and collect the result.
To facilitate this, TreeTools extends the Base functions `map`, `map!` and `count` to `Tree` and `TreeNode` objects.
Using these functions will traverse the tree in post-order.
If called on a `TreeNode` object, they will only iterate through the clade defined by this node.
```@repl iteration_1
map(branch_length, tree) # Branch length of all nodes, in POT
map!(tree) do node # Double the length of all branches - map! returns `nothing`
x = branch_length(node)
if !ismissing(x) branch_length!(node, 2*x) end
end
map(branch_length, tree) # Doubled branch length, except for root (`missing`)
count(n -> label(n)[1] == 'A', tree) # count the nodes with a label starting with 'A'
```
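These functions can also be called on a single `TreeNode`, in which case they only visit the clade below (and including) that node. Using the tree defined above:
```@repl iteration_1
count(isleaf, tree["AB"]) # only the two leaves below "AB" are counted
map(label, tree["AB"]) # labels of the clade defined by "AB", in post-order
```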
Note that the TreeTools extension of `map!` differs from Base in one respect: in TreeTools, `map!` returns `nothing` instead of an array.
## Arbitrary order
As explained in [Basic concepts](@ref), a `Tree` object is mainly a dictionary mapping labels to `TreeNode` objects.
We can thus iterate through nodes in the tree using this dictionary.
For this, TreeTools provides the `nodes`, `leaves` and `internals` methods.
This will traverse the tree in an arbitrary order but is faster than `POT`.
```@repl iteration_1
for n in leaves(tree)
println("$(label(n)) is a leaf")
end
for n in internals(tree)
println("$(label(n)) is an internal node")
end
map(label, nodes(tree)) == union(
map(label, leaves(tree)),
map(label, internals(tree))
)
```
## A note on speed
Iterating through `tree` using `nodes(tree)` will be faster than using `POT(tree)`. This is mainly because of my inability to write an efficient iterator: currently, `POT` will allocate a number of times that is proportional to the size of the tree. Below is a simple example where we define functions that count the number of nodes in a tree:
```@example iteration_2
using TreeTools # hide
count_nodes_pot(tree) = sum(x -> 1, POT(tree)) # Traversing the tree in post-order while summing 1
count_nodes_arbitrary(tree) = sum(x -> 1, nodes(tree)) # Arbitrary order
nothing # hide
```
These two functions obviously give the same result, but not with the same run time. Here, we try it on the `example/tree_10.nwk` file:
```@repl iteration_2
tree = read_tree("../../examples/tree_10.nwk")
using BenchmarkTools
@btime count_nodes_pot(tree)
@btime count_nodes_arbitrary(tree)
```
If a fast post-order is needed, the only solution in the current state of TreeTools is to "manually" program it.
The code below defines a more efficient way to count nodes, traversing the tree in post-order.
```@example iteration_2
function count_nodes_eff_pot(n::TreeNode) # this counts the number of nodes below `n`
cnt = 0
for c in children(n)
cnt += count_nodes_eff_pot(c) # recurse into each child
end
return cnt + 1
end
function count_nodes_eff_pot(tree::Tree)
return count_nodes_eff_pot(tree.root)
end
nothing # hide
```
This will run faster than `count_nodes_pot`, and does not allocate.
For counting nodes, this is really overkill, and one could just call `length(nodes(tree))`.
In particular, traversing the tree in a precise order does not matter at all.
But for more complex use case, writing short recursive code as above does not add a lot of complexity.
```@repl iteration_2
@btime count_nodes_eff_pot(tree)
```
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# Basic concepts
To introduce basic concepts and data structures used in TreeTools, we will use the small tree given by this [Newick string](https://en.wikipedia.org/wiki/Newick_format): `"((A:1,B:1)AB:2,C:3)R;"`.
To obtain a tree from the string, run the following code in a julia REPL:
```@repl basic
using TreeTools
nwk = "((A:1,B:1)AB:2,C:3)R;"
tree = parse_newick_string(nwk)
```
## TreeNode
At the basic level, the tree is represented by a set of linked `TreeNode` structures. A node `n` contains the following information:
- `ancestor(n)` returns the node above `n`. If `n` is the root, `ancestor(n)` returns `nothing`.
- `children(n)` returns an array containing all the nodes below `n`. If `n` is a leaf, `children(n)` is empty.
- `label(n)` returns the label of `n`, which also serves as an identifier of `n` in many TreeTools functions. See the warning below.
- `branch_length(n)` returns the length of the branch above `n` as a `Float64`. If `n` is the root or if it does not have a branch length, it returns `missing`.
- `data(n)` returns data attached to `n`, see [TreeNodeData](@ref)
- `isroot(n)` and `isleaf(n)` are boolean functions with explicit behavior.
!!! warning "Node labels"
TreeTools generally uses the label of nodes as an identifier. This is visible in the `Tree` structure which uses node labels for indexing. Another example is the equality between `TreeNode` objects `n1 == n2`, which simply falls back to `label(n1) == label(n2)`. For this reason, it is **strongly discouraged** to directly change the label of a node, *e.g.* by doing something like `n.label = mylabel`. A function `label!` is provided for that, called like this: `label!(tree, n, mylabel)`. This makes sure that the struct `tree` is informed about the label change.
!!! danger "Loops in the tree"
TreeTools does not actively enforce the fact that trees do not have loops. That is, if you try to, you can perfectly create a state where *e.g.* a node is its own ancestor. This will of course result in a lot of issues. I'd like to enforce the absence of loops at some point, but for now it's up to the user to be careful.
The illustration below is a summary of the `TreeNode` object.
*(figure omitted: schematic summary of the `TreeNode` object)*
Each `TreeNode` can be accessed by directly indexing into the tree:
```@repl basic
AB = tree["AB"]
```
Testing this on the above example would give:
```@repl basic
println("The ancestor of $(label(AB)) is $(label(ancestor(AB))), at distance $(branch_length(AB))")
println("Children of $(label(AB)): ", map(label, children(AB)))
isleaf(AB)
map(isleaf, children(AB))
isroot(ancestor(AB))
```
Changing the value of the branch length or of the data attached to a node is done using the `branch_length!` and `data!` functions:
```@repl basic
branch_length!(AB, 4.)
println("The distance from $(label(AB)) to $(label(ancestor(AB))) is now $(branch_length(AB))")
```
!!! note "Branches"
TreeTools has no structure or type to represent branches.
Since only rooted trees are considered, it is natural for each node to "own" the branch above it.
As a result, informations about branches are expected to be stored on the node below, as is the case for the branch length.
## TreeNodeData
TreeTools gives the possibility to attach data to nodes.
The `TreeNode` type is parametric: if data of type `D` is attached to a node, its type will be `TreeNode{D}`.
Children and ancestor of a `TreeNode{D}` object must also be of the `TreeNode{D}` type.
This implies that *all nodes in the tree must have the same type of data attached to them*.
The data type `D` has to be a subtype of the abstract type `TreeNodeData`.
The creation of nodes with a given data type is controlled by the `node_data_type` keyword argument in functions like `parse_newick_string` or `read_tree` (see the [Reading and writing](@ref) page).
Two subtypes of `TreeNodeData` are already implemented in TreeTools.
- `EmptyData` is a data type containing nothing. Use it if you do not want to attach any data to nodes. It is used by default when creating tree nodes.
- `MiscData` is a wrapper around `Dict`, allowing arbitrary data to be stored
```@repl
using TreeTools # hide
tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;", node_data_type = MiscData)
A = tree["A"]
typeof(A)
dat = data(A)
dat[1] = 2; dat["Hello"] = "world!";
data(A)
data(A)["Hello"]
```
### Custom data type
One can of course create arbitrary subtypes of `TreeNodeData`.
The only requirement for a custom data type `D` is that the call `D()` returns a valid instance of the type.
This is used when initially constructing the tree.
Below is an example of a custom `Sequence` type.
Note that if you actually want to use biological sequences, I encourage the use of the [BioSequences.jl](https://biojulia.net/BioSequences.jl/stable/) package.
```@repl
using TreeTools # hide
Base.@kwdef mutable struct Sequence <: TreeNodeData # Create a custom data type
seq :: String = ""
seq_type :: Symbol = :dna
end
tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;", node_data_type = Sequence)
typeof(tree["C"])
data!(tree["C"], Sequence(seq = "ACGT"))
data(tree["C"]).seq
data(tree["C"]).seq_type
```
## Tree
In principle, access to one `TreeNode` object is enough to perform any operation on the tree.
However, in practice, it is often convenient to see the tree as a concept on its own, and not to see it through one of its nodes.
This is why TreeTools uses the `Tree` structure, which is basically a list of `TreeNode` objects.
`Tree` objects provide some specific methods:
- `nodes` and `leaves` and `internals` respectively return iterators over all nodes, leaves and internal nodes of the tree, in an *arbitrary order*
```@repl basic
println("Internal nodes: ", map(label, internals(tree)))
println("Leaves: ", map(label, leaves(tree)))
```
- the call `tree[label]` will return the tree node with the corresponding label. Presence of a node in tree can be checked with `in(node, tree)` or `in(label, tree)`
```@repl basic
in("AB", tree)
in(tree["AB"], tree)
in("MyCat", tree)
```
- `label` returns the name of the tree. It can be changed the `label!` method
- `root` returns the root of the tree
`Tree`s are constructed automatically from Newick strings when using functions such as `parse_newick_string` or `read_tree` (see [Reading and writing](@ref)).
To construct a `Tree` from a `TreeNode`, use the `node2tree` function. Note that this is only intended to be used on root nodes: a warning will be produced if not.
```@repl
using TreeTools # hide
tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;") # hide
R = tree["R"]
tree2 = node2tree(R)
```
!!! warning "Copying a tree"
The call `tree2 = node2tree(tree.root)` will produce another tree that shares nodes with `tree`.. This is usually not a good way to copy a tree, since the actual tree nodes are not copied. Any modification of the nodes of `tree` will also modify those of `tree2`. To make an independent copy, simply call `copy(tree)`.
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# TreeTools.jl
TreeTools is a package to allow manipulation and simple operations on *rooted* phylogenetic or genealogic trees.
It started off as a dependency of another package [TreeKnit](https://github.com/PierreBarrat/TreeKnit.jl), but can in principle be used for any problem involving trees.
## Installation
You can simply install TreeTools using the julia package manager (if you don't have julia, you can get it from [here](https://julialang.org/downloads/)):
```julia
using Pkg
Pkg.add("TreeTools")
```
You should now be able to use `using TreeTools` from inside julia and follow the rest of the documentation.
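As a quick check that everything works (the Newick string is an arbitrary example):
```julia
using TreeTools
tree = parse_newick_string("(A:3,(B:1,C:1)BC:1);")
map(label, leaves(tree)) # should list the leaves "A", "B" and "C"
```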
!!! info
The documentation is being written: more things to come!
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# Modifying the tree
On some occasions, it can be useful to modify a phylogenetic tree, *e.g.* removing some clades or branches.
TreeTools offers a few methods for this:
- `prune!` and `prunesubtree!` for pruning a clade.
- `graft!` for grafting a node onto a tree.
- `insert!` for inserting an internal node on an existing branch of a tree.
- `delete!` to delete an internal node while keeping the nodes below it.
## Pruning
There are two functions to prune nodes: `prune!` and `prunesubtree!`.
They behave exactly the same except for the return value: `prune!` returns the pruned clade as a `Tree` object, while `prunesubtree!` just returns its root as a `TreeNode` object.
Both also return the previous ancestor of the pruned clade.
Let's see an example
```@repl prunegraft
using TreeTools # hide
tree = parse_newick_string("(A:1.,(B:1.,(X1:0.,X2:0.)X:5.)BX:1.)R;")
```
Let's assume that we realized leaves `X1` and `X2` are really a weird outlier in our tree.
We want to get rid of them.
```@repl prunegraft
tx, a = prune!(tree, "X1", "X2");
tx
tree
```
When called on a list of labels, `prune!` finds the MRCA of the input labels and prunes it from the tree.
Here, `lca(tree, X1, X2)` is internal node `X`, which is removed from the tree.
Note that cutting the branch above `X` will leave the internal node `BX` with a single child.
By default, `prune!` also removes singletons from the input tree.
```@repl prunegraft
map(label, nodes(tree)) # `BX` is not in there
```
This behavior can be changed with the `remove_singletons` keyword argument:
```@repl prunegraft
let
tree = parse_newick_string("(A:1.,(B:1.,(X1:0.,X2:0.)X:5.)BX:1.)R;")
prune!(tree, "X"; remove_singletons=false)
map(label, nodes(tree))
end
```
The `prunesubtree!` method does exactly the same as `prune!`, but returns the root of the pruned clade as a `TreeNode`, without converting it to a `Tree`.
Thus the two calls are equivalent:
```@repl
using TreeTools # hide
tree = parse_newick_string("(A:1.,(B:1.,(X1:0.,X2:0.)X:5.)BX:1.)R;") # hide
tx = prune!(tree, "X")[1] # or ...
tree = parse_newick_string("(A:1.,(B:1.,(X1:0.,X2:0.)X:5.)BX:1.)R;") # hide
tx = let
r, a = prunesubtree!(tree, "X")
node2tree(r)
end
```
## Deleting a node
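Deleting an internal node with `delete!` grafts its children onto its ancestor, adding up branch lengths along the way. A minimal sketch, mirroring the package's test suite:
```julia
using TreeTools
tc = parse_newick_string("((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;")
delete!(tc, "AB")
sort(map(label, children(tc["R"]))) # ["A", "B", "CD"]: the children of "AB" moved up
branch_length(tc["A"]) # 3.0: the branch lengths 1 and 2 were added up
```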
## Grafting
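Grafting a node onto a tree is done with `graft!`. A minimal sketch, mirroring the test suite (note that grafting onto a leaf requires `graft_on_leaf=true`, and the data type of the new node must match that of the tree):
```julia
using TreeTools
tc = parse_newick_string("((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;")
E = TreeNode(label = "E", tau = 4.) # `tau` is the length of the branch above "E"
graft!(tc, E, "AB")
sort(map(label, children(tc["AB"]))) # ["A", "B", "E"]
branch_length(tc["E"]) # 4.0
```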
## Inserting a node
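`insert!` adds a new singleton node on an existing branch, at a given `time` above the child node. A minimal sketch, mirroring the test suite:
```julia
using TreeTools
tc = parse_newick_string("((A:1,B:1)AB:2,(C:1,D:1)CD:2)R;")
s = insert!(tc, "A"; time = 0.25) # new singleton node 0.25 above "A"
ancestor(tc["A"]) == s # true
branch_length(s) # 0.75: the remainder of the original branch of length 1
```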
## `TreeNode` level functions
All the methods above take a `Tree` as a first argument.
As described in [Basic concepts](@ref), the actual information about the tree is contained in `TreeNode` objects, while the `Tree` is basically a wrapper around `TreeNode`s.
Thus, a method like `prune!` has to do two things:
1. cut the ancestry relation between two `TreeNode` objects.
2. update the `Tree` object in a consistent way.
The first part is where the "actual" pruning happens, and is done by the `prunenode!` function, which just takes a single `TreeNode` as input.
TreeTools has similar "`TreeNode` level" methods (not exported):
- `prunenode!(n::TreeNode)`: cut the relation between `n` and its ancestor
- `graftnode!(r::TreeNode, n::TreeNode)`: graft `n` onto `r`.
- `delete_node!(n::TreeNode)`: cut the relation between `n` and its ancestor `a`, but re-graft the children of `n` onto `a`.
- `insert_node!(c, a, s, t)`: insert `s::TreeNode` between `c` and `a`, at height `t` on the branch.
These methods only act on `TreeNode` objects and do not care about the consistency with the `Tree`.
In most cases, it's more practical to call the `Tree` level methods.
However, if speed is important, it might be better to use them.
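For instance, a minimal sketch at the `TreeNode` level, mirroring the test suite (`prunenode!` returns the pruned node as its first value):
```julia
using TreeTools
tree = parse_newick_string("(A:1.,(B:1.,(X1:0.,X2:0.)X:5.)BX:1.)R;")
X = TreeTools.prunenode!(tree["X"])[1] # cut "X" from its ancestor, at the node level
# NB: `tree` is now out of sync with its nodes; prefer `prune!`/`prunesubtree!` in general
```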
## Other useful functions
### Remove singletons
`remove_internal_singletons!`
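A minimal sketch, mirroring the test suite: internal singletons are removed, and leaf-to-leaf distances are preserved.
```julia
using TreeTools
t = parse_newick_string("(((C:1,((D2:1,D3:1)D1:1)D:1)B:1)A:1)R:1;", strict_check=false)
TreeTools.remove_internal_singletons!(t)
count(n -> length(children(n)) == 1, t) # 0: no singletons left
```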
### Delete insignificant branches
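To delete all internal branches shorter than some cutoff (grafting the children of the removed nodes one level up), the test suite uses `TreeTools.delete_branches!` with a predicate. A minimal sketch, with an arbitrary cutoff of 0.1:
```julia
using TreeTools
t = parse_newick_string("((A:1,B:1)AB:0.01,C:3)R;")
TreeTools.delete_branches!(n -> branch_length(n) < 0.1, t)
sort(map(label, children(t.root))) # the short branch above "AB" is gone; "A" and "B" moved up
```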
==== TreeTools v0.6.13 (MIT) | docs | https://github.com/PierreBarrat/TreeTools.jl.git ====
# Useful functions
## Copy, convert
To make an independent copy of a tree, simply call `copy`.
```@repl copy
using TreeTools # hide
tree = parse_newick_string("((A:1,B:1)AB:2,C:3)R;");
tree_copy = copy(tree);
label!(tree_copy, "A", "Alfred") # relabel node A
tree_copy
tree
```
The `convert` function allows one to change the data type attached to nodes:
```@repl copy
typeof(tree)
data(tree["A"])
tree_with_data = convert(Tree{MiscData}, tree);
typeof(tree_with_data)
data(tree_with_data["A"])["Hello"] = " world!"
```
## MRCA, divergence time
The most recent common ancestor between two nodes or more is found using the function `lca`:
```@repl copy
lca(tree["A"], tree["B"]) # simplest form
lca(tree, "A", "B") # lca(tree, labels...)
lca(tree, "A", "B", "C") # takes a variable number of labels as input
lca(tree, "A", "AB") # This is not restricted to leaves
```
To compute the distance or divergence time between two tree nodes, use `distance`.
The `topological` keyword allows computing the number of branches separating two nodes.
```@repl copy
distance(tree, "A", "C")
distance(tree["A"], tree["C"]; topological=true)
```
The function `is_ancestor` tells you if one node is found among the ancestors of another.
This uses equality between `TreeNode`, which simply compares labels, see [Basic concepts](@ref)
```@repl copy
is_ancestor(tree, "A", "C")
is_ancestor(tree, "R", "A")
```
## Distance between trees
The `distance` function also lets you compute the distance between two trees.
For now, only the [Robinson-Foulds distance](https://en.wikipedia.org/wiki/Robinson%E2%80%93Foulds_metric) is implemented, but more could come.
```@repl
using TreeTools # hide
t1 = parse_newick_string("((A,B,D),C);")
t2 = parse_newick_string("((A,(B,D)),C);")
distance(t1, t2)
distance(t1, t2; scale=true)
```
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
using EasyFFTs
using Documenter
DocMeta.setdocmeta!(EasyFFTs, :DocTestSetup, :(using EasyFFTs); recursive=true)
makedocs(;
modules=[EasyFFTs],
authors="KronosTheLate",
repo="https://github.com/KronosTheLate/EasyFFTs.jl/blob/{commit}{path}#{line}",
sitename="EasyFFTs.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://KronosTheLate.github.io/EasyFFTs.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/KronosTheLate/EasyFFTs.jl",
devbranch="main",
)
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
import Base: iterate, getindex, firstindex, lastindex, length, show
using Term
"""
EasyFFT(frequencies, response)
A type to hold the response and corresponding frequencies of
a discrete fourier transform.
Has the fields `freq` and `resp`, which can be accessed by dot syntax.
Mainly intended to be constructed through [`easyfft`](@ref).
"""
struct EasyFFT
freq::Vector{Float64}
resp::Vector{Complex{Float64}}
end
length(ef::EasyFFT) = length(ef.resp)
Base.getindex(ef::EasyFFT, i) = getindex((ef.freq, ef.resp), i)
firstindex(ef::EasyFFT) = 1
lastindex(ef::EasyFFT) = 2
# Allow (f, r) = easyfft(...)
Base.iterate(ef::EasyFFT, i=1) = iterate((;freq=ef.freq, resp=ef.resp), i)
function show(io::IO, ef::EasyFFT)
dominant_frequency_indices = finddomfreq(ef)
table = Term.Table(
hcat(round.(ef.freq[dominant_frequency_indices], sigdigits=5), round.(abs.(ef.resp[dominant_frequency_indices]), sigdigits=5)),
header=["Frequency", "Magnitude"]
)
print(io, "EasyFFT with ", length(ef), " samples.\nDominant component(s):", table)
end
# Convenience functions:
"""
magnitude(ef::EasyFFT)
The absolute values of the response vector.
See also: [`phase`](@ref), [`phased`](@ref)
"""
magnitude(ef::EasyFFT) = abs.(ef.resp)
export magnitude
"""
phase(ef::EasyFFT)
The phase of the response vector.
See also: [`magnitude`](@ref), [`phased`](@ref)
"""
phase(ef::EasyFFT) = angle.(ef.resp)
export phase
"""
phased(ef::EasyFFT)
The phase of the response vector in degrees.
See also: [`phase`](@ref), [`magnitude`](@ref)
"""
phased(ef::EasyFFT) = rad2deg.(angle.(ef.resp))
export phased
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
# pkg"activate /home/dennishb/GeekyStuff/Julia/Packages/EasyFFTs"
module EasyFFTs
using Reexport
@reexport using FFTW
include("EasyFFT_type.jl")
include("plotting.jl")
include("utils.jl")
"""
easyfft(s) -> EasyFFT
easyfft(s, fs) -> EasyFFT
Compute the Discrete Fourier Transform (DFT) of the
input vector `s`, scaling by `1/length(s)` by default.
This function uses FFTW.rfft if `s` has real elements,
and FFTW.fft otherwise.
Note that if `s` has real elements, the one-side spectrum
is returned. This means that the amplitude of the frequencies
is doubled, excluding the frequency=0 component. To get the full symmetric spectrum for real signals, use [`easymirror`](@ref), or change the element type of the signal by something like `easyfft(signal.|>ComplexF64)`.
The output is an `EasyFFT` object, with fields `freq` and `resp` containing the frequences and
response respectivly.
# Keyword arguments
- `scalebylength::Bool`: determines if the response is scaled by its length. Defaults to `true`.
# Examples
```jldoctest
julia> using EasyFFTs
julia> fs = 100; # 100 samples per second
julia> timestamps = range(0, 1, step = 1/fs);
julia> s = sin.(2π * 2 * timestamps); # sine of frequency = 2 Hz
julia> easyfft(s, fs)
EasyFFT with 51 samples.
Dominant component(s):
Frequency │ Magnitude
╺━━━━━━━━━━━━━┿━━━━━━━━━━━━━╸
1.9802 │ 0.98461
julia> easyfft(s) # `fs` defaults to 1
EasyFFT with 51 samples.
Dominant component(s):
Frequency │ Magnitude
╺━━━━━━━━━━━━━┿━━━━━━━━━━━━━╸
0.019802 │ 0.98461
```
"""
function easyfft end
export easyfft
function easyfft(s::AbstractVector, fs::Real=1.0; scalebylength=true)
resp = FFTW.fft(s)
if scalebylength
resp ./= length(s)
end
freq = FFTW.fftshift(FFTW.fftfreq(length(s), fs))
resp = FFTW.fftshift(resp)
return EasyFFT(freq, resp)
end
function easyfft(s::AbstractVector{<:Real}, fs::Real=1.0; scalebylength=true)
resp = FFTW.rfft(s)
resp[1] /= 2
resp .*= 2
if scalebylength
resp ./= length(s)
end
freq = FFTW.rfftfreq(length(s), fs)
return EasyFFT(freq, resp)
end
end #module
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
using RecipesBase
# Plot recipe - so plot(easyfft(y, f)) does the right thing
@recipe function f(ef::EasyFFTs.EasyFFT)
layout := (2, 1)
link := :x
if length(ef.freq) ≥ 100
nothing # because stem plots are heavy/slow when having many points
else
seriestype --> :stem
markershape --> :circle
end
@series begin
yguide := "Magnitude"
subplot := 1
label := nothing
ef.freq, magnitude(ef)
end
@series begin
xguide := "Frequency"
yguide := "Phase"
subplot := 2
label := nothing
ef.freq, phase(ef)
end
end
end
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
"""
easymirror(v::AbstractVector) -> Vector
easymirror(ef::EasyFFT) -> Vector
Given a one-sided spectrum, return a two-sided version
by "mirroring" about 0. This convenience function also
ajusts the amplitude of `v`, or the amplitudes of `ef.resp`
appropriately.
# Examples
```jldoctest
julia> using EasyFFTs
julia> fs = 100; # 100 samples per second
julia> timestamps = range(0, 1, step = 1/fs);
julia> s = sin.(2π * 2 * timestamps); # sine of frequency = 2 Hz
julia> ef = easyfft(s, fs);
julia> easymirror(ef)
EasyFFT with 101 samples.
Dominant component(s):
Frequency │ Magnitude
╺━━━━━━━━━━━━━┿━━━━━━━━━━━━━╸
-1.9802 │ 0.4923
╶─────────────┼─────────────╴
1.9802 │ 0.4923
```
"""
function easymirror end
export easymirror
function easymirror(s::AbstractVector)
mirrored = FFTW.fftshift(vcat(s ./ 2, reverse(s[begin+1:end] ./ 2)))
mirrored[end÷2+1] *= 2
return mirrored
end
function easymirror(input::EasyFFT)
freq = FFTW.fftshift(vcat(input.freq, reverse(input.freq[begin+1:end]) .* -1))
resp = easymirror(input.resp)
return EasyFFT(freq, resp)
end
"""
finddomfreq(ef) -> Vector
finddomfreq(ef; n=5, t=0.1, window=length(ef)//50) -> Vector
Find and return a vector containing the indices of the
dominant frequency components in `ef`.
This function is used internally in the `show` method for `EasyFFT`.
# Keyword arguments
- `n`: The maximal number of dominant peaks to find. Defaults to `5`
- `t`: Minimal magnitude as fraction of maximal magnitude. Defaults to `0.1`
- `window`: Minimal difference in index between any larger peak. Defaults to `length(ef)//50`
See also: [`domfreq`](@ref)
"""
function finddomfreq(ef::EasyFFT; n=5, t=0.1, window=length(ef)//50)
absresp = abs.(ef.resp)
threshold = sum((1.0-t, t).*extrema(absresp))
maxindices = sortperm(absresp; rev=true)
peaks = Int64[]
for i in maxindices
length(peaks) >= n && break
absresp[i] < threshold && break
any(i-window < p < i+window for p in peaks) && continue
push!(peaks, i)
end
return peaks
end
export finddomfreq
"""
domfreq(ef) -> Vector
domfreq(ef, n=5, t=0.1, window=length(ef)//50) -> Vector
Find and return a vector containing the
dominant frequencies in `ef`.
# Keyword arguments
- `n`: The maximal number of dominant peaks to find. Defaults to `5`
- `t`: Minimal magnitude as fraction of maximal magnitude. Defaults to `0.1`
- `window`: Minimal difference in index between any larger peak. Defaults to `length(ef)//50`
See also: [`finddomfreq`](@ref)
"""
function domfreq(ef::EasyFFT; n=5, t=0.1, window=length(ef)//50)
peaks = finddomfreq(ef; n, t, window)
return ef.freq[peaks]
end
export domfreq
"""
response_at(ef, f) -> NamedTuple
response_at(ef, fs) -> NamedTuple
Find the response at the frequency closest
to a number `f`, or closest to each frequency in the
vector `fs`. The first argument `ef` should be
of type `EasyFFT`, as returned by `easyfft`.
The returned object is a named tuple, fields "freq" and "resp".
The values match the type of the second argument (number for
`f`, vector for `fs`).
The reason for returning the frequencies is that they are
likely to differ from the given `f` (or values in `fs`), as
the discretized frequencies will often not match the given frequencies.
# Examples
Getting the DC component of the spectrum:
```
julia> response_at(easyfft(rand(1000)), 0)
(freq = 0.0, resp = 0.49191028527567726 + 0.0im)
```
The requested frequency does not align
perfectly with discretized frequencies:
```
julia> response_at(easyfft(rand(1000)), 0.4415)
(freq = 0.441, resp = 0.003584422218085957 - 0.0025392417679877704im)
```
Response at multiple frequencies (long output supressed):
```
julia> response_at(easyfft(rand(1000)), [0, 0.1, 0.11, 0.111, 0.1111]);
```
"""
function response_at(ef::EasyFFT, f::Real)
i = firstindex(ef.freq)
f ≤ first(ef.freq) && return (freq=first(ef.freq), resp=(first(ef.resp)))
f ≥ last(ef.freq) && return (freq=last(ef.freq), resp=(last(ef.resp)))
while ef.freq[i]<f
i+=1
end
if abs(f-ef.freq[i]) < abs(f-ef.freq[i-1])
return (freq=ef.freq[i], resp=ef.resp[i])
else
return (freq=ef.freq[i-1], resp=ef.resp[i-1])
end
end
function response_at(ef::EasyFFT, fs::AbstractVector{<:Real})
freq = Vector{Float64}(undef, length(fs))
resp = Vector{ComplexF64}(undef, length(fs))
for i in eachindex(fs)
freq[i], resp[i] = response_at(ef, fs[i])
end
return (;freq, resp)
end
export response_at
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
using Pkg
Pkg.activate(joinpath(homedir(), ".julia", "environments", "v1.8"))
using EasyFFTs
Pkg.status("EasyFFTs")
fs = 100;
duration = 1;
timestamps = range(0, duration, step=1 / fs);
f1 = 5 ; A1 = 2;
f2 = 10; A2 = 3;
s = @. A1 * sin(f1 * 2π * timestamps) + A2 * sin(f2 * 2π * timestamps);
ef = easyfft(s, fs)
Pkg.offline(true)
try
using MakieCore
catch e
Pkg.add("MakieCore")
using MakieCore
end
using GLMakie
Makie.convert_arguments(P::MakieCore.PointBased, ef::EasyFFTs.EasyFFT) = (decompose(Point2f, ef.freq), decompose(Point2f, magnitude(ef)))
Makie.convert_arguments(P::MakieCore.PointBased, ef::EasyFFTs.EasyFFT) = (ef.freq, magnitude(ef))
plottype(::EasyFFTs.EasyFFT) = Stem
plot(ef)
##
using Plots
plot(ef)
##? Makie
using WGLMakie
let
fs = 1000
duration = 1
ts = range(0, duration, step=1 / fs)
f = 5
A = 2
s1 = @. A * sin(f * 2π * ts)
s2 = @. 1 * sin(2f * 2π * ts)
s = s1 .+ s2
fig, ax, _ = stem(ts, s, axis=(title="Signal",))
s_fft = easyfft(s, fs)
stem(fig[2, 1], s_fft.freqs, s_fft.resp .|> abs, axis=(title="FFT of signal",))
# DataInspector()
fig
end
==== EasyFFTs v0.3.0 (MIT) | code | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
using EasyFFTs
using Test
@testset "EasyFFTs.jl" begin
# Write your tests here.
end
==== EasyFFTs v0.3.0 (MIT) | docs | https://github.com/KronosTheLate/EasyFFTs.jl.git ====
# EasyFFTs
[Stable docs](https://KronosTheLate.github.io/EasyFFTs.jl/stable/)
[Dev docs](https://KronosTheLate.github.io/EasyFFTs.jl/dev/)
[Build status](https://github.com/KronosTheLate/EasyFFTs.jl/actions/workflows/CI.yml?query=branch%3Amain)
[Coverage](https://codecov.io/gh/KronosTheLate/EasyFFTs.jl)
Are you sick and tired of always doing the same preprocessing before you can visualize your fft? Look no further. EasyFFTs aims to automate common preprocessing of fft's, aimed at visual inspection of the frequency spectrum. The main workhorse of this package is a very simple function `easyfft` that modifies the output of `fft` and `rfft` from [FFTW.jl](https://github.com/JuliaMath/FFTW.jl) slightly.
This function offers four main benefits to using the FFTW functions directly:
- The output is scaled by default, making the absolute value of the response
correspond directly to the amplitude of the sinusoids that make up the signal.
- Simple and short syntax for getting the associated frequencies from sample frequency.
- Frequencies and response are sorted by increasing frequency (if you have ever used `fftshift` you know what I am talking about)
- `rfft` is automatically called for real input signals, avoiding
the common mistake of always using `fft`. This makes it so that half of the symmetric
spectrum is not computed, and not returned. This reduces computation and allocations, without loss of information.
If you want both sides of the spectrum, use `easymirror`, with usage demonstrated in the docstring.
In case you also want to compute the "as the mathematicians define it" [Discrete Fourier Transform](https://en.wikipedia.org/wiki/Discrete_Fourier_transform), this package reexports everything exported from FFTW, so that `using EasyFFTs; fft(rand(100))` is equivalent to `using FFTW; fft(rand(100))`. The only difference between `using EasyFFTs` and `using FFTW` is therefore that EasyFFTs exports a few extra functions that mainly facilitate visualization of the spectrum.
# Examples
It is easier to explain by example, so view the examples below as a light introduction to all functions defined in `EasyFFTs`, and how to use them.
## Setup
First, we need something to analyze. Let's define some sample-timestamps:
```julia
julia> using EasyFFTs
julia> fs = 100; # sampling frequency
julia> timestamps = range(0, 1, step = 1 / fs); # One second signal duration
```
We then make a signal `s` composed of 2 pure sinusoids with frequencies of 5 Hz and 10 Hz, sampled at `timestamps`:
```julia
julia> f1 = 5 ; A1 = 2;
julia> f2 = 10; A2 = 3;
julia> s = @. A1 * sin(f1 * 2π * timestamps) + A2 * sin(f2 * 2π * timestamps);
```
## How to use `easyfft`
Lets now use `easyfft`, and bind the output to `ef`:
```julia
julia> ef = easyfft(s, fs)
EasyFFT with 51 samples.
Dominant component(s):
Frequency │ Magnitude
╺━━━━━━━━━━━━━┿━━━━━━━━━━━━━╸
9.901 │ 2.8796
╶─────────────┼─────────────╴
4.9505 │ 1.9997
```
The output is of the type `EasyFFT`, so to understand the output (bound to `ef`), we have to understand the type.
It is not complicated at all. In fact, it essentially acts as a `NamedTuple`.
The reason for wrapping the output in a new type is the pretty printing seen above, and
automatic plotting. Note that the pretty printing rounds values to 5 significant digits.
## The `EasyFFT` type
The type `EasyFFT` contains frequencies and the corresponding (complex) responses.
There are 3 different ways to access the frequencies and responses, just like for named tuples.
The first way is "dot syntax":
```julia
julia> ef.freq
51-element Vector{Float64}:
0.0
0.9900990099009901
⋮
48.51485148514851
49.504950495049506
julia> ef.resp
51-element Vector{ComplexF64}:
-9.578394722256253e-17 + 0.0im
0.00042622566734221867 - 0.013698436692159435im
⋮
-0.025328817492520122 + 0.0011826329422999651im
-0.02532460367843232 + 0.00039389110927144075im
```
Should you ever forget that you should use `freq` and `resp`, the Base Julia function `propertynames` will remind you.
```julia
julia> propertynames(ef)
(:freq, :resp)
```
The second method is iteration, which allows for [destructuring assignment](https://docs.julialang.org/en/v1/manual/functions/#destructuring-assignment) into separate variables:
```julia
julia> frequencies, response = easyfft(s, fs);
julia> ef.freq == frequencies
true
julia> ef.resp == response
true
```
The third and final way of accessing the frequencies and response is indexing:
```julia
julia> ef.freq == frequencies == ef[1]
true
julia> ef.resp == response == ef[2]
true
```
## Convenience functions
Convenience functions are defined to extract the magnitude and phase of the response:
```julia
julia> magnitude(ef) == abs.(ef.resp)
true
julia> phase(ef) == angle.(ef.resp)
true
```
Appending a `d` to `phase` will get you the angle in degrees, analogous to `sin` and `sind`:
```julia
julia> phased(ef) == rad2deg.(phase(ef))
true
```
We saw that objects of the type `EasyFFT` are displayed
as a table of the dominant frequencies. The functions used
to find the dominant values are exported.
We can get the dominant frequencies like so:
```julia
julia> domfreq(ef)
2-element Vector{Float64}:
9.900990099009901
4.9504950495049505
```
And their indices like so:
```julia
julia> finddomfreq(ef)
2-element Vector{Int64}:
11
6
```
Sometimes we want to know the response at a specific frequency. This
functionality is provided by the `response_at` function:
```julia
julia> response_at(ef, 5)
(freq = 4.9504950495049505, resp = 0.3097558587965989 - 1.9756025627302725im)
julia> response_at(ef, [5, 10])
(freq = [4.9504950495049505, 9.900990099009901], resp = ComplexF64[0.3097558587965989 - 1.9756025627302725im, 0.881335139504854 - 2.741456352889268im])
```
Finally, you can get the symmetric spectrum using `easymirror`:
```julia
julia> easymirror(ef)
EasyFFT with 101 samples.
Dominant component(s):
Frequency │ Magnitude
╺━━━━━━━━━━━━━┿━━━━━━━━━━━━━╸
-9.901 │ 1.4398
╶─────────────┼─────────────╴
9.901 │ 1.4398
╶─────────────┼─────────────╴
-4.9505 │ 0.99987
╶─────────────┼─────────────╴
4.9505 │ 0.99987
```
The amplitudes are adjusted correctly, halving the magnitude of
all components except for the 0 Hz component.
That wraps up the examples for the functions defined in `EasyFFTs`. Each function has a docstring with a lot more detail about the method signatures and arguments, so check that out if you have questions. If anything is still unclear, please [open up an issue](https://github.com/KronosTheLate/EasyFFTs.jl/issues/new).
## Plotting
Because the returned value is of a custom type, automatic plot recipes can be defined. This has been done for [Plots.jl](https://github.com/JuliaPlots/Plots.jl):
```julia
using Plots
plot(ef)
```

For less than 100 datapoints, the plot defaults to a stem plot, which is the most appropriate for showing discrete quantities.
However, stem plots get messy and slow with too many points, which is why the default changes to a line plot if there
are 100 datapoints or more. Change the keywords `seriestype` and `markershape` in the call to `plot` to customize the behavior. | EasyFFTs | https://github.com/KronosTheLate/EasyFFTs.jl.git |
|
[
"MIT"
] | 0.3.0 | 1cbd5dfc4dd0146bdbf15c601c3aa0ad7950b654 | docs | 181 | ```@meta
CurrentModule = EasyFFTs
```
# EasyFFTs
Documentation for [EasyFFTs](https://github.com/KronosTheLate/EasyFFTs.jl).
```@index
```
```@autodocs
Modules = [EasyFFTs]
```
| EasyFFTs | https://github.com/KronosTheLate/EasyFFTs.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 459 | using OndaBatches
using Documenter
makedocs(; modules=[OndaBatches],
sitename="OndaBatches",
authors="Beacon Biosignals and other contributors",
pages=["Documentation" => "index.md"],
strict=Documenter.except(:missing_docs),
format=Documenter.HTML(; prettyurls=true, ansicolor=true))
deploydocs(; repo="github.com/beacon-biosignals/OndaBatches.jl.git",
push_preview=true,
devbranch="main")
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 10642 | # In this code tour, we will outline some of the main functions of OndaBatches.jl with
# examples of how they might be used in a machine learning project in a distributed environment.
# Before proceeding, we recommend completing the code tours of Legolas.jl and Onda.jl:
# https://github.com/beacon-biosignals/Legolas.jl/blob/main/examples/tour.jl
# https://github.com/beacon-biosignals/Onda.jl/blob/main/examples/tour.jl
#
# Why OndaBatches?
# We've seen how Onda.jl defines a set of interrelated, but distinct, Legolas.jl schemas for
# working with LPCM-encoded data serialised (usually) in Arrow format, namely: onda.signal
# and onda.annotation.
# In practice, however, these schemas are usually extended further to encode information
# relevant to a particular use-case, e.g. defining a label component for an annotation.
#
# To effectively use these data in a machine learning context, where models are trained to,
# e.g., infer the annotation label for a given section of a signal, we need to have some
# tooling in place first:
# - We need to be able to associate existing labels to the corresponding signals to create a
#   holistic dataset.
# - We need to be able to systematically construct batches of training / evaluation data
# from this dataset while being flexible enough in our sampling mechanism so that we can
# tailor the properties of the outputs.
# - Finally, we need to have this batching mechanism be carefully controlled by a scheduler
# when working in a distributed environment.
#
# OndaBatches.jl aims to serve these needs, which we will explore in detail below.
# For this walkthrough we will use the testdataset.jl script as a source of data.
# You may not have access to this data so use whichever source is convenient if you want to
# work through this code interactively.
using DataFrames
using Dates
using Distributed
using Legolas
using OndaBatches
using Onda
using Random
using Test
using TimeSpans
const VALID_STAGES = ("wake", "nrem1", "nrem2", "nrem3", "rem", "no_stage")
const SLEEP_STAGE_INDEX = Dict(s => UInt8(i) for (i, s) in enumerate(VALID_STAGES))
# Load the necessary signals and annotations tables - you may have to edit this file path
# depending on where your instantiated project directory is.
include("test/testdataset.jl")
signals = DataFrame(Legolas.read(uncompressed_signals_path))
annotations = DataFrame(Legolas.read(stages_path))
###
### LabeledSignals
###
# LabeledSignalV2 extends the SignalV2 schema to incorporate the labels of a given signal,
# which are represented as either another onda.signal or onda.sample, by marrying the
# underlying signals and annotations across overlapping time spans.
# Note that the labels must be dense and contiguous.
# This constraint is a legacy of the origins of this code and may not be applicable to all
# use-cases.
# Preprocess the annotations table
annotations = sort_and_trim_spans(annotations, :recording; epoch=Second(30))
# This discards unnecessary fields such as :author_X, :source_X, and :display_icon_shortcode.
# Also removes :stage in favour of adding :labels, which get encoded as above and stored as
# onda.samples, and :label_span.
labeled_signals = label_signals(signals,
annotations,
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
@test eltype(labeled_signals.labels) <: Onda.Samples
@test eltype(labeled_signals.label_span) <: TimeSpan
# We can now load and inspect the underlying Samples data for one of our labeled signals.
# This is given as a tuple of Samples: one for the signal and the other the labels.
# See Onda.jl for working with Samples objects.
ls1 = LabeledSignalV2(labeled_signals[1, :])
s1, l1 = load_labeled_signal(ls1)
@test s1 isa Samples
@test l1 isa Samples
# Of note is that a LabeledSignal describes two time spans, both of which are described
# relative to the _start of the recording_:
# - `span`: describing the time span of the signal
# - `label_span`: describing the time span of the labels
#
# Note that these spans are not necessarily equal however the signal `span` must entirely
# encapsulate the `label_span`.
@test overlaps(ls1.span, ls1.label_span)
# It is also possible to select a sub-span of the LabeledSignal.
# Here we extract a sub-span that starts at 2hrs into the recording and ends at 3hrs.
sub_span = TimeSpan(Minute(120), Minute(180))
ls2 = LabeledSignalV2(sub_label_span(ls1, sub_span))
s2, l2 = load_labeled_signal(ls2)
# XXX: is this a bug? shouldn't signal span also be == sub_span?
# the size of the data below seems to suggest it should?
@test ls2.span == ls1.span
@test ls2.label_span == sub_span
@test size(s1.data, 2) > size(s2.data, 2)
@test size(l1.data, 2) > size(l2.data, 2)
###
### RandomBatches
###
# Now that we are able to construct a holistic dataset, in which the labels for each signal
# have been assigned, we want to be able to prescribe and load batches of these data for
# training and evaluating a machine learning model.
#
# The RandomBatches type specifies one possible iteration scheme for sampling from a
# collection of labeled signals to generate these batches in pseudo-random fashion.
#
# Specifically, RandomBatches constructs batches via the following mechanism:
# - randomly sample over the signals
# - for a given signal, randomly select a label
# - for a given label, randomly select a segment of the signal with that label
# - for a given segment, randomly select the number of required channels
#
# Optionally, we can also weight the sampling of the signals and labels.
#
# In this example we are specifying:
# - 1 channel per sample
# - 3 samples per batch
# - Each sample taken over a 60 second window
# - All signals and labels uniformly weighted
batches = RandomBatches(labeled_signals, nothing, nothing, 1, 3, Second(60))
# We can now draw down batches of samples from the RandomBatches
# Calling iterate_batch returns a batch item and the new state after iterating once.
init_state = MersenneTwister(1)
item, new_state = iterate_batch_item(batches, init_state)
@test item isa BatchItemV2
@test new_state isa AbstractRNG
# Here, we return and load a single batch item
# Note that the labels are not necessarily sampled at the same resolution as the signal.
# This is because the labels are sampled at 0.033 Hz (1 every 30 seconds) while the signal
# is sampled at 128 Hz.
x, y = materialize_batch_item(item)
@test size(x) == (1, 7680) # 1 channel, 60 seconds @ 128Hz resolution
@test size(y) == (1, 2) # 2 labels, one for each 30 second segment
# Additionally, we can draw down the entire batch at once
init_state = MersenneTwister(1)
batch, new_state = iterate_batch(batches, init_state)
X, Y = materialize_batch(batch)
@test size(X) == (1, 7680, 3) # we now draw 3 samples and concatenate along the 3rd dimension
@test size(Y) == (1, 2, 3)
# Since we provided the same initial state - the first items in X and Y are x and y above.
@test X[:, :, 1] == x
@test Y[:, :, 1] == y
# Note that we can continue to draw as many batches as we like by repeatedly passing the
# new_state back to iterate_batch, much like Julia's Iterator interface
# https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-iteration
# In this way, we can dynamically allocate and load batches of data depending on the needs
# of the model and infrastructure resources that are available.
batch, new_state = iterate_batch(batches, new_state)
X2, Y2 = materialize_batch(batch)
@test X2 != X
@test Y2 != Y
###
### Batcher
###
# Now that we have the mechanism for generating pseudo-random batches, we need to have these
# dynamically allocated and loaded in parallel across julia processes. This will enable us
# to scale our training process across a cluster.
# Let's start by adding a few processes and loading the necessary packages
addprocs(4)
batch_workers = workers()
batch_manager = popfirst!(batch_workers) # https://github.com/beacon-biosignals/OndaBatches.jl/issues/25
@everywhere begin
using Pkg
Pkg.activate(@__DIR__)
using OndaBatches
end
# A Batcher governs the allocation of batch processing on a distributed environment.
# We'll provide the RandomBatcher defined above.
batcher = Batcher(batch_manager, batch_workers, batches; start=false)
# First let's check the initialised batcher hasn't started
@test get_status(batcher) == :stopped
# Now let's start the batcher with a fresh initial state
init_state = MersenneTwister(1)
start!(batcher, init_state)
# It should now be running and ready to allocate batches across nodes
@test get_status(batcher) == :running
# X3, Y3 are the same batches we sampled above.
# Similarly, we can keep sampling from this by repeatedly passing in the new_state
(X3, Y3), new_state = take!(batcher, init_state)
@test X3 == X
@test Y3 == Y
stop!(batcher)
@test get_status(batcher) == :closed
###
### A Realistic Example
###
# In practice, a batcher is launched from a process performing some computationally heavy
# task, such as training a neural network. Meanwhile, the batch_manager is run on a
# lightweight process that simply constructs the batches and allocates them to high-RAM
# batch_workers who materialize and store the associated data.
# The training worker then `takes!` these batches in sequence and updates the model based on
# the data + labels.
# In summary: a typical architecture for using this package might involve:
# - 1 root process
# - 1 train_worker GPU
# - N batch_worker CPUs
# - 1 batch_manager CPU
# spawn the training worker and allocate any resources it needs to train the model
train_worker = only(addprocs(1))
@everywhere train_worker begin
using OndaBatches
using Random
end
# Restart the batcher...
init_state = MersenneTwister(1)
start!(batcher, init_state)
# Issue a remotecall to `train_worker` which will serialize the code inside the do-block.
# In practice, this would be training a model but as a MWE we'll instead just compute a
# statistic on the batch results.
model_fut = remotecall(train_worker, batcher) do batcher
results = Float64[] # placeholder for some initialized model
batch_state = init_state
for batch_i in 1:10
(x, y), batch_state = take!(batcher, batch_state)
# Here we would implement a function that trains the model based on data + labels.
# Instead we just push to the results vector to simulate it.
push!(results, sum(x))
end
return results
end
# We can now fetch the result of the Future returned by the remotecall
model = fetch(model_fut)
@test model isa Vector{Float64}
stop!(batcher)
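# Finally, release the worker processes spawned for this tour (an optional
# cleanup sketch; assumes nothing else is still using these processes).
rmprocs(train_worker, batch_manager, batch_workers...)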
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 685 | module OndaBatches
using AlignedSpans
using AWSS3
using DataFrames
using Dates
using Distributed
using Legolas: Legolas, @schema, @version
using Onda: Onda, Samples, SignalV2, SamplesInfoV2
using StatsBase
using Tables
using TimeSpans
using Tables: rowmerge
include("utils.jl")
include("labeled_signal.jl")
export LabeledSignalV2, sub_label_span, label_signals, load_labeled_signal,
store_labels, sort_and_trim_spans
include("materialize_batch.jl")
export BatchItemV2, materialize_batch_item, materialize_batch
include("iterate_batch.jl")
export RandomBatches, iterate_batch_item, iterate_batch
include("batch_services.jl")
export Batcher, start!, stop!, get_status
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 24372 | #####
##### Single-worker batching
#####
"""
start_batching(channel::RemoteChannel, batches, state)
Begin loading batches onto a `RemoteChannel` based on batches (e.g.,
[`RandomBatches`](@ref)) and initial state.
This will run an infinite loop which loads one batch at a time with
[`iterate_batch`](@ref) and [`materialize_batch`](@ref), and `put!`s the
resulting `(x, y)` and `state` values into the channel.
Batching continues until the channel is closed or an error is encountered. When
the channel is closed, the `InvalidStateException` is caught and `:closed` is
returned from the function. Other errors are rethrown. If somehow the loop is
exited without an error (honestly not sure how this would happen), `:done` is
returned.
This function is intended to be used with `@async` or `remotecall` (e.g., in a
[`Batcher`](@ref)); the `Future` that `remotecall` returns can be monitored
with [`get_status`](@ref).
Calls to [`materialize_batch`](@ref) are wrapped in `Base.retry` to add some
measure of resiliency to transient network interruptions.
Runs on the batching manager (i.e. `Batcher.manager`), but only when
`Batcher.workers` is empty.
"""
function start_batching(channel::RemoteChannel, batches, state)
@debug "Starting batching..."
closed_msg = "batch channel closed, stopping batching"
return with_channel(channel; closed_msg) do channel
batch_counter = 1
# setup: grab prev state and next batch+state to start
prev_state = deepcopy(state)
next = iterate_batch(batches, state)
# iteration loop: go until iterate_batch returns nothing
while next !== nothing
# unpack iterated pair
batch, state = next
# probably (xx, yy) but who knows
batch_out = retry_materialize_batch(batch; retries=4)
next_out = (batch_out, deepcopy(state))
@debug "loaded batch $(batch_counter):" batch state
put!(channel, (next_out, prev_state))
@debug "put batch on channel:" batch state
batch_counter += 1
# next iteration: grab prev state, iterate next pair
prev_state = deepcopy(state)
next = iterate_batch(batches, state)
end
# we need this last thing in order to support synchronization.
# consumers call `take!` on the batcher, which first fetches the
# previous state, and if it's consistent with the requested state,
# proceeds to fetch the next batch+state.
put!(channel, (nothing, prev_state))
close(channel)
return :done
end
end
#####
##### Multi-worker batching
#####
# represents a single `materialize_batch` job
struct BatchJob
worker::Union{Int,Nothing}
batch_future::Union{Future,Nothing}
state::Any
prev_state::Any
end
"""
_feed_jobs!(jobs::Channel, batches, state, workers)
Function that iterates `batches` starting from `state`, creating a `BatchJob` to
materialize each one using the pool of `workers`. Each job is put onto the
`jobs` channel in the order it was iterated, and is a struct with fields
- `worker` PID of the worker loading this batch
- `batch_future` a `Future` containing the output of `materialize_batch`
- `state` the iteration state after iterating this batch
- `prev_state` the iteration state before iterating this batch (i.e., the input
  to `iterate_batch(batches, state)` required to reproduce this batch)
When batch iteration is complete (as indicated by `iterate_batch` returning
`nothing`), a final placeholder job will be placed on the jobs channel, with
values of `nothing` everywhere except for `prev_state`, which is required to
support synchronization on the client end (i.e., to confirm that the user really
did ask for the final batch with `take!`).
Returns `nothing`.
Runs on the batching manager (i.e., `Batcher.manager`), in an async Task created
in `start_batching`.
"""
function _feed_jobs!(jobs::Channel, batches, state, workers)
prev_state = deepcopy(state)
next = iterate_batch(batches, state)
# iteration loop: go until iterate_batch returns nothing
while next !== nothing
batch, state = next
# why wait here? we don't want to take a worker if the jobs channel
# isn't still open, but we do want to block creating more jobs until a
# worker is available to run it. so we wait...(_wait actually)
_wait(workers)
# ...check that the jobs channel is still open, returning if not...
isopen(jobs) || return nothing
# ...and if is, we take the worker that (hopefully) is still ready
@debug "feeder: taking worker from pool..."
worker = take!(workers)
@debug "feeder: materializing batch on worker $(worker) for state $(state)"
batch_future = remotecall(retry_materialize_batch,
worker, batch; retries=4)
job = BatchJob(worker, batch_future, deepcopy(state), prev_state)
put!(jobs, job)
prev_state = deepcopy(state)
next = iterate_batch(batches, state)
end
@debug "finished: feeder exited at state $(state)"
# we always need to put a final batch on the output with the correct
# previous state to support synchronization by the consumer, so
# rather than just closing the channel, put a whole lotta nothing w/
# the correct prev_state onto our internal channel
put!(jobs, BatchJob(nothing, nothing, nothing, prev_state))
return nothing
end
"""
start_batching(channel::RemoteChannel, batches, state, workers)
Start batching loop, utilizing multiple workers to load batches in parallel.
This method will yield batches in the same order that `start_batching` without
`workers` will, using a [`_feed_jobs!`](@ref) task to feed batch materialization jobs to
an internal channel (maintaining iteration order while distributing work across
`workers`).
Runs on the batching manager (i.e. `Batcher.manager`)
"""
function start_batching(channel::RemoteChannel, batches, state, workers)
# we need to be sure that the worker pool has active workers or else we may
# deadlock waiting on a job below... #54
reset!(workers)
@debug "Starting batching on $(length(workers)) workers..."
# batches are assigned to workers to materialize using a worker pool: the
# feeder task takes a worker from the pool, and the consumer loop returns
# the worker to the pool when the batch is ready. this controls the number
# of batches that are being worked on simultaneously.
jobs = Channel{BatchJob}(Inf)
feeder = @async begin
# wrap this in a `with_channel` to gracefully handle closure of the jobs
# channel
with_channel(jobs) do jobs
_feed_jobs!(jobs, batches, state, workers)
end
end
# this will ensure that the jobs channel is closed when the feeder task
# completes AND forward any errors thrown on the feeder task to anyone
# waiting on `jobs` (i.e. the main loop below)
bind(jobs, feeder)
# create a sentinel task that will close the jobs channel if/when the output
# channel is closed. this is necessary because we are waiting on the jobs
# channel below which may block if there's resource starvation, but we still
# need to be able to handle the closure of the output channel since that's
# how the client communicates that batching should be stopped prematurely.
sentinel = @async begin
while isopen(channel)
sleep(1)
end
@debug "output channel closed, closing jobs channel"
close(jobs)
end
Base.errormonitor(sentinel)
try
closed_msg = "batch channel closed, stopping batching"
status = with_channel(channel; closed_msg) do channel
for (; worker, batch_future, state, prev_state) in jobs
if batch_future === nothing
# to support synchronization from the consumer, we need to put
# one "final" "batch" on the channel with `nothing` in place of
# the materialized batch + next state tuple.
put!(channel, (nothing, prev_state))
else
# TODO: consider only `wait` here, and put future directly onto
# channel.
local materialized
try
materialized = fetch(batch_future)
@debug "returning worker $(worker) to pool"
put!(workers, worker)
catch
# in the event of an error, close the jobs channel to
# stop the feeder task
@debug "caught exception, closing jobs channel"
close(jobs)
rethrow()
end
@debug "putting batch onto channel" state
put!(channel, ((materialized, state), prev_state))
end
end
# because we may be waiting on jobs, we may exit the `for ... in
# jobs` loop on channel closure without hitting an
# InvalidStateException which would cause `with_channel` to return
# `:closed`. so we need an additional manual check here...
return isopen(channel) ? :done : :closed
end
# rethrow possible task failed exception once we finish with all the
# good batches (task will close channel on failure).
if istaskfailed(feeder)
@debug "feeder task failed, fetching to rethrow"
fetch(feeder)
end
# if the feeder task is not done and we've gotten here, something has
# gone wrong and we should notify the external world
status != :closed && !istaskdone(feeder) &&
error("`start_batching` feeder task is not done but internal job channel closed!")
return status
finally
# always make sure the jobs channel is closed and all workers for
# in-flight jobs are returned to the pool
close(jobs)
reset!(workers)
end
end
"""
A struct that provides control of a batching process on one or more remote
workers. This struct keeps track of
- `manager::Int` the PID where `start_batching` will be called.
- `workers` an `AbstractWorkerPool` for the worker process(es).
- `channel::RemoteChannel` the channel that batches are loaded into.
- `status::Future` the return value of the `start_batching` function as a
Future; see [`get_status`](@ref) for a convenient accessor.
- `batches` the iterator of batches that will be materialized; only requirement
is that [`iterate_batch`](@ref) be defined; see [`RandomBatches`](@ref) for an
example
- `state::Any` batcher state (passed to [`iterate_batch`](@ref), updated with
each new batch that's yielded by the batcher.
- `buffer::Int` the size of the batch buffer to keep locally (e.g., the capacity
of `channel`).
Use [`start!`](@ref) to start the batching service, [`stop!`](@ref) to stop it,
and [`get_status`](@ref) to check the status.
Once the batcher is started, the sequence of materialized batches (the output of
[`materialize_batch`](@ref)) and corresponding batcher states can be retrieved
by [`take!`](@ref).
## Architecture
A `Batcher` is meant to run in an architecture where remote workers are created
with a Distributed.jl cluster manager. We use the following terminology to
describe the roles these different processes play:
- "Batch worker": one or more processes that are used to actually load batches
(via [`materialize_batch`](@ref))
- "Batch manager": the process which coordinates the loading of batches,
ensuring consistent iteration order, distributing work to the batch workers,
and populating the output channel. [`start_batching`](@ref) runs on this
process.
- "Client": the process which is consuming batches via `take!(::Batcher, state)`
(which OndaBatches.jl is generally agnostic about and does not manage)
- "Manager": the process on which the `Batcher` is initially created, and holds
the reference for the worker pool (for multi-worker batching).
!!! note
We try hard to make `Batcher`s relocatable to other processes (e.g.,
serializing to the Client after initialization on the Manager). However,
since a new `RemoteChannel` is created each time the batcher is started
(including when the desired state does not match the `Batcher`'s current
state), some care needs to be taken if it matters where that channel is
hosted (although this behavior may change in the future).
Also note that while a running (i.e. `start!`ed) `Batcher` can be relocated
to another process, the `status` and `channel` fields are not guaranteed to
stay in sync on the two copies.
"""
Base.@kwdef mutable struct Batcher
manager::Int
workers::AbstractWorkerPool
channel::RemoteChannel
status::Future
batches::Any
buffer::Int
end
function Batcher(manager::Int, workers::Vector{Int}, batches; kwargs...)
# why not a CachingPool? they're not serializable, and it's generally
# important to be able to serialize a Batcher. so this is a sensible
# default that users can override as they need to. also, in general we are
# not doing a lot of `remotecall`s with chonky closures, so that negates
# most of the benefits of a CachingPool.
pool = WorkerPool(workers)
return Batcher(manager, pool, batches; kwargs...)
end
function Batcher(workers::Vector{Int}, batches; kwargs...)
# first worker is going to be the manager
manager, workers = Iterators.peel(workers)
return Batcher(manager, collect(workers), batches; kwargs...)
end
"""
Batcher([manager::Int,] workers::Vector{Int}, batches; start=true, state=nothing, buffer=2 * length(workers) + 1)
Batcher(manager::Int, workers::AbstractWorkerPool, batches; start=true, state=nothing, buffer=2 * length(workers) + 1)
Construct a new [`Batcher`](@ref), using worker IDs, batches, and initial state.
The batcher's channel and status will be initialized.
The `workers` may be specified as an `AbstractWorkerPool` or a vector of PIDs
(in which case a `WorkerPool` will be constructed).
!!! warning
If workers are supplied as an `AbstractWorkerPool`, it is assumed that _all_
workers managed by the pool are available for loading batches. Whenever the
batcher is stopped, the worker pool is reset, and all managed workers are
returned to the channel of available workers.
See [`RandomBatches`](@ref) for an example of creation of `batches`.
The initial `state` is the state that is used by
[`iterate_batch`](@ref), e.g., the RNG used by [`RandomBatches`](@ref).
If `start=true`, batching is [`start!`](@ref)ed. The `state` keyword argument must be supplied in this case to provide an initial state.
The `buffer` controls the capacity of the batch channel; a value greater than or
equal to the number of workers is recommended so that batch loading workers do
not block waiting for batches to be taken off the channel.
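## Example
```julia
# a sketch with hypothetical PIDs and an assumed `batches` iterator (e.g. a
# `RandomBatches`): defer startup with `start=false`, then start explicitly
using Distributed, Random
pids = addprocs(4)
batcher = Batcher(first(pids), pids[2:end], batches; start=false)
start!(batcher, MersenneTwister(1))
```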
"""
function Batcher(manager::Int, workers::AbstractWorkerPool, batches;
start=true, state=nothing,
buffer=2 * length(workers) + 1)
channel = RemoteChannel(() -> Channel{Any}(buffer))
status = Future()
put!(status, :stopped)
batcher = Batcher(; manager, workers, channel, status, batches, buffer)
if start
state === nothing &&
throw(ArgumentError("state must have a value when `start`=true"))
start!(batcher, state)
end
return batcher
end
"""
get_status(batcher::Batcher)
Check the status of a remote batcher.
Possible return values are
- `:stopped`: the batcher was created but not started
- `:running`: the batching loop is still running
- `:closed`: the batch channel was closed and the batch loop has terminated
- `:done`: the infinite loop in [`start_batching`](@ref) has terminated without
error (not expected)
- a `RemoteException` that wraps an error thrown by `start_batching` on the
  batch manager (which may further wrap an exception thrown on a batch worker).
"""
get_status(batcher::Batcher) = get_status(batcher.status)
get_status(s::Future) = isready(s) ? fetch_and_catch(s) : :running
function fetch_and_catch(s::Future)
try
return fetch(s)
catch e
msg = sprint(showerror, e, catch_backtrace())
@error "batcher status error: $msg"
return e
end
end
"""
start!(batcher::Batcher, state)
Start the remote process that loads batches into the batcher's channel. A new
channel is created since the old one cannot always be re-used.
This invokes [`start_batching`](@ref) on `batcher.manager` with `remotecall`.
The (modified) batcher is returned.
If the batcher is already running (as indicated by [`get_status ==
:running`](@ref get_status)), a warning is raised and the batcher is returned.
Runs on the Client.
"""
function start!(batcher::Batcher, state)
(; manager, workers, batches, status, buffer) = batcher
    # status is a Future that is `isready` if batching is not running, and `!isready` if it is
# still running and needs to be stopped.
if get_status(batcher) == :running
@warn "batcher already running; use `stop!` to halt before `start!`"
return batcher
end
# because of how remotecall works, we need to make a copy of the state when
# using the calling PID as the manager: the manager is where we're iterating
# batches which could mutate the state, and when remotecalling onto
# `myid()`, it functions like a local invocation instead of serializing:
# https://docs.julialang.org/en/v1/manual/distributed-computing/#Local-invocations
if manager == myid()
state = copy(state)
end
# it's not really possible to check whether a channel is closed without
# (possibly) blocking so we just create a new one every time we start.
channel = RemoteChannel(() -> Channel{Any}(buffer))
if length(workers) > 0
# length is the total number of workers in the pool, regardless of
# whether they're available for work or not.
length(workers) == 1 && @warn "only one extra worker to load batches!"
@info "starting multi-worker batching, with manager $(manager) and workers $(workers), at state $(state)"
batcher.status = remotecall(start_batching,
manager,
channel,
batches,
state,
workers)
else
@info "starting batching on worker $(manager) at state $(state)"
# TODO: `isready` docs say we should `put!` this future into a Future owned
# by this process
batcher.status = remotecall(start_batching,
manager,
channel,
batches,
state)
end
batcher.channel = channel
return batcher
end
"""
stop!(batcher::Batcher)
Close `batcher.channel` to stop the remote batching. This blocks on
`fetch(batcher.status)` to wait for channel closure. If an error is thrown on
the remote worker that is not caught, it will be rethrown here.
The batcher is returned.
Runs on the Client.
"""
function stop!(batcher::Batcher)
(; channel, status, workers) = batcher
@info "stopping batcher"
@debug "closing channel"
    # where = 0 when channel has been finalized. close on a finalized channel
# throws a pretty opaque error.
channel.where != 0 && close(channel)
@debug "waiting for done status"
# catch errors here so we can stop the batcher even if there was an error
status = fetch_and_catch(status)
# need to finalize this in order to release remote refs for GC
finalize(channel)
return batcher
end
"""
Base.take!(batcher::Batcher, state)
Take one batch + state pair from the batcher, starting at the specified state.
If the requested state does not match the batcher's current state, then the
batching process will be restarted with the new state. If the batcher is not
running (as indicated by [`get_status`](@ref)), it will be started with
[`start!`](@ref).
If an error has occurred on any of the batch loading workers, the next call to
`take!` will immediately throw the wrapped `RemoteException`, even if there are
still good batches on the channel.
Returns an `(x, y), state` tuple, where `x` is the batch signal data, `y` is the
label data (see [`materialize_batch`](@ref)), and `state` is the next batch
iterator state.
Runs on the Client.
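## Example
```julia
# a sketch of a consumer loop: assumes `batcher` is a running `Batcher` that
# was started from an initial state `state0`; each call returns the
# materialized batch and the next iteration state
state = state0
for _ in 1:10
    (x, y), state = take!(batcher, state)
    # ... update a model with (x, y) here ...
end
```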
"""
function Base.take!(batcher::Batcher, state)
    # we first check the status future so that if there was an error, it throws
# immediately instead of blocking on `fetch` on the channel...
#
# we don't use the get_status convenience wrapper because we WANT to throw
# the error, rather than just logging it and getting the exception itself.
@debug "checking batcher status before waiting on channel"
isready(batcher.status) && fetch(batcher.status)
# wrap the rest of this in a try-catch to handle when batchers close the
# channel on errors:
try
synchronize!(batcher, state)
@debug "taking materialized batch and next state from channel"
# next is probably ((xx, yy), state) but could be nothing to indicate that
# batching is all done
next, _ = Base.take!(batcher.channel)
return next
# TODO: consider allowing next to be a (::Future, state) tuple, to avoid
# extra data movement in case of multi-worker batching (e.g., put
# `remotecall` futures directly onto channel). In that case, we'd need
    # to do: (can broadcast because of fallback `fetch(x::Any) = x`)
# return fetch.(next)
catch e
@debug "caught exception: $e"
if is_closed_ex(e)
@debug "is a channel closed exception, getting batcher status..."
# channel was closed by a worker (or otherwise).
# figure out why, by synchronizing on the status
ready = timedwait(() -> isready(batcher.status), 60)
if ready == :ok
# if start_batching threw, this will rethrow the
# RemoteException. In this case, because we're throwing from
# inside a catch block, we'll see the whole exception stack,
# with the RemoteException at the top, and the local exception
# due to the channel closure at the bottom.
status = fetch(batcher.status)
else
@warn "Waited 1 minute for batcher status to be ready after channel closed, continuing with status=:unknown"
status = :unknown
end
@warn "batcher channel closed but batcher status did not throw: $(status)"
end
# if we made it through here without throwing a remote exception, then
# we wanna rethrow the original exception that we caught here.
rethrow()
end
end
function synchronize!(batcher::Batcher, state)
status = get_status(batcher)
# need to also check that the channel is open, since iteration may be done
# but batches remaining on the channel
status == :running || isready(batcher.channel) ||
throw(ArgumentError("status must be `:running` or channel ready to synchronize state (got $(status))"))
@debug "fetching previous state to synchronize"
_, prev_state = fetch(batcher.channel)
if state != prev_state
@warn("mismatch between requested batch state and Batcher state, restarting",
state,
prev_state)
stop!(batcher)
start!(batcher, state)
end
return batcher
end
# a same-process batcher with the same `take!` interface that `Batcher` uses in
# the training loop, for testing purposes
struct BlockingBatcher
batches::Any
end
function Base.take!(batcher::BlockingBatcher, state)
next = iterate_batch(batcher.batches, state)
next === nothing && return nothing
batch, state = next
xx, yy = materialize_batch(batch)
return (xx, yy), state
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 8248 | """
RandomBatches
An iterator of pseudo-randomly sampled batches derived from a table of densely labeled signals (a [`labeled.signal@2`](@ref LabeledSignalV2) table).
Batches consist of `batch_size` "batch items".
A single batch item consists of `batch_duration * label_sample_rate` labels, and `batch_duration * signal_sample_rate` samples of multichannel data.
Batch items are sampled according
to the following procedure:
1. A single labeled signal is sampled (optionally with weights)
2. A single label from that signal is sampled (optionally with weights)
3. One or more channels is selected, optionally randomly.
Each batch item is sampled independently, and in particular different batch items in a given batch can have different channels included (although the same number of them, `n_channels`).
The functions [`iterate_batch_item`](@ref) and [`iterate_batch`](@ref) sample a
single batch item and a full batch, respectively.
## Fields
- `labeled_signals::DataFrame`: the table of labeled signals that batches are
sampled from.
- `signal_weights::AbstractWeights`: weights for individual signals (unweighted
  by default). May be `nothing` during construction, in which case unit
weights are created.
- `label_weights::Vector{AbstractWeights}`: weights for individual labels of
each labeled signal (unweighted by default). May be `nothing` during
construction, in which case unit weights will be created for each labeled
signal.
- `n_channels::Union{Nothing,Int}`: the number of channels each batch item
should have; this many channels are sampled without replacement, unless
`n_channels === nothing` in which case all channels are included.
- `batch_size::Int`: the number of items that make one complete batch
- `batch_duration::TimePeriod`: the duration of the window for a single batch item.
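## Example
```julia
# a sketch mirroring the package's code tour: 1 channel per item, 3 items per
# batch, 60-second windows, uniform weights over signals and labels
batches = RandomBatches(labeled_signals, nothing, nothing, 1, 3, Second(60))
```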
"""
Base.@kwdef struct RandomBatches
labeled_signals::DataFrame
signal_weights::AbstractWeights
label_weights::Vector{AbstractWeights}
n_channels::Union{Nothing,Int}
batch_size::Int
batch_duration::TimePeriod
function RandomBatches(labeled_signals,
signal_weights,
label_weights,
n_channels,
batch_size,
batch_duration)
Legolas.validate(Tables.schema(Tables.columns(labeled_signals)),
LabeledSignalV2SchemaVersion())
signal_weights = something(signal_weights,
uweights(nrow(labeled_signals)))
length(signal_weights) == nrow(labeled_signals) ||
throw(ArgumentError("mismatch between number of signals ($nrow(labeled_signals)) and weights ($(length(signal_weights)))"))
label_lengths = _label_sample_count.(eachrow(labeled_signals))
label_weights = @something(label_weights, uweights.(label_lengths))
all(length.(label_weights) .== label_lengths) ||
throw(ArgumentError("mismatch between length of label weights and labels"))
return new(labeled_signals,
signal_weights,
label_weights,
n_channels,
batch_size,
batch_duration)
end
end
"""
    iterate_batch(batches::RandomBatches, rng)
Return a "batch listing" that can be materialized into model training/evaluation
input.
A batch is a table that has one row per batch item, and follows the
[`"batch-item@2"`](@ref BatchItemV2) schema.
This is consumed by a [`materialize_batch`](@ref) function that can be run on a
remote worker, so this sends just the minimum of information necessary to load
the batch signal data, the stage labels, and the spans that say how they line
up.
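## Example
```julia
# a sketch: draw one batch listing with a seeded RNG (the iteration state),
# then materialize it into arrays (assumes `batches::RandomBatches`)
batch, rng = iterate_batch(batches, MersenneTwister(1))
xx, yy = materialize_batch(batch)
```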
"""
function iterate_batch(batches::RandomBatches, rng)
(; batch_size) = batches
batch = DataFrame()
for i in 1:batch_size
row, rng = iterate_batch_item(batches, rng)
push!(batch, NamedTuple(row); cols=:union)
end
return batch, rng
end
"""
iterate_batch_item(batches::RandomBatches, rng)
Yields a single "batch item". See documentation for [`RandomBatches`](@ref) for
the details on the sampling scheme.
Individual batch items are rows of a batch table with schema
[`"batch-item@2"`](@ref BatchItemV2), and are consumed by
[`materialize_batch_item`](@ref).
"""
function iterate_batch_item(batches::RandomBatches, rng)
(; labeled_signals,
batch_duration,
label_weights,
signal_weights,
n_channels) = batches
row_idx = sample(rng, 1:nrow(labeled_signals), signal_weights)
signal_label_row = labeled_signals[row_idx, :]
label_weights = label_weights[row_idx]
(; labels, label_span, channels) = signal_label_row
batch_label_span = sample_label_span(rng, labels, label_span,
label_weights, batch_duration)
# TODO: #5
batch_channels = if n_channels === nothing
channels
else
# sample n_channels without replacement
sample(rng, channels, n_channels; replace=false)
end
batch_item = Tables.rowmerge(sub_label_span(signal_label_row,
batch_label_span);
batch_channels)
return BatchItemV2(batch_item), rng
end
"""
sample_label_span(rng, labels, label_span, labels_weight, batch_duration)
Return a TimeSpan sampled from labels. First, an epoch is sampled according to
`labels_weight`. Next, the position of this epoch in a window of
`batch_duration` is sampled with uniform probability, with the constraint that
the window must lie completely within `labels`.
The returned TimeSpan will have duration equal to `batch_duration` and will be
relative to the start of the _recording_. The earliest possible return span
starts at `start(label_span)`, and the latest possible span stops at
`stop(label_span)`.
"""
function sample_label_span(rng, labels, label_span, labels_weight, batch_duration)
Nanosecond(batch_duration) <= duration(label_span) ||
throw(ArgumentError("requested span of $(batch_duration) is too long " *
"given labeled span $(label_span) " *
"($(duration(label_span)))"))
batch_seconds = Dates.value(Nanosecond(batch_duration)) / Dates.value(Nanosecond(Second(1)))
sample_rate = _sample_rate(labels)
batch_segments = batch_seconds * sample_rate
isinteger(batch_segments) ||
throw(ArgumentError("batch segments must be an integer, got " *
"$(batch_segments) with batch duration of " *
"$(batch_duration) and sampling rate of " *
"$(sample_rate)"))
batch_segments = round(Int, batch_segments)
available_epochs = 1:_label_sample_count(labels, label_span)
epoch = sample(rng, available_epochs, labels_weight)
# now sample position of epoch within a window of length batch_segments
# window can start anywhere from epoch 1 to end-batch_segments
earliest_start = first(available_epochs)
latest_start = last(available_epochs) - batch_segments + 1
available_starts = earliest_start:latest_start
# possible starts that include the sampled epoch
epoch_starts = (epoch + 1 - batch_segments):epoch
    # sample from the intersection of these two to ensure we get a valid and
    # reasonable starting epoch
epoch_start = sample(rng, intersect(available_starts, epoch_starts))
# TimeSpans are right-open, so we need an _epoch_ range of batch_segments+1.
# By using [epoch_start, epoch_start + batch_segments) as the epoch index
# interval and calling `time_from_index` on the start/stop manually we make
# sure that we get correct behavior even when `batch_segments` is 1.
#
# works around https://github.com/beacon-biosignals/TimeSpans.jl/issues/45
epoch_stop = epoch_start + batch_segments
# this is relative to `label_span`
new_span = TimeSpan(time_from_index(sample_rate, epoch_start),
time_from_index(sample_rate, epoch_stop))
# shift return span to be relative to _recording_, like `label_span`
return translate(new_span, start(label_span))
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 20546 | @schema "labeled.signal" LabeledSignal
"""
@version LabeledSignalV2 > SignalV2 begin
label_span::TimeSpan
labels::Union{Samples,SignalV2}
end
Legolas.jl record type that represents one Onda signal with associated
labels. Labels must be dense and contiguous, and are represented as
Onda.Samples or an Onda.Signal that refers to Onda.Samples serialized as LPCM.
`label_span` corresponds to the time span (relative to the recording) spanned by
the `labels`.
Note that the signal `span` and labels' `label_span` are both relative to the
start of the _recording_.
"""
LabeledSignalV2
@version LabeledSignalV2 > SignalV2 begin
label_span::TimeSpan
labels::Union{Samples,SignalV2}
end
# get the number of samples for a labeled signal row
_label_sample_count(row) = _label_sample_count(row.labels, row.label_span)
_label_sample_count(labels::Samples, _) = size(labels.data, 2)
_label_sample_count(labels::SignalV2, span) = Onda.sample_count(labels, duration(span))
_sample_rate(labels::Samples) = labels.info.sample_rate
_sample_rate(labels::SignalV2) = labels.sample_rate
"""
store_labels(labeled_signals, root; format="lpcm")
Store labels to `root`, replacing the `Onda.Samples` in the `labels` column of
`labeled_signals` with `Onda.Signal`s.
"""
function store_labels(labeled_signals, root; format="lpcm")
_mat = Tables.materializer(labeled_signals)
rows = map(Tables.namedtupleiterator(labeled_signals)) do row
return store_labels(LabeledSignalV2(row), root; format)
end
return _mat(rows)
end
_base_noversion(x::Any) = first(splitext(basename(x)))
_base_noversion(x::S3Path) = _base_noversion(string(S3Path(x.bucket, x.key)))
"""
store_labels(labeled_signal::LabeledSignalV2, root; format="lpcm")
Store a single set of labels to `root`, replacing the `Onda.Samples` in the
`labels` column of `labeled_signals` with `Onda.SignalV2`s. A single updated
`LabeledSignalV2` row is returned.
The filepath of the stored labels' Signal is the basename of
`labeled_signal.file_path` with `"labels_"` prepended.
"""
function store_labels(labeled_signal::LabeledSignalV2, root; format="lpcm")
    (; recording, labels, label_span, file_path) = labeled_signal
out_path = joinpath(root, "labels_" * _base_noversion(file_path))
labels_signal = Onda.store(out_path, format, labels, recording, start(label_span))
return rowmerge(labeled_signal; labels=Onda.SignalV2(labels_signal))
end
"""
    load_labeled_signal(labeled_signal, samples_eltype::Type=Float64)
Load signal data as `Onda.Samples` from a labeled segment of an `Onda.SignalV2` (i.e.,
a [`LabeledSignalV2`](@ref) or row with schema `"labeled.signal@2"`), and
return the portion of the samples data corresponding to `labeled_signal.label_span`,
along with the corresponding labels (as another `Onda.Samples` object).
If possible, this will only retrieve the bytes corresponding to
`labeled_signal.label_span`.
The `eltype` of the returned `Samples` is `samples_eltype`, which defaults to
`Float64`.
!!! note
The handling of samples `eltype` is different than `Onda.load`, for which
the `eltype` depends on the resolution/offset specified in the samples info:
when they are 1/0 respectively, the underlying encoded data is _always_
returned exactly as-is, even if the type differs from the requested
`eltype`. This allows for some optimizations in such cases, but is a
potential footgun when a particular `eltype` is actually required. We work
around this inconsistency here by always allocating a _new_ array with the
requested `eltype` to hold the decoded samples.
Returns a `samples, labels` tuple.
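## Example
```julia
# a sketch: assumes `labeled_signals` is a table with schema "labeled.signal@2"
row = LabeledSignalV2(first(Tables.rows(labeled_signals)))
samples, labels = load_labeled_signal(row, Float32)
```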
"""
function load_labeled_signal(labeled_signal, ::Type{T}=Float64) where {T}
# TODO: handle this with a type restriction/validation by construction?
# Legolas.validate((labeled_signal, ), Legolas.Schema("labeled.signal@2"))
# (; labels, label_span, span) = labeled_signal
(; sample_rate, labels, label_span, span) = LabeledSignalV2(labeled_signal)
# we need to convert the label_span from relative to start of recording to
# be relative to the loaded samples
#
# (---------------recording----------------------------------------)
# (--------span---------------------------)
# (----label_span-----)
# -------> start(span)
# --------------> start(label_span)
# ------> start(translate(label_span, -start(span)))
# TODO: should we check that the label span is inside the signal span here
# or on construction of the labeled span? right now it's an error if the
# labels start before or end after the actual signal span.
label_span_relative_to_samples = translate(label_span, -start(span))
# now make sure we always get the right number of samples. Rounding the
# span start down is consistent with how TimeSpans handles indices for times
# that fall between samples, taking "the most recent sample taken at
# `sample_time`"
aligned = AlignedSpan(sample_rate,
label_span_relative_to_samples,
ConstantSamplesRoundingMode(RoundDown))
# load samples encoded, and then decode them to our desired eltype
samples = Onda.load(labeled_signal, aligned; encoded=true)
# why this juggling? well, if the resolution/offset of samples is 1/0
# respectively, then `decode` is a no-op, EVEN IF `T !=
# eltype(samples.data)`. By providing the storage array to `decode!`, we
# force conversion to T.
samples = Onda.decode!(similar(samples.data, T), samples)
# return labels as-is if they are Samples, and load/index appropriately if
# they are a Lazy Signal
labels = get_labels(labels, label_span)
# XXX: #4 want to make sure that we're always getting the "right" number of
# samples, so should use AlignedSpans here too probably
return samples, labels
end
"""
get_labels(labels::Samples, span)
get_labels(labels::SignalV2, span)
Return labels as Samples, deserializing with `Onda.load` if necessary. `span`
is the span _relative to the start of the recording_ that should be loaded.
This function is meant for internal use only; users should instead use
`load_labeled_signal` and `sub_label_span`.
"""
get_labels(labels::Samples, span) = labels
function get_labels(labels::SignalV2, span_relative_to_recording)
# when labels are stored on disk, we can't eagerly sub-select them during
# `sub_label_span`. so we have to do the same juggling to translate the
# label_span (here, span_relative_to_recording) to be relative to the
# labels' Samples, and then load.
span_relative_to_labels = translate(span_relative_to_recording,
-start(labels.span))
return Onda.load(labels, span_relative_to_labels)
end
"""
label_signals(signals, annotations;
groups=:recording,
labels_column,
epoch,
encoding,
roundto=nothing)
Create a "labeled signals" table from a signals table and a table of annotations
containing labels.
Annotations will be passed to [`labels_to_samples_table`](@ref), as well as
kwargs. `labels_to_samples_table` requires these keyword arguments:
- `groups`: the column to group over, defaults to `:recording`.
- `labels_column`: the column in the annotations table containing the labels.
- `epoch`: the sampling period of the labels.
- `encoding::Dict`: the label -> `UInt8` mapping to use for encoding the labels.
- `roundto`: controls rounding of "shaggy spans", defaults to `nothing` for no rounding.
Annotations must be
- contiguous and non-overlapping (within `groups`)
- regularly sampled, with spans an even integer multiple of the `epoch` kwarg.
Returns a [`LabeledSignalV2`](@ref) table (e.g., with schema
`"labeled.signal@2"`), with labels in `:labels` and the signal spans occupied by
these labels in `:label_span`. Like the signal `:span`, the `:label_span` is
relative to the start of the _recording_, not necessarily to the start of the
data represented by the _signal_.
If any label span is not entirely contained within the corresponding signal
span, this will throw an ArgumentError.
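## Example
```julia
# a sketch mirroring the package's code tour: annotations carry a :stage
# column in 30-second epochs, encoded to UInt8 via an encoding Dict
labeled = label_signals(signals, annotations;
                        labels_column=:stage,
                        epoch=Second(30),
                        encoding=SLEEP_STAGE_INDEX)
```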
"""
function label_signals(signals, annotations;
groups=:recording,
labels_column,
epoch,
encoding,
roundto=nothing)
labels_table = labels_to_samples_table(annotations;
groups,
labels_column,
epoch,
encoding,
roundto)
joined = leftjoin(DataFrame(signals), labels_table; on=groups)
if any(ismissing, joined.labels)
missings = select(filter(:labels => ismissing, joined), groups)
@warn "Dropping $(nrow(missings)) rows with no labels\n\n$(missings)"
filter!(:labels => !ismissing, joined)
end
for (; recording, span, label_span) in eachrow(joined)
if !TimeSpans.contains(span, label_span)
e = "label span not contained in signal span for $(recording):\n" *
"label span: $(label_span), signal span: $(span)"
throw(ArgumentError(e))
end
end
disallowmissing!(joined, [:labels, :label_span])
return joined
end
"""
sub_label_span(labeled_signal, new_label_span)
Select a sub-span of labeled signals `labeled_signal` (with schema
`"labeled.signal@2"`), returning a new labeled signal with updated `labels` and
`label_span`.
The `new_label_span` should be relative to the start of the recording (like the
signal's `span` and the current `label_span`).
"""
function sub_label_span(labeled_signal, new_label_span)
(; labels, label_span) = labeled_signal
if !TimeSpans.contains(label_span, new_label_span)
throw(ArgumentError("""
new labeled span is not contained within labeled span!
input: $(new_label_span)
currently labeled: $(label_span)
"""))
end
# new_label_span is relative to start of recording; align to start of
# label_span
#
# (---------------recording----------------------------------------)
# (--------label-span---------------------------)
# (----new-label-span-----)
# -------> start(label-span)
# --------------> start(new-label-span)
# ------> start(translate(new_label_span, -start(label_span)))
span = translate(new_label_span, -start(label_span))
# This does not check that `span` aligns exactly with labels due to sample rounding and could
# give bad results if they are misaligned.
# TODO #4
labels = _get_span(labels, span)
label_span = new_label_span
return Tables.rowmerge(labeled_signal; labels, label_span)
end
_get_span(samples::Samples, span) = samples[:, span]
# handle labels stored on disk/s3
_get_span(signal::SignalV2, span) = signal
#####
##### convert labels in spans to samples
#####
"""
all_contiguous(spans)
Returns `true` if all `spans` are contiguous. Assumes spans are sorted by start
time.
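For example:
```julia
all_contiguous([TimeSpan(Second(0), Second(30)), TimeSpan(Second(30), Second(60))])  # true
all_contiguous([TimeSpan(Second(0), Second(30)), TimeSpan(Second(31), Second(60))])  # false
```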
"""
function all_contiguous(spans)
cur, rest = Iterators.peel(spans)
for next in rest
stop(cur) == start(next) || return false
cur = next
end
return true
end
"""
is_epoch_divisible(span::TimeSpan, epoch; roundto=nothing)
Tests whether `span` is evenly divisible into contiguous sub-spans of length
`epoch`, after optionally rounding to `roundto` (by default, no rounding is
performed).
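For example:
```julia
is_epoch_divisible(TimeSpan(Second(0), Second(90)), Second(30))   # true
is_epoch_divisible(TimeSpan(Second(0), Second(100)), Second(30))  # false
```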
"""
function is_epoch_divisible(span::TimeSpan, epoch; roundto=nothing)
roundto = something(roundto, Nanosecond)
dur = round(duration(span), roundto)
return dur == floor(dur, epoch)
end
"""
    check_epoch_divisible(spans, epoch; roundto=nothing)
Throw an `ArgumentError` if any of `spans` are not evenly divisible into
contiguous sub-spans of length `epoch`, according to
[`is_epoch_divisible`](@ref).
"""
function check_epoch_divisible(spans, epoch; roundto=nothing)
all(is_epoch_divisible(span, epoch; roundto) for span in spans) ||
throw(ArgumentError("spans are not evenly divisible into epochs!"))
return nothing
end
function int_encode_labels(; epoch,
encoding::Dict,
roundto=nothing)
return (stages, spans) -> int_encode_labels(stages, spans;
epoch, encoding, roundto)
end
"""
int_encode_labels(stages, spans; epoch, encoding::Dict,
roundto=nothing)
int_encode_labels(; epoch, encoding, roundto)
Return a `Vector{UInt8}` of stage labels, using `encoding` to look up each stage
label in `stages`, sampled evenly at intervals of `epoch`. `spans` are expanded
into non-overlapping, contiguous sub-spans of duration `epoch`; `spans` must be
sorted, contiguous, and have durations evenly divisible by `epoch` (use
[`sort_and_trim_spans`](@ref) to trim a ragged final span). `spans` durations
will be rounded to the nearest `roundto` (can be a `TimePeriod` subtype or
instance, such as `Millisecond(100)`, or `nothing`) before division into epochs
to accommodate minor errors in stage label durations; if `roundto=nothing` (the
default) no rounding will be performed.
The returned `Vector{UInt8}` has one entry per epoch, i.e. length
`Nanosecond(duration(shortest_timespan_containing(spans))) ÷ Nanosecond(epoch)`.
The `encoding` is used to map the values in `stages` to `UInt8`s, and should be
provided in the form of a `Dict{eltype(stages), UInt8}`.
`int_encode_labels(; epoch, encoding, roundto)` will return a closure which
captures the configuration options.
"""
function int_encode_labels(stages, spans;
epoch,
encoding,
roundto=nothing)
issorted(spans; by=start) || throw(ArgumentError("spans must be sorted"))
length(spans) == length(stages) ||
throw(ArgumentError("mismatching lengths of spans ($(length(spans))) " *
"and stages ($(length(stages)))"))
all_contiguous(spans) ||
throw(ArgumentError("can only int encode contiguous label spans"))
check_epoch_divisible(spans, epoch; roundto)
roundto = something(roundto, Nanosecond)
# iterate through the spans/stages and undo the RLE
labels = UInt8[]
for (span, stage) in zip(spans, stages)
# XXX: this may be necessary to "snap" some spans that all start/end at like 995ms. it
# may cause some very slight misalignment between the processed label
# spans and the source, but by no more than 500 ms (and in practice,
# more like 5ms) out of 30s (so ~1% max).
#
# note that we now DEFAULT to no rounding; this is still included to
# preserve backwards compatibility with older versions
dur = round(duration(span), roundto)
n = Nanosecond(dur) ÷ Nanosecond(epoch)
i = encoding[stage]
for _ in 1:n
push!(labels, i)
end
end
return labels
end
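# A minimal sketch (hypothetical encoding): two contiguous 30s spans staged
# "wake" then "rem" expand to one UInt8 per epoch:
#
#     int_encode_labels(["wake", "rem"],
#                       [TimeSpan(0, Second(30)), TimeSpan(Second(30), Second(60))];
#                       epoch=Second(30),
#                       encoding=Dict("wake" => 0x01, "rem" => 0x02))
#     # => UInt8[0x01, 0x02]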
floor_containing(; epoch) = spans -> floor_containing(spans; epoch)
"""
floor_containing(spans; epoch)
floor_containing(; epoch)
Compute the shortest timespan containing contiguous `spans`, rounded down to
the nearest multiple of `epoch`.
Note that this function will not check whether spans are contiguous.
The kwarg-only method returns a closure which captures the epoch.
"""
function floor_containing(spans; epoch)
span = shortest_timespan_containing(spans)
dur = floor(duration(span), epoch)
return TimeSpan(start(span), start(span) + Nanosecond(dur))
end
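# For example, two contiguous spans covering 70s floor to two whole 30s
# epochs:
#
#     floor_containing([TimeSpan(0, Second(40)), TimeSpan(Second(40), Second(70))];
#                      epoch=Second(30))
#     # => TimeSpan(0, Second(60))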
"""
labels_to_samples(labels::AbstractVector{UInt8}; epoch)
labels_to_samples(; epoch)
Convert a vector of `UInt8` stage labels sampled evenly at intervals of `epoch`
into `Onda.Samples` with a sample rate of `1/epoch`.
The kwarg-only form returns a closure that captures the `epoch`.
The returned samples have samples info:
```
SamplesInfoV2(; sensor_type="label",
channels=["label"],
sample_unit="label",
sample_resolution_in_unit=1,
sample_offset_in_unit=0,
sample_type=UInt8,
sample_rate=Second(1) / epoch)
```
"""
labels_to_samples(; epoch) = x -> labels_to_samples(x; epoch)
function labels_to_samples(labels::AbstractVector{UInt8}; epoch)
# XXX: include label levels (ref array) and other useful metadata (possibly
# using a schema extension).
info = SamplesInfoV2(; sensor_type="label",
channels=["label"],
sample_unit="label",
sample_resolution_in_unit=1,
sample_offset_in_unit=0,
sample_type=UInt8,
sample_rate=Second(1) / epoch)
samples = Samples(reshape(labels, 1, :), info, false)
return samples
end
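# A minimal sketch: five 30s-epoch labels become 1×5 `Samples` at a sample
# rate of 1/30 Hz:
#
#     samples = labels_to_samples(UInt8[1, 1, 2, 2, 3]; epoch=Second(30))
#     # size(samples.data) == (1, 5)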
"""
labels_to_samples_table(labels::AbstractDataFrame; labels_column,
groups=:recording, epoch, kwargs...)
Convert an annotations table into a table of labels as `Samples`. This groups
by `groups` (defaults to `:recording`), and then applies
[`int_encode_labels`](@ref) to the `labels_column` and `:span` columns from each
group, and converts the resulting `UInt8` labels to `Onda.Samples` via
[`labels_to_samples`](@ref). The sampling rate for the resulting labels is `1 /
epoch`. The samples are returned in the `:labels` column.
Along with `epoch`, additional kwargs are forwarded to
[`int_encode_labels`](@ref):
- `encoding::Dict` the label -> `UInt8` mapping to use for encoding
- `roundto` controls rounding of "shaggy spans" (defaults to `nothing` for no
rounding)
The `span` corresponding to these labels is determined by
[`floor_containing`](@ref) and returned in the `:label_span` column.
A `DataFrame` is returned with the `:labels` and `:label_span` per group, as
well as the `groups` variables.
"""
function labels_to_samples_table(stages::AbstractDataFrame; labels_column,
groups=:recording, epoch, kwargs...)
grouped = groupby(stages, groups)
make_samples = labels_to_samples(; epoch) ∘ int_encode_labels(; epoch, kwargs...)
return combine(grouped,
[labels_column, :span] => make_samples => :labels,
:span => floor_containing(; epoch) => :label_span)
end
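# A minimal usage sketch (hypothetical annotations table and encoding):
#
#     labels = labels_to_samples_table(stages;
#                                      labels_column=:stage,
#                                      epoch=Second(30),
#                                      encoding=Dict("wake" => 0x01, "rem" => 0x02))
#     # => one row per recording, with :labels (Samples) and :label_span columns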
"""
sort_and_trim_spans(labels::AbstractDataFrame; epoch)
Sort a data frame of labels by `span` (according to `start`), and trim the final
span so that it has a duration that is an even multiple of `epoch`. This
prepares a table of labels for input to `labels_to_samples_table`, which
requires sorted spans and no ragged trailing span.
The input table for this function must contain spans that are contiguous after
sorting; otherwise an error will be thrown.
If the trimmed final span has zero duration, it is removed from the return table.
Returns a sorted copy of `labels`.
"""
function sort_and_trim_spans(labels::AbstractDataFrame; epoch)
labels = sort(labels, order(:span; by=start))
all_contiguous(labels.span) ||
throw(ArgumentError("`sort_and_trim_spans(labels)` requires that spans " *
"be contiguous after sorting. For tables with " *
"multiple sets of contiguous spans, use " *
"`sort_and_trim_spans(labels, grouping_cols)` to " *
"group before sorting/trimming."))
if !is_epoch_divisible(labels.span[end], epoch)
lastspan = labels.span[end]
newspan = translate(TimeSpan(0, floor(duration(lastspan), epoch)),
start(lastspan))
if duration(newspan) > Nanosecond(1)
labels.span[end] = newspan
else
deleteat!(labels, nrow(labels))
end
end
return labels
end
"""
sort_and_trim_spans(labels::AbstractDataFrame, by; epoch)
Sort and trim spans as above, first grouping the labels by the columns
selected by the DataFrames column selector `by` (e.g. `:recording`).
"""
function sort_and_trim_spans(labels::AbstractDataFrame, by; epoch)
grouped = groupby(labels, by)
return combine(grouped) do labels_grouped
return sort_and_trim_spans(labels_grouped; epoch)
end
end
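# For example, to sort and trim each recording's annotations independently
# (hypothetical table whose spans are contiguous within each recording):
#
#     trimmed = sort_and_trim_spans(stages, :recording; epoch=Second(30))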
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 3224 | @schema "batch-item" BatchItem
"""
@version BatchItemV2{T} > LabeledSignalV2 begin
batch_channels::T
end
Legolas record type representing a single batch item. Fields are inherited
from [`LabeledSignalV2 > SignalV2`](@ref LabeledSignalV2), and an additional `batch_channels`
field gives a channel selector for this batch. A "channel selector" is anything
that can be used as a channel index for `Onda.Samples`, or `missing` (in which
case, all channels will be used in the order they occur in the `Samples`).
Columns include:
- columns from `Onda.SignalV2` (everything required to `Onda.load` the segment)
- `labels` and `label_span` from `LabeledSignalV2`
- `batch_channels`
"""
BatchItemV2
@version BatchItemV2 > LabeledSignalV2 begin
batch_channels::(<:Any)
end
"""
materialize_batch_item(batch_item, samples_eltype::Type=Float64)
Load the signal data for a single [`BatchItemV2`](@ref), selecting only the
channels specified in the `batch_channels` field (using all channels if the
field is `missing`).
Returns a `signal_data, label_data` tuple, which is the contents of the `data`
field of the signals and labels `Samples` objects returned by
[`load_labeled_signal`](@ref), after indexing the signals data by
`batch_channels`.
The eltype of `signal_data` will be `samples_eltype`; the eltype of `label_data`
is whatever is returned by [`get_labels`](@ref).
"""
function materialize_batch_item(batch_item, samples_eltype::Type=Float64)
samples, labels = load_labeled_signal(batch_item, samples_eltype)
batch_channels = coalesce(batch_item.batch_channels, samples.info.channels)
signal_data = get_channel_data(samples, batch_channels)
label_data = labels.data
return signal_data, label_data
end
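# A minimal sketch: given one `BatchItemV2` row `item`, load its channel ×
# time signal matrix and the corresponding label data as Float32:
#
#     signal_data, label_data = materialize_batch_item(item, Float32)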
"""
get_channel_data(samples, channels)
Get the data associated with the specified channels. The default fallback
simply calls `samples[channels, :].data`, but custom channel selectors can be
used to implement more exotic featurization schemes (see tests for examples).
"""
get_channel_data(samples::Samples, channels) = samples[channels, :].data
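# As a (hypothetical) example of a custom selector, a wrapper type can
# zero-fill requested channels that are absent from a given recording:
#
#     struct ZeroMissing
#         channels::Vector{String}
#     end
#     function get_channel_data(samples::Samples, sel::ZeroMissing)
#         out = zeros(eltype(samples.data), length(sel.channels),
#                     size(samples.data, 2))
#         for (i, c) in enumerate(sel.channels)
#             c in samples.info.channels && (out[i:i, :] .= samples[c, :].data)
#         end
#         return out
#     end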
"""
materialize_batch(batch, samples_eltype::Type=Float64)
Materialize an entire batch, which is a table of [`BatchItemV2`](@ref) rows. Each
row is materialized concurrently by [`materialize_batch_item`](@ref), and the
resulting signals and labels arrays are concatenated on dimension `ndims(x) + 1`
respectively.
Returns a `signal_data, label_data` tuple. The dimensionality of these arrays
depends on the dimensionality of the results of
[`materialize_batch_item`](@ref), but will in general be `ndims(x) + 1`.
The eltype of `signal_data` will be `samples_eltype`; the eltype of `label_data`
is whatever is returned by [`get_labels`](@ref).
"""
function materialize_batch(batch, samples_eltype::Type=Float64)
# TODO: check integrity of labeled_signals table for batch construction (are
# the spans the same duration/number of samples? etc.)
signals_labels = asyncmap(item -> materialize_batch_item(item, samples_eltype),
Tables.rows(batch))
signals, labels = first.(signals_labels), last.(signals_labels)
x = _glue(signals)
y = _glue(labels)
return x, y
end
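# A minimal sketch: a table of `BatchItemV2` rows materializes to a pair of
# tensors with a trailing batch dimension:
#
#     x, y = materialize_batch(batch)
#     # size(x) == (n_channels, n_samples, n_items)
#     # size(y) == (1, n_epochs, n_items)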
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 5537 | # glue together a bunch of N-d arrays on the N+1th dimension; used to create
# N+1-dim tensors during batch materialization
function _glue(stuff)
sizes = unique(size.(stuff))
if length(sizes) > 1
sizes_str = join(sizes, ", ")
throw(ArgumentError("all elements must have the same size, " *
"got >1 unique sizes: $(sizes_str)"))
end
stuff_size = only(unique(size.(stuff)))
# use collect to ensure this is a vector-of-vectors
stuff_vec = reduce(vcat, vec.(collect(stuff)))
return reshape(stuff_vec, stuff_size..., length(stuff))
end
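# For example, three 2×4 matrices glue into a 2×4×3 array:
#
#     _glue([rand(2, 4) for _ in 1:3])  # size == (2, 4, 3)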
#####
##### channel wrangling
#####
is_closed_ex(ex) = false
is_closed_ex(ex::InvalidStateException) = ex.state == :closed
is_closed_ex(ex::RemoteException) = is_closed_ex(ex.captured.ex)
# for use in retry:
check_is_closed_ex(_, ex) = is_closed_ex(ex)
function retry_materialize_batch(batch; retries=4)
@debug "worker $(myid()) materializing batch" batch
return Base.retry(materialize_batch; delays=ExponentialBackOff(; n=retries),
check=!check_is_closed_ex)(batch)
end
"""
with_channel(f, channel; closed_msg="channel closed, stopping")
Run `f(channel)`, handling channel closure gracefully and closing the channel if
an error is caught.
If the channel is closed, the `InvalidStateException` is caught, the
`closed_msg` is logged as `@info`, and `:closed` is returned.
If any other error occurs, the channel is closed before rethrowing (with a
`@debug` log message reporting the error + stacktrace).
Otherwise, the return value is `f(channel)`.
"""
function with_channel(f, channel; closed_msg="channel closed, stopping")
try
return f(channel)
catch e
if is_closed_ex(e)
@info closed_msg
return :closed
else
# close the channel to communicate to anyone waiting on these
# batches that a problem has occurred
msg = sprint(showerror, e, catch_backtrace())
@debug "caught exception, closing channel and re-throwing: $msg"
close(channel)
rethrow()
end
end
end
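# A minimal sketch (hypothetical consumer): drain a channel of batches until
# the producer closes it, at which point `:closed` is returned instead of
# rethrowing:
#
#     with_channel(batch_channel) do chan
#         while true
#             batch = take!(chan)  # throws InvalidStateException once closed
#             consume(batch)       # hypothetical consumer
#         end
#     end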
#####
##### WorkerPool workarounds
#####
"""
reset!(pool::AbstractWorkerPool)
Restore worker pool to something like the state it would be in after
construction, with the channel populated with one instance of each worker
managed by the pool.
This has two phases: first, the contents of the channel are cleared out to avoid
double-adding workers to the channel. Second, the contents of `pool.workers` is
sorted, checked against the list of active processes with `procs()`, and then
live PIDs `put!` into the pool one-by-one. Dead workers are removed from the
set of workers held by the pool.
For a `WorkerPool`, this operation is forwarded to the process holding the
original pool (as with `put!`, `take!`, etc.) so it is safe to call on
serialized copies of the pool.
`nothing` is returned.
"""
reset!(pool::AbstractWorkerPool) = _local_reset!(pool)
function reset!(pool::WorkerPool)
if pool.ref.where != myid()
return remotecall_fetch(ref -> _local_reset!(fetch(ref).value),
pool.ref.where,
pool.ref)::Nothing
else
return _local_reset!(pool)
end
end
function _local_reset!(pool::AbstractWorkerPool)
# clean out existing workers so that we're not double-put!ing workers into
# the channel. we work directly with the channel to work around
# https://github.com/JuliaLang/julia/issues/48255
while isready(pool.channel)
take!(pool.channel)
end
live_procs = Set(procs())
for worker in sort!(collect(pool.workers))
# don't put worker back in pool if it's dead
if worker in live_procs
put!(pool, worker)
else
delete!(pool.workers, worker)
end
end
return nothing
end
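# A minimal sketch: after a loader process dies, `reset!` drops the dead PID
# from the pool and re-`put!`s the remaining live workers:
#
#     pool = WorkerPool(workers())
#     # ... workers checked out with `take!`, one killed via `rmprocs` ...
#     reset!(pool)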
# there isn't a Base.wait method for worker pools. `take!` blocks but removes a
# worker and we don't want that. the workaround here is to `wait` on the
# `.channel` field, which is consistent with the docs description of relying on
# a `.channel` field for fallback implementations of the API methods like
# `take!`:
# https://docs.julialang.org/en/v1/stdlib/Distributed/#Distributed.AbstractWorkerPool
#
# this is contributed upstream but it may be ... some time before it's usable
# here so in the mean time...
#
# https://github.com/JuliaLang/julia/pull/48238
_wait(p::AbstractWorkerPool) = wait(p.channel)
# but we also need to handle when a WorkerPool has been serialized, so this is
# basically copy-pasta from Distributed.jl stdlib:
function _wait(pool::WorkerPool)
# just for my own understanding later on: WorkerPool handles serialization
# by storing a "ref" as a RemoteChannel which contains the workerpool
# itself. this is created when the WorkerPool is created so the "where"
# field is the worker id which holds the actual pool; deserializing the pool
# somewhere else creates a dummy Channel (for available workers) and
# deserializes the ref.
#
# so in order to wait on a worker, we have to first determine whether our
# copy of the pool is the "real" one
if pool.ref.where != myid()
# this is the "remote" branch, so we remotecall `wait` on the channel of
# the pool held by the owning process
return remotecall_fetch(ref -> wait(fetch(ref).value.channel),
pool.ref.where,
pool.ref)::Nothing
else
return wait(pool.channel)
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 5365 | using Distributed
# one manager and 3 batch loaders
addprocs(4)
@everywhere begin
using DataFrames
using Dates
using Legolas
using Onda
using OndaBatches
using StableRNGs
end
#####
##### setup
#####
include("local_data.jl")
const VALID_STAGES = ("wake", "nrem1", "nrem2", "nrem3", "rem", "no_stage")
const SLEEP_STAGE_INDEX = Dict(s => UInt8(i)
for (i, s)
in enumerate(VALID_STAGES))
#####
##### basic functionality
#####
signals, labels = load_tables(; strip_refs=true);
# input signal data (X)
signals
describe(signals, :eltype, :first)
samples = Onda.load(first(signals))
# input label data (Y)
describe(labels, :eltype, :first)
labeled_signals = label_signals(signals, labels,
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
describe(labeled_signals, :eltype)
labeled_signals.labels[1]
batches = RandomBatches(; labeled_signals,
# uniform weighting of signals + labels
signal_weights=nothing,
label_weights=nothing,
n_channels=2,
batch_size=3,
batch_duration=Minute(5))
state0 = StableRNG(1)
batch, state = iterate_batch(batches, deepcopy(state0))
describe(batch, :eltype, :first)
x, y = materialize_batch(batch);
# signal tensor:
x
# labels tensor:
y
#####
##### persist label sets
#####
labeled_signals_stored = store_labels(labeled_signals,
joinpath(@__DIR__, "data", "labels"))
describe(labeled_signals_stored, :eltype, :first)
first(labeled_signals_stored.labels)
batches = RandomBatches(; labeled_signals=labeled_signals_stored,
# uniform weighting of signals + labels
signal_weights=nothing,
label_weights=nothing,
n_channels=2,
batch_size=3,
batch_duration=Minute(5))
state0 = StableRNG(1)
batch, state = iterate_batch(batches, deepcopy(state0))
describe(batch, :eltype, :first)
x, y = materialize_batch(batch);
x
y
#####
##### zero missing channels
#####
struct ZeroMissingChannels
channels::Vector{String}
end
function OndaBatches.get_channel_data(samples::Samples, channels::ZeroMissingChannels)
out = zeros(eltype(samples.data), length(channels.channels), size(samples.data, 2))
for (i, c) in enumerate(channels.channels)
if c ∈ samples.info.channels
@views out[i:i, :] .= samples[c, :].data
end
end
return out
end
channels = ZeroMissingChannels(["c3", "c4", "o1", "o2", "not-a-real-channel"])
OndaBatches.get_channel_data(samples, channels)
# normally we'd make our batch iterator set this field for us but for demo
# purposes we'll do it manually
batch.batch_channels .= Ref(channels);
x, y = materialize_batch(batch);
batch
x
#####
##### compartments
#####
struct EvenOdds
n_channels::Int
end
function OndaBatches.get_channel_data(samples::Samples, channels::EvenOdds)
n = channels.n_channels
chans = samples.info.channels
odds = @view(samples[chans[1:2:n], :]).data
evens = @view(samples[chans[2:2:n], :]).data
return cat(evens, odds; dims=3)
end
channels = EvenOdds(4)
labeled_signals_four_channels = filter(:channels => >=(4) ∘ length,
labeled_signals)
batches_eo = RandomBatches(; labeled_signals=labeled_signals_four_channels,
# uniform weighting of signals + labels
signal_weights=nothing,
label_weights=nothing,
n_channels=2,
batch_size=3,
batch_duration=Minute(5))
batch, _ = iterate_batch(batches_eo, copy(state0))
batch.batch_channels .= Ref(channels)
x, y = materialize_batch(batch)
x
batch_flat = deepcopy(batch)
batch_flat.batch_channels .= Ref(1:4)
x_flat, _ = materialize_batch(batch_flat)
x_flat
#####
##### batch service
#####
using Distributed
# one manager and 3 batch loaders
addprocs(4)
@everywhere begin
using DataFrames
using Dates
using Legolas
using Onda
using OndaBatches
using StableRNGs
end
batcher = Batcher(workers(), batches; start=false)
get_status(batcher)
start!(batcher, copy(state0));
get_status(batcher)
state = copy(state0)
(x, y), state = take!(batcher, state)
(x, y), state = take!(batcher, state)
# taking from an out-of-sync state will restart the batcher
(x, y), state = take!(batcher, copy(state0))
stop!(batcher)
get_status(batcher)
# errors get propagated to consumer
bad_batches = deepcopy(batches)
bad_batches.labeled_signals.file_path .= "blah blah not a path"
bad_batcher = Batcher(workers(), bad_batches; start=true, state=copy(state0))
take!(bad_batcher, state0)
# turn on debug logging if you want to see the gory details for debugging
@everywhere batcher.manager ENV["JULIA_DEBUG"] = "OndaBatches"
start!(batcher, copy(state0))
# turn on debug logging on the loaders for the _really gory_ details
stop!(batcher)
@everywhere ENV["JULIA_DEBUG"] = "OndaBatches"
start!(batcher, copy(state0))
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 1758 | using AWSS3
using Legolas: @schema, @version
using Onda
using OndaBatches
include(joinpath(@__DIR__, "../test/testdataset.jl"))
local_root = joinpath(@__DIR__, "data")
local_signals_path = joinpath(local_root, "signals.arrow")
if !isfile(local_signals_path)
signals = DataFrame(Legolas.read(uncompressed_signals_path); copycols=true)
local_signals = transform(signals,
:file_path => ByRow() do path
local_path = joinpath(local_root, "samples",
basename(path))
cp(path, Path(local_path))
@info string(path, '→', local_path)
return local_path
end => :file_path)
Onda.load(first(local_signals))
Legolas.write(local_signals_path, local_signals, SignalV2SchemaVersion())
end
local_stages_path = joinpath(local_root, "stages.arrow")
if !isfile(local_stages_path)
cp(stages_path, Path(local_stages_path))
stages = DataFrame(Legolas.read(local_stages_path); copycols=true)
stages = OndaBatches.sort_and_trim_spans(stages, :recording; epoch=Second(30))
Legolas.write(local_stages_path, stages, SleepStageV1SchemaVersion())
end
function load_tables(; strip_refs=true)
signals = DataFrame(Legolas.read(local_signals_path); copycols=true)
if strip_refs
transform!(signals,
:channels => ByRow() do channels
[string(first(split(c, "-"; limit=2))) for c in channels]
end => :channels)
end
stages = DataFrame(Legolas.read(local_stages_path); copycols=true)
return signals, stages
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 264 | using Remark, FileWatching
while true
Remark.slideshow(@__DIR__;
options = Dict("ratio" => "16:9"),
title = "OndaBatches.jl")
@info "Rebuilt"
FileWatching.watch_file(joinpath(@__DIR__, "src", "index.md"))
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 4409 | module OndaBatchesTests
using AlignedSpans
using Aqua
using AWS
using AWSS3
using DataFrames
using Dates
using Distributed
using Legolas: Legolas, SchemaVersion, @schema, @version
using Onda
using OndaBatches
using Pkg
using StableRNGs
using StatsBase
using ReTest
using TimeSpans
using UUIDs
using Onda: SignalV2SchemaVersion
using OndaBatches: LabeledSignalV2SchemaVersion
using Tables: rowmerge
function isvalid(tbl, schema::SchemaVersion)
tbl_schema = Tables.schema(Tables.columns(tbl))
return Legolas.complies_with(tbl_schema, schema)
end
const VALID_STAGES = ("wake", "nrem1", "nrem2", "nrem3", "rem", "no_stage")
const SLEEP_STAGE_INDEX = Dict(s => UInt8(i)
for (i, s)
in enumerate(VALID_STAGES))
const TEST_ROOT = joinpath(S3Path("s3://beacon-public-oss/ondabatches-ci/tmp"),
string(uuid4()))
atexit() do
(; bucket, key) = TEST_ROOT
@sync for key in s3_list_keys(bucket, key * "/")
for obj in s3_list_versions(bucket, key)
version = obj["VersionId"]
@async begin
try
s3_delete(bucket, key; version)
catch e
path = string(S3Path(bucket, key; version))
@error("Error deleting $(path)",
exception=(e, catch_backtrace()))
end
end
end
end
end
include("testdataset.jl")
const signals = DataFrame(Legolas.read(signals_path); copycols=true)
const uncompressed_signals = DataFrame(Legolas.read(uncompressed_signals_path); copycols=true)
const stages = DataFrame(Legolas.read(stages_path); copycols=true)
# this gets used all over the place so we'll just do it once here and avoid
# repetition
const labeled_signals = label_signals(uncompressed_signals,
sort_and_trim_spans(stages, :recording; epoch=Second(30)),
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
const N_WORKERS = 3
# for testing get_channel_data
struct EvenOdds end
function OndaBatches.get_channel_data(samples::Samples, channels::EvenOdds)
chans = samples.info.channels
odds = @view(samples[chans[1:2:end], :]).data
evens = @view(samples[chans[2:2:end], :]).data
return cat(evens, odds; dims=3)
end
struct ZeroMissingChannels
channels::Vector{String}
end
function OndaBatches.get_channel_data(samples::Samples, channels::ZeroMissingChannels)
out = zeros(eltype(samples.data), length(channels.channels), size(samples.data, 2))
for (i, c) in enumerate(channels.channels)
if c ∈ samples.info.channels
@views out[i:i, :] .= samples[c, :].data
end
end
return out
end
# utilities to get the number of workers in a pool that are actually available
function count_ready_workers(pool::AbstractWorkerPool)
n = 0
workers = []
while isready(pool)
# need to work around bugged behavior of `isready` vs `take!` blocking
# when presented with a non-existent worker id
id = @async take!(pool)
timedwait(() -> istaskdone(id), 1) == :ok || continue
push!(workers, fetch(id))
n += 1
end
# replace the workers
foreach(w -> put!(pool, w), workers)
return n
end
# activate project + load dependencies on workers
function provision_worker(worker_ids)
project = Pkg.project().path
Distributed.remotecall_eval(Main, worker_ids,
:(using Pkg; Pkg.activate($(project))))
if isdefined(Main, :Revise)
Distributed.remotecall_eval(Main, worker_ids, :(using Revise))
end
Distributed.remotecall_eval(Main, worker_ids,
:(using OndaBatches, StableRNGs, ReTest))
# not needed in CI but useful when running tests locally and doesn't hurt
Distributed.remotecall_eval(Main, worker_ids,
:(using AWS;
global_aws_config($(global_aws_config()))))
return nothing
end
@testset "aqua" begin
Aqua.test_all(OndaBatches; ambiguities=false)
end
include("utils.jl")
include("labeled_signal.jl")
include("iterate_batch.jl")
include("materialize_batch.jl")
include("batch_services.jl")
end #module
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 11140 | # need these because how the exception gets wrapped depends on julia
# version
unwrap(e) = e
unwrap(e::RemoteException) = unwrap(e.captured)
unwrap(e::CapturedException) = unwrap(e.ex)
@testset "Batcher" begin
# provision some workers for service tests.
worker_ids = addprocs(N_WORKERS)
try
provision_worker(worker_ids)
init_batch_state = StableRNG(1338)
batches = RandomBatches(; labeled_signals,
signal_weights=nothing,
label_weights=nothing,
n_channels=1,
batch_size=2,
batch_duration=Minute(5))
localbatcher = OndaBatches.BlockingBatcher(batches)
# state is required when start=true (the default)
@test_throws ArgumentError Batcher(worker_ids[1:1], batches; start=true, state=nothing)
@test_throws ArgumentError Batcher(worker_ids[1:1], batches; state=nothing)
batcher = Batcher(worker_ids[1:1], batches; start=false)
# test behavior without starting first
@test get_status(batcher) == :stopped
@test_throws ArgumentError take!(batcher, copy(init_batch_state))
start!(batcher, copy(init_batch_state))
@test get_status(batcher) == :running
b, s1 = take!(batcher, copy(init_batch_state))
multibatcher = Batcher(worker_ids, batches; state=copy(init_batch_state))
@test get_status(batcher) == :running
@test get_status(multibatcher) == :running
@test take!(batcher, copy(init_batch_state)) == (b, s1)
bm, sm1 = take!(multibatcher, copy(init_batch_state))
bl, sl1 = take!(localbatcher, copy(init_batch_state))
@test b == bm == bl
@test s1 == sm1 == sl1
# make sure it's repeatable:
# warns that state is wrong, then info to stop, then info to start
b0 = b
b, s1 = @test_logs (:warn,) (:info,) (:info,) take!(batcher, copy(init_batch_state))
bm, sm1 = @test_logs (:warn,) (:info,) (:info,) take!(multibatcher, copy(init_batch_state))
bl, sl1 = take!(localbatcher, copy(init_batch_state))
@test b == b0 == bm == bl
@test s1 == sm1 == sl1
b, s2 = @test_logs take!(batcher, s1)
bm, sm2 = take!(multibatcher, sm1)
bl, sl2 = take!(localbatcher, s1)
@test b == bm == bl
@test s2 == sm2 == sl2
# take more batches to make sure they stay in sync for a few rounds
let sm=sm2, s=s2
for _ in 1:10
bm, sm = take!(multibatcher, sm)
b, s = take!(batcher, s)
@test b == bm
@test s == sm
end
end
# start! on a running batcher is a no-op
let chan=batcher.channel, status=batcher.status
@test_throws MethodError start!(batcher)
@test_logs (:warn,) start!(batcher, init_batch_state)
@test chan === batcher.channel
@test status === batcher.status
@test_logs (:info,) stop!(batcher)
@test_logs (:info,) start!(batcher, copy(init_batch_state))
@test chan != batcher.channel
@test status != batcher.status
end
# manual close stops batching
close(batcher.channel)
wait(batcher.status)
@test get_status(batcher) == :closed
@test_logs (:info,) stop!(batcher)
# check that we can do this twice without error
@test_logs (:info,) stop!(batcher)
@test get_status(batcher) == :closed
@test_logs (:info,) stop!(multibatcher)
@test get_status(multibatcher) == :closed
# test worker pool integrity after stop!
pool = multibatcher.workers
@test length(pool) == count_ready_workers(pool) == length(worker_ids) - 1
@testset "local manager doesn't mutate init state" begin
b1 = Batcher(myid(), worker_ids[2:2], batches; start=false)
b2 = Batcher(worker_ids[1], worker_ids[2:2], batches; start=false)
init_state = StableRNG(1)
start!(b1, init_state)
start!(b2, init_state)
x1, s1 = take!(b1, init_state)
x2, s2 = take!(b2, init_state)
@test x1 == x2
@test s1 == s2 != init_state
# not mutated
@test init_state == StableRNG(1)
end
@testset "start with empty worker pool" begin
manager, rest = Iterators.peel(worker_ids)
workers = WorkerPool(collect(rest))
batcher = Batcher(manager, workers, batches; start=false)
while isready(workers)
take!(workers)
end
@test !isready(workers)
start!(batcher, copy(init_batch_state))
t = @async take!(batcher, copy(init_batch_state))
# because this may deadlock, we put it behind a timedwait
@test timedwait(() -> istaskdone(t), 5) == :ok
# this, alas, may also deadlock (but should be fast):
t = @async stop!(batcher)
@test timedwait(() -> istaskdone(t), 1) == :ok
@test get_status(batcher) == :closed
end
@testset "error handling" begin
# need to do some juggling to test behavior when channel is MANUALLY
# closed. we're gonna splice in a future that resolves to `:running` to
# trick everybody...
new_status = Future()
put!(new_status, :running)
batcher.status = new_status
# ...and then close the channel
batcher.channel = RemoteChannel(() -> Channel{Any}(Inf))
close(batcher.channel)
# ...and then attempt to take!
#
# warns about "okay"/not error status, and then throws channel closed
@test_logs (:warn,) @test_throws InvalidStateException take!(batcher, copy(init_batch_state))
badbatch = deepcopy(batches)
# replace signals with nonsense paths that'll throw errors
transform!(badbatch.labeled_signals,
:file_path => ByRow(_ -> "blahblah") => :file_path)
badbatcher = Batcher(worker_ids[1:1], badbatch; state=copy(init_batch_state))
@test_throws RemoteException wait(badbatcher.status)
@test_throws RemoteException take!(badbatcher, copy(init_batch_state))
@test (@test_logs (:error,) get_status(badbatcher)) isa RemoteException
@test_logs (:info,) (:error,) stop!(badbatcher)
# check that we can do this twice without error
@test_logs (:info,) (:error,) stop!(badbatcher)
badmultibatcher = Batcher(worker_ids, badbatch; state=copy(init_batch_state))
@test_throws RemoteException wait(badmultibatcher.status)
@test_throws RemoteException take!(badmultibatcher, copy(init_batch_state))
@test (@test_logs (:error,) get_status(badmultibatcher)) isa RemoteException
@test_logs (:info,) (:error,) stop!(badmultibatcher)
# confirm that worker pool is restored to good state
pool = badmultibatcher.workers
@test length(pool) == count_ready_workers(pool) == length(worker_ids) - 1
# test behavior of error on workers while waiting inside `take!`
badbatcher = Batcher(worker_ids[1:1], badbatch; start=false)
try
start!(badbatcher, copy(init_batch_state))
take!(badbatcher, copy(init_batch_state))
catch e
@test e isa RemoteException
e = unwrap(e)
@test e isa SystemError
@test e.prefix == "opening file \"blahblah\""
end
end
@testset "finite batches" begin
# for testing, iterate batch items one at a time
@everywhere begin
using OndaBatches
function OndaBatches.iterate_batch(batches::Vector{<:BatchItemV2}, state::Int)
state > length(batches) && return nothing
batch, next_state = batches[state], state + 1
# @show next_state
return [batch], next_state
end
end
batches = map(eachrow(labeled_signals)) do row
span = translate(TimeSpan(0, Minute(1)),
start(row.label_span))
item = sub_label_span(row, span)
batch_channels = first(row.channels)
return BatchItemV2(Tables.rowmerge(item, batch_channels))
end
# test multiple batcher methodologies against each other
batchers = map((worker_ids, worker_ids[1:1])) do workers
batcher = Batcher(workers, batches; start=true, state=1)
return batcher
end
all_batches_rt = map(batchers) do batcher
state = 1
next = take!(batcher, state)
batches_rt = []
while next !== nothing
batch, state = next
# @show state
push!(batches_rt, batch)
next = take!(batcher, state)
end
return batches_rt
end
# need a vector of items to materialize, so we just call vcat on each
@test all(==(materialize_batch.(vcat.(batches))), all_batches_rt)
@test all(==(:done), get_status.(batchers))
end
@testset "recovery from dead worker" begin
# we want to test what happens when a worker dies and needs to be
# replaced...
# first, make sure that we CAN remove a worker and get reasonable
# behavior...
batches = RandomBatches(; labeled_signals,
signal_weights=nothing,
label_weights=nothing,
n_channels=1,
batch_size=2,
batch_duration=Minute(5))
manager, loaders = Iterators.peel(worker_ids)
pool = WorkerPool(collect(loaders))
batcher = Batcher(manager, pool, batches; state=copy(init_batch_state))
wait(batcher.channel)
rmprocs(last(collect(loaders)))
caught_ex = let s=copy(init_batch_state)
try
while true
b, s = take!(batcher, s)
end
catch e
e
end
end
@test unwrap(caught_ex) isa ProcessExitedException
# we're left with one less worker in the pool
@test length(pool) ==
count_ready_workers(pool) ==
length(collect(loaders)) - 1
end
finally
rmprocs(worker_ids)
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 8525 | lab_sigs = ("eager" => labeled_signals, "lazy" => OndaBatches.store_labels(labeled_signals, TEST_ROOT))
@testset "iterate batches: $type" for (type, labeled_signals) in lab_sigs
using OndaBatches: get_labels, _sample_rate, _label_sample_count
batch_spec = RandomBatches(; labeled_signals,
signal_weights=nothing, # uniform
label_weights=nothing, # uniform
n_channels=1,
batch_size=10,
batch_duration=Minute(1))
rng = StableRNG(1337)
batch_row, rng = iterate_batch_item(batch_spec, rng)
@test rng isa StableRNG
@test all(in(propertynames(batch_row)),
(:label_span, :batch_channels, :labels))
@test isvalid([batch_row], SignalV2SchemaVersion())
@test isvalid([batch_row], LabeledSignalV2SchemaVersion())
@test duration(batch_row.label_span) ==
duration(get_labels(batch_row.labels, batch_row.label_span)) ==
batch_spec.batch_duration
batch_list, rng = iterate_batch(batch_spec, StableRNG(1337))
@test rng isa StableRNG
@test BatchItemV2(first(Tables.rows(batch_list))) == batch_row
@test isvalid(batch_list, SignalV2SchemaVersion())
@test isvalid(batch_list, LabeledSignalV2SchemaVersion())
@test length(Tables.rows(batch_list)) == batch_spec.batch_size
# this depends on the RNG but is meant to check that different
# recordings are being sampled, in contrast to below...
@test length(unique(batch_list.recording)) > 1
# we actually subsample channels
@test all(batch_list.channels .!= batch_list.batch_channels)
@test all(length.(batch_list.batch_channels) .== batch_spec.n_channels)
@testset "channels sampled without replacement" begin
batch_spec = RandomBatches(; labeled_signals,
signal_weights=nothing, # uniform
label_weights=nothing, # uniform
n_channels=2,
batch_size=10,
batch_duration=Minute(1))
rng = StableRNG(1337)
for _ in 1:100
batch_list, rng = iterate_batch(batch_spec, rng)
@test all(allunique.(batch_list.batch_channels))
end
end
@testset "weights are used" begin
# make sure weights are actually being used by zeroing out everything
# except one:
for i in 1:nrow(labeled_signals)
allone_weights = Weights((1:nrow(labeled_signals)) .== i)
batch_spec_one = RandomBatches(; labeled_signals,
signal_weights=allone_weights,
label_weights=nothing, # uniform
n_channels=1,
batch_size=10,
batch_duration=Minute(1))
allone_batch_list, rng = iterate_batch(batch_spec_one,
StableRNG(1337))
@test all(==(labeled_signals.recording[i]),
allone_batch_list.recording)
end
# make sure label weights are actually being used
label_weights_allone = map(enumerate(eachrow(labeled_signals))) do (i, row)
(; labels, label_span) = row
labels = get_labels(labels, label_span)
n_labels = size(labels.data, 2)
just_i = Weights((1:n_labels) .== i)
return just_i
end
for i in 1:nrow(labeled_signals)
allone_weights = Weights((1:nrow(labeled_signals)) .== i)
batch_spec_one = RandomBatches(; labeled_signals,
signal_weights=allone_weights,
label_weights=label_weights_allone,
n_channels=1,
batch_size=10,
batch_duration=Second(30))
batch_item, _ = iterate_batch_item(batch_spec_one, StableRNG(1337))
@test duration(batch_item.label_span) ==
duration(get_labels(batch_item.labels, batch_item.label_span)) ==
batch_spec_one.batch_duration
t = time_from_index(_sample_rate(labeled_signals.labels[i]), i)
t += start(labeled_signals.label_span[i])
span = TimeSpan(t, t + Nanosecond(Second(30)))
@test TimeSpans.contains(batch_item.label_span, t)
@test batch_item.label_span == span
end
end
@testset "n_channels == nothing means use all channels" begin
batchspec_all_chans = RandomBatches(; labeled_signals,
signal_weights=nothing, # uniform
label_weights=nothing, # uniform
n_channels=nothing,
batch_size=10,
batch_duration=Minute(1))
rng = StableRNG(1337)
for _ in 1:100
batch, rng = iterate_batch_item(batchspec_all_chans, rng)
@test batch.batch_channels == batch.channels
end
end
@testset "sample_label_span edge cases" begin
using OndaBatches: sample_label_span
(; label_span, labels) = first(labeled_signals)
labels_weights = uweights(_label_sample_count(labels, label_span))
rng = StableRNG(1337)
whole_span = sample_label_span(rng, labels, label_span, labels_weights,
duration(label_span))
@test whole_span == label_span
shifted = sample_label_span(rng,
labels,
translate(label_span, Minute(1)),
labels_weights,
duration(label_span))
@test shifted == translate(label_span, Minute(1))
# choose weights so only first span is valid:
label_sample_idxs = 1:_label_sample_count(labels, label_span)
first_only_ws = Weights(label_sample_idxs .== 1)
first_span = sample_label_span(rng, labels, label_span, first_only_ws,
Minute(1))
@test first_span == translate(TimeSpan(0, Minute(1)), start(label_span))
# choose weights so only last span is valid:
last_only_ws = Weights(reverse(first_only_ws))
last_span = sample_label_span(rng, labels, label_span, last_only_ws,
Minute(1))
@test stop(last_span) == stop(label_span)
@test duration(last_span) == Minute(1)
# sample a span that is a single label
first_one = sample_label_span(rng, labels, label_span, first_only_ws,
Second(30))
@test duration(first_one) == Second(30)
@test start(first_one) == start(label_span)
last_one = sample_label_span(rng, labels, label_span, last_only_ws,
Second(30))
@test duration(last_one) == Second(30)
@test stop(last_one) == stop(label_span)
# span too short
@test_throws(ArgumentError("batch segments must be an integer, got 0.03333333333333333 with batch duration of 1 second and sampling rate of 0.03333333333333333"),
sample_label_span(rng, labels, label_span, labels_weights,
Second(1)))
# span not even multiple of epoch
@test_throws(ArgumentError("batch segments must be an integer, got 1.0333333333333332 with batch duration of 31 seconds and sampling rate of 0.03333333333333333"),
sample_label_span(rng, labels, label_span, labels_weights,
Second(31)))
# span too long
@test_throws(ArgumentError,
sample_label_span(rng, labels, label_span, labels_weights,
duration(label_span) + Nanosecond(Second(30))))
# empty span
@test_throws(ArgumentError,
sample_label_span(rng, labels, label_span, labels_weights,
Second(0)))
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 27340 | @testset "labeled signal" begin
@testset "Label preprocessing" begin
using OndaBatches: all_contiguous
@testset "is_contiguous and all_contiguous synth data" begin
spans = [TimeSpan(Second(i), Second(i + 1)) for i in 1:10]
spans_discont = spans[[1, 2, 4, 5, 7]]
@test all_contiguous(spans)
@test !all_contiguous(reverse(spans))
@test !all_contiguous(spans_discont)
end
@testset "epoch divisibility" begin
using OndaBatches: is_epoch_divisible, check_epoch_divisible
epoch = Second(30)
@test is_epoch_divisible(TimeSpan(0, Minute(10)), epoch)
@test !is_epoch_divisible(TimeSpan(0, Second(31)), epoch)
# no rounding by default
@test !is_epoch_divisible(TimeSpan(0, Millisecond(30001)), epoch)
# snapping to nearest second
@test is_epoch_divisible(TimeSpan(0, Millisecond(30001)), epoch;
roundto=Second)
@test is_epoch_divisible(TimeSpan(0, Millisecond(30499)), epoch;
roundto=Second)
@test !is_epoch_divisible(TimeSpan(0, Millisecond(30500)), epoch;
roundto=Second)
@test !is_epoch_divisible(TimeSpan(0, Millisecond(30499)), epoch;
roundto=Millisecond)
# absurd rounding
@test is_epoch_divisible(TimeSpan(0, Millisecond(30499)), epoch;
roundto=Minute)
good = TimeSpan(0, Second(30))
bad = TimeSpan(0, Second(31))
@test_throws ArgumentError check_epoch_divisible([bad], epoch)
@test_throws ArgumentError check_epoch_divisible([bad, good], epoch)
@test nothing === check_epoch_divisible([good], epoch)
@test nothing === check_epoch_divisible([bad, good], Second(1))
end
@testset "snap containing" begin
using OndaBatches: floor_containing
epoch = Second(30)
spans = [TimeSpan(epoch * i, epoch * (i + 1)) for i in 1:20]
@test floor_containing(spans; epoch) == shortest_timespan_containing(spans)
@test floor_containing(spans; epoch=Second(31)) ==
TimeSpan(Second(30), Second(30) + Second(31) * (length(spans) - 1))
@test duration(floor_containing(spans; epoch=Second(31))) <
duration(floor_containing(spans; epoch))
# non-contiguous
@test floor_containing(reverse(spans); epoch) == floor_containing(spans; epoch)
@test floor_containing(spans[1:2:20]; epoch) == floor_containing(spans[1:19]; epoch)
end
@testset "int encode labels" begin
using OndaBatches: int_encode_labels
encoding = SLEEP_STAGE_INDEX
stage_names = VALID_STAGES[1:5]
stages = reduce(append!, (repeat([s], i) for (i, s) in enumerate(stage_names)))
spans = [TimeSpan(Second(30 * i), Second(30 * (i + 1)))
for i in 1:length(stages)]
labels = int_encode_labels(stages, spans;
epoch=Second(30),
encoding)
@test labels == reduce(append!,
(repeat(UInt8[i], i) for i in 1:length(stage_names)))
labels = int_encode_labels(stages, spans; epoch=Second(10), encoding)
@test labels == reduce(append!,
(repeat(UInt8[i], i * 3) for i in 1:length(stage_names)))
@test_throws ArgumentError int_encode_labels(stages, reverse(spans);
epoch=Second(30),
encoding)
@test_throws ArgumentError int_encode_labels(stages[1:2:end], spans[1:2:end];
epoch=Second(30),
encoding)
@test_throws ArgumentError int_encode_labels(stages[1:2], spans;
epoch=Second(30),
encoding)
@test_throws ArgumentError int_encode_labels(stages, spans;
epoch=Second(31),
encoding)
spans_short_first = [TimeSpan(Second(31), Second(60)); spans[2:end]]
@test_throws ArgumentError int_encode_labels(stages, spans_short_first;
epoch=Second(30),
encoding)
# ragged (last span is long) are an error
spans_ragged = [spans[1:(end - 1)];
TimeSpan(start(spans[end]),
stop(spans[end]) + Nanosecond(Second(2)))]
@test_throws ArgumentError int_encode_labels(stages, spans_ragged;
epoch=Second(30),
encoding)
@test int_encode_labels(stages, spans; epoch=Second(10), encoding) ==
int_encode_labels(; epoch=Second(10), encoding)(stages, spans)
end
@testset "labels_to_samples_table" begin
using OndaBatches: labels_to_samples_table
# not sorted yet:
@test_throws(ArgumentError("spans must be sorted"),
labels_to_samples_table(stages;
labels_column=:stage,
epoch=Second(30),
encoding=SLEEP_STAGE_INDEX))
mystages = sort(stages, [:recording, order(:span; by=start)])
# not trimmed yet
@test_throws(ArgumentError("spans are not evenly divisible into epochs!"),
labels_to_samples_table(mystages;
labels_column=:stage,
epoch=Second(30),
encoding=SLEEP_STAGE_INDEX))
mystages = sort_and_trim_spans(mystages, :recording; epoch=Second(30))
labels = labels_to_samples_table(mystages;
labels_column=:stage,
epoch=Second(30),
encoding=SLEEP_STAGE_INDEX)
@test eltype(labels.labels) <: Samples
@test eltype(labels.label_span) == TimeSpan
labels_10s = labels_to_samples_table(mystages; epoch=Second(10),
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX)
foreach(labels.labels, labels_10s.labels) do labels, labels10
# durations may be different if last span is long
@test duration(labels) <= duration(labels10)
@test duration(labels10) - duration(labels) < Second(30)
# long final span can result in extra 10s span at the end
labels_span = TimeSpan(0, duration(labels))
@test all(labels.data .== labels10[:, labels_span].data[:, 1:3:end])
end
@test all(TimeSpans.contains.(labels_10s.label_span, labels.label_span))
end
@testset "sort_and_trim_spans" begin
dangles = DataFrame(; span=[TimeSpan(Second(1), Millisecond(2500)),
TimeSpan(0, Second(1))],
name="two")
@test sort_and_trim_spans(dangles; epoch=Second(1)).span ==
[TimeSpan(0, Second(1)), TimeSpan(Second(1), Second(2))]
@test sort_and_trim_spans(dangles; epoch=Millisecond(500)).span ==
reverse(dangles.span)
nodangles = sort_and_trim_spans(dangles; epoch=Second(1))
@test sort_and_trim_spans(nodangles; epoch=Second(1)) == nodangles
@test sort_and_trim_spans(dangles; epoch=Millisecond(500)) ==
reverse(dangles)
dangles1 = DataFrame(; span=[TimeSpan(0, Second(1)),
TimeSpan(Second(1), Millisecond(1100))],
name="one")
nodangles1 = sort_and_trim_spans(dangles1; epoch=Second(1))
@test nrow(nodangles1) == 1
@test nodangles1 == dangles1[1:1, :]
alldangles = vcat(dangles, dangles1)
# non-contiguous spans is an error
@test_throws ArgumentError sort_and_trim_spans(alldangles; epoch=Second(1))
alltrimmed = sort_and_trim_spans(alldangles, :name; epoch=Second(1))
# order of columns can be changed by `combine`
@test alltrimmed == select(vcat(nodangles, nodangles1), names(alltrimmed))
end
end
@testset "labeled signal" begin
using OndaBatches: label_signals, get_labels
sorted_stages = sort_and_trim_spans(sort(stages, :recording), :recording;
epoch=Second(30))
labeled = label_signals(signals, sorted_stages;
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
@test isvalid(labeled, LabeledSignalV2SchemaVersion())
durations = combine(groupby(sorted_stages, :recording),
:span => (s -> sum(duration, s)) => :duration)
leftjoin!(durations,
select(labeled,
:recording,
:label_span => ByRow(duration) => :label_duration),
on=:recording)
@test all(floor.(durations.duration, Second(30)) .==
durations.label_duration)
for (r, (lab, stag)) in Legolas.gather(:recording, labeled, sorted_stages)
lab = only(lab)
(; labels, label_span) = lab
(; stage, span) = stag
foreach(stage, span) do s, sp
# shift span to "label space"
lab_sp = translate(sp, -start(label_span))
unique_lab = only(unique(get_labels(labels, label_span)[1, lab_sp].data))
@test VALID_STAGES[unique_lab] == s
# sub_label_span takes a "new labeled span" (relative to the
# recording start, so same as annotation spans)
sub_lab = sub_label_span(lab, floor_containing([sp]; epoch=Second(30)))
# these in general won't be exactly equal because the final
# input span can be too long sometimes, and is truncated
@test start(sp) == start(sub_lab.label_span)
@test stop(sp) - stop(sub_lab.label_span) < Second(30)
@test only(unique(sub_lab.labels.data)) == unique_lab
end
end
# missing labels
stages_missing = filter(:recording => !=(first(signals.recording)),
sorted_stages)
labeled_missing = @test_logs (:warn, ) label_signals(signals,
stages_missing;
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
@test all(!=(first(signals.recording)), labeled_missing.recording)
@test nrow(labeled_missing) == nrow(signals) - 1
@testset "non-overlapping label spans are an error" begin
onesignal = signals[1:1, :]
badstage = [(; recording=onesignal.recording[1],
stage="wake",
id=uuid4(),
span=translate(onesignal.span[1], -Second(1)))]
@test_throws ArgumentError label_signals(onesignal,
DataFrame(badstage);
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
end
end
@testset "`store_labels`" begin
using OndaBatches: label_signals, store_labels
sorted_stages = sort(stages, [:recording, order(:span; by=start)])
new_labeled_signals = OndaBatches.store_labels(labeled_signals, TEST_ROOT)
@test isvalid(new_labeled_signals, LabeledSignalV2SchemaVersion())
@test all(isfile.(new_labeled_signals.file_path))
end
@testset "labeled signal (lazy)" begin
using OndaBatches: label_signals, store_labels, get_labels
sorted_stages = sort_and_trim_spans(stages, :recording; epoch=Second(30))
labeled = label_signals(signals, sorted_stages;
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
labeled = store_labels(labeled, TEST_ROOT)
@test isvalid(labeled, LabeledSignalV2SchemaVersion())
durations = combine(groupby(sorted_stages, :recording),
:span => (s -> sum(duration, s)) => :duration)
leftjoin!(durations,
select(labeled,
:recording,
:label_span => ByRow(duration) => :label_duration),
on=:recording)
@test all(floor.(durations.duration, Second(30)) .==
durations.label_duration)
for (r, (lab, stag)) in Legolas.gather(:recording, labeled, sorted_stages)
lab = only(lab)
(; labels, label_span) = lab
(; stage, span) = stag
foreach(stage, span) do s, sp
# shift span to "label space"
lab_sp = translate(sp, -start(label_span))
unique_lab = only(unique(get_labels(labels, label_span)[1, lab_sp].data))
@test VALID_STAGES[unique_lab] == s
#
# # sub_label_span takes a "new labeled span" (relative to the
# # recording start, so same as annotation spans)
sub_lab = sub_label_span(lab, floor_containing([sp]; epoch=Second(30)))
# # these in general won't be exactly equal because the final
# # input span can be too long sometimes, and is truncated
@test start(sp) == start(sub_lab.label_span)
@test stop(sp) - stop(sub_lab.label_span) < Second(30)
@test only(unique(get_labels(sub_lab.labels, sub_lab.label_span).data)) == unique_lab
end
end
# missing labels
stages_missing = filter(:recording => !=(first(signals.recording)),
sorted_stages)
labeled_missing = @test_logs (:warn, ) label_signals(signals,
stages_missing;
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
@test all(!=(first(signals.recording)), labeled_missing.recording)
@test nrow(labeled_missing) == nrow(signals) - 1
@testset "non-overlapping label spans are an error" begin
onesignal = signals[1:1, :]
badstage = [(; recording=onesignal.recording[1],
stage="wake",
id=uuid4(),
span=translate(onesignal.span[1], -Second(1)))]
@test_throws ArgumentError label_signals(onesignal,
DataFrame(badstage);
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
end
end
@testset "`get_labels`" begin
using OndaBatches: label_signals, store_labels, get_labels
new_label_paths = joinpath.(TEST_ROOT,
"labels_" .* first.(splitext.(basename.(signals.file_path))))
new_labeled_signals = OndaBatches.store_labels(labeled_signals, TEST_ROOT)
for (eager, lazy) in zip(eachrow(labeled_signals), eachrow(new_labeled_signals))
labels = get_labels(lazy.labels, lazy.label_span)
@test lazy.labels.sample_rate == labels.info.sample_rate
@test lazy.labels.sensor_type == labels.info.sensor_type
@test lazy.labels.channels == labels.info.channels
@test lazy.labels.sample_unit == labels.info.sample_unit
@test lazy.labels.sample_resolution_in_unit == labels.info.sample_resolution_in_unit
@test lazy.labels.sample_offset_in_unit == labels.info.sample_offset_in_unit
@test lazy.labels.sample_type == labels.info.sample_type
@test labels.data == eager.labels.data
end
end
@testset "load labeled signal" begin
recording = uuid4()
# generate a synthetic dataset of samples + annotations with one second
# of 0, then 1, etc.; annotations are every 1s, samples are 128Hz.
data = repeat(0:100; inner=128)
samples = Samples(reshape(data, 1, :),
SamplesInfoV2(; sensor_type="synthetic",
channels=["synthetic"],
sample_unit="none",
sample_resolution_in_unit=1,
sample_offset_in_unit=0,
sample_type=Int,
sample_rate=128),
false)
signal_path = joinpath(TEST_ROOT, "$(recording).lpcm")
signal = Onda.store(signal_path, "lpcm", samples, recording, 0)
labels = [(; recording,
span=TimeSpan(Second(i), Second(i+1)),
value=i)
for i in 0:100]
encoding = Dict(i => UInt8(i) for i in 0:128)
labeled_signal = only(label_signals([signal], DataFrame(labels);
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
# shifted versions of signal/labels that start at 00:00:10 of the
# recording
shifted_signal_path = joinpath(TEST_ROOT, "$(recording)-shifted.lpcm")
shifted_signal = Onda.store(shifted_signal_path, "lpcm", samples,
recording, Second(10))
shifted_labels = [Tables.rowmerge(label;
span=translate(label.span,
Second(10)))
for label in labels]
@testset "loaded samples eltype" begin
x, _ = load_labeled_signal(labeled_signal)
x64, _ = load_labeled_signal(labeled_signal, Float64)
@test x == x64
@test eltype(x.data) == eltype(x64.data) == Float64
x32, _ = load_labeled_signal(labeled_signal, Float32)
@test eltype(x32.data) == Float32
@test x32.data ≈ x.data
end
@testset "aligned to start of recording" begin
@test labeled_signal.span == labeled_signal.label_span
samples_rt, _ = load_labeled_signal(labeled_signal)
@test samples_rt.data == samples.data
sub_lab_sig = sub_label_span(labeled_signal, TimeSpan(Second(10), Second(11)))
sub_samples, _ = load_labeled_signal(sub_lab_sig)
@test all(==(10), sub_samples.data)
end
@testset "shifted by less than one signal sample" begin
tiny_shifted_span = translate(labeled_signal.label_span,
Millisecond(5))
tiny_shift = Tables.rowmerge(labeled_signal;
label_span=tiny_shifted_span)
samples, _ = load_labeled_signal(tiny_shift)
og_samples, _ = load_labeled_signal(labeled_signal)
@test samples == og_samples
end
@testset "all shifted by 10s" begin
shifted_lab_sig = only(label_signals([shifted_signal], DataFrame(shifted_labels);
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
shifted_samples, shifted_labs_rt = load_labeled_signal(shifted_lab_sig)
@test shifted_labs_rt.data == shifted_lab_sig.labels.data
shifted_sub_lab_sig = sub_label_span(shifted_lab_sig,
# translate 10-11
TimeSpan(Second(20), Second(21)))
shifted_sub_samples, shifted_sub_labels = load_labeled_signal(shifted_sub_lab_sig)
@test all(==(10), shifted_sub_samples.data)
@test size(shifted_sub_samples.data) == (1, 128)
@test all(==(10), shifted_sub_labels.data)
@test size(shifted_sub_labels.data) == (1, 1)
end
@testset "only signals shifted by 10s" begin
# errors because labels are starting at 00:00:00 but signal starts
# at 00:00:10, so trying to load negative times from the signal
# errors
@test_throws(ArgumentError,
label_signals([shifted_signal], DataFrame(labels);
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
# to test error path for load_labeled_signal, need to work around
# the check in label_signals by manipulating the labeled signal
# table directly
shifted_sig_err = rowmerge(labeled_signal;
span=translate(labeled_signal.span,
Second(10)))
@test_throws ArgumentError load_labeled_signal(shifted_sig_err)
# lop off first 10s of labels since there's no samples data for
# them after shifting the signal span up by 10s
labels_drop10 = filter(:span => >=(Second(10)) ∘ start,
DataFrame(labels))
shifted_sig = only(label_signals([shifted_signal],
labels_drop10;
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
shifted_samples, labs_rt = load_labeled_signal(shifted_sig)
# we've lopped off 10s from the labels, so load 10s fewer samples
@test duration(shifted_samples) ==
duration(labs_rt) ==
duration(samples) - Second(10)
# labels start at 10 (they're not shifted)
@test first(labs_rt.data) == 10
# samples start at 0 since we've shifted them
@test first(shifted_samples.data) == 0
shifted_sub_sig = sub_label_span(shifted_sig,
# translate 10-11
TimeSpan(Second(20), Second(21)))
shifted_sub_samples, shifted_sub_labels = load_labeled_signal(shifted_sub_sig)
#
@test all(==(10), shifted_sub_samples.data)
@test size(shifted_sub_samples.data) == (1, 128)
@test all(==(20), shifted_sub_labels.data)
@test size(shifted_sub_labels.data) == (1, 1)
end
@testset "only labels shifted by 10s" begin
# throws an error since there's 10s of extra labeled time after
# shifting labels but not signals
@test_throws(ArgumentError,
label_signals([signal], DataFrame(shifted_labels);
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
# to test error path for load_labeled_signal, need to work around
# the check in label_signals by manipulating the labeled signal
# table directly
shifted_lab_err = rowmerge(labeled_signal,
label_span=translate(labeled_signal.label_span,
Second(10)))
@test_throws ArgumentError load_labeled_signal(shifted_lab_err)
# last label in original set is 100:101
labels_drop10 = filter(:span => <=(Second(101)) ∘ stop,
DataFrame(shifted_labels))
shifted_lab = only(label_signals([signal], labels_drop10;
groups=:recording,
labels_column=:value,
epoch=Second(1),
encoding))
samples_rt, shifted_labs_rt = load_labeled_signal(shifted_lab)
@test duration(samples_rt) ==
duration(shifted_labs_rt) ==
duration(samples) - Nanosecond(Second(10))
@test first(samples_rt.data) == 10
@test first(shifted_labs_rt.data) == 0
shifted_sub_sig = sub_label_span(shifted_lab,
# translate 10-11
TimeSpan(Second(20), Second(21)))
shifted_sub_samples, shifted_sub_labels = load_labeled_signal(shifted_sub_sig)
# signal samples still start at 0
@test all(==(20), shifted_sub_samples.data)
@test size(shifted_sub_samples.data) == (1, 128)
# labels are shifted
@test all(==(10), shifted_sub_labels.data)
@test size(shifted_sub_labels.data) == (1, 1)
end
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 5905 | @testset "materialize batches::$(sample_type)" for labed_sigs in (labeled_signals, OndaBatches.store_labels(labeled_signals, TEST_ROOT)), sample_type in (Float32, Float64)
# create batches by pulling one minute from each labeled signal
batches = map(enumerate(Tables.rows(labed_sigs))) do (i, labeled_signal)
(; label_span, channels) = labeled_signal
one_minute = TimeSpan(Minute(i), Minute(i + 1))
batch_span = translate(one_minute, start(label_span))
batch_item = sub_label_span(labeled_signal, batch_span)
batch_channels = mod1.(i:i + 1, length(batch_item.channels))
return Tables.rowmerge(batch_item; batch_channels)
end
xys = materialize_batch_item.(batches, sample_type)
x, y = materialize_batch(batches, sample_type)
# consistency with the single item form
@test x == cat(first.(xys)...; dims=3)
@test y == cat(last.(xys)...; dims=3)
@test all(==(sample_type) ∘ eltype ∘ first, xys)
@test eltype(x) == sample_type
if sample_type == Float64
x_default, _ = materialize_batch(batches)
@test x_default == x
end
@test size(x) == (2, # channel
Dates.value(Second(Minute(1))) * 128, # time
nrow(labed_sigs)) # batch
@test size(y) == (1, # channel (just one for labels)
2, # 30s epochs in 1 minute
nrow(labed_sigs)) # batch
# check consistency with manually pulled batches
for i in 1:size(x, 3)
labeled_signal = labed_sigs[i, :]
(; span, label_span, sample_rate, channels, labels) = labeled_signal
labels = OndaBatches.get_labels(labels, label_span)
# span relative to start of labels
batch_label_span = TimeSpan(Minute(i), Minute(i + 1))
# span relative to start of signals
batch_span = translate(batch_label_span, start(label_span) - start(span))
batch_span = AlignedSpan(sample_rate, batch_span,
ConstantSamplesRoundingMode(RoundDown))
samples = Onda.load(labeled_signal, batch_span)
chans = channels[mod1.(i:i + 1, length(channels))]
@test convert.(sample_type, samples[chans, :].data) ≈ x[:, :, i]
@test labels[:, batch_label_span].data == y[:, :, i]
end
@testset "batch_channels missing" begin
batches_all_chans = map(enumerate(Tables.rows(labed_sigs))) do (i, labeled_signal)
(; label_span, channels) = labeled_signal
one_minute = TimeSpan(Minute(i), Minute(i + 1))
batch_span = translate(one_minute, start(label_span))
batch_item = sub_label_span(labeled_signal, batch_span)
return Tables.rowmerge(batch_item;
batch_channels=missing)
end
xys_all_chans = materialize_batch_item.(batches_all_chans)
xs_all = first.(xys_all_chans)
@test size.(xs_all, 1) == length.(labed_sigs.channels)
end
@testset "get_channel_data" begin
@testset "even-odd compartments" begin
# we need to filter out the ones with an odd number of channels...
for batch_item in eachrow(filter(:channels => iseven ∘ length,
labed_sigs))
label_span = translate(TimeSpan(0, Second(1)),
start(batch_item.label_span))
batch_eo = Tables.rowmerge(batch_item;
batch_channels=EvenOdds(),
label_span)
x, y = materialize_batch_item(batch_eo, sample_type)
@test size(x) == (length(batch_item.channels) ÷ 2, 128, 2)
chans = batch_item.channels
chans_e = chans[2:2:end]
chans_o = chans[1:2:end]
samples, labels = load_labeled_signal(batch_eo, sample_type)
@test x[:, :, 1] == samples[chans_e, :].data
@test x[:, :, 2] == samples[chans_o, :].data
# throws method error because by default Onda tries to iterate
# channels
@test_throws MethodError samples[EvenOdds(), :]
# manually construct even channel/odd channel batches and merge
batch_e = Tables.rowmerge(batch_eo; batch_channels = chans_e)
x_e, y_e = materialize_batch_item(batch_e, sample_type)
batch_o = Tables.rowmerge(batch_eo; batch_channels = chans_o)
x_o, y_o = materialize_batch_item(batch_o, sample_type)
@test x == cat(x_e, x_o; dims=3)
@test y == y_e == y_o
end
end
@testset "zeros for missing channels loader" begin
psg6 = ["c3-m2", "c4-m1", "f3-m2", "f4-m1", "o1-m2", "o2-m1"]
zmc = ZeroMissingChannels(psg6)
for batch_item in eachrow(labed_sigs)
label_span = translate(TimeSpan(0, Second(1)),
start(batch_item.label_span))
batch_zmc = Tables.rowmerge(batch_item;
batch_channels=zmc,
label_span)
samples, _ = load_labeled_signal(batch_zmc, sample_type)
x, y = materialize_batch_item(batch_zmc, sample_type)
@test size(x) == (length(zmc.channels), 128)
for (i, c) in enumerate(zmc.channels)
if c ∈ batch_zmc.channels
@test x[i:i, :] == samples[c, :].data
else
@test all(iszero, x[i, :])
end
end
end
end
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 59 | include("OndaBatchesTests.jl")
OndaBatchesTests.runtests()
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 717 | using AWSS3
using Legolas: @schema, @version
using Onda
# pointers to test dataset tables + necessary schemas
const signals_path = S3Path("s3://beacon-public-oss/ondabatches-ci/test-data/clean-sleep/test.onda.signal.arrow?versionId=BCcLcHSQPE.RXFstxDcYciWotittBEGE")
const uncompressed_signals_path = S3Path("s3://beacon-public-oss/ondabatches-ci/test-data/clean-sleep/uncompressed.test.onda.signal.arrow?versionId=0eW58zErHNUUjkExSLqpCv24UDwnBDTV")
const stages_path = S3Path("s3://beacon-public-oss/ondabatches-ci/test-data/clean-sleep/test.clean-sleep.sleepstage.arrow?versionId=FiRWymDsbNbeUDFeyWgLmtY8rBPzKTeN")
@schema "sleep-stage" SleepStage
@version SleepStageV1 > AnnotationV1 begin
stage::String
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | code | 6706 | @testset "utilities" begin
@testset "s3 access" begin
# check that we can actually read/write from test sandbox
hello_f = joinpath(TEST_ROOT, "hello.txt")
write(hello_f, "hello, world!")
@test String(read(hello_f)) == "hello, world!"
end
@testset "Onda.read_byte_range" begin
using Dates
using TimeSpans
signal = first(eachrow(signals))
signal_uncomp = only(filter(:recording => ==(signal.recording),
uncompressed_signals))
samples_all = Onda.load(signal_uncomp)
onesec_span = TimeSpan(Second(100), Second(101))
samples_onesec = Onda.load(signal_uncomp, onesec_span)
@test samples_onesec.data == samples_all[:, onesec_span].data
# not sure why this is broken...
@test_broken samples_onesec.data isa Base.ReshapedArray
bad_span = TimeSpan(stop(signal_uncomp.span) + Nanosecond(Second(1)),
stop(signal_uncomp.span) + Nanosecond(Second(2)))
        # this throws a BoundsError without our code (since Onda falls back to
        # loading EVERYTHING and then indexing). with our utils, it passes the
        # byte range to AWS, which says it's invalid
@test_throws AWS.AWSException Onda.load(signal_uncomp, bad_span)
ex = try
Onda.load(signal_uncomp, bad_span)
catch e
e
end
@test ex isa AWS.AWSException
@test ex.code == "InvalidRange"
# does not hit this path for a compressed format
samples_compress_onesec = Onda.load(signal, onesec_span)
@test samples_compress_onesec.data == samples_onesec.data
        # why does this throw an inexact error? something gets scrambled
        # in between the zst decompression and the deserialization code that
        # reshapes the array of bytes into a channel-by-sample matrix.
        # it looks like it's returning a single byte, which then gets divided
        # by 6 (number of channels) and tries to convert that to an Int...
@test_throws InexactError Onda.load(signal, bad_span)
end
@testset "_glue" begin
using OndaBatches: _glue
vecs = [rand(10) for _ in 1:11]
@test _glue(vecs) == reduce(hcat, vecs) == hcat(vecs...)
@test size(_glue(vecs)) == (10, 11)
# still get added dimension with only a single element collection
@test size(_glue(vecs[1:1])) == (10, 1)
mats = [rand(3, 4) for _ in 1:11]
@test _glue(mats) == cat(mats...; dims=3)
@test size(_glue(mats)) == (3, 4, 11)
bricks = [rand(3, 4, 5) for _ in 1:11]
@test _glue(bricks) == cat(bricks...; dims=4)
@test size(_glue(bricks)) == (3, 4, 5, 11)
tup = Tuple(mats)
@test _glue(tup) == _glue(mats)
@test_throws(ArgumentError("all elements must have the same size, got >1 unique sizes: (1, 2), (2,)"),
_glue([[1 2], [1; 2]]))
end
@testset "with_channel" begin
using OndaBatches: with_channel
# normal return values are propagated:
done = with_channel(Channel{Any}(1)) do channel
put!(channel, 1)
return :done
end
@test done == :done
# channel closed errors are caught and return :closed
#
# we need to do some shenanigans here to be able to test what happens when
# the channel is closed by someone else. so we create an async task that
# waits on the channel...
c = Channel{Any}(Inf)
task = @async with_channel(c) do channel
wait(channel)
end
# ...then we close it...
close(c)
# ...and grab the return value of `with_channel` from the task:
@test fetch(task) == :closed
# other errors are propagated as usual:
c = Channel{Any}(Inf)
@test_throws ErrorException("AHHHH") with_channel(c) do c
error("AHHHH")
end
end
@testset "_wait" begin
using OndaBatches: _wait
ws = addprocs(1)
try
provision_worker(ws)
@show ws
pool = WorkerPool(ws)
w = take!(pool)
# local call to _wait
while isready(pool)
take!(pool)
end
@test !isready(pool)
t = @async _wait(pool)
@test !istaskdone(t)
put!(pool, w)
# avoid race condition
status = timedwait(() -> istaskdone(t), 10)
@test status == :ok
# remote call to _wait
while isready(pool)
take!(pool)
end
@test !isready(pool)
f = remotecall(_wait, w, pool)
@test !isready(f)
# XXX: debugging test failure here...
isready(f) && fetch(f)
put!(pool, w)
status = timedwait(() -> isready(f), 10)
@test status == :ok
finally
rmprocs(ws)
end
end
@testset "reset!" begin
using OndaBatches: reset!
ws = addprocs(2)
try
provision_worker(ws)
pool = WorkerPool(ws)
@test isready(pool)
@test length(pool) == count_ready_workers(pool) == 2
reset!(pool)
@test isready(pool)
@test length(pool) == count_ready_workers(pool) == 2
w = take!(pool)
# sorted:
@test w == first(ws)
reset!(pool)
@test isready(pool)
@test length(pool) == count_ready_workers(pool) == 2
w = take!(pool)
# sorted (again):
@test w == first(ws)
while isready(pool)
take!(pool)
end
@test !isready(pool)
reset!(pool)
@test isready(pool)
@test length(pool) == count_ready_workers(pool) == 2
while isready(pool)
take!(pool)
end
remotecall_fetch(reset!, first(ws), pool)
@test isready(pool)
@test length(pool) == count_ready_workers(pool) == 2
rmprocs(first(ws))
remotecall_fetch(reset!, last(ws), pool)
@test isready(pool)
# reset! drops dead workers
@test length(pool) == count_ready_workers(pool) == 1
rmprocs(take!(pool))
reset!(pool)
# no more workers in pool
@test !isready(pool)
@test length(pool) == count_ready_workers(pool) == 0
finally
rmprocs(ws)
end
end
end
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | docs | 1566 | # OndaBatches.jl
[](https://github.com/beacon-biosignals/OndaBatches.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/beacon-biosignals/OndaBatches.jl)
[](https://beacon-biosignals.github.io/OndaBatches.jl/stable)
[](https://beacon-biosignals.github.io/OndaBatches.jl/dev)
[Take the tour!](https://github.com/beacon-biosignals/OndaBatches.jl/tree/master/examples/tour.jl)
OndaBatches.jl provides tooling to enable local and distributed batch loading of Onda-formatted datasets.
In particular, it defines utilities to:
- ...associate existing labels with the corresponding signals to create a holistic dataset.
- ...systematically construct batches of training / evaluation data from this dataset, with a sampling mechanism flexible enough to tailor the properties of the outputs.
- ...initiate a batching mechanism, carefully controlled by a scheduler, when working in a distributed environment.
### JuliaCon 2023
Watch our [JuliaCon2023 talk on
OndaBatches.jl](https://www.youtube.com/live/FIeO1yenQ6Y?feature=share&t=23190)!
[Slides](https://beacon-biosignals.github.io/OndaBatches.jl/juliacon2023/)
(and [source + demo](https://github.com/beacon-biosignals/OndaBatches.jl/tree/main/talk/))
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | docs | 1222 | ```@meta
CurrentModule = OndaBatches
```
Watch our [JuliaCon2023 talk on
OndaBatches.jl](https://www.youtube.com/live/FIeO1yenQ6Y?feature=share&t=23190)!
[Slides](https://beacon-biosignals.github.io/OndaBatches.jl/juliacon2023/)
(and [source + demo](https://github.com/beacon-biosignals/OndaBatches.jl/tree/main/talk/))
# Public API
## Labeled signals
```@docs
LabeledSignalV2
sub_label_span
label_signals
load_labeled_signal
store_labels
```
## Batch sampling
```@docs
BatchItemV2
RandomBatches
iterate_batch_item
iterate_batch
```
## Batch materialization
```@docs
materialize_batch_item
materialize_batch
get_channel_data
```
## Batching service
```@docs
Batcher
Batcher(::Int, ::AbstractWorkerPool, ::Any; start::Any, state::Any, buffer::Any)
Base.take!(::Batcher, state)
start!
stop!
get_status
```
## Internal utilities
!!! warning
    None of the following are meant to be called by users; they are not part
    of the API for semantic versioning purposes and can change at any time.
```@docs
labels_to_samples_table
labels_to_samples
get_labels
int_encode_labels
floor_containing
is_epoch_divisible
check_epoch_divisible
all_contiguous
sample_label_span
start_batching
_feed_jobs!
reset!
with_channel
```
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.9 | f093f86bbcb6ba120afc867e8a4e33bcab068062 | docs | 8923 | class: middle
.slide-title[
# OndaBatches.jl: Continuous, repeatable, and distributed batching
## Dave Kleinschmidt — Beacon Biosignals
### JuliaCon 2023 — [slide source + demo](https://github.com/beacon-biosignals/OndaBatches.jl/tree/main/talk/)
]
---
# Who am I?
Research Software Engineer at Beacon Biosignals
Our team builds tools for internal users at Beacon doing machine learning and
other quantitative/computational work
---
# Who are we?
Beacon Biosignals
> From its founding in 2019, Beacon Biosignals has engineered a machine learning
> platform designed to interrogate large EEG datasets at unprecedented speed and
> scale.
---
# Why did we make this?
Support common need to _build batches from annotated time series data_ across
multiple ML efforts at Beacon:
--
Multi-channel, regularly sampled time series data (i.e., EEG recordings)
Task is "image segmentation": output dense, regularly sampled labels (i.e.,
every 30s span gets a label)
--
Input data is Onda-formatted `Samples` + annotations (time span + label)
Models requires _numerical tensors_ for training/evaluation/inference
---
# Who is this for?
This might be interesting to you if you are
1. an ML engineer looking to model large time-series datasets and want to
   actually _use_ OndaBatches to build your batches.
2. developing similar tools and are interested in how we build re-usable
tools like this at Beacon.
--
## Why might you care?
1. We actually use this at Beacon!
2. It's a potentially useful example (cautionary tale?) for how to wrangle
inconveniently large data and the nuances of distributed computing in a
restricted domain
???
gonna be honest, mostly focusing on the second group here!
this is pretty specific to beacon's tooling and needs! and there's a fair
amount of path dependence in how we got to this state...
---
# Outline
Part 1: Design, philosophy, and basic functionality
Part 2: Making a distributed batch loading system that doesn't require expertise
in distributed systems to use
---
# Design: Goals
Distributed (integrate with our distributed ML pipelines, throw more resources
at it to make sure data movement is not the bottleneck)
Scalable (handle out-of-core datasets, both for signal data and labels)
Deterministic + reproducible (pseudo-random)
Resumable
Flexible and extensible via normal Julia mechanisms of multiple dispatch
---
# Design: Philosophy
Separate the _cheap_ parts where _order matters_ ("batch specification") from
_expensive parts_ which can be done _asynchronously_ ("batch materialization")
--
Build on standard tooling (at Beacon), using
[Legolas.jl](https://github.com/beacon-biosignals/Legolas.jl) to define
interface schemas which extend
[Onda.jl](https://github.com/beacon-biosignals/Onda.jl) schemas.
--
Use _iterator patterns_ to generate pseudorandom sequence of batch specs.
--
Be flexible enough that it can be broadly useful across different ML efforts at
Beacon (and beyond??)
--
Use function calls we control to provide hooks for users to customize certain
behaviors via multiple dispatch (e.g., how to materialize `Samples` data into
batch tensor)
---
# How does it work?
```julia
signals, labels = load_tables()
labeled_signals = label_signals(signals, labels,
labels_column=:stage,
encoding=SLEEP_STAGE_INDEX,
epoch=Second(30))
batches = RandomBatches(; labeled_signals,
# uniform weighting of signals + labels
signal_weights=nothing,
label_weights=nothing,
n_channels=1,
batch_size=2,
batch_duration=Minute(5))
state0 = StableRNG(1)
batch, state = iterate_batch(batches, deepcopy(state0))
x, y = materialize_batch(batch)
```
???
live demo here...
---
# Extensibility
Some models require a specific set of channels to function (a "montage"), but
recordings don't always have all the required channels.
Here's a "channel selector" to fill in the missing channels with zeros:
```julia
struct ZeroMissingChannels
channels::Vector{String}
end
function OndaBatches.get_channel_data(samples::Samples, channels::ZeroMissingChannels)
out = zeros(eltype(samples.data), length(channels.channels), size(samples.data, 2))
for (i, c) in enumerate(channels.channels)
if c ∈ samples.info.channels
@views out[i:i, :] .= samples[c, :]
end
end
return out
end
```
---
# Extensibility
A very silly kind of "featurization": separate even and odd channels into
separate "compartments" (so they're processed independently in the model)
```julia
struct EvenOdds end
function OndaBatches.get_channel_data(samples::Samples, channels::EvenOdds)
chans = samples.info.channels
odds = @view(samples[chans[1:2:end], :]).data
evens = @view(samples[chans[2:2:end], :]).data
return cat(evens, odds; dims=3)
end
```
---
# Distributed batch loading: Why
different models have different demands on batch loading (data size,
amount of preprocessing required, etc.)
batch loading should _never_ be the bottleneck in our pipeline (GPU time is
expensive)
distributing batch loading means we can always "throw more compute" at it
???
(case study in lifting a serial workload into a distributed/async workload)
another thing: working around flakiness of multithreading and unacceptably low
throughput for S3 reads. worker-to-worker communication has good enough
throughput
---
# Distributed batch loading: How
step 1: `return` → `RemoteChannel`
```julia
function start_batching(channel, batches, state)
try
while true
batch, state = iterate_batch(batches, state)
xy = materialize_batch(batch)
put!(channel, (xy, copy(state)))
end
catch e
if is_channel_closed(e)
@info "channel closed, stopping batcher..."
return :closed
else
rethrow()
end
end
end
init_state = StableRNG(1)
# need a buffered channel in order for producers to stay ahead
channel = RemoteChannel(() -> Channel{Any}(10))
batch_worker = only(addprocs(1))
future = remotecall(start_batching, batch_worker, channel, batches, init_state)
# now consumer can `take!(channel)` to retrieve batches when they're ready
```
???
the basic idea is that instead of calling a function `materialize_batch ∘
iterate_batch`, we make a _service_ that feeds materialized batches
and the corresponding batcher states onto a `Distributed.RemoteChannel` where a
consumer can retrieve them.
of course, this still loads batches in serial, one at a time. if we didn't care
about the order of the batches or reproducibility, we could just start multiple
independent feeder processes to feed the channel.
---
# Distributed batch loading: How
Step 2: Load multiple batches at the same time
Need to be careful to make sure the _order of batches_ is the same regardless of
the number of workers etc.
This is where the separation between batch _specification_ and batch
_materialization_ pays off: the specifications are small and cheap to
produce/serialize, so we can do them sequentially on the "manager" process.
```julia
function pmap_batches!(channel::RemoteChannel, spec, state, workers)
futures_states = map(workers) do worker
batch, state = iterate_batch(spec, state)
batch_future = remotecall(materialize_batch, worker, batch)
return batch_future, copy(state)
end
for (future, s) in futures_states
xy = fetch(future)
put!(channel, (xy, s))
end
return state
end
```
(Note this doesn't quite work when you have _finite_ series of batches)
???
cycle through the workers one at a time, feeding them a batch spec.
---
# Distributed batch loading: How
Step 2: Load multiple batches at the same time
```julia
function pmap_batches!(channel::RemoteChannel, spec, state, workers)
# ...
end
function start_batching(channel::RemoteChannel, spec, state, workers)
try
while true
state = pmap_batches!(channel, spec, state, workers)
end
catch e
if is_closed_ex(e)
@info "batch channel closed, batching stopped"
return :closed
else
rethrow(e)
end
end
end
```
---
# Batching service
Lots of bookkeeping required for this!
- `Future` returned by `remotecall(start_batching, ...)`
- `RemoteChannel` for serving the batches
- batch iterator itself
What happens when things go wrong? It's very tricky to get errors to surface
properly and avoid bad states like silent deadlocks
We provide a `Batcher` struct that
- does the bookkeeping
- provides a limited API surface to reduce complexity for users...
- ...and manage complexity for developers/maintainers (sketched below)
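A rough sketch of the intended workflow (these names are exported by
OndaBatches, but the exact argument meanings shown here are assumptions for
illustration, not the verbatim API):
```julia
# hypothetical usage sketch
batcher = Batcher(manager, pool, batches; start=false)  # does the bookkeeping
start!(batcher, init_state)                  # spin up the batching service
(x, y), state = take!(batcher, init_state)   # blocks until a batch is ready
get_status(batcher)                          # surface feeder errors, no silent deadlocks
stop!(batcher)                               # close the channel, clean up
```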
---
# thanks!
| OndaBatches | https://github.com/beacon-biosignals/OndaBatches.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 688 | using BenchmarkTools
using UniformIsingModels
SUITE = BenchmarkGroup()
N = 100
J = 0.5
h = 1.2 * randn(N)
β = 2.3
x = UniformIsing(N, J, h, β)
SUITE["constructor"] = BenchmarkGroup()
SUITE["constructor"]["constructor"] = @benchmarkable UniformIsing($N, $J, $h, $β)
SUITE["observables"] = BenchmarkGroup()
SUITE["observables"]["normalization"] = @benchmarkable normalization(x)
SUITE["observables"]["entropy"] = @benchmarkable entropy(x)
SUITE["observables"]["site_magnetizations"] = @benchmarkable site_magnetizations(x)
# SUITE["observables"]["pair_magnetizations"] = @benchmarkable pair_magnetizations(x)
SUITE["observables"]["sum_distribution"] = @benchmarkable sum_distribution(x) | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 599 | module UniformIsingModels
using OffsetArrays: OffsetVector, fill
using LinearAlgebra: dot
using Random: default_rng, AbstractRNG
using LogExpFunctions: logsumexp, logaddexp
export UniformIsing, nvariables, variables, recompute_partials!,
energy, lognormalization, normalization, pdf,
avg_energy, entropy, free_energy,
site_magnetizations!, site_magnetizations,
correlations!, correlations,
covariances!, covariances,
sum_distribution!, sum_distribution,
sample!, sample
include("accumulate.jl")
include("uniform_ising.jl")
end # end module | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 2445 | function accumulate_left!(L, h, β)
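    # left-to-right DP pass: L[k][s] holds, in log space, the total weight
    # exp(β Σ_{i≤k} hᵢσᵢ) summed over configurations of the first k spins
    # whose partial sum Σ_{i≤k} σᵢ equals s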
N = length(h)
L[0][0] = 0.0
for k in 1:N
for s in -(k-1):k-1
L[k][s] = logaddexp( β*h[k] + L[k-1][s-1],
-β*h[k] + L[k-1][s+1] )
end
L[k][k] = β*h[k] + L[k-1][k-1]
L[k][-k] = -β*h[k] + L[k-1][-k+1]
end
L
end
function accumulate_left(h, β)
N = length(h)
L = OffsetVector([fill(-Inf, -N:N) for i in 0:N], 0:N)
accumulate_left!(L, h, β)
end
function accumulate_d_left!(L, dLdβ, h, β)
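    # same pass as accumulate_left!, additionally tracking the β-derivative
    # dLdβ[k][s] = ∂L[k][s]/∂β (used by avg_energy to form -∂logZ/∂β)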
N = length(h)
L[0] .= -Inf; L[0][0] = 0.0; dLdβ[0] .= 0.0
for k in 1:N
for s in -(k-1):k-1
L[k][s] = logaddexp( β*h[k] + L[k-1][s-1],
-β*h[k] + L[k-1][s+1] )
dLdβ[k][s] = (+h[k] + dLdβ[k-1][s-1])*exp(+β*h[k]+L[k-1][s-1]) +
(-h[k] + dLdβ[k-1][s+1])*exp(-β*h[k]+L[k-1][s+1])
dLdβ[k][s] = dLdβ[k][s] / exp(L[k][s])
isnan(dLdβ[k][s]) && (dLdβ[k][s] = 0.0)
end
L[k][k] = β*h[k] + L[k-1][k-1]
L[k][-k] = -β*h[k] + L[k-1][-k+1]
dLdβ[k][k] = h[k] + dLdβ[k-1][k-1]
dLdβ[k][-k] = -h[k] + dLdβ[k-1][-k+1]
end
L, dLdβ
end
function accumulate_d_left(h, β)
N = length(h)
L = OffsetVector([fill(-Inf, -N:N) for i in 0:N], 0:N)
dLdβ = OffsetVector([fill(0.0, -N:N) for i in 0:N], 0:N)
accumulate_d_left!(L, dLdβ, h, β)
end
function accumulate_right!(R, h, β)
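    # right-to-left analogue of accumulate_left!: R[k][s] sums over spins k…N
    # with partial sum s (used to sum out the spins to the right of a site)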
N = length(h)
R[N+1][0] = 0.0
for k in N:-1:1
for s in -(N-k):(N-k)
R[k][s] = logaddexp( β*h[k] + R[k+1][s-1],
-β*h[k] + R[k+1][s+1] )
end
R[k][N-k+1] = β*h[k] + R[k+1][N-k]
R[k][-(N-k+1)] = -β*h[k] + R[k+1][-(N-k)]
end
R
end
function accumulate_right(h, β)
N = length(h)
R = OffsetVector([fill(-Inf, -N:N) for i in 1:N+1], 1:N+1)
accumulate_right!(R, h, β)
end
function accumulate_middle!(M, h, β)
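    # M[i,j][s] sums, in log space, over the contiguous block of spins i…j
    # with partial sum s (needed by correlations! for pairs i < j)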
N = length(h)
for i in 1:N
for s in (-1,1)
M[i,i][s] = β*h[i]*s
end
for j in i+1:N
for s in -(j-i+1):j-i+1
M[i,j][s] = logaddexp( β*h[j] + M[i,j-1][s-1],
-β*h[j] + M[i,j-1][s+1] )
end
end
end
M
end
function accumulate_middle(h, β)
N = length(h)
M = [fill(-Inf, -(N+1):(N+1)) for i in 1:N, j in 1:N]
accumulate_middle!(M, h, β)
end | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 6410 | mutable struct UniformIsing{T<:Real, U<:OffsetVector}
J :: T # uniform coupling strength
h :: Vector{T} # external fields
β :: T # inverse temperature
L :: OffsetVector{U, Vector{U}} # partial sums from the left
R :: OffsetVector{U, Vector{U}} # partial sums from the right
dLdβ :: OffsetVector{U, Vector{U}} # Derivative of L wrt β
function UniformIsing(N::Integer, J::T, h::Vector{T}, β::T=1.0) where T
@assert length(h) == N
@assert β ≥ 0
R = accumulate_right(h, β)
L, dLdβ = accumulate_d_left(h, β)
U = eltype(L)
return new{T, U}(J, h, β, L, R, dLdβ)
end
end
function UniformIsing(N::Integer, J::T, β::T=1.0) where {T<:Real}
h = zeros(T, N)
return UniformIsing(N, J, h, β)
end
# re-compute the partial quantities needed to compute observables, in case some parameter (`J,h,β`) was modified
function recompute_partials!(x::UniformIsing)
(; h, β, L, R, dLdβ) = x
accumulate_left!(L, h, β)
accumulate_right!(R, h, β)
accumulate_d_left!(L, dLdβ, h, β)
end
nvariables(x::UniformIsing) = length(x.h)
variables(x::UniformIsing) = 1:nvariables(x)
function energy(x::UniformIsing, σ)
s = sum(σ)
f = dot(σ, x.h)
N = nvariables(x)
return -( x.J/2*(s^2/N-1) + f )
end
function lognormalization(x::UniformIsing)
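    # log Z: sum over the total spin s, combining the uniform coupling term
    # β*J/2*(s²/N - 1) with the field-only log-weight L[N][s] from accumulate_left!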
(; β, J, L) = x
N = nvariables(x)
return logsumexp( β*J/2*(s^2/N-1) + L[N][s] for s in -N:N )
end
function normalization(x::UniformIsing; logZ = lognormalization(x))
return exp(logZ)
end
pdf(x::UniformIsing, σ) = exp(-x.β*energy(x, σ) - lognormalization(x))
free_energy(x::UniformIsing; logZ = lognormalization(x)) = -logZ / x.β
function sample_spin(rng::AbstractRNG, p::Real)
@assert 0 ≤ p ≤ 1
r = rand(rng)
return r < p ? 1 : -1
end
# return a sample along with its probability
function sample!(rng::AbstractRNG, σ, x::UniformIsing; logZ = lognormalization(x))
(; J, h, β, R) = x
N = nvariables(x)
a = 0.0; b = 0
f(s) = β*J/2*(s^2/N-1)
for i in 1:N
tmp_plus = tmp_minus = 0.0
for s in -N:N
tmp_plus += exp(f(b+1+s) + R[i+1][s])
tmp_minus += exp(f(b-1+s) + R[i+1][s])
end
p_plus = exp(β*h[i]) * tmp_plus
p_minus = exp(-β*h[i]) * tmp_minus
p_i = p_plus / (p_plus + p_minus)
σi = sample_spin(rng, p_i)
σ[i] = σi
a += h[i]*σi
b += σi
end
p = exp(f(b) + β*a - logZ)
@assert a == dot(h, σ); @assert b == sum(σ)
return σ, p
end
sample!(σ, x::UniformIsing; kw...) = sample!(default_rng(), σ, x; kw...)
sample(rng::AbstractRNG, x::UniformIsing; kw...) = sample!(rng, zeros(Int, nvariables(x)), x; kw...)
sample(x::UniformIsing; kw...) = sample(default_rng(), x; kw...)
# first store in `p[i]` the quantity log(p(σᵢ=+1)), then transform at the end
function site_magnetizations!(p, x::UniformIsing; logZ = lognormalization(x))
(; J, h, β, L, R) = x
N = nvariables(x)
f(s) = β*J/2*(s^2/N-1)
for i in eachindex(p)
p[i] = -Inf
for sL in -N:N
for sR in -N:N
s = sL + sR
p[i] = logaddexp(p[i], f(s+1) + β*h[i] + L[i-1][sL] + R[i+1][sR])
end
end
# include normalization
p[i] = exp(p[i] - logZ)
# transform form p(+) to m=2p(+)-1
p[i] = 2*p[i] - 1
end
return p
end
function site_magnetizations(x::UniformIsing{T,U}; kw...) where {T,U}
return site_magnetizations!(zeros(T, nvariables(x)), x; kw...)
end
# distribution of the sum of all variables as an OffsetVector
function sum_distribution!(p, x::UniformIsing; logZ = lognormalization(x))
(; J, β, L) = x
N = nvariables(x)
for s in -N:N
p[s] = exp( β*J/2*(s^2/N-1) + L[N][s] - logZ )
end
return p
end
function sum_distribution(x::UniformIsing{T,U}; kw...) where {T,U}
p = fill(zero(T), -nvariables(x):nvariables(x))
return sum_distribution!(p, x; kw...)
end
function avg_energy(x::UniformIsing{T}; logZ = lognormalization(x)) where T
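    # U = -∂(log Z)/∂β: Zt accumulates ∂Z/∂β term by term via the chain rule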
(; J, β, L, dLdβ) = x
N = nvariables(x)
Zt = sum( exp( β*J/2*(s^2/N-1) + L[N][s]) * (J/2*(s^2/N-1)+dLdβ[N][s]) for s in -N:N)
return -exp(log(Zt) - logZ)
end
entropy(x::UniformIsing; kw...) = x.β * (avg_energy(x; kw...) - free_energy(x; kw...))
function correlations!(m, x::UniformIsing{T,U};
M = accumulate_middle(x.h, x.β), logZ = lognormalization(x)) where {T,U}
(; J, h, β, L, R) = x
N = nvariables(x)
Z = exp(logZ)
f(s) = β*J/2*(s^2/N-1)
for i in 1:N
# j = i
m[i,i] = 1
# j = i+1
j = i + 1
j > N && break
m[i,j] = 0
for sL in -N:N
for sR in -N:N
s = sL + sR
m[i,j] += ( exp( f(s+2) + β*(h[i]+h[j]) ) +
exp( f(s-2) - β*(h[i]+h[j]) ) -
exp( f(s) + β*(h[i]-h[j]) ) -
exp( f(s) - β*(h[i]-h[j]) ) ) *
exp(L[i-1][sL] + R[j+1][sR])
end
end
m[i,j] /= Z; m[j,i] = m[i,j]
# j > i + 1
for j in i+2:N
m[i,j] = 0
for sM in -N:N
for sL in -N:N
for sR in -N:N
s = sL + sM + sR
m[i,j] += ( exp( f(s+2) + β*(h[i]+h[j]) ) +
exp( f(s-2) - β*(h[i]+h[j]) ) -
exp( f(s) + β*(h[i]-h[j]) ) -
exp( f(s) - β*(h[i]-h[j]) ) ) *
exp(L[i-1][sL] + M[i+1,j-1][sM] + R[j+1][sR])
end
end
end
m[i,j] /= Z; m[j,i] = m[i,j]
end
end
return m
end
function correlations(x::UniformIsing{T,U}; kw...) where {T,U}
correlations!(zeros(T,nvariables(x),nvariables(x)), x; kw...)
end
function covariances!(c, x::UniformIsing; logZ = lognormalization(x),
m = site_magnetizations(x; logZ), p = correlations(x; logZ))
N = nvariables(x)
for i in 1:N
for j in 1:N
c[i,j] = p[i,j] - m[i]*m[j]
end
end
return c
end
function covariances(x::UniformIsing{T,U}; kw...) where {T,U}
covariances!(zeros(T,nvariables(x),nvariables(x)), x; kw...)
end | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 609 | N = 10
J = 0.5
β = 2.3
x = UniformIsing(N, J, β)
@testset "basics" begin
@testset "outer constructor" begin
@test all(isequal(0), x.h)
end
@testset "left accumulator" begin
L = UniformIsingModels.accumulate_left(x.h, x.β)
@test L == x.L
end
@testset "mutate and recompute partials" begin
hnew = ones(N)
Jnew = -1.1
βnew = 0.1
x.h = hnew
x.J = Jnew
x.β = βnew
recompute_partials!(x)
xnew = UniformIsing(N, Jnew, hnew, βnew)
@test lognormalization(x) == lognormalization(xnew)
end
end | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 2865 | # Loop once over all 2^N states and compute observables
function Obs(f::Function)
o = 0.0
function measure(x::UniformIsing, s)
o += f(x, s)
end
end
function observables_bruteforce(x::UniformIsing,
observables::Vector{<:Function})
N = nvariables(x)
if N > 10
@warn "Exponential scaling alert"
end
for s in Iterators.product(fill((-1,1), N)...)
for f! in observables
f!(x, s)
end
end
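    # each `Obs` closure boxes its accumulator `o` (a Core.Box), so pull the
    # accumulated values back out via `.contents`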
[obs.o.contents for obs in observables]
end
N = 10
J = 0.5
h = 1.2 * randn(N)
β = 2.3
x = UniformIsing(N, J, h, β)
@testset "observables" begin
@testset "normalization" begin
_normaliz = (x, s) -> exp(-x.β*energy(x, s))
Z_bruteforce = observables_bruteforce(x, [Obs(_normaliz)])[1]
@test normalization(x) ≈ Z_bruteforce
end
@testset "magnetizations" begin
m = site_magnetizations(x)
_magnetiz = [Obs((x, s) -> pdf(x, s)*s[i]) for i in variables(x)]
magnetiz_bruteforce = observables_bruteforce(x, _magnetiz)
@test all(variables(x)) do i
m[i] ≈ magnetiz_bruteforce[i]
end
end
@testset "correlations" begin
p = correlations(x)
_pair_magnetiz = [Obs((x, s) -> pdf(x, s)*s[i]*s[j])
for i in variables(x) for j in variables(x)]
pair_magnetiz_bruteforce = observables_bruteforce(x, _pair_magnetiz)
@test all(Iterators.product(variables(x), variables(x))) do (i,j)
k = Int( (j-1)*N + i )
p[i,j] ≈ pair_magnetiz_bruteforce[k]
end
end
@testset "covariances" begin
m = site_magnetizations(x)
c = covariances(x)
_correl = [Obs((x, s) -> pdf(x, s)*(s[i]*s[j]-m[i]*m[j]))
for i in variables(x) for j in variables(x)]
correl_bruteforce = observables_bruteforce(x, _correl)
@test all(Iterators.product(variables(x),variables(x))) do (i,j)
k = Int( (j-1)*N + i )
isapprox( c[i,j], correl_bruteforce[k], atol=1e-4 )
end
end
@testset "distribution of sum" begin
p = sum_distribution(x)
_sum_distr = [Obs((x, s) -> pdf(x, s) * (sum(s) == σ)) for σ in -N:N]
sum_distr_bruteforce = observables_bruteforce(x, _sum_distr)
@test p.parent ≈ sum_distr_bruteforce
end
@testset "average energy" begin
U = avg_energy(x)
_energy = Obs((x,s) -> pdf(x,s)*energy(x,s))
avg_energy_bruteforce = observables_bruteforce(x, [_energy])[1]
@test U ≈ avg_energy_bruteforce
end
@testset "entropy" begin
S = entropy(x)
_entropy = Obs((x,s) -> -pdf(x,s)*log(pdf(x,s)))
entropy_bruteforce = observables_bruteforce(x, [_entropy])[1]
@test S ≈ entropy_bruteforce
end
end | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 106 | using UniformIsingModels
using Test
include("basics.jl")
include("observables.jl")
include("sampling.jl") | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | code | 421 | N = 25
J = 2.5
h = 0.3*randn(N)
β = 0.1
x = UniformIsing(N, J, h, β)
nsamples = 10^3
ntests = 20
@testset "sampling" begin
X = [sample(x)[1] for _ in 1:nsamples]
σ_test = [rand([-1,1], N) for _ in 1:ntests]
tol = 1 / sqrt(nsamples)
@test all(1:ntests) do t
σ = σ_test[t]
p = pdf(x, σ)
p_empirical = sum(x == σ for x in X) / nsamples
abs(p - p_empirical) < tol
end
end | UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.4.0 | 76338fc88a0cb17081d48761b16e82b70aeb645c | docs | 2943 | # UniformIsingModels
[](https://github.com/stecrotti/UniformIsingModels.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/stecrotti/UniformIsingModels.jl)
A fully-connected ferromagnetic [Ising model](https://en.wikipedia.org/wiki/Ising_model) with uniform coupling strength, described by the Boltzmann distribution
$p(\boldsymbol{\sigma}) = \frac{1}{Z} \exp\left[\beta\left(\frac{J}{N}\sum_{i<j}\sigma_i\sigma_j+\sum_{i=1}^Nh_i\sigma_i\right)\right],\quad \boldsymbol{\sigma}\in\{-1,1\}^N,$
is exactly solvable in polynomial time.
| Quantity | Expression | Cost |
| ------------- | ----------| ----------- |
| Normalization | $Z=\sum\limits_{\boldsymbol{\sigma}}\exp\left[\beta\left(\frac{J}{N}\sum_{i<j}\sigma_i\sigma_j+\sum_{i=1}^Nh_i\sigma_i\right)\right]$ | $\mathcal O (N^2)$ |
| Free energy | $F = -\frac{1}{\beta}\log Z$ | $\mathcal O (N^2)$ |
| Sample a configuration | $\boldsymbol{\sigma} \sim p(\boldsymbol{\sigma})$ | $\mathcal O (N^2)$ |
| Average energy | $U = \sum\limits_{\boldsymbol{\sigma}}p(\boldsymbol{\sigma})\left[-\left(\frac{J}{N}\sum_{i<j}\sigma_i\sigma_j+\sum_{i=1}^Nh_i\sigma_i\right)\right]$ | $\mathcal O (N^2)$ |
| Entropy | $S = -\sum\limits_{\boldsymbol{\sigma}}p(\boldsymbol{\sigma})\log p(\boldsymbol{\sigma})$ | $\mathcal O (N^2)$ |
| Distribution of the sum of the N spins | $p_S(s)=\sum\limits_{\boldsymbol{\sigma}}p(\boldsymbol{\sigma})\delta\left(s-\sum_{i=1}^N\sigma_i\right)$ | $\mathcal O (N^2)$ |
| Site magnetizations | $m_i=\sum\limits_{\boldsymbol{\sigma}}p(\boldsymbol{\sigma})\sigma_i,\quad\forall i\in\{1,2,\ldots,N\}$ | $\mathcal O (N^3)$ |
| Correlations | $r_{ij}=\sum\limits_{\boldsymbol{\sigma}}p(\boldsymbol{\sigma})\sigma_i\sigma_j,\quad\forall j\in\{1,2,\ldots,N\},i<j$ | $\mathcal O (N^5)$ |
## Example
```
]add UniformIsingModels
```
Construct a `UniformIsing` instance
```
using UniformIsingModels, Random
N = 10
J = 2.0
rng = MersenneTwister(0)
h = randn(rng, N)
β = 0.1
x = UniformIsing(N, J, h, β)
```
Compute stuff
```
# normalization and free energy
Z = normalization(x)
F = free_energy(x)
# energy and probability of a configuration
σ = rand(rng, (-1,1), N)
E = energy(x, σ)
prob = pdf(x, σ)
# a sample along with its probability
σ, p = sample(rng, x)
# single-site magnetizations <σᵢ>
m = site_magnetizations(x)
# distribution of the sum Σᵢσᵢ of all variables
ps = sum_distribution(x)
# energy expected value
U = avg_energy(x)
# entropy
S = entropy(x)
# correlations <σᵢσⱼ> and covariances <σᵢσⱼ>-<σᵢ><σⱼ>
p = correlations(x)
c = covariances(x)
```
## Notes
The internals rely on dynamic programming.
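For the curious: the key object is the left-to-right recursion computed by `accumulate_left!`, which tracks the log-weight of each value of the partial spin sum,
$L_k(s) = \log\sum_{\sigma_1,\dots,\sigma_k \,:\, \sum_{i\leq k}\sigma_i = s} \exp\Big(\beta\sum_{i\leq k}h_i\sigma_i\Big), \qquad L_k(s) = \mathrm{logaddexp}\big(\beta h_k + L_{k-1}(s-1),\ -\beta h_k + L_{k-1}(s+1)\big),$
so that $Z = \sum_{s=-N}^{N}\exp\left[\beta\frac{J}{2}\left(\frac{s^2}{N}-1\right) + L_N(s)\right]$, with each pass costing $\mathcal{O}(N^2)$.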
If you know of any implementation that's more efficient than this one I'd be very happy to learn about it!
| UniformIsingModels | https://github.com/stecrotti/UniformIsingModels.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 790 | using Documenter
using TexTables
makedocs(
modules = [TexTables],
sitename= "TexTables.jl",
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true"
),
warnonly = :missing_docs,
clean = false,
authors = "Jacob Adenbaum",
pages = [
"Introduction"=> "index.md",
"Easy Examples"=> "easy_examples.md",
"Basic Usage" => "basic_usage.md",
"Regression API"=> "regression_tables.md",
"Advanced Usage"=>"advanced_usage.md"]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
repo = "github.com/jacobadenbaum/TexTables.jl.git",
)
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 14889 | #=
This code provides the framework to stich together two separate tables
(either concatenating them horizontally or vertically).
=#
mutable struct IndexedTable{N, M} <: TexTable
columns::Vector
row_index::Index{N}
col_index::Index{M}
end
IndexedTable(t::TableCol) = begin
columns = [t]
row_index = keys(t.data) |> collect |> sort
col_index = [t.header]
return IndexedTable(columns, row_index, col_index)
end
convert(::Type{IndexedTable}, t::TexTable) = IndexedTable(t)
convert(::Type{IndexedTable}, t::IndexedTable) = t
################################################################################
#################### Merging and Concatenating #################################
################################################################################
function vcat(t1::IndexedTable, t2::IndexedTable)
# Promote to the same dimensions
t1, t2 = deepcopy.(promote(t1, t2))
# Row Indices stay the same except within the highest group, where they need
# to be shifted up in order to keep the index unique
shift = maximum(get_idx(t1.row_index, 1)) -
minimum(get_idx(t2.row_index, 1)) + 1
new_index = map(t2.row_index) do idx
i1 = idx.idx[1] + shift
new_idx = tuple(i1, idx.idx[2:end]...)
return update_index(idx, new_idx)
end
new_columns = deepcopy(t2.columns)
for col in new_columns
for (idx, new_idx) in zip(t2.row_index, new_index)
if haskey(col.data, idx)
col[new_idx] = pop!(col.data, idx)
end
end
end
row_index = vcat(t1.row_index, new_index)
# Columns
col_index = deepcopy(t1.col_index)
columns = deepcopy(t1.columns)
for (i, idx) in enumerate(t2.col_index)
# Figure out where to insert the column
new_idx, s = insert_index!(col_index, idx)
# It might be a new column
if s > length(columns)
push!(columns, new_columns[i])
# If not, we need to move all the data over
else
for (key, value) in new_columns[i].data
columns[s].data[key] = value
end
end
end
return IndexedTable(columns, row_index, col_index)
end
function hcat(t1::IndexedTable, t2::IndexedTable)
# Promote to the same dimensions
t1, t2 = deepcopy.(promote(t1, t2))
# Column Indices stay the same except within the highest group,
# where they need to be shifted up in order to keep the index unique
shift = maximum(get_idx(t1.col_index, 1)) -
minimum(get_idx(t2.col_index, 1)) + 1
new_index = map(t2.col_index) do idx
i1 = idx.idx[1] + shift
return update_index(idx, tuple(i1, idx.idx[2:end]...))
end
col_index = vcat(t1.col_index, new_index)
# Row indices are merged in (inserted) one at a time, maintaining
# strict insertion order in all index levels but the lowest one
new_columns = deepcopy(t2.columns)
row_index = t1.row_index
for idx in t2.row_index
# Insert the index and recover the new_index and the required
# insertion point
new_idx, s = insert_index!(row_index, idx)
# Rename the old indexes to the new ones
for col in new_columns
if haskey(col.data, idx)
val = pop!(col.data, idx)
col[new_idx] = val
end
end
end
# Remap the internal column headers to keep them consistent
old_new = Dict(Pair.(t2.col_index, new_index))
for col in new_columns
col.header = old_new[col.header]
end
# Now, we're ready to append the columns together.
columns = vcat(t1.columns, new_columns)
return IndexedTable(columns, row_index, col_index)
end
hcat(tables::Vararg{TexTable}) = reduce(hcat, tables)
vcat(tables::Vararg{TexTable}) = reduce(vcat, tables)
function hvcat(rows::Tuple{Vararg{Int}}, as::Vararg{TexTable})
nbr = length(rows) # number of block rows
rs = Array{Any,1}(undef, nbr)
a = 1
for i = 1:nbr
rs[i] = hcat(as[a:a-1+rows[i]]...)
a += rows[i]
end
vcat(rs...)
end
# Make vcat and hcat work for all TexTables
vcat(t1::TexTable, t2::TexTable) = vcat(convert.(IndexedTable,
(t1, t2))...)
hcat(t1::TexTable, t2::TexTable) = hcat(convert.(IndexedTable,
(t1, t2))...)
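# join_table: concatenate tables horizontally, grouping each argument under a
# new top-level column index (optionally named via `name => table` pairs)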
join_table(t1::IndexedTable) = t1
function join_table(t1::TexTable, t2::TexTable)
# Promote to the same dimensions
t1, t2 = promote(convert.(IndexedTable, (t1, t2))...)
t1_new = add_col_level(t1, 1)
t2_new = add_col_level(t2, 2)
return hcat(t1_new, t2_new)
end
function join_table(t1::TexTable, t2::TexTable,
t3::TexTable, args...)
return join_table(join_table(t1,t2), t3, args...)
end
# Joining on Pairs
function join_table(p1::Pair{P1,T1}) where {P1 <: Printable,
T1 <: TexTable}
t1 = convert(IndexedTable, p1.second)
t1_new = add_col_level(p1.second, 1, p1.first)
end
function join_table(p1::Pair{P1,T1}, p2::Pair{P2,T2}) where
{P1 <: Printable, P2 <: Printable, T1 <: TexTable, T2<:TexTable}
t1, t2 = promote(convert.(IndexedTable, (p1.second, p2.second))...)
t1_new = add_col_level(t1, 1, p1.first)
t2_new = add_col_level(t2, 2, p2.first)
return hcat(t1_new, t2_new)
end
function join_table(p1::Pair{P1,T1},
p2::Pair{P2,T2},
p3::Pair{P3,T3}, args...) where
{P1 <: Printable, P2 <: Printable, P3<:Printable,
T1 <: TexTable, T2<:TexTable, T3<:TexTable}
return join_table(join_table(p1, p2), p3, args...)
end
join_table(t1::IndexedTable, p2::Pair{P2,T2}) where {P2, T2} = begin
join_table(t1, join_table(p2))
end
join_table(p2::Pair{P2,T2},t1::IndexedTable) where {P2, T2} = begin
join_table(join_table(p2), t1)
end
join_table(t1::IndexedTable, p2::Pair{P2,T2}, args...) where {P2, T2} = begin
join_table(join_table(t1, p2), args...)
end
# Appending
append_table(t1::TexTable) = t1
function append_table(t1::TexTable, t2::TexTable)
# Promote to the same dimensions
t1, t2 = promote(t1, t2)
t1_new = add_row_level(t1, 1)
t2_new = add_row_level(t2, 2)
return vcat(t1_new, t2_new)
end
function append_table(t1::TexTable, t2::TexTable, t3::TexTable, args...)
return append_table(append_table(t1,t2), t3, args...)
end
# Appending on Pairs
function append_table(p1::Pair{P1,T1}) where {P1 <: Printable,
T1 <: TexTable}
t1 = convert(IndexedTable, p1.second)
t1_new = add_row_level(p1.second, 1, p1.first)
end
function append_table(p1::Pair{P1,T1}, p2::Pair{P2,T2}) where
{P1 <: Printable, P2 <: Printable, T1 <: TexTable, T2<:TexTable}
t1, t2 = promote(convert.(IndexedTable, (p1.second, p2.second))...)
t1_new = add_row_level(t1, 1, p1.first)
t2_new = add_row_level(t2, 2, p2.first)
return vcat(t1_new, t2_new)
end
function append_table(p1::Pair{P1,T1},
p2::Pair{P2,T2},
p3::Pair{P3,T3}, args...) where
{P1 <: Printable, P2 <: Printable, P3<:Printable,
T1 <: TexTable, T2<:TexTable, T3<:TexTable}
return append_table(append_table(p1, p2), p3, args...)
end
append_table(t1::IndexedTable, p2::Pair{P2,T2}) where {P2, T2} = begin
append_table(t1, append_table(p2))
end
append_table(p2::Pair{P2,T2},t1::IndexedTable) where {P2, T2} = begin
append_table(append_table(p2), t1)
end
append_table(t1::IndexedTable, p2::Pair{P2,T2}, args...) where {P2, T2} = begin
append_table(append_table(t1, p2), args...)
end
################################################################################
#################### Conversion Between Dimensions #############################
################################################################################
function promote_rule(::Type{IndexedTable{N1,M1}},
::Type{IndexedTable{N2,M2}}) where
{N1, M1, N2, M2}
N = max(N1, N2)
M = max(M1, M2)
return IndexedTable{N, M}
end
function convert(::Type{IndexedTable{N, M}}, t::IndexedTable{N0, M0}) where
{N,M,N0,M0}
if (N0 > N) | (M0 > M)
msg = """
Cannot convert IndexedTable{$N0,$M0} to IndexedTable{$N,$M}
"""
throw(error(msg))
else
for i=1:N-N0
t = add_row_level(t, 1)
end
for i=1:M-M0
t = add_col_level(t, 1)
end
end
return t
end
function promote_rule(::Type{T1}, ::Type{T2}) where
{T1 <: IndexedTable, T2 <: TexTable}
return IndexedTable
end
################################################################################
#################### General Indexing ##########################################
################################################################################
function insert_index!(index::Index{N}, idx::TableIndex{N}) where N
range = searchsorted(index, idx, lt=isless_group)
# If it's empty, insert it in the right position
if isempty(range)
insert!(index, range.start, idx)
return idx, range.start
# Otherwise, check to see whether or not the last level matches already
else
N_index = get_idx(index[range], N)
N_names = get_name(index[range], N)
# If it does, then we don't have to do anything except check that the
# strings are right
if idx.name[N] in N_names
loc = findall(N_names .== idx.name[N])[1]
# Here's the new index
new_idx = update_index(idx, tuple(idx.idx[1:N-1]..., loc))
return new_idx, range.start + loc - 1
else
# Otherwise, it's not there so we need to insert it into the index,
# and its last integer level should be one higher than all the
# others
new_idx = update_index(idx, tuple(idx.idx[1:N-1]...,
maximum(N_index)+1))
insert!(index, range.stop+1, new_idx)
return new_idx, range.stop + 1
end
end
end
get_idx(index) = map(x->x.idx, index)
get_idx(index, level::Int) = map(x->x.idx[level], index)
get_name(index) = map(x->x.name, index)
get_name(index, level::Int) = map(x->x.name[level], index)
function find_level(index::Index{N}, idx::Idx{N}, level::Int) where N
range = searchsorted(get_level(index, level), idx[level])
return range
end
function add_level(index::Vector{TableIndex{N}}, level,
name::Printable="") where N
return map(index) do idx
return TableIndex(tuple(level, idx.idx...),
tuple(Symbol(name), idx.name...))
end
end
"""
```
add_row_level(t::IndexedTable, level::Int, name::$Printable="")
```
Adds a new level to the row index, with the given `level` for the integer
component of the index and `name` for the symbol component.
"""
function add_row_level(t::IndexedTable{N,M}, level::Int,
name::Printable="") where {N,M}
new_rows = add_level(t.row_index, level, name)
old_new = Dict(Pair.(t.row_index, new_rows)...)
new_columns = []
for col in t.columns
data = TableDict{N+1, FormattedNumber}()
for (key, value) in col.data
data[old_new[key]] = value
end
push!(new_columns, TableCol(col.header, data))
end
return IndexedTable(new_columns, new_rows, t.col_index)
end
"""
```
add_col_level(t::IndexedTable, level::Int, name::$Printable="")
```
Adds a new level to the column index, with the given `level` for the integer
component of the index and `name` for the symbol component.
"""
function add_col_level(t::IndexedTable{N,M},
level::Int, name::Printable="") where {N,M}
new_cols = add_level(t.col_index, level, name)
old_new = Dict(Pair.(t.col_index, new_cols))
new_columns = []
for col in t.columns
push!(new_columns, TableCol(old_new[col.header],
col.data))
end
return IndexedTable(new_columns, t.row_index, new_cols)
end
add_row_level(t::TexTable, args...) = add_row_level(IndexedTable(t), args...)
add_col_level(t::TexTable, args...) = add_col_level(IndexedTable(t), args...)
################################################################################
#################### Access Methods ############################################
################################################################################
Indexable{N} = Union{TableIndex{N}, Tuple}
Indexable1D = Union{Printable, Integer}
function row_loc(t::IndexedTable{N,M}, idx::Indexable{N}) where {N,M}
locate(t.row_index, idx)
end
function col_loc(t::IndexedTable{N,M}, idx::Indexable{N}) where {N,M}
locate(t.col_index, idx)
end
function loc(t::IndexedTable{N,M}, ridx::Indexable{N},
cidx::Indexable{M}) where {N,M}
rloc = locate(t.row_index, ridx)
cloc = locate(t.col_index, cidx)
if isempty(rloc) | isempty(cloc)
throw(KeyError("key ($ridx, $cidx) not found"))
elseif length(rloc) > 1
throw(KeyError("$ridx does not uniquely identify a row"))
elseif length(cloc) > 1
throw(KeyError("$cidx does not uniquely identify a column"))
else
return rloc[1], cloc[1]
end
end
function locate(index::Vector{TableIndex{N}}, idx::TableIndex{N}) where N
return findall(index .== Ref(idx))
end
function locate(index::Vector{TableIndex{N}}, idx) where N
length(idx) == N || throw(ArgumentError("$idx does not have dimension $N"))
return findall(index) do x
for i=1:N
match_index(x, idx[i], i) || return false
end
return true
end
end
function match_index(index::TableIndex{N}, idx::Printable, level::Int) where N
return index.name[level] == Symbol(idx)
end
function match_index(index::TableIndex{N}, idx::Int, level::Int) where N
return index.idx[level] == idx
end
function getindex(t::IndexedTable{N,M}, row::Indexable{N},
col::Indexable{M}) where {N,M}
rloc, cloc = loc(t, row, col)
return t.columns[cloc][t.row_index[rloc]]
end
function setindex!(t::IndexedTable, args...)
throw(error("setindex! not implemented yet"))
end
# Fallback Methods
function getindex(t::IndexedTable, row::Indexable1D, col::Indexable1D)
return t[tuple(row), tuple(col)]
end
function getindex(t::IndexedTable, row::Indexable, col::Indexable1D)
return t[row, tuple(col)]
end
function getindex(t::IndexedTable, row::Indexable1D, col::Indexable)
return t[tuple(row), col]
end
# Getvals
function get_vals(t::IndexedTable, row, col)
get_vals(t[row, col])
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 4494 | function default_fmt(T::Type{S}) where {S}
error("Format for $T is not defined")
end
"""
Usage is: @fmt T = fmtstring
Available types and their default formats are:
1. Real "{:.3g}"
2. Int "{:,n}"
3. Bool "{:}"
4. AbstractString "{:}"
5. Missing ""
"""
macro fmt(ex)
msg = """
Usage is: @fmt T = fmtstring
Available types and their default formats are:
1. Real "{:.3g}"
2. Int "{:,n}"
3. Bool "{:}"
4. AbstractString "{:}"
5. Missing ""
"""
@assert(ex.head == :(=), msg)
@assert(length(ex.args) == 2, msg)
@assert(ex.args[1] isa Symbol, msg)
@assert(isa(ex.args[2], String), msg)
@assert(ex.args[1] in [:Real, :Int, :Bool, :AbstractString, :Missing], msg)
ex1 = ex.args[1]
ex2 = ex.args[2]
q = quote
TexTables.default_fmt(T::Type{S}) where {S <: $ex1} = $ex2
end
return q
end
@fmt Real = "{:.3g}"
@fmt Int = "{:,n}"
@fmt Bool = "{:}"
@fmt AbstractString = "{:}"
@fmt Missing = ""
const _fmt_spec_gG = r"[gG]"
const _fmt_spec_g = r"[g]"
const _fmt_spec_G = r"[G]"
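# rewrite a "g"/"G" format spec as fixed ("f"/"F") or scientific ("e"/"E")
# notation depending on the magnitude of `val`, or as "n" for integers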
function fixed_or_scientific(val, format)
if occursin(_fmt_spec_gG, format)
if val isa Integer
return replace(format, _fmt_spec_gG => "n")
else
mag = log(abs(val))/log(10)
if (-Inf < mag <= -3) | (mag >= 5)
r = "e"
else
r = "f"
end
if occursin(_fmt_spec_g, format)
return replace(format, _fmt_spec_g => lowercase(r))
else
return replace(format, _fmt_spec_G => uppercase(r))
end
end
end
return format
end
abstract type FormattedNumber{T} end
mutable struct FNum{T} <: FormattedNumber{T}
val::T
star::Int
format::String
function FNum(val::T, star::Int, format::String) where T
return new{T}(val, star, fixed_or_scientific(val, format))
end
end
==(x1::FNum, x2::FNum) = x1.val == x2.val &&
x1.format == x2.format &&
x1.star == x2.star
mutable struct FNumSE{T} <: FormattedNumber{T}
val::T
se::Float64
star::Int
format::String
format_se::String
function FNumSE(val::T, se::Float64, star::Int, format::String,
format_se::String) where T
return new{T}(val, se, star, fixed_or_scientific(val, format),
fixed_or_scientific(se, format_se))
end
end
==(x1::FNumSE, x2::FNumSE) = x1.val == x2.val &&
x1.se == x2.se &&
x1.star == x2.star &&
x1.format == x2.format &&
x1.format_se == x2.format_se
function FormattedNumber(val::T, format::String=default_fmt(T)) where T
return FNum(val, 0, format)
end
function FormattedNumber(val::T, se::S,
format::String=default_fmt(T),
format_se::String=default_fmt(S)) where
{T<:AbstractFloat, S <: AbstractFloat}
se2 = Float64(se)
    newval, newse = promote(val, se2)
return FNumSE(newval, newse, 0, format, format_se)
end
function FormattedNumber(val::T, se::S,
format::String=default_fmt(T),
format_se::String=default_fmt(S)) where
{T, S<:AbstractFloat}
se2 = Float64(se)
@assert(isnan(se), "Cannot have non-NaN Standard Errors for $T")
    return FNumSE(val, se2, 0, format, format_se)
end
FormattedNumber(x::FormattedNumber) = x
# Unpack Tuples of Floats for precision
FormattedNumber(x::Tuple{T1, T2}) where {T1<: AbstractFloat,
T2<:AbstractFloat} = begin
return FormattedNumber(x[1], x[2])
end
Base.show(io::IO, x::FNum) = print(io, format(x.format, x.val))
Base.show(io::IO, x::FNumSE)= begin
if isnan(x.se)
print(io, value(x))
else
str = string(value(x), " ", se(x))
print(io, str)
end
end
Base.convert(::Type{FNumSE}, x::FNum) = FormattedNumber(x.val, NaN)
Base.promote_rule(::Type{FNumSE{T}}, ::Type{FNum{S}}) where {T,S} = FNumSE
value(x::FormattedNumber) = format(x.format, x.val)
se(x::FNumSE) = format("($(x.format_se))", x.se)
se(x::FNum) = ""
function star!(x::FormattedNumber, i::Int)
x.star = i
return x
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 24305 |
"""
Internal function.
Checks `table_type` argument for validity, and throws error if not in
list of valid values.
"""
check_table_type(table_type) = begin
table_type in [:ascii, :latex] && return
msg = """
$table_type is invalid argument for table_type. See
documentation for `TablePrinter`:
table_type::Symbol (default :ascii)
Controls which type of table is printed. Currently has
two options:
1. :ascii -- Prints an ASCII table to be displayed in
the REPL
2. :latex -- Prints a LaTeX table for output
"""
throw(ArgumentError(msg))
end
default_sep(table_type) = begin
check_table_type(table_type)
table_type == :ascii && return "|"
table_type == :latex && return "&"
end
@with_kw mutable struct TableParams
pad::Int = 1
table_type::Symbol = :ascii
se_pos::Symbol = :below
star::Bool = true
sep::String = default_sep(table_type)
align::String = "c"
    TableParams(pad, table_type, se_pos, star, sep, align) = begin
        # Argument Checking
        check_table_type(table_type)
        return new(pad, table_type, se_pos, star, sep, align)
end
end
mutable struct TablePrinter{N,M}
table::IndexedTable{N,M}
params::TableParams
col_schema
row_schema
end
"""
```
TablePrinter(table::IndexedTable{N,M}; kwargs...) where {N,M}
```
Maps out the column and row schemas, and constructs a `TablePrinter`
object for the given table. Additional option parameters can be passed
as keyword arguments
Parameters
----------
pad::Int (default 1)
The number of spaces to pad the separator characters on each side.
table_type::Symbol (default :ascii)
Controls which type of table is printed. Currently has two options:
1. :ascii -- Prints an ASCII table to be displayed in the REPL
2. :latex -- Prints a LaTeX table for output
"""
function TablePrinter(t::TexTable; kwargs...)
table = convert(IndexedTable, t)
col_schema = generate_schema(table.col_index)
row_schema = generate_schema(table.row_index)
params = TableParams(;kwargs...)
return TablePrinter(table, params, col_schema, row_schema)
end
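# Usage sketch (assumes `tab` is an already-populated IndexedTable):
#   printer = TablePrinter(tab; table_type = :latex, pad = 2)
#   print(stdout, printer)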
########################################################################
#################### Index Schemas #####################################
########################################################################
"""
```
new_group(idx1::TableIndex, idx2::TableIndex, level::Int)
```
Internal method. Compares two elements from an index (of type
`Index{N}`) and checks whether or not `idx2` is in a different group
than `idx1` at index-depth `level`. An index starts a new group if
either its numeric position has changed, or the string value has
changed.
Calls itself recursively on each level above the given level to see
whether or not we have a new group at any of the higher levels.
"""
function new_group(idx1::TableIndex{N}, idx2::TableIndex{N},
level::Int) where N
# Terminal case
level == 0 && return false
# In other levels, recurse backwards through the levels -- short
# circuiting if we find any level where we're switching to a new
# group
return new_group(idx1, idx2, level-1) || begin
same_num = idx1.idx[level] == idx2.idx[level]
same_name = idx1.name[level] == idx2.name[level]
return (!same_num) | (!same_name)
end
end
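# Sketch: a change at level 1 starts a new group at every deeper level too.
#   i1 = TableIndex((1, 1), ("", "key1"))
#   i2 = TableIndex((2, 1), ("", "key1"))
#   new_group(i1, i2, 2)  # -> true (numeric index differs at level 1)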
"""
```
get_level(idx::$Idx, i)
```
Extracts the `i`th level of the index `idx` (both the integer and name
component) and returns it as a tuple.
"""
get_level(idx::TableIndex, i) = idx.idx[i], idx.name[i]
"""
```
generate_schema(index::$Index [, level::Int])
```
Returns a vector of `Pair{Tuple{Int, Symbol},Int}`, where for each
`pair`, `pair.first` is a tuple of the positional and symbolic
components of the index level, and `pair.second` is the number of times
that this entry is repeated within `level`.
When called without a level specified, returns an `OrderedDict` of
`level=>generate_schema(index, level)` for each level.
Example
-------
If we had a column `c1` that looked like
```
julia> show(c1)
| test
-----------------------
key1 | 0.867
key2 | -0.902
key3 | -0.494
key4 | -0.903
key5 | 0.864
key6 | 2.212
key7 | 0.533
key8 | -0.272
key9 | 0.502
key10 | -0.517
-----------------------
Fixed Effects | Yes
```
Then `generate_schema` would return:
```
julia> generate_schema(c2.row_index, 1)
2-element Array{Any,1}:
(1, Symbol(""))=>8
(2, Symbol(""))=>1
julia> generate_schema(c2.row_index, 2)
9-element Array{Any,1}:
(1, :key2)=>1
(2, :key3)=>1
(3, :key4)=>1
(4, :key5)=>1
(5, :key6)=>1
(6, :key7)=>1
(7, :key8)=>1
(8, :key9)=>1
(1, Symbol("Fixed Effects"))=>1
```
"""
function generate_schema(index::Index{N}, level::Int) where N
# Argument Checking
1 <= level <= N || throw(BoundsError("$level is invalid level"))
# Initialize the level schema
level_schema = []
idx = get_level(index[1], level)
count = 1
# Loop through the index values
n = length(index)
for i = 1:n-1
# If the next index is new, push it to the list
if new_group(index[i], index[i+1], level)
push!(level_schema, idx=>count)
idx = get_level(index[i+1], level)
count = 1
else
count += 1
end
end
push!(level_schema, idx=>count)
return level_schema
end
function generate_schema(index::Index{N}) where N
return OrderedDict(i=>generate_schema(index, i) for i=1:N)
end
"""
```
get_lengths(t::TablePrinter, col_schema=generate_schema(t.table.col_index))
```
Internal function.
Returns a vector of column lengths to be used in printing the table.
Has the same length as `t.table.columns`. `col_schema` is generated by
default if not passed; it must be the output of
`generate_schema(t.table.col_index)`.
If any header requires more printing space than the columns themselves
would require, allocates the additional space equally among all the
columns it spans.
"""
function get_lengths(printer::TablePrinter{N,M},
col_schema=generate_schema(
printer.table.col_index)) where {N,M}
# Get the underlying table and some parameters
t = printer.table
pad = printer.params.pad
sep = printer.params.sep
# Start out with the assumption that we just need enough space for
# the column contents
k = length(t.columns)
lengths = col_length.(Ref(printer), t.columns)
# Initialize a Block-Width Schema
bw_schema = Dict()
# Repeat this code twice so that everything updates fully
for u = 1:2
# Loop through the levels of the column index from the bottom up
for i=M:-1:1
level_schema = printer.col_schema[i]
col_pos = 0
bw_schema[i] = []
for (s, p) in enumerate(level_schema)
pos, name = p.first
block_size = p.second
fname = format_name(printer, i, block_size, name)
# Figure out the block width, accounting for the extra
# space from the separators and the padding
block_width = sum(lengths[(1:block_size) .+ col_pos])
block_width+= (block_size-1)*(2*pad + length(sep))
# If the block is not big enough for the formatted name,
# then add extra whitespace to each column (evenly) until
# there's enough.
difference = length(fname) - block_width
if difference > 0
extra_space = div(difference, block_size)
remainder = rem(difference, block_size)
for j = (1:block_size) .+ col_pos
lengths[j] += extra_space
                        # give the first `remainder` columns of the block one
                        # extra space each on the first pass
                        if (j - col_pos <= remainder) & (u == 1)
lengths[j] += 1
end
end
end
# Add the block width to the block width schema
push!(bw_schema[i], max(block_width, length(fname)))
# Update the column position
col_pos += block_size
end
end
end
return lengths, bw_schema
end
########################################################################
#################### Printer Methods ###################################
########################################################################
"""
```
mc(cols, val="", align="c")
"""
function mc(cols, val="", align="c")
return "\\multicolumn{$cols}{$align}{$val}"
end
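# e.g. mc(3, "Group A") returns the Julia string "\\multicolumn{3}{c}{Group A}"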
function format_name(printer::TablePrinter{N,M}, level::Int,
block_size::Int, name)::String where {N,M}
    # ASCII tables print the raw name (alignment is applied in `align_name`)
    printer.params.table_type == :ascii && return string(name)
# LaTeX tables need to print the name in a multi-column environment
# except at the lowest level
if printer.params.table_type == :latex
# When printing the name in Latex, we may want to escape some characters
name = escape_latex(printer, name)
if level == M
return "$name"
else
align = printer.params.align
return mc(block_size, name, align)
end
end
end
function align_name(printer::TablePrinter, len, name)
@unpack table_type = printer.params
table_type == :ascii && return center(name, len)
table_type == :latex && return format("{:$len}", name)
end
escape_latex(p::TablePrinter, name) = escape_latex(name)
function escape_latex(name)
# Convert the name to a string
name = string(name)
# Keep track of whether we're in math mode
mathmode = false
i = 1
    while i <= length(name)   # include the final character
# Update whether we're in mathmode
if name[i] == '$'
mathmode = mathmode ? false : true
end
s = name[i]
if (name[i] in keys(latex_replacements)) & !mathmode
r = latex_replacements[s]
name = name[1:i-1] * r * name[i+1:end]
i += length(r)
elseif (name[i] in keys(mathmode_replacements)) & mathmode
r = mathmode_replacements[s]
name = name[1:i-1] * r * name[i+1:end]
i += length(r)
else
i += 1
end
end
return name
end
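# Behavior sketch:
#   escape_latex("std_err")    # -> "std\\_err"  (underscore escaped in text)
#   escape_latex("\$x_1\$")    # -> "\$x_1\$"    (math mode left untouched)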
const latex_replacements = Dict{Char, String}('_' => "\\_")
const mathmode_replacements = Dict{Char, String}()
"""
Returns the maximum length of the column entries, not accounting for the
header columns
"""
function col_length(p::TablePrinter, col::TableCol)
@unpack pad, se_pos, star = p.params
l = 0
for key in keys(col.data)
val, se, stars = get_vals(col, key)
sl = star ? length(stars) : 0
if se_pos == :below
l = max(l, length(val) + sl, length(se))
elseif se_pos == :inline
l = max(l, length(val) + sl + pad + length(se))
elseif se_pos == :none
l = max(l, length(val) + sl)
end
end
return l
end
function newline(p::TablePrinter)
@unpack table_type = p.params
table_type == :ascii && return ""
table_type == :latex && return "\\\\"
end
endline(p::TablePrinter) = "\n"
# subline not properly implemented yet
subline(p::TablePrinter, args...) = ""
function hline(p::TablePrinter)
@unpack table_type = p.params
table_type == :ascii && return "\n"*"-"^width(p)
table_type == :latex && return " \\hline"
end
function hline(p::TablePrinter{N,M}, i::Int) where {N,M}
t = p.table
n, m = size(t)
# Figure out what kind of line ending we're on
i < 0 && return ""
i == 0 && return hline(p)
if 0 < i < n
ridx1 = t.row_index[i]
ridx2 = t.row_index[i+1]
return new_group(ridx1, ridx2, N-1) ? hline(p) : ""
elseif i == n
return ""
else
msg = "There are only $n rows. $i is not a valid index"
throw(BoundsError(msg))
end
end
function width(p::TablePrinter)
@unpack pad, sep = p.params
sep_len = length(sep)
lengths = get_lengths(p)[1]
rh_len = rowheader_length(p)
pad_ws = " "^pad
# Compute Total Width
len = 0
len += rh_len + pad
len += sum(lengths)
    len += length(lengths) * (2*pad + sep_len)
    return len
end
function top_matter(printer::TablePrinter{N,M}) where {N,M}
@unpack table_type = printer.params
@unpack col_schema = printer
t = printer.table
table_type == :ascii && return ""
table_type == :latex && return begin
align = ""
for i=1:N
align *= empty_row(t, i) ? "" : "r"
end
for (i, pair) in enumerate(col_schema[max(M-1,1)])
align *= (M > 1) | (i==1) ? "|" : ""
align *= "c"^pair.second
end
return "\\begin{tabular}{$align}\n\\toprule\n"
end
end
"""
Count the number of non-empty row-index dimensions
"""
function nonempty_rows(t::IndexedTable{N,M}) where {N,M}
c = 0
for i=1:N
c += empty_row(t, i) ? 0 : 1
end
return c
end
"""
Check whether the row index in dimension `i` is empty
"""
function empty_row(t::IndexedTable{N,M}, i::Int) where {N,M}
names = get_name(t.row_index, i)
return all(isempty.(string.(names)))
end
"""
Check whether the col index in dimension `i` is empty
"""
function empty_col(t::IndexedTable{N,M}, i::Int) where {N,M}
names = get_name(t.col_index, i)
return all(isempty.(string.(names)))
end
########################################################################
#################### Header Printing ###################################
########################################################################
function head(printer::TablePrinter{N,M}) where {N,M}
t = printer.table
pad = printer.params.pad
pad_ws = " "^pad
sep = printer.params.sep
sep_len = length(sep)
# Get the column lengths, column schema, block width schema, and
# rowheader length
lengths, bw_schema = get_lengths(printer)
col_schema = printer.col_schema
rh_length = rowheader_length(printer)
# Handle any top matter
output = ""
output *= top_matter(printer)
for i=1:M
# Skip this iteration in the loop if the column-level is empty
empty_col(t, i) && continue
# Add whitespace to account for the rowheader length, and add a
# separator
for j=1:N
rhj = rowheader_length(printer, j)
empty_row(t, j) && continue
output *= format("{:$rhj}", "")
output *= j < N ? pad_ws * sep * pad_ws : pad_ws
end
# Write each header
for (s, pair) in enumerate(col_schema[i])
block_len = bw_schema[i][s]
block_size = pair.second
name = pair.first[2]
name = format_name(printer, i, block_size, name)
header = align_name(printer, block_len, name)
# Write the header to the output
output *= sep * pad_ws * header * pad_ws
end
output *= newline(printer)
output *= hline(printer, -M + i)
output *= subline(printer, -M + i)
output *= endline(printer)
end
return output
end
########################################################################
#################### Body Printing #####################################
########################################################################
"""
```
empty_se(t::IndexedTable, ridx)
```
Internal printing function.
Computes for an indexed table `t` whether or not any of the entries
corresponding to row index `ridx` have nonempty standard errors.
"""
function empty_se(t::IndexedTable, ridx)
n, m = size(t)
for j=1:m
cidx = t.col_index[j]
val, se, star = get_vals(t, ridx, cidx)
isempty(se) || return false
end
return true
end
function body(printer::TablePrinter{N,M}) where {N,M}
output = ""
n, m = size(printer.table)
for i=1:n
output *= printline(printer, i)
output *= newline(printer)
output *= subline(printer, i)
output *= hline(printer, i)
output *= endline(printer)
end
return output
end
function printline(printer::TablePrinter, i)
i >= 1 || throw(ArgumentError("$i is an invalid row number"))
t = printer.table
n, m = size(t)
pad = printer.params.pad
pad_ws = " "^pad
sep = printer.params.sep
sep_len = length(sep)
@unpack se_pos, star = printer.params
# Get the column lengths, column schema, block width schema, and
# rowheader length
lengths, bw_schema = get_lengths(printer)
# Start writing the lines
output = ""
output *= rowheader(printer, i)
ridx = t.row_index[i]
print_se = !empty_se(t, ridx)
inline = se_pos == :inline
below = se_pos == :below
if below & print_se
line2= rowheader(printer, i, empty=true)
end
for j = 1:m
cidx = t.col_index[j]
val, se, stars = get_vals(t, ridx, cidx)
# Are we printing the standard errors?
use_se = inline & print_se
# Format and Print the value
output *= sep * pad_ws
entry = val
entry *= star ? stars : ""
entry *= use_se ?
pad_ws * se :
""
output *= format("{:>$(lengths[j])}",entry) * pad_ws
if below & print_se
line2 *= sep * pad_ws
line2 *= format("{:>$(lengths[j])}",se)
line2 *= pad_ws
end
end
if below & print_se
output *= newline(printer)*endline(printer)
output *= line2
end
return output
end
function rowheader(printer::TablePrinter{N,M}, i; empty=false) where {N,M}
t = printer.table
pad = printer.params.pad
pad_ws = " "^pad
sep = printer.params.sep
sep_len = length(sep)
ridx = t.row_index
output = ""
for j = 1:N
# Check whether they're all empty
empty_row(t, j) && continue
# Otherwise, figure out if we need to print it this time round
no_check = (i == 1) | (j == N)
print_name = no_check ? true : new_group(ridx[i],ridx[i-1], j)
# Print the separator and the padding
output *= ((j==1) || empty_row(t, j-1)) ? "" : sep * pad_ws
# Get length of rowheader for formatting
len = rowheader_length(printer, j)
# Print the name or whitespace depending on `print_name`
if print_name & !empty
# Check the size of the row if we're printing
block_size = schema_lookup(printer.row_schema[j], i)
# Add the row to the output
name = ridx[i].name[j]
output *= format("{:>$len}",
format_row_name(printer, j, block_size,
name))
else
output *= format("{:>$len}", "")
end
# Print the final padding
output *= pad_ws
end
return output
end
function schema_lookup(schema, i)
# Cumulative count sum
counts = map(x->x.second, schema) |> cumsum
# Find insertion point for i
idx = searchsortedfirst(counts, i)
# Return the block size at that point
return schema[idx].second
end
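# Sketch: with schema [(1, Symbol("")) => 8, (2, Symbol("")) => 1],
#   schema_lookup(schema, 3)  # -> 8 (row 3 lies in the first, 8-row block)
#   schema_lookup(schema, 9)  # -> 1 (row 9 lies in the trailing 1-row block)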
function format_row_name(printer::TablePrinter{N,M}, level::Int,
block_size::Int, name)::String where {N,M}
@unpack table_type = printer.params
if table_type == :ascii
fname = string(name)
elseif table_type == :latex
# When printing the name in Latex, we may want to escape some characters
sname = escape_latex(printer, name)
fname = block_size == 1 ? sname : mr(block_size,sname)
end
return string(fname)
end
"""
```
mr(rows, val="", align="c")
"""
function mr(cols, val="")
return "\\multirow{$cols}{*}{$val}"
end
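# e.g. mr(2, "Panel A") returns the Julia string "\\multirow{2}{*}{Panel A}"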
########################################################################
#################### Footer ############################################
########################################################################
function foot(t::TablePrinter)
@unpack table_type = t.params
table_type == :ascii && return ""
table_type == :latex && return "\\bottomrule\n\\end{tabular}"
end
########################################################################
#################### Printing ##########################################
########################################################################
function center(str::String, width::Int)
l = length(str)
if l > width
return str
else
k = div(width - l, 2)
str = format("{:>$(l + k)}", str)
str = format("{:<$width}", str)
return str
end
end
center(str::Symbol, width::Int) = center(string(str), width)
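# e.g. center("abc", 7) -> "  abc  "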
function getindex(t::IndexedTable, row)
output = []
for col in t.columns
push!(output, col[row])
end
return output
end
show(io::IO, col::TableCol) = print(io, IndexedTable(col))
size(t::IndexedTable)= (length(t.row_index), length(t.col_index))
function rowheader_length(printer::TablePrinter, level::Int)
t = printer.table
row_index = t.row_index
row_names = get_name(row_index)
row_schema = printer.row_schema[level]
n, m = size(t)
# Compute the max length
l = 0
for i=1:n
idx = t.row_index[i]
name = idx.name[level]
bsize = schema_lookup(row_schema, i)
fname = format_row_name(printer, level, bsize, name)
l = max(l, length(fname))
end
return l
end
"""
```
rowheader_length(printer::TablePrinter [, level::Int])
```
Computes the full length of the row-header. If level is specified, it
computes the length of just the one level. If no level is specified, it
computes the sum of all the level lengths, accounting for padding and
separator characters.
"""
function rowheader_length(printer::TablePrinter{N,M}) where {N,M}
# Unpack
t = printer.table
pad = printer.params.pad
sep = printer.params.sep
sep_len = length(sep)
total_pad = 2*pad + sep_len
# Offset it by one since there's no leading space
l = -total_pad
for i=1:N
lh = rowheader_length(printer, i)
l += lh
l += lh > 0 ? total_pad : 0
end
return l
end
########################################################################
#################### REPL Output #######################################
########################################################################
function print(io::IO, p::TablePrinter)
print(io, head(p)*body(p)*foot(p))
end
function print(io::IO, t::IndexedTable{N,M}; kwargs...) where {N,M}
if all(size(t) .> 0)
# Construct the printer
printer = TablePrinter(t; kwargs...)
print(io, printer)
else
print(io, "IndexedTable{$N,$M} of size $(size(t))")
end
end
function print(t::IndexedTable; kwargs...)
print(stdout, t; kwargs...)
end
show(io::IO, t::IndexedTable; kwargs...) = print(io, t; kwargs...)
show(t::IndexedTable; kwargs...) = print(t; kwargs...)
########################################################################
#################### String Output Methods #############################
########################################################################
function to_ascii(t::TexTable; kwargs...)
    any(size(t) .== 0) && error("Can't export empty table")
    p = TablePrinter(t; table_type = :ascii, kwargs...)
return head(p)*body(p)*foot(p)
end
function to_tex(t::TexTable; kwargs...)
    any(size(t) .== 0) && error("Can't export empty table")
p = TablePrinter(t; table_type = :latex, kwargs...)
return head(p)*body(p)*foot(p)
end
########################################################################
#################### Latex Table Output ################################
########################################################################
function write_tex(outfile, t::TexTable)
open(outfile, "w") do f
write(f, to_tex(t))
end
end
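# Typical export sketch (path is hypothetical):
#   write_tex("results/table1.tex", tab)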
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
#################### Summary Tables ####################################
########################################################################
# Element count that also works on length-less iterators like `skipmissing`
count(x) = mapreduce(y->1, +, x)
p10(x) = quantile(x |> collect, .1)
p25(x) = quantile(x |> collect, .25)
p50(x) = quantile(x |> collect, .50)
p75(x) = quantile(x |> collect, .75)
p90(x) = quantile(x |> collect, .9)
function default_stats(detail::Bool)
if !detail
return ("Obs" => count ∘ skipmissing,
"Mean" => mean ∘ skipmissing,
"Std. Dev." => std ∘ skipmissing,
"Min" => minimum ∘ skipmissing,
"Max" => maximum ∘ skipmissing)
else
return ("Obs" => count ∘ skipmissing,
"Mean" => mean ∘ skipmissing,
"Std. Dev." => std ∘ skipmissing,
"Min" => minimum ∘ skipmissing,
"p10" => p10 ∘ skipmissing,
"p25" => p25 ∘ skipmissing,
"p50" => p50 ∘ skipmissing,
"p75" => p75 ∘ skipmissing,
"p90" => p90 ∘ skipmissing,
"Max" => maximum ∘ skipmissing)
end
end
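# The `stats` keyword of `summarize` below accepts any name => function
# pairs, so custom summaries are a one-liner (sketch):
#   summarize(df, stats = ("N" => count ∘ skipmissing,
#                          "Median" => p50 ∘ skipmissing))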
# Numeric vectors, including element types that allow `missing`
NumericCol = AbstractVector{T} where {T1<:Real, T2<:Real,
                                      T<:Union{T1, Union{T2, Missing}}}
tuplefy(x) = tuple(x)
tuplefy(x::Tuple) = x
function promotearray(x::AbstractArray{S, N}) where {S,N}
types = unique(typeof(val) for val in x)
T = reduce(promote_type, types)
return Array{T,N}
end
function summarize(df::AbstractDataFrame, fields=names(df);
detail=false, stats=default_stats(detail), kwargs...)
# Determine whether each column is numeric or not
numeric = Dict(header => typeof(df[!,header]) <: NumericCol
for header in fields)
cols = TableCol[]
for pair in tuplefy(stats)
col = TableCol(pair.first)
for header in fields
if numeric[header]
col[header] = pair.second(df[!,header])
else
col[header] = ""
end
end
push!(cols, col)
end
return hcat(cols...)
end
function summarize(df::AbstractDataFrame, field::Symbol; kwargs...)
summarize(df, vcat(field); kwargs...)
end
function summarize_by(df, byname::Symbol,
fields=setdiff(names(df), vcat(string.(byname)));
kwargs...)
tabs = []
gd = groupby(df, byname)
for sub in gd
tab = summarize(sub, fields; kwargs...)
vals= unique(sub[!,byname])
        length(vals) == 1 || error("Groupby isn't working")
idx = vals[1]
push!(tabs, string(idx)=>tab)
end
return append_table(tabs...)
end
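# Usage sketch (mirrors the test suite; `iris` has a :Species column):
#   summarize(iris)                   # default stats, all columns
#   summarize(iris, detail = true)    # adds percentiles
#   summarize_by(iris, :Species)      # one summary block per species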
########################################################################
#################### Cross Tabulations #################################
########################################################################
function tabulate(df::AbstractDataFrame, field::Symbol)
# Count the number of observations by `field`
tab = combine(groupby(df, field), field => length => :_N)
# Construct a Frequency Column
sort!(tab, field)
vals = tab[!,field] .|> Symbol
freq = tab[!,:_N]
pct = freq/sum(freq)*100
cum = cumsum(pct)
# Construct Table
col1 = append_table(TableCol("Freq.", vals, freq),
TableCol("Freq.", "Total"=>sum(freq)))
col2 = append_table(TableCol("Percent", vals, pct ),
TableCol("Percent", "Total"=>sum(pct)))
col3 = TableCol("Cum.", vals, cum )
col = hcat(col1, col2, col3)
end
function tabulate(df::AbstractDataFrame, field1::Symbol, field2::Symbol)
# Count the number of observations by `field`
fields = vcat(field1, field2)
df = dropmissing(df[!,fields], disallowmissing=true)
tab = combine(groupby(df, fields), field1 => length => :_N)
sort!(tab, [field1, field2])
# Put it into wide form
tab = unstack(tab, field1, field2, :_N)
# Construct the table
vals = Symbol.(sort(unique(df[!,field2])))
cols = []
for val in vals
col = TableCol(val, Vector(tab[!,field1]), tab[!,val])
col2 = TableCol(val, "Total" => sum(coalesce.(tab[!,val], 0)))
push!(cols, append_table(field1=>col, ""=>col2))
end
sums = sum(coalesce.(Matrix(tab[!,vals]), 0), dims=2) |> vec
tot1 = TableCol("Total", Vector(tab[!,field1]), sums)
tot2 = TableCol("Total", "Total" => sum(sums))
tot = append_table(field1=>tot1, ""=>tot2)
ret = join_table(field2=>hcat(cols...), tot)
end
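# Tabulation sketch: one-way counts with percents, or a two-way cross tab
# (`:Group` is a hypothetical second categorical column):
#   tabulate(df, :Species)
#   tabulate(df, :Species, :Group)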
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
This file implements StatsModels constructors for TableCols. That is,
it allows one to pass a StatsModels object directly to the TableCol
constructor (or, since the Table constructor falls back to TableCols on
a vararg, the Table constructor).
=#
########################################################################
#################### General Regression API Implementation #############
########################################################################
RegCol{M} = TableCol{3,M}
function RegCol(header::Printable)
return TableCol(TableIndex(1, header), TableDict{3, FormattedNumber}())
end
"""
```
getnext(t::TableCol{3,M}, group::Int, level::Int) where M
```
In a TableCol `t` of row depth 3, computes the next index on the third level
given that the first dimension of the index is `group` and the second is
`level`.
"""
function getnext(t::RegCol, group::Int, level::Int)
max_idx = 0
for ridx in keys(t.data)
ridx.idx[1] != group && continue
ridx.idx[2] != level && continue
max_idx = max(max_idx, ridx.idx[3])
end
return max_idx + 1
end
const block_labs = Dict(:setcoef! =>"Coefficient",
:setmeta! =>"Metadata",
:setstats! =>"Statistics")
for (block, fname) in zip(1:3, [:setcoef!, :setmeta!, :setstats!])
"""
```
$fname(t::$RegCol, key, val[, se]; level=1, name="")
$fname(t::$RegCol, key=>val; level=1, name="")
$fname(t::$RegCol, kv::AbstractDict)
```
Inserts into `t` a key/value pair (possibly with a standard error) within
the block. Like the `TableCol` constructor, the pairs can be passed as
either individual key/value[/se] tuples or pairs, as several vectors of
key/value[/se] pairs, or as an associative.
To add additional sub-blocks, use the `level` keyword argument. Integers
less than 0 will appears in blocks above the standard block, and integers
greater than 1 will appear below it.
To name the block or sub-block, pass a nonempty string as the `name` keyword
argument.
"""
@eval function ($fname)(t::RegCol, key::Printable, val; level=1, name="")
next_idx = getnext(t, $block, level)
index = TableIndex(($block, level, next_idx), ("", name, key))
t[index] = val
end
@eval function ($fname)(t::RegCol, key::Printable, val, se; level=1, name="")
next_idx = getnext(t, $block, level)
index = TableIndex(($block, level, next_idx), ("", name, key))
t[index] = val, se
end
@eval function ($fname)(t::RegCol, p::Pair; level=1, name="")
next_idx = getnext(t, $block, level)
key = p.first
val = p.second
index = TableIndex(($block, level, next_idx), ("", name, key))
t[index] = val
end
# Handle associatives
@eval function ($fname)(t::RegCol, args...)
for kv in zip(args)
($fname)(t, kv...)
end
end
@eval function ($fname)(t::RegCol, ps::AbstractDict; level=1, name="")
($fname)(t, ps...)
end
end
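# Usage sketch for the generated setters (values are illustrative):
#   col = RegCol("(1)")
#   setcoef!(col, "x1", 0.42, 0.05)  # coefficient with its standard error
#   setmeta!(col, "FE" => "Yes")     # metadata in the middle block
#   setstats!(col, "N" => 100)       # fit statistic in the bottom block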
########################################################################
#################### Linear Model Interface ############################
########################################################################
function TableCol(header, m::RegressionModel;
stats=(:N=>Int∘nobs, "\$R^2\$"=>r2),
meta=(), stderror::Function=StatsBase.stderror, kwargs...)
# Compute p-values
pval(m) = ccdf.(FDist(1, dof_residual(m)),
abs2.(coef(m)./stderror(m)))
# Initialize the column
col = RegCol(header)
# Add the coefficients
for (name, val, se, p) in zip(coefnames(m), coef(m), stderror(m), pval(m))
setcoef!(col, name, val, se)
0.05 < p <= .1 && star!(col[name], 1)
0.01 < p <= .05 && star!(col[name], 2)
p <= .01 && star!(col[name], 3)
end
# Add in the fit statistics
setstats!(col, OrderedDict(p.first=>p.second(m) for p in tuplefy(stats)))
# Add in the metadata
setmeta!(col, OrderedDict(p.first=>p.second(m) for p in tuplefy(meta)))
return col
end
########################################################################
#################### regtable Interface ###############################
########################################################################
TableAble = Union{RegressionModel, TexTable, Pair, Tuple}
function regtable(args::Vararg{TableAble}; num=1, kwargs...)
cols = TexTable[]
for arg in args
new_tab = regtable(arg; num=num, kwargs...)
n, m = size(new_tab)
num += m
push!(cols, new_tab)
end
return hcat(cols...)
end
function regtable(t::RegressionModel; num=1, kwargs...)
return TableCol("($num)", t; kwargs...)
end
function regtable(t::TexTable; kwargs...)
return t
end
function regtable(p::Pair; kwargs...)
return join_table(p.first=>regtable(p.second; kwargs...))
end
function regtable(p::Tuple; kwargs...)
return regtable(p...; kwargs...)
end
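# Composition sketch (m1...m3 are fitted RegressionModels):
#   regtable(m1, m2)                                    # numbered columns
#   regtable("Group 1" => (m1, m2), "Group 2" => (m3,)) # grouped columns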
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
Name{N} = NTuple{N, Symbol}
struct TableIndex{N}
idx::Idx{N}
name::Name{N}
end
Index{N} = Vector{TableIndex{N}}
Printable = Union{AbstractString, Symbol}
function TableIndex(idx, name)
return TableIndex(tuplefy(idx), tuplefy(Symbol.(name)))
end
TableIndex(idx::Integer, name::Printable) = begin
TableIndex(tuple(idx), tuple(Symbol(name)))
end
TableIndex(name::Printable) = TableIndex(1, name)
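# e.g. TableIndex("Coef") == TableIndex((1,), (:Coef,))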
function update_index(index::TableIndex{N}, new_idx::Idx{N}) where N
return TableIndex(new_idx, index.name)
end
TableDict{N, T} = OrderedDict{TableIndex{N}, T} where T <: FormattedNumber
########################################################################
#################### Sorting the Index #################################
########################################################################
function isless(index1::TableIndex{N}, index2::TableIndex{N}) where N
for i=1:N
# First Check the numeric index
if index1.idx[i] < index2.idx[i]
return true
elseif index1.idx[i] > index2.idx[i]
return false
# Then check the strings
elseif index1.name[i] < index2.name[i]
return true
elseif index1.name[i] > index2.name[i]
return false
end
end
return false
end
function isless_group(index1::TableIndex{N}, index2::TableIndex{N},
                      level=N-1) where N
    # Compare only the first `level` index dimensions
    for i = 1:level
# First Check the numeric index
if index1.idx[i] < index2.idx[i]
return true
elseif index1.idx[i] > index2.idx[i]
return false
# Then check the strings
elseif index1.name[i] < index2.name[i]
return true
elseif index1.name[i] > index2.name[i]
return false
end
end
return false
end
########################################################################
#################### Columns ###########################################
########################################################################
mutable struct TableCol{N,M} <: TexTable
header::TableIndex{M}
data::TableDict{N, FormattedNumber}
end
TableCol(header::Printable) = TableCol(TableIndex(1, header),
TableDict{1, FormattedNumber}())
TableCol(x::TableCol; kwargs...) = x
function TableCol(header::Printable, kv::TableDict{N,T}) where
{N,T<:FormattedNumber}
return TableCol(TableIndex(header),
convert(TableDict{N, FormattedNumber}, kv))
end
# Columns are equal if they are the same entry all the way down
==(t1::TableCol, t2::TableCol) = begin
t1.header == t2.header || return false
t1.data == t2.data || return false
return true
end
########################################################################
#################### Constructors ######################################
########################################################################
function TableCol(header::Printable, kv::AbstractDict)
pairs = collect(TableIndex(i, key)=>FormattedNumber(value)
for (i, (key, value)) in enumerate(kv))
TableCol(header,
OrderedDict{TableIndex{1}, FormattedNumber}(pairs))
end
function TableCol(header, kv::AbstractDict, kp::AbstractDict)
TableCol(header,
OrderedDict{TableIndex{1}, FormattedNumber}(
TableIndex(i, key)=>(key in keys(kp)) ?
FormattedNumber(val, kp[key]) :
FormattedNumber(val)
for (i, (key, val))
in enumerate(kv)))
end
function TableCol(header, ks::Vector, vs::Vector)
pairs = [TableIndex(i, key)=>FormattedNumber(val)
for (i, (key, val)) in enumerate(zip(ks, vs))]
TableCol(header,
OrderedDict{TableIndex{1}, FormattedNumber}(pairs...))
end
function TableCol(header, keys::Vector, values::Vector,
precision::Vector)
pairs = [ TableIndex(i, key)=>FormattedNumber(val, se)
for (i, (key, val, se))
in enumerate(zip(keys, values, precision))]
data = OrderedDict(pairs...)
return TableCol(header, data)
end
convert(::Type{FormattedNumber}, x) = FormattedNumber(x)
convert(::Type{FormattedNumber}, x::FormattedNumber) = x
Entry = Pair{T, K} where {T<:Printable, K<:Union{Printable, Number, Missing,
NTuple{2,Number}}}
function TableCol(header::Printable, pairs::Vararg{Entry})
return TableCol(header, OrderedDict(pairs))
end
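# Construction sketch (illustrative data); entries may carry standard errors:
#   TableCol("est", "a" => 1.0, "b" => (2.0, 0.1))
#   TableCol("est", ["a", "b"], [1.0, 2.0], [0.1, 0.2])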
########################################################################
#################### Indexing ##########################################
########################################################################
function get_vals(x::FormattedNumber)
val = value(x)
seval = se(x)
star = "*"^x.star
return val, seval, star
end
function get_vals(col::TableCol, x::TableIndex, backup="")
if x in keys(col.data)
return get_vals(col.data[x])
else
return backup, "", ""
end
end
# This is an inefficient backup getindex method to maintain string
# indexing for users
function getindex(col::TableCol, key::Printable)
    x = Symbol(key)
    loc = name_lookup(col, x)
    index = keys(col.data) |> collect
    if length(loc) > 1
        throw(KeyError("""
        The string keys you've provided are not unique. Try indexing
        by TableIndex instead.
        """))
    elseif isempty(loc)
        throw(KeyError(key))
    else
        return col[index[loc[1]]]
    end
end
function name_lookup(col::TableCol{N,M}, x::Symbol) where {N,M}
index = keys(col.data) |> collect
names = get_name(index, N)
return findall(y->y==x, names)
end
function getindex(col::TableCol, x::TableIndex)
if haskey(col.data, x)
return col.data[x]
else
return FormattedNumber("")
end
end
function setindex!(col::TableCol{1,N}, value, key::Printable) where N
skey = Symbol(key)
loc = name_lookup(col, skey)
col_index = keys(col.data) |> collect
if length(loc) > 1
throw(KeyError("""
The string keys you've provided are not unique. Try indexing
by TableIndex instead.
"""))
elseif length(loc) == 0
# We need to insert it at a new position
index = get_idx(col_index, 1)
new_idx = length(index) > 0 ?
maximum(index) + 1 :
1
col[TableIndex(new_idx, skey)] = value
else
col[col_index[loc[1]]] = value
end
end
# General Backup falls back to FormattedNumber constructor
function setindex!(col::TableCol, value, key::TableIndex)
col.data[key] = FormattedNumber(value)
return col
end
# Handle values passed with precision
function setindex!(col::TableCol, value::Tuple{T, T2}, key::TableIndex) where
{T, T2<:AbstractFloat}
col.data[key] = FormattedNumber(value)
return col
end
# Handle optional stars
function setindex!(col::TableCol, value::Tuple{T, T2, Int},
key::TableIndex) where {T, T2<:AbstractFloat}
col.data[key] = FormattedNumber(value[1:2])
star!(col.data[key], value[3])
return col
end
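# Assignment sketch for a depth-1 column `col`: plain values, (value, se)
# pairs, or (value, se, stars):
#   col["beta"] = 0.42
#   col["beta"] = (0.42, 0.05)
#   col["beta"] = (0.42, 0.05, 2)  # attach two significance stars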
function size(t::TableCol)
n = length(t.data)
return n, 1
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
# package code goes here
# Nice string formattting
using Format, DataStructures, DataFrames
# Required for StatsModels Integration
using StatsBase
using Distributions
using GLM: LinearModel
using Parameters
using Compat
export FormattedNumber, FNum, FNumSE, @fmt, TableCol, star!
export TableCol, Table, TexTable, get_vals
export IndexedTable, append_table, join_table, promote_rule
export to_tex, to_ascii, write_tex, regtable
export setcoef!, setmeta!, setstats!, RegCol
# Import from base to extend
import Base: getindex, setindex!, push!
import Base: isless, ==
import Base: size, hcat, vcat, hvcat, convert, promote_rule
import Base: show, print
export summarize, summarize_by, tabulate
# Need this abstract type
abstract type TexTable end
include("FormattedNumbers.jl")
include("TableCol.jl")
include("CompositeTable.jl")
include("Printing.jl")
include("StatsModelsInterface.jl")
include("QuickTools.jl")
end # module
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
using Random
x = [0.8673472019512456, -0.9017438158568171, -0.4944787535042339,
-0.9029142938652416, 0.8644013132535154, 2.2118774995743475,
0.5328132821695382, -0.27173539603462066, 0.5023344963886675,
-0.5169836206932686]
x2 = [-0.5605013381807765, -0.019291781689849075, 0.12806443451512645,
1.852782957725545, -0.8277634318169205, 0.11009612632217552,
-0.2511757400198831, 0.3697140350317453, 0.07211635315125874,
-1.503429457351051]
y = [Symbol(:key, i) for i=1:10]
t1 = TableCol("test", y, x)
t2 = TableCol("test2", y[2:9], x[2:9])
t3 = TableCol("test3", y, x, x2 .|> abs .|> sqrt)
sub_tab1= hcat(t1, t2, t3)
# Composite Table Checks
t4 = TableCol("test" , Dict("Fixed Effects"=>"Yes")) |> IndexedTable
t5 = TableCol("test2", Dict("Fixed Effects"=>"No")) |> IndexedTable
t6 = TableCol("test3", Dict("Fixed Effects"=>"Yes")) |> IndexedTable
# Build the table two different ways
sub_tab2= hcat(t4, t5, t6)
tab = vcat(sub_tab1, sub_tab2)
tab2 = [t1 t2 t3
t4 t5 t6]
@test to_ascii(tab) == to_ascii(tab2)
c1 = append_table(t1, t4)
c2 = append_table(t2, t5)
c3 = append_table(t3, t6)
compare_file(1, to_tex(sub_tab1))
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
using StatsModels, GLM, RDatasets
@testset "Linear Model Examples No Stars" begin
# Check that this code runs without errors
df = dataset("datasets", "attitude")
# Compute summary stats for each variable
cols = []
for header in names(df)
x = df[!,header]
stats = TableCol(header,
"N" => length(x),
"Mean" => mean(x),
"Std" => std(x),
"Min" => minimum(x),
"Max" => maximum(x))
push!(cols, stats)
end
tab = hcat(cols...)
m1 = lm(@formula( Rating ~ 1 + Raises ), df)
m2 = lm(@formula( Rating ~ 1 + Raises + Learning), df)
m3 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges), df)
m4 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints), df)
m5 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints + Critical), df)
reg_table = hcat(TableCol("(1)", m1),
TableCol("(2)", m2),
TableCol("(3)", m3),
TableCol("(4)", m4),
TableCol("(5)", m5))
compare_file(2, to_ascii(reg_table, star=false))
compare_file(3, to_tex(reg_table, star=false))
# Check that regtable interface works
reg_table_b = regtable(m1, m2, m3, m4, m5)
compare_file(2, to_ascii(reg_table_b, star=false))
compare_file(3, to_tex(reg_table_b, star=false))
group1 = hcat( TableCol("(1)", m1),
TableCol("(2)", m2),
TableCol("(3)", m3))
compare_file(4, to_ascii(group1, star=false))
compare_file(5, to_tex(group1, star=false))
group2 = hcat( TableCol("(1)", m4),
TableCol("(2)", m5))
compare_file(6, to_ascii(group2, star=false))
compare_file(7, to_tex(group2, star=false))
grouped_table = join_table( "Group 1"=>group1,
"Group 2"=>group2)
compare_file(8, to_ascii(grouped_table, star=false))
compare_file(9, to_tex(grouped_table, star=false))
# Check that regtable interface works
grouped_table_b = regtable("Group 1"=>regtable(m1, m2, m3),
"Group 2"=>regtable(m4, m5))
compare_file(8, to_ascii(grouped_table_b, star=false))
compare_file(9, to_tex(grouped_table_b, star=false))
grouped_table_c = regtable("Group 1"=>(m1, m2, m3),
"Group 2"=>(m4, m5))
compare_file(32, to_ascii(grouped_table_c, star=false))
compare_file(33, to_tex(grouped_table_c, star=false))
# Compare against the original table for group1 again (to make sure
# that all the join_methods are non-mutating)
compare_file(4, to_ascii(group1, star=false))
compare_file(5, to_tex(group1, star=false))
end
@testset "Linear Models With Stars" begin
# Check that this code runs without errors
df = dataset("datasets", "attitude")
# Compute summary stats for each variable
cols = []
for header in names(df)
x = df[!,header]
stats = TableCol(header,
"N" => length(x),
"Mean" => mean(x),
"Std" => std(x),
"Min" => minimum(x),
"Max" => maximum(x))
push!(cols, stats)
end
tab = hcat(cols...)
m1 = lm(@formula( Rating ~ 1 + Raises ), df)
m2 = lm(@formula( Rating ~ 1 + Raises + Learning), df)
m3 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges), df)
m4 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints), df)
m5 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints + Critical), df)
reg_table = hcat(TableCol("(1)", m1),
TableCol("(2)", m2),
TableCol("(3)", m3),
TableCol("(4)", m4),
TableCol("(5)", m5))
compare_file(22, to_ascii(reg_table, star=true))
compare_file(23, to_tex(reg_table, star=true))
# Check that regtable interface works
reg_table_b = regtable(m1, m2, m3, m4, m5)
compare_file(22, to_ascii(reg_table_b, star=true))
compare_file(23, to_tex(reg_table_b, star=true))
group1 = hcat( TableCol("(1)", m1),
TableCol("(2)", m2),
TableCol("(3)", m3))
compare_file(24, to_ascii(group1, star=true))
compare_file(25, to_tex(group1, star=true))
group2 = hcat( TableCol("(1)", m4),
TableCol("(2)", m5))
compare_file(26, to_ascii(group2, star=true))
compare_file(27, to_tex(group2, star=true))
grouped_table = join_table( "Group 1"=>group1,
"Group 2"=>group2)
compare_file(28, to_ascii(grouped_table, star=true))
compare_file(29, to_tex(grouped_table, star=true))
# Check that regtable interface works
grouped_table_b = regtable("Group 1"=>regtable(m1, m2, m3),
"Group 2"=>regtable(m4, m5))
compare_file(28, to_ascii(grouped_table_b, star=true))
compare_file(29, to_tex(grouped_table_b, star=true))
grouped_table_c = regtable("Group 1"=>(m1, m2, m3),
"Group 2"=>(m4, m5))
compare_file(30, to_ascii(grouped_table_c, star=true))
compare_file(31, to_tex(grouped_table_c, star=true))
# Compare against the original table for group1 again (to make sure
# that all the join_methods are non-mutating)
compare_file(24, to_ascii(group1, star=true))
compare_file(25, to_tex(group1, star=true))
end
@testset "Summary Tables" begin
iris = dataset("datasets", "iris")
sum1 = summarize(iris)
compare_file(10, to_ascii(sum1))
compare_file(11, to_tex(sum1))
sum2 = summarize(iris, detail=true)
compare_file(12,to_ascii(sum2))
compare_file(13, to_tex(sum2))
sum3 = summarize_by(iris, :Species)
compare_file(14, to_ascii(sum3))
compare_file(15, to_tex(sum3))
sum4 = summarize_by(iris, :Species, detail=true)
compare_file(16, to_ascii(sum4))
compare_file(17, to_tex(sum4))
sum5 = tabulate(iris, :Species)
compare_file(18, to_ascii(sum5))
compare_file(19, to_tex(sum5))
sum6 = tabulate(iris, :PetalWidth)
compare_file(20, to_ascii(sum6))
compare_file(21, to_tex(sum6))
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
@testset "No SE" begin
for x in [1, .1, "1", π]
@test FormattedNumber(x).val == x
@test FormattedNumber(x).star == 0
end
@test FormattedNumber(1).format == "{:,n}"
@test FormattedNumber(.1).format == "{:.3f}"
@test FormattedNumber(1e-3).format == "{:.3f}"
@test FormattedNumber(1e-4).format == "{:.3e}"
@test FormattedNumber(1e3).format == "{:.3f}"
@test FormattedNumber(1e6).format == "{:.3e}"
@test_throws ErrorException FormattedNumber(Complex(1, 1))
end
@testset "SE" begin
for x in [1, "1"]
@test_throws AssertionError FormattedNumber(x, .2)
end
@test FormattedNumber(1.0, 0.1).format == "{:.3f}"
@test FormattedNumber(1.0, 0.1).format_se == "{:.3f}"
@test FormattedNumber(1.0e4, 0.1).format == "{:.3f}"
@test FormattedNumber(1.0e4, 0.1).format_se == "{:.3f}"
@test FormattedNumber(1.0e5, 0.1).format == "{:.3e}"
@test FormattedNumber(1.0e5, 0.1).format_se == "{:.3f}"
@test FormattedNumber(1.0e-4, 0.1).format == "{:.3e}"
@test FormattedNumber(1.0e-4, 0.1).format_se == "{:.3f}"
@test FormattedNumber(1.0, 1e4).format_se == "{:.3f}"
@test FormattedNumber(1.0, 1e5).format_se == "{:.3e}"
@test FormattedNumber(1.0, 1e-3).format_se == "{:.3f}"
@test FormattedNumber(1.0, 1e-4).format_se == "{:.3e}"
x = FormattedNumber(1.0, .1)
@test x.val == 1.0
@test x.se == 0.1
@test x.star == 0
star!(x, 1)
@test x.star == 1
# Check automatic conversion to FNumSE
@testset "Promotion" begin
x = FormattedNumber(1.0)
y = FormattedNumber(1.0, 0.1)
x1, y1 = promote(x, y)
@test y1 == y
@test x1.val == x.val
@test isnan(x1.se)
end
end
end
@testset "Showing Formatted Numbers" begin
@test sprint(show, FormattedNumber(.1)) == "0.100"
@test sprint(show, FormattedNumber(1)) == "1"
@test sprint(show, FormattedNumber("test")) == "test"
@test sprint(show, FormattedNumber(true)) == "true"
@test sprint(show, FormattedNumber(1.0, 0.1)) == "1.000 (0.100)"
@test sprint(show, FormattedNumber(1.0, 1e-3)) == "1.000 (0.001)"
@test sprint(show, FormattedNumber(1.0, 1e-4)) == "1.000 (1.000e-04)"
@test sprint(show, FormattedNumber(1.0, 1e4)) == "1.000 (10000.000)"
@test sprint(show, FormattedNumber(1.0, 1e5)) == "1.000 (1.000e+05)"
@test sprint(show, FormattedNumber(1.0, NaN)) == "1.000"
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
#################### Helper Functions for Testing ######################
########################################################################
function resource_path()
if splitdir(pwd())[2] == "TexTables"
return joinpath("test", "resources")
else
return joinpath("resources")
end
end
"""
Compares the contents of the file found at `fpath` to `fstring` line by
line, testing for equality.
"""
function compare_file(fpath::String, fstring::String)
open(fpath) do f
lines = readlines(f)
lines2 = split(fstring, "\n")
for (l1, l2) in zip(lines, lines2)
@test l1 == l2
end
end
end
"""
Generates the path to test table `i`
"""
function test_table(i)
return joinpath(resource_path(), "test_table$i.txt")
end
function compare_file(i::Int, fstring::String)
compare_file(test_table(i), fstring)
end
function export_table(i, fstring::String)
open(test_table(i), "w") do f
write(f, fstring)
end
end
function export_table(t::TexTable; kwargs...)
export_table(to_ascii(t; kwargs...), table_type="ascii")
export_table(to_tex(t; kwargs...), table_type="latex")
end
function next_test_table()
files = readdir(resource_path())
nums = map(files) do file
m = match(r"(?<=(test_table))\d*(?=(.txt))", file).match
parse(Int, m)
end
return maximum(nums) + 1
end
function export_table(fstring::String; table_type="")
next_num = next_test_table()
export_table(next_num, fstring)
println("Exported $table_type to $(test_table(next_num))")
return next_num
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
@testset "getindex" begin
@testset "TableCol Indexing" begin
# Data
name = "test"
data = OrderedDict("key1"=>1,
"key2"=>2,
"key3"=>.5,
"key4"=>(.25, .1),
"key5"=>"foo")
# Simple way to construct it
col = TableCol(name, data)
# Indexing Returns a Tuple of Formatted Values and Precisions
@test strip.(col["key1"] |> gv) == ("1", "", "")
@test strip.(col["key2"] |> gv) == ("2", "", "")
@test strip.(col["key3"] |> gv) == ("0.500", "", "")
@test strip.(col["key4"] |> gv) == ("0.250", "(0.100)", "")
@test strip.(col["key5"] |> gv) == ("foo", "", "")
# Indexing with Symbols
@test strip.(col[:key1] |> gv) == ("1", "", "")
@test strip.(col[:key2] |> gv) == ("2", "", "")
@test strip.(col[:key3] |> gv) == ("0.500", "", "")
@test strip.(col[:key4] |> gv) == ("0.250", "(0.100)", "")
@test strip.(col[:key5] |> gv) == ("foo", "", "")
end
@testset "IndexedTable Indexing" begin
# Construct some random tables
x = [0.8673472019512456, -0.9017438158568171, -0.4944787535042339,
-0.9029142938652416, 0.8644013132535154, 2.2118774995743475,
0.5328132821695382, -0.27173539603462066, 0.5023344963886675,
-0.5169836206932686]
x2 = [-0.5605013381807765, -0.019291781689849075, 0.12806443451512645,
1.852782957725545, -0.8277634318169205, 0.11009612632217552,
-0.2511757400198831, 0.3697140350317453, 0.07211635315125874,
-1.503429457351051]
y = [Symbol(:key, i) for i=1:10]
t1 = TableCol("test", y, x)
t2 = TableCol("test2", y[2:9], x[2:9])
t3 = TableCol("test3", y, x, x2 .|> abs .|> sqrt)
sub_tab1= hcat(t1, t2, t3)
# Composite Table Checks
t4 = TableCol("test" , Dict("Fixed Effects"=>"Yes"))
t5 = TableCol("test2", Dict("Fixed Effects"=>"No"))
t6 = TableCol("test3", Dict("Fixed Effects"=>"Yes"))
c1 = append_table(t1, t4)
c2 = append_table(t2, t5)
c3 = append_table(t3, t6)
@test strip.(c1[ (1, :key1 ), "test"] |> gv) == ("0.867" , "", "")
@test strip.(c1[ (1, :key2 ), "test"] |> gv) == ("-0.902", "", "")
@test strip.(c1[ (1, :key3 ), "test"] |> gv) == ("-0.494", "", "")
@test strip.(c1[ (1, :key4 ), "test"] |> gv) == ("-0.903", "", "")
@test strip.(c1[ (1, :key5 ), "test"] |> gv) == ("0.864" , "", "")
@test strip.(c1[ (1, :key6 ), "test"] |> gv) == ("2.212" , "", "")
@test strip.(c1[ (1, :key7 ), "test"] |> gv) == ("0.533" , "", "")
@test strip.(c1[ (1, :key8 ), "test"] |> gv) == ("-0.272", "", "")
@test strip.(c1[ (1, :key9 ), "test"] |> gv) == ("0.502" , "", "")
@test strip.(c1[ (1, :key10), "test"] |> gv) == ("-0.517", "", "")
@test strip.(c1[(2, "Fixed Effects"), "test"] |> gv) == ("Yes", "", "")
# Check that indexing into the second column we constructed
# works as expected
@test strip.(c2[ (1, :key2 ), "test2"] |> gv ) == ("-0.902", "", "")
@test strip.(c2[ (1, :key3 ), "test2"] |> gv ) == ("-0.494", "", "")
@test strip.(c2[ (1, :key4 ), "test2"] |> gv ) == ("-0.903", "", "")
@test strip.(c2[ (1, :key5 ), "test2"] |> gv ) == ("0.864" , "", "")
@test strip.(c2[ (1, :key6 ), "test2"] |> gv ) == ("2.212" , "", "")
@test strip.(c2[ (1, :key7 ), "test2"] |> gv ) == ("0.533" , "", "")
@test strip.(c2[ (1, :key8 ), "test2"] |> gv ) == ("-0.272", "", "")
@test strip.(c2[ (1, :key9 ), "test2"] |> gv ) == ("0.502" , "", "")
@test strip.(c2[(2, "Fixed Effects"), "test2"] |> gv) == ("No", "", "")
@test_throws KeyError c2[ (1, :key1 ), "test2"]
@test_throws KeyError c2[ (1, :key10), "test2"]
# Check that indexing with the wrong header throws an error
@test_throws KeyError c2[ (1, :key2), "test" ]
# Check that indexing into the wrong block throws an error
@test_throws KeyError c2[ (1, "Fixed Effects"), "test2"]
# Check that indexing into IndexedTables works with standard
# errors
@test strip.(c3[(1,:key1 ), "test3"] |> gv) == ("0.867" , "(0.749)", "")
@test strip.(c3[(1,:key2 ), "test3"] |> gv) == ("-0.902", "(0.139)", "")
@test strip.(c3[(1,:key3 ), "test3"] |> gv) == ("-0.494", "(0.358)", "")
@test strip.(c3[(1,:key4 ), "test3"] |> gv) == ("-0.903", "(1.361)", "")
@test strip.(c3[(1,:key5 ), "test3"] |> gv) == ("0.864" , "(0.910)", "")
@test strip.(c3[(1,:key6 ), "test3"] |> gv) == ("2.212" , "(0.332)", "")
@test strip.(c3[(1,:key7 ), "test3"] |> gv) == ("0.533" , "(0.501)", "")
@test strip.(c3[(1,:key8 ), "test3"] |> gv) == ("-0.272", "(0.608)", "")
@test strip.(c3[(1,:key9 ), "test3"] |> gv) == ("0.502" , "(0.269)", "")
@test strip.(c3[(1,:key10), "test3"] |> gv) == ("-0.517", "(1.226)", "")
# Check that indexing into merged tables works right
tab = [c1 c2 c3]
@test strip.(tab[ (1, :key1 ), "test"] |> gv ) == ("0.867" , "", "")
@test strip.(tab[ (1, :key2 ), "test"] |> gv ) == ("-0.902", "", "")
@test strip.(tab[ (1, :key3 ), "test"] |> gv ) == ("-0.494", "", "")
@test strip.(tab[ (1, :key4 ), "test"] |> gv ) == ("-0.903", "", "")
@test strip.(tab[ (1, :key5 ), "test"] |> gv ) == ("0.864" , "", "")
@test strip.(tab[ (1, :key6 ), "test"] |> gv ) == ("2.212" , "", "")
@test strip.(tab[ (1, :key7 ), "test"] |> gv ) == ("0.533" , "", "")
@test strip.(tab[ (1, :key8 ), "test"] |> gv ) == ("-0.272", "", "")
@test strip.(tab[ (1, :key9 ), "test"] |> gv ) == ("0.502" , "", "")
@test strip.(tab[ (1, :key10), "test"] |> gv ) == ("-0.517", "", "")
@test strip.(tab[(2, "Fixed Effects"), "test"] |> gv) == ("Yes", "", "")
# Check that indexing into the second column we constructed
# works as expected
@test strip.(tab[ (1, :key2 ), "test2"] |> gv) == ("-0.902", "", "")
@test strip.(tab[ (1, :key3 ), "test2"] |> gv) == ("-0.494", "", "")
@test strip.(tab[ (1, :key4 ), "test2"] |> gv) == ("-0.903", "", "")
@test strip.(tab[ (1, :key5 ), "test2"] |> gv) == ("0.864" , "", "")
@test strip.(tab[ (1, :key6 ), "test2"] |> gv) == ("2.212" , "", "")
@test strip.(tab[ (1, :key7 ), "test2"] |> gv) == ("0.533" , "", "")
@test strip.(tab[ (1, :key8 ), "test2"] |> gv) == ("-0.272", "", "")
@test strip.(tab[ (1, :key9 ), "test2"] |> gv) == ("0.502" , "", "")
@test strip.(tab[(2, "Fixed Effects"), "test2"] |> gv ) == ("No", "", "")
# Broken @test_throws
# @test_throws KeyError tab[ (1, :key1 ), "test2"]
# @test_throws KeyError tab[ (1, :key10), "test2"]
@test_skip tab[ (1, :key1 ), "test2"]
@test_skip tab[ (1, :key10), "test2"]
# Check that indexing into the wrong block throws an error
# @test_throws KeyError tab[ (1, "Fixed Effects"), "test2"]
@test_skip tab[ (1, "Fixed Effects"), "test2"]
# Check that indexing into IndexedTables works with standard
# errors
@test strip.(tab[(1,:key1 ), "test3"] |> gv ) == ("0.867" , "(0.749)", "")
@test strip.(tab[(1,:key2 ), "test3"] |> gv ) == ("-0.902", "(0.139)", "")
@test strip.(tab[(1,:key3 ), "test3"] |> gv ) == ("-0.494", "(0.358)", "")
@test strip.(tab[(1,:key4 ), "test3"] |> gv ) == ("-0.903", "(1.361)", "")
@test strip.(tab[(1,:key5 ), "test3"] |> gv ) == ("0.864" , "(0.910)", "")
@test strip.(tab[(1,:key6 ), "test3"] |> gv ) == ("2.212" , "(0.332)", "")
@test strip.(tab[(1,:key7 ), "test3"] |> gv ) == ("0.533" , "(0.501)", "")
@test strip.(tab[(1,:key8 ), "test3"] |> gv ) == ("-0.272", "(0.608)", "")
@test strip.(tab[(1,:key9 ), "test3"] |> gv ) == ("0.502" , "(0.269)", "")
@test strip.(tab[(1,:key10), "test3"] |> gv ) == ("-0.517", "(1.226)", "")
end
@testset "IndexedTable Indexing" begin
# Baseline check
x = [0.8673472019512456, -0.9017438158568171, -0.4944787535042339,
-0.9029142938652416, 0.8644013132535154, 2.2118774995743475,
0.5328132821695382, -0.27173539603462066, 0.5023344963886675,
-0.5169836206932686]
x2 = [-0.5605013381807765, -0.019291781689849075, 0.12806443451512645,
1.852782957725545, -0.8277634318169205, 0.11009612632217552,
-0.2511757400198831, 0.3697140350317453, 0.07211635315125874,
-1.503429457351051]
y = [Symbol(:key, i) for i=1:10]
t1 = TableCol("test", y, x) |> IndexedTable
t2 = TableCol("test2", y[2:9], x[2:9]) |> IndexedTable
t3 = TableCol("test3", y, x, x2.|>abs.|>sqrt) |> IndexedTable
t4 = TableCol("test" , Dict("Fixed Effects"=>"Yes")) |> IndexedTable
t5 = TableCol("test2", Dict("Fixed Effects"=>"No")) |> IndexedTable
t6 = TableCol("test3", Dict("Fixed Effects"=>"Yes")) |> IndexedTable
c1 = append_table(t1, t4)
c2 = append_table(t2, t5)
c3 = append_table(t3, t6)
# Put them together in several different multi-leveled ways
tab = [c1 c2 c3]
# tab2 = join_table("group 1"=>[c1, c2], "group 3"=>c3)
# tab3 = join_table("group 1"=>c1, "group2"=>c2, "group3"=>c3)
# tab4 = join_table("BIG GROUP 1"=>tab, "BIG GROUP 2"=>tab3)
# Check that the indexing is consistent between the TableCols
# and the IndexedTable:
for t in [t1, t2, t3, t4, t5, t6, c1, c2, c3, tab]
n, m = size(t)
for i=1:n, j=1:m
ridx = t.row_index[i]
cidx = t.col_index[j]
@test t[ridx, cidx] == t.columns[j][ridx]
end
end
end
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git | MIT | 0.3.0 |
default_sep, schema_lookup
@testset "Extracting Index Levels" begin
# Standard Composite Tables
x = [0.8673472019512456, -0.9017438158568171, -0.4944787535042339,
-0.9029142938652416, 0.8644013132535154, 2.2118774995743475,
0.5328132821695382, -0.27173539603462066, 0.5023344963886675,
-0.5169836206932686]
x2 = [-0.5605013381807765, -0.019291781689849075, 0.12806443451512645,
1.852782957725545, -0.8277634318169205, 0.11009612632217552,
-0.2511757400198831, 0.3697140350317453, 0.07211635315125874,
-1.503429457351051]
y = [Symbol(:key, i) for i=1:10]
t1 = TableCol("test", y, x)
t2 = TableCol("test2", y[2:9], x[2:9])
t3 = TableCol("test3", y, x, x2 .|> abs .|> sqrt)
t4 = TableCol("test" , Dict("Fixed Effects"=>"Yes"))
t5 = TableCol("test2", Dict("Fixed Effects"=>"No"))
t6 = TableCol("test3", Dict("Fixed Effects"=>"Yes"))
c1 = append_table(t1, t4)
c2 = append_table(t2, t5)
c3 = append_table(t3, t6)
# Check that the index levels are what I think they should be:
@test map(x->get_level(x, 1), c1.row_index) == begin
Tuple{Int64,Symbol}[(1, Symbol("")), (1, Symbol("")),
(1, Symbol("")), (1, Symbol("")),
(1, Symbol("")), (1, Symbol("")),
(1, Symbol("")), (1, Symbol("")),
(1, Symbol("")), (1, Symbol("")),
(2, Symbol(""))]
end
@test map(x->get_level(x, 2), c1.row_index) == begin
Tuple{Int64,Symbol}[(1, :key1), (2, :key2), (3, :key3),
(4, :key4), (5, :key5), (6, :key6),
(7, :key7), (8, :key8), (9, :key9),
(10, :key10),
(1, Symbol("Fixed Effects"))]
end
@test map(x->get_level(x, 1), c1.col_index) == [tuple(1, :test)]
@test map(x->get_level(x, 1), c2.col_index) == [tuple(1, :test2)]
@test_throws BoundsError map(x->get_level(x, 2), c1.col_index)
end
@testset "Index Schemas" begin
# Standard Composite Tables
x = [0.8673472019512456, -0.9017438158568171, -0.4944787535042339,
-0.9029142938652416, 0.8644013132535154, 2.2118774995743475,
0.5328132821695382, -0.27173539603462066, 0.5023344963886675,
-0.5169836206932686]
x2 = [-0.5605013381807765, -0.019291781689849075, 0.12806443451512645,
1.852782957725545, -0.8277634318169205, 0.11009612632217552,
-0.2511757400198831, 0.3697140350317453, 0.07211635315125874,
-1.503429457351051]
y = [Symbol(:key, i) for i=1:10]
t1 = TableCol("test", y, x)
t2 = TableCol("test2", y[2:9], x[2:9])
t3 = TableCol("test3", y, x, x2 .|> abs .|> sqrt)
t4 = TableCol("test" , Dict("Fixed Effects"=>"Yes"))
t5 = TableCol("test2", Dict("Fixed Effects"=>"No"))
t6 = TableCol("test3", Dict("Fixed Effects"=>"Yes"))
c1 = append_table(t1, t4)
c2 = append_table(t2, t5)
c3 = append_table(t3, t6)
@test generate_schema(c1.row_index, 1) == Any[(1, Symbol(""))=>10,
(2, Symbol(""))=>1]
@test generate_schema(c1.row_index, 2) == begin
Any[(1, :key1)=>1, (2, :key2)=>1, (3, :key3)=>1, (4, :key4)=>1,
(5, :key5)=>1, (6, :key6)=>1, (7, :key7)=>1, (8, :key8)=>1,
(9, :key9)=>1, (10, :key10)=>1, (1, Symbol("Fixed Effects"))=>1]
end
@test generate_schema(c3.row_index,1)==generate_schema(c1.row_index,1)
@test generate_schema(c3.row_index,2)==generate_schema(c1.row_index,2)
@test_throws BoundsError generate_schema(c3.row_index, 3)
s = generate_schema(c2.row_index, 1)
@test schema_lookup(s, 1) == 8
@test schema_lookup(s, 8) == 8
@test schema_lookup(s, 9) == 1
s = generate_schema(c2.row_index, 2)
@test schema_lookup(s, 1) == 1
@test schema_lookup(s, 8) == 1
@test schema_lookup(s, 9) == 1
end
@testset "Argument Checking" begin
@test check_table_type(:ascii) == nothing
@test check_table_type(:latex) == nothing
@test_throws ArgumentError check_table_type(:html)
@test_throws ArgumentError check_table_type("ascii")
@test_throws ArgumentError check_table_type("latex")
@test_throws ArgumentError check_table_type(5)
@test_throws ArgumentError check_table_type(5.0)
end
@testset "table_type Defaults" begin
@test default_sep(:ascii) == "|"
@test default_sep(:latex) == "&"
@test_throws ArgumentError default_sep("ascii")
end
# c1_a = TableCol("Column 1",
# OrderedDict("Row 1"=>1,
# "Row 2"=>2.3,
# "Row 3"=>(2.3, .3),
# "Row 4"=>(832.1, 20.0)))
# c1_b = TableCol("Column 1",
# OrderedDict("Stat 1" => 232))
#
# c1 = append_table(c1_a, c1_b)
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 1024 | import TexTables: tuplefy
@testset "tuplefy" begin
    vals = [1, 1.0, "1", Symbol("1")]  # Symbol("1"): a literal :1 parses back to the integer 1
for x in vals
@test tuplefy(x) == (x,)
end
for x in [(val,) for val in vals]
@test tuplefy(x) == x
end
for x in vals, y in vals
@test tuplefy((x, y)) == (x,y)
end
end
@testset "summarize" begin
df = DataFrame(A=[1,2,3],
B=[1.0, 2.0, 3.0],
C=[true, true, false],
D=BitArray([true, true, false]),
E=[1, 2, missing],
F=[1.0, 2.0, missing],
G=["test1", "test2", "test3"])
t = summarize(df)
compare_file(34, to_ascii(t))
compare_file(35, to_tex(t))
end
@testset "twoway tabulate" begin
# Check a generic one
iris = dataset("datasets", "iris")
iris[!,:TestVar] = CSV.read("resources/iris_testvar.csv", DataFrame)[!, :TestVar]
t = tabulate(iris, :Species, :TestVar)
compare_file(36, to_ascii(t))
compare_file(37, to_tex(t))
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 518 | using TexTables
import TexTables: TableIndex, TableDict
using DataStructures
using DataFrames
using Test
using Random
using LinearAlgebra
using Statistics
using RDatasets
using CSV
include("helper.jl")
tests = [ "tablecol", "composite_tables", "indexing", "printing",
"examples", "quicktools", "formatted_numbers", "table_joins"]
@testset "TexTables" begin
for testsuite in tests
@testset "$testsuite" begin
include("$testsuite.jl")
end
end
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 668 | @testset "Table Joins for Several Pairs" begin
import TexTables: IndexedTable, TableCol, join_table
# Tests for issue in PR #28 (makes sure that the recursive table join
# implementation on pairs works properly)
n = 5
keys = ["x$i" for i in 1:5]
cols = Vector{IndexedTable{1,1}}(undef, 4)
for j in 1:4
cols[j] = hcat(TableCol("mean", keys, rand(n)),
TableCol("std", keys, rand(n)))
end
# Test that the construction doesn't error out with 4 pairs
for k in 1:4
@test begin
tab = join_table(collect("($j)" => cols[j] for j in 1:k)...)
true
end
end
end | TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | code | 3092 |
import TexTables: TableIndex, tuplefy
function test_constructor(name, x, y)
pairs = Pair.(x,y)
col = TableCol(name, x, y)
@test col isa TableCol{1,1}
# Check that all the constructors work
@test begin
col2 = TableCol(name, OrderedDict(pairs))
col2 == col
end
@test begin
col3 = TableCol(name, pairs...)
col3 == col
end
# Build in a loop
@test begin
col4 = TableCol(name)
for (key, val) in zip(x,y)
col4[key] = val
end
col4 == col
end
end
# This version does it with precisions
function test_constructor(name, x, y, p)
pairs = Pair.(x,tuple.(y,p))
col = TableCol(name, x, y, p)
@test col isa TableCol{1,1}
# Check that all the constructors work
@test begin
col2 = TableCol(name, OrderedDict(pairs))
col2 == col
end
@test begin
col3 = TableCol(name, pairs...)
col3 == col
end
@test begin
p1 = Pair.(x, y) |> OrderedDict
p2 = Pair.(x, p) |> OrderedDict
col4 = TableCol(name, p1, p2)
col4 == col
end
# Build in a loop
@test begin
col5 = TableCol(name)
for (key, val, se) in zip(x,y,p)
col5[key] = val, se
end
col5 == col
end
end
@testset "Constructing TableCols" begin
@testset "Integer Values" begin
# Data
name = "test"
x = ["key1", "key2", "key3"]
y = [1, 2, 3]
test_constructor(name, x, y)
end
@testset "Float Values with Precision" begin
# Data
name = "test"
x = ["key1", "key2", "key3"]
y = [1.0, 2.0, 3.0]
p = [.2, .3, .3]
test_constructor(name, x, y, p)
end
@testset "Construct With Mixed Types" begin
name = "foo"
x = ["key1", "key2", "key3", "key4"]
y = [1, 2.2, (3.2, .24), "bar"]
test_constructor(name, x, y)
end
end
@testset "TableIndex" begin
####################################################################
################### Constructing TableIndex ########################
####################################################################
x = TableIndex(1, "test")
@test x.idx == (1,)
@test x.name == (:test,)
x = TableIndex(1, :test)
@test x.idx == (1,)
@test x.name == (:test,)
####################################################################
################### Comparing TableIndex Values for Sorting ########
####################################################################
a = "a test"
t = "test"
z = "z test"
# Sort Lexicographically on the levels
@test TableIndex((1,1), (a, t)) < TableIndex((2,1), (a, t))
@test TableIndex((1,1), (z, t)) < TableIndex((2,1), (a, t))
@test TableIndex((2,1), (a, t)) < TableIndex((2,1), (z, t))
@test TableIndex((2,1), (a, t)) <= TableIndex((2,1), (a, z))
@test TableIndex((2,1), (a, t)) <= TableIndex((2,1), (a, t))
@test TableIndex((2,1), (a, z)) > TableIndex((2,1), (a, a))
end
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 1366 | # TexTables.jl
[](https://github.com/jacobadenbaum/TexTables.jl/actions/workflows/ci.yml)
[](https://codecov.io/gh/jacobadenbaum/TexTables.jl)
[](https://jacobadenbaum.github.io/TexTables.jl/stable)
[](https://jacobadenbaum.github.io/TexTables.jl/latest)
The TexTables package provides an easy way for Julia users to quickly
build well-formatted and publication-ready ASCII and LaTeX tables from a
variety of different data structures. It allows the user to easily
build complex tables from small, modular components in an object
oriented fashion, as well as providing some methods for easily
constructing common tables from regression output.
This package is still in beta. I'm quite happy with it and I've been
using it (or some iteration of it) in my own work for quite a while.
But I'd appreciate feedback, feature requests, or pull requests (if you
want to help!).
# Quickstart
TexTables is a registered Julia package, so it can be installed with the command
```julia
Pkg.add("TexTables")
```
For more usage details, please see the latest documentation.
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 5571 | # Advanced Usage
These sections are for advanced users who are interested in fine-tuning
their own custom tables or integrating `TexTables` into their packages.
# Building Tables from Scratch
The core object when constructing tables with `TexTables` is the
`TableCol` type. This is just a wrapper around an `OrderedDict` and a
header index that enforces conversion of the header and the keys to
a special multi-index type that works with the `TexTables` structure for
printing.
Let's make up some data (values, keys, and standard errors) so that we
can see all of the different ways to construct columns:
```julia
julia> Random.seed!(1234);
julia> vals = randn(10)
10-element Array{Float64,1}:
0.867347
-0.901744
-0.494479
-0.902914
0.864401
2.21188
0.532813
-0.271735
0.502334
-0.516984
julia> key = [Symbol(:key, i) for i=1:10];
julia> se = randn(10) .|> abs .|> sqrt
10-element Array{Float64,1}:
0.748666
0.138895
0.357861
1.36117
0.909815
0.331807
0.501174
0.608041
0.268545
1.22614
```
## Constructing Columns From Vectors:
If your data is already in vector form, the easiest way to construct a
`TableCol` is to just pass the vectors as positional arguments:
```julia
julia> t1 = TableCol("Column", key, vals)
| Column
---------------
key1 | 0.867
key2 | -0.902
key3 | -0.494
key4 | -0.903
key5 | 0.864
key6 | 2.212
key7 | 0.533
key8 | -0.272
key9 | 0.502
key10 | -0.517
julia> typeof(t1)
TexTables.TableCol{1,1}
```
We can also build it iteratively by constructing an empty `TableCol`
object and populating it in a loop:
```julia
julia> t2 = TableCol("Column")
IndexedTable{1,1} of size (0, 1)
julia> for (k, v) in zip(key, vals)
t2[k] = v
end
julia> t2 == t1
true
```
## Constructing Columns with Standard Errors
To include standard errors, we can either pass the column of standard
errors as a third column, or we can set each entry to a `(value, se)`
tuple:
```julia
julia> t3 = TableCol("Column 2");
julia> for (k, v, p) in zip(key, vals, se)
t3[k] = v, p
end
julia> t3
| Column 2
-----------------
key1 | 0.867
| (0.749)
key2 | -0.902
| (0.139)
key3 | -0.494
| (0.358)
key4 | -0.903
| (1.361)
key5 | 0.864
| (0.910)
key6 | 2.212
| (0.332)
key7 | 0.533
| (0.501)
key8 | -0.272
| (0.608)
key9 | 0.502
| (0.269)
key10 | -0.517
| (1.226)
julia> t3 == TableCol("Column 2", key,vals, se)
true
```
## Constructing Columns from `<: Associative`
You can also pass an `Associative` of `key=>value` pairs like a `Dict` or
an `OrderedDict`. Beware though of using `Dict` types to pass the data,
since they will not maintain insertion order:
```julia
julia> dict = Dict(Pair.(key, vals));
julia> dict2 = OrderedDict(Pair.(key, vals));
julia> TableCol("Column", dict) == TableCol("Column",dict2)
false
```
To pass standard errors in an `Associative` as well, you can either pass
an associative where the values are tuples, or you can pass two
different lookup tables:
```julia
julia> se_dict1= OrderedDict(Pair.(key, tuple.(vals, se)));
julia> se_dict2= OrderedDict(Pair.(key, se));
julia> t3 == TableCol("Column 2",dict2, se_dict2) == TableCol("Column 2", se_dict1)
true
```
## A word of caution about merging tables
Be careful when you are stacking tables: `TexTables` does not stack them
positionally. It merges them on the appropriate column or row keys.
So suppose we were constructing a summary statistics table by computing
each column and concatenating them together:
```julia
using RDatasets, TexTables, DataStructures, DataFrames
df = dataset("datasets", "attitude")
# Compute summary stats for each variable
cols = []
for header in names(df)
x = df[header]
stats = TableCol(header,
"N" => length(x),
"Mean" => mean(x),
"Std" => std(x),
"Min" => minimum(x),
"Max" => maximum(x))
push!(cols, stats)
end
```
The right way to put them together horizontally is by calling `hcat`:
```julia
julia> tab = hcat(cols[1], cols[2])
| Rating | Complaints
---------------------------
N | 30 | 30
Mean | 64.633 | 66.600
Std | 12.173 | 13.315
Min | 40 | 37
Max | 85 | 90
```
But if instead we tried to vertically concatenate them, we would not
simply stack the tables the way you might expect. `TexTables` will
merge the two columns vertically on their column indexes, which in this
case are _different_.
```julia
julia> [cols[1]; cols[2]]
| Rating | Complaints
---------------------------
N | 30 |
Mean | 64.633 |
Std | 12.173 |
Min | 40 |
Max | 85 |
N | | 30
Mean | | 66.600
Std | | 13.315
Min | | 37
Max | | 90
```
This result, while perhaps unintuitive, is by design. `cols[1]` and
`cols[2]` really are not of a shape that could be put together
vertically (at least not without overwriting one of their column names).
But rather than give an error when some keys are not present,
`TexTables` tries its best to put them together in the order you've
requested. This behavior is essential for horizontally concatenating
two regression tables with summary statistics blocks at the bottom.
In general, whenever you concatenate two tables, they need to have the
same structure in the dimension that they are not being joined upon, or
the results will probably not be what you expected.
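For contrast, here is a sketch of a vertical merge that does line up:
both pieces share the column name "Rating", so `vcat` should stack
their rows into a single column (this reuses `df` from the summary
statistics example above; `top` and `bottom` are just illustrative
names):
```julia
x      = df[:Rating]
top    = TableCol("Rating", "N"   => length(x),  "Mean" => mean(x))
bottom = TableCol("Rating", "Min" => minimum(x), "Max"  => maximum(x))
vcat(top, bottom)  # should yield one "Rating" column with rows N, Mean, Min, Max
```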
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 19907 | # Basic Usage
The goal for this package is to make most tables extremely easy to
assemble on the fly. In the next few sections, I'll demonstrate some of
the basic usage, primarily using several convenience functions that make
it easy to construct common tables. However, these functions are a
small subset of what `TexTables` is designed for: it should be easy
to programmatically make any type of hierarchical table and print it
to LaTeX. For more details on how to easily roll-your-own tables (or
integrate LaTeX tabular output into your own package) using `TexTables`,
see the Advanced Usage section below.
## Making A Table of Summary Statistics
Let's download the `iris` dataset from `RDatasets`, and quickly
compute some summary statistics.
```julia
julia> using RDatasets, TexTables, DataStructures, DataFrames
julia> df = dataset("datasets", "iris");
julia> summarize(df)
| Obs | Mean | Std. Dev. | Min | Max
------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 7.900
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 4.400
PetalLength | 150 | 3.758 | 1.765 | 1.000 | 6.900
PetalWidth | 150 | 1.199 | 0.762 | 0.100 | 2.500
Species | | | | |
```
If we want more detail, we can pass the `detail=true` keyword argument:
```julia
julia> summarize(df,detail=true)
| Obs | Mean | Std. Dev. | Min | p10 | p25 | p50 | p75 | p90 | Max
----------------------------------------------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 4.800 | 5.100 | 5.800 | 6.400 | 6.900 | 7.900
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 2.500 | 2.800 | 3.000 | 3.300 | 3.610 | 4.400
PetalLength | 150 | 3.758 | 1.765 | 1.000 | 1.400 | 1.600 | 4.350 | 5.100 | 5.800 | 6.900
PetalWidth | 150 | 1.199 | 0.762 | 0.100 | 0.200 | 0.300 | 1.300 | 1.800 | 2.200 | 2.500
Species | | | | | | | | | |
```
We can restrict to only some variables by passing a second positional
argument, which can be either a `Symbol` or an iterable collection of
symbols.
The summarize function is similar to the Stata command `summarize`: it
leaves the statistics blank for string variables, and skips all missing
values when computing statistics.
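For instance, here is a minimal sketch with a hypothetical column that
contains a missing value (the statistics are computed over the
non-missing entries only):
```julia
df2 = DataFrame(A = [1.0, 2.0, missing])
summarize(df2)  # mean/std/min/max are based on the two non-missing values
```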
To customize what statistics are calculated, you can pass `summarize`
a `stats` keyword argument containing `Union{Symbol,String}=>Function`
pairs (a single pair works too):
```julia
# Quantiles of nonmissing values (need to collect to pass to quantile)
julia> nomiss(x) = skipmissing(x) |> collect;
julia> new_stats = ("p25" => x-> quantile(nomiss(x), .25),
"p50" => x-> quantile(nomiss(x), .5),
"p75" => x-> quantile(nomiss(x), .75));
julia> summarize(df, stats=new_stats)
| p25 | p50 | p75
------------------------------------
SepalLength | 5.100 | 5.800 | 6.400
SepalWidth | 2.800 | 3.000 | 3.300
PetalLength | 1.600 | 4.350 | 5.100
PetalWidth | 0.300 | 1.300 | 1.800
Species | | |
```
## Stacking Tables
It's easy to stack two tables that you created at different parts of
your code using calls to `hcat` or `vcat`:
```julia
julia> t11 = summarize(df, :SepalLength)
| Obs | Mean | Std. Dev. | Min | Max
------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 7.900
julia> t21= summarize(df, :SepalWidth)
| Obs | Mean | Std. Dev. | Min | Max
-----------------------------------------------------
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 4.400
julia> t12 = summarize(df, :SepalLength, stats=new_stats)
| p25 | p50 | p75
------------------------------------
SepalLength | 5.100 | 5.800 | 6.400
julia> t22 = summarize(df, :SepalWidth, stats=new_stats)
| p25 | p50 | p75
-----------------------------------
SepalWidth | 2.800 | 3.000 | 3.300
julia> tab = [t11 t12
t21 t22]
| Obs | Mean | Std. Dev. | Min | Max | p25 | p50 | p75
------------------------------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 7.900 | 5.100 | 5.800 | 6.400
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 4.400 | 2.800 | 3.000 | 3.300
```
You can also group statistics together with a call to the function
`join_table`. This constructs a new table with a column multi-index
that groups your data into two column blocks.
```julia
julia> join_table( "Regular Summarize" =>vcat(t11, t21),
"My Detail" =>vcat(t12, t22))
| Regular Summarize | My Detail
| Obs | Mean | Std. Dev. | Min | Max | p25 | p50 | p75
------------------------------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 7.900 | 5.100 | 5.800 | 6.400
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 4.400 | 2.800 | 3.000 | 3.300
```
There is an analogous function for creating multi-indexed row tables,
`append_table`. You can see it in action with a call to the function
`summarize_by`, which calculates summary statistics by grouping on a
variable.
```julia
julia> c1 = summarize_by(df, :Species, [:SepalLength, :SepalWidth])
| | Obs | Mean | Std. Dev. | Min | Max
-------------------------------------------------------------------
setosa | SepalLength | 50 | 5.006 | 0.352 | 4.300 | 5.800
| SepalWidth | 50 | 3.428 | 0.379 | 2.300 | 4.400
-------------------------------------------------------------------
versicolor | SepalLength | 50 | 5.936 | 0.516 | 4.900 | 7.000
| SepalWidth | 50 | 2.770 | 0.314 | 2.000 | 3.400
-------------------------------------------------------------------
virginica | SepalLength | 50 | 6.588 | 0.636 | 4.900 | 7.900
| SepalWidth | 50 | 2.974 | 0.322 | 2.200 | 3.800
julia> c2 = summarize_by(df, :Species, [:SepalLength, :SepalWidth],
stats=new_stats)
| | p25 | p50 | p75
-------------------------------------------------
setosa | SepalLength | 4.800 | 5.000 | 5.200
| SepalWidth | 3.200 | 3.400 | 3.675
-------------------------------------------------
versicolor | SepalLength | 5.600 | 5.900 | 6.300
| SepalWidth | 2.525 | 2.800 | 3.000
-------------------------------------------------
virginica | SepalLength | 6.225 | 6.500 | 6.900
| SepalWidth | 2.800 | 3.000 | 3.175
```
Now, when we horizontally concatenate `c1` and `c2`, they will
automatically maintain the block-ordering in the rows:
```julia
julia> final_table = join_table("Regular Summarize"=>c1, "My Detail"=>c2)
| | Regular Summarize | My Detail
| | Obs | Mean | Std. Dev. | Min | Max | p25 | p50 | p75
-------------------------------------------------------------------------------------------
setosa | SepalLength | 50 | 5.006 | 0.352 | 4.300 | 5.800 | 4.800 | 5.000 | 5.200
| SepalWidth | 50 | 3.428 | 0.379 | 2.300 | 4.400 | 3.200 | 3.400 | 3.675
-------------------------------------------------------------------------------------------
versicolor | SepalLength | 50 | 5.936 | 0.516 | 4.900 | 7.000 | 5.600 | 5.900 | 6.300
| SepalWidth | 50 | 2.770 | 0.314 | 2.000 | 3.400 | 2.525 | 2.800 | 3.000
-------------------------------------------------------------------------------------------
virginica | SepalLength | 50 | 6.588 | 0.636 | 4.900 | 7.900 | 6.225 | 6.500 | 6.900
| SepalWidth | 50 | 2.974 | 0.322 | 2.200 | 3.800 | 2.800 | 3.000 | 3.175
```
## Tabulate Function
`TexTables` also provides a convenience `tabulate` function:
```julia
julia> tabulate(df, :Species)
| Freq. | Percent | Cum.
---------------------------------------
setosa | 50 | 33.333 | 33.333
versicolor | 50 | 33.333 | 66.667
virginica | 50 | 33.333 | 100.000
---------------------------------------
Total | 150 | 100.000 |
```
In the future, I may add support for two-way tables (it's a very easy
extension).
## StatsModels Integrations
Let's say that we want to run a few regressions on some data that we
happened to come by:
```julia
using StatsModels, GLM
df = dataset("datasets", "attitude")
m1 = lm(@formula( Rating ~ 1 + Raises ), df)
m2 = lm(@formula( Rating ~ 1 + Raises + Learning), df)
m3 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges), df)
m4 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints), df)
m5 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints + Critical), df)
```
We can construct a single column for any one of these with the
`TableCol` constructor:
```julia
julia> t1 = TableCol("(1)", m1)
| (1)
-----------------------
(Intercept) | 19.978*
| (11.688)
Raises | 0.691***
| (0.179)
-----------------------
N | 30
$R^2$ | 0.348
```
But in general, it is easier to just use the `regtable` function when
combining several different models:
```julia
julia> reg_table = regtable(m1, m2, m3, m4, m5)
| (1) | (2) | (3) | (4) | (5)
-------------------------------------------------------------------
(Intercept) | 19.978* | 15.809 | 14.167 | 11.834 | 11.011
| (11.688) | (11.084) | (11.519) | (8.535) | (11.704)
Raises | 0.691*** | 0.379* | 0.352 | -0.026 | -0.033
| (0.179) | (0.217) | (0.224) | (0.184) | (0.202)
Learning | | 0.432** | 0.394* | 0.246 | 0.249
| | (0.193) | (0.204) | (0.154) | (0.160)
Privileges | | | 0.105 | -0.103 | -0.104
| | | (0.168) | (0.132) | (0.135)
Complaints | | | | 0.691*** | 0.692***
| | | | (0.146) | (0.149)
Critical | | | | | 0.015
| | | | | (0.147)
-------------------------------------------------------------------
N | 30 | 30 | 30 | 30 | 30
$R^2$ | 0.348 | 0.451 | 0.459 | 0.715 | 0.715
```
Currently, `TexTables` works with several standard regression packages
in the `StatsModels` family to construct custom coefficient tables.
I've mostly implemented these as proof of concept, since I'm not sure
how best to proceed on extending it to more model types. By default,
`TexTables` will display significance stars using p-value thresholds of
0.1 for 1 star, 0.05 for 2 stars, and 0.01 for 3 stars (as is standard).
I think that I may spin these off into a "formulas" package at some
point in the future.
If you are interested in integrating `TexTables` into your regression
package, please see the topic below under "Advanced Usage."
## Row and Column Blocks
As you can see, the summary statistics are kept in a separate row-block
while the columns are being merged together. We can do this either with
unnamed groups (like in the previous example), or with named groups that
will be visible in the table itself.
Suppose that our first 3 regressions needed to be visually grouped
together under a single heading, and the last two were separate. We
could instead construct each group separately and then combine them
together with the `join_table` function:
```julia
group1 = regtable(m1, m2, m3)
group2 = regtable(m4, m5)
grouped_table = join_table( "Group 1"=>group1,
"Group 2"=>group2)
```
This will display as:
```julia
julia> grouped_table = join_table( "Group 1"=>group1,
"Group 2"=>group2)
| Group 1 | Group 2
| (1) | (2) | (3) | (1) | (2)
------------------------------------------------------------------
(Intercept) | 19.978 | 15.809 | 14.167 | 11.834 | 11.011
| (11.688) | (11.084) | (11.519) | (8.535) | (11.704)
Raises | 0.691 | 0.379 | 0.352 | -0.026 | -0.033
| (0.179) | (0.217) | (0.224) | (0.184) | (0.202)
Learning | | 0.432 | 0.394 | 0.246 | 0.249
| | (0.193) | (0.204) | (0.154) | (0.160)
Privileges | | | 0.105 | -0.103 | -0.104
| | | (0.168) | (0.132) | (0.135)
Complaints | | | | 0.691 | 0.692
| | | | (0.146) | (0.149)
Critical | | | | | 0.015
| | | | | (0.147)
------------------------------------------------------------------
N | 30 | 30 | 30 | 30 | 30
$R^2$ | 0.348 | 0.451 | 0.459 | 0.715 | 0.715
```
If instead, we wanted to maintain a consistent numbering from (1)-(5),
we could do it using the `regtable` function:
```julia
julia> regtable("Group 1"=>(m1, m2, m3), "Group 2"=>(m4, m5))
| Group 1 | Group 2
| (1) | (2) | (3) | (4) | (5)
-------------------------------------------------------------------
(Intercept) | 19.978* | 15.809 | 14.167 | 11.834 | 11.011
| (11.688) | (11.084) | (11.519) | (8.535) | (11.704)
Raises | 0.691*** | 0.379* | 0.352 | -0.026 | -0.033
| (0.179) | (0.217) | (0.224) | (0.184) | (0.202)
Learning | | 0.432** | 0.394* | 0.246 | 0.249
| | (0.193) | (0.204) | (0.154) | (0.160)
Privileges | | | 0.105 | -0.103 | -0.104
| | | (0.168) | (0.132) | (0.135)
Complaints | | | | 0.691*** | 0.692***
| | | | (0.146) | (0.149)
Critical | | | | | 0.015
| | | | | (0.147)
-------------------------------------------------------------------
N | 30 | 30 | 30 | 30 | 30
$R^2$ | 0.348 | 0.451 | 0.459 | 0.715 | 0.715
```
And in latex, the group labels will be displayed with `\multicolumn`
commands:
```latex
\begin{tabular}{r|ccc|cc}
\toprule
& \multicolumn{3}{c}{Group 1} & \multicolumn{2}{c}{Group 2}\\
& (1) & (2) & (3) & (4) & (5) \\ \hline
(Intercept) & 19.978 & 15.809 & 14.167 & 11.834 & 11.011 \\
& (11.688) & (11.084) & (11.519) & (8.535) & (11.704) \\
Raises & 0.691 & 0.379 & 0.352 & -0.026 & -0.033 \\
& (0.179) & (0.217) & (0.224) & (0.184) & (0.202) \\
Learning & & 0.432 & 0.394 & 0.246 & 0.249 \\
& & (0.193) & (0.204) & (0.154) & (0.160) \\
Privileges & & & 0.105 & -0.103 & -0.104 \\
& & & (0.168) & (0.132) & (0.135) \\
Complaints & & & & 0.691 & 0.692 \\
& & & & (0.146) & (0.149) \\
Critical & & & & & 0.015 \\
& & & & & (0.147) \\ \hline
N & 30 & 30 & 30 & 30 & 30 \\
$R^2$ & 0.348 & 0.451 & 0.459 & 0.715 & 0.715 \\
\bottomrule
\end{tabular}
```
The vertical analogue of `join_table` is the function `append_table`.
Both will also accept the table objects as arguments instead of pairs if
you want to construct the row/column groups without adding a visible
multi-index.
## Display Options
You can recover the string output using the functions `to_tex` and
`to_ascii`. But, it is also possible to tweak the layout of the tables
by passing keyword arguments to the `print`, `show`, `to_tex`, or
`to_ascii` functions. For instance, if you would like to display your
standard errors on the same row as the coefficients, you can do so with
the `se_pos` argument:
```julia
julia> print(to_ascii(hcat( TableCol("(1)", m1), TableCol("(2)", m2)),
se_pos=:inline))
| (1) | (2)
-------------------------------------------------
(Intercept) | 19.978* (11.688) | 15.809 (11.084)
Raises | 0.691*** (0.179) | 0.379* (0.217)
Learning | | 0.432** (0.193)
-------------------------------------------------
N | 30 | 30
$R^2$ | 0.348 | 0.451
```
Similarly, if you want to print a table without showing the significance
stars, then simply pass the keyword argument `star=false`:
```julia
julia> print(to_ascii(hcat( TableCol("(1)", m1), TableCol("(2)", m2)),
star=false))
| (1) | (2)
----------------------------------
(Intercept) | 19.978 | 15.809
| (11.688) | (11.084)
Raises | 0.691 | 0.379
| (0.179) | (0.217)
Learning | | 0.432
| | (0.193)
----------------------------------
N | 30 | 30
$R^2$ | 0.348 | 0.451
```
Currently, `TexTables` supports the following display options:
1. `pad::Int` (default `1`)
    The number of spaces to pad the separator characters on each side (see the sketch after this list).
2. `se_pos::Symbol` (default `:below`)
1. :below -- Prints standard errors in parentheses on a second line
below the coefficients
2. :inline -- Prints standard errors in parentheses on the same
line as the coefficients
3. :none -- Suppresses standard errors. (I don't know why you would
want to do this... you probably shouldn't ever use it.)
3. `star::Bool` (default `true`)
If true, then prints any table entries that have been decorated
with significance stars with the appropriate number of stars.
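For example, here is a sketch of the `pad` option, reusing `m1` from the
StatsModels section above (wider spacing around the `|` separators):
```julia
print(to_ascii(TableCol("(1)", m1), pad=3))
```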
## Changing the Default Formatting
`TexTables` stores all of the table entries using special
formatting-aware container types that are subtypes of the abstract type
`FormattedNumber`. By default, `TexTables` displays floating points
with three decimal precision (and auto-converts to scientific notation
for values less than 1e-3 and greater than 1e5). Formatting is done
using Python-like formatting strings (implemented by the excellent
[Formatting.jl](https://github.com/JuliaIO/Formatting.jl) package). If you
would like to change the default formatting values, you can do so using
the macro `@fmt`:
```julia
@fmt Real = "{:.3f}" # Sets the default for reals to .3 fixed precision
@fmt Real = "{:.2f}" # Sets the default for reals to .2 fixed precision
@fmt Real = "{:.2e}" # Sets the default for reals to .2 scientific
@fmt Int = "{:,n}" # Sets the default for integers to use commas
@fmt Bool = "{:}" # No extra formatting for Bools
@fmt AbstractString= "{:}" # No extra formatting for Strings
```
Note that this controls the _defaults_ used when constructing a
`FormattedNumber`. If you want to change the formatting in a table that
has already been constructed, you need to manually change the `format`
field of each entry in the table:
```julia
julia> x = FormattedNumber(5.0)
5.000
julia> x.format
"{:.3f}"
julia> x.format = "{:.3e}";
julia> x
5.000e+00
```
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 7839 | # Easy Examples
Here are just a couple examples of tables that TexTables makes extremely easy to
produce and export. These are mostly proof of concept: TexTables provides a
backend that makes the code to write these convenience methods extremely [compact](https://github.com/jacobadenbaum/TexTables.jl/blob/master/src/QuickTools.jl).
## Regression Tables
```@meta
DocTestSetup = quote
# Get the warning out of the way before we start
using TexTables, StatsModels, GLM, RDatasets
df = dataset("datasets", "iris")
end
```
```jldoctest ex1
using TexTables, StatsModels, GLM, RDatasets
df = dataset("datasets", "attitude");
m1 = lm(@formula( Rating ~ 1 + Raises ), df);
m2 = lm(@formula( Rating ~ 1 + Raises + Learning), df);
m3 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges), df);
m4 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints), df);
m5 = lm(@formula( Rating ~ 1 + Raises + Learning + Privileges
+ Complaints + Critical), df);
table = regtable(m1, m2, m3, m4, m5)
# output
| (1) | (2) | (3) | (4) | (5)
-------------------------------------------------------------------
(Intercept) | 19.978* | 15.809 | 14.167 | 11.834 | 11.011
| (11.688) | (11.084) | (11.519) | (8.535) | (11.704)
Raises | 0.691*** | 0.379* | 0.352 | -0.026 | -0.033
| (0.179) | (0.217) | (0.224) | (0.184) | (0.202)
Learning | | 0.432** | 0.394* | 0.246 | 0.249
| | (0.193) | (0.204) | (0.154) | (0.160)
Privileges | | | 0.105 | -0.103 | -0.104
| | | (0.168) | (0.132) | (0.135)
Complaints | | | | 0.691*** | 0.692***
| | | | (0.146) | (0.149)
Critical | | | | | 0.015
| | | | | (0.147)
-------------------------------------------------------------------
N | 30 | 30 | 30 | 30 | 30
$R^2$ | 0.348 | 0.451 | 0.459 | 0.715 | 0.715
```
## Grouped Regression Tables
We can add a add a hierarchical structure by passing the model objects as pairs
of Strings/Symbols and model objects/tuples of model objects:
```jldoctest ex1
grouped_table = regtable( "Group 1"=>(m1,m2,m3),
"Group 2"=>(m4, m5))
# output
| Group 1 | Group 2
| (1) | (2) | (3) | (4) | (5)
-------------------------------------------------------------------
(Intercept) | 19.978* | 15.809 | 14.167 | 11.834 | 11.011
| (11.688) | (11.084) | (11.519) | (8.535) | (11.704)
Raises | 0.691*** | 0.379* | 0.352 | -0.026 | -0.033
| (0.179) | (0.217) | (0.224) | (0.184) | (0.202)
Learning | | 0.432** | 0.394* | 0.246 | 0.249
| | (0.193) | (0.204) | (0.154) | (0.160)
Privileges | | | 0.105 | -0.103 | -0.104
| | | (0.168) | (0.132) | (0.135)
Complaints | | | | 0.691*** | 0.692***
| | | | (0.146) | (0.149)
Critical | | | | | 0.015
| | | | | (0.147)
-------------------------------------------------------------------
N | 30 | 30 | 30 | 30 | 30
$R^2$ | 0.348 | 0.451 | 0.459 | 0.715 | 0.715
```
## Exporting to Latex
All of these commands return subtypes of the abstract `TexTable` type. Any
`TexTable` can be printed as either an ascii table (as shown above) with the
method `to_ascii` or as a latex table with the method `to_tex`:
```jldoctest ex1
to_tex(grouped_table) |> print
# output
\begin{tabular}{r|ccc|cc}
\toprule
& \multicolumn{3}{c}{Group 1} & \multicolumn{2}{c}{Group 2} \\
& (1) & (2) & (3) & (4) & (5) \\ \hline
(Intercept) & 19.978* & 15.809 & 14.167 & 11.834 & 11.011 \\
& (11.688) & (11.084) & (11.519) & (8.535) & (11.704) \\
Raises & 0.691*** & 0.379* & 0.352 & -0.026 & -0.033 \\
& (0.179) & (0.217) & (0.224) & (0.184) & (0.202) \\
Learning & & 0.432** & 0.394* & 0.246 & 0.249 \\
& & (0.193) & (0.204) & (0.154) & (0.160) \\
Privileges & & & 0.105 & -0.103 & -0.104 \\
& & & (0.168) & (0.132) & (0.135) \\
Complaints & & & & 0.691*** & 0.692*** \\
& & & & (0.146) & (0.149) \\
Critical & & & & & 0.015 \\
& & & & & (0.147) \\ \hline
N & 30 & 30 & 30 & 30 & 30 \\
$R^2$ & 0.348 & 0.451 & 0.459 & 0.715 & 0.715 \\
\bottomrule
\end{tabular}
```
It's as simple as that. As you can see, higher level groupings will be
separated with vertical bars, and their headings will be printed as
`\multicolumn` environments. In tables with row-groupings, TexTables will
automatically use `\multirow` environments. TexTables will automatically handle
printing it in a way that is well aligned and can be read even from the raw tex
file, and will align the multi-columns and multi-indexes for you.
You can write the table to a tex file yourself, or you can use the convenience
wrapper `write_tex(fpath::String, t::TexTable)`.
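For instance, a one-line sketch using the wrapper (the file name is just an
example):
```julia
write_tex("grouped_table.tex", grouped_table)
```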
## Summary Tables
Making summary tables is similarly easy:
```jldoctest ex1
df = dataset("datasets", "iris");
summarize(df)
# output
| Obs | Mean | Std. Dev. | Min | Max
------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 7.900
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 4.400
PetalLength | 150 | 3.758 | 1.765 | 1.000 | 6.900
PetalWidth | 150 | 1.199 | 0.762 | 0.100 | 2.500
Species | | | | |
```
To choose only a subset of variables, and get a more detailed summary table:
```jldoctest ex1
summarize(df, [:SepalLength, :SepalWidth], detail=true)
# output
| Obs | Mean | Std. Dev. | Min | p10 | p25 | p50 | p75 | p90 | Max
----------------------------------------------------------------------------------------------
SepalLength | 150 | 5.843 | 0.828 | 4.300 | 4.800 | 5.100 | 5.800 | 6.400 | 6.900 | 7.900
SepalWidth | 150 | 3.057 | 0.436 | 2.000 | 2.500 | 2.800 | 3.000 | 3.300 | 3.610 | 4.400
```
To group by another variable in the DataFrame, use the `summarize_by` function:
```jldoctest ex1
c1 = summarize_by(df, :Species, [:SepalLength, :SepalWidth])
# output
| | Obs | Mean | Std. Dev. | Min | Max
-------------------------------------------------------------------
setosa | SepalLength | 50 | 5.006 | 0.352 | 4.300 | 5.800
| SepalWidth | 50 | 3.428 | 0.379 | 2.300 | 4.400
-------------------------------------------------------------------
versicolor | SepalLength | 50 | 5.936 | 0.516 | 4.900 | 7.000
| SepalWidth | 50 | 2.770 | 0.314 | 2.000 | 3.400
-------------------------------------------------------------------
virginica | SepalLength | 50 | 6.588 | 0.636 | 4.900 | 7.900
| SepalWidth | 50 | 2.974 | 0.322 | 2.200 | 3.800
```
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 1846 | # Introduction
The TexTables package provides an easy way for Julia users to quickly build
well-formatted and publication-ready ASCII and LaTeX tables from a variety of
different data structures. It allows the user to easily build complex tables
from small, modular components in an object oriented fashion, as well as
providing some methods for easily constructing common tables from regression
output.
TexTables.jl is designed for building all sorts of statistical tables in a very
modular fashion and for quickly displaying them in the REPL or exporting them to
LaTeX. It’s quite extensible, and probably the most important use cases will be
for people who want to make their own custom tables, but it has implemented
support for some basic regression tables, cross-tabulations, and summary
statistics as proof-of-concept.
## Features
Currently TexTables will allow you to:
1. Build multi-indexed tables programmatically with a simple to use interface
that allows for row and column groupings.
2. Print them in the REPL as ASCII tables, or export them to LaTeX for easy
inclusion
It also provides constructors and methods to
3. Quickly construct regression tables from estimated models that adhere to the
`LinearModel` API.
1. Add customized model metadata (such as the type of estimator used, etc...)
2. Group regression columns into subgroups using multicolumn headings
3. Add significance stars to coefficient estimates.
4. Use robust standard errors from CovarianceMatrices.jl or other packages.
4. Construct several standard tables that may be useful in exploratory data
analysis
1. Summary tables
2. Grouped summary tables.
3. One-way frequency tables.
## Installation
TexTables is a registered Julia package, so it can be installed with the command
```julia
Pkg.add("TexTables")
```
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.3.0 | 7041d8bc4de0027fea0b1fe72a36a6cc12e1e340 | docs | 6952 | # Regression Tables API
TexTables should be able to provide a basic regression table for any model that
adheres to the `RegressionModel` API found in StatsBase and makes it easy to
customize the tables with additional fit statistics or model information as you
see fit. This section documents how to use and customize the regression tables
functionality for models in your code, as well as how to override the default
settings for a model in your Package.
## Special Structure of Regression Tables
Regression tables in TexTables are constructed using a special API that is
provided to ensure that the regression tables from different estimators
(potentially from separate packages) can be merged together. You should _not_
construct your tables directly if you want them to merge nicely with the
standard regression tables. Instead, you should use the methods documented in
this section.
Regression tables are divided into 3 separate row blocks:
1. Coefficients: This block contains the parameter estimates and
standard errors (possibly decorated with stars for p-values) and always
appears first
2. Metadata: This block is empty by default (and therefore will not be
printed in the table), but can be populated by the user to include
column/model specific metadata. For example, a user might want to denote
whether or not they controlled for one of the variables in their data, or
which estimator they used in each column (OLS/Fixed Effects/2SLS/etc...)
3. Fit Statistics: This block contains fit statistics. It defaults to $R^2$
and the number of observations, but this can be changed by the user.
You can construct sub-blocks within each of these three layers, although this is
turned off by default. In order to support these three layers and the possible
addition of sublayers, `TableCol`s that conform to this API must be subtypes of
`TableCol{3,M} where M`. For convenience a typealias `RegCol{M} =
TableCol{3,M}` is provided, along with a constructor for empty `RegCol`s from
just the desired header.
## Adding Each Block
You can construct or add to each of the three blocks using the convenience
methods `setcoef!`, `setmeta!`, and `setstats!`. All three have an identical
syntax:
```
set[block]!(t::RegCol, key, val[, se]; level=1, name="")
set[block]!(t::RegCol, key=>val; level=1, name="")
set[block]!(t::RegCol, kv::Associative)
```
This will insert into `t` a key/value pair (possibly with a standard error) within
the specified block. Like the `TableCol` constructor, the pairs
can be passed as either individual key/value[/se] tuples or pairs, as
several vectors of key/value[/se] pairs, or as an associative.
To add additional sub-blocks, use the `level` keyword argument. Integers
less than 0 will appears in blocks above the standard block, and integers
greater than 1 will appear below it.
To name the block or sub-block, pass a nonempty string as the `name` keyword
argument.
For instance, if you wanted to construct a regression column with two
coefficients 1.32 (0.89) and -0.21 (0.01), metadata that indicates that the
underlying estimation routine used OLS, and an $R^2$ of 0.73, then you would
run the following code:
```jldoctest; setup = :(using TexTables)
col = RegCol("My Column")
setcoef!(col, "Coef 1"=>(1.32, 0.89), "Coef 2"=>(-0.21, 0.01))
setmeta!(col, :Estimator=>"OLS")
setstats!(col, "\$R^2\$"=>0.73)
println(col)
# output
| My Column
----------------------
Coef 1 | 1.320
| (0.890)
Coef 2 | -0.210
| (0.010)
----------------------
Estimator | OLS
----------------------
$R^2$ | 0.730
```
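The `level` and `name` keywords described above can carve out named
sub-blocks; here is a minimal sketch extending `col` (output omitted, and the
sub-block name "Extra" is just an example):
```julia
# A named statistics sub-block rendered below the default block (level > 1)
setstats!(col, "Adj. \$R^2\$" => 0.70; level=2, name="Extra")
```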
## Robust Standard Errors
If you would like to override the standard `stderror` function for your table,
use the `stderror` keyword argument. For instance, you might want to use the
[CovarianceMatrices](https://github.com/gragusa/CovarianceMatrices.jl) package
to compute robust standard errors. In this case, you would simply define a new
function
```julia
using CovarianceMatrices
robust(m) = stderror(m, HC0)
TableCol("My Column", m; stderror=robust)
```
Note: This feature is relatively experimental and its usage may change in future
releases.
## Integrating `TexTables` into your own Estimation Package
Once you know how you would like your model's regression tables to look, it is
extremely easy to built it with `TexTables`. For instance, the code to
integrate `TexTables` with some of the basic StatsModels.jl `RegressionModel`
types is extremely short, and quite instructive to examine:
```julia
function TableCol(header, m::RegressionModel;
stats=(:N=>Int∘nobs, "\$R^2\$"=>r2),
meta=(), stderror::Function=stderror, kwargs...)
# Compute p-values
pval(m) = ccdf.(FDist(1, dof_residual(m)),
abs2.(coef(m)./stderror(m)))
# Initialize the column
col = RegCol(header)
# Add the coefficients
for (name, val, se, p) in zip(coefnames(m), coef(m), stderror(m), pval(m))
addcoef!(col, name, val, se)
0.05 < p <= .1 && star!(col[name], 1)
0.01 < p <= .05 && star!(col[name], 2)
p <= .01 && star!(col[name], 3)
end
# Add in the fit statistics
addstats!(col, OrderedDict(p.first=>p.second(m) for p in stats))
# Add in the metadata
addmeta!(col, OrderedDict(p.first=>p.second(m) for p in meta))
return col
end
```
Here, we
1. Constructed an empty column with the header value passed by the user
2. Looped through the coefficients, their names, their standard
errors, and their p-values. On each iteration, we:
a. Insert the coefficient value and its standard error into the table
b. Check whether the p-values fall below the desired threshold (in
descending order), and if so, call the function
`star!(x::FormattedNumber, num_stars)` with the desired number of
stars.
`TexTables` stores all of the table values internally with a
`FormattedNumber` type, which contains the value, the standard error if
appropriate, the number of stars the value should display, and a
formatting string. As a result, it is probably easiest to set the table
value first, and then add stars later with the `star!` function.
However, we could also have constructed each value directly as:
```julia
if .05 < pval <= .1
coef_block[name] = val, se, 1
elseif 0.01 < pval <= .05
coef_block[name] = val, se, 2
elseif pval <= .01
coef_block[name] = val, se, 3
end
```
How you choose to do it is mostly a matter of taste and coding style.
Note that by default, the number of stars is always set to zero. In
other words, `TexTables` will _not_ assume that it can infer the number
of significance stars from the standard errors and the coefficients
alone. If you want to annotate your table with significance stars, you
must explicitly choose in your model-specific code which entries to
annotate and how many stars they should have.
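For instance, a sketch annotating an entry of the `col` built earlier in this
section (the star count is always explicit):
```julia
star!(col["Coef 1"], 2)  # decorate the "Coef 1" estimate with two stars
```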
| TexTables | https://github.com/jacobadenbaum/TexTables.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 811 | push!(LOAD_PATH,"../src/")
using Documenter
using QuestDBClient
makedocs(
sitename = "QuestDBClient",
format = Documenter.HTML(),
modules = [QuestDBClient],
pages = Any[
"Home" => "index.md"
"User Guide" => Any[
"Functional Approach" => "man/functional.md"
"Macro Approach" => "man/macros.md"
]
"API" => Any[
"Sender" => "lib/sender.md",
"Operators" => "lib/operators.md",
"Types" => "lib/types.md",
"Exceptions" => "lib/exceptions.md"
]
]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
repo = "github.com/Ochibobo/QuestDBClient.jl.git",
)
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 678 | module QuestDBClient
export QuestDBSender
export QuestDBExceptions
export Sender, Auth, connect, close, write, flush
export table, symbol, IntegerColumn, FloatColumn, BoolColumn, StringColumn, CharColumn,
DateTimeColumn, DateColumn, UUIDColumn, At, AtNow, Source
export @table, @symbol, @IntegerColumn, @StringColumn, @CharColumn, @DateTimeColumn, @BoolColumn,
@DateColumn, @UUIDColumn, @AtNow, @At, @FloatColumn, @source
# Write your package code here.
include("sender.jl")
using .QuestDBSender
include("exceptions.jl")
using .QuestDBExceptions
include("questdb_operators.jl")
using .QuestDBOperators
include("questdb_types.jl")
using .QuestDBTypes
end
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 2815 | module QuestDBExceptions
export IllegalColumnNameCharacterException, IllegalTableNameCharacterException, EmptyColumnNameException,
ColumnNameLengthLimitExceededException, MultipleTableDefinitionsException, MissingTableDeclarationException,
MalformedLineProtocolSyntaxException, UnsupportedAtColumnTypeException, UnsupportedColumnTypeException,
QuestDBClientException
"""
abstract type QuestDBClientException <: Exception end
Custom exception type used in QuestDBClient
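All concrete exceptions in this module subtype it, so callers can handle any
client error with a single catch; an illustrative sketch:
# Example
```julia-repl
julia> try
           throw(EmptyColumnNameException("column name cannot be empty"))
       catch e
           e isa QuestDBClientException
       end
true
```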
"""
abstract type QuestDBClientException <: Exception end
"""
struct IllegalTableNameCharacterException <: QuestDBClientException
errorMessage::String
end
Illegal Table Name character exception
"""
struct IllegalTableNameCharacterException <: QuestDBClientException
errorMessage::String
end
"""
struct IllegalColumnNameCharacterException <: QuestDBClientException
errorMessage::String
end
Illegal Column Name character exception
"""
struct IllegalColumnNameCharacterException <: QuestDBClientException
errorMessage::String
end
"""
struct EmptyColumnNameException <: QuestDBClientException
errorMessage::String
end
Empty Column Name exception
"""
struct EmptyColumnNameException <: QuestDBClientException
errorMessage::String
end
"""
struct ColumnNameLengthLimitExceededException <: QuestDBClientException
message::String
end
Column Name Length Limit Exceeded exception
"""
struct ColumnNameLengthLimitExceededException <: QuestDBClientException
message::String
end
"""
struct MultipleTableDefinitionsException <: QuestDBClientException
message::String
end
Multiple Table definitions detected
"""
struct MultipleTableDefinitionsException <: QuestDBClientException
message::String
end
"""
struct MissingTableDeclarationException <: QuestDBClientException
message::String
end
Missing table declaration detected -> May change this to MalformedLineProtocolSyntaxException
"""
struct MissingTableDeclarationException <: QuestDBClientException
message::String
end
"""
struct MalformedLineProtocolSyntaxException <: QuestDBClientException
message::String
end
Malformed Line Protocol syntax detected
"""
struct MalformedLineProtocolSyntaxException <: QuestDBClientException
message::String
end
"""
struct UnsupportedColumnTypeException <: QuestDBClientException
message::String
end
Unsupported Column Types detected
"""
struct UnsupportedColumnTypeException <: QuestDBClientException
message::String
end
"""
struct UnsupportedAtColumnTypeException <: QuestDBClientException
message::String
end
Specified At Column is not a timestamp
"""
struct UnsupportedAtColumnTypeException <: QuestDBClientException
message::String
end
end | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 29140 | """
QuestDB Operators
Functional Implementation of the QuestDBClient interface
"""
module QuestDBOperators
using ..QuestDBSender
using ..QuestDBExceptions
using Dates
using UUIDs
using DataFrames
export table, symbol, IntegerColumn, FloatColumn, BoolColumn, StringColumn, CharColumn, DateTimeColumn, DateColumn,
UUIDColumn, At, AtNow, Source
"""
const ColumnName = Union{Symbol, String}
The column name can be specified as a symbol or as a string
"""
const ColumnName = Union{Symbol, String}
"""
    const SymbolColumnValue = Union{Symbol, String, Nothing}
A symbol column can take values that are either a Symbol, a String, or nothing (in which case the column is skipped)
"""
const SymbolColumnValue = Union{Symbol, String, Nothing}
"""
    const supportedTypes = [Integer, AbstractFloat, AbstractString, Symbol, Dates.DateTime,
                            Dates.Date, Char, UUID]
A list of supported types
"""
const supportedTypes = [Integer, AbstractFloat, AbstractString, Symbol,
Dates.DateTime, Dates.Date, Char, UUID]
"""
Constant Definitions
"""
"""
    const COMMA = ','
A constant referencing a comma - ','
"""
const COMMA = ','
"""
const SPACE_CHARACTER = ' '
A constant referencing the space character
"""
const SPACE_CHARACTER = ' '
"""
const RETURN_CHARACTER = '\\n'
A constant referencing the return character
"""
const RETURN_CHARACTER = '\n'
"""
const EQUALS_CHARACTER = '='
A constant referencing the equals character.
"""
const EQUALS_CHARACTER = '='
"""
const UNSUPPORTED_CHARACTERS = ['?', '.' , ',' , '\'' , '"' , '\\' , '/', ':' , '(' , ')' , '+' , '-' , '*' , '%' , '~' , ' ' , '\0']
The list of unsupported characters for table & column names
"""
const UNSUPPORTED_CHARACTERS = ['?', '.' , ',' , '\'' , '"' , '\\' , '/', ':' , '(' , ')' , '+' , '-' , '*' , '%' , '~' , ' ' , '\0']
"""
table(sender::Sender, name::T)::Sender where {T <: ColumnName}
Table definition function for an ILP entry. Adds a table name to the `sender`'s buffer and returns the sender with
an updated buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> table(sender, :readings)
## sender with table readings
```
"""
function table(sender::Sender, name::T)::Sender where {T <: ColumnName}
name = string(name)
length(name) == 0 && throw(DomainError("table name cannot be empty"))
## Assert that all characters are supported
checkUnsupportedCharacters(name, IllegalTableNameCharacterException)
## Check if a table has already been defined
sender.hasTable && throw(MultipleTableDefinitionsException("cannot define table more than once."))
## Add the table to the buffer
sender.buffer = sender.buffer * string(name) * COMMA
sender.hasTable = true
return sender
end
"""
symbol(sender::Sender, symbol::Pair{T, V})::Sender where {T <: ColumnName, V <: SymbolColumnValue}
Symbol definition function for an ILP entry. Adds a symbol (tag_set) to the `sender`'s buffer and returns the sender with
an updated buffer. The `symbol` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: SymbolColumnValue`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
The `SymbolColumnValue` is `Union{Symbol, String, Nothing}`
# Example
```julia-repl
julia> symbol(sender, :make => :Omron)
## sender with symbol make=Omron
```
"""
function symbol(sender::Sender, symbol::Pair{T, V})::Sender where {T <: ColumnName, V <: SymbolColumnValue}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define symbol before a table is defined."))
sender.hasFields && throw(MalformedLineProtocolSyntaxException("cannot define symbol after field(s)."))
## Validate symbol characters
## If the last character of the buffer is a comma, then the buffer contains a table only and this is the first symbol
## If the last character of the buffer is space, then the buffer contains at least one other symbol
lastBufferChar = sender.buffer[end]
if lastBufferChar == SPACE_CHARACTER
## Trim the last character (space character) and append a comma
sender.buffer = sender.buffer[1:end - 1] * COMMA
lastBufferChar = COMMA
end
if lastBufferChar == COMMA
sender.buffer = writeSymbol(sender.buffer, symbol)
else
throw(MalformedLineProtocolSyntaxException("malformed line protocol syntax detected: $(sender.buffer)"))
end
return sender
end
"""
writeSymbol(queryString::String, symbol::Pair{T, V})::String where {T <: ColumnName, V <: SymbolColumnValue}
Function used to create append a symbol when constructing an ILP string.
The `symbol` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: SymbolColumnValue`
Also asserts that QuestDB unsupported column name characters result in exception propagation
The `ColumnName` is `Union{Symbol, String}`
The `SymbolColumnValue` is `Union{Symbol, String, Nothing}`
# Example
```julia-repl
julia> writeSymbol("", :make => :Omron)
make=Omron
```
"""
function writeSymbol(queryString::String, symbol::Pair{T, V})::String where {T <: ColumnName, V <: SymbolColumnValue}
    ## If the column name is empty, throw an error
if length(strip(string(symbol.first))) == 0 throw(EmptyColumnNameException("column name cannot be empty")) end
## If symbol value is nothing, return an empty string
if isnothing(symbol.second) return "" end
    ## Assert that all characters in the column name are not illegal
checkUnsupportedCharacters(string(symbol.first), IllegalColumnNameCharacterException)
return queryString * string(symbol.first) * EQUALS_CHARACTER * string(symbol.second) * SPACE_CHARACTER
end
"""
IntegerColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Integer, Nothing}}
IntegerColumn definition function for an ILP entry. Adds a field of type integer to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Integer, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `Integer` subtypes are supported:
`Bool`, `BigInt`, `Int128`, `Int64`, `Int32`, `Int16`, `Int8`,
`UInt128`, `UInt64`, `UInt32`, `UInt16`, `UInt8`
# Example
```julia-repl
julia> IntegerColumn(sender, :count => 12)
## sender with field count=12
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> IntegerColumn(sender, :count => nothing)
## sender without an updated buffer
```
"""
function IntegerColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Integer, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define integer column before a table is defined."))
## If the sender has fields already, prepend a comma, else just append the column to the ILP
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
sender.buffer = writeFieldColumn(sender.buffer, data)
## Mark the sender as having a field
sender.hasFields = true
return sender
end
"""
writeFieldColumn(queryString::String, data::Pair{T, V})::String where {T <:ColumnName, V <: Any}
Function used to append any other field to the ILP entry. If the value of the pair is `nothing`, the passed field will not be written
to the `queryString`, which is eventually appended to the `sender`'s buffer.
A check for unsupported column characters is also performed.
# Example
```julia-repl
julia> writeFieldColumn("", :count => 15)
"count=15"
```
# Example
```julia-repl
julia> writeFieldColumn("", :count => nothing)
""
```
"""
function writeFieldColumn(queryString::String, data::Pair{T, V})::String where {T <:ColumnName, V <: Any}
## If the column name is empty, throw an error
if length(strip(string(data.first))) == 0 throw(EmptyColumnNameException("column name cannot be empty")) end
## If the value is nothing, don't write anything
if isnothing(data.second) return "" end
## Assert that all characters in the column name are not illegal
checkUnsupportedCharacters(string(data.first), IllegalColumnNameCharacterException)
return queryString * string(data.first) * EQUALS_CHARACTER * string(data.second)
end
"""
checkUnsupportedCharacters(subject::T, exception::Type{E}) where {T <: ColumnName, E <: QuestDBClientException}
Asserts that only supported column name characters pass this evaluation. Any unsupported character results in throwing a
`QuestDBClientException`.
Unsupported characters include:
['?', '.' , ',' , '\'' , '"' , '\\' , '/', ':' , '(' , ')' , '+' , '-' , '*' , '%' , '~' , ' ' , '\0']
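A minimal sketch of both outcomes:
# Example
```julia-repl
julia> checkUnsupportedCharacters("make", IllegalColumnNameCharacterException)
## no exception: every character is supported
julia> checkUnsupportedCharacters("ma?ke", IllegalColumnNameCharacterException)
## throws IllegalColumnNameCharacterException
```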
"""
function checkUnsupportedCharacters(subject::T, exception::Type{E}) where {T <: ColumnName, E <: QuestDBClientException}
## Check if the subject contains characters that are unsupported
matched_char_flags = contains.(subject, UNSUPPORTED_CHARACTERS)
## Unsupported characters have been detected
if any(matched_char_flags)
matched_indices = findall(matched_char_flags)
illegal_chars = join(UNSUPPORTED_CHARACTERS[matched_indices], ", ")
throw(exception("unsupported character(s) $(illegal_chars) detected in: $(subject)"))
end
end
"""
BoolColumn(sender::Sender, data::Pair{T, Bool})::Sender where {T <: ColumnName}
BoolColumn definition function for an ILP entry. Adds a field of type bool to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, Bool}` where `T <: ColumnName`.
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> BoolColumn(sender, :present => true)
## sender with field present=true
```
"""
function BoolColumn(sender::Sender, data::Pair{T, Bool})::Sender where {T <: ColumnName}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define boolean column before a table is defined."))
return IntegerColumn(sender, data)
end
"""
FloatColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{AbstractFloat, Nothing}}
FloatColumn definition function for an ILP entry. Adds a field of type float to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{AbstractFloat, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `AbstractFloat` subtypes are supported:
`BigFloat`, `Float64`, `Float32`, `Float16`
# Example
```julia-repl
julia> FloatColumn(sender, :temperature => 29.4)
## sender with field temperature=29.4
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> FloatColumn(sender, :temperature => nothing)
## sender without an updated buffer
```
"""
function FloatColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{AbstractFloat, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define float column before a table is defined."))
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
sender.buffer = writeFieldColumn(sender.buffer, data)
sender.hasFields = true
return sender
end
"""
StringColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{AbstractString, Nothing}}
StringColumn definition function for an ILP entry. Adds a field of type string to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{AbstractString, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `AbstractString` subtypes are supported:
`Core.Compiler.LazyString`, `InlineStrings.InlineString`, `LaTeXStrings.LaTeXString`, `LazyString`,
`String`, `SubString`, `SubstitutionString`, `Test.GenericString`
# Example
```julia-repl
julia> StringColumn(sender, :city => "Nairobi")
## sender with field city=Nairobi
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> StringColumn(sender, :city => nothing)
## sender without an updated buffer
```
"""
function StringColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{AbstractString, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define string column before a table is defined."))
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
sender.buffer = writeFieldColumn(sender.buffer, data)
sender.hasFields = true
return sender
end
"""
CharColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Char, Nothing}}
CharColumn definition function for an ILP entry. Adds a field of type char to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Char, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> CharColumn(sender, :region => 'A')
## sender with field region=A
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> CharColumn(sender, :region => nothing)
## sender without an updated buffer
```
"""
function CharColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Char, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define char column before a table is defined."))
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
sender.buffer = writeFieldColumn(sender.buffer, data)
sender.hasFields = true
return sender
end
"""
DateTimeColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{DateTime, Nothing}}
DateTimeColumn definition function for an ILP entry. Adds a field of type datetime to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{DateTime, Nothing}`
The DateTime is converted to milliseconds since UNIXEPOCH
This is not the record's designated timestamp field but another field whose value is a timestamp.
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> DateTimeColumn(sender, :pick_up_date => now())
## sender with field pick_up_date=1680990219992
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> DateTimeColumn(sender, :pick_up_date => nothing)
## sender without an updated buffer
```
"""
function DateTimeColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{DateTime, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define datetime column before a table is defined."))
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
## Get the time in milliseconds
_timeInMilliseconds = isnothing(data.second) ? nothing : Dates.value(data.second) - Dates.UNIXEPOCH
sender.buffer = writeFieldColumn(sender.buffer, data.first => _timeInMilliseconds)
sender.hasFields = true
return sender
end
"""
DateColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Date, Nothing}}
DateColumn definition function for an ILP entry. Adds a field of type date to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Date, Nothing}`
The Date is converted to milliseconds since UNIXEPOCH
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> DateColumn(sender, :collection_date => Date(2023, 4, 8))
## sender with field collection_date=1680912000000
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> DateColumn(sender, :collection_date => nothing)
## sender without an updated buffer
```
"""
function DateColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{Date, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define date column before a table is defined."))
## If the sender has fields already, prepend a comma
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
_timeInMilliseconds = nothing
## Convert the date to milliseconds since UNIXEPOCH
if !isnothing(data.second)
_timeInMilliseconds = Dates.value(DateTime(data.second)) - Dates.UNIXEPOCH
end
sender.buffer = writeFieldColumn(sender.buffer, data.first => _timeInMilliseconds)
sender.hasFields = true
return sender
end
"""
UUIDColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{UUID, Nothing}}
UUIDColumn definition function for an ILP entry. Adds a field of type UUID to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{UUID, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> using UUIDs
julia> using Random
julia> rng = MersenneTwister(1234);
julia> u4 = uuid4(rng);
julia> UUIDColumn(sender, :user_id => u4)
## sender with field user_id=7a052949-c101-4ca3-9a7e-43a2532b2fa8
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> UUIDColumn(sender, :user_id => nothing)
## sender without an updated buffer
```
"""
function UUIDColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{UUID, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define UUID column before a table is defined."))
if sender.hasFields
sender.buffer = sender.buffer * COMMA
end
sender.buffer = writeFieldColumn(sender.buffer, data)
sender.hasFields = true
return sender
end
"""
UUIDColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{String, Nothing}}
UUIDColumn definition function for an ILP entry. Adds a field of type UUID (passed as a string) to the `sender`'s buffer and returns the sender with
an updated buffer. The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{String, Nothing}`
Takes in the UUID as a string.
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> UUIDColumn(sender, :user_id => "7a052949-c101-4ca3-9a7e-43a2532b2fa8")
## sender with field user_id=7a052949-c101-4ca3-9a7e-43a2532b2fa8
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> UUIDColumn(sender, :user_id => nothing)
## sender without an updated buffer
```
"""
function UUIDColumn(sender::Sender, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Union{String, Nothing}}
!sender.hasTable && throw(MissingTableDeclarationException("cannot define UUID column before a table is defined."))
return StringColumn(sender, data)
end
"""
At(sender::Sender, timestamp::DateTime)::Nothing
At column definition function for an ILP entry. This is the designated timestamp field.
The timestamp is converted to nanoseconds since UNIXEPOCH
This requires that a `table` has already been added to the sender's buffer.
Upon setting this field, the `hasFields` and `hasTable` properties of the `sender` are set to false. This also marks the
end of the record with a '\\n'.
Serves as a terminal definition of a record. Should always be defined last.
!!! note
The `sender` attempts to write values to the `QuestDB Database Server` depending
on whether the buffer size has been met or exceeded when `At` is executed.
# Example
```julia-repl
julia> At(sender, now())
## sender with field 1680993284179000000\\n
```
"""
function At(sender::Sender, timestamp::DateTime)::Nothing
!sender.hasTable && throw(MissingTableDeclarationException("cannot define At column before a table is defined."))
## If fields were written, the buffer needs a separating space before the timestamp;
## if only symbols were written, writeSymbol already left a trailing space
if sender.hasFields
sender.buffer = sender.buffer * SPACE_CHARACTER
end
## Convert the datetime to nanoseconds since UNIXEPOCH
time_ns = convert(Dates.Nanosecond, Dates.Millisecond(Dates.value(timestamp) - Dates.UNIXEPOCH))
## Append the designated timestamp and terminate the record with a newline
sender.buffer = sender.buffer * string(time_ns.value) * RETURN_CHARACTER
## Mark the hasFields to false & hasTable to false
sender.hasFields = false
sender.hasTable = false
## Persist to QuestDB
QuestDBSender.write(sender)
return nothing
end
"""
AtNow(sender::Sender)::Nothing
This requires that a `table` has already been added to the sender's buffer.
Resolves to:
At(sender, now())
!!! note
The `sender` attempts to write values to the `QuestDB Database Server` depending
on whether the buffer size has been met or exceeded when `AtNow(sender)` is executed.
# Example
```julia-repl
julia> AtNow(sender)
## sender with field 1680993284179000000\\n
```
"""
function AtNow(sender::Sender)::Nothing
!sender.hasTable && throw(MissingTableDeclarationException("cannot define AtNow column before a table is defined."))
return At(sender, now())
end
"""
Source(sender::Sender, df::DataFrame, table::TT;
at::T = "", symbols::Vector{V} = [])::Sender where {TT<: ColumnName, T <: ColumnName, V <: ColumnName}
Takes in a `DataFrame` object and creates ILP insert statement for each row element.
# Arguments
- `sender::Sender` : QuestDBClient sender object
- `df::DataFrame`: the `DataFrame` that serves as the source of the data
- `table::TT where {TT <: ColumnName}` : the name of the `table`
- `at::T where {T <: ColumnName}` : the column that has timestamp values that serve as the designated timestamp
- `symbols::Vector{V} where {V <: ColumnName}`: the list of column names whose columns serve as `tag_set` values for an ILP record
!!! note
The `sender`, `df`, and `table` arguments are compulsory and are positional arguments.
The `at` and `symbols` arguments are optional named arguments.
The `ColumnName` is `Union{Symbol, String}`
The `table` specification is a requirement.
!!! note
Supported column data types include:
`Symbol`, `Integer` and subtypes, `AbstractFloat` and subtypes, `Bool`, `Char`, `AbstractString` and subtypes,
`Date`, `DateTime`, `UUID`
For `DataFrames`, entries of type `Missing` are not supported. They should be cast to `Nothing`.
`at` argument is used to specify the column header of the column in the `DataFrame` that will serve as the designated timestamp field. The column
should have values of type `DateTime`, which are converted to nanoseconds when the row is turned into an ILP record. If `at` is not specified,
the current time will be added to each ILP record.
`symbols` argument specifies a vector of columns headers of `DataFrame` columns that serve as the `tag_set` in the ILP statement. If `symbols`
are not specified, then no `tag_set` fields will be part of the ILP statement.
| city       | make       | temperature  | humidity    |
|:-----------|:-----------|-------------:|------------:|
| London | Omron | 29.4 | 0.334 |
| Nairobi | Honeywell | 24.0 | 0.51 |
- Assuming `df` is the `DataFrame` shown above:
# Example
```julia-repl
julia> using DataFrames
julia> df = DataFrame(city=["London", "Nairobi"], make=[:Omron, :Honeywell], temperature=[29.4, 24.0], humidity=[0.334, 0.51]);
julia> Source(sender, df, :readings, symbols=[:city, :make]);
## sender with 2 ILP records from the 2 rows in the DataFrame
```
| city       | make       | temperature  | humidity    | collection_time          |
|:-----------|:-----------|-------------:|------------:|-------------------------:|
| London | Omron | 29.4 | 0.334 | 2023-04-10T13:09:31Z |
| Nairobi | Honeywell | 24.0 | 0.51 | 2023-04-10T13:09:42Z |
- An example with the `at` field specified.
# Example
```julia-repl
julia> using DataFrames
julia> df = DataFrame(city=["London", "Nairobi"], make=[:Omron, :Honeywell], temperature=[29.4, 24.0],
humidity=[0.334, 0.51], collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"]);
julia> using Dates
julia> date_format = dateformat"y-m-dTH:M:SZ";
julia> df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format);
julia> Source(sender, df, :readings, symbols = [:city, :make], at = :collection_time);
## sender with 2 ILP records from the 2 rows in the DataFrame
```
!!! note
The `sender` attempts to write values to the `QuestDB Database Server` depending
on whether the buffer size has been met or exceeded while reading the rows of the
`DataFrame`. This is even before the `flush` or `close` function is called.
"""
function Source(sender::Sender, df::DataFrame, table::TT;
at::T = "", symbols::Vector{V} = [])::Sender where {TT<: ColumnName, T <: ColumnName, V <: ColumnName}
## Create a columnName => columnType dictionary
columnsDict::Dict{String, DataType} = Dict(names(df) .=> eltype.(eachcol(df)))
unsupportedTypes = []
## Assert that all types are supported or request for a cast
for (k, v) in columnsDict
if !any(v .<: supportedTypes)
push!(unsupportedTypes, k => v)
end
end
## Log unsupported types
if !isempty(unsupportedTypes)
throw(UnsupportedColumnTypeException("detected unsupported column type(s): $unsupportedTypes. Cast them to a supported type."))
end
hasAt = false
at = string(at)
## Assert the at column exists
if length(at) != 0
## Assert the column is a Timestamp type
if(haskey(columnsDict, at))
type = columnsDict[at]
if(!(type <: Dates.DateTime))
throw(UnsupportedAtColumnTypeException("specified At column: $at of type $type is not of type Timestamp"))
end
hasAt = true
end
end
## Handle Missing type
## Remove the columns from the columnsDict
namedCols = string.([symbols...])
hasAt && push!(namedCols, at)
for namedCol in namedCols
delete!(columnsDict, namedCol)
end
## Loop through each row building the ILP String as guided by the types
for row in eachrow(df)
## Write the table first
sender = QuestDBOperators.table(sender, table)
## Write symbols first
for s in symbols
sender = symbol(sender, s => row[s])
end
## Loop through other columns & write them
for (col, coltype) in columnsDict
sender = writeRowEntry!(sender, coltype, col => row[col])
end
## If the at was specified, write it last, else write AtNow
if hasAt
At(sender, row[at])
else
AtNow(sender)
end
end
## Return the sender
return sender
end
"""
writeRowEntry!(sender::Sender, dataType::Type, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Any}
A helper function to build ILP records from a dataframe based on the column types.
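A minimal sketch, assuming a `sender` whose buffer already has a table defined:
# Example
```julia-repl
julia> writeRowEntry!(sender, Float64, :temperature => 24.0)
## sender with field temperature=24.0 (dispatched to FloatColumn)
```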
"""
function writeRowEntry!(sender::Sender, dataType::Type, data::Pair{T, V})::Sender where {T <: ColumnName, V <: Any}
if dataType <: Symbol
return symbol(sender, data.first => Symbol(data.second))
elseif dataType <: Integer
return IntegerColumn(sender, data)
elseif dataType <: AbstractFloat
return FloatColumn(sender, data)
elseif dataType <: AbstractString
return StringColumn(sender, data)
elseif dataType <: DateTime
return DateTimeColumn(sender, data)
elseif dataType <: Date
return DateColumn(sender, data)
elseif dataType <: Char
return CharColumn(sender, data)
elseif dataType <: UUID
return UUIDColumn(sender, data)
else
throw(UnsupportedColumnTypeException("column type of $dataType is not supported"))
end
end
end
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 17830 | """
Macro definitions for the supported ILP column types.
Each type macro returns a closure that takes a `Sender` and returns it with an updated buffer.
"""
module QuestDBTypes
import ..QuestDBOperators
export @table, @symbol, @IntegerColumn, @StringColumn, @CharColumn, @DateTimeColumn, @BoolColumn,
@DateColumn, @UUIDColumn, @AtNow, @At, @FloatColumn, @source
import Base: string
"""
@table(name)
@table definition macro for an ILP entry. Returns an expression that upon evaluation yields a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a table name to the `sender`'s buffer and returns the sender with an updated buffer.
The `name` should be of type `ColumnName`
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @table(:readings)
## sender with table readings
```
"""
macro table(name)
return quote
(sender) -> QuestDBOperators.table(sender, $(esc(name)))
end
end
"""
@symbol(symbol::Pair{T, V}) where {T <: ColumnName, V <: SymbolColumnValue}
@symbol definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a symbol (tag_set) to the `sender`'s buffer and returns the sender with an updated buffer. The `symbol` argument is a
`Pair{T, V}` where `T <: ColumnName` and `V <: SymbolColumnValue`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
The `SymbolColumnValue` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @symbol(:make => :Omron)
## sender with symbol make=Omron
```
"""
macro symbol(symbolPair)
return quote
(sender) -> QuestDBOperators.symbol(sender, $(esc(symbolPair)))
end
end
"""
@IntegerColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{Integer, Nothing}}
@IntegerColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type integer to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Integer, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `Integer` subtypes are supported:
`Bool`, `BigInt`, `Int128`, `Int64`, `Int32`, `Int16`, `Int8`,
`UInt128`, `UInt64`, `UInt32`, `UInt16`, `UInt8`
# Example
```julia-repl
julia> sender |> @IntegerColumn(:count => 12)
## sender with field count=12
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @IntegerColumn(:count => nothing)
## sender without an updated buffer
```
"""
macro IntegerColumn(data)
return quote
(sender) -> QuestDBOperators.IntegerColumn(sender, $(esc(data)))
end
end
"""
@BoolColumn(data::Pair{T, Bool}) where {T <: ColumnName}
@BoolColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type bool to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, Bool}` where `T <: ColumnName`.
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @BoolColumn(:present => true)
## sender with field present=true
```
"""
macro BoolColumn(data)
return quote
(sender) -> QuestDBOperators.BoolColumn(sender, $(esc(data)))
end
end
"""
@FloatColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{AbstractFloat, Nothing}}
@FloatColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type float to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{AbstractFloat, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `AbstractFloat` subtypes are supported:
`BigFloat`, `Float64`, `Float32`, `Float16`
# Example
```julia-repl
julia> sender |> @FloatColumn(:temperature => 29.4)
## sender with field temperature=29.4
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @FloatColumn(:temperature => nothing)
## sender without an updated buffer
```
"""
macro FloatColumn(data)
return quote
(sender) -> QuestDBOperators.FloatColumn(sender, $(esc(data)))
end
end
"""
@StringColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{AbstractString, Nothing}}
@StringColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type string to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{AbstractString, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
All `AbstractString` subtypes are supported:
`Core.Compiler.LazyString`, `InlineStrings.InlineString`, `LaTeXStrings.LaTeXString`, `LazyString`,
`String`, `SubString`, `SubstitutionString`, `Test.GenericString`
# Example
```julia-repl
julia> sender |> @StringColumn(:city => "Nairobi")
## sender with field city=Nairobi
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @StringColumn(:city => nothing)
## sender without an updated buffer
```
"""
macro StringColumn(data)
return quote
(sender) -> QuestDBOperators.StringColumn(sender, $(esc(data)))
end
end
"""
@CharColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{Char, Nothing}}
@CharColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type char to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Char, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @CharColumn(:region => 'A')
## sender with field region=A
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @CharColumn(:region => nothing)
## sender without an updated buffer
```
"""
macro CharColumn(data)
return quote
(sender) -> QuestDBOperators.CharColumn(sender, $(esc(data)))
end
end
"""
@DateTimeColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{DateTime, Nothing}}
@DateTimeColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type datetime to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{DateTime, Nothing}`
The DateTime is converted to milliseconds since UNIXEPOCH
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @DateTimeColumn(:pick_up_date => now())
## sender with field pick_up_date=1680990219992
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @DateTimeColumn(:pick_up_date => nothing)
## sender without an updated buffer
```
"""
macro DateTimeColumn(data)
return quote
(sender) -> QuestDBOperators.DateTimeColumn(sender, $(esc(data)))
end
end
"""
@DateColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{Date, Nothing}}
@DateColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type date to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{Date, Nothing}`
The Date is converted to milliseconds since UNIXEPOCH
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> sender |> @DateColumn(:collection_date => Date(2023, 4, 8))
## sender with field collection_date=1680912000000
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @DateColumn(:collection_date => nothing)
## sender without an updated buffer
```
"""
macro DateColumn(data)
return quote
(sender) -> QuestDBOperators.DateColumn(sender, $(esc(data)))
end
end
"""
@UUIDColumn(data::Pair{T, V}) where {T <: ColumnName, V <: Union{UUID, String, Nothing}}
@UUIDColumn definition macro for an ILP entry. Returns a closure that takes a `sender` as an argument.
Once the closure is evaluated, it adds a field of type UUID to the `sender`'s buffer and returns the sender with an updated buffer.
The `data` argument is a `Pair{T, V}` where `T <: ColumnName` and `V <: Union{UUID, Nothing}`
This requires that a `table` has already been added to the sender's buffer.
The `ColumnName` is `Union{Symbol, String}`
# Example
```julia-repl
julia> using UUIDs
julia> using Random
julia> rng = MersenneTwister(1234);
julia> u4 = uuid4(rng);
julia> sender |> @UUIDColumn(:user_id => u4)
## sender with field user_id=7a052949-c101-4ca3-9a7e-43a2532b2fa8
```
This also works when the UUID is passed as a string
# Example
```julia-repl
julia> sender |> @UUIDColumn(:user_id => "7a052949-c101-4ca3-9a7e-43a2532b2fa8")
## sender with field user_id=7a052949-c101-4ca3-9a7e-43a2532b2fa8
```
If `nothing` is passed as the second part of the data pair, `V`, this column won't be written
# Example
```julia-repl
julia> sender |> @UUIDColumn(:user_id => nothing)
## sender without an updated buffer
```
"""
macro UUIDColumn(data)
return quote
(sender) -> QuestDBOperators.UUIDColumn(sender, $(esc(data)))
end
end
"""
@At(timestamp::DateTime)
@At column definition macro for an ILP entry. This is the designated timestamp field.
The timestamp is converted to nanoseconds since UNIXEPOCH.
It returns a closure that takes a `sender` as an argument. Once the closure is evaluated, it terminates the record and returns `nothing`.
This requires that a `table` has already been added to the sender's buffer.
Upon setting this field, the `hasFields` and `hasTable` properties of the `sender` are set to false. This also marks the
end of the record with a '\\n'. Furthermore, the `sender` attempts to write values to the `QuestDB Database Server` depending
on whether the buffer size has been met or exceeded.
Serves as a terminal definition of a record. Should always be defined last.
# Example
```julia-repl
julia> sender |> @At(now())
## sender with field 1680993284179000000\\n
```
"""
macro At(data)
return quote
(sender) -> QuestDBOperators.At(sender, $(esc(data)))
end
end
"""
@AtNow
This requires that a `table` has already been added to the sender's buffer.
Resolves to:
@At(now())
# Example
```julia-repl
julia> sender |> @AtNow()
## sender with field 1680993284179000000\\n
```
"""
macro AtNow()
return quote
(sender) -> QuestDBOperators.AtNow(sender)
end
end
## Helps in conversion of QuoteNode to string
string(k::QuoteNode) = string(k.value)
"""
@source(df::DataFrame = DataFrame(), table::TT = "", at::T = "",
symbols::Vector{V} = []) where {TT<: ColumnName, T <: ColumnName, V <: ColumnName}
Takes in a `DataFrame` object and creates ILP insert statement for each row element.
This macro requires named arguments to be specified:
# Arguments
- `df::DataFrame`: the `DataFrame` that serves as the source of the data
- `table::TT where {TT <: ColumnName}` : the name of the `table`
- `at::T where {T <: ColumnName}` : the column that has timestamp values that serve as the designated timestamp
- `symbols::Vector{V} where {V <: ColumnName}`: the list of column names whose columns serve as `tag_set` values for an ILP record
The `ColumnName` is `Union{Symbol, String}`
!!! note
Only the `df` and `table` parameters must be specified. The `at` and `symbols` parameters are optional.
It returns the closure that takes a `sender` as an argument. Once the closure is evaluated, returns the sender with an updated buffer.
The `table` specification is a requirement.
!!! note
Supported column data types include:
`Symbol`, `Integer` and subtypes, `AbstractFloat` and subtypes, `Bool`, `Char`, `AbstractString` and subtypes,
`Date`, `DateTime`, `UUID`
For `DataFrames`, entries of type `Missing` are not supported. They should be cast to `Nothing`.
`at` argument is used to specify the column header of the column in the `DataFrame` that will serve as the designated timestamp field. The column
should have values of type `DateTime`, which are converted to nanoseconds when the row is turned into an ILP record. If `at` is not specified,
the current time will be added to each ILP record.
`symbols` argument specifies a vector of columns headers of `DataFrame` columns that serve as the `tag_set` in the ILP statement. If `symbols`
are not specified, then no `tag_set` fields will be part of the ILP statement.
| city       | make       | temperature  | humidity    |
|:-----------|:-----------|-------------:|------------:|
| London | Omron | 29.4 | 0.334 |
| Nairobi | Honeywell | 24.0 | 0.51 |
- Assuming `df` is the `DataFrame` shown above:
# Example
```julia-repl
julia> using DataFrames
julia> df = DataFrame(city=["London", "Nairobi"], make=[:Omron, :Honeywell], temperature=[29.4, 24.0], humidity=[0.334, 0.51]);
julia> sender |> @source(df = df, table = :readings, symbols=[:city, :make]);
## sender with 2 ILP records from the 2 rows in the DataFrame
```
| city       | make       | temperature  | humidity    | collection_time          |
|:-----------|:-----------|-------------:|------------:|-------------------------:|
| London | Omron | 29.4 | 0.334 | 2023-04-10T13:09:31Z |
| Nairobi | Honeywell | 24.0 | 0.51 | 2023-04-10T13:09:42Z |
- An example with the `at` field specified.
# Example
```julia-repl
julia> using DataFrames
julia> df = DataFrame(city=["London", "Nairobi"], make=[:Omron, :Honeywell], temperature=[29.4, 24.0],
humidity=[0.334, 0.51], collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"]);
julia> using Dates
julia> date_format = dateformat"y-m-dTH:M:SZ";
julia> df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format);
julia> sender |> @source(df = df, table = :readings, symbols = [:city, :make], at = :collection_time);
## sender with 2 ILP records from the 2 rows in the DataFrame
```
!!! note
The `sender` attempts to write values to the `QuestDB Database Server` depending
on whether the buffer size has been met or exceeded while reading the rows of the
`DataFrame`. This is even before the `flush` or `close` function is called.
"""
macro source(args...)
length(args) == 0 && throw(ArgumentError("no arguments have been passed"))
(length(args) > 4) && throw(ArgumentError("this macro takes a maximum of 4 arguments. Number of arguments passed = $(length(args)): $(args)"))
## Get the keyword arguments
local supportedKeywords = [:df, :table, :at, :symbols]
df = nothing
table = ""
at = ""
symbols = []
for arg in args
## Assert the passed parameter is an expression
if isa(arg, Expr)
## Assert the expression has the format (=, args)
if !(arg.head == :(=))
throw(ArgumentError("invalid argument format $(arg)"))
end
expr_args = arg.args
length(expr_args) < 2 && throw(ArgumentError("invalid argument format $(arg)"))
keyword = expr_args[1]
if keyword == :df
df = expr_args[2]
elseif keyword == :table
table = expr_args[2]
elseif keyword == :at
at = expr_args[2]
elseif keyword == :symbols
## This is expected to be a vector expression
vect_exp = expr_args[2]
symbols = string.(vect_exp.args)
else
## Throw unsupported keyword error if the passed keyword isn't supported
throw(ArgumentError("unsupported keyword $(keyword) passed as an argument. Supported keywords are: $(supportedKeywords...)"))
end
else
## Throw an error if the passed value is not an expression
throw(ArgumentError("invalid argument format $(arg)"))
end
end
## Return a closure mapping the sender to QueryOperators.Source function
return quote
(sender) -> QuestDBOperators.Source(sender, $(esc(df)), $(esc(table)), at = $(esc(at)), symbols = $(esc(symbols)))
end
end
end
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 4783 | """
Sender client used to connect to a QuestDB server and send ILP data.
"""
module QuestDBSender
using Parameters
using Sockets
export Sender, connect, close, write, flush
export Auth
"""
const DEFAULT_BUFFER_SIZE = 128 * 1024
Default buffer size of the sender's buffer
"""
const DEFAULT_BUFFER_SIZE = 128 * 1024
"""
See: https://questdb.io/docs/reference/api/ilp/authenticate
Authentication object used when connecting to a QuestDB instance that requires authentication.
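A minimal sketch with placeholder key values (supply your own ILP credentials):
# Example
```julia-repl
julia> auth = Auth(kid = "testUser1", dKey = "...", xKey = "...", yKey = "...")
```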
"""
@with_kw struct Auth
kid::String
dKey::String
xKey::String
yKey::String
end
"""
@with_kw mutable struct Sender
host::String="127.0.0.1"
port::Int=9009
batchSize::Int=DEFAULT_BUFFER_SIZE
tlsMode::Bool=false
hasTable::Bool=false
hasFields::Bool=false
auth::Union{Auth, Nothing} = nothing ## Optional authentication configuration
buffer::String = ""
socket::Union{TCPSocket, Nothing} = nothing
end
`Sender` struct is the entity responsible for connecting to the `QuestDB Server`, build records & send them using the
ILP protocol.
# Arguments
- `host::String` - the host address. The default one is the `localhost` or `127.0.0.1`
- `port::Int` - the port connected to on the host machine. Default value is `9009`
- `batchSize::Int` - the buffer size beyond which the contents of the buffer are written to the server. Default size is 128 * 1024
- `tlsMode::Bool` - whether the connection uses TLS. Default value is `false`
- `hasTable::Bool` - used to indicate if an ILP record statement has a table defined
- `hasFields::Bool` - used to indicate if an ILP record statement has fields defined
- `auth::Union{Auth, Nothing}` - optional authentication configuration. Default value is `nothing`
- `buffer::String` - used to buffer ILP record statements before writing them to the server
- `socket::Union{TCPSocket, Nothing}` - holds the socket connection to the QuestDB Server instance
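# Example
```julia-repl
julia> sender = Sender()
## sender targeting 127.0.0.1:9009 with the default batch size
julia> sender = Sender(host = "localhost", port = 9009, batchSize = 256 * 1024)
## a sketch with a custom batch size
```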
"""
@with_kw mutable struct Sender
host::String="127.0.0.1"
port::Int=9009
batchSize::Int=DEFAULT_BUFFER_SIZE
tlsMode::Bool=false
hasTable::Bool=false
hasFields::Bool=false
auth::Union{Auth, Nothing} = nothing ## Optional authentication configuration
buffer::String = ""
socket::Union{TCPSocket, Nothing} = nothing
end
"""
Compare two `Auth` objects field by field
"""
function Base.:(==)(a::Auth, b::Auth)
return a.kid == b.kid && a.dKey == b.dKey && a.xKey == b.xKey && a.yKey == b.yKey
end
"""
Compare two `Sender` objects. The underlying socket connections are not compared
"""
function Base.:(==)(a::Sender, b::Sender)
return isequal(a.host, b.host) &&
a.port == b.port &&
a.batchSize == b.batchSize &&
a.tlsMode == b.tlsMode &&
a.hasTable == b.hasTable &&
a.hasFields == b.hasFields &&
a.auth == b.auth &&
isequal(a.buffer, b.buffer)
end
"""
write(sender::Sender)
Attempts to write the ILP record statements to the server. In case of an error, an exception is thrown
"""
function write(sender::Sender)
try
## Only write to server when the buffer is full
if length(sender.buffer) >= sender.batchSize
## Write to the server
Base.write(sender.socket, sender.buffer)
## Clear the buffer
clear(sender)
## Output on write
@info "Inserted an ILP record..."
end
catch err
@error "Failed to write to server\n"
throw(err)
end
end
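"""
    consume(sender::Sender)::Sender
Resets the sender's `auth` to an empty `Auth` object and returns the sender.
A placeholder helper; it appears unused within this module.
"""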
function consume(sender::Sender)::Sender
sender.auth = Auth("", "", "","")
return sender
end
"""
clear(sender::Sender)
Clears the buffer contents
"""
function clear(sender::Sender)
sender.buffer = ""
return nothing
end
"""
connect(sender::Sender)
Attempts to connect the sender to the server socket. In case of an error, an exception is thrown
"""
function connect(sender::Sender)
try
sender.socket = Sockets.connect(sender.host, sender.port)
catch err
throw(err)
end
@info "Successfully connected sender to $(sender.host):$(sender.port)"
end
"""
flush(sender::Sender)
Attempts to flush any unsent text to the server socket. In case of an error, an exception is thrown
"""
function flush(sender::Sender)
## Consume the remaining text
try
## Flush the remaining bytes
Base.write(sender.socket, sender.buffer)
Base.flush(sender.socket)
## Clear the sender buffer
clear(sender)
@info "Flushed extra bytes to server..."
catch err
@error "Failed to write remaining bytes to server\n"
throw(err)
end
return nothing
end
"""
close(sender::Sender)
Attempts to close the sender's connection to the server. In case of an error, an exception is thrown
"""
function close(sender::Sender)
try
## Flush the remaining data before closing
QuestDBSender.flush(sender)
## CLose the socket
Sockets.close(sender.socket)
catch err
throw(err)
end
@info "Successfully closed the connection to $(sender.host):$(sender.port)"
end
end
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | code | 18656 | using QuestDBClient
using Dates
using DataFrames
using Test
@testset "QuestDBSender initialization" begin
## Tests around the sender
sender = Sender()
## Equality test
@test sender == Sender()
## Test the address matches
@test sender.host == "127.0.0.1"
## Test the port
@test sender.port == 9009
## Test the default batch size
@test sender.batchSize == QuestDBClient.QuestDBSender.DEFAULT_BUFFER_SIZE
## Test that the tables & fields are empty now
@test sender.hasTable == false
@test sender.hasFields == false
## Test that the buffer is empty
@test isempty(sender.buffer)
## Test that there is no socket connection
@test isnothing(sender.socket)
end
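## A minimal sketch of Auth field-wise equality; assumes `Auth` is re-exported by
## QuestDBClient like `Sender`, and uses placeholder key values
@testset "QuestDBSender Auth equality" begin
a = Auth(kid = "user", dKey = "d", xKey = "x", yKey = "y")
b = Auth(kid = "user", dKey = "d", xKey = "x", yKey = "y")
@test a == b
end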
@testset "QuestDBOperators basic buffer write" begin
sender = Sender()
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Nairobi) |>
x -> FloatColumn(x, :temperature => 26.8) |>
x -> FloatColumn(x, :humidity => 0.51) |>
x -> At(x, DateTime(2023, 4, 1))
records = "readings,make=Omron,city=Nairobi temperature=26.8,humidity=0.51 1680307200000000000\n"
@test sender.buffer == records
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Honeywell) |>
x -> symbol(x, :city => :London) |>
x -> FloatColumn(x, :temperature => 22.9) |>
x -> FloatColumn(x, :humidity => 0.254) |>
x -> At(x, DateTime(2023, 4, 2))
records *= "readings,make=Honeywell,city=London temperature=22.9,humidity=0.254 1680393600000000000\n"
@test sender.buffer == records
end
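## A minimal sketch mixing integer, boolean, and char fields, following the
## buffer layout asserted in the testset above
@testset "QuestDBOperators mixed column types" begin
sender = Sender()
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> IntegerColumn(x, :count => 12) |>
x -> BoolColumn(x, :active => true) |>
x -> CharColumn(x, :region => 'A') |>
x -> At(x, DateTime(2023, 4, 1))
records = "readings,make=Omron count=12,active=true,region=A 1680307200000000000\n"
@test sender.buffer == records
end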
@testset "QuestDBTypes basic buffer writes" begin
sender = Sender()
sender |>
@table(:readings) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Nairobi) |>
@FloatColumn(:temperature => 26.8) |>
@FloatColumn(:humidity => 0.334) |>
@At(DateTime(2023, 4, 1))
records = "readings,make=Omron,city=Nairobi temperature=26.8,humidity=0.334 1680307200000000000\n"
@test sender.buffer == records
sender |>
@table(:readings) |>
@symbol(:make => :Honeywell) |>
@symbol(:city => :Kisumu) |>
@FloatColumn(:temperature => 30.2) |>
@FloatColumn(:humidity => 0.54) |>
@At(DateTime(2023, 4, 2))
records *= "readings,make=Honeywell,city=Kisumu temperature=30.2,humidity=0.54 1680393600000000000\n"
@test sender.buffer == records
end
@testset "QuestDBOperators dataframes buffer write" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
## Cast the collection_time to DateTime
date_format = dateformat"y-m-dTH:M:SZ"
df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format)
sender |> x -> Source(x, df, :readings, symbols = [:city, :make], at = :collection_time)
records = "readings,city=London,make=Omron humidity=0.334,temperature=29.4 1681132171000000000\n"
records *= "readings,city=Nairobi,make=Honeywell humidity=0.51,temperature=24.0 1681132182000000000\n"
@test sender.buffer == records
end
@testset "QuestDBTypes dataframes buffer write" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
## Cast the collection_time to DateTime
date_format = dateformat"y-m-dTH:M:SZ"
df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format)
sender |> @source(df = df, table = :readings, symbols = [:city, :make], at = :collection_time)
records = "readings,city=London,make=Omron humidity=0.334,temperature=29.4 1681132171000000000\n"
records *= "readings,city=Nairobi,make=Honeywell humidity=0.51,temperature=24.0 1681132182000000000\n"
@test sender.buffer == records
end
@testset "QuestDBOperators empty table name exception" begin
sender = Sender()
@test_throws DomainError table(sender, "")
end
@testset "QuestDBTypes empty table name exception" begin
sender = Sender()
@test_throws DomainError sender |> @table("")
end
@testset "QuestDBOperators illegal character in table name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.IllegalTableNameCharacterException table(sender, "tab?le")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException table(sender, "tab.le:")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException table(sender, "~tab+le")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException table(sender, "(my**table)")
end
@testset "QuestDBTypes illegal character in table name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.IllegalTableNameCharacterException sender |> @table("tab?le")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException sender |> @table( "tab.le:")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException sender |> @table("~tab+le")
@test_throws QuestDBExceptions.IllegalTableNameCharacterException sender |> @table("(my**table)")
end
@testset "QuestDBOperators multiple table definitions exception" begin
sender = Sender()
sender |> x -> table(x, :table) |> x -> symbol(x, :sym => :P)
@test_throws QuestDBExceptions.MultipleTableDefinitionsException sender |> x -> table(x, :table) |>
x -> IntegerColumn(x, :count => 13)
end
@testset "QuestDBTypes multiple table definitions exception" begin
sender = Sender()
sender |> @table(:table) |> @symbol(:sym => :P)
@test_throws QuestDBExceptions.MultipleTableDefinitionsException sender |> @table(:table) |> @IntegerColumn(:count => 13)
end
@testset "QuestDBOperators empty column name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.EmptyColumnNameException sender |> x -> table(x, "table") |> x -> IntegerColumn(x, "" => 15)
end
@testset "QuestDBTypes empty column name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.EmptyColumnNameException sender |> @table("table") |> @IntegerColumn("" => 15)
end
@testset "QuestDBOperators illegal character column name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> x -> table(x, "table") |> x -> symbol(x, "?make" => :Omron)
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> x -> table(x, "table") |> x -> FloatColumn(x, "am~ount" => 15.989)
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> x -> table(x, "table") |> x -> StringColumn(x, "ip*" => "127.0.0.1")
end
@testset "QuestDBTypes illegal character column name exception" begin
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> @table("table") |> @symbol("?make" => :Omron)
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> @table("table") |> @FloatColumn("am~ount" => 15.989)
sender = Sender()
@test_throws QuestDBExceptions.IllegalColumnNameCharacterException sender |> @table("table") |> @StringColumn("ip*" => "127.0.0.1")
end
@testset "QuestDBOperators missing table declaration exception" begin
sender = Sender()
@test_throws QuestDBExceptions.MissingTableDeclarationException IntegerColumn(sender, :age => 20)
@test_throws QuestDBExceptions.MissingTableDeclarationException CharColumn(sender, :state => 'A')
@test_throws QuestDBExceptions.MissingTableDeclarationException FloatColumn(sender, :weight => 70.2)
@test_throws QuestDBExceptions.MissingTableDeclarationException symbol(sender, :city => :Mombasa)
end
@testset "QuestDBTypes missing table declaration exception" begin
sender = Sender()
@test_throws QuestDBExceptions.MissingTableDeclarationException sender |> @IntegerColumn(:age => 20)
@test_throws QuestDBExceptions.MissingTableDeclarationException sender |> @CharColumn(:state => 'A')
@test_throws QuestDBExceptions.MissingTableDeclarationException sender |> @FloatColumn(:weight => 70.2)
@test_throws QuestDBExceptions.MissingTableDeclarationException sender |> @symbol(:city => :Mombasa)
end
@testset "QuestDBOperators unsupported dataframe column type exception" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[missing, missing])
@test_throws QuestDBExceptions.UnsupportedColumnTypeException Source(sender, df, :table, symbols = [:city, :make])
end
@testset "QuestDBTypes unsupported dataframe column type exception" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[missing, missing])
@test_throws QuestDBExceptions.UnsupportedColumnTypeException sender |> @source(df = df, table = :readings,
symbols = [:city, :make])
end
@testset "QuestDBOperators unsupported at type exception" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.54, 0.34],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
@test_throws QuestDBExceptions.UnsupportedAtColumnTypeException Source(sender, df, :table, symbols = [:city, :make], at = :collection_time)
end
@testset "QuestDBTypes unsupported at type exception" begin
sender = Sender()
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.54, 0.34],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
@test_throws QuestDBExceptions.UnsupportedAtColumnTypeException sender |> @source(df = df, table = :readings, at = :collection_time,
symbols = [:city, :make])
end
@testset "QuestDBOperators method error" begin
sender = Sender()
@test_throws MethodError sender |> x -> table(x, :table) |> x -> IntegerColumn(x, :age => "15")
sender = Sender()
@test_throws MethodError sender |> x -> table(x, :table) |> x -> StringColumn(x, :address => 15656)
sender = Sender()
@test_throws MethodError sender |> x -> table(x, :table) |> x -> FloatColumn(x, :group => 'A')
sender = Sender()
@test_throws MethodError sender |> x -> table(x, :table) |> x -> CharColumn(x, :label => "15")
sender = Sender()
@test_throws MethodError sender |> x -> table(x, :table) |> x -> symbol(x, :sym => 789)
end
@testset "QuestDBTypes method error" begin
sender = Sender()
@test_throws MethodError sender |> @table(:table) |> @IntegerColumn(:age => "15")
sender = Sender()
@test_throws MethodError sender |> @table(:table) |> @StringColumn(:address => 15656)
sender = Sender()
@test_throws MethodError sender |> @table(:table) |> @FloatColumn(:group => 'A')
sender = Sender()
@test_throws MethodError sender |> @table(:table) |> @CharColumn(:label => "15")
sender = Sender()
@test_throws MethodError sender |> @table(:table) |> @symbol(:sym => 789)
end
#########################################################
# #
# #
# SERVER TESTS (INTEGRATION) #
# #
# #
#########################################################
"""
The tests depend on the existence of the following table in the local QuestDB Server
CREATE TABLE quest_db_client_jl (
timestamp TIMESTAMP,
city SYMBOL,
temperature DOUBLE,
humidity DOUBLE,
make SYMBOL
) TIMESTAMP(timestamp) PARTITION BY DAY;
"""
@testset "QuestDBSender server connection" begin
sender = Sender()
@test isnothing(QuestDBSender.connect(sender))
QuestDBSender.close(sender)
end
@testset "QuestDBSender basic server writes" begin
sender = Sender()
## Connect to the sender
QuestDBSender.connect(sender)
sender |>
x -> table(x, :quest_db_client_jl) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Nairobi) |>
x -> FloatColumn(x, :temperature => 26.8) |>
x -> FloatColumn(x, :humidity => 0.51) |>
x -> At(x, DateTime(2023, 4, 1))
records = "quest_db_client_jl,make=Omron,city=Nairobi temperature=26.8,humidity=0.51 1680307200000000000\n"
sender |>
x -> table(x, :quest_db_client_jl) |>
x -> symbol(x, :make => :Honeywell) |>
x -> symbol(x, :city => :London) |>
x -> FloatColumn(x, :temperature => 22.9) |>
x -> FloatColumn(x, :humidity => 0.254) |>
x -> At(x, DateTime(2023, 4, 2))
records *= "quest_db_client_jl,make=Honeywell,city=London temperature=22.9,humidity=0.254 1680393600000000000\n"
@testset "QuestDBOperators writes" begin
## Attempted write does not persist as data is < buffer size
@test isnothing(QuestDBSender.write(sender))
@test sender.buffer == records
@test sender.hasTable == false
@test sender.hasFields == false
## Flush the data
@test isnothing(QuestDBSender.flush(sender))
@test isempty(sender.buffer)
@test sender.hasTable == false
@test sender.hasFields == false
end
records = ""
sender |>
@table(:quest_db_client_jl) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Nairobi) |>
@FloatColumn(:temperature => 26.8) |>
@FloatColumn(:humidity => 0.334) |>
@At(DateTime(2023, 4, 1))
records = "quest_db_client_jl,make=Omron,city=Nairobi temperature=26.8,humidity=0.334 1680307200000000000\n"
sender |>
@table(:quest_db_client_jl) |>
@symbol(:make => :Honeywell) |>
@symbol(:city => :Kisumu) |>
@FloatColumn(:temperature => 30.2) |>
@FloatColumn(:humidity => 0.54) |>
@At(DateTime(2023, 4, 2))
records *= "quest_db_client_jl,make=Honeywell,city=Kisumu temperature=30.2,humidity=0.54 1680393600000000000\n"
@testset "QuestDBTypes writes" begin
## Attempted write does not persist as data is < buffer size
@test isnothing(QuestDBSender.write(sender))
@test sender.buffer == records
@test sender.hasTable == false
@test sender.hasFields == false
## Flush the data
@test isnothing(QuestDBSender.flush(sender))
@test isempty(sender.buffer)
@test sender.hasTable == false
@test sender.hasFields == false
end
## Close the socket
QuestDBSender.close(sender)
end
@testset "QuestDBSender dataframe writes" begin
sender = Sender()
## Connect to the sender
QuestDBSender.connect(sender)
## DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
## Cast the collection_time to DateTime
date_format = dateformat"y-m-dTH:M:SZ"
df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format)
sender |> x -> Source(x, df, :quest_db_client_jl, symbols = [:city, :make], at = :collection_time)
records = "quest_db_client_jl,city=London,make=Omron humidity=0.334,temperature=29.4 1681132171000000000\n"
records *= "quest_db_client_jl,city=Nairobi,make=Honeywell humidity=0.51,temperature=24.0 1681132182000000000\n"
@testset "QuestDBOperators writes" begin
## Attempt to write, but fail as the buffer isn't full
@test isnothing(QuestDBSender.write(sender))
@test sender.buffer == records
@test sender.hasTable == false
@test sender.hasFields == false
## Flush the data
@test isnothing(QuestDBSender.flush(sender))
@test isempty(sender.buffer)
@test sender.hasTable == false
@test sender.hasFields == false
end
sender |> @source(df = df, table = :quest_db_client_jl, symbols = [:city, :make], at = :collection_time)
@testset "QuestDBTypes writes" begin
## Attempted write is a no-op since the buffered data is below the buffer size
@test isnothing(QuestDBSender.write(sender))
@test sender.buffer == records
@test sender.hasTable == false
@test sender.hasFields == false
## Flush the data
@test isnothing(QuestDBSender.flush(sender))
@test isempty(sender.buffer)
@test sender.hasTable == false
@test sender.hasFields == false
end
## Close the sender
QuestDBSender.close(sender)
end
@testset "QuestDBSender clear" begin
sender = Sender()
sender |>
x -> table(x, :quest_db_client_jl) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Nairobi) |>
x -> FloatColumn(x, :temperature => 26.8) |>
x -> FloatColumn(x, :humidity => 0.51) |>
x -> At(x, DateTime(2023, 4, 1))
records = "quest_db_client_jl,make=Omron,city=Nairobi temperature=26.8,humidity=0.51 1680307200000000000\n"
@test sender.buffer == records
@test isnothing(QuestDBSender.clear(sender))
@test isempty(sender.buffer)
end
## Test that close returns nothing
@testset "QuestDBSender close" begin
sender = Sender()
QuestDBSender.connect(sender)
@test isnothing(QuestDBSender.close(sender))
end
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 889 | # QuestDBClient
This is a Julia package that can be used to connect to a [QuestDB](https://questdb.io/) database server and send data using the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/).
>This package is strictly used to write data to the database. Reading is not supported. To read data from QuestDB, you can use [LibPQ](https://github.com/iamed2/LibPQ.jl) or [DBInterface](https://github.com/JuliaDatabases/DBInterface.jl) over QuestDB's PostgreSQL wire protocol on port `8812`. Alternatively, you can read data through QuestDB's REST API on port `9000`. Visit QuestDB's [docs](https://questdb.io/docs/develop/query-data/) for more information on how to query data.
**Installation**: at the Julia REPL, run `using Pkg; Pkg.add("QuestDBClient")`
**Documentation** can be found [here](https://ochibobo.github.io/QuestDBClient.jl/dev/). | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 4381 | # QuestDBClient.jl
Documentation for QuestDBClient.jl
```@meta
CurrentModule = QuestDBClient
```
## Overview
This is a Julia package that can be used to connect to a [QuestDB](https://questdb.io/) database server and send data using the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/).
!!! note
This package is strictly used to write data to the database. Reading is not supported. To read data from QuestDB, you can use [LibPQ](https://github.com/iamed2/LibPQ.jl) or [DBInterface](https://github.com/JuliaDatabases/DBInterface.jl) over QuestDB's PostgreSQL wire protocol on port `8812`. Alternatively, you can read data through QuestDB's REST API on port `9000`. Visit QuestDB's [docs](https://questdb.io/docs/develop/query-data/) for more information on how to query data.
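For completeness, here is a minimal read-side sketch. It is **not** part of QuestDBClient: it assumes [LibPQ.jl](https://github.com/iamed2/LibPQ.jl) and DataFrames.jl are installed, uses QuestDB's default credentials (`admin`/`quest`, database `qdb`), and queries a hypothetical `readings` table:

````julia
using LibPQ, DataFrames

# Connect over QuestDB's PostgreSQL wire protocol (port 8812 by default)
conn = LibPQ.Connection("host=localhost port=8812 dbname=qdb user=admin password=quest")

# Materialise a query result as a DataFrame (LibPQ results are Tables.jl-compatible)
df = DataFrame(execute(conn, "SELECT * FROM readings LIMIT 10;"))

close(conn)
````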
!!! tip
You can join the QuestDB Community [here](https://questdb.io/community/).
## Installation
You can install the package at the Pkg REPL-mode with:
````julia
(@v1.8) pkg> add QuestDBClient
````
## Quick Examples
### Functional Approach
Using functions to write to a QuestDB Server:
````julia
using QuestDBClient
"""
Assumes the presence of a table called readings created using:
CREATE TABLE readings (
timestamp TIMESTAMP,
city SYMBOL,
temperature DOUBLE,
humidity DOUBLE,
make SYMBOL
) TIMESTAMP(timestamp) PARTITION BY DAY;
"""
## Connects to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Create ILP records
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Nairobi) |>
x -> FloatColumn(x, :temperature => 26.8) |>
x -> FloatColumn(x, :humidity => 0.51) |>
x -> AtNow(x)
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Honeywell) |>
x -> symbol(x, :city => :London) |>
x -> FloatColumn(x, :temperature => 22.9) |>
x -> FloatColumn(x, :humidity => 0.254) |>
x -> AtNow(x)
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Bristol) |>
x -> FloatColumn(x, :temperature => 23.9) |>
x -> FloatColumn(x, :humidity => 0.233) |>
x -> AtNow(x)
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
!!! tip
You can use packages such as [Chain.jl](https://github.com/jkrumbiegel/Chain.jl), [Pipe.jl](https://github.com/oxinabox/Pipe.jl), [Lazy.jl](https://github.com/MikeInnes/Lazy.jl) or any other for function chaining, based on your preference.
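For instance, the first record above could be rewritten with Chain.jl as in the sketch below. This is an illustration only: it assumes Chain.jl is installed separately, and relies on `@chain` feeding each result into the first argument of the next call:

````julia
using Chain

@chain sender begin
    table(:readings)
    symbol(:make => :Omron)
    symbol(:city => :Nairobi)
    FloatColumn(:temperature => 26.8)
    FloatColumn(:humidity => 0.51)
    AtNow
end
````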
### Macro based approach
Using macros to write to the QuestDB Server:
````julia
using QuestDBClient
"""
Assumes the presence of a table called readings created using:
CREATE TABLE readings (
timestamp TIMESTAMP,
city SYMBOL,
temperature DOUBLE,
humidity DOUBLE,
make SYMBOL
) TIMESTAMP(timestamp) PARTITION BY DAY;
"""
## Connects to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Create ILP record statements
sender |>
@table(:readings) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Lisbon) |>
@FloatColumn(:temperature => 24.8) |>
@FloatColumn(:humidity => 0.334) |>
@AtNow
sender |>
@table(:readings) |>
@symbol(:make => :HoneyWell) |>
@symbol(:city => :Kisumu) |>
@FloatColumn(:temperature => 30.2) |>
@FloatColumn(:humidity => 0.54) |>
@AtNow
sender |>
@table(:readings) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Berlin) |>
@FloatColumn(:temperature => 26.1) |>
@FloatColumn(:humidity => 0.45) |>
@AtNow
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
## Package Manual
```@contents
Pages = [
"man/functional.md",
"man/macros.md",
"man/dataframes.md"
]
```
## API
This client exposes a set of functions and macros. However, some of them must be prefixed with their module name because their names collide with existing `Base` functions.
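For example, after `using QuestDBClient`, the sender-lifecycle functions that shadow `Base` names are called with their module prefix, while the remaining names can be used directly:

````julia
using QuestDBClient

sender = Sender()
connect(sender)              # no collision with Base, used directly
QuestDBSender.flush(sender)  # `flush` collides with Base.flush
QuestDBSender.close(sender)  # `close` collides with Base.close
````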
```@contents
Pages = [
"lib/sender.md",
"lib/operators.md",
"lib/types.md"
]
``` | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 492 | # QuestDBExceptions
```@meta
CurrentModule = QuestDBClient.QuestDBExceptions
```
```@index
Pages = ["exceptions.md"]
Modules = [QuestDBExceptions]
```
```@docs
QuestDBClientException
IllegalTableNameCharacterException
IllegalColumnNameCharacterException
EmptyColumnNameException
ColumnNameLengthLimitExceededException
MultipleTableDefinitionsException
MissingTableDeclarationException
MalformedLineProtocolSyntaxException
UnsupportedColumnTypeException
UnsupportedAtColumnTypeException
``` | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 771 | # QuestDBOperators
```@meta
CurrentModule = QuestDBClient.QuestDBOperators
```
```@index
Pages = ["operators.md"]
Modules = [QuestDBOperators]
```
```@docs
ColumnName
SymbolColumnValue
supportedTypes
COMMA
SPACE_CHARACTER
RETURN_CHARACTER
EQUALS_CHARACTER
UNSUPPORTED_CHARACTERS
QuestDBOperators.table
QuestDBOperators.symbol
QuestDBOperators.writeSymbol
QuestDBOperators.IntegerColumn
QuestDBOperators.writeFieldColumn
QuestDBOperators.checkUnsupportedCharacters
QuestDBOperators.BoolColumn
QuestDBOperators.FloatColumn
QuestDBOperators.StringColumn
QuestDBOperators.CharColumn
QuestDBOperators.DateTimeColumn
QuestDBOperators.DateColumn
QuestDBOperators.UUIDColumn
QuestDBOperators.At
QuestDBOperators.AtNow
QuestDBOperators.Source
QuestDBOperators.writeRowEnty!
``` | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 412 | # QuestDBSender
Documentation for QuestDBSender
```@meta
CurrentModule = QuestDBClient.QuestDBSender
```
```@index
Pages = ["sender.md"]
Modules = [QuestDBSender]
```
```@docs
QuestDBSender.DEFAULT_BUFFER_SIZE
Auth
Sender
QuestDBSender.write(sender::Sender)
QuestDBSender.clear(sender::Sender)
QuestDBSender.connect(sender::Sender)
QuestDBSender.flush(sender::Sender)
QuestDBSender.close(sender::Sender)
```
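As a quick orientation, a typical sender lifecycle built from the functions documented above looks like the following sketch (record building itself is covered in the operators and types pages):

```julia
sender = Sender()              # defaults to localhost:9009
QuestDBSender.connect(sender)

# ... build ILP records with QuestDBOperators functions or QuestDBTypes macros ...

QuestDBSender.flush(sender)    # send anything still in the buffer
QuestDBSender.close(sender)    # flushes first, then closes the socket
```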
| QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 455 | # QuestDBTypes
```@meta
CurrentModule = QuestDBClient.QuestDBTypes
```
```@index
Pages = ["types.md"]
Modules = [QuestDBTypes]
```
```@docs
QuestDBTypes.@table
QuestDBTypes.@symbol
QuestDBTypes.@IntegerColumn
QuestDBTypes.@BoolColumn
QuestDBTypes.@FloatColumn
QuestDBTypes.@StringColumn
QuestDBTypes.@CharColumn
QuestDBTypes.@DateTimeColumn
QuestDBTypes.@DateColumn
QuestDBTypes.@UUIDColumn
QuestDBTypes.@At
QuestDBTypes.@AtNow
QuestDBTypes.@source
``` | QuestDBClient | https://github.com/Ochibobo/QuestDBClient.jl.git |
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 5490 | # Functional Approach
The `QuestDBClient` can be used with functions to write data to a QuestDB server instance.
## API
The API that describes the implementation of this functional approach can be found here:
```@contents
Pages = ["../lib/operators.md"]
```
## Example
!!! note
The functions must be called in the following order:
`table -> symbol -> others -> At/AtNow`
`others` represents field columns such as integers, floats etc. The **terminal** call of each
chain **should** be `At()` or `AtNow()`.
Not following the specified order will result in an **exception** being thrown.
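As an illustration, the sketch below deliberately breaks this order by adding a symbol after a field column. Based on the rule above, this should raise one of the exceptions defined in the package's `QuestDBExceptions` module (the exact exception type is not shown here):

````julia
sender |>
    x -> table(x, :readings) |>
    x -> FloatColumn(x, :humidity => 0.51) |>
    x -> symbol(x, :city => :Nairobi) |>   # wrong: symbols must come before field columns
    x -> AtNow(x)
````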
### Basic Example
````julia
using QuestDBClient
"""
Assumes the presence of a table called readings created using:
CREATE TABLE readings (
timestamp TIMESTAMP,
city SYMBOL,
temperature DOUBLE,
humidity DOUBLE,
make SYMBOL
) TIMESTAMP(timestamp) PARTITION BY DAY;
"""
## Create a sender instance that will connect to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Create ILP records
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Nairobi) |>
x -> FloatColumn(x, :temperature => 26.8) |>
x -> FloatColumn(x, :humidity => 0.51) |>
x -> AtNow(x)
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Honeywell) |>
x -> symbol(x, :city => :London) |>
x -> FloatColumn(x, :temperature => 22.9) |>
x -> FloatColumn(x, :humidity => 0.254) |>
x -> AtNow(x)
sender |>
x -> table(x, :readings) |>
x -> symbol(x, :make => :Omron) |>
x -> symbol(x, :city => :Bristol) |>
x -> FloatColumn(x, :temperature => 23.9) |>
x -> FloatColumn(x, :humidity => 0.233) |>
x -> AtNow(x)
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
### Working with DataFrames
DataFrames can also be used as a data source in the QuestDBClient. However, some preprocessing may be
needed, such as casting the columns to supported types. The target table, the column that holds the
designated timestamp, and any symbol (tag) columns must all be specified explicitly.
Supported types include: `Symbol`, `Integer` and subtypes, `AbstractFloat` and subtypes, `Bool`, `Char`, `AbstractString` and subtypes,
`Date`, `DateTime`, `UUID`.
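As a quick illustration before the full example, a single record can mix several of these types. The sketch below uses the column functions documented in the library pages, assuming they follow the same `(x, name => value)` pattern as `FloatColumn`; the table and column names are purely illustrative:

````julia
using Dates, UUIDs

# Assumed signatures: each column function follows the `(x, name => value)`
# pattern used by `FloatColumn` in the examples above.
sender |>
    x -> table(x, :devices) |>
    x -> symbol(x, :site => :lab) |>
    x -> IntegerColumn(x, :reboots => 3) |>
    x -> BoolColumn(x, :online => true) |>
    x -> StringColumn(x, :firmware => "v1.2.0") |>
    x -> UUIDColumn(x, :device_id => uuid4()) |>
    x -> At(x, DateTime(2023, 4, 1))
````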
#### Example
!!! note
This example requires the installation of the [DataFrames.jl](https://github.com/JuliaData/DataFrames.jl) package.
A DataFrame object with the following structure will be used in the example:
| city | make | temperature | humidity |
|:-----------|:-----------|-------------:|------------:|
| London | Omron | 29.4 | 0.334 |
| Nairobi | Honeywell | 24.0 | 0.51 |
````julia
using DataFrames
using QuestDBClient
## Create a DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51])
## Create a sender instance that will connect to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Map the dataframe data to ILP record statements
sender |> x -> Source(x, df, :readings, symbols=[:city, :make])
## can also use: Source(sender, df, :readings, symbols=[:city, :make]);
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
An example with the `At` field specified.
| city | make | temperature | humidity | collection_time |
|:-----------|:-----------|-------------:|------------:|-------------------------:|
| London | Omron | 29.4 | 0.334 | 2023-04-10T13:09:31Z |
| Nairobi | Honeywell | 24.0 | 0.51 | 2023-04-10T13:09:42Z |
````julia
using DataFrames
using Dates
using QuestDBClient
## A DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
## Cast the collection_time to DateTime
date_format = dateformat"y-m-dTH:M:SZ"
df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format)
## Create a sender instance that will connect to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Map the dataframe data to ILP record statements
sender |> x -> Source(x, df, :readings, symbols = [:city, :make], at = :collection_time)
## can also use: Source(sender, df, :readings, symbols = [:city, :make], at = :collection_time)
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
!!! note
The `sender` writes buffered values to the QuestDB server whenever the buffer size is met
or exceeded while reading the rows of the `DataFrame`. This can happen even before the
`flush` or `close` function is called.
|
[
"MIT"
] | 0.1.0 | 8fd50c23df209ceadb0a6c85225e72699dfa55aa | docs | 5156 | # Macro Approach
The `QuestDBClient` can be used with macros to write data to a QuestDB server instance.
## API
The API that describes the implementation of this macro-based approach can be found here:
```@contents
Pages = ["../lib/types.md"]
```
## Example
!!! note
The macros must be applied in the following order:
`table -> symbol -> others -> At/AtNow`
`others` represents field columns such as integers, floats etc. The **terminal** macro of each
chain **should** be `@At` or `@AtNow`.
Not following the specified order will result in an **exception** being thrown.
### Basic Example
````julia
using QuestDBClient
"""
Assumes the presence of a table called readings created using:
CREATE TABLE readings (
timestamp TIMESTAMP,
city SYMBOL,
temperature DOUBLE,
humidity DOUBLE,
make SYMBOL
) TIMESTAMP(timestamp) PARTITION BY DAY;
"""
## Connects to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Create ILP record statements
sender |>
@table(:readings) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Lisbon) |>
@FloatColumn(:temperature => 24.8) |>
@FloatColumn(:humidity => 0.334) |>
@AtNow
sender |>
@table(:readings) |>
@symbol(:make => :HoneyWell) |>
@symbol(:city => :Kisumu) |>
@FloatColumn(:temperature => 30.2) |>
@FloatColumn(:humidity => 0.54) |>
@AtNow
sender |>
@table(:readings) |>
@symbol(:make => :Omron) |>
@symbol(:city => :Berlin) |>
@FloatColumn(:temperature => 26.1) |>
@FloatColumn(:humidity => 0.45) |>
@AtNow
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
### Working with DataFrames
DataFrames can also be used as a data source in the QuestDBClient. However, some preprocessing may be
needed, such as casting the columns to supported types. The target table, the column that holds the
designated timestamp, and any symbol (tag) columns must all be specified explicitly.
Supported types include: `Symbol`, `Integer` and subtypes, `AbstractFloat` and subtypes, `Bool`, `Char`, `AbstractString` and subtypes,
`Date`, `DateTime`, `UUID`.
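As a quick illustration before the full example, a single record can mix several of these types. The sketch below uses the column macros documented in the library pages, assuming they follow the same `(name => value)` pattern as `@FloatColumn`; the table and column names are purely illustrative:

````julia
using Dates, UUIDs

# Assumed signatures: each column macro follows the `(name => value)`
# pattern used by `@FloatColumn` in the examples above.
sender |>
    @table(:devices) |>
    @symbol(:site => :lab) |>
    @IntegerColumn(:reboots => 3) |>
    @BoolColumn(:online => true) |>
    @StringColumn(:firmware => "v1.2.0") |>
    @UUIDColumn(:device_id => uuid4()) |>
    @At(DateTime(2023, 4, 1))
````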
#### Example
!!! note
This example requires the installation of the [DataFrames.jl](https://github.com/JuliaData/DataFrames.jl) package.
A DataFrame object with the following structure will be used in the example:
| city | make | temperature | humidity |
|:-----------|:-----------|-------------:|------------:|
| London | Omron | 29.4 | 0.334 |
| Nairobi | Honeywell | 24.0 | 0.51 |
````julia
using DataFrames
using QuestDBClient
## Create a DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51])
## Create a sender instance that will connect to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Map the dataframe data to ILP record statements
sender |> @source(df = df, table = :readings, symbols=[:city, :make])
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
An example with the `At` field specified.
| city | make | temperature | humidity | collection_time |
|:-----------|:-----------|-------------:|------------:|-------------------------:|
| London | Omron | 29.4 | 0.334 | 2023-04-10T13:09:31Z |
| Nairobi | Honeywell | 24.0 | 0.51 | 2023-04-10T13:09:42Z |
````julia
using DataFrames
using Dates
using QuestDBClient
## A DataFrame instance
df = DataFrame(city=["London", "Nairobi"],
make=[:Omron, :Honeywell],
temperature=[29.4, 24.0],
humidity=[0.334, 0.51],
collection_time=["2023-04-10T13:09:31Z", "2023-04-10T13:09:42Z"])
## Cast the collection_time to DateTime
date_format = dateformat"y-m-dTH:M:SZ"
df[!, :collection_time] = DateTime.(df[:, :collection_time], date_format)
## Create a sender instance that will connect to the localhost at port 9009
sender = Sender()
## Connect the sender to the server first
connect(sender)
## Map the dataframe data to ILP record statements
sender |> @source(df = df, table = :readings, symbols = [:city, :make], at = :collection_time)
## Flush the output to the server
QuestDBSender.flush(sender)
## Close the socket connection
## Close first calls QuestDBSender.flush(sender) as part of its definition
QuestDBSender.close(sender)
````
!!! note
The `sender` writes buffered values to the QuestDB server whenever the buffer size is met
or exceeded while reading the rows of the `DataFrame`. This can happen even before the
`flush` or `close` function is called.
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 1527 | using Documenter, MultipleScattering
makedocs(
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true"
), # changes urls if built locally
sitename = "MultipleScattering.jl",
authors = "Artur L. Gower and Jonathan Deakin",
source = "src",
# modules = [MultipleScattering],
pages = [
"Home" =>"index.md",
"Manual" => [
"manual/intro.md",
"manual/source.md",
"manual/shapes.md",
"manual/particles.md",
"manual/time_response.md",
"manual/plot.md",
"manual/new_types.md"
],
"Library" => [
"library/base.md",
"library/acoustics.md",
"library/random.md"
],
"Theory" => "maths/README.md",
"Examples" =>
[
"example/README.md",
"example/helmholtz-resonator/resonator.md",
"example/particles_in_circle/README.md",
"example/box_size/README.md",
"example/hankel_convergence/README.md",
"example/moments/README.md",
"example/near_surface_backscattering/README.md",
"example/random_particles/README.md",
"example/time_response_single_particle/README.md",
"example/two_particles/README.md"
]
]
)
deploydocs(
branch = "gh-pages",
target = "build",
versions = ["stable" => "v^", "v#.#.#"],
repo = "github.com/JuliaWaveScattering/MultipleScattering.jl.git"
)
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 893 | # find julia files to be converted into README.md
function generate_README(examplestoconvert, folder)
ps = map(examplestoconvert) do str
fs = filter(s -> split(s, ".")[end] == "jl", readdir(folder*str))
if length(fs) > 0
[str, fs[1]]
else
String[]
end
end
filter!(p -> p != String[], ps)
# This code below writes the README.md
for p in ps
s1 = string(folder,p[1],"/",p[2])
str_arr = split(read(open(s1,"r"), String), "\n")
# convert to md format
str_arr = map(str_arr) do s
if length(s) > 2 && s[1] == '#' && s[2] == ' '
string(s[3:end],"\n")
else
string(s,"\n")
end
end
s2 = string(folder,p[1],"/README.md")
f = open(s2,"w")
write(f, string(str_arr...))
close(f)
end
end
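# Example usage (illustrative, commented out since the folder layout is an
# assumption): convert each listed example's script into a README.md alongside it.
# generate_README(["two_particles", "random_particles"], "docs/src/example/")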
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 1900 | using MultipleScattering
using Plots;
using Random
using LinearAlgebra
function box_size_convergence( m = 4,
volfrac = 0.05,
radius = 1.0,
times = [20.0,30.0,40.0,50.0,60.0,70.0],
ω = collect(LinRange(0.01,1.0,100))
)
host_medium = Acoustic(1.0, 1.0, 2)
particle_medium = Acoustic(1.0, 0.0, 2)
particle_shape = Circle(radius)
listener_position = [-10.0; 0.0]
bigshape = TimeOfFlightPlaneWaveToPoint(listener_position, maximum(times))
seed = MersenneTwister(1).seed
allparticles = random_particles(particle_medium, particle_shape;
region_shape = bigshape,
volume_fraction = volfrac,
seed = seed
)
source = plane_source( host_medium; position = [0.0, 0.0], direction = [1.0, 0.0] )
simulations = map(times) do t
println("Calculating response with particles at a maximum of $t away")
shape = TimeOfFlightPlaneWaveToPoint(listener_position, t)
particles = filter(p -> p ⊆ shape, allparticles)
return run(particles, source, [listener_position], ω; basis_order = m)
end
return map(x -> frequency_to_time(x; t_vec = LinRange(0.0,100.0,201)), simulations)
end
function plot_box_size_convergence(simulations; times = [20,30,40,50,60,70])
plot(xguide = "Time (t)", yguide ="Response")
for s in eachindex(simulations)
plot!(simulations[s], label = "Box cut at t=$(times[s])")
plot!(size=(800,600))
end
plot!(title="Time response from random particles in increasingly large boxes")
end
simulations = box_size_convergence()
plot_box_size_convergence(simulations)
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 2205 | using MultipleScattering
using Plots; pyplot()
using Random
function hankel_order_convergence(m=[0,1,2,3,4,5,6,7,8,9,10], volfrac = 0.1,
radius = 1.0, maxtime = 40.0, k_arr=collect(LinRange(0.01,1.0,100)) )
listener_position = [-10.0,0.0]
shape = TimeOfFlightPlaneWaveToPoint(listener_position,maxtime)
seed = MersenneTwister(1).seed
particles = random_particles(volfrac, radius, shape; seed = seed)
simulations = Vector{FrequencySimulation{Float64}}(undef,length(m))
for i = eachindex(m)
simulations[i] = FrequencySimulation(particles, k_arr; seed=seed, hankel_order=m[i])
end
return simulations
end
function plot_hankel_order_convergence(simulations)
responses = Vector{Vector{Complex{Float64}}}(undef,length(simulations))
m = Vector{Int64}(undef,length(simulations))
labels = Matrix{String}(undef,1,0) # row vector of plot labels, grown in the loop below
for i = eachindex(simulations)
responses[i] = reshape(simulations[i].response, size(simulations[i].response,1))
m[i] = simulations[i].hankel_order
labels = [labels "m = $(m[i])"]
end
error = [r .- responses[end] for r in responses[1:end-1]]
integrated_error = norm.(error).*map(m->((m.k_arr[end]-m.k_arr[1])/length(m.k_arr)),simulations[1:end-1])
colors = reshape(LinRange(RGB(0.6,1,0.6),RGB(0,0.4,0),length(m)),1,length(m))
realcolors = reshape(LinRange(RGB(0.6,0.6,1),RGB(0,0,0.4),length(m)),1,length(m))
imagcolors = reshape(LinRange(RGB(1,0.6,0.6),RGB(0.4,0,0),length(m)),1,length(m))
absvec(v) = abs.(v)
plot(
plot(simulations[end],0.5),
plot(simulations[1].k_arr, [real(responses),imag(responses)],
labels=[ map(c -> "real "*c,labels) map(c -> "imag "*c,labels)],
xguide ="Wavenumber (k)", yguide ="Response", linecolor=[realcolors imagcolors]
),
plot(simulations[1].k_arr, absvec.(error),
yscale=:log10, labels=labels, linecolor=colors,
xguide ="Wavenumber (k)", yguide ="Absolute error",
),
plot(m[1:end-1], integrated_error,
yscale=:log10, legend=false,
xguide ="Hankel order", yguide ="\$L^2\$ integrated error",
)
)
end
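# Example usage sketch: run the convergence study defined above and plot it.
# Guarded so it only runs when this file is executed as a script (it may take a while).
if abspath(PROGRAM_FILE) == @__FILE__
    simulations = hankel_order_convergence()
    plot_hankel_order_convergence(simulations)
end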
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 2400 | using MultipleScattering
using Plots
# The four parameters below are all the required dimensions of the resonator.
cavity_radius = 1.0;
cavity_length = 2.5;
mouth_radius = 0.3;
mouth_length = 0.5;
# Define positions of the particles constituting the resonator
radius = .1; # radius of the particles
d_particle = 2.001*radius; # distance between centers
# define the cavity of the resonator
cavity_up = [[x,cavity_radius+radius] for x=radius:d_particle:cavity_length];
cavity_down = [[x,-cavity_radius-radius] for x=radius:d_particle:cavity_length]
cavity_right = [[cavity_length+.05*radius,y] for y=(-cavity_radius-radius):d_particle:(cavity_radius+2*radius)]
# define the mouth of the resonator
mouth_connect_down = [[radius-d_particle,y] for y=(-cavity_radius-radius):d_particle:(-mouth_radius)]
mouth_connect_up = [[radius-d_particle,y] for y=(mouth_radius+radius):d_particle:(cavity_radius+2*radius)]
mouth_up = [[x,mouth_radius+radius] for x = radius-2*d_particle:-d_particle:-mouth_length-radius]
mouth_down = [[x,-mouth_radius-radius] for x = radius-2*d_particle:-d_particle:-mouth_length-radius]
# put the different pieces together
X = [cavity_up;cavity_down;cavity_right;mouth_connect_down;mouth_connect_up;mouth_up;mouth_down];
X = [x - [cavity_length/2,cavity_radius/2-0.5] for x in X];
# Build the resonator
particle_medium = Acoustic(2; ρ = 0., c = 0.);
Resonator = [Particle(particle_medium, Circle(x, radius)) for x in X];
plot(Resonator)
# savefig("Resonator.png")
# simulate the scattered fields for incoming plane waves of different frequencies
host_medium = Acoustic(2; ρ=1.0, c=1.0); # medium of the background, 2 is the dimension of the setting.
source = plane_source(host_medium; direction = [1.0,0.0])
# region where the result will be plot
M=N=5.0;
bottomleft = [-M;-N]; topright = [M;N];
region = Box([bottomleft, topright]);
sim = FrequencySimulation(Resonator, source);
list_ω = [1.99,3.99,2.74]
result = run(sim, region, list_ω, basis_order=5,only_scattered_waves=true; res=200)
# generate the plots of the fields
# pyplot()
# pyplot(size = (300,300))
for p=1:3 # loop on the different frequencies
plot(result,list_ω[p]; seriestype = :contour,c=:balance) # clim=(-5.0,5.0)
colormap("RdBu")
plot!(Resonator,colorbar=true,
title="Field at ω="*string(list_ω[p]),axis=false, xguide ="",yguide ="")
savefig("plot_"*string(p)*".png")
end
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 286 | using MultipleScattering
p1 = Particle([-2.,2.])
p2 = Particle([-2.,-2.])
particles = [p1,p2]
w_arr = collect(0.1:0.01:1.)
simulation = FrequencySimulation(particles, w_arr)
using Plots
plot(simulation)
savefig("plot_simulation.png")
plot(simulation,0.8)
savefig("plot_field.png")
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 797 | # Example shows how to egnerate a whole batch of responses with the same label
# (volume fraction, radius and shape) but different realisations (actual
# positions of the particles). We will then extract stochastic information from
# them
using MultipleScattering
function moments_example()
volfrac = 0.01
radius = 1.0
num_particles = 10
k_arr = collect(LinRange(0.01,1.0,100))
# region to place the particles
shape = Rectangle(volfrac, radius, num_particles)
# Holder for our simulations
simulations = Vector{FrequencySimulation{Float64}}(undef,10)
for i=1:10
simulations[i] = FrequencySimulation(volfrac,radius,k_arr;seed=[0x7f5def91, 0x82255da3, 0xc5e461c7, UInt32(i)])
end
moments = StatisticalMoments(simulations)
return moments
end
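# Example usage: compute the moments across the 10 realisations defined above
# moments = moments_example()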
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 4792 | # # Near-surface backscattering
#
# Near-surface backscattering is a method of accurately calculating the backscattering from an infinite halfspace. For just the code see [backscattering.jl](backscattering.jl)
# First, let us see why it is difficult to approximate the scattering from a halfspace filled with particles. That is, let us find out how many particles are required before the backscattering converges.
#
# ## Generate a large material filled with particles.
#
using MultipleScattering, LinearAlgebra
using FileIO, JLD2 # assumed imports for the `save`/`load` calls used below
using Plots
dim = 2
host_medium = Acoustic(dim; c = 1.0, ρ = 1.0)
radius = 0.8
volfrac = 0.10
max_width = 70.
particle_medium = Acoustic(dim; ρ = 0.5, c = 0.6)
particle_shape = Circle(radius)
bottomleft = [0.,-max_width]
topright = [max_width,max_width]
shape = Box([bottomleft,topright])
particles = random_particles(particle_medium, particle_shape; region_shape = shape, volume_fraction = volfrac, seed = 2)
# We send an incoming harmonic plane wave and receive the backscattering at `x`:
x = [-10.,0.]
source = plane_source(host_medium;
position = x,
direction = [1.0,0.],
amplitude = 1.0
)
plot(particles)
scatter!([x[1]],[x[2]], lab="")
annotate!([(x[1], x[2] -max_width/10., "Receiver")])
plot!(shape, linecolor = :red)
# ## Calculate backscattering for different quantity of particles
# We will shave off particles on the right of this group of particles (above), and then calculate the backscattered waves for a range of angular frequencies `ωs`.
ωs = collect(5e-3:5e-3:1.)
t_to_ω(ωs)
widths = 10.:5.0:max_width
num_particles = zeros(length(widths))
# This part below may take a while to run!
results = map(eachindex(widths)) do i
bottomleft = [0.,-widths[i]]
topright = [widths[i],widths[i]]
shape = Box([bottomleft, topright])
ps = filter(p -> p ⊆ shape, particles) # select particles that are inside shape
num_particles[i] = Int(length(ps))
simulation = FrequencySimulation(ps, source)
run(simulation, x, ωs)
end
save("results.jld2", "$(typeof(results))",results)
save("num_particles.jld2", "$(typeof(num_particles))",num_particles)
#To load results uncomment
# results = first(values(load("results.jld2")))
# num_particles = first(values(load("num_particles.jld2")))
backscattered_waves = field.(results)
M = length(backscattered_waves)
bM = backscattered_waves[M] # backscattering from largest material
differences = [norm(b - bM) for b in backscattered_waves[1:(M-1)]]./norm(bM)
plot_converge = plot(num_particles[1:(M-1)], differences,
xlabel = "number of particles", yguide ="error %",
label="frequency convergence"
)
savefig("freq_convergence.png")
gauss_impulse = GaussianImpulse(maximum(ωs))
receiver = results[1].x[1]
times = 2*(widths .- receiver[1]) # time it takes for an incident plane wave to reach the furthest particles and then return to the receiver
time_simulations = frequency_to_time.(results; impulse = gauss_impulse)
plot()
for i in [1,3,6,9,12,13]
plot!(time_simulations[i],label="$(num_particles[i]) particles"
, xlims=(0,maximum(times))
, ylims=(-0.25,0.3)
, xticks = [0.; 20.; times]
)
end
gui()
savefig("time_response.png")
time_vec = 0.:pi:34.2
time_results = frequency_to_time.(results; t_vec = time_vec, impulse = gauss_impulse)
backscattered_waves = field.(time_results)
bM = backscattered_waves[M] # backscattering from largest material
differences = [norm(b - bM) for b in backscattered_waves[1:(M-1)]]./norm(bM)
plot(plot_converge)
plot!(num_particles[1:(M-1)], differences, xlabel = "number of particles", yguide ="error %", label="time convergence")
savefig("compare_convergence.png")
# ## Using near-surface backscattering
shape = TimeOfFlightPlaneWaveToPoint(receiver,70.0)
scatter([receiver[1]],[receiver[2]], label = "");
annotate!([(receiver[1], receiver[2] -max_width/10., "Receiver")])
plot!(particles);
plot!(shape, linecolor=:red)
savefig("time_of_flight_shape.png")
times = 40.:10.:70.
near_surface_simulations = map(times) do t
shape = TimeOfFlightPlaneWaveToPoint(receiver,t) # choose a material with particles only in the near surface region
ps = filter(p -> p ⊆ shape, particles) # select particles that are inside shape
run(FrequencySimulation(ps, source), x, ωs) # calculate backscattering
end
save("near_surface_simulations.jld2","Array{FrequencySimulation{Float64},1}",near_surface_simulations)
time_near_simulations = frequency_to_time.(near_surface_simulations; impulse = gauss_impulse)
plot()
for i in 1:length(times)
plot!(time_near_simulations[i],label="time of flight $(times[i])"
, xlims=(0,maximum(times)+10.)
, ylims=(-0.2,0.3)
, xticks = [0.; times], title=""
, linewidth = 2.0
)
end
gui()
savefig("time_of_flight_response.png")
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |
|
[
"MIT"
] | 0.1.21 | 3df28a341378af006306b3a60cf5df63f8d5b76c | code | 2367 | # # Random particles in a circle
# The code [particles_in_circle.jl](particles_in_circle.jl) compares the scattered wave from one big circle, with the scattered wave from a circle filled with small particles.
# ```julia
using MultipleScattering
#You can also pick your own shape, an generate random particles inside it
#with a certain radius ands volume fraction
radius = 0.3
volfrac = 0.45
centre = [0.,0.]
big_radius = 3.0
particle_medium = Acoustic(2; ρ=0.0, c=0.0) # 2D particle with density ρ = 0.0 and soundspeed c = 0.0
particle_shape = Circle(radius)
circle = Sphere(centre, big_radius)
particles = random_particles(particle_medium, particle_shape; region_shape = circle, volume_fraction = volfrac, seed=1)
x = [-10.,0.] # position to receive the reflected wave
host_medium = Acoustic(2; ρ=1.0, c=1.0)
source = plane_source(host_medium; position = x, direction = [1.0,0.])
simulation = FrequencySimulation(particles, source)
# ```
# The particles chosen are impenetrable, i.e. the wave is 100\% reflected. So this circle filled with scatterers should act like one big particle.
# ```julia
big_particle = Particle(particle_medium, circle)
big_particle_simulation = FrequencySimulation([big_particle], source)
#define a bounding box for plot
bottomleft = [-10, -2*big_radius]
topright = [big_radius, 2*big_radius]
box = Box([bottomleft, topright])
using Plots
height = 300
#gr(size=(1.4*height,height))
#pyplot(leg=false, size=(1.4*height,height))
ω = 0.5
#plot(big_particle_simulation, ω; res=15, bounds = box);
#plot!(big_particle)
#savefig("plot_field_big.png")
#plot(simulation, ω; res=15, bounds = box);
#plot!(particles, linecolor = :green)
#savefig("plot_field.png")
# ```
# Resulting in the figures:
# 
# 
# If we compare the response measured at the listener `[-10., 0.]`, they should be very similar:
# ```julia
#define angular frequency range
ωs = collect(LinRange(0.1,1.0,10))
result = run(simulation, x, ωs)
big_result = run(big_particle_simulation, x, ωs)
#plot(result, lab = "scattering from particles")
#plot!(big_result,
#lab = "scattering from big particle",
#title="Compare scattered wave from one big particle, \n and a circle filled with small particles")
# ```
# 
| MultipleScattering | https://github.com/JuliaWaveScattering/MultipleScattering.jl.git |