Dataset row schema: licenses | version | tree_hash | path | type | size | text | package_name | repo
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 4988 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
# ===== Parcel constructors =====
"""
Parcel(surface)
Make an empty `Parcel` where `surface::Hemisphere` dictates the length
of the representational space.
"""
function Parcel(surface::Hemisphere)
return Parcel(surface, falses(size(surface)))
end
"""
Parcel(surface, verts)
Make a `Parcel`, given a `surface::Hemisphere` and the vertex indices `verts` that belong to it.
"""
function Parcel(surface::Hemisphere, verts::Vector{Int})
membership = falses(size(surface))
membership[verts] .= true
return Parcel(surface, membership)
end
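# Usage sketch (commented out so nothing runs at include time; assumes the
# 32492-vertex space used throughout the docs and tests):
#   hem = Hemisphere(32492)
#   p = Parcel(hem, [5, 6, 7])  # a parcel with 3 member vertices
#   size(p)                     # 3: number of member vertices
#   length(p)                   # 32492: length of the representational space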
"""
Parcel(surface, coords, tree)
Given a `Matrix` of arbitrary x, y, z coordinates and a `KDTree` representing the
positions of defined cortical vertex indices, make a `Parcel` by mapping those
coordinates to the set of defined indices via nearest neighbor search.
"""
function Parcel(surface::Hemisphere, coords::AbstractMatrix, tree::KDTree)
inds, _ = knn(tree, coords, 1) # we only need the indices, not the distances
inds = [x[1] for x in inds] # flatten result to just a vector
return all(inds .> 0) ? Parcel(surface, inds) : Parcel(surface)
end
"""
Parcel(p)
Create a new `Parcel` that's a copy of another one `p`.
"""
function Parcel(p::Parcel)
Parcel(p.surface, vertices(p))
end
# ===== Parcellation constructors =====
"""
BilateralParcellation{T}(surface, x)
Create a `BilateralParcellation` from a `Vector` `x`, the length of which should match
the size of the `surface::CorticalSurface` being supplied. The distinct elements of that
`Vector` will become the `Parcels` of the resulting struct. Parcels will be keyed by IDs
of type `T`; therefore the eltype of the `Vector` you supply must be coercible to `T`.
"""
function BilateralParcellation{T}(surface::CorticalSurface, x::AbstractVector) where T
nverts = size(surface)
input_size = length(x)
if input_size != nverts
input_size == size(surface, Exclusive()) || throw(DimensionMismatch("length of input must match surface size, either inclusive or exclusive of medial wall"))
x = pad(x, surface; sentinel = 0)
end
parcels = Dict{BrainStructure, HemisphericParcellation{T}}()
for hem in LR
verts = vertices(surface[hem], Bilateral(), Inclusive())
parcels[hem] = HemisphericParcellation{T}(surface[hem], x[verts])
end
length(intersect(keys(parcels[L]), keys(parcels[R]))) == 0 ||
error("Found parcels with membership spanning hemispheres; this is not supported")
return BilateralParcellation{T}(surface, parcels)
end
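# Usage sketch (commented; assumes `c::CorticalSurface` built from a left and
# a right `Hemisphere`, as in the test suite; note that a parcel ID must not
# span hemispheres):
#   nL, nR = size(c[L]), size(c[R])
#   labels = [rand(1:5, nL); rand(6:10, nR)]   # one parcel ID per vertex
#   px = BilateralParcellation{Int}(c, labels)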
"""
HemisphericParcellation{T}(surface, x)
Create a `HemisphericParcellation` from a `Vector` `x`, the length of which should match
the size of the `surface::Hemisphere` being supplied. The distinct elements of that
`Vector` will become the `Parcels` of the resulting struct. Parcels will be keyed by IDs
of type `T`; therefore the eltype of the `Vector` you supply must be coercible to `T`.
"""
function HemisphericParcellation{T}(surface::Hemisphere, x::AbstractVector) where T
nverts = size(surface)
input_size = length(x)
if input_size != nverts
input_size == size(surface, Exclusive()) || throw(DimensionMismatch("length of input must match surface size, either inclusive or exclusive of medial wall"))
x = pad(x, surface; sentinel = 0)
end
return HemisphericParcellation{T}(
surface,
Dict(p => Parcel(surface, findall(x .== p)) for p in setdiff(x, 0))
)
end
"""
BilateralParcellation{T}(surface, x)
Create a `BilateralParcellation` from a single-column `Matrix` `x`.
"""
function BilateralParcellation{T}(surface::CorticalSurface, x::AbstractMatrix) where T
size(x, 2) == 1 || error("For matrix input, column dimension must be singleton")
BilateralParcellation{T}(surface, x[:])
end
"""
HemisphericParcellation{T}(surface, x)
Create a `HemisphericParcellation` from a single-column `Matrix` `x`.
"""
function HemisphericParcellation{T}(surface::Hemisphere, x::AbstractMatrix) where T
size(x, 2) == 1 || error("For matrix input, column dimension must be singleton")
HemisphericParcellation{T}(surface, x[:])
end
"""
BilateralParcellation{T}(surface)
Create an empty `BilateralParcellation` from `surface::CorticalSurface`.
"""
function BilateralParcellation{T}(surface::CorticalSurface) where T
return BilateralParcellation{T}(
surface,
Dict(hem => HemisphericParcellation{T}(surface[hem]) for hem in LR)
)
end
"""
HemisphericParcellation{T}(surface::Hemisphere)
Create an empty `HemisphericParcellation`.
"""
function HemisphericParcellation{T}(surface::Hemisphere) where T
return HemisphericParcellation{T}(surface, Dict{T, Parcel}())
end
"""
BilateralParcellation{T}(surface, cifti)
Create a `BilateralParcellation` from `surface::CorticalSurface`, with parcels
initialized from the values of a `CiftiStruct` (see CIFTI.jl).
"""
function BilateralParcellation{T}(surface::CorticalSurface, cifti::CiftiStruct) where T
size(cifti)[2] == 1 || error("Expected single-column CIFTI file")
px = Dict(
hem => HemisphericParcellation{T}(surface[hem], cifti[hem])
for hem in LR
)
length(intersect(keys(px[L]), keys(px[R]))) == 0 ||
error("Found parcels with membership spanning hemispheres; this is not supported")
return BilateralParcellation{T}(surface, px)
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 1794 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
abstract type DistanceMethod end
struct CentroidToCentroid <: DistanceMethod end
struct ClosestVertices <: DistanceMethod end
"""
centroid(p, distances)
Find the centroid of a parcel (the vertex that has the least summed distance
to all other vertices in the parcel). `distances` is expected to be
a square distance matrix of dimensions (length(p), length(p)).
"""
function centroid(p::Parcel, distances::DistanceMatrix)
all(size(distances) .== length(p)) || throw(DimensionMismatch("distances must be a square matrix of side length(p)"))
verts = vertices(p)
summed_dists = sum(distances[verts, verts]; dims = 1)[:]
return verts[argmin(summed_dists)]
end
"""
distance(p1, p2, distances; method = CentroidToCentroid())
Find the distance between `Parcel`s `p1` and `p2` according to distance matrix
`distances`, using `method` (one of `CentroidToCentroid()` or `ClosestVertices()`).
"""
function distance(
p1::Parcel, p2::Parcel, distances::DistanceMatrix;
method::DistanceMethod = CentroidToCentroid()
)
distance(method, p1, p2, distances)
end
"""
distance(p1, p2; method = CentroidToCentroid())
Find the distance between `Parcel`s `p1` and `p2` using `method` (one of
`CentroidToCentroid()` or `ClosestVertices()`). This method expects to find a
distance matrix `:distances` belonging to the first parcel's `SurfaceSpace` struct.
"""
function distance(p1::Parcel, p2::Parcel; method::DistanceMethod = CentroidToCentroid())
distance(method, p1, p2, p1.surface[:distances])
end
function distance(
::CentroidToCentroid, p1::Parcel, p2::Parcel, distances::DistanceMatrix
)
c1 = centroid(p1, distances)
c2 = centroid(p2, distances)
return distances[c1, c2]
end
function distance(
::ClosestVertices, p1::Parcel, p2::Parcel, distances::DistanceMatrix
)
return minimum(distances[vertices(p1), vertices(p2)])
end
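# Usage sketch (commented; assumes parcels `p1`, `p2` sharing a surface that
# has a geodesic distance matrix stored under the key `:distances`):
#   distance(p1, p2)                              # centroid-to-centroid (default)
#   distance(p1, p2; method = ClosestVertices())  # minimum over all vertex pairs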
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 4037 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
import Graphs
Base.setindex!(p::Parcel, args...) = setindex!(p.membership, args...)
Base.dotview(p::Parcel, args...) = view(p.membership, args...)
function Base.setindex!(px::HemisphericParcellation{T}, p::Parcel, k::T) where T
p.surface == px.surface || error("Surface incompatibility")
px.parcels[k] = p
end
function Graphs.Graph(p::Parcel, A::AdjacencyMatrix)
verts = vertices(p)
nvertices = length(p)
temp = spzeros(Bool, nvertices, nvertices)
temp[verts, verts] .= true
temp .*= A
return Graphs.Graph(temp)
end
"""
cut(p, A)
Cut articulation point(s), if any, from a graph representation of a `Parcel`, and return
a new set of `Parcel`s: one for each connected component remaining after the vertex cut.
"""
function cut(p::Parcel, A::AdjacencyMatrix)
g = Graphs.Graph(p, A)
a = Graphs.articulation(g)
Graphs.rem_vertices!(g, a)
clusters = filter(x -> length(x) > 1, Graphs.connected_components(g))
new_parcels = [Parcel(p.surface, c) for c in clusters]
return new_parcels
end
function cut(p::Parcel)
haskey(p.surface, :A) || error("Operation requires adjacency matrix `:A`")
return cut(p, p.surface[:A])
end
# TODO: this function is almost exactly the same as cut(p); not sure how to
# refactor though without type piracy
"""
split(p, v)
Remove vertices `v` from a graph representation of a `Parcel`, and return
a new set of `Parcel`s: one for each connected component remaining.
"""
function Base.split(p::Parcel, v::Vector{Int})
haskey(p.surface, :A) || error("Operation requires adjacency matrix `:A`")
g = Graphs.Graph(p, p.surface[:A])
Graphs.rem_vertices!(g, v)
clusters = filter(x -> length(x) > 1, Graphs.connected_components(g))
new_parcels = [Parcel(p.surface, c) for c in clusters]
return new_parcels
end
"""
clear!(p)
Zero-out all membership vertices of a `Parcel`.
"""
clear!(p::Parcel) = p.membership .*= false
"""
delete!(px, k)
Delete `Parcel` with ID `k` from a `Parcellation`.
"""
Base.delete!(px::HemisphericParcellation{T}, k::T) where T = delete!(px.parcels, k)
"""
append!(p, v)
Add vertex `v::Int` to parcel `p`'s membership vector.
"""
Base.append!(p::Parcel, v::Int) = p.membership[v] = true
Base.append!(p::Parcel, v::Vector{Int}) = p.membership[v] .= true
"""
merge!(p1, p2, A)
Merge two `Parcel`s by moving the member vertices of `p2` to `p1`.
"""
function Base.merge!(p1::Parcel, p2::Parcel, A::AdjacencyMatrix)
i = interstices(p1, p2, A)
sum(i) > 0 || return 0
union!(p1, i)
union!(p1, p2)
clear!(p2)
return size(p1)
end
function Base.merge!(p1::Parcel, p2::Parcel)
haskey(p1.surface, :A) || error("Operation requires adjacency matrix `A`")
p1.surface == p2.surface || error("Surfaces must be the same for both parcels")
return merge!(p1, p2, p1.surface[:A])
end
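# Usage sketch (commented; assumes `p1` and `p2` share a surface with an
# adjacency matrix `:A` and are separated by at most a 1-vertex-wide gap):
#   merge!(p1, p2)  # p1 absorbs p2's vertices plus the interstitial ones;
#                   # p2 is cleared, and the new size of p1 is returned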
"""
merge!(px, k1, k2, A)
Given a `Parcellation{T}` and two keys of type `T`, merge the two `Parcel`s denoted
by those keys and delete the latter one from the dictionary.
"""
function Base.merge!(px::HemisphericParcellation{T}, k1::T, k2::T, A::AdjacencyMatrix) where T
p1 = px[k1]
p2 = px[k2]
merge!(p1, p2, A)
delete!(px, k2)
return size(p1)
end
function Base.merge!(px::HemisphericParcellation{T}, k1::T, k2::T) where T
haskey(px.surface, :A) || error("Operation requires adjacency matrix `A`")
return merge!(px, k1, k2, px.surface[:A])
end
"""
deepcopy(p)
Make a new `Parcel` containing a `deepcopy` of original parcel `p`'s `membership`
vector. Note however that the surface remains just a reference and is not itself
copied, since it may be a large object.
"""
function Base.deepcopy(p::Parcel)
return Parcel(p)
end
"""
deepcopy(px)
Make a new parcellation containing a `deepcopy` of all parcels from `px`. Note however
that, as with `deepcopy(p::Parcel)`, the surface remains just a reference and is not
itself copied, since it may be a large object.
"""
function Base.deepcopy(px::HemisphericParcellation{T}) where T
px′ = HemisphericParcellation{T}(px.surface)
for k in keys(px)
px′[k] = deepcopy(px[k])
end
return px′
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 5134 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
"""
dilate!(p, A; limit = nothing)
Perform a single pass of dilation on `Parcel` `p`, guided by adjacency matrix `A`;
optionally specify a `limit::Int` on the number of new vertices that can be added.
"""
function dilate!(
p::Parcel, A::AdjacencyMatrix; limit::Union{Nothing, Int} = nothing
)
parcel_verts = vertices(p)
border_verts = setdiff(unique(A[:, parcel_verts].rowval), parcel_verts)
length(border_verts) > 0 || return 0 # return an Int so callers like resize! can count changes
if !isnothing(limit) && length(border_verts) > limit
border_verts = border_verts[1:limit]
end
border = Parcel(p.surface, border_verts)
union!(p, border)
return length(border_verts)
end
dilate!(p::Parcel; limit::Union{Nothing, Int} = nothing) =
dilate!(p, p.surface[:A]; limit = limit)
function dilate(p::Parcel, args...)
p′ = Parcel(p.surface, vertices(p))
dilate!(p′, args...)
return p′
end
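# Example from the test suite (commented; a single-vertex parcel on the
# 32492-vertex MSC01 surface, with an adjacency matrix :A attached via
# initialize_adjacencies!):
#   p = Parcel(hem, [17344])
#   dilate!(p)  # returns 6: the vertex gains its 6 immediate neighbors
#   dilate!(p)  # returns 12: the next ring of neighbors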
"""
erode!(p, neighbors; limit = nothing)
Perform a single pass of erosion on `Parcel` `p`, guided by adjacency list `neighbors`;
optionally specify a `limit::Int` on the number of vertices that you want to remove.
"""
function erode!(
p::Parcel, neighbors::AdjacencyList; limit::Union{Nothing, Int} = nothing
)
verts = vertices(p)
border_verts = verts[
[any(!(n in verts) for n in neighbors[v]) for v in verts]
]
if !isnothing(limit) && length(border_verts) > limit
border_verts = border_verts[1:limit]
end
setdiff!(p, border_verts)
return length(border_verts)
end
erode!(p::Parcel; limit::Union{Nothing, Int} = nothing) =
erode!(p, p.surface[:neighbors]; limit = limit)
function erode(p::Parcel, args...)
p′ = Parcel(p.surface, vertices(p))
erode!(p′, args...)
return p′
end
"""
close!(p, neighbors)
Given a `Parcel` `p` and an adjacency list `neighbors`, perform a morphological
closing to fill in gaps, if any, by adding vertices adjacent to `p` for which
no more than two neighbors fall outside of `p`. Note: for performance reasons,
this may not be technically quite the same as a true closing operation, `erode!(dilate!(p))`.
"""
function close!(p::Parcel, neighbors::AdjacencyList)
candidates = union([neighbors[v] for v in vertices(p)]...)
while true
add_inds = filter(x -> sum(.!p[neighbors[x]]) <= 2, candidates)
p2 = Parcel(p.surface, add_inds)
complement(p2, p) > 0 || break
union!(p, p2)
end
end
close!(p::Parcel) = close!(p, p.surface[:neighbors])
"""
resize!(p, desired_size, A, neighbors)
Resize a `Parcel` `p`, guided by an adjacency matrix and an adjacency list,
by repeated dilation or erosion until `p` reaches `desired_size`.
"""
function Base.resize!(
p::Parcel, desired_size::Int, A::AdjacencyMatrix, neighbors::AdjacencyList
)
curr_size = size(p)
Δ = curr_size - desired_size
while Δ != 0
if Δ < 0
nchanged = dilate!(p, A; limit = abs(Δ))
Δ += nchanged
else
nchanged = erode!(p, neighbors; limit = Δ)
Δ -= nchanged
end
if nchanged == 0
siz = size(p)
println("Could not achieve size $desired_size; stopped at $siz")
return siz
end
end
return desired_size
end
"""
resize!(p, desired_size)
Resize a `Parcel` `p`, using adjacency information from its `surface` field.
"""
Base.resize!(p::Parcel, desired_size::Int) =
resize!(p, desired_size, p.surface[:A], p.surface[:neighbors])
"""
interstices(p1, p2, A)
Find the vertices lying in the boundaries between two `Parcel`s.
"""
function interstices(p1::Parcel, p2::Parcel, A::AdjacencyMatrix)::BitVector
p1โฒ = dilate(p1, A)
p2โฒ = dilate(p2, A)
setdiff!(p1โฒ, p1)
setdiff!(p2โฒ, p2)
temp = intersect(p1โฒ, p2โฒ)
return membership(temp) .& Iterators.flatten(sum(A[:, membership(union(p1, p2))]; dims = 2) .> 2)
end
interstices(p1::Parcel, p2::Parcel) = interstices(p1, p2, p1.surface[:A])
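# Usage sketch (commented; in the test suite, `p2` is dilated until the two
# parcels are separated by a 1-vertex-wide gap):
#   margin_vertices = findall(interstices(p1, p2))  # indices of the gap vertices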
"""
interstices(px, A)
Iterate through a parcellation and find, for each pair of neighboring `Parcel`s
separated by a 1-vertex-wide gap, the vertices in that interstitial region.
"""
function interstices(px::HemisphericParcellation{T}, A::AdjacencyMatrix) where T
v = vec(px)
u = unassigned(px)
temp = @view A[:, u]
# find all unassigned vertices that have 2 or more parcels as neighbors
status = filter(
x -> length(x) >= 2,
ThreadsX.map(x -> sort(setdiff(v[x], 0)), eachcol(temp))
)
# from all unique parcel-parcel pairs discovered from the above,
# make a dict in which to store their interstitial vertices, if any
result = Dict{Tuple{T, T}, BitVector}()
for parcel_list in status
for x in parcel_list
for y in setdiff(parcel_list, x)
a = min(x, y)
b = max(x, y)
haskey(result, (a, b)) && continue
i = interstices(px[a], px[b], A) .& u
any(i) || continue
result[(a, b)] = i
end
end
end
return result
end
interstices(px::HemisphericParcellation) = interstices(px, px.surface[:A])
function borders(p::Parcel, neighbors::AdjacencyList)
verts = vertices(p)
counts = [sum(map(!in(verts), n)) for n in neighbors[verts]]
out = falses(length(p))
out[verts[counts .> 0]] .= true
return out
end
"""
borders(p)
Get a `BitVector` of just the vertices of `Parcel` `p` that lie on its outermost edge.
"""
function borders(p::Parcel)
return borders(p, p.surface[:neighbors])
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 2201 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
Base.intersect(p1::Parcel, p2::Parcel) = Parcel(p1.surface, p1.membership .& p2.membership)
Base.union(p1::Parcel, p2::Parcel) = Parcel(p1.surface, p1.membership .| p2.membership)
Base.setdiff(p1::Parcel, p2::Parcel) = Parcel(p1.surface, p1.membership .& .!p2.membership)
Base.intersect(p::Parcel, x::BitVector) = Parcel(p.surface, p.membership .& x)
Base.union(p::Parcel, x::BitVector) = Parcel(p.surface, p.membership .| x)
Base.setdiff(p::Parcel, x::BitVector) = Parcel(p.surface, p.membership .& .!x)
Base.intersect!(p1::Parcel, p2::Parcel) = p1.membership .&= p2.membership
Base.union!(p1::Parcel, p2::Parcel) = p1.membership .|= p2.membership
Base.setdiff!(p1::Parcel, p2::Parcel) = p1.membership .&= .!p2.membership
Base.intersect!(p::Parcel, x::BitVector) = p.membership .&= x
Base.union!(p::Parcel, x::BitVector) = p.membership .|= x
Base.setdiff!(p::Parcel, x::BitVector) = p.membership .&= .!x
Base.intersect!(p::Parcel, x::Vector{T}) where T <: Integer = p.membership .*= x
Base.union!(p::Parcel, x::Vector{T}) where T <: Integer = p.membership[x] .= true
Base.setdiff!(p::Parcel, x::Vector{T}) where T <: Integer = p.membership[x] .= false
"""
overlap(p1, p2)
Compute the number of member vertices shared between two `Parcel`s `p1`, `p2`.
"""
overlap(p1::Parcel, p2::Parcel) = p1.membership' * p2.membership
overlap(p::Parcel, x::Union{Vector{Bool}, BitVector}) = p.membership' * x
overlap(p::Parcel, px::HemisphericParcellation) = sum([overlap(p, px[k]) for k in keys(px)])
overlap(px::HemisphericParcellation, p::Parcel) = overlap(p, px)
overlap(x::Union{Vector{Bool}, BitVector}, p::Parcel) = overlap(p, x)
function overlap(px::HemisphericParcellation)
counts = zeros(Int, length(px))
for k in keys(px)
counts .+= px[k].membership
end
overlap_inds = findall(counts .> 1)
return sum(counts[overlap_inds] .- 1)
end
"""
complement(p1, p2)
Compute the number of member vertices in `Parcel` `p1` not shared by those of `p2`.
"""
complement(p1::Parcel, p2::Parcel) = p1.membership' * .!p2.membership
complement(p::Parcel, x::Union{Vector{Bool}, BitVector}) = p.membership' * .!x
complement(x::Union{Vector{Bool}, BitVector}, p::Parcel) = complement(p, x)
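# Usage sketch (commented): for two disjoint parcels p1 and p2, as in the tests,
#   overlap(p1, p2)     # == 0
#   complement(p1, p2)  # == size(p1), since no member of p1 falls inside p2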
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 1576 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
function Base.show(
io::IO, ::MIME"text/plain", p::Parcel; label::Any = nothing
)
siz = size(p)
len = length(p)
dens = round(density(p) * 100; digits = 2)
id_str = isnothing(label) ? "" : " [$label]"
print(io, "Parcel")
printstyled(io, id_str; bold = true)
print(io, " with $siz non-zero vertices out of $len")
print(io, " ($dens% dense)")
end
function Base.show(io::IO, mime::MIME"text/plain", px::HemisphericParcellation)
ks = @chain keys(px) collect sample(_, size(px); replace = false)
dens = Int(round(density(px) * 100; digits = 0))
print(io, "HemisphericParcellation{$(eltype(keys(px)))} with $(size(px)) parcels,")
print(io, " $dens% dense,")
print(io, " in a space of $(size(px.surface)) vertices")
print(io, "\n")
for i in 1:min(length(ks), 3)
print(io, " • ")
show(io, mime, px[ks[i]]; label = ks[i])
print(io, "\n")
end
if length(ks) > 4
println(io, " • ⋮")
end
if length(ks) > 3
print(io, " • ")
show(io, mime, px[ks[end]]; label = ks[end])
end
end
function Base.show(io::IO, mime::MIME"text/plain", px::BilateralParcellation)
println("CORTEX_LEFT:")
show(io, mime, px[L])
println("\nCORTEX_RIGHT:")
show(io, mime, px[R])
end
function Base.show(
io::IO, mime::MIME"text/plain", pxs::Vector{HemisphericParcellation{T}}
) where T
print(io, "Vector of $(length(pxs)) HemisphericParcellation with keys of type $T")
end
function Base.show(
io::IO, mime::MIME"text/plain", pxs::Vector{BilateralParcellation{T}}
) where T
print(io, "Vector of $(length(pxs)) BilateralParcellation with keys of type $T")
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 360 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
struct Parcel
surface::Hemisphere
membership::BitVector
end
abstract type AbstractParcellation end
struct HemisphericParcellation{T} <: AbstractParcellation
surface::Hemisphere
parcels::Dict{T, Parcel}
end
struct BilateralParcellation{T} <: AbstractParcellation
surface::CorticalSurface
parcels::Dict{BrainStructure, HemisphericParcellation{T}}
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 3977 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
using CorticalParcels
using CorticalSurfaces
using JLD
using Test
using Chain
using CIFTI
using Pkg.Artifacts
data_dir = artifact"CIFTI_test_files"
surf_file = joinpath(data_dir, "MSC01.jld")
temp = load(surf_file)
hems = Dict()
for hem in LR
coords = temp["pointsets"]["midthickness"][hem]
mw = temp["medial wall"][hem]
triangles = temp["triangle"][hem] # required for adjacency calculations below
hems[hem] = Hemisphere(hem, coords, mw; triangles = triangles)
end
c = CorticalSurface(hems[L], hems[R])
initialize_adjacencies!(c)
hem = c[L] # for most of these tests below we'll just deal with left hem for now
parcel_file = joinpath(data_dir, "test_parcels.dtseries.nii")
cifti_data = CIFTI.load(parcel_file)
types_to_test = [UInt16, Int64]
@testset "CorticalParcels.jl" begin
for dtype in types_to_test
px = HemisphericParcellation{dtype}(hem, cifti_data[L])
@test size(px) == length(setdiff(cifti_data[L], 0))
@test length(px) == 32492
@test all(trim(vec(px), hem) .== cifti_data[L])
parcel_sizes = [size(px[p]) for p in keys(px)]
@test sum(parcel_sizes) == sum(cifti_data[L] .!= 0)
parcel_vertices = [vertices(px[p]) for p in keys(px)]
@test all(length.(parcel_vertices) == parcel_sizes)
px′ = deepcopy(px)
@test px′.surface === px.surface
@test px′ == px
end
# things should work with arbitrary non-numeric types of T as well; test this
dtype = String
inds = [9, 99, 999, 9999]
temp = fill("unassigned", 32492)
temp[inds] .= "test"
px = HemisphericParcellation{dtype}(hem, temp)
@test size(px["test"]) == 4
@test size(px["unassigned"]) == 32492 - 4
# `vec(px)` is not possible however because we can only do this where T <: Real:
@test_throws MethodError vec(px)
end
@testset "demo.jl tests" begin
p1 = Parcel(hem, [17344])
p2 = Parcel(hem, [8423])
@test dilate!(p1) == 6
@test dilate!(p1) == 12
p1′ = deepcopy(p1)
dilate!(p1′)
erode!(p1′)
@test isequal(p1, p1′)
@test size(Parcel(hem)) == 0
test_sizes = [1, 100, 500, 1000, 10000, size(hem)]
for siz in test_sizes
resize!(p1′, siz)
@test size(p1′) == siz
end
clear!(p1′)
@test size(p1′) == 0
p1′ = deepcopy(p1)
limits = [0, 1, 3, 5, 10, 20, 30]
limits = [limits; reverse(limits)]
for limit in limits
dilate!(p1′; limit = limit)
end
@test size(p1′) == size(p1) + sum(limits)
border_verts = borders(p1)
@test findall(border_verts) == [17259, 17260, 17261, 17301, 17304, 17342, 17346, 17383, 17386, 17423, 17424, 17425]
while sum(interstices(p1, p2)) == 0
dilate!(p2)
end
@test overlap(p1, p2) == 0
@test complement(p1, p2) == size(p1)
@test complement(p2, p1) == size(p2)
margin_vertices = findall(interstices(p1, p2))
@test length(margin_vertices) == 3
px = HemisphericParcellation{Int}(hem)
px[1] = deepcopy(p1)
px[2] = deepcopy(p2)
@test size(px) == 2
@test_throws ErrorException px[2] = Parcel(Hemisphere(L, 9999))
merge!(px, 1, 2)
@test size(px) == 1 # just one parcel remains now
@test size(px[1]) == size(p1) + size(p2) + length(margin_vertices)
setdiff!(px[1], p2)
setdiff!(px[1], margin_vertices)
@test isequal(px[1], p1)
px[2] = deepcopy(p2)
append!(px[1], margin_vertices[1])
p3 = deepcopy(px[1])
union!(p3, p2)
@test size(p3) == 1 + size(p1) + size(p2)
orig_parcels = cut(p3)
@test isequal(orig_parcels[1], p2)
@test overlap(orig_parcels[2], p1) == size(p1) - 1
delete!(px, 1)
delete!(px, 2)
@test size(px) == 0
# load in a real parcellation from a CIFTI file:
parcel_file = joinpath(data_dir, "test_parcels.dtseries.nii")
temp = CIFTI.load(parcel_file)
px = BilateralParcellation{Int}(c, temp)
pxL = HemisphericParcellation{Int}(c[L], temp[L])
pxR = HemisphericParcellation{Int}(c[R], temp[R])
@test vec(px) == vcat(vec(pxL), vec(pxR)) == pad(temp[LR][:], c; sentinel = 0)
@test length(keys(px[L])) == 185
@test density(px[L]) ≈ 0.740613073987443
@test sum(unassigned(px[L])) == 8428
@test sum(nnz(px[L])) == length(px[L]) - sum(unassigned(px[L])) == sum(union(px[L]))
end
["MIT"] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | docs | 8518 | CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git
# CorticalParcels
This Julia package supplies a set of tools for conveniently and efficiently working with parcels, or regions of interest, in the context of the surface-space representation of the cerebral cortex. It builds upon the `Hemisphere` and `CorticalSurface` types (with supertype `SurfaceSpace`) from [CorticalSurfaces.jl](https://github.com/myersm0/CorticalSurfaces.jl) and provides the foundation for my implementation of an important parcel-generation and -evaluation method in [WatershedParcellation.jl](https://github.com/myersm0/WatershedParcellation.jl). The functions supplied are based on MATLAB code developed at Washington University by Tim Laumann and Evan Gordon from their 2016 paper ["Generation and Evaluation of a Cortical Area Parcellation from Resting-State Correlations."](https://pubmed.ncbi.nlm.nih.gov/25316338/).
A `Parcel` is a discrete region of interest on the cortical surface, and in this implementation is stored internally as a `BitVector` of vertices where each element denotes membership (`true` or `false`). The total length of that vector constitutes the surface-space representation in which the parcel is defined. The size of a parcel `size(p::Parcel)` is given as the number of non-zero elements of that vector, i.e. the number of vertices belonging to that parcel. This implementation was chosen to enable very fast performance of common operations such as getting the size, computing overlap with other parcels, dilating and eroding, etc, by reducing them internally to simple bitwise operations.
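For example (a minimal sketch, using the constructors described in the Usage section below):
```
using CorticalSurfaces, CorticalParcels
hem = Hemisphere(32492)     # the representational space
p = Parcel(hem, [5, 6, 7])  # membership is stored as a BitVector
size(p)                     # 3: the number of member vertices
length(p)                   # 32492: the length of the whole space
```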
A parcellation is a collection of `Parcel`s that all share the same space. It's typically the case that the parcels within it are non-overlapping, but nothing in this implementation enforces that.
As of version 0.7, the former `Parcellation` type has been replaced by `HemisphericParcellation`, which is functionally equivalent. It's been renamed to distinguish it from the new `BilateralParcellation` struct, which provides the capability of dealing with both hemispheres at the same time. Currently `BilateralParcellation` is available only as a convenient container and constructor for its left and right component `HemisphericParcellation`s; it has little functionality beyond that *yet*, so for the moment you can use it to store the hemispheres, but you'll have to iterate over them yourself in order to do work on each one individually.
The `HemisphericParcellation` struct contains two fields:
- `surface`: a `Hemisphere` supplying details of the geometry (particularly, the size of the space) that all its component `Parcel`s must conform to
- (if however the geometry is not of interest in your application, then a dummy surface can be created by, for example, `Hemisphere(32492)` where the only piece of information that's strictly required is the number of vertices, 32492 in this case)
- `parcels`: a `Dict{T, Parcel}` mapping keys of type `T` to parcels, where `T` can be any type (preferably one for which a `zero(::T)` method is defined) that you want to use as keys for accessing and labeling individual parcels
Rather than having to create the parcel dictionaries yourself, I anticipate that a parcellation will most often be initialized via its `BilateralParcellation(surface::SurfaceSpace, x::Vector{T})` constructor, since the `Vector{T}` representation is probably the way you read in an existing parcellation from disk, e.g. from a [CIFTI](https://github.com/myersm0/CIFTI.jl) file. See the Usage section below.
A parcellation (either hemispheric or bilateral) can be mapped back to a vanilla `Vector{T}` representation if desired via `vec(px::AbstractParcellation)`.
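For example (a sketch, assuming the `hem` defined in the Usage section and a label vector with no unassigned values):
```
x = rand(1:10, 32492)                      # one parcel ID per vertex
px = HemisphericParcellation{Int}(hem, x)
vec(px) == x                               # true
```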
Some notation notes: in the following documentation and in demos, `p`, `p1`, `p2` will refer to individual parcels; and `px` will refer to a whole parcellation.
## Performance
The performance is going to depend on several factors. The benchmarks below are based on a single-hemisphere parcellation of 185 parcels in a space of 32492 vertices, and compare the current `BitVector`-based implementation to an alternative using `SparseVector`s as well as to a naive `Vector{Int}` representation (simply a list of vertex index numbers).
- *Adding or removing vertices to/from a `Parcel`*. This is where the current implementation shines most, via operations like `union!(p1::Parcel, p2::Parcel)` and analogous calls to `setdiff!` and `intersect!`.
- *Computing the amount of overlap of two `Parcel`s*. This is fast because it reduces to just taking the dot product of their respective membership vectors.
- *Checking the size of a `Parcel`.* This is the only case where the current implementation lags behind alternatives.
- *Checking a `Parcellation` for unassigned values*. This is relatively "slow" compared to the `Parcel`-level operations supplied. But it should be needed infrequently enough that it doesn't matter much; and the present `BitVector` implementation is still faster than the alternatives.
| |`intersect!(p1, p2)`|`overlap(p1, p2)`|`size(p)`|`unassigned(px)`|
|:-------------|-------------------:|-------------------:|-------------------:|-------------------:|
|**`BitVector`**|**85 ns**|**108 ns**|104 ns|**22000** ns|
|`SparseVector`|3047 ns|812 ns|83 ns|39000 ns|
|`Vector`|7692 ns|49110 ns|**9 ns**|1024000 ns|
While the need to compute the size of a parcel is indeed a common operation and we'd like it to be as fast as possible, this implementation's considerable advantage in the other basic operations should still make it the clear frontrunner in most use cases.
If we assume for simplicity that the above operations occur equally often, the `SparseVector` implementation (used in this package version 0.1.0 only) achieves a 25x speedup relative to the naive case, and the present `BitVector` implementation (package version 0.2+) achieves a 48x speedup relative to the same. If we ignore the `unassigned(px)` call, the current implementation improves to a 191x speedup over baseline.
## Installation
Within Julia:
```
using Pkg
Pkg.add("CorticalParcels")
```
## Usage
A full demo of the basic functionality can be found in `examples/demo.jl`.
The package [CorticalSurfaces.jl](https://github.com/myersm0/CorticalSurfaces.jl) provides the definitions of `Hemisphere` and `CorticalSurface` types (and their supertype `SurfaceSpace`), on which many of the operations in this package depend. So first of all, load both packages and create a `Hemisphere` struct that will define the vertex space. At a minimum, you need to specify the number of vertices in that space, for example 32492; but see [CorticalSurfaces.jl](https://github.com/myersm0/CorticalSurfaces.jl) for further details.
```
using CorticalSurfaces
using CorticalParcels
hem = Hemisphere(32492) # create a Hemisphere of 32492 vertices that will define the space
```
### Constructors
Once a `Hemisphere` has been created (we'll call it `hem` here), the following are two basic ways in which to initialize a `Parcel`:
```
Parcel(hem) # create an empty parcel within the same space as `hem`
Parcel(hem, [5, 6, 7]) # create a parcel with 3 vertices within the same space as `hem`
```
A `HemisphericParcellation` can be initialized in several ways, such as:
```
hem = Hemisphere(32492) # create a Hemisphere of 32492 vertices
HemisphericParcellation{Int}(hem) # create an empty parcellation within that space
# as above, but this time fill the space with 10 randomly assigned parcels
HemisphericParcellation{Int}(hem, rand(1:10, 32492))
```
The above examples use `Int` as the initialization parameter, and this defines the type of key that will be assigned to each parcel. Any type should be usable, however, provided that its `typemax` can represent the largest value you anticipate needing to store. You could use `String` keys, for example, if you want to provide descriptive labels for your parcels and index them in that way.
### Accessors
`unassigned(px::HemisphericParcellation)` may be used to dynamically determine the elements in the vector space that are not assigned to any parcel.
`vec(px::AbstractParcellation)` will reduce the parcellation to a single `Vector{T}`. If you constructed `px` from a `Vector{T}` (and have not changed any of its elements), this operation should return that same vector.
[](https://github.com/myersm0/CorticalParcels.jl/actions/workflows/CI.yml?query=branch%3Amain)
["MIT"] | 0.2.0 | 0aa363f36f4d46f190859807c4f9849a3cf07a43 | code | 5272 | GtkMarkdownTextView | https://github.com/JuliaGtk/GtkMarkdownTextView.jl.git
module GtkMarkdownTextView
using Gtk4
import Gtk4: _GtkTextIter, create_tag, apply_tag
import Gtk4.GLib: gobject_move_ref, GObject
using Markdown
export MarkdownTextView, MarkdownColors
struct MarkdownColors
font_size::Int
color::String
background::String
highlight_color::String
highlight_background::String
end
MarkdownColors() = MarkdownColors(13, "#000", "#fff", "#111", "#eee")
mutable struct MarkdownTextView <: GtkTextView
handle::Ptr{GObject}
view::GtkTextView
buffer::GtkTextBuffer
function MarkdownTextView(m::Markdown.MD, prelude::String, mc::MarkdownColors = MarkdownColors())
buffer = GtkTextBuffer()
buffer.text = prelude
view = GtkTextView(buffer)
style_css(view,
"window, view, textview, buffer, text {
background-color: $(mc.background);
color: $(mc.color);
font-family: Monaco, Consolas, Courier, monospace;
margin:0px;
}"
)
#set_gtk_property!(view, :margin_left, 1)
view.monospace = true
view.wrap_mode = true
fs = mc.font_size
create_tag(buffer, "normal", font = "$fs")
create_tag(buffer, "h1", font = "bold $(fs+3)")
create_tag(buffer, "h2", font = "bold $(fs+2)")
create_tag(buffer, "h3", font = "bold $(fs+1)")
create_tag(buffer, "h4", font = "bold $(fs)")
create_tag(buffer, "h5", font = "$(fs)")
create_tag(buffer, "h6", font = "$(fs-1)")
create_tag(buffer, "bold", font = "bold $(fs)")
create_tag(buffer, "italic", font = "italic $fs")
create_tag(buffer, "code", font = "bold $fs",
foreground=mc.highlight_color, background=mc.highlight_background)
insert_MD!(buffer, m)
# tag(buffer, "normal", 1, length(buffer))
n = new(view.handle, view, buffer)
gobject_move_ref(n, view)
end
MarkdownTextView(m::String) = MarkdownTextView(Markdown.parse(m), "")
MarkdownTextView(m::String, prelude::String, mc::MarkdownColors = MarkdownColors()) = MarkdownTextView(Markdown.parse(m), prelude, mc)
MarkdownTextView(m::String, mc::MarkdownColors) = MarkdownTextView(Markdown.parse(m), "", mc)
end
function tag(buffer, what, i, j)
apply_tag(buffer, what,
_GtkTextIter(buffer, i), _GtkTextIter(buffer, j)
)
end
function style_css(w::GtkWidget, css::String)
sc = Gtk4.style_context(w)
push!(sc, GtkCssProvider(css), 600)
end
function insert_MD!(buffer, m::Markdown.Header{N}, i) where N
ip = i
insert!(buffer, "    ") # four characters, matching the `i += 4` below
i += 4
for el in m.text
i = insert_MD!(buffer, el, i)
end
tag(buffer, "h$(min(N,4))", ip, i)
i
end
function insert_MD!(buffer, m::Markdown.BlockQuote, i)
insert!(buffer, "│  ") # three characters, matching the `i += 3` below
i += 3
for el in m.content
i = insert_MD!(buffer, el, i)
end
i
end
function insert_MD!(buffer, m::String, i)
insert!(buffer, m)
i += length(m)
end
function insert_MD!(buffer, m::Markdown.LaTeX, i)
i = insert_MD!(buffer, m.formula, i)
end
function insert_MD!(buffer, m::Markdown.Paragraph, i)
# insert!(buffer, "\n\n")
# i += 2
for el in m.content
i = insert_MD!(buffer, el, i)
end
i
end
function insert_MD!(buffer, m::Markdown.Code, i)
insert!(buffer, m.code)
tag(buffer, "code", i, i+sizeof(m.code))
i += length(m.code)
end
function insert_MD!(buffer, m::Markdown.List, i)
marker = k -> m.ordered == -1 ? "•" : "$(k)."
for (k, it) in enumerate(m.items)
insert!(buffer, "    $(marker(k)) ")
i += 6 + (m.ordered == 1)
for el in it
i = insert_MD!(buffer, el, i)
end
insert!(buffer, "\n")
i += 1
end
i
end
tagname(m::Markdown.Italic) = "italic"
tagname(m::Markdown.Bold) = "bold"
function insert_MD!(buffer, m::T, i) where T <: Union{Markdown.Italic, Markdown.Bold}
ip = i
for el in m.text
i = insert_MD!(buffer, el, i)
end
tag(buffer, tagname(m), ip, i)
i
end
function insert_MD!(buffer, m, i)
if isdefined(m, :text)
for el in m.text
i = insert_MD!(buffer, el, i)
end
end
if isdefined(m, :content)
for el in m.content
i = insert_MD!(buffer, el, i)
end
end
i
end
function insert_MD!(buffer, m::Markdown.MD)
i = length(buffer)+1
for el in m.content
i = insert_MD!(buffer, el, i)
insert!(buffer, "\n\n")
i += 2
end
end
end
["MIT"] | 0.2.0 | 0aa363f36f4d46f190859807c4f9849a3cf07a43 | code | 803 | GtkMarkdownTextView | https://github.com/JuliaGtk/GtkMarkdownTextView.jl.git
using GtkMarkdownTextView, Test
using Gtk4
@testset "MarkdownTextView" begin
w = GtkWindow("")
md = """
IOBuffer([data::AbstractVector{UInt8}]; keywords...) -> IOBuffer
Create an in-memory I/O stream, which may optionally operate on a pre-existing array.
It may take optional keyword arguments:
- `read`, `write`, `append`: restricts operations to the buffer; see `open` for details.
- `truncate`: truncates the buffer size to zero length.
- `maxsize`: specifies a size beyond which the buffer may not be grown.
- `sizehint`: suggests a capacity of the buffer (`data` must implement `sizehint!(data, size)`).
When `data` is not given, the buffer will be both readable and writable by default.
"""
v = MarkdownTextView(md)
push!(w,v)
show(w)
sleep(1)
destroy(w)
end
["MIT"] | 0.2.0 | 0aa363f36f4d46f190859807c4f9849a3cf07a43 | docs | 1197 | GtkMarkdownTextView | https://github.com/JuliaGtk/GtkMarkdownTextView.jl.git
# GtkMarkdownTextView
[](https://github.com/JuliaGtk/GtkMarkdownTextView.jl/actions/workflows/CI.yml)
[](https://coveralls.io/github/jonathanBieler/GtkMarkdownTextView.jl?branch=master)
A widget to display simple markdown formatted text:

```julia
using Gtk4, GtkMarkdownTextView
w = GtkWindow("")
md = """
# h1 heading
## h2 heading
## h3 heading
*italic* normal **bold**
code
> quote
- item 1
- items 2
1. list
2. list2
"""
v = MarkdownTextView(md)
push!(w,v)
show(w)
```
The constructor can take a prelude text and color settings :
```julia
MarkdownTextView(m::String, prelude::String, mc::MarkdownColors = MarkdownColors())
```
The color settings are defined as :
```julia
struct MarkdownColors
font_size::Int
color::String
background::String
highlight_color::String
highlight_background::String
end
# default values
MarkdownColors() = MarkdownColors(13, "#000", "#fff", "#111", "#eee")
```
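For example, to render light-on-dark (a sketch; the color values are arbitrary CSS color strings):
```julia
mc = MarkdownColors(14, "#ddd", "#222", "#fff", "#444")
v = MarkdownTextView("# dark mode\n*italic* and `code`", "", mc)
```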
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 353 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using BenchmarkTools, SegmentIntersections
random_coordinates = 100 .* rand(500, 4);
segments = Segment{Float64}[]
for datap in eachrow(random_coordinates)
push!(segments, Segment(datap...))
end
println("Running brute force...")
@btime find_intersections_brute(segments);
println("Running Bentley-Ottman...")
@btime find_intersections(segments);
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 2588 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using PyPlot, SegmentIntersections
function plot_random_lines()
random_coordinates = 100 .* rand(100, 4);
segments = Segment{Float64}[]
for datap in eachrow(random_coordinates)
push!(segments, Segment(datap...))
end
intersections_brute = find_intersections_brute(segments);
intersections = find_intersections(segments);
fig, ax = plt.subplots(1,2, sharex=true, sharey=true, figsize=(10,4))
for segment in segments
for axis in ax
axis.plot(
[segment.p.x, segment.q.x],
[segment.p.y, segment.q.y],
color = "C0",
linewidth = 1,
)
end
end
ax[1].set_title("Brute force algorithm")
ax[2].set_title("Bentley-Ottmann algorithm")
xs = [point.x for point in intersections];
ys = [point.y for point in intersections];
ax[1].scatter(xs, ys, color = "red", s = 30)
xs = [point.x for point in intersections_brute];
ys = [point.y for point in intersections_brute];
ax[2].scatter(xs, ys, color = "red", s = 30)
plt.subplots_adjust(wspace=0, hspace=0)
fig.savefig("random_lines.png", dpi=300, bbox_inches="tight")
end
function plot_complete_graph(n=16, r=100)
theta_values = range(0, 2π, length=n+1)[1:end-1]
coordinates = [[r*cos(theta), r*sin(theta)] for theta in theta_values]
segments = Segment{Float64}[]
for i in 1:n
for j in (i+1):n
push!(segments, Segment(Point(coordinates[i]...), Point(coordinates[j]...)))
end
end
intersections_brute = find_intersections_brute(segments);
intersections_bo = find_intersections(segments, tol=1e-9);
fig, ax = plt.subplots(1,2, sharex=true, sharey=true, figsize=(10,4))
for segment in segments
for axis in ax
axis.plot(
[segment.p.x, segment.q.x],
[segment.p.y, segment.q.y],
color = "C0",
linewidth = 1,
)
end
end
ax[1].set_title("Brute force algorithm")
xs = [point.x for point in intersections_brute];
ys = [point.y for point in intersections_brute];
ax[1].scatter(xs, ys, color = "red", s = 30)
xs = [point.x for point in intersections_bo];
ys = [point.y for point in intersections_bo];
ax[2].scatter(xs, ys, color = "red", s = 30)
ax[2].set_title("Bentley-Ottmann algorithm")
plt.subplots_adjust(wspace=0, hspace=0)
fig.savefig("complete_graph.png", dpi=300, bbox_inches="tight")
end
plot_random_lines()
plot_complete_graph(14)
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 241 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
module SegmentIntersections
include("point.jl")
include("segment.jl")
include("bentley_ottmann/queue.jl")
include("bentley_ottmann/event.jl")
include("bentley_ottmann/status.jl")
include("bentley_ottmann/base.jl")
include("brute.jl")
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 1131 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export find_intersections_brute
"""
find_intersections_brute(segments::Vector{Segment{T}}) where {T<:AbstractFloat}
Compute all the intersections between the segments with a brute-force O(n^2)
algorithm that tests every segment against every other one. This method is
faster than the Bentley-Ottmann one when there are few segments and lots of
intersections.
# Examples
```jldoctest
julia> segments = [Segment(0, 0, 5, 5), Segment(0, 5, 5, 0)];

julia> find_intersections_brute(segments)
1-element Vector{Point{Float64}}:
 Point{Float64}(2.5, 2.5)
```
"""
function find_intersections_brute(segments::Vector{Segment{T}}) where {T<:AbstractFloat}
A = zeros((2, 2))
b = zeros(2)
n_segments = length(segments)
intersections = typeof(segments[1].p)[]
for i = 1:(n_segments-1)
s1 = segments[i]
for j = (i+1):n_segments
s2 = segments[j]
do_intersect, intersection = intersect!(s1, s2, A, b)
if do_intersect
push!(intersections, intersection)
end
end
end
return intersections
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 415 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export Point
struct Point{T<:Float64}
x::T
y::T
end
function Point(x::Int, y::Int)
return Point(convert(Float64, x), convert(Float64, y))
end
Base.:(==)(p::Point, q::Point) = (p.x == q.x) & (p.y == q.y)
Base.isless(p::Point, q::Point) = (p.y > q.y) | ((p.y == q.y) & (p.x < q.x))
Base.isapprox(p::Point, q::Point; rtol = 1e-6) =
isapprox(p.x, q.x, rtol = rtol) && isapprox(p.y, q.y, rtol = rtol)
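# Ordering sketch (commented): points sort in sweep-line order, top to bottom,
# with ties broken left to right, as exercised in the tests:
#   Point(3, 4) < Point(1, 2)  # true: higher y comes first
#   Point(1, 2) < Point(3, 2)  # true: same y, smaller x comes first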
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 3743 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export Segment,
Segments,
intersect!,
is_lower_end,
is_upper_end,
contains,
find_leftmost,
find_rightmost,
contains,
is_singular,
min_x,
min_y,
max_x,
max_y,
trivial_miss
struct Segment{T<:Float64}
p::Point{T}
q::Point{T}
slope::T
function Segment(p::Point{T}, q::Point{T}) where {T<:Float64}
if p > q
p, q = q, p
end
slope = (q.x - p.x) / (q.y - p.y)
return new{T}(p, q, slope)
end
end
Base.isless(s::Segment, t::Segment) = (s.p.y > t.p.y) | ((s.p.y == t.p.y) && (s.p.x < t.p.x))
Base.:(==)(s::Segment, t::Segment) = (s.p == t.p) && (s.q == t.q)
get_x(segment::Segment, y) = segment.p.x + segment.slope * (y - segment.p.y)
get_y(segment::Segment, x) = segment.p.y + (x - segment.p.x) / segment.slope
min_x(segment::Segment) = min(segment.p.x, segment.q.x)
min_y(segment::Segment) = min(segment.p.y, segment.q.y)
max_x(segment::Segment) = max(segment.p.x, segment.q.x)
max_y(segment::Segment) = max(segment.p.y, segment.q.y)
Segment(px, py, qx, qy) = Segment(Point(px, py), Point(qx, qy))
"""
Checks if the segment s is purely horizontal or vertical.
"""
function is_singular(s::Segment)
if (s.p.x == s.q.x) || (s.p.y == s.q.y)
return true
else
return false
end
end
function trivial_miss(s1::Segment, s2::Segment)
if isapprox(s1.slope, s2.slope; rtol = 1e-10) # parallel segments cannot cross
return true
elseif max_x(s1) < min_x(s2)
return true
elseif max_x(s2) < min_x(s1)
return true
elseif max_y(s1) < min_y(s2)
return true
elseif max_y(s2) < min_y(s1)
return true
end
return false
end
"""
Checks for the intersection of two segments s1, s2.
"""
function Base.intersect!(
s1::Segment{T},
s2::Segment{T},
A::Matrix{T},
b::Vector{T},
tol=1e-9
) where {T<:AbstractFloat}
if is_singular(s1) || is_singular(s2)
return false, Point(0.0, 0.0)
end
if trivial_miss(s1, s2)
return false, Point(0.0, 0.0)
end
A[1, 1] = s1.q.x - s1.p.x
A[1, 2] = s2.p.x - s2.q.x
A[2, 1] = s1.q.y - s1.p.y
A[2, 2] = s2.p.y - s2.q.y
b[1] = s2.p.x - s1.p.x
b[2] = s2.p.y - s1.p.y
sol = 0.0
try
sol = A \ b
catch
@warn "Singular matrix. Check for edge cases!"
return false, Point(0.0, 0.0)
end
if (-tol < sol[1] < 1+tol) && (-tol < sol[2] < 1+tol)
intersection =
Point(s1.p.x + sol[1] * (s1.q.x - s1.p.x), s1.p.y + sol[1] * (s1.q.y - s1.p.y))
return true, intersection
else
return false, Point(0.0, 0.0)
end
end
function Base.intersect!(s1::Segment{T}, s2::Segment{T}) where {T<:AbstractFloat}
A = zeros((2, 2))
b = zeros(2)
return intersect!(s1, s2, A, b)
end
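# Usage sketch (commented; values taken from the test suite):
#   s1 = Segment(2.3, 7.99, 10.64, 3.93)
#   s2 = Segment(2.86, 3.45, 11.0, 7.0)
#   intersect!(s1, s2)  # (true, Point(x, y)) with x ≈ 7.48, y ≈ 5.47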
is_lower_end(segment::Segment, Point::Point) = (segment.q == Point)
is_upper_end(segment::Segment, Point::Point) = (segment.p == Point)
function Base.contains(segment::Segment, point::Point, tol=1e-9)
if is_lower_end(segment, point) | is_upper_end(segment, point)
return false
end
y = get_y(segment, point.x)
if isapprox(y, point.y; atol = tol)
return true
else
return false
end
end
function find_leftmost(segment_set, y, tol=1e-9)
ret = nothing
xmin = Inf
for segment in segment_set
x = get_x(segment, y - tol)
if x < xmin
xmin = x
ret = segment
end
end
return ret, xmin
end
function find_rightmost(segment_set, y, tol=1e-9)
ret = nothing
xmax = -Inf # allow segments with negative x-coordinates
for segment in segment_set
x = get_x(segment, y - tol)
if x > xmax
xmax = x
ret = segment
end
end
return ret, xmax
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 2414 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export find_intersections
"""
find_intersections(segments::Vector{Segment{T}}; tol=1e-9) where {T<:AbstractFloat}
Compute all the intersections between the segments using the Bentley-Ottmann algorithm.
Setting a tolerance value that works for all situations is tricky, so try adjusting it if you
are finding that not all intersections are found.
# Examples
```jldoctest
julia> segments = [Segment(0, 0, 5, 5), Segment(0, 5, 5, 0)];

julia> find_intersections(segments)
1-element Vector{Point{Float64}}:
 Point{Float64}(2.5, 2.5)
```
"""
function find_intersections(segments::Vector{Segment{T}}; tol=1e-9) where {T<:AbstractFloat}
Q = EventQueue(segments)
y0 = Q.tree[1].y
status = Status(y0, tol)
intersections = Point{Float64}[]
while length(Q) != 0
event = fetch_event!(Q)
handle_event_point(Q, event, status, intersections)
end
return intersections
end
function handle_event_point(
Q::EventQueue,
event::Event,
status::Status,
intersections,
)
p = event.point
status.y_sweep = p.y
U = event.segments
L = Set{Segment}()
C = Set{Segment}()
for (key, segment) in status.dict
if is_lower_end(segment, p)
push!(L, segment)
elseif contains(segment, p, status.tol)
push!(C, segment)
end
end
CL = union(C, L)
total = union(CL, U)
if length(total) > 1
push!(intersections, p)
end
UC = union(U, C)
update!(status, insert=UC, delete=CL)
if length(UC) == 0
sl = find_left(status, p.x)
sr = find_right(status, p.x)
find_new_event(Q, sl, sr, p)
else
sp, xp = find_leftmost(UC, p.y, status.tol)
sl = find_left(status, xp)
find_new_event(Q, sl, sp, p)
spp, xpp = find_rightmost(UC, p.y, status.tol)
sr = find_right(status, xpp)
find_new_event(Q, spp, sr, p)
end
return
end
function find_new_event(Q::EventQueue, s1::Segment, s2::Segment, p::Point, tol=1e-9)
if s1 == s2
return
end
do_intersect, intersection = intersect!(s1, s2)
if do_intersect
if intersection.y < p.y
insert!(Q, intersection)
elseif (intersection.y ≈ p.y) && (intersection.x > p.x)
insert!(Q, intersection)
else
return
end
end
end
# for when s1 or s2 are nothing
find_new_event(Q::EventQueue, s1, s2, p::Point) = nothing
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 432 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export Event, Events, handle_event_point
struct Event{T}
point::Point{T}
segments::Set{Segment{T}}
end
Event(point::Point{T}) where {T<:Number} = Event(point, Set{Segment{T}}())
Event(x::Float64, y::Float64) = Event(Point(x, y))
Event(x::Number, y::Number) = Event(convert(Float64, x), convert(Float64, y))
Base.:(==)(e1::Event, e2::Event) = e1.point == e2.point
Base.isless(e1::Event, e2::Event) = e1.point < e2.point
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 1377 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
import Base: insert!, length, delete!
export EventQueue, fetch_event!, insert_event!
using DataStructures
struct EventQueue{T}
tree::AVLTree{Point{T}}
segments::Dict{Point{T},Set{Segment{T}}} # maps upper point -> list of segments
end
length(Q::EventQueue) = length(Q.tree)
function delete!(Q::EventQueue, p::Point)
delete!(Q.tree, p)
delete!(Q.segments, p)
end
EventQueue() =
EventQueue(AVLTree{Point{Float64}}(), Dict{Point{Float64},Set{Segment{Float64}}}())
function insert!(Q::EventQueue, segment::Segment)
p = segment.p # guaranteed to be the upper point
q = segment.q
if p ∈ Q.tree
push!(Q.segments[p], segment)
else
push!(Q.tree, p)
Q.segments[p] = Set([segment])
end
if q ∉ Q.tree
push!(Q.tree, q)
Q.segments[q] = Set(Segment[]) # we only store for the upper
end
end
function insert!(Q::EventQueue, p::Point)
if p ∉ Q.tree
push!(Q.tree, p)
end
end
function EventQueue(segments::Vector{Segment{T}}) where {T<:AbstractFloat}
Q = EventQueue()
for segment in segments
insert!(Q, segment)
end
return Q
end
function fetch_event!(Q::EventQueue)
p = Q.tree[1]
if p in keys(Q.segments)
segments = Q.segments[p]
else
segments = Set{Segment{Float64}}()
end
delete!(Q, p)
return Event(p, segments)
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 1247 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
export Status, find_left, find_right, insert!, update!, delete!
using DataStructures
mutable struct Status{T <: AbstractFloat}
dict::SortedDict{T, Segment{T}}
y_sweep::T
tol::T
end
Status(y0, tol=1e-9) = Status(SortedDict{Float64, Segment{Float64}}(), y0, tol)
function insert!(status::Status, segment::Segment)
y = status.y_sweep - status.tol
x = get_x(segment, y)
status.dict[x] = segment
end
function update!(status::Status; insert, delete)
y = status.y_sweep - status.tol
new_dict = SortedDict{Float64, Segment{Float64}}()
for (key, segment) in status.dict
if segment ∉ delete
new_dict[get_x(segment, y)] = segment
end
end
for segment in insert
new_dict[get_x(segment, y)] = segment
end
status.dict = new_dict
end
function find_left(status::Status, x)
x -= 1e-9
(length(status.dict) == 0) && return nothing
(x < first(status.dict).first) && return nothing
return status.dict[searchsortedlast(status.dict, x)]
end
function find_right(status::Status, x)
x += 1e-9
(length(status.dict) == 0) && return nothing
(x > last(status.dict).first) && return nothing
return status.dict[searchsortedfirst(status.dict, x)]
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 657 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using Test, SegmentIntersections
@testset "Test results" begin
segments = [
Segment(4.9, 3.56, 2.86, 1.62),
Segment(3.63, 3.88, 5.45, 2.32),
Segment(2.99, 3.29, 4.47, 1.8),
Segment(2.66, 2.85, 3.81, 1.44),
];
expected_intersections = [Point(4.48, 3.16), Point(3.78, 2.5), Point(3.31, 2.05)];
intersections = find_intersections_brute(segments)
for expected in expected_intersections
match = false
for intersection in intersections
if isapprox(intersection, expected, rtol = 1e-2)
match = true
end
end
@test match == true
end
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 773 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using SegmentIntersections, Test
@testset "create point" begin
p = Point(1,2)
@test p.x == 1.0
@test typeof(p.x) == Float64
@test p.y == 2.0
@test typeof(p.y) == Float64
p = Point(1.0,2.0)
@test p.x == 1.0
@test typeof(p.x) == Float64
@test p.y == 2.0
@test typeof(p.y) == Float64
end
@testset "Test point order" begin
@testset "Test different y" begin
p = Point(1,2)
q = Point(3,4)
@test q < p
@test q <= p
@test !(q > p)
@test !(q >= p)
@test !(q == p)
end
@testset "Test same y" begin
p = Point(1,2)
q = Point(3,2)
@test !(q < p)
@test !(q <= p)
@test (q > p)
@test (q >= p)
@test !(q == p)
end
end
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 286 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using SegmentIntersections, Test
include("point_tests.jl")
include("segment_tests.jl")
include("brute_tests.jl")
include("bentley_ottmann/event_tests.jl")
include("bentley_ottmann/queue_tests.jl")
include("bentley_ottmann/status_tests.jl")
include("bentley_ottmann/results_tests.jl")
["MIT"] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 3497 | SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git
using Test, SegmentIntersections
@testset "test segment init" begin
s1 = Segment(1,2,3,4)
@test s1.p == Point(3,4)
@test s1.q == Point(1,2)
end
@testset "Test min max segment" begin
s = Segment(1,2,3,4)
@test min_x(s) == 1
@test min_y(s) == 2
@test max_x(s) == 3
@test max_y(s) == 4
end
@testset "Test segment order" begin
s1 = Segment(1,2,3,4)
s2 = Segment(1,5,3,4)
@test s2 < s1
s1 = Segment(3,5,3,4)
s2 = Segment(1,5,3,4)
@test s2 < s1
end
@testset "Test singular segment" begin
s = Segment(1, 2, 1, 4)
@test is_singular(s) == true
s = Segment(3, 2, 3, 2)
@test is_singular(s) == true
s = Segment(1, 2, 3, 4)
@test is_singular(s) == false
end
@testset "Test trivial misses" begin
s1 = Segment(0,0,1,1)
s2 = Segment(0,0,2,2)
@test trivial_miss(s1, s2) == true
s1 = Segment(1,2,3,4)
s2 = Segment(4,2,5,4)
@test trivial_miss(s1, s2) == true
s2 = Segment(1,2,3,4)
s1 = Segment(4,2,5,4)
@test trivial_miss(s1, s2) == true
s1 = Segment(1,3,3,6)
s2 = Segment(4,7,5,8)
@test trivial_miss(s1, s2) == true
s2 = Segment(1,3,3,6)
s1 = Segment(4,7,5,8)
@test trivial_miss(s1, s2) == true
end
@testset "Test segment intersection" begin
# test trivial miss
segment1 = Segment(1,2,3,4)
segment2 = Segment(4,2,5,4)
do_intersect, intersection = intersect!(segment1, segment2)
@test do_intersect == false
@test intersection == Point(0.0, 0.0)
# do intersect
segment1 = Segment(2.3, 7.99, 10.64, 3.93)
segment2 = Segment(2.86, 3.45, 11.0, 7.0)
do_intersect, intersection = intersect!(segment1, segment2)
@test do_intersect == true
@test intersection.x ≈ 7.48 rtol = 1e-2
@test intersection.y ≈ 5.47 rtol = 1e-2
# don't intersect
segment1 = Segment(5, -2, 9.4, -2.79)
segment2 = Segment(4.82, -5.83, 7.2, -3.41)
do_intersect, intersection = intersect!(segment1, segment2)
@test do_intersect == false
@test intersection == Point(0.0, 0.0)
# ignore singular
segment1 = Segment(1, 2, 1, 4)
segment2 = Segment(1, 2, 3, 4)
do_intersect, intersection = intersect!(segment1, segment2)
@test do_intersect == false
@test intersection == Point(0.0, 0.0)
end
@testset "Test is end" begin
segment = Segment(1,2,3,4)
@test is_lower_end(segment, Point(3,4)) == false
@test is_lower_end(segment, Point(1,2)) == true
@test is_lower_end(segment, Point(10,2)) == false
@test is_upper_end(segment, Point(3,4)) == true
@test is_upper_end(segment, Point(1,2)) == false
@test is_upper_end(segment, Point(10,2)) == false
end
@testset "Test contains" begin
segment = Segment(1,2,3,4)
@test contains(segment, Point(1,2)) == false
@test contains(segment, Point(3,4)) == false
@test contains(segment, Point(2,3)) == true
@test contains(segment, Point(1.5,2.5)) == true
end
@testset "Test leftmost and rightmost" begin
segment1 = Segment(12.4, 5.56, 9.69, 2.85)
segment2 = Segment(12.4, 5.56, 11.71, 2.61)
segment3 = Segment(12.4, 5.56, 13.06, 2.68)
segment4 = Segment(12.4, 5.56, 14.55, 3.47)
segment_set = Set([segment1, segment2, segment3, segment4])
segment, xmin = find_leftmost(segment_set, 4)
@test segment == segment1
@test xmin ≈ 10.84
segment, xmax = find_rightmost(segment_set, 4)
@test segment == segment4
@test xmax ≈ 14.0 rtol=1e-2
end
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 205 | using Test, SegmentIntersections
@testset "Test event creation" begin
e1 = Event(1,2)
e2 = Event(0,5)
e3 = Event(1,2)
@test e1.point == Point(1,2)
@test e2 < e1
@test e1 == e3
end
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 691 | using Test, SegmentIntersections
@testset "Test queue" begin
Q = EventQueue()
s1 = Segment(1, 2, 3, 4)
s2 = Segment(5, 6, 3, 4)
s3 = Segment(4, 3, 2, 7)
s4 = Segment(4, 3, 2, 4)
s5 = Segment(0, 1, 2, 7)
insert!(Q, s1)
insert!(Q, s2)
insert!(Q, s3)
insert!(Q, s4)
insert!(Q, s5)
e = fetch_event!(Q)
@test e.point == Point(2,7)
@test e.segments == Set([s3, s5])
e = fetch_event!(Q)
@test e.point == Point(5,6)
@test e.segments == Set([s2])
e = fetch_event!(Q)
@test e.point == Point(2,4)
@test e.segments == Set([s4])
e = fetch_event!(Q)
@test e.point == Point(3,4)
@test e.segments == Set([s1])
end
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 1610 | using Test, SegmentIntersections
@testset "Test random lines" begin
random_data = 100 .* rand(100, 4);
segments = Segment{Float64}[]
for datap in eachrow(random_data)
push!(segments, Segment(datap...))
end
bo_intersections = find_intersections(segments)
brute_intersections = find_intersections_brute(segments)
@test length(bo_intersections) == length(brute_intersections)
for expected in brute_intersections
match = false
for intersection in bo_intersections
if isapprox(intersection, expected, rtol = 1e-2)
match = true
end
end
@test match == true
end
end
# This test does not currently pass
#@testset "Test complete graph" begin
# n = 4
# r = 100
# theta_values = range(0, 2π, length=n+1)[1:end-1]
# coordinates = [[r*cos(theta), r*sin(theta)] for theta in theta_values]
# segments = Segment{Float64}[]
# for i in 1:n
# for j in (i+1):n
# push!(segments, Segment(Point(coordinates[i]...), Point(coordinates[j]...)))
# end
# end
# bo_intersections = find_intersections(segments)
# brute_intersections = find_intersections_brute(segments)
# @test length(bo_intersections) == length(brute_intersections)
# matches = 0
# println("checking intersections...")
# for expected in brute_intersections
# for intersection in bo_intersections
# if isapprox(intersection, expected, rtol = 1e-2)
# matches += 1
# end
# end
# end
# @test matches == length(bo_intersections)
#end
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | code | 1900 | using Test, SegmentIntersections
@testset "Test insert segment" begin
status = Status(3.0)
segment = Segment(1,2,3,4)
insert!(status, segment)
xs = collect(keys(status.dict))
@test length(xs) == 1
@test xs[1] ≈ 2.0
@test status.dict[xs[1]] == segment
segment = Segment(0.41, 1.11, 6.15, 0.29)
status.y_sweep = 0.5
insert!(status, segment)
xs = collect(keys(status.dict))
@test length(xs) == 2
@test xs[1] ≈ 2.0
@test xs[2] ≈ 4.68
@test status.dict[xs[2]] == segment
end
@testset "Test update status" begin
status = Status(3.0)
segment = Segment(1,2,3,4)
insert!(status, segment)
segment_set = Set([Segment(10, 2, 12, 4), Segment(5,2,7,4)])
update!(status, insert=segment_set, delete=Set())
xs = collect(keys(status.dict))
@test xs[1] ≈ 2
@test status.dict[xs[1]] == Segment(1,2,3,4)
@test xs[2] ≈ 6
@test status.dict[xs[2]] == Segment(5,2,7,4)
@test xs[3] ≈ 11
@test status.dict[xs[3]] == Segment(10,2,12,4)
end
@testset "Test delete segment from status" begin
status = Status(3.0)
segment = Segment(1,2,3,4)
insert!(status, segment)
@test length(status.dict) == 1
xs = collect(keys(status.dict))
@test xs[1] ≈ 2
@test status.dict[xs[1]] == segment
update!(status, insert=Set(), delete=Set([segment]))
@test length(status.dict) == 0
xs = collect(keys(status.dict))
@test length(xs) == 0
end
@testset "Test find segment side of point" begin
segment1 = Segment(7.0, 10.0, 4.48, 5.09)
segment2 = Segment(7.76, 9.33, 9.4, 7.83)
segment3 = Segment(14.3, 7.61, 16.3, 9.57)
status = Status(0.0)
point = Point(11.8, 8.69)
status.y_sweep = point.y
update!(status, insert=Set([segment1, segment2, segment3]), delete=Set())
@test find_left(status, point.x) == segment2
@test find_right(status, point.x) == segment3
end
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.1.0 | c3648d7f66b8308deeb5367c38e9270f2408ea60 | docs | 1242 | [](https://github.com/arnauqb/SegmentIntersections.jl/actions/workflows/blank.yml)
[](https://codecov.io/gh/arnauqb/SegmentIntersections.jl)
# SegmentIntersections.jl
This package implements two algorithms for computing the intersection points between a set of finite segments.
* A brute force algorithm, in which each segment is tested for intersection against all the other segments. The scaling of this algorithm is thus O(N^2).
* The [Bentley-Ottmann algorithm](https://en.wikipedia.org/wiki/Bentley%E2%80%93Ottmann_algorithm), which should scale much better for a large number of segments. However, in many situations the brute force approach still performs better; this may be because the BO implementation needs further memory optimization. A minimal usage sketch is shown below.
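Both entry points take a vector of `Segment`s and return the intersection points. The sketch below assumes only the constructors and functions exercised in the package's own tests (`Segment(x1, y1, x2, y2)`, `find_intersections`, `find_intersections_brute`):

```julia
using SegmentIntersections

# two segments that cross (coordinates taken from the package's tests)
segments = [Segment(2.3, 7.99, 10.64, 3.93), Segment(2.86, 3.45, 11.0, 7.0)]

points_bo = find_intersections(segments)           # Bentley-Ottmann sweep line
points_brute = find_intersections_brute(segments)  # O(N^2) pairwise check
```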

The intersections for a complete K-graph still need to be debugged, probably a tolerance error.

# Limitations
The algorithm currently ignores purely horizontal and vertical segments; support for them will probably be added in the future.
| SegmentIntersections | https://github.com/arnauqb/SegmentIntersections.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1336 |
using RDatasets, JDF, DataFrames
a = dataset("datasets", "iris");
first(a, 2)
@time jdffile = JDF.save("iris.jdf", a)
@time a2 = DataFrame(JDF.load("iris.jdf"))
all(names(a2) .== names(a)) # true
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)])) #true
a2_selected = DataFrame(JDF.load("iris.jdf", cols = [:Species, :SepalLength, :PetalWidth]))
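# the jdf"..." string macro and the JDFFile constructor (next two lines) are equivalent ways to refer to a JDF folder on disk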
jdf"path/to/JDF.jdf"
path_to_JDF = "path/to/JDF.jdf"
JDFFile(path_to_JDF)
afile = JDFFile("iris.jdf")
afile[:Species] # load Species column
using Tables
ajdf = JDFFile("iris.jdf")
Tables.columnaccess(ajdf)
Tables.columns(ajdf)
Tables.schema(ajdf)
getproperty(Tables.columns(ajdf), :Species)
jdffile = jdf"iris.jdf"
for col in eachcol(jdffile)
# do something to col
# where `col` is the content of one column of iris.jdf
end
jdffile = jdf"iris.jdf"
for (name, col) in zip(names(jdffile), eachcol(jdffile))
# `name::Symbol` is the name of the column
# `col` is the content of one column of iris.jdf
end
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
JDF.save(df, "plsdel.jdf")
names(jdf"plsdel.jdf") # [:a, :b]
# clean up
rm("plsdel.jdf", force = true, recursive = true)
@time jdffile = ssavejdf("iris.jdf", a)
@time jdffile = sloadjdf("iris.jdf")
type_compress!(df)
type_compress!(df, compress_float = true)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 257 | # Weave readme
using Pkg
cd("c:/git/JDF/readme-build")
Pkg.activate("c:/git/JDF/readme-build")
Pkg.update()
upcheck()
using Weave
weave("../README.jmd", out_path="./", doctype="github")
if false
# debug
tangle("../README.jmd")
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 9651 | using CSV, Feather
#using JLD2
#using JLD#, JLSO
using JDF, FileIO, Blosc, StatsPlots, RCall
using DataFrames, WeakRefStrings # required for JLD2, JDF
Blosc.set_num_threads(6)
gen_benchmark(dirpath, largest_file, outpath, data_label; delim = ',', header=true) = begin
if !isdir(outpath)
mkpath(outpath)
end
csv_read1 = @elapsed df = CSV.read(joinpath(dirpath, largest_file), delim = delim, header = header, threaded=false);
csv_read2 = @elapsed CSV.read(joinpath(dirpath, largest_file), delim = delim, header = header, threaded=false);
csv_write1 = 0
csv_write2 = 0
try
csv_write1 = @elapsed CSV.write(joinpath(outpath, largest_file*".csv"), df);
csv_write2 = @elapsed CSV.write(joinpath(outpath, largest_file*".csv"), df);
catch err
end
R"""
library(data.table)
library(fst)
# memory.limit(Inf)
data_table_read1 = system.time(a <- data.table::fread($(joinpath(dirpath, largest_file))))[3]
data_table_read2 = system.time(data.table::fread($(joinpath(dirpath, largest_file))))[3]
data_table_write1 = system.time(data.table::fwrite(a, $(joinpath(outpath, largest_file*".data.table.csv"))))[3]
data_table_write2 = system.time(data.table::fwrite(a, $(joinpath(outpath, largest_file*".data.table.csv"))))[3]
fst_write1 = system.time(fst::write_fst(a, $(joinpath(outpath, largest_file*".fst"))))[3]
fst_write2 = system.time(fst::write_fst(a, $(joinpath(outpath, largest_file*".fst"))))[3]
fst_read1 = system.time(fst::read_fst($(joinpath(outpath, largest_file*".fst"))))[3]
fst_read2 = system.time(fst::read_fst($(joinpath(outpath, largest_file*".fst"))))[3]
parquet_r_write1 = system.time(arrow::write_parquet(a, $(joinpath(outpath, largest_file*".parquet"))))[3]
parquet_r_write2 = system.time(arrow::write_parquet(a, $(joinpath(outpath, largest_file*".parquet"))))[3]
parquet_r_read1 = system.time(arrow::read_parquet($(joinpath(outpath, largest_file*".parquet"))))[3]
parquet_r_read2 = system.time(arrow::read_parquet($(joinpath(outpath, largest_file*".parquet"))))[3]
rm(a)
gc()
"""
@rget data_table_read1
@rget data_table_read2
@rget data_table_write1
@rget data_table_write2
@rget fst_write1
@rget fst_write2
@rget fst_read1
@rget fst_read2
@rget parquet_r_read1
@rget parquet_r_read2
@rget parquet_r_write1
@rget parquet_r_write2
# jlso_write1 = 0
# jlso_write2 = 0
# try
# jlso_write1 = @elapsed JLSO.save(joinpath(outpath, largest_file*".jlso"), df);
# jlso_write2 = @elapsed JLSO.save(joinpath(outpath, largest_file*".jlso"), df);
# catch err
# end
# jld2_write1 = 0
# jld2_write2 = 0
# try
# jld2_write1 = @elapsed save(joinpath(outpath, largest_file*".jld2"), Dict("df" => df));
# jld2_write2 = @elapsed save(joinpath(outpath, largest_file*".jld2"), Dict("df" => df));
# catch err
# end
# jld_write1 = 0
# jld_write2 = 0
# try
# jld_write1 = @elapsed save(joinpath(outpath, largest_file*".jld"), Dict("df" => df));
# jld_write2 = @elapsed save(joinpath(outpath, largest_file*".jld"), Dict("df" => df));
# catch err
# end
jdf_write1 = 0
jdf_write2 = 0
try
jdf_write1 = @elapsed savejdf(joinpath(outpath, largest_file*".jdf"), df);
jdf_write2 = @elapsed savejdf(joinpath(outpath, largest_file*".jdf"), df);
catch err
end
# # feather can't handle all missing
for n in names(df)
if eltype(df[!,n]) == Missing
println("Removed $n for Feather.jl")
select!(df, Not(n))
df[!,n] = Vector{Union{Missing, Bool}}(missing, size(df, 1))
end
end
feather_write1 = 0
feather_write2 = 0
try
feather_write1 = @elapsed Feather.write(joinpath(outpath, largest_file*".feather"), df);
feather_write2 = @elapsed Feather.write(joinpath(outpath, largest_file*".feather"), df);
catch err
end
######################################### loading
feather_read1 = 0
feather_read2 = 0
try
feather_read1 = @elapsed Feather.read(joinpath(outpath, largest_file*".feather"));
feather_read2 = @elapsed Feather.read(joinpath(outpath, largest_file*".feather"));
catch err
end
# jld_read1 = 0
# jld_read2 = 0
# try
# jld_read1 = @elapsed load(joinpath(outpath, largest_file*".jld"))["df"];
# jld_read2 = @elapsed load(joinpath(outpath, largest_file*".jld"))["df"];
# catch err
# end
# jld2_read1 = 0
# jld2_read2 = 0
# try
# jld2_read1 = @elapsed load(joinpath(outpath, largest_file*".jld2"))["df"];
# jld2_read2 = @elapsed load(joinpath(outpath, largest_file*".jld2"))["df"];
# catch err
# end
#
# jlso_read1 = 0
# jlso_read2 = 0
# try
# jlso_read1 = @elapsed JLSO.load(joinpath(outpath, largest_file*".jlso"))["data"];
# jlso_read2 = @elapsed JLSO.load(joinpath(outpath, largest_file*".jlso"))["data"];
# catch err
# end
jdf_read1 = 0
jdf_read2 = 0
try
jdf_read1 = @elapsed loadjdf(joinpath(outpath, largest_file*".jdf"));
jdf_read2 = @elapsed loadjdf(joinpath(outpath, largest_file*".jdf"));
catch err
end
# write_perf = [jdf_write1, jdf_write2, csv_write1, csv_write2, feather_write1, feather_write2, jld2_write1, jld2_write2, jlso_write1, jlso_write2]
# read_perf = [jdf_read1, jdf_read2, csv_read1, csv_read2, feather_read1, feather_read2, jld2_read1, jld2_read2, jlso_read1, jlso_read2]
write_perf = [jdf_write1, jdf_write2, csv_write1, csv_write2, feather_write1, feather_write2, data_table_write1, data_table_write2, fst_write1, fst_write2, parquet_r_write1, parquet_r_write2]
read_perf = [jdf_read1, jdf_read2, csv_read1, csv_read2, feather_read1, feather_read2, data_table_read1, data_table_read2, fst_read1, fst_read2, parquet_r_read1, parquet_r_read2]
pkgs = repeat(["JDF.jl", "CSV.jl", "Feather.jl", "data.table", "fst", "parquet R"], inner = 2)
run_group = repeat(["1st", "2nd"], outer = 6)
plot_write = groupedbar(
pkgs,
write_perf,
group = run_group,
ylab = "Seconds",
title = "Disk-format Write performance comparison \n Data: $data_label data \n Size: $(size(df)) filesize:$(round(filesize(joinpath(dirpath, largest_file))/1024^3, digits=1))GB \n Julia $(VERSION)"
)
savefig(plot_write, joinpath(outpath, largest_file*"plot_write.png"))
# plot_write_wo_jlso = groupedbar(
# repeat(["JDF.jl", "CSV.jl", "Feather.jl", "JLD2.jl"], inner = 2),
# [jdf_write1, jdf_write2, csv_write1, csv_write2, feather_write1, feather_write2, jld2_write1, jld2_write2],
# group = repeat(["1st", "2nd"], outer = 4),
# ylab = "Seconds",
# title = "Disk-format Write performance comparison \n Data: $data_label data \n Size: $(size(df)) filesize:$(round(filesize(joinpath(dirpath, largest_file))/1024^3, digits=1))GB \n Julia $(VERSION)"
# )
# savefig(plot_write_wo_jlso, joinpath(outpath, largest_file*"plot_write_wo_jlso.png"))
plot_read = groupedbar(
pkgs,
read_perf,
group = run_group,
ylab = "Seconds",
title = "Disk-format Read performance comparison \n Data: $data_label data \n Size: $(size(df)) filesize:$(round(filesize(joinpath(dirpath, largest_file))/1024^3, digits=1))GB \n Julia $(VERSION)"
)
savefig(plot_read, joinpath(outpath, largest_file*"plot_read.png"))
# plot_read_wo_csv_jlso = groupedbar(
# repeat(["JDF.jl", "Feather.jl", "JLD2.jl"], inner = 2),
# [jdf_read1, jdf_read2, feather_read1, feather_read2, jld2_read1, jld2_read2],
# group = repeat(["1st", "2nd"], outer = 3),
# ylab = "Seconds",
# title = "Disk-format Read performance comparison \n Data: $data_label data \n Size: $(size(df)) filesize:$(round(filesize(joinpath(dirpath, largest_file))/1024^3, digits=1))GB \n Julia $(VERSION)"
# )
# savefig(plot_read_wo_csv_jlso, joinpath(outpath, largest_file*"plot_read_wo_csv_jlso.png"))
#
# plot_read_wo_jlso = groupedbar(
# repeat(["JDF.jl", "CSV.jl", "Feather.jl", "JLD2.jl"], inner = 2),
# [jdf_read1, jdf_read2, csv_read1, csv_read2, feather_read1, feather_read2, jld2_read1, jld2_read2],
# group = repeat(["1st", "2nd"], outer = 4),
# ylab = "Seconds",
# title = "Disk-format Read performance comparison \n Data: $data_label data \n Size: $(size(df)) filesize:$(round(filesize(joinpath(dirpath, largest_file))/1024^3, digits=1))GB \n Julia $(VERSION)"
# )
# savefig(plot_read_wo_jlso, joinpath(outpath, largest_file*"plot_read_wo_jlso.png"))
(write_perf, read_perf, dirpath, outpath, largest_file, df)
end
sum_file_size(dir) = begin
res = Int[]
for f in joinpath.(dir, readdir(dir))
if isfile(f)
push!(res, filesize(f))
elseif isdir(f)
println(f)
push!(res, sum(filesize.(joinpath.(f,readdir(f)))))
end
end
df = DataFrame(file = readdir(dir), fs = res)
tmpfn = (x->x[end-3:end])
df[!,:ext] = tmpfn.(df[!, :file])
filter!(r -> r.ext != ".png", df)
df = sort!(df, :ext)[[1;3:4;6:7], :]
df[!, :pkg] = ["CSV", "fst", "JDF.jl", "parquet R", "Feather.jl"]
sort!(df, :pkg)
df
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1109 | # dirpath = "C:/data/Performance_All/"
# largest_file = "Performance_2000Q4.txt"
# outpath = "c:/data"
#
# download("https://packages.revolutionanalytics.com/datasets/AirOnTime87to12/AirOnTimeCSV.zip")
# ;unzip AirOnTimeCSV.zip -d c:/data/AirOnTimeCSV
# Uncomment for debugging
dirpath = "c:/data/AirOnTimeCSV/"
largest_file = "airOT199302.csv"
outpath = "c:/data/jdf-bench/airOT199302.csv"
data_label = "Air On Time 199302"
delim = ','
header = true
include("C:/Users/RTX2080/git/JDF/benchmarks/benchmarks.jl")
@time res = gen_benchmark("c:/data/AirOnTimeCSV/", "airOT199302.csv", "c:/data/jdf-bench/airOT199302.csv", data_label)
# gen_benchmark("C:/data/Performance_All/", "Performance_2000Q4.txt", "c:/data/jdf-bench/Performance_2000Q4.txt", "Fannie Mae Performance 2000Q4")
sizedf = sum_file_size(outpath)
using StatsPlots
p = plot(
sizedf.pkg,
sizedf.fs/1024^3,
linetype = :bar,
ylab = "Size (GB)",
legend = false,
title = "On-disk file Size for various formats\n $data_label data")
savefig(p, joinpath(outpath, largest_file*"_filesize.png"))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 3404 | # Fannie Mae
# download("http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/mortgage_2000-2007.tgz", "c:/data/mortgage_2000-2007.tgz")
# un-tar and uncompress the file
# ;tar zxvf mortgage_2000-2007.tgz
# uncomment for debugging
dirpath = "C:/data/perf/"
largest_file = "Performance_2004Q3.txt"
outpath = "c:/data/jdf-bench/Performance_2004Q3.txt"
data_label = "Fannie Mae Performance 2004Q3"
# delim = ','
# header = true
include("C:/Users/RTX2080/git/JDF/benchmarks/benchmarks.jl")
# @time gen_benchmark("c:/data/AirOnTimeCSV/", "airOT199302.csv", "c:/data/jdf-bench/airOT199302.csv", "Air On Time 199302")
@time res = gen_benchmark(dirpath, largest_file, outpath, data_label, delim = '|', header = false);
write_perf = res[1]
read_perf = res[2]
pkgs = repeat(["JDF.jl", "CSV.jl", "Feather.jl", "data.table", "fst", "parquet R"], inner = 2)
run_group = repeat(["1st", "2nd"], outer = 6)
write_perf = write_perf[pkgs .!= "Feather.jl"]
run_group = run_group[pkgs .!= "Feather.jl"]
pkgs1 = pkgs[pkgs .!= "Feather.jl"]
plot_write = groupedbar(
pkgs1,
write_perf,
group = run_group,
ylab = "Seconds",
title = "Disk-format Write performance comparison \n Julia $(VERSION)"
)
savefig(plot_write, joinpath(outpath, largest_file*"plot_write_less.png"))
pkgs = repeat(["JDF.jl", "CSV.jl", "Feather.jl", "data.table", "fst", "parquet R"], inner = 2)
run_group = repeat(["1st", "2nd"], outer = 6)
read_perf = read_perf[.!in.(pkgs, Ref(["CSV.jl", "parquet R"]))]
run_group = run_group[.!in.(pkgs, Ref(["CSV.jl", "parquet R"]))]
pkgs2 = pkgs[.!in.(pkgs, Ref(["CSV.jl", "parquet R"]))]
plot_read = groupedbar(
pkgs2,
read_perf,
group = run_group,
ylab = "Seconds",
title = "Disk-format Read performance comparison \n Julia $(VERSION)"
)
savefig(plot_read, joinpath(outpath, largest_file*"plot_read_less.png"))
sizedf = sum_file_size(outpath)
using StatsPlots
p = plot(
sizedf.pkg,
sizedf.fs/1024^3,
linetype = :bar,
ylab = "Size (GB)",
legend = false,
title = "On-disk file Size for various formats\n $data_label data")
savefig(p, joinpath(outpath, largest_file*"_filesize.png"))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1068 | # Fannie Mae
# download("http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/mortgage_2000.tgz", "c:/data/mortgage_2000.tgz")
# un-tar and uncompress the file
# ;tar zxvf c:/data/mortgage_2000.tgz
# uncomment for debugging
dirpath = "C:/data/Performance_All/"
largest_file = "Performance_2000Q4.txt"
outpath = "c:/data/jdf-bench/Performance_2000Q4.txt"
data_label = "Fannie Mae Performance 2000Q4"
# delim = ','
# header = true
include("C:/Users/RTX2080/git/JDF/benchmarks/benchmarks.jl")
# @time gen_benchmark("c:/data/AirOnTimeCSV/", "airOT199302.csv", "c:/data/jdf-bench/airOT199302.csv", "Air On Time 199302")
@time gen_benchmark(dirpath, largest_file, outpath, data_label, delim = '|', header = false);
sizedf = sum_file_size(outpath)
using StatsPlots
p = plot(
sizedf.pkg,
sizedf.fs/1024^3,
linetype = :bar,
ylab = "Size (GB)",
legend = false,
title = "On-disk file Size for various formats\n $data_label data")
savefig(p, joinpath(outpath, largest_file*"_filesize.png"))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 344 | using Documenter
using JDF
makedocs(
sitename = "JDF",
format = Documenter.HTML(),
modules = [JDF]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
#=deploydocs(
repo = "<repository url>"
)=#
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1483 | module JDF
using Blosc: Blosc
using Missings: Missings
using BufferedStreams
#using RLEVectors
using WeakRefStrings, TimeZones
using StatsBase: rle, inverse_rle, countmap, sample
import Base: show, getindex, setindex!, eltype, names
using Base: size
using Serialization: serialize, deserialize
import Base.Threads: @spawn
function __init__()
Blosc.set_num_threads(Threads.nthreads())
end
export savejdf, loadjdf
export column_loader, column_loader!
export type_compress!, type_compress
export compress_then_write
export JDFFile, @jdf_str, jdfmetadata, metadata, size, names
export IsBitsType, eachcol, some_elm, getindex, istable
include("JDFFile.jl")
include("type-writer-loader/Bool.jl")
include("type-writer-loader/Char.jl")
include("type-writer-loader/DateTime.jl")
include("type-writer-loader/categorical-arrays.jl")
include("type-writer-loader/pooled-arrays.jl")
include("type-writer-loader/Missing.jl")
include("type-writer-loader/Nothing.jl")
include("type-writer-loader/String.jl")
include("type-writer-loader/StringArray.jl")
include("type-writer-loader/Symbol.jl")
include("type-writer-loader/ZonedDateTime.jl")
include("type-writer-loader/substring.jl")
include("column_loader.jl")
include("compress_then_write.jl")
include("load-columns.jl")
include("loadjdf.jl")
include("savejdf.jl")
include("type_compress.jl")
include("metadata.jl")
include("eachcol.jl")
include("Tables.jl")
end # module
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1921 | export JDFFile, @jdf_str, path, getindex
import Base: getindex, view
"""
jdf"path/to/JDFfile.jdf"
JDFFile("path/to/JDFfile.jdf")
Define a JDF file, which you use with methods like `names` and `size`.
## Example
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
savejdf(df, "plsdel.jdf")
names(jdf"plsdel.jdf") # [:a, :b]
size(jdf"plsdel.jdf") # (2, 3)
size(jdf"plsdel.jdf", 1) # (2, 3)
size(jdf"plsdel.jdf", 1) # (2, 3)
# clean up
rm("plsdel.jdf", force = true, recursive = true)
"""
struct JDFFile{T<:AbstractString}
path::T
end
"""
jdf"path/to/JDFfile.jdf"
JDFFile("path/to/JDFfile.jdf")
Define a JDF file, to which you can apply `names` and `size`.
## Example
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
savejdf(df, "plsdel.jdf")
names(jdf"plsdel.jdf") # [:a, :b]
ncol(jdf"plsdel.jdf") # 2
size(jdf"plsdel.jdf") # (2, 3)
size(jdf"plsdel.jdf", 1) # (2, 3)
size(jdf"plsdel.jdf", 1) # (2, 3)
# clean up
rm("plsdel.jdf", force = true, recursive = true)
"""
macro jdf_str(path)
return :(JDFFile($path))
end
"""
path(jdf::JDFFile)
Return the path of the JDF
"""
path(jdf) = getfield(jdf, :path)
function Base.getindex(file::JDFFile, col::String)
getindex(file, Symbol(col))
end
function Base.getindex(file::JDFFile, col::Symbol)
# TODO make it load from column loader for faster access
JDF.load(file; cols = [col])[col]
end
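# e.g. jdf"iris.jdf"[:Species] loads only the Species column (as in the README example)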
function Base.getindex(file::JDFFile, rows, col::String)
# TODO make it load from column loader for faster access
JDF.load(file; cols = [col])[rows, Symbol(col)]
end
function Base.getindex(file::JDFFile, rows, cols::AbstractVector{String})
JDF.load(file; cols = cols)[rows, :]
end
Base.view(file::JDFFile, rows, cols) = getindex(file, rows, cols)
getindex(file::JDFFile, rows, cols) = JDF.load(file)[rows, cols]
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1157 | import Tables: rows, columns, istable, rowaccess, columnaccess, schema, Schema
import Base: propertynames, getproperty, getindex
export istable, columns
istable(::Type{JDFFile}) = true
istable(::JDFFile) = true
rowaccess(::JDFFile) = false
columnaccess(::JDFFile) = true
rowaccess(::Type{<:JDFFile}) = false
columnaccess(::Type{<:JDFFile}) = true
propertynames(jdf::JDFFile) = names(jdf)
getproperty(jdf::JDFFile, col::Symbol) = JDF.load(jdf; cols = [col]).columns[col]
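# e.g. getproperty(Tables.columns(jdf"iris.jdf"), :Species) loads just the Species column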
schema(jdf::JDFFile) = begin
meta = metadata(jdf)
Schema(meta.names, map(x -> x.type, meta.metadatas))
end
columns(jdf::JDFFile) = jdf
# this is the table type specific to JDF
struct Table
columns::NamedTuple
end
nrow(t::Table) = length(t.columns[1])
ncol(t::Table) = length(t.columns)
Tables.columns(t::Table) = t.columns
Tables.istable(t::Table) = true
function Base.getindex(t::Table, col::Symbol)
t.columns[col]
end
function Base.getindex(t::Table, rows, col::Symbol)
t.columns[col][rows]
end
function Base.getindex(t::Table, rows, ::Colon)
# TODO probably not efficient
NamedTuple{names(t.columns)}([nt[rows] for nt in t.columns])
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 392 | """
Load a column from `io` using the `metadata` produced by `compress_then_write`
"""
function column_loader(t::Type{T}, io, metadata) where {T}
buffer = Vector{UInt8}(undef, metadata.len)
column_loader!(buffer, t, io, metadata)
end
# load bytes from io decompress into type
function column_loader!(buffer, ::Type{T}, io, metadata) where {T}
readbytes!(io, buffer, metadata.len)
return Blosc.decompress(T, buffer)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 317 | function compress_then_write(b::AbstractVector{T}, io) where {T}
compress_then_write(Vector(b), io)
end
# the generic dispatch for isbits element types T
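"""
    compress_then_write(v, io)

Blosc-compress the vector `v` and write it to `io`. Returns a metadata
`NamedTuple` (at minimum `len`, the number of compressed bytes written, and
`type`) which `column_loader` later uses to read the column back.
"""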
function compress_then_write(b::Vector{T}, io) where {T}
bbc = Blosc.compress(b)
res = length(bbc)
write(io, bbc)
return (len = res, type = T)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 840 | import Base: iterate, length, eachcol
struct JDFFileColIterator
jdf::JDFFile
cols::Vector{Union{String,Symbol}}
end
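"""
    eachcol(jdf::JDFFile)

Return an iterator over the columns of the JDF file, loading one column into
memory at a time.
# Examples
```julia
using JDF
for col in eachcol(jdf"iris.jdf")
    # do something to col
end
```
"""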
eachcol(jdf::JDFFile) = JDFFileColIterator(jdf, names(jdf))
Base.length(jdf::JDFFileColIterator) = length(jdf.cols)
function Base.iterate(jdf::JDFFileColIterator, state = 1)
if state > length(jdf.cols)
return nothing
end
# TODO isolate this into a column loader
indir = path(jdf.jdf)
# load the metadatas
metadatas = open(joinpath(indir, "metadata.jls")) do io
deserialize(io)
end
name = metadatas.names[state]
metadata = metadatas.metadatas[state]
io = BufferedInputStream(open(joinpath(indir, string(name)), "r"))
result = column_loader(metadata.type, io, metadata)
close(io)
return (result, state+1)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 475 | # ######################
# A DataFrame way to get file
import Base: getindex, view
export getindex
function Base.getindex(file::JDFFile, rows, col::String)
JDF.load(file; cols = [col])[rows, Symbol(col)]
end
Base.getindex(file::JDFFile, rows, cols::AbstractVector{String}) = begin
JDF.load(file; cols = cols)[rows, :]
end
Base.view(file::JDFFile, rows, cols) = getindex(file, rows, cols)
getindex(file::JDFFile, rows, cols) = JDF.load(file)[rows, cols]
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 2127 | """
Load the columns of the JDF as vectors
"""
load_columns(jdf::JDFFile; args...) = load_columns(path(jdf); args...)
function load_columns(indir; cols = Symbol[], verbose = false)
# starting from DataFrames.jl 0.21 the colnames are strings
cols = collect(string.(cols))
if verbose
println("loading $indir in parallel")
end
metadatas = open(joinpath(indir, "metadata.jls")) do io
deserialize(io)
end
if length(cols) == 0
cols = collect(string.(metadatas.names))
else
scmn = setdiff(cols, collect(string.(metadatas.names)))
if length(scmn) > 0
throw("columns $(reduce((x,y) -> string(x) * ", " * string(y), scmn)) are not available, please ensure you have spelt them correctly")
end
end
results = Vector{Any}(undef, length(cols))
names = Vector{String}(undef, length(cols))
# bounded channel used as a semaphore to rate-limit concurrent column loads
c1 = Channel{Bool}(Threads.nthreads())
atexit(() -> close(c1))
i = 1
for (name, metadata) in zip(metadatas.names, metadatas.metadatas)
name_str = string(name)
if name_str in cols
put!(c1, true)
results[i] = @spawn begin
io = BufferedInputStream(open(joinpath(indir, string(name)), "r"))
new_result = column_loader(metadata.type, io, metadata)
close(io)
(name = name_str, task = new_result)
end
take!(c1)
i += 1
end
end
# run the collection of results serially
result_vectors = Vector{Any}(undef, length(cols))
for (i, result) in enumerate(results)
if verbose
println("Extracting $(fetch(result).name)")
end
new_result = fetch(result).task
colname = fetch(result).name
names[i] = colname
if new_result === nothing
result_vectors[i] = Vector{Missing}(missing, metadatas.rows)
else
result_vectors[i] = new_result
end
end
return names, result_vectors
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1215 | """
JDF.load(indir, verbose = true)
JDF.load(indir, cols = Vector{Symbol}, verbose = true)
Load a `Tables.jl` table from the JDF saved at `indir`. On Julia > v1.3, a multithreaded
version is used.
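# Examples
```julia
using JDF, DataFrames
df = DataFrame(JDF.load("iris.jdf")) # load all columns
species = DataFrame(JDF.load("iris.jdf", cols = [:Species])) # load a subset of columns
```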
"""
load(indir; cols = Symbol[], verbose = false) = begin
# starting from DataFrames.jl 0.21 the colnames are strings
cols = collect(string.(cols))
metadatas = jdfmetadata(indir)
# TODO simplify this; it is duplicated in load_columns
if length(cols) == 0
cols = collect(string.(metadatas.names))
else
scmn = setdiff(cols, collect(string.(metadatas.names)))
if length(scmn) > 0
throw("columns $(reduce((x,y) -> string(x) * ", " * string(y), scmn)) are not available, please ensure you have spelt them correctly")
end
end
cols_in_loaded_order, result_vectors = load_columns(indir; cols = cols, verbose = verbose)
# reorders to specified order
reorder_idx = indexin(cols_in_loaded_order, cols)
Table(NamedTuple{Tuple(Symbol.(cols))}(@view result_vectors[reorder_idx]))
end
load(jdf::JDFFile; args...) = load(path(jdf); args...)
loadjdf(args...; kwargs...) = load(args...; kwargs...)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1011 | """
JDF.jdfmetadata(indir)
Load the metadata associated with the JDF in `indir`
"""
jdfmetadata(indir) = begin
open(joinpath(indir, "metadata.jls")) do io
deserialize(io)
end
end
"""
JDF.metadata(jdf::JDFFile)
Load the metadata associated with the JDF file `jdf`
"""
metadata(jdf::JDFFile) = jdfmetadata(path(jdf))
"""
JDF.names(jdf::JDFFile)
Load the column names associated with the JDF file `jdf`
# Examples
```julia
using JDF, DataFrames
JDF.save(DataFrame(a = 1:3, b = 1:3), "plsdel.jdf")
JDF.names("plsdel.jdf")
```
"""
names(jdf::JDFFile) = metadata(jdf).names
# names(indir) = jdfmetadata(indir).names
"""
JDF.size(jdf::JDFFile)
Returns the JDF's `size`
"""
# JDF.size(indir) = begin
# m = metadata(indir)
# (m.rows, length(m.names))
# end
JDF.size(jdf::JDFFile) = begin
m = metadata(jdf)
(m.rows, length(m.names))
end
# JDF.size(indir, v) = JDF.size(indir, Val(v))
JDF.size(jdf::JDFFile, val) = JDF.size(jdf)[val]
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 2955 | using Tables
"""
some_elm(::Type{T})
Return some arbitrary element of type `T`; used as a sentinel value when writing columns that contain `missing` or `nothing`
"""
function some_elm(::Type{T}) where {T}
try
return zero(T)
catch
try
return T(0)
catch
try
rand(T)
catch
try
Vector{T}(undef, 1)[1]
catch
throw("the type $T is not supported by JDF.jl yet. Try to update JDF.jl. If it still doesn't work after update, please submit an issue at https://github.com/xiaodaigh/JDF.jl/issues")
end
end
end
end
end
some_elm(::Type{Date}) = Date(0)
"""
JDF.save(outdir, table)
JDF.save(table, outdir)
Save a `Tables.jl`-compatible table to `outdir`. On Julia > v1.3, a multithreaded version is used.
The following column (vector) types are supported:
* `isbits` types e.g. `Int*`, `UInt*`, `Float*`
* `Bool`
* `Strings`
* `WeakRefStrings.StringVector`
* `CategoricalArrays`
* `Union{Missing, T}` for any `T` supported above
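# Examples
```julia
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
JDF.save("df.jdf", df)
```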
"""
save(df, outdir::AbstractString; kwargs...) = save(outdir, df; kwargs...)
function save(outdir::AbstractString, df; verbose = false)
@assert Tables.istable(df)
pmetadatas = Any[missing for i = 1:length(Tables.columnnames(df))]
if !isdir(outdir)
mkpath(outdir)
end
# use a bounded channel to limit the number of simultaneous writes
c1 = Channel{Bool}(Threads.nthreads())
atexit(() -> close(c1))
for (i, n) in enumerate(Tables.columnnames(df))
if verbose
println(n)
end
put!(c1, true)
pmetadatas[i] = @spawn begin
io = BufferedOutputStream(open(joinpath(outdir, string(n)), "w"))
res = compress_then_write(Tables.getcolumn(df, n), io)
close(io)
res
end
take!(c1)
end
metadatas = fetch.(pmetadatas)
fnl_metadata = (
names = Tables.columnnames(df),
rows = length(Tables.columns(df)[1]),
metadatas = metadatas,
version = v"0.2",
)
open(joinpath(outdir, "metadata.jls"), "w") do io
serialize(io, fnl_metadata)
end
#fnl_metadata
JDFFile(outdir)
end
# figure out from metadata how much space is allocated
""" Get tthe number of bytes used by the file"""
get_bytes(metadata) = begin
if metadata.type == String
return max(metadata.string_compressed_bytes, metadata.string_len_bytes)
elseif metadata.type == Missing
return 0
elseif metadata.type >: Missing
return max(get_bytes(metadata.Tmeta), get_bytes(metadata.missingmeta))
else
return metadata.len
end
end
hasfieldnames(::Type{T}) where {T} = length(fieldnames(T)) >= 1
savejdf(args...; kwargs...) = save(args...; kwargs...)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 6718 | # TODO compress Union{Int, Missing} is no missing present
# TODO allow int to uint
"""
type_compress!(df, compress_float = false, verbose = false)
Compress a DataFrame by using types with smaller bit widths. It will
"downgrade" `Int*` and `UInt*` columns where it is safe to do so. It will
compress any `CategoricalVector` with `DataFrames.compress`.
For `Vector{String}`, if the number of unique values is at most 2^16 then it
will be converted to a `CategoricalVector`, and otherwise it will be stored as
a `WeakRefStrings.StringVector`.
If `compress_float = true` then `Float64` will be downgraded to `Float32`; but
beware that this means calculations will be done with reduced precision.
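# Examples
```julia
using JDF, DataFrames
df = DataFrame(a = 1:100)
type_compress!(df) # eltype(df.a) is now Int8
type_compress!(df, compress_float = true) # also downgrades Float64 columns to Float32
```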
"""
type_compress!(df; compress_float = false, verbose = false) = begin
@assert Tables.istable(df)
for n in names(df)
if verbose
println("Compressing $n")
end
if compress_float || (nonmissingtype(eltype(df[!, n])) != Float64)
df[!, n] = type_compress(df[!, n])
end
end
df
end
type_compress(v::Vector{T}) where {T<:Union{Int128,Int64,Int32,Int16}} = begin
min1, max1 = extrema(v)
if typemin(Int8) <= min1 && max1 <= typemax(Int8)
return Int8.(v)
elseif typemin(Int16) <= min1 && max1 <= typemax(Int16)
return Int16.(v)
elseif typemin(Int32) <= min1 && max1 <= typemax(Int32)
return Int32.(v)
elseif typemin(Int64) <= min1 && max1 <= typemax(Int64)
return Int64.(v)
end
end
type_compress(v::Vector{T}) where {T<:Union{UInt128,UInt64,UInt32,UInt16}} = begin
max1 = maximum(v)
if max1 <= typemax(UInt8)
return UInt8.(v)
elseif max1 <= typemax(UInt16)
return UInt16.(v)
elseif max1 <= typemax(UInt32)
return UInt32.(v)
elseif max1 <= typemax(UInt64)
return UInt64.(v)
end
end
type_compress(v::Vector{Union{Missing,T}}) where {T<:Union{UInt128,UInt64,UInt32,UInt16}} =
begin
max1 = maximum(skipmissing(v))
if max1 <= typemax(UInt8)
return Vector{Union{Missing,UInt8}}(v)
elseif max1 <= typemax(UInt16)
return Vector{Union{Missing,UInt16}}(v)
elseif max1 <= typemax(UInt32)
return Vector{Union{Missing,UInt32}}(v)
elseif max1 <= typemax(UInt64)
return Vector{Union{Missing,UInt64}}(v)
end
end
type_compress(v::Vector{Union{Missing,T}}) where {T<:Union{Int128,Int64,Int32,Int16}} =
begin
min1, max1 = extrema(skipmissing(v))
if typemin(Int8) <= min1 && max1 <= typemax(Int8)
return Vector{Union{Missing,Int8}}(v)
elseif typemin(Int16) <= min1 && max1 <= typemax(Int16)
return Vector{Union{Missing,Int16}}(v)
elseif typemin(Int32) <= min1 && max1 <= typemax(Int32)
return Vector{Union{Missing,Int32}}(v)
elseif typemin(Int64) <= min1 && max1 <= typemax(Int64)
return Vector{Union{Missing,Int64}}(v)
end
end
type_compress(v::Vector{Float64}) = Vector{Float32}(v)
type_compress(v::Vector{Union{Missing,Float64}}) = Vector{Union{Missing,Float32}}(v)
type_compress(v::Vector{String}) = begin
# TODO recommend a PooledString if necessary
# use some heuristic to decide whether to compress
## if the number of unique elements is predicted to be less than typemax(UInt16) + 1
## then we can use categorical array to compress them
# sv = countmap(sample(v, 888))
#
# # Estimate the number of unique items
# # https://stats.stackexchange.com/questions/19014/how-can-i-estimate-unique-occurrence-counts-from-a-random-sampling-of-data
# u1 = length([key for (key, value) in sv if value == 1])
# u = length(sv)
# # if the estimated number of estimtes is less than about 32000 then it's
# # worth putting it into a categorical array
# if u + u1/888 * (length(v) - 888) < typemax(UInt16) + 1
# return categorical(v)
# # rlev = rle(categorical(v))
# # rlev[2] .= cumsum(rlev[2])
# # return RLEVector(rlev...)
# end
if length(Set(v)) <= 2^16
return categorical(v)
end
# check if the string is more compact at RLE level
rlev = rle(v)
# firstly check if the rlev can be whittle down further
offset_dict = Dict{String,UInt}()
# an IOBuffer to build up the string buffer
long_string_builder = IOBuffer()
offset_array_builder = IOBuffer()
string_len_builder = IOBuffer()
values = rlev[1]
offset_upto = 0
# add the first entry
offset_dict[values[1]] = offset_upto
write(offset_array_builder, offset_upto)
offset_update = write(long_string_builder, values[1])
write(string_len_builder, offset_update)
offset_upto += offset_update
for v in values[2:end]
if !haskey(offset_dict, v)
write(offset_array_builder, offset_upto)
offset_update = write(long_string_builder, v)
write(string_len_builder, offset_update)
# update the offset_upto for next one
offset_dict[v] = offset_upto
offset_upto += offset_update
# println(v, " : ", offset_dict[v])
else
# If the string already exists, obtain its offset from the
# offset_dict.
# There is no need to write it to the string buffer again because it's
# already there
offset_old = offset_dict[v]
# still write the lengths
write(string_len_builder, sizeof(v))
# still write the offset
write(offset_array_builder, offset_old)
end
end
#long_str =String(take!(long_string_builder))
offsets = inverse_rle(reinterpret(Int, take!(offset_array_builder)), rlev[2])
lengths = inverse_rle(reinterpret(Int, take!(string_len_builder)), rlev[2])
buffer = take!(long_string_builder)
close(long_string_builder)
close(offset_array_builder)
close(string_len_builder)
#return String(buffer), offsets, lengths
#return (StringArray{String, 1}(buffer, offsets, lengths), String(buffer), offsets, lengths)
return StringArray{String,1}(buffer, offsets, lengths)
end
type_compress(v::Vector{Missing}) = v
type_compress(v::CategoricalVector) = compress(v)
type_compress(v) = begin
# println("The compression for $(typeof(v)) is not yet supported. No compression is performed. Submit an idea if you think JDF should support it: https://github.com/xiaodaigh/JDF.jl/issues")
v
end
if false
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 556 | column_loader(T::Type{Bool}, io, metadata) = begin
# Bools are saved as UInt8
buffer = Vector{UInt8}(undef, metadata.len)
readbytes!(io, buffer, metadata.len)
Bool.(Blosc.decompress(UInt8, buffer))
end
column_loader!(buffer, T::Type{Bool}, io, metadata) = begin
# Bools are saved as UInt8
read!(io, buffer)
res = Blosc.decompress(UInt8, buffer)
Bool.(res)
end
compress_then_write(b::Vector{Bool}, io) = begin
b8 = UInt8.(b)
bbc = Blosc.compress(b8)
write(io, bbc)
return (len = length(bbc), type = Bool)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 437 | # save symbols
compress_then_write(b::Vector{Char}, io) = begin
meta = compress_then_write(Int.(b), io)
(type = Char, len = meta.len, metadata = meta)
end
# load a Symbol column
column_loader(::Type{Char}, io, metadata) = begin
strs = column_loader(Int, io, metadata.metadata)
Char.(strs)
end
column_loader!(_, ::Type{Char}, io, metadata) = begin
column_loader(Char, io, metadata)
end
some_elm(::Type{Char}) = 'J'
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 600 | # save symbols
# compress_then_write(b::Vector{DateTime}, io) = begin
# meta = compress_then_write(map(x->x.instant, b), io)
# (type = DateTime, len = meta.len, metadata = meta)
# end
#
# # load a Symbol column
# column_loader(::Type{DateTime}, io, metadata) = begin
# strs = column_loader(meta.type, io, metadata.metadata)
# DateTime.(strs)
# end
#
# column_loader!(_, ::Type{DateTime}, io, metadata) = begin
# column_loader(DateTime, io, metadata)
# end
#
some_elm(::Type{DateTime}) = DateTime(2000,1,1,1,1,1)
# turns out DateTime is Blosc-compressible; no need to do anything special
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1203 | using Missings: allowmissing
some_elm(::Type{Missing}) = missing
# the dispatch for Union{T, Missing}:
# 1. write the values, with missings replaced by the sentinel some_elm(T)
# 2. then write a Bool mask marking where the missings were
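# e.g. [1, missing, 3] is written as the value stream [1, 0, 3] (since some_elm(Int) == 0)
# followed by the compressed Bool mask [false, true, false]; column_loader! reapplies the mask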
function compress_then_write(b::Vector{Union{T,Missing}}, io) where {T}
b_S = coalesce.(b, some_elm(T))
metadata = compress_then_write(b_S, io)
b_m = ismissing.(b)
metadata2 = compress_then_write(b_m, io)
(
Tmeta = metadata,
missingmeta = metadata2,
type = eltype(b),
len = max(metadata.len, metadata2.len),
)
end
# just write it out as missing
# notice how io is not needed since nothing need to be written
compress_then_write(b::Vector{Missing}, _) =
(len = 0, type = Missing, orig_len = length(b))
function column_loader!(buffer, ::Type{Union{Missing,T}}, io, metadata) where {T}
# read the content
Tmeta = metadata.Tmeta
t_pre = column_loader!(buffer, Tmeta.type, io, Tmeta) |> allowmissing
# read the missings as bool
m = column_loader(Bool, io, metadata.missingmeta)
t_pre[m] .= missing
t_pre
end
column_loader!(_, ::Type{Missing}, io, metadata) =
Vector{Missing}(missing, metadata.orig_len)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1096 | some_elm(::Type{Nothing}) = nothing
# just write it out as nothing
compress_then_write(b::Vector{Nothing}, io) =
(len = 0, type = Nothing, orig_len = length(b))
# the dispatch for Union{T, Nothing}:
# 1. write the values, with nothings replaced by the sentinel some_elm(T)
# 2. then write a Bool mask marking where the nothings were
compress_then_write(b::Vector{Union{T,Nothing}}, io) where {T} = begin
#S = nonmissingtype(eltype(b))
b_S = T[isnothing(b) ? some_elm(T) : b for b in b]
metadata = compress_then_write(b_S, io)
b_m = isnothing.(b)
metadata2 = compress_then_write(b_m, io)
(
len = max(metadata.len, metadata2.len),
type = T,
metadata = metadata,
missingmeta = metadata2,
)
end
column_loader!(buffer, ::Type{Union{Nothing,T}}, io, metadata) where {T} = begin
# read the content
Tmeta = metadata.metadata
t_pre = Vector{Union{Nothing,T}}(column_loader!(buffer, Tmeta.type, io, Tmeta))
m = column_loader(Bool, io, metadata.missingmeta)
t_pre[m] .= nothing
t_pre
end
column_loader!(_, ::Type{Nothing}, _, metadata) =
Vector{Nothing}(nothing, metadata.orig_len)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 2446 | some_elm(::Type{String}) = ""
"""
Save a `Vector{String}` using a run-length encoding (RLE) of repeated values
"""
compress_then_write(b::Vector{String}, io) = begin
# TODO compare whether StringArray is better
# return compress_then_write(StringArray(b), io)
# write the string one by one
# do a Run-length encoding (RLE)
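# e.g. ["ab", "ab", "c"] is written to io as the bytes "abc", with
# str_lens = [2, 1] (bytes per distinct run value) and lens = [2, 1] (run counts)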
previous_b = b[1]
cnt = 1
lens = Int[]
str_lens = Int[]
for i = 2:length(b)
if b[i] != previous_b
push!(str_lens, write(io, previous_b))
push!(lens, cnt)
cnt = 0
previous_b = b[i]
end
cnt += 1
end
# reached the end: two situations
# 1) it's a new element, so write it
# 2) it's an existing element; also write it
push!(str_lens, write(io, previous_b))
push!(lens, cnt)
@assert sum(lens) == length(b)
str_lens_compressed = Blosc.compress(Vector{UInt32}(str_lens))
str_lens_bytes = write(io, str_lens_compressed)
lens_compressed = Blosc.compress(Vector{UInt64}(lens))
rle_bytes = write(io, lens_compressed)
# return metadata
return (
string_compressed_bytes = sum(str_lens),
string_len_bytes = str_lens_bytes,
rle_bytes = rle_bytes,
rle_len = length(str_lens),
type = String,
len = max(sum(str_lens), str_lens_bytes, rle_bytes),
)
end
# load a string column
"""
metadata should consist of the length, the compressed byte size of the
string lengths, and the string content lengths
"""
column_loader!(_, ::Type{String}, io, metadata) = begin
column_loader(String, io, metadata)
end
column_loader(::Type{String}, io, metadata) = begin
buffer = Vector{UInt8}(undef, metadata.string_compressed_bytes)
readbytes!(io, buffer, metadata.string_compressed_bytes)
#return String(buffer)
# read the string-lengths
buffer2 = Vector{UInt8}(undef, metadata.string_len_bytes)
readbytes!(io, buffer2, metadata.string_len_bytes)
buffer3 = Vector{UInt8}(undef, metadata.rle_bytes)
readbytes!(io, buffer3, metadata.rle_bytes)
counts = Blosc.decompress(UInt64, buffer3)
str_lens = Blosc.decompress(UInt32, buffer2)
#return (String(buffer), str_lens, counts)
lengths = inverse_rle(str_lens, counts)
offsets = inverse_rle(vcat(0, cumsum(str_lens[1:end-1])), counts)
#res = StringArray{String, 1}(buffer, vcat(1, cumsum(Blosc.decompress(UInt64, buffer3))[1:end-1]) .-1, )
res = StringArray{String,1}(buffer, offsets, lengths)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 924 | compress_then_write(b::StringVector{T}, io) where {T} = begin
buffer_meta = (type = eltype(b.buffer), len = write(io, Blosc.compress(b.buffer)))
offsets_meta =
(type = eltype(b.offsets), len = write(io, Blosc.compress(b.offsets)))
lengths_meta =
(type = eltype(b.lengths), len = write(io, Blosc.compress(b.lengths)))
(
metadata = (buffer = buffer_meta, offsets = offsets_meta, lengths = lengths_meta),
type = typeof(b),
)
end
column_loader(::Type{StringVector{T}}, io, metadata) where {T} = begin
buffer = column_loader(metadata.metadata.buffer.type, io, metadata.metadata.buffer)
offsets =
column_loader(metadata.metadata.offsets.type, io, metadata.metadata.offsets)
lengths =
column_loader(metadata.metadata.lengths.type, io, metadata.metadata.lengths)
metadata.type(buffer, offsets, lengths)
end
# tests at test/test-stringarray.jl
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 479 | # save symbols
compress_then_write(b::Vector{Symbol}, io) = begin
string_meta = compress_then_write(String.(b), io)
(type = Symbol, len = string_meta.len, metadata = string_meta)
end
# load a Symbol column
column_loader(::Type{Symbol}, io, metadata) = begin
strs = column_loader(String, io, metadata.metadata)
Symbol.(strs)
end
column_loader!(_, ::Type{Symbol}, io, metadata) = begin
column_loader(Symbol, io, metadata)
end
some_elm(::Type{Symbol}) = :JDF
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 5586 | # ##############################################################################
# ZonedDateTime
# ##############################################################################
# the structure
# utc_datetime : isbits
# timezone : VariableTimeZone
# name : string
# transitions : Vector{TimeZones.Transitions}
# utc_datetime : isbits
# zone: FixedTimeZone
# name : string
# offset : isbits
# cutoff : isbits (Union{Nothing, DateTime})
# zone
# name : string
# offset : isbits
compress_then_write(b::Vector{ZonedDateTime}, io) = begin
utc_datetime_meta = compress_then_write([b.utc_datetime for b in b], io)
timezone_meta = compress_then_write([b.timezone for b in b], io)
zone_meta = compress_then_write([b.zone for b in b], io)
(
type = ZonedDateTime,
len = utc_datetime_meta.len + timezone_meta.len + zone_meta.len,
utc_datetime_meta = utc_datetime_meta,
timezone_meta = timezone_meta,
zone_meta = zone_meta,
)
end
# load a ZonedDateTime column
column_loader(::Type{ZonedDateTime}, io, metadata) = begin
v1 = column_loader(metadata.utc_datetime_meta.type, io, metadata.utc_datetime_meta)
v2 = column_loader(metadata.timezone_meta.type, io, metadata.timezone_meta)
v3 = column_loader(metadata.zone_meta.type, io, metadata.zone_meta)
[ZonedDateTime(v1, v2, v3) for (v1, v2, v3) in zip(v1, v2, v3)]
end
column_loader!(_, ::Type{ZonedDateTime}, io, metadata) = begin
column_loader(ZonedDateTime, io, metadata)
end
some_elm(::Type{ZonedDateTime}) = ZonedDateTime(1984, 1, 5, tz"Asia/Shanghai")
# ##############################################################################
# TimeZones.Transitions
# ##############################################################################
compress_then_write(b::Vector{TimeZones.Transition}, io) = begin
# the structure of Transition is like
# (utc_datetime::isbits, zone::string)
utc_datetime_meta = compress_then_write([b.utc_datetime for b in b], io)
zone_meta = compress_then_write([b.zone for b in b], io)
(
type = TimeZones.Transition,
len = utc_datetime_meta.len + zone_meta.len,
utc_datetime_meta = utc_datetime_meta,
zone_meta = zone_meta,
)
end
column_loader(::Type{TimeZones.Transition}, io, metadata) = begin
v1 = column_loader(metadata.utc_datetime_meta.type, io, metadata.utc_datetime_meta)
v2 = column_loader(metadata.zone_meta.type, io, metadata.zone_meta)
[TimeZones.Transition(v1, v2) for (v1, v2) in zip(v1, v2)]
end
column_loader!(::Type{TimeZones.Transition}, io, metadata) = begin
column_loader(TimeZones.Transition, io, metadata)
end
# ##############################################################################
# VariableTimeZone
# ##############################################################################
compress_then_write(b::Vector{VariableTimeZone}, io) = begin
# the structure of Transition is like
# (name::string, transitions, cutoff::string)
name_meta = compress_then_write([b.name for b in b], io)
len_transitions = [length(b.transitions) for b in b]
len_transitions_meta = compress_then_write(len_transitions, io)
transitions = Vector{TimeZones.Transition}(undef, sum(len_transitions))
i = 1
for b in b
for t in b.transitions
transitions[i] = t
i += 1
end
end
transitions_meta = compress_then_write(transitions, io)
cutoff_meta = compress_then_write([b.cutoff for b in b], io)
(
type = VariableTimeZone,
len = name_meta.len +
len_transitions_meta.len +
transitions_meta.len +
cutoff_meta.len,
name_meta = name_meta,
len_transitions_meta = len_transitions_meta,
transitions_meta = transitions_meta,
cutoff_meta = cutoff_meta,
)
end
column_loader(::Type{VariableTimeZone}, io, metadata) = begin
name = column_loader(metadata.name_meta.type, io, metadata.name_meta)
len_transitions = column_loader(
metadata.len_transitions_meta.type,
io,
metadata.len_transitions_meta,
)
hi = cumsum(len_transitions)
lo = vcat(0, hi[1:end-1]) .+ 1
transitions =
column_loader(metadata.transitions_meta.type, io, metadata.transitions_meta)
metadata.cutoff_meta.type
cutoff = column_loader(metadata.cutoff_meta.type, io, metadata.cutoff_meta)
[
VariableTimeZone(name, transitions[lo:hi], cutoff)
for (name, lo, hi, cutoff) in zip(name, lo, hi, cutoff)
]
end
column_loader!(::Type{VariableTimeZone}, io, metadata) = begin
column_loader(VariableTimeZone, io, metadata)
end
# ##############################################################################
# FixedTimeZone
# ##############################################################################
compress_then_write(b::Vector{FixedTimeZone}, io) = begin
# the structure of Transition is like
# (name, offset)
m1 = compress_then_write([b.name for b in b], io)
m2 = compress_then_write([b.offset for b in b], io)
(type = FixedTimeZone, len = m1.len + m2.len, name_meta = m1, offset_meta = m2)
end
column_loader(::Type{FixedTimeZone}, io, metadata) = begin
v1 = column_loader(metadata.name_meta.type, io, metadata.name_meta)
v2 = column_loader(metadata.offset_meta.type, io, metadata.offset_meta)
[FixedTimeZone(v1, v2) for (v1, v2) in zip(v1, v2)]
end
column_loader!(::Type{FixedTimeZone}, io, metadata) = begin
column_loader(FixedTimeZone, io, metadata)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 2227 | using DataAPI
using CategoricalArrays: CategoricalVector, CategoricalArray, CategoricalPool
function compress_then_write(b::CategoricalVector{T,IntType}, io) where {T, IntType<:Integer}
compress_refs = compress_then_write(b.refs, io)
compress_poolindex = compress_then_write(DataAPI.levels(b), io)
(
type = CategoricalVector,
refs = compress_refs,
poolindex = compress_poolindex,
ordered = b.pool.ordered,
)
end
# function column_loader(::Type{CategoricalVector{Union{Missing, T}, I}}, io, metadata) where {T, I}
# println("got here1")
# refs_meta = metadata.refs
# pi_meta = metadata.poolindex
# ref = column_loader(refs_meta.type, io, refs_meta)
# poolindex = column_loader(pi_meta.type, io, pi_meta)
# return CategoricalArray{pi_meta.type,1}(
# ref,
# CategoricalPool{eltype(poolindex),eltype(ref)}(Array(poolindex), metadata.ordered),
# )
# end
function column_loader(::Type{CategoricalVector}, io, metadata)
refs_meta = metadata.refs
pi_meta = metadata.poolindex
ref = column_loader(refs_meta.type, io, refs_meta)
poolindex = column_loader(pi_meta.type, io, pi_meta)
# this checks for missing in the values which would be represented by ref = 0
if any(==(0), ref)
return CategoricalArray{Union{pi_meta.type, Missing},1}(
ref,
CategoricalPool{eltype(poolindex),eltype(ref)}(Array(poolindex), metadata.ordered),
)
else
# no missing in the values, just return
return CategoricalArray{pi_meta.type,1}(
ref,
CategoricalPool{eltype(poolindex),eltype(ref)}(Array(poolindex), metadata.ordered),
)
end
end
if false
# # compress_then_write-cate.jl
# using Revise
# using JDF
# using DataFrames, Debugger
# a = categorical(["a", "b", "a", "c"])
# io = open("c:/data/test.io", "w")
# metadata = compress_then_write(a, io)
# close(io)
#
# io = open("c:/data/test.io", "r")
# aa = column_loader(CategoricalVector, io, metadata)
# close(io)
#
# df = DataFrame(a = a)
#
# savejdf("c:/data/pls_del.jdf", df)
#
# loadjdf("c:/data/pls_del.jdf")
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 752 | using DataAPI, PooledArrays
function compress_then_write(b::PooledVector, io)
compress_refs = compress_then_write(b.refs, io)
compress_poolindex = compress_then_write(b.pool, io)
(type = PooledVector, refs = compress_refs, poolindex = compress_poolindex)
end
function column_loader(b::Type{PooledVector}, io, metadata)
refs_meta = metadata.refs
pi_meta = metadata.poolindex
ref = column_loader(refs_meta.type, io, refs_meta)
poolindex = column_loader(pi_meta.type, io, pi_meta)
# TODO more efficient pooledArray construction
# PooledVector{pi_meta.type}(ref, CategoricalPool{eltype(poolindex), eltype(ref)}(Array(poolindex)))
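# defensively clamp refs into the valid index range of `poolindex` before indexing below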
ref = max.(1, min.(length(poolindex), ref))
PooledArray(poolindex[ref])
end
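# Minimal round-trip sketch for the PooledVector path above (guarded so it does
# not run on include; the file name is an illustrative assumption):
if false
    using PooledArrays
    pv = PooledArray(["x", "y", "x"])
    meta = open(io -> compress_then_write(pv, io), "pooled.bin", "w")
    pv2 = open(io -> column_loader(PooledVector, io, meta), "pooled.bin", "r")
    @assert pv2 == pv
end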
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 444 |
function compress_then_write(b::Vector{SubString{String}}, io)
@warn "JDF is writing a SubString vector. When loaded back it will be a String vector not a SubString vector"
compress_then_write(String.(b), io)
end
function column_loader(::Type{SubString{String}}, io, metadata)
@warn "JDF is loading SubString vector. It will be loaded as a String vector not a SubString vector"
column_loader(String, io, metadata)
end | JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1569 | using JDF
using Test
using DataFrames
using Random: randstring
using WeakRefStrings
include("test-categorical-ararys.jl")
include("test-stringarray.jl")
include("test-symbol.jl")
include("test-jdf-str.jl")
include("test-eachcol.jl")
include("test-ZonedDateTime.jl")
include("test-substring.jl")
include("test-date-w-missing.jl")
@testset "JDF.jl parallel" begin
df = DataFrame([collect(1:100) for i = 1:3000], :auto)
df[!, :int_missing] =
rand([rand(rand([UInt, Int, Float64, Float32, Bool])), missing], nrow(df))
df[!, :missing] .= missing
df[!, :strs] = [randstring(8) for i = 1:nrow(df)]
df[!, :stringarray] = StringVector([randstring(8) for i = 1:nrow(df)])
df[!, :strs_missing] = [rand([missing, randstring(8)]) for i = 1:nrow(df)]
df[!, :stringarray_missing] =
StringVector([rand([missing, randstring(8)]) for i = 1:nrow(df)])
df[!, :symbol_missing] = [rand([missing, Symbol(randstring(8))]) for i = 1:nrow(df)]
df[!, :char] = getindex.(df[!, :strs], 1)
df[!, :char_missing] = allowmissing(df[!, :char])
df[rand(1:nrow(df), 10), :char_missing] .= missing
@time JDF.save("a.jdf", df)
@time df2 = DataFrame(JDF.load("a.jdf"), copycols=false)
@test isequal(df, df2)
df2 = DataFrame(JDF.load("a.jdf"), copycols=false)
@test ncol(df2) == 3009
@test nrow(df2) == 100
df2[!, :stringarray_missing]
@test all(all(isequal(df[!, n], df2[!, n])) for n in names(df))
# clean up
rm("a.jdf", force = true, recursive = true)
end | JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 242 | using Test, RDatasets
using JDF, Tables
iris = dataset("datasets", "iris")
ok = savejdf(iris, "plsdel.jdf")
Tables.columns(ok)
Tables.schema(ok)
propertynames(ok)
getproperty(ok, :Species)
rm("plsdel.jdf", force = true, recursive = true)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1043 | using Test
using JDF, TimeZones, DataFrames
a = some_elm(ZonedDateTime)
ar = Vector{ZonedDateTime}(undef, 10)
ar .= a
meta = open("plsdel.io", "w") do io
compress_then_write(ar, io)
end
ar_loaded = open("plsdel.io") do io
column_loader(meta.type, io, meta)
end
@test ar == ar_loaded
df = DataFrame(ar = ar)
JDF.save(df, "df.jdf")
df_loaded = DataFrame(JDF.load("df.jdf"))
@test df == df_loaded
ar_timezone = [ar.timezone for ar in ar]
meta = open("plsdel.io", "w") do io
compress_then_write(ar_timezone, io)
end
ar_timezone_copy = open("plsdel.io") do io
column_loader(meta.type, io, meta)
end
@test ar_timezone == ar_timezone_copy
meta = open("plsdel.io", "w") do io
compress_then_write(a.timezone.transitions, io)
end
a_timezone_transitions_loaded = open("plsdel.io") do io
column_loader(meta.type, io, meta)
end
@test a.timezone.transitions == a_timezone_transitions_loaded
rm("plsdel.io", force = true)
rm("df.jdf", force = true, recursive = true)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1204 | using Test
using JDF
using RDatasets
using DataFrames: DataFrame
using CategoricalArrays
using Tables
@testset "JDF.jl categorical arrays" begin
df = DataFrame([collect(1:100) for i = 1:3], :auto)
df[!, :x1] = categorical(df[!, :x1])
df[!, :x2] = categorical(string.(df[!, :x2]))
JDF.save(df, "a3cate.jdf")
df_loaded_back = JDF.load("a3cate.jdf", cols = [:x2, :x1])
df2 = DataFrame(df_loaded_back; copycols = true)
@test size(df2, 2) == 2
@test size(df2, 1) == 100
@test df2[!, :x1] isa CategoricalVector{Int}
@test df2[!, :x2] isa CategoricalVector{String}
rm("a3cate.jdf", force = true, recursive = true)
end
@testset "Guard against Github #27" begin
iris = dataset("datasets", "iris")
JDF.save(iris, "iris.jdf")
JDF.load("iris.jdf")
rm("iris.jdf", force = true, recursive = true)
end
@testset "CategoricalArray{Union{Missing, String}}" begin
# Guard against github 73
df2 = DataFrame(sex = categorical(["Male", missing, "Female"]))
JDF.save("df2.jdf", df2)
b = JDF.load("df2.jdf") |> DataFrame
@test any(ismissing, b.sex)
rm("df2.jdf", force=true, recursive=true)
end | JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 727 | # using Revise
using Test
using JDF, DataFrames
@testset "JDF.jl Char" begin
ac = ['a', 'b', 'a', 'c', 'd']
metadata_ac = open("io.jdf", "w") do io
compress_then_write(ac, io)
end
a = ['a', 'b', 'a', missing, 'd']
metadata = open("io.jdf", "w") do io
compress_then_write(a, io)
end
aa = open("io.jdf", "r") do io
column_loader(Union{Missing,Char}, io, metadata)
end
df = DataFrame(a = a, ac = ac)
savejdf("pls_del.jdf", df)
df2 = loadjdf("pls_del.jdf")
@test size(df) == size(df2)
@test all(isequal.(df.a, df2.a))
@test all(isequal.(df.ac, df2.ac))
rm("io.jdf", force = true)
rm("pls_del.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 586 | # Guard aginst github #62
using JDF
using Dates, DataFrames
col = [Date(1999,1,1), missing]
df = (d = col, a = [1, missing])
using Tables
JDF.save("plsdel-date-w-missing.jdf", df)
DataFrame(JDF.load("plsdel-date-w-missing.jdf"))
rm("plsdel-date-w-missing.jdf", recursive=true)
# Guard against github #72
using DataFrames
using Dates
df = DataFrame()
df[!, :test] = [Dates.DateTime(2000,1,1,1,1,1), missing]
df
using JDF
JDF.savejdf("plsdel-datetime-w-missing.jdf", df)
DataFrame(JDF.load("plsdel-datetime-w-missing.jdf"))
rm("plsdel-datetime-w-missing.jdf", recursive=true) | JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1299 | using Test
using DataFrames
using Random: randstring
using WeakRefStrings: StringVector
@testset "JDF.jl eachcol" begin
df = DataFrame([collect(1:100) for i = 1:3000], :auto)
df[!, :int_missing] =
rand([rand(rand([UInt, Int, Float64, Float32, Bool])), missing], size(df, 1))
df[!, :missing] .= missing
df[!, :strs] = [randstring(8) for i = 1:size(df, 1)]
df[!, :stringarray] = StringVector([randstring(8) for i = 1:size(df, 1)])
df[!, :strs_missing] = [rand([missing, randstring(8)]) for i = 1:size(df, 1)]
df[!, :stringarray_missing] =
StringVector([rand([missing, randstring(8)]) for i = 1:size(df, 1)])
df[!, :symbol_missing] = [rand([missing, Symbol(randstring(8))]) for i = 1:size(df, 1)]
df[!, :char] = getindex.(df[!, :strs], 1)
df[!, :char_missing] = allowmissing(df[!, :char])
df[rand(1:size(df, 1), 10), :char_missing] .= missing
JDF.save("a.jdf", df)
df2 = jdf"a.jdf"
@test size(df2, 2) == 3009
@test size(df2, 1) == 100
@time df3 = [a for a in eachcol(df2)]
df4 = DataFrame(df3, :auto)
@test size(df4) == size(df)
@test all([isequal(df4[!, n], df[!, n]) for n = 1:size(df4, 2)])
# clean up
rm("a.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 230 | using Test;
using JDF, DataFrames
a = DataFrame(a = 1:3, b =4:6)
tf = tempname()
JDF.save(tf, a)
a = DataFrame(JDF.load(tf))
a1 = DataFrame(JDF.load(tf; cols = ["b", "a"]))
@test a1.a == 1:3
@test a1.b == 4:6
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 860 | # Based on the Julia implementation
# InlineStringN are all `isbitstype` so they work with JDF automatically
# However for other languages, we may still need to explicitly support them and conver them
# to string is not available in those languages
using JDF
using InlineStrings, DataFrames, CSV
using Random:randstring
@testset "Test InlineStrings get loaded and saved properly" begin
a = DataFrames.DataFrame(a = [randstring(254) |> String255 for i in 1:100])
path = tempdir()
CSV.write(joinpath(path, "tmp.csv"), a)
a1 = CSV.read(joinpath(path, "tmp.csv"), DataFrame)
a1.a = a1.a .|> String255
JDF.save(joinpath(path, "tmp.jdf"), a1)
a2 = JDF.load(joinpath(path, "tmp.jdf")) |> DataFrame
@test eltype(a2.a) == String255
# clean up
rm("tmp.csv"; force=true)
rm("tmp.jdf"; force=true, recursive=true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1275 | @testset "JDF.jl jdf_str" begin
df = DataFrame([collect(1:100) for i = 1:3000], :auto)
df[!, :int_missing] =
rand([rand(rand([UInt, Int, Float64, Float32, Bool])), missing], size(df, 1))
df[!, :missing] .= missing
df[!, :strs] = [randstring(8) for i = 1:size(df, 1)]
df[!, :stringarray] = StringVector([randstring(8) for i = 1:size(df, 1)])
df[!, :strs_missing] = [rand([missing, randstring(8)]) for i = 1:size(df, 1)]
df[!, :stringarray_missing] =
StringVector([rand([missing, randstring(8)]) for i = 1:size(df, 1)])
df[!, :symbol_missing] = [rand([missing, Symbol(randstring(8))]) for i = 1:size(df, 1)]
df[!, :char] = getindex.(df[!, :strs], 1)
df[!, :char_missing] = allowmissing(df[!, :char])
df[rand(1:size(df, 1), 10), :char_missing] .= missing
JDF.save("a.jdf", df)
df2 = DataFrame(JDF.load(jdf"a.jdf"), copycols=false)
@test size(df2, 2) == 3009
@test size(df2, 1) == 100
@test all(all(isequal(df[!, n], df2[!, n])) for n in names(df))
df3 = DataFrame(JDF.load(jdf"a.jdf", cols = [:missing, :strs]), copycols=false)
@test size(df3, 2) == 2
@test size(df3, 1) == 100
# clean up
rm("a.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 924 | using DataFrames, Random, WeakRefStrings
using JDF
@testset "JDF.jl nrow" begin
df = DataFrame([collect(1:100) for i = 1:3000], :auto)
df[!, :int_missing] =
rand([rand(rand([UInt, Int, Float64, Float32, Bool])), missing], nrow(df))
df[!, :missing] .= missing
df[!, :strs] = [randstring(8) for i = 1:nrow(df)]
df[!, :stringarray] = StringVector([randstring(8) for i = 1:nrow(df)])
df[!, :strs_missing] = [rand([missing, randstring(8)]) for i = 1:nrow(df)]
df[!, :stringarray_missing] =
StringVector([rand([missing, randstring(8)]) for i = 1:nrow(df)])
JDF.save("a.jdf", df)
dfjdf = jdf"a.jdf"
@test ncol(jdf"a.jdf") == 3006
@test nrow(jdf"a.jdf") == 100
@test size(jdf"a.jdf") == (100, 3006)
@test size(jdf"a.jdf", 1) == 100
@test size(jdf"a.jdf", 2) == 3006
# clean up
rm("a.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 983 | using Test
using JDF
using DataFrames: DataFrame
using PooledArrays
using Missings: allowmissing
@testset "JDF.jl pooledarrays arrays" begin
df = DataFrame([rand(1:10, 100) for i = 1:3], :auto)
df[!, :x3] = df[:, :x1] |> allowmissing
df[!, :x4] = string.(df[!, :x2]) |> allowmissing
df[50, :x3] = missing
df[66, :x4] = missing
df[!, :x1] = PooledArray(df[!, :x1])
df[!, :x2] = PooledArray(string.(df[!, :x2]))
df[!, :x3] = PooledArray(df[!, :x3])
df[!, :x4] = PooledArray(df[!, :x4])
JDF.save(df, "a3pooled.jdf")
df2 = JDF.load("a3pooled.jdf")
@test size(df2, 2) == 4
@test size(df2, 1) == 100
@test df2[!, :x1] isa PooledVector{Int}
@test df2[!, :x2] isa PooledVector{String}
@test df2[!, :x3] isa PooledVector{Union{Missing,Int}}
@test df2[!, :x4] isa PooledVector{Union{Missing,String}}
for n in names(df)
@test isequal(df2[!, n], df[!, n])
end
rm("a3pooled.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 540 | # using Revise
using Test
using JDF, WeakRefStrings, DataFrames
@testset "JDF.jl WeakRefStrings.StringArrays" begin
a = StringVector(["a", "b", "a", missing, "c"])
io = open("io.jdf", "w")
metadata = compress_then_write(a, io)
close(io)
io = open("io.jdf", "r")
aa = column_loader(StringVector{String}, io, metadata)
close(io)
df = DataFrame(a = a)
savejdf("pls_del.jdf", df)
df2 = loadjdf("pls_del.jdf")
rm("io.jdf", force = true)
rm("pls_del.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 430 | using DataFrames
using Random: randstring
using JDF
using Test
df = DataFrame([collect(1:10) for i = 1:3000], :auto);
df[!, :strs] = [randstring(8) for i = 1:DataFrames.nrow(df)]
df[!, :substrs] = [SubString(x, 1, 3) for x in df[!, :strs]]
JDF.save("tmp-substring.jdf", df)
df2 = DataFrame(JDF.load("tmp-substring.jdf"))
@test df.substrs == df2.substrs
rm("tmp-substring.jdf", force=true, recursive=true)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 762 | using Test
using JDF, DataFrames
@testset "JDF.jl Symbol" begin
ac = [:a, :b, :a, :c, :d]
metadata_ac = open("io.jdf", "w") do io
compress_then_write(ac, io)
end
a = [:a, :b, :a, missing, :c]
metadata = open("io.jdf", "w") do io
compress_then_write(a, io)
end
aa = open("io.jdf", "r") do io
column_loader(Union{Missing,Symbol}, io, metadata)
end
df = DataFrame(a = a, ac = ac)
JDF.save("pls_del.jdf", df)
df2 = DataFrame(JDF.load("pls_del.jdf"); copycols=false)
@test size(df) == size(df2)
@test all(isequal.(df.a, df2.a))
@test all(isequal.(df.ac, df2.ac))
rm("io.jdf", force = true)
rm("pls_del.jdf", force = true, recursive = true)
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 344 | gf() = begin
CSV.read("c:/data/AirOnTimeCSV/airOT198710.csv")
end
gf2() = begin
p = "c:/data/AirOnTimeCSV/"
f = joinpath.(p, readdir(p))
sort!(f, by = x -> filesize(x), rev = true)
reduce(vcat, CSV.read.(f[1:100]))
end
iow() = begin
open("c:/data/bin.bin", "w")
end
ior() = begin
open("c:/data/bin.bin", "r")
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 568 | using CSV
a = gf()
CSV.write("c:/data/a.csv", a)
# ;gzip c:/data/a.csv  # run in REPL shell mode
using CodecZlib, BufferedStreams
io = open("c:/data/a.csv.gz") |> GzipDecompressorStream |> BufferedInputStream
@time a2 = CSV.read(io)
close(io)
using ZipFile, CSV, DataFrames, BufferedStreams
a = DataFrame(a = 1:3)
CSV.write("c:/data/a.csv", a)
# zip the file; Windows users who do not have zip available on the PATH can manually zip the CSV
# ;zip c:/data/a.zip c:/data/a.csv  # run in REPL shell mode
io = open("c:/data/a.zip", "r")
z = ZipFile.Reader(io)
df = CSV.read(z.files[1])
close(io)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 520 | using Revise
using JDF, CSV, DataFrames
@time a = gf()
# 2G file
# @time a = CSV.read(
# "C:/data/Performance_All/Performance_2010Q3.txt",
# delim = '|',
# header = false
# );
b = type_compress(Array(a[!, :ORIGIN]));
io = iow()
@time metadata = compress_then_write(b, io)
close(io)
using Revise
using JDF, CSV, DataFrames
# using JLSO
# metadata = JLSO.load("C:/data/metatmp")["data"]
io = ior()
@time oo = column_loader(metadata.type, io, metadata);
close(io)
all(b.==oo)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 518 | using Revise
using JDF, CSV, DataFrames
const path = "c:/data/AirOnTimeCSV"
const outpath = "c:/data/AirOnTimeCSV_jdf_pp"
convert_csv_to_jdf(infile, outdir) = begin
print("$(infile): csv read: ")
@time df = CSV.read(infile)
print("$(infile): compression: ")
@time type_compress!(df)
print("$(infile): write_out: ")
@time psavejdf(outdir, df)
end
for infile in readdir(path)
@time convert_csv_to_jdf(
joinpath(path, infile),
joinpath(outpath, infile)
)
println("")
println("")
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 763 | using Revise
using JDF, CSV, DataFrames, Blosc, Base.GC, BenchmarkTools, Serialization
# use 12 threads
Blosc.set_num_threads(6)
# GC.gc()
@time a = CSV.read(
"C:/data/Performance_All/Performance_2003Q3.txt",
delim = '|',
header = false
);
GC.gc()
t = time()
@time metadatas = psavejdf(a, "c:/data/large12.dir.jdf");
time() - t
GC.gc()
GC.gc()
@time metadatas = savejdf(a, "c:/data/large12.jdf");
GC.gc()
serialize("c:/data/large12.meta", metadatas)
using Revise, JDF, DataFrames, Serialization
metadatas = deserialize("c:/data/large12.meta")
@time a2 = loadjdf("c:/data/large12.jdf", metadatas);
GC.gc()
all(names(a) .== names(a2))
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)]))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1025 | using Revise
using JDF, CSV, DataFrames, Blosc, Base.GC, BenchmarkTools, Serialization
# use 12 threads
Blosc.set_num_threads(6)
# GC.gc()
@time a = CSV.read(
"C:/data/Performance_All/Performance_2010Q3.txt",
delim = '|',
header = false
);
GC.gc()
@time metadatas = savejdf("c:/data/large.jdf", a);
using Revise, JDF, DataFrames, Serialization
@time a2 = loadjdf("c:/data/large.jdf"; verbose);
GC.gc()
@time type_compress!(a2);
io = iow()
compress_then_write(a2.Column2, io)
close(io)
@time psavejdf("c:/data/largec.jdf", a2);
using Revise, JDF, DataFrames, Serialization
@time a2 = ploadjdf("c:/data/largec.jdf");
GC.gc()
@time type_compress!(a2, verbose = true)
@time psavejdf("c:/data/largec2.jdf", a2);
@time a2 = loadjdf("c:/data/largec.jdf");
using Revise, JDF, DataFrames, Serialization
@time a2 = ploadjdf("c:/data/largec.jdf");
GC.gc()
all(names(a) .== names(a2))
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)]))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1654 | using Revise
using JDF
using CSV, DataFrames, Blosc, JLSO, Base.GC
# use 12 threads
Blosc.set_num_threads(6)
# GC.gc()
@time a = CSV.read(
"C:/data/Performance_All/Performance_2016Q1.txt",
delim = '|',
header = false
);
GC.gc()
t = time()
@time metadatas = psavejdf(a, "c:/data/large.jdf");
time() - t
GC.gc()
GC.gc()
@time metadatas = savejdf(a, "c:/data/large.jdf");
GC.gc()
@time JLSO.save("c:/data/large.meta.jlso", metadatas)
GC.gc()
using Revise, JLSO, JDF
@time metadatas = JLSO.load("c:/data/large.meta.jlso")["data"];
GC.gc()
@time a2 = loadjdf("c:/data/large.jdf", metadatas);
GC.gc()
all(names(a) .== names(a2))
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)]))
# CSVFiles vs CSV
using Revise
using CSV, DataFrames, JDF, Base.GC
import Base.Threads: @spawn
# a = gf()
fp = [f for f in readdir("c:/data/AirOnTimeCSV/") if endswith(f, ".csv")];
using CSVFiles
@time a = load(joinpath("c:/data/AirOnTimeCSV", fp[1]), type_detect_rows = 2000)
#psavejdf(a, joinpath("c:/data/hehe/", fp[1]))
sort!(fp, by = x->filesize(joinpath("c:/data/AirOnTimeCSV/", x)), rev=true)
fn4(fp) = begin
#@time a = load(joinpath("C:/data/AirOnTimeCSV", fp), type_detect_rows = 2000) |> DataFrame
@time a = CSV.read(joinpath("C:/data/AirOnTimeCSV", fp))
#mkdir(joinpath("c:/data/hehe/", fp))
res = psavejdf(a, joinpath("c:/data/hehe/", fp))
a = nothing
res
end
res = Vector{Any}(undef, length(fp))
@time for (i, f) in enumerate(fp[1:6])
print(i)
res[i] = fn4(f)
end
#@time fn4(fp[1])
#@time fn4.(fp)
2+2
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1139 | using Revise
using JDF, CSV, DataFrames, Blosc, Base.GC, BenchmarkTools, Serialization
# use 12 threads
Blosc.set_num_threads(6)
# GC.gc()
@time a = CSV.read(
"C:/data/Performance_All/Performance_2003Q1.txt",
delim = '|',
header = false
);
GC.gc()
t = time()
@time metadatas = psavejdf(a, "c:/data/large8.dir.jdf");
time() - t
GC.gc()
GC.gc()
@time metadatas = savejdf(a, "c:/data/large8.jdf");
GC.gc()
serialize("c:/data/large8.meta", metadatas)
using Revise, JDF, DataFrames, Serialization
metadatas = deserialize("c:/data/large8.meta")
@time a2 = loadjdf("c:/data/large8.jdf", metadatas);
# before=Base.summarysize(a2)
@time type_compress!(a2, verbose = true)
# after=Base.summarysize(a2)
#
# before/after
GC.gc()
@time metadatas = savejdf(a2, "c:/data/large8c.jdf");
GC.gc()
serialize("c:/data/large8c.meta", metadatas)
using Serialization, JDF
metadatas = deserialize("c:/data/large8c.meta")
@time a2 = loadjdf("c:/data/large8c.jdf", metadatas);
GC.gc()
all(names(a) .== names(a2))
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)]))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 377 | using Revise
using JDF
using CSV, DataFrames, Feather
@time a = CSV.read("c:/data/a.feather");
@time pmetadatas = psavejdf(a, "c:/data/a.jdf");
@time metadatas = savejdf(a, "c:/data/a.jdf");
#a = nothing
@time a2 = loadjdf("c:/data/a.jdf", metadatas);
all(names(a) .== names(a2))
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)]))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 1826 | using Revise
using JDF
using CSV, DataFrames, Blosc, JLSO, Base.GC
# use 12 threads
Blosc.set_num_threads(6)
@time a = CSV.read("C:/data/Performance_All/Performance_2010Q3.txt", delim = '|', header = false);
strs = "id".*string.(rand(UInt16, 100_000_000));
# write randomstring to io
strs = coalesce.(Array(a[:Column3]), "")
io = open("c:/data/string.jdf", "w")
@time string_byte_sizes = write.(Ref(io), strs);
close(io)
load_str(file, string_byte_sizes) = begin
io = open(file, "r")
tot_strings_in_bytes = sum(string_byte_sizes)
strings_in_bytes = Vector{UInt8}(undef, tot_strings_in_bytes)
@time read!(io, strings_in_bytes)
close(io)
i = 1
j = 0
ptr_to_string_in_bytes = pointer(strings_in_bytes)
@time reconstituted_strings = String[" "^s for s in string_byte_sizes]
@time for string_byte_size in string_byte_sizes
#global i, j, reconstituted_strings
#reconstituted_strings[i] = unsafe_string(ptr_to_string_in_bytes+j, string_byte_size)
unsafe_copyto!(
reconstituted_strings[i] |> pointer,
ptr_to_string_in_bytes + j,
string_byte_size)
i += 1
j += string_byte_size
end
reconstituted_strings
end
@time reconstituted_strings = load_str("c:/data/string.jdf", string_byte_sizes);
all(reconstituted_strings .== strs) # true: works and is FAST
@time metadata = compress_then_write(a, "c:/data/string.jdf");
x = "abc"
xp = pointer(x)
unsafe_load(xp-8)
unsafe_pointer_to_objref(pointer(UInt8[3, 0, 0, 0, 0, 0, 0, 0, 63, 63, 63]))
unsafe_pointer_to_objref(xp-8)
y = unsafe_string(xp)
yp = pointer(y)
yp == xp # false
reinterpret(8, UInt8)
pointer(8)
unsafe_pointer_to_objref(pointer(UInt8[0,0,0,0, 0,0,0,2, 64, 65]))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 579 | using Revise
using DataFrames, CSV, JDF
using WeakRefStrings, CategoricalArrays
@time a = CSV.read("c:/data/feature_matrix_cleaned.csv");
a[!, :stringarr] = StringArray(rand(["a", "a", "b"], size(a, 1)))
a[!, :cate] = categorical(a[!, :stringarr])
@time savejdf("c:/data/feature_matrix_cleaned.csv.jdf", a)
a = nothing
@time a = loadjdf("c:/data/feature_matrix_cleaned.csv.jdf")
type_compress!(a, compress_float=true)
@time savejdf("c:/data/feature_matrix_cleaned.csv.compressed.jdf", a)
using BenchmarkTools
@benchmark a = loadjdf("c:/data/feature_matrix_cleaned.csv.jdf")
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 3212 | using Revise
using JDF
a = gf()
b = Array(a.ORIGIN)
io = iow()
metadata = compress_then_write(b, io)
close(io)
io = ior()
oo = column_loader(String, io, metadata)
close(io)
all(b.==oo)
b = Array(a.FL_DATE)
io = iow()
metadata = compress_then_write(b, io)
close(io)
io = ior()
oo = column_loader(metadata.type, io, metadata)
close(io)
all(b.==oo)
@time metadatas = savejdf(a, "c:/data/a.jdf")
@time a2 = loadjdf("c:/data/a.jdf", metadatas)
using LambdaFn
fn = @λ joinpath.(_1, readdir(_1))
files = fn("d:/data/AirOnTimeCSV")
sort!(files, by = x->filesize(x))
@time listdf =reduce(vcat, CSV.read(f) for f in files[1:50]);
using DataFrames, Blosc
@time schema = savejdf(listdf, "d:/data/plsdel.jdf");
@time loadjdf("d:/data/plsdel.jdf", schema);
bb = [Blosc.compress(Array(b)) for b in eachcol(a)]
bbc = bb[1]
bbc = rand(UInt8, 100)
io = open("io.test", "w")
# w = Array{Float32,2}(undef, n1, nw)  # leftover snippet: n1, nw, fileID, iw are undefined here
# read!(fileID, w[:, iw])
write(io, bbc)
close(io)
io = open("io.test", "r")
bbc_new = read(io, Vector)
bbc_new = Array{UInt8,1}(undef, 100)
read!(io, bbc_new)
close(io)
N = 100_000_000;
K = 100;
# faster string sort
svec = rand("id".*string.(1:NรทK), N);
using Blosc
# save with no compression
io = open("c:/data/no_compression.bin", "w")
@time ncw = write.(Ref(io), codeunits.(svec));
ncwtot=sum(ncw)
@time ncw_compressed_tot = write(io, Blosc.compress(UInt16.(ncw)))
close(io)
# @time res = column_loader(String, io, ncwtot)  # misplaced: io is closed above and reopened below
io = open("c:/data/no_compression.bin", "r")
@time tmp = Vector{UInt8}(undef, sum(ncw))
@time read!(io, tmp)
close(io)
@time tmp_compressed = Blosc.compress(tmp)
io = open("c:/data/compression.bin", "w")
write(io, tmp_compressed)
close(io)
tot = sum(ncw)
ok(tmp, ncw) = begin
cncw = cumsum(ncw)
str = unsafe_string(pointer(tmp))
getindex.(Ref(str), Colon().(vcat(1, cncw[1:end-1].+1), cncw))
end
io = open("c:/data/bin.bin", "r")
@time tmp = Vector{UInt8}(undef, tot)
@time read!(io, tmp)
@time ok(tmp, ncw)
close(io)
# @time nc = ncodeunits.(svec)
# @time aa = Vector{UInt8}(undef, sum(nc))
# meh!(aa, svec, nc) = begin
# start = 1
# @time for (n, s) in zip(nc, svec)
# aa[1:n] = codeunits(s)
# start += n
# end
# end
# @time meh!(aa, svec, nc)  # meh! and its inputs are commented out above
@time a = codeunits.(svec)
close(io)
@time ncw = write.(Ref(io), a);
using Blosc
io = open("c:/data/compression.bin", "w")
@time ncw = write.(Ref(io), Blosc.compress.(codeunits.(svec)));
close(io)
stringit(n, io) = begin
tmp = Vector{UInt8}(undef, n)
read!(io, tmp)
tmp
end
tot = sum(ncw)
ok(tmp, ncw) = begin
cncw = cumsum(ncw)
str = unsafe_string(pointer(tmp))
getindex.(Ref(str), Colon().(vcat(1, cncw[1:end-1].+1), cncw))
end
io = open("c:/data/bin.bin", "r")
@time tmp = Vector{UInt8}(undef, tot)
@time read!(io, tmp)
@time ok(tmp, ncw)
close(io)
a = pointer(tmp[1:8])
@time str = unsafe_string(a)
close(io)
io = open("c:/data/bin.bin", "r")
@time svec2 = stringit.(ncw, Ref(io))
close(io)
@time svec2 = unsafe_string.(pointer.(aa))
all(svec2 .== svec)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 998 |
unsafe_load(pointer.(strs) + 3)
a = "def"
b = unsafe_string(pointer(a))
pointer(a)
pointer(b)
a = UInt8[64, 65, C_NULL, 62, 53]
pp= pointer(a)
print(pp)
@time as = String(a)
pointer(as)
unsafe_wrap(a, Ptr{String})
a = "def"
unsafe_load(pointer(a)+3)
x = "id".*string.(rand(UInt8, 1_000_000))
io = open("c:/data/io.bin", "w")
ncw = write.(Ref(io), x)
close(io)
io = open("c:/data/io.bin", "r")
buffer = Vector{UInt8}(undef, sum(ncw))
readbytes!(io, buffer, sum(ncw))
close(io)
@time aa = test(buffer)
function test(array)
start = 1
strings = String[]
GC.@preserve array begin
ptr = pointer(array) - 1
for i in eachindex(array)
@inbounds char = array[i]
if char == UInt8(',')
len = i - start
str = unsafe_string(ptr + start, len)
push!(strings, str)
start = i + 1
end
end
end
strings
end
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 719 |
a = CSV.read("d:/data\\AirOnTimeCSV\\airOT199401.csv")
bb = [Array(b) for b in eachcol(a)]
bbc = Blosc.compress(bb[1])
bbc = Blosc.compress([1,2,3])
io = open("io.test", "w")
write(io, bbc)
write(io, bbc)
close(io)
io = open("io.test", "r")
bbc_new = Array{UInt8,1}(undef, length(bbc))
bbc_new2 = similar(bbc_new)
read!(io, bbc_new)
read!(io, bbc_new2)
close(io)
Blosc.decompress(Int64, bbc_new)
Blosc.decompress(Int64, bbc_new2)
all(Blosc.decompress(Int64, bbc_new) .== [1,2,3])
using CSV, DataFrames, Blosc, Missings
# a = DataFrame(
# a = rand(Int32, 100_000_000), b = rand(Float32, 100_000_000)
# )
@time a = CSV.read("d:/data/AirOnTimeCSV/airOT199401.csv")
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 517 | using Revise
using JDF, DataFrames
# a = gf()
# savejdf("c:/data/a.jdf", a)
@time a = loadjdf("c:/data/large.jdf")
# 2G file
# @time a = CSV.read(
# "C:/data/Performance_All/Performance_2010Q3.txt",
# delim = '|',
# header = false
# );
@time b = Array(a[:, :Column2]);
@time res = type_compress(b);
all(b .== res)
# ne = findfirst(b .!= b1)
#
# b[ne], res[ne]
using RLEVectors, StatsBase
ar = rle(res)
ar[2] .= cumsum(ar[2])
rlev = RLEVector(ar...)
all(b .== rlev)
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | code | 358 | using Revise
using JDF
a=gf()
b = Array(a.ORIGIN_STATE_ABR)
io = iow()
@which compress_then_write(eltype(b), b, io)
@time metadata = compress_then_write(eltype(b), b, io)
close(io)
io = ior()
@which column_loader(eltype(b), io, metadata)
@time okhl = column_loader(eltype(b), io, metadata)
close(io)
all(skipmissing(b) .== skipmissing(okhl))
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | docs | 599 | ## 0.4.1
Fixed date-with-missing issues
## 0.4
Returning a JDFFile type that is Tables.jl compatible
## 0.2.19
Added support for `SubString{String}`
## 0.2.18
Added PooledArrays support; fixes #45
Added `save` and `load` functions
## 0.2.16
Updated to DataFrames v0.21
## 0.2.11
Updated to Tables v1
## 0.2.9
Updated dep to allow DataFrames.jl v0.20.0
## 0.2.8
Tables.jl support
`savejdf` now returns the `JDFFile` instead of the metadata
## 0.2.7
* df[rows, cols] interface
## 0.2.6
Bug fixes for CategoricalArrays
## 0.2.5
Support for Julia 1.0 is added
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | docs | 7342 | # What is JDF.jl?
JDF is a `DataFrame` serialization format with the following goals
* Fast save and load times
* Compressed storage on disk
* Enable disk-based data manipulation (not yet achieved)
* Supports machine learning workloads, e.g. mini-batch, sampling (not yet achieved)
JDF.jl is the Julia package for all things related to JDF.
JDF stores a `DataFrame` in a folder with each column stored as a separate file.
There is also a `metadata.jls` file that stores metadata about the original
`DataFrame`. Collectively, the column files, the metadata file, and the folder
are called a JDF "file".
`JDF.jl` is a pure-Julia solution and there are a lot of ways to do nifty things
like compression and encapsulating the underlying structure of the arrays that's
hard to do in R and Python. E.g. Python's numpy arrays are C objects, but all
the vector types used in JDF are Julia data types.
## Please note
The next major version of JDF will contain breaking changes. But don't worry, I am fully committed to providing an automatic upgrade path. This means that you can safely use JDF.jl to save your data without having to worry that the impending breaking change will break all your JDF files.
## Example: Quick Start
```julia
using RDatasets, JDF, DataFrames
a = dataset("datasets", "iris");
first(a, 2)
```
### *Saving* and *Loading* data
By default JDF loads and saves `DataFrame`s using multiple threads starting from
Julia 1.3. For Julia < 1.3, it saves and loads using one thread only.
```julia
@time jdffile = JDF.save("iris.jdf", a)
@time a2 = DataFrame(JDF.load("iris.jdf"))
```
Simple checks for correctness
```julia
all(names(a2) .== names(a)) # true
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)])) #true
```
### Loading only certain columns
You can load only a few columns from the dataset by specifying `cols =
[:column1, :column2]`. For example
```julia
a2_selected = DataFrame(JDF.load("iris.jdf", cols = [:Species, :SepalLength, :PetalWidth]))
```
The difference with loading the whole datasets and then subsetting the columns
is that it saves time as only the selected columns are loaded from disk.
### Some `DataFrame`-like convenience syntax/functions
To take advantage of some of these convenience functions, you need to create a variable of type `JDFFile` pointed at the JDF file on disk. For example
```julia
jdf"path/to/JDF.jdf"
```
or
```julia
path_to_JDF = "path/to/JDF.jdf"
JDFFile(path_to_JDF)
```
#### Using `df[col::Symbol]` syntax
You can load arbitrary `col` using the `df[col]` syntax. However, some of these operations are not
yet optimized and hence may not be efficient.
```julia
afile = JDFFile("iris.jdf")
afile[:Species] # load Species column
```
#### JDFFile is Tables.jl column-accessible
```julia
using Tables
ajdf = JDFFile("iris.jdf")
Tables.columnaccess(ajdf)
```
```julia
Tables.columns(ajdf)
```
```julia
Tables.schema(ajdf)
```
```julia
getproperty(Tables.columns(ajdf), :Species)
```
#### Load each column from disk
You can load each column of a JDF file from disk using iterations
```julia
jdffile = jdf"iris.jdf"
for col in eachcol(jdffile)
# do something to col
# where `col` is the content of one column of iris.jdf
end
```
To iterate through the columns names and the `col`
```julia
jdffile = jdf"iris.jdf"
for (name, col) in zip(names(jdffile), eachcol(jdffile))
# `name::Symbol` is the name of the column
# `col` is the content of one column of iris.jdf
end
```
#### Metadata Names & Size from disk
You can obtain the column names and number of columns `ncol` of a JDF, for
example:
```julia
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
JDF.save(df, "plsdel.jdf")
names(jdf"plsdel.jdf") # [:a, :b]
# clean up
rm("plsdel.jdf", force = true, recursive = true)
```
### Additional functionality: In memory `DataFrame` compression
`DataFrame` sizes are out of control. A 2GB CSV file can easily take up 10GB in
RAM. One can use the function `type_compress!(df)` to compress any
`df::DataFrame`. E.g.
```julia
type_compress!(df)
```
The function looks at `Int*` columns and sees if it can be safely "downgraded" to
another `Int*` type with a smaller bits size. It will convert `Float64` to
`Float32` if `compress_float = true`. E.g.
```julia
type_compress!(df, compress_float = true)
```
`String` compression is _planned_ and will likely employ categorical encoding
combined with RLE encoding.
## Benchmarks
Here are some benchmarks using the [Fannie Mae Mortgage
Data](https://docs.rapids.ai/datasets/mortgage-data). Please note that a reading
of zero means that the method has failed to read or write.
JDF is a decent performer on both read and write and can achieve comparable
performance to [R's {fst}](https://www.fstpackage.org/), once compiled. The JDF
format also results in much smaller file size vs Feather.jl in this particular
example (probably due to Feather.jl's inefficient storage of `Union{String,
Missing}`).



Please note that the benchmarks were obtained on Julia 1.3+. On earlier versions
of Julia where multi-threading isn't available, JDF is roughly 2x slower than
shown in the benchmarks.
## Supported data types
I believe that restricting the types that JDF supports is vital for simplicity and maintainability.
There is support for
* `WeakRefStrings.StringVector`
* `Vector{T}`, `Vector{Union{Missing, T}}`, `Vector{Union{Nothing, T}}`
* `CategoricalArrays.CategoricalVector{T}` and `PooledArrays.PooledVector`
where `T` can be `String`, `Bool`, `Symbol`, `Char`, `SubString{String}`, `TimeZones.ZonedDateTime` (experimental), and `isbits` types, i.e. `UInt*`, `Int*`,
`Float*`, and `Date*` types etc.
`RLEVectors` support will be considered in the future when `missing` support
arrives for `RLEVectors.jl`.
## Resources
[@bkamins](https://github.com/bkamins/)'s excellent [DataFrames.jl tutorial](https://github.com/bkamins/Julia-DataFrames-Tutorial/blob/master/04_loadsave.ipynb) contains a section on using JDF.jl.
## How does JDF work?
When saving a JDF, each vector is Blosc compressed (using the default settings)
if possible; this includes all `T` and `Union{Missing, T}` types where `T` is
`isbits`. For `String` vectors, they are first converted to a Run Length
Encoding (RLE) representation, and the lengths component in the RLE are `Blosc`
compressed.
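As a rough illustration of the RLE step (a sketch only; JDF's actual internal helpers may differ), a string vector is split into runs of repeated values plus run lengths, and the run lengths are what get `Blosc` compressed:
```julia
# Illustrative RLE of a string vector; not JDF's actual internal API
function rle_encode(v::Vector{String})
    vals = String[]
    lens = Int[]
    for s in v
        if !isempty(vals) && vals[end] == s
            lens[end] += 1      # extend the current run
        else
            push!(vals, s)      # start a new run
            push!(lens, 1)
        end
    end
    vals, lens
end

rle_encode(["a", "a", "b", "a"])  # (["a", "b", "a"], [2, 1, 1])
```
Only the integer `lens` vector then needs to go through the generic `isbits` compression path, while each run's value is stored once.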
## Development Plans
I fully intend to develop JDF.jl into a language-neutral format by version v0.4. However, I have other OSS commitments including [R's
{disk.frame}](http://diskframe.com), and hence new features might be slow to come onboard. But I am fully committed to making JDF files created using JDF.jl v0.2 or higher loadable in all future JDF.jl versions.
## Notes
* Parallel read and write support is only available from Julia 1.3.
* The design of JDF was inspired by [fst](https://www.fstpackage.org/) in terms of using compressions and allowing random-access to columns
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | docs | 11055 | # What is JDF.jl?
JDF is a `DataFrame` serialization format with the following goals
* Fast save and load times
* Compressed storage on disk
* Enable disk-based data manipulation (not yet achieved)
* Supports machine learning workloads, e.g. mini-batch, sampling (not yet achieved)
JDF.jl is the Julia package for all things related to JDF.
JDF stores a `DataFrame` in a folder with each column stored as a separate file.
There is also a `metadata.jls` file that stores metadata about the original
`DataFrame`. Collectively, the column files, the metadata file, and the folder
are called a JDF "file".
`JDF.jl` is a pure-Julia solution and there are a lot of ways to do nifty things
like compression and encapsulating the underlying structure of the arrays that's
hard to do in R and Python. E.g. Python's numpy arrays are C objects, but all
the vector types used in JDF are Julia data types.
## Please note
The next major version of JDF will contain breaking changes. But don't worry, I am fully committed to providing an automatic upgrade path. This means that you can safely use JDF.jl to save your data without having to worry that the impending breaking change will break all your JDF files.
## Example: Quick Start
```julia
using RDatasets, JDF, DataFrames
a = dataset("datasets", "iris");
first(a, 2)
```
```
2×5 DataFrame
 Row │ SepalLength  SepalWidth  PetalLength  PetalWidth  Species
     │ Float64      Float64     Float64      Float64     Cat…
─────┼───────────────────────────────────────────────────────────
   1 │         5.1         3.5          1.4         0.2  setosa
   2 │         4.9         3.0          1.4         0.2  setosa
```
### *Saving* and *Loading* data
By default JDF loads and saves `DataFrame`s using multiple threads starting from
Julia 1.3. For Julia < 1.3, it saves and loads using one thread only.
```julia
@time jdffile = JDF.save("iris.jdf", a)
@time a2 = DataFrame(JDF.load("iris.jdf"))
```
```
  0.091923 seconds (157.33 k allocations: 9.226 MiB, 98.89% compilation time)
  0.165332 seconds (197.31 k allocations: 11.476 MiB, 98.49% compilation time)
150×5 DataFrame
 Row │ SepalLength  SepalWidth  PetalLength  PetalWidth  Species
     │ Float64      Float64     Float64      Float64     Cat…
─────┼─────────────────────────────────────────────────────────────
   1 │         5.1         3.5          1.4         0.2  setosa
   2 │         4.9         3.0          1.4         0.2  setosa
   3 │         4.7         3.2          1.3         0.2  setosa
   4 │         4.6         3.1          1.5         0.2  setosa
   5 │         5.0         3.6          1.4         0.2  setosa
   6 │         5.4         3.9          1.7         0.4  setosa
   7 │         4.6         3.4          1.4         0.3  setosa
   8 │         5.0         3.4          1.5         0.2  setosa
  ⋮  │      ⋮           ⋮            ⋮           ⋮           ⋮
 144 │         6.8         3.2          5.9         2.3  virginica
 145 │         6.7         3.3          5.7         2.5  virginica
 146 │         6.7         3.0          5.2         2.3  virginica
 147 │         6.3         2.5          5.0         1.9  virginica
 148 │         6.5         3.0          5.2         2.0  virginica
 149 │         6.2         3.4          5.4         2.3  virginica
 150 │         5.9         3.0          5.1         1.8  virginica
                                               135 rows omitted
```
Simple checks for correctness
```julia
all(names(a2) .== names(a)) # true
all(skipmissing([all(a2[!,name] .== Array(a[!,name])) for name in names(a2)])) #true
```
```
true
```
### Loading only certain columns
You can load only a few columns from the dataset by specifying `cols =
[:column1, :column2]`. For example
```julia
a2_selected = DataFrame(JDF.load("iris.jdf", cols = [:Species, :SepalLength, :PetalWidth]))
```
```
150×3 DataFrame
 Row │ SepalLength  PetalWidth  Species
     │ Float64      Float64     Cat…
─────┼────────────────────────────────────
   1 │         5.1         0.2  setosa
   2 │         4.9         0.2  setosa
   3 │         4.7         0.2  setosa
   4 │         4.6         0.2  setosa
   5 │         5.0         0.2  setosa
   6 │         5.4         0.4  setosa
   7 │         4.6         0.3  setosa
   8 │         5.0         0.2  setosa
  ⋮  │      ⋮           ⋮           ⋮
 144 │         6.8         2.3  virginica
 145 │         6.7         2.5  virginica
 146 │         6.7         2.3  virginica
 147 │         6.3         1.9  virginica
 148 │         6.5         2.0  virginica
 149 │         6.2         2.3  virginica
 150 │         5.9         1.8  virginica
                      135 rows omitted
```
The difference with loading the whole datasets and then subsetting the columns
is that it saves time as only the selected columns are loaded from disk.
### Some `DataFrame`-like convenience syntax/functions
To take advantage of some of these convenience functions, you need to create a variable of type `JDFFile` pointed at the JDF file on disk. For example
```julia
jdf"path/to/JDF.jdf"
```
```
JDFFile{String}("path/to/JDF.jdf")
```
or
```julia
path_to_JDF = "path/to/JDF.jdf"
JDFFile(path_to_JDF)
```
```
JDFFile{String}("path/to/JDF.jdf")
```
#### Using `df[col::Symbol]` syntax
You can load arbitrary `col` using the `df[col]` syntax. However, some of these operations are not
yet optimized and hence may not be efficient.
```julia
afile = JDFFile("iris.jdf")
afile[:Species] # load Species column
```
```
150-element CategoricalArrays.CategoricalArray{String,1,UInt8}:
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
 ⋮
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
```
#### JDFFile is Tables.jl column-accessible
```julia
using Tables
ajdf = JDFFile("iris.jdf")
Tables.columnaccess(ajdf)
```
```
true
```
```julia
Tables.columns(ajdf)
```
```
JDFFile{String}("iris.jdf")
```
```julia
Tables.schema(ajdf)
```
```
Tables.Schema:
:SepalLength Float64
:SepalWidth Float64
:PetalLength Float64
:PetalWidth Float64
 :Species      CategoricalVector (alias for CategoricalArrays.CategoricalArray{T, 1} where T)
```
```julia
getproperty(Tables.columns(ajdf), :Species)
```
```
150-element CategoricalArrays.CategoricalArray{String,1,UInt8}:
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
"setosa"
 ⋮
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
"virginica"
```
#### Load each column from disk
You can load each column of a JDF file from disk using iterations
```julia
jdffile = jdf"iris.jdf"
for col in eachcol(jdffile)
# do something to col
# where `col` is the content of one column of iris.jdf
end
```
To iterate through the columns names and the `col`
```julia
jdffile = jdf"iris.jdf"
for (name, col) in zip(names(jdffile), eachcol(jdffile))
# `name::Symbol` is the name of the column
# `col` is the content of one column of iris.jdf
end
```
#### Metadata Names & Size from disk
You can obtain the column names and number of columns `ncol` of a JDF, for
example:
```julia
using JDF, DataFrames
df = DataFrame(a = 1:3, b = 1:3)
JDF.save(df, "plsdel.jdf")
names(jdf"plsdel.jdf") # [:a, :b]
# clean up
rm("plsdel.jdf", force = true, recursive = true)
```
### Additional functionality: In memory `DataFrame` compression
`DataFrame` sizes are out of control. A 2GB CSV file can easily take up 10GB in
RAM. One can use the function `type_compress!(df)` to compress any
`df::DataFrame`. E.g.
```julia
type_compress!(df)
```
```
3×2 DataFrame
 Row │ a     b
     │ Int8  Int8
─────┼────────────
   1 │    1     1
   2 │    2     2
   3 │    3     3
```
The function looks at `Int*` columns and sees if it can be safely "downgraded" to
another `Int*` type with a smaller bits size. It will convert `Float64` to
`Float32` if `compress_float = true`. E.g.
```julia
type_compress!(df, compress_float = true)
```
```
3×2 DataFrame
 Row │ a     b
     │ Int8  Int8
─────┼────────────
   1 │    1     1
   2 │    2     2
   3 │    3     3
```
`String` compression is _planned_ and will likely employ categorical encoding
combined with RLE encoding.
## Benchmarks
Here are some benchmarks using the [Fannie Mae Mortgage
Data](https://docs.rapids.ai/datasets/mortgage-data). Please note that a reading
of zero means that the method has failed to read or write.
JDF is a decent performer on both read and write and can achieve comparable
performance to [R's {fst}](https://www.fstpackage.org/), once compiled. The JDF
format also results in much smaller file size vs Feather.jl in this particular
example (probably due to Feather.jl's inefficient storage of `Union{String,
Missing}`).



Please note that the benchmarks were obtained on Julia 1.3+. On earlier versions
of Julia where multi-threading isn't available, JDF is roughly 2x slower than
shown in the benchmarks.
## Supported data types
I believe that restricting the types that JDF supports is vital for simplicity and maintainability.
There is support for
* `WeakRefStrings.StringVector`
* `Vector{T}`, `Vector{Union{Missing, T}}`, `Vector{Union{Nothing, T}}`
* `CategoricalArrays.CategoricalVector{T}` and `PooledArrays.PooledVector`
where `T` can be `String`, `Bool`, `Symbol`, `Char`, `SubString{String}`, `TimeZones.ZonedDateTime` (experimental), and `isbits` types, i.e. `UInt*`, `Int*`,
`Float*`, and `Date*` types etc.
`RLEVectors` support will be considered in the future when `missing` support
arrives for `RLEVectors.jl`.
## Resources
[@bkamins](https://github.com/bkamins/)'s excellent [DataFrames.jl tutorial](https://github.com/bkamins/Julia-DataFrames-Tutorial/blob/master/04_loadsave.ipynb) contains a section on using JDF.jl.
## How does JDF work?
When saving a JDF, each vector is Blosc compressed (using the default settings)
if possible; this includes all `T` and `Union{Missing, T}` types where `T` is
`isbits`. For `String` vectors, they are first converted to a Run Length
Encoding (RLE) representation, and the lengths component in the RLE are `Blosc`
compressed.
## Development Plans
I fully intend to develop JDF.jl into a language-neutral format by version v0.4. However, I have other OSS commitments including [R's
{disk.frame}](http://diskframe.com), and hence new features might be slow to come onboard. But I am fully committed to making JDF files created using JDF.jl v0.2 or higher loadable in all future JDF.jl versions.
## Notes
* Parallel read and write support is only available from Julia 1.3.
* The design of JDF was inspired by [fst](https://www.fstpackage.org/) in terms of using compressions and allowing random-access to columns
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 0.5.3 | 6e2645607454affcfdff0d433ebb61ec2e4b88f4 | docs | 38 | # JDF.jl
Documentation for JDF.jl
| JDF | https://github.com/xiaodaigh/JDF.jl.git |
|
[
"MIT"
] | 1.2.0 | 569556d349015ef22131b7e0e8867d18ee77b679 | code | 485 | abstract type PullType end
struct PullAction{pt <: PullType, ARGS <: Tuple} <: Function
f::Function
args::ARGS
end
PullAction(f, args, pt = StandardPull) = PullAction{pt, typeof(args)}(f, args)
pull_args(pa::PullAction) = pull_args(pa.args)
pull_args(args) = map(args) do arg
pull!(arg)
end
valid_args(args) = all(args) do arg
!(typeof(arg) <: Signal) ? true : valid(arg)
end
valid(pa::PullAction) = valid_args(pa.args)
(pa::PullAction)() = pa.f(pull_args(pa)...)
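# Sketch of how a PullAction behaves once `Signal` (defined in Signal.jl) is
# available; values are illustrative only:
#
#   a = Signal(1); b = 2
#   pa = PullAction(+, (a, b))
#   pa()   # pulls `a`, passes `b` through, then computes 1 + 2 == 3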
| Signals | https://github.com/TsurHerman/Signals.jl.git |
|
[
"MIT"
] | 1.2.0 | 569556d349015ef22131b7e0e8867d18ee77b679 | code | 3185 | mutable struct SignalData
x
valid::Bool
propagated::Bool
end
SignalData(x) = SignalData(x, true, false)
SignalData() = SignalData(nothing, false, false)
SignalData(::Nothing) = SignalData()
struct Signal
data::SignalData
action::PullAction
children::Vector{Signal}
binders::Vector{Signal}
strict_push::Bool
state::Ref
end
add_child!(arg_signal::Signal,s::Signal) = push!(arg_signal.children,s)
add_child!(arg_signal,s) = nothing
function store!(sd::SignalData, val)
sd.propagated = false
sd.valid = true;
sd.x = val
end
store!(s::Signal, val) = store!(s.data, val)
value(s::Signal) = value(s.data)
value(sd::SignalData) = sd.x
"""Retrieve the internal state of a `Signal`"""
state(s::Signal) = state(s.state)
state(ref::Ref) = ref.x
state(s::Signal, val) = state(s.state, val)
state(ref::Ref, val) = ref.x = val
propagated(s::Signal) = propagated(s.data)
propagated(sd::SignalData) = sd.propagated
propagated(s::Signal, val::Bool) = propagated(s.data, val)
propagated(sd::SignalData, val::Bool) = sd.propagated = val
valid(s::Signal) = valid(s.data)
valid(sd::SignalData) = sd.valid
Signal(val; kwargs...) = Signal(() -> val; kwargs...)
abstract type Stateless end
function Signal(f::Function, args...; state = Stateless, strict_push = false,
pull_type = StandardPull, v0 = nothing)
_state = Ref(state)
if state != Stateless
args = (args..., _state)
end
sd = SignalData(v0)
action = PullAction(f, args, pull_type)
s=Signal(sd, action, _state, strict_push)
v0 === nothing && s()
s
end
function Signal(sd::SignalData, action::PullAction, state = Stateless, strict_push = false)
if debug_mode()
f(x) = @async println("signal deleted")
finalizer(f, sd)
end
!(typeof(state) <: Ref) && (state = Ref(state))
s = Signal(sd, action, Signal[], Signal[], strict_push, state)
for arg in action.args
add_child!(arg,s)
# isa(arg, Signal) && push!(arg.children, s)
end
s
end
import Base.getindex
function getindex(s::Signal)
value(s)
end
import Base.setindex!
function setindex!(s::Signal, val)
set_value!(s, val)
end
function set_value!(s::Signal, val)
invalidate!(s)
store!(s, val)
end
function invalidate!(s::Signal)
if valid(s)
invalidate!(s.data)
foreach(invalidate!, s.children)
end
end
function invalidate!(sd::SignalData)
sd.valid = false
sd.propagated = false
end
function validate(s::Signal)
valid(s) && return
if valid(s.action)
validate(s.data)
foreach(validate, s.children)
end
end
function validate(sd::SignalData)
sd.propagated = false
sd.valid = true
end
import Base.show
show(io::IO, s::Signal) = show(io, MIME"text/plain"(), s)
function show(io::IO, ::MIME"text/plain", s::Signal)
state_str = "\nstate{$(typeof(s.state.x))}: $(s.state.x)"
state_str = state(s) === Signals.Stateless ? "" : state_str
valid_str = valid(s) ? "" : "(invalidated)"
printstyled(io, "Signal"; bold = true,color = 200)
val = s[] === nothing ? Nothing : s[]
print(io, "$valid_str $state_str \nvalue: ", val)
end
| Signals | https://github.com/TsurHerman/Signals.jl.git |
|
[
"MIT"
] | 1.2.0 | 569556d349015ef22131b7e0e8867d18ee77b679 | code | 2173 | __precompile__()
module Signals
export Signal
const _async_mode = Ref(true)
async_mode() = _async_mode.x
async_mode(b::Bool) = _async_mode.x = b
const _debug_mode = Ref(false)
debug_mode() = _debug_mode.x
debug_mode(b::Bool) = _debug_mode.x = b
include("PullAction.jl")
include("Signal.jl")
include("error.jl")
include("pushpull.jl")
include("eventloop.jl")
include("operators.jl")
include("bind.jl")
include("async_remote.jl")
include("time.jl")
include("TypedSignal.jl")
@doc """
S = Signal(val; strict_push = false)
Create a source `Signal` with initial value `val`, setting `strict_push` to `true`
guarantees that every push to this `Signal` will be carried out independently.
Otherwise, if updates occur faster than the `eventloop` can process, only
the last value before the `eventloop` kicks in will be used (*default*).
S = Signal(f,args...; v0 = nothing)
Create a derived `Signal` whos value is `f(args...)`, args can be of any type,
`Signal` args get replaced by their value before calling `f(args...)`. reads best with
with `do` notation(see example).if `v0` is not `nothing` then `f(args...)` will not
be called directly after Signal creation instead the Signal will be initialized to have value v0.
# Syntax
S[] = val
Set the value of `S` to `val` without propagating the change to the rest of the signal graph,
useful in a pull-based paradigm.
S()
`pull!` the Signal, pulling any changes in the Signal graph that affect `S`.
S(val)
Set the value of `S` to `val` and pushes the changes along the Signal graph.
S[]
Get the current value stored in `S` without pulling changes from the graph.
# Examples
julia> A = Signal(1) # source Signal
Signal
value: 1
julia> B = 2 # non-Signal input
2
julia> C = Signal(A, B) do a, b # derived Signal
a + b
end
Signal
value: 3
julia> A[] = 10 # set value without propagation
10
julia> C[] # reads current value
3
julia> C() # pull latest changes from the Signal graph
12
julia> A(100) # set value to a signal and propagate this change
100
julia> C[]
102
""" Signal
end # module
| Signals | https://github.com/TsurHerman/Signals.jl.git |