licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.2 | 805af53b51077c382eafab33772073fcbfd83d11 | docs | 1043 | # Extras for `SimplePadics`
## `sqrt_table.jl`
This file includes the following functions. Use Julia's help facility for more information.
### `sqrt_table`
Print out a table of `p`-adic square roots for a range of integers.
```
julia> sqrt_table(7,-20,20)
√-20 …5513441121.0_{7}
√-19 …6045320643.0_{7}
√-17 …5064235312.0_{7}
√-13 …2211416261.0_{7}
√-12 …344121533.0_{7}
√-10 …6062122632.0_{7}
√-6 …6301140231.0_{7}
√-5 …425112623.0_{7}
√-3 …6511256052.0_{7}
√0 …0.0_{7}
√1 …1.0_{7}
√2 …6421216213.0_{7}
√4 …2.0_{7}
√8 …524231241.0_{7}
√9 …3.0_{7}
√11 …3144334422.0_{7}
√15 …2343420311.0_{7}
√16 …6666666663.0_{7}
√18 …5563654642.0_{7}
```
### `has_i` and `has_i_list`
Use `has_i(p)` to determine if `√-1` exists as a `p`-adic number.
```
julia> has_i(3)
false
julia> has_i(5)
true
```
Use `has_i_list` to create a list of primes `p` such that `√-1` exists as a `p`-adic number.
```
julia> has_i_list(50)
6-element Vector{Int64}:
5
13
17
29
37
41
``` | SimplePadics | https://github.com/scheinerman/SimplePadics.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1286 | using Pkg
using LuxorGraphPlot
using Documenter
using DocThemeIndigo
using Literate
for each in readdir(pkgdir(LuxorGraphPlot, "examples"))
input_file = pkgdir(LuxorGraphPlot, "examples", each)
endswith(input_file, ".jl") || continue
@info "building" input_file
output_dir = pkgdir(LuxorGraphPlot, "docs", "src", "generated")
Literate.markdown(input_file, output_dir; name=each[1:end-3], execute=false)
end
indigo = DocThemeIndigo.install(LuxorGraphPlot)
DocMeta.setdocmeta!(LuxorGraphPlot, :DocTestSetup, :(using LuxorGraphPlot); recursive=true)
makedocs(;
modules=[LuxorGraphPlot],
authors="Jinguo Liu",
#repo="https://github.com/GiggleLiu/LuxorGraphPlot.jl/blob/{commit}{path}#{line}",
sitename="LuxorGraphPlot.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://giggleliu.github.io/LuxorGraphPlot.jl",
assets=String[indigo],
),
pages=[
"Home" => "index.md",
"Examples" => [
"Tutorials" => "generated/tutorials.md",
"Features" => "generated/features.md",
],
"References" => "ref.md",
],
doctest=false,
warnonly = :missing_docs,
)
deploydocs(;
repo="github.com/GiggleLiu/LuxorGraphPlot.jl",
)
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 832 | function serve(;host::String="0.0.0.0", port::Int=8000)
# setup environment
docs_dir = @__DIR__
julia_cmd = "using Pkg; Pkg.instantiate()"
run(`$(Base.julia_exename()) --project=$docs_dir -e $julia_cmd`)
serve_cmd = """
using LiveServer;
LiveServer.servedocs(;
doc_env=true,
skip_dirs=[
#joinpath("docs", "src", "generated"),
joinpath("docs", "src"),
joinpath("docs", "build"),
joinpath("docs", "Manifest.toml"),
],
literate="examples",
host=\"$host\",
port=$port,
)
"""
try
run(`$(Base.julia_exename()) --project=$docs_dir -e $serve_cmd`)
catch e
if e isa InterruptException
return
else
rethrow(e)
end
end
return
end
serve()
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 2982 | using LuxorGraphPlot, LuxorGraphPlot.Luxor
using LuxorGraphPlot.TensorNetwork
# ## Node styles
# We use a combination of [`nodestore`](@ref) and [`with_nodes`](@ref) to draw nodes with automatically inferred bounding boxes.
nodestore() do ns # store nodes in the nodestore (used to infer the bounding box)
a = circle!((0, 0), 30)
b = ellipse!((100, 0), 60, 40)
c = box!((200, 0), 50, 50; smooth=10)
d = polygon!([rotatepoint(Point(30, 0), i*π/3) for i=1:6] .+ Ref(Point(300, 0)); smooth=5)
with_nodes(ns) do # the context manager to draw nodes
fontsize(6)
for (node, shape) in [(a, "circle"), (b, "ellipse"), (c, "box"), (d, "polygon")]
stroke(node)
text(shape, node)
for p in [left, right, top, bottom, topleft, bottomleft, topright, bottomright, LuxorGraphPlot.center]
text(string(p), offset(fill(circlenode(p(node), 3)), (0, 6)))
end
end
end
end
# ## Connection points
nodestore() do ns
a1 = circle!((150, 150), 30)
a2 = circle!((450, 150), 30)
box1s = [offset(boxnode(rotatepoint(Point(100, 0), i*π/8), 20, 20), a1.loc) for i=1:16]
box2s = offset.(box1s, Ref(a2.loc-a1.loc))
append!(ns, box1s)
append!(ns, box2s)
with_nodes(ns) do
fontsize(14)
stroke(a1)
stroke(a2)
for b in box1s
stroke(b)
line(a1, b; mode=:exact)
end
for b in box2s
stroke(b)
line(a2, b; mode=:natural)
end
text("exact", a1)
text("natural", a2)
end
end
# ## Connector styles
nodestore() do ns
radius = 30
a = boxnode(Point(50, 50), 40, 40; smooth=5)
b = offset(a, (100, 0))
groups = Matrix{Vector{Node}}(undef, 2, 3)
for j=0:1
for k = 0:2
items = [offset(a, (200k, 150j)), offset(b, (200k, 150j))]
groups[j+1, k+1] = items
append!(ns, items)
push!(ns, offset(midpoint(items...), (0, 70)))
end
end
with_nodes() do
fontsize(28)
## the default smooth method is "curve", it must take two control points.
for j=1:2
for k = 1:3
a, b = groups[j, k]
cps = [[offset(midpoint(a, b), (0, 50))], [offset(a, (0, 50)), offset(b, (0, 50))]][j]
smoothprops = [
Dict(:method=>length(cps) == 1 ? "nosmooth" : "curve"),
Dict(:method=>"smooth", :radius=>10),
Dict(:method=>"bezier", :radius=>10),
][k]
stroke(a)
stroke(b)
text("A", a)
text("B", b)
Connection(a, b; smoothprops, control_points=cps) |> stroke
@layer begin
fontsize(14)
text(string(get(smoothprops, :method, "")), offset(midpoint(a, b), (0, 70)))
end
end
end
end
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 2163 | ## Show a graph
using Graphs, LuxorGraphPlot, LuxorGraphPlot.Luxor
# Show a graph with spring (default) layout.
graph = smallgraph(:petersen)
show_graph(graph)
# specify the layout and texts manually
rot15(a, b, i::Int) = cos(2i*π/5)*a + sin(2i*π/5)*b, cos(2i*π/5)*b - sin(2i*π/5)*a
locations = [[rot15(0.0, 50.0, i) for i=0:4]..., [rot15(0.0, 25.0, i) for i=0:4]...]
show_graph(graph, locations,
texts=[string('a'+i) for i=0:9], padding_right=300,
config=GraphDisplayConfig(; background="gray")) do nd
## extra commands; the transformer is a function that converts graph-axis coordinates to canvas-axis coordinates.
LuxorGraphPlot.Luxor.fontsize(22)
xmin, xmax, ymin, ymax = LuxorGraphPlot.get_bounding_box(nd)
LuxorGraphPlot.text("haha, the fontsize is so big!", Point(xmax + 20, (ymin + ymax) / 2))
end
# specify colors, shapes and sizes
show_graph(graph;
vertex_colors=rand(["blue", "red"], 10),
vertex_sizes=rand(10) .* 10 .+ 5,
vertex_stroke_colors=rand(["blue", "red"], 10),
vertex_text_colors=rand(["white", "black"], 10),
edge_colors=rand(["blue", "red"], 15),
vertex_shapes=rand([:circle, :box], 10)
)
# for uniform colors/sizes, you can make life easier by specifying global colors.
show_graph(graph;
config = GraphDisplayConfig(
vertex_color="blue",
vertex_size=7.5,
vertex_stroke_color="transparent",
vertex_text_color="white",
edge_color="green"
)
)
# One can also dump an image to a file
show_graph(graph; format=:svg)
# or render it in another format
show_graph(graph; format=:svg)
# ## Layouts
# The default layout is `:auto`, which uses `:spring` if `locs` is `nothing`.
show_graph(graph, SpringLayout())
show_graph(graph, StressLayout())
show_graph(graph, SpectralLayout())
# ## Show a gallery
# One can use a boolean vector to represent boolean variables on a vertex or an edge.
locs = render_locs(graph, SpringLayout())
matrix = [GraphViz(graph, locs; vertex_colors=[rand(Luxor.RGB) for i=1:10],
edge_colors=[rand(Luxor.RGB) for i=1:15]) for i=1:2, j=1:4]
show_gallery(matrix; format=:png, padding_left=20, padding_right=20, padding_top=20, padding_bottom=20)
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 932 | module LuxorGraphPlot
using Luxor, Graphs
using LinearAlgebra
using MLStyle: @match
export show_graph, show_gallery, left, right, top, bottom, center, boundary, connect, offset, topright, topleft, bottomleft, bottomright
export bottomalign, topalign, rightalign, leftalign
export Node, Connection, circlenode, ellipsenode, boxnode, polygonnode, dotnode, linenode, tonode
export NodeStore, nodestore, with_nodes
export box!, circle!, polygon!, dot!, line!, stroke, ellipse!
export GraphDisplayConfig, GraphViz
# BUGFIX: the export list previously contained the typo `Layred`; the actual
# binding brought in from the `.Layouts` submodule below is `Layered`.
export SpringLayout, StressLayout, SpectralLayout, render_locs, LayeredStressLayout, LayeredSpringLayout, AbstractLayout, Layered
export lighttheme!, darktheme!
include("nodes.jl")
include("layouts/layouts.jl")
using .Layouts: SpringLayout, StressLayout, SpectralLayout, render_locs, LayeredStressLayout, LayeredSpringLayout, AbstractLayout, Layered
include("nodestore.jl")
include("graphplot.jl")
include("tnet.jl")
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 11985 | const CONFIGHELP = """
* `fontsize::Float64 = 12.0`, the font size
* `fontface::String = ""`, the font face, leave empty to follow system
* `vertex_text_color = "black"`, the default text color
* `vertex_stroke_color = "black"`, the default stroke color for vertices
* `vertex_color = "transparent"`, the default default fill color for vertices
* `vertex_size::Float64 = 10.0`, the default vertex size
* `vertex_shape::Symbol = :circle`, the default vertex shape, which can be :circle, :box or :dot
* `vertex_line_width::Float64 = 1`, the default vertex stroke line width
* `vertex_line_style::String = "solid"`, the line style of vertex stroke, which can be one of ["solid", "dotted", "dot", "dotdashed", "longdashed", "shortdashed", "dash", "dashed", "dotdotdashed", "dotdotdotdashed"]
* `edge_color = "black"`, the default edge color
* `edge_line_width::Float64 = 1`, the default line width
* `edge_style::String = "solid"`, the line style of edges, which can be one of ["solid", "dotted", "dot", "dotdashed", "longdashed", "shortdashed", "dash", "dashed", "dotdotdashed", "dotdotdotdashed"]
"""
const VIZHELP = """
* `vertex_colors` is a vector of color strings for specifying vertex fill colors.
* `vertex_sizes` is a vector of real numbers for specifying vertex sizes.
* `vertex_shapes` is a vector of strings for specifying vertex shapes, the string should be "circle" or "box".
* `vertex_stroke_colors` is a vector of color strings for specifying vertex stroke colors.
* `vertex_text_colors` is a vector of color strings for specifying vertex text colors.
* `edge_colors` is a vector of color strings for specifying edge colors.
* `texts` is a vector of strings for labeling vertices.
"""
"""
GraphDisplayConfig
The configuration for graph display.
Keyword arguments
-------------------------------
* `locs` is a vector of tuples for specifying the vertex locations.
* `edges` is a vector of tuples for specifying the edges.
$CONFIGHELP
"""
Base.@kwdef mutable struct GraphDisplayConfig
# line, vertex and text
fontface::String = ""
background::String = "white"
fontsize::Float64 = 12.0
# vertex
vertex_shape::Symbol = :circle
vertex_line_width::Float64 = 1.0 # in pt
vertex_line_style::String = "solid"
vertex_text_color::String = "black"
vertex_stroke_color::String = "black"
vertex_color::String = "transparent"
vertex_size::Float64 = 10.0
# edge
edge_color::String = "black"
edge_line_width::Float64 = 1.0 # in pt
edge_line_style::String = "solid"
end
# All fields are plain immutable values, so a deep copy is an independent config.
Base.copy(config::GraphDisplayConfig) = deepcopy(config)

"""
    darktheme!(config::GraphDisplayConfig)

Set the dark theme for the graph display: transparent background with white
vertex strokes, vertex texts and edges. Returns the mutated `config`.
"""
darktheme!(config::GraphDisplayConfig) = begin
    config.background = "transparent"
    config.vertex_text_color = "white"
    config.vertex_stroke_color = "white"
    config.edge_color = "white"
    return config
end

"""
    lighttheme!(config::GraphDisplayConfig)

Set the light theme for the graph display: black vertex strokes, vertex texts
and edges. Returns the mutated `config`.

NOTE(review): the background is set to "transparent" here, same as `darktheme!`,
rather than back to the default "white" — confirm this is intentional.
"""
lighttheme!(config::GraphDisplayConfig) = begin
    config.background = "transparent"
    config.vertex_text_color = "black"
    config.vertex_stroke_color = "black"
    config.edge_color = "black"
    return config
end
"""
GraphViz
The struct for storing graph visualization information.
Keyword arguments
-------------------------------
$VIZHELP
"""
Base.@kwdef mutable struct GraphViz
locs::Vector{Tuple{Float64, Float64}}
edges::Vector{Tuple{Int, Int}}
vertex_shapes = nothing
vertex_sizes = nothing
vertex_colors = nothing
vertex_stroke_colors = nothing
vertex_text_colors = nothing
edge_colors = nothing
texts = nothing
end
function GraphViz(graph::SimpleGraph, locs=SpringLayout(); kwargs...)
rlocs = getfield.(render_locs(graph, locs), :data)
return GraphViz(; locs=rlocs, edges=[(src(e), dst(e)) for e in edges(graph)], kwargs...)
end
get_bounding_box(g::GraphViz) = (minimum(getindex.(g.locs, 1)), maximum(getindex.(g.locs, 1)), minimum(getindex.(g.locs, 2)), maximum(getindex.(g.locs, 2)))
struct GraphDiagram <: AbstractNodeStore
nodes::Vector{Node}
edges::Vector{Connection}
end
nodes(d::GraphDiagram) = d.nodes
function offset(d::GraphDiagram, point)
GraphDiagram([offset(n, point) for n in d.nodes], [offset(e, point) for e in d.edges])
end
"""
show_graph([f, ]graph::AbstractGraph;
kwargs...
)
Show a graph in VSCode, Pluto or Jupyter notebook, or save it to a file.
Positional arguments
-----------------------------
* `f` is a function that returns extra `Luxor` plotting statements.
* `graph` is a graph instance.
* `locs` is a vector of tuples for specifying the vertex locations, or a [`AbstractLayout`](@ref) instance.
Keyword arguments
-----------------------------
* `config` is a [`GraphDisplayConfig`](@ref) instance.
$VIZHELP
* `padding_left::Int = 10`, the padding on the left side of the drawing
* `padding_right::Int = 10`, the padding on the right side of the drawing
* `padding_top::Int = 10`, the padding on the top side of the drawing
* `padding_bottom::Int = 10`, the padding on the bottom side of the drawing
* `format` is the output format, which can be `:svg`, `:png` or `:pdf`.
* `filename` is a string as the output filename.
Example
------------------------------
```jldoctest
julia> using Graphs, LuxorGraphPlot
julia> show_graph(smallgraph(:petersen); format=:png, vertex_colors=rand(["blue", "red"], 10));
```
"""
show_graph(graph::GraphViz; kwargs...) = show_graph(x->nothing, graph; kwargs...)
show_graph(graph::SimpleGraph, locs=SpringLayout(); kwargs...) = show_graph(x->nothing, graph, locs; kwargs...)
function show_graph(f, g::GraphViz;
format = :svg,
filename = nothing,
padding_left = 10,
padding_right = 10,
padding_top = 10,
padding_bottom = 10,
config = GraphDisplayConfig(),
)
diag = diagram(g.locs, g.edges; vertex_shapes=g.vertex_shapes, vertex_sizes=g.vertex_sizes, config)
with_nodes(diag; format, filename, padding_bottom, padding_left, padding_right, padding_top, background=config.background) do
f(diag)
show_diagram(diag; config,
texts=g.texts,
vertex_colors=g.vertex_colors,
vertex_stroke_colors = g.vertex_stroke_colors,
vertex_text_colors = g.vertex_text_colors,
edge_colors = g.edge_colors)
end
end
function diagram(locs, edges; vertex_sizes=nothing, vertex_shapes=nothing, config=GraphDisplayConfig())
nodes = Node[]
for i in eachindex(locs)
shape = _get(vertex_shapes, i, config.vertex_shape)
vertex_size = _get(vertex_sizes, i, config.vertex_size)
props = Dict(
:circle => Dict(:radius=>vertex_size),
:box => Dict(:width=>2*vertex_size, :height=>2*vertex_size),
:dot => Dict()
)[shape]
push!(nodes, Node(shape, locs[i]; props...))
end
edgs = Connection[]
for (i, j) in edges
push!(edgs, Connection(nodes[i], nodes[j]))
end
return GraphDiagram(nodes, edgs)
end
function show_graph(f, graph::SimpleGraph, locs=SpringLayout();
vertex_shapes = nothing,
vertex_sizes = nothing,
vertex_colors = nothing,
vertex_stroke_colors = nothing,
vertex_text_colors = nothing,
edge_colors = nothing,
texts = nothing,
padding_left = 10,
padding_right = 10,
padding_top = 10,
padding_bottom = 10,
format = :svg,
filename = nothing,
config = GraphDisplayConfig()
)
viz = GraphViz(graph, locs;
vertex_shapes, vertex_sizes, vertex_colors, vertex_stroke_colors,
vertex_text_colors, edge_colors, texts)
show_graph(f, viz; format, filename, padding_bottom, padding_left, padding_right, padding_top, config)
end
function show_diagram(diag::GraphDiagram;
config=GraphDisplayConfig(),
vertex_colors,
vertex_stroke_colors,
vertex_text_colors,
texts,
edge_colors)
render_edges(diag.edges, config; edge_colors)
render_nodes(diag.nodes, config; texts, vertex_colors, vertex_stroke_colors, vertex_text_colors)
end
function render_nodes(nodes::AbstractVector, config::GraphDisplayConfig; texts=nothing, vertex_colors=nothing, vertex_stroke_colors=nothing, vertex_text_colors=nothing)
setline(config.vertex_line_width)
setdash(config.vertex_line_style)
Luxor.fontsize(config.fontsize)
!isempty(config.fontface) && Luxor.fontface(config.fontface)
for (i, node) in enumerate(nodes)
setcolor(_get(vertex_colors, i, config.vertex_color))
fill(node)
setcolor(_get(vertex_stroke_colors, i, config.vertex_stroke_color))
stroke(node)
text = _get(texts, i, "")
if !isempty(text)
setcolor(_get(vertex_text_colors, i, config.vertex_text_color))
Luxor.text(text, node)
end
end
end
function render_edges(edges::AbstractVector, config::GraphDisplayConfig; edge_colors=nothing)
setline(config.edge_line_width)
setdash(config.edge_line_style)
for (k, e) in enumerate(edges)
setcolor(_get(edge_colors, k, config.edge_color))
stroke(e)
end
end
# Accessor for optional per-element overrides: a `nothing` container means
# "use the global default"; otherwise index into the container.
function _get(::Nothing, i, default)
    return default
end
function _get(collection, i, default)
    return collection[i]
end
"""
show_gallery([f, ]stores::AbstractMatrix{GraphViz};
kwargs...
)
Show a gallery of graphs in VSCode, Pluto or Jupyter notebook, or save it to a file.
Positional arguments
-----------------------------
* `f` is a function that returns extra `Luxor` plotting statements.
* `stores` is a matrix of `GraphViz` instances.
Keyword arguments
-----------------------------
* `config` is a [`GraphDisplayConfig`](@ref) instance.
* `padding_left::Int = 10`, the padding on the left side of the drawing
* `padding_right::Int = 10`, the padding on the right side of the drawing
* `padding_top::Int = 10`, the padding on the top side of the drawing
* `padding_bottom::Int = 10`, the padding on the bottom side of the drawing
* `format` is the output format, which can be `:svg`, `:png` or `:pdf`.
* `filename` is a string as the output filename.
"""
function show_gallery(f, stores::AbstractMatrix{GraphViz};
padding_left=10, padding_right=10,
padding_top=10, padding_bottom=10,
config=GraphDisplayConfig(),
format=:svg,
filename=nothing
)
if isempty(stores)
return Luxor.Drawing(1, 1, filename === nothing ? format : filename)
end
xmin, _, ymin, _ = get_bounding_box(stores[1, 1]) .+ (-padding_left, padding_right, -padding_top, padding_bottom)
xspans = map(stores[1, :]) do d
xmin, xmax, _, _ = get_bounding_box(d) .+ (-padding_left, padding_right, -padding_top, padding_bottom)
xmax - xmin
end
yspans = map(stores[:, 1]) do d
_, _, ymin, ymax = get_bounding_box(d) .+ (-padding_left, padding_right, -padding_top, padding_bottom)
ymax - ymin
end
m, n = size(stores)
xoffsets = cumsum([0; xspans[1:end-1]])
yoffsets = cumsum([0; yspans[1:end-1]])
Luxor.Drawing(ceil(Int, sum(xspans)), ceil(Int, sum(yspans)), filename === nothing ? format : filename)
Luxor.origin(-xmin, -ymin)
Luxor.background(config.background)
for i=1:m, j=1:n
g = stores[i, j]
diag_ = diagram(g.locs, g.edges; vertex_shapes=g.vertex_shapes, vertex_sizes=g.vertex_sizes, config)
diag = offset(diag_, (xoffsets[j], yoffsets[i]))
f(diag)
show_diagram(diag; config,
texts=g.texts,
vertex_colors=g.vertex_colors,
vertex_stroke_colors = g.vertex_stroke_colors,
vertex_text_colors = g.vertex_text_colors,
edge_colors = g.edge_colors)
end
Luxor.finish()
Luxor.preview()
end
show_gallery(stores::AbstractMatrix{GraphViz}; kwargs...) = show_gallery(x->nothing, stores; kwargs...)
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 13126 | const REQUIRED_PARAMS = Dict(
:circle => [:radius],
:ellipse => [:width, :height],
:box => [:width, :height],
:polygon => [:relpath],
:line => [:relpath],
:dot => Symbol[]
)
const OPTIONAL_PARAMS = Dict(
:circle => Dict{Symbol, Any}(),
:ellipse => Dict{Symbol, Any}(),
:box => Dict{Symbol, Any}(:smooth=>0),
:polygon => Dict{Symbol, Any}(:smooth=>0, :close=>true),
:line => Dict{Symbol, Any}(:arrowstyle=>"-"),
:dot => Dict{Symbol, Any}()
)
# Render a Dict as a markdown bullet list, one "- `key`: value" entry per line.
function dict2md(d::Dict)
    entries = String[]
    for (key, val) in d
        push!(entries, "- `$(key)`: $(val)")
    end
    return join(entries, "\n")
end
"""
Node(shape::Symbol, loc; props...)
Create a node with a shape and a location. The shape can be `:circle`, `:ellipse`, `:box`, `:polygon`, `:line` or `:dot`.
### Required Keyword Arguments
$(dict2md(REQUIRED_PARAMS))
### Optional Keyword Arguments
$(dict2md(OPTIONAL_PARAMS))
"""
struct Node
shape::Symbol
loc::Point
props::Dict{Symbol, Any}
end
function Node(shape::Symbol, loc; props...)
d = Dict{Symbol, Any}(props)
check_props!(shape, d)
return Node(shape, topoint(loc), d)
end
"""
offset(n::Node, p::Union{Tuple,Point})
offset(n::Node, direction, distance)
offset(n::Node, direction::Node, distance)
Offset a node towards a direction or another node. The direction can be specified by a tuple, a `Point` or a `Node`.
"""
offset(n::Node, p::Union{Tuple,Point}) = Node(n.shape, n.loc + topoint(p), n.props)
offset(n::Node, direction, distance) = offset(n, render_offset(direction, distance))
function offset(n::Node, direction::Node, distance)
p = direction.loc - n.loc
return offset(n, normalize(p) * distance)
end
function render_offset(direction, distance)
angle = render_direction(direction)
return Point(distance * cos(angle), -distance * sin(angle))
end
render_direction(s) = @match s begin
::Real => Float64(s)
"right" => 0.0
"topright" => 7π/4
"top" => -π/2
"topleft" => 5π/4
"left" => 1.0π
"bottomleft" => 3π/4
"bottom" => -3π/2
"bottomright" => π/4
end
Luxor.distance(a::Node, b::Node) = distance(a.loc, b.loc)
topoint(x::Point) = x
topoint(x::Node) = x.loc
topoint(x::Tuple) = Point(x...)
"""
dotnode(x, y)
dotnode(p::Point)
Create a node with a shape `:dot` and a location.
"""
dotnode(x::Real, y::Real) = dotnode(Point(x, y))
dotnode(p) = Node(:dot, topoint(p))
"""
circle(loc, radius; props...) = Node(:circle, loc; radius, props...)
"""
circlenode(loc, radius) = Node(:circle, loc; radius)
"""
ellipse(loc, width, height; props...) = Node(:ellipse, loc; width, height, props...)
"""
ellipsenode(loc, width, height) = Node(:ellipse, loc; width, height)
"""
box(loc, width, height; props...) = Node(:box, loc; width, height, props...)
"""
boxnode(loc, width, height; kwargs...) = Node(:box, loc; width, height, kwargs...)
"""
polygon([loc, ]relpath::AbstractVector; props...) = Node(:polygon, loc; relpath, props...)
"""
polygonnode(loc, relpath::AbstractVector; kwargs...) = Node(:polygon, loc; relpath=topoint.(relpath), kwargs...)
function polygonnode(path::AbstractVector; kwargs...)
mid, relpath = centerize([topoint(x) for x in path])
Node(:polygon, mid; relpath, kwargs...)
end
"""
line(args...; props...) = Node(:line, mid; relpath, props...)
"""
function linenode(args...)
mid, relpath = centerize([topoint(x) for x in args])
return Node(:line, mid; relpath)
end
# Split a path into its centroid and the coordinates relative to that centroid.
function centerize(path)
    centroid = sum(path) / length(path)
    relative = [p - centroid for p in path]
    return centroid, relative
end
# Validate and normalize the property dict for `shape`: errors on missing
# required keys and fills in absent optional keys with their defaults, using the
# module-level REQUIRED_PARAMS / OPTIONAL_PARAMS tables.
function check_props!(shape, props)
    assert_has_props!(shape, props, REQUIRED_PARAMS[shape], OPTIONAL_PARAMS[shape])
end
# Check `props` against the required keys `syms` and the `optional` defaults:
# error on a missing required key, fill absent optional keys with their default,
# and warn about keys that belong to neither set. Returns `true`.
function assert_has_props!(shape, props, syms, optional)
    # required arguments
    for required in syms
        haskey(props, required) && continue
        error("missing property (keyword argument) for shape $shape: $required ")
    end
    # optional arguments
    for (name, default) in optional
        haskey(props, name) || (props[name] = default)
    end
    # not recognized arguments
    for name in keys(props)
        if !(name ∈ syms || haskey(optional, name))
            @warn "property not recognized by shape $shape: $name"
        end
    end
    return true
end
# Forward property access: real struct fields win; anything else is looked up in
# the node's `props` dict (e.g. `n.radius` for a circle node, `n.width` for a box).
function Base.getproperty(n::Node, p::Symbol)
    return hasfield(Node, p) ? getfield(n, p) : n.props[p]
end
"""
Connection(start, stop; isarrow=false, mode=:exact, arrowprops=Dict{Symbol, Any}(), control_points=Point[], smoothprops=Dict{Symbol, Any}())
Create a connection between two nodes. The connection can be a line, a curve, a bezier curve, a smooth curve or a zig-zag line.
### Required Arguments
- `start::Node`: the start node
- `stop::Node`: the stop node
### Optional Keyword Arguments
- `isarrow=false`: whether to draw an arrow at the end of the connection
- `mode=:exact`: the mode to get the connection point, can be `:exact` or `:natural`
- `arrowprops=Dict{Symbol, Any}()`: the properties of the arrow
- `control_points=Point[]`: the control points for the connection
- `smoothprops=Dict{Symbol, Any}()`: the properties of the smooth curve
"""
struct Connection
start::Node
stop::Node
mode::Symbol
isarrow::Bool
arrowprops::Dict{Symbol, Any}
control_points::Vector{Point}
smoothprops::Dict{Symbol, Any}
end
# TODO: polish arrow props, smooth corners
function Connection(start::Node, stop::Node; isarrow=false, mode=:exact, arrowprops=Dict{Symbol, Any}(), control_points=Point[], smoothprops=Dict{Symbol, Any}())
return Connection(start, stop, mode, isarrow, arrowprops, Point[topoint(x) for x in control_points], smoothprops)
end
offset(c::Connection, p::Union{Tuple,Point}) = Connection(offset(c.start, p), offset(c.stop, p); c.mode, c.isarrow, c.arrowprops, c.control_points, c.smoothprops)
connect(a, b; kwargs...) = Connection(tonode(a), tonode(b); kwargs...)
tonode(a::Point) = dotnode(a)
tonode(a::Node) = a
"""
circle(n::Node, action=:stroke)
Stroke a node with line.
"""
stroke(n::Union{Node, Connection}) = (apply_action(n, :stroke); n)
Base.fill(n::Union{Node, Connection}) = (apply_action(n, :fill); n)
function Luxor.text(t::AbstractString, n::Node; angle=0.0)
text(t, n.loc; valign=:middle, halign=:center, angle)
end
function apply_action(n::Node, action)
@match n.shape begin
:circle => circle(n.loc, n.radius, action)
:ellipse => ellipse(n.loc, n.width, n.height, action)
:box => box(n.loc, n.width, n.height, n.smooth, action)
:polygon => if n.props[:smooth] == 0
poly(Ref(n.loc) .+ n.relpath, action; close=n.props[:close])
else
#move(n.loc + n.relpath[1])
polysmooth(Ref(n.loc) .+ n.relpath, n.props[:smooth], action)
end
:line => line((Ref(n.loc) .+ n.relpath)..., action)
:dot => nothing #circle(n.loc, 1, action) # dot has unit radius
end
end
function apply_action(n::Connection, action)
a_ = get_connect_point(n.start, isempty(n.control_points) ? n.stop.loc : n.control_points[1]; mode=n.mode)
b_ = get_connect_point(n.stop, isempty(n.control_points) ? n.start.loc : n.control_points[end]; mode=n.mode)
if n.isarrow
# arrow, line or curve
arrow(a_, n.control_points..., b_; n.arrowprops...)
do_action(action)
else
method = get(n.smoothprops, :method, "curve")
@assert method ∈ ["nosmooth", "smooth", "bezier", "curve"]
if method == "nosmooth" || isempty(n.control_points)
if isempty(n.control_points)
# line
line(a_, b_, action)
else
# zig-zag line
# TODO: support arrow
poly([a_, n.control_points..., b_], action; close=false)
end
elseif method == "smooth"
# TODO: support close=false
#move(a_)
polysmooth([a_, n.control_points..., b_], get(n.smoothprops, :radius, 5), action; close=false)
elseif method == "bezier"
# bezier curve
pts = [a_, n.control_points..., b_]
bezpath = makebezierpath(pts)
drawbezierpath(bezpath, action, close=false)
else
# curve
move(a_)
curve(n.control_points..., b_)
do_action(action)
end
end
end
# Bounding-box helpers: extreme x/y coordinates over a path of points.
xmin(path) = mapreduce(pt -> pt.x, min, path)
xmax(path) = mapreduce(pt -> pt.x, max, path)
ymin(path) = mapreduce(pt -> pt.y, min, path)
ymax(path) = mapreduce(pt -> pt.y, max, path)
for F in [:left, :right, :top, :bottom, :topright, :topleft, :bottomleft, :bottomright]
SF = String(F)
@eval begin
"""
$($SF)(n::Node)
Get the $($SF) boundary point of a node. Returns a `Node` of shape `:dot`.
"""
$F(n::Node) = boundary(n, $(String(F)))
end
end
"""
midpoint(a::Node, b::Node)
Get the midpoint of two nodes. Returns a `Node` of shape `:dot`.
"""
Luxor.midpoint(a::Node, b::Node) = dotnode(midpoint(a.loc, b.loc))
"""
center(n::Node)
Get the center point of a node. Returns a `Node` of shape `:dot`.
"""
center(n::Node) = dotnode(n.loc)
"""
boundary(n::Node, s::String)
boundary(n::Node, angle::Real)
Get the boundary point of a node in a direction. The direction can be specified by a string or an angle.
Possible strings are: "left", "right", "top", "bottom", "topright", "topleft", "bottomleft", "bottomright".
"""
boundary(n::Node, s::String) = boundary(n, render_direction(s))
function boundary(n::Node, angle::Real)
@match n.shape begin
:circle => dotnode(n.loc.x + n.radius * cos(angle), n.loc.y + n.radius * sin(angle))
:ellipse => dotnode(n.loc.x + n.width/2 * cos(angle), n.loc.y + n.height/2 * sin(angle))
# TODO: polish for rounded corners
:box || :polygon => begin
path = getpath(n)
radi = max(xmax(path) - xmin(path), ymax(path) - ymin(path))
x = n.loc.x + 2*radi * cos(angle)
y = n.loc.y + 2*radi * sin(angle)
# NOTE: polygon must intersect with its center!
intersect = intersectlinepoly(n.loc, Point(x, y), path)
if isempty(intersect)
@warn "boundary point not found, return center instead: path=$path, angle=$angle"
return center(n)
else
return dotnode(intersect[1])
end
end
:dot => n
:line => begin
path = getpath(n)
# project to angle direction, find the one with the largest norm
unitv = Point(cos(angle), sin(angle))
projects = dotproduct.(Ref(unitv), path)
mval, mloc = findmax(projects)
return dotnode(path[mloc])
end
_ => error("can not get boundary point for shape: $(n.shape)")
end
end
# Alignment helpers: return a `:dot` node on the given edge of `n`, aligned to a
# target node's coordinate (or to an explicit coordinate).
bottomalign(n::Node, target::Node) = bottomalign(n, target.loc[1])
bottomalign(n::Node, x::Real) = dotnode(x, bottom(n).loc[2])
topalign(n::Node, target::Node) = topalign(n, target.loc[1])
topalign(n::Node, x::Real) = dotnode(x, top(n).loc[2])
# BUGFIX: this method previously dispatched to `topalign`, producing a dot at
# the wrong coordinates; it now dispatches to its own coordinate method like
# its three siblings.
leftalign(n::Node, target::Node) = leftalign(n, target.loc[2])
leftalign(n::Node, y::Real) = dotnode(left(n).loc[1], y)
rightalign(n::Node, target::Node) = rightalign(n, target.loc[2])
rightalign(n::Node, y::Real) = dotnode(right(n).loc[1], y)
# get the path of a node
function getpath(n::Node)
@match n.shape begin
:circle => [Point(n.loc.x + n.radius * cos(θ), n.loc.y + n.radius * sin(θ)) for θ in 0:π/8:2π]
:ellipse => [Point(n.loc.x + n.width/2 * cos(θ), n.loc.y + n.height/2 * sin(θ)) for θ in 0:π/8:2π]
:box => begin
x, y = n.loc
w, h = n.width, n.height
[Point(x-w/2, y-h/2), Point(x-w/2, y+h/2), Point(x+w/2, y+h/2), Point(x+w/2, y-h/2)]
end
:polygon => Ref(n.loc) .+ n.relpath
:dot => [n.loc]
:line => Ref(n.loc) .+ n.relpath
end
end
function Luxor.line(a::Node, b::Node, action=:stroke; mode=:exact, kwargs...)
a_ = get_connect_point(a, b.loc; mode)
b_ = get_connect_point(b, a.loc; mode)
line(a_, b_, action; kwargs...)
end
function Luxor.arrow(a::Node, b::Node, action=:stroke; mode=:exact, kwargs...)
a_ = get_connect_point(a, b.loc; mode)
b_ = get_connect_point(b, a.loc; mode)
arrow(a_, b_; kwargs...)
do_action(action)
end
function get_connect_point(a::Node, bloc::Point; mode)
@match a.shape begin
:circle => intersectionlinecircle(a.loc, bloc, a.loc, a.radius)[2]
:ellipse => boundary(a, angleof(bloc-a.loc)).loc
:dot => a.loc
:line => a.loc + a.relpath[end] # the last node
:box || :polygon => @match mode begin
:natural => closest_natural_point(getpath(a), bloc)
:exact => boundary(a, angleof(bloc-a.loc)).loc
_ => error("Connection point mode `:$(mode)` is not defined!")
end
end
end
angleof(p::Point) = atan(p.y, p.x)
function closest_natural_point(path::AbstractVector, p::Point)
minval, idx = findmin(x->distance(p, x), path)
mid = i->midpoint(path[i], path[mod1(i+1, length(path))])
minval2, idx2 = findmin(i->distance(p, mid(i)), 1:length(path))
return minval > minval2 ? mid(idx2) : path[idx]
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 4116 | abstract type AbstractNodeStore end
"""
NodeStore <: AbstractNodeStore
A collection of nodes, which is used to infer the bounding box of a drawing.
"""
struct NodeStore <: AbstractNodeStore
nodes::Vector{Node}
end
# Construct an empty store.
NodeStore() = NodeStore(Node[])
# Accessor used by `get_bounding_box`; other AbstractNodeStore subtypes override it.
nodes(d::NodeStore) = d.nodes
# Register a single node in the store; returns the store for chaining.
function Base.push!(store::NodeStore, node::Node)
    push!(store.nodes, node)
    return store
end

# Register a collection of nodes in the store; returns the store for chaining.
function Base.append!(store::NodeStore, newnodes)
    append!(store.nodes, newnodes)
    return store
end
# Axis-aligned bounding box (xmin, xmax, ymin, ymax) over the outlines of all
# nodes in the store; an empty store yields a degenerate all-zero box.
function get_bounding_box(d::AbstractNodeStore)
nds = nodes(d)
isempty(nds) && return (0.0, 0.0, 0.0, 0.0)
xmin_val, xmax_val, ymin_val, ymax_val = Inf, -Inf, Inf, -Inf
for n in nds
path = getpath(n)
# xmin/xmax/ymin/ymax over a vector of points — helpers not visible in this chunk
xmin_val = min(xmin_val, xmin(path))
xmax_val = max(xmax_val, xmax(path))
ymin_val = min(ymin_val, ymin(path))
ymax_val = max(ymax_val, ymax(path))
end
return xmin_val, xmax_val, ymin_val, ymax_val
end
# Global drawing context: the store that the generated `<shape>!` helpers push into.
const CURRENT_CONTEXT = Base.RefValue{AbstractNodeStore}(NodeStore())
# Install `d` as the current global context; returns `d`.
function setcontext!(d::AbstractNodeStore)
CURRENT_CONTEXT[] = d
return d
end
# Reset the global context to a fresh, empty NodeStore.
function emptycontext!()
CURRENT_CONTEXT[] = NodeStore()
end
# Fetch the current global context.
function getcontext!()
return CURRENT_CONTEXT[]
end
# For each supported shape, generate a pair of `<shape>!` helpers that build a
# node via `<shape>node(...)` and register it in a node store.
for F in [:line, :dot, :circle, :box, :polygon, :ellipse]
SF = String(F)
@eval begin
"""
$($SF)!([nodestore, ]args...; kwargs...) = push!(nodestore, $($SF)node(args...; kwargs...))
Add a $($SF) shaped node to the nodestore. Please refer to [`$($SF)node`](@ref) for more information.
If `nodestore` is not provided, the current nodestore is used.
"""
function $(Symbol(F, :!))(args...; kwargs...)
# implicit-context variant: pushes into the global current nodestore
obj = $(Symbol(F, :node))(args...; kwargs...)
push!(getcontext!(), obj)
return obj
end
# explicit-store variant
function $(Symbol(F, :!))(d::AbstractNodeStore, args...; kwargs...)
obj = $(Symbol(F, :node))(args...; kwargs...)
push!(d, obj)
return obj
end
end
end
"""
nodestore(f)
Create a [`NodeStore`](@ref) context, such that [`box!`](@ref), [`circle!`](@ref), [`polygon!`](@ref), [`dot!`](@ref) and [`line!`](@ref) will add nodes to the nodestore.
The nodestore is passed to the function `f` as an argument.
### Example
```julia
julia> using LuxorGraphPlot, LuxorGraphPlot.Luxor
julia> nodestore() do ns
box = box!(ns, (100, 100), 100, 100)
circle = circle!(ns, (200, 200), 50)
with_nodes(ns) do
stroke(box)
stroke(circle)
Luxor.line(topright(box), circle)
end
end
```
"""
function nodestore(f)
    d = NodeStore()
    setcontext!(d)
    # fixed: ensure the global context is cleared even when `f` throws, so a
    # failed drawing cannot leak its store into subsequent drawings
    try
        return f(d)
    finally
        emptycontext!()
    end
end
"""
with_nodes(f[, nodestore]; kwargs...)
Create a drawing with the nodes in the nodestore.
The bounding box of the drawing is determined by the bounding box of the nodes in the nodestore.
If `nodestore` is not provided, the current nodestore is used.
### Keyword arguments
- `padding_left::Int=10`: Padding on the left side of the drawing.
- `padding_right::Int=10`: Padding on the right side of the drawing.
- `padding_top::Int=10`: Padding on the top side of the drawing.
- `padding_bottom::Int=10`: Padding on the bottom side of the drawing.
- `format::Symbol=:svg`: The format of the drawing. Available formats are `:png`, `:pdf`, `:svg`...
- `filename::String=nothing`: The filename of the drawing. If `nothing`, a temporary file is created.
- `background::String="white"`: The background color of the drawing.
"""
with_nodes(f; kwargs...) = with_nodes(f, getcontext!(); kwargs...)
function with_nodes(f, d::AbstractNodeStore;
padding_left=10, padding_right=10,
padding_top=10, padding_bottom=10,
format=:svg, filename=nothing,
background="white"
)
# grow the node bounding box by the paddings (note the sign flips on the min coordinates)
xmin, xmax, ymin, ymax = get_bounding_box(d) .+ (-padding_left, padding_right, -padding_top, padding_bottom)
# `format` is only used when no filename is given
Luxor.Drawing(ceil(Int, xmax - xmin), ceil(Int, ymax - ymin), filename === nothing ? format : filename)
# shift the origin so node coordinates land inside the canvas
Luxor.origin(-xmin, -ymin)
Luxor.background(background)
f()
Luxor.finish()
Luxor.preview()
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 840 | module TensorNetwork
using ..LuxorGraphPlot
import ..LuxorGraphPlot: AbstractNodeStore, nodes
using Luxor
export mps, TensorNetworkDiagram
# Diagram container for tensor-network drawings: tensor nodes, dangling-leg
# endpoints (`dots`), and the connections between them.
struct TensorNetworkDiagram <: AbstractNodeStore
nodes::Vector{Node}
dots::Vector{Node}
edges::Vector{Connection}
end
# all drawable nodes (tensors plus leg endpoints), used for bounding-box inference
nodes(tn::TensorNetworkDiagram) = [tn.nodes..., tn.dots...]
# tensor network visualization
# Build a matrix-product-state diagram: `n` circle tensors in a row, each with
# one upward dangling leg, connected in a chain.
function mps(n::Int; radius=15, distance=50, offset=(0, 0))
nodes = [circlenode(LuxorGraphPlot.topoint(offset) + Point((i-1) * distance, 0), radius) for i=1:n]
# pins
# one dangling endpoint above each tensor (the physical index)
pins = [LuxorGraphPlot.offset(center(a), "top", distance ÷ 2) for a in nodes]
# edges
# horizontal bond edges between neighbors, plus one edge from each tensor to its pin
edges = [[connect(a, b) for (a, b) in zip(nodes[1:end-1], nodes[2:end])]...,
[connect(a, b) for (a, b) in zip(nodes, pins)]...]
return TensorNetworkDiagram(nodes, pins, edges)
end
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1886 | """
AbstractLayout
Abstract type for layout algorithms.
"""
abstract type AbstractLayout end
"""
render_locs(graph, layout)
Render the vertex locations for a graph from an [`AbstractLayout`](@ref) instance,
or — for this method — from an explicit vector of per-vertex coordinates.
### Arguments
* `graph::AbstractGraph`: the graph to render
* `layout`: the layout algorithm, or a vector of per-vertex locations
"""
function render_locs(graph::AbstractGraph, layout::AbstractVector)
@assert nv(graph) == length(layout) "The number of vertices in the graph must match the number of locations, got $(nv(graph)) vertices and $(length(layout)) locations"
return Point.(layout)
end
"""
Layered <: AbstractLayout
Layered version of a parent layout algorithm.
### Fields
* `parent::LT`: the parent layout algorithm
* `zlocs::Vector{T}`: the z-axis locations
* `aspect_ratio::Float64`: the aspect ratio of the z-axis
"""
struct Layered{LT<:AbstractLayout, T} <: AbstractLayout
parent::LT
zlocs::Vector{T}
aspect_ratio::Float64
end
function render_locs(graph, l::Layered)
@assert nv(graph) == length(l.zlocs) "The number of vertices in the graph must match the number of z-axis locations, got $(nv(graph)) vertices and $(length(l.zlocs)) z-axis locations"
locs = render_locs(graph, l.parent)
# squash the parent layout's y-coordinate by `aspect_ratio` and shift each
# vertex by its layer's z-location, giving a 2.5-D layered view
map(lz->Point(lz[1][1], lz[1][2]* l.aspect_ratio + lz[2]), zip(locs, l.zlocs))
end
# Diagnostics for judging a rendered layout.
struct LayoutQuality
closeness::Float64               # separation of non-adjacent pairs relative to adjacent ones
mean_distance_deviation::Float64 # relative deviation of mean edge length from the target
end
function quality_of_layout(graph, locs, optimal_distance)
average_distance_con = sum([Layouts.distance(locs[e.src], locs[e.dst]) for e in edges(graph)])/ne(graph)
# NOTE(review): despite its name, this is the *total* (not mean) distance over
# the complement graph's edges — downstream thresholds rely on this scale.
average_distance_dis = sum([Layouts.distance(locs[e.src], locs[e.dst]) for e in edges(complement(graph))])
deviation = abs(average_distance_con - optimal_distance) / min(optimal_distance, average_distance_con)
return LayoutQuality(average_distance_dis/average_distance_con, deviation)
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 153 | module Layouts
using LinearAlgebra, Graphs
include("point.jl")
include("Core.jl")
include("spring.jl")
include("stress.jl")
include("spectral.jl")
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1401 | """
Point{D, T}
A point in D-dimensional space, with coordinates of type T.
"""
# Immutable D-dimensional point with coordinates stored as an NTuple.
# Supports vector arithmetic, `dot`/`norm`, iteration and coordinate-wise
# broadcasting; the docstring above describes the public contract.
struct Point{D, T <: Real}
    data::NTuple{D, T}
end
const Point2D{T} = Point{2, T}
const Point3D{T} = Point{3, T}

# number of coordinates
dimension(::Point{D}) where D = D
Base.eltype(::Type{Point{D, T}}) where {D, T} = T

# construct from loose coordinates, e.g. `Point(1.0, 2.0)`; idempotent on Points
Point(x::Real...) = Point((x...,))
Point(x::Point) = x

# fixed: the original mapped a unary `*` (identity) over an already-computed
# elementwise product; the two-collection `mapreduce` form computes the same
# inner product without the intermediate tuple
LinearAlgebra.dot(x::Point, y::Point) = mapreduce(*, +, x.data, y.data)
LinearAlgebra.norm(x::Point) = sqrt(sum(abs2, x.data))

# coordinate-wise scalar and vector arithmetic
Base.:*(x::Real, y::Point) = Point(x .* y.data)
Base.:*(x::Point, y::Real) = Point(x.data .* y)
Base.:/(y::Point, x::Real) = Point(y.data ./ x)
Base.:+(x::Point, y::Point) = Point(x.data .+ y.data)
Base.:-(x::Point, y::Point) = Point(x.data .- y.data)
Base.isapprox(x::Point, y::Point; kwargs...) = all(isapprox.(x.data, y.data; kwargs...))

Base.getindex(p::Point, i::Int) = p.data[i]
# broadcast/iterate over the coordinate tuple
Base.broadcastable(p::Point) = p.data
Base.iterate(p::Point, args...) = iterate(p.data, args...)

Base.zero(::Type{Point{D, T}}) where {D, T} = Point(ntuple(i->zero(T), D))
Base.zero(::Point{D, T}) where {D, T} = Point(ntuple(i->zero(T), D))

# Euclidean distance between two points
distance(p::Point, q::Point) = norm(p - q)

# points with iid uniform / standard-normal coordinates
Base.rand(::Type{Point{D, T}}) where {D, T} = Point(ntuple(_->rand(T), D))
Base.randn(::Type{Point{D, T}}) where {D, T} = Point(ntuple(_->randn(T), D))
Base.isfinite(p::Point) = all(isfinite, p.data)
rand_points_2d(n::Int) = [Point(randn(), randn()) for _ in 1:n] | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 2070 | """
SpectralLayout <: AbstractLayout
A layout algorithm based on spectral graph theory.
### Fields
* `optimal_distance::Float64`: the optimal distance between vertices
* `dimension::Int`: the number of dimensions
"""
@kwdef struct SpectralLayout <: AbstractLayout
optimal_distance::Float64 = 50.0 # target vertex spacing; scales the eigenvector embedding
dimension::Int = 2 # number of embedding coordinates per vertex
end
# Delegate to `spectral_layout` with the stored parameters.
function render_locs(graph, l::SpectralLayout)
return spectral_layout(graph;
optimal_distance=l.optimal_distance,
dimension=l.dimension
)
end
"""
spectral_layout(g::AbstractGraph, weight=nothing; optimal_distance=50.0, dimension=2)
Spectral layout for graph plotting, returns a vector of vertex locations.
"""
function spectral_layout(g::AbstractGraph, weight=nothing; optimal_distance=50.0, dimension=2)
# trivial graphs: one vertex at the origin, or two placed `optimal_distance` apart
if nv(g) == 1
return [zero(Point{dimension, Float64})]
elseif nv(g) == 2
return [zero(Point{dimension, Float64}), Point(ntuple(i->i==1 ? Float64(optimal_distance) : 0.0, dimension))]
end
if weight === nothing
weight = ones(ne(g))
end
# large graphs: sparse weighted adjacency + iterative eigensolver;
# small graphs: dense Laplacian + full eigendecomposition
if nv(g) > 500
A = Graphs.sparse(Int[src(e) for e in edges(g)],
Int[dst(e) for e in edges(g)],
weight, nv(g), nv(g))
if is_directed(g)
A = A + transpose(A)
end
# 16 is an empirical scale factor — presumably tuned to match the other layouts; TODO confirm
return _spectral(A, dimension) .* 16optimal_distance
else
L = laplacian_matrix(g)
return _spectral(Matrix(L), dimension) .* 16optimal_distance
end
end
# Dense path: full eigendecomposition of the Laplacian. The embedding uses
# the eigenvectors of the 2nd through (dimension+1)-th smallest eigenvalues,
# skipping the smallest one.
function _spectral(L::Matrix, dimension)
    vals, vecs = eigen(L)
    chosen = sortperm(vals)[2:1+dimension]
    return Point.([vecs[:, k] for k in chosen]...)
end
# Sparse path for large graphs: build the Laplacian L = D - A and take the
# eigenvectors of its smallest eigenvalues with an iterative eigensolver.
function _spectral(A, dimension)
    data = vec(sum(A, dims=1))
    D = Graphs.sparse(Base.OneTo(length(data)), Base.OneTo(length(data)), data)
    L = D - A
    # fixed: request `dimension + 1` eigenpairs instead of the hard-coded 3,
    # so the slice [2:1+dimension] below is valid for any `dimension`
    # (identical behavior for the default dimension = 2)
    eigenvalues, eigenvectors = Graphs.LinAlg.eigs(L, nev=dimension+1, which=Graphs.SR())
    index = sortperm(real(eigenvalues))[2:1+dimension]
    return Point.([real.(eigenvectors[:, idx]) for idx in index]...)
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 4133 | """
SpringLayout <: AbstractLayout
A layout algorithm based on a spring model.
### Fields
* `optimal_distance::Float64`: the optimal distance between vertices
* `maxiter::Int`: the maximum number of iterations
* `α0::Float64`: the initial moving speed
* `meta::Dict{Symbol, Any}`: graph dependent meta information, including
* `initial_locs`: initial vertex locations
* `mask`: boolean mask for which vertices to relocate
"""
@kwdef struct SpringLayout <: AbstractLayout
optimal_distance::Float64 = 50.0
maxiter::Int = 100
α0::Float64 = 2*optimal_distance # initial moving speed
meta::Dict{Symbol, Any} = Dict{Symbol, Any}()
end
# Delegate to `spring_layout`; optional initial locations and a relocation
# mask are read from the `meta` dictionary when present.
function render_locs(graph, l::SpringLayout)
return spring_layout(graph;
optimal_distance=l.optimal_distance,
maxiter=l.maxiter,
α0=l.α0,
locs=get(l.meta, :initial_locs, nothing),
mask=get(l.meta, :mask, nothing),
)
end
"""
spring_layout(g::AbstractGraph;
locs=nothing,
optimal_distance=50.0, # the optimal vertex distance
maxiter=100,
α0=2*optimal_distance, # initial moving speed
mask::AbstractVector{Bool}=trues(nv(g)) # mask for which to relocate
)
Spring layout for graph plotting, returns a vector of vertex locations.
!!! note
This function is copied from [`GraphPlot.jl`](https://github.com/JuliaGraphs/GraphPlot.jl),
where you can find more information about this function.
"""
function spring_layout(g::AbstractGraph;
locs=nothing,
optimal_distance=50.0, # the optimal vertex distance
maxiter=100,
α0=2*optimal_distance,
mask=nothing,
)
# work in units of `optimal_distance`: random initial points are already in
# those units; user-supplied locations are scaled down
locs = locs === nothing ? rand_points_2d(nv(g)) : Point.(locs) ./ optimal_distance
mask = mask === nothing ? trues(nv(g)) : mask
@assert nv(g) == length(locs) "number of vertices in graph and locs must be the same, got $(nv(g)) and $(length(locs))"
# Store forces and apply at end of iteration all at once
force = zero(locs)
# Iterate maxiter times
@inbounds for iter = 1:maxiter
# Cool down
temp = α0 / iter
spring_step!(g, locs, force; optimal_distance=1.0, temp, mask)
end
# scale back to drawing units
optimal_distance .* locs
end
# One Fruchterman–Reingold style step: accumulate pairwise forces into
# `force`, then move each unmasked vertex by at most `temp`.
function spring_step!(g::AbstractGraph, locs, force;
        optimal_distance, temp, mask)
    # Calculate forces
    for i = 1:nv(g)
        force_i = zero(eltype(locs))
        for j = 1:nv(g)
            i == j && continue
            dist = distance(locs[i], locs[j])
            # fixed: coincident vertices previously produced Inf/NaN forces
            # (division by zero below); skip the degenerate pair instead
            dist == 0 && continue
            if has_edge(g, i, j)
                # Attractive + repulsive force
                # F_d = dist² / k - k² / dist # original FR algorithm
                F_d = dist / optimal_distance - optimal_distance^2 / dist^2
            else
                # Just repulsive
                # F_d = -k² / dist # original FR algorithm
                F_d = -optimal_distance^2 / dist^2
            end
            force_i += F_d * (locs[j] - locs[i])
        end
        force[i] = force_i
    end
    # Now apply them, but limit to temperature
    for i = 1:nv(g)
        mask[i] || continue
        force_mag = norm(force[i])
        # fixed: a zero net force previously yielded scale = 0/0 = NaN,
        # corrupting the vertex location; nothing to move in that case
        force_mag == 0 && continue
        scale = min(force_mag, temp) / force_mag
        locs[i] += force[i] * scale
    end
end
"""
LayeredSpringLayout(; zlocs, optimal_distance, aspect_ratio=0.2)
Create a layered spring layout.
### Keyword Arguments
* `zlocs`: the z-axis locations
* `optimal_distance::Float64`: the optimal distance between vertices
* `aspect_ratio::Float64`: the aspect ratio of the z-axis
* `α0::Float64`: the initial moving speed
* `maxiter::Int`: the maximum number of iterations
"""
function LayeredSpringLayout(; zlocs, optimal_distance=50.0, aspect_ratio=0.2, α0=2*optimal_distance, maxiter=100)
return Layered(SpringLayout(; optimal_distance, α0, maxiter), zlocs, aspect_ratio)
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 5175 | """
StressLayout <: AbstractLayout
A layout algorithm based on stress majorization.
### Fields
* `optimal_distance::Float64`: the optimal distance between vertices
* `maxiter::Int`: the maximum number of iterations
* `rtol::Float64`: the absolute tolerance
* `initial_locs`: initial vertex locations
* `mask`: boolean mask for which vertices to relocate
* `meta::Dict{Symbol, Any}`: graph dependent meta information, including
* `initial_locs`: initial vertex locations
* `mask`: boolean mask for which vertices to relocate
"""
@kwdef struct StressLayout <: AbstractLayout
optimal_distance::Float64 = 50.0
maxiter::Int = 100
rtol::Float64 = 1e-2 # relative tolerance on the stress used to detect convergence
meta::Dict{Symbol, Any} = Dict{Symbol, Any}()
end
# Delegate to `stressmajorize_layout`; optional initial locations and a
# relocation mask are read from the `meta` dictionary when present.
function render_locs(graph, l::StressLayout)
return stressmajorize_layout(graph;
optimal_distance=l.optimal_distance,
maxiter=l.maxiter,
rtol=l.rtol,
locs=get(l.meta, :initial_locs, nothing),
mask=get(l.meta, :mask, nothing),
)
end
"""
stressmajorize_layout(g::AbstractGraph;
locs=rand_points_2d(nv(g)),
w=nothing,
optimal_distance=50.0, # the optimal vertex distance
maxiter = 400 * nv(g)^2,
rtol=1e-2,
)
Stress majorization layout for graph plotting, returns a vector of vertex locations.
### References
* https://github.com/JuliaGraphs/GraphPlot.jl/blob/e97063729fd9047c4482070870e17ed1d95a3211/src/stress.jl
"""
function stressmajorize_layout(g::AbstractGraph;
optimal_distance=50.0, # the optimal vertex distance
locs=nothing,
w=nothing,
maxiter = 400 * nv(g)^2,
rtol=1e-2,
mask=nothing,
)
locs = locs === nothing ? rand_points_2d(nv(g)) .* optimal_distance : Point.(locs)
mask = mask === nothing ? trues(nv(g)) : mask
# the extra factor 3 is for matching the spring layout result
# target distance matrix: graph-theoretic (hop) distances between all pairs
δ = 3 * optimal_distance .* hcat([gdistances(g, i) for i=1:nv(g)]...)
if w === nothing
# default weights 1/δ²; unreachable pairs (infinite δ) get weight 0
w = δ .^ -2
w[.!isfinite.(w)] .= 0
end
@assert length(locs)==size(δ, 1)==size(δ, 2)==size(w, 1)==size(w, 2)
locs = copy(locs)
Lw = weighted_laplacian(w)
pinvLw = pinv(Lw)
newstress = stress(locs, δ, w)
iter = 0
L = zeros(eltype(Lw), nv(g), nv(g))
local locs_new
for outer iter = 1:maxiter
# majorization update: solve the linearized system for the new positions
lz = LZ!(L, locs, δ, w)
locs_new = pinvLw * (lz * locs)
@assert all(isfinite.(locs_new))
newstress, oldstress = stress(locs_new, δ, w), newstress
@debug """Iteration $iter
Change in coordinates: $(sum(distance.(locs_new, locs))/length(locs))
Stress: $newstress (change: $(newstress-oldstress))
"""
isapprox(newstress, oldstress; rtol) && break
# only relocate unmasked vertices
locs[mask] = locs_new[mask]
end
# NOTE(review): this also warns if convergence happens exactly on the last iteration
iter == maxiter && @warn("Maximum number of iterations reached without convergence")
return locs
end
# Weighted stress of a layout: the sum over unordered vertex pairs of
# w[i,j] * (‖xᵢ - xⱼ‖ - dᵢⱼ)².
function stress(locs::AbstractVector{Point{D, T}}, d, w) where {D, T}
    n = length(locs)
    @assert n == size(d, 1) == size(d, 2) == size(w, 1) == size(w, 2)
    total = 0.0
    @inbounds for col = 1:n, row = 1:col-1
        total += w[row, col] * (distance(locs[row], locs[col]) - d[row, col])^2
    end
    return total
end
# Weighted graph Laplacian of the (square) weight matrix `w`:
# off-diagonals are -w[i,j]; each diagonal entry is the row sum of the
# off-diagonal weights.
function weighted_laplacian(w::AbstractMatrix{T}) where T
    n = LinearAlgebra.checksquare(w)
    lap = zeros(T, n, n)
    for row = 1:n
        rowsum = zero(T)
        for col = 1:n
            row == col && continue
            lap[row, col] = -w[row, col]
            rowsum += w[row, col]
        end
        lap[row, row] = rowsum
    end
    return lap
end
# Fill `L` with the stress-majorization matrix L^Z for the current positions:
# upper-triangle entries are -w·d / ‖xᵢ - xⱼ‖ (computed first), then mirrored
# to the lower triangle; each diagonal accumulates the negated row entries.
function LZ!(L::AbstractMatrix{T}, locs::AbstractVector{Point{D, T2}}, d, w) where {D, T, T2}
@assert length(locs)==size(d, 1)==size(d, 2)==size(w, 1)==size(w, 2)
fill!(L, zero(T))
n = length(locs)
@inbounds for i=1:n-1
diag = zero(T)
for j=i+1:n
nrmz = distance(locs[i], locs[j])
δ = w[i, j] * d[i, j]
# 1e-8 floor guards against division by zero for coincident vertices
lij = -δ/max(nrmz, 1e-8)
L[i, j] = lij
diag -= lij
end
L[i, i] += diag
end
# mirror the upper triangle into the lower one and finish the diagonal
@inbounds for i=2:n
diag = zero(T)
for j=1:i-1
lij = L[j,i]
L[i,j] = lij
diag -= lij
end
L[i,i] += diag
end
return L
end
"""
LayeredStressLayout(; zlocs, optimal_distance, aspect_ratio=0.2)
Create a layered stress layout.
### Keyword Arguments
* `zlocs`: the z-axis locations
* `optimal_distance::Float64`: the optimal distance between vertices
* `aspect_ratio::Float64`: the aspect ratio of the z-axis
* `maxiter::Int`: the maximum number of iterations
* `rtol::Float64`: the relative tolerance
"""
function LayeredStressLayout(; zlocs, optimal_distance=50.0, aspect_ratio=0.2, maxiter=100, rtol=1e-2)
return Layered(StressLayout(; optimal_distance, maxiter, rtol), zlocs, aspect_ratio)
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1393 | using LuxorGraphPlot, Graphs
using Luxor
using Test
@testset "GraphDisplayConfig" begin
config = GraphDisplayConfig()
@test config isa GraphDisplayConfig
c1 = darktheme!(copy(config))
@test c1 isa GraphDisplayConfig
@test c1.vertex_stroke_color == "white"
c2 = lighttheme!(copy(config))
@test c2 isa GraphDisplayConfig
@test c2.vertex_stroke_color == "black"
end
@testset "GraphViz" begin
graph = smallgraph(:petersen)
gv = GraphViz(graph)
@test gv isa GraphViz
@test gv.locs isa Array
end
@testset "graph plot" begin
locations = [(50.0, 100.0), (100.0, 150.0)]
@test show_graph(GraphViz(locs=locations, edges=[(1, 2)])) isa Drawing
gv = GraphViz(locs=[], edges=[])
@test show_graph(gv) isa Drawing
@test show_graph(gv; format=:pdf) isa Drawing
@test show_graph(gv; filename=tempname()*".svg") isa Drawing
graph = smallgraph(:petersen)
@test show_graph(graph) isa Drawing
# fixed: this expression was evaluated and its result silently discarded —
# wrap it in @test so the assertion actually runs
@test show_graph(graph; vertex_shapes=fill(:box, 10)) isa Drawing
end
@testset "gallery" begin
graph = smallgraph(:petersen)
locs = render_locs(graph, StressLayout())
matrix = [GraphViz(graph, locs; vertex_colors=[rand(Luxor.RGB) for i=1:10], edge_colors=[rand(Luxor.RGB) for i=1:15]) for i=1:2, j=1:4]
# gallery
@test show_gallery(matrix) isa Drawing
@test show_gallery(reshape(GraphViz[], 0, 0)) isa Drawing
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 2060 | using Test
using LuxorGraphPlot: angleof, closest_natural_point, getpath
using LuxorGraphPlot
using Luxor
@testset "angleof and boundary" begin
@test angleof(Point(0.5*sqrt(3.0), 0.5)) ≈ π/6
n = boxnode(O, 100, 100)
path = getpath(n)
@test boundary(n, angleof(Point(0.5*sqrt(3.0), 0.5))).loc ≈ Point(50, 50/sqrt(3))
@test closest_natural_point(path, Point(100, 120)) ≈ Point(50, 50)
@test closest_natural_point(path, Point(130, -120)) ≈ Point(50, -50)
@test closest_natural_point(path, Point(130, -10)) ≈ Point(50, 0)
end
@testset "nodes" begin
# circle
n = circlenode((0.2, 0.4), 0.5)
@test right(n).loc == Point(0.7, 0.4)
@test left(n).loc == Point(-0.3, 0.4)
@test bottom(n).loc == Point(0.2, 0.9)
@test top(n).loc == Point(0.2, -0.1)
# ellipse
n = ellipsenode((0.2, 0.4), 1.0, 2.0)
@test right(n).loc == Point(0.7, 0.4)
@test left(n).loc == Point(-0.3, 0.4)
@test bottom(n).loc == Point(0.2, 1.4)
@test top(n).loc == Point(0.2, -0.6)
# box
n = boxnode((0.2, 0.4), 1.0, 0.4)
@test right(n).loc == Point(0.7, 0.4)
@test left(n).loc == Point(-0.3, 0.4)
@test top(n).loc == Point(0.2, 0.2)
@test bottom(n).loc == Point(0.2, 0.6)
# polygon
path = getpath(n)
n = polygonnode((0.2, 0.4), path .- Ref(Point(0.2, 0.4)))
@test right(n).loc == Point(0.7, 0.4)
@test left(n).loc == Point(-0.3, 0.4)
@test bottom(n).loc == Point(0.2, 0.6)
@test top(n).loc == Point(0.2, 0.2)
# dot
n = dotnode((0.2, 0.4))
@test right(n).loc == Point(0.2, 0.4)
@test left(n).loc == Point(0.2, 0.4)
@test top(n).loc == Point(0.2, 0.4)
@test bottom(n).loc == Point(0.2, 0.4)
# line
n = linenode((-0.1, 0.2), (0.3, 0.4))
@test right(n).loc == Point(0.3, 0.4)
@test left(n).loc == Point(-0.1, 0.2)
@test bottom(n).loc == Point(0.3, 0.4)
@test top(n).loc == Point(-0.1, 0.2)
end
@testset "connection" begin
n = boxnode((0.2, 0.4), 1.0, 0.4)
@test Connection(left(n), right(n)) isa Connection
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 476 | using LuxorGraphPlot
using Luxor
using Test
@testset "nodestore" begin
drawing = nodestore() do ns
c1 = circle!((0.4, 0.5), 30)
c2 = circle!((0.4, 0.5), 80)
with_nodes() do
sethue("white")
fill(c1)
fill(c2)
sethue("black")
text("y", c1)
text("x", c2)
line(c1, c2)
stroke(c1)
stroke(c2)
end
end
@test drawing isa Drawing
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 323 | using LuxorGraphPlot, Graphs
using Test, Documenter
@testset "layouts" begin
include("layouts/layouts.jl")
end
@testset "graphplot" begin
include("graphplot.jl")
end
@testset "nodes" begin
include("nodes.jl")
end
@testset "nodestore" begin
include("nodestore.jl")
end
Documenter.doctest(LuxorGraphPlot) | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 414 | using LuxorGraphPlot.TensorNetwork, Luxor, Test, LuxorGraphPlot
@testset "mps" begin
diagram = mps(4)
fig = with_nodes(diagram) do
for (i, node) in enumerate(diagram.nodes)
LuxorGraphPlot.stroke(node)
text("A($i)", node)
end
for edge in diagram.edges
LuxorGraphPlot.stroke(edge)
end
end
display(fig)
@test fig isa Drawing
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1144 | module LayoutsTest
using Test, LuxorGraphPlot, Graphs
@testset "point" begin
include("point.jl")
end
@testset "spring" begin
include("spring.jl")
end
@testset "stress" begin
include("stress.jl")
end
@testset "spectral" begin
include("spectral.jl")
end
@testset "layouts" begin
graph = smallgraph(:petersen)
for layout in [
[(randn(), randn()) for i=1:nv(graph)],
SpringLayout(),
StressLayout(),
SpectralLayout(),
LayeredStressLayout(zlocs=rand([0,200], nv(graph))),
LayeredSpringLayout(zlocs=rand([0,200], nv(graph))),
]
@test show_graph(graph, layout) isa Drawing
gs = [GraphViz(graph, layout; vertex_sizes=rand(Bool, 10) .* 100, edge_colors=rand(RGB, 15)) for i=1:2, j=1:4]
@test show_gallery(gs) isa Drawing
end
locs = [(randn(2)...,) for i=1:10]
@test show_graph(graph, SpringLayout()) isa Drawing
gs = [GraphViz(graph, SpringLayout(); vertex_sizes=rand(Bool, 10) .* 100, edge_colors=rand(RGB, 15)) for i=1:2, j=1:4]
@test show_gallery(gs) isa Drawing
end
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 474 | using Test, LuxorGraphPlot
using LuxorGraphPlot.Layouts: Point, distance, norm, rand_points_2d
@testset "Point" begin
p1 = Point(1.0, 2.0)
@test p1[1] == 1.0
p2 = Point(3.0, 4.0)
@test p1 + p2 ≈ Point(4.0, 6.0)
@test norm(p1) == sqrt(5)
@test distance(p1, p2) == sqrt(8)
@test rand_points_2d(10) isa Vector{Point{2, Float64}}
@test rand(Point{2, Float64}) isa Point{2, Float64}
@test randn(Point{2, Float64}) isa Point{2, Float64}
end
| LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1197 | using Test, LuxorGraphPlot, Graphs
using Luxor: Drawing, RGB
using LuxorGraphPlot.Layouts
@testset "stress layout" begin
Random.seed!(0)
for n =[100, 2000]
graph = random_regular_graph(n, 3)
optimal_distance = 50
locs = Layouts.spectral_layout(graph; optimal_distance)
@test locs isa Vector{<:Layouts.Point{2}}
Q = Layouts.quality_of_layout(graph, locs, optimal_distance)
@test Q.closeness > 10000 && Q.mean_distance_deviation < 3
end
graph = SimpleGraph(1)
optimal_distance = 50
locs = Layouts.spectral_layout(graph; optimal_distance)
@test locs isa Vector{<:Layouts.Point{2}} && length(locs) == 1
graph = SimpleGraph(2)
add_edge!(graph, 1, 2)
optimal_distance = 50
locs = Layouts.spectral_layout(graph; optimal_distance)
@test locs isa Vector{<:Layouts.Point{2}} && length(locs) == 2
end
@testset "data types" begin
graph = random_regular_graph(100, 3)
optimal_distance = 50.0
# without initial locations
layout = Layouts.SpectralLayout(; optimal_distance)
@test layout isa Layouts.SpectralLayout
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1397 | using Test, LuxorGraphPlot, Graphs
using LuxorGraphPlot.Layouts
using Luxor: Drawing, RGB
using Random
@testset "spring layout" begin
graph = random_regular_graph(100, 3)
optimal_distance = 50
locs = Layouts.spring_layout(graph; optimal_distance)
@test locs isa Vector{<:Layouts.Point{2}}
Q = Layouts.quality_of_layout(graph, locs, optimal_distance)
@test Q.closeness > 10000 && Q.mean_distance_deviation < 5
end
@testset "data types" begin
graph = random_regular_graph(100, 3)
optimal_distance = 50.0
# without initial locations
layout = Layouts.SpringLayout(; optimal_distance)
@test layout isa Layouts.SpringLayout
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# with initial locations
layout = Layouts.SpringLayout(; optimal_distance, meta=Dict(:initial_locs=>Layouts.rand_points_2d(100)))
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# with initial locations and mask
layout = Layouts.SpringLayout(; optimal_distance, meta=Dict(:initial_locs=>Layouts.rand_points_2d(100), :mask=>trues(100)))
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# layered
zlocs = rand([0,200], nv(graph))
layout = Layouts.LayeredSpringLayout(; zlocs, optimal_distance)
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | code | 1533 | using Test, LuxorGraphPlot, Graphs
using Luxor: Drawing, RGB
using LuxorGraphPlot.Layouts
@testset "helpers" begin
g = smallgraph(:petersen)
@test Layouts.weighted_laplacian(adjacency_matrix(g)) == laplacian_matrix(g)
end
@testset "stress layout" begin
graph = random_regular_graph(100, 3)
optimal_distance = 50
locs = Layouts.stressmajorize_layout(graph; optimal_distance)
@test locs isa Vector{<:Layouts.Point{2}}
Q = Layouts.quality_of_layout(graph, locs, optimal_distance)
@test Q.closeness > 10000 && Q.mean_distance_deviation < 5
end
@testset "data types" begin
graph = random_regular_graph(100, 3)
optimal_distance = 50.0
# without initial locations
layout = Layouts.StressLayout(; optimal_distance)
@test layout isa Layouts.StressLayout
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# with initial locations
layout = Layouts.StressLayout(; optimal_distance, meta=Dict(:initial_locs=>Layouts.rand_points_2d(100)))
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# with initial locations and mask
layout = Layouts.StressLayout(; optimal_distance, meta=Dict(:initial_locs=>Layouts.rand_points_2d(100), :mask=>trues(100)))
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
# layered
zlocs = rand([0,200], nv(graph))
layout = Layouts.LayeredStressLayout(; zlocs, optimal_distance)
@test Layouts.render_locs(graph, layout) isa Vector{<:Layouts.Point{2}}
end | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | docs | 1287 | # LuxorGraphPlot
A minimum package for displaying a graph and configurations defined on graphs.
It is the [`Luxor`](https://github.com/JuliaGraphics/Luxor.jl) version of [`GraphPlot`](https://github.com/JuliaGraphs/GraphPlot.jl).
Install by typing `using Pkg; Pkg.add("LuxorGraphPlot")` in a julia REPL.
(NOTE: After implementing this package, I noticed there is a similar package with more features: https://github.com/cormullion/Karnak.jl.)
## Example
In a notebook or IDE with graphical display, use the following statements to show your graph.
```julia
julia> using LuxorGraphPlot, Graphs
julia> show_graph(smallgraph(:petersen); format=:svg)
```

### Lower-level API
You can also use the lower-level API to customize the graph display.
```julia
using LuxorGraphPlot, LuxorGraphPlot.Luxor
drawing = nodestore() do ns
c1 = circle!((-20.0, 0.0), 10)
c2 = circle!((20.0, 0.0), 10)
c3 = dot!((0.0, 20.0))
with_nodes() do
sethue("cyan")
fill(c1)
fill(c2)
sethue("black")
text("y", c1)
text("x", c2)
text("z", offset(c3, (10, 0)))
line(c1, c2)
line(midpoint(c1, c2), c3)
end
end
```
The output is as follows:
 | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | docs | 572 | ```@meta
CurrentModule = LuxorGraphPlot
```
# LuxorGraphPlot
## Features
1. Automatic detection of the diagram size, by combining [`nodestore`](@ref) and [`with_nodes`](@ref).
2. Connecting nodes with edges, and finding the midpoints and corners of nodes. Related APIs: `Luxor.midpoint`, [`left`](@ref), [`right`](@ref), [`top`](@ref), [`bottom`](@ref), [`center`](@ref), [`boundary`](@ref).
3. simple graph layouts, such as [`SpringLayout`](@ref), [`StressLayout`](@ref), [`SpectralLayout`](@ref), [`LayeredSpringLayout`](@ref) and [`LayeredStressLayout`](@ref). | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.5.1 | 6ddd60ec24dbb8964a5d4b1cd2c05ca397cdb69d | docs | 110 | # API manual
```@autodocs
Modules = [LuxorGraphPlot, LuxorGraphPlot.Layouts]
Order = [:function, :type]
``` | LuxorGraphPlot | https://github.com/GiggleLiu/LuxorGraphPlot.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 622 | using MOTIFs
using Documenter
DocMeta.setdocmeta!(MOTIFs, :DocTestSetup, :(using MOTIFs); recursive=true)
makedocs(;
modules=[MOTIFs],
authors="Shane Kuei-Hsien Chu ([email protected])",
repo="https://github.com/kchu25/MOTIFs.jl/blob/{commit}{path}#{line}",
sitename="MOTIFs.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://kchu25.github.io/MOTIFs.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/kchu25/MOTIFs.jl",
devbranch="main",
)
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2462 | module MOTIFs
# modules for motif discovery
using Flux, CUDA, Zygote, StatsBase, LinearAlgebra, Random, SeqShuffle
# modules for inferring the results
using Dictionaries, DataStructures, StaticArrays
# modules for rendering the results
using HypothesisTests, FLoops, Mustache, DataFrames
using Zygote: @ignore
export discover_motifs
# numeric precision used throughout training (data matrices, filters, losses)
const float_type = Float32
# shorthand for NNlib's convolution kernel used in all encode/decode steps below
const convolution = Flux.NNlib.conv;
"""
    randomly_initialize_filters(; dim=4, rng=Random.GLOBAL_RNG, repeats=5,
                                how_many_filters=10, float_type=Float16)

Draw random filters: the returned `(dim*repeats, 1, how_many_filters)` array
consists of consecutive length-`dim` slices that are nonnegative and sum to 1
(differences of a sorted-uniform cumulative scaffold).
"""
function randomly_initialize_filters(;
    dim=4,
    rng=Random.GLOBAL_RNG,
    repeats=5,
    how_many_filters=10,
    float_type=Float16)
    # cumulative scaffold: row 1 stays 0, rows 2:dim hold sorted uniforms, row dim+1 is 1
    scaffold = zeros(float_type, (dim + 1, repeats, 1, how_many_filters))
    for i = 1:repeats, j = 1:how_many_filters
        scaffold[2:dim, i, 1, j] .= sort(rand(rng, dim - 1))
    end
    scaffold[dim + 1, :, :, :] .= 1
    # successive differences of the cumulative scaffold live on the probability simplex
    return reshape(diff(scaffold, dims=1), (dim * repeats, 1, how_many_filters))
end
"""
    get_data_bg(data)

Background nucleotide frequencies of the data set: sum the one-hot data matrix
over positions and sequences, then normalize the four base counts to
probabilities (converted to `float_type`).
"""
function get_data_bg(data)
    base_counts = reshape(sum(reshape(data.data_matrix, (4, data.L, data.N)), dims=(2, 3)), (4,))
    return float_type.(base_counts ./ sum(base_counts))
end
include("loadfasta/helpers.jl")
include("loadfasta/fasta.jl")
include("model.jl")
include("train.jl")
include("inference/_0_const.jl")
include("inference/_1_code_retrieval.jl")
include("inference/_2_enumerate.jl")
include("inference/_3_make_pfms.jl")
include("inference/_s1_make_motifs.jl")
include("inference/_s2_filter_pos_w_scores.jl")
include("inference/_h0_trim.jl")
include("inference/_h1_0_merge_header.jl")
include("inference/_h1_1_merge_H.jl")
include("inference/_h1_2_merge_countmats.jl")
include("inference/_h2_Touzet.jl")
include("inference/_h3_1_alignment.jl")
include("inference/_h3_2_alignment_merge.jl")
include("inference/_h4_overlap_ratio.jl")
include("inference/_h5_expansion.jl")
include("inference/_h6_positions2countmat.jl")
include("inference/_h7_fisher.jl")
include("inference/_h8_remove_redundancy.jl")
include("inference/_h9_order_for_display.jl")
include("inference/_g1_obtain_coutmats.jl")
include("render/const.jl")
include("render/helpers.jl")
include("render/html_template_olap.jl")
include("render/html_template_no_olap.jl")
include("render/plotting.jl")
include("render/pvec_calculations.jl")
include("render/render.jl")
include("wrap.jl")
# TODOs:
# use progress meter to track the training progress?
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 19298 | Base.@kwdef mutable struct Hyperparam
    filter_len::Int = 8                 # length of each filter (in nucleotides)
    f_len::Int = filter_len*4           # filter length in one-hot coordinates (4 rows per base)
    M::Int = 50                         # number of filters
    twoM::Int = 2*M                     # 2*M: Z-code and Y-code channels stacked side by side (see cat_ZY)
    h::Int = 12                         # height of the syntax filter
    K::Int = 24                         # number of syntax filters
    q::Int = 32                         # how sparse the syntax code should be per sequence (top-q entries kept)
    batch_size::Int = 6                 # batch size
    num_pass_xyz::Int = 6               # number of unrolled passes for the x,y,z code updates
    num_pass_df::Int = 3                # number of unrolled passes for the d,f dictionary updates
    magnifying_factor::float_type = 10  # magnifying factor for the sparse code Z, Y
    gamma::float_type = 0.1             # regularization for filter incoherence (not referenced in this file)
end
# Derived length bookkeeping shared by all forward-pass routines.
struct length_info
    L::Int               # one-hot sequence length: 4 * data.L
    C::Int               # code length: L - f_len + 1
    c::Int               # code length in nucleotide coordinates: data.L - filter_len + 1
    l::Int               # syntax-code length: c - h + 1
    MB::Int              # hp.M * hp.batch_size
    KB::Int              # hp.K * hp.batch_size
    CS_vlen::Int         # C + L - 1 (full length of a code-vs-sequence correlation)
    last_avail_ind::Int  # last data index usable for a full batch: N - N % batch_size
    function length_info(hp, data)
        L = 4*data.L
        C = L-hp.f_len+1
        c = data.L-hp.filter_len+1
        l = c-hp.h+1
        MB = hp.M * hp.batch_size
        KB = hp.K * hp.batch_size
        CS_vlen = C + L - 1;
        last_avail_ind = (data.N - data.N % hp.batch_size);
        new(L, C, c, l, MB, KB, CS_vlen, last_avail_ind)
    end
end
# Constant projection/masking arrays, precomputed once per run and kept on GPU.
struct projectors
    mapdrange::CuArray{float_type, 2}   # selects the valid f_len window out of a length CS_vlen correlation (used by update_D)
    mapclarge::CuArray{float_type, 2}   # embeds length-c vectors into length-C code space on every 4th row
    z_mask_n::CuArray{float_type, 3, CUDA.Mem.DeviceBuffer}  # zeroes all code rows except every 4th (nucleotide-aligned)
    pseudocount_matrix::CuArray{float_type, 3}  # 0.001 pseudocounts, (4, filter_len, M); built on CPU, converted by `new`
    function projectors(hp, len)
        # identity placed at columns C : C+f_len-1 picks the central window of the correlation
        mapdrange = zeros(float_type, (hp.f_len, len.C+len.L-1));
        mapdrange[:, len.C:len.C+hp.f_len-1] = Matrix(I, hp.f_len, hp.f_len);
        mapdrange = cu(mapdrange);
        mapclarge = zeros(float_type, (len.C, len.c));
        mapclarge[1:4:end,:] = Matrix(I, len.c, len.c);
        mapclarge = cu(mapclarge);
        # true exactly on rows 1, 5, 9, ... of the code
        z_mask_col = 1:len.C .∈ [1:4:len.C];
        z_mask_n = cu(float_type.(repeat(z_mask_col, outer=(1,hp.M,hp.batch_size))));
        pseudocount_matrix = fill(0.001f0, (4, hp.filter_len, hp.M))
        new(
            mapdrange,
            mapclarge,
            z_mask_n,
            pseudocount_matrix
        )
    end
end
# Learned parameters of the unrolled convolutional dictionary-learning model.
# The scalar/vector step-size and sparsity fields are stored as "square roots":
# prep_params squares them, keeping the effective values nonnegative under
# unconstrained gradient training.
struct ucdl
    lambda_sparsity_warmup::float_type     # sparsity param for Z and Y when their initial values are zero
    lambda_sparsity::Array{float_type, 1}  # sparsity param for Z and Y during iterations
    kappa_sparsity::Array{float_type, 1}   # sparsity param for F during iterations
    lambda_stepsize_warmup::float_type     # step size for Z and Y when their initial values are zero
    omega_stepsize_warmup::float_type      # step size for X when its initial value is zero
    lambda_stepsize::Array{float_type, 1}  # step size for Z and Y during iterations
    omega_stepsize::Array{float_type, 1}   # step size for X during iterations
    kappa_stepsize::Array{float_type, 1}   # step size for F during iterations
    D::CuArray{float_type, 3}              # filters, (f_len, 1, M); note: 3-D, not (f_len, 1, M, K)
    F::CuArray{float_type, 4}              # syntax filters, (h, twoM, 1, K)
    penalty_xyz::Array{float_type, 1}      # ADMM coupling penalty for the x,y,z updates
    mu::Array{float_type, 1}               # step size for D during iterations
    # Random initialization; η₁ scales all step-size/sparsity draws.
    function ucdl(hp; η₁=float_type(0.05))
        D = cu(randomly_initialize_filters(
            repeats=hp.filter_len,
            how_many_filters=hp.M,
            float_type=float_type));
        # store sqrt so that squaring in prep_params recovers the simplex values
        D = sqrt.(D);
        F = cu(abs.(0.1 .* randn(float_type, (hp.h, hp.twoM, 1, hp.K))));
        lambda_sparsity_warmup = η₁ * rand(float_type);
        lambda_sparsity = η₁ * rand(float_type, hp.num_pass_xyz);
        kappa_sparsity = η₁ * rand(float_type, hp.num_pass_df);
        lambda_stepsize_warmup = η₁ * rand(float_type);
        omega_stepsize_warmup = η₁ * rand(float_type);
        lambda_stepsize = η₁ * rand(float_type, hp.num_pass_xyz);
        omega_stepsize = η₁ * rand(float_type, hp.num_pass_xyz);
        kappa_stepsize = η₁ * rand(float_type, hp.num_pass_df);
        penalty_xyz = η₁ * rand(float_type, hp.num_pass_xyz);
        mu = η₁ * rand(float_type, hp.num_pass_df);
        new(
            lambda_sparsity_warmup, lambda_sparsity, kappa_sparsity,
            lambda_stepsize_warmup, omega_stepsize_warmup, lambda_stepsize, omega_stepsize, kappa_stepsize,
            D, F, penalty_xyz, mu
        )
    end
    # Positional constructor (used by Flux's functor machinery to rebuild the
    # struct from its fields). Fixed local-name typo: lambda_sparity_warmup.
    function ucdl(lambda_sparsity_warmup,
        lambda_sparsity,
        kappa_sparsity,
        lambda_stepsize_warmup,
        omega_stepsize_warmup,
        lambda_stepsize,
        omega_stepsize,
        kappa_stepsize,
        D,
        F,
        penalty_xyz,
        mu)
        return new(
            lambda_sparsity_warmup,
            lambda_sparsity,
            kappa_sparsity,
            lambda_stepsize_warmup,
            omega_stepsize_warmup,
            lambda_stepsize,
            omega_stepsize,
            kappa_stepsize,
            D,
            F,
            penalty_xyz,
            mu
        )
    end
end
Flux.@functor ucdl
"""
    prep_filters(D, hp, projs)

Effective filters from the stored square-root parameters: square `D`, add the
pseudocounts, and normalize every length-4 column so each filter position is a
probability distribution. Returns shape `(hp.f_len, 1, hp.M)`.
"""
function prep_filters(D, hp, projs)
    squared = D .^ 2
    # pseudocounts guard against division by zero during normalization
    probs = reshape(squared, (4, hp.filter_len, hp.M)) .+ projs.pseudocount_matrix
    probs = probs ./ sum(probs, dims=1)
    return reshape(probs, (hp.f_len, 1, hp.M))
end
"""
    prep_syntax_filters(F)

Effective syntax filters: square the stored parameters (nonnegativity) and
scale each filter to unit Frobenius norm over its first two dimensions.
"""
function prep_syntax_filters(F)
    squared = F .^ 2
    return squared ./ (sqrt.(sum(squared .^ 2, dims=(1, 2))))
end
# Turn the stored (square-root) learned parameters into their effective values:
# square every step-size/sparsity/penalty parameter (keeping them nonnegative),
# simplex-normalize the filters D, and unit-normalize the syntax filters F.
# NB: the first positional argument shadows the type name `ucdl`.
function prep_params(ucdl, hp, projs)
    lambda_sparsity_warmup = ucdl.lambda_sparsity_warmup^2
    lambda_sparsity = ucdl.lambda_sparsity.^2
    kappa_sparsity = ucdl.kappa_sparsity.^2
    lambda_stepsize_warmup = ucdl.lambda_stepsize_warmup^2
    omega_stepsize_warmup = ucdl.omega_stepsize_warmup^2
    lambda_stepsize = ucdl.lambda_stepsize.^2
    omega_stepsize = ucdl.omega_stepsize.^2
    kappa_stepsize = ucdl.kappa_stepsize.^2
    penalty_xyz = ucdl.penalty_xyz.^2
    mu = ucdl.mu.^2
    D = prep_filters(ucdl.D, hp, projs)
    F = prep_syntax_filters(ucdl.F)
    return lambda_sparsity_warmup, lambda_sparsity, kappa_sparsity,
        lambda_stepsize_warmup, omega_stepsize_warmup, lambda_stepsize, omega_stepsize, kappa_stepsize,
        penalty_xyz, mu, D, F
end
# Warm-up estimates of the sparse codes Z (filters correlated with S) and Y
# (flipped filters): one proximal-gradient-style step from zero — scale the
# correlations, subtract the sparsity shift, mask to nucleotide-aligned rows,
# and rectify.
function warmup_ZY(S, D, lambda_stepsize_warmup, lambda_sparsity_warmup, projs)
    DᵀS = convolution(S, D, pad=0, flipped=true)
    DS = convolution(S, D, pad=0)
    Z_update = (lambda_stepsize_warmup .* DᵀS) .- (lambda_sparsity_warmup * lambda_stepsize_warmup)
    Y_update = (lambda_stepsize_warmup .* DS) .- (lambda_sparsity_warmup * lambda_stepsize_warmup)
    Z = Flux.NNlib.relu.(projs.z_mask_n .* Z_update)
    Y = Flux.NNlib.relu.(projs.z_mask_n .* Y_update)
    return Z, Y
end
# 0/1 mask keeping, for each sequence in the batch, the entries of X that are at
# least as large as that sequence's q-th largest value (ties may keep > q).
# Called only under @ignore, so it runs outside the gradient tape.
function generate_bitmat(X, hp)
    bitmat = CUDA.zeros(eltype(X), (size(X, 1), 1, hp.K, hp.batch_size))
    X_reshape = reshape(X, (size(X, 1)*hp.K, hp.batch_size))
    # per-column (per-sequence) q-th largest value
    vals = reshape(partialsort.(eachcol(X_reshape), hp.q, rev=true) |> cu, (1,1,1, hp.batch_size))
    bitmat[X .≥ vals] .= 1;
    return bitmat
end
# Hard top-q projection of the syntax code X; the mask is a constant to Zygote
# (@ignore), so gradients flow only through the surviving entries.
function project_X(X, hp)
    bitmat = @ignore generate_bitmat(X, hp)
    return X .* bitmat
end
# 0/1 mask keeping entries of ZY at or above the median of its positive
# entries; returns `nothing` when ZY has no positive entry at all.
function create_ZY_mask(ZY)
    ZY_mask = CUDA.zeros(eltype(ZY), size(ZY))
    Z_nz = ZY[ZY .> 0]
    if isempty(Z_nz)
        return nothing
    else
        Z_nz_median = median(Z_nz)
        ZY_mask[ZY .≥ Z_nz_median] .= 1
    return ZY_mask
    end
end
# Stack the nucleotide-aligned rows of Z and Y side by side into a
# (len.c, 2M, 1, batch) tensor, drop below-median entries (mask computed
# outside the tape), and scale by magnifying_factor.
function cat_ZY(Z, Y, hp, len)
    ZY = reshape(hcat(Z[1:4:end,:,:], Y[1:4:end,:,:]), (len.c, hp.twoM, 1, hp.batch_size))
    ZY_mask = @ignore create_ZY_mask(ZY);
    return isnothing(ZY_mask) ? hp.magnifying_factor .* ZY : hp.magnifying_factor .* (ZY_mask .* ZY)
end
# Warm-up estimate of the syntax code X: correlate the stacked code ZY with the
# syntax filters F, scale by the warm-up step size, and keep only the top-q
# entries per sequence.
function warmup_X(F, Z, Y, omega_stepsize_warmup, hp, len)
    ZY = cat_ZY(Z, Y, hp, len) # (len.c, 2hp.M, 1, hp.batch_size)
    X_updated = omega_stepsize_warmup .* convolution(ZY, F, pad=0, flipped=true) # (len.l, 1, hp.K, hp.batch_size)
    return project_X(X_updated, hp)
end
# Split the syntax reconstruction FX (len.c, 2M, 1, batch) into its Z-half
# (first M channels) and Y-half (last M channels), each reshaped to
# (len.c, M, batch). `projs` is unused here; kept for call-site uniformity.
function return_left_right_FX(FX, hp, len, projs)
    shape = (len.c, hp.M, hp.batch_size)
    zhalf = reshape(FX[:, 1:hp.M, :, :], shape)
    yhalf = reshape(FX[:, hp.M+1:end, :, :], shape)
    return zhalf, yhalf
end
# Full warm-up for one batch: initial codes Z, Y, X, the syntax reconstruction
# FX (summed over the K syntax filters), and its Z/Y halves.
function warmup_XYZ(S, D, F, lambda_stepsize_warmup, lambda_sparsity_warmup,
    omega_stepsize_warmup, hp, len, projs
    )
    Z, Y = warmup_ZY(S, D, lambda_stepsize_warmup, lambda_sparsity_warmup, projs)
    X = warmup_X(F, Z, Y, omega_stepsize_warmup, hp, len)
    FX = sum(convolution(X, F, pad=(hp.h-1, hp.twoM-1), groups=hp.K), dims=3)
    left_FX, right_FX = return_left_right_FX(FX, hp, len, projs)
    return Z, Y, X, FX, left_FX, right_FX
end
#=
alpha, beta: the scaled dual variables coupling the codes Z, Y to the syntax
reconstruction halves left_FX, right_FX (dual updates in one_forward_step_XYZ)
=#
# One proximal-gradient update of the codes Z and Y: data-fidelity gradient via
# the residual of the two decodings, plus the coupling term tying each code to
# its FX half (shifted by the scaled dual); then shrink, mask, and rectify.
function update_ZY(S, Z, Y, D, left_FX, right_FX, alpha, beta, lambda_sparsity, lambda_stepsize, penalty_xyz, hp, projs, num_pass)
    ZD, YD = convolution(Z, D, pad=hp.f_len-1, groups=hp.M), convolution(Y, D, pad=hp.f_len-1, groups=hp.M, flipped=true)
    diff = sum(ZD + YD, dims=2) - S
    # mapclarge lifts the length-c FX halves into length-C code space
    z_grad = convolution(diff, D, pad=0, flipped=true) + penalty_xyz[num_pass] .* (Z - batched_mul(projs.mapclarge, left_FX + alpha))
    y_grad = convolution(diff, D, pad=0) + penalty_xyz[num_pass] .* (Y - batched_mul(projs.mapclarge, right_FX + beta))
    Z_updated = Z - lambda_stepsize[num_pass] .* z_grad .- (lambda_sparsity[num_pass] .* lambda_stepsize[num_pass])
    Y_updated = Y - lambda_stepsize[num_pass] .* y_grad .- (lambda_sparsity[num_pass] .* lambda_stepsize[num_pass])
    return Flux.NNlib.relu.(projs.z_mask_n .* Z_updated), Flux.NNlib.relu.(projs.z_mask_n .* Y_updated)
end
# Gradient update of the syntax code X against the (dual-shifted) ZY target,
# followed by the hard top-q projection.
function update_X(FX, Z, Y, X, F, alpha, beta, omega_stepsize, hp, len, num_pass)
    alpha_beta = reshape(hcat(alpha, beta), (len.c, hp.twoM, 1, hp.batch_size))
    ZY = cat_ZY(Z, Y, hp, len)
    diff = sum(FX, dims=3) - (ZY - alpha_beta)
    x_grad = convolution(diff, F, pad=0, flipped=true)
    X_updated = X - omega_stepsize[num_pass] .* x_grad
    return project_X(X_updated, hp)
end
# One unrolled pass: update Z, Y, then X, recompute the syntax reconstruction
# FX and its halves, and perform the scaled-dual (alpha, beta) ascent on the
# residuals between the FX halves and the nucleotide-aligned code rows.
function one_forward_step_XYZ(S, Z, Y, D, X, F, left_FX, right_FX, FX, alpha, beta,
    lambda_sparsity, lambda_stepsize,
    omega_stepsize, penalty_xyz,
    hp, len, projs, num_pass
    )
    Z, Y = update_ZY(S, Z, Y, D, left_FX, right_FX, alpha, beta, lambda_sparsity, lambda_stepsize, penalty_xyz, hp, projs, num_pass)
    X = update_X(FX, Z, Y, X, F, alpha, beta, omega_stepsize, hp, len, num_pass)
    FX = sum(convolution(X, F, pad=(hp.h-1, hp.twoM-1), groups=hp.K), dims=3)
    left_FX, right_FX = return_left_right_FX(FX, hp, len, projs)
    # scaled dual updates
    alpha = alpha + left_FX - (@view Z[1:4:end,:,:])
    beta = beta + right_FX - (@view Y[1:4:end,:,:])
    return Z, Y, X, FX, left_FX, right_FX, alpha, beta
end
# Correlate each (filter, batch) channel of `code` with the residual `diff`,
# producing per-filter gradient contributions of length CS_vlen; the workhorse
# for assembling the dictionary gradient in update_D.
conv_code_diff(code, diff, hp, len) = reshape(
    convolution(reshape(upsample_nearest(diff, (1,hp.M,1)), (len.L, len.MB, 1)),
    reshape(code, (len.C, 1, len.MB)) , pad=len.C-1, groups=len.MB, flipped=true),
    (len.CS_vlen, hp.M, hp.batch_size));
# Multiplicative (exponentiated-gradient) update of the filters D: assemble the
# gradient from all code/decoding/data correlations (the Y terms reversed to
# account for the flipped decoding), cut out the valid f_len window with
# projs.mapdrange, apply D .* exp(-mu * grad), and renormalize every length-4
# column so each filter position stays on the probability simplex.
function update_D(S, Z, Y, D, mu, hp, len, projs, num_pass)
    sumZD = sum(convolution(Z, D, pad=hp.f_len-1, groups=hp.M), dims=2)
    sumYRD = sum(convolution(Y, D, pad=hp.f_len-1, groups=hp.M, flipped=true), dims=2)
    ZᵀsumZD = conv_code_diff(Z, sumZD, hp, len)
    YᵀsumZD = conv_code_diff(Y, sumZD, hp, len)
    ZᵀsumYRD = conv_code_diff(Z, sumYRD, hp, len)
    YᵀsumYRD = conv_code_diff(Y, sumYRD, hp, len)
    ZᵀS = conv_code_diff(Z, S, hp, len)
    YᵀS = conv_code_diff(Y, S, hp, len)
    D_grad = reshape(
        projs.mapdrange*reshape(sum(ZᵀsumZD + ZᵀsumYRD + ZᵀS + reverse(YᵀsumZD + YᵀsumYRD +YᵀS, dims=1), dims=3), (len.C+len.L-1, hp.M)),
        (hp.f_len, 1, hp.M));
    Breg_num = reshape(D .* exp.(-mu[num_pass] .* D_grad), (4, hp.filter_len, 1, hp.M));
    D_updated = reshape((Breg_num ./ sum(Breg_num,dims=1)), (hp.f_len, 1, hp.M));
    return D_updated
end
# Gradient of the syntax-reconstruction term w.r.t. the syntax filters F:
# correlate the (dual-shifted) reconstruction residual with the code X,
# grouped per (syntax filter, batch) channel, then sum over the batch.
function F_gradient(ZY, X, F, hp, len, theta)
    diff_X_upsampled =
        upsample_nearest(sum(convolution(X, F, pad=(hp.h-1, hp.twoM-1), groups=hp.K), dims=3) - (ZY + theta)
        , (1, 1, hp.K, 1))
    diff_r = reshape(diff_X_upsampled, (len.c, hp.twoM, hp.K*hp.batch_size, 1))
    X_r = reshape(X, (len.l, 1, 1, hp.K*hp.batch_size))
    conv_diff_X = convolution(diff_r, X_r, pad=0, flipped=true, groups=len.KB)
    F_conv = reshape(conv_diff_X, (hp.h, hp.twoM, hp.K, hp.batch_size))
    F_grad = reshape(sum(F_conv, dims=4), (hp.h, hp.twoM, 1, hp.K))
    return F_grad
end
# Proximal update of F: gradient step, sparsity shrinkage, rectify, then
# renormalize each syntax filter to unit Frobenius norm.
function update_F(ZY, X, F, hp, len, theta, kappa_stepsize, kappa_sparsity, num_pass)
    F_grad = F_gradient(ZY, X, F, hp, len, theta)
    F_updated = Flux.NNlib.relu.(F - kappa_stepsize[num_pass] * F_grad .-(kappa_stepsize[num_pass] * kappa_sparsity[num_pass]))
    return F_updated ./ (sqrt.(sum(F_updated.^2, dims=(1,2))))
end
# Training objective, normalized by batch size: squared error of the sequence
# reconstruction (forward + flipped decodings vs S) plus squared error of the
# syntax reconstruction (FX vs ZY). `F_orig` is unused in the active loss; it
# is kept for call-site compatibility (a commented-out ℓ1/ℓ2 filter penalty
# once used it).
function loss(S, Z, Y, X, D, ZY, F, F_orig, hp)
    normalize_factor = (1.0f0/float_type(hp.batch_size));
    DZ = sum(convolution(Z, D, pad=hp.f_len-1, groups=hp.M), dims=2)
    DY = sum(convolution(Y, D, pad=hp.f_len-1, groups=hp.M, flipped=true), dims=2)
    reconstruction_loss = normalize_factor*sum((DZ+DY-S).^2)
    FX = sum(convolution(X, F, pad=(hp.h-1, hp.twoM-1), groups=hp.K), dims=3)
    syntax_reconstruction_loss = normalize_factor*sum((FX - ZY).^2)
    return reconstruction_loss + syntax_reconstruction_loss
end
# Zero-initialized scaled dual variables for the Z/Y-vs-FX coupling.
create_alpha_beta_as_zeros(S, hp, len) =
    CUDA.zeros(eltype(S), (len.c, hp.M, hp.batch_size)), CUDA.zeros(eltype(S), (len.c, hp.M, hp.batch_size));
# Unrolled code inference: warm-start Z, Y, X from zero, then run num_pass_xyz
# passes of code updates plus scaled-dual updates, with learned per-pass step
# sizes, sparsity shifts, and coupling penalties. Returns the final codes.
function ADMM_XYZ(S, D, F,
    lambda_stepsize_warmup, lambda_stepsize,
    lambda_sparsity_warmup, lambda_sparsity,
    omega_stepsize_warmup, omega_stepsize,
    penalty_xyz,
    hp, len, projs
    )
    # create initial scaled dual variables as zeros (outside the gradient tape)
    alpha, beta = @ignore create_alpha_beta_as_zeros(S, hp, len)
    # warm up
    Z, Y, X, FX, left_FX, right_FX =
        warmup_XYZ(S, D, F,
            lambda_stepsize_warmup, lambda_sparsity_warmup,
            omega_stepsize_warmup, hp, len, projs)
    # iterations
    for num_pass = 1:hp.num_pass_xyz
        Z, Y, X, FX, left_FX, right_FX, alpha, beta =
            one_forward_step_XYZ(S, Z, Y, D, X, F,
                left_FX, right_FX, FX,
                alpha, beta,
                lambda_sparsity, lambda_stepsize,
                omega_stepsize, penalty_xyz,
                hp, len, projs, num_pass)
    end
    return Z, Y, X
end
# Zero-initialized scaled dual variable for the FX-vs-ZY constraint.
create_theta_as_zeros(S, hp, len) =
    CUDA.zeros(eltype(S), (len.c, hp.twoM, 1, hp.batch_size));
# Unrolled dictionary refinement: with codes Z, Y, X fixed, alternate
# num_pass_df passes of filter update (D), syntax-filter update (F), and the
# scaled-dual (theta) ascent on the syntax reconstruction residual.
function ADMM_DF(S, Z, Y, X, D, F, mu,
    kappa_sparsity, kappa_stepsize, hp, len, projs)
    # create the initial scaled dual variable as zeros (outside the tape)
    theta = @ignore create_theta_as_zeros(S, hp, len)
    ZY = cat_ZY(Z, Y, hp, len)
    for num_pass = 1:hp.num_pass_df
        D = update_D(S, Z, Y, D, mu, hp, len, projs, num_pass)
        F = update_F(ZY, X, F, hp, len, theta, kappa_stepsize, kappa_sparsity, num_pass)
        theta = theta + sum(convolution(X, F, pad=(hp.h-1, hp.twoM-1), groups=hp.K), dims=3) - ZY
    end
    return ZY, D, F
end
# One full differentiable forward pass used as the training objective: prepare
# effective parameters, infer the codes (ADMM_XYZ), refine the dictionaries
# (ADMM_DF), and return the combined reconstruction loss.
# NOTE(review): prints the loss on every batch; consider @info/@debug instead.
function forward_pass_return_loss(S, cdl, hp, len, projs)
    lambda_sparsity_warmup, lambda_sparsity, kappa_sparsity,
    lambda_stepsize_warmup, omega_stepsize_warmup, lambda_stepsize, omega_stepsize, kappa_stepsize,
    penalty_xyz, mu, D, F_orig = prep_params(cdl, hp, projs)
    Z, Y, X = ADMM_XYZ(S, D, F_orig,
        lambda_stepsize_warmup, lambda_stepsize,
        lambda_sparsity_warmup, lambda_sparsity,
        omega_stepsize_warmup, omega_stepsize,
        penalty_xyz, hp, len, projs
        )
    ZY, D, F = ADMM_DF(S, Z, Y, X, D, F_orig, mu,
        kappa_sparsity, kappa_stepsize,
        hp, len, projs
        )
    l = loss(S, Z, Y, X, D, ZY, F, F_orig, hp)
    println("loss $l")
    return l
end
# Inference-only pass: run the code ADMM with the trained parameters (no
# dictionary refinement) and return the effective syntax filters plus the
# codes Z, Y, X for the batch S.
function retrieve_code(S, cdl, hp, len, projs)
    lambda_sparsity_warmup, lambda_sparsity, kappa_sparsity,
    lambda_stepsize_warmup, omega_stepsize_warmup, lambda_stepsize, omega_stepsize, kappa_stepsize,
    penalty_xyz, _, D, F_orig = prep_params(cdl, hp, projs)
    Z, Y, X = ADMM_XYZ(S, D, F_orig,
        lambda_stepsize_warmup, lambda_stepsize,
        lambda_sparsity_warmup, lambda_sparsity,
        omega_stepsize_warmup, omega_stepsize,
        penalty_xyz,
        hp, len, projs
        )
    return F_orig, Z, Y, X
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 1941 | function setup_num_epochs(number_training_samples)
if number_training_samples < 1000
return 25
elseif number_training_samples < 10000
return 10
elseif number_training_samples < 100000
return 5
else
return 3
end
end
# Train the unrolled CDL model on `data`. Stops early once the ℓ1 norm of the
# normalized syntax filters drops below `l1_loss_thresh`; otherwise runs
# `num_epochs` epochs (derived from data.N via setup_num_epochs when not given).
# Returns (trained model, hyperparams, length info, projectors).
function train_ucdl(data;
    num_epochs=nothing,
    l1_loss_thresh=float_type(95.0)
    )
    hp = Hyperparam();
    len = length_info(hp, data);
    projs = projectors(hp, len);
    cdl = ucdl(hp);
    data_load = Flux.DataLoader(data.data_matrix, batchsize=hp.batch_size, shuffle=true, partial=false);
    ps = Flux.params(cdl);
    opt = Flux.AdaBelief();
    num_epochs = isnothing(num_epochs) ? setup_num_epochs(data.N) : num_epochs;
    break_condition = false;
    for i in 1:num_epochs
        for S in data_load
            S = S |> gpu;
            gs = gradient(ps) do
                forward_pass_return_loss(S, cdl, hp, len, projs)
            end
            Flux.Optimise.update!(opt, ps, gs) # update parameters
            # sparsity of the learned syntax filters: the early-stopping signal
            l1_loss = sum(abs.(prep_syntax_filters(cdl.F)))
            if l1_loss < l1_loss_thresh
                break_condition = true
                break
            end
        end
        break_condition && break
        println("Epoch: $i completed")
    end
    return cdl, hp, len, projs
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 440 | function discover_motifs(datapath, save_path;
    num_epochs=nothing)
    # End-to-end pipeline: load the FASTA file at `datapath`, estimate the
    # background base frequencies, train the model, extract motifs, and render
    # the result into `save_path`.
    @info "load data"
    data = FASTA_DNA{float_type}(datapath)
    this_bg = get_data_bg(data)
    @info "training..."
    cdl, hp, len, projs = train_ucdl(data; num_epochs=num_epochs)
    @info "extract motifs..."
    ms = run_thru(data, cdl, hp, len, projs, this_bg)
    render_result!(save_path, ms, data, this_bg)
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2823 | const float_type_retrieval = Float16
# --- record types used by the code-retrieval / enumeration stages ---
# one nonzero syntax-code entry: (position, syntax-filter id, sequence id, magnitude)
const stored_code_component_t =
    NamedTuple{(:position, :fil, :seq, :mag), Tuple{UInt8, UInt8, UInt32, float_type_retrieval}}
# a triplet pattern: three filter ids, the two offsets from the first filter, and total span
const composition_key_type =
    NamedTuple{(:f1,:f2,:f3,:d12,:d13,:len), Tuple{Int8, Int8, Int8, UInt8, UInt8, UInt8}}
# one occurrence of a pattern: sequence id, start position, reverse-complement flag
const value_type =
    NamedTuple{(:seq_num, :pos, :comp), Tuple{UInt32, UInt16, Bool}}
# const se_type =
#     NamedTuple{(:start_, :end_), Tuple{Int16, Int16}}
const unit_range_int_t = UInt32
# --- enrichment / pattern-coverage thresholds ---
const cover_more_than = 200
const cover_at_least = 10
const syntax_filter_thresh = 0.01
const max_pwm_length_Touzet = 15
# Touzet-style threshold calculation (pvalue_Touzet2)
const _granularity_ = 1e-1; # initial granularity for score2pvalue and pval2score
const _k_ = 100; # decreasing factor for finer granularity in each iteration
const _bg_ = [.25,.25,.25,.25]; # default background
const pvalue_Touzet = 0.0002
# for finding the best score threshold
const score_thresh_increment = float_type_retrieval(0.5)
const background = float_type_retrieval.([0.25 for _ = 1:4])
# --- GPU kernel launch configuration ---
const threads_1d = 512;
const threads_2d = 32;
const threads_3d = 10;
const ker_1d = threads_1d;
const ker_2d = (threads_2d, threads_2d);
const ker_3d = (threads_3d, threads_3d, threads_3d);
# grid sizes covering an array X with the thread counts above
b_size_1d(X) = ceil.(Int, size(X) ./ threads_1d)
b_size_2d(X) = ceil.(Int, size(X) ./ threads_2d)
b_size_3d(X) = ceil.(Int, size(X) ./ threads_3d)
# one-hot encoding of nucleotides, row order a, c, g, t ('z' = unknown → all zeros)
const atcg2dummy = Dict{Char, Vector{Float32}}('a'=>[1,0,0,0], 'A'=>[1,0,0,0],
                        'c'=>[0,1,0,0], 'C'=>[0,1,0,0],
                        'g'=>[0,0,1,0], 'G'=>[0,0,1,0],
                        't'=>[0,0,0,1], 'T'=>[0,0,0,1],
                        'z'=>[0,0,0,0])
# Watson-Crick complements
const atcg_comp = Dict{Char, Char}('a'=>'t', 'A'=>'T',
                        'c'=>'g', 'C'=>'G',
                        'g'=>'c', 'G'=>'C',
                        't'=>'a', 'T'=>'A')
# nucleotide index → first row of its 4-row one-hot slice
four_based_ind(ind) = (ind-1)*4+1
promote_i(x...) = Int.(x);
# --- motif post-processing thresholds ---
const num_pfms2process = 500
const pvalue_fisher_thresh = 1e-10 # cannot use float16 since it's pvalue ( use log scale instead?)
const _pseudocounts_ = float_type_retrieval(0.0001)
const allr_thresh = float_type_retrieval(0.875)
const ic_trim_thresh = float_type_retrieval(1.0)
const ic_expand_thresh = float_type_retrieval(0.1)
const max_pwm_length_Touzet2 = 15
const pvalue_Touzet_large = 0.0001
const pvalue_Touzet_mid = 0.0001
const pvalue_Touzet_small = 0.0003
const max_allowed_diff = 80
const indep_run = 13;
const dep_run = 10;
const merge_add_counts = true
const code_percentile = 0.01
const effective_pos_ic_thresh = 0.5
const mv_avg_window = 3
const pfm_minimal_length = 8
const pfm_minimal_length = 8 | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2227 | #=
Couple things:
1. Get code components
2. Filter code components if necessary:
a. filter out low magnitude components by setting the threshold
to the median of the magnitude or the v percentile of the magnitude
3. Get the scanning range of the filtered code components
4. Enumerate the triplet (word combinations) of the filtered code components
using both the scanning range and the filtered code components
5. Get the start and end range for each syntax filter
=#
# Turn one nonzero code entry (a Cartesian index into the batch tensor X plus
# its magnitude) into a record; `i` is the global index of the batch's first
# sequence, so seq = local index + i - 1.
function get_code_component_this_batch(cartesian_ind, magnitude, i)
    return (position = cartesian_ind[1],
            fil = cartesian_ind[3],
            seq = cartesian_ind[4] + i - 1,
            mag = magnitude)
end
# Accessors over position-sorted code components (x[1] = position, x[2] = filter id).
function get_magnitude(x)
    return x.mag
end
# Filter ids of the i-th, j-th, and k-th components.
function get_fils(cartesian_inds_sorted, i, j, k)
    return (cartesian_inds_sorted[i][2],
            cartesian_inds_sorted[j][2],
            cartesian_inds_sorted[k][2])
end
# Position offsets of components j and k relative to component i.
function get_d12_d13(cartesian_inds_sorted, i, j, k)
    anchor = cartesian_inds_sorted[i][1]
    return (cartesian_inds_sorted[j][1] - anchor,
            cartesian_inds_sorted[k][1] - anchor)
end
# Position of component i (the anchor of a triplet).
function get_f1_pos(cartesian_inds_sorted, i)
    return cartesian_inds_sorted[i][1]
end
# Append every positive entry of the batch code X to `stored_code_components`
# as a (position, filter, sequence, magnitude) record, moving indices and
# magnitudes to the CPU; `i` is the global index of the batch's first sequence.
function append_code_component!(X, stored_code_components, i)
    cartesian_inds = findall(X .> 0);
    append!(stored_code_components,
        get_code_component_this_batch.(cartesian_inds |> cpu, float_type_retrieval.(X[cartesian_inds]) |> cpu, i))
end
# Run the trained encoder over the whole data set (full batches only, in data
# order) and collect every nonzero syntax-code entry as a flat record vector.
function code_retrieval(data, cdl, hp, len, projs)
    lambda_sparsity_warmup, lambda_sparsity, _,
    lambda_stepsize_warmup, omega_stepsize_warmup, lambda_stepsize, omega_stepsize, _,
    penalty_xyz, _, D, F_orig = prep_params(cdl, hp, projs)
    data_load = Flux.DataLoader(data.data_matrix, batchsize=hp.batch_size, shuffle=false, partial=false)
    stored_code_components = stored_code_component_t[]
    # i tracks the global index of the first sequence in the current batch
    i = 1;
    @inbounds for S in data_load
        S = S |> gpu;
        _, _, X = ADMM_XYZ(S, D, F_orig,
            lambda_stepsize_warmup, lambda_stepsize,
            lambda_sparsity_warmup, lambda_sparsity,
            omega_stepsize_warmup, omega_stepsize,
            penalty_xyz,
            hp, len, projs
            )
        append_code_component!(X, stored_code_components, i)
        i += hp.batch_size;
    end
    return stored_code_components
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2605 | create_key(c1,c2,c3,d12,d13,len) = (f1=c1, f2=c2, f3=c3, d12=d12, d13=d13, len=len)
# Occurrence record: sequence index, start position, and a flag marking
# reverse-complement hits (see obtain_count_matrices); fresh records start
# with comp = false.
function create_value(seq_num, pos)
    return (seq_num = UInt32(seq_num), pos = pos, comp = false)
end
# The same occurrence, flagged as a reverse-complement hit.
function values_comp(v)
    return (seq_num = v.seq_num, pos = v.pos, comp = true)
end
# In-place variant: keep only components whose magnitude exceeds the median.
function filter_code_components_using_median!(stored_code_components)
    mag_median = median(get_magnitude.(stored_code_components))
    filter!(x -> x.mag > mag_median, stored_code_components)
end
# NOTE(review): despite the `!`, this uses the NON-mutating `filter` and
# returns a new vector — callers (e.g. run_thru) re-filter the same source
# vector at several quantiles and rely on the input being left intact.
function filter_code_components_using_quantile!(stored_code_components; p=code_percentile)
    mag_percentile = quantile(get_magnitude.(stored_code_components), p)
    filter(x -> x.mag > mag_percentile, stored_code_components)
end
# Dispatch to the chosen magnitude filter; the quantile variant is the only
# one that accepts the `p` keyword, hence the special case.
function filter_code_components(stored_code_components;
    filter_fcn=filter_code_components_using_quantile!,
    quantile_v=code_percentile)
    if filter_fcn != filter_code_components_using_quantile!
        return filter_fcn(stored_code_components)
    else
        return filter_fcn(stored_code_components; p=quantile_v)
    end
end
"""
    get_scanning_range_of_filtered_code_components(stored_code_components)

Partition the (sequence-ordered) component vector into per-sequence index
ranges. Components are assumed sorted by `.seq` with consecutive sequence
ids starting at 1 (NOTE(review): a sequence with no surviving components
would shift later range-to-sequence pairings — confirm upstream).
"""
function get_scanning_range_of_filtered_code_components(stored_code_components)
    ranges = UnitRange{unit_range_int_t}[]
    isempty(stored_code_components) && return ranges
    cur_seq = 1; cur_range_start = 1;
    @inbounds for i in eachindex(stored_code_components)
        if stored_code_components[i].seq != cur_seq
            push!(ranges, cur_range_start:i-1)
            cur_range_start = i
            cur_seq += 1
        end
    end
    # flush the final sequence's range; the previous version dropped it, so the
    # last sequence's components never entered the triplet enumeration
    push!(ranges, cur_range_start:length(stored_code_components))
    return ranges
end
# Record one triplet occurrence in H: the key is the filter composition
# (f1,f2,f3) plus the offsets d12, d13 and the pattern span len = d13 + h
# (in code coordinates); the value stores the sequence index and the anchor
# position of the first component.
function insert_H!(H::Dictionary{composition_key_type, Vector{value_type}},
    cartesian_inds_sorted, i::Int, j::Int, k::Int, ind::Int, h)
    c1,c2,c3 = get_fils(cartesian_inds_sorted, i,j,k)
    c1_pos = get_f1_pos(cartesian_inds_sorted, i)
    d12,d13 = get_d12_d13(cartesian_inds_sorted, i,j,k)
    len = d13 + h
    key = create_key(c1,c2,c3,d12,d13,len)
    value = create_value(ind, c1_pos)
    haskey(H, key) ? push!(H[key], value) : insert!(H, key, [value])
end
# sort key: first entry of a component record (its position)
sort_by_x1(x) = x[1]
# For every sequence, sort its surviving code components by position and record
# all ordered triplets (i < j < k) of components in the dictionary H; `ind`
# (the range index) doubles as the sequence id stored with each occurrence.
function enumerate_triplets(stored_code_component_filtered, seq_ranges, hp)
    H = Dictionary{composition_key_type, Vector{value_type}}();
    @inbounds for ind in eachindex(seq_ranges)
        whats_in_store = @view stored_code_component_filtered[seq_ranges[ind]]
        cartesian_inds_sorted = sort(whats_in_store, by=sort_by_x1)
        car_len = length(cartesian_inds_sorted)
        for i = 1:car_len-2
            for j = i+1:car_len-1
                for k = j+1:car_len
                    insert_H!(H, cartesian_inds_sorted, i, j, k, ind, hp.h)
                end
            end
        end
    end
    return H
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 1896 | ##### From the word combinations H, obtain the count matrices from enriched keys ########
# Keep at most `max_word_combinations` of the enriched keys, preferring keys
# whose occurrence lists are longest (sort the per-key counts descending).
function get_words(H, enriched_keys, len_enriched_keys; max_word_combinations=num_pfms2process)
    q = getindices(H, enriched_keys);
    q_sorted = sort(length.(q), rev=true)
    if len_enriched_keys > max_word_combinations
        return Indices(collect(keys(q_sorted))[1:max_word_combinations])
    else
        return Indices(collect(keys(q_sorted))[1:len_enriched_keys])
    end
end
"""
    get_enriched_keys(H; max_word_combinations=num_pfms2process, dec=-5,
                      count_from=cover_more_than, count_to=cover_at_least)

Relax the coverage threshold from `count_from` down to `count_to` in steps of
`dec`; as soon as more than `max_word_combinations` keys qualify, return the
top `max_word_combinations` by occurrence count (via `get_words`). Otherwise
return whatever qualifies at the most permissive threshold.
"""
function get_enriched_keys(H; max_word_combinations = num_pfms2process,
    dec = -5,
    count_from = cover_more_than,
    count_to = cover_at_least
    )
    enriched_keys = nothing
    for count = count_from:dec:count_to
        enriched_keys = findall(x->length(x)> count, H)
        len_enriched_keys = length(enriched_keys)
        # honor the caller's cap: the previous version hard-coded
        # num_pfms2process here, silently capping larger requests
        # (e.g. run_thru asks for 1000) at the 500 default
        len_enriched_keys > max_word_combinations &&
            (return get_words(H, enriched_keys, len_enriched_keys; max_word_combinations=max_word_combinations))
    end
    return enriched_keys
end
# Accumulate a 4×len count matrix for every pattern key in H by summing the
# one-hot slices at each recorded occurrence. For comp=true occurrences,
# `reverse(onehot_code)` flips both dimensions (positions and base rows),
# which under the a,c,g,t row order of `atcg2dummy` is the reverse complement.
function obtain_count_matrices(data, H)
    enriched_keys = keys(H)
    count_matrices_lengths = Array{Int}(undef, length(enriched_keys));
    @inbounds for (ind,k) in enumerate(enriched_keys)
        count_matrices_lengths[ind] = k.len
    end
    count_matrices = [zeros(float_type, (4, count_matrices_lengths[i])) for i = 1:length(enriched_keys)];
    @inbounds for (ind,k) in enumerate(enriched_keys)
        for v in H[k]
            # map the nucleotide-resolution start/end into one-hot coordinates
            pos_start = four_based_ind(v.pos)
            pos_end = four_based_ind(v.pos+count_matrices_lengths[ind]-1)+3
            onehot_code = reshape((@view data.data_matrix[pos_start:pos_end,1,v.seq_num]),
                (4, count_matrices_lengths[ind]))
            count_matrices[ind] .+= v.comp ? reverse(onehot_code) : onehot_code
        end
    end
    return count_matrices
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 10825 | function obtain_filtered_stored_code_components(data, cdl, hp, len, projs;
        filter_fcn = filter_code_components_using_quantile!,
        quantile_v = code_percentile)
    # Run the trained encoder over the whole data set, drop low-magnitude code
    # components, and compute the per-sequence scanning ranges over the rest.
    @info "Retrieving the non-zero code components..."
    stored_code_component = code_retrieval(data, cdl, hp, len, projs)
    @info "Filtering out the low-magnitude non-zero code components..."
    stored_code_component_filtered = filter_code_components(stored_code_component;
        filter_fcn=filter_fcn,
        quantile_v=quantile_v)
    @info "Get the scanning ranges..."
    scanning_range =
        get_scanning_range_of_filtered_code_components(stored_code_component_filtered)
    return stored_code_component_filtered, scanning_range
end
# Enumerate all per-sequence triplet patterns and keep only the enriched keys
# (restricting the dictionary to them).
function obtain_enriched_word_combinations(stored_code_component_filtered, scanning_range, hp)
    H = enumerate_triplets(stored_code_component_filtered, scanning_range, hp)
    @info "Obtaining the enriched patterns..."
    enriched_keys = get_enriched_keys(H);
    H_w_enriched_keys = getindices(H, enriched_keys)
    return H_w_enriched_keys
end
# Convenience wrapper: code retrieval + filtering, then triplet enumeration
# and enrichment filtering, with default settings.
function obtain_H_w_enriched_keys(data, cdl, hp, len, projs)
    stored_code_component_filtered, scanning_range =
        obtain_filtered_stored_code_components(data, cdl, hp, len, projs)
    H_w_enriched_keys =
        obtain_enriched_word_combinations(stored_code_component_filtered, scanning_range, hp)
    return H_w_enriched_keys
end
# merge → trim → merge the enriched-pattern dictionary (helpers defined in the
# inference include files): merging again after trimming consolidates patterns
# that only become mergeable once trimmed.
function merge_trim_merge_H_w_enriched_keys(data, hp, H_w_enriched_keys, bg)
    H_w_enriched_keys_merged = merge_H(data, H_w_enriched_keys, hp, bg)
    H_w_enriched_keys_merged_and_trimmed = trim_H(data, H_w_enriched_keys_merged, bg)
    H_w_enriched_keys_merged_and_trimmed_and_merged =
        merge_H(data, H_w_enriched_keys_merged_and_trimmed, hp, bg)
    return H_w_enriched_keys_merged_and_trimmed_and_merged
end
"""
    greedy_align(H_w_enriched_keys_mtm, data, bg)

Instantiate motifs from the enriched triplet dictionary and refine them
greedily: `indep_run` passes with motif expansion, then two passes without it.
Each pass rescans the data (foreground and background), filters weak
positions/motifs, merges aligned motifs, and rebuilds the motif set from
fresh, trimmed, merged count matrices. Returns the refined motifs, or
`nothing` if any step throws.
"""
function greedy_align(H_w_enriched_keys_mtm, data, bg)
    try
        ms = enriched_keys2motifs(H_w_enriched_keys_mtm, data, bg);
        ms = alignment_merge!(ms, data, bg);
        for i = 1:indep_run
            println("indep run $i")
            ms = _greedy_align_pass(ms, data, bg; expand=true)
        end
        for i = 1:2
            println("indep run $i")
            ms = _greedy_align_pass(ms, data, bg; expand=false)
        end
        return ms
    catch e
        if isa(e, MethodError)
            @info "caught a method error"
        else
            # surface unexpected failures instead of swallowing them silently;
            # the caller still receives `nothing` either way
            @warn "greedy_align aborted" exception=(e, catch_backtrace())
        end
    end
    return nothing
end

# One refinement pass shared by both loops above; `expand` toggles the
# motif-expansion step that only the first `indep_run` passes perform.
function _greedy_align_pass(ms, data, bg; expand::Bool)
    scan_w_gpu!(ms, data);
    scan_w_gpu!(ms, data; bg=true);
    filter_positions_scores_usecomp!(ms, data, bg);
    ms = filter_insignificant_motifs(ms, data, bg);
    expand && expansions_ms!(ms, data, bg);
    ms = alignment_merge!(ms, data, bg);
    new_cmats = posdicts2countmats(ms, data.data_matrix);
    new_cmats = trim_cmats(new_cmats, bg);
    new_cmats = merge_count_matrices(new_cmats, bg);
    return countmats2motifs(new_cmats, bg)
end
# function run_thru(cdl, data, hp, len, projs)
# H_w_enriched_keys = obtain_H_w_enriched_keys(data, cdl, hp, len, projs)
# H_w_enriched_keys_mtm = merge_trim_merge_H_w_enriched_keys(data, hp, H_w_enriched_keys)
# ms = greedy_align(H_w_enriched_keys_mtm, data, hp)
# return ms
# end
# Build a merged/trimmed/merged enriched-pattern dictionary at a given
# magnitude quantile: filter the code components at `quantile`, enumerate
# triplets, keep up to `get_how_many` enriched keys, then merge-trim-merge.
function get_H_w_enriched_keys_mtm_quantile(stored_code_component, data, this_bg, hp; quantile=0.35, get_how_many=500)
    @info "Filtering out the low-magnitude non-zero code components... ($quantile)"
    stored_code_component_filtered = filter_code_components(stored_code_component;
        filter_fcn=filter_code_components_using_quantile!,
        quantile_v=quantile)
    @info "Get the scanning ranges..."
    scanning_range = get_scanning_range_of_filtered_code_components(stored_code_component_filtered)
    H = enumerate_triplets(stored_code_component_filtered, scanning_range, hp)
    @info "Obtaining the enriched patterns..."
    enriched_keys = get_enriched_keys(H; max_word_combinations=get_how_many);
    H_w_enriched_keys = getindices(H, enriched_keys)
    H_w_enriched_keys_mtm = merge_trim_merge_H_w_enriched_keys(data, hp, H_w_enriched_keys, this_bg)
    return H_w_enriched_keys_mtm
end
# Baseline pattern table: keep almost all non-zero code components (very permissive
# `base_thresh` cutoff) and retain only keys supported by more than `enriched_atleast`
# occurrences.
function get_H_base(stored_code_component, hp; base_thresh=0.01, enriched_atleast=3)
    stored_code_component_filtered = filter_code_components(stored_code_component; 
        filter_fcn=filter_code_components_using_quantile!, 
        quantile_v=base_thresh)
    scanning_range = get_scanning_range_of_filtered_code_components(stored_code_component_filtered)
    H = enumerate_triplets(stored_code_component_filtered, scanning_range, hp)
    # NOTE(review): `>` keeps keys with strictly more than `enriched_atleast` entries,
    # i.e. at least enriched_atleast + 1 — confirm this off-by-one is intended.
    enriched_keys = findall(x->length(x)> enriched_atleast, H)
    @info "Got H_base"
    return getindices(H, enriched_keys)
end
# Fallback when the quantile sweep yields nothing: merge-trim-merge the baseline table.
get_backup_enriched_keys(H_base, data, hp, this_bg) = merge_trim_merge_H_w_enriched_keys(data, hp, H_base, this_bg)
# Full discovery pass: retrieve the non-zero code components, collect enriched
# patterns over a sweep of magnitude quantiles, fold in the baseline patterns, and
# refine the resulting motifs (expand, align-merge, trim, re-merge).
# Returns the refined motifs, or `nothing` when no enriched pattern survives.
function run_thru(data, cdl, hp, len, projs, this_bg)
    @info "Retrieving the non-zero code components..."
    stored_code_component = code_retrieval(data, cdl, hp, len, projs)
    H_base = get_H_base(stored_code_component, hp)
    # The original chained six near-identical calls and five merges; a left fold over
    # the quantiles gives the same result.  `merge` lets later dictionaries win on key
    # collisions, so sweeping from the loosest (0.75) to the strictest (0.25) cutoff
    # preserves the original precedence (the 0.25 table overrides the others).
    quantile_sweep = (0.75, 0.65, 0.5, 0.45, 0.35, 0.25)
    H_w_enriched_keys_mtm = mapreduce(
        q -> get_H_w_enriched_keys_mtm_quantile(stored_code_component, data, this_bg, hp;
                                                quantile=q, get_how_many=1000),
        merge, quantile_sweep)
    # Fold the baseline occurrences into each surviving key (union also de-duplicates).
    for k in keys(H_w_enriched_keys_mtm)
        if haskey(H_base, k)
            H_w_enriched_keys_mtm[k] = union(H_w_enriched_keys_mtm[k], H_base[k])
        else
            H_w_enriched_keys_mtm[k] = union(H_w_enriched_keys_mtm[k])
        end
    end
    length(H_w_enriched_keys_mtm) == 0 && (H_w_enriched_keys_mtm = get_backup_enriched_keys(H_base, data, hp, this_bg))
    length(H_w_enriched_keys_mtm) == 0 && return nothing
    ms = enriched_keys2motifs(H_w_enriched_keys_mtm, data, this_bg);
    expansions_ms!(ms, data, this_bg);
    ms = alignment_merge!(ms, data, this_bg);
    ms = merge_to_remove_redundancy!(ms, data, this_bg)
    new_cmats = posdicts2countmats(ms, data.data_matrix);
    new_cmats = trim_cmats(new_cmats, this_bg);
    new_cmats = merge_count_matrices(new_cmats, this_bg);
    return countmats2motifs(new_cmats, this_bg);
end
# Like `run_thru`, but stops right after converting the enriched patterns into
# motifs (no expansion / merge refinement).  Returns `nothing` when nothing is
# enriched.  (This block's final `end` also carried dataset-join junk; removed.)
function run_thru2(data, cdl, hp, len, projs, this_bg)
    @info "Retrieving the non-zero code components..."
    stored_code_component = code_retrieval(data, cdl, hp, len, projs)
    H_base = get_H_base(stored_code_component, hp)
    # Quantile sweep ordered so the strictest cutoff (0.25) wins key collisions,
    # matching the original chain of merges.
    quantile_sweep = (0.75, 0.65, 0.5, 0.45, 0.35, 0.25)
    H_w_enriched_keys_mtm = mapreduce(
        q -> get_H_w_enriched_keys_mtm_quantile(stored_code_component, data, this_bg, hp;
                                                quantile=q, get_how_many=1000),
        merge, quantile_sweep)
    for k in keys(H_w_enriched_keys_mtm)
        if haskey(H_base, k)
            H_w_enriched_keys_mtm[k] = union(H_w_enriched_keys_mtm[k], H_base[k])
        else
            H_w_enriched_keys_mtm[k] = union(H_w_enriched_keys_mtm[k])
        end
    end
    length(H_w_enriched_keys_mtm) == 0 && (H_w_enriched_keys_mtm = get_backup_enriched_keys(H_base, data, hp, this_bg))
    length(H_w_enriched_keys_mtm) == 0 && return nothing
    ms = enriched_keys2motifs(H_w_enriched_keys_mtm, data, this_bg);
    # expansions_ms!(ms, data, this_bg);
    return ms
end
|
[
"MIT"
# Per-column information content of a count matrix relative to background `bg`,
# after adding pseudocounts `ps`.  Returns a vector with one IC value per column.
function cmat2ic(cmat; bg=_bg_, ps = _pseudocounts_) # TODO change ps
    counts = cmat .+ ps
    probs = counts ./ sum(counts, dims=1)
    return vec(sum(probs .* log2.(probs ./ bg), dims=1))
end
# Span of columns whose information content clears `ic_thresh`:
# returns (span start, span end, keep) where `keep` is true when the span is at
# least `length_thresh` columns wide.  When no column clears the threshold the
# span is empty (start = length+1, end = 0).
function ic_span(ic_vec; ic_thresh=ic_trim_thresh, length_thresh=pfm_minimal_length)
    n = length(ic_vec)
    # Same predicate as the original scanning loops: stop at the first entry that is
    # NOT below the threshold (written as !(x < t) so NaN entries stop the scan too).
    first_keep = findfirst(x -> !(x < ic_thresh), ic_vec)
    last_keep = findlast(x -> !(x < ic_thresh), ic_vec)
    span_start = isnothing(first_keep) ? n + 1 : first_keep
    span_end = isnothing(last_keep) ? 0 : last_keep
    return span_start, span_end, span_end - span_start + 1 ≥ length_thresh
end
# Trim every count matrix to its informative span (per `ic_span` on its IC profile),
# dropping matrices whose trimmed span is too short to keep.
function trim_cmats(cmats, bg)
    spans = ic_span.(cmat2ic.(cmats; bg=bg))
    trimmed = eltype(cmats)[]
    for (cmat, (span_start, span_end, keep)) in zip(cmats, spans)
        keep && push!(trimmed, cmat[:, span_start:span_end])
    end
    return trimmed
end
# Trim each pattern in `H` to its informative span: recompute the count matrices,
# find every matrix's IC span, and rebuild the dictionary with shifted start
# positions and updated key lengths.  Patterns whose span is too short are dropped.
function trim_H(data, H, bg)
    count_matrices = obtain_count_matrices(data, H)
    ic = cmat2ic.(count_matrices; bg=bg)
    trim_info = ic_span.(ic)
    new_dict = Dictionary{composition_key_type, Vector{value_type}}();
    for ((i, (span_start, span_end, keep)), k) in zip(enumerate(trim_info), keys(H))
        if keep
            vs = Vector{value_type}(undef, length(H[k]))
            len = span_end - span_start + 1
            # key keeps its filter/distance fields; only the length changes
            new_k = (f1=k.f1, f2=k.f2, f3=k.f3, d12=k.d12, d13=k.d13, len=len)
            for (ind,v) in enumerate(H[k])
                # shift each occurrence's start to the beginning of the kept span
                vs[ind] = (seq_num=v.seq_num, pos=v.pos+span_start-1, comp=v.comp)
            end
            insert!(new_dict, new_k, vs)
        end
    end
    println("dict length : $(length(new_dict))")
    return new_dict
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
# Add `pseudocount` to both count matrices, then build their position weight
# matrices (log2 odds of the column frequencies against background `bg`).
# Returns (counts1, counts2, pwm1, pwm2) with the pseudocounts applied.
function get_matrices(countmat1, countmat2, pseudocount, bg)
    counts1 = countmat1 .+ pseudocount
    counts2 = countmat2 .+ pseudocount
    pwm1 = log2.((counts1 ./ sum(counts1, dims=1)) ./ bg)
    pwm2 = log2.((counts2 ./ sum(counts2, dims=1)) ./ bg)
    return counts1, counts2, pwm1, pwm2
end
# Average (over columns) of the ALLR similarity between two count matrices,
# computed from their pseudocount-smoothed counts and PWMs.
function avg_allr(countmat1, countmat2, bg; 
    pseudocount=float_type(0.01))
    counts1, counts2, pwm1, pwm2 = get_matrices(countmat1, countmat2, pseudocount, bg)
    col_allr = sum(counts2 .* pwm1 .+ counts1 .* pwm2, dims=1) ./ 
        sum(counts1 + counts2, dims=1)
    return sum(col_allr) / length(col_allr)
end
# Sorted unique widths of the count matrices plus a membership mask:
# length_indicator[i, j] is true when matrix i has the j-th unique width.
function obtain_uniq_len_and_len_indicator(count_matrices)
    lens = [size(cm, 2) for cm in count_matrices]
    uniq_lens = sort(unique(lens))
    length_indicator = BitMatrix(undef, (length(count_matrices), length(uniq_lens)))
    @inbounds for (j, uniq_len) in enumerate(uniq_lens)
        for (i, l) in enumerate(lens)
            length_indicator[i, j] = (l == uniq_len)
        end
    end
    return uniq_lens, length_indicator
end
# Select the count matrices of the i-th unique width, their reverse-complements,
# how many there are, and a fresh "not merged yet" flag vector.
function obtain_cmats_and_its_info(count_matrices, length_indicator, i)
    selected = count_matrices[view(length_indicator, :, i)]
    selected_revcomp = [reverse(cm) for cm in selected]
    n_selected = length(selected)
    return selected, selected_revcomp, n_selected, fill(true, n_selected)
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
# One pass of pairwise merging over the pattern dictionary: for matrices of equal
# width whose ALLR similarity (forward or reverse-complement) clears `allr_thresh`,
# move pattern k's occurrences into pattern j and empty k.  Returns the surviving
# dictionary and its key vector.
function merging_h!(count_matrices, H_w_enriched_keys, enriched_keys_vec, bg)
    uniq_lens, length_indicator = 
        obtain_uniq_len_and_len_indicator(count_matrices)
    @inbounds for i = 1:length(uniq_lens)
        cmat_w_len_i, cmat_w_len_i_c, num_of_cmats_this_len, unmerged = 
            obtain_cmats_and_its_info(count_matrices, length_indicator, i)
        enriched_keys_here = enriched_keys_vec[view(length_indicator,:, i)]
        for j = 1:num_of_cmats_this_len, k = j+1:num_of_cmats_this_len
            (unmerged[j] && unmerged[k]) || continue
            allr_score = avg_allr(cmat_w_len_i[j], cmat_w_len_i[k], bg)
            allr_score_c = avg_allr(cmat_w_len_i[j], cmat_w_len_i_c[k], bg)
            if allr_score > allr_thresh || allr_score_c > allr_thresh
                unmerged[k] = false
                if merge_add_counts
                    # BUG FIX: pick the orientation with the higher ALLR *score*.
                    # The original compared the two Bool threshold flags
                    # (`(allr > t) > (allr_c > t)`), which chose the reverse-complement
                    # orientation whenever both orientations passed the threshold.
                    if allr_score > allr_score_c
                        append!(H_w_enriched_keys[enriched_keys_here[j]], 
                            H_w_enriched_keys[enriched_keys_here[k]])
                    else
                        append!(H_w_enriched_keys[enriched_keys_here[j]], 
                            values_comp.(H_w_enriched_keys[enriched_keys_here[k]]))
                    end
                end
                H_w_enriched_keys[enriched_keys_here[k]] = Vector{value_type}() # make empty
            end
        end
    end
    merged_into_keys = findall(x->length(x)>0, H_w_enriched_keys)
    enriched_keys_vec = collect(merged_into_keys)
    H_w_enriched_keys = getindices(H_w_enriched_keys, merged_into_keys)
    return H_w_enriched_keys, enriched_keys_vec
end
# Repeatedly apply `merging_h!` until the pattern count stops shrinking (fixed
# point), printing the reduction after each productive pass.
function merge_H(data, H_w_enriched_keys, hp, bg)
    count_matrices = obtain_count_matrices(data, H_w_enriched_keys)
    enriched_keys_vec = collect(keys(H_w_enriched_keys))
    len_cmats = count_matrices |> length;
    H_w_enriched_keys, enriched_keys_vec = merging_h!(count_matrices, H_w_enriched_keys, enriched_keys_vec, bg)
    len_merged_cmats = keys(H_w_enriched_keys) |> length;
    while len_cmats > len_merged_cmats
        println("Removed $(len_cmats-len_merged_cmats) redundant count matrices; 
        left with $(len_merged_cmats) count matrices.")
        count_matrices = obtain_count_matrices(data, H_w_enriched_keys)
        len_cmats = count_matrices |> length;
        H_w_enriched_keys, enriched_keys_vec = merging_h!(count_matrices, H_w_enriched_keys, enriched_keys_vec, bg)
        len_merged_cmats = keys(H_w_enriched_keys) |> length;
    end
    return H_w_enriched_keys
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
# One pass of pairwise merging among count matrices of equal width: matrices whose
# ALLR similarity (forward or reverse-complement) clears `allr_thresh` are summed
# into the earlier matrix, and the later one is dropped.
function merging(count_matrices, bg)
    uniq_lens, length_indicator = 
        obtain_uniq_len_and_len_indicator(count_matrices)
    # merge the count matrices, width class by width class
    merged_cmats = Vector{Vector{Matrix{float_type_retrieval}}}();
    @inbounds for i = 1:length(uniq_lens)
        cmat_w_len_i, cmat_w_len_i_c, num_of_cmats_this_len, unmerged = 
            obtain_cmats_and_its_info(count_matrices, length_indicator, i)
        for j = 1:num_of_cmats_this_len, k = j+1:num_of_cmats_this_len
            (unmerged[j] && unmerged[k]) || continue
            allr_score = avg_allr(cmat_w_len_i[j], cmat_w_len_i[k], bg)
            allr_score_c = avg_allr(cmat_w_len_i[j], cmat_w_len_i_c[k], bg)
            if allr_score > allr_thresh || allr_score_c > allr_thresh
                unmerged[k] = false
                if merge_add_counts
                    # BUG FIX: pick the orientation with the higher ALLR *score*; the
                    # original compared Bool threshold flags, which added the
                    # reverse-complement counts whenever both orientations passed.
                    if allr_score > allr_score_c
                        cmat_w_len_i[j] .+= cmat_w_len_i[k]
                    else
                        cmat_w_len_i[j] .+= cmat_w_len_i_c[k]
                    end
                end
            end
        end
        push!(merged_cmats, cmat_w_len_i[unmerged])
    end
    return Iterators.flatten(merged_cmats) |> collect
end
# Repeatedly run `merging` passes until the number of count matrices stops
# shrinking, printing the reduction after each productive pass.
function merge_count_matrices(count_matrices, bg)
    len_cmats = count_matrices |> length;
    merged_cmats = merging(count_matrices, bg)
    len_merged_cmats = merged_cmats |> length;
    while len_cmats > len_merged_cmats
        println("Removed $(len_cmats-len_merged_cmats) redundant count matrices; 
        left with $(len_merged_cmats) count matrices.")
        len_cmats = len_merged_cmats
        merged_cmats = merging(merged_cmats, bg)
        len_merged_cmats = merged_cmats |> length;
    end
    return merged_cmats
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
#=
Best possible score of a PWM
# Input
`pwm::Matrix{Real}`: a 4 x m matrix
# Output
the best possible score this matrix can get
=#
# Highest attainable score of a PWM: the column-wise maxima summed (matrix method),
# or simply the maximum entry for a single column.
best_score(pwm::AbstractArray{T,2}) where {T <: Real} = sum(maximum(pwm, dims=1))
best_score(pwm_col::AbstractArray{T,1}) where {T<: Real} = maximum(pwm_col);
#=
Worst possible score of a PWM
# Input
`pwm::Matrix{Real}`: a 4 x m matrix
# Output
the worst possible score this matrix can get
=#
# Lowest attainable score of a PWM: the column-wise minima summed (matrix method),
# or simply the minimum entry for a single column.
worst_score(pwm::AbstractArray{T,2}) where {T <: Real} = sum(minimum(pwm, dims=1))
worst_score(pwm_col::AbstractArray{T,1}) where {T <: Real} = minimum(pwm_col);
#=
Return a column-permuted PWM that minimize the score range so that δ₁ ≥ δ₂ ≥ … ≥ δₘ
where δᵢ = best_score(pwm[:,i])-worst_score(pwm[:,i]).
# Input
`pwm::Matrix{Real}`: a 4 x m matrix
# Output
a column-permuted pwm
=#
# Permute the PWM's columns so their score ranges (column max - column min) are
# non-increasing; returns a view into `pwm` (no copy).
min_score_range(pwm) = 
    @inbounds view(pwm, :, sortperm(
        [maximum(view(pwm, :, j)) - minimum(view(pwm, :, j)) for j in axes(pwm, 2)],
        rev=true))
#=
"Round the PWM"
# Input
`pwm::Matrix{Real}`: a 4 x m matrix
`granularity`: a small positive real number e.g. 0.01 or 0.001, etc.
# Output
a rounded pwm of the input pwm
=#
# Quantize the PWM down to multiples of `granularity` (floor toward -Inf).
round_pwm(pwm, granularity) = granularity .* floor.(pwm ./ granularity)
#=
The maximum error induced by the rounded pwm M_ϵ
(see definition 3 in https://almob.biomedcentral.com/articles/10.1186/1748-7188-2-15; this is the quantity E)
# Input
`pwm::Matrix{Real}`: a 4 x m matrix
`granularity`:
# Output
A positve real number that's the maximum error induced by the rounded pwm M_ϵ
=#
# Maximum total error introduced by quantizing the PWM: sum over columns of the
# largest per-column difference between `pwm` and `pwm_rounded`.
calc_E(pwm, pwm_rounded) = sum(maximum(pwm .- pwm_rounded, dims=1))
#=
Note: Use a nested dictionary to represent the distribution Q
Since Q is used to reference the probability of (M[1…i],score),
the keys in the first layer is i, and the value in the first layer
are dictionaries with scores as keys and probability as values
call create_Q(m) to initialize such a distribution Q
where m is the "width" of the PWM
=#
# Initialize the score-distribution table: Q[i] maps a partial score over the first
# i columns to its probability mass; Q[0] starts as {0 => 1} (empty prefix, mass 1).
create_Q(m) = 
    Dict{Int16, SortedDict{Float64,Float64}}(i==0 ? i=>SortedDict(0=>1) : i=>SortedDict() for i=0:m);
#=
Input:
pwm: a 4 x m matrix
α, β: score interval [α, β]
bg: 4 x 1 vector that specifies the multinomial genomic background; default to flat background.
Output:
Q: a probability mass table
e.g. Q[m] shows all the weights of P[pwm_score = η] for α ≤ η ≤ β
=#
# Accumulate the mass of reaching partial score `t` at column `i` by extending a
# prefix of score `score` with letter `j`: adds Q[i-1][score] * bg[j] into Q[i][t].
function most_inner_Q!(Q, i, t, score, bg, j)
    @inbounds if haskey(Q[i], t)
        Q[i][t] += Q[i-1][score]*bg[j];
    else
        Q[i][t] = Q[i-1][score]*bg[j];
    end
end
# Extend a prefix of score `score` by each of the 4 letters of column `i`, keeping
# only partial scores that can still end up inside [alpha, β] given the best (`bs`)
# and worst (`ws`) scores achievable from the remaining columns.
function modifyQ!(Q, score, pwm_, i, alpha, bs, β, ws, bg)
    @inbounds for j = 1:4
        t = score + pwm_[j,i];
        (alpha - bs ≤ t ≤ β - ws) && most_inner_Q!(Q, i, t, score, bg, j)
    end
end
# One dynamic-programming step: push every mass from column i-1 to column i.
# `bs`/`ws` bound what the columns after i can still add to the score.
function inner_Q!(Q, i, alpha, β, bg, pwm_, m)
    bs = i+1 > m ? 0 : best_score(@view pwm_[:,i+1:m]);
    ws = i+1 > m ? 0 : worst_score(@view pwm_[:,i+1:m]);
    @inbounds for score in keys(Q[i-1])
        modifyQ!(Q, score, pwm_, i, alpha, bs, β, ws, bg)
    end
end
# Probability mass table of PWM scores restricted to [alpha, β] under the
# multinomial background `bg`.  After the loop, Q[m] holds the weights
# P[pwm score = η] for every reachable η in the interval.
function score_distribution(pwm_::Matrix{T}, 
                            alpha::Real, β::Real,
                            bg=_bg_
                            ) where T <: Real 
    m = size(pwm_,2);
    Q = create_Q(m);
    @inbounds for i = 1:m 
        inner_Q!(Q, i, alpha, β, bg, pwm_, m)
    end
    return Q
end
# Sum of all probability masses stored in one layer of Q.
Q_sum(Q_m::SortedDict{Float64,Float64}) = sum(values(Q_m));
# Largest score threshold α whose tail mass P[score ≥ α] is still ≥ pval.
# Walks the scores in increasing (SortedDict) order, peeling mass off the tail;
# returns the first key where the remaining tail would drop below `pval`.
function find_largest_alpha(Q_m::SortedDict{T,T}, pval::T) where T <: Real
    q_sum = Q_sum(Q_m);
    largest_k = nothing;
    for (k,v) in Q_m
        if q_sum ≥ pval
            largest_k = k;
        else
            return k
        end
        q_sum -= v;
    end
    return largest_k
end
# p-value of threshold `alpha`: total probability mass at scores ≥ alpha.
function pval_w_Qm(Qm::SortedDict{T,T}, alpha::Real) where T <: Real
    return sum((v for (k, v) in Qm if k ≥ alpha); init=0.0)
end
# Like `find_largest_alpha`, but the tail mass is inflated by `pval_ϵ` to account
# for the maximum error introduced by PWM quantization.
function find_δ(Q_m::SortedDict{T,T}, pval_ϵ::Real, pval::Real) where T <: Real
    q_sum_plus_pval_ϵ = Q_sum(Q_m)+pval_ϵ;
    largest_δ = nothing;
    for (k,v) in Q_m
        if q_sum_plus_pval_ϵ ≥ pval
            largest_δ = k;
        else
            return k
        end
        q_sum_plus_pval_ϵ -= v;
    end
    return largest_δ
end
"""
pval2score(pwm, pval, ϵ=1e-1, k=10, bg=[.25,.25,.25,.25])
Returns the highest score(M,pval) of a `pwm` such that p-value is greater or equal to `pval`.
Input:
* `pwm`: a 4 x m matrix
* `pval`: a p-value; e.g. pval = 1e-3
* `ϵ`: initial granularity (optional)
* `k`: Refinement parameter (optional)
* `bg`: multinomial background (optional)
Output
* `alpha`: the highest score-threshold
"""
function pvalue2score(pwm::Matrix{T},
pval::Real,
ϵ=_granularity_;
bg=_bg_
) where T <: Real
@assert 0 ≤ pval ≤ 1 "pvalue must be in [0,1]"
@assert size(pwm,1) == 4 "The input matrix must have only 4 rows"
pwm_Float64 = Float64.(pwm);
pval_Float64 = Float64(pval);
bg_Float64 = Float64.(bg);
mpwm = min_score_range(pwm_Float64);
m = size(pwm, 2);
pwm_ϵ = round_pwm(mpwm, ϵ);
Q = create_Q(m);
Q = score_distribution(pwm_ϵ, worst_score(pwm_ϵ), Inf, bg_Float64);
@inbounds alpha = find_largest_alpha(Q[m], pval_Float64);
return alpha;
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
# function make_effective_pos_mat(ms)
# len_max = maximum(ms.lens)
# effective_positions_mat = fill(false, (ms.num_motifs, len_max))
# @inbounds for i = 1:ms.num_motifs
# effective_positions_mat[i, 1:ms.lens[i]] = ms.effective_positions[i]
# end
# return cu(effective_positions_mat)
# end
# A scan hit: (motif index, sequence index, position), packed as UInt32s.
const record_t = NTuple{3, UInt32}
const found_record_t = Vector{record_t};
# number of sequences scanned per GPU batch
const batch_size_greedy = 5000;
# Shift a hit `c` from batch-local to absolute sequence indexing for a batch
# starting at sequence `n` (second component becomes c[2] + n - 1).
map_cartesian_n_increment(c, n)::record_t = c[1], c[2] + n -1, c[3]
############## greedy alignment ###############
# greedy search kernel
# CUDA kernel: for every (motif k, sequence n, offset l) triple, accumulate motif
# k's PWM score against the one-hot window of sequence n starting at l, writing the
# positive part into pos_scores[k, n, l] (negative totals are clamped to 0).
function greedy_search!(pwms, data_dat_gpu, lens, pos_scores)
    k = (blockIdx().x - 1) * blockDim().x + threadIdx().x; # kth motif
    n = (blockIdx().y - 1) * blockDim().y + threadIdx().y; # nth sequence
    l = (blockIdx().z - 1) * blockDim().z + threadIdx().z; # lth position
    L, N = size(data_dat_gpu); L_div_4 = CUDA.Int(L/4);  # 4 rows per sequence position
    K, _, _ = size(pwms);
    if k ≤ K && n ≤ N && l ≤ L_div_4-lens[k]+1
        @inbounds for (ind,i) in enumerate(l:l+lens[k]-1)
            # if eff_pos_mat[k,ind]
            for a = 1:4
                pos_scores[k,n,l] += pwms[k,a,ind]*data_dat_gpu[(i-1)*4+a,n];
            end
            # end
        end
        # keep only positive scores; everything else is treated as "no hit"
        pos_scores[k,n,l] = pos_scores[k,n,l] > 0f0 ? pos_scores[k,n,l] : 0f0;
    end
    return nothing
end
# Record every (motif, sequence, offset) hit and its score into the per-motif
# dictionaries; `rc=true` marks all hits as reverse-complement matches.
function modify_w_found!(found_record, score_record, positions, scores, use_comp; rc=false)
    comp = rc ? true : false
    @inbounds for (hit, hit_score) in zip(found_record, score_record)
        motif_idx, seq_idx, offset = hit[1], hit[2], hit[3]
        if haskey(positions[motif_idx], seq_idx)
            push!(positions[motif_idx][seq_idx], offset)
            push!(scores[motif_idx][seq_idx], hit_score)
            push!(use_comp[motif_idx][seq_idx], comp)
        else
            positions[motif_idx][seq_idx] = [offset]
            scores[motif_idx][seq_idx] = [hit_score]
            use_comp[motif_idx][seq_idx] = [comp]
        end
    end
end
# Select the train/test data matrix, and its shuffled-background counterpart.
data_(data; test=false) = test ? data.data_matrix_test : data.data_matrix
data_bg(data; test=false) = test ? data.data_matrix_bg_test : data.data_matrix_bg
# Scan all motifs in `ms` against the selected data matrix in GPU batches.
# Returns the flat list of hits (motif, sequence, offset) with absolute sequence
# indices, and the matching scores.  `rc=true` scans the reverse-complement PWMs.
function get_pos_scores_arr(ms, data; rc=false, bg=false, test=false)
    data_matrix = bg ? data_bg(data; test=test) : data_(data; test=test);
    found_record = found_record_t()
    score_record = float_type_retrieval[]
    # TODO fix this ugly hack — promote a 2-D matrix to the expected 3-D layout
    length(size(data_matrix)) == 2 && (data_matrix = reshape(data_matrix, (size(data_matrix,1), 1, size(data_matrix,2))))
    L, _, N = size(data_matrix)
    maxlen = maximum(ms.lens);
    # pack all PWMs into one zero-padded (num_motifs × 4 × maxlen) array
    pwms = zeros(float_type_retrieval, ms.num_motifs, 4, maxlen);
    @inbounds for i = 1:ms.num_motifs pwms[i,:,1:ms.lens[i]] = 
        rc ? reverse(ms.pwms[i]) : ms.pwms[i]; end
    for n = 1:batch_size_greedy:N
        nend = min(n+batch_size_greedy-1, N)
        this_batch_size = nend-n+1
        data_matrix_gpu = reshape(cu(float_type_retrieval.(data_matrix[:,1,n:nend])), (L, this_batch_size));
        pos_scores = CUDA.zeros(float_type_retrieval, ms.num_motifs, this_batch_size, L);
        @cuda threads=ker_3d blocks=b_size_3d(pos_scores) greedy_search!(cu(pwms), 
                            data_matrix_gpu, 
                            cu(ms.lens), 
                            pos_scores
                            );
        pos_scores_arr = Array(pos_scores);
        found = findall(pos_scores_arr .> 0f0);
        # shift batch-local sequence indices to absolute indices before recording
        append!(found_record, map_cartesian_n_increment.(found, n))
        append!(score_record, pos_scores_arr[found])
    end
    return found_record, score_record
end
# Scan both strands (forward and reverse-complement), then assemble the per-motif
# positions/scores/use_comp dictionaries.
function gpu_scan(ms, data; bg=false, test=false)
    found_record, score_record = 
        get_pos_scores_arr(ms, data; rc=false, bg=bg, test=test);
    found_record_rc, score_record_rc = 
        get_pos_scores_arr(ms, data; rc=true, bg=bg, test=test);
    positions, scores, use_comp = motifs_prep(ms);
    modify_w_found!(found_record, score_record, positions, scores, use_comp; rc=false)
    modify_w_found!(found_record_rc, score_record_rc, positions, scores, use_comp; rc=true)
    return positions, scores, use_comp
end
# Run the GPU scan and store the resulting hit tables on `ms`
# (in the `*_bg` fields when `bg=true`).
function scan_w_gpu!(ms, data; bg=false)
    hits, hit_scores, hit_comps = gpu_scan(ms, data; bg=bg)
    if bg
        ms.positions_bg = hits
        ms.scores_bg = hit_scores
        ms.use_comp_bg = hit_comps
    else
        ms.positions = hits
        ms.scores = hit_scores
        ms.use_comp = hit_comps
    end
end
############## greedy alignment with non-overlap ###############
# Position of the highest unmasked score in a vector-of-vectors:
# returns (inner index, outer index), or (nothing, nothing) when nothing is unmasked.
function max_score_ind(scores_n, scores_n_mask)
    best = -Inf
    best_inner = nothing
    best_outer = nothing
    @inbounds for outer in eachindex(scores_n)
        for inner in eachindex(scores_n[outer])
            v = scores_n[outer][inner]
            if v > best && scores_n_mask[outer][inner]
                best = v
                best_inner = inner
                best_outer = outer
            end
        end
    end
    return best_inner, best_outer
end
# Position stored at the (inner, outer) index pair returned by `max_score_ind`.
max_pos_ind(positions_n, max_score_ind, max_score_m) = 
    positions_n[max_score_m][max_score_ind]
# Do intervals [a_lo, a_hi] and [b_lo, b_hi] share at least one point?
# (Checked endpoint-by-endpoint, as in the original.)
_intersect_(a_lo, a_hi, b_lo, b_hi) = 
    (a_lo ≤ b_lo ≤ a_hi) || (a_lo ≤ b_hi ≤ a_hi) || 
    (b_lo ≤ a_lo ≤ b_hi) || (b_lo ≤ a_hi ≤ b_hi)
# Greedy non-overlapping hit selection, per sequence: repeatedly take the highest
# remaining score across all motifs, keep it only when its span does not intersect
# any previously kept span, then mask it out.  Finally each motif's
# positions/scores/use_comp entries are filtered down to the kept hits.
function __non_overlap_scan__!(positions, scores, use_comp, lens, N)
    @inbounds for n = 1:N
        spans_pos = Int[];
        spans_len = Int[];
        # per-motif keep flags for this sequence (initially nothing is kept)
        indices_to_keep_n = [
            haskey(positions[i], n) ? fill(false, length(positions[i][n])) : Bool[] 
                for i in eachindex(positions)];
        scores_n = [haskey(scores[i], n) ? scores[i][n] : float_type_retrieval[] for i in eachindex(positions)];
        scores_n_mask = [fill(true, length(s)) for s in scores_n]; # so entries in ms.scores aren't modified
        positions_n = [haskey(positions[i], n) ? positions[i][n] : Int[] for i in eachindex(positions)];
        maxscore_ind, maxscore_ind_m = max_score_ind(scores_n, scores_n_mask)
        if !isnothing(maxscore_ind)
            while !isnothing(maxscore_ind)
                maxpos_ind = max_pos_ind(positions_n, maxscore_ind, maxscore_ind_m)
                intersect_ = false;
                for (p,l) in zip(spans_pos, spans_len)
                    # check whether this "segment" intersect with any previous segments
                    p_end = p+l-1;
                    p_max = positions_n[maxscore_ind_m][maxscore_ind];
                    p_max_end = p_max+lens[maxscore_ind_m]-1;
                    if _intersect_(p, p_end, p_max, p_max_end)
                        intersect_ = true;
                        break
                    end
                end
                if !intersect_
                    indices_to_keep_n[maxscore_ind_m][maxscore_ind] = true;
                    push!(spans_pos, maxpos_ind);
                    push!(spans_len, lens[maxscore_ind_m]);
                end
                # mask the processed hit (kept or not) and find the next best one
                scores_n_mask[maxscore_ind_m][maxscore_ind]=false;
                maxscore_ind, maxscore_ind_m = max_score_ind(scores_n, scores_n_mask)
            end
            for j in eachindex(positions)
                if haskey(positions[j], n)
                    positions[j][n] = positions[j][n][indices_to_keep_n[j]];
                    scores[j][n] = scores[j][n][indices_to_keep_n[j]];
                    use_comp[j][n] = use_comp[j][n][indices_to_keep_n[j]];
                    # so that the set of sequences covered by pwms are disjoint
                end
            end
        end
    end
    return positions, scores, use_comp
end
# Apply the greedy non-overlapping hit selection over all N sequences, then drop
# motifs left with no hits at all.
function non_overlap_scan!(ms::motifs{T,S}, N, bg) where {T <: Int, S <: Real}
    ms.positions, ms.scores, ms.use_comp = 
        __non_overlap_scan__!(ms.positions, ms.scores, ms.use_comp, ms.lens, N)
    keep = get_activate_counts(ms) .> 0;
    return filter_motifs_w_filter_vec(ms, keep, bg);
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
#################################### obtain the enlarged matrix ####################################
# Reverse-complement view of the one-hot slab data_matrix[start_:end_, 1, k],
# returned as a reversed 4 × len matrix.
function submat_comlement(data_matrix, start_, end_, k, len)
    slab = reshape(view(data_matrix, start_:end_, 1, k), (4, len))
    return reverse(slab)
end
# Add one 4 × len window of sequence `seq_ind` into `enlarged_mat`; with
# `use_comp=true` the window is reverse-complemented first.
# `four_based_ind` converts length-unit positions to row offsets (4 rows/position).
function enlarged_mat_add!(enlarged_mat, data_matrix, _start_, _end_, seq_ind, use_comp)
    if use_comp
        enlarged_mat .+= submat_comlement(data_matrix, four_based_ind(_start_), four_based_ind(_end_)+3, seq_ind, size(enlarged_mat,2))
    else
        enlarged_mat .+= reshape(view(data_matrix, four_based_ind(_start_):four_based_ind(_end_)+3,1,seq_ind), (4,size(enlarged_mat,2)))
    end
end
# For the `ind_k`-th occurrence of motif `pfm_ind` on sequence `seq_ind`, accumulate
# the widened window (original span extended by diff_front/diff_end) into
# `enlarged_mat`, and record its new start and strand in the dictionaries.
# Windows that would fall outside [1, L] are skipped entirely.
function enlarged_mat_increment!(ms, pfm_ind, seq_ind, ind_k, diff_front, diff_end, data_matrix, L, 
                                 enlarged_mat, enlarged_dict, enlarged_dict_use_comp; comp_match=false)
    use_comp = ms.use_comp[pfm_ind][seq_ind][ind_k];
    # on the reverse strand the "front" extension applies to the opposite side
    front_diff = use_comp ? diff_end : diff_front;
    end_diff = diff_front + diff_end
    _start_ = ms.positions[pfm_ind][seq_ind][ind_k] - front_diff;
    _end_ = _start_ + ms.lens[pfm_ind] + end_diff - 1;
    if (1 ≤ _start_) && (_end_ ≤ L)
        enlarged_mat_add!(enlarged_mat, data_matrix, _start_, _end_, seq_ind, ms.use_comp[pfm_ind][seq_ind][ind_k])
        if !haskey(enlarged_dict, seq_ind)
            enlarged_dict[seq_ind] = [_start_]
            enlarged_dict_use_comp[seq_ind] = [ifelse(comp_match, !use_comp, use_comp)]
        else
            push!(enlarged_dict[seq_ind], _start_)
            # NOTE(review): this branch pushes `comp_match` while the branch above
            # pushes ifelse(comp_match, !use_comp, use_comp) — confirm the asymmetry
            # between the two branches is intended.
            push!(enlarged_dict_use_comp[seq_ind], comp_match)
        end
    end
end
# Build the count matrix of motif `pfm_ind` widened by diff_front/diff_end columns
# by summing the widened windows over all recorded occurrences; also returns the
# dictionaries of widened start positions and strands.  With `comp_match=true` the
# resulting matrix is reverse-complemented (to match the other orientation).
function obtain_enlarged_matrix(ms::motifs{T,S}, pfm_ind, diff_front, diff_end, data_matrix, L; comp_match=false) where {T,S}
    enlarged_length = ms.lens[pfm_ind] + diff_front + diff_end;
    enlarged_mat = zeros(S, (4, enlarged_length))
    enlarged_dict = Dict{T,Vector{T}}(i=>[] for i in keys(ms.positions[pfm_ind]))
    enlarged_dict_use_comp = Dict{T,Vector{Bool}}(i=>[] for i in keys(ms.positions[pfm_ind]))
    @inbounds for seq_ind in keys(ms.positions[pfm_ind])
        for ind_k in 1:length(ms.positions[pfm_ind][seq_ind])
            enlarged_mat_increment!(ms, pfm_ind, seq_ind, ind_k, diff_front, diff_end, data_matrix, L, 
                enlarged_mat, enlarged_dict, enlarged_dict_use_comp; comp_match=comp_match)
        end
    end
    return ifelse(comp_match, reverse(enlarged_mat), enlarged_mat), enlarged_dict, enlarged_dict_use_comp
end
####################################################################################################
# Try to align two count matrices of (possibly) different widths by sliding the
# shorter one — forward and reverse-complemented — along the longer one.
# Returns (diff_front, diff_end, smaller_ind, larger_ind, larger_mat, comp_match);
# the first two are `nothing` when no offset reaches `allr_thresh` or when the
# width difference exceeds `max_allowed_diff`.
function this_cmat_pair_of_different_len_has_high_allr(cmat1, cmat2, i, j, bg; max_allowed_diff = max_allowed_diff, allr_thresh=allr_thresh)
    len_diff = size(cmat1,2)-size(cmat2,2)
    (abs(len_diff) > max_allowed_diff) && return nothing, nothing, nothing, nothing, nothing, false
    len_cond = len_diff < 0;
    smaller_mat = len_cond ? cmat1 : cmat2
    larger_mat = len_cond ? cmat2 : cmat1
    smaller_ind = len_cond ? i : j
    larger_ind = len_cond ? j : i
    smaller_mat_r = reverse(smaller_mat); # TODO later make it without a copy
    larger_mat_size = size(larger_mat,2)
    comp_match = false; diff_front = nothing; diff_end = nothing
    numcol_smaller_mat = size(smaller_mat,2);
    for i = 1:abs(len_diff)+1
        # forward-strand alignment at offset i
        allr = avg_allr(smaller_mat, view(larger_mat, :,i:i+numcol_smaller_mat-1), bg)
        if allr > allr_thresh
            diff_front = i-1
            diff_end = larger_mat_size - (i+numcol_smaller_mat-1)
            break
        end
        # reverse-complement alignment at the same offset
        allr_c = avg_allr(smaller_mat_r, view(larger_mat, :,i:i+numcol_smaller_mat-1), bg)
        if allr_c > allr_thresh
            comp_match = true
            # front/end swap on the reverse strand
            diff_front = larger_mat_size - (i+numcol_smaller_mat-1)
            diff_end = i-1
            break
        end
    end
    return diff_front, diff_end, smaller_ind, larger_ind, larger_mat, comp_match
end
# Alignment-based pairwise motif merging: for every motif pair whose count matrices
# align with high ALLR (possibly offset and/or reverse-complemented), widen the
# smaller motif's occurrences, fold its counts/positions/strands into the larger
# motif, and finally rebuild the motif set from the matrices that were not merged
# away.
function alignment_merge!(ms, data, bg; allr_thresh=allr_thresh)
    count_matrix_each = posdicts2countmats(ms, data.data_matrix);
    ms.cmats = count_matrix_each;
    merged = fill(false, ms.num_motifs)
    for i = 1:ms.num_motifs
        for j = i+1:ms.num_motifs
            merged[i] && continue
            merged[j] && continue
            diff_front, diff_end, smaller_ind, larger_ind, larger_mat, comp_match = 
                this_cmat_pair_of_different_len_has_high_allr(ms.cmats[i], ms.cmats[j], i, j, bg)
            if !isnothing(diff_front) && !isnothing(diff_end)
                enlarged_matrix, enlarged_dict, enlarged_dict_use_comp = 
                    obtain_enlarged_matrix(ms, smaller_ind, diff_front, diff_end, data.data_matrix, data.L; comp_match=comp_match)
                # second ALLR check, now on equal-width matrices
                if avg_allr(enlarged_matrix, larger_mat, bg) > allr_thresh
                    merge_add_counts && (ms.cmats[larger_ind] .+= enlarged_matrix)
                    # merge the positions
                    ms.positions[larger_ind] = mergewith(vcat, ms.positions[larger_ind], enlarged_dict)
                    # merge the use_comp
                    ms.use_comp[larger_ind] = mergewith(vcat, 
                        ms.use_comp[larger_ind], 
                        enlarged_dict_use_comp
                        )
                    merged[smaller_ind] = true;
                end
            end
        end
    end
    unmerged_cmats = ms.cmats[.!merged];
    unmerged_positions = ms.positions[.!merged];
    unmerged_use_comp = ms.use_comp[.!merged];
    return countmats2motifs(unmerged_cmats, unmerged_positions, unmerged_use_comp, bg)
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
unique_positions(positions_array) = unique(positions_array)
# TODO speed this up
# Copy of the positions dict with duplicate entries removed per key.
function get_uniq_pos(positions_i)
    deduped = typeof(positions_i)()
    for key in keys(positions_i)
        deduped[key] = unique_positions(positions_i[key])
    end
    return deduped
end
# Per-motif counts of de-duplicated hit positions, for the data and background scans.
get_uniq_counts(ms) = 
    active_counts_position(get_uniq_pos.(ms.positions)), 
        active_counts_position(get_uniq_pos.(ms.positions_bg))
###########################################################################
# Number of integers shared by two valid (non-decreasing) ranges.
function num_overlap(r1::UnitRange, r2::UnitRange)
    @assert r1[1] ≤ r1[end] && r2[1] ≤ r2[end] "range is not valid"
    # standard interval intersection length, clamped at zero for disjoint ranges
    overlap = min(last(r1), last(r2)) - max(first(r1), first(r2)) + 1
    return overlap > 0 ? overlap : 0
end
# Total number of positions covered by the stored ranges, summed over all keys.
function total_active_position(position)
    return sum((length(r) for rs in values(position) for r in rs); init=0)
end
# Fold ranges[i] into the accumulator `_ranges_`: extend the last accumulated range
# when they touch/overlap (inputs are sorted by start), otherwise append.
function push_ranges!(ranges, _ranges_, i)
    if _ranges_[end][end] ≥ ranges[i][1]
        # BUG FIX: take the max of the two right endpoints.  The original used
        # ranges[i][end] unconditionally, which SHRANK the accumulated range
        # whenever ranges[i] was nested inside it.
        _ranges_[end] = _ranges_[end][1]:max(_ranges_[end][end], ranges[i][end])
    else
        push!(_ranges_, ranges[i])
    end
end
# Merge a collection of integer ranges into a minimal sorted set of ranges whose
# union covers the same points.
function union_ranges(ranges)
    length(ranges) == 0 && return eltype(ranges)[]
    sorted_ranges = sort(ranges, by = x -> x[1])
    merged = [sorted_ranges[1]]
    # BUG FIX: the original looped over eachindex(@view ranges[2:end]) — which yields
    # 1:n-1 — while indexing `ranges[i]`, so it processed ranges[1:end-1] and silently
    # dropped the last range.  Iterate the true indices 2:end instead (the merging
    # helper is inlined here, with `max` so nested ranges cannot shrink the result).
    @inbounds for i in 2:length(sorted_ranges)
        r = sorted_ranges[i]
        if merged[end][end] ≥ r[1]
            merged[end] = merged[end][1]:max(merged[end][end], r[end])
        else
            push!(merged, r)
        end
    end
    return merged
end
# Turn start positions into [start, start+len-1] ranges, then merge the overlaps.
function union_pos(positions_arr_k, len)
    hit_ranges = [p:p+len-1 for p in positions_arr_k]
    return union_ranges(hit_ranges)
end
# Per-key merged coverage ranges for one motif's positions (occurrences of width
# `len_i` starting at each stored position).
function get_union_ranges(positions_i, len_i)
    merged = Dict{Int, Vector{UnitRange{Int}}}()
    for (k, starts) in pairs(positions_i)
        merged[k] = union_pos(starts, len_i)
    end
    return merged
end
# Total number of positions covered by the per-key range lists.
function get_total_occupied_positions(position_ranges)
    return sum((length(r) for rs in values(position_ranges) for r in rs); init=0)
end
# function overlap_inc!(pos_i, pos_j, overlap_ij, mutual_keys)
# end
# Symmetric matrix of pairwise overlap ratios between motifs, computed as
# intersection-over-union of the genomic positions their occurrences cover.
function get_overlap_ratio(ms)
    # @info "Computing union ranges..."
    # union_poses = get_union_ranges.(ms.positions, ms.lens)
    union_poses = Vector{Dict{Int64, Vector{UnitRange{Int64}}}}(undef, ms.num_motifs)
    @floop for i = 1:ms.num_motifs
        union_poses[i] = get_union_ranges(ms.positions[i], ms.lens[i])
    end
    # @info "Computing total active positions..."
    acs = total_active_position.(union_poses)
    olap_ratio_ij = zeros(Float32, (ms.num_motifs, ms.num_motifs))
    # @info "Computing overlap ratio..."
    @floop for i = 1:ms.num_motifs
        # println(i)
        @inbounds for j = i+1:ms.num_motifs
            pos_i = union_poses[i]
            pos_j = union_poses[j]
            overlap_ij = 0f0
            mutual_keys = intersect(keys(pos_i), keys(pos_j))
            for k in mutual_keys
                # NOTE(review): `a || b && continue` parses as `a || (b && continue)`,
                # so `continue` only fires when pos_i[k] is non-empty AND pos_j[k] is
                # empty.  Harmless here (empty vectors make the loops below no-ops),
                # but likely not what was intended.
                length(pos_i[k]) == 0 || length(pos_j[k]) == 0 && continue
                for pos_i_range in pos_i[k]
                    for pos_j_range in pos_j[k]
                        overlap_ij += num_overlap(pos_i_range, pos_j_range)
                    end
                end
            end
            # intersection over union of covered positions
            olap_ratio_ij[i,j] = olap_ratio_ij[j,i] = overlap_ij / (acs[i]+acs[j]-overlap_ij)
        end
    end
    return olap_ratio_ij
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
# Smooth a 4-entry count vector with pseudocount `ps`, then return
# (total count / number of occurrences, information content = 2 + Σ f·log2(f)).
function countvec2ic(count_vec, pos_ki; ps=0.01)
    smoothed = count_vec .+ ps
    total = sum(smoothed)
    freqs = smoothed ./ total
    return total / length(pos_ki), 2 + sum(freqs .* log2.(freqs))
end
# One-hot (dummy) encoding of the character at `char_ind` in sequence p[2];
# with comp=true the character is complemented first.
get_char(data, p, char_ind, comp) = 
    comp ? 
        atcg2dummy[ atcg_comp[data.raw_data[p[2]][char_ind]] ] : 
        atcg2dummy[ data.raw_data[p[2]][char_ind] ]
# get_char(data, p, char_ind, comp) = 
#     comp ? 
#         atcg2dummy[ atcg_comp[data.raw_data[p[2]].str[char_ind]] ] : 
#         atcg2dummy[ data.raw_data[p[2]].str[char_ind] ]
# Count vector at a position; out-of-bounds indices map to the sentinel 'z' encoding.
count_vec_at_pos_ms(p, data, char_ind, comp) = 
    char_ind < 1 || char_ind > data.L ? 
        atcg2dummy['z'] : 
        get_char(data, p, char_ind, comp)
# Index arithmetic for widening/narrowing a motif occurrence p = (range, seq, comp):
# on the reverse-complement strand (comp=true) "left" and "right" are mirrored.
get_expand_ind_left(p, Δ, comp) = comp ? last(p[1]) + Δ : first(p[1]) - Δ
get_expand_ind_right(p, Δ, comp) = comp ? first(p[1]) - Δ : last(p[1]) + Δ
get_shrink_ind_left(p, Δ, comp) = comp ? last(p[1]) - Δ : first(p[1]) + Δ
get_shrink_ind_right(p, Δ, comp) = comp ? first(p[1]) + Δ : last(p[1]) - Δ
# Should the motif grow one more column on the left? `dec` is the distance of
# the candidate column from the current left edge. Approves when ≥ `tol` of the
# occurrences stay in-bounds and the candidate column's information content
# (with extra pseudocount `pc`) exceeds `ic_expand_thresh`.
function left_expansion_ms(data, pos_ki, dec; ic_expand_thresh=0.5, pc=1.0, tol=0.975)
    count_vec = @SVector zeros(Float64, 4)
    for p in pos_ki
        count_vec += 
            count_vec_at_pos_ms(p, data, get_expand_ind_left(p, dec, p[3]), p[3])
    end
    percentage_used, ic = countvec2ic(count_vec .+ pc, pos_ki)
    return percentage_used > tol && ic > ic_expand_thresh
end
# Should the motif grow one more column on the right? `inc` is the distance of
# the candidate column from the current right edge; same acceptance criteria as
# `left_expansion_ms`.
function right_expansion_ms(data, pos_ki, inc; ic_expand_thresh=0.5, pc=1.0, tol=0.975)
    count_vec = @SVector zeros(Float64, 4)
    for p in pos_ki
        count_vec += 
            count_vec_at_pos_ms(p, data, get_expand_ind_right(p, inc, p[3]), p[3])
    end
    percentage_used, ic = countvec2ic(count_vec .+ pc, pos_ki)
    return percentage_used > tol && ic > ic_expand_thresh
end
# Should the motif drop a column on the left? Approves when the column `inc`
# steps inside the left edge has information content below `ic_shrink_thresh`
# (and ≥ `tol` of the occurrences are in-bounds).
function left_shrinkage_ms(data, pos_ki, inc; ic_shrink_thresh=0.2, tol=0.975)
    count_vec = @SVector zeros(Float64, 4)
    for p in pos_ki
        count_vec += 
            count_vec_at_pos_ms(p, data, get_shrink_ind_left(p, inc, p[3]), p[3])
    end
    percentage_used, ic = countvec2ic(count_vec, pos_ki)
    return percentage_used > tol && ic < ic_shrink_thresh
end
# Should the motif drop a column on the right? Mirror image of
# `left_shrinkage_ms`, probing `dec` steps inside the right edge.
function right_shrinkage_ms(data, pos_ki, dec; ic_shrink_thresh=0.2, tol=0.975)
    count_vec = @SVector zeros(Float64, 4)
    for p in pos_ki
        count_vec += 
            count_vec_at_pos_ms(p, data, get_shrink_ind_right(p, dec, p[3]), p[3])
    end
    percentage_used, ic = countvec2ic(count_vec, pos_ki)
    return percentage_used > tol && ic < ic_shrink_thresh
end
# Greedily probe columns further and further out on each side; stop at the
# first failing column and back off by one. Returns the (left, right) number
# of columns to add — each may end up 0.
function expansion_left_right_ms(data, pos_ki, ic_expand_thresh)
    expand_left = true; expand_right = true;
    left_dec = 1; right_inc = 1;
    while expand_left
        expand_left = 
            left_expansion_ms(data, pos_ki, left_dec; ic_expand_thresh=ic_expand_thresh)
        expand_left ? left_dec+=1 : left_dec-=1;
    end
    while expand_right
        expand_right = 
            right_expansion_ms(data, pos_ki, right_inc; ic_expand_thresh=ic_expand_thresh)
        expand_right ? right_inc+=1 : right_inc-=1;
    end
    return left_dec, right_inc
end
# Decide how many low-information columns to trim from each side. A side is
# only eligible for trimming when it was NOT expanded (its entry in
# `expansion` is 0). Returns (left_trim, right_trim).
function trimming_left_right_ms(expansion::Tuple{Int, Int}, data, pos_ki, ic_shrink_thresh)
    # returns how much increment (decrement) from the left (from the right)
    shrink_left_ = expansion[1] == 0 ? true : false;
    shrink_right_ = expansion[2] == 0 ? true : false;
    left_inc = shrink_left_ ? 1 : 0;
    right_dec = shrink_right_ ? 1 : 0;
    while shrink_left_
        shrink_left_ = 
            left_shrinkage_ms(data, pos_ki, left_inc-1; ic_shrink_thresh=ic_shrink_thresh)
        shrink_left_ ? left_inc+=1 : left_inc -= 1;
    end
    while shrink_right_
        shrink_right_ = 
            right_shrinkage_ms(data, pos_ki, right_dec-1; ic_shrink_thresh=ic_shrink_thresh)
        shrink_right_ ? right_dec+=1 : right_dec-=1;
    end
    return left_inc, right_dec
end
# Compute, for every motif's occurrence list in `pos`, the (left, right)
# expansion amounts and then — based on those — the (left, right) trim
# amounts. Motifs with no occurrences map to (0, 0) in both outputs.
function msa_expansion_ms(pos, data, ic_expand_thresh, ic_shrink_thresh)
    occupancy = length.(pos)
    expansions = [occupancy[ind] > 0 ?
                      expansion_left_right_ms(data, pos[ind], ic_expand_thresh) : (0, 0)
                  for ind in eachindex(pos)]
    shrinkage = [occupancy[ind] > 0 ?
                     trimming_left_right_ms(expansions[ind], data, pos[ind], ic_shrink_thresh) : (0, 0)
                 for ind in eachindex(pos)]
    return expansions, shrinkage
end
# Flatten the per-motif position dictionaries of `ms` into one vector of
# (range, sequence_index, use_complement) triples per motif, where the range
# spans the occurrence's `lens[i]` columns.
function posdict2pos(ms)
    flat = [Vector{Tuple{UnitRange{Int64}, Int64, Bool}}() for _ = 1:ms.num_motifs]
    for i = 1:ms.num_motifs
        motif_len = ms.lens[i]
        for seq_idx in keys(ms.positions[i])
            starts = ms.positions[i][seq_idx]
            comps = ms.use_comp[i][seq_idx]
            for (j, s) in enumerate(starts)
                push!(flat[i], (s:s+motif_len-1, seq_idx, comps[j]))
            end
        end
    end
    return flat
end
# Apply the chosen expansion/trim deltas to motif `i`: update its length and
# shift every occurrence's start position (mirrored for reverse-complement
# occurrences). Occurrences that would leave the sequence bounds 1:L are
# dropped from both `positions` and `use_comp`.
function edit_posdict_i!(expand__left, expand__right, 
                        shrink__left, shrink__right, ms, i, L)
    @assert expand__left == 0 || shrink__left == 0 "one left shink/expand has to be 0"
    @assert expand__right == 0 || shrink__right == 0 "one right shink/expand has to be 0"
    left_Δ = -expand__left + shrink__left
    right_Δ = expand__right - shrink__right
    ms.lens[i] += -left_Δ + right_Δ
    for k in keys(ms.positions[i])
        indices_to_keep = fill(true, length(ms.positions[i][k]))
        for ind in 1:length(ms.positions[i][k])
            # reverse-complement occurrences shift from the opposite end
            Δ = ifelse(ms.use_comp[i][k][ind], 
                    ms.positions[i][k][ind] - right_Δ, 
                    ms.positions[i][k][ind] + left_Δ)
            if 1 ≤ Δ ≤ L && Δ + ms.lens[i] - 1 ≤ L
                ms.positions[i][k][ind] = Δ
            else
                # new frame falls outside the sequence: mark for removal
                indices_to_keep[ind] = false
            end
        end
        ms.positions[i][k] = ms.positions[i][k][indices_to_keep]
        ms.use_comp[i][k] = ms.use_comp[i][k][indices_to_keep]
        # TODO scores as well?
    end
end
# Expand/trim every motif against the data, then rebuild the motif container
# from the surviving motifs (those whose total trim does not exceed their
# length). NOTE(review): the final `ms = countmats2motifs(...)` rebinds only
# the local variable — callers must use the RETURN value, the `!` name
# notwithstanding (positions are still mutated in place via `edit_posdict_i!`).
function expansions_ms!(ms, data, bg; ic_expand_t=ic_expand_thresh, ic_shrink_t=ic_trim_thresh)
    use_next = fill(true, ms.num_motifs)
    ms_pos = posdict2pos(ms)
    expansions, shrinkage = 
        msa_expansion_ms(ms_pos, data, ic_expand_t, ic_shrink_t)
    for (ind, ((el,er),(sl,sr))) in enumerate(zip(expansions, shrinkage))
        if sl + sr > ms.lens[ind]
            use_next[ind] = false
        else
            edit_posdict_i!(el, er, sl, sr, ms, ind, data.L)
        end
    end
    ms = countmats2motifs(ms.cmats[use_next], 
                          ms.positions[use_next], 
                          ms.use_comp[use_next], bg)
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2007 | function get_start_end(positions, len4, i, k, ind_k)
start_ = (positions[i][k][ind_k]-1)*4+1;
end_ = start_+len4-1;
return start_, end_
end
# Accumulate into the 4×len buffer `msa` the one-hot columns of every recorded
# occurrence of motif `i` (reverse-complemented where flagged). Returns the
# pseudocount-adjusted count matrix when `return_count_mat` is true, otherwise
# the corresponding frequency matrix.
function msa_add!(i, len4, positions, use_complement, 
                  lens, msa, data_matrix; ps=0.01, return_count_mat=false)
    @inbounds for k in keys(positions[i])
        for ind_k in 1:length(positions[i][k])
            start_, end_ = get_start_end(positions, len4, i, k, ind_k)
            if use_complement[i][k][ind_k]
                msa .+= 
                    reshape(submat_comlement(data_matrix,start_,end_,k, lens[i]),(4, lens[i]));
            else
                msa .+= reshape((@view data_matrix[start_:end_,1,k]), (4, lens[i]));
            end
        end
    end
    return return_count_mat ? msa .+ ps : countmat2pfm(msa .+ ps)
end
f_retrieval_t(x) = float_type_retrieval.(x)
# Rebuild every motif's count matrix by summing its aligned occurrences out of
# the one-hot `data_matrix`; results are converted to `float_type_retrieval`.
function posdicts2countmats(ms::motifs, data_matrix::Array{S,3}) where {S<:Real}
    count_mats = Vector{Matrix{S}}(undef, ms.num_motifs);
    @inbounds for i in 1:ms.num_motifs
        msa = zeros(S, (4, ms.lens[i]));
        count_mat = msa_add!(i, 4*ms.lens[i], ms.positions, 
            ms.use_comp, ms.lens, msa, data_matrix; return_count_mat=true)
        count_mats[i] = count_mat
    end
    return f_retrieval_t.(count_mats)
end
# Build a single 4×`len` count matrix by summing the one-hot columns of
# `data_matrix` over every occurrence in `pos` (Dict: sequence index => start
# positions), reverse-complementing occurrences flagged in `use_comp`.
# Fix: convert to `float_type_retrieval` for consistency with the
# `motifs`-based method above (previously `Float16`, which silently lost
# precision relative to its sibling).
function posdicts2countmats(pos, use_comp, len, data_matrix::Array{S,3}) where {S<:Real}
    len4 = 4*len;
    count_mat = zeros(eltype(data_matrix), (4, len) );
    for k in keys(pos)
        for ind_k in 1:length(pos[k])
            start_ = (pos[k][ind_k]-1)*4+1;
            end_ = start_+len4-1;
            if use_comp[k][ind_k]
                # reverse-complement occurrences read the matrix back-to-front
                count_mat .+= 
                    reshape(submat_comlement(data_matrix, start_, end_, k, len),(4, len));
            else
                count_mat .+= reshape((@view data_matrix[start_:end_, 1, k]), (4, len));
            end
        end
    end
    return float_type_retrieval.(count_mat)
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 1941 | function active_counts_position(positions)
activate_count = Vector{Float64}(undef, length(positions))
@inbounds for i in eachindex(positions)
sum_p = 0
for key in keys(positions[i])
sum_p += length(positions[i][key])
end
activate_count[i] = sum_p
end
return activate_count
end
# Number of recorded occurrences per motif, counted from the foreground
# positions or, when `bg=true`, the background (control) positions.
function get_activate_counts(ms::motifs; bg = false)
    positions = bg ? ms.positions_bg : ms.positions;
    return active_counts_position(positions)
end
# Convenience: (foreground counts, background counts) in one call.
get_both_activate_counts(ms::motifs) = 
    get_activate_counts(ms), get_activate_counts(ms; bg=true)
# One-sided (right-tail) Fisher exact test per motif: is the motif activated
# more often in the data than in the background? `activate_sum` is the total
# number of positions that could host an activation. Motifs with zero
# activations in both conditions get p = 1.0 so they are filtered downstream.
function fisher_pvec(activate_counts, activate_counts_bg, data; test=false)
    activate_sum = (test ? data.N_test : data.N) * data.L # total number of components that can be activated
    pvalues = fill(0.0, length(activate_counts));
    for i in eachindex(activate_counts)
        a = activate_counts[i]; b = activate_counts_bg[i];
        c = activate_sum - a; d = activate_sum - b;
        if a == 0 && b == 0
            pvalues[i] = 1.0 # if there's no activation in both just throw it away
        else
            q = FisherExactTest(promote_i(a, c, b, d)...);
            pvalues[i] = HypothesisTests.pvalue(q, tail=:right);
        end
    end
    return pvalues
end
# Fisher p-values computed on distinct occupied positions (overlapping
# occurrences merged into unions) rather than on raw activation counts, for
# both foreground and background.
function get_fisher_p_values(ms::motifs, data; test=false)
    union_positions, union_positions_bg = 
        get_union_ranges.(ms.positions, ms.lens), get_union_ranges.(ms.positions_bg, ms.lens)
    total_positions, total_positions_bg = 
        get_total_occupied_positions.(union_positions), get_total_occupied_positions.(union_positions_bg)
    return fisher_pvec(total_positions, total_positions_bg, data; test=test)
end
# Keep only motifs whose occupancy is significantly enriched over background
# (Fisher right-tail p < `pvalue_fisher_thresh`).
function filter_insignificant_motifs(ms::motifs, data, this_bg; test=false)
    pvec = get_fisher_p_values(ms, data; test=test);
    keep = pvec .< pvalue_fisher_thresh
    return filter_motifs_w_filter_vec(ms, keep, this_bg)
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 16072 | ############# take out and merge procedure #############
pfm2pwm(pfm, bg) = log2.(pfm ./ bg);
#=
Delete the motifs that are indicated by the indicator vector
=#
# Remove the motifs flagged `true` in `indicator_vec` from every non-`nothing`
# field of `ms`, then refresh `num_motifs`.
function delete_indicated_motif!(ms, indicator_vec)
    !isnothing(ms.cmats) && (ms.cmats = ms.cmats[.!indicator_vec])
    !isnothing(ms.pfms) && (ms.pfms = ms.pfms[.!indicator_vec])
    !isnothing(ms.pwms) && (ms.pwms = ms.pwms[.!indicator_vec])
    !isnothing(ms.effective_segments) && (ms.effective_segments = ms.effective_segments[.!indicator_vec])
    !isnothing(ms.max_effective_lens) && (ms.max_effective_lens = ms.max_effective_lens[.!indicator_vec])
    !isnothing(ms.max_scores) && (ms.max_scores = ms.max_scores[.!indicator_vec])
    !isnothing(ms.min_scores) && (ms.min_scores = ms.min_scores[.!indicator_vec])
    !isnothing(ms.score_thresh) && (ms.score_thresh = ms.score_thresh[.!indicator_vec])
    !isnothing(ms.lens) && (ms.lens = ms.lens[.!indicator_vec])
    !isnothing(ms.positions) && (ms.positions = ms.positions[.!indicator_vec])
    !isnothing(ms.scores) && (ms.scores = ms.scores[.!indicator_vec])
    !isnothing(ms.use_comp) && (ms.use_comp = ms.use_comp[.!indicator_vec])
    !isnothing(ms.positions_bg) && (ms.positions_bg = ms.positions_bg[.!indicator_vec])
    !isnothing(ms.scores_bg) && (ms.scores_bg = ms.scores_bg[.!indicator_vec])
    !isnothing(ms.use_comp_bg) && (ms.use_comp_bg = ms.use_comp_bg[.!indicator_vec])
    ms.num_motifs = length(ms.cmats)
end
# Remove the motifs at `indices_vec` from the core fields in place.
# NOTE(review): unlike `delete_indicated_motif!`, this does not touch
# pfms/pwms/score fields — presumably callers invoke it before those are
# (re)derived; confirm at call sites.
function delete_by_indices!(ms, indices_vec)
    deleteat!(ms.cmats, indices_vec)
    deleteat!(ms.effective_segments, indices_vec)
    deleteat!(ms.positions, indices_vec)
    deleteat!(ms.use_comp, indices_vec)
    deleteat!(ms.lens, indices_vec)
    ms.num_motifs -= length(indices_vec)
end
#=
take out the sub-motifs that are within the range of the length tuple
two objectives:
# modify the ms object so that it no longer contains the sub-motifs
that are within the range of the length tuple
# return the sub-motifs that are within the range of the length tuple
=#
# Split off the motifs whose length lies in [range_low_bdd, range_uppper_bdd]:
# they are removed from `ms` in place and returned as a fresh `motifs` object.
# Returns `nothing` when no motif falls in the range.
function take_out_sub_ms_by_range_indicator!(ms, bg, range_low_bdd, range_uppper_bdd)
    len_this_range = range_low_bdd .≤ ms.lens .≤ range_uppper_bdd
    all(len_this_range .== false) && return 
    this_len_range_cmats = ms.cmats[len_this_range]
    this_len_positions = ms.positions[len_this_range]
    this_len_use_comp = ms.use_comp[len_this_range]
    this_len_ms = countmats2motifs(this_len_range_cmats, this_len_positions, this_len_use_comp, bg)
    delete_indicated_motif!(ms, len_this_range)
    return this_len_ms
end
#=
merge the two ms
=#
# Concatenate the motifs of `ms1` and `ms2` into a newly constructed container.
function merge_ms(ms1, ms2, bg)
    cat_cmats = vcat(ms1.cmats, ms2.cmats)
    cat_positions = vcat(ms1.positions, ms2.positions)
    cat_use_comp = vcat(ms1.use_comp, ms2.use_comp)
    return countmats2motifs(cat_cmats, cat_positions, cat_use_comp, bg)
end
############# calculate jaccard similarity and BFS for connected components #############
# ms_this_range = take_out_sub_ms_by_range_indicator!(ms, bg, 30, 35)
# # if !isnothing(ms_this_range) ...
# olap_ratio = get_overlap_ratio(ms_this_range)
"""
    return_connected_components(olap_ratio, jaccard_thresh=0.8)

Treat `olap_ratio .> jaccard_thresh` as the adjacency matrix of an undirected
graph and return its connected components (each a `Vector{Int}` of node
indices, in BFS discovery order) — singleton components included.

Uses a plain `Vector{Int}` as the BFS queue (`pushfirst!` to enqueue, `pop!`
to dequeue — FIFO), removing the previous dependency on `DataStructures.Deque`.
"""
function return_connected_components(olap_ratio, jaccard_thresh=0.8)
    A = olap_ratio .> jaccard_thresh;
    l = size(A, 1);
    marked = fill(false, l);
    trees = Vector{Int}[];
    for i = 1:l
        @inbounds if !marked[i]
            marked[i] = true;
            component = Int[i];
            queue = Int[i];
            while !isempty(queue)
                v = pop!(queue);
                for j in findall(@view A[v, :])
                    if !marked[j]
                        marked[j] = true;
                        push!(component, j);
                        pushfirst!(queue, j);
                    end
                end
            end
            push!(trees, component)
        end
    end
    return trees
end
# Average log-likelihood ratio (ALLR) similarity between two aligned frequency
# matrices `p` and `q` (columns = positions), weighting each side's log-odds
# against background `bg` by its observation counts `p_count`/`q_count`.
# Returns the sum of the per-column ALLR scores.
function allr(p, q, p_count, q_count, bg)
    allr_score = Float64[];
    for i = 1:size(p,2)
        view_p_col = Base.view(p, :, i);
        view_q_col = Base.view(q, :, i);
        nb_p = p_count .* view_p_col;
        nb_q = q_count .* view_q_col;
        a1=sum(nb_p .* pfm2pwm(view_q_col, bg));
        a2=sum(nb_q .* pfm2pwm(view_p_col, bg));
        push!(allr_score, (a1+a2)/(sum(nb_p)+sum(nb_q)))
    end
    return sum(allr_score)
end
# Slide `pfm_c2` (guaranteed by the caller to be the shorter matrix) across
# `pfm` over all relative offsets and score each alignment with ALLR;
# alignments overlapping fewer than `len_pfm_c2 - len_diff_tol` columns score
# -Inf. Returns the best score plus, for that offset, how far each motif's
# frame extends beyond the other on the left (l_dec_*) and right (r_inc_*).
function convolve_allr(pfm_c2, pfm, 
                       counts_pfm_c2, 
                       counts_pfm, 
                       len_pfm_c2, 
                       len_pfm, 
                       bg; len_diff_tol=4
                       )
    #= len_pfm_c2 will always be smaller since we've select the ones 
       with minimal length
    =#
    min_col = len_pfm_c2 - len_diff_tol;
    allrs = Float64[];
    # start and end indices for pwms
    # s1e2 for pfm_c2, s2e2 for pfm
    s1e1s = UnitRange{Int}[];
    s2e2s = UnitRange{Int}[];
    l_dec_1 = Int[]; l_dec_2 = Int[];
    r_inc_1 = Int[]; r_inc_2 = Int[];
    for i = 1:(len_pfm_c2+len_pfm-1)
        s1 = max(1, len_pfm_c2-i+1); e1 = min(len_pfm_c2, len_pfm_c2-(i-len_pfm));
        s2 = max(1, i-len_pfm_c2+1); e2 = min(i, len_pfm);
        overlap_count = e1-s1+1;
        push!(s1e1s, s1:e1); push!(s2e2s, s2:e2);
        #=
        Note that:
            1) no need to calculate if the number of columns of the 
               pfm is less than min_col as specified
            2) no need to calculate the placements for which 
               maximal value of the score is below the threshold
        =#
        if overlap_count ≥ min_col
            push!(allrs, allr(Base.view(pfm_c2,:,s1:e1), Base.view(pfm,:,s2:e2), 
                    counts_pfm_c2, counts_pfm, bg));
        else
            push!(allrs, -Inf);
        end
        push!(l_dec_1, max(s2-1,0)); push!(l_dec_2, max(s1-1,0));
        push!(r_inc_1, max(0,len_pfm-i)); push!(r_inc_2, max(i-e2,0));
    end
    argmax_ind = argmax(allrs);
    return allrs[argmax_ind], 
           l_dec_1[argmax_ind], 
           r_inc_1[argmax_ind], 
           l_dec_2[argmax_ind], 
           r_inc_2[argmax_ind]
end
reduce_merge_with_two_dict(d1, d2) = mergewith(vcat, d1, d2)
# modify_subtree_positions_lens!(subtree_pos, subtree_lens, subtree_use_comb, subtree_num_ms, reverse_comp,
# ld1_matches, ri1_matches, ld2_matches, ri2_matches, data.L)
"""
    modify_subtree_positions_lens!(subtree_pos, subtree_lens, subtree_use_comb,
                                   subtree_num_ms, reverse_comp,
                                   ld1_matches, ri1_matches, ld2_matches,
                                   ri2_matches, L)

Shift every occurrence start so that all motifs in the subtree align to the
common merged frame implied by the pairwise alignment offsets against the
subtree root (`ld*`/`ri*`, from `convolve_allr`), flipping strand flags where
the best alignment used the reverse complement. Occurrences whose shifted
frame would leave `1:L` are dropped. Returns `(merged_pos, merged_use_comp,
merged_len)`.

Fix: removed leftover debug `println` statements that polluted stdout on
every call; logic is unchanged.
"""
function modify_subtree_positions_lens!(
    subtree_pos, subtree_lens, subtree_use_comb, subtree_num_ms, reverse_comp,
    ld1_matches, ri1_matches, ld2_matches, ri2_matches, L)
    # the merged motif extends the root by the largest left/right overhangs
    max_start_decrement_root = maximum(ld1_matches)
    max_start_decrement_root_c = maximum(ri1_matches)
    merged_len = subtree_lens[1] + max_start_decrement_root + max_start_decrement_root_c
    # per-motif map: sequence index => occurrence indices to drop afterwards
    mark2delete = [Dict{Int, Vector{Int}}() for _ = 1:subtree_num_ms]
    # shift the root motif's occurrences into the merged frame
    for key in keys(subtree_pos[1])
        for (ind, pos) in enumerate(subtree_pos[1][key])
            modified_start = subtree_use_comb[1][key][ind] ? 
                pos - max_start_decrement_root_c : pos - max_start_decrement_root;
            if modified_start > 0 && modified_start + merged_len -1 <= L
                subtree_pos[1][key][ind] = modified_start
            else
                if haskey(mark2delete[1], key)
                    push!(mark2delete[1][key], ind)
                else
                    mark2delete[1][key] = [ind]
                end
            end
        end
    end
    # shift the remaining members; the applicable offsets depend on both the
    # occurrence's strand flag and whether its best alignment was reverse
    # complemented
    for i = 2:subtree_num_ms
        for key in keys(subtree_pos[i])
            for (ind, pos) in enumerate(subtree_pos[i][key])
                diff = ifelse(reverse_comp[i-1], 
                        subtree_use_comb[i][key][ind] ? 
                            ld2_matches[i-1]+max_start_decrement_root-ld1_matches[i-1] : 
                            ri2_matches[i-1]+max_start_decrement_root_c-ri1_matches[i-1]
                        ,
                        subtree_use_comb[i][key][ind] ? 
                            ri2_matches[i-1]+max_start_decrement_root_c-ri1_matches[i-1] : 
                            ld2_matches[i-1]+max_start_decrement_root-ld1_matches[i-1]
                        )
                subtree_use_comb[i][key][ind] = ifelse(reverse_comp[i-1], 
                    !subtree_use_comb[i][key][ind],
                     subtree_use_comb[i][key][ind])
                modified_start = pos - diff
                if modified_start > 0 && modified_start + merged_len - 1 <= L
                    subtree_pos[i][key][ind] = modified_start
                else
                    if haskey(mark2delete[i], key)
                        push!(mark2delete[i][key], ind)
                    else
                        mark2delete[i][key] = [ind]
                    end
                end
            end
        end
    end
    # drop the out-of-bounds occurrences recorded above
    for i = 1:subtree_num_ms
        for key in keys(mark2delete[i])
            keep_indices = filter(x->!(x in mark2delete[i][key]), eachindex(subtree_pos[i][key]))
            subtree_pos[i][key] = subtree_pos[i][key][keep_indices]
            subtree_use_comb[i][key] = subtree_use_comb[i][key][keep_indices]
        end
    end
    merged_pos = reduce(reduce_merge_with_two_dict, subtree_pos)
    merged_use_comp = reduce(reduce_merge_with_two_dict, subtree_use_comb)
    return merged_pos, merged_use_comp, merged_len
end
# Record the alignment offsets and strand flag for subtree member `i`.
# Entries are stored at slot `i-1` because member 1 is the alignment root and
# has no entry of its own.
function update_matches!(i, ld1, ri1, ld2, ri2, rc,
    ld1_matches, ri1_matches, ld2_matches, ri2_matches, reverse_comp)
    slot = i - 1
    ld1_matches[slot], ri1_matches[slot] = ld1, ri1
    ld2_matches[slot], ri2_matches[slot] = ld2, ri2
    reverse_comp[slot] = rc
end
"""
    process_subtree(subtree, ms_this_range, data, bg)

Merge the motifs of one connected component (`subtree`: indices into
`ms_this_range`). Each member is aligned against the shortest member via ALLR
(`convolve_allr`), trying both the forward matrix and its reverse; all
occurrence positions are then shifted into the common merged frame and the
merged count matrix is rebuilt from `data.data_matrix`.
Returns `(merged_cmat, merged_pos, merged_use_comp)`.

Fix: removed leftover debug `println` statements; logic unchanged.
"""
function process_subtree(subtree, ms_this_range, data, bg)
    # order members by length so the shortest motif anchors the alignment
    sort_inds = subtree[sortperm(ms_this_range.lens[subtree])]
    subtree_cmats = ms_this_range.cmats[sort_inds]
    subtree_pfms = ms_this_range.pfms[sort_inds]
    subtree_pos = ms_this_range.positions[sort_inds]
    subtree_use_comb = ms_this_range.use_comp[sort_inds]
    subtree_lens = ms_this_range.lens[sort_inds]
    subtree_counts = [sum((@view cmat[:,1])) for cmat in subtree_cmats]
    subtree_num_ms = length(subtree)
    ld1_matches = Vector{Int}(undef, subtree_num_ms-1);
    ri1_matches = Vector{Int}(undef, subtree_num_ms-1);
    ld2_matches = Vector{Int}(undef, subtree_num_ms-1);
    ri2_matches = Vector{Int}(undef, subtree_num_ms-1);
    reverse_comp = Vector{Bool}(undef, subtree_num_ms-1);
    for i in 2:subtree_num_ms
        # align member i against the root on the forward orientation ...
        allr_score, ld1, ri1, ld2, ri2 = convolve_allr(
            subtree_pfms[1], 
            subtree_pfms[i], 
            subtree_counts[1], 
            subtree_counts[i],
            subtree_lens[1], 
            subtree_lens[i], 
            bg);
        # ... and against its reverse; keep whichever scores better
        allr_score_c, ld1_c, ri1_c, ld2_c, ri2_c = convolve_allr(
            subtree_pfms[1], 
            reverse(subtree_pfms[i]), 
            subtree_counts[1], 
            subtree_counts[i],
            subtree_lens[1], 
            subtree_lens[i], 
            bg);
        if allr_score_c > allr_score
            update_matches!(i, ld1_c, ri1_c, ld2_c, ri2_c, true,
                ld1_matches, ri1_matches, ld2_matches, ri2_matches, reverse_comp)
        else
            update_matches!(i, ld1, ri1, ld2, ri2, false,
                ld1_matches, ri1_matches, ld2_matches, ri2_matches, reverse_comp)
        end
    end
    merged_pos, merged_use_comp, merged_len = 
        modify_subtree_positions_lens!(subtree_pos, subtree_lens, subtree_use_comb, subtree_num_ms, reverse_comp,
            ld1_matches, ri1_matches, ld2_matches, ri2_matches, data.L)
    merged_cmat = posdicts2countmats(merged_pos, merged_use_comp, merged_len, data.data_matrix)
    return merged_cmat, merged_pos, merged_use_comp
end
"""
    merge_to_remove_redundancy!(ms, data, bg; max_len_diff=5, olap_ratio_thresh=0.95)

Iteratively merge motifs whose genomic occupancy overlaps by more than
`olap_ratio_thresh` (Jaccard), sweeping length windows of width
`max_len_diff`, until the motif count stops shrinking. Returns the merged
`motifs` object (the input is not mutated; it is deep-copied).

Bug fix: for singleton components the original code indexed `ms_this_range`
with the component counter `j` instead of the motif's own index `subtree[1]`,
carrying over the wrong motif whenever component order diverged from motif
order.
"""
function merge_to_remove_redundancy!(ms, data, bg; max_len_diff=5, olap_ratio_thresh=0.95)
    _ms_ = deepcopy(ms);
    while(true)
        total_num_ms = _ms_.num_motifs;
        min_len = minimum(_ms_.lens)
        max_len = maximum(_ms_.lens)
        for i = min_len:(max_len-max_len_diff+1)
            @info "merging motifs in range $i to $(i+max_len_diff-1)"
            ms_this_range = take_out_sub_ms_by_range_indicator!(_ms_, bg, i, i+max_len_diff-1);
            if !isnothing(ms_this_range)
                olap_ratio = get_overlap_ratio(ms_this_range)
                trees = return_connected_components(olap_ratio, olap_ratio_thresh)
                merged_cmats, merged_poses, merged_use_comps = 
                    Vector{eltype(ms_this_range.cmats)}(undef, length(trees)), 
                    Vector{eltype(ms_this_range.positions)}(undef, length(trees)), 
                    Vector{eltype(ms_this_range.use_comp)}(undef, length(trees))
                for (j, subtree) in enumerate(trees)
                    if length(subtree) == 1
                        # singleton component: carry the motif over unchanged
                        # (index by the motif's index, not the tree counter)
                        k = subtree[1]
                        merged_cmats[j] = ms_this_range.cmats[k]
                        merged_poses[j] = ms_this_range.positions[k]
                        merged_use_comps[j] = ms_this_range.use_comp[k]
                    else
                        merged_cmats[j], merged_poses[j], merged_use_comps[j] =
                            process_subtree(subtree, ms_this_range, data, bg)
                    end
                end
                ms_this_tree = countmats2motifs(merged_cmats, merged_poses, merged_use_comps, bg)
                _ms_ = merge_ms(_ms_, ms_this_tree, bg)
            end
        end
        @info "total number of motifs: $total_num_ms, after merging: $(_ms_.num_motifs)"
        total_num_ms == _ms_.num_motifs && break
    end
    return _ms_
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 9587 | const ind2dna_str = Dict{Int, String}(1=>"A", 2=>"C", 3=>"G", 4=>"T")
# Consensus string of a PFM: the argmax base per column, with columns whose
# top probability is below `prob_thresh` replaced by the wildcard `any_regex`.
function get_relaxed_consensus_str(pfm; any_regex=".", prob_thresh=0.5)
    argmax_inds = reshape(argmax(pfm, dims=1), (size(pfm,2),));
    char_array = [ind2dna_str[i[1]] for i in argmax_inds]
    char_array[findall((@view pfm[argmax_inds]) .< prob_thresh)] .= any_regex
    return join(char_array)
end
# Length-`len` BitVector that is `true` exactly on the positions covered by
# the ranges in `uranges`.
function unitranges_to_bitarray(uranges::Vector{UnitRange{Int}}, len::Int)
    mask = falses(len)
    foreach(r -> mask[r] .= true, uranges)
    return mask
end
"""
    apply_bitarray(s, b)

Mask string `s` with `b`: characters at `true` positions are kept, characters
at `false` positions become `'-'`. `s` and `b` must have equal length.

Fixes: the previous version concatenated one-character strings (`res *= …`),
which is O(n²), and indexed `s` by byte position, which is incorrect for
non-ASCII strings; this version streams into an `IOBuffer` and zips over
characters.
"""
function apply_bitarray(s::AbstractString, b::BitArray)
    @assert length(s) == length(b)
    io = IOBuffer()
    for (c, keep) in zip(s, b)
        print(io, keep ? c : '-')
    end
    return String(take!(io))
end
# Thin alias: mask a consensus string with its effective-position bit vector.
get_dashed_strings(consensus_str, str_bitarray) = apply_bitarray(consensus_str, str_bitarray)

# For every motif in `ms`: build its relaxed consensus string and dash out the
# columns that fall outside the motif's effective (high-IC) segments.
function get_dashed_strings(ms)
    consensus_strs = get_relaxed_consensus_str.(ms.pfms)
    str_bitarrays = unitranges_to_bitarray.(ms.effective_segments, ms.lens)
    return get_dashed_strings.(consensus_strs, str_bitarrays)
end
# In-place: every run of exactly `len` characters flanked by `sep` on both
# sides in `char_array` is overwritten with `sep`. The scan reads the mutated
# array, so clearing cascades left-to-right. `str` (same length) only sizes
# the scan.
function clear_islands_of_len!(char_array, str, sep, len)
    window = len + 2
    for start = 1:length(str)-window+1
        stop = start + len + 1
        if char_array[start] == sep && char_array[stop] == sep
            char_array[start+1:stop-1] .= sep
        end
    end
end
# Return a copy of `str` in which any character sandwiched between two `sep`
# characters becomes `sep`, after which islands of each length listed in
# `islands2clear` that are flanked by `sep` are blanked out too. The first and
# last characters are always carried over verbatim.
function clean_str(str; sep='-', islands2clear=[2,3])
    n = length(str)
    chars = Vector{Char}(undef, n)
    chars[1] = str[1]
    chars[end] = str[end]
    for i = 2:n-1
        sandwiched = str[i-1] == sep && str[i+1] == sep
        chars[i] = (str[i] == sep || sandwiched) ? sep : str[i]
    end
    for island_len in islands2clear
        clear_islands_of_len!(chars, str, sep, island_len)
    end
    return join(chars)
end
# Strip the leading and trailing runs of `sep1`/`sep2` characters from `str`;
# returns "" when the string consists solely of separators.
function clear_dash(str; sep1 = '-', sep2 = '.')
    n = length(str)
    issep(c) = c == sep1 || c == sep2
    lead = 0
    while lead < n && issep(str[lead+1])
        lead += 1
    end
    trail = n + 1
    while trail > 1 && issep(str[trail-1])
        trail -= 1
    end
    lead > trail && return ""
    return str[lead+1:trail-1]
end
get_cleaned_dashed_strs(ms) = clear_dash.(clean_str.(get_dashed_strings(ms)))
"""
    edit_distance(s, t)

Levenshtein distance between strings `s` and `t` (single-row Wagner–Fischer
DP, with common prefix/suffix trimmed first for speed). Intended for the
ASCII dashed-consensus strings used in this package.

Fix: the prefix-trim guard previously read `s[1]` BEFORE checking
`slen == 0`; under `@inbounds` that is an out-of-bounds read whenever the
shorter string is empty (or equals a suffix of the longer one). The
short-circuit order is now `slen == 0 || s[1] == t[1]`.
"""
function edit_distance(s, t)
    s, t = (length(t) > length(s)) ? (s, t) : (t, s);
    slen = length(s);
    tlen = length(t);
    # trim the common suffix
    @inbounds while slen > 0 && s[slen] == t[tlen]
        slen -= 1; tlen -= 1;
    end
    start = 0;
    # trim the common prefix (guard the empty case before touching s[1])
    @inbounds if slen == 0 || s[1] == t[1]
        while start < slen && s[start+1] == t[start+1]
            start += 1;
        end
        slen -= start;
        tlen -= start;
        slen == 0 && return tlen;
    end
    v_0 = [i for i = 1:tlen];
    cur = 0
    @inbounds for i = 1:slen
        schar = s[start+i];
        left = cur = i-1;
        for j = 1:tlen
            abv = cur;
            cur = left;
            left = v_0[j]
            if schar != t[start+j]
                cur += 1;
                insdel = abv + 1;
                cur = (insdel < cur) ? insdel : cur;
                insdel = left + 1;
                cur = (insdel < cur) ? insdel : cur;
            end
            v_0[j] = cur;
        end
    end
    return cur
end
# Symmetric pseudo-similarity between dashed strings: min(len_i, len_j)
# divided by their edit distance; the diagonal stays 0.
# NOTE(review): identical strings give distance 0 and hence `Inf` similarity —
# presumably intended so duplicates always cluster together; confirm.
function fill_pseudo_normalized_edit_similarity_matrix(dashed_strs)
    len_dashed_strs = length(dashed_strs)
    len_each_dashed_strs = length.(dashed_strs)
    str_similarity_mat = zeros(Float64, (len_dashed_strs, len_dashed_strs))
    for i = 1:len_dashed_strs
        for j = i+1:len_dashed_strs
            str_similarity_mat[i,j] = str_similarity_mat[j,i] = 
                min(len_each_dashed_strs[i], len_each_dashed_strs[j]) / edit_distance(dashed_strs[i], dashed_strs[j])
        end
    end
    return str_similarity_mat
end
mean_length_of_connected_components(trees) = mean.(trees)
# function obtain_groupings_for_display(ms; component_cut_off=2.75)
# dashed_strs = get_cleaned_dashed_strs(ms)
# str_similarity_mat = fill_pseudo_normalized_edit_similarity_matrix(dashed_strs)
# trees = return_connected_components(str_similarity_mat, component_cut_off)
# mean_lengths = mean_length_of_connected_components(trees)
# # display the groups that have more than 1 member first
# # within these groups, display the ones with the longest mean length first
# groups_with_more_than_1_member = findall(length.(trees) .> 1)
# groups_with_more_than_1_member_order = sortperm(mean_lengths[groups_with_more_than_1_member], rev=true)
# groups_with_just_1_member = findall(length.(trees) .== 1)
# groups_with_just_1_member_order = sortperm(mean_lengths[groups_with_just_1_member], rev=true)
# order_for_display = Vector{Int}(undef, ms.num_motifs)
# k = 1
# for i in groups_with_more_than_1_member_order
# group_indices =
# trees[groups_with_more_than_1_member[i]]
# len_group_indices = length(group_indices)
# order_for_display[k:k+len_group_indices-1] = group_indices
# k += len_group_indices
# end
# for i in groups_with_just_1_member_order
# group_index = trees[groups_with_just_1_member[i]][1]
# order_for_display[k] = group_index
# k += 1
# end
# return order_for_display
# end
# For each component in `tree` (a vector of index vectors), collect the
# lengths of the corresponding strings in `dashed_strs_here`.
function get_length_tree(dashed_strs_here, tree)
    return [length.(dashed_strs_here[component]) for component in tree]
end
# Display order within one group of dashed strings: cluster them by edit
# similarity, list multi-member clusters first (clusters with the longest mean
# string length leading), then singletons (again by decreasing mean length).
function get_order_for_this_group(dash_strs_here, component_cut_off)
    str_similarity_mat_abv = fill_pseudo_normalized_edit_similarity_matrix(dash_strs_here)
    trees_here = return_connected_components(str_similarity_mat_abv, component_cut_off)
    mean_lengths_here = mean_length_of_connected_components(get_length_tree(dash_strs_here, trees_here))
    groups_with_more_than_1_member = findall(length.(trees_here) .> 1)
    groups_with_more_than_1_member_order = sortperm(mean_lengths_here[groups_with_more_than_1_member], rev=true)
    groups_with_just_1_member = findall(length.(trees_here) .== 1)
    groups_with_just_1_member_order = sortperm(mean_lengths_here[groups_with_just_1_member], rev=true)
    order_for_display_here = Vector{Int}(undef, length(dash_strs_here))
    k = 1
    # multi-member clusters first, longest mean string first
    for i in groups_with_more_than_1_member_order
        group_indices = 
            trees_here[groups_with_more_than_1_member[i]]
        len_group_indices = length(group_indices)
        order_for_display_here[k:k+len_group_indices-1] = group_indices
        k += len_group_indices
    end
    # then the singleton clusters
    for i in groups_with_just_1_member_order
        group_index = trees_here[groups_with_just_1_member[i]][1]
        order_for_display_here[k] = group_index
        k += 1
    end
    return order_for_display_here
end
# function obtain_groupings_for_display(ms, component_cut_off=2.75)
# dashed_strs = get_cleaned_dashed_strs(ms)
# more_than_one_island_indicator = length.(ms.effective_segments) .> 1
# one_island_indicator = .!more_than_one_island_indicator
# # organize all the inds of the motifs with more than one island to be display first
# inds_abv = findall(more_than_one_island_indicator)
# inds_below = findall(one_island_indicator)
# dashed_strs_abv = @view dashed_strs[more_than_one_island_indicator]
# dashed_strs_below = @view dashed_strs[one_island_indicator]
# # get the sort perm
# inds_abv_sort_perm = get_order_for_this_group(dashed_strs_abv, component_cut_off)
# inds_below_sort_perm = get_order_for_this_group(dashed_strs_below, component_cut_off)
# display_order = vcat(inds_abv[inds_abv_sort_perm], inds_below[inds_below_sort_perm])
# return display_order
# end
# Global display order over all motifs: "big" patterns (dashed string longer
# than `big_pattern_thresh`) first, then motifs with multiple effective
# segments, then everything else; each section is internally ordered by
# `get_order_for_this_group`.
function obtain_groupings_for_display1(ms; component_cut_off=2.75, big_pattern_thresh=80)
    dashed_strs = get_cleaned_dashed_strs(ms)
    big_pattern_indicator = length.(dashed_strs) .> big_pattern_thresh
    not_big_pattern_indicator = .!big_pattern_indicator
    more_than_one_island_indicator = (length.(ms.effective_segments) .> 1) .& not_big_pattern_indicator
    one_island_indicator = (.!more_than_one_island_indicator) .& not_big_pattern_indicator
    # organize all the inds of the motifs with more than one island to be display first
    inds_abv = findall(big_pattern_indicator)
    inds_middle = findall(more_than_one_island_indicator)
    inds_below = findall(one_island_indicator)
    dashed_strs_abv = @view dashed_strs[big_pattern_indicator]
    dashed_strs_middle = @view dashed_strs[more_than_one_island_indicator]
    dashed_strs_below = @view dashed_strs[one_island_indicator]
    # get the sort perm
    inds_abv_sort_perm = get_order_for_this_group(dashed_strs_abv, component_cut_off)
    inds_middle_sort_perm = get_order_for_this_group(dashed_strs_middle, component_cut_off)
    inds_below_sort_perm = get_order_for_this_group(dashed_strs_below, component_cut_off)
    display_order = vcat(inds_abv[inds_abv_sort_perm], inds_middle[inds_middle_sort_perm], inds_below[inds_below_sort_perm])
    return display_order
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 9487 | mutable struct motifs{T <: Integer, S <: Real}
cmats::Vector{Matrix{S}}
pfms::Vector{Matrix{S}}
pwms::Union{Nothing,Vector{Matrix{S}}}
effective_segments::Vector{Vector{UnitRange{Int}}}
max_effective_lens::Vector{T}
max_scores::Union{Nothing, Vector{S}}
min_scores::Union{Nothing, Vector{S}}
score_thresh::Union{Nothing, Vector{S}} # of the effective length
lens::Vector{T}
num_motifs::T
positions::Union{Nothing, Vector{Dict{T, Vector{T}}}}
scores::Union{Nothing, Vector{Dict{T, Vector{S}}}}
use_comp::Union{Nothing, Vector{Dict{T, Vector{Bool}}}}
positions_bg::Union{Nothing, Vector{Dict{T, Vector{T}}}}
scores_bg::Union{Nothing, Vector{Dict{T, Vector{S}}}}
use_comp_bg::Union{Nothing, Vector{Dict{T, Vector{Bool}}}}
end
# Per-column information content: elementwise pfm .* pwm summed over rows,
# returned as a length-`size(pfm, 2)` vector.
ic(pfm,pwm) = reshape(sum(pfm .* pwm, dims=1), size(pfm,2))
# Pad a boolean IC vector with `false` on both ends so run boundaries are
# always detectable by `diff`.
pad_bit_ic_vec(q) = [false; q; false]
# Centered moving average of `A` with window size `m`; the window is truncated
# at the array boundaries, so edge entries average over fewer elements.
# Works for arrays of any dimensionality via CartesianIndices.
function moving_average(A::AbstractArray, m::Int)
    out = similar(A)
    inds = CartesianIndices(A)
    lo, hi = first(inds), last(inds)
    half = (m ÷ 2) * oneunit(lo)
    for idx in inds
        acc = zero(eltype(out))
        cnt = 0
        for nb in max(lo, idx - half):min(hi, idx + half)
            acc += A[nb]
            cnt += 1
        end
        out[idx] = acc / cnt
    end
    return out
end
# Contiguous runs of positions whose moving-average information content
# (window `m`) exceeds `ic_threshold`, returned as UnitRanges in the ORIGINAL
# (unpadded) coordinate system. The vector is padded with `false` on both ends
# so `diff` always sees both edges of every run; the pad offset is what makes
# `start_:end_-1` land back on original indices.
function get_high_ic_segments(this_ic_vec; m=mv_avg_window, ic_threshold=effective_pos_ic_thresh)
    mv_this_ic_vec = pad_bit_ic_vec(moving_average(this_ic_vec, m) .> ic_threshold)
    diff_this_ic_vec = diff(push!(mv_this_ic_vec, false)) # append a false to the end
    starts = findall(diff_this_ic_vec .> 0)
    ends = findall(diff_this_ic_vec .< 0)
    return [start_:end_-1 for (start_, end_) in zip(starts, ends)]
end
filter_zero_eff_segs(eff_segs::Vector{Vector{UnitRange{Int}}}) = length.(eff_segs) .> 0
# True for each count matrix that yields at least one high-IC segment.
function get_filter_vec(cmats)
    eff_segs = get_high_ic_segments.(cmat2ic.(cmats))
    filter_vec = filter_zero_eff_segs(eff_segs)
    return filter_vec
end
# Same as `get_filter_vec`, but also returns the computed segments so callers
# can avoid recomputing them.
function return_effective_segments_and_filter_vec(cmats)
    eff_segs = get_high_ic_segments.(cmat2ic.(cmats))
    filter_vec = filter_zero_eff_segs(eff_segs)
    return eff_segs, filter_vec
end
max_eff_len(range_vec) = maximum(length.(range_vec))
# Count matrix -> position frequency matrix with per-cell pseudocount `ps`;
# this method takes precomputed column sums.
countmat2pfm(count_matrix, 
             countmat_sum_col; 
             ps=float_type_retrieval(0.01)) = 
    float_type_retrieval.((count_matrix .+ ps) ./ (countmat_sum_col .+ (4*ps)));
# Convenience method: derive the column sums from the matrix itself.
countmat2pfm(count_matrix; ps=float_type_retrieval(0.01)) = 
    countmat2pfm(count_matrix, sum(count_matrix, dims=1); ps=ps)
freq2pwm(pfm, bg) = log2.(pfm ./ bg)
"""
    return_pfm_pwm_lens_nummats(countmats, bg)

From a vector of count matrices and a background `bg`, compute the PFMs,
the corresponding PWMs, the per-motif widths, and the number of motifs.
"""
function return_pfm_pwm_lens_nummats(countmats, bg)
    freq_mats = countmat2pfm.(countmats)
    weight_mats = map(f -> freq2pwm(f, bg), freq_mats)
    widths = [size(w, 2) for w in weight_mats]
    return freq_mats, weight_mats, widths, length(widths)
end
"""
    return_ms_others(ms, filter_vec)

Select the `filter_vec` entries of the nine optional per-motif fields of `ms`
(`max_scores`, `min_scores`, `score_thresh`, `positions`, `scores`,
`use_comp`, `positions_bg`, `scores_bg`, `use_comp_bg`), returned in that
order. Fields that are `nothing` stay `nothing`.
"""
function return_ms_others(ms, filter_vec)
    # helper: index an optional field, propagating `nothing`
    keep(field) = isnothing(field) ? nothing : field[filter_vec]
    return keep(ms.max_scores), keep(ms.min_scores), keep(ms.score_thresh),
           keep(ms.positions), keep(ms.scores), keep(ms.use_comp),
           keep(ms.positions_bg), keep(ms.scores_bg), keep(ms.use_comp_bg)
end
"""
    countmats2motifs(count_mats, bg)

Build a `motifs` struct from raw count matrices: drop matrices with no
effective (high-IC) segment, derive PFMs/PWMs, and leave all score- and
position-related fields as `nothing` (filled later by the scanning steps).
"""
function countmats2motifs(count_mats, bg) # pval_thresh = 0.00027
    count_mats = count_mats[get_filter_vec(count_mats)]
    pfms, pwms, lens, num_pfms = return_pfm_pwm_lens_nummats(count_mats, bg)
    eff_segs = get_high_ic_segments.(cmat2ic.(count_mats))
    max_eff_lens = max_eff_len.(eff_segs)
    return motifs{Int, float_type_retrieval}(
        count_mats,
        pfms,
        pwms,
        eff_segs,
        max_eff_lens,
        nothing,    # max_scores
        nothing,    # min_scores
        nothing,    # score_thresh
        lens,
        num_pfms,
        nothing,    # positions
        nothing,    # scores
        nothing,    # use_comp
        nothing,    # positions_bg
        nothing,    # scores_bg
        nothing     # use_comp_bg
        );
end
"""
    countmats2motifs(count_mats, positions, use_comp, bg)

Like `countmats2motifs(count_mats, bg)`, but also carries along the known
per-motif `positions` and strand flags `use_comp` (filtered with the same
mask as the count matrices).
"""
function countmats2motifs(count_mats, positions, use_comp, bg)
    filter_vec = get_filter_vec(count_mats)
    count_mats = count_mats[filter_vec]
    pfms, pwms, lens, num_pfms = return_pfm_pwm_lens_nummats(count_mats, bg)
    eff_segs = get_high_ic_segments.(cmat2ic.(count_mats))
    max_eff_lens = max_eff_len.(eff_segs)
    return motifs{Int, float_type_retrieval}(
        count_mats,
        pfms,
        pwms,
        eff_segs,
        max_eff_lens,
        nothing,    # max_scores
        nothing,    # min_scores
        nothing,    # score_thresh
        lens,
        num_pfms,
        positions[filter_vec],
        nothing,    # scores
        use_comp[filter_vec],
        nothing,    # positions_bg
        nothing,    # scores_bg
        nothing     # use_comp_bg
        );
end
"""
    filter_motifs_w_filter_vec(ms::motifs, filter_vec, bg)

Return a new `motifs` struct keeping only the motifs selected by
`filter_vec`; PFMs/PWMs and effective segments are recomputed from the kept
count matrices, while the optional score/position fields are sliced via
`return_ms_others` (staying `nothing` when not yet populated).
"""
function filter_motifs_w_filter_vec(ms::motifs, filter_vec, bg)
    cmats = ms.cmats[filter_vec]
    eff_segs = get_high_ic_segments.(cmat2ic.(cmats))
    max_eff_lens = max_eff_len.(eff_segs)
    pfms, pwms, lens, num_pfms = return_pfm_pwm_lens_nummats(cmats, bg)
    max_scores, min_scores, score_thresh,
        positions, scores, use_comp,
        positions_bg, scores_bg, use_comp_bg = return_ms_others(ms, filter_vec)
    return motifs{Int, float_type_retrieval}(
        cmats,
        pfms,
        pwms,
        eff_segs,
        max_eff_lens,
        max_scores,
        min_scores,
        score_thresh,
        lens,
        num_pfms,
        positions,
        scores,
        use_comp,
        positions_bg,
        scores_bg,
        use_comp_bg
        );
end
# Allocate one empty (seq_num => values) Dict per motif for positions,
# scores, and strand flags — the containers filled during scanning.
function motifs_prep(ms::motifs{T,S}) where {T<:Integer,S<:Real}
    positions = [Dict{T,Vector{T}}() for _ = 1:ms.num_motifs];
    scores = [Dict{T,Vector{S}}() for _ = 1:ms.num_motifs];
    use_comp = [Dict{T,Vector{Bool}}() for _ = 1:ms.num_motifs];
    return positions, scores, use_comp
end
# Append `pos` to the position list of sequence `seq_num`, creating the
# list on first use.
function push_position!(positions_i, seq_num, pos)
    existing = get(positions_i, seq_num, nothing)
    if existing === nothing
        positions_i[seq_num] = [pos]
    else
        push!(existing, pos)
    end
end
# Append the strand flag `use_comp` to sequence `seq_num`'s list,
# inserting an empty list first if the key is new.
push_use_comp!(use_comp_i, seq_num, use_comp) =
    push!(get!(use_comp_i, seq_num, Bool[]), use_comp)
"""
    obtain_count_mat_pos_use_comp(H, data)

Accumulate one 4×len count matrix per enriched key in `H`, along with the
per-sequence positions and strand flags of each occurrence. Each value `v`
carries `seq_num`, `pos`, and `comp` (reverse-complement flag).
`four_based_ind` (defined elsewhere) presumably maps a base position to its
first row in the flattened 4-row one-hot encoding — TODO confirm.
Relies on `keys(H)` enumerating in the same order both times (H is not
mutated in between).
"""
function obtain_count_mat_pos_use_comp(H, data)
    count_matrices_lengths = Array{Int}(undef, length(H));
    enriched_keys = keys(H);
    @inbounds for (ind,k) in enumerate(enriched_keys)
        count_matrices_lengths[ind] = k.len
    end
    count_matrices = [zeros(Float32, (4, count_matrices_lengths[i])) for i in axes(count_matrices_lengths, 1)];
    positions = [Dict{Int, Vector{Int}}() for _ in eachindex(count_matrices)]
    use_comp = [Dict{Int, Vector{Bool}}() for _ in eachindex(count_matrices)]
    for (ind, k) in enumerate(keys(H))
        for v in H[k]
            push_position!(positions[ind], v.seq_num, v.pos)
            push_use_comp!(use_comp[ind], v.seq_num, v.comp)
            pos_start = four_based_ind(v.pos)
            pos_end = four_based_ind(v.pos+count_matrices_lengths[ind]-1)+3
            onehot_code = reshape(data.data_matrix[pos_start:pos_end,1,v.seq_num],
                (4, count_matrices_lengths[ind]))
            # reversing both dims of the 4×L one-hot block yields the
            # reverse complement under the ACGT row ordering
            count_matrices[ind] .+= v.comp ? reverse(onehot_code) : onehot_code
        end
    end
    return count_matrices, positions, use_comp
end
"""
    enriched_keys2motifs(H, data, bg)

Build a `motifs` struct from the enriched keys in `H`: accumulate per-key
count matrices, occurrence positions, and strand flags, then assemble them
with the background `bg`.
"""
function enriched_keys2motifs(H, data, bg)
    # The extraction loop here was a verbatim copy of
    # `obtain_count_mat_pos_use_comp`; reuse it instead of duplicating it.
    count_matrices, positions, use_comp = obtain_count_mat_pos_use_comp(H, data)
    return countmats2motifs(count_matrices, positions, use_comp, bg)
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 5109 | # filter position by best threshold
# Count how many scores across all sequences are strictly above `thresh`.
function get_hits(scores, thresh)
    total = 0
    for vals in values(scores)
        total += count(>(thresh), vals)
    end
    return total
end
# Largest score observed across all sequences in `scores` and `scores_bg`.
# Starts from -Inf (in the project-wide retrieval float type).
function get_max_score(scores, scores_bg)
    best = -float_type_retrieval(Inf)
    for score_dict in (scores, scores_bg)
        for vals in values(score_dict)
            candidate = maximum(vals)
            candidate > best && (best = candidate)
        end
    end
    return best
end
# Smallest score observed across all sequences in `scores` and `scores_bg`.
# Starts from +Inf (in the project-wide retrieval float type).
function get_min_score(scores, scores_bg)
    lowest = float_type_retrieval(Inf)
    for score_dict in (scores, scores_bg)
        for vals in values(score_dict)
            candidate = minimum(vals)
            candidate < lowest && (lowest = candidate)
        end
    end
    return lowest
end
# Pick the Touzet p-value constant according to the PWM width:
# ≤ 9 columns → small, 10–11 → mid, ≥ 12 → large.
function get_pvalue(pwm)
    width = size(pwm, 2)
    if width ≤ 9
        return pvalue_Touzet_small
    elseif width ≤ 11
        return pvalue_Touzet_mid
    else
        return pvalue_Touzet_large
    end
end
"""
    get_high_scoring_pwm_segments(pwm; seg=max_pwm_length_Touzet2)

Group the columns of `pwm` into segments of `seg` columns each, ordered by
descending per-column maximum entry: the `seg` highest-ranked columns form
the first segment, the next `seg` the second, and so on. The trailing
`size(pwm, 2) % seg` lowest-ranked columns are dropped.
"""
function get_high_scoring_pwm_segments(pwm; seg=max_pwm_length_Touzet2)
    pwm_len = size(pwm,2);
    # (removed a dead `pwm_len % seg` statement whose value was discarded)
    # rank columns by their largest entry, best first
    max_score_each = [maximum(view(pwm, :, i)) for i = 1:pwm_len]
    sort_perm_max_cols = sortperm(max_score_each, rev=true)
    # chunk the ranked column indices into groups of `seg` columns
    pwm_seg_views = [view(sort_perm_max_cols, i:i+seg-1) for i = 1:seg:(pwm_len-pwm_len % seg)]
    return [pwm[:, pwm_seg_views[i]] for i in eachindex(pwm_seg_views)]
end
# function get_best_thresh(eff_pos, pwm)
# best_thresh = 0f0
# for pos in eff_pos
# length(pos) ≤ 1 && (continue)
# if length(pos) > max_pwm_length_Touzet2
# pwm_segments = get_high_scoring_pwm_segments(pwm[:, pos])
# for pwm_seg in pwm_segments
# best_thresh += pvalue2score(pwm_seg, get_pvalue(pwm_seg))
# end
# else
# this_pwm = pwm[:, pos];
# best_thresh += pvalue2score(this_pwm, get_pvalue(this_pwm))
# end
# end
# return best_thresh
# end
# function filter_position_by_best_thresh!(positions, scores, use_comp, best_thresh)
# if !isnothing(positions) && !isnothing(scores) && !isnothing(use_comp)
# @inbounds for k in keys(positions)
# mask = scores[k] .> best_thresh
# positions[k] = positions[k][mask]
# scores[k] = scores[k][mask]
# use_comp[k] = use_comp[k][mask]
# end
# end
# end
# function filter_positions_scores_usecomp!(ms)
# ms.score_thresh = get_best_thresh.(ms.effective_segments, ms.pwms);
# filter_position_by_best_thresh!.(ms.positions, ms.scores, ms.use_comp, ms.score_thresh);
# filter_position_by_best_thresh!.(ms.positions_bg, ms.scores_bg, ms.use_comp_bg, ms.score_thresh);
# end
"""
    get_best_thresh(scores, bg_scores, max_score, min_score, max_eff_len,
                    eff_pos, pwm, asum, bg)

Choose a score threshold for a motif. If any effective segment is shorter
than `max_pwm_length_Touzet2`, the threshold is the sum of Touzet
p-value-derived scores over the usable segments (`pvalue2score`, defined
elsewhere). Otherwise, sweep thresholds from `min_score` to `max_score` in
`score_thresh_increment` steps and keep the one minimizing the right-tailed
Fisher exact p-value of foreground vs background hit counts.
NOTE(review): `max_eff_len` is accepted but never used — confirm intent.
NOTE(review): the 2×2 table uses `asum - a` / `asum - b` as the
non-hit counts; `asum` is presumably the total number of scanned positions
(`data.N * data.L` at the call site) — verify against the Fisher test setup.
"""
function get_best_thresh(scores, bg_scores, max_score, min_score, max_eff_len, eff_pos, pwm, asum, bg)
    if any(length.(eff_pos) .< max_pwm_length_Touzet2)
        best_thresh = 0f0
        for pos in eff_pos
            # segments that are too long or trivially short contribute nothing
            (length(pos) > max_pwm_length_Touzet2 || length(pos) ≤ 1) && continue
            this_pwm = pwm[:, pos];
            best_thresh += pvalue2score(this_pwm, get_pvalue(this_pwm); bg=bg)
        end
        return best_thresh
    end
    best_thresh = min_score
    score_thresh = min_score
    best_p = 1f0
    while score_thresh < max_score
        a = get_hits(scores, score_thresh)
        b = get_hits(bg_scores, score_thresh)
        c = asum-a
        d = asum-b
        q = FisherExactTest(promote_i(a, c, b, d)...);
        p = HypothesisTests.pvalue(q, tail=:right)
        p < best_p && (best_p = p; best_thresh = score_thresh)
        score_thresh += score_thresh_increment
    end
    return best_thresh
end
# In place, drop every occurrence whose score is not strictly above
# `best_thresh`, keeping `positions`, `scores`, and `use_comp` in sync.
# A no-op when any of the three containers is `nothing`.
function filter_position_by_best_thresh!(positions, scores, use_comp, best_thresh)
    (isnothing(positions) || isnothing(scores) || isnothing(use_comp)) && return nothing
    for k in keys(positions)
        keep = scores[k] .> best_thresh
        positions[k] = positions[k][keep]
        scores[k] = scores[k][keep]
        use_comp[k] = use_comp[k][keep]
    end
    return nothing
end
"""
    filter_positions_scores_usecomp!(ms, data, bg)

For every motif in `ms`: compute the global max/min scores over foreground
and background, select a per-motif score threshold via `get_best_thresh`
(with `asum = data.N * data.L`, the total number of scanned positions), and
prune all foreground and background occurrences below that threshold.
Mutates `ms.max_scores`, `ms.min_scores`, `ms.score_thresh`, and the
position/score/strand containers.
"""
function filter_positions_scores_usecomp!(ms, data, bg)
    @info "number of motifs: $(ms.num_motifs)"
    @info "filtering motifs positions..."
    ms.max_scores = get_max_score.(ms.scores, ms.scores_bg);
    ms.min_scores = get_min_score.(ms.scores, ms.scores_bg);
    ms.score_thresh = Vector{float_type_retrieval}(undef, ms.num_motifs)
    for i = 1:ms.num_motifs
        ms.score_thresh[i] = get_best_thresh(ms.scores[i], ms.scores_bg[i], ms.max_scores[i], ms.min_scores[i], ms.max_effective_lens[i], ms.effective_segments[i], ms.pwms[i], data.N*data.L, bg);
    end
    filter_position_by_best_thresh!.(ms.positions, ms.scores, ms.use_comp, ms.score_thresh);
    filter_position_by_best_thresh!.(ms.positions_bg, ms.scores_bg, ms.use_comp_bg, ms.score_thresh);
    @info "done filtering motifs positions..."
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 3993 |
# Per-sequence metadata record type: the raw sequence string, the range the
# motif occupies within it, and an integer mode label.
const dna_meta_data = Vector{NamedTuple{(:str, :motif_where, :mode),
                        Tuple{String, UnitRange{Int64}, Int64}}}
"""
    FASTA_DNA{S <: Real}

Container for a train/test split of equal-length DNA sequences: one-hot
data matrices for the real sequences and k-mer-shuffled backgrounds,
first-order Markov background statistics, and optional GPU copies / labels.
Constructed either from an in-memory vector of reads or from a FASTA file.
"""
mutable struct FASTA_DNA{S <: Real}
    N::Int                       # number of training sequences
    L::Int                       # sequence length in bases
    acgt_freq::Vector{S}         # nucleotide frequencies (train background)
    markov_bg_mat::Matrix{S}     # 1st-order Markov transition matrix (train)
    raw_data::Vector{String}     # training sequences
    raw_data_test::Vector{String} # test sequences
    data_matrix::Union{Array{S,3}, Array{S,2}}          # one-hot (4L × 1 × N)
    data_matrix_gpu::Union{CuArray{S,3}, CuArray{S,2}, Nothing}
    data_matrix_bg::Union{Array{S,3}, Array{S,2}}       # shuffled background
    data_matrix_bg_gpu::Union{CuArray{S,3}, CuArray{S,2}, Nothing}
    labels::Union{Nothing, Vector{String}, Vector{Int}}
    meta_data::Union{Nothing, dna_meta_data}
    acgt_freq_test::Union{Nothing, Vector{S}}
    markov_bg_mat_test::Union{Nothing, Matrix{S}}
    data_matrix_test::Union{Nothing, Array{S,3}, Array{S,2}}
    data_matrix_bg_test::Union{Nothing, Array{S,3}, Array{S,2}}
    N_test::Int                  # number of test sequences
    # Construct directly from a vector of DNA reads.
    function FASTA_DNA{S}(dna_read::Vector{String};
        k_train=1, k_test=2, # kmer frequency in the test set
        train_test_split_ratio=0.9,
        shuffle=true) where {S <: Real}
        data_matrix, data_matrix_bg, _, acgt_freq, markov_bg_mat,
            data_matrix_test, data_matrix_bg_test, _, acgt_freq_test,
            markov_bg_mat_test, N_train, N_test, train_set_inds, test_set_inds =
                get_data_matrices(dna_read; k_train=k_train, k_test=k_test,
                    train_test_split_ratio=train_test_split_ratio,
                    shuffle=shuffle,
                    FloatType=S);
        # 4 one-hot rows per base; reshape to (4L, 1, N) for the conv layers
        L = Int(size(data_matrix,1)/4);
        data_matrix = reshape(data_matrix, 4*L, 1, N_train);
        data_matrix_test = reshape(data_matrix_test, 4*L, 1, N_test)
        data_matrix_bg = reshape(data_matrix_bg, 4*L, 1, N_train)
        new(
            N_train,
            L,
            acgt_freq,
            markov_bg_mat,
            dna_read[train_set_inds],
            dna_read[test_set_inds],
            data_matrix,
            nothing,                # data_matrix_gpu
            data_matrix_bg,
            nothing,                # data_matrix_bg_gpu
            nothing,                # labels
            nothing,                # meta_data
            acgt_freq_test,
            markov_bg_mat_test,
            data_matrix_test,
            data_matrix_bg_test,
            N_test
        )
    end
    # Construct by reading a FASTA file from disk.
    function FASTA_DNA{S}(fasta_location::String;
                      max_entries=max_num_read_fasta,
                      k_train=1, k_test=2, # kmer frequency in the test set
                      train_test_split_ratio=0.9,
                      shuffle=true
                      ) where {S <: Real}
        dna_read = nothing; labels = nothing;
        dna_read = read_fasta(fasta_location; max_entries);
        # dna_read[1] |> println
        data_matrix, data_matrix_bg, _, acgt_freq, markov_bg_mat,
            data_matrix_test, data_matrix_bg_test, _, acgt_freq_test,
            markov_bg_mat_test, N_train, N_test, train_set_inds, test_set_inds =
                get_data_matrices(dna_read; k_train=k_train, k_test=k_test,
                    train_test_split_ratio=train_test_split_ratio,
                    shuffle=shuffle,
                    FloatType=S);
        L = Int(size(data_matrix,1)/4);
        data_matrix = reshape(data_matrix, 4*L, 1, N_train);
        data_matrix_test = reshape(data_matrix_test, 4*L, 1, N_test)
        data_matrix_bg = reshape(data_matrix_bg, 4*L, 1, N_train)
        new(
            N_train,
            L,
            acgt_freq,
            markov_bg_mat,
            dna_read[train_set_inds],
            dna_read[test_set_inds],
            data_matrix,
            nothing,                # data_matrix_gpu
            data_matrix_bg,
            nothing,                # data_matrix_bg_gpu
            labels,                 # always `nothing` here (never reassigned)
            nothing,                # meta_data
            acgt_freq_test,
            markov_bg_mat_test,
            data_matrix_test,
            data_matrix_bg_test,
            N_test
        )
    end
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 9675 | ######## fasta load settings: ##################
# Maximum number of FASTA records to load.
const max_num_read_fasta = 100000;
const int_t = Int32; # integer type
#= each label must be associated with at least `label_count_thresh` sequences
    for classification
   NOTE(review): an earlier comment said 200 but the constant is 250 — confirm.
=#
const label_count_thresh = 250;
# Watson–Crick complements (uppercase only).
const DNA_complement = Dict('A'=>'T','C'=>'G','G'=>'C','T'=>'A');
# Reverse complement; lowercase (soft-masked) characters pass through
# unchanged. Uses byte indexing — assumes ASCII input.
reverse_complement(s::String) =
    join(islowercase(s[si]) ? s[si] : DNA_complement[s[si]] for si = length(s):-1:1)
# Frequency table of the elements of `v` (StatsBase.countmap).
get_count_map(v) = countmap(v)
"""
    reading_for_DNA_regression(filepath; parse_float_type=Float32)

Parse a FASTA-like file whose headers are numeric labels (e.g. `>0.75`) and
return `(labels, dna_reads)`. Records whose sequence contains an ambiguous
base (`N`/`n`) are dropped. Assumes every sequence sits on the single line
directly below its header and that all sequences have the same length.
"""
function reading_for_DNA_regression(filepath::String; parse_float_type=Float32)
    labels = Vector{parse_float_type}()
    dna_reads = Vector{String}()
    for record in split(read(filepath, String), '>')
        isempty(record) && continue
        record_lines = split(record, "\n")
        seq = record_lines[2]
        (occursin("N", seq) || occursin("n", seq)) && continue
        push!(labels, parse(parse_float_type, record_lines[1]))
        push!(dna_reads, seq)
    end
    return labels, dna_reads
end
"""
    permute_dataset(labels, seqs; train_test_ratio=0.8)

Randomly permute the paired `labels`/`seqs` and split them into train and
test partitions. Returns `(n_train, n_test, labels_train, seqs_train,
labels_test, seqs_test)`, where the four data outputs are views into
shuffled copies.
"""
function permute_dataset(labels, seqs; train_test_ratio=0.8)
    n_rows = length(labels)
    perm = randperm(n_rows)
    n_train = round(Int, train_test_ratio * n_rows)
    n_test = n_rows - n_train
    shuffled_labels = labels[perm]
    shuffled_seqs = seqs[perm]
    labels_train = @view shuffled_labels[1:n_train]
    seqs_train = @view shuffled_seqs[1:n_train]
    labels_test = @view shuffled_labels[n_train+1:end]
    seqs_test = @view shuffled_seqs[n_train+1:end]
    return n_train, n_test, labels_train, seqs_train, labels_test, seqs_test
end
"""
    read_and_permute(filepath; train_test_ratio=0.8, parse_float_type=Float32)

Read a label-annotated FASTA file via `reading_for_DNA_regression` and
return a shuffled train/test split via `permute_dataset`.
"""
function read_and_permute(filepath::String; train_test_ratio=0.8, parse_float_type=Float32)
    parsed_labels, parsed_seqs = reading_for_DNA_regression(filepath; parse_float_type=parse_float_type)
    return permute_dataset(parsed_labels, parsed_seqs; train_test_ratio=train_test_ratio)
end
"""
    reading_for_DNA_classification(filepath)

Parse a FASTA-like file whose headers are integer class labels and return
`(labels, dna_reads)`, where `labels` is a one-hot matrix with one column
per kept sequence (rows ordered by first appearance of each class).
Records containing `N`/`n` are dropped. Dataset format as provided by
https://github.com/ML-Bioinfo-CEITEC/genomic_benchmarks and processed by
https://github.com/kchu25/genomic_benchmark_datasets; all sequences are
assumed single-line and of equal length.
"""
function reading_for_DNA_classification(filepath::String)
    class_labels = Int[]
    dna_reads = String[]
    for record in split(read(filepath, String), '>')
        isempty(record) && continue
        record_lines = split(record, "\n")
        seq = record_lines[2]
        (occursin("N", seq) || occursin("n", seq)) && continue
        push!(class_labels, parse(Int, record_lines[1]))
        push!(dna_reads, seq)
    end
    classes = unique(class_labels)
    onehot = reduce(hcat, [classes .== c for c in class_labels]) # class indicators
    return onehot, dna_reads
end
# Read a FASTA file and return its sequences. Multi-line sequences are
# joined; records containing ambiguous bases (N/n) are dropped; at most
# `max_entries` sequences are kept; finally, only sequences whose length
# matches the first read survive (otherwise the Markov background
# estimation downstream may fail).
function reading(filepath::String;
    max_entries=max_num_read_fasta)
    dna_reads = Vector{String}()
    for record in split(read(filepath, String), '>')
        isempty(record) && continue
        record_lines = split(record, "\n")
        seq = join(record_lines[2:end])
        (occursin("N", seq) || occursin("n", seq)) && continue
        push!(dna_reads, seq)
    end
    length(dna_reads) > max_entries && (dna_reads = dna_reads[1:max_entries])
    return [s for s in dna_reads if length(s) == length(dna_reads[1])]
end
"""
    read_fasta(filepath; max_entries=max_num_read_fasta)

Read a FASTA file via `reading` and return its sequences upper-cased.
"""
function read_fasta(filepath::String;
                    max_entries=max_num_read_fasta
                    )::Vector{String}
    raw_reads = reading(filepath; max_entries)
    return uppercase.(raw_reads)
end
# One-hot encode a DNA string: each base contributes 4 consecutive entries
# (looked up in `dummy` after upper-casing, so soft-masked bases work).
function dna2dummy(dna_string::String, dummy::Dict; F=Float32)
    encoded = Vector{F}(undef, 4*length(dna_string))
    for (i, base) in enumerate(dna_string)
        offset = (i-1)*4
        encoded[offset+1:offset+4] = dummy[uppercase(base)]
    end
    return encoded
end
#=
get the set of dummy-vectors from a set of dna-strings
    the dummy-vectors are all of same length (for now)
=#
# Encode each DNA string as a one-hot column of the returned (4L × N) matrix.
# NOTE(review): the `how_many_strings == 0` line is unreachable — the
# `@assert` just above already fires on empty input; also `@assert` may be
# disabled at higher optimization levels, so consider a real throw.
# NOTE(review): strings whose length differs from the first one leave their
# column of `_S_` uninitialized (`undef`) — confirm callers pre-filter
# lengths (see `reading`).
function data_2_dummy(dna_strings; F=Float32)
    dummy = Dict('A'=>Array{F}([1, 0, 0, 0]),
                 'C'=>Array{F}([0, 1, 0, 0]),
                 'G'=>Array{F}([0, 0, 1, 0]),
                 'T'=>Array{F}([0, 0, 0, 1]));
    how_many_strings = length(dna_strings);
    @assert how_many_strings != 0 "There aren't DNA strings found in the input";
    how_many_strings == 0 && return nothing;
    _len_ = length(dna_strings[1]); # length of each dna string in data
    _S_ = Array{F, 2}(undef, (4*_len_, how_many_strings));
    for i = 1:how_many_strings
        length(dna_strings[i]) == _len_ && (@inbounds _S_[:, i] = dna2dummy(dna_strings[i], dummy))
    end
    return _S_
end
"""
    get_train_test_inds(dna_read, train_test_split_ratio, shuffle)

Split the indices of `dna_read` into train and test sets. With
`shuffle=true` the test indices are sampled without replacement
(`StatsBase.sample`); otherwise the last `(1 - ratio)` fraction of the
data becomes the test set. Train indices are whatever remains, in the
(random) order produced by `setdiff` over the permuted indices.
"""
function get_train_test_inds(dna_read, train_test_split_ratio, shuffle)
    # println(shuffle)
    len_dna_read = length(dna_read)
    how_may_entries_in_test = Int(floor((1-train_test_split_ratio)*len_dna_read));
    test_set_inds = nothing;
    shuffled_inds = randperm(len_dna_read)
    # inds was 1:len_dna_read
    if shuffle
        test_set_inds = sample(shuffled_inds,
            how_may_entries_in_test,
            replace=false)
    else
        test_set_inds = collect(len_dna_read-how_may_entries_in_test+1:len_dna_read)
    end
    # println("test_set_inds: ", test_set_inds)
    train_set_inds = setdiff(shuffled_inds, test_set_inds)
    return train_set_inds, test_set_inds
end
"""
    get_data_matrices2(dna_read, labels; ...)

Labelled variant of `get_data_matrices`: additionally slices `labels` by the
train/test split and appends `labels_train`, `labels_test` to the returned
tuple. Background sequences are produced by k-mer shuffling (`seq_shuffle`,
order `k_train`/`k_test`); background statistics come from
`est_1st_order_markov_bg` and `SeqShuffle.assign_bg_prob` (defined elsewhere).
"""
function get_data_matrices2(dna_read, labels;
                           k_train=1, k_test=2,
                           FloatType=float_type,
                           train_test_split_ratio=0.85,
                           shuffle=true)
    # set train_test_split_ratio = 1.0 if no test set is needed
    train_set_inds, test_set_inds = get_train_test_inds(dna_read, train_test_split_ratio, shuffle)
    # println(train_set_inds)
    dna_read_train = @view dna_read[train_set_inds]
    dna_read_test = @view dna_read[test_set_inds]
    labels_train = labels[train_set_inds]
    labels_test = labels[test_set_inds]
    shuffled_dna_read_train = seq_shuffle.(dna_read_train; k=k_train);
    data_matrix_train = data_2_dummy(dna_read_train; F=FloatType);
    data_matrix_bg_train = data_2_dummy(shuffled_dna_read_train; F=FloatType);
    shuffled_dna_read_test = seq_shuffle.(dna_read_test; k=k_test);
    data_matrix_test = data_2_dummy(dna_read_test; F=FloatType);
    data_matrix_bg_test = data_2_dummy(shuffled_dna_read_test; F=FloatType);
    # estimate the Markov background (order 1)
    acgt_freq_train, markov_mat_train = est_1st_order_markov_bg(shuffled_dna_read_train; F=FloatType);
    acgt_freq_test, markov_mat_test = est_1st_order_markov_bg(shuffled_dna_read_test; F=FloatType);
    data_bg_prob_train = SeqShuffle.assign_bg_prob(shuffled_dna_read_train, markov_mat_train, acgt_freq_train);
    data_bg_prob_test = SeqShuffle.assign_bg_prob(shuffled_dna_read_test, markov_mat_test, acgt_freq_test);
    return data_matrix_train,
           data_matrix_bg_train,
           data_bg_prob_train,
           acgt_freq_train,
           markov_mat_train,
           data_matrix_test,
           data_matrix_bg_test,
           data_bg_prob_test,
           acgt_freq_test,
           markov_mat_test,
           length(dna_read_train),
           length(dna_read_test),
           train_set_inds,
           test_set_inds,
           labels_train,
           labels_test
end
"""
    get_data_matrices(dna_read; k_train=1, k_test=2, FloatType=float_type,
                      train_test_split_ratio=0.85, shuffle=true)

Split `dna_read` into train/test, one-hot encode both the real sequences and
their k-mer-shuffled backgrounds, and estimate first-order Markov background
statistics from the shuffled reads. Returns the 14-tuple consumed by the
`FASTA_DNA` constructors (matrices, background probabilities, frequencies,
Markov matrices, set sizes, and index sets).
"""
function get_data_matrices(dna_read;
                           k_train=1, k_test=2,
                           FloatType=float_type,
                           train_test_split_ratio=0.85,
                           shuffle=true)
    # set train_test_split_ratio = 1.0 if no test set is needed
    train_set_inds, test_set_inds = get_train_test_inds(dna_read, train_test_split_ratio, shuffle)
    # println(train_set_inds)
    dna_read_train = @view dna_read[train_set_inds]
    dna_read_test = @view dna_read[test_set_inds]
    shuffled_dna_read_train = seq_shuffle.(dna_read_train; k=k_train);
    data_matrix_train = data_2_dummy(dna_read_train; F=FloatType);
    data_matrix_bg_train = data_2_dummy(shuffled_dna_read_train; F=FloatType);
    shuffled_dna_read_test = seq_shuffle.(dna_read_test; k=k_test);
    data_matrix_test = data_2_dummy(dna_read_test; F=FloatType);
    data_matrix_bg_test = data_2_dummy(shuffled_dna_read_test; F=FloatType);
    # estimate the Markov background (order 1)
    acgt_freq_train, markov_mat_train = est_1st_order_markov_bg(shuffled_dna_read_train; F=FloatType);
    acgt_freq_test, markov_mat_test = est_1st_order_markov_bg(shuffled_dna_read_test; F=FloatType);
    data_bg_prob_train = SeqShuffle.assign_bg_prob(shuffled_dna_read_train, markov_mat_train, acgt_freq_train);
    data_bg_prob_test = SeqShuffle.assign_bg_prob(shuffled_dna_read_test, markov_mat_test, acgt_freq_test);
    return data_matrix_train,
           data_matrix_bg_train,
           data_bg_prob_train,
           acgt_freq_train,
           markov_mat_train,
           data_matrix_test,
           data_matrix_bg_test,
           data_bg_prob_test,
           acgt_freq_test,
           markov_mat_test,
           length(dna_read_train),
           length(dna_read_test),
           train_set_inds,
           test_set_inds
end
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 261 | const grey_tag_front = "<p style=\"color:grey\">"
const grey_tag_back = "</p>"
const logo_olap_folder_name = "logos_olap"
const logo_no_olap_folder_name = "logos_no_olap"
const pics_olap_folder_name = "pics_olap"
const pics_no_olap_folder_name = "pics_no_olap"
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2595 | make_grey(s::String) = grey_tag_front*s*grey_tag_back
get_folder_names(target_folder::String) =
target_folder*"/"*logo_olap_folder_name,
target_folder*"/"*logo_no_olap_folder_name,
target_folder*"/"*pics_olap_folder_name,
target_folder*"/"*pics_no_olap_folder_name
function make_folder_paths(folders::Vector{String})
for folder in folders
!isdir(folder) && mkpath(folder)
end
end
function get_rounded_pval(pval::Real, low_pval)
str = "$pval"; s = nothing;
if !occursin("e-", str)
s = string(round(pval, sigdigits=3));
else
q = split(str, "e-");
q1len = length(q[1]);
s = join([q[1][1:min(q1len, 4)], q[2]], "e-");
end
return !low_pval ? make_grey(s) : s;
end
function save_pfms_as_transfac(logo_folder::String, cmats, sort_perm::Vector{Int}, numbers)
pfms = countmat2pfm.(cmats)
@floop for (i,ind) in zip(numbers, sort_perm)
this_pwm_size = "medium"
io = open(logo_folder*"/d$i.transfac", "w")
println(io, "ID\t")
println(io, "XX\t")
println(io, "BF\t")
println(io, "XX\t")
println(io, "P0\tA\tC\tG\tT")
q = Int.(floor.(pfms[ind] .* 100)); # make it a count matrix
for j = 1:size(pfms[ind],2)
cur_rows = j < 10 ? string(Int(floor(j/10)))*"$j" : string(j);
println(io, cur_rows*"\t$(q[1,j])\t$(q[2,j])\t$(q[3,j])\t$(q[4,j])")
end
println(io, "XX\t")
close(io)
Base.run(`weblogo -D transfac -f $(logo_folder)/d$(i).transfac -n 150 --number-fontsize 17 --errorbars NO -F png --fineprint " " --resolution 96 -s $this_pwm_size --fontsize 24 --small-fontsize 18 --color-scheme classic -o $(logo_folder)/d$(i).png`);
# do it for the reverse complement as well
io = open(logo_folder*"/d$(i)_c.transfac", "w")
println(io, "ID\t")
println(io, "XX\t")
println(io, "BF\t")
println(io, "XX\t")
println(io, "P0\tA\tC\tG\tT")
q = Int.(floor.(reverse(pfms[ind]) .* 100));
for j = 1:size(pfms[ind],2)
cur_rows = j < 10 ? string(Int(floor(j/10)))*"$j" : string(j);
println(io, cur_rows*"\t$(q[1,j])\t$(q[2,j])\t$(q[3,j])\t$(q[4,j])")
end
println(io, "XX\t")
close(io)
Base.run(`weblogo -D transfac -f $(logo_folder)/d$(i)_c.transfac -n 150 --number-fontsize 17 --errorbars NO -F png --fineprint " " --resolution 96 -s $this_pwm_size --fontsize 24 --small-fontsize 18 --color-scheme classic -o $(logo_folder)/d$(i)_c.png`);
end
end | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 3854 | const html_template_no_olap=mt"""<!DOCTYPE html>
<html>
<meta charset="utf-8">
<head>
<title></title>
<style>
abbr[title] {
text-decoration: none;
}
table, td {
border-collapse: collapse;
margin: 15px 15px;
padding: 5px 5px;
table-layout: fixed;
min-width: 85px;
}
.top_row {
font-weight: bold;
color: #808080;
}
thead,tfoot {
font-weight: bold;
background-color: #333;
color:white;
}
.info {
background-color: #E2E2E2;
margin:5px;
padding:5px;
}
</style>
<script id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/[email protected]/es5/tex-mml-chtml.js">
</script>
</head>
<body>
<a href="summary.html">soft cluster representation</a> | hard cluster representation
<div style="display:flex;">
<div style="float:left; margin:25px; border:1px solid black; max-width:3500px; padding:10px;" >
Number of sequences: {{:num_seq}} <br>
<table>
<thead>
<tr>
<th colspan="100%">
Discovered motifs
</th>
</tr>
</thead>
<tbody>
<tr class="top_row">
<td style="text-align: center"><abbr title="A label assigned to each discovered motif">Label</abbr></td>
<td style="text-align: center"><abbr title="Significance of the enrichment of each motif. Done via fisher exact test.">
P-value</abbr></td>
<td style="text-align: center"><abbr title="An estimate of the number of instances in the dataset"># instances</abbr></td>
<td style="text-align: center"><abbr title="Position weight matrix">Logo</abbr></td>
</tr>
{{#:DF}}
<tr>
<td style="text-align:center"><a href="{{{:logo_folder}}}/{{:logo}}.transfac">{{:label}}</a></td>
<td>{{{:eval}}}</td>
<td style="text-align:center">{{{:counts}}}</td>
<td><img id="d_logo_{{:label}}" height=65 src="{{{:logo_folder}}}/{{:logo}}.png"><br>
<div id="d_orientation_{{:label}}"></div><br>
<button type="button" onclick="discovered_{{:label}}_changeToRC()">Reverse complement</button>
</td>
<td style="text-align:center">{{{:counts}}}</td>
<script type="text/javascript">
function discovered_{{:label}}_changeToRC() {
var image = document.getElementById("d_logo_{{:label}}");
if (image.src.match("_c")) {
image.src = "{{{:logo_folder}}}/{{:logo}}.png";
} else {
image.src = "{{{:logo_folder}}}/{{:logo}}_c.png";
}
var orientation = document.getElementById("d_orientation_{{:label}}");
if (orientation.innerHTML === ""){
orientation.innerHTML = "reverse-complement";
} else {
orientation.innerHTML = "";
}
}
</script>
</tr>
{{/:DF}}
</tbody>
</table>
<br><br>
</div>
</div>
</body>
</html>
""" | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 3817 | const html_template_olap=mt"""<!DOCTYPE html>
<html>
<meta charset="utf-8">
<head>
<title></title>
<style>
abbr[title] {
text-decoration: none;
}
table, td {
border-collapse: collapse;
margin: 15px 15px;
padding: 5px 5px;
table-layout: fixed;
min-width: 85px;
}
.top_row {
font-weight: bold;
color: #808080;
}
thead,tfoot {
font-weight: bold;
background-color: #333;
color:white;
}
.info {
background-color: #E2E2E2;
margin:5px;
padding:5px;
}
</style>
<script id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/[email protected]/es5/tex-mml-chtml.js">
</script>
</head>
<body>
soft cluster representation
<div style="display:flex;">
<div style="float:left; margin:25px; border:1px solid black; max-width:3500px; padding:10px;" >
Number of sequences: {{:num_seq}} <br>
<table>
<thead>
<tr>
<th colspan="100%">
Discovered motifs
</th>
</tr>
</thead>
<tbody>
<tr class="top_row">
<td style="text-align: center"><abbr title="A label assigned to each discovered motif">Label</abbr></td>
<td style="text-align: center"><abbr title="Significance of the enrichment of each motif. Done via fisher exact test.">
P-value</abbr></td>
<td style="text-align: center"><abbr title="An estimate of the number of instances in the dataset"># instances</abbr></td>
<td style="text-align: center"><abbr title="Position weight matrix">Logo</abbr></td>
</tr>
{{#:DF}}
<tr>
<td style="text-align:center"><a href="{{{:logo_folder}}}/{{:logo}}.transfac">{{:label}}</a></td>
<td>{{{:eval}}}</td>
<td style="text-align:center">{{{:counts}}}</td>
<td><img id="d_logo_{{:label}}" height=65 src="{{{:logo_folder}}}/{{:logo}}.png"><br>
<div id="d_orientation_{{:label}}"></div><br>
<button type="button" onclick="discovered_{{:label}}_changeToRC()">Reverse complement</button>
</td>
<script type="text/javascript">
function discovered_{{:label}}_changeToRC() {
var image = document.getElementById("d_logo_{{:label}}");
if (image.src.match("_c")) {
image.src = "{{{:logo_folder}}}/{{:logo}}.png";
} else {
image.src = "{{{:logo_folder}}}/{{:logo}}_c.png";
}
var orientation = document.getElementById("d_orientation_{{:label}}");
if (orientation.innerHTML === ""){
orientation.innerHTML = "reverse-complement";
} else {
orientation.innerHTML = "";
}
}
</script>
</tr>
{{/:DF}}
</tbody>
</table>
<br><br>
</div>
</div>
</body>
</html>
""" | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 2121 | function plot_position_overlap(ms, sort_perm, pics_olap_folder; width_factor=33)
olap_ratio_ij = get_overlap_ratio(ms)
olap_ratio_ij = olap_ratio_ij[sort_perm, sort_perm]
x_ticks_1 = [i for i in 1:(ms.num_motifs)]; x_ticks_2 = ["D$i" for i in 1:ms.num_motifs]
for i = 1:ms.num_motifs
mask = (1:ms.num_motifs .!= i);
x = collect(1:ms.num_motifs)[mask]
y = @view olap_ratio_ij[i,:][mask]
width = max(width_factor*ms.num_motifs, 400)
fig = Figure(resolution = (width, 400), fonts = (; regular= "sans"));
ax = Axis(fig[1, 1]; xticklabelrotation = pi/4, ylabel = "Jaccard index");
ax.xticks = (x_ticks_1[mask], x_ticks_2[mask])
ax.xticklabelsize = 18; ax.xlabelsize = 25;
ax.yticklabelsize = 18; ax.ylabelsize = 25;
ax.yticks = ([0.0, 0.25, 0.5, 0.75, 1.0], ["0.0", "0.25", "0.5", "0.75", "1.0"])
ylims!(ax, high=1.0)
barplot!(ax, x, y; strokewidth = 1, strokecolor = :black)
save(joinpath(pics_olap_folder, "olap_d$i.png"), fig, px_per_unit = 0.6)
end
end
"""
    print_cmat_at_folder(cmats, folder; gt=false)

Normalize each count matrix to a PFM, write it in TRANSFAC format, and
render a PNG logo via the external `weblogo` CLI. PNGs are prefixed `g`
when `gt=true` (ground truth) and `d` otherwise.
NOTE(review): the TRANSFAC file is always named `d\$i.transfac` even when
`gt=true`, so only the PNG name reflects the flag — confirm intended.
"""
function print_cmat_at_folder(cmats, folder; gt=false)
    char_ = gt ? "g" : "d"
    println("printing $(length(cmats)) count matrices at $folder")
    pfms = [cmats[i] ./ sum(cmats[i], dims=1) for i in 1:length(cmats)];
    logo_folder = folder
    for i in eachindex(pfms)
        io = open(logo_folder*"/d$i.transfac", "w")
        println(io, "ID\t")
        println(io, "XX\t")
        println(io, "BF\t")
        println(io, "XX\t")
        println(io, "P0\tA\tC\tG\tT")
        g = Int.(floor.(pfms[i] .* 1000)); # make it a count matrix
        for j = 1:size(pfms[i],2)
            # two-digit, zero-padded row label required by TRANSFAC
            cur_rows = j < 10 ? string(Int(floor(j/10)))*"$j" : string(j);
            println(io, cur_rows*"\t$(g[1,j])\t$(g[2,j])\t$(g[3,j])\t$(g[4,j])")
        end
        println(io, "XX\t")
        close(io)
        Base.run(`weblogo -D transfac -f $(logo_folder)/d$(i).transfac -n 40 --errorbars NO -F png --fineprint " " --resolution 72 -s large --fontsize 16 --color-scheme classic -o $(logo_folder)/$(char_)$(i).png`);
    end
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 3441 | function pvec_from_test_data(ms, data; no_olap=false)
positions_test, scores_test, use_comp_test = gpu_scan(ms, data; bg=false, test=true)
positions_test_bg, scores_test_bg, use_comp_bg_test = gpu_scan(ms, data; bg=true, test=true)
filter_position_by_best_thresh!.(positions_test, scores_test, use_comp_test, ms.score_thresh)
filter_position_by_best_thresh!.(positions_test_bg, scores_test_bg, use_comp_bg_test, ms.score_thresh)
# make it non-overlap
if no_olap
positions_test, scores_test, use_comp_test =
__non_overlap_scan__!(positions_test, scores_test, use_comp_test, ms.lens, data.N_test)
end
union_test_pos = get_union_ranges.(positions_test, ms.lens)
union_test_pos_bg = get_union_ranges.(positions_test_bg, ms.lens)
total_test_pos = get_total_occupied_positions.(union_test_pos)
total_test_pos_bg = get_total_occupied_positions.(union_test_pos_bg)
pvec = fisher_pvec(total_test_pos, total_test_pos_bg, data; test=true)
uniq_active_counts_test = active_counts_position(get_uniq_pos.(positions_test))
return pvec, uniq_active_counts_test
end
function modified_sort_perm(pvec, ms, alpha_fisher)
    # Pair each motif's length with its p-value, split into significant and
    # non-significant groups, sort each group so longer motifs come first
    # (and, among equal lengths, smaller p-values first), then concatenate
    # with the significant group on top.
    pairs_lp = collect(zip(ms.lens, pvec))
    sig_inds = findall(t -> t[2] < alpha_fisher, pairs_lp)
    insig_inds = findall(t -> t[2] >= alpha_fisher, pairs_lp)
    # reverse!(sortperm(...)) yields descending order by (length, 1 - pvalue)
    order_desc(group) = reverse!(sortperm(group, by = t -> (t[1], 1.0 - t[2])))
    sig_order = order_desc(pairs_lp[sig_inds])
    insig_order = order_desc(pairs_lp[insig_inds])
    return vcat(sig_inds[sig_order], insig_inds[insig_order])
end
function get_pvec_and_related_info(ms, data, alpha_fisher; no_olap=false)
    # p-values on held-out (test) data plus per-motif active counts
    pvec, uniq_active_counts_test = pvec_from_test_data(ms, data; no_olap=no_olap)
    significant = pvec .< alpha_fisher
    # display order: significant first, each group sorted by length/p-value
    perm = modified_sort_perm(pvec, ms, alpha_fisher)
    pvalues = get_rounded_pval.(pvec[perm], significant[perm])
    return pvalues, perm, uniq_active_counts_test
end
function get_pvec_and_related_info2(ms, data, alpha_fisher, order_for_display::Vector{Int}; no_olap=false)
    pvec, uniq_active_counts_test = pvec_from_test_data(ms, data; no_olap=no_olap)
    displayed_p = pvec[order_for_display]
    # keep the caller's ordering within each group, but float the
    # significant motifs to the top of the page
    on_top = @view order_for_display[displayed_p .< alpha_fisher]
    on_bottom = @view order_for_display[displayed_p .≥ alpha_fisher]
    indices2display = vcat(on_top, on_bottom)
    pvec2display = pvec[indices2display]
    pvalues = get_rounded_pval.(pvec2display, pvec2display .< alpha_fisher)
    return pvalues, indices2display, uniq_active_counts_test
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
function render_main_summary_page_olap(labels,
                                       pvalues,
                                       logos,
                                       target_folder,
                                       target_folder_olap_pwms,
                                       target_folder_pics,
                                       valid_alphas,
                                       activate_counts,
                                       num_seqs
                                       )
    # Assemble the per-motif table and fill the Mustache HTML template for
    # the overlapping-scan summary; write it to <target_folder>/summary.html.
    df = DataFrame(label=labels, eval=pvalues, logo=logos, counts=activate_counts)
    if isempty(valid_alphas)
        rendered = Mustache.render(html_template_olap,
            target_folder=target_folder, num_seq=num_seqs,
            pic_folder=target_folder_pics,
            logo_folder=target_folder_olap_pwms, DF=df)
    else
        rendered = Mustache.render(html_template_olap,
            target_folder=target_folder,
            target_folder_pics=target_folder_pics,
            valid_alphas="$valid_alphas",
            num_alphas="$(length(valid_alphas)-1)",
            min_alpha="$(valid_alphas[1])",
            num_seq=num_seqs,
            pic_folder=target_folder_pics,
            logo_folder=target_folder_olap_pwms, DF=df)
    end
    open(target_folder*"/summary.html", "w") do io
        print(io, rendered)
    end
    return
end
function render_main_summary_page_no_olap(labels, 
                               pvalues, 
                               logos, 
                               target_folder,
                               target_folder_no_olap_pwms,
                               valid_alphas,
                               activate_counts,
                               num_seqs
                               )
    # Fill the Mustache HTML template for the non-overlapping-scan summary
    # and write it to <target_folder>/summary_no_olap.html.
    df = DataFrame(label=labels, eval=pvalues, logo=logos, counts=activate_counts);
    if length(valid_alphas) > 0
        out = Mustache.render(html_template_no_olap,
            target_folder=target_folder,
            valid_alphas="$valid_alphas",
            num_alphas="$(length(valid_alphas)-1)",
            min_alpha="$(valid_alphas[1])",
            num_seq=num_seqs,
            # BUG FIX: previously passed `logo_folder_name`, a name not
            # defined in this function; use the folder parameter, matching
            # the else-branch and the olap variant.
            logo_folder=target_folder_no_olap_pwms, DF=df);
    else
        out = Mustache.render(html_template_no_olap,
            target_folder=target_folder, num_seq=num_seqs,
            logo_folder=target_folder_no_olap_pwms, DF=df);
    end
    io = open(target_folder*"/summary_no_olap.html", "w")
    print(io, out);
    close(io)
end
# Render the full HTML report: scan foreground/background with the learned
# motifs, compute Fisher p-values on held-out data, save the PWMs as
# TRANSFAC files, and emit the summary page.
function render_result!(target_folder::String, ms, data, bg; alpha_fisher = 1e-5)
    logo_olap_folder, logo_no_olap_folder, pics_olap_folder, pics_no_olap_folder = get_folder_names(target_folder);
    make_folder_paths([target_folder, logo_olap_folder, logo_no_olap_folder, pics_olap_folder, pics_no_olap_folder]);
    # grouping/order in which motifs will appear on the page
    order_for_display = obtain_groupings_for_display1(ms);
    ############## overlap version render ##############
    @info "Scanning the foreground..."
    scan_w_gpu!(ms, data);
    @info "Scanning the shuffled background..."
    scan_w_gpu!(ms, data; bg=true);
    @time filter_positions_scores_usecomp!(ms, data, bg);
    active_counts, _ = 
        get_uniq_counts(ms)
    @info "Calculating p-values..."
    pvalues, sort_perm_1, uniq_active_counts_test = 
        get_pvec_and_related_info2(ms, data, alpha_fisher, order_for_display)
    labels = ["D$j" for j = 1:ms.num_motifs];
    logo_names = ["d$(j)" for j = 1:ms.num_motifs];
    # @info "plot the overlap..."
    @info "save the PWMs..."
    save_pfms_as_transfac(logo_olap_folder, ms.cmats, sort_perm_1, collect(1:ms.num_motifs));
    # training counts + held-out counts, reordered for display
    activate_counts_total = (Int.(active_counts .+ uniq_active_counts_test))[sort_perm_1]
    valid_alphas = Int[]; # TODO: verify the purpose of this and remove it if necessary
    # NOTE(review): `logo_olap_folder_name` / `pics_olap_folder_name` are not
    # defined in this function; presumably module-level constants — confirm.
    render_main_summary_page_olap(labels, 
                                pvalues, 
                                logo_names, 
                                target_folder, 
                                logo_olap_folder_name,
                                pics_olap_folder_name,
                                valid_alphas, 
                                activate_counts_total,
                                data.N+data.N_test
                                );
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | code | 85 | using MOTIFs
using Test
@testset "MOTIFs.jl" begin
    # Write your tests here.
    # NOTE(review): this suite is currently empty.
end
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | docs | 7783 | ## Finding Motifs Using DNA Images Derived From Sparse Representations
<!-- [](https://kchu25.github.io/MOTIFs.jl/stable/)
[](https://kchu25.github.io/MOTIFs.jl/dev/)
[](https://github.com/kchu25/MOTIFs.jl/actions/workflows/CI.yml?query=branch%3Amain) -->
General purpose motif discovery package that includes the discovery of flexible (long or gapped) motifs.
This code repository corresponds to the paper [Finding Motifs Using DNA Images Derived From Sparse Representations](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email), which has been published in Oxford Bioinformatics.
## Table of contents
- [Motivation](#Motivation)
- [Installation](#Installation)
- [Usage](#Usage)
- [Software requirements](#Software-requirements)
- [Hardware requirements](#Hardware-requirements)
- [Adjustable Hyperparameters](#Adjustable-Hyperparameters)
- [Interpret the results](#Interpret-the-results)
- [Cite this work](#Cite-this-work)
- [Contact](#Contact)
## Motivation
Traditional methods such as [STREME](https://meme-suite.org/meme/doc/streme.html) and [HOMER](http://homer.ucsd.edu/homer/motif/) excel at efficiently finding the primary motifs of a transcription factor. This raises the question: why do we require an additional motif discovery method?
Because there may be more patterns in the datasets that aren't fully captured. This is especially evident for context-dependent binding sites, such as C2H2 zinc finger, and cooperative binding patterns observed in in-vivo datasets from ChIP-Seq.
Our work reveals that over half of the ChIP-Seq datasets selected from the [JASPAR 2022](https://jaspar.genereg.net/) database contain transposable elements that overlap the primary binding sites. For instance, see [NFE2L2](https://en.wikipedia.org/wiki/NFE2L2), [YY1](https://en.wikipedia.org/wiki/YY1), [STAT1](https://en.wikipedia.org/wiki/STAT1), [SRF](https://en.wikipedia.org/wiki/Serum_response_factor), [AR](https://en.wikipedia.org/wiki/Androgen_receptor) ([Manuscript Figure 4](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)):

These long patterns present challenges for traditional k-mer-based methods due to their exponential space complexity.
Furthermore, many datasets exhibit a large presence of gapped motifs. For example, we found that ChIP-Seq datasets from both [JASPAR](https://jaspar.genereg.net/) and [Factorbook](https://www.factorbook.org/) often contains gapped motifs ([Manuscript Figure 6](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)):

and the spacers that characterized the gapped motifs [can be widely varied (Supplementary Material Figure 2)](./imgs/gaps.png).
Last, there are cooperative binding patterns, e.g., ([Manuscript Figure 5](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)):

for which we see consecutive occurrences of [Oct4](https://en.wikipedia.org/wiki/Oct-4) and cooccurrence of [Oct4](https://en.wikipedia.org/wiki/Oct-4) and [Zic3](https://en.wikipedia.org/wiki/ZIC3), in addition to the Oct4-Sox2 motif. The presence of gapped motifs and cooperative binding patterns presents challenges for k-mer-based methods as well, as these methods are primarily designed to detect ungapped motifs.
## Installation
To install MOTIFs.jl use Julia's package manager:
```
pkg> add MOTIFs
```
## Usage
In Julia:
````julia
using MOTIFs
# Do motif discovery on a set of DNA sequences in a fasta file,
# where the `<fasta-path>` and `<output-folder-path>` are the
# absolute filepaths as strings.
discover_motifs(<fasta-path>, <output-folder-path>)
# for example
discover_motifs("home/shane/mydata/fasta.fa",
"home/shane/mydata/out/")
````
## Software requirements
This package currently requires [Weblogo](http://weblogo.threeplusone.com/manual.html#download) for PWM plotting. Install Weblogo by running the following command with python3 and pip3:
```bash
pip3 install weblogo
```
## Hardware requirements
Currently, a GPU is required for this package as it utilizes [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) to accelerate certain computations. However, I plan to implement a CPU extension in the future.
## Adjustable Hyperparameters
````julia
# The user can adjust the number of epochs for training the network.
discover_motifs(<fasta-path>, <output-folder-path>; num_epochs=10)
````
## Interpret the results
### Summary page
Once the motif discovery process is complete, a summary.html page is generated in the output folder, providing a comprehensive overview of the results.
For instance, here is an example result page showcasing data from the [SP1 transcription factor from JASPAR](https://jaspar.genereg.net/matrix/MA0079.3/):
> 
The top of the result page has
- **Number of sequences**: The total number of DNA sequences in the dataset.
- **Label**: A label assigned for each discovered motifs.
* Each label is hyperlinked to a text file in TRANSFAC format that can be parsed.
- **P-value**: The statistical significance of the discovered motif using Fisher exact test ([Manuscript section 2.7.2](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)).
- **\# instances**: An estimate of the number of occurrences in the dataset ([Manuscript section 2.7.3](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)).
- **Logo**: Position weight matricies.
* Press the *reverse complement* button to view the logo in alternative orientation.
Note that in in-vivo datasets, especially for zinc-finger proteins, a large number of motifs can be observed, often characterized by variable spacings in their binding sites.
> 
### Statistically insignificant motifs
Some of the motifs shown here have their p-values in grey, indicating that they have a relatively high p-value (p > 0.01, Fisher exact test). This statistical result simply suggests that these motifs are not significantly enriched relative to the shuffled DNA strings ([Manuscript section 2.7.2](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btad378/7192989?utm_source=advanceaccess&utm_campaign=bioinformatics&utm_medium=email)); it does not imply that these motifs do not exist in the dataset.
> 
## Cite this work
You can cite this work using the following BibTex entry:
```
@article{chu2023finding,
title={Finding Motifs Using DNA Images Derived From Sparse Representations},
author={Chu, Shane K and Stormo, Gary D},
journal={Bioinformatics},
pages={btad378},
year={2023},
publisher={Oxford University Press}
}
```
## Contact
If you have any questions or suggestions regarding the usage or source code, please feel free to reach out to me at <[email protected]>. | MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.2 | f73e0d7e0a3bc5099bf62383e699916bc39ec773 | docs | 164 | ```@meta
CurrentModule = MOTIFs
```
# MOTIFs
Documentation for [MOTIFs](https://github.com/kchu25/MOTIFs.jl).
```@index
```
```@autodocs
Modules = [MOTIFs]
```
| MOTIFs | https://github.com/kchu25/MOTIFs.jl.git |
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | code | 2434 | using SimulatedAnnealing
using Random
"""
Tour
Struct representing a tour visiting cities.
Fields
------
`order::Vector{Int}` order in which each city is visited.
`D::Matrix` Matrix of distances between each pair of cities. This is stored
to be able to pass it down to new candidates without the need of
recomputing it.
"""
struct Tour
    order::Vector{Int}    # visiting order: a permutation of 1:ncities
    D::Matrix{Float64}    # pairwise city distances, shared by all candidates
end
"""
Tour(D::Matrix)
Create a tour with random ordering of the cities.
"""
Tour(D::Matrix) = Tour(randcycle(size(D, 1)), D)  # random cyclic permutation of the cities
"""
energy(tour::Tour)
Compute the energy of a `Tour` by summing the distances of successive cities
in the tour.
"""
function energy(tour::Tour)
    # Total tour length: sum of distances between consecutive cities,
    # wrapping from the last city back to the first.
    route = tour.order
    ncities = length(route)
    total = 0.0
    for idx in eachindex(route)
        total += tour.D[route[idx], route[mod1(idx + 1, ncities)]]
    end
    return total
end
"""
propose_candidate(tour::Tour)
Propose a new candidate `Tour` by reversing the order of cities between two
random indices.
"""
function propose_candidate(tour::Tour)
    # Classic 2-opt move: reverse the segment order[i1:i2] (inclusive).
    n = length(tour.order)
    order = copy(tour.order)
    i1 = rand(1:n)
    i2 = mod1(rand(1:n-1) + i1, n)  # guarantees i2 != i1
    # Make sure that i1 < i2
    if i1 > i2
        i1, i2 = i2, i1
    end
    if i1 == 1 && i2 == n
        # Reversing the whole tour yields the same cycle: zero energy change.
        return Tour(order, tour.D), 0.0
    end
    city_a1 = order[i1]
    city_a2 = order[i2]
    city_b1 = order[mod1(i1 - 1, n)]  # city preceding the reversed segment
    city_b2 = order[mod1(i2 + 1, n)]  # city following the reversed segment
    # O(1) energy delta: a 2-opt move only changes the two boundary edges.
    # BUG FIX: this delta was previously discarded and recomputed with a full
    # O(n) `energy` evaluation; the incremental form is equivalent because
    # the distance matrix is symmetric (Euclidean distances here).
    dE = (tour.D[city_a1, city_b2] + tour.D[city_a2, city_b1]
          - tour.D[city_a1, city_b1] - tour.D[city_a2, city_b2])
    order[i1:i2] = reverse(order[i1:i2])
    return Tour(order, tour.D), dE
end
# Twelve cities evenly placed on the perimeter of a 3x3 square
# (row 1 = x coordinates, row 2 = y coordinates).
cities = [
    [0.0 1.0 2.0 3.0 3.0 3.0 3.0 2.0 1.0 0.0 0.0 0.0] ;
    [0.0 0.0 0.0 0.0 1.0 2.0 3.0 3.0 3.0 3.0 2.0 1.0]
]

# Distance matrix (Euclidean), common for all Tour objects
D = sqrt.((cities[1, :] .- cities[1, :]').^2 +
          (cities[2, :] .- cities[2, :]').^2)
n = size(D, 1)

# Initial sampling of the configuration space
# (the sample also determines the initial temperature)
samples = [Tour(D) for _ in 1:1000]

# Run the algorithm with default parameters
prob = AnnealingOptimization(energy, propose_candidate, samples, n*(n - 1))
best_tour, tour_length = simulated_annealing(prob)
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | code | 5534 | module SimulatedAnnealing
using Parameters
using Statistics
export
AnnealingOptimization, AnnealingState,
OVGCriterion, SSVCriterion,
ConstantDecrementRule, AVLDecrementRule,
simulated_annealing
"""
AnnealingOptimization
Object representing an optimization done using a Simulated Annealing
algorithm.
Implement single stage homogeneous Simulated Annealing as (very well) described
in
Varanelli, 1996, *On the acceleration of simulated annealing*, Chapter 2.
Notation and naming convention is consistent with that work.
A configuration of the system being optimized is represented by the parametric
type `C`. The configurations to optimize can be of any type.
The object is iterable to allow access to the internal state during the
optimization process.
Parameters
==========
energy: callable(::C)::T
Callable returning the energy of a configuration.
propose_candiate: callable(::C)::Tuple{::C, ::T}
Callable returning a new configuration together based on an existing one
togetehr with the energy difference between the two.
stop_criterion: callable(::AnnealingState)::Bool
Callable returning whether the annealing must stop.
Default is Otten-van Ginneken adaptive stop criterion.
decrement_rule: callable(::AneealingState)::T
Callable returning the new temperature to use at the end of a Markov step.
Default is Aarts and van Laarhoven decrement rule.
initial_temperature: Real
initial_configuration: C
neighborhood_size: integer
Number of states that can be accessed from a given one.
"""
struct AnnealingOptimization{T, C, EFUNC, PFUNC, SFUNC, TFUNC}
energy::EFUNC
propose_candidate::PFUNC
stop_criterion::SFUNC
decrement_rule::TFUNC
initial_temperature::T
initial_configuration::C
neighborhood_size::Int
end
# Convenience constructor using the default stop criterion (Sechen &
# Sangiovanni-Vincentelli) and decrement rule (Aarts & van Laarhoven).
function AnnealingOptimization(
        energy,
        propose_candidate,
        initial_configuration,
        initial_temperature::Real,
        neighborhood_size::Integer)
    
    return AnnealingOptimization(
        energy,
        propose_candidate,
        SSVCriterion(),
        AVLDecrementRule(),
        initial_temperature,
        initial_configuration,
        neighborhood_size)
end
"""
AnnealingOptimization(energy, propose_candidate, samples, neighborhood_size)
Given samples of the configuration space, determine the initial temperature.
The first sample is used as initial configuration.
Initial temperature is the standard deviation of the energy in the sample,
according to White criterion (see eq. 2.36 in Varanelli).
"""
function AnnealingOptimization(energy, propose_candidate, samples::Vector,
neighborhood_size::Integer)
energies = energy.(samples)
return AnnealingOptimization(
energy,
propose_candidate,
samples[1],
std(energies),
neighborhood_size)
end
"""
AnnealingState
Struct representing the sate of the Simulated Annealing algorithm at the end
of a Markov chain step.
Fields
======
temperature
current_configuration
current_energy
bsf_configuration
Best configuration found so far.
bsf_energy
Energy of the best configuratin found so far.
energies
Array of all energies encountered during the last Markov chain step.
"""
struct AnnealingState{T, C}
temperature::T
current_configuration::C
current_energy::T
bsf_configuration::C
bsf_energy::T
energies::Vector{T}
end
function initial_state(search::AnnealingOptimization{T}) where T
    # Pre-chain state: current == best-so-far == initial configuration,
    # with an empty energy history (marks that no Markov chain has run yet).
    c0 = search.initial_configuration
    E0 = search.energy(c0)
    return AnnealingState(search.initial_temperature, c0, E0, c0, E0, zeros(T, 0))
end
include("decrement_rule.jl")
include("stop_criterion.jl")
# Iteration protocol: iterating an optimization yields one AnnealingState per
# Markov chain; the number of chains is not known in advance.
Base.eltype(::Type{A}) where {T, C, A <: AnnealingOptimization{T, C}} = AnnealingState{T, C}
Base.IteratorSize(::Type{A}) where {A <: AnnealingOptimization} = Base.SizeUnknown()
function Base.iterate(
        search::AnnealingOptimization{T},
        state=initial_state(search)) where T

    # Never stop before the first Markov chain: the initial state is
    # recognized by its empty `energies` vector.
    length(state.energies) > 0 && search.stop_criterion(state) && return nothing

    # Initialize all internal loop variables
    configuration = state.current_configuration
    E = state.current_energy
    bsf = state.bsf_configuration
    bsf_energy = state.bsf_energy
    energies = zeros(T, search.neighborhood_size)

    # Markov chain at constant temperature (Metropolis acceptance rule)
    for k in 1:search.neighborhood_size
        candidate, dE = search.propose_candidate(configuration)
        if dE < 0 || rand() < exp(-dE/state.temperature)
            configuration = candidate
            E += dE
            # BUG FIX: compare against the *running* best-so-far energy, not
            # the value frozen in `state`; otherwise a later, worse
            # improvement within this chain could overwrite a better
            # configuration found earlier.
            if E < bsf_energy
                bsf = configuration
                bsf_energy = E
            end
        end
        energies[k] = E
    end

    new_state = AnnealingState(search.decrement_rule(state),
                               configuration, E,
                               bsf, bsf_energy,
                               energies)
    return new_state, new_state
end
function simulated_annealing(search::AnnealingOptimization)
    # Drive the iterator to exhaustion, then report the best configuration
    # recorded across all Markov chains.
    final_state = nothing
    for chain_state in search
        final_state = chain_state
    end
    return final_state.bsf_configuration, final_state.bsf_energy
end
end | SimulatedAnnealing | https://github.com/Kolaru/SimulatedAnnealing.jl.git |
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | code | 863 | """
ConstantDecrementRule
Fixed temperature decrement rule. The temperature is decrease by a constant
factor after each Markov chain.
Parameters
==========
factor: the constant factor multiplying the temperature.
"""
@with_kw struct ConstantDecrementRule{T}
    factor::T = 0.9  # multiplicative cooling factor
end

function (decrement_rule::ConstantDecrementRule)(state::AnnealingState)
    # Geometric cooling schedule: T_{k+1} = factor * T_k.
    return decrement_rule.factor * state.temperature
end
"""
AVLDecrementRule
Aarts and van Laarhoven temperature decrement rule.
Parameters
==========
distance_parameter
Reference
=========
Varanelli, eq. 2.42.
"""
@with_kw struct AVLDecrementRule{T}
    distance_parameter::T = 0.085  # δ in Varanelli eq. 2.42; smaller δ gives slower cooling
end
function (decrement_rule::AVLDecrementRule)(state::AnnealingState)
    # Aarts & van Laarhoven rule: T' = T / (1 + T·ln(1+δ) / (3σ)),
    # where σ is the energy standard deviation over the last Markov chain.
    temp = state.temperature
    sigma = std(state.energies)
    delta = decrement_rule.distance_parameter
    return temp / (1 + temp * log(1 + delta) / (3 * sigma))
end
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | code | 1303 | """
OVGCriterion
Otten-van Ginneken adaptive stop criterion.
Parameters
==========
threshold
Reference
=========
Varanelli, eq. 2.47.
"""
@with_kw struct OVGCriterion{T}
    energy_reference::T   # reference energy the chain mean is compared against
    threshold::T = 0.001  # stop threshold (Varanelli eq. 2.47)
end
function (criterion::OVGCriterion)(state::AnnealingState)
    # Otten-van Ginneken: stop when σ²/(T·Δμ) drops below the threshold,
    # where Δμ is the gap between the reference energy and the chain mean.
    sigma = std(state.energies)
    sigma == 0 && return true  # frozen chain: no energy fluctuation left
    gap = criterion.energy_reference - mean(state.energies)
    gap < 0 && return false    # measure would be +Inf: never below threshold
    return sigma^2 / (state.temperature * gap) < criterion.threshold
end
"""
SSVCriterion
Sechen and Sangiovanni-Vincentelli stop criterion. Stops if the same energy
appears a given amount of time at the end of Markov chains.
Parameters
==========
maximum_repeat: maximum number of time the same BSF energy can be repeated.
Reference
=========
Varanelli, p. 26.
"""
@with_kw mutable struct SSVCriterion{T}
    maximum_repeat::Int = 3  # stop after this many chains with identical BSF energy
    last_energy::T = Inf     # BSF energy observed at the previous chain
    repeat_count::Int = 1    # consecutive chains ending at `last_energy`
end
function (crit::SSVCriterion)(state::AnnealingState)
    # Count how many consecutive Markov chains ended with the same
    # best-so-far energy; stop once it repeats `maximum_repeat` times.
    if state.bsf_energy != crit.last_energy
        crit.last_energy = state.bsf_energy
        crit.repeat_count = 1
    else
        crit.repeat_count += 1
    end
    return crit.repeat_count == crit.maximum_repeat
end
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | code | 1013 | using SimulatedAnnealing
using Statistics
using Test
# End-to-end check: the 12 example cities lie on the perimeter of a 3x3
# square, so the optimal tour length is 12.
@testset "Travelling salesman" begin
    include("../example/travelling_salesman.jl")

    energies = energy.(samples)

    # Exercise every combination of decrement rule and stop criterion.
    decrement_rules = Dict(
        "constant" => ConstantDecrementRule,
        "AVL" => AVLDecrementRule)

    stop_criteria = Dict(
        "OVG" => () -> OVGCriterion(energy_reference=mean(energies)),
        "SSV" => SSVCriterion)

    for (rulename, rule) in decrement_rules
        for (critname, criterion) in stop_criteria
            @testset "Decrement: $rulename, Stop: $critname" begin
                sa_prob = AnnealingOptimization(
                    energy,
                    propose_candidate,
                    criterion(),
                    rule(),
                    std(energies),
                    samples[1],
                    n*(n - 1))

                best_tour, tour_length = simulated_annealing(sa_prob)
                @test tour_length == 12
            end
        end
    end
end
|
[
"MIT"
] | 0.1.0 | 60426076ff7a89beb4dff7bf0e16e221f1696727 | docs | 2535 | # SimulatedAnnealing.jl
This package aims to provide a simple and flexible implementation of the simulated annealing algorithm in Julia.
The implementation is fully based on the following PhD thesis:
> Varanelli, 1996, *On the acceleration of simulated annealing*
Currently, nothing fancy is done and only the basic homogenous algorithm as describe in Chapter 2 of this work is implemented (in particular the whole part about accelerating simulated annealing is ignored).
## Basics
Simulated annealing try to minimize the energy of a configuration by successively probing neighboring configurations. In this package, no restriction is imposed on the type of configuration being optimize.
Before starting the algorithm we need several things:
- An `energy` function, that takes a configuration as an argument and return its energy, the quantity that the algorithm minimizes.
- A `propose_candidate` function that takes a configuration and return a candidate configuration and the energy difference between the two. This is the main function used in the algorithm and the performance mostly depends on it. Returning the energy difference is required as it is often possible to make this computation very efficient compared to explicitly computing the energy of the two configurations and taking their difference. In fact if there is no efficient way of computing this difference, it may indicate simulated annealing is not the correct algorithm to use for the problem.
- A uniform sampling of the configuration space. This is used to determine the initial temperature as well as an energy reference for the stop criterion. Around 1000 samples should be enough.
- The size of the neighborhood of a configuration. This is needed to have sufficient sampling and thus ensure (in probability) convergence.
When all that is set, an optimization problem can be created and the algorithm can be run on it, yielding the best configuration and its energy.
```julia
sa_problem = AnnealingOptimization(energy, propose_candidate, samples, neighborhood_size)
best_configuration, best_energy = simulated_annealing(sa_problem)
```
Please refer to the example folder for a complete example.
## Customization
Various temperature decrement rules and stop criterion can be chosen. New ones can also be created. Please refer to the docstrings for more information, or open an issue if something is not clear (I am too lazy to write a full documentation before seeing if there is interest for the package ^^). | SimulatedAnnealing | https://github.com/Kolaru/SimulatedAnnealing.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | code | 358 | module CodecXz
export
XzCompressor,
XzCompressorStream,
XzDecompressor,
XzDecompressorStream
import TranscodingStreams:
TranscodingStreams,
TranscodingStream,
Memory,
Error,
initialize,
finalize,
splitkwargs
using XZ_jll
include("liblzma.jl")
include("compression.jl")
include("decompression.jl")
end # module
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | code | 2600 | # Compressor Codec
# ================
struct XzCompressor <: TranscodingStreams.Codec
    stream::LZMAStream  # liblzma stream state (owned; released in `finalize`)
    preset::UInt32      # compression level 0..9 passed to lzma_easy_encoder
    check::Cint         # integrity check type (LZMA_CHECK_*)
end
function Base.show(io::IO, codec::XzCompressor)
    # e.g. "XzCompressor(level=6, check=4)"
    print(io, summary(codec))
    print(io, "(level=", codec.preset, ", check=", codec.check, ")")
end
const DEFAULT_COMPRESSION_LEVEL = 6
const DEFAULT_CHECK = LZMA_CHECK_CRC64
"""
XzCompressor(;level=$(DEFAULT_COMPRESSION_LEVEL), check=LZMA_CHECK_CRC64)
Create an xz compression codec.
Arguments
---------
- `level`: compression level (0..9)
- `check`: integrity check type (`LZMA_CHECK_{NONE,CRC32,CRC64,SHA256}`)
"""
function XzCompressor(;level::Integer=DEFAULT_COMPRESSION_LEVEL, check::Cint=DEFAULT_CHECK)
    # Validate arguments up front; liblzma would reject them too, but
    # failing early gives a clearer error.
    0 ≤ level ≤ 9 ||
        throw(ArgumentError("compression level must be within 0..9"))
    check in (LZMA_CHECK_NONE, LZMA_CHECK_CRC32, LZMA_CHECK_CRC64, LZMA_CHECK_SHA256) ||
        throw(ArgumentError("invalid integrity check"))
    return XzCompressor(LZMAStream(), level, check)
end
const XzCompressorStream{S} = TranscodingStream{XzCompressor,S} where S<:IO
"""
XzCompressorStream(stream::IO; kwargs...)
Create an xz compression stream (see `XzCompressor` for `kwargs`).
"""
function XzCompressorStream(stream::IO; kwargs...)
    # Split keywords between the codec (:level/:check) and the stream wrapper.
    codec_kwargs, stream_kwargs = splitkwargs(kwargs, (:level, :check))
    return TranscodingStream(XzCompressor(;codec_kwargs...), stream; stream_kwargs...)
end
# Methods
# -------
function TranscodingStreams.initialize(codec::XzCompressor)
    # Set up the liblzma encoder; throws on failure.
    status = easy_encoder(codec.stream, codec.preset, codec.check)
    status == LZMA_OK || lzmaerror(codec.stream, status)
    return
end
function TranscodingStreams.finalize(codec::XzCompressor)
    # Release the native liblzma resources held by the stream.
    free(codec.stream)
end
function TranscodingStreams.startproc(codec::XzCompressor, mode::Symbol, error::Error)
    # (Re)initialize the encoder at the start of each transcoding session.
    status = easy_encoder(codec.stream, codec.preset, codec.check)
    if status != LZMA_OK
        error[] = ErrorException("xz error")
        return :error
    end
    return :ok
end
function TranscodingStreams.process(codec::XzCompressor, input::Memory, output::Memory, error::Error)
    stream = codec.stream
    # Wire the caller-provided buffers into the lzma stream.
    stream.next_in = input.ptr
    stream.avail_in = input.size
    stream.next_out = output.ptr
    stream.avail_out = output.size
    # An empty input buffer signals end of data: ask liblzma to finish.
    action = input.size > 0 ? LZMA_RUN : LZMA_FINISH
    ret = code(stream, action)
    consumed = Int(input.size - stream.avail_in)
    produced = Int(output.size - stream.avail_out)
    if ret == LZMA_OK
        return consumed, produced, :ok
    elseif ret == LZMA_STREAM_END
        return consumed, produced, :end
    end
    error[] = ErrorException(lzma_error_string(ret))
    return consumed, produced, :error
end
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | code | 2577 | # Decompressor Codec
# ==================
struct XzDecompressor <: TranscodingStreams.Codec
    stream::LZMAStream  # liblzma stream state (owned; released in `finalize`)
    # NOTE(review): abstract field type causes boxing; consider UInt64.
    memlimit::Integer   # decoder memory usage limit in bytes
    flags::UInt32       # decoder flags (e.g. LZMA_CONCATENATED)
end
function Base.show(io::IO, codec::XzDecompressor)
    # e.g. "XzDecompressor(memlimit=18446744073709551615, flags=8)"
    print(io, summary(codec), "(memlimit=$(codec.memlimit), flags=$(codec.flags))")
end
const DEFAULT_MEM_LIMIT = typemax(UInt64)
"""
XzDecompressor(;memlimit=$(DEFAULT_MEM_LIMIT), flags=LZMA_CONCATENATED)
Create an xz decompression codec.
Arguments
---------
- `memlimit`: memory usage limit as bytes
- `flags`: decoder flags
"""
function XzDecompressor(;memlimit::Integer=DEFAULT_MEM_LIMIT, flags::UInt32=LZMA_CONCATENATED)
    memlimit > 0 || throw(ArgumentError("memlimit must be positive"))
    # NOTE: flags are checked in liblzma
    return XzDecompressor(LZMAStream(), memlimit, flags)
end
const XzDecompressorStream{S} = TranscodingStream{XzDecompressor,S} where S<:IO
"""
XzDecompressorStream(stream::IO; kwargs...)
Create an xz decompression stream (see `XzDecompressor` for `kwargs`).
"""
function XzDecompressorStream(stream::IO; kwargs...)
    # Split keywords between the codec (:memlimit/:flags) and the stream wrapper.
    codec_kwargs, stream_kwargs = splitkwargs(kwargs, (:memlimit, :flags))
    return TranscodingStream(XzDecompressor(;codec_kwargs...), stream; stream_kwargs...)
end
# Methods
# -------
function TranscodingStreams.initialize(codec::XzDecompressor)
    # Set up the liblzma decoder; throws on failure (e.g. invalid flags).
    ret = stream_decoder(codec.stream, codec.memlimit, codec.flags)
    if ret != LZMA_OK
        lzmaerror(codec.stream, ret)
    end
    return
end
function TranscodingStreams.finalize(codec::XzDecompressor)
    # Release the native liblzma resources held by the stream.
    free(codec.stream)
end
function TranscodingStreams.startproc(codec::XzDecompressor, mode::Symbol, error::Error)
    # (Re)initialize the decoder at the start of each transcoding session.
    ret = stream_decoder(codec.stream, codec.memlimit, codec.flags)
    if ret != LZMA_OK
        error[] = ErrorException("xz error")
        return :error
    end
    return :ok
end
function TranscodingStreams.process(codec::XzDecompressor, input::Memory, output::Memory, error::Error)
    stream = codec.stream
    # Wire the caller-provided buffers into the lzma stream.
    stream.next_in = input.ptr
    stream.avail_in = input.size
    stream.next_out = output.ptr
    stream.avail_out = output.size
    if codec.flags & LZMA_CONCATENATED != 0
        # In concatenated mode liblzma needs an explicit LZMA_FINISH once the
        # input is exhausted, so it knows no further .xz streams will follow.
        action = stream.avail_in > 0 ? LZMA_RUN : LZMA_FINISH
    else
        action = LZMA_RUN
    end
    ret = code(stream, action)
    # Bytes consumed/produced in this call.
    Δin = Int(input.size - stream.avail_in)
    Δout = Int(output.size - stream.avail_out)
    if ret == LZMA_OK
        return Δin, Δout, :ok
    elseif ret == LZMA_STREAM_END
        return Δin, Δout, :end
    else
        error[] = ErrorException(lzma_error_string(ret))
        return Δin, Δout, :error
    end
end
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | code | 2617 | # The liblzma Interfaces
# ======================
# Return code
# Return code
# Integer values of liblzma's `lzma_ret` enumeration.
const LZMA_OK = Cint(0)
const LZMA_STREAM_END = Cint(1)
const LZMA_NO_CHECK = Cint(2)
const LZMA_UNSUPPORTED_CHECK = Cint(3)
const LZMA_GET_CHECK = Cint(4)
const LZMA_MEM_ERROR = Cint(5)
const LZMA_MEMLIMIT_ERROR = Cint(6)
const LZMA_FORMAT_ERROR = Cint(7)
const LZMA_OPTIONS_ERROR = Cint(8)
const LZMA_DATA_ERROR = Cint(9)
const LZMA_BUF_ERROR = Cint(10)
const LZMA_PROG_ERROR = Cint(11)
# Action code
# `lzma_action` values; note FULL_BARRIER = 4 and FINISH = 3 — the listing
# order mirrors the C header, where the numeric values are not sequential.
const LZMA_RUN = Cint(0)
const LZMA_SYNC_FLUSH = Cint(1)
const LZMA_FULL_FLUSH = Cint(2)
const LZMA_FULL_BARRIER = Cint(4)
const LZMA_FINISH = Cint(3)
# Flag
# Decoder flags combined bitwise and passed to `lzma_stream_decoder`.
const LZMA_TELL_NO_CHECK = UInt32(0x01)
const LZMA_TELL_UNSUPPORTED_CHECK = UInt32(0x02)
const LZMA_TELL_ANY_CHECK = UInt32(0x04)
const LZMA_IGNORE_CHECK = UInt32(0x10)
const LZMA_CONCATENATED = UInt32(0x08)
# Check
# `lzma_check` integrity-check identifiers.
const LZMA_CHECK_NONE = Cint(0)
const LZMA_CHECK_CRC32 = Cint(1)
const LZMA_CHECK_CRC64 = Cint(4)
const LZMA_CHECK_SHA256 = Cint(10)
# Julia mirror of liblzma's `lzma_stream` C struct. Field order and types must
# match the C layout exactly — instances are passed to ccall via Ref{LZMAStream}.
mutable struct LZMAStream
    next_in::Ptr{UInt8}    # pointer to the next input byte
    avail_in::Csize_t      # bytes available at next_in
    total_in::UInt64       # running total of bytes read
    next_out::Ptr{UInt8}   # pointer to the next output position
    avail_out::Csize_t     # free bytes at next_out
    total_out::UInt64      # running total of bytes written
    allocator::Ptr{Cvoid}  # custom allocator (NULL = library default)
    internal::Ptr{Cvoid}   # liblzma-private coder state
    reserved_ptr::NTuple{4,Ptr{Cvoid}}
    reserved_uint::NTuple{2,UInt64}
    reserved_size::NTuple{2,Csize_t}
    reserved_enum::NTuple{2,Cint}
end
# Zero-initialized stream, the Julia analogue of C's LZMA_STREAM_INIT.
function LZMAStream()
    return LZMAStream(
        C_NULL, 0, 0,
        C_NULL, 0, 0,
        C_NULL,
        C_NULL,
        (C_NULL, C_NULL, C_NULL, C_NULL),
        (0, 0), (0, 0),
        (0, 0))
end
# Throw an ErrorException describing the given liblzma status code.
lzmaerror(stream::LZMAStream, code::Cint) = error(lzma_error_string(code))
# Human-readable message for a liblzma status code.
function lzma_error_string(code::Cint)
    return string("lzma error: code = ", code)
end
# Initialize `stream` as an .xz encoder with the given compression preset and
# integrity check; returns an LZMA_* status code (wraps `lzma_easy_encoder`).
function easy_encoder(stream::LZMAStream, preset::Integer, check::Integer)
    return ccall(
        (:lzma_easy_encoder, liblzma),
        Cint,
        (Ref{LZMAStream}, UInt32, Cint),
        stream, preset, check)
end
# Initialize `stream` as an .xz decoder with the given memory limit and decoder
# flags; returns an LZMA_* status code (wraps `lzma_stream_decoder`).
function stream_decoder(stream::LZMAStream, memlimit::Integer, flags::Integer)
    return ccall(
        (:lzma_stream_decoder, liblzma),
        Cint,
        (Ref{LZMAStream}, UInt64, UInt32),
        stream, memlimit, flags)
end
# Run one coding step with the given action (LZMA_RUN / LZMA_FINISH / ...);
# returns an LZMA_* status code (wraps `lzma_code`).
function code(stream::LZMAStream, action::Integer)
    return ccall(
        (:lzma_code, liblzma),
        Cint,
        (Ref{LZMAStream}, Cint),
        stream, action)
end
# Free all liblzma-internal memory held by `stream` (wraps `lzma_end`).
function free(stream::LZMAStream)
    ccall((:lzma_end, liblzma), Cvoid, (Ref{LZMAStream},), stream)
end
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | code | 1751 | using CodecXz
using TranscodingStreams: TranscodingStreams
using TestsForCodecPackages
using Test
@testset "Xz Codec" begin
    # Codec construction, display format, and no-op init/finalize lifecycle.
    codec = XzCompressor()
    @test codec isa XzCompressor
    @test occursin(r"^(CodecXz\.)?XzCompressor\(level=\d, check=\d+\)$", sprint(show, codec))
    @test CodecXz.initialize(codec) === nothing
    @test CodecXz.finalize(codec) === nothing
    codec = XzDecompressor()
    @test codec isa XzDecompressor
    @test occursin(r"^(CodecXz\.)?XzDecompressor\(memlimit=\d+, flags=\d+\)$", sprint(show, codec))
    @test CodecXz.initialize(codec) === nothing
    @test CodecXz.finalize(codec) === nothing
    # Generated by `lzma.compress(b"foo")` on CPython 3.5.2.
    data = Vector(b"\xfd7zXZ\x00\x00\x04\xe6\xd6\xb4F\x02\x00!\x01\x16\x00\x00\x00t/\xe5\xa3\x01\x00\x02foo\x00\x00X\x15\xa9{,\xe6,\x98\x00\x01\x1b\x03\x0b/\xb9\x10\x1f\xb6\xf3}\x01\x00\x00\x00\x00\x04YZ")
    @test read(XzDecompressorStream(IOBuffer(data))) == b"foo"
    # Concatenated .xz streams decode to the concatenated payloads.
    @test read(XzDecompressorStream(IOBuffer(vcat(data, data)))) == b"foofoo"
    # corrupt data
    data[[1,3,5]] = b"bug"
    @test_throws ErrorException read(XzDecompressorStream(IOBuffer(data)))
    @test XzCompressorStream <: TranscodingStreams.TranscodingStream
    @test XzDecompressorStream <: TranscodingStreams.TranscodingStream
    # Shared round-trip suites from TestsForCodecPackages.
    test_roundtrip_read(XzCompressorStream, XzDecompressorStream)
    test_roundtrip_write(XzCompressorStream, XzDecompressorStream)
    test_roundtrip_lines(XzCompressorStream, XzDecompressorStream)
    test_roundtrip_seekstart(XzCompressorStream, XzDecompressorStream)
    test_roundtrip_transcode(XzCompressor, XzDecompressor)
    # Invalid construction parameters are rejected.
    @test_throws ArgumentError XzCompressor(level=10)
    @test_throws ArgumentError XzDecompressor(memlimit=0)
end
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 0.7.4 | 47cee2085962dad41ca9ec811e37694d7445531f | docs | 1248 | CodecXz.jl
==========
## Installation
```julia
Pkg.add("CodecXz")
```
## Usage
```julia
using CodecXz
# Some text.
text = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sollicitudin
mauris non nisi consectetur, a dapibus urna pretium. Vestibulum non posuere
erat. Donec luctus a turpis eget aliquet. Cras tristique iaculis ex, eu
malesuada sem interdum sed. Vestibulum ante ipsum primis in faucibus orci luctus
et ultrices posuere cubilia Curae; Etiam volutpat, risus nec gravida ultricies,
erat ex bibendum ipsum, sed varius ipsum ipsum vitae dui.
"""
# Streaming API.
stream = XzCompressorStream(IOBuffer(text))
for line in eachline(XzDecompressorStream(stream))
println(line)
end
close(stream)
# Array API.
compressed = transcode(XzCompressor, text)
@assert sizeof(compressed) < sizeof(text)
@assert transcode(XzDecompressor, compressed) == Vector{UInt8}(text)
```
This package exports the following codecs and streams:
| Codec | Stream |
| ---------------- | ---------------------- |
| `XzCompressor` | `XzCompressorStream` |
| `XzDecompressor` | `XzDecompressorStream` |
See docstrings and [TranscodingStreams.jl](https://github.com/bicycle1885/TranscodingStreams.jl) for details.
| CodecXz | https://github.com/JuliaIO/CodecXz.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 392 | using Documenter, NumberTheoreticTransforms
# Build the documentation site with Documenter.
makedocs(modules = [NumberTheoreticTransforms],
    sitename = "NumberTheoreticTransforms.jl",
    pages = Any[
        "Home" => "index.md",
        "Functions" => "api.md",
        "Examples" => "examples.md"
    ])
# Publish the generated site to the repository's gh-pages branch.
deploydocs(
    repo = "github.com/jakubwro/NumberTheoreticTransforms.jl.git",
    target = "build"
)
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 1267 | using Images, TestImages, Colors, ZernikePolynomials, FFTW
using NumberTheoreticTransforms
# Demo: 2-D image blurring via circular convolution, computed exactly with the
# Fermat Number Transform and compared against the FFT-based float result.
image_float = channelview(testimage("cameraman"))
# Work on the raw 8-bit integer values so FNT arithmetic is exact.
image_int = map(x -> x.:i, image_float) .|> Int64
# Lens-aberration point-spread function built from Zernike polynomials.
blur_float = evaluateZernike(LinRange(-41,41,512), [12, 4, 0], [1.0, -1.0, 2.0], index=:OSA)
blur_float ./= (sum(blur_float))
# Quantize the kernel to 8-bit fixed point and center it for circular convolution.
blur_int = blur_float .|> Normed{UInt8, 8} .|> x -> x.:i .|> Int64
blur_int = fftshift(blur_int)
@assert sum(blur_int) == 256 # to not change brightness
# Floating-point reference via FFT convolution.
blurred_image_float = ifft(fft(image_float) .* fft(fftshift(blur_float))) |> real
t = 4
# g = 314 is a valid base for N = 512 in the field mod 2^2^4 + 1 = 65537.
(g, q) = (314, 2^2^t+1)
X = fnt(image_int, g, q)
@assert ifnt(X, g, q) == image_int
H = fnt(blur_int, g, q)
@assert ifnt(H, g, q) == blur_int
# Pointwise product in the transform domain = circular convolution.
Y = mod.((X .* H), q)
y = ifnt(Y, g, q)
# Undo the two 8-bit fixed-point scalings (image and kernel).
blurred_image_int = y / 2^16
using Statistics
@show maximum(abs.(blurred_image_float .- blurred_image_int))
@show mean(abs.(blurred_image_float .- blurred_image_int))
@show var(abs.(blurred_image_float .- blurred_image_int))
save("doc/src/fnt/original.jpg", Images.clamp01nan.(image_float))
save("doc/src/fnt/blurred_fft.jpg", Images.clamp01nan.(Gray.(Normed{UInt8, 8}.(blurred_image_float))))
save("doc/src/fnt/blurred_fnt.jpg", Images.clamp01nan.(Gray.(Normed{UInt8, 8}.(blurred_image_int))))
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 1407 | using Images, TestImages
using Deconvolution
using FFTW
using Colors
using ZernikePolynomials
using FFTW
using NumberTheoreticTransforms
trim = 225:288 # need to limit size due to O(N^4) complexity
# Crop a 64x64 patch so the dense O(N^2)-per-dimension NTT stays fast.
image_float = channelview(testimage("cameraman"))[trim.-110, trim.+40]
image_int = map(x -> x.:i, image_float) .|> Int64
# Lens-aberration point-spread function built from Zernike polynomials.
blur_float = evaluateZernike(LinRange(-41,41,512), [12, 4, 0], [1.0, -1.0, 2.0], index=:OSA)
blur_float ./= (sum(blur_float))
blur_float = blur_float[trim, trim]
# Quantize to 8-bit fixed point and center the kernel for circular convolution.
blur_int = blur_float .|> Normed{UInt8, 8} .|> x -> x.:i .|> Int64
blur_int = fftshift(blur_int)
@assert sum(blur_int) == 256 # to not change brightness
# Floating-point reference via FFT convolution.
blurred_image_float = ifft(fft(image_float) .* fft(fftshift(blur_float))) |> real
# Parameters found with scripts/find-ntt.jl: 4^64 ≡ 1 (mod 274177).
(g, q, n) = (4, 274177, 64)
X = ntt(image_int, g, q)
@assert intt(X, g, q) == image_int
H = ntt( blur_int, g, q)
@assert intt(H, g, q) == blur_int
# Pointwise product in the transform domain = circular convolution.
Y = mod.((X .* H), q)
y = intt(Y, g, q)
# Undo the two 8-bit fixed-point scalings (image and kernel).
blurred_image_int = y / 2^16
using Statistics
@show maximum(abs.(blurred_image_float .- blurred_image_int))
@show mean(abs.(blurred_image_float .- blurred_image_int))
@show var(abs.(blurred_image_float .- blurred_image_int))
save("doc/src/ntt/original.jpg", Images.clamp01nan.(image_float))
save("doc/src/ntt/blurred_fft.jpg", Images.clamp01nan.(Gray.(Normed{UInt8, 8}.(blurred_image_float))))
save("doc/src/ntt/blurred_ntt.jpg", Images.clamp01nan.(Gray.(Normed{UInt8, 8}.(blurred_image_int))))
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 1445 |
# `const` makes the alias a proper constant binding (avoids an untyped,
# reassignable global) without changing how it is used.
const Maybe{T} = Union{T, Nothing}
# Parameter-search specification: each field is either a fixed value or
# `nothing` ("search for it"). The combination of `nothing` fields selects,
# via dispatch on the type parameters, which functor method below runs.
struct FindNTT{G<:Maybe{BigInt}, Q<:Maybe{BigInt}, N<:Maybe{Int}}
    g::G # transform exponential base
    q::Q # modulus of the arithmetic
    n::N # input vector length
end
# Given only the length n, scan primes q and small bases g and print every
# valid (g, q, n) parameter set.
function (params::FindNTT{Nothing, Nothing, Int})()
    len = params.n
    for modulus in primes(len, 1000000len)
        # q ≡ 1 (mod n) is independent of the base, so hoist the check.
        mod(modulus - 1, len) == 0 || continue
        for base in 2:min(modulus, 100)
            if powermod(base, len, modulus) == 1 && !(1 in powermod.(base, 2:len - 1, modulus))
                println("(g, q, n) = ($base, $modulus, $len)")
            end
        end
    end
end
# Given only the modulus q, print for every base g the smallest n
# with g^n ≡ 1 (mod q), when one exists.
function (params::FindNTT{Nothing, BigInt, Nothing})()
    modulus = params.q
    for base in 2:modulus
        for order in 1:modulus
            if powermod(base, order, modulus) == 1
                println("(g, q, n) = ($base, $modulus, $order)")
                break
            end
        end
    end
end
# Given g and q, print the smallest n (up to q) with g^n ≡ 1 (mod q).
function (params::FindNTT{BigInt, BigInt, Nothing})()
    base, modulus = params.g, params.q
    @assert base >= 0
    @assert base < modulus
    for order in 1:modulus
        if powermod(base, order, modulus) == 1
            println("(g, q, n) = ($base, $modulus, $order)")
            break
        end
    end
end
# Given q and n, print the first base g with g^n ≡ 1 (mod q).
function (params::FindNTT{Nothing, BigInt, Int})()
    modulus, len = params.q, params.n
    for base in 2:modulus
        if powermod(base, len, modulus) == 1
            println("(g, q, n) = ($base, $modulus, $len)")
            break
        end
    end
end
# All three parameters fixed: print the tuple only if it is a valid set.
function (params::FindNTT{BigInt, BigInt, Int})()
    base, modulus, len = params.g, params.q, params.n
    if mod(modulus - 1, len) == 0 && powermod(base, len, modulus) == 1 && !(1 in powermod.(base, 2:len - 1, modulus))
        println("(g, q, n) = ($base, $modulus, $len)")
    end
end
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 636 | doc = """NTT parameter finder.
Usage:
ntt-params.jl --modulus <q> [ --base <g> ]
ntt-params.jl --length <n> [ --modulus <q> ] [ --base <g> ]
ntt-params.jl -h | --help
Options:
-h --help Show this screen.
-n --length Length of input vector.
-q --modulus Modulo arithmetic.
-g --base Transform exponential base.
"""
using DocOpt, Primes
# Parse the command line according to the docopt usage string above.
args = docopt(doc, version=v"2.0.0")
# Parse an optional CLI value into type T; absent options stay `nothing`.
args_parse(T, key) = args[key] === nothing ? nothing : parse(T, args[key])
(g, q, n) = args_parse(BigInt, "<g>"), args_parse(BigInt, "<q>"), args_parse(Int64, "<n>")
include("find-ntt-methods.jl")
# Which fields are `nothing` selects the search strategy (see find-ntt-methods.jl).
findntt = FindNTT(g, q, n)
findntt()
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 84 | module NumberTheoreticTransforms
include("ntt.jl")
include("fnt.jl")
end # module
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 4108 | ### fnt.jl
#
# Copyright (C) 2020 Andrey Oskin.
# Copyright (C) 2020 Jakub Wronowski.
#
# Maintainer: Jakub Wronowski <[email protected]>
# Keywords: number theoretic transform, fermat number transform
#
# This file is a part of NumberTheoreticTransforms.jl.
#
# License is MIT.
#
### Commentary:
#
# This file contains implementation of Fermat Number Transform.
#
### Code:
export fnt, fnt!, ifnt, ifnt!, modfermat, isfermat
"""
    isfermat(n)

Return `true` when `n` is a Fermat number, i.e. of the form \$ 2^{2^t}+1 \$.
"""
function isfermat(number::T) where {T<:Integer}
    # n is Fermat iff n-1 is a power of two AND that exponent is itself
    # a power of two.
    predecessor = number - one(T)
    return ispow2(predecessor) && ispow2(trailing_zeros(predecessor))
end
"""
    modfermat(n, q)

Equivalent of `mod(n, q)` using a faster, division-free algorithm.

Constraints (otherwise the result is undefined):

- `q` must be a Fermat number \$ 2^{2^t}+1 \$
- `n` must satisfy \$ 0 \\le n \\le (q-1)^2 \$
"""
function modfermat(n::T, q::T) where T <: Integer
    # Split n into its low 2^t bits and the remaining high bits; since
    # 2^(2^t) ≡ -1 (mod q), we have n ≡ low - high (mod q).
    low = n & (q - T(2))
    high = n >>> trailing_zeros(q - T(1))
    r = low - high + q
    return r >= q ? r - q : r
end
"""
Order input to perform radix-2 structured calculation.
It sorts array by bit-reversed 0-based sample index.
"""
function radix2sort!(data::Array{T, 1}) where {T<:Integer}
    N = length(data)
    @assert ispow2(N)
    l = 1
    for k in 1:N
        # Swap only in one direction so each pair is exchanged exactly once.
        if l > k
            data[l], data[k] = data[k], data[l]
        end
        # Advance `l` as a bit-reversed counter (classic in-place FFT
        # permutation step): clear set bits from the top, then set the first
        # cleared one.
        l = l - 1
        m = N
        while l & (m >>= 1) != 0
            l &= ~m
        end
        l = (l | m) + 1
    end
    return data
end
# In-place radix-2 decimation-in-time Fermat Number Transform of `x` with base
# `g` in arithmetic mod `q` (q must be a Fermat number, N a power of two).
function fnt!(x::Array{T, 1}, g::T, q::T) where {T<:Integer}
    N = length(x)
    @assert ispow2(N)
    @assert isfermat(q)
    @assert all(v -> 0 <= v < q, x)
    # Reorder input so in-place butterflies produce naturally ordered output.
    radix2sort!(x)
    logN = trailing_zeros(N)
    for l in 1:logN
        M = 1 << (l-1) # [1,2,4,8,...,N/2]
        interval = 1 << l # [2,4,8,...,N]
        p = 1 << (logN - l) # [N/2,...,4,2,1]
        # Twiddle-factor base for this stage: g^p mod q.
        gp = powermod(g, p, q)
        W = 1
        for m in 1:M
            for i in m:interval:N
                j = i + M
                xi, xj = x[i], x[j]
                Wxj = modfermat(W * xj, q)
                # Butterfly: (xi, xj) <- (xi + W*xj, xi - W*xj) mod q.
                xi, xj = xi + Wxj, xi - Wxj + q
                xi = xi >= q ? xi - q : xi # mod q
                xj = xj >= q ? xj - q : xj # mod q
                x[i], x[j] = xi, xj
            end
            W = modfermat(W * gp, q)
        end
    end
    return x
end
"""
    fnt!(x, g, q)

In-place version of `fnt`: the result overwrites `x`.
"""
function fnt!(x::Array{T,2}, g::T, q::T) where {T<:Integer}
    rows, cols = size(x)
    @assert rows == cols # TODO: support non-square input (needs a separate g per dimension)
    # Separable 2-D transform: every row first, then every column.
    for r in 1:rows
        x[r, :] = fnt!(x[r, :], g, q)
    end
    for c in 1:cols
        x[:, c] = fnt!(x[:, c], g, q)
    end
    return x
end
"""
    fnt(x, g, q)

Fermat Number Transform: returns the same result as `ntt`, but uses an
O(N·log N) radix-2 algorithm instead of the O(N^2) direct formula, which is
possible when `q` has the \$ 2^{2^t}+1 \$ form.
"""
fnt(x::Array{T}, g::T, q::T) where {T<:Integer} = fnt!(copy(x), g, q)
"""
    ifnt!(y, g, q)

In-place version of `ifnt`: the result overwrites `y`.
"""
function ifnt!(y::Array{T,1}, g::T, q::T) where {T<:Integer}
    len = length(y)
    ninv = invmod(len, q)
    ginv = invmod(g, q)
    # Inverse = forward transform with base g^{-1}, rescaled by N^{-1} mod q.
    out = fnt!(y, ginv, q)
    for k in eachindex(out)
        out[k] = mod(ninv * out[k], q)
    end
    return out
end
function ifnt!(y::Array{T,2}, g::T, q::T) where {T<:Integer}
    rows, cols = size(y)
    # Undo the separable forward transform: columns first, then rows.
    for c in 1:cols
        y[:, c] = ifnt!(y[:, c], g, q)
    end
    for r in 1:rows
        y[r, :] = ifnt!(y[r, :], g, q)
    end
    return y
end
"""
    ifnt(y, g, q)

Inverse Fermat Number Transform of `y` in mod \$ 2^{2^t}+1 \$ arithmetic.
`y` must be an integer array produced by `fnt` with the same `g` and `q`.
"""
ifnt(y::Array{T}, g::T, q::T) where {T<:Integer} = ifnt!(copy(y), g, q)
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 3882 | ### ntt.jl
#
# Copyright (C) 2019 Jakub Wronowski.
#
# Maintainer: Jakub Wronowski <[email protected]>
# Keywords: number theoretic transform
#
# This file is a part of NumberTheoreticTransforms.jl.
#
# License is MIT.
#
### Commentary:
#
# This file contains implementation of general Number Theoretic Transform.
#
### Code:
export ntt, intt
"""
Constraints on NTT params to ensure that inverse can be computed
"""
function validate(N, g, q)
    # NOTE(review): `@assert` is used for input validation here; asserts may be
    # disabled at higher optimization levels — throwing ArgumentError would be
    # more robust, but the thrown type is part of the current contract.
    @assert mod(q - 1, N) == 0
    @assert powermod(g, N, q) == 1
    @assert !(1 in powermod.(g, 2:N-1, q)) # this may be redundant
    @assert gcd(N, q) == 1
end
"""
    ntt(x::Array{T,1}, g, q) -> Array{T,1}

The [Number Theoretic Transform](https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general)#Number-theoretic_transform)
transforms data in a similar fashion to the DFT, but instead of complex roots
of unity it uses integer roots, with all operations defined in a finite field
(modulo an integer number).

`ntt` implements the Number Theoretic Transform directly from the formula,
so it is flexible about choosing transformation params but lacks performance.

``\\bar{x}_k = \\sum_{n=1}^N{x_n g^{(n-1)(k-1)} } \\mod q``

There are a few constraints on the parameters and input length that
ensure the inverse exists and equals the original input:

- ``g`` must be an ``N``-th root of one in modulo ``q`` arithmetic
- ``q-1`` mod ``N`` must be equal zero
- ``q`` must be greater than the maximum element present in the transformed array

To find a parameter set you may use the find-ntt script.

The arguments of the `ntt` function are

- `x`: input data, its elements must be smaller than q
- `g`: transform power base, must have inversion modulo q
- `q`: defines modulo arithmetic
"""
function ntt(x::Array{T,1}, g::T, q::T) where {T<:Integer}
    N = length(x)
    #TODO: more validation of p,q, decompose it to struct
    #TODO: create transform object that validates input in the constructor
    validate(N, g, q)
    (lo, hi) = extrema(x)
    @assert lo >= 0
    @assert hi <= q-1
    # Dense transform matrix t[n,k] = g^(n*k) mod q; O(N^2) multiply below.
    t = [powermod(g, n * k, q) for n in 0:N-1, k in 0:N-1]
    #TODO: make result of ntt a struct that will hold information about g
    return mod.(t * x, q)
end
# TODO: change implementation to handle any number of dimensions
# (can be done by calling the (N-1)-dimensional transform in a loop)
function ntt(x::Array{T,2}, g::T, q::T) where {T<:Integer}
    rows, cols = size(x)
    @assert rows == cols #TODO: make it work for rows != cols (needs a different g per dim)
    # Separable 2-D transform: every row first, then every column.
    out = zeros(T, size(x))
    for r in 1:rows
        out[r, :] = ntt(x[r, :], g, q)
    end
    for c in 1:cols
        out[:, c] = ntt(out[:, c], g, q)
    end
    return out
end
"""
    intt(y::Array{T,1}, g, q) -> Array{T,1}

Inverse Number Theoretic Transform implementation directly from the formula.

``x_k = N^{-1} \\sum_{n=1}^N{\\bar{x}_n g^{-(n-1)(k-1)} } \\mod q``

The same input parameters constraints as for `ntt` function must be applied
"""
function intt(y::Array{T,1}, g::T, q::T) where {T<:Integer}
    N = length(y)
    validate(N, g, q)
    # The inverse uses g^{-1} in the matrix and rescales by N^{-1} (mod q).
    inv_g = invmod(g, q)
    inv_N = invmod(N, q)
    t = [powermod(inv_g, l * k, q) for k in 0:N-1, l in 0:N-1]
    return mod.(inv_N * t * y, q)
end
function intt(y::Array{T,2}, g::T, q::T) where {T<:Integer}
    rows, cols = size(y)
    out = zeros(T, size(y))
    # Undo the separable forward transform: columns first, then rows.
    for c in 1:cols
        out[:, c] = intt(y[:, c], g, q)
    end
    for r in 1:rows
        out[r, :] = intt(out[r, :], g, q)
    end
    return out
end
# those implementations are incorrect but transform results were interesing anyway
# TODO: check if those calculations are useful
# function ntt(g::T, q::T, x::Array{T,N}) where {T <: Integer,N}
# return reshape(ntt(g, q, reshape(x, length(x))), size(x))
# end
# function intt(g::T, q::T, y::Array{T,N}) where {T <: Integer,N}
# return reshape(intt(g, q, reshape(y, length(y))), size(y))
# end
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 236 | using BenchmarkTools
using NumberTheoreticTransforms, FFTW
# Compare FNT against FFTW on random vectors in the field mod 65537
# (= 2^2^4 + 1, a Fermat prime); 169 and 225 are valid bases for these lengths.
const x = mod.(rand(Int, 4096), 65537);
@btime fnt($x, $169, $65537);
@btime fft($x);
const x2 = mod.(rand(Int, 8192), 65537);
@btime fnt($x2, $225, $65537);
@btime fft($x2);
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 2597 |
@testset "FNT 1D" begin
for t in BigInt.(1:13)
@show t
x = [1:2^(t+1);] .|> BigInt
@show length(x)
g = 2 |> BigInt
q = 2^2^t + 1 |> BigInt
@time @test ifnt(fnt(x, g, q), g, q) == x
end
end
@testset "FNT and NTT coherence" begin
# FNT is special case with faster implementation of general NTT and should
# give the same results
t = 2
F_t = 2^2^t + 1 #a Fermat number
x = [1:8;]
@test fnt(x, 2, F_t) == ntt(x, 2, F_t)
end
@testset "FNT 1D convolution" begin
using DSP
t = 3
q = 2^2^t + 1
g = 2
# padding inputs with zeros to get non circular convolution
x = [1:8; zeros(Int, 8);]
h = [1:8; zeros(Int, 8);]
X = fnt(x, g, q)
H = fnt(h, g, q)
Y = mod.(X .* H, q)
y = ifnt(Y, g, q)
@test y[1:15] == DSP.conv(1:8, 1:8)
end
@testset "FNT 2D" begin
t = 2
q = 2^2^t + 1
g = 2
x = mod.(rand(Int, 8, 8), q)
@test ifnt(fnt(x, g, q), g, q) == x
t = 4
q = 2^2^t + 1
g = 314
x = mod.(rand(Int, 512, 512), q)
@test ifnt(fnt(x, g, q), g, q) == x
end
@testset "FNT 2D convolution" begin
using DSP
t = 3
q = 2^2^t + 1
g = 2
x = mod.(rand(Int, 8, 8), 5)
x_padded = zeros(Int64, 16, 16)
x_padded[1:8, 1:8] = x
h = mod.(rand(Int, 8, 8), 3)
h_padded = zeros(Int64, 16, 16)
h_padded[1:8, 1:8] = h
X = fnt(x_padded, g, q)
H = fnt(h_padded, g, q)
Y = mod.(X .* H, q)
y = ifnt(Y, g, q)
@test y[1:15, 1:15] == DSP.conv(x, h)
end
@testset "FNT in-place" begin
t = 2
q = 2^2^t + 1
g = 2
x = mod.(rand(Int, 8), q)
y = fnt(x, g, q)
@test x != y
fnt!(x, g, q)
@test x == y
x = mod.(rand(Int, 8, 8), q)
y = fnt(x, g, q)
@test x != y
fnt!(x, g, q)
@test x == y
end
@testset "FNT fails for non fermat modulus" begin
(g, q, n) = (2, 31, 5)
x = [1:n;]
@test_throws AssertionError fnt(x, g, q)
(g, q, n) = (2, 33, 10)
x = [1:n;]
@test_throws AssertionError fnt(x, g, q)
end
@testset "isfermat() tests" begin
known = map(n->2^2^n+1, [0:4;])
expected = map(v -> v in known, [1:maximum(known);])
actual = isfermat.([1:maximum(known);])
@test expected == actual
end
@testset "modfermat() tests" begin
for t in 0:3
q = 2^2^t+1
x = 0:(q-1)^2
@test mod.(x, q) == modfermat.(x, q)
end
for t in 0:10
q = BigInt(2)^2^t+1
limit = (q-1)^2
x = mod.(rand(0:limit, 1000), (q-1)^2)
@test mod.(x, q) == modfermat.(x, q)
end
end | NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 1344 | @testset "NTT 1D" begin
g = 2
q = 31
x = [1:5;]
@test intt(ntt(x, g, q), g, q) == x
g = 3
q = 257
x = [1:256;]
@test intt(ntt(x, g, q), g, q) == x
end
# Transform-domain pointwise product must agree with direct convolution.
@testset "NTT 1D convolution" begin
    using DSP
    g = 9
    q = 271
    # padding inputs with zeros to get non circular convolution
    x = [1:8; zeros(Int64, 7);]
    h = [1:8; zeros(Int64, 7);]
    X = ntt(x, g, q)
    H = ntt(h, g, q)
    Y = X .* H
    y = intt(Y, g, q)
    @test y == DSP.conv(1:8, 1:8)
end
# 2-D round trips intt(ntt(x)) == x for several (g, q) parameter sets.
@testset "NTT 2D" begin
    g = 16
    q = 257
    x = reshape([1:16;] , (4,4))
    y = ntt(x, g, q)
    @test intt(y, g, q) == x
    g = 4
    q = 17
    x = reshape([1:16;] , (4,4))
    y = ntt(x, g, q)
    @test intt(y, g, q) == x
    g = 7
    q = 19
    x = reshape([1:9;] , (3,3))
    y = ntt(x, g, q)
    @test intt(y, g, q) == x
end
@testset "NTT 2D convolution" begin
using DSP
(g, q, n) = (7, 4733, 7)
#zero padding to get non circular convolution
x = reshape([1:16;] , (4,4))
x_padded = zeros(Int64, 7, 7)
x_padded[1:4, 1:4] = x
h = reshape([4 3 2 0; 4 1 0 0; 0 0 1 2; 0 0 2 3] , (4,4))
h_padded = zeros(Int64, 7, 7)
h_padded[1:4, 1:4] = h
X = ntt(x_padded, g, q)
H = ntt(h_padded, g, q)
Y = X .* H
y = intt(Y, g, q)
@test y == DSP.conv(x, h)
end | NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | code | 101 | using Test, NumberTheoreticTransforms, Random
# Fixed seed keeps the randomized FNT/NTT round-trip tests reproducible.
Random.seed!(42)
include("ntt.jl")
include("fnt.jl")
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | docs | 1802 | # NumberTheoreticTransforms.jl
[](https://travis-ci.org/jakubwro/NumberTheoreticTransforms.jl)
[](https://coveralls.io/github/jakubwro/NumberTheoreticTransforms.jl)
[](https://jakubwro.github.io/NumberTheoreticTransforms.jl/dev)
This package provides implementations of general Number Theoretic Transform and
Fermat Number Transform which is a special case of NTT. The
latter is computed with a FFT-like radix-2 DIT algorithm, although the
goal of this package is not to outperform FFT but rather yield more accurate
results solving inverse problems like
[deconvolution](https://github.com/JuliaDSP/Deconvolution.jl).
## Installation
The package is available for Julia versions 1.0 and up.
To install it, run
```julia
using Pkg
Pkg.add("NumberTheoreticTransforms")
```
from the Julia REPL.
## Documentation
The complete manual of `NumberTheoreticTransforms.jl` is available at
https://jakubwro.github.io/NumberTheoreticTransforms.jl/dev.
## Development
The package is developed at https://github.com/jakubwro/NumberTheoreticTransforms.jl.
There you can submit bug reports, propose new calculation algorithms with pull
requests, and make suggestions.
## Credits
Amazing performance improvements for Fermat Number Transform implementation were suggested by Andrey Oskin in [this thread](https://discourse.julialang.org/t/performance-advice-needed/33467/11).
## License
The `NumberTheoreticTransforms.jl` package is licensed under the MIT License. The
original author is Jakub Wronowski. Significant contributions were done by Andrey
Oskin.
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | docs | 437 | ```@meta
DocTestSetup = :(using NumberTheoreticTransforms)
```
# Number Theoretic Transforms
## Number Theoretic Transform
```@docs
NumberTheoreticTransforms.ntt
NumberTheoreticTransforms.intt
```
## Fermat Number Transform
```@docs
NumberTheoreticTransforms.fnt
NumberTheoreticTransforms.fnt!
NumberTheoreticTransforms.ifnt
NumberTheoreticTransforms.ifnt!
NumberTheoreticTransforms.isfermat
NumberTheoreticTransforms.modfermat
```
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | docs | 2157 | # Examples
## Image blurring
Here is an example of use of Fermat Number Transform to perform image
blurring by 2D convolution with a point spread function that models optical
aberration.
``` {.sourceCode .julia}
using Images, TestImages, Colors, ImageView
using ZernikePolynomials, NumberTheoreticTransforms
image_float = channelview(testimage("cameraman"))
image = map(x -> x.:i, image_float) .|> Int64
# lens abberation blur model
blur_float = evaluateZernike(LinRange(-41,41,512), [12, 4, 0], [1.0, -1.0, 2.0], index=:OSA)
blur_float ./= (sum(blur_float))
blur = blur_float .|> Normed{UInt8, 8} .|> x -> x.:i .|> Int64
blur = circshift(blur, (256, 256))
# 2D convolution with FNT
t = 4
(g, q) = (314, 2^2^t+1) # g for N = 512 found with scripts/find-ntt.jl
X = fnt(image, g, q)
H = fnt(blur, g, q)
Y = X .* H
y = ifnt(Y, g, q)
blurred_image = y .>> 8
imshow(image)
imshow(blurred_image)
```
| Original image | Blurred image |
| :--------------------------------- | :------------------------------- |
|  |  |
## Finding transformation parameters
Input vector length, modulo arithmetic used and exponential base of the
transform are tightly coupled. For finding them there is a script prepared.
### Finding parameters for given input length
Let's say we need to convolve integer vectors of length N = 512.
``` {.sourceCode .bash}
$ julia find-ntt.jl -n 512
(g, q, n) = (62, 7681, 512)
(g, q, n) = (10, 10753, 512)
(g, q, n) = (24, 11777, 512)
(g, q, n) = (3, 12289, 512)
(g, q, n) = (27, 12289, 512)
(g, q, n) = (15, 13313, 512)
```
One of the tuples printed is (g, q, n) = (3, 12289, 512), which means that in
arithmetic modulo 12289 with base 3, vectors of length 512 can be transformed.
### Finding parameters for given finite field
To list all possible vector lengths for a predefined modulus, the parameter `-q`
can be used.
``` {.sourceCode .bash}
$ julia find-ntt.jl -q 17
(g, q, n) = (2, 17, 8)
(g, q, n) = (3, 17, 16)
(g, q, n) = (4, 17, 4)
(g, q, n) = (16, 17, 2)
```
This shows that possible lengths are 2, 4, 8 and 16.
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 8add6e24b5c2f363f9f3564bb11f42dcb197f4f8 | docs | 1345 | # NumberTheoreticTransforms.jl
---
This package provides implementations of general Number Theoretic Transform and
its special case: Fermat Number Transform. The
latter is computed with a FFT-like radix-2 DIT algorithm, although the
goal of this package is not to outperform FFT but rather yield more accurate
results solving inverse problems like [deconvolution](https://github.com/JuliaDSP/Deconvolution.jl).
## Installation
The package is available for Julia versions 1.0 and up.
To install it, run
```julia
using Pkg
Pkg.add("NumberTheoreticTransforms")
```
from the Julia REPL.
## Documentation
The complete manual of `NumberTheoreticTransforms.jl` is available at
https://jakubwro.github.io/NumberTheoreticTransforms.jl/dev.
## Development
The package is developed at https://github.com/jakubwro/NumberTheoreticTransforms.jl.
There you can submit bug reports, propose new calculation algorithms with pull
requests, and make suggestions.
## Credits
Amazing performance improvements for Fermat Number Transform implementation were suggested by Andrey Oskin in [this thread](https://discourse.julialang.org/t/performance-advice-needed/33467/11).
## License
The `NumberTheoreticTransforms.jl` package is licensed under the MIT License. The
original author is Jakub Wronowski. Significant contributions were done by Andrey
Oskin.
| NumberTheoreticTransforms | https://github.com/jakubwro/NumberTheoreticTransforms.jl.git |
|
[
"MIT"
] | 1.0.0 | 6035ba56fb2e69e3d6562e3518ba5e58d92ed013 | code | 262 | module BatchAssign
export @all
"""
    @all var1 var2 ... varN (op) value

Apply the trailing assignment expression (e.g. `= 5`, `+= 2`, `.*= 2`) to every
listed variable. The right-hand side expression is duplicated, so it is
evaluated once per variable.
"""
macro all(args...)
    targets = args[1:end-1]
    assignment = args[end]
    rhs = assignment.args[end]
    block = Expr(:block)
    for target in targets
        # Re-use the head of the final assignment (=, +=, .*=, ...) for each target.
        push!(block.args, Expr(assignment.head, target, rhs))
    end
    push!(block.args, assignment)
    # Escape the whole block so assignments land in the caller's scope.
    return esc(block)
end
end
| BatchAssign | https://github.com/akjake616/BatchAssign.jl.git |
|
[
"MIT"
] | 1.0.0 | 6035ba56fb2e69e3d6562e3518ba5e58d92ed013 | code | 1990 | using BatchAssign
using Test
@testset "BatchAssign.jl" begin
# Test simple assignment
@testset "Simple Assignment" begin
    @all a b c = 5
    @test a == 5
    @test b == 5
    @test c == 5
end
# Test addition assignment
@testset "Addition Assignment" begin
    a, b, c = 1, 2, 3
    @all a b c += 2
    @test a == 3
    @test b == 4
    @test c == 5
end
# Test subtraction assignment
@testset "Subtraction Assignment" begin
    a, b, c = 5, 6, 7
    @all a b c -= 2
    @test a == 3
    @test b == 4
    @test c == 5
end
# Test multiplication assignment
@testset "Multiplication Assignment" begin
    a, b, c = 1, 2, 3
    @all a b c *= 2
    @test a == 2
    @test b == 4
    @test c == 6
end
# Test division assignment
@testset "Division Assignment" begin
    a, b, c = 10, 20, 30
    @all a b c /= 2
    @test a == 5
    @test b == 10
    @test c == 15
end
# Test modulo assignment
@testset "Modulo Assignment" begin
    a, b, c = 10, 20, 30
    @all a b c %= 3
    @test a == 1
    @test b == 2
    @test c == 0
end
# Test simple matrix assignment
@testset "Matrix Assignment" begin
    M = [1 2; 3 4]
    @all A B C = M
    @test A == M
    @test B == M
    @test C == M
end
# Test element-wise matrix addition
@testset "Element-wise Matrix Addition" begin
    A = [1 2; 3 4]
    B = [1 2; 3 4]
    C = [1 2; 3 4]
    @all A B C .+= 1
    @test A == [2 3; 4 5]
    @test B == [2 3; 4 5]
    @test C == [2 3; 4 5]
end
# Test element-wise matrix multiplication
@testset "Element-wise Matrix Multiplication" begin
    A = [1 2; 3 4]
    B = [1 2; 3 4]
    C = [1 2; 3 4]
    @all A B C .*= 2
    @test A == [2 4; 6 8]
    @test B == [2 4; 6 8]
    @test C == [2 4; 6 8]
end
end | BatchAssign | https://github.com/akjake616/BatchAssign.jl.git |
|
[
"MIT"
] | 1.0.0 | 6035ba56fb2e69e3d6562e3518ba5e58d92ed013 | docs | 1602 |
# BatchAssign.jl
The `BatchAssign.jl` module provides a convenient way to perform batch assignments in Julia using the macro `@all`. By leveraging metaprogramming techniques in Julia, this macro can assign a single value to multiple variables in one line, making your code cleaner and more concise.
## Installation
To install the `BatchAssign` module, add it with the package manager:
```julia
using Pkg
Pkg.add("BatchAssign")
```
## Usage
First, include the module in your script:
```julia
using BatchAssign
```
Then, you can use the `@all` macro for batch assignments:
```julia
@all a b c = 1
```
This line will assign the value `1` to variables `a`, `b`, and `c` simultaneously, and is equivalent to the following:
```julia
a, b, c, = 1, 1, 1 # or,
a = b = c = 1
```
The macro can also be used to assign arrays to variables:
```julia
@all A B C = zeros(2, 3)
```
Note that calling the following yields distinct random values for the variables:
```julia
@all a b c = rand()
```
The `@all` macro also supports compound assignments such as +=, -=, *=, and /=. This allows you to update multiple variables simultaneously:
```julia
@all a b c += 1
```
For more examples, please refer to [`runtest.jl`](./test/runtests.jl) in the test folder.
## Benefits
- **Conciseness:** Reduce multiple lines of assignments to a single line.
- **Readability:** Enhance code readability by minimizing repetitive assignment lines.
- **Efficiency:** Simplify the process of initializing multiple variables to the same value.
Enjoy the simplicity of batch assignments with `@all`!
| BatchAssign | https://github.com/akjake616/BatchAssign.jl.git |
|
[
"MIT"
] | 0.4.6 | d4868b522f15a1c53ea255d8f6c97db38907674a | code | 936 | module EDKit
using LinearAlgebra, SparseArrays, Random, Combinatorics
using ITensors, ITensorMPS, LRUCache
import Base: +, -, *, /, Array, Vector, Matrix, size, length, eltype, digits, copy
import LinearAlgebra: norm, mul!
include("Basis/AbstractBasis.jl")
include("Basis/ProjectedBasis.jl")
include("Basis/TranslationalBasis.jl")
include("Basis/TranslationalParityBasis.jl")
include("Basis/TranslationalFlipBasis.jl")
include("Basis/ParityBasis.jl")
include("Basis/FlipBasis.jl")
include("Basis/ParityFlipBasis.jl")
include("Basis/AbelianBasis.jl")
include("LinearMap.jl")
include("Schmidt.jl")
include("Operator.jl")
include("ToolKit.jl")
# Include every algorithm implementation shipped in src/algorithms/.
# `endswith` is safe for file names shorter than 3 characters, unlike the
# previous `file[end-2:end] == ".jl"`, which throws a BoundsError on them.
for file in readdir("$(@__DIR__)/algorithms/")
    if endswith(file, ".jl")
        include("$(@__DIR__)/algorithms/$file")
    end
end
# Include the ITensors-backed extensions in src/ITensors/.
# `endswith` is safe for file names shorter than 3 characters, unlike the
# previous `file[end-2:end] == ".jl"`, which throws a BoundsError on them.
for file in readdir("$(@__DIR__)/ITensors/")
    if endswith(file, ".jl")
        include("$(@__DIR__)/ITensors/$file")
    end
end
end # module
| EDKit | https://github.com/jayren3996/EDKit.jl.git |
|
[
"MIT"
] | 0.4.6 | d4868b522f15a1c53ea255d8f6c97db38907674a | code | 4018 | #-----------------------------------------------------------------------------------------------------
# Helpers
#-----------------------------------------------------------------------------------------------------
# `index` with the symmetry check disabled where the basis supports it;
# plain `index` otherwise.
function index_nocheck(b::AbstractBasis)
    return index(b)
end
function index_nocheck(b::ProjectedBasis)
    return index(b, check=false)
end
# Cycle length used as a normalization divisor in the basis-change functors;
# the parity variant doubles it.
function cyclelength(b::TranslationalBasis)
    return length(b)
end
function cyclelength(b::AbstractTranslationalParityBasis)
    return 2 * length(b)
end
#-----------------------------------------------------------------------------------------------------
# DoubleBasis
#-----------------------------------------------------------------------------------------------------
export DoubleBasis
"""
    DoubleBasis{Tb1<:AbstractBasis, Tb2<:AbstractBasis}

Basis for constructing a transition matrix from one symmetry sector (`B2`)
to another (`B1`).
Note that `DoubleBasis` can be used as a projector, meaning that it will ignore
symmetry violations. Therefore, extra care is needed when working with this basis.

Properties:
-----------
- `dgt`: Digits.
- `B1` : Basis of the target symmetry sector (rows).
- `B2` : Basis of the starting symmetry sector (columns).
- `B`  : Base (local Hilbert-space dimension).
"""
struct DoubleBasis{Tb1<:AbstractBasis, Tb2<:AbstractBasis} <: AbstractBasis
    dgt::Vector{Int64}  # digit buffer, shared with B1 by the default constructor
    B1::Tb1             # target sector basis — determines size(·, 1)
    B2::Tb2             # source sector basis — determines size(·, 2)
    B::Int64            # local base, taken from B2
end
# Default constructor: share B1's digit buffer, inherit B2's base.
DoubleBasis(B1::AbstractBasis, B2::AbstractBasis) = DoubleBasis(B1.dgt, B1, B2, B2.B)
eltype(b::DoubleBasis) = promote_type(eltype(b.B1), eltype(b.B2))
# Load the i-th state of the SOURCE basis B2 into the digit buffer; returns
# the normalization reported by B2's `change!`.
function change!(b::DoubleBasis, i::Integer)
    N = change!(b.B2, i)
    b.dgt .= b.B2.dgt
    N
end
# Index the current digits in the TARGET basis B1. By default the symmetry
# check is skipped (projector behavior) — pass `check=true` to enforce it.
index(b::DoubleBasis; check::Bool=false) = check ? index(b.B1) : index_nocheck(b.B1)
# Rectangular shape: rows from the target basis, columns from the source basis.
size(b::DoubleBasis) = size(b.B1, 1), size(b.B2, 2)
size(b::DoubleBasis, i::Integer) = isone(i) ? size(b.B1, 1) : isequal(i, 2) ? size(b.B2, 2) : 1
copy(b::DoubleBasis) = DoubleBasis(copy(b.B1), copy(b.B2))
#---------------------------------------------------------------------------------------------------
# Transform a vector from basis `B1` to basis `B2`.
#---------------------------------------------------------------------------------------------------
# Transform a vector/matrix between two onsite (unsymmetrized) bases: each
# source state k is located in the target basis and copied over with its
# normalization N.
function (B::DoubleBasis{<:AbstractOnsiteBasis, <:AbstractOnsiteBasis})(v::AbstractVecOrMat)
    T = promote_type(eltype(B), eltype(v))
    isvec = isa(v, AbstractVector)
    out = isvec ? zeros(T, size(B, 1)) : zeros(T, size(B, 1), size(v, 2))
    for k in axes(v, 1)
        change!(B, k)       # load the k-th source configuration
        N, j = index(B)     # its position (and norm) in the target basis
        if isvec
            out[j] = N * v[k]
        else
            out[j, :] = N * v[k, :]
        end
    end
    return out
end
# Transform from a symmetrized (permute) basis B2 to an onsite basis B1:
# for each target configuration, look up the symmetrized state it belongs to
# and weight by conj(N)/L.
# NOTE(review): the conj(N)/L normalization depends on `index`'s phase
# convention for permute bases — confirm against the basis implementations.
function (B::DoubleBasis{<:AbstractOnsiteBasis, <:AbstractPermuteBasis})(v::AbstractVecOrMat)
    dtype = promote_type(eltype(B), eltype(v))
    out = isa(v, AbstractVector) ? zeros(dtype, size(B, 1)) : zeros(dtype, size(B, 1), size(v, 2))
    B1, B2 = B.B1, B.B2
    L = cyclelength(B2)
    for i in 1:size(B,1)
        change!(B1, i)          # i-th configuration of the onsite target basis
        B2.dgt .= B1.dgt        # hand the same digits to the symmetrized basis
        N, j = index(B2)
        iszero(N) && continue   # zero norm: state absent from the sector
        isa(v, AbstractVector) ? out[i] = conj(N) * v[j] / L : out[i, :] = conj(N) * v[j, :] / L
    end
    out
end
# Transform from an onsite basis B2 to a symmetrized (permute) basis B1:
# each source state is mapped to its symmetrized representative with weight
# cyclelength / N; zero-norm states are skipped.
function (B::DoubleBasis{<:AbstractPermuteBasis, <:AbstractOnsiteBasis})(v::AbstractVecOrMat)
    T = promote_type(eltype(B), eltype(v))
    isvec = isa(v, AbstractVector)
    out = isvec ? zeros(T, size(B, 1)) : zeros(T, size(B, 1), size(v, 2))
    len = cyclelength(B.B1)
    for k in axes(v, 1)
        change!(B, k)
        N, j = index(B)
        iszero(N) && continue   # state absent from the target sector
        if isvec
            out[j] = len * v[k] / N
        else
            out[j, :] = len * v[k, :] / N
        end
    end
    return out
end
# Transform from a translational-parity basis B2 to a plain translational
# basis B1. R1 is the translational normalization of the target state.
# NOTE(review): `L = 2` hard-codes the extra parity factor on top of R1 —
# confirm against the parity-basis normalization convention.
function (B::DoubleBasis{<:TranslationalBasis, <:AbstractTranslationalParityBasis})(v::AbstractVecOrMat)
    dtype = promote_type(eltype(B), eltype(v))
    out = isa(v, AbstractVector) ? zeros(dtype, size(B, 1)) : zeros(dtype, size(B, 1), size(v, 2))
    B1, B2 = B.B1, B.B2
    L = 2
    for i in 1:size(B,1)
        R1 = change!(B1, i)     # load target state i; R1 is its normalization
        B2.dgt .= B1.dgt
        N, j = index(B2)
        iszero(N) && continue   # state absent from the parity sector
        isa(v, AbstractVector) ? out[i] = N * v[j] / R1 / L : out[i, :] = N * v[j, :] / R1 / L
    end
    out
end
|
[
"MIT"
] | 0.4.6 | d4868b522f15a1c53ea255d8f6c97db38907674a | code | 13573 | """
Operator
Construction of `Operator` object.
"""
#---------------------------------------------------------------------------------------------------
export operator, trans_inv_operator
"""
    Operator{Tv}

Object that stores a many-body operator as a sum of local terms.

Properties:
-----------
- M : Vector{SparseMatrixCSC{Tv, Int}}, local operators represented by CSC sparse matrices.
- I : Vector{Vector{Int}}, indices of sites that the operators act on.
- B : Basis.
"""
struct Operator{Tv<:Number, Tb<:AbstractBasis}
    M::Vector{SparseMatrixCSC{Tv, Int}}  # one local matrix per term
    I::Vector{Vector{Int}}               # site supports, parallel to M
    B::Tb                                # many-body basis the operator acts in
end
#---------------------------------------------------------------------------------------------------
# Element type of the matrix representation: the term eltype promoted with the
# basis eltype (a basis may contribute its own element type).
eltype(opt::Operator{Tv, Tb}) where Tv where Tb = promote_type(Tv, eltype(opt.B))
length(opt::Operator) = length(opt.M)       # number of local terms
size(opt::Operator) = size(opt.B)           # matrix dimensions come from the basis
size(opt::Operator, i::Integer) = size(opt.B, i)
# Compact REPL display: never materializes the (possibly huge) matrix.
function Base.display(opt::Operator)
    println("Operator of size $(size(opt)) with $(length(opt)) terms.")
end
#---------------------------------------------------------------------------------------------------
"""
    operator(mats::AbstractVector{<:AbstractMatrix}, inds::AbstractVector{<:AbstractVector}, B::AbstractBasis)

Constructor for `Operator`. Zero matrices are dropped, and matrices acting on
the same set of sites are merged into a single sparse local term.

Inputs:
-------
- `mats`: List of matrices for local operators.
- `inds`: List of sites on which local operators act.
- `B`   : Basis.

Outputs:
--------
- `O` : Operator.

Throws `ArgumentError` if `mats` and `inds` have different lengths.
"""
function operator(mats::AbstractVector{<:AbstractMatrix}, inds::AbstractVector{<:AbstractVector}, B::AbstractBasis)
    num = length(mats)
    # Validate at the API boundary with a real error: `@assert` may be
    # disabled at higher optimization levels.
    num == length(inds) || throw(ArgumentError("Numbers mismatch: $num matrices and $(length(inds)) indices."))
    dtype = promote_type(eltype.(mats)...)
    M = Vector{SparseMatrixCSC{dtype, Int64}}(undef, num)
    I = Vector{Vector{Int64}}(undef, num)
    N = 0   # number of distinct site supports collected so far
    for i in 1:num
        iszero(mats[i]) && continue   # drop vanishing terms entirely
        ind = inds[i]
        pos = findfirst(x -> isequal(x, ind), view(I, 1:N))
        if isnothing(pos)
            N += 1
            I[N] = ind
            M[N] = sparse(mats[i])
        else
            # Same support: accumulate into the existing local matrix.
            M[pos] += mats[i]
        end
    end
    # Trim the unused tail left over from dropped/merged terms.
    deleteat!(M, N+1:num)
    deleteat!(I, N+1:num)
    Operator(M, I, B)
end
# Convenience constructor: infer the local base from the first matrix and
# build a full TensorBasis when only the chain length L is given.
function operator(mats::AbstractVector{<:AbstractMatrix}, inds::AbstractVector{<:AbstractVector}, L::Integer)
    base = find_base(size(mats[1], 1), length(inds[1]))
    basis = TensorBasis(L=L, base=base)
    operator(mats, inds, basis)
end
# Single-site index lists, one-site-per-position defaults, and a single term.
operator(mats::AbstractVector{<:AbstractMatrix}, inds::AbstractVector{<:Integer}, C) = operator(mats, [[i] for i in inds], C)
operator(mats::AbstractVector{<:AbstractMatrix}, L::Integer) = operator(mats, [[i] for i in 1:L], L)
operator(mats::AbstractVector{<:AbstractMatrix}, B::AbstractBasis) = operator(mats, [[i] for i in 1:length(B.dgt)], B)
operator(mat::AbstractMatrix, ind::AbstractVector{<:Integer}, C) = operator([mat], [ind], C)
#---------------------------------------------------------------------------------------------------
# Translation-invariant operator: place `mat` (acting on the site pattern
# `ind`) at every site of the chain with periodic boundary conditions.
function trans_inv_operator(mat::AbstractMatrix, ind::AbstractVector{<:Integer}, B::AbstractBasis)
    L = length(B.dgt)
    smat = sparse(mat)
    mats = fill(smat, L)
    # Shift `ind` by i with wrap-around; i = -1 reproduces `ind` itself.
    inds = [mod.(ind .+ i, L) .+ 1 for i = -1:L-2]
    operator(mats, inds, B)
end
# Infer the local base and build a TensorBasis from the chain length alone.
function trans_inv_operator(mat::AbstractMatrix, ind::AbstractVector{<:Integer}, L::Integer)
    base = find_base(size(mat, 1), length(ind))
    B = TensorBasis(L=L, base=base)
    trans_inv_operator(mat, ind, B)
end
# `M` consecutive sites starting at site 1.
trans_inv_operator(mat::AbstractMatrix, M::Integer, C) = trans_inv_operator(mat, 1:M, C)
#---------------------------------------------------------------------------------------------------
# Scalar algebra: scaling acts on every local matrix.
function *(c::Number, A::Operator)
    return operator(c .* A.M, A.I, A.B)
end
*(A::Operator, c::Number) = c * A
/(A::Operator, c::Number) = operator(A.M ./ c, A.I, A.B)
# Addition concatenates the term lists; `operator` then merges terms that
# share the same site support.
+(A::Operator, B::Operator) = operator(vcat(A.M, B.M), vcat(A.I, B.I), A.B)
+(A::Operator, ::Nothing) = A
+(::Nothing, A::Operator) = A
# Unary minus negates each local matrix via the raw constructor, skipping
# `operator`'s merging pass.
-(A::Operator) = Operator(-A.M, A.I, A.B)
-(A::Operator, B::Operator) = A + (-B)
#---------------------------------------------------------------------------------------------------
# Hermitian conjugate: conjugate-transpose every local matrix; the site
# supports are unchanged.
LinearAlgebra.adjoint(opt::Operator) = operator(adjoint.(opt.M), opt.I, opt.B)
#---------------------------------------------------------------------------------------------------
"""
    find_base(a::Integer, b::Integer)

Return the integer `base` with `base^b == a` (the local dimension given a
total dimension `a` spread over `b` sites); error if no such base exists.
"""
function find_base(a::Integer, b::Integer)
    b == 1 && return a
    for base in 2:a
        if base^b == a
            return base
        end
    end
    error("Incompatible dimension: ($a, $b).")
end
#---------------------------------------------------------------------------------------------------
# Operator to matrices
#---------------------------------------------------------------------------------------------------
export addto!
"""
    addto!(M::AbstractMatrix, opt::Operator)

Accumulate the matrix representation of `opt` into `M`, column by column, and
return `M`. Note that `M` is NOT zeroed first — entries are added on top of
whatever `M` already contains.
"""
function addto!(M::AbstractMatrix, opt::Operator)
    for j = 1:size(opt.B, 2)
        colmn!(view(M, :, j), opt, j)
    end
    M
end
#---------------------------------------------------------------------------------------------------
# Materialize the operator as a dense matrix.
function Array(opt::Operator)
    out = zeros(eltype(opt), size(opt))
    # Skip the fill-in loop for empty symmetry sectors.
    isempty(out) || addto!(out, opt)
    return out
end
#---------------------------------------------------------------------------------------------------
# Materialize the operator as a sparse matrix.
function SparseArrays.sparse(opt::Operator)
    out = spzeros(eltype(opt), size(opt)...)
    # Skip the fill-in loop for empty symmetry sectors.
    isempty(out) || addto!(out, opt)
    return out
end
#---------------------------------------------------------------------------------------------------
# Dense shortcuts: materialize the operator, then hand it to the
# corresponding LinearAlgebra routine.
LinearAlgebra.Hermitian(opt::Operator) = Hermitian(Array(opt))
LinearAlgebra.Symmetric(opt::Operator) = Symmetric(Array(opt))
LinearAlgebra.eigen(opt::Operator) = eigen(Array(opt))
LinearAlgebra.eigvals(opt::Operator) = eigvals(Array(opt))
LinearAlgebra.svd(opt::Operator) = svd(Array(opt))
LinearAlgebra.svdvals(opt::Operator) = svdvals(Array(opt))
#---------------------------------------------------------------------------------------------------
# NOTE(review): unlike the `LinearAlgebra.mul!` contract (which overwrites its
# destination), these methods ACCUMULATE into `target` (`colmn!` uses `+=`).
# Callers must pass a zeroed `target`, as the `*` methods below do — confirm
# this deviation is intended.
function mul!(target::AbstractVector, opt::Operator, v::AbstractVector)
    for j = 1:length(v)
        colmn!(target, opt, j, v[j])
    end
    target
end
# Matrix version: row j of `m` supplies the coefficients for basis state j.
function mul!(target::AbstractMatrix, opt::Operator, m::AbstractMatrix)
    for j = 1:size(m, 1)
        colmn!(target, opt, j, view(m, j, :))
    end
    target
end
export mul
"""
    mul(opt::Operator, v)

Multi-threaded operator multiplication. The rows of `v` are split into
`Threads.nthreads()` chunks; each thread uses a private copy of the basis
(whose digit buffer `dgt` is mutated during indexing) and a private output
buffer, and the partial results are summed at the end.
"""
function mul(opt::Operator, v::AbstractVector)
    ctype = promote_type(eltype(opt), eltype(v))
    nt = Threads.nthreads()
    ni = dividerange(length(v), nt)   # partition 1:length(v) into nt chunks
    Ms = [zeros(ctype, size(opt, 1)) for i in 1:nt]
    Threads.@threads for i in 1:nt
        opt_c = Operator(opt.M, opt.I, copy(opt.B))   # thread-private basis copy
        for j in ni[i]
            colmn!(Ms[i], opt_c, j, v[j])
        end
    end
    sum(m for m in Ms)
end
function mul(opt::Operator, m::AbstractMatrix)
    ctype = promote_type(eltype(opt), eltype(m))
    nt = Threads.nthreads()
    ni = dividerange(size(m,1), nt)
    Ms = [zeros(ctype, size(opt, 1), size(m, 2)) for i in 1:nt]
    Threads.@threads for i in 1:nt
        opt_c = Operator(opt.M, opt.I, copy(opt.B))   # thread-private basis copy
        for j in ni[i]
            colmn!(Ms[i], opt_c, j, view(m, j, :))
        end
    end
    sum(m for m in Ms)
end
# Out-of-place products: allocate a zeroed output of the promoted element
# type, then accumulate via mul!.
function *(opt::Operator, v::AbstractVector)
    T = promote_type(eltype(opt), eltype(v))
    out = zeros(T, size(opt, 1))
    return mul!(out, opt, v)
end
function *(opt::Operator, m::AbstractMatrix)
    T = promote_type(eltype(opt), eltype(m))
    out = zeros(T, size(opt, 1), size(m, 2))
    return mul!(out, opt, m)
end
#---------------------------------------------------------------------------------------------------
# Helper functions
#---------------------------------------------------------------------------------------------------
"""
    colmn!(target::AbstractVecOrMat, M::SparseMatrixCSC, I::Vector{Int}, b::AbstractBasis, coeff=1)

Central helper function for operator multiplication.
For a local matrix `M` acting on indices `I`, `colmn!` accumulates the j-th
column (given by the current `b.dgt`) in the many-body basis. The result is
written in place into `target`.
"""
function colmn!(target::AbstractVecOrMat, M::SparseMatrixCSC, I::Vector{Int}, b::AbstractBasis, coeff=1)
    rows, vals = rowvals(M), nonzeros(M)   # CSC internals of the local matrix
    j = index(b.dgt, I, base=b.B)          # local column selected by digits on sites I
    change = false
    for i in nzrange(M, j)
        row, val = rows[i], vals[i]
        change!(b.dgt, I, row, base=b.B)   # set digits on I to the local row state
        C, pos = index(b)                  # global index (and norm) of the new state
        isa(target, AbstractVector) ? (target[pos] += coeff * C * val) : (target[pos, :] .+= (C * val) .* coeff)
        change = true
    end
    # Restore the original digits only if the loop actually modified them.
    change && change!(b.dgt, I, j, base=b.B)
    nothing
end
#---------------------------------------------------------------------------------------------------
# Accumulate the j-th column of the full operator into `target`, scaled by
# `coeff`, by summing the contributions of every local term.
function colmn!(target::AbstractVecOrMat, opt::Operator, j::Integer, coeff=1)
    b, M, I = opt.B, opt.M, opt.I
    r = change!(b, j)                  # load state j; r is its normalization
    C = isone(r) ? coeff : coeff / r   # skip the division in the common r == 1 case
    for i = 1:length(M)
        colmn!(target, M[i], I[i], b, C)
    end
end
#---------------------------------------------------------------------------------------------------
# Spin Matrices
#---------------------------------------------------------------------------------------------------
# Sparse Pauli matrices, accessed through σ with indices 0:3 (0 = identity).
const PAULI = sparse.(
    [ I(2), [0 1; 1 0], [0 -1im; 1im 0], [1 0; 0 -1] ]
)
# Sparse Gell-Mann matrices, accessed through λ with indices 0:9
# (0 = identity; 9 is an extra identity entry).
const GELLMANN = sparse.([
    I(3),
    [0 1 0; 1 0 0; 0 0 0],
    [0 -1im 0; 1im 0 0; 0 0 0],
    [1 0 0; 0 -1 0; 0 0 0],
    [0 0 1; 0 0 0; 1 0 0],
    [0 0 -1im; 0 0 0; 1im 0 0],
    [0 0 0; 0 0 1; 0 1 0],
    [0 0 0; 0 0 -1im; 0 1im 0],
    [1 0 0; 0 1 0; 0 0 -2] / sqrt(3),
    [1 0 0; 0 1 0; 0 0 1]
])
# σ(i) is the i-th Pauli matrix; multiple indices are combined by Kronecker
# product; a tuple/vector argument is splatted.
σ(i::Integer) = PAULI[i+1]
σ(Is::Integer...) = kron(map(σ, Is)...)
σ(Is::Union{<:Tuple, <:AbstractVector}) = σ(Is...)
# Same interface for the Gell-Mann matrices.
λ(i::Integer) = GELLMANN[i+1]
λ(Is::Integer...) = kron(map(λ, Is)...)
λ(Is::Union{<:Tuple, <:AbstractVector}) = λ(Is...)
#---------------------------------------------------------------------------------------------------
export spin
# Ladder-operator matrix elements √(i(D-i)) for a D-dimensional
# (spin-(D-1)/2) representation.
spin_coeff(D::Integer) = [sqrt(i * (D - i)) for i in 1:D-1]
# Raising operator S⁺: nonzeros on the first superdiagonal.
spin_Sp(D::Integer) = sparse(1:D-1, 2:D, spin_coeff(D), D, D)
# Lowering operator S⁻: nonzeros on the first subdiagonal.
spin_Sm(D::Integer) = sparse(2:D, 1:D-1, spin_coeff(D), D, D)
# Sx = (S⁺ + S⁻)/2.
spin_Sx(D::Integer) = (spin_Sp(D) + spin_Sm(D)) / 2
# i*Sy = (S⁺ - S⁻)/2; kept as i*Sy so the matrix stays real.
spin_iSy(D::Integer) = (spin_Sp(D) - spin_Sm(D)) / 2
# Sz = diag(J, J-1, …, -J) with J = (D-1)/2.
function spin_Sz(D::Integer)
    J = (D - 1) / 2
    return sparse(1:D, 1:D, J:-1:-J)
end
#---------------------------------------------------------------------------------------------------
"""
    spin_dict(c::Char, D::Integer)

Return the sparse matrix for the spin symbol `c` in a `D`-dimensional local
Hilbert space.

Lowercase symbols (`'+'`, `'-'`, `'x'`, `'y'`, `'z'`) work for any `D`; `'y'`
returns `i*Sy` so the matrix stays real (the sign is corrected in `spin`).
`'1'`/`'I'` give the identity. Uppercase `'X'`/`'Y'`/`'Z'` are the spin-1/2
symbols and require `D == 2`; `'Y'` likewise returns the real form `[0 1; -1 0]`.

Throws `ArgumentError` for an unknown symbol, or for `'X'`/`'Y'`/`'Z'` with
`D != 2`.
"""
function spin_dict(c::Char, D::Integer)
    isequal(c, '+') && return spin_Sp(D)
    isequal(c, '-') && return spin_Sm(D)
    isequal(c, 'x') && return spin_Sx(D)
    isequal(c, 'y') && return spin_iSy(D)
    isequal(c, 'z') && return spin_Sz(D)
    (isequal(c, '1') || isequal(c, 'I')) && return spdiagm(ones(D))
    if c in ('X', 'Y', 'Z')
        # Pauli symbols only exist for spin-1/2. Raise a real error instead of
        # the previous `@assert`, which can be compiled away and carried a
        # misleading "invalid symbol" message for a dimension mismatch.
        isequal(D, 2) || throw(ArgumentError("Symbol '$c' requires D == 2, got D = $D."))
        isequal(c, 'X') && return sparse([0 1; 1 0])
        isequal(c, 'Y') && return sparse([0 1;-1 0])   # i*σy, kept real
        return sparse([1 0; 0 -1])
    end
    throw(ArgumentError("Invalid spin symbol: $c."))
end
#---------------------------------------------------------------------------------------------------
"""
    spin(s::String; D::Integer=2)

Return the matrix for a spin-operator string.
Each character of `s` is a spin symbol (see `spin_dict`); the per-site
matrices are combined with a Kronecker product. Since `'y'` (and `'Y'` for
`D == 2`) is stored as `i*Sy`, the product is multiplied by `(-i)^ny`, where
`ny` counts the y-type symbols; for even `ny` the equivalent real form
`(-1)^(ny÷2)` is used so the result stays real-typed.
"""
function spin(s::String; D::Integer=2)
    # Count y-type symbols; 'Y' only exists for D == 2.
    ny = if isequal(D, 2)
        sum(isequal(si, 'y')||isequal(si, 'Y') for si in s)
    else
        sum(isequal(si, 'y') for si in s)
    end
    mat = isone(length(s)) ? spin_dict(s[1], D) : kron([spin_dict(si, D) for si in s]...)
    sign = iszero(mod(ny, 2)) ? (-1)^(ny÷2) : (-1im)^ny
    sign * mat
end
# Scaled variant.
spin(c::Number, s::String; D::Integer=2) = c * spin(s, D=D)
#---------------------------------------------------------------------------------------------------
"""
    spin(spins::Tuple{<:Number, String}...; D::Integer=2)

Return the matrix for a sum of spin-operator strings.
Each item is of the form `(coefficient, symbol_string)`; the result is the
coefficient-weighted sum of the corresponding matrices.
"""
function spin(spins::Tuple{<:Number, String}...; D::Integer=2)
    return mapreduce(t -> t[1] * spin(t[2], D=D), +, spins)
end
spin(spins::AbstractVector{<:Tuple{<:Number, String}}; D::Integer=2) = spin(spins..., D=D)
#---------------------------------------------------------------------------------------------------
# String-based constructor: build the local matrix from a spin-symbol string
# (see `spin`); the basis base fixes the local dimension.
function operator(s::String, inds::AbstractVector{<:Integer}, basis::AbstractBasis)
    mat = spin(s, D=basis.B)
    operator(mat, inds, basis)
end
#---------------------------------------------------------------------------------------------------
# Same, constructing a TensorBasis of length L first.
function operator(s::String, inds::AbstractVector{<:Integer}, L::Integer; base::Integer=2)
    basis = TensorBasis(L=L, base=base)
    operator(s, inds, basis)
end
#---------------------------------------------------------------------------------------------------
# Translation-invariant string operator on an explicit index pattern.
function trans_inv_operator(s::String, inds::AbstractVector{<:Integer}, basis::AbstractBasis)
    mat = spin(s, D=basis.B)
    trans_inv_operator(mat, inds, basis)
end
#---------------------------------------------------------------------------------------------------
# The string length fixes the number of consecutive sites the term acts on.
function trans_inv_operator(s::String, basis::AbstractBasis)
    mat = spin(s, D=basis.B)
    trans_inv_operator(mat, length(s), basis)
end
#---------------------------------------------------------------------------------------------------
function trans_inv_operator(s::String, L::Integer; base::Integer=2)
    basis = TensorBasis(L=L, base=base)
    trans_inv_operator(s, basis)
end
| EDKit | https://github.com/jayren3996/EDKit.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.