"""
    show_nodes(io::IO, graph::DAG)

Print a graph's nodes. Should only be used for small graphs, as it prints every node in a list.
"""
function show_nodes(io::IO, graph::DAG)
    print(io, "[")
    first = true
    for n in graph.nodes
        if first
            first = false
        else
            print(io, ", ")
        end
        print(io, n)
    end
    return print(io, "]")
end
"""
show(io::IO, graph::DAG)
Print the given graph to io. If there are too many nodes it will print only a summary of them.
"""
function Base.show(io::IO, graph::DAG)
    apply_all!(graph)
    println(io, "Graph:")
    print(io, " Nodes: ")

    nodeDict = Dict{Type,Int64}()
    noEdges = 0
    for node in graph.nodes
        if haskey(nodeDict, typeof(task(node)))
            nodeDict[typeof(task(node))] = nodeDict[typeof(task(node))] + 1
        else
            nodeDict[typeof(task(node))] = 1
        end
        noEdges += length(parents(node))
    end

    if length(graph.nodes) <= 20
        show_nodes(io, graph)
    else
        print(io, "Total: ", length(graph.nodes), ", ")
        first = true
        i = 0
        for (type, number) in zip(keys(nodeDict), values(nodeDict))
            i += 1
            if first
                first = false
            else
                print(io, ", ")
            end
            if (i % 3 == 0)
                print(io, "\n ")
            end
            print(io, type, ": ", number)
        end
    end

    println(io)
    println(io, " Edges: ", noEdges)
    properties = get_properties(graph)
    println(io, " Total Compute Effort: ", properties.computeEffort)
    println(io, " Total Data Transfer: ", properties.data)
    return println(io, " Total Compute Intensity: ", properties.computeIntensity)
end

"""
    get_properties(graph::DAG)

Return the graph's [`GraphProperties`](@ref).
"""
function get_properties(graph::DAG)
    # make sure the graph is fully generated
    apply_all!(graph)

    # TODO: tests stop working without the if condition, which means there is probably a bug in the lazy evaluation and in the tests
    if (graph.properties.computeEffort <= 0.0)
        graph.properties = GraphProperties(graph)
    end

    return graph.properties
end
"""
get_exit_node(graph::DAG)
Return the graph's exit node. This assumes the graph only has a single exit node. If the graph has multiple exit nodes, the one encountered first will be returned.
"""
function get_exit_node(graph::DAG)
for node in graph.nodes
if (is_exit_node(node))
return node
end
end
@assert false "The given graph has no exit node! It is either empty or not acyclic!"
end
"""
get_entry_nodes(graph::DAG)
Return a vector of the graph's entry nodes.
"""
function get_entry_nodes(graph::DAG)
apply_all!(graph)
result = Vector{Node}()
for node in graph.nodes
if (is_entry_node(node))
push!(result, node)
end
end
return result
end
"""
operation_stack_length(graph::DAG)
Return the number of operations applied to the graph.
"""
function operation_stack_length(graph::DAG)
return length(graph.appliedOperations) + length(graph.operationsToApply)
end

using DataStructures
"""
PossibleOperations
A struct storing all possible operations on a [`DAG`](@ref).
To get the [`PossibleOperations`](@ref) on a [`DAG`](@ref), use [`get_operations`](@ref).
"""
mutable struct PossibleOperations
nodeReductions::Set{NodeReduction}
nodeSplits::Set{NodeSplit}
end
"""
DAG
The representation of the graph as a set of [`Node`](@ref)s.
[`Operation`](@ref)s can be applied on it using [`push_operation!`](@ref) and reverted using [`pop_operation!`](@ref) like a stack.
To get the set of possible operations, use [`get_operations`](@ref).
The members of the object should not be manually accessed, instead always use the provided interface functions.
"""
mutable struct DAG
nodes::Set{Union{DataTaskNode,ComputeTaskNode}}
# The operations currently applied to the set of nodes
appliedOperations::Stack{AppliedOperation}
# The operations not currently applied but part of the current state of the DAG
operationsToApply::Deque{Operation}
# The possible operations at the current state of the DAG
possibleOperations::PossibleOperations
# The set of nodes whose possible operations need to be reevaluated
dirtyNodes::Set{Union{DataTaskNode,ComputeTaskNode}}
# "snapshot" system: keep track of added/removed nodes/edges since last snapshot
# these are muted in insert_node! etc.
diff::Diff
# the cached properties of the DAG
properties::GraphProperties
end
"""
PossibleOperations()
Construct and return an empty [`PossibleOperations`](@ref) object.
"""
function PossibleOperations()
return PossibleOperations(Set{NodeReduction}(), Set{NodeSplit}())
end
"""
DAG()
Construct and return an empty [`DAG`](@ref).
"""
function DAG()
return DAG(
Set{Node}(),
Stack{AppliedOperation}(),
Deque{Operation}(),
PossibleOperations(),
Set{Node}(),
Diff(),
GraphProperties(),
)
end
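
# A minimal sketch of the intended lifecycle of a `DAG`; `my_instance` stands in
# for some concrete AbstractProblemInstance, and none of these lines are executed
# here:
#
#   g = graph(my_instance)                         # build the DAG for a problem instance
#   props = get_properties(g)                      # forces apply_all! and caches the properties
#   ops = get_operations(g)                        # all currently possible operations
#   push_operation!(g, first(ops.nodeReductions))  # apply one operation, stack-like
#   pop_operation!(g)                              # revert the last applied operation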

"""
    is_connected(graph::DAG)

Return whether the given graph is connected.
"""
function is_connected(graph::DAG)
    nodeQueue = Deque{Node}()
    push!(nodeQueue, get_exit_node(graph))
    seenNodes = Set{Node}()

    while !isempty(nodeQueue)
        current = pop!(nodeQueue)
        push!(seenNodes, current)

        for child in current.children
            push!(nodeQueue, child[1])
        end
    end

    return length(seenNodes) == length(graph.nodes)
end
"""
is_valid(graph::DAG)
Validate the entire graph using asserts. Intended for testing with `@assert is_valid(graph)`.
"""
function is_valid(graph::DAG)
for node in graph.nodes
@assert is_valid(graph, node)
end
for op in graph.operationsToApply
@assert is_valid(graph, op)
end
for nr in graph.possibleOperations.nodeReductions
@assert is_valid(graph, nr)
end
for ns in graph.possibleOperations.nodeSplits
@assert is_valid(graph, ns)
end
for node in graph.dirtyNodes
@assert node in graph "Dirty Node is not part of the graph!"
@assert ismissing(node.nodeReduction) "Dirty Node has a NodeReduction!"
@assert ismissing(node.nodeSplit) "Dirty Node has a NodeSplit!"
end
@assert is_connected(graph) "Graph is not connected!"
return true
end
"""
is_scheduled(graph::DAG)
Validate that the entire graph has been scheduled, i.e., every [`ComputeTaskNode`](@ref) has its `.device` set.
"""
function is_scheduled(graph::DAG)
for node in graph.nodes
if (node isa DataTaskNode)
continue
end
@assert !ismissing(node.device)
end
return true
end
"""
AbstractModel
Base type for all models. From this, [`AbstractProblemInstance`](@ref)s can be constructed.
See also: [`problem_instance`](@ref)
"""
abstract type AbstractModel end
"""
problem_instance(::AbstractModel, ::Vararg)
Interface function that must be implemented for any implementation of [`AbstractModel`](@ref). This function should return a specific [`AbstractProblemInstance`](@ref) given some parameters.
"""
function problem_instance end
"""
AbstractProblemInstance
Base type for problem instances. An object of this type of a corresponding [`AbstractModel`](@ref) should uniquely identify a problem instance of that model.
"""
abstract type AbstractProblemInstance end
"""
input_type(problem::AbstractProblemInstance)
Return the input type for a specific [`AbstractProblemInstance`](@ref). This can be a specific type or a supertype for which all child types are expected to work.
"""
function input_type end
"""
graph(::AbstractProblemInstance)
Generate the [`DAG`](@ref) for the given [`AbstractProblemInstance`](@ref). Every entry node (see [`get_entry_nodes`](@ref)) to the graph must have a name set. Implement [`input_expr`](@ref) to return a valid expression for each of those names.
"""
function graph end
"""
input_expr(instance::AbstractProblemInstance, name::String, input_symbol::Symbol)
For the given [`AbstractProblemInstance`](@ref), the entry node name, and the symbol of the problem input (where a variable of type `input_type(...)` will exist), return an `Expr` that gets that specific input value from the input symbol.
"""
function input_expr end
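
# A hedged sketch of how this interface might be implemented for a hypothetical
# model; `MyModel`, `MyInstance`, and `build_my_graph` are invented names for
# illustration only, not part of the package:
#
#   struct MyModel <: AbstractModel end
#
#   struct MyInstance <: AbstractProblemInstance
#       n::Int
#   end
#
#   problem_instance(::MyModel, n::Int) = MyInstance(n)
#   input_type(::MyInstance) = Vector{Float64}
#   graph(instance::MyInstance) = build_my_graph(instance.n)
#   input_expr(::MyInstance, name::String, input_symbol::Symbol) =
#       :($input_symbol[parse(Int, $name)])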

"""
    ==(e1::Edge, e2::Edge)

Equality comparison between two edges.
"""
function Base.:(==)(e1::Edge, e2::Edge)
    return e1.edge[1] == e2.edge[1] && e1.edge[2] == e2.edge[2]
end

"""
    ==(n1::Node, n2::Node)

Fallback equality comparison between two nodes. For nodes of equal type, the more specific versions of this function will be called.
"""
function Base.:(==)(n1::Node, n2::Node)
    return false
end

"""
    ==(n1::ComputeTaskNode, n2::ComputeTaskNode)

Equality comparison between two [`ComputeTaskNode`](@ref)s.
"""
function Base.:(==)(
    n1::ComputeTaskNode{TaskType}, n2::ComputeTaskNode{TaskType}
) where {TaskType<:AbstractComputeTask}
    return n1.id == n2.id
end

"""
    ==(n1::DataTaskNode, n2::DataTaskNode)

Equality comparison between two [`DataTaskNode`](@ref)s.
"""
function Base.:(==)(
    n1::DataTaskNode{TaskType}, n2::DataTaskNode{TaskType}
) where {TaskType<:AbstractDataTask}
    return n1.id == n2.id
end
function DataTaskNode(t::AbstractDataTask, name="")
    return DataTaskNode(
        t,
        Vector{Node}(),
        Vector{Tuple{Node,Int}}(), # TODO this can only ever be a single child
        UUIDs.uuid1(rng[threadid()]),
        missing,
        missing,
        name,
    )
end

function ComputeTaskNode(t::AbstractComputeTask)
    return ComputeTaskNode(
        t,                            # task
        Vector{Node}(),               # parents
        Vector{Tuple{Node,Int}}(),    # children
        UUIDs.uuid1(rng[threadid()]), # id
        missing,                      # node reduction
        missing,                      # node split
        missing,                      # device
    )
end

Base.copy(m::Missing) = missing
Base.copy(n::ComputeTaskNode) = ComputeTaskNode(copy(task(n)))
Base.copy(n::DataTaskNode) = DataTaskNode(copy(task(n)), n.name)
"""
make_node(t::AbstractTask)
Fallback implementation of `make_node` for an [`AbstractTask`](@ref), throwing an error.
"""
function make_node(t::AbstractTask)
return error("Cannot make a node from this task type")
end
"""
make_node(t::AbstractDataTask)
Construct and return a new [`DataTaskNode`](@ref) with the given task.
"""
function make_node(t::AbstractDataTask, name::String="")
return DataTaskNode(t, name)
end
"""
make_node(t::AbstractComputeTask)
Construct and return a new [`ComputeTaskNode`](@ref) with the given task.
"""
function make_node(t::AbstractComputeTask)
return ComputeTaskNode(t)
end
"""
make_edge(n1::Node, n2::Node, index::Int)
Fallback implementation of `make_edge` throwing an error. If you got this error it likely means you tried to construct an edge between two nodes of the same type.
"""
function make_edge(n1::Node, n2::Node, index::Int=0)
return error("can only create edges from compute to data node or reverse")
end
"""
make_edge(n1::ComputeTaskNode, n2::DataTaskNode, index::Int)
Construct and return a new [`Edge`](@ref) pointing from `n1` (child) to `n2` (parent).
The index parameter is 0 by default and is passed to the parent node as argument index for its child.
"""
function make_edge(n1::ComputeTaskNode, n2::DataTaskNode, index::Int=0)
return Edge((n1, n2), index)
end
"""
make_edge(n1::DataTaskNode, n2::ComputeTaskNode)
Construct and return a new [`Edge`](@ref) pointing from `n1` (child) to `n2` (parent).
The index parameter is 0 by default and is passed to the parent node as argument index for its child.
"""
function make_edge(n1::DataTaskNode, n2::ComputeTaskNode, index::Int=0)
return Edge((n1, n2), index)
end
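
# A small hedged sketch of how nodes and edges fit together; `some_data_task`
# and `some_compute_task` stand in for concrete task objects of some model:
#
#   d_in  = make_node(some_data_task, "input1")   # named entry data node
#   c     = make_node(some_compute_task)
#   d_out = make_node(some_data_task)
#
#   e1 = make_edge(d_in, c)       # data child -> compute parent
#   e2 = make_edge(c, d_out)      # compute child -> data parent, index 0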

"""
    show(io::IO, n::Node)

Print a short string representation of the node to `io`.
"""
function Base.show(io::IO, n::Node)
    return print(io, "Node(", task(n), ")")
end

"""
    show(io::IO, e::Edge)

Print a short string representation of the edge to `io`.
"""
function Base.show(io::IO, e::Edge)
    return print(io, "Edge(", e.edge[1], ", ", e.edge[2], ")")
end

"""
    to_var_name(id::UUID)

Return the UUID as a string usable as a variable name in code generation.
"""
function to_var_name(id::UUID)
    str = "_" * replace(string(id), "-" => "_")
    return str
end
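
# For illustration, a UUID like 123e4567-e89b-12d3-a456-426614174000 (a made-up
# value) would map to the variable name below: a leading underscore, with every
# dash replaced by an underscore.
#
#   to_var_name(UUID("123e4567-e89b-12d3-a456-426614174000"))
#   # -> "_123e4567_e89b_12d3_a456_426614174000"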

"""
    is_entry_node(node::Node)

Return whether this node is an entry node in its graph, i.e., it has no children.
"""
is_entry_node(node::Node) = length(children(node)) == 0

"""
    is_exit_node(node::Node)

Return whether this node is an exit node of its graph, i.e., it has no parents.
"""
is_exit_node(node::Node)::Bool = length(parents(node)) == 0

"""
    task(node::Node)

Return the node's task.
"""
function task(
    node::DataTaskNode{TaskType}
)::TaskType where {TaskType<:Union{AbstractDataTask,AbstractComputeTask}}
    return node.task
end
function task(
    node::ComputeTaskNode{TaskType}
)::TaskType where {TaskType<:Union{AbstractDataTask,AbstractComputeTask}}
    return node.task
end
"""
children(node::Node)
Return node's children.
A node's children are its prerequisite nodes, nodes that need to execute before the task of this node.
A node's children are the nodes that must run before it.
"""
function children(node::DataTaskNode)
return node.children
end
function children(node::ComputeTaskNode)
return node.children
end
"""
parents(node::Node)
Return the node's parents.
A node's parents are its subsequent nodes, nodes that need this node to execute.
"""
function parents(node::DataTaskNode)
return node.parents
end
function parents(node::ComputeTaskNode)
return node.parents
end
"""
siblings(node::Node)
Return a vector of all siblings of this node.
A node's siblings are all children of any of its parents. The result contains no duplicates and includes the node itself.
"""
function siblings(node::Node)::Set{Node}
result = Set{Node}()
push!(result, node)
for parent in parents(node)
union!(result, getindex.(children(parent), 1))
end
return result
end
"""
partners(node::Node)
Return a vector of all partners of this node.
A node's partners are all parents of any of its children. The result contains no duplicates and includes the node itself.
!!! note
This is very slow when there are multiple children with many parents.
This is less of a problem in [`siblings(node::Node)`](@ref) because (depending on the model) there are no nodes with a large number of children, or only a single one.
"""
function partners(node::Node)::Set{Node}
result = Set{Node}()
push!(result, node)
for (child, index) in children(node)
union!(result, parents(child))
end
return result
end
"""
partners(node::Node, set::Set{Node})
Alternative version to [`partners(node::Node)`](@ref), avoiding allocation of a new set. Works on the given set and returns `nothing`.
"""
function partners(node::Node, set::Set{Node})
push!(set, node)
for (child, index) in children(node)
union!(set, parents(child))
end
return nothing
end
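
# To illustrate siblings and partners on a tiny, hypothetical diamond-shaped
# fragment (P1, P2, N, C1, C2 are invented nodes):
#
#       P1   P2      parents of N
#         \ /
#          N         the node in question
#         / \
#       C1   C2      children of N
#
#   siblings(N) == all children of P1 and P2 (N itself included)
#   partners(N) == all parents of C1 and C2 (N itself included)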
"""
is_parent(potential_parent::Node, node::Node)
Return whether the `potential_parent` is a parent of `node`.
"""
function is_parent(potential_parent::Node, node::Node)::Bool
return potential_parent in parents(node)
end
"""
is_child(potential_child::Node, node::Node)
Return whether the `potential_child` is a child of `node`.
"""
function is_child(potential_child::Node, node::Node)::Bool
return potential_child in getindex.(children(node), 1)
end

using Random
using UUIDs
using Base.Threads

# TODO: reliably find out how many threads we're running with (nthreads() returns 1 when precompiling :/)
rng = [Random.MersenneTwister(0) for _ in 1:128]

"""
    Node

The abstract base type of every node.

See [`DataTaskNode`](@ref), [`ComputeTaskNode`](@ref) and [`make_node`](@ref).
"""
abstract type Node end

# declare this type here because it's needed
abstract type Operation end
"""
DataTaskNode <: Node
Any node that transfers data and does no computation.
# Fields
`.task`: The node's data task type. Usually [`DataTask`](@ref).\\
`.parents`: A vector of the node's parents (i.e. nodes that depend on this one).\\
`.children`: A vector of tuples of the node's children (i.e. nodes that this one depends on) and their indices, indicating their order in the resulting function call passed to the task.\\
`.id`: The node's id. Improves the speed of comparisons and is used as a unique identifier.\\
`.nodeReduction`: Either this node's [`NodeReduction`](@ref) or `missing`, if none. There can only be at most one.\\
`.nodeSplit`: Either this node's [`NodeSplit`](@ref) or `missing`, if none. There can only be at most one.\\
`.name`: The name of this node for entry nodes into the graph ([`is_entry_node`](@ref)) to reliably assign the inputs to the correct nodes when executing.\\
"""
mutable struct DataTaskNode{TaskType<:AbstractDataTask} <: Node
task::TaskType
# use vectors as sets have way too much memory overhead
parents::Vector{Node}
children::Vector{Tuple{Node,Int}}
# need a unique identifier unique to every *constructed* node
# however, it can be copied when splitting a node
id::Base.UUID
# the NodeReduction involving this node, if it exists
# Can't use the NodeReduction type here because it's not yet defined
nodeReduction::Union{Operation,Missing}
# the NodeSplit involving this node, if it exists
nodeSplit::Union{Operation,Missing}
# for input nodes we need a name for the node to distinguish between them
name::String
end
"""
ComputeTaskNode <: Node
Any node that computes a result from inputs using an [`AbstractComputeTask`](@ref).
# Fields
`.task`: The node's compute task type. A concrete subtype of [`AbstractComputeTask`](@ref).\\
`.parents`: A vector of the node's parents (i.e. nodes that depend on this one).\\
`.children`: A vector of tuples with the node's children (i.e. nodes that this one depends on) and their index, used to order the arguments for the [`AbstractComputeTask`](@ref).\\
`.id`: The node's id. Improves the speed of comparisons and is used as a unique identifier.\\
`.nodeReduction`: Either this node's [`NodeReduction`](@ref) or `missing`, if none. There can only be at most one.\\
`.nodeSplit`: Either this node's [`NodeSplit`](@ref) or `missing`, if none. There can only be at most one.\\
`.device`: The Device this node has been scheduled on by a [`Scheduler`](@ref).
"""
mutable struct ComputeTaskNode{TaskType<:AbstractComputeTask} <: Node
task::TaskType
parents::Vector{Node}
children::Vector{Tuple{Node,Int}}
id::Base.UUID
nodeReduction::Union{Operation,Missing}
nodeSplit::Union{Operation,Missing}
# the device this node is assigned to execute on
device::Union{AbstractDevice,Missing}
end
"""
Edge
Type of an edge in the graph. Edges can only exist between a [`DataTaskNode`](@ref) and a [`ComputeTaskNode`](@ref) or vice versa, not between two of the same type of node.
An edge always points from child to parent: `child = e.edge[1]` and `parent = e.edge[2]`. Additionally, the `Edge`` contains the `index` which is used as the child's index in the parent node.
The child is the prerequisite node of the parent.
"""
struct Edge
# edge points from child to parent
edge::Union{Tuple{DataTaskNode,ComputeTaskNode},Tuple{ComputeTaskNode,DataTaskNode}}
# the index of the child in parent
index::Int
end

"""
    is_valid_node(graph::DAG, node::Node)

Verify that a given node is valid in the graph. Call like `@test is_valid_node(g, n)`. Uses `@assert` to fail if something is invalid, while also providing an error message.

This function is very performance intensive and should only be used when testing or debugging.

See also this function's specific versions for the concrete node types, [`is_valid(graph::DAG, node::ComputeTaskNode)`](@ref) and [`is_valid(graph::DAG, node::DataTaskNode)`](@ref).
"""
function is_valid_node(graph::DAG, node::Node)
    @assert node in graph "Node is not part of the given graph!"

    for parent in node.parents
        @assert typeof(parent) != typeof(node) "Node's type is the same as its parent's!"
        @assert parent in graph "Node's parent is not in the same graph!"
        @assert is_child(node, parent) "Node is not a child of its parent!"
    end

    for (child, index) in node.children
        @assert typeof(child) != typeof(node) "Node's type is the same as its child's!"
        @assert child in graph "Node's child is not in the same graph!"
        @assert node in child.parents "Node is not a parent of its child!"
    end

    #=if !ismissing(node.nodeReduction)
        @assert is_valid(graph, node.nodeReduction)
    end
    if !ismissing(node.nodeSplit)
        @assert is_valid(graph, node.nodeSplit)
    end=#

    return true
end
"""
is_valid(graph::DAG, node::ComputeTaskNode)
Verify that the given compute node is valid in the graph. Call with `@assert` or `@test` when testing or debugging.
This also calls [`is_valid_node(graph::DAG, node::Node)`](@ref).
"""
function is_valid(graph::DAG, node::ComputeTaskNode)
@assert is_valid_node(graph, node)
return true
end
"""
is_valid(graph::DAG, node::DataTaskNode)
Verify that the given compute node is valid in the graph. Call with `@assert` or `@test` when testing or debugging.
This also calls [`is_valid_node(graph::DAG, node::Node)`](@ref).
"""
function is_valid(graph::DAG, node::DataTaskNode)
@assert is_valid_node(graph, node)
return true
end

"""
    apply_all!(graph::DAG)

Apply all unapplied operations in the DAG. Automatically called by all functions that require the latest state of the [`DAG`](@ref).
"""
function apply_all!(graph::DAG)
    while !isempty(graph.operationsToApply)
        # get the next operation to apply from the front of the deque
        op = popfirst!(graph.operationsToApply)

        # apply it
        appliedOp = apply_operation!(graph, op)

        # push it to the top of the appliedOperations stack
        push!(graph.appliedOperations, appliedOp)
    end
    return nothing
end
"""
apply_operation!(graph::DAG, operation::Operation)
Fallback implementation of apply_operation! for unimplemented operation types, throwing an error.
"""
function apply_operation!(graph::DAG, operation::Operation)
return error("unknown operation type")
end
"""
apply_operation!(graph::DAG, operation::NodeReduction)
Apply the given [`NodeReduction`](@ref) to the graph. Generic wrapper around [`node_reduction!`](@ref).
Return an [`AppliedNodeReduction`](@ref) object generated from the graph's [`Diff`](@ref).
"""
function apply_operation!(graph::DAG, operation::NodeReduction)
diff = node_reduction!(graph, operation.input)
graph.properties += GraphProperties(diff)
return AppliedNodeReduction(operation, diff)
end
"""
apply_operation!(graph::DAG, operation::NodeSplit)
Apply the given [`NodeSplit`](@ref) to the graph. Generic wrapper around [`node_split!`](@ref).
Return an [`AppliedNodeSplit`](@ref) object generated from the graph's [`Diff`](@ref).
"""
function apply_operation!(graph::DAG, operation::NodeSplit)
diff = node_split!(graph, operation.input)
graph.properties += GraphProperties(diff)
return AppliedNodeSplit(operation, diff)
end
"""
revert_operation!(graph::DAG, operation::AppliedOperation)
Fallback implementation of operation reversion for unimplemented operation types, throwing an error.
"""
function revert_operation!(graph::DAG, operation::AppliedOperation)
return error("unknown operation type")
end
"""
revert_operation!(graph::DAG, operation::AppliedNodeReduction)
Revert the applied node reduction on the graph. Return the original [`NodeReduction`](@ref) operation.
"""
function revert_operation!(graph::DAG, operation::AppliedNodeReduction)
revert_diff!(graph, operation.diff)
return operation.operation
end
"""
revert_operation!(graph::DAG, operation::AppliedNodeSplit)
Revert the applied node split on the graph. Return the original [`NodeSplit`](@ref) operation.
"""
function revert_operation!(graph::DAG, operation::AppliedNodeSplit)
revert_diff!(graph, operation.diff)
return operation.operation
end
"""
revert_diff!(graph::DAG, diff::Diff)
Revert the given diff on the graph. Used to revert the individual [`AppliedOperation`](@ref)s with [`revert_operation!`](@ref).
"""
function revert_diff!(graph::DAG, diff::Diff)
# add removed nodes, remove added nodes, same for edges
# note the order
for edge in diff.addedEdges
_remove_edge!(graph, edge.edge[1], edge.edge[2]; track=false)
end
for node in diff.addedNodes
_remove_node!(graph, node; track=false)
end
for node in diff.removedNodes
_insert_node!(graph, node; track=false)
end
for edge in diff.removedEdges
_insert_edge!(graph, edge.edge[1], edge.edge[2], edge.index; track=false)
end
graph.properties -= GraphProperties(diff)
return nothing
end
"""
node_reduction!(graph::DAG, nodes::Vector{Node})
Reduce the given nodes together into one node, return the applied difference to the graph.
For details see [`NodeReduction`](@ref).
"""
function node_reduction!(graph::DAG, nodes::Vector{Node})
@assert is_valid_node_reduction_input(graph, nodes)
# clear snapshot
get_snapshot_diff(graph)
n1 = nodes[1]
n1_children = copy(children(n1))
n1_parents = Set(parents(n1))
# set of the new parents of n1 together with the index of the child nodes
new_parents = Set{Tuple{Node,Int}}()
# names of the previous children that n1 now replaces per parent
new_parents_child_names = Dict{Node,Symbol}()
# remove all of the nodes' parents and children and the nodes themselves (except for first node)
for i in 2:length(nodes)
n = nodes[i]
for (child, index) in n1_children
# no need to care about the indices here
_remove_edge!(graph, child, n)
end
for parent in copy(parents(n))
removed_index = _remove_edge!(graph, n, parent)
# collect all parents
push!(new_parents, (parent, removed_index))
new_parents_child_names[parent] = Symbol(to_var_name(n.id))
end
_remove_node!(graph, n)
end
for (parent, index) in new_parents
# now add parents of all input nodes to n1 without duplicates
if !(parent in n1_parents)
# don't double insert edges
_insert_edge!(graph, n1, parent, index)
end
end
return get_snapshot_diff(graph)
end
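
# To illustrate on a hypothetical fragment: two nodes `a` and `b` of the same
# task type with the identical child `c` are reduced into one, and `b`'s parent
# is rewired onto the kept node `a`:
#
#   before:  c -> a -> p1        after:  c -> a -> p1
#            c -> b -> p2                     a -> p2
#
# Afterwards the graph has `length(nodes) - 1` fewer nodes.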
"""
node_split!(graph::DAG, n1::Node)
Split the given node into one node per parent, return the applied difference to the graph.
For details see [`NodeSplit`](@ref).
"""
function node_split!(
graph::DAG, n1::Union{DataTaskNode{TaskType},ComputeTaskNode{TaskType}}
) where {TaskType<:AbstractTask}
@assert is_valid_node_split_input(graph, n1)
# clear snapshot
get_snapshot_diff(graph)
n1_parents = copy(parents(n1))
n1_children = copy(children(n1))
for parent in n1_parents
_remove_edge!(graph, n1, parent)
end
for (child, index) in n1_children
_remove_edge!(graph, child, n1)
end
_remove_node!(graph, n1)
for parent in n1_parents
n_copy = copy(n1)
_insert_node!(graph, n_copy)
_insert_edge!(graph, n_copy, parent)
for (child, index) in n1_children
_insert_edge!(graph, child, n_copy)
end
end
return get_snapshot_diff(graph)
end
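
# Node splitting is the inverse picture of the reduction sketched above: a node
# with multiple parents is removed and replaced by one copy per parent, with
# each copy inheriting all of the original node's children.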

# These are functions for "cleaning" nodes, i.e. regenerating the possible operations for a node
"""
find_reductions!(graph::DAG, node::Node)
Find node reductions involving the given node. The function pushes the found [`NodeReduction`](@ref) (if any) everywhere it needs to be and returns nothing.
"""
function find_reductions!(graph::DAG, node::Node)
# there can only be one reduction per node, avoid adding duplicates
if !ismissing(node.nodeReduction)
return nothing
end
reductionVector = nothing
# possible reductions are with nodes that are partners, i.e. parents of children
partners_ = partners(node)
delete!(partners_, node)
for partner in partners_
@assert partner in graph.nodes
if can_reduce(node, partner)
if reductionVector === nothing
# only when there's at least one reduction partner, insert the vector
reductionVector = Vector{Node}()
push!(reductionVector, node)
end
push!(reductionVector, partner)
end
end
if reductionVector !== nothing
nr = NodeReduction(reductionVector)
push!(graph.possibleOperations.nodeReductions, nr)
for node in reductionVector
if !ismissing(node.nodeReduction)
# it can happen that the dirty node becomes part of an existing NodeReduction and overrides those ones now
# this is only a problem insofar the existing NodeReduction has to be deleted and replaced also in the possibleOperations
invalidate_caches!(graph, node.nodeReduction)
end
node.nodeReduction = nr
end
end
return nothing
end
"""
find_splits!(graph::DAG, node::Node)
Find the node split of the given node. The function pushes the found [`NodeSplit`](@ref) (if any) everywhere it needs to be and returns nothing.
"""
function find_splits!(graph::DAG, node::Node)
if !ismissing(node.nodeSplit)
return nothing
end
if (can_split(node))
ns = NodeSplit(node)
push!(graph.possibleOperations.nodeSplits, ns)
node.nodeSplit = ns
end
return nothing
end
"""
clean_node!(graph::DAG, node::Node)
Sort this node's parent and child sets, then find reductions and splits involving it. Needs to be called after the node was changed in some way.
"""
function clean_node!(
graph::DAG, node::Union{DataTaskNode{TaskType},ComputeTaskNode{TaskType}}
) where {TaskType<:AbstractTask}
sort_node!(node)
find_reductions!(graph, node)
find_splits!(graph, node)
return nothing
end

# functions that find operations on the initial graph
using Base.Threads

"""
    insert_operation!(nr::NodeReduction)

Insert the given node reduction into its input nodes' operation caches. This is thread-safe.
"""
function insert_operation!(nr::NodeReduction)
    for n in nr.input
        n.nodeReduction = nr
    end
    return nothing
end

"""
    insert_operation!(ns::NodeSplit)

Insert the given node split into its input node's operation cache. This is thread-safe.
"""
function insert_operation!(ns::NodeSplit)
    ns.input.nodeSplit = ns
    return nothing
end

"""
    nr_insertion!(operations::PossibleOperations, nodeReductions::Vector{Vector{NodeReduction}})

Insert the node reductions into the graph and the nodes' caches. Employs multithreading for speedup.
"""
function nr_insertion!(
    operations::PossibleOperations, nodeReductions::Vector{Vector{NodeReduction}}
)
    total_len = 0
    for vec in nodeReductions
        total_len += length(vec)
    end
    sizehint!(operations.nodeReductions, total_len)

    t = @task for vec in nodeReductions
        union!(operations.nodeReductions, Set(vec))
    end
    schedule(t)

    @threads for vec in nodeReductions
        for op in vec
            insert_operation!(op)
        end
    end

    wait(t)

    return nothing
end
"""
ns_insertion!(operations::PossibleOperations, nodeSplits::Vector{Vector{NodeSplits}})
Insert the node splits into the graph and the nodes' caches. Employs multithreading for speedup.
"""
function ns_insertion!(
operations::PossibleOperations, nodeSplits::Vector{Vector{NodeSplit}}
)
total_len = 0
for vec in nodeSplits
total_len += length(vec)
end
sizehint!(operations.nodeSplits, total_len)
t = @task for vec in nodeSplits
union!(operations.nodeSplits, Set(vec))
end
schedule(t)
@threads for vec in nodeSplits
for op in vec
insert_operation!(op)
end
end
wait(t)
return nothing
end
"""
generate_operations(graph::DAG)
Generate all possible operations on the graph. Used initially when the graph is freshly assembled or parsed. Uses multithreading for speedup.
Safely inserts all the found operations into the graph and its nodes.
"""
function generate_operations(graph::DAG)
generatedReductions = [Vector{NodeReduction}() for _ in 1:nthreads()]
generatedSplits = [Vector{NodeSplit}() for _ in 1:nthreads()]
# make sure the graph is fully generated through
apply_all!(graph)
nodeArray = collect(graph.nodes)
# sort all nodes
@threads for node in nodeArray
sort_node!(node)
end
checkedNodes = Set{Node}()
checkedNodesLock = SpinLock()
# --- find possible node reductions ---
@threads for node in nodeArray
# we're looking for nodes with multiple parents, those parents can then potentially reduce with one another
if (length(node.parents) <= 1)
continue
end
candidates = node.parents
# sort into equivalence classes
trie = NodeTrie()
for candidate in candidates
# insert into trie
insert!(trie, candidate)
end
nodeReductions = collect(trie)
for nrVec in nodeReductions
# parent sets are ordered and any node can only be part of one nodeReduction, so a NodeReduction is uniquely identifiable by its first element
# this prevents duplicate nodeReductions being generated
lock(checkedNodesLock)
if (nrVec[1] in checkedNodes)
unlock(checkedNodesLock)
continue
else
push!(checkedNodes, nrVec[1])
end
unlock(checkedNodesLock)
push!(generatedReductions[threadid()], NodeReduction(nrVec))
end
end
# launch thread for node reduction insertion
# remove duplicates
nr_task = @spawn nr_insertion!(graph.possibleOperations, generatedReductions)
# find possible node splits
@threads for node in nodeArray
if (can_split(node))
push!(generatedSplits[threadid()], NodeSplit(node))
end
end
# launch thread for node split insertion
ns_task = @spawn ns_insertion!(graph.possibleOperations, generatedSplits)
empty!(graph.dirtyNodes)
wait(nr_task)
wait(ns_task)
return nothing
end

# function to return the possible operations of a graph
using Base.Threads

"""
    get_operations(graph::DAG)

Return the [`PossibleOperations`](@ref) of the graph at the current state.
"""
function get_operations(graph::DAG)
    apply_all!(graph)

    if isempty(graph.possibleOperations)
        generate_operations(graph)
    end

    clean_node!.(Ref(graph), graph.dirtyNodes)
    empty!(graph.dirtyNodes)

    return graph.possibleOperations
end

const _POSSIBLE_OPERATIONS_FIELDS = fieldnames(PossibleOperations)
_POIteratorStateType = NamedTuple{
    (:result, :state),Tuple{Union{NodeReduction,NodeSplit},Tuple{Symbol,Int64}}
}

@inline function Base.iterate(
    possibleOperations::PossibleOperations
)::Union{Nothing,_POIteratorStateType}
    for fieldname in _POSSIBLE_OPERATIONS_FIELDS
        iterator = iterate(getfield(possibleOperations, fieldname))
        if (!isnothing(iterator))
            return (result=iterator[1], state=(fieldname, iterator[2]))
        end
    end

    return nothing
end

@inline function Base.iterate(
    possibleOperations::PossibleOperations, state
)::Union{Nothing,_POIteratorStateType}
    newStateSym = state[1]
    newStateIt = iterate(getfield(possibleOperations, newStateSym), state[2])
    if !isnothing(newStateIt)
        return (result=newStateIt[1], state=(newStateSym, newStateIt[2]))
    end

    # cycle to the next field
    index = findfirst(x -> x == newStateSym, _POSSIBLE_OPERATIONS_FIELDS) + 1

    while index <= length(_POSSIBLE_OPERATIONS_FIELDS)
        newStateSym = _POSSIBLE_OPERATIONS_FIELDS[index]
        newStateIt = iterate(getfield(possibleOperations, newStateSym))
        if !isnothing(newStateIt)
            return (result=newStateIt[1], state=(newStateSym, newStateIt[2]))
        end
        index += 1
    end

    return nothing
end
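
# With the two `iterate` methods above, a `PossibleOperations` object can be
# iterated like any Julia collection, yielding every `NodeReduction` first and
# then every `NodeSplit`. A hedged usage sketch:
#
#   for op in get_operations(graph)
#       op isa NodeReduction && println("reduction: ", op)
#       op isa NodeSplit && println("split: ", op)
#   end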

"""
    show(io::IO, ops::PossibleOperations)

Print a string representation of the set of possible operations to `io`.
"""
function Base.show(io::IO, ops::PossibleOperations)
    print(io, length(ops.nodeReductions))
    println(io, " Node Reductions: ")
    for nr in ops.nodeReductions
        println(io, " - ", nr)
    end
    print(io, length(ops.nodeSplits))
    println(io, " Node Splits: ")
    for ns in ops.nodeSplits
        println(io, " - ", ns)
    end
end

"""
    show(io::IO, op::NodeReduction)

Print a string representation of the node reduction to `io`.
"""
function Base.show(io::IO, op::NodeReduction)
    print(io, "NR: ")
    print(io, length(op.input))
    print(io, "x")
    return print(io, task(op.input[1]))
end

"""
    show(io::IO, op::NodeSplit)

Print a string representation of the node split to `io`.
"""
function Base.show(io::IO, op::NodeSplit)
    print(io, "NS: ")
    return print(io, task(op.input))
end

"""
    Operation

An abstract base class for operations. An operation can be applied to a [`DAG`](@ref), changing its nodes and edges.

Possible operations on a [`DAG`](@ref) can be retrieved using [`get_operations`](@ref).

See also: [`push_operation!`](@ref), [`pop_operation!`](@ref)
"""
abstract type Operation end

"""
    AppliedOperation

An abstract base class for already applied operations.
An applied operation can be reverted iff it is the last applied operation on the DAG.
Every applied operation stores a [`Diff`](@ref) from when it was initially applied, to be able to revert the operation.

See also: [`revert_operation!`](@ref)
"""
abstract type AppliedOperation end

"""
    NodeReduction <: Operation

The NodeReduction operation. Represents the reduction of two or more nodes with one another.
Only one of the input nodes is kept, while all others are deleted and their parents are accumulated in the kept node's parents instead.

After the node reduction is applied, the graph has `length(nr.input) - 1` fewer nodes.

# Requirements for successful application

A vector of nodes can be reduced if:
- All nodes are in the graph.
- All nodes have the same task type.
- All nodes have the same set of children.

[`is_valid_node_reduction_input`](@ref) can be used to `@assert` these requirements.

See also: [`can_reduce`](@ref)
"""
struct NodeReduction{NodeType<:Node} <: Operation
    input::Vector{NodeType}
end

"""
    AppliedNodeReduction <: AppliedOperation

The applied version of the [`NodeReduction`](@ref).
"""
struct AppliedNodeReduction{NodeType<:Node} <: AppliedOperation
    operation::NodeReduction{NodeType}
    diff::Diff
end

"""
    NodeSplit <: Operation

The NodeSplit operation. Represents the split of its input node into one node for each of its parents. It is the reverse operation to the [`NodeReduction`](@ref).

# Requirements for successful application

A node can be split if:
- It is in the graph.
- It has at least 2 parents.

[`is_valid_node_split_input`](@ref) can be used to `@assert` these requirements.

See also: [`can_split`](@ref)
"""
struct NodeSplit{NodeType<:Node} <: Operation
    input::NodeType
end

"""
    AppliedNodeSplit <: AppliedOperation

The applied version of the [`NodeSplit`](@ref).
"""
struct AppliedNodeSplit{NodeType<:Node} <: AppliedOperation
    operation::NodeSplit{NodeType}
    diff::Diff
end

"""
    isempty(operations::PossibleOperations)

Return whether `operations` is empty, i.e., all of its fields are empty.
"""
function Base.isempty(operations::PossibleOperations)
    return isempty(operations.nodeReductions) && isempty(operations.nodeSplits)
end

"""
    length(operations::PossibleOperations)

Return a named tuple with the number of each of the operation types. The fields are named the same as the [`PossibleOperations`](@ref)'s.
"""
function Base.length(operations::PossibleOperations)
    return (
        nodeReductions=length(operations.nodeReductions),
        nodeSplits=length(operations.nodeSplits),
    )
end

"""
    delete!(operations::PossibleOperations, op::NodeReduction)

Delete the given node reduction from the possible operations.
"""
function Base.delete!(operations::PossibleOperations, op::NodeReduction)
    delete!(operations.nodeReductions, op)
    return operations
end

"""
    delete!(operations::PossibleOperations, op::NodeSplit)

Delete the given node split from the possible operations.
"""
function Base.delete!(operations::PossibleOperations, op::NodeSplit)
    delete!(operations.nodeSplits, op)
    return operations
end
"""
can_reduce(n1::Node, n2::Node)
Return whether the given two nodes can be reduced. See [`NodeReduction`](@ref) for the requirements.
"""
function can_reduce(n1::Node, n2::Node)
return false
end
function can_reduce(
n1::NodeType, n2::NodeType
) where {
TaskType<:AbstractTask,NodeType<:Union{DataTaskNode{TaskType},ComputeTaskNode{TaskType}}
}
n1_length = length(children(n1))
n2_length = length(children(n2))
if (n1_length != n2_length)
return false
end
# this seems to be the most common case so do this first
# doing it manually is a lot faster than using the sets for a general solution
if (n1_length == 2)
if (children(n1)[1] != children(n2)[1])
if (children(n1)[1] != children(n2)[2])
return false
end
# 1_1 == 2_2
if (children(n1)[2] != children(n2)[1])
return false
end
return true
end
# 1_1 == 2_1
if (children(n1)[2] != children(n2)[2])
return false
end
return true
end
# this is simple
if (n1_length == 1)
return children(n1)[1] == children(n2)[1]
end
# this takes a long time
return Set(children(n1)) == Set(children(n2))
end
"""
can_split(n1::Node)
Return whether the given node can be split. See [`NodeSplit`](@ref) for the requirements.
"""
function can_split(n::Node)
return length(parents(n)) > 1
end
"""
==(op1::Operation, op2::Operation)
Fallback implementation of operation equality. Return false. Actual comparisons are done by the overloads of same type operation comparisons.
"""
function Base.:(==)(op1::Operation, op2::Operation)
return false
end
"""
==(op1::NodeReduction, op2::NodeReduction)
Equality comparison between two node reductions. Two node reductions are considered equal when they have the same inputs.
"""
function Base.:(==)(op1::NodeReduction, op2::NodeReduction)
# node reductions are equal exactly if their first input is the same
return op1.input[1].id == op2.input[1].id
end
"""
==(op1::NodeSplit, op2::NodeSplit)
Equality comparison between two node splits. Two node splits are considered equal if they have the same input node.
"""
function Base.:(==)(op1::NodeSplit, op2::NodeSplit)
return op1.input == op2.input
end

# functions to throw assertion errors for inconsistent or wrong node operations
# should be called with @assert
# the functions throw their own errors though, to still have helpful error messages

"""
    is_valid_node_reduction_input(graph::DAG, nodes::Vector{Node})

Assert for a given node reduction input whether the nodes can be reduced. For the requirements of a node reduction see [`NodeReduction`](@ref).

Intended for use with `@assert` or `@test`.
"""
function is_valid_node_reduction_input(graph::DAG, nodes::Vector{Node})
    for n in nodes
        if n ∉ graph
            throw(
                AssertionError(
                    "[Node Reduction] the given nodes are not part of the given graph"
                ),
            )
        end
        @assert is_valid(graph, n)
    end

    t = typeof(task(nodes[1]))
    for n in nodes
        if typeof(task(n)) != t
            throw(
                AssertionError("[Node Reduction] the given nodes are not of the same type")
            )
        end

        if (typeof(n) <: DataTaskNode)
            if (n.name != nodes[1].name)
                throw(
                    AssertionError(
                        "[Node Reduction] the given nodes do not have the same name"
                    ),
                )
            end
        end
    end

    n1_children = children(nodes[1])
    for n in nodes
        if Set(n1_children) != Set(children(n))
            throw(
                AssertionError(
                    "[Node Reduction] the given nodes do not have equal prerequisite nodes which is required for node reduction",
                ),
            )
        end
    end

    return true
end
"""
is_valid_node_split_input(graph::DAG, n1::Node)
Assert for a gven node split input whether the node can be split. For the requirements of a node split see [`NodeSplit`](@ref).
Intended for use with `@assert` or `@test`.
"""
function is_valid_node_split_input(graph::DAG, n1::Node)
if n1 ∉ graph
throw(AssertionError("[Node Split] the given node is not part of the given graph"))
end
if length(n1.parents) <= 1
throw(
AssertionError(
"[Node Split] the given node does not have multiple parents which is required for node split",
),
)
end
@assert is_valid(graph, n1)
return true
end
"""
is_valid(graph::DAG, nr::NodeReduction)
Assert for a given [`NodeReduction`](@ref) whether it is a valid operation in the graph.
Intended for use with `@assert` or `@test`.
"""
function is_valid(graph::DAG, nr::NodeReduction)
@assert is_valid_node_reduction_input(graph, nr.input)
return true
end
"""
is_valid(graph::DAG, nr::NodeSplit)
Assert for a given [`NodeSplit`](@ref) whether it is a valid operation in the graph.
Intended for use with `@assert` or `@test`.
"""
function is_valid(graph::DAG, ns::NodeSplit)
@assert is_valid_node_split_input(graph, ns.input)
return true
end

"""
    GreedyOptimizer

An implementation of the greedy optimization algorithm, simply choosing the best next option evaluated with the given estimator.

The fixpoint is reached when any leftover operation would increase the graph's total cost according to the given estimator.
"""
struct GreedyOptimizer{EstimatorType<:AbstractEstimator} <: AbstractOptimizer
    estimator::EstimatorType
end

function optimize_step!(optimizer::GreedyOptimizer, graph::DAG)
    # generate all options
    operations = get_operations(graph)
    if isempty(operations)
        return false
    end

    result = nothing

    lowestCost = reduce(
        (acc, op) -> begin
            op_cost = operation_effect(optimizer.estimator, graph, op)
            if isless(op_cost, acc)
                result = op
                return op_cost
            end
            return acc
        end,
        operations;
        init=typemax(cost_type(optimizer.estimator)),
    )

    if lowestCost > zero(cost_type(optimizer.estimator))
        return false
    end

    push_operation!(graph, result)

    return true
end
function fixpoint_reached(optimizer::GreedyOptimizer, graph::DAG)
    # generate all options
    operations = get_operations(graph)
    if isempty(operations)
        return true
    end

    lowestCost = reduce(
        (acc, op) -> begin
            op_cost = operation_effect(optimizer.estimator, graph, op)
            if isless(op_cost, acc)
                return op_cost
            end
            return acc
        end,
        operations;
        init=typemax(cost_type(optimizer.estimator)),
    )

    if lowestCost > zero(cost_type(optimizer.estimator))
        return true
    end

    return false
end

function optimize_to_fixpoint!(optimizer::GreedyOptimizer, graph::DAG)
    while optimize_step!(optimizer, graph)
    end
    return nothing
end

function String(optimizer::GreedyOptimizer)
    return "greedy_optimizer_$(optimizer.estimator)"
end
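
# A hedged usage sketch, assuming some concrete `AbstractEstimator` exists; the
# `MyEstimator` name is a placeholder for whatever estimator the package
# provides:
#
#   optimizer = GreedyOptimizer(MyEstimator())
#   optimize_to_fixpoint!(optimizer, graph)   # apply operations while they lower the cost
#   get_properties(graph)                     # inspect the optimized graph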
"""
AbstractOptimizer
Abstract base type for optimizer implementations.
"""
abstract type AbstractOptimizer end
"""
optimize_step!(optimizer::AbstractOptimizer, graph::DAG)
Interface function that must be implemented by implementations of [`AbstractOptimizer`](@ref). Returns `true` if an operations has been applied, `false` if not, usually when a fixpoint of the algorithm has been reached.
It should do one smallest logical step on the given [`DAG`](@ref), muting the graph and, if necessary, the optimizer's state.
"""
function optimize_step! end
"""
optimize!(optimizer::AbstractOptimizer, graph::DAG, n::Int)
Function calling the given optimizer `n` times, muting the graph. Returns `true` if the requested number of operations has been applied, `false` if not, usually when a fixpoint of the algorithm has been reached.
If a more efficient method exists, this can be overloaded for a specific optimizer.
"""
function optimize!(optimizer::AbstractOptimizer, graph::DAG, n::Int)
for i in 1:n
if !optimize_step!(optimizer, graph)
return false
end
end
return true
end
"""
fixpoint_reached(optimizer::AbstractOptimizer, graph::DAG)
Interface function that can be implemented by optimization algorithms that can reach a fixpoint, returning as a `Bool` whether it has been reached. The default implementation returns `false`.
See also: [`optimize_to_fixpoint!`](@ref)
"""
function fixpoint_reached(optimizer::AbstractOptimizer, graph::DAG)
return false
end
"""
optimize_to_fixpoint!(optimizer::AbstractOptimizer, graph::DAG)
Interface function that can be implemented by optimization algorithms that can reach a fixpoint. The algorithm will be run until that fixpoint is reached, at which point [`fixpoint_reached`](@ref) should return true.
A usual implementation might look like this:
```julia
function optimize_to_fixpoint!(optimizer::MyOptimizer, graph::DAG)
while !fixpoint_reached(optimizer, graph)
optimize_step!(optimizer, graph)
end
return nothing
end
```
"""
function optimize_to_fixpoint! end

using Random
"""
RandomWalkOptimizer
An optimizer that randomly pushes or pops operations. It doesn't optimize in any direction and is useful mainly for testing purposes.
This algorithm never reaches a fixpoint, so it does not implement [`optimize_to_fixpoint!`](@ref).
"""
struct RandomWalkOptimizer <: AbstractOptimizer
rng::AbstractRNG
end
function optimize_step!(optimizer::RandomWalkOptimizer, graph::DAG)
operations = get_operations(graph)
if sum(length(operations)) == 0 &&
length(graph.appliedOperations) + length(graph.operationsToApply) == 0
# in case there are zero operations possible at all on the graph
return false
end
r = optimizer.rng
# try until something was applied or popped
while true
# choose push or pop
if rand(r, Bool)
# push
# choose one of split/reduce
option = rand(r, 1:2)
if option == 1 && !isempty(operations.nodeReductions)
push_operation!(graph, rand(r, collect(operations.nodeReductions)))
return true
elseif option == 2 && !isempty(operations.nodeSplits)
push_operation!(graph, rand(r, collect(operations.nodeSplits)))
return true
end
else
# pop
if (can_pop(graph))
pop_operation!(graph)
return true
end
end
end
end
function String(::RandomWalkOptimizer)
return "random_walker"
end
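
# A hedged usage sketch: a seeded random walk over the operation space, useful
# for fuzz-testing that push/pop round-trips keep the graph valid.
#
#   walker = RandomWalkOptimizer(Random.MersenneTwister(42))
#   optimize!(walker, graph, 100)   # take 100 random push/pop steps
#   @assert is_valid(graph)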

"""
    ReductionOptimizer

An optimizer that simply applies an available [`NodeReduction`](@ref) on each step. It implements [`optimize_to_fixpoint!`](@ref). The fixpoint is reached when there are no more possible [`NodeReduction`](@ref)s in the graph.

See also: [`SplitOptimizer`](@ref)
"""
struct ReductionOptimizer <: AbstractOptimizer end

function optimize_step!(optimizer::ReductionOptimizer, graph::DAG)
    # generate all options
    operations = get_operations(graph)
    if fixpoint_reached(optimizer, graph)
        return false
    end

    push_operation!(graph, first(operations.nodeReductions))

    return true
end

function fixpoint_reached(optimizer::ReductionOptimizer, graph::DAG)
    operations = get_operations(graph)
    return isempty(operations.nodeReductions)
end

function optimize_to_fixpoint!(optimizer::ReductionOptimizer, graph::DAG)
    while !fixpoint_reached(optimizer, graph)
        optimize_step!(optimizer, graph)
    end
    return nothing
end

function String(::ReductionOptimizer)
    return "reduction_optimizer"
end

"""
    SplitOptimizer

An optimizer that simply applies an available [`NodeSplit`](@ref) on each step. It implements [`optimize_to_fixpoint!`](@ref). The fixpoint is reached when there are no more possible [`NodeSplit`](@ref)s in the graph.

See also: [`ReductionOptimizer`](@ref)
"""
struct SplitOptimizer <: AbstractOptimizer end

function optimize_step!(optimizer::SplitOptimizer, graph::DAG)
    # generate all options
    operations = get_operations(graph)
    if fixpoint_reached(optimizer, graph)
        return false
    end

    push_operation!(graph, first(operations.nodeSplits))

    return true
end

function fixpoint_reached(optimizer::SplitOptimizer, graph::DAG)
    operations = get_operations(graph)
    return isempty(operations.nodeSplits)
end

function optimize_to_fixpoint!(optimizer::SplitOptimizer, graph::DAG)
    while !fixpoint_reached(optimizer, graph)
        optimize_step!(optimizer, graph)
    end
    return nothing
end

function String(::SplitOptimizer)
    return "split_optimizer"
end

"""
    GraphProperties()

Create an empty [`GraphProperties`](@ref) object.
"""
function GraphProperties()
    return (
        data=0.0, computeEffort=0.0, computeIntensity=0.0, noNodes=0, noEdges=0
    )::GraphProperties
end

@inline function _props(
    node::DataTaskNode{TaskType}
)::Tuple{Float64,Float64,Int64} where {TaskType<:AbstractDataTask}
    return (data(task(node)) * length(parents(node)), 0.0, length(parents(node)))
end
@inline function _props(
    node::ComputeTaskNode{TaskType}
)::Tuple{Float64,Float64,Int64} where {TaskType<:AbstractComputeTask}
    return (0.0, compute_effort(task(node)), length(parents(node)))
end
"""
GraphProperties(graph::DAG)
Calculate the graph's properties and return the constructed [`GraphProperties`](@ref) object.
"""
function GraphProperties(graph::DAG)
# make sure the graph is fully generated
apply_all!(graph)
d = 0.0
ce = 0.0
ed = 0
for node in graph.nodes
props = _props(node)
d += props[1]
ce += props[2]
ed += props[3]
end
return (
data=d,
computeEffort=ce,
computeIntensity=(d == 0) ? 0.0 : ce / d,
noNodes=length(graph.nodes),
noEdges=ed,
)::GraphProperties
end
"""
GraphProperties(diff::Diff)
Create the graph properties difference from a given [`Diff`](@ref).
The graph's properties after applying the [`Diff`](@ref) will be `get_properties(graph) + GraphProperties(diff)`.
For reverting a diff, it's `get_properties(graph) - GraphProperties(diff)`.
"""
function GraphProperties(diff::Diff)
ce =
reduce(+, compute_effort(task(n)) for n in diff.addedNodes; init=0.0) -
reduce(+, compute_effort(task(n)) for n in diff.removedNodes; init=0.0)
d =
reduce(+, data(task(n)) for n in diff.addedNodes; init=0.0) -
reduce(+, data(task(n)) for n in diff.removedNodes; init=0.0)
return (
data=d,
computeEffort=ce,
computeIntensity=(d == 0) ? 0.0 : ce / d,
noNodes=length(diff.addedNodes) - length(diff.removedNodes),
noEdges=length(diff.addedEdges) - length(diff.removedEdges),
)::GraphProperties
end
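
# A worked toy example of the diff arithmetic (all numbers invented): if a diff
# adds nodes worth computeEffort 6.0 and data 2.0, and removes nodes worth
# computeEffort 2.0 and data 1.0, then
#
#   GraphProperties(diff).computeEffort    == 6.0 - 2.0 == 4.0
#   GraphProperties(diff).data             == 2.0 - 1.0 == 1.0
#   GraphProperties(diff).computeIntensity == 4.0 / 1.0 == 4.0
#
# and `get_properties(graph) + GraphProperties(diff)` yields the properties of
# the graph after the diff has been applied.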

"""
    GraphProperties

Representation of a [`DAG`](@ref)'s properties.

# Fields:
`.data`: The total data transfer.\\
`.computeEffort`: The total compute effort.\\
`.computeIntensity`: The compute intensity, will always equal `.computeEffort / .data`.\\
`.noNodes`: Number of [`Node`](@ref)s.\\
`.noEdges`: Number of [`Edge`](@ref)s.
"""
const GraphProperties = NamedTuple{
    (:data, :computeEffort, :computeIntensity, :noNodes, :noEdges),
    Tuple{Float64,Float64,Float64,Int,Int},
}

"""
    -(prop1::GraphProperties, prop2::GraphProperties)

Subtract `prop2` from `prop1` and return the result as a new [`GraphProperties`](@ref).
Also takes care to keep a consistent compute intensity.
"""
function Base.:-(prop1::GraphProperties, prop2::GraphProperties)
    return (
        data=prop1.data - prop2.data,
        computeEffort=prop1.computeEffort - prop2.computeEffort,
        computeIntensity=if (prop1.data - prop2.data == 0)
            0.0
        else
            (prop1.computeEffort - prop2.computeEffort) / (prop1.data - prop2.data)
        end,
        noNodes=prop1.noNodes - prop2.noNodes,
        noEdges=prop1.noEdges - prop2.noEdges,
    )::GraphProperties
end
"""
+(prop1::GraphProperties, prop2::GraphProperties)
Add `prop1` and `prop2` and return the result as a new [`GraphProperties`](@ref).
Also take care to keep consistent compute intensity.
"""
function Base.:+(prop1::GraphProperties, prop2::GraphProperties)
return (
data=prop1.data + prop2.data,
computeEffort=prop1.computeEffort + prop2.computeEffort,
computeIntensity=if (prop1.data + prop2.data == 0)
0.0
else
(prop1.computeEffort + prop2.computeEffort) / (prop1.data + prop2.data)
end,
noNodes=prop1.noNodes + prop2.noNodes,
noEdges=prop1.noEdges + prop2.noEdges,
)::GraphProperties
end
"""
-(prop::GraphProperties)
Unary negation of the graph properties. `.computeIntensity` will not be negated because `.data` and `.computeEffort` both are.
"""
function Base.:-(prop::GraphProperties)
return (
data=-prop.data,
computeEffort=-prop.computeEffort,
computeIntensity=prop.computeIntensity, # no negation here!
noNodes=-prop.noNodes,
noEdges=-prop.noEdges,
)::GraphProperties
end
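# Illustrative sketch (not part of the package source): since GraphProperties
# is a plain NamedTuple, instances can be written literally, and the operators
# above keep `computeIntensity == computeEffort / data` consistent:
#
#   a = (data=4.0, computeEffort=8.0, computeIntensity=2.0, noNodes=3, noEdges=2)
#   b = (data=2.0, computeEffort=2.0, computeIntensity=1.0, noNodes=1, noEdges=1)
#   c = a + b   # c.computeIntensity == 10.0 / 6.0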
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 1821 |
"""
GreedyScheduler
A greedy implementation of a scheduler, creating a topological ordering of nodes and naively balancing them onto the different devices.
"""
struct GreedyScheduler <: AbstractScheduler end
function schedule_dag(::GreedyScheduler, graph::DAG, machine::Machine)
nodeQueue = PriorityQueue{Node,Int}()
    # use a priority equal to the number of unseen children -> nodes with priority 0 are ready to be scheduled
for node in get_entry_nodes(graph)
enqueue!(nodeQueue, node => 0)
end
schedule = Vector{FunctionCall}()
sizehint!(schedule, length(graph.nodes))
# keep an accumulated cost of things scheduled to this device so far
deviceAccCost = PriorityQueue{AbstractDevice,Float64}()
for device in machine.devices
enqueue!(deviceAccCost, device => 0)
end
node = nothing
while !isempty(nodeQueue)
@assert peek(nodeQueue)[2] == 0
node = dequeue!(nodeQueue)
# assign the device with lowest accumulated cost to the node (if it's a compute node)
if (isa(node, ComputeTaskNode))
            lowestDevice, accCost = peek(deviceAccCost)
            node.device = lowestDevice
            # accumulate the scheduled task's cost onto the chosen device
            deviceAccCost[lowestDevice] = accCost + compute_effort(task(node))
end
if (node isa DataTaskNode && length(children(node)) == 0)
push!(schedule, get_init_function_call(node, entry_device(machine)))
else
push!(schedule, get_function_call(node)...)
end
for parent in parents(node)
# reduce the priority of all parents by one
if (!haskey(nodeQueue, parent))
enqueue!(nodeQueue, parent => length(children(parent)) - 1)
else
nodeQueue[parent] = nodeQueue[parent] - 1
end
end
end
return schedule
end
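# Example usage (sketch; `graph::DAG` and `machine::Machine` must be
# constructed elsewhere):
#
#   schedule = schedule_dag(GreedyScheduler(), graph, machine)
#   # `schedule` is a Vector{FunctionCall} in a valid topological order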
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 858 |
"""
AbstractScheduler
Abstract base type for scheduler implementations. The scheduler is used to assign each node to a device and create a topological ordering of tasks.
"""
abstract type AbstractScheduler end
"""
    schedule_dag(::AbstractScheduler, ::DAG, ::Machine)
Interface function that must be implemented for every implementation of [`AbstractScheduler`](@ref).
The function assigns each [`ComputeTaskNode`](@ref) of the [`DAG`](@ref) to one of the devices in the given [`Machine`](@ref) and returns a `Vector{FunctionCall}` representing a topological ordering of the computation.
[`DataTaskNode`](@ref)s are not scheduled to devices since they do not compute. Instead, a data node transfers data from the [`AbstractDevice`](@ref) of its child to all [`AbstractDevice`](@ref)s of its parents.
See [`FunctionCall`](@ref).
"""
function schedule_dag end
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 568 | using StaticArrays
"""
    FunctionCall{VectorType,N}
Type representing a function call with `N` value parameters. Contains the function to call, the value arguments (prepended to the runtime arguments), the argument symbols, the return symbol, and the device to execute on.
"""
struct FunctionCall{VectorType<:AbstractVector,N}
func::Function
# TODO: this should be a tuple
value_arguments::SVector{N,Any} # value arguments for the function call, will be prepended to the other arguments
arguments::VectorType # symbols of the inputs to the function call
return_symbol::Symbol
device::AbstractDevice
end
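# Illustrative construction (sketch, not part of the package source): a call
# computing `y = identity(x1)` on some device `dev`, mirroring how
# get_function_call builds these objects:
#
#   fc = FunctionCall(identity, SVector{0,Any}(), SVector{1,Symbol}(:x1), :y, dev)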
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 768 | """
==(t1::AbstractTask, t2::AbstractTask)
Fallback implementation of equality comparison between two abstract tasks, always returning false. If `t1` and `t2` have the same concrete type, a more specific method dispatches instead and performs an actual comparison.
"""
function Base.:(==)(t1::AbstractTask, t2::AbstractTask)
return false
end
"""
==(t1::AbstractComputeTask, t2::AbstractComputeTask)
Equality comparison between two compute tasks.
"""
function Base.:(==)(t1::AbstractComputeTask, t2::AbstractComputeTask)
return typeof(t1) == typeof(t2)
end
"""
==(t1::AbstractDataTask, t2::AbstractDataTask)
Equality comparison between two data tasks.
"""
function Base.:(==)(t1::AbstractDataTask, t2::AbstractDataTask)
return data(t1) == data(t2)
end
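# For example, two DataTasks compare equal iff they transfer the same amount
# of data:
#
#   DataTask(4.0) == DataTask(4.0)   # true
#   DataTask(4.0) == DataTask(8.0)   # false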
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 2714 | using StaticArrays
"""
get_function_call(n::Node)
get_function_call(t::AbstractTask, device::AbstractDevice, in_symbols::AbstractVector, out_symbol::Symbol)
For a node or a task together with necessary information, return a vector of [`FunctionCall`](@ref)s for the computation of the node or task.
For ordinary compute or data tasks the vector will contain exactly one element.
"""
function get_function_call(
t::CompTask, device::AbstractDevice, in_symbols::AbstractVector, out_symbol::Symbol
) where {CompTask<:AbstractComputeTask}
return [FunctionCall(compute, SVector{1,Any}(t), in_symbols, out_symbol, device)]
end
function get_function_call(node::ComputeTaskNode)
@assert length(children(node)) <= children(task(node)) "node $(node) has too many children for its task: node has $(length(node.children)) versus task has $(children(task(node)))\nnode's children: $(getfield.(node.children, :children))"
@assert !ismissing(node.device) "trying to get expression for an unscheduled ComputeTaskNode\nnode: $(node)"
# make sure the node is sorted so the arguments keep their order
sort_node!(node)
if (length(node.children) <= 800)
        # only use an SVector when there are few children
return get_function_call(
node.task,
node.device,
SVector{length(node.children),Symbol}(
Symbol.(to_var_name.(getfield.(getindex.(children(node), 1), :id)))...
),
Symbol(to_var_name(node.id)),
)
else
return get_function_call(
node.task,
node.device,
Symbol.(to_var_name.(getfield.(getindex.(children(node), 1), :id))),
Symbol(to_var_name(node.id)),
)
end
end
function get_function_call(node::DataTaskNode)
@assert length(children(node)) == 1 "trying to call get_expression on a data task node that has $(length(node.children)) children instead of 1"
# TODO: dispatch to device implementations generating the copy commands
return [
FunctionCall(
unpack_identity,
SVector{0,Any}(),
SVector{1,Symbol}(Symbol(to_var_name(first(children(node))[1].id))),
Symbol(to_var_name(node.id)),
first(children(node))[1].device,
),
]
end
function get_init_function_call(node::DataTaskNode, device::AbstractDevice)
@assert isempty(children(node)) "trying to call get_init_expression on a data task node that is not an entry node."
return FunctionCall(
unpack_identity,
SVector{0,Any}(),
SVector{1,Symbol}(Symbol("$(to_var_name(node.id))_in")),
Symbol(to_var_name(node.id)),
device,
)
end
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 340 | """
copy(t::AbstractDataTask)
Fallback implementation of the copy of an abstract data task, throwing an error.
"""
Base.copy(t::AbstractDataTask) = error("need to implement copying for your data tasks")
"""
copy(t::AbstractComputeTask)
Return a copy of the given compute task.
"""
Base.copy(t::AbstractComputeTask) = typeof(t)()
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 1169 | """
compute(t::AbstractTask; data...)
Interface function for the computation of a compute task. No default implementation exists, so calling it for a task type without a method throws a `MethodError`.
"""
function compute end
"""
compute_effort(t::AbstractTask)
Interface function returning the compute effort of a task. Compute tasks must implement this; for data tasks a zero-effort default is defined below.
"""
function compute_effort end
"""
data(t::AbstractTask)
Interface function returning the data of a task. Defaults for data tasks (their `.data` field) and compute tasks (zero) are defined below.
"""
function data end
"""
compute_effort(t::AbstractDataTask)
Return the compute effort of a data task, always zero, regardless of the specific task.
"""
compute_effort(t::AbstractDataTask)::Float64 = 0.0
"""
data(t::AbstractDataTask)
Return the data of a data task. Given by the task's `.data` field.
"""
data(t::AbstractDataTask)::Float64 = getfield(t, :data)
"""
copy(t::DataTask)
Copy the data task and return it.
"""
Base.copy(t::DataTask) = DataTask(t.data)
"""
children(::DataTask)
Return the number of children of a data task (always 1).
"""
children(::DataTask) = 1
"""
data(t::AbstractComputeTask)
Return the data of a compute task, always zero, regardless of the specific task.
"""
data(t::AbstractComputeTask)::Float64 = 0.0
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 522 | """
AbstractTask
The shared base type for any task.
"""
abstract type AbstractTask end
"""
AbstractComputeTask <: AbstractTask
The shared base type for any compute task.
"""
abstract type AbstractComputeTask <: AbstractTask end
"""
AbstractDataTask <: AbstractTask
The shared base type for any data task.
"""
abstract type AbstractDataTask <: AbstractTask end
"""
DataTask <: AbstractDataTask
Task representing a specific data transfer.
"""
struct DataTask <: AbstractDataTask
data::Float64
end
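# Illustrative sketch (not part of the package source): a custom compute task
# subtypes AbstractComputeTask and implements the interface functions from
# task/properties.jl and task/compute.jl:
#
#   struct AddTask <: AbstractComputeTask end
#   compute_effort(::AddTask) = 1.0
#   compute(::AddTask, a, b) = a + b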
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 179 | using SafeTestsets
@safetestset "Utility Unit Tests" begin
include("unit_tests_utility.jl")
end
# TODO: Make a new simple test model and rewrite tests here
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | code | 438 | using ComputableDAGs
import ComputableDAGs.bytes_to_human_readable
@test bytes_to_human_readable(0) == "0.0 B"
@test bytes_to_human_readable(1020) == "1020.0 B"
@test bytes_to_human_readable(1025) == "1.001 KiB"
@test bytes_to_human_readable(684235) == "668.2 KiB"
@test bytes_to_human_readable(86214576) == "82.22 MiB"
@test bytes_to_human_readable(9241457698) == "8.607 GiB"
@test bytes_to_human_readable(3218598654367) == "2.927 TiB"
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 2087 | # ComputableDAGs.jl
[Build Status](https://github.com/ComputableDAGs/ComputableDAGs.jl/actions/workflows/unit_tests.yml/)
[Documentation (dev)](https://ComputableDAGs.github.io/ComputableDAGs.jl/dev/)
[Code Style: Blue](https://github.com/invenia/BlueStyle)
Represent computations as Directed Acyclic Graphs (DAGs), analyze and optimize them, then compile to native code and run!
## Usage
For all the julia calls, use `-t n` to give julia `n` threads.
Instantiate the project first:
`julia --project=./ -e 'import Pkg; Pkg.instantiate()'`
### Run Tests
To run all tests, run
`julia -O0 --project=./ -e 'import Pkg; Pkg.test()'`
## Concepts
### Generate Operations from chains
We assume we have a (valid) DAG given. We can generate all initially possible graph operations from it, and we can calculate the graph properties like compute effort and total data transfer.
Goal: after an operation has been applied, regenerate the set of possible operations without having to copy the entire graph. This lets optimization algorithms explore sequences of operations and build up search trees, much like chess engines do.
Idea: Keep the original graph, a list of possible operations at the current state, and a queue of applied operations together. The "actual" graph is then the original graph with all operations in the queue applied. We can push and pop new operations to/from the queue, automatically updating the graph's global metrics and possible optimizations from there.
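A minimal sketch of that workflow (a conceptual illustration; the exact function names are assumptions based on the package's interface):

```julia
ops = get_operations(graph)   # possible operations at the current state
push_operation!(graph, op)    # tentatively apply one operation `op`
pop_operation!(graph)         # revert it, restoring metrics and possible operations
```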
## Acknowledgements and Funding
This work was partly funded by the Center for Advanced Systems Understanding (CASUS) that is financed by Germany’s Federal Ministry of Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the budget approved by the Saxon State Parliament.
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 146 | # Contribution
Feel free to open issues or pull requests to the official repository. Ideas, tips, bug reports, or contributions are all welcome.
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 3703 | # ComputableDAGs.jl
*A domain-specific DAG-optimizer*
## General
This package provides a way to represent large computations as graphs. Once such a graph is created, it can
- be analyzed to extract properties such as total compute effort or data transfer necessary,
- be optimized using optimization algorithms,
- be scheduled on heterogeneous machines, making use of all available hardware,
- be compiled and executed within the same session of Julia.
### Requirements for use
There are some hard requirements for this approach to be applicable to a specific computation problem:
- The computation must be static, i.e., the structure of the graph may not dynamically change during the computation.
- All data dependencies within the graph must be known in advance.
- The overall computation must be separable into smaller parts that are not totally interdependent.
Some softer requirements exist for the project to be *useful*:
- For optimizations to be effective, the functions should have a predictable compute effort that can be known in advance.
- The individual tasks should not be too *small* (ideally at least a few dozen FLOPs) because the compiler is smarter at optimizing very small functions than we can be.
- The individual tasks should not be too *large* so the graph has a large enough number of nodes to allow for a larger optimization space.
- Tasks should [not have side-effects](https://en.wikipedia.org/wiki/Side_effect_(computer_science)) because the order and number of times a function is executed cannot be relied upon.
### Overview of the Project Structure

The project consists of several parts that are designed to be mostly orthogonal interfaces, extendable with new implementations without having to change other parts of the code. For example implementations, refer to the [manual](manual.md), the tests, or other projects in the [ComputableDAGs project](https://github.com/ComputableDAGs).
The [*Graph*](lib/internals/graph.md) is the central part. It consists of [*Nodes*](lib/internals/node.md) and *Edges*. Nodes represent a [*Task*](lib/internals/task.md), which is either a computation or a data transfer. Edges purely represent the dependencies between the nodes.
A graph has to be generated first, which is done by defining a [*Model*](lib/internals/models.md) and providing some form of *Generator* for a specific problem instance of that model. This part is entirely up to the user. A generator might parse a file and generate a graph from that, or it may generate a basic graph by itself.
[*Estimators*](lib/internals/estimator.md) can be used to collect properties of the graph, for example the total compute effort defined by tasks.
From any state of the graph, possible [*Operations*](lib/internals/operation.md) can be generated. These represent topological changes to the graph which do not change the total computation. Operations can be applied and popped similar to a [stack](https://en.wikipedia.org/wiki/Stack_(abstract_data_type)).
The [*Optimizer*](lib/internals/optimization.md) interface then allows using an estimator to push and pop operations in order to reduce the execution time.
Finally, the [*Scheduler*](lib/internals/scheduler.md) can use [*Device*](lib/internals/devices.md) information to [*generate*](lib/internals/code_gen.md) the code.
For detailed information on all the interfaces and functionality provided, please refer to the [public](lib/public.md) documentation or the respective internals, as linked above.
## Library Outline
```@contents
Pages = [
"lib/public.md",
"lib/internals.md"
]
```
### [Index](@id main-index)
```@index
Pages = ["lib/public.md"]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 413 | # Manual
## Application repositories
The following repositories use this package in production and can serve as examples:
- [QEDFeynman](https://github.com/ComputableDAGs/QEDFeynman.jl): Compute differential cross-sections of scattering processes in ABC and QED models.
## Jupyter Notebooks
In the `notebooks` directory are notebooks containing some examples of the usage of this repository.
TBW
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 368 | # Public Documentation
Documentation for `ComputableDAGs.jl`'s public interface.
See the Internals section of the manual for documentation of everything else.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["ComputableDAGs.jl"]
Order = [:module]
```
## Contents
```@contents
Pages = ["public.md"]
Depth = 2
```
## Index
```@index
Pages = ["public.md"]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 750 | # Code Generation
## Types
```@autodocs
Modules = [ComputableDAGs]
Pages = ["code_gen/type.jl"]
Order = [:type, :constant, :function]
```
## Function Generation
Implementations for the generation of a callable function. A function generated this way cannot be called immediately: one Julia world age has to pass first, which happens once control returns to the global scope. If the DAG, and therefore the generated function, becomes too large, use the tape machine instead, since compiling very large functions becomes infeasible.
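For example (a sketch, not verbatim package code — the generator call and its argument list are assumptions), the standard way to call a freshly generated function within the same session is `Base.invokelatest`:

```julia
f = get_compute_function(graph, instance, machine) # assumed generator API
result = Base.invokelatest(f, input)               # force dispatch in the newest world age
```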
```@autodocs
Modules = [ComputableDAGs]
Pages = ["code_gen/function.jl"]
Order = [:function]
```
## Tape Machine
```@autodocs
Modules = [ComputableDAGs]
Pages = ["code_gen/tape_machine.jl"]
Order = [:function]
``` | ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 695 | # Devices
## Interface
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/interface.jl"]
Order = [:type, :constant, :function]
```
## Detect
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/detect.jl"]
Order = [:function]
```
## Measure
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/measure.jl"]
Order = [:function]
```
## Implementations
### General
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/impl.jl"]
Order = [:type, :function]
```
### NUMA
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/numa/impl.jl"]
Order = [:type, :function]
```
### GPUs
```@autodocs
Modules = [ComputableDAGs]
Pages = ["devices/ext.jl"]
Order = [:type]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 320 | # Diff
## Type
```@autodocs
Modules = [ComputableDAGs]
Pages = ["diff/type.jl"]
Order = [:type]
```
## Properties
```@autodocs
Modules = [ComputableDAGs]
Pages = ["diff/properties.jl"]
Order = [:function]
```
## Printing
```@autodocs
Modules = [ComputableDAGs]
Pages = ["diff/print.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 473 | # Estimation
## Interface
The interface that has to be implemented for an estimator.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["estimator/interface.jl"]
Order = [:type, :constant, :function]
```
## Global Metric Estimator
Implementation of a global metric estimator. It uses the graph properties compute effort, data transfer, and compute intensity.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["estimator/global_metric.jl"]
Order = [:type, :function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 749 | # Graph
## Type
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/type.jl"]
Order = [:type]
```
## Interface
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/interface.jl"]
Order = [:function]
```
## Compare
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/compare.jl"]
Order = [:function]
```
## Mute
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/mute.jl"]
Order = [:function]
```
## Print
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/print.jl"]
Order = [:function]
```
## Properties
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/properties.jl"]
Order = [:function]
```
## Validate
```@autodocs
Modules = [ComputableDAGs]
Pages = ["graph/validate.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 246 | # Models
## Interface
The interface that has to be implemented for a model to be usable is defined in `src/models/interface.jl`.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["models/interface.jl"]
Order = [:type, :constant, :function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 635 | # Node
## Type
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/type.jl"]
Order = [:type]
```
## Create
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/create.jl"]
Order = [:function]
```
## Compare
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/compare.jl"]
Order = [:function]
```
## Properties
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/properties.jl"]
Order = [:function]
```
## Print
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/print.jl"]
Order = [:function]
```
## Validate
```@autodocs
Modules = [ComputableDAGs]
Pages = ["node/validate.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 867 | # Operation
## Types
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/type.jl"]
Order = [:type]
```
## Find
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/find.jl"]
Order = [:function]
```
## Apply
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/apply.jl"]
Order = [:function]
```
## Get
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/get.jl"]
Order = [:function]
```
## Clean
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/clean.jl"]
Order = [:function]
```
## Utility
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/utility.jl"]
Order = [:function]
```
## Print
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/print.jl"]
Order = [:function]
```
## Validate
```@autodocs
Modules = [ComputableDAGs]
Pages = ["operation/validate.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 974 | # Optimization
## Interface
The interface that has to be implemented for an optimization algorithm.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["optimization/interface.jl"]
Order = [:type, :constant, :function]
```
## Random Walk Optimizer
Implementation of a random walk algorithm.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["optimization/random_walk.jl"]
Order = [:type, :function]
```
## Reduction Optimizer
Implementation of an optimizer that reduces as far as possible.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["optimization/reduce.jl"]
Order = [:type, :function]
```
## Split Optimizer
Implementation of an optimizer that splits as far as possible.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["optimization/split.jl"]
Order = [:type, :function]
```
## Greedy Optimizer
Implementation of a greedy optimization algorithm.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["optimization/greedy.jl"]
Order = [:type, :function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 337 | # Properties
## Type
```@autodocs
Modules = [ComputableDAGs]
Pages = ["properties/type.jl"]
Order = [:type]
```
## Create
```@autodocs
Modules = [ComputableDAGs]
Pages = ["properties/create.jl"]
Order = [:function]
```
## Utility
```@autodocs
Modules = [ComputableDAGs]
Pages = ["properties/utility.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 363 | # Scheduler
## Interface
```@autodocs
Modules = [ComputableDAGs]
Pages = ["scheduler/interface.jl"]
Order = [:type, :function]
```
## Types
```@autodocs
Modules = [ComputableDAGs]
Pages = ["scheduler/type.jl"]
Order = [:type, :function]
```
## Greedy
```@autodocs
Modules = [ComputableDAGs]
Pages = ["scheduler/greedy.jl"]
Order = [:type, :function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 531 | # Task
## Type
```@autodocs
Modules = [ComputableDAGs]
Pages = ["task/type.jl"]
Order = [:type]
```
## Create
```@autodocs
Modules = [ComputableDAGs]
Pages = ["task/create.jl"]
Order = [:function]
```
## Compare
```@autodocs
Modules = [ComputableDAGs]
Pages = ["task/compare.jl"]
Order = [:function]
```
## Compute
```@autodocs
Modules = [ComputableDAGs]
Pages = ["task/compute.jl"]
Order = [:function]
```
## Properties
```@autodocs
Modules = [ComputableDAGs]
Pages = ["task/properties.jl"]
Order = [:function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT"
]
| 0.1.1 | 30cc181d3443bdf6e274199b938e3354f6ad87fb | docs | 399 | # Utility
## Helper Functions
```@autodocs
Modules = [ComputableDAGs]
Pages = ["utils.jl"]
Order = [:type, :function]
```
## Trie Helper
This is a simple implementation of a [Trie Data Structure](https://en.wikipedia.org/wiki/Trie) to greatly improve the performance of the Node Reduction search.
```@autodocs
Modules = [ComputableDAGs]
Pages = ["trie.jl"]
Order = [:type, :function]
```
| ComputableDAGs | https://github.com/ComputableDAGs/ComputableDAGs.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 2208 | module DifferentialDynamicProgramming
using LinearTimeVaryingModelsBase, Requires, ValueHistories, LinearAlgebra, Statistics, Printf
const DEBUG = false # Set this flag to true in order to print debug messages
export QPTrace, boxQP, demoQP, iLQG, iLQGkl, demo_linear, demo_linear_kl, demo_pendcart, GaussianPolicy
eye(n) = Matrix{Float64}(I,n,n)
function __init__()
@require Plots="91a5bcdd-55d7-5caf-9e0b-520d859cae80" begin
        @eval LinearAlgebra.adjoint(x::String) = x # let vectors of string titles pass through the adjoint (') used in the plot calls below
@eval function plotstuff_linear(x,u,cost,totalcost)
p = Plots.plot(layout=(2,2))
Plots.plot!(p,x', title="State Trajectories", xlabel="Time step",legend=false, subplot=1, show=false)
Plots.plot!(p,cost,c=:black,linewidth=3, title="Cost", xlabel="Time step", subplot=2, show=false)
Plots.plot!(p,u',title="Control signals", xlabel="Time step", subplot=3, show=false)
Plots.plot!(p,totalcost,title="Total cost", xlabel="Iteration", subplot=4, show=false)
Plots.gui()
end
@eval function plotstuff_pendcart(x00, u00, x,u,cost00,cost,otrace)
cp = Plots.plot(layout=(1,3))
sp = Plots.plot(x00',title=["\$x_$(i)\$" for i=1:size(x00,1)]', lab="Simulation", layout=(2,2))
Plots.plot!(sp,x', title=["\$x_$(i)\$" for i=1:size(x00,1)]', lab="Optimized", xlabel="Time step", legend=true)
Plots.plot!(cp,cost00, title="Cost", lab="Simulation", subplot=2)
Plots.plot!(cp,u', legend=true, title="Control signal",lab="Optimized", subplot=1)
Plots.plot!(cp,cost[2:end], legend=true, title="Cost",lab="Optimized", xlabel="Time step", subplot=2, yscale=:log10)
iters = sum(cost .> 0)
filter!(x->x>0,cost)
Plots.plot!(cp, get(otrace, :cost)[2], yscale=:log10,xscale=:log10, title="Total cost", xlabel="Iteration", legend=false, subplot=3)
Plots.plot(sp,cp)
Plots.gui()
end
end
end
dir(paths...) = joinpath(@__DIR__, "..", paths...)
include("boxQP.jl")
include("iLQG.jl")
include("iLQGkl.jl")
include("forward_pass.jl")
include("backward_pass.jl")
include("demo_linear.jl")
include("system_pendcart.jl")
function debug(x)
DEBUG && printstyled(string(x),"\n", color=:blue)
end
end # module
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 11938 | choleskyvectens(a,b) = permutedims(sum(a.*b,1), [3 2 1])
macro setupQTIC()
quote
m = size(u,1)
n,_,N = size(fx)
@assert size(cx) == (n, N) "size(cx) should be (n, N)"
@assert size(cu) == (m, N) "size(cu) should be (m, N)"
        @assert size(cxx) == (n, n) "size(cxx) should be (n, n)"
        @assert size(cxu) == (n, m) "size(cxu) should be (n, m)"
k = zeros(m,N)
K = zeros(m,n,N)
Vx = zeros(n,N)
Vxx = zeros(n,n,N)
Quu = Array{T}(undef,m,m,N)
Quui = Array{T}(undef,m,m,N)
dV = [0., 0.]
Vx[:,N] = cx[:,N]
Vxx[:,:,N] = cxx
Quu[:,:,N] = cuu
diverge = 0
end |> esc
end
macro end_backward_pass()
quote
QuF = Qu
if isempty(lims) || lims[1,1] > lims[1,2]
# debug("# no control limits: Cholesky decomposition, check for non-PD")
local R
try
R = cholesky(Hermitian(QuuF))
catch
diverge = i
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx, dV
end
# debug("# find control law")
k_i = -(R\QuF)
K_i = -(R\Qux_reg)
else
# debug("# solve Quadratic Program")
lower = lims[:,1]-u[:,i]
upper = lims[:,2]-u[:,i]
local k_i,result,free
try
k_i,result,R,free = boxQP(QuuF,QuF,lower,upper,k[:,min(i+1,N-1)])
catch
result = 0
end
if result < 1
diverge = i
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx, dV
end
K_i = zeros(m,n)
if any(free)
Lfree = -R\(R'\Qux_reg[free,:])
K_i[free,:] = Lfree
end
end
# debug("# update cost-to-go approximation")
dV = dV + [k_i'Qu; .5*k_i'Quu[:,:,i]*k_i]
Vx[:,i] = Qx + K_i'Quu[:,:,i]*k_i + K_i'Qu + Qux'k_i
Vxx[:,:,i] = Qxx + K_i'Quu[:,:,i]*K_i + K_i'Qux + Qux'K_i
Vxx[:,:,i] = .5*(Vxx[:,:,i] + Vxx[:,:,i]')
# debug("# save controls/gains")
k[:,i] = k_i
K[:,:,i] = K_i
end |> esc
end
function back_pass(cx,cu,cxx::AbstractArray{T,3},cxu,cuu,fx::AbstractArray{T,3},fu,fxx,fxu,fuu,λ,regType,lims,x,u) where T # nonlinear time variant
m,N = size(u)
n = size(cx,1)
@assert size(cx) == (n, N)
@assert size(cu) == (m, N)
@assert size(cxx) == (n, n, N)
@assert size(cxu) == (n, m, N)
@assert size(cuu) == (m, m, N)
k = zeros(m,N)
K = zeros(m,n,N)
Vx = zeros(n,N)
Vxx = zeros(n,n,N)
Quu = Array{T}(undef,m,m,N)
Quui = Array{T}(undef,m,m,N)
dV = [0., 0.]
Vx[:,N] = cx[:,N]
Vxx[:,:,N] = cxx[:,:,N]
Quu[:,:,N] = cuu[:,:,N]
diverge = 0
for i = N-1:-1:1
Qu = cu[:,i] + fu[:,:,i]'Vx[:,i+1]
Qx = cx[:,i] + fx[:,:,i]'Vx[:,i+1]
Qux = cxu[:,:,i]' + fu[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
if !isempty(fxu)
fxuVx = vectens(Vx[:,i+1],fxu[:,:,:,i])
Qux = Qux + fxuVx
end
Quu[:,:,i] = cuu[:,:,i] + fu[:,:,i]'Vxx[:,:,i+1]*fu[:,:,i]
if !isempty(fuu)
fuuVx = vectens(Vx[:,i+1],fuu[:,:,:,i])
Quu[:,:,i] .+= fuuVx
end
Qxx = cxx[:,:,i] + fx[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
isempty(fxx) || (Qxx .+= vectens(Vx[:,i+1],fxx[:,:,:,i]))
Vxx_reg = Vxx[:,:,i+1] .+ (regType == 2 ? λ*eye(n) : 0)
Qux_reg = cxu[:,:,i]' + fu[:,:,i]'Vxx_reg*fx[:,:,i]
isempty(fxu) || (Qux_reg .+= fxuVx)
QuuF = cuu[:,:,i] + fu[:,:,i]'Vxx_reg*fu[:,:,i] .+ (regType == 1 ? λ*eye(m) : 0)
isempty(fuu) || (QuuF .+= fuuVx)
@end_backward_pass
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
function back_pass(cx,cu,cxx::AbstractArray{T,2},cxu,cuu,fx::AbstractArray{T,3},fu,fxx,fxu,fuu,λ,regType,lims,x,u) where T # quadratic timeinvariant cost, dynamics nonlinear time variant
@setupQTIC
for i = N-1:-1:1
Qu = cu[:,i] + fu[:,:,i]'Vx[:,i+1]
Qx = cx[:,i] + fx[:,:,i]'Vx[:,i+1]
Qux = cxu' + fu[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
if !isempty(fxu)
fxuVx = vectens(Vx[:,i+1],fxu[:,:,:,i])
Qux = Qux + fxuVx
end
Quu[:,:,i] = cuu + fu[:,:,i]'Vxx[:,:,i+1]*fu[:,:,i]
if !isempty(fuu)
fuuVx = vectens(Vx[:,i+1],fuu[:,:,:,i])
Quu[:,:,i] = Quu[:,:,i] + fuuVx
end
Qxx = cxx + fx[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
isempty(fxx) || (Qxx .+= vectens(Vx[:,i+1],fxx[:,:,:,i]))
Vxx_reg = Vxx[:,:,i+1] .+ (regType == 2 ? λ*eye(n) : 0)
Qux_reg = cxu' + fu[:,:,i]'Vxx_reg*fx[:,:,i]
isempty(fxu) || (Qux_reg .+= fxuVx)
QuuF = cuu + fu[:,:,i]'Vxx_reg*fu[:,:,i] .+ (regType == 1 ? λ*eye(m) : 0)
isempty(fuu) || (QuuF .+= fuuVx)
@end_backward_pass
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
function back_pass(cx,cu,cxx::AbstractArray{T,2},cxu,cuu,fx::AbstractArray{T,3},fu,λ,regType,lims,x,u) where T # quadratic timeinvariant cost, linear time variant dynamics
@setupQTIC
for i = N-1:-1:1
Qu = cu[:,i] + fu[:,:,i]'Vx[:,i+1]
Qx = cx[:,i] + fx[:,:,i]'Vx[:,i+1]
Qux = cxu' + fu[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
Quu[:,:,i] = cuu + fu[:,:,i]'Vxx[:,:,i+1]*fu[:,:,i]
Qxx = cxx + fx[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
Vxx_reg = Vxx[:,:,i+1] .+ (regType == 2 ? λ*eye(n) : 0)
Qux_reg = cxu' + fu[:,:,i]'Vxx_reg*fx[:,:,i]
QuuF = cuu + fu[:,:,i]'Vxx_reg*fu[:,:,i] .+ (regType == 1 ? λ*eye(m) : 0)
@end_backward_pass
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
function back_pass(cx,cu,cxx::AbstractArray{T,3},cxu,cuu,fx::AbstractArray{T,3},fu,λ,regType,lims,x,u) where T # quadratic timeVariant cost, linear time variant dynamics
m = size(u,1)
    n,N = size(fx,1), size(fx,3)
@assert size(cx) == (n, N)
@assert size(cu) == (m, N)
@assert size(cxx) == (n, n, N)
@assert size(cxu) == (n, m, N)
@assert size(cuu) == (m, m, N)
k = zeros(m,N)
K = zeros(m,n,N)
Vx = zeros(n,N)
Vxx = zeros(n,n,N)
Quu = Array{T}(undef,m,m,N)
Quui = Array{T}(undef,m,m,N)
dV = [0., 0.]
Vx[:,N] = cx[:,N]
Vxx[:,:,N] = cxx[:,:,end]
Quu[:,:,N] = cuu[:,:,N]
diverge = 0
for i = N-1:-1:1
Qu = cu[:,i] + fu[:,:,i]'Vx[:,i+1]
Qx = cx[:,i] + fx[:,:,i]'Vx[:,i+1]
Vxx_reg = Vxx[:,:,i+1] .+ (regType == 2 ? λ*eye(n) : 0)
Qux_reg = cxu[:,:,i]' + fu[:,:,i]'Vxx_reg*fx[:,:,i]
QuuF = cuu[:,:,i] + fu[:,:,i]'Vxx_reg*fu[:,:,i] .+ (regType == 1 ? λ*eye(m) : 0)
Qux = cxu[:,:,i]' + fu[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
Quu[:,:,i] .= cuu[:,:,i] .+ fu[:,:,i]'Vxx[:,:,i+1]*fu[:,:,i]
Qxx = cxx[:,:,i] + fx[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
@end_backward_pass
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
function back_pass(cx,cu,cxx::AbstractArray{T,2},cxu,cuu,fx::AbstractMatrix{T},fu,λ,regType,lims,x,u) where T # cost quadratic and cost and LTI dynamics
m,N = size(u)
n = size(fx,1)
@assert size(cx) == (n, N)
@assert size(cu) == (m, N)
@assert size(cxx) == (n, n)
@assert size(cxu) == (n, m)
@assert size(cuu) == (m, m)
k = zeros(m,N)
K = zeros(m,n,N)
Vx = zeros(n,N)
Vxx = zeros(n,n,N)
Quu = Array{T}(undef,m,m,N)
Quui = Array{T}(undef,m,m,N)
dV = [0., 0.]
Vx[:,N] = cx[:,N]
Vxx[:,:,N] = cxx
Quu[:,:,N] = cuu
diverge = 0
for i = N-1:-1:1
Qu = cu[:,i] + fu'Vx[:,i+1]
Qx = cx[:,i] + fx'Vx[:,i+1]
Qux = cxu' + fu'Vxx[:,:,i+1]*fx
Quu[:,:,i] = cuu + fu'Vxx[:,:,i+1]*fu
Qxx = cxx + fx'Vxx[:,:,i+1]*fx
Vxx_reg = Vxx[:,:,i+1] .+ (regType == 2 ? λ*eye(n) : 0)
Qux_reg = cxu' + fu'Vxx_reg*fx
QuuF = cuu + fu'Vxx_reg*fu .+ (regType == 1 ? λ*eye(m) : 0)
@end_backward_pass
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
function graphics(x...)
return 0
end
function back_pass_gps(cx,cu,cxx::AbstractArray{T,3},cxu,cuu, fx::AbstractArray{T,3},fu,lims,x,u,kl_cost_terms) where T # quadratic timeVariant cost, linear time variant dynamics
m = size(u,1)
n,_,N = size(fx)
ηbracket = kl_cost_terms[2]
η = isa(ηbracket,AbstractMatrix) ? ηbracket[2,N] : ηbracket[2]
cxkl,cukl,cxxkl,cxukl,cuukl = kl_cost_terms[1]
@assert size(cx) == (n, N)
@assert size(cu) == (m, N)
@assert size(cxx) == (n, n, N)
@assert size(cxu) == (n, m, N)
@assert size(cuu) == (m, m, N)
k = zeros(m,N)
K = zeros(m,n,N)
Vx = zeros(n,N)
Vxx = zeros(n,n,N)
Quu = Array{T}(undef,m,m,N)
Quui = Array{T}(undef,m,m,N)
dV = [0., 0.]
Vx[:,N] = cx[:,N]
Vxx[:,:,N] = cxx[:,:,end]
Quu[:,:,N] = cuu[:,:,N]./ η .+ cuukl[:,:,N]
Quui[:,:,N] = inv(Quu[:,:,N])
diverge = 0
for i = N-1:-1:1
Qu = cu[:,i] + fu[:,:,i]'Vx[:,i+1]
Qx = cx[:,i] + fx[:,:,i]'Vx[:,i+1]
Qux = cxu[:,:,i]' + fu[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
Quu[:,:,i] .= cuu[:,:,i] .+ fu[:,:,i]'Vxx[:,:,i+1]*fu[:,:,i]
Qxx = cxx[:,:,i] + fx[:,:,i]'Vxx[:,:,i+1]*fx[:,:,i]
ηbracket = kl_cost_terms[2]
η = isa(ηbracket,AbstractMatrix) ? ηbracket[2,i] : ηbracket[2]
Qu = Qu ./ η + cukl[:,i]
Qux = Qux ./ η + cxukl[:,:,i]
Quu[:,:,i] = Quu[:,:,i] ./ η + cuukl[:,:,i]
Qx = Qx ./ η + cxkl[:,i]
Qxx = Qxx ./ η + cxxkl[:,:,i]
Quu[:,:,i] = 0.5*(Quu[:,:,i] + Quu[:,:,i]')
if isempty(lims) || lims[1,1] > lims[1,2]
# debug("# no control limits: Cholesky decomposition, check for non-PD")
local R
try
R = cholesky(Hermitian(Quu[:,:,i]))
catch
diverge = i
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx, dV
end
# debug("# find control law")
k_i = -(R\Qu)
K_i = -(R\Qux)
else
# debug("# solve Quadratic Program")
lower = lims[:,1]-u[:,i]
upper = lims[:,2]-u[:,i]
local k_i,result,free
try
k_i,result,R,free = boxQP(Quu[:,:,i],Qu,lower,upper,k[:,min(i+1,N-1)])
catch
result = 0
end
if result < 1
diverge = i
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx, dV
end
K_i = zeros(m,n)
if any(free)
Lfree = -R\(R'\Qux[free,:])
K_i[free,:] = Lfree
end
end
# debug("# update cost-to-go approximation")
dV = dV + [k_i'Qu; .5*k_i'Quu[:,:,i]*k_i]
Vx[:,i] = Qx + K_i'Quu[:,:,i]*k_i + K_i'Qu + Qux'k_i
Vxx[:,:,i] = Qxx + K_i'Quu[:,:,i]*K_i + K_i'Qux + Qux'K_i
Vxx[:,:,i] = .5*(Vxx[:,:,i] + Vxx[:,:,i]')
# debug("# save controls/gains")
k[:,i] = k_i
K[:,:,i] = K_i
Quui[:,:,i] = inv(Quu[:,:,i])
end
return diverge, GaussianPolicy(N,n,m,K,k,Quui,Quu), Vx, Vxx,dV
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 5996 | mutable struct QPTrace
x
xc
value
search
clamped
nfactor
end
"""
Minimize `0.5*x'*H*x + x'*g` s.t. lower<=x<=upper
inputs:
`H` - positive definite matrix (n * n)
`g` - bias vector (n)
`lower` - lower bounds (n)
`upper` - upper bounds (n)
optional inputs:
`x0` - initial state (n)
`options` - see below (7)
outputs:
`x` - solution (n)
`result` - result type (roughly, higher is better, see below)
`Hfree` - subspace cholesky factor (n_free * n_free)
`free` - set of free dimensions (n)
"""
function boxQP(H,g,lower,upper,x0::AbstractVector;
maxIter = 100,
minGrad = 1e-8,
minRelImprove = 1e-8,
stepDec = 0.6,
minStep = 1e-22,
Armijo = 0.1,
print = 0)
# maxIter = 100 # maximum number of iterations
# minGrad = 1e-8 # minimum norm of non-fixed gradient
# minRelImprove = 1e-8 # minimum relative improvement
# stepDec = 0.6 # factor for decreasing stepsize
# minStep = 1e-22 # minimal stepsize for linesearch
# Armijo = 0.1 # Armijo parameter (fraction of linear improvement required)
# print = 0 # verbosity
n = size(H,1)
clamped = falses(n)
free = trues(n)
oldvalue = 0.
result = 0
gnorm = 0.
nfactor = 0
trace = Array{QPTrace}(undef, maxIter)
Hfree = zeros(n,n)
# debug("# initial state")
x = clamp.(x0,lower,upper)
LU = [lower upper]
LU[.!isfinite.(LU)] .= NaN
# debug("# initial objective value")
value = (x'g + 0.5x'H*x )[1]
if print > 0
@printf("==========\nStarting box-QP, dimension %-3d, initial value: %-12.3f\n",n, value)
end
# debug("# main loop")
iter = 1
while iter <= maxIter
if result != 0
break
end
# debug("# check relative improvement")
if iter>1 && (oldvalue - value) < minRelImprove*abs(oldvalue)
result = 4
break
end
oldvalue = value
# debug("# get gradient")
grad = g + H*x
# debug("# find clamped dimensions")
old_clamped = clamped
clamped = falses(n)
# clamped[(x[:,1] .== lower)&(grad[:,1].>0)] = true
# clamped[(x[:,1] .== upper)&(grad[:,1].<0)] = true
for i = 1:n
clamped[i] = ((x[i,1] == lower[i])&&(grad[i,1]>0)) || ((x[i,1] == upper[i])&&(grad[i,1]<0))
end
free = .!clamped
# debug("# check for all clamped")
if all(clamped)
result = 6
break
end
# debug("# factorize if clamped has changed")
if iter == 1
factorize = true
else
factorize = any(old_clamped != clamped)
end
if factorize
Hfree = cholesky(H[free,free]).U # was (Hfree, indef) = chol(H[free,free])
# if indef
# result = -1
# break
# end
nfactor += 1
end
# debug("# check gradient norm")
gnorm = norm(grad[free])
if gnorm < minGrad
result = 5
break
end
# debug("# get search direction")
grad_clamped = g + H*(x.*clamped)
search = zeros(n,1)
search[free] = -Hfree\(Hfree'\grad_clamped[free]) - x[free]
# debug("# check for descent direction")
sdotg = sum(search.*grad)
if sdotg >= 0 # (should not happen)
break
end
# debug("# armijo linesearch")
step = 1
nstep = 0
xc = clamp.(x+step*search,lower,upper)
vc = (xc'*g + 0.5*xc'*H*xc)[1]
while (vc - oldvalue)/(step*sdotg) < Armijo
step = step*stepDec
nstep += 1
xc = clamp.(x+step*search,lower,upper)
vc = (xc'*g + 0.5*xc'*H*xc)[1]
if step<minStep
result = 2
break
end
end
if print > 1
@printf("iter %-3d value % -9.5g |g| %-9.3g reduction %-9.3g linesearch %g^%-2d n_clamped %d\n",
iter, vc, gnorm, oldvalue-vc, stepDec, nstep, sum(clamped))
end
trace[iter] = QPTrace(x,xc,value,search,clamped,nfactor )
# debug("# accept candidate")
x = xc
value = vc
iter += 1
end
if iter == maxIter
result = 1
end
results = ["Hessian is not positive definite", # result = -1
"No descent direction found", # result = 0 SHOULD NOT OCCUR
"Maximum main iterations exceeded", # result = 1
"Maximum line-search iterations exceeded", # result = 2
"No bounds, returning Newton point", # result = 3
"Improvement smaller than tolerance", # result = 4
"Gradient norm smaller than tolerance", # result = 5
"All dimensions are clamped"] # result = 6
if print > 0
@printf("RESULT: %s.\niterations %d gradient %-12.6g final value %-12.6g factorizations %d\n",
results[result+2], iter, gnorm, value, nfactor)
end
return x,result,Hfree,free,trace
end
function demoQP(;kwargs...)
n = 500
g = randn(n)
H = randn(n,n)
H = H*H'
lower = -ones(n)
upper = ones(n)
@time boxQP(H, g, lower, upper, randn(n);print=1, kwargs...)
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 4033 | plotstuff_linear(args...) = println("Install package Plots.jl (and call using Plots) to plot results in the end of demo_linear")
function demo_linear(;kwargs...)
println("Running linear demo function for DifferentialDynamicProgramming.jl")
# make stable linear dynamics
h = .01 # time step
n = 10 # state dimension
m = 2 # control dimension
A = randn(n,n)
A = A-A' # skew-symmetric = pure imaginary eigenvalues
A = exp(h*A) # discrete time
B = h*randn(n,m)
# quadratic costs
Q = h*eye(n)
R = .1*h*eye(m)
# control limits
lims = [] #ones(m,1)*[-1 1]*.6
T = 1000 # horizon
x0 = ones(n,1) # initial state
u0 = .1*randn(m,T) # initial controls
# optimization problem
N = T+1
fx = A
fu = B
cxx = Q
cxu = zeros(size(B))
cuu = R
function lin_dyn_df(x,u,Q,R)
u[isnan.(u)] .= 0
cx = Q*x
cu = R*u
fxx=fxu=fuu = []
return fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
function lin_dyn_f(x,u,A,B,Q,R)
u[isnan.(u)] .= 0
xnew = A*x + B*u
return xnew
end
lin_dyn_fT(x,Q) = 0.5*sum(x.*(Q*x))
f(x,u,i) = lin_dyn_f(x,u,A,B,Q,R)
costfun(x,u) = 0.5*sum(x.*(Q*x)) + 0.5*sum(u.*(R*u))
df(x,u) = lin_dyn_df(x,u,Q,R)
# plotFn(x) = plot(squeeze(x,2)')
# run the optimization
@time x, u, traj_new, Vx, Vxx, cost, otrace = iLQG(f,costfun,df, x0, u0; lims=lims,kwargs...);
totalcost = get(otrace, :cost)[2]
plotstuff_linear(x,u,[cost],totalcost)
x, u, traj_new, Vx, Vxx, cost, otrace
end
function demo_linear_kl(;kwargs...)
println("Running linear demo function with KL-divergence constraint for DifferentialDynamicProgramming.jl")
# make stable linear dynamics
h = .01 # time step
n = 10 # state dimension
m = 2 # control dimension
A = randn(n,n)
A = A-A' # skew-symmetric = pure imaginary eigenvalues
A = exp(h*A) # discrete time
B = h*randn(n,m)
# quadratic costs
Q = h*eye(n)
R = .1*h*eye(m)
# control limits
lims = [] #ones(m,1)*[-1 1]*.6
T = 1000 # horizon
x0 = ones(n) # initial state
u = .1*randn(m,T) # initial controls
# optimization problem
N = T+1
fx = repeat(A,1,1,T)
fu = repeat(B,1,1,T)
cxx = repeat(Q,1,1,T)
cxu = repeat(zeros(size(B)),1,1,T)
cuu = repeat(R,1,1,T)
function lin_dyn_df(x,u,Q,R)
u[isnan.(u)] .= 0
cx = Q*x
cu = R*u
fxx=fxu=fuu = []
return fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
function lin_dyn_f(x,u,A,B,Q,R)
u[isnan.(u)] .= 0
xnew = A*x + B*u
return xnew
end
dyn = (x,u,i) -> lin_dyn_f(x,u,A,B,Q,R)
costf = (x,u) -> 0.5*(sum(x.*(Q*x),dims=1) + sum(u.*(R*u),dims=1))[:]
diffdyn = (x,u) -> lin_dyn_df(x,u,Q,R)
function rollout(u)
x = zeros(n,T)
x[:,1] = x0
for t = 1:T-1
x[:,t+1] = dyn(x[:,t],u[:,t],t)
end
x
end
x = rollout(u)
model = LinearTimeVaryingModelsBase.SimpleLTVModel(repeat(A,1,1,N),repeat(B,1,1,N),false)
# plotFn(x) = plot(squeeze(x,2)')
traj = GaussianPolicy(Float64,T,n,m)
# run the optimization
local Vx, Vxx, cost, otrace, totalcost
outercosts = zeros(5)
@time for iter = 1:5
cost0 = 0.5*sum(x.*(Q*x)) + 0.5*sum(u.*(R*u))
x, u, traj, Vx, Vxx, cost, otrace = iLQGkl(dyn,costf,diffdyn, x, traj, model; cost=cost0, lims=lims,kwargs...);
totalcost = get(otrace, :cost)[2]
outercosts[iter] = sum(totalcost)
println("Outer loop: Cost = ", sum(cost))
end
totalcost = get(otrace, :cost)[2]
plotstuff_linear(x,u,[cost],min.(totalcost,400))
# plotstuff_linear(x,u,totalcost,outercosts)
x, u, traj, Vx, Vxx, cost, otrace
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 1721 | """
    xnew,unew,cnew = forward_pass(traj_new, x0,u,x,α,f,costfun,lims,diff)
# Arguments
- α: step size (the feedforward term `k` of `traj_new` is scaled by α)
- diff: function to determine difference `diff(xnew[:,i], x[:,i])`
- f: forward dynamics `x(k+1) = f(x(k), u(k), k)`
- `cnew = costfun(xnew, unew)`
"""
function forward_pass(traj_new, x0,u,x,α,f,costfun,lims,diff)
n = size(x0,1)
m,N = size(u)
xnew = Array{eltype(x0)}(undef,n,N)
xnew[:,1] = x0
unew = copy(u)
cnew = zeros(N)
for i = 1:N
if !isempty(traj_new)
unew[:,i] .+= traj_new.k[:,i]*α
dx = diff(xnew[:,i], x[:,i])
unew[:,i] .+= traj_new.K[:,:,i]*dx
end
if !isempty(lims)
unew[:,i] = clamp.(unew[:,i],lims[:,1], lims[:,2])
end
xnewi = f(xnew[:,i], unew[:,i], i)
if i < N
xnew[:,i+1] = xnewi
end
end
cnew = costfun(xnew, unew)
return xnew,unew,cnew
end
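# Example call (sketch), mirroring the initial rollout in iLQG.jl: an empty
# GaussianPolicy disables the feedback terms, so this is a plain rollout of
# the (pre-scaled) controls αi*u starting from x0:
#
#   xnew, unew, cnew = forward_pass(GaussianPolicy(Float64), x0, αi*u, [], 1, f, costfun, lims, -)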
function forward_covariance(model, x, u, traj)
fx,fu,fxx,fxu,fuu = df(model, x, u)
n = size(fx,1)
m = size(fu,2)
N = size(fx,3)
R1 = covariance(model,x,u) # Simple empirical prediction covariance
Σ0 = R1 # TODO: I was lazy here
ix = 1:n
iu = n+1:n+m
sigmanew = Array{Float64}(undef,n+m,n+m,N)
sigmanew[ix,ix,1] = Σ0
for i = 1:N-1
K,Σ = traj.K[:,:,i], traj.Σ[:,:,i]
sigmanew[ix,ix,i+1] = fx[:,:,i]*sigmanew[ix,ix,i]*fx[:,:,i]' + R1 # Iterate dLyap forward
sigmanew[iu,ix,i] = K*sigmanew[ix,ix,i]
sigmanew[ix,iu,i] = sigmanew[ix,ix,i]*K'
sigmanew[iu,iu,i] = K*sigmanew[ix,ix,i]*K' + Σ
end
sigmanew
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 13461 | import Base: length
EmptyMat3 = Array{Float64}(undef, 0,0,0)
EmptyMat2 = Array{Float64}(undef, 0,0)
emptyMat3(P) = Array{P}(undef, 0,0,0)
emptyMat2(P) = Array{P}(undef, 0,0)
mutable struct Trace
iter::Int64
λ::Float64
dλ::Float64
cost::Float64
α::Float64
grad_norm::Float64
improvement::Float64
reduce_ratio::Float64
time_derivs::Float64
time_forward::Float64
time_backward::Float64
divergence::Float64
η::Float64
Trace() = new(0,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.)
end
(t::MVHistory)(args...) = increment!(t, args...)
"""
`GaussianPolicy{P}`
# Fields:
```
T::Int # number of time steps
n::Int # State dimension
m::Int # Number of control inputs
K::Array{P,3} # Time-varying feedback gain ∈ R(n,m,T)
k::Array{P,2} # Open loop control signal ∈ R(m,T)
Σ::Array{P,3} # Time-varying controller covariance ∈ R(m,m,T)
Σi::Array{P,3} # The inverses of Σ
```
"""
mutable struct GaussianPolicy{P}
T::Int
n::Int
m::Int
K::Array{P,3}
k::Array{P,2}
Σ::Array{P,3}
Σi::Array{P,3}
end
eye(P,n) = Matrix{P}(I,n,n)
GaussianPolicy(P) = GaussianPolicy(0,0,0,emptyMat3(P),emptyMat2(P),emptyMat3(P),emptyMat3(P))
GaussianPolicy(P,T,n,m) = GaussianPolicy(T,n,m,zeros(P,m,n,T),zeros(P,m,T),cat([eye(P,m) for t=1:T]..., dims=3),cat([eye(P,m) for t=1:T]..., dims=3))
Base.isempty(gp::GaussianPolicy) = gp.T == gp.n == gp.m == 0
Base.length(gp::GaussianPolicy) = gp.T
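# Illustrative sketch (not part of the package source): sample a control from
# the time-varying policy around a nominal trajectory (x, u), following the
# controller formula given in the iLQGkl docstring:
#
#   function sample_control(gp::GaussianPolicy, x, u, xi, i)
#       u[:,i] + gp.K[:,:,i]*(xi - x[:,i]) + cholesky(gp.Σ[:,:,i]).L*randn(gp.m)
#   end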
include("klutils.jl")
"""
iLQG - solve the deterministic finite-horizon optimal control problem.
minimize sum_i cost(x[:,i],u[:,i]) + cost(x[:,end])
s.t. x[:,i+1] = f(x[:,i],u[:,i])
Inputs
======
`f, costfun, df`
1) step:
`xnew = f(x,u,i)` is called during the forward pass.
Here the state x and control u are vectors: size(x)==(n,),
size(u)==(m,). The time index `i` is a scalar.
2) cost:
`cost = costfun(x,u)` is called in the forward pass to compute
the cost per time-step along the trajectory `x,u`.
3) derivatives:
`fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu = df(x,u)` computes the
derivatives along a trajectory. In this case size(x)==(n, N) where N
is the trajectory length. size(u)==(m, N). The time indexes are I=(1:N).
Dimensions match the variable names e.g. size(fxu)==(n, n, m, N)
If cost function or system is time invariant, the dimension of the corresponding
derivatives can be reduced by dropping the time dimension
`x0` - The initial state from which to solve the control problem.
Should be a column vector. If a pre-rolled trajectory is available
then size(x0)==(n, N) can be provided and cost set accordingly.
`u0` - The initial control sequence. A matrix of size(u0)==(m, N)
where m is the dimension of the control and N is the number of state
transitions.
Outputs
=======
`x` - the optimal state trajectory found by the algorithm.
size(x)==(n, N)
`u` - the optimal open-loop control sequence.
size(u)==(m, N)
`traj_new` - A new `GaussianPolicy` object containing the feedforward control trajectory and feedback gains; these gains multiply the
deviation of a simulated trajectory from the nominal trajectory x. See `?GaussianPolicy` for more help.
`Vx` - the gradient of the cost-to-go. size(Vx)==(n, N)
`Vxx` - the Hessian of the cost-to-go. size(Vxx)==(n, n N)
`cost` - the costs along the trajectory. size(cost)==(1, N)
the cost-to-go is V = fliplr(cumsum(fliplr(cost)))
`trace` - a trace of various convergence-related values. One row for each
iteration, the columns of trace are
`[iter λ α g_norm Δcost z sum(cost) dλ]`
see below for details.
# Keyword arguments
`lims`, [], control limits\n
`α`, exp10.(range(0, stop=-3, length=11)), backtracking coefficients\n
`tol_fun`, 1e-7, reduction exit criterion\n
`tol_grad`, 1e-4, gradient exit criterion\n
`max_iter`, 500, maximum iterations\n
`λ`, 1, initial value for λ\n
`dλ`, 1, initial value for dλ\n
`λfactor`, 1.6, λ scaling factor\n
`λmax`, 1e10, λ maximum value\n
`λmin`, 1e-6, below this value λ = 0\n
`regType`, 1, regularization type 1: q_uu+λ*I 2: V_xx+λ*I\n
`reduce_ratio_min`, 0, minimal accepted reduction ratio\n
`diff_fun`, -, user-defined diff for sub-space optimization\n
`plot`, 1, 0: no k>0: every k iters k<0: every k iters, with derivs window\n
`verbosity`, 2, 0: no 1: final 2: iter 3: iter, detailed\n
`plot_fun`, x->0, user-defined graphics callback\n
`cost`, [], initial cost for pre-rolled trajectory
This code consists of a port and extension of a MATLAB library provided by the authors of
` INPROCEEDINGS{author={Tassa, Y. and Mansard, N. and Todorov, E.},
booktitle={Robotics and Automation (ICRA), 2014 IEEE International Conference on},
title={Control-Limited Differential Dynamic Programming},
year={2014}, month={May}, doi={10.1109/ICRA.2014.6907001}}`
"""
function iLQG(f,costfun,df, x0, u0;
lims = [],
α = exp10.(range(0, stop=-3, length=11)),
tol_fun = 1e-7,
tol_grad = 1e-4,
max_iter = 500,
λ = 1.,
dλ = 1.,
λfactor = 1.6,
λmax = 1e10,
λmin = 1e-6,
regType = 1,
reduce_ratio_min = 0,
diff_fun = -,
plot = 1,
verbosity = 2,
plot_fun = x->0,
cost = [],
traj_prev = 0
)
debug("Entering iLQG")
local fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu,xnew,unew,costnew,g_norm,Vx,Vxx,dV,αi
# --- initial sizes and controls
n = size(x0, 1) # dimension of state vector
m = size(u0, 1) # dimension of control vector
N = size(u0, 2) # number of state transitions
u = u0 # initial control sequence
traj_new = GaussianPolicy(Float64)
# traj_prev = GaussianDist(Float64)
# --- initialize trace data structure
trace = MVHistory()
trace(:λ, 0, λ)
trace(:dλ, 0, dλ)
# --- initial trajectory
debug("Setting up initial trajectory")
if size(x0,2) == 1 # only initial state provided
diverge = true
for outer αi ∈ α
debug("# test different backtracing parameters α and break loop when first succeeds")
x,un,cost, = forward_pass(traj_new,x0[:,1],αi*u,[],1,f,costfun, lims,diff_fun)
debug("# simplistic divergence test")
if all(abs.(x) .< 1e8)
u = un
diverge = false
break
end
end
elseif size(x0,2) == N
debug("# pre-rolled initial forward pass, initial traj provided")
x = x0
diverge = false
isempty(cost) && error("Initial trajectory supplied, initial cost must also be supplied")
else
error("pre-rolled initial trajectory must be of correct length (size(x0,2) == N)")
end
trace(:cost, 0, sum(cost))
# plot_fun(x) # user plotting
if diverge
if verbosity > 0
@printf("\nEXIT: Initial control sequence caused divergence\n")
end
return
end
# constants, timers, counters
flg_change = true
Δcost = 0.
expected_reduction = 0.
print_head = 10 # print headings every print_head lines
last_head = print_head
t_start = time()
verbosity > 0 && @printf("\n---------- begin iLQG ----------\n")
satisfied = true
iter = accepted_iter = 1
while accepted_iter <= max_iter
reduce_ratio = 0.
# ====== STEP 1: differentiate dynamics and cost along new trajectory
if flg_change
_t = @elapsed fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu = df(x, u)
trace(:time_derivs, iter, _t)
flg_change = false
end
# Determine what kind of system we are dealing with
linearsys = isempty(fxx) && isempty(fxu) && isempty(fuu); debug("linear system: $linearsys")
# ====== STEP 2: backward pass, compute optimal control law and cost-to-go
back_pass_done = false
while !back_pass_done
_t = @elapsed diverge, traj_new,Vx, Vxx,dV = if linearsys
back_pass(cx,cu,cxx,cxu,cuu,fx,fu,λ, regType, lims,x,u)
else
back_pass(cx,cu,cxx,cxu,cuu,fx,fu,fxx,fxu,fuu,λ, regType, lims,x,u)
end
increment!(trace, :time_backward, iter, _t)
            iter == 1 && (traj_prev = traj_new) # TODO: set k and μu to zero for traj_prev
if diverge > 0
verbosity > 2 && @printf("Cholesky failed at timestep %d.\n",diverge)
dλ,λ = max(dλ*λfactor, λfactor), max(λ*dλ, λmin)
if λ > λmax; break; end
continue
end
back_pass_done = true
end
k, K = traj_new.k, traj_new.K
# check for termination due to small gradient
g_norm = mean(maximum(abs.(k) ./ (abs.(u) .+ 1), dims=1))
trace(:grad_norm, iter, g_norm)
if g_norm < tol_grad && λ < 1e-5 && satisfied
verbosity > 0 && @printf("\nSUCCESS: gradient norm < tol_grad\n")
break
end
# ====== STEP 3: line-search to find new control sequence, trajectory, cost
fwd_pass_done = false
if back_pass_done
debug("# serial backtracking line-search")
@elapsed(for outer αi = α
xnew,unew,costnew = forward_pass(traj_new, x0[:,1] ,u, x,αi,f,costfun, lims, diff_fun)
Δcost = sum(cost) - sum(costnew)
expected_reduction = -αi*(dV[1] + αi*dV[2])
reduce_ratio = if expected_reduction > 0
Δcost/expected_reduction
else
@warn("negative expected reduction: should not occur")
sign(Δcost)
end
if reduce_ratio > reduce_ratio_min
fwd_pass_done = true
break
end
end) |> x -> trace(:time_forward, iter, x)
end
# ====== STEP 4: accept step (or not), print status
# print headings
if verbosity > 1 && last_head == print_head
last_head = 0
@printf("%-12s", "iteration cost reduction expected gradient log10(λ) η divergence\n")
end
if fwd_pass_done && satisfied # TODO: I added satisfied here, verify if this is reasonable
if verbosity > 1
@printf("%-12d%-12.6g%-12.3g%-12.3g%-12.3g%-12.1f\n",
iter, sum(cost), Δcost, expected_reduction, g_norm, log10(λ))
last_head += 1
end
dλ = min(dλ / λfactor, 1/ λfactor)
λ *= dλ
# accept changes
x,u,cost = copy(xnew),copy(unew),copy(costnew)
traj_new.k = copy(u)
flg_change = true
plot_fun(x)
if Δcost < tol_fun
verbosity > 0 && @printf("\nSUCCESS: cost change < tol_fun\n")
break
end
accepted_iter += 1
else # no cost improvement
αi = NaN
dλ,λ = max(dλ * λfactor, λfactor), max(λ * dλ, λmin)# increase λ
if verbosity > 1
@printf("%-12d%-12s%-12.3g%-12.3g%-12.3g%-12.1f\n",
iter,"NO STEP", Δcost, expected_reduction, g_norm, log10(λ))
last_head = last_head+1
end
if λ > λmax # terminate ?
verbosity > 0 && @printf("\nEXIT: λ > λmax\n")
break
end
end
# update trace
trace(:λ, iter, λ)
trace(:dλ, iter, dλ)
trace(:α, iter, αi)
trace(:improvement, iter, Δcost)
trace(:cost, iter, sum(cost))
trace(:reduce_ratio, iter, reduce_ratio)
iter += 1
end
iter == max_iter && verbosity > 0 && @printf("\nEXIT: Maximum iterations reached.\n")
iter == 1 && error("Failure: no iterations completed, something is wrong. Try enabling the debug flag in DifferentialDynamicProgramming.jl for verbose printing.")
verbosity > 0 && print_timing(trace,iter,t_start,cost,g_norm,λ)
return x, u, traj_new, Vx, Vxx, cost, trace
end
function print_timing(trace,iter,t_start,cost,g_norm,λ)
diff_t = get(trace, :time_derivs)[2]
diff_t = sum(diff_t[.!isnan.(diff_t)])
back_t = get(trace, :time_backward)[2]
back_t = sum(back_t[.!isnan.(back_t)])
fwd_t = get(trace, :time_forward)[2]
fwd_t = sum(fwd_t[.!isnan.(fwd_t)])
total_t = time()-t_start
info = 100/total_t*[diff_t, back_t, fwd_t, (total_t-diff_t-back_t-fwd_t)]
try
@printf("\n iterations: %-3d\n
final cost: %-12.7g\n
final grad: %-12.7g\n
final λ: %-12.7e\n
time / iter: %-5.0f ms\n
total time: %-5.2f seconds, of which\n
derivs: %-4.1f%%\n
back pass: %-4.1f%%\n
fwd pass: %-4.1f%%\n
other: %-4.1f%% (graphics etc.)\n =========== end iLQG ===========\n",iter,sum(cost),g_norm,λ,1e3*total_t/iter,total_t,info[1],info[2],info[3],info[4])
catch
@show g_norm
end
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 12252 | """
`x, u, traj_new, Vx, Vxx, cost, trace = iLQGkl(dynamics,costfun,derivs, x0, traj_prev, model;
constrain_per_step = false,
kl_step = 0,
lims = [], # Control signal limits ::Matrix ∈ R(m,2)
tol_fun = 1e-7,
tol_grad = 1e-4,
max_iter = 50,
print_head = 10, # Print headers this often
print_period = 1, # Print this often
reduce_ratio_min = 0, # Not used ATM
diff_fun = -,
verbosity = 2, # ∈ (0,3)
plot_fun = x->0, # Not used
cost = [], # Supply if pre-rolled trajectory supplied
ηbracket = [1e-8,1,1e16], # dual variable bracket [min_η, η, max_η]
del0 = 0.0001, # Start of dual variable increase
gd_alpha = 0.01 # Step size in GD (ADAMOptimizer) when constrain_per_step is true
)`
Solves the iLQG problem with constraints on control signals `lims` and bound on the KL-divergence `kl_step` from the old trajectory distribution `traj_prev::GaussianPolicy`.
To solve the maximum entropy problem, use controller `controller(xi,i) = u[:,i] + K[:,:,i]*(xi-x[:,i]) + chol(Σ)*randn(m)` where `K` comes from `traj_new`. Note that multiplying the cost by a constant changes the relative weight between the cost term and the entropy term, i.e., higher cost produces less noise through chol(Σ) since (Σ = Qᵤᵤ⁻¹).
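A minimal rollout sketch of that controller (illustrative only: `dynamics`, `x`, `u` and `traj_new` are assumed to come from a previous call, and `cholesky` is used in place of the deprecated `chol`):
```julia
using LinearAlgebra
function rollout_maxent(dynamics, traj_new, x, u)
    m  = size(u, 1)
    xi = x[:, 1]
    for i = 1:size(u, 2)
        Σ  = traj_new.Σ[:, :, i]  # Σ = Qᵤᵤ⁻¹ from the backward pass
        ui = u[:, i] + traj_new.K[:, :, i]*(xi - x[:, i]) + cholesky(Symmetric(Σ)).L*randn(m)
        xi = dynamics(xi, ui, i)  # same signature as the `dynamics` argument
    end
    xi
end
```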
"""
function iLQGkl(dynamics,costfun,derivs, x0, traj_prev, model;
constrain_per_step = false,
kl_step = 1,
lims = [],
tol_fun = 1e-7,
tol_grad = 1e-4,
max_iter = 50,
print_head = 10,
print_period = 1,
reduce_ratio_min = 0,
diff_fun = -,
verbosity = 2,
plot_fun = x->0,
cost = [],
ηbracket = [1e-8,1,1e16], # min_η, η, max_η
del0 = 0.0001,
gd_alpha = 0.01
)
debug("Entering iLQG")
local fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu,xnew,unew,costnew,sigmanew,g_norm,Vx,Vxx,dV
# --- initial sizes and controls
u = copy(traj_prev.k) # initial control sequence
n = size(x0, 1) # dimension of state vector
m,N = size(u) # dimension of control vector and number of state transitions
traj_new = GaussianPolicy(Float64)
k_old = copy(traj_prev.k)
traj_prev.k *= 0 # We are adding new k to u, so must set this to zero for correct kl calculations
ηbracket = copy(ηbracket) # Because we do changes in this Array
if constrain_per_step
ηbracket = ηbracket.*ones(1,N)
kl_step = kl_step*ones(N)
end
η = view(ηbracket,2,:)
# --- initialize trace data structure
trace = MVHistory()
# --- initial trajectory
debug("Checking initial trajectory")
if size(x0,2) == N
debug("# pre-rolled initial forward pass, initial traj provided")
x = x0
diverge = false
isempty(cost) && error("Initial trajectory supplied, initial cost must also be supplied")
else
error("pre-rolled initial trajectory must be of correct length (size(x0,2) == N)")
end
trace(:cost, 0, sum(cost))
# constants, timers, counters
Δcost = 0.
expected_reduction = 0.
divergence = 0.
step_mult = 1.
iter = 0
last_head = print_head
t_start = time()
verbosity > 0 && @printf("\n---------- begin iLQG ----------\n")
satisfied = false # Indicating KL-constraint satisfied
# ====== STEP 1: differentiate dynamics and cost along new trajectory
_t = @elapsed fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu = derivs(x, u)
trace(:time_derivs, 0, _t)
reduce_ratio = 0.
kl_cost_terms = (∇kl(traj_prev), ηbracket) # This tuple is sent into back_pass, elements in ηbracket are mutated.
for outer iter = 1:(constrain_per_step ? 0 : max_iter) # Single KL constraint
diverge = 1
# ====== STEP 2: backward pass, compute optimal control law and cost-to-go
back_pass_done = false
while diverge > 0 # Done when regularization (through 1/η) for Quu is high enough
# debug("Entering back_pass with η=$ηbracket")
# η is the only regularization when optimizing KL, hence λ = 0 and regType arbitrary
_t = @elapsed diverge, traj_new,Vx, Vxx,dV = back_pass_gps(cx,cu,cxx,cxu,cuu,fx,fu, lims,x,u,kl_cost_terms) # Set λ=0 since we use η
trace(:time_backward, iter, _t)
if diverge > 0
ηbracket[2] += del0 # η increased, used in back_pass through kl_cost_terms
# Higher η downweights the original Q function and upweights KL-cost terms
del0 *= 2
if verbosity > 2
println("Inversion failed at timestep $diverge. η-bracket: ", ηbracket)
end
# if ηbracket[2] > 0.999ηbracket[3] # terminate ?
# if verbosity > 0
# @printf("\nEXIT: η > ηmax (back_pass failed)\n")
# @show kl_cost_terms[1][5]
# @show diverge
# @show traj_new.Σ[:,:,diverge]
# @show traj_new.Σi[:,:,diverge]
# @show ηbracket[2], 0.999ηbracket[3]
# end
# break
# end
end
end
# check for termination due to small gradient
g_norm = mean(maximum(abs.(traj_new.k) ./ (abs.(u) .+1),dims=1))
trace(:grad_norm, iter, g_norm)
# ====== STEP 3: Forward pass
_t = @elapsed begin
# debug("# entering forward_pass")
xnew,unew,costnew = forward_pass(traj_new, x0[:,1] ,u, x,1,dynamics,costfun, lims, diff_fun)
sigmanew = forward_covariance(model, x, u, traj_new)
traj_new.k .+= traj_prev.k # unew = k_new + k_old + Knew*Δx, this doesn't matter since traj_prev.k set to 0 above
Δcost = sum(cost) - sum(costnew)
expected_reduction = -(dV[1] + dV[2]) # According to second order approximation
reduce_ratio = Δcost/expected_reduction
# calc_η modifies the dual variables η according to current constraint_violation
ηbracket, satisfied, divergence = calc_η(xnew,x,sigmanew,ηbracket, traj_new, traj_prev, kl_step)
end
trace(:time_forward, iter, _t)
debug("Forward pass done: η: $ηbracket")
# ====== STEP 4: accept step (or not), print status
# print headings
if verbosity > 1 && iter % print_period == 0
if last_head == print_head
last_head = 0
@printf("%-12s", "iteration est. cost reduction expected gradient log10(η) divergence entropy\n")
end
@printf("%-14d%-14.6g%-14.3g%-14.3g%-12.3g%-12.2f%-14.3g%-12.3g\n",
iter, sum(costnew), Δcost, expected_reduction, g_norm, log10(mean(η)), mean(divergence), entropy(traj_new))
last_head += 1
end
# update trace
trace(:alpha, iter, 1)
trace(:improvement, iter, Δcost)
trace(:cost, iter, sum(costnew))
trace(:reduce_ratio, iter, reduce_ratio)
trace(:divergence, iter, mean(divergence))
trace(:η, iter, ηbracket[2])
# Termination checks
# if g_norm < tol_grad && divergence-kl_step > 0 # In this case we're only going to get even smaller gradients and might as well quit
# verbosity > 0 && @printf("\nEXIT: gradient norm < tol_grad while constraint violation too large\n")
# break
# end
if satisfied # KL-constraint is satisfied and we're happy (at least if Δcost is positive)
plot_fun(x)
verbosity > 0 && @printf("\nSUCCESS: abs(KL-divergence) < kl_step\n")
break
end
if ηbracket[2] > 0.999ηbracket[3]
verbosity > 0 && @printf("\nEXIT: η > ηmax\n")
break
end
# graphics(xnew,unew,cost,traj_new.K,Vx,Vxx,fx,fxx,fu,fuu,trace[1:iter],0)
end # !constrain_per_step
if constrain_per_step # This implements the gradient descent procedure for η
optimizer = ADAMOptimizer(kl_step, α=gd_alpha)
for outer iter = 1:max_iter
diverge = 1
del = del0*ones(N)
while diverge > 0
diverge, traj_new,Vx, Vxx,dV = back_pass_gps(cx,cu,cxx,cxu,cuu,fx,fu, lims,x,u,kl_cost_terms)
if diverge > 0
delind = diverge # This is very inefficient since back_pass only returns a single diverge per call.
ηbracket[2,delind] .+= del[delind]
del[delind] *= 2
if verbosity > 2; println("Inversion failed at timestep $diverge. η-bracket: ", mean(η)); end
if all(ηbracket[2,:] .> 0.999ηbracket[3,:])
# TODO: This termination criteria could be improved
verbosity > 0 && @printf("\nEXIT: η > ηmax\n")
break
end
end
end
xnew,unew,costnew = forward_pass(traj_new, x0[:,1] ,u, x,1,dynamics,costfun, lims, diff_fun)
sigmanew = forward_covariance(model, x, u, traj_new)
traj_new.k .+= traj_prev.k # unew = k_new + k_old + Knew*Δx
Δcost = sum(cost) - sum(costnew)
expected_reduction = -(dV[1] + dV[2])
reduce_ratio = Δcost/expected_reduction
divergence = kl_div_wiki(xnew,x,sigmanew, traj_new, traj_prev)
constraint_violation = divergence - kl_step
lη = log.(η) # Run GD in log-space (much faster)
η .= exp.(optimizer(lη, -constraint_violation, iter))
# η .= optimizer(η, -constraint_violation, iter)
# println(maximum(constraint_violation), " ", extrema(η), " ", indmax(constraint_violation))
# println(round.(constraint_violation,4))
η .= clamp.(η, ηbracket[1,:], ηbracket[3,:])
g_norm = mean(maximum(abs.(traj_new.k) ./ (abs.(u) .+1),dims=1))
trace(:grad_norm, iter, g_norm)
# @show maximum(constraint_violation)
if all(divergence .< 2*kl_step) && mean(constraint_violation) < 0.1*kl_step[1]
satisfied = true
break
end
if verbosity > 1 && iter % print_period == 0
if last_head == print_head
last_head = 0
@printf("%-12s", "iteration est. cost reduction expected log10(η) divergence entropy\n")
end
@printf("%-14d%-14.6g%-14.3g%-14.3g%-12.3f%-12.3g%-14.3g\n",
iter, sum(costnew), Δcost, expected_reduction, mean(log10.(η)), mean(divergence), entropy(traj_new))
last_head += 1
end
end
end
iter == max_iter && verbosity > 0 && @printf("\nEXIT: Maximum iterations reached.\n")
# if costnew < 1.1cost # In this case we made an (approximate) improvement under the model and accept the changes
x,u,cost = xnew,unew,costnew
traj_new.k = copy(u)
# else
# traj_new = traj_prev
# verbosity > 0 && println("Cost (under model) increased, did not accept changes to u")
# end
traj_prev.k = k_old
any((divergence .> kl_step) .& (abs.(divergence - kl_step) .> 0.1*kl_step)) && @warn("KL divergence too high for some time steps when done")
verbosity > 0 && print_timing(trace,iter,t_start,cost,g_norm,mean(ηbracket[2,:]))
return x, u, traj_new, Vx, Vxx, cost, trace
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 7514 | """
Calculate the Q terms related to the KL-constraint. (Actually, only related to log(p̂(τ)), since the constraint is rewritten as an entropy term and the other term disappears into the expectation under p(τ).)
Qtt is [Qxx Qxu; Qux Quu]
Qt is [Qx; Qu]
These terms should be added to the Q terms calculated in the backwards pass to produce the final Q terms.
This function should be called from within the backward-pass function, or just prior to it, to adjust the cost-derivative matrices.
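The per-timestep terms computed below are (with ``\\Sigma^{-1}`` the precision of the previous policy):
``c_x = K^T \\Sigma^{-1} k, \\quad c_u = -\\Sigma^{-1} k, \\quad c_{xx} = K^T \\Sigma^{-1} K, \\quad c_{uu} = \\Sigma^{-1}, \\quad c_{xu} = -\\Sigma^{-1} K``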
"""
function ∇kl(traj_prev)
isempty(traj_prev) && (return (0,0,0,0,0))
debug("Calculating KL cost addition terms")
m,n,T = traj_prev.m,traj_prev.n,traj_prev.T
cx,cu,cxx,cuu,cxu = zeros(n,T),zeros(m,T),zeros(n,n,T),zeros(m,m,T),zeros(m,n,T)
for t in 1:T
K, k = traj_prev.K[:,:,t], traj_prev.k[:,t]
Σi = traj_prev.Σi[:,:,t]
cx[:,t] = K'*Σi*k
cu[:,t] = -Σi*k
cxx[:,:,t] = K'*Σi*K
cuu[:,:,t] = Σi
cxu[:,:,t] = -Σi*K # https://github.com/cbfinn/gps/blob/master/python/gps/algorithm/traj_opt/traj_opt_lqr_python.py#L355
end
return cx,cu,cxx,cxu,cuu
end
"""
This is the inverse of Σₓᵤ
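In matrix form (this is what the code below constructs), with ``\\Sigma^{-1}`` the policy precision:
``M = \\begin{bmatrix} K^T \\Sigma^{-1} K & -K^T \\Sigma^{-1} \\\\ -\\Sigma^{-1} K & \\Sigma^{-1} \\end{bmatrix}, \\qquad v = \\begin{bmatrix} K^T \\Sigma^{-1} k \\\\ -\\Sigma^{-1} k \\end{bmatrix}``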
"""
function KLmv(Σi,K,k)
M =
[K'*Σi*K -K'*Σi;
-Σi*K Σi ]
v = [K'*Σi*k; -Σi*k]
M,v
end
"""
This function produces lots of negative values which are clipped by the max(0,kl)
"""
function kl_div(xnew,xold, Σ_new, traj_new, traj_prev)
(isempty(traj_new) || isempty(traj_prev)) && (return 0)
μ_new = [xnew-xold; unew]
T = traj_new.T
# m = traj_new.m
kldiv = zeros(T)
for t = 1:T
μt = μ_new[:,t]
Σt = Σ_new[:,:,t]
Kp = traj_prev.K[:,:,t]
Kn = traj_new.K[:,:,t]
kp = traj_prev.k[:,t]
kn = traj_new.k[:,t] + kp # unew must be added here
Σp = traj_prev.Σ[:,:,t]
Σn = traj_new.Σ[:,:,t]
Σip = traj_prev.Σi[:,:,t]
Σin = traj_new.Σi[:,:,t]
Mp,vp = KLmv(Σip,Kp,kp)
Mn,vn = KLmv(Σin,Kn,kn)
cp = .5*kp'Σip*kp
cn = .5*kn'Σin*kn
kldiv[t] = -0.5μt'*(Mn-Mp)*μt - μt'*(vn-vp) - cn + cp -0.5sum(Σt*(Mn-Mp)) -0.5logdet(Σn) + 0.5logdet(Σp)
kldiv[t] = max.(0,kldiv[t])
end
return kldiv
end
"""
This version seems to be symmetric and positive
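For two Gaussians, the closed form (the "Wikipedia term" referenced below) is
``KL(\\mathcal{N}_0 \\| \\mathcal{N}_1) = \\frac{1}{2}\\Big( \\mathrm{tr}(\\Sigma_1^{-1}\\Sigma_0) + (\\mu_1-\\mu_0)^T \\Sigma_1^{-1} (\\mu_1-\\mu_0) - k + \\ln \\frac{\\det \\Sigma_1}{\\det \\Sigma_0} \\Big)``
The extra terms below account for the state-feedback part ``K x`` of the two policies.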
"""
function kl_div_wiki(xnew,xold, Σ_new, traj_new, traj_prev)
μ_new = xnew-xold
T,m,n = traj_new.T, traj_new.m, traj_new.n
kldiv = zeros(T)
for t = 1:T
μt = μ_new[:,t]
Σt = Σ_new[1:n,1:n,t]
Kp = traj_prev.K[:,:,t]
Kn = traj_new.K[:,:,t]
kp = traj_prev.k[:,t]
kn = traj_new.k[:,t] #traj_new.k[:,t] contains kp already
Σp = traj_prev.Σ[:,:,t]
Σn = traj_new.Σ[:,:,t]
Σip = traj_prev.Σi[:,:,t]
Σin = traj_new.Σi[:,:,t]
dim = m
k_diff = kp-kn
K_diff = Kp-Kn
try
kldiv[t] = 1/2 * (tr(Σip*Σn) + k_diff'Σip*k_diff - dim + logdet(Σp) - logdet(Σn) ) # Wikipedia term
kldiv[t] += 1/2 *( μt'K_diff'Σip*K_diff*μt + tr(K_diff'Σip*K_diff*Σt) )[1]
kldiv[t] += k_diff'Σip*K_diff*μt
catch e
println(e)
@show Σip, Σin, Σp, Σn
return Inf
end
end
kldiv = max.(0,kldiv)
return kldiv
end
entropy(traj::GaussianPolicy) = mean(logdet(traj.Σ[:,:,t])/2 for t = 1:traj.T) + traj.m*log(2π)/2
"""
new_η, satisfied, divergence = calc_η(xnew,xold,sigmanew,η, traj_new, traj_prev, kl_step)
Calculates the new value of the dual variable η from the current KL-constraint violation.
"""
function calc_η(xnew,xold,sigmanew,ηbracket, traj_new, traj_prev, kl_step::Number)
kl_step > 0 || (return (ηbracket, true,0))
divergence = kl_div_wiki(xnew,xold,sigmanew, traj_new, traj_prev) |> mean
constraint_violation = divergence - kl_step
# Convergence check - constraint satisfaction.
satisfied = abs(constraint_violation) < 0.1*kl_step # allow some small constraint violation
if satisfied
debug(@sprintf("KL: %12.7f / %12.7f, converged", divergence, kl_step))
else
if constraint_violation < 0 # η was too big.
ηbracket[3] = ηbracket[2]
ηbracket[2] = max(geom(ηbracket), 0.1*ηbracket[3])
debug(@sprintf("KL: %12.4f / %12.4f, η too big, new η: (%-5.3g < %-5.3g < %-5.3g)", divergence, kl_step, ηbracket...))
else # η was too small.
ηbracket[1] = ηbracket[2]
ηbracket[2] = min(geom(ηbracket), 10.0*ηbracket[1])
debug(@sprintf("KL: %12.4f / %12.4f, η too small, new η: (%-5.3g < %-5.3g < %-5.3g)", divergence, kl_step, ηbracket...))
end
end
return ηbracket, satisfied, divergence
end
function calc_η(xnew,xold,sigmanew,ηbracket, traj_new, traj_prev, kl_step::AbstractVector)
any(kl_step .> 0) || (return (ηbracket, true,0))
divergence = kl_div_wiki(xnew,xold,sigmanew, traj_new, traj_prev)
if !isa(kl_step,AbstractVector)
divergence = mean(divergence)
end
constraint_violation = divergence - kl_step
# Convergence check - constraint satisfaction.
satisfied = all(abs.(constraint_violation) .< 0.1*kl_step) # allow some small constraint violation
if satisfied
debug(@sprintf("KL: %12.7f / %12.7f, converged", mean(divergence), mean(kl_step)))
else
too_big = constraint_violation .< 0
debug("calc_η: Sum(too big η) = $sum(too_big)")
ηbracket[3,too_big] = ηbracket[2,too_big]
ηbracket[2,too_big] = max.(geom(ηbracket[:,too_big]), 0.1*ηbracket[3,too_big])
ηbracket[1,.!too_big] = ηbracket[2,.!too_big]
ηbracket[2,.!too_big] = min.(geom(ηbracket[:,.!too_big]), 10.0*ηbracket[1,.!too_big])
end
return ηbracket, satisfied, divergence
end
geom(ηbracket::AbstractMatrix) = sqrt.(ηbracket[1,:].*ηbracket[3,:])
geom(ηbracket::AbstractVector) = sqrt(ηbracket[1]*ηbracket[3])
# # using Base.Test
# n,m,T = 1,1,3
# Σnew = cat([eye(n+m) for t=1:T]..., dims=3)
# Σ = cat([eye(m) for t=1:T]..., dims=3)
# K = zeros(m,n,T)
# k = zeros(m,T)
#
# traj_new = DifferentialDynamicProgramming.GaussianPolicy(T,n,m,K,k,Σ,Σ)
# traj_prev = DifferentialDynamicProgramming.GaussianPolicy(T,n,m,copy(K),copy(k),copy(Σ),copy(Σ))
# xnew = zeros(n,T)
# xold = zeros(n,T)
# unew = zeros(m,T)
#
# kl_div_wiki(xnew,xold, Σnew, traj_new, traj_prev)
#
# traj_new.k = ones(m,T)
# traj_prev.k = ones(m,T)
# kl_div_wiki(xnew,xold, Σnew, traj_new, traj_prev)
# traj_new.k .*= 0
#
# traj_new.K = ones(m,n,T)
# kl_div_wiki(xnew,xold, Σnew, traj_new, traj_prev)
# traj_new.K .*= 0
#
# traj_new.Σ .*=2
# kl_div_wiki(xnew,xold, Σnew, traj_new, traj_prev)
mutable struct ADAMOptimizer{T,N}
α::T
β1::T
β2::T
ɛ::T
m::Array{T,N}
v::Array{T,N}
end
ADAMOptimizer(g::AbstractArray{T,N}; α = 0.005, β1 = 0.9, β2 = 0.999, ɛ = 1e-8, m=zero(g), v=zero(g)) where {T,N} = ADAMOptimizer{T,N}(α, β1, β2, ɛ, m, v)
"""
(a::ADAMOptimizer{T,N})(Θ::Array{T,N}, g::Array{T,N}, t::Number)
Applies the gradient `g` to the parameters `Θ` (mutating) at iteration `t`
ADAM GD update http://sebastianruder.com/optimizing-gradient-descent/index.html#adam
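A minimal usage sketch (hypothetical quadratic objective f(θ) = ‖θ‖² with gradient 2θ):
```julia
opt = ADAMOptimizer(zeros(2), α = 0.1)
Θ   = [1.0, -2.0]
for t = 1:200
    opt(Θ, 2Θ, t) # mutates Θ in place and returns the updated Θ
end
```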
"""
function (a::ADAMOptimizer{T,N})(Θ::AbstractArray{T,N}, g::AbstractArray{T,N}, t) where {T,N}
a.m .= a.β1 .* a.m .+ (1-a.β1) .* g
m̂ = a.m ./ (1 - a.β1 ^ t)
a.v .= a.β2 .* a.v .+ (1-a.β2) .* g.^2
v̂ = a.v ./ (1 - a.β2 ^ t)
# @show size(Θ), size(m̂), size(v̂)
Θ .-= a.α .* m̂ ./ (sqrt.(v̂) .+ a.ɛ)
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 5542 | plotstuff_pendcart(args...) = println("Install package Plots.jl (and call using Plots) to plot results in the end of demo_pendcart")
function care(A, B, Q, R)
G = try
B*inv(R)*B'
catch
error("R must be non-singular.")
end
Z = [A -G;
-Q -A']
S = schur(Z)
S = ordschur(S, real(S.values).<0)
U = S.Z
(m, n) = size(U)
U11 = U[1:div(m, 2), 1:div(n,2)]
U21 = U[div(m,2)+1:m, 1:div(n,2)]
return U21/U11
end
function lqr(A, B, Q, R)
S = care(A, B, Q, R)
K = R\B'*S
return K
end
"""
demo_pendcart(;kwargs...)
Run the iLQG function to find an optimal trajectory for the "pendulum on a cart" system.
# Arguments
`x0 = [π-0.6,0,0,0]`
`goal = [π,0,0,0]`
`Q = Diagonal([10,1,2,1])` : State weight matrix
`R = 1` : Control weight matrix
`lims = 5.0*[-1 1]` : control limits,
`T = 600` : Number of time steps
`doplot = true` : Plot results
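For example (illustrative, non-default settings):
```julia
x, u, L, Vx, Vxx, cost, trace = demo_pendcart(lims = 3.0*[-1 1], T = 400)
```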
"""
function demo_pendcart(;x0 = [π-0.6,0,0,0], goal = [π,0,0,0],
Q = Diagonal([10.,1,2,1]), # State weight matrix
R = 1., # Control weight matrix
lims = 5.0*[-1 1], # control limits,
T = 600, # Number of time steps
doplot = true # Plot results
)
N = T+1
g = 9.82
l = 0.35 # Length of pendulum
h = 0.01 # Sample time
d = 0.99
A = [0 1 0 0; # Linearized system dynamics matrix, continuous time
g/l -d 0 0;
0 0 0 1;
0 0 0 0]
B = [0, -1/l, 0, 1]
C = eye(4) # Assume all states are measurable
D = 4
L = lqr(A,B,Q,R) # Calculate the optimal state feedback
I = T
function fsys_closedloop(t,x,L,xd)
dx = copy(x)
dx[1] -= pi
u = -(L*dx)[1]
xd[1] = x[2]
xd[2] = -g/l * sin(x[1]) + u/l * cos(x[1]) - d*x[2]
xd[3] = x[4]
xd[4] = u
end
function fsys(t,x,u,xd)
xd[1] = x[2]
xd[2] = -g/l * sin(x[1]) + u/l * cos(x[1]) - d*x[2]
xd[3] = x[4]
xd[4] = u
end
dfvec = zeros(4)
function dfsys(x,u)
dfvec[1] = x[1]+h*x[2]
dfvec[2] = x[2]+h*(-g/l*sin(x[1])+u[1]/l*cos(x[1])- d*x[2])
dfvec[3] = x[3]+h*x[4]
dfvec[4] = x[4]+h*u[1]
dfvec
end
function cost_quadratic(x,u)
local d = (x.-goal)
0.5(d'*Q*d + u'R*u)[1]
end
function cost_quadratic(x::Matrix,u)
local d = (x.-goal)
T = size(u,2)
c = Vector{Float64}(undef,T+1)
for t = 1:T
c[t] = 0.5(d[:,t]'*Q*d[:,t] + u[:,t]'R*u[:,t])[1]
end
c[end] = cost_quadratic(x[:,end][:],[0.0])
return c
end
cx = zeros(4,T)
cu = zeros(1,T)
cxu = zeros(D,1)
function dcost_quadratic(x,u)
cx .= Q*(x.-goal)
cu .= R.*u
return cx,cu,cxu
end
function lin_dyn_f(x,u,i)
u[isnan.(u)] .= 0
f = dfsys(x,u)
end
fxc = Array{Float64}(undef,D,D,I)
fuc = Array{Float64}(undef,D,1,I)
fxd = Array{Float64}(undef,D,D,I)
fud = Array{Float64}(undef,D,1,I)
for ii = 1:I
fxc[:,:,ii] = [0 1 0 0;
0 0 0 0;
0 0 0 1;
0 0 0 0]
fuc[:,:,ii] = [0, 0, 0, 1]
end
function lin_dyn_df(x,u)
u[isnan.(u)] .= 0
D = size(x,1)
nu,I = size(u)
cx,cu,cxu = dcost_quadratic(x,u)
cxx = Q
cuu = [R]
for ii = 1:I
fxc[2,1,ii] = -g/l*cos(x[1,ii])-u[ii]/l*sin(x[1,ii])
fxc[2,2,ii] = -d
fuc[2,1,ii] = cos(x[1,ii])/l
ABd = exp([fxc[:,:,ii]*h fuc[:,:,ii]*h; zeros(nu, D + nu)])# ZoH sampling
fxd[:,:,ii] = ABd[1:D,1:D]
fud[:,:,ii] = ABd[1:D,D+1:D+nu]
end
fxx=fxu=fuu = []
return fxd,fud,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
x = zeros(4,N)
u = zeros(1,T)
"""
Simulate a pendulum on a cart using the non-linear equations
"""
function simulate_pendcart(x0,L, dfsys, cost)
x[:,1] = x0
u[1] = 0
for t = 2:T
dx = copy(x[:,t-1])
dx[1] -= pi
u[t] = -(L*dx)[1]
if !isempty(lims)
u[t] = clamp(u[t],lims[1],lims[2])
end
x[:,t] = dfsys(x[:,t-1],u[t])
end
dx = copy(x[:,T])
dx[1] -= pi
uT = -(L*dx)[1]
if !isempty(lims)
uT = clamp(uT,lims[1],lims[2])
end
x[:,T+1] = dfsys(x[:,T],uT)
c = cost(x,u)
return x, u, c
end
# Simulate the closed loop system with regular LQG control and watch it fail due to control limits
x00, u00, cost00 = simulate_pendcart(x0, L, dfsys, cost_quadratic)
f(x,u,i) = lin_dyn_f(x,u,i)
df(x,u) = lin_dyn_df(x,u)
# plotFn(x) = plot(squeeze(x,2)')
println("Entering iLQG function")
# subplot(n=4,nc=2)
x, u, L, Vx, Vxx, cost, trace = iLQG(f,cost_quadratic, df, x0, 0*u00,
lims = lims,
# plotFn = x -> Plots.subplot!(x'),
regType = 2,
α = exp10.(range(0.2, stop=-3, length=6)),
λmax = 1e15,
verbosity = 3,
tol_fun = 1e-8,
tol_grad = 1e-8,
max_iter = 1000);
doplot && plotstuff_pendcart(x00, u00, x,u,cost00,cost,trace)
println("Done")
return x, u, L, Vx, Vxx, cost, trace
end
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 346 | using DifferentialDynamicProgramming
using Test, Statistics, LinearAlgebra
@info("Compile time is high for this package, this is expected and is not an error.")
# write your own tests here
include("test_readme.jl")
demo_linear()
demo_linear_kl(kl_step=100)
demo_pendcart()
demoQP()
# include(Pkg.dir("GuidedPolicySearch","examples","bb.jl"))
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 4378 | using ControlSystems
function fsys_closedloop(t,x,L,xd)
dx = copy(x)
dx[1] -= pi
u = -(L*dx)[1]
xd[1] = x[2]
xd[2] = -g/l * sin(x[1]) + u/l * cos(x[1])
xd[3] = x[4]
xd[4] = u
end
function fsys(t,x,u,xd)
xd[1] = x[2]
xd[2] = -g/l * sin(x[1]) + u/l * cos(x[1])
xd[3] = x[4]
xd[4] = u
end
function dfsys(x,u)
[x[1]+h*x[2]; x[2]+h*(-g/0.35*sin(x[1])+u/0.35*cos(x[1])); x[3]+h*x[4]; x[4]+h*u]
end
function cost_quadratic(x,u)
d = (x.-goal)
0.5(d'*Q*d + u'R*u)[1]
end
function cost_quadratic(x::Matrix,u)
d = (x.-goal)
T = size(u,2)
c = Vector{Float64}(undef,T+1)
for t = 1:T
c[t] = 0.5(d[:,t]'*Q*d[:,t] + u[:,t]'R*u[:,t])[1]
end
c[end] = cost_quadratic(x[:,end][:],[0.0])
return c
end
function dcost_quadratic(x,u)
cx = Q*(x.-goal)
cu = R.*u
cxu = zeros(D,1)
return cx,cu,cxu
end
function lin_dyn_f(x,u,i)
u[isnan.(u)] .= 0
f = dfsys(x,u)
c = cost_quadratic(x,u)
return f,c
end
function lin_dyn_fT(x)
cost_quadratic(x,0.0)
end
function lin_dyn_df(x,u)
u[isnan.(u)] .= 0
D = size(x,1)
nu,I = size(u)
fx = Array{Float64}(undef,D,D,I)
fu = Array{Float64}(undef,D,1,I)
cx,cu,cxu = dcost_quadratic(x,u)
cxx = Q
cuu = [R]
for ii = 1:I
fx[:,:,ii] = [0 1 0 0;
-g/0.35*cos(x[1,ii])-u[ii]/0.35*sin(x[1,ii]) 0 0 0;
0 0 0 1;
0 0 0 0]
fu[:,:,ii] = [0, cos(x[1,ii])/0.35, 0, 1]
ABd = exp([fx[:,:,ii]*h fu[:,:,ii]*h; zeros(nu, D + nu)])# ZoH sampling
fx[:,:,ii] = ABd[1:D,1:D]
fu[:,:,ii] = ABd[1:D,D+1:D+nu]
end
fxx=fxu=fuu = []
return fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
"""
Simulate a pendulum on a cart using the non-linear equations
"""
function simulate_pendcart(x0,L, dfsys, cost)
x = zeros(4,N)
u = zeros(1,T)
x[:,1] = x0
u[1] = 0
for t = 2:T
dx = copy(x[:,t-1])
dx[1] -= pi
u[t] = -(L*dx)[1]
if !isempty(lims)
u[t] = clamp(u[t],lims[1],lims[2])
end
x[:,t] = dfsys(x[:,t-1],u[t])
end
dx = copy(x[:,T])
dx[1] -= pi
uT = -(L*dx)[1]
if !isempty(lims)
uT = clamp(uT,lims[1],lims[2])
end
x[:,T+1] = dfsys(x[:,T],uT)
c = cost(x,u)
return x, u, c
end
T = 600 # Number of time steps
N = T+1
g = 9.82
l = 0.35 # Length of pendulum
h = 0.01 # Sample time
lims = []#5.0*[-1 1] # control limits, e.g. ones(m,1)*[-1 1]*.6
goal = [π,0,0,0] # Reference point
A = [0 1 0 0; # Linearlized system dynamics matrix, continuous time
g/l 0 0 0;
0 0 0 1;
0 0 0 0]
B = [0, -1/l, 0, 1]
C = eye(4) # Assume all states are measurable
D = 4
sys = ss(A,B,C,zeros(4))
Q = h*diagm(0=>[10,1,2,1]) # State weight matrix
R = h*1 # Control weight matrix
L = lqr(sys,Q,R) # Calculate the optimal state feedback
x0 = [π-0.6,0,0,0]
# Simulate the closed loop system with regular LQG control and watch it fail due to control limits
x00, u00, cost00 = simulate_pendcart(x0, L, dfsys, cost_quadratic)
u0 = 0u00
fx = A
fu = B
cxx = Q
cxu = zeros(size(B))
cuu = R
f(x,u,i) = lin_dyn_f(x,u,i)
fT(x) = lin_dyn_fT(x)
df(x,u) = lin_dyn_df(x,u)
# plotFn(x) = plot(squeeze(x,2)')
# run the optimization
println("Entering iLQG function")
# subplot(n=4,nc=2)
alpha = exp10.(range(0, stop=-3, length=11))
tol_fun = 1e-7
tol_grad = 1e-4
max_iter = 500
λ = 1
dλ = 1
λfactor = 1.6
λmax = 1e10
λmin = 1e-6
regType = 1
reduce_ratio_min = 0
diff_fun = -
plot = 1
verbosity = 2
plot_fun = x->0
cost = []
kl_step = 0
traj_prev = 0
regType=2
alpha= exp10.(range(0.2, stop=-3, length=6))
verbosity=3
tol_fun = 1e-7
max_iter=1000
import Base: length
EmptyMat3 = Array{Float64}(undef,0,0,0)
EmptyMat2 = Array{Float64}(undef,0,0)
emptyMat3(P) = Array{P}(undef,0,0,0)
emptyMat2(P) = Array{P}(undef,0,0)
mutable struct GaussianDist{P}
T::Int
n::Int
m::Int
fx::Array{P,3}
fu::Array{P,3}
Σ::Array{P,3}
μx::Array{P,2}
μu::Array{P,2}
end
GaussianDist(P) = GaussianDist(0,0,0,emptyMat3(P),emptyMat3(P),emptyMat3(P),emptyMat2(P),emptyMat2(P))
Base.isempty(gd::GaussianDist) = gd.T == gd.n == gd.m == 0
mutable struct GaussianTrajDist{P}
policy::GaussianDist{P}
dynamics::GaussianDist{P}
end
debug(x) = println(x)
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | code | 1930 | using Test, Statistics, LinearAlgebra, Random
# make stable linear dynamics
Random.seed!(0)
eye = DifferentialDynamicProgramming.eye
costs = map(1:10) do MCiteration
h = .01 # time step
n = 10 # state dimension
m = 2 # control dimension
A = randn(n,n)
A = A-A' # skew-symmetric = pure imaginary eigenvalues
A = exp(h*A) # discrete time
B = h*randn(n,m)
# quadratic costs
Q = h*eye(n)
R = .1*h*eye(m)
# control limits
lims = []# ones(m,1)*[-1 1]*.6
T = 1000 # horizon
x0 = ones(n,1) # initial state
u0 = .1*randn(m,T) # initial controls
# optimization problem
N = T+1
fx = A
fu = B
cxx = Q
cxu = zeros(size(B))
cuu = R
# Specify dynamics functions
function lin_dyn_df(x,u,Q,R)
u[isnan.(u)] .= 0
cx = Q*x
cu = R*u
fxx=fxu=fuu = []
return fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
function lin_dyn_f(x,u,A,B)
u[isnan.(u)] .= 0
xnew = A*x + B*u
return xnew
end
function lin_dyn_cost(x,u,Q)
c = 0.5*sum(x.*(Q*x)) + 0.5*sum(u.*(R*u))
return c
end
f(x,u,i) = lin_dyn_f(x,u,A,B)
costfun(x,u) = lin_dyn_cost(x,u,Q)
df(x,u) = lin_dyn_df(x,u,Q,R)
# plotFn(x) = plot(squeeze(x,2)')
# run the optimization
@time x, u, L, Vx, Vxx, cost, otrace = iLQG(f,costfun,df, x0, u0, lims=lims, verbosity=3);
# using Plots
# plot(x', title="States", subplot=1, layout=(3,1), show=true)
# plot!(u', title="Control signals", subplot=2, show=true)
# plot!(cost, title="Cost", subplot=3, show=true)
sum(cost)
end
@test maximum(costs) < 25 # This should be the case most of the times
@test mean(costs) < 10 # This should be the case most of the times
@test minimum(costs) < 5 # This should be the case most of the times
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT",
"BSD-3-Clause"
]
| 0.4.0 | ab5c53d7ce5b457f9b91217fa33e133cf3386b39 | docs | 4151 | # DifferentialDynamicProgramming
[](https://travis-ci.org/baggepinnen/DifferentialDynamicProgramming.jl)
[](https://coveralls.io/github/baggepinnen/DifferentialDynamicProgramming.jl?branch=master)
## Installation
The package is registered and can be added with
`] add DifferentialDynamicProgramming`
The latest version is formally compatible with Julia v1.1+ (but probably works well on Julia v1.0 as well if you `dev` it).
## Demo functions
The following demo functions are provided
`demo_linear()` To run the iLQG DDP algorithm on a simple linear problem
`demoQP` To solve a demo quadratic program
`demo_pendcart()` Where a pendulum attached to a cart is simulated.
## Usage
### Demo linear
See demo file `demo_linear.jl` for a usage example.
```julia
# make stable linear dynamics
h = .01 # time step
n = 10 # state dimension
m = 2 # control dimension
A = randn(n,n)
A = A-A' # skew-symmetric = pure imaginary eigenvalues
A = exp(h*A) # discrete time
B = h*randn(n,m)
# quadratic costs
Q = h*eye(n)
R = .1*h*eye(m)
# control limits
lims = [] #ones(m,1)*[-1 1]*.6
T = 1000 # horizon
x0 = ones(n,1) # initial state
u0 = .1*randn(m,T) # initial controls
# optimization problem
N = T+1
fx = A
fu = B
cxx = Q
cxu = zeros(size(B))
cuu = R
# Specify dynamics functions
function lin_dyn_df(x,u,Q,R)
u[isnan.(u)] .= 0
cx = Q*x
cu = R*u
fxx=fxu=fuu = []
return fx,fu,fxx,fxu,fuu,cx,cu,cxx,cxu,cuu
end
function lin_dyn_f(x,u,A,B)
u[isnan.(u)] .= 0
xnew = A*x + B*u
return xnew
end
function lin_dyn_cost(x,u,Q)
c = 0.5*sum(x.*(Q*x)) + 0.5*sum(u.*(R*u))
return c
end
f(x,u,i) = lin_dyn_f(x,u,A,B)
costfun(x,u) = lin_dyn_cost(x,u,Q)
df(x,u) = lin_dyn_df(x,u,Q,R)
# run the optimization
@time x, u, L, Vx, Vxx, cost, otrace = iLQG(f, costfun ,df, x0, u0, lims=lims);
```
### Demo pendulum on cart
There is an additional demo function `demo_pendcart()`, where a pendulum attached to a cart is simulated. In this example, regular LQG control fails to stabilize the pendulum at the upright position due to control limitations. The DDP-based optimization solves this by letting the pendulum fall, and increases the energy in the pendulum during the fall such that it will stay upright after one revolution.


# Citing
This code consists of a port and extensions of a MATLAB library provided by the authors of
```
BIBTeX:
@INPROCEEDINGS{
author = {Tassa, Y. and Mansard, N. and Todorov, E.},
booktitle = {Robotics and Automation (ICRA), 2014 IEEE International Conference on},
title = {Control-Limited Differential Dynamic Programming},
year = {2014}, month={May}, doi={10.1109/ICRA.2014.6907001}}
http://www.mathworks.com/matlabcentral/fileexchange/52069-ilqg-ddp-trajectory-optimization
http://www.cs.washington.edu/people/postdocs/tassa/
```
The code above was extended with KL-divergence constrained optimization for the thesis
[Bagge Carlson, F.](https://www.control.lth.se/staff/fredrik-bagge-carlson/), ["Machine Learning and System Identification for Estimation in Physical Systems"](https://lup.lub.lu.se/search/publication/ffb8dc85-ce12-4f75-8f2b-0881e492f6c0) (PhD Thesis 2018).
```bibtex
@thesis{bagge2018,
title = {Machine Learning and System Identification for Estimation in Physical Systems},
author = {Bagge Carlson, Fredrik},
keyword = {Machine Learning,System Identification,Robotics,Spectral estimation,Calibration,State estimation},
month = {12},
type = {PhD Thesis},
number = {TFRT-1122},
institution = {Dept. Automatic Control, Lund University, Sweden},
year = {2018},
url = {https://lup.lub.lu.se/search/publication/ffb8dc85-ce12-4f75-8f2b-0881e492f6c0},
}
```
| DifferentialDynamicProgramming | https://github.com/baggepinnen/DifferentialDynamicProgramming.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 685 | export bibliography,
file
using BibTeX
using BibTeXFormat
using Mustache
read_file(path) = read(open(path), String)
function bibliography(cites::Vector{String})
bibliography = Bibliography(read_file(joinpath(dirname(@__FILE__), "SpectralClustering.bib")))
formatted_entries = format_entries(UNSRTAlphaStyle,bibliography)
return write_to_string( HTMLBackend() ,formatted_entries, cites)
end
function file(name::String)
file = read_file(joinpath(dirname(@__FILE__),"src", "js",name))
return file
end
function file(name::String, data::Dict{String, String})
file = read_file(joinpath(dirname(@__FILE__),"src", "js",name))
return render(file, data)
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 600 | using Pkg
packages = ["Documenter","Formatting", "Images", "TestImages",
"RDatasets", "InfoZIP", "ImageView", "ImageMagick","Mustache",
"StringEncodings", "TextAnalysis", "Latexify", "IJulia"]
for p in packages
try
if Pkg.installed(p) == nothing
Pkg.add(p)
end
catch e
Pkg.add(p)
end
end
using Documenter
using SpectralClustering
try
Pkg.installed("BibTeX")
catch
Pkg.clone("https://github.com/JuliaTeX/BibTeX.jl.git")
end
try
Pkg.installed("BibTeXFormat")
catch
Pkg.clone("https://github.com/lucianolorenti/BibTeXFormat.jl.git")
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 1717 | using Documenter
using SpectralClustering
makedocs(
modules = [SpectralClustering],
format = Documenter.HTML(prettyurls = true),
source = "src",
clean = false,
sitename = "SpectralClustering.jl",
pages = Any[
"Home" => "index.md",
"Getting Started"=>"start.md",
"Main Modules" => Any[
"Graph Creation" => "man/graphcreation.md",
"Embedding" => "man/embedding.md",
"Approximate Embedding" => "man/approximate.md",
"Eigenvector Clustering" => "man/clusterize.md",
"Co-Regularized" => "man/multiview.md",
"Incremental" => "man/incremental.md"
],
"Utility Modules" => Any[
"Data Access" => "man/data_access.md",
"Graph" => "man/graph.md",
"Landmarks Selection" => "man/landmark_selection.md"
],
],
doctest = false
)
notebook_output_dir = joinpath(dirname(@__FILE__), "build","notebooks")
mkpath(notebook_output_dir)
using IJulia
jupyter_path = first(IJulia.find_jupyter_subcommand("nbconvert"))
for file in readdir(joinpath(dirname(@__FILE__), "notebooks"))
full_path = joinpath(dirname(@__FILE__), "notebooks", file)
if (endswith(file,".ipynb"))
run(`$(jupyter_path) nbconvert --to html $full_path --output-dir=$notebook_output_dir`)
elseif (file != ".ipynb_checkpoints")
cp(full_path, joinpath(notebook_output_dir,file), force=true)
end
end
deploydocs(
repo = "github.com/lucianolorenti/SpectralClustering.jl.git",
deps = nothing,
make = nothing,
target = "build"
)
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 100 | push!(LOAD_PATH, joinpath(dirname(@__FILE__), "../"))
using IJulia
notebook(dir=dirname(@__FILE__))
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 10519 | using Clustering,
LightGraphs,
SparseArrays,
LinearAlgebra,
Arpack
export NystromMethod,
LandmarkBasedRepresentation,
DNCuts
"""
Large Scale Spectral Clustering with Landmark-Based Representation.
Xinlei Chen, Deng Cai.
# Members
- `landmark_selector::{T <: AbstractLandmarkSelection}` Method for extracting landmarks
- `number_of_landmarks::Integer` Number of landmarks to obtain
- `n_neighbors::Integer` Number of nearest neighbors
- `nev::Integer` Number of eigenvectors
- `w::Function` Weight function used to compute the similarity between a pattern and its nearest landmarks
- `normalize::Bool` Whether to normalize each pattern's embedding to unit norm
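A usage sketch (illustrative sizes and bandwidth; `X` stores one pattern per column and `w` follows the signature used by `embedding` below):
```julia
X = randn(2, 1000)
w(i, neigh, xi, landmarks) = exp.(-vec(sum(abs2, landmarks .- xi, dims=1)))
cfg = LandmarkBasedRepresentation(RandomLandmarkSelection(), 100, 7, 4, w, true)
V = embedding(cfg, X) # 1000×4 embedding
```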
"""
struct LandmarkBasedRepresentation{T <: AbstractLandmarkSelection}
landmark_selector::T
number_of_landmarks::Integer
n_neighbors::Integer
nev::Integer
w::Function
normalize::Bool
end
"""
```
embedding(cfg::LandmarkBasedRepresentation, X)
```
"""
function embedding(cfg::LandmarkBasedRepresentation, X)
n = number_of_patterns(X)
p = cfg.number_of_landmarks
landmark_indices = select_landmarks(cfg.landmark_selector, p, X)
landmarks = get_element(X, landmark_indices)
neighbors_cfg = KNNNeighborhood(landmarks, cfg.n_neighbors)
I = zeros(Integer, n*cfg.n_neighbors)
J = zeros(Integer, n*cfg.n_neighbors)
V = zeros(n*cfg.n_neighbors)
for i = 1:n
i_neighbors = neighbors(neighbors_cfg, get_element(X, i))
weights = cfg.w(i, [], get_element(X, i), get_element(landmarks, i_neighbors))
weights ./= sum(weights)
for (j, (neigh, w)) in enumerate(zip(i_neighbors, weights))
I[(i-1)*cfg.n_neighbors + j] = neigh
J[(i-1)*cfg.n_neighbors + j] = i
V[(i-1)*cfg.n_neighbors + j] = w
end
end
Z = sparse(I, J, V, p, n)
F = svds(Z*Z', nsv=cfg.nev)[1]
v = F.S
S = spdiagm(0=>1 ./ sqrt.(v))
B = S * F.U' * Z
if cfg.normalize
B = normalize_cols(B)
end
return B'
end
"""
```julia
type NystromMethod{T<:AbstractLandmarkSelection}
landmarks_selector::T
number_of_landmarks::Integer
w::Function
nvec::Integer
end
```
The type ```NystromMethod``` proposed in Spectral Grouping Using the Nystrom Method by Charless
Fowlkes, Serge Belongie, Fan Chung, and Jitendra Malik. It has to be defined:
- `landmarks_selector::T<:AbstractLandmarkSelection`. A mechanism to select the sampled
points.
- `number_of_landmarks::Integer`. The number of points to sample
- `w::Function`. The weight function used to compute the similarity. Its signature has to be `weight(i, j, e1, e2)`, where `e1` and `e2` are the i-th and j-th data elements respectively, obtained via `get_element`; usually each is a vector.
- `nvec::Integer`. The number of eigenvector to obtain.
- `threaded::Bool`. Default: True. Specifies whether the threaded version is used.
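A usage sketch (illustrative sizes and bandwidth; `X` stores one pattern per column):
```julia
X = randn(2, 500)
weight(i, neigh, v, m) = exp.(-vec(sum(abs2, m .- v, dims=1)))
cfg = NystromMethod(RandomLandmarkSelection(), 50, weight, 3)
V = embedding(cfg, X) # 500×3 approximated eigenvectors
```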
"""
struct NystromMethod{T <: AbstractLandmarkSelection} <: EigenvectorEmbedder
landmarks_selector::T
number_of_landmarks::Integer
w::Function
nvec::Integer
threaded::Bool
end
function NystromMethod(landmarks_selector::T,
number_of_landmarks::Integer,
w::Function,
nvec::Integer) where T <: AbstractLandmarkSelection
return NystromMethod(landmarks_selector,
number_of_landmarks,
w,
nvec,
true)
end
"""
create_A_B(cfg::NystromMethod, landmarks::Vector{Int},X)
Arguments:
- `cfg::NystromMethod`. The method configuration.
- `landmarks::Vector{T}`. A vector of integers that contains the \$n\$ indexes sampled from the data.
- `X` is the data that contains \$ N \$ patterns.
Let \$ W \\in \\mathbb{R}^{N \\times N}, W = \\begin{bmatrix} A & B^T \\\\ B & C \\end{bmatrix}, A
\\in \\mathbb{R}^{ n \\times n }, B \\in \\mathbb{R}^{(N-n) \\times n}, C \\in
\\mathbb{R}^{(N-n)\\times (N-n)} \$ . \$A\$ represents the subblock of weights among the random
samples, \$B\$ contains the weights from the random samples to the rest of the pixels, and
\$C\$ contains the weights between all of the remaining pixels.
The function computes \$A\$ and \$B\$ from the data ```X``` using the weight function defined in
```cfg```.
"""
function create_A_B(cfg::NystromMethod, landmarks::Vector{<:Integer}, X)
n = number_of_patterns(X)
p = length(landmarks)
indexes_b = setdiff(collect(1:n), landmarks)
m = length(indexes_b)
n = p
A = zeros(Float32, p, p)
B = zeros(Float32, p, m)
qq = length(get_element(X, 1))
landmarks_m = zeros(Float32, length(get_element(X, 1)), length(landmarks))
# Get a copy of the landamrks
for j = 1:length(landmarks)
get_element!(view(landmarks_m, :, j), X, landmarks[j])
end
Threads.@threads for j = 1:length(landmarks)
A[:,j] = cfg.w(landmarks[j], landmarks, view(landmarks_m, :, j), landmarks_m)
end
vec_k = zeros(Float32, length(get_element(X, 1)), Threads.nthreads())
Threads.@threads for k = 1:length(indexes_b)
thread_id = Threads.threadid()
get_element!(view(vec_k, :, thread_id), X, indexes_b[k])
B[:,k] = cfg.w(indexes_b[k], landmarks, view(vec_k, :, thread_id), landmarks_m)
end
return (A, B)
end
function create_A_B_single_thread(cfg::NystromMethod, landmarks::Vector{<:Integer}, X)
n = number_of_patterns(X)
p = length(landmarks)
indexes_b = setdiff(collect(1:n), landmarks)
m = length(indexes_b)
n = p
A = zeros(Float32, p, p)
B = zeros(Float32, p, m)
qq = length(get_element(X, 1))
landmarks_m = zeros(Float32, length(get_element(X, 1)), length(landmarks))
for j = 1:length(landmarks)
get_element!(view(landmarks_m, :, j), X, landmarks[j])
end
for j = 1:length(landmarks)
A[:,j] = cfg.w(landmarks[j], landmarks, view(landmarks_m, :, j), landmarks_m)
end
vec_k = zeros(Float32, length(get_element(X, 1)))
for k = 1:length(indexes_b)
get_element!(vec_k, X, indexes_b[k])
B[:,k] = cfg.w(indexes_b[k], landmarks, vec_k, landmarks_m)
end
return (A, B)
end
"""
create_A_B(cfg::NystromMethod, X)
# Arguments:
- `cfg::NystromMethod`
- `X`
#Return values
- Sub-matrix A
- Sub-matrix B
- `Vector{Int}`. The sampled points used build the sub-matrices
This is an overloaded method. Computes the submatrices A and B according to
[`create_A_B(::NystromMethod, ::Vector{Int}, ::Any)`](@ref).
Returns the two submatrices and the sampled points used to calculate them.
"""
function create_A_B(cfg::NystromMethod, X)
landmarks = select_landmarks(cfg.landmarks_selector, cfg.number_of_landmarks, X)
if (cfg.threaded)
(A, B) = create_A_B(cfg::NystromMethod, landmarks, X)
else
(A, B) = create_A_B_single_thread(cfg::NystromMethod, landmarks, X)
end
return (A, B, landmarks)
end
"""
embedding(cfg::NystromMethod, X)
This is an overloaded function
"""
function embedding(cfg::NystromMethod, X)
n = number_of_patterns(X)
landmarks = select_landmarks(cfg.landmarks_selector,
cfg.number_of_landmarks,
X)
return embedding(cfg, landmarks, X)
end
"""
embedding(cfg::NystromMethod, landmarks::Vector{Int}, X)
# Arguments
- `cfg::[NystromMethod](@ref)`
- `landmarks::Vector{Int}`
- `x::Any`
# Return values
- `(E, L)`: The approximated eigenvectors, the aprooximated eigenvalues
Performs the eigenvector embedding according to
"""
function embedding(cfg::NystromMethod, landmarks::Vector{<:Integer}, X)
if (cfg.threaded)
(A, B) = create_A_B(cfg, landmarks, X)
else
(A, B) = create_A_B_single_thread(cfg, landmarks, X)
end
return embedding(cfg, A, B, landmarks)
end
function compute_dhat(AA::Matrix{T}, BB::Matrix{T}) where T
n = size(AA, 1)
m = size(BB, 2)
dhat = zeros(T, n + m)
dhat[1:n] = sum(AA, dims = 1) + sum(BB, dims = 2)'
dhat[n + 1:end] = sum(BB, dims = 1) + sum(BB, dims = 2)' * pinv(AA) * BB
dhat[dhat .< 0] .= 0
return 1 ./ (sqrt.(dhat) .+ eps())
end
function compute_V(AA::Matrix{T}, BB::Matrix{T}, nvec::Integer) where T <: Number
n = size(AA, 1)
m = size(BB, 2)
Asi = real(sqrt(Symmetric(pinv(AA))))
F = svd(AA + ((Asi * (BB * BB')) * Asi))
V_1 = (Asi * F.U) .* vec((1 ./ (sqrt.(F.S) .+ eps())))'
VA = AA * V_1[:,1:nvec + 1]
VB = BB' * V_1[:,1:nvec + 1]
return vcat(VA, VB)
end
function normalize_A_and_B!(AA::Matrix, BB::Matrix)
n = size(AA, 1)
m = size(BB, 2)
dhat = compute_dhat(AA, BB)
vv = view(dhat, 1:n)
vb = view(dhat, n .+ (1:m))
for I in CartesianIndices(size(AA))
@inbounds AA[I] *= vv[I[1]] * vv[I[2]]
end
for I in CartesianIndices(size(BB))
@inbounds BB[I] *= vv[I[1]] * vb[I[2]]
end
end
"""
embedding(cfg::NystromMethod, A::Matrix, B::Matrix, landmarks::Vector{Int})
Performs the eigenvector approximation given the two submatrices A and B.
"""
function embedding(cfg::NystromMethod, AA::Matrix, BB::Matrix, landmarks::Vector{<:Integer})
n = size(AA, 1)
m = size(BB, 2)
normalize_A_and_B!(AA, BB)
V = compute_V(AA, BB, cfg.nvec)
indexes_b = setdiff(collect(1:(n + m)), landmarks)
indexes = sortperm(vcat(landmarks, indexes_b))
for i = 2:cfg.nvec + 1
V[:,i] = V[:,i] ./ V[:,1]
end
return V[indexes,2:cfg.nvec + 1]
end
"""
```julia
struct DNCuts
```
Downsampled normalized cuts, from:
# Multiscale Combinatorial Grouping for Image Segmentation and Object Proposal Generation
## Jordi Pont-Tuset, Pablo Arbeláez, Jonathan T. Barron, Ferran Marques, Jitendra Malik
# Members
- `scales::Integer`. Number of downsampling scales.
- `nev::Integer`. Number of eigenvectors to obtain.
- `img_size`. Size `(rows, cols)` of the image associated with the affinity matrix.
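A usage sketch (illustrative; `W` is a hypothetical sparse pixel-affinity matrix for a 32×32 image whose pixel ordering matches `img_size`):
```julia
d = DNCuts(2, 4, (32, 32))
V = embedding(d, W) # 1024×4 whitened eigenvectors
```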
"""
struct DNCuts <: EigenvectorEmbedder
scales::Integer
nev::Integer
img_size
end
function pixel_decimate(img_size::Tuple{Int,Int}, steps)
(nr, nc) = img_size
decimated = CartesianIndices(img_size)[1:steps:nr, 1:steps:nc]
return (LinearIndices(img_size)[decimated][:], size(decimated))
end
embedding(d::DNCuts, g::Graph) = embedding(d, adjacency_matrix(g))
"""
```
embedding(d::DNCuts, L)
```
"""
function embedding(d::DNCuts, W::AbstractMatrix{T}) where T<:Number
matrices = []
img_size = d.img_size
for j = 1:d.scales
(idx, img_size) = pixel_decimate(img_size, 2)
B = W[:, idx]
C = Diagonal(vec(1 ./ sum(B, dims=2))) * B
push!(matrices, C)
W = C' * B
end
ss = ShiMalikLaplacian(d.nev)
V = embedding(ss, NormalizedLaplacian(NormalizedAdjacency(CombinatorialAdjacency(W))))
for s = d.scales:-1:1
V = matrices[s] * V
end
return svd_whiten(V)
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 2876 | export clusterize,
KMeansClusterizer,
YuEigenvectorRotation,
EigenvectorClusterizer,
EigenvectorClusteringResult
using Clustering
import Clustering.assignments
import Clustering.ClusteringResult
struct EigenvectorClusteringResult{T<:Integer} <: ClusteringResult
assignments::Vector{T}
end
function assignments(r::EigenvectorClusteringResult)
return r.assignments
end
abstract type EigenvectorClusterizer end
"""
```julia
struct KMeansClusterizer <: EigenvectorClusterizer
k::Integer
init::Symbol
end
```
# Members
- `k::Integer`. The number of clusters.
- `init::Symbol`. The initialization strategy passed to `Clustering.kmeans` (defaults to `:kmpp`).
"""
struct KMeansClusterizer <: EigenvectorClusterizer
k::Integer
init::Symbol
end
function KMeansClusterizer(k::Integer)
return KMeansClusterizer(k, :kmpp)
end
function clusterize(t::KMeansClusterizer, E)
model = kmeans(Matrix(E'), t.k, init =t.init)
return EigenvectorClusteringResult(assignments(model))
end
"""
Multiclass Spectral Clustering. Stella X. Yu and Jianbo Shi.
"""
struct YuEigenvectorRotation <: EigenvectorClusterizer
maxIter::Integer
end
function YuEigenvectorRotation()
return YuEigenvectorRotation(500)
end
function clusterize(cfg::YuEigenvectorRotation, X_star_hat::Matrix)
(N,k) = size(X_star_hat)
X_star_hat = spdiagm(0=>1 ./ sqrt.(vec(mapslices(norm, X_star_hat, dims=[2]))))*X_star_hat
hasConverged = false
R_star = zeros(k,k)
R_star[:, 1] = [X_star_hat[rand(1:N), i] for i = 1:k ]
c = zeros(N)
for j=2:k
c = c + abs.(X_star_hat*R_star[:,j-1])
i = findmin(c)[2]
R_star[:, j] = X_star_hat[i, :]'
end
lastObjectiveValue = Inf
nIter = 0
ncut_value = 0
X_star = nothing
while !hasConverged
nIter = nIter+ 1
X_hat = X_star_hat*R_star
#non maximum supression
labels = vec([I[2] for I in findmax(X_hat, dims=2)[2]])
X_star = zeros(size(X_star_hat))
for (i, l) = enumerate(labels)
X_star[i, l] = l
end
F = svd(X_star' * X_star_hat, full=true)
U, S, Vh = (F.U, F.S, F.Vt)
ncutValue = sum(S)
if ((abs(ncutValue - lastObjectiveValue) < eps()) || (nIter > cfg.maxIter))
hasConverged = true
else
lastObjectiveValue = ncutValue
R_star = Vh'*U'
end
end
labels = vec([I[2] for I in findmax(X_star, dims=2)[2]])
return EigenvectorClusteringResult(labels)
end
"""
```julia
function clusterize{T<:EigenvectorEmbedder, C<:EigenvectorClusterizer}(cfg::T, clus::C, X)
```
Given a set of patterns `X`, generates an eigenvector space according to `T<:EigenvectorEmbedder` and then clusters the eigenvectors using the algorithm defined
by `C<:EigenvectorClusterizer`.
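A usage sketch (illustrative data and parameters):
```julia
X = randn(2, 300)
w(i, neigh, v, m) = exp.(-vec(sum(abs2, m .- v, dims=1)))
emb = NystromMethod(RandomLandmarkSelection(), 30, w, 3)
res = clusterize(emb, KMeansClusterizer(3), X)
labels = assignments(res)
```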
"""
function clusterize(cfg::T, clus::C, X, params...) where T<:EigenvectorEmbedder where C<:EigenvectorClusterizer
E = embedding(cfg,X, params...)
return clusterize(clus, E)
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 9966 | export embedding,
NgLaplacian,
ShiMalikLaplacian,
YuShiPopout,
PartialGroupingConstraints
using LightGraphs
using LightGraphs.LinAlg
using ArnoldiMethod
using LinearAlgebra
using Arpack
abstract type AbstractEmbedding <: EigenvectorEmbedder
end
"""
```julia
type NgLaplacian <: AbstractEmbedding
```
# Members
- `nev::Integer`. The number of eigenvectors to obtain
- `normalize::Bool`. Whether to normalize the obtained eigenvectors
Given an affinity matrix `` W \\in \\mathbb{R}^{n \\times n} ``, Ng et al. define the laplacian as `` L = D^{-\\frac{1}{2}} W D^{-\\frac{1}{2}} `` where `` D `` is a diagonal matrix whose (i,i)-element is the sum of W's i-th row.
The embedding function solves a relaxed version of the following optimization problem:
``\\begin{array}{cl}
\\displaystyle \\max_{U \\in \\mathbb{R}^{n \\times k}} & \\mathrm{Tr}(U^T L U) \\\\
\\textrm{s.t.} & U^T U = I
\\end{array}``
U is a matrix that contains the `nev` largest eigenvectors of `` L ``.
# References
- [On Spectral Clustering: Analysis and an algorithm. Andrew Y. Ng, Michael I. Jordan, Yair Weiss](http://ai.stanford.edu/~ang/papers/nips01-spectral.pdf)
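A usage sketch (illustrative; assumes this package's graph-creation API, `KNNNeighborhood` and `create`, with a hypothetical Gaussian weight):
```julia
X = randn(2, 200)
weight(i, neigh, v, m) = exp.(-vec(sum(abs2, m .- v, dims=1)))
graph = create(KNNNeighborhood(X, 7), weight, X)
V = embedding(NgLaplacian(2), graph)
```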
"""
struct NgLaplacian <: AbstractEmbedding
nev::Integer
normalize::Bool
end
NgLaplacian(nev::Integer) = NgLaplacian(nev, true)
"""
```julia
embedding(cfg::NgLaplacian, W::CombinatorialAdjacency)
```
Performs the eigendecomposition of the laplacian matrix of the weight matrix `` W `` defined according to [`NgLaplacian`](@ref)
"""
function embedding(cfg::NgLaplacian, W::CombinatorialAdjacency)
return embedding(cfg, NormalizedAdjacency(W))
end
"""
```julia
embedding(cfg::NgLaplacian, L::NormalizedAdjacency)
```
Performs the eigendecomposition of the laplacian matrix of the weight matrix `` W `` defined according to [`NgLaplacian`](@ref)
"""
embedding(cfg::NgLaplacian, L::NormalizedAdjacency) = embedding(cfg, sparse(L))
"""
```julia
embedding(cfg::NgLaplacian, L::SparseMatrixCSC)
```
Performs the eigendecomposition of the laplacian matrix of the weight matrix `` W `` defined according to [`NgLaplacian`](@ref)
"""
function embedding(cfg::NgLaplacian, L::AbstractMatrix)
(vals, vec) = LightGraphs.eigs(L, nev = cfg.nev + 5, which = LM(), restarts=5000)
vec = real(vec)
a = (.!(isapprox.(vals, 1)))
vec = vec[:, a]
vec = vec[:, 1:cfg.nev]
if cfg.normalize
return normalize_rows(vec)
else
return vec
end
end
"""
The normalized laplacian as defined in `` D^{-\\frac{1}{2}} (D-W) D^{-\\frac{1}{2}} ``.
## References:
- Spectral Graph Theory. Fan Chung
- Normalized Cuts and Image Segmentation. Jiambo Shi and Jitendra Malik
```
type ShiMalikLaplacian <: AbstractEmbedding
```
# Members
- `nev::Integer`. The number of eigenvector to obtain.
- `normalize::Bool`. Whether to normalize the obtained eigenvectors
"""
struct ShiMalikLaplacian <: AbstractEmbedding
nev::Integer
normalize::Bool
end
ShiMalikLaplacian(nev::Integer) = ShiMalikLaplacian(nev, true)
"""
```
struct PartialGroupingConstraints <: AbstractEmbedding
```
# Members
- `nev::Integer`. The number of eigenvector to obtain.
- `smooth::Bool`. Whether to use smooth constraints
- `normalize::Bool`. Whether to normalize the rows of the obtained vectors
Segmentation Given Partial Grouping Constraints
Stella X. Yu and Jianbo Shi
"""
struct PartialGroupingConstraints <: AbstractEmbedding
nev::Integer
smooth::Bool
normalize::Bool
end
function PartialGroupingConstraints(nev::Integer; smooth::Bool=true, normalize::Bool=false)
return PartialGroupingConstraints(nev, smooth, normalize)
end
"""
```
struct PGCMatrix{T,I,F} <: AbstractMatrix{T}
```
Partial grouping constraint structure. This struct is passed to eigs to
perform the L*x computation according to (41), (42) and (43) of
""Segmentation Given Partial Grouping Constraints""
"""
struct PGCMatrix{T} <: AbstractMatrix{T}
W::NormalizedAdjacency{T}
At
end
import Base.size
import LinearAlgebra.issymmetric
import Base.*
import LinearAlgebra.mul!
function size(a::PGCMatrix)
return size(a.W)
end
function issymmetric(a::PGCMatrix)
return true
end
function mul!(dest::AbstractVector, a::PGCMatrix, x::AbstractVector)
z = x - (a.At * x)
y = a.W * z
dest[:] = y - (a.At* y)
end
function restriction_matrix(nv::Integer, restrictions::Vector{Vector{Integer}})
I = Integer[]
J = Integer[]
V = Float64[]
k = 0
for j = 1:length(restrictions)
U_t = restrictions[j]
for i=1:length(U_t)-1
elem_i = U_t[i]
elem_j = U_t[i+1]
k += 1
push!(I, elem_i)
push!(J, k)
push!(V, 1.0)
push!(I, elem_j)
push!(J, k)
push!(V, -1.0)
end
end
return sparse(I, J, V, nv, k)
end
"""
```
function embedding(cfg::PartialGroupingConstraints, L::NormalizedAdjacency, restrictions::Vector{Vector{Integer}})
```
# Arguments
- `cfg::PartialGroupingConstraints`
- `L::NormalizedAdjacency`
- `restrictions::Vector{Vector{Integer}}`. Each inner vector lists the vertices that must belong to the same group.
"""
function embedding(cfg::PartialGroupingConstraints, L::NormalizedAdjacency, restrictions::Vector{Vector{Integer}})
U = restriction_matrix(size(L, 1), restrictions)
if (cfg.smooth)
U = sparse(L.A)' * U
end
DU = sparse(spdiagm(0=>prescalefactor(L)) * U)
F = svds(DU, nsv=size(U, 2)-1)[1]
AAt = sparse(F.U)*sparse(F.U)'
(eigvals, eigvec) = LightGraphs.eigs(PGCMatrix(L, AAt), nev = cfg.nev, which = LM())
eigvec = real(eigvec)
V = spdiagm(0=>prescalefactor(L)) * eigvec
if cfg.normalize
return normalize_rows(V)
else
return V
end
end
"""
```
embedding(cfg::PartialGroupingConstraints, gr::Graph, restrictions::Vector{Vector{Integer}})
```
# Arguments
- `cfg::PartialGroupingConstraints`
- `gr::Graph`
- `restrictions::Vector{Vector{Integer}}`. Each inner vector lists the vertices that must belong to the same group.
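For example (illustrative; `gr` is a hypothetical `Graph`):
```julia
restrictions = Vector{Integer}[[1, 2, 3], [10, 11]]
V = embedding(PartialGroupingConstraints(3), gr, restrictions)
```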
"""
function embedding(cfg::PartialGroupingConstraints, gr::Graph, restrictions::Vector{Vector{Integer}})
L = NormalizedAdjacency(CombinatorialAdjacency(adjacency_matrix(gr)))
return embedding(cfg, L, restrictions)
end
"""
```
struct YuShiPopout <: AbstractEmbedding
```
# Members
- `nev::Integer`. The number of eigenvector to obtain.
Understanding Popout through Repulsion
Stella X. Yu and Jianbo Shi
"""
struct YuShiPopout <: AbstractEmbedding
nev::Integer
normalize::Bool
end
YuShiPopout(nev::Integer) = YuShiPopout(nev, true)
"""
```julia
function embedding(cfg::YuShiPopout, grA::Graph, grR::Graph)
```
# References
- Grouping with Directed Relationships. Stella X. Yu and Jianbo Shi
- Understanding Popout through Repulsion. Stella X. Yu and Jianbo Shi
"""
function embedding(cfg::YuShiPopout, grA::Graph, grR::Graph, restrictions::Vector{Vector{Integer}})
Wa = adjacency_matrix(grA)
Wr = adjacency_matrix(grR)
dr = vec(sum(Wr, dims=1))
W = Wa - Wr + spdiagm(0=>dr)
return embedding(PartialGroupingConstraints(cfg.nev), NormalizedAdjacency(CombinatorialAdjacency(W)), restrictions)
end
"""
```julia
function embedding(cfg::YuShiPopout, grA::Graph, grR::Graph)
```
# References
- Grouping with Directed Relationships. Stella X. Yu and Jianbo Shi
- Understanding Popout through Repulsion. Stella X. Yu and Jianbo Shi
"""
function embedding(cfg::YuShiPopout, grA::Graph, grR::Graph)
Wa = adjacency_matrix(grA)
Wr = adjacency_matrix(grR)
dr = vec(sum(Wr, dims=1))
da = vec(sum(Wa, dims=1))
Weq = Wa - Wr + spdiagm(0=>dr)
Deq = spdiagm(0=>da + dr)
Wa = nothing
da = nothing
Wr = nothing
dr = nothing
(eigvals, eigvec) = Arpack.eigs(Weq, Deq, nev = cfg.nev, tol = 0.000001, which = :LM)
#indexes = sortperm(real(eigvals))
return eigvec
#if (cfg.normalize)
# return SpectralClustering.normalize_rows(eigvec)
#else
# return eigvec
#end
end
"""
```julia
embedding(cfg::ShiMalikLaplacian, W::CombinatorialAdjacency)
```
Performs the eigendecomposition of the normalized laplacian of the weight matrix `` W ``, defined according to [`ShiMalikLaplacian`](@ref)
"""
function embedding(cfg::ShiMalikLaplacian, W::CombinatorialAdjacency)
return embedding(cfg, NormalizedLaplacian(NormalizedAdjacency(W)))
end
"""
```julia
embedding(cfg::ShiMalikLaplacian, L::NormalizedLaplacian)
```
# Parameters
- `cfg::ShiMalikLaplacian`. An instance of a [`ShiMalikLaplacian`](@ref) that specifies the number of eigenvectors to obtain
- `L::NormalizedLaplacian`. The normalized laplacian of the graph whose embedding is going to be computed.
Performs the eigendecomposition of the normalized laplacian matrix
`L` defined according to [`ShiMalikLaplacian`](@ref). Returns
the `cfg.nev` eigenvectors associated with the smallest non-zero
eigenvalues.
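A usage sketch (illustrative; `W` is a hypothetical symmetric sparse affinity matrix):
```julia
using SparseArrays, LinearAlgebra
W = sparse(Symmetric(sprand(100, 100, 0.1)))
V = embedding(ShiMalikLaplacian(3), CombinatorialAdjacency(W))
```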
"""
function embedding(cfg::ShiMalikLaplacian, L::NormalizedLaplacian)
(vals, V) = LightGraphs.eigs(sparse(L), nev=min(cfg.nev + 10, size(L, 1)), which = SR(), restarts=5000)
idxs = findall(real(vals) .> 0.0000001)
idxs = idxs[1:min(length(idxs), cfg.nev)]
V = spdiagm(0 => L.A.A.D.^(-1 / 2)) * real(V[:,idxs]) # recover the generalized eigenvectors: x = D^(-1/2) y
if cfg.normalize
return normalize_rows(V)
else
return V
end
end
"""
```
embedding(cfg::T, neighborhood::VertexNeighborhood, oracle::Function, data) where T<:AbstractEmbedding
```
Convenience method: creates a graph from `data` using `neighborhood` and the weight function `oracle`, then computes the embedding defined by `cfg`.
"""
function embedding(cfg::T, neighborhood::VertexNeighborhood, oracle::Function, data) where T <: AbstractEmbedding
    graph = create(neighborhood, oracle, data)
return embedding(cfg, graph)
end
"""
```julia
embedding(cfg::T, gr::Graph) where T<:AbstractEmbedding
```
Computes the embedding of the weight matrix `` W `` derived from the graph `gr`, according to the embedding configuration `cfg`.
"""
function embedding(cfg::T, gr::Graph) where T <: AbstractEmbedding
return embedding(cfg, CombinatorialAdjacency(adjacency_matrix(gr, dir = :both)))
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 3207 | export AbstractLandmarkSelection,
LandmarkBasedRepresentation,
RandomLandmarkSelection,
EvenlySpacedLandmarkSelection,
select_landmarks,
BresenhamLandmarkSelection,
MS3
using StatsBase
"""
```julia
abstract type AbstractLandmarkSelection end
```
Abstract type that defines how to sample data points. Types that inherit from `AbstractLandmarkSelection` have to implement the following interface:
```julia
select_landmarks(c::L, X) where L<:AbstractLandmarkSelection
```
The `select_landmarks` function returns an array with the indices of the sampled points.
# Arguments
- ```c::T<:AbstractLandmarkSelection```. The landmark selection type.
- ```X```. The data to be sampled.
"""
abstract type AbstractLandmarkSelection end
"""
```julia
struct RandomLandmarkSelection <: AbstractLandmarkSelection
```
`RandomLandmarkSelection` randomly samples `n` data points from a dataset.
"""
struct RandomLandmarkSelection <: AbstractLandmarkSelection
end
"""
```julia
select_landmarks(c::RandomLandmarkSelection, n::Integer, X)
```
The function returns the indices of `n` random points sampled according to `RandomLandmarkSelection`.
# Arguments
- c::RandomLandmarkSelection.
- n::Integer. The number of data points to sample.
- X. The data to be sampled.
"""
function select_landmarks(c::RandomLandmarkSelection,n::Integer, X)
return StatsBase.sample(1:number_of_patterns(X), n, replace = false)
end
"""
```
struct EvenlySpacedLandmarkSelection <: AbstractLandmarkSelection
```
The `EvenlySpacedLandmarkSelection` selection method selects `n` evenly spaced points from a dataset.
"""
struct EvenlySpacedLandmarkSelection <: AbstractLandmarkSelection
end
"""
```
select_landmarks(c::EvenlySpacedLandmarkSelection,n::Integer, X)
```
"""
function select_landmarks(c::EvenlySpacedLandmarkSelection, n::Integer, X)
m = number_of_patterns(X)
return collect(1:round(Integer,floor(m/n)):m)[1:n]
end
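# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): selecting landmarks from a dataset whose patterns are the columns of
# a matrix. The sizes are arbitrary.
#=
using SpectralClustering
X = rand(2, 1000)
idx = select_landmarks(EvenlySpacedLandmarkSelection(), 50, X)  # 50 indices into 1:1000
landmarks = X[:, idx]
=#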
struct BresenhamLandmarkSelection <: AbstractLandmarkSelection
end
function select_landmarks(c::BresenhamLandmarkSelection, m::Integer, X)
n = number_of_patterns(X)
return round.(Integer, [(i-1)*n//m + n//(2*m) for i=1:m])
end
"""
```
struct MS3 <: AbstractLandmarkSelection
proportion::Float64
sim::Function
end
```
The `MS3` selection method selects `m` landmarks iteratively: at every step a random subset of the
remaining points (its size is controlled by `proportion`) is evaluated, and the candidate with the
lowest accumulated squared similarity (`sim`) to the already selected landmarks is added.
# References
- Nystrom Sampling Depends on the Eigenspectrum Shape of the Data
"""
struct MS3 <: AbstractLandmarkSelection
proportion::Float64
sim::Function
end
function select_landmarks(c::MS3, m::Integer, X)
cant = number_of_patterns(X)
points = [rand(1:cant); rand(1:cant)]
while (length(points)<m)
T_candidates = setdiff(1:cant, points)
T = rand(T_candidates, round(Integer,length(T_candidates)*c.proportion))
min_simmilarity = Inf
min_point = 0
for t in T
simmilarity = 0
for p in points
simmilarity = simmilarity + c.sim(get_element(X,p), get_element(X,t))^2
end
if simmilarity < min_simmilarity
min_simmilarity = simmilarity
min_point = t
end
end
push!(points, min_point)
end
return points
end
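# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): MS3 requires a pairwise similarity function between two patterns.
# The kernel bandwidth and the candidate proportion below are arbitrary.
#=
using SpectralClustering
X = rand(2, 500)
sim(a, b) = exp(-sum(abs2, a .- b) / 2)
idx = select_landmarks(MS3(0.3, sim), 20, X)
=#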
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 5498 | export embedding,
View,
CoRegularizedMultiView,
KernelProduct,
KernelAddition
"""
A single view used for multi-view spectral embedding.
```julia
struct View
    nev::Integer
    lambda::Float64
end
```
# Members
- `nev::Integer`. The number of eigenvectors computed for this view.
- `lambda::Float64`. The co-regularization weight of this view.
"""
struct View
nev::Integer
lambda::Float64
end
"""
# Co-regularized Multi-view Spectral Clustering
### Abhishek Kumar, Piyush Rai, Hal Daumé
# Members
- `max_iterations::Integer`. The maximum number of iterations of the alternating optimization.
- `views::Vector{View}`. The views to co-regularize.
"""
struct CoRegularizedMultiView <: EigenvectorEmbedder
max_iterations::Integer
views::Vector{View}
end
function CoRegularizedMultiView(views::Vector{View}; max_iterations::Integer=500)
return CoRegularizedMultiView(max_iterations, views)
end
"""
```julia
embedding(cfg::CoRegularizedMultiView, X::Vector)
```
# Arguments
- `cfg::CoRegularizedMultiView`
- `X::Vector{Graph}`
An example that shows how to use this method is provided in the Usage section of the manual.
"""
function embedding(cfg::CoRegularizedMultiView, X::Vector{Graph}; disagreement::Union{Nothing,Vector} = nothing)
U = Vector{Matrix}(undef, length(cfg.views))
Laplacians = [sparse(NormalizedAdjacency(CombinatorialAdjacency(adjacency_matrix(X[i], dir=:both)))) for i=1:length(cfg.views) ]
#Initialize all U(v),2≤v≤m$
for i=2:length(cfg.views)
U[i] = embedding(NgLaplacian(cfg.views[i].nev, false), Laplacians[i])
end
curr_objective = -Inf
prev_objective = 0
best_objective = Inf
iterations_without_improvement = 0
threshold = 0.00001
iteration = 0
while iteration < cfg.max_iterations && (abs(curr_objective - prev_objective) > threshold) && (iterations_without_improvement < 8)
for i=1:length(cfg.views)
L = Laplacians[i]
for j=1:length(cfg.views)
if (j!=i)
L = L + cfg.views[j].lambda*U[j]*U[j]'
end
end
U[i] = embedding(NgLaplacian(cfg.views[i].nev, false), L)
prev_objective = curr_objective
curr_objective = sum([tr((U[d]*U[d]')*(U[j]*U[j]')) for j=1:length(U) for d=1:length(U)])
if curr_objective < best_objective
best_objective = curr_objective
iterations_without_improvement = 0
else
iterations_without_improvement += 1
end
            if (disagreement !== nothing)
push!( disagreement, curr_objective)
end
iteration += 1
end
end
return U[1]
end
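# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): co-regularized embedding of two views of the same patterns. The
# similarity bandwidths and the regularization weights are hypothetical; the
# test suite contains a complete, runnable example.
#=
using SpectralClustering
X = hcat(randn(2, 100) .- 10, randn(2, 100) .+ 10)
sim_1(i, neigh, v, m) = vec(exp.(-sum(abs2, m .- v, dims=1) ./ 15))
sim_2(i, neigh, v, m) = vec(exp.(-sum(abs2, m .- v, dims=1) ./ 45))
knn = KNNNeighborhood(X, 7)
graph_1 = create(knn, sim_1, X)
graph_2 = create(knn, sim_2, X)
coreg = CoRegularizedMultiView([View(1, 0.001), View(1, 0.001)])
U = embedding(coreg, [graph_1, graph_2])
=#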
struct KernelAddition <: EigenvectorEmbedder
embedder::EigenvectorEmbedder
end
function embedding(cfg::KernelAddition, X::Vector{Graph})
W = adjacency_matrix(X[1])
for j=2:length(X)
W += adjacency_matrix(X[j])
end
W = CombinatorialAdjacency(W)
return embedding(cfg.embedder, W)
end
struct KernelProduct <: EigenvectorEmbedder
embedder::EigenvectorEmbedder
end
function embedding(cfg::KernelProduct, X::Vector{Graph})
W = adjacency_matrix(X[1])
for j=2:length(X)
W .*= adjacency_matrix(X[j])
end
W = CombinatorialAdjacency(W)
return embedding(cfg.embedder, W)
end
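# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): the simplest multi-view combinations just add or multiply the
# affinity matrices before a standard embedding, reusing `graph_1`/`graph_2`
# from the sketch above.
#=
emb_add  = embedding(KernelAddition(NgLaplacian(2)), [graph_1, graph_2])
emb_prod = embedding(KernelProduct(NgLaplacian(2)), [graph_1, graph_2])
=#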
"""
struct LargeScaleMultiView
# Large-Scale Multi-View Spectral Clustering via Bipartite Graph. In AAAI (pp. 2750-2756).
## Li, Y., Nie, F., Huang, H., & Huang, J. (2015, January).
[Matlab implementation](https://github.com/zzz123xyz/MVSC/blob/master/MVSC.m)
# Members
- `k::Integer`. Number of clusters.
- `n_salient_points::Integer`. Number of salient points.
- `k_nn::Integer`. k nearest neighbors.
- `gamma::Float64`.
"""
struct LargeScaleMultiView
k::Integer
n_salient_points::Integer
k_nn::Integer
gamma::Float64
end
"""
# Parameters
- `cfg::LargeScaleMultiView`
- `data::Vector`. An array of views.
"""
#=function embedding(cfg::LargeScaleMultiView, data::Vector)
niters = 10;
n =
V = length(data)
salient_points =
rest_of_points =
a = fill(1/nbclusters, [V]);
for v = 1:V
RestPnt = data{v}(:,RestPntInd)';
PairDist = pdist2(RestPnt,SltPnt(:,dim_V_ind1(v): dim_V_ind2(v)));
[score, ind] = sort(PairDist,2);
ind = ind(:,1:k);
%*****
%make a Indicator Mask to record j \in \theta_i
IndMask = zeros(n - nbSltPnt, nbSltPnt);
for i = 1:n - nbSltPnt
IndMask(i, ind(i,:)) = 1;
end
Kernel = exp(-(PairDist).^2 ./ (2*param^2));
Kernel = Kernel.*IndMask;
SumSltKnl = repmat(sum(Kernel, 2),[1,nbSltPnt]);
Z{v} = Kernel ./ SumSltKnl;
Dc{v} = diag(sum(Z{v},1)+eps);
Dr{v} = diag(sum(Z{v},2));
D{v} = blkdiag(Dr{v},Dc{v});
tmp1 = zeros(n);
tmp1(1:n-nbSltPnt,n-nbSltPnt+1:end) = Z{v};
tmp1(n-nbSltPnt+1:end,1:n-nbSltPnt) = Z{v}';
W{v} = tmp1;
L{v} = eye(n) - (D{v}^-0.5) * W{v} * (D{v}^-0.5);
end
for t = 1:niters
L_sum = zeros(n, n);
Z_hat = zeros(n - nbSltPnt, nbSltPnt);
for v = 1:V
Z_hat = Z_hat + a(v)^gamma*Z{v}*(Dc{v})^(-0.5);
end
% compute G according to (14)
[Gx_a, S, Gu_a] = svd(Z_hat, 'econ');
Gx = Gx_a(:,1:nbclusters);
Gu = Gu_a(:,1:nbclusters);
G = [Gx', Gu']';
for v = 1:V
h(v) = trace(G'*L{v}*G); %*** h(v) = trace(G'*L{v}*G); dim of G mismatch L
end
% compute a(v) according to (10)
tmp1 = (gamma .* h).^(1/(1-gamma)) ;
a = tmp1 ./ sum(tmp1);
[Y, C] = kmeans(G, nbclusters);
% compute the value of objective function (5)
for v = 1:V
L_sum = L_sum + a(v)^gamma*L{v};
end
obj_value(t) = trace(G'*L_sum*G); %obj_value(t) = trace(G'*L_sum*G);
end
=#
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 359 | module SpectralClustering
export EigenvectorEmbedder,
embedding
abstract type EigenvectorEmbedder end
include("Utils/DataAccess.jl")
include("Utils/DataProcessing.jl")
include("Graph/Graphs.jl")
include("LandmarkSelection.jl")
include("Embedding.jl")
include("ApproximateEmbedding.jl")
include("MultiView.jl")
include("EigenvectorClustering.jl")
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 8815 | using NearestNeighbors
using StatsBase
using Statistics
export VertexNeighborhood,
KNNNeighborhood,
create,
PixelNeighborhood,
local_scale,
neighbors,
RandomNeighborhood,
CliqueNeighborhood
import SpectralClustering: spatial_position
import Base.ones
"""
```julia
struct RandomKGraph
```
The type RandomKGraph defines the parameters needed to create a random k-graph.
Every vertex is connected to `k` random neighbors.
# Members
- `number_of_vertices::Integer`. Defines the number of vertices of the graph.
- `k::Integer`. Defines the minimum number of neighbors of every vertex.
"""
struct RandomKGraph
number_of_vertices::Integer
k::Integer
end
"""
```julia
create(cfg::RandomKGraph)
```
Construct a [`RandomKGraph`](@ref) such that every vertex is connected with other k random vertices.
"""
function create(cfg::RandomKGraph)
g = Graph(cfg.number_of_vertices)
for i = 1:cfg.number_of_vertices
cant = 0
while cant < cfg.k
selected = rand(1:cfg.number_of_vertices)
while selected == i
selected = rand(1:cfg.number_of_vertices)
end
connect!(g, i, selected, rand())
cant = cant + 1
end
end
    return g
end
"""
```julia
abstract type VertexNeighborhood end
```
The abstract type VertexNeighborhood provides an interface to query for the
neighborhood of a given vertex. Every concrete type that inherit from
VertexNeighborhood must define the function
```julia
neighbors(cfg::T, j::Integer, data) where T<:VertexNeighborhood
```
which returns the neighbors list of the vertex j for the given data.
"""
abstract type VertexNeighborhood end
"""
```julia
struct PixelNeighborhood <: VertexNeighborhood
```
`PixelNeighborhood` defines the neighborhood of a given pixel based on its spatial location. Given a pixel located at (x,y), it returns every pixel inside
the window \$[x-e, x+e] \\times [y-e, y+e]\$, clipped at the image borders.
# Members
- `e::Integer`. Defines the radius of the neighborhood.
"""
struct PixelNeighborhood <: VertexNeighborhood
e::Integer
end
"""
```julia
neighbors(cfg::PixelNeighborhood, j::Integer, img)
```
Returns the neighbors of the pixel j according to the specified in [`PixelNeighborhood`](@ref)
"""
function neighbors(cfg::PixelNeighborhood, j::Integer, img::Matrix{T}) where T <: Colorant
pos = CartesianIndices(img)[j]
w_r = max(pos[1] - cfg.e, 1):min(pos[1] + cfg.e, size(img, 1))
w_c = max(pos[2] - cfg.e, 1):min(pos[2] + cfg.e, size(img, 2))
return vec(map(x->LinearIndices(img)[x[1],x[2]], CartesianIndices((w_r, w_c))))
end
"""
```julia
struct CliqueNeighborhood <: VertexNeighborhood
```
`CliqueNeighborhood` specifies that the neighborhood for a given vertex \$j\$ in a
graph of \$n\$ vertices are the remaining n-1 vertices.
"""
struct CliqueNeighborhood <: VertexNeighborhood
end
"""
```julia
neighbors(config::CliqueNeighborhood, j::Integer, X)
```
Return every other vertex index different from \$j\$. See [`CliqueNeighborhood`](@ref)
"""
function neighbors(config::CliqueNeighborhood, j::Integer, X)
return filter!(x->x != j, collect(1:number_of_patterns(X)))
end
"""
```julia
struct KNNNeighborhood <: VertexNeighborhood
k::Integer
tree::KDTree
end
```
`KNNNeighborhood` specifies that the neighborhood for a given vertex \$j\$ are its \$k\$ nearest neighbors. It uses a KD-tree to search for the nearest patterns.
# Members
- `k::Integer`. The number of nearest neighbors to connect.
- `tree::KDTree`. Internal data structure.
- `f::Function`. Transformation function
"""
struct KNNNeighborhood <: VertexNeighborhood
k::Integer
tree::KDTree
t::Function
end
"""
```julia
KNNNeighborhood(X, k::Integer, f::Function = x->x)
```
Create the [`KNNNeighborhood`](@ref) type by building a `k`-nn tree from the data `X`,
optionally transforming every pattern with `f` before indexing it.
"""
function KNNNeighborhood(X, k::Integer, f::Function = x->x)
tree = KDTree(hcat([f(get_element(X, j)) for j = 1:number_of_patterns(X)]...))
return KNNNeighborhood(k, tree, f)
end
neighbors(config::KNNNeighborhood, j::Integer, X) = neighbors(config, get_element(X, j))
function neighbors(config::KNNNeighborhood, data)
idxs, dists = knn(config.tree, config.t(data), config.k + 1, true)
return idxs[2:config.k + 1]
end
"""
```julia
struct RandomNeighborhood <: VertexNeighborhood
k::Integer
end
```
For a given index `j`, return `k` random vertices different from `j`.
"""
struct RandomNeighborhood <: VertexNeighborhood
k::Integer
end
function neighbors(config::RandomNeighborhood, j::Integer, X)
samples = StatsBase.sample(1:number_of_patterns(X), config.k, replace = false)
if (in(j, samples))
filter!(e->e != j, samples)
end
while (length(samples) < config.k)
s = StatsBase.sample(1:number_of_patterns(X), 1)[1]
if (s != j)
push!(samples, s)
end
end
return samples
end
"""
```julia
weight(w::Function, i::Integer, j::Integer, X)
```
Invoke the weight function provided to compute the similarity between the pattern `i` and the pattern `j`.
"""
function weight(w::Function, i::Integer, j::Integer, X)
x_i = get_element(X, i)
x_j = get_element(X, j)
return w(i, j, x_i, x_j)
end
"""
```julia
create(w_type::DataType, neighborhood::VertexNeighborhood, oracle::Function,X)
```
Given a [`VertexNeighborhood`](@ref) and a similarity function `oracle`, construct a similarity graph of the patterns in `X`.
"""
function create(w_type::DataType, neighborhood::VertexNeighborhood, oracle::Function, X)
number_of_vertices = number_of_patterns(X)
g = Graph(number_of_vertices; weight_type = w_type)
@Threads.threads for j = 1:number_of_vertices
neigh = neighbors(neighborhood, j, X)
x_j = get_element(X, j)
x_neigh = get_element(X, neigh)
weights = oracle(j, neigh, x_j, x_neigh)
connect!(g, j, neigh, weights)
end
GC.gc()
return g
end
"""
```julia
create(neighborhood::VertexNeighborhood, oracle::Function,X)
```
Given a [`VertexNeighborhood`](@ref) and a similarity function `oracle`, construct a similarity graph of the patterns in `X`.
"""
function create(neighborhood::VertexNeighborhood, oracle::Function, X)
create(Float64, neighborhood, oracle, X)
end
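# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): building a k-nearest-neighbor similarity graph from a matrix whose
# columns are the patterns. The Gaussian bandwidth is arbitrary.
#=
using SpectralClustering
X = randn(2, 300)
sim(i, neigh, v, m) = vec(exp.(-sum(abs2, m .- v, dims=1) ./ (2 * 1.0^2)))
gr = create(KNNNeighborhood(X, 7), sim, X)
=#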
"""
```julia
local_scale(neighborhood::T, oracle::Function, X; k::Integer = 7, sortdim::Integer = 1) where T<:VertexNeighborhood
```
Computes the scale of each pattern according to [Self-Tuning Spectral Clustering](https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf).
Returns a matrix containing the local scale of every pattern.
# Arguments
- `neighborhood::T<:VertexNeighborhood`
- `oracle::Function`
- `X`. The data.
\"The selection of the scale \$ \\sigma \$ can be done by studying the statistics of the neighborhoods surrounding points \$ i \$ and \$ j \$.\"
Zelnik-Manor and Perona use \$ \\sigma_i = d(s_i, s_K) \$, where \$s_K\$ is the \$ K \$-th neighbor of point \$ s_i \$.
They \"used a single value of \$K=7\$, which gave good results even for high-dimensional data\".
"""
function local_scale(neighborhood::T, oracle::Function, X; k::Integer = 7, sortdim::Integer=1) where T<:VertexNeighborhood
sort_data(d::AbstractArray; dims=1) = sort(d)
sort_data(d::AbstractMatrix; dims=1) = sort(d, dims=dims)
temp = nothing
distance_function = nothing
try
temp = oracle(get_element(X, 1), get_element(X, [1, 2]))
distance_function = (a,b,c,d)->oracle(c, d)
catch e
temp = oracle(0, [0], get_element(X, 1), get_element(X, [1, 2]))
distance_function = oracle
end
number_of_vertices = number_of_patterns(X)
scales = zeros(size(temp, 2), number_of_vertices)
for j = 1:number_of_vertices
neigh = neighbors(neighborhood, j, X)
distances = distance_function(j, neigh, get_element(X, j), get_element(X, neigh))
scales[:, j] .= sort_data(distances, dims=sortdim)[k, :]
end
return scales
end
#="""
Given a graph (g) created from a X_prev \in R^{d x n}, updates de graph from
the matrix X \in R^{d x m}, m > n. Adding the correspondent vertices and connecting
them whith the existing ones.
"""
function update!(config::GraphCreationConfig,g::Graph,X)
number_of_vertices = number_of_patterns(config.da,X)
old_number_of_vertices = number_of_vertices(g)
for j=old_number_of_vertices+1:number_of_vertices
add_vertex!(g)
end
for j=old_number_of_vertices+1:number_of_vertices
neigh = neighbors(config.neighborhood,j,X)
for i in neigh
w = weight(config.oracle,i,j,X)
connect!(g,i,j,w)
end
end
end
=#
# Weight functions
constant(k) = (i::Integer, neigh, v, m) -> ones(size(m, 2)) * k
ones(i::Integer, neigh, v, m) = ones(size(m, 2))
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 12611 | export
Creation,
Matrices,
Graph,
connect!,
disconnect,
remove_vertex!,
reindex!,
random_graph,
target_vertex,
cycles,
number_of_neighbors
using LightGraphs
import LightGraphs: nv, ne, has_edge, is_directed
import Base.length
import Base.empty!
import Base.iterate
"""
```julia
mutable struct Edge
```
"""
mutable struct Edge{T}
next_v1::Union{Nothing,Edge{T}}
prev_v1::Union{Nothing,Edge{T}}
next_v2::Union{Nothing,Edge{T}}
prev_v2::Union{Nothing,Edge{T}}
v1
v2
weight::T
end
function weight_type(edge::Edge{T}) where T
return T
end
mutable struct Vertex{T, EdgeType}
id::Integer
data::T
edges::Union{Nothing,Edge{EdgeType}}
number_of_edges::Integer
degree::Float64
connections::Set{Integer}
lock::Threads.SpinLock
end
function weight_type(v::Vertex{V,E}) where V where E
return E
end
function Vertex(id::Integer,d::DataType = Any, val=nothing, weight_type::DataType=Float64)
return Vertex{d, weight_type}(id,val,nothing,0,0, Set{Integer}(),Threads.SpinLock())
end
function Edge(v1::Vertex,v2::Vertex,w::Number)
return Edge{weight_type(v1)}(nothing,nothing,nothing,nothing,v1,v2,convert(weight_type(v1),w))
end
mutable struct Graph
vertices::Vector{Vertex}
is_dirty::Bool
end
struct EdgeIterator
e::Union{Nothing,Edge}
i::Integer
end
function empty!(g::Graph)
for i=1:length(g.vertices)
g.vertices[i].edges = nothing
g.vertices[i].number_of_edges = 0
g.vertices[i].degree = 0
end
end
"""
```julia
Graph(n_vertices::Integer=0; vertex_type::DataType = Any ,initial_value=nothing, weight_type::DataType = Float64)
```
Construct an undirected weighted graph of `n_vertices` vertices.
"""
function Graph(n_vertices::Integer=0; vertex_type::DataType = Any ,initial_value=nothing, weight_type::DataType = Float64)
vertices = Vector{Vertex{vertex_type, weight_type}}(undef, n_vertices)
for i=1:n_vertices
vertices[i] = Vertex(i,vertex_type,initial_value, weight_type)
end
return Graph(vertices,false)
end
function is_directed(g::Graph)
return false
end
"""
```julia
nv(g::Graph)
```
Return the number of vertices of `g`.
"""
function nv(g::Graph)
return length(g.vertices)
end
function ne(g::Graph)
return sum([v.number_of_edges for v in g.vertices])
end
function has_edge(gr::Graph, i::Integer, k::Integer)
return (k in gr.vertices[i].connections)
end
function insert!(v::Vertex,e::Edge)
if (e.v1 == v)
        #The next edge of the new one is the original head of the list
        e.next_v1 = v.edges
        #The previous edge of the original head is now the new one
if (v.edges != nothing)
if (v.edges.v1 == v)
v.edges.prev_v1 = e
else
v.edges.prev_v2 = e
end
end
v.edges = e
else
e.next_v2 = v.edges
if (v.edges != nothing)
if (v.edges.v1 == v)
v.edges.prev_v1 = e
else
v.edges.prev_v2 = e
end
end
v.edges = e
end
end
function set_previous(v::Vertex,e::Edge,prev::Union{Nothing,Edge})
if (e.v1 == v)
e.prev_v1=prev
else
e.prev_v2=prev
end
end
function set_next(v::Vertex,e::Edge,next::Union{Nothing,Edge})
if (e.v1 == v)
e.next_v1=next
else
e.next_v2=next
end
end
function linked_list_connect(v::Vertex,e::Edge,next::Union{Nothing,Edge})
set_next(v,e,next)
if (next != nothing)
set_previous(v,next,e)
end
end
function remove!(v::Vertex,e::Edge)
if (e.v1 == v)
prev = e.prev_v1
if (prev == nothing)
v.edges = e.next_v1
if e.next_v1 != nothing
set_previous(v,e.next_v1,nothing)
end
return
else
linked_list_connect(v,prev,e.next_v1)
end
else
prev = e.prev_v2
if (prev == nothing)
v.edges = e.next_v2
if e.next_v2 != nothing
set_previous(v,e.next_v2,nothing)
end
return
else
linked_list_connect(v,prev,e.next_v2)
end
end
end
"""
```julia
function connect!(g::Graph, i::Integer, neighbors::Vector, w::Vector)
```
"""
function connect!(g::Graph, i::Integer, neighbors::Vector, w::Vector)
for j=1:length(neighbors)
connect!(g,i,neighbors[j],w[j])
end
end
"""
```julia
connect!(g::Graph,i::Integer,j::Integer,w::Number)
```
Connect the vertex `i` with the vertex `j` with weight `w`.
"""
function connect!(g::Graph,i::Integer,j::Integer,w::Number)
if (i==j)
return
end
if i>nv(g) || j>nv(g)
throw("Invalid vertex")
end
if (w<=0)
return
end
vertex_j = g.vertices[j]
vertex_i = g.vertices[i]
if !(vertex_j.id in vertex_i.connections)
edge = Edge(g.vertices[i],g.vertices[j],w)
lock(vertex_i.lock)
lock(vertex_j.lock)
insert!(g.vertices[i],edge)
insert!(g.vertices[j],edge)
push!(vertex_i.connections, vertex_j.id)
push!(vertex_j.connections, vertex_i.id)
unlock(vertex_j.lock)
unlock(vertex_i.lock)
g.vertices[i].number_of_edges=g.vertices[i].number_of_edges+1
g.vertices[j].number_of_edges=g.vertices[j].number_of_edges+1
g.vertices[i].degree = g.vertices[i].degree + w
g.vertices[j].degree = g.vertices[j].degree + w
end
end
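# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): building a tiny weighted graph by hand.
#=
using SpectralClustering
import LightGraphs: nv, ne
g = Graph(3)
connect!(g, 1, 2, 0.5)
connect!(g, 2, 3, 1.0)
nv(g)  # 3 vertices
ne(g)  # 4: every undirected edge is counted once per endpoint
=#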
"""
```julia
target_vertex(e::Edge,v::Vertex)
```
Given an edge `e` and a vertex `v` returns the other vertex different from `v`
"""
function target_vertex(e::Edge,v::Vertex)
if (e.v1 == v)
return e.v2
elseif (e.v2 == v)
return e.v1
else
        return nothing
end
end
"""
```julia
length(v::Vertex)
```
Return the number of edges connected to a given vertex.
"""
function length(v::Vertex)
return v.number_of_edges
end
function iterate(v::Vertex)
if v.edges == nothing
return nothing
else
edge = v.edges
next_edge = edge.v1 == v ? edge.next_v1 : edge.next_v2
return (edge, next_edge)
end
end
function iterate(v::Vertex, state)
edge = state
if edge == nothing
return nothing
else
next_edge = edge.v1 == v ? edge.next_v1 : edge.next_v2
return (edge, next_edge)
end
end
import Base.show
function show(io::IO, e::Edge)
println(string(e.v1.id," -(",e.weight, ")-> ",e.v2.id))
end
function show(io::IO, g::Graph)
for vertex in g.vertices
println(vertex)
for e in vertex
println(e)
end
end
end
function show(io::IO, v::Vertex)
repr = "Vertex $(v.id) ("
if v.data != nothing
repr *= "data=$(v.data), "
end
repr *= "n_edges=$(v.number_of_edges), "
repr *= "degree=$(v.degree))"
println(repr)
end
function find_edge(p, v::Vertex)
for e in v
if (p(e))
return e
end
end
return nothing
end
"""
```julia
disconnect(g::Graph,i::Integer,j::Integer)
```
Removes the edge that connects the `i`-th vertex to the `j`-th vertex.
"""
function disconnect(g::Graph,i::Integer,j::Integer)
vertex_i = g.vertices[i]
vertex_j = g.vertices[j]
edge = find_edge(e->(e.v1 == vertex_j || e.v2==vertex_j),g.vertices[i])
remove!(vertex_i,edge)
remove!(vertex_j,edge)
delete!(vertex_i.connections, vertex_j.id)
delete!(vertex_j.connections, vertex_i.id)
vertex_i.degree = vertex_i.degree - edge.weight
vertex_j.degree = vertex_j.degree - edge.weight
g.is_dirty = true
end
function update_connections!(g::Graph)
for i=1:nv(g)
empty!(g.vertices[i].connections)
for e in g.vertices[i]
v_j = target_vertex(e, g.vertices[i])
push!(g.vertices[i].connections, v_j.id)
end
end
end
function reindex!(g::Graph)
for i=1:nv(g)
g.vertices[i].id = i
end
update_connections!(g)
g.is_dirty = false
end
"""
```julia
remove_vertex!(g::Graph,i::Integer)
```
Remove the `i`-th vertex.
"""
function remove_vertex!(g::Graph,i::Integer)
if i>nv(g)
throw("No se puede eliminar")
end
if (g.is_dirty)
reindex!(g)
end
vertex_i = g.vertices[i]
for e in vertex_i
if e.v1 == vertex_i
remove!(e.v2,e)
e.v2.degree = e.v2.degree - e.weight
e.v2.number_of_edges = e.v2.number_of_edges-1
else
remove!(e.v1,e)
e.v1.number_of_edges = e.v1.number_of_edges-1
e.v1.degree = e.v1.degree - e.weight
end
end
deleteat!(g.vertices,i)
g.is_dirty=true
end
function add_vertex!(g::Graph, datatype = Any, data=nothing)
if g.is_dirty
reindex!(g)
end
new_id = nv(g) +1
vertex = Vertex(new_id,datatype, data)
push!(g.vertices,vertex)
return vertex
end
function connect(e::Edge, v::Vertex)
return e.v1 == v || e.v2 == v
end
function number_of_neighbors(g::Graph)
number = zeros(Int,nv(g))
for i=1:length(g.vertices)
n=0
for e in g.vertices[i]
n= n +1
end
number[i] = n
end
return number
end
"""
```
function random_graph(iterations::Integer; probs=[0.4,0.4,0.2], weight=()->5, debug=false)
```
Create a random graph. `probs` is an array of probabilities. The function creates a vertex with probability `probs[1]`, connects two vertices with probability `probs[2]` and deletes a vertex with probability `probs[3]`. The weight of the edges is given by `weight`.
"""
function random_graph(iterations::Integer; probs=[0.4,0.4,0.2], weight=()->5, debug=false)
g= Graph()
for i=1:iterations
action = sum(rand() .>= cumsum(probs)) +1
nog = nv(g)
if action==1
add_vertex!(g)
if debug
println("add_vertex!(g)")
end
elseif action == 2
if (nog >= 2)
v1 = rand(1:nog)
v2 = rand(1:nog)
if (v1!=v2)
if (debug)
println("connect!(g,$(v1),$(v2),5)")
end
connect!(g,v1,v2,weight())
end
end
elseif action == 3
if (nog > 0)
v1 = rand(1:nog)
if (debug)
println("remove_vertex!(g,$(v1))")
end
remove_vertex!(g,v1)
end
end
end
return g
end
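# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): a quick random graph for experimentation; the probabilities and the
# weight generator are arbitrary.
#=
using SpectralClustering
g = random_graph(100; probs=[0.5, 0.4, 0.1], weight=()->rand())
=#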
mutable struct TargetVertexAndWeight
    vertex_id::Integer
    edge_weight::Float64
end
struct Triangle
    edge_1::TargetVertexAndWeight
    edge_2::TargetVertexAndWeight
end
function Triangle()
    return Triangle(TargetVertexAndWeight(-1, -1.0), TargetVertexAndWeight(-1, -1.0))
end
import Base.hash
import Base.isequal
function hash(a::Triangle, h::UInt)
if a.edge_1.vertex_id < a.edge_2.vertex_id
hash(a.edge_1.vertex_id, hash(a.edge_2.vertex_id, hash(:Triangle, h)))
else
hash(a.edge_2.vertex_id, hash(a.edge_1.vertex_id, hash(:Triangle, h)))
end
end
isequal(a::Triangle, b::Triangle) = Base.isequal(hash(a), hash(b))
function compute_cycles(vertice_inicio::Vertex, vertice::Vertex,
visitados::Vector{Bool}, ciclos::Set{Triangle},
cant_aristas::Integer, peso_aristas::TargetVertexAndWeight)
if !visitados[vertice.id]
visitados[vertice.id] = true
cant_aristas = cant_aristas+1
for edge in vertice
vertice_destino = target_vertex(edge, vertice)
if cant_aristas == 1
peso_aristas.vertex_id = vertice_destino.id
peso_aristas.edge_weight = edge.weight
end
if vertice_destino == vertice_inicio && cant_aristas == 3
push!(ciclos, Triangle( deepcopy(peso_aristas), TargetVertexAndWeight(vertice.id, edge.weight)))
visitados[vertice.id]=false
                return # If a cycle was found, no other cycle starts with this edge
else
if !visitados[vertice_destino.id]
if cant_aristas < 3
compute_cycles(vertice_inicio, vertice_destino, visitados, ciclos,
cant_aristas,peso_aristas)
end
end
end
end
visitados[vertice.id]=false
end
end
function cycles(g::Graph)
visitados = fill(false,length(g.vertices))
ciclos_vertices = []
for i=1:length(g.vertices)
v = g.vertices[i]
peso_aristas = TargetVertexAndWeight(-1,-1.0)
ciclos = Set{Triangle}()
compute_cycles(v,v, visitados,ciclos,0,peso_aristas)
push!(ciclos_vertices,ciclos)
end
return ciclos_vertices
end
include("Creation.jl")
include("Matrices.jl")
include("Plot.jl")
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 1309 | import LightGraphs.LinAlg: adjacency_matrix
"""
    adjacency_matrix(g[, T=Float64; dir=:out])
Return a sparse adjacency matrix for a graph, indexed by `[u, v]`
vertices. Non-zero values indicate an edge between `u` and `v`. Users may
override the default data type (`Float64`) and specify an optional direction.
### Optional Arguments
`dir=:out`: `:in`, `:out`, or `:both` are currently supported.
### Implementation Notes
This function is optimized for speed and directly manipulates CSC sparse matrix fields.
"""
function adjacency_matrix(g::Graph, T::DataType=Float64; dir::Symbol=:out)
n_v = nv(g)
nz = ne(g)
colpt = ones(Int64, n_v + 1)
rowval = sizehint!(Vector{Int64}(), nz)
weights = sizehint!(Vector{T}(), nz)
for j in 1:n_v # this is by column, not by row.
wgts = sizehint!(Vector{T}(), g.vertices[j].number_of_edges)
dsts = sizehint!(Vector{Int64}(), g.vertices[j].number_of_edges)
for e in g.vertices[j]
push!(wgts,e.weight)
push!(dsts,target_vertex(e,g.vertices[j]).id)
end
colpt[j + 1] = colpt[j] + length(dsts)
dsts_indices = sortperm(dsts)
append!(rowval, dsts[dsts_indices])
append!(weights, wgts[dsts_indices])
end
return SparseMatrixCSC(n_v, n_v, colpt, rowval, weights)
end
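# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): the sparse affinity matrix of a small hand-built graph.
#=
using SpectralClustering
import LightGraphs.LinAlg: adjacency_matrix
g = Graph(3)
connect!(g, 1, 2, 0.5)
connect!(g, 2, 3, 1.0)
W = adjacency_matrix(g)  # symmetric 3x3 SparseMatrixCSC; W[1, 2] == W[2, 1] == 0.5
=#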
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 673 | using Plots
using GraphRecipes
import GraphRecipes: get_source_destiny_weight
export fixed_layout
function get_source_destiny_weight(g::Graph)
L = ne(g)
sources = Array{Int}(undef, L)
destiny = Array{Int}(undef, L)
weights = Array{Float64}(undef, L)
i = 0
for v in g.vertices
for e in v
i += 1
sources[i] = e.v1.id
destiny[i] = e.v2.id
weights[i] = e.weight
end
end
return sources, destiny, weights
end
function fixed_layout(locations::AbstractArray{T,2}, adjmat::AbstractMatrix; kw...) where T
return locations[1, :], locations[2, :], zeros(Int, size(locations, 2))
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 3627 | export get_element,
number_of_patterns,
number_of_pixels,
get_pixel,
get_element!,
spatial_position,
pattern_dimension,
get_element,
assign!
using ColorTypes
function pattern_dimension(X)
return length(get_element(X,1))
end
"""
```julia
function get_element!(o::D, img::Matrix{C}, i::Vector{<:Integer}) where D<:AbstractArray where C<:Colorant
```
"""
function get_element!(o::D, img::Matrix{C}, i::Vector{<:Integer}) where D<:AbstractArray where C<:Colorant
car_inds = CartesianIndices(img)[i]
values = img[car_inds]
for (j, pos) in enumerate(car_inds)
@inbounds o[1, j] = pos[2]
@inbounds o[2, j] = pos[1]
end
N = length(C)
component = N >= 3 ? (comp1, comp2, comp3, alpha) : (comp1, alpha)
for j=1:length(C)
@inbounds o[2+j,:] = component[j].(values)
end
end
function get_element!(o::D, data::Matrix{C}, i::Integer) where D<:AbstractArray where C<:Number
o[:] = data[:, i]
end
"""
```julia
function assign!(vec::T, val::C) where T<:AbstractArray where C<:Colorant
```
This function assigns the components of the color `val` to the vector `vec`.
"""
function assign!(vec::T, val::C) where T<:AbstractArray where C<:Colorant
N = length(C)
component = N >= 3 ? (comp1, comp2, comp3, alpha) : (comp1, alpha)
for j=1:length(C)
@inbounds vec[j] = component[j](val)
end
end
"""
```julia
get_element!(vec::T, img::Matrix{C}, i::Integer) where T<:AbstractArray where C<:Colorant
```
Return through `vec` the image element `[x, y, c...]`, where \$x,y\$ is the spatial
position of the pixel and `c...` are the color components of the pixel at \$(x,y)\$.
"""
function get_element!(vec::T, img::Matrix{C}, i::Integer) where T<:AbstractArray where C<:Colorant
ind = spatial_position(img,i)
@inbounds vec[1] = ind[2]
@inbounds vec[2] = ind[1]
assign!(view(vec,3:length(vec)), img[i])
end
"""
```
function get_element(img::Matrix{T}, i::Vector) where T<:Colorant
```
"""
function get_element(img::Matrix{T}, i::Vector) where T<:Colorant
m = zeros(length(T)+2,length(i))
get_element!(m,img,i)
return m
end
function get_element(img::Matrix{T}, i::Integer) where T<:Colorant
m = zeros(length(T)+2)
get_element!(m,img,i)
return m
end
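# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): every pixel is exposed as a pattern [x, y, color components...].
# The image below is arbitrary.
#=
using SpectralClustering, Images
img = rand(RGB, 10, 10)
get_element(img, 1)          # 5-element vector: x, y, r, g, b
get_element(img, [1, 2, 3])  # 5x3 matrix, one pattern per column
=#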
"""
```julia
number_of_patterns(X::Matrix{T}) where T<:Colorant
```
Return the number of pixels in the image
"""
number_of_patterns(X::Matrix{T}) where T<:Colorant = size(X,1)*size(X,2)
"""
```julia
spatial_position(X::Matrix, i::Int)
```
Returns the sub indexes from the linear index ```i```
"""
spatial_position(img::Matrix, i) = CartesianIndices(img)[i]
get_element(X::T,i) where T<:AbstractArray= view(X,:,i)
number_of_patterns(X::T) where T<:AbstractArray = size(X,2)
get_element(X::Vector,i) = X[i]
number_of_patterns(X::Vector) = length(X)
number_of_patterns(X::Array{T, 3}) where T = size(X, 2) * size(X, 3)
"""
```
function get_element(data::Array{T, 3}, i::Vector) where T<:Number
```
"""
function get_element(data::Array{T, 3}, i::Vector) where T<:Number
m = zeros(size(data,1)+2,length(i))
get_element!(m, data, i)
return m
end
get_element(data::Array{T, 3}, i::Integer) where T<:Number = get_element(data, [i])
get_element!(o::AbstractArray, data::Array{T,3}, i::Integer) where T<:Number = get_element!(o, data, [i])
function get_element!(o::AbstractArray, data::Array{T,3}, i::Vector) where T<:Number
(_, nr, nc) = size(data)
cart_indices = CartesianIndices((nr,nc))[i]
@inbounds for (i, pos) in enumerate(cart_indices)
o[3:end, i] = data[:, pos[1], pos[2]]
o[1,i] = pos[2]
o[2,i] = pos[1]
end
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 1528 | using LinearAlgebra,
SparseArrays
function normalize_matrix(A::AbstractMatrix, dim::Integer; f=LinearAlgebra.norm)
if (size(A, dim) == 1)
return A
end
return A./mapslices(f, A, dims=[dim])
end
normalize_rows(A::AbstractMatrix; f=LinearAlgebra.norm) = normalize_matrix(A, 2, f=f)
normalize_cols(A::AbstractMatrix; f=LinearAlgebra.norm) = normalize_matrix(A, 1, f=f)
function normalize_matrix!(A::AbstractMatrix, dim::Integer; f=LinearAlgebra.norm)
if (size(A, dim) == 1)
return A
end
A ./= mapslices(f, A, dims=[dim])
end
normalize_rows!(A::AbstractMatrix; f=LinearAlgebra.norm) = normalize_matrix!(A, 2, f=f)
normalize_cols!(A::AbstractMatrix; f=LinearAlgebra.norm) = normalize_matrix!(A, 1, f=f)
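# Illustrative sketch (kept inside a comment block so nothing runs at load
# time): by default every row (or column) is scaled to unit L2 norm.
#=
using SpectralClustering, LinearAlgebra
A = rand(5, 3)
norm(SpectralClustering.normalize_rows(A)[1, :]) ≈ 1.0
norm(SpectralClustering.normalize_cols(A)[:, 1]) ≈ 1.0
=#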
#=
# get the column sums of A
S = vec(sum(A,1))
# get the nonzero entries in A. ei is row index, ej is col index, ev is the value in A
ei,ej,ev = findnz(A)
# get the number or rows and columns in A
m,n = size(A)
# create a new normalized matrix. For each nonzero index (ei,ej), its new value will be
# the old value divided by the sum of that column, which can be obtained by S[ej]
A_normalized = sparse(ei,ej,ev./S[ej],m,n)
http://stackoverflow.com/questions/24296856/in-julia-how-can-i-column-normalize-a-sparse-matrix
=#
function normalize_cols(A::SparseMatrixCSC)
    sums = sum(A, dims=1) .+ eps()
I, J, V = findnz(A)
for idx in 1:length(V)
V[idx] /= sums[J[idx]]
end
sparse(I, J, V)
end
function svd_whiten(X)
U, s, Vt = svd(X)
return U * Vt
end
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | code | 15618 | using SpectralClustering
using Distances
using Test
using LinearAlgebra
using Statistics
using Clustering
using Images
using Random
using Distributions
import LightGraphs.LinAlg: adjacency_matrix
number_of_vertices = 5
Random.seed!(0)
function two_gaussians(N::Integer = 500; std_1=5, std_2 = 5, center_1=[15,5], center_2=[-15, 5])
d1 = (randn(2, N) * std_1) .+ center_1
d2 = (randn(2, N) * std_2) .+ center_2
labels = round.(Integer, vcat(zeros(N), ones(N)))
return (hcat(d1, d2), labels)
end
function three_gaussians(N::Integer = 250)
d1 = (randn(2, N) * 1.5) .+ [5, 0]
d2 = (randn(2, N) * 1) .+ [0, 0]
d3 = (randn(2, N) * 1.5) .+ [-5, 0]
labels = round.(Integer, vcat(zeros(N), ones(N), ones(N)* 2))
return (hcat(d1, d2, d3), labels)
end
@testset "Graph Creation" begin
@testset "KNNNeighborhood" begin
function weight(i::Integer, neigh, v, m)
return Distances.colwise(SqEuclidean(), m, v)
end
data = convert.(Float64, [2 2; -2 -2; 2 -2; -2 2;0 0]')
knnconfig = KNNNeighborhood(data, 3);
graph = create(knnconfig, weight, data);
v1 = graph.vertices[1]
@test v1.number_of_edges == 3
for edge in v1
v2 = target_vertex(edge, v1)
@test norm(data[:, v2.id] - [-2, -2]) > 0.001
end
@test graph.vertices[2].connections == Set([4,3,5])
@test graph.vertices[end].number_of_edges == 4
end
@testset "RandomNeighborhood" begin
data, labels = two_gaussians()
randomconfig = RandomNeighborhood(5)
graph = create(randomconfig, ones, data);
@test all([v.number_of_edges for v in graph.vertices] .>= 5)
end
@testset "CliqueNeighborhood" begin
data, labels = two_gaussians(15)
clique = CliqueNeighborhood()
graph = create(clique, ones, data);
@test all([v.number_of_edges for v in graph.vertices] .== 29)
@test all([v.degree for v in graph.vertices] .== 29)
end
@testset "PixelNeighborhood" begin
img = fill(RGB(1, 0, 0), 20, 20)
nconfig = PixelNeighborhood(1)
graph = create(nconfig, ones, img)
@test length(graph.vertices) == size(img, 1) * size(img, 2)
@test graph.vertices[1].number_of_edges == 3
@test graph.vertices[148].number_of_edges == 8
end
@testset "Local Sale Knn" begin
function weight(i::Integer, neigh, v, m)
return Distances.colwise(Euclidean(), m, v)
end
X = [5.0 5; 5 4; 4 4; 6 6; -10 -10; -9 -9; -8 -8; -11 -11]'
knnconfig = KNNNeighborhood(X, 3)
scale = local_scale(knnconfig, weight, X, k = 3)
@test isapprox(scale[1], sqrt(2))
@test isapprox(scale[end], sqrt(18))
end
@testset "Local Sale Image" begin
function weight(i::Integer, neigh, v, m)
col_dist = Distances.colwise(Euclidean(), m[3:end, :], v[3:end])
xy_dist = Distances.colwise(Euclidean(), m[1:2, :], v[1:2])
return hcat(col_dist, xy_dist)
end
X = rand(RGB, 50, 50)
knnconfig = PixelNeighborhood(3)
scale = local_scale(knnconfig, weight, X, k = 9)
@test size(scale) == (2, 50 * 50)
end
end;
@testset "Embedding" begin
@testset "NgLaplacian" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians()
knnconfig = KNNNeighborhood(data, 7)
graph = create(knnconfig, weight, data)
emb = embedding(NgLaplacian(1), graph)
pred_clustering = convert(Array{Int64}, (emb .<= mean(emb)))
@test randindex(pred_clustering, labels)[4] > 0.9
end
@testset "ShiMalikLaplacian" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians()
knnconfig = KNNNeighborhood(data, 15)
graph = create(knnconfig, weight, data)
emb = embedding(ShiMalikLaplacian(1), graph)
pred_clustering = convert(Array{Int64}, (emb .<= mean(emb)))
@test randindex(pred_clustering, labels)[4] > 0.9
end
@testset "PartialGroupingConstraints" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 0.7)
end
N = 150
(d, labels) = three_gaussians(N)
knnconfig = KNNNeighborhood(d, 100)
graph = create(knnconfig, weight, d)
indices_clus_1 = [1, 2, 3, 4, 5]
indices_clus_2 = [N+1, N+2, N+3, N+4]
indices_clus_3 = [2*N+1, 2*N+2, 2*N+3, 2*N+4]
constraints = Vector{Integer}[ vcat(indices_clus_1, indices_clus_2) ] ;
emb_1 = embedding(PartialGroupingConstraints(1, smooth=true), graph, constraints)
labels_1 = vcat(zeros(Integer, N*2), ones(Integer, N))
constraints = Vector{Integer}[ vcat(indices_clus_2, indices_clus_3) ]
emb_2 = embedding(PartialGroupingConstraints(1, smooth=true), graph, constraints)
labels_2 = vcat(zeros(Integer, N), ones(Integer, N*2))
pred_clustering = convert(Array{Int64}, (emb_1 .<= mean(emb_1)))
@test randindex(pred_clustering, labels_1)[4] > 0.85
@test randindex(pred_clustering, labels_2)[4] < 0.5
pred_clustering = convert(Array{Int64}, (emb_2 .<= mean(emb_2)))
@test randindex(pred_clustering, labels_2)[4] > 0.85
@test randindex(pred_clustering, labels_1)[4] < 0.5
end
@testset "YuShiPopout" begin
function weight(i::Integer, ineigh, vi, vneigh)
intensity_dist = Distances.colwise(Euclidean(), vi[3:end], vneigh[3:end, :])
xy_dist = Distances.colwise(Euclidean(), vi[1:2], vneigh[1:2, :])
a = 5
b = 0.05
return (pdf.(Normal(0, a*15), xy_dist) - pdf.(Normal(0, a), xy_dist)) .*
(pdf.(Normal(0, b*100), intensity_dist) - pdf.(Normal(0, b), intensity_dist))
end
function attraction(i::Integer, ineigh, vi, vneigh)
diff = weight(i, ineigh, vi, vneigh)
diff[diff.<0] .= 0
return diff
end
function repulsion(i::Integer, ineigh, vi, vneigh)
diff = weight(i, ineigh, vi, vneigh)
diff[diff.>0] .= 0
return abs.(diff)
end
img = zeros(31,31)
img[8:25, 3:12] .= 0.9
img[3:12, 5:28] .= 0.2
img[8:25, 25:30] .= 0.6
img = Gray.(img + randn(31, 31)*0.03)
labels = zeros(Integer, 31, 31)
labels[8:25, 3:12] .= 1
labels[3:12, 5:28] .= 2
labels[8:25, 25:30] .= 3
nconfig = PixelNeighborhood(4)
graph_attraction = create(nconfig, attraction, img);
graph_repulsion = create(nconfig, repulsion, img);
emb_config = YuShiPopout(3, false)
cluster_result = clusterize(emb_config, KMeansClusterizer(4), graph_attraction, graph_repulsion)
@test randindex(cluster_result.assignments[:], labels[:])[4] > 0.9
end
end
@testset "Clustering" begin
@testset "KMeans Clustering" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians()
knnconfig = KNNNeighborhood(data, 7)
graph = create(knnconfig, weight, data)
(data, labels) = two_gaussians()
pred_clustering = clusterize(NgLaplacian(2), KMeansClusterizer(2), graph)
@test randindex(pred_clustering.assignments, labels)[4] > 0.9
end
@testset "YuEigenvectorRotation" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians()
knnconfig = KNNNeighborhood(data, 7)
graph = create(knnconfig, weight, data)
(data, labels) = two_gaussians()
pred_clustering = clusterize(NgLaplacian(2), YuEigenvectorRotation(500), graph)
@test randindex(pred_clustering.assignments, labels)[4] > 0.9
end
end
@testset "Landmark Selection" begin
@testset "RandomLandmarkSelection" begin
r = RandomLandmarkSelection()
data = rand(2, 25)
s = select_landmarks(r, 15, data)
@test length(s) == 15
@test length(unique(s)) == length(s)
@test minimum(s)>=1 && maximum(s)<= 25
end
@testset "EvenlySpacedLandmarkSelection" begin
e = EvenlySpacedLandmarkSelection()
data = rand(2, 25)
s = select_landmarks(e, 5, data)
@test length(s) == 5
@test all(diff(s) .== 5)
@test minimum(s)>=1 && maximum(s)<= 25
end
@testset "BresenhamLandmarkSelection" begin
e = BresenhamLandmarkSelection()
data = rand(2, 25)
s = select_landmarks(e, 5, data)
@test length(s) == 5
@test all(diff(s) .> 0)
@test minimum(s)>=1 && maximum(s)<= 25
end
end
@testset "Approximate Embedding" begin
@testset "Nystrom" begin
@testset "Data embedding" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians(6000)
embedding_config = NystromMethod(EvenlySpacedLandmarkSelection(), 1000, weight, 1)
emb = embedding(embedding_config, data)
pred_clustering = convert(Array{Int64}, (emb .<= mean(emb)))
@test randindex(pred_clustering, labels)[4] > 0.9
end
@testset "Image embedding" begin
function weight(i::Integer,j::Vector{<:Integer},pixel_i, neighbors_data)
data_diff = pixel_i[3:5] .- neighbors_data[3:5,:]
a = exp.(-abs.(data_diff)./(2*0.1^2))
a = prod(a, dims=1)
return vec(a)
end
img = fill(RGB(0,0,0), 50, 50)
cluster_1 = CartesianIndices(size(img))[5:20, 5:20]
cluster_2 = CartesianIndices(size(img))[35:42, 35:47]
img[cluster_1] .= RGB(1, 0, 0)
img[cluster_2] .= RGB(0, 0, 1)
labels = zeros(Integer, 50*50)
labels[LinearIndices(size(img))[cluster_1][:]] .= 1
labels[LinearIndices(size(img))[cluster_2][:]] .= 2
embedding_config = NystromMethod(EvenlySpacedLandmarkSelection(),
500, weight, 1)
emb = embedding(embedding_config, img)
emb = vec(round.(emb, digits=2))
clusters = zeros(Integer, 50*50)
for (i, val) in enumerate(unique(emb))
clusters[findall(emb.==val)] .= i -1
end
@test randindex(clusters, labels)[4] > 0.95
end
end
@testset "DNCuts" begin
function weight(i::Integer,j::Vector{<:Integer},pixel_i, neighbors_data)
data_diff = pixel_i[3:5] .- neighbors_data[3:5,:]
a = exp.(-((data_diff).^2)./(0.1))
a = prod(a, dims=1)
return vec(a)
end
img = fill(RGB(0,0,0), 50, 50)
cluster_1 = CartesianIndices(size(img))[5:20, 5:20]
cluster_2 = CartesianIndices(size(img))[35:42, 35:47]
img[cluster_1] .= RGB(1, 0, 0)
img[cluster_2] .= RGB(0, 0, 1)
labels = ones(Integer, 50*50)
labels[LinearIndices(size(img))[cluster_1][:]] .= 2
labels[LinearIndices(size(img))[cluster_2][:]] .= 3
nconfig = PixelNeighborhood(4)
graph = create(nconfig, weight, img);
dncuts = DNCuts(2, 2, size(img))
emb = embedding(dncuts, graph)
pred_clustering = clusterize(dncuts, KMeansClusterizer(3), graph)
@test randindex(pred_clustering.assignments, labels)[4] > 0.9
end
@testset "LandmarkBasedRepresentation" begin
function weight(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
(data, labels) = two_gaussians(6000)
cfg = LandmarkBasedRepresentation(
BresenhamLandmarkSelection(),
500,
25,
2,
weight,
true)
emb = embedding(cfg, data)
pred_clustering = convert(Array{Int64}, (emb[:, 1] .<= mean(emb[:, 1])))
@test randindex(pred_clustering, labels)[4] > 0.9
end
end
@testset "MultiView" begin
@testset "CoRegularizedMultiView" begin
function weight_1(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
function weight_2(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 45)
end
(data, labels) = two_gaussians(500, center_1=[-15, -15], center_2=[9, 9])
knnconfig = KNNNeighborhood(data, 7)
graph_1 = create(knnconfig, weight_1, data);
graph_2 = create(knnconfig, weight_2, data);
coreg = CoRegularizedMultiView([View(1, 0.001),
View(1, 0.001)])
emb = embedding(coreg, [graph_1, graph_2])
pred_clustering = convert(Array{Int64}, (emb[:, 1] .<= mean(emb[:, 1])))
@test randindex(pred_clustering, labels)[4] > 0.9
end
@testset "KernelProduct" begin
function weight_1(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
function weight_2(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 45)
end
(data, labels) = two_gaussians(500, center_1=[-15, -15], center_2=[9, 9])
knnconfig = KNNNeighborhood(data, 7)
graph_1 = create(knnconfig, weight_1, data);
graph_2 = create(knnconfig, weight_2, data);
kernel_addition = KernelProduct(NgLaplacian(2))
emb = embedding(kernel_addition, [graph_1, graph_2])
pred_clustering = convert(Array{Int64}, (emb[:, 1] .<= mean(emb[:, 1])))
@test randindex(pred_clustering, labels)[4] > 0.9
end
@testset "KernelAddition" begin
function weight_1(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 15)
end
function weight_2(i::Integer, neigh, v, m)
return exp.(-Distances.colwise(SqEuclidean(), m, v) / 45)
end
(data, labels) = two_gaussians(500, center_1=[-15, -15], center_2=[9, 9])
knnconfig = KNNNeighborhood(data, 7)
graph_1 = create(knnconfig, weight_1, data);
graph_2 = create(knnconfig, weight_2, data);
kernel_addition = KernelAddition(NgLaplacian(2))
emb = embedding(kernel_addition, [graph_1, graph_2])
pred_clustering = convert(Array{Int64}, (emb[:, 1] .<= mean(emb[:, 1])))
@test randindex(pred_clustering, labels)[4] > 0.9
end
end
@testset "Utils" begin
@testset "Normalization" begin
a = rand(50, 3)
@test isapprox(norm(SpectralClustering.normalize_rows(a)[1, :]), 1)
@test isapprox(norm(SpectralClustering.normalize_cols(a)[:, 1]), 1)
a = rand(50, 1)
@test isapprox(norm(SpectralClustering.normalize_rows(a)[1, :]), a[1])
end
end
@testset "DataAcces" begin
a = rand(50, 3)
@test number_of_patterns(a) == 3
@test get_element(a, 1) == a[:, 1]
a = a'
@test number_of_patterns(a) == 50
@test get_element(a, 1) == a[:, 1]
a = rand(RGB, 50 ,50)
@test number_of_patterns(a) == 50*50
end | SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 2693 | # SpectralClustering
- [Documentation](https://lucianolorenti.github.io/SpectralClustering.jl/latest)
- Check out the [Examples](https://lucianolorenti.github.io/SpectralClustering.jl/latest/notebooks/Index.html)
[Coverage Status](https://coveralls.io/github/lucianolorenti/SpectralClustering.jl?branch=master)
The library provides functions that allow:
* Build the affinity matrix.
* Perform the embedding of the patterns in the space spanned by the eigenvectors of the matrices derived from the affinity matrix.
* Obtain an approximation of the eigenvectors in order to reduce the computational complexity.
* Exploiting information from multiple views. Corresponding nodes in each graph should have the same cluster membership.
* Clusterize the eigenvector space.
# Methods implemented
* Graph construction
* [Self-Tuning Spectral Clustering](https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf)
* Embedding
* [Normalized cuts and image segmentation](https://people.eecs.berkeley.edu/~malik/papers/SM-ncut.pdf)
* [On Spectral Clustering: Analysis and an algorithm](https://papers.nips.cc/paper/2092-on-spectral-clustering-analysis-and-an-algorithm.pdf)
* [Understanding Popout through Repulsion](https://pdfs.semanticscholar.org/019c/099ab01902416a625a9d18a36e61b88f5a3d.pdf)
* [Segmentation Given Partial Grouping Constraints](http://www.cs.cmu.edu/~xingyu/papers/yu_bias.pdf)
* Approximate embedding
* [Spectral grouping using the nystrom method](https://people.eecs.berkeley.edu/~malik/papers/FBCM-nystrom.pdf)
* [Nystrom sampling depends on the eigenspectrum shape of the data](https://openreview.net/pdf?id=HJZvjvJPf)
* [Large Scale Spectral Clustering
with Landmark-Based Representation](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.6933&rep=rep1&type=pdf)
* Multiple views
* Kernel Addition
* Kernel Product
* Feature Concatenation (in the examples section)
* [Co-regularized Multi-view Spectral Clustering](https://papers.nips.cc/paper/4360-co-regularized-multi-view-spectral-clustering.pdf)
* Incremental
* TODO [Incremental spectral clustering by efficiently updating the eigen-system](https://www.sciencedirect.com/science/article/pii/S0031320309002209/pdfft?md5=dc50ecba5ab9ab23ea239ef89244800a&pid=1-s2.0-S0031320309002209-main.pdf)
* Clusterize
* [Multiclass Spectral Clustering](http://www.public.asu.edu/~jye02/CLASSES/Spring-2007/Papers/PAPERS/295_yu_s.pdf)
* KMeans via [Clustering.jl](https://github.com/JuliaStats/Clustering.jl)
The documentation and the library are still a work in progress.
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 2556 | # SpectralClustering.jl
Given a set of patterns $X=\{x_1,x_2,...x_n\} \in {\mathbb R}^m$, and a similarity function $d:{\mathbb R}^m \times {\mathbb R}^m \rightarrow {\mathbb R}$, it is possible to build an affinity matrix $W$ such that $W(i,j) = d(x_i, x_j)$. Spectral clustering algorithms obtain a low rank representation of the patterns by solving the following optimization problem
$\begin{array}{cl}
\underset{U \in {\mathbb R}^{n\times k}}{\max} & \mbox{Tr}(U^T L U) \\
\textrm{s.t.} & U^T U = I
\end{array}$
where $L = D^{-\frac{1}{2}}WD^{-\frac{1}{2}}$ is the Laplacian matrix derived from $W$ according to [ng2002spectral](#ng2002spectral) and $D$ is a diagonal matrix with the sum of the rows of $W$ located in its main diagonal. Once $U$ is obtained, its rows are considered as the new coordinates of the patterns. In this new representation it is simpler to apply a traditional clustering algorithm [shi2000normalized](#shi2000normalized).
Spectral graph partitioning methods have been successfully
applied to circuit layout [3, 1], load balancing [4] and
image segmentation [10, 6]. As a discriminative approach,
they do not make assumptions about the global structure of
data. Instead, local evidence on how likely two data points
belong to the same class is first collected and a global decision
is then made to divide all data points into disjoint sets
according to some criterion. Often, such a criterion can be
interpreted in an embedding framework, where the grouping
relationships among data points are preserved as much
as possible in a lower-dimensional representation.
## Installation
At the Julia REPL:
```julia
]add https://github.com/lucianolorenti/SpectralClustering.jl.git
```
## Description
The library provides functions that allow:
* Build the affinity matrix. [Similarity graph creation](@ref), [Graph matrices](@ref)
* Perform the embedding of the patterns in the space spanned by the eigenvectors of the matrices derived from the affinity matrix. [Eigenvector Embedding](@ref)
* Obtain an approximation of the eigenvectors in order to reduce the computational complexity. [Approximate embedding](@ref)
* Exploiting information from multiple views. Corresponding nodes in each graph should have the same cluster membership. [MultiView Embedding](@ref)
* Clusterize the eigenvector space. [Eigenvector Clustering](@ref)
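A minimal end-to-end sketch of this pipeline (the data, the similarity bandwidth and the number of neighbors below are arbitrary):
```julia
using SpectralClustering
# Two Gaussian blobs; every column is a pattern.
X = hcat(randn(2, 250) .+ [5, 0], randn(2, 250) .- [5, 0])
sim(i, neigh, v, m) = vec(exp.(-sum(abs2, m .- v, dims=1) ./ 15))
graph = create(KNNNeighborhood(X, 7), sim, X)
result = clusterize(NgLaplacian(2), KMeansClusterizer(2), graph)
result.assignments # one cluster label per pattern
```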
# Bibliography
```@eval
import Documenter.Documents.RawHTML
Base.include(@__MODULE__, "DocUtils.jl")
RawHTML(bibliography(["ng2002spectral","shi2000normalized","yu2001understanding"]))
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 697 | # Approximate embedding
Given a symmetric affinity matrix $A$, we would like to compute the $k$ smallest eigenvectors of the Laplacian of $A$. Directly computing such eigenvectors can be very costly even with sophisticated solvers, due to the large size of $A$.
# Examples
[Approximate embedding examples](../../../notebooks/Approximate Embedding.html )
# Bibliography
```@eval
import Documenter.Documents.RawHTML
Base.include(@__MODULE__, "DocUtils.jl")
RawHTML(bibliography(["pont2017multiscale"]))
```
# Reference
## Index
```@index
Modules=[SpectralClustering]
Pages=["man/approximate.md"]
```
## Content
```@autodocs
Pages=["ApproximateEmbedding.jl"]
Modules=[SpectralClustering]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 1114 | # Eigenvector Clustering
Once the eigenvectors are obtained, we have a continuous solution for a discrete problem. In order to obtain an assignment for every pattern, the eigenvectors need to be discretized.
Obtaining this discrete solution from eigenvectors often requires solving another clustering problem, albeit in a lower-dimensional space. That is, eigenvectors are treated as geometrical coordinates of a point set.
This library provides two methods to obtain the discrete solution (see the sketch below):
- K-means by means of [Clustering.jl](https://github.com/JuliaStats/Clustering.jl)
- The one proposed in [Multiclass spectral clustering](#stella2003multiclass)
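Both strategies share the same interface. A sketch, assuming `graph` is a previously built similarity graph:
```julia
result_kmeans = clusterize(NgLaplacian(2), KMeansClusterizer(2), graph)
result_yu     = clusterize(NgLaplacian(2), YuEigenvectorRotation(500), graph)
result_kmeans.assignments # one label per pattern
```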
# Examples
[Eigenvector clusterization examples](../../../notebooks/Eigenvector Clustering.html)
# Reference Index
```@index
Modules = [SpectralClustering]
Pages=["man/data_access.md"]
```
# Members Documentation
```@autodocs
Modules = [SpectralClustering]
Pages=["EigenvectorClustering.jl"]
```
# Bibliography
```@eval
import Documenter.Documents.RawHTML
Base.include(@__MODULE__, "DocUtils.jl")
RawHTML(bibliography(["stella2003multiclass"]))
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 612 | # Data Access
In order to establish how the data is going to be accessed, the module `DataAccess` provides a unified interface to the data for the underlying algorithms. Every data container must implement these two methods (a sketch for a custom container follows):
1. `get_element(X, i::Integer)`. This function must return the `i`-th pattern of `X`.
2. `number_of_patterns(X)`. This function must return the number of patterns of `X`.
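For instance, a hypothetical container whose patterns are stored as the rows of a matrix could be plugged in as follows (a sketch; `RowMajorData` is not part of the package):
```julia
import SpectralClustering: get_element, number_of_patterns

# Hypothetical container: every row of X is a pattern.
struct RowMajorData
    X::Matrix{Float64}
end
get_element(d::RowMajorData, i::Integer) = d.X[i, :]
number_of_patterns(d::RowMajorData) = size(d.X, 1)
```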
# Reference Index
```@index
Modules = [SpectralClustering]
Pages=["man/data_access.md"]
```
# Members Documentation
```@autodocs
Modules = [SpectralClustering]
Pages=["Utils/DataAccess.jl"]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 1663 | # Eigenvector Embedding
Spectral clustering techniques require the computation of the extreme eigenvectors of matrices derived from pattern similarity. The Laplacian matrix obtained from the data is generally used as the starting point for the eigenvector decomposition. Given the symmetric matrix $ W(i, j) = w_{ij}, W \in R^{n \times n} $ that contains information about similarity between the patterns, if $ D = \mathrm{diag}(W \mathbf{1}) $, the unnormalized Laplacian matrix is defined as $ L = D-W $.
The matrix $ W $ can be seen as the incidence matrix of a weighted graph. The [Simmilarity graph creation] (@ref) utilities
implement functions that allow the construction of simmilarty graphs.
The Embedding utilities contain the functions for performing the embedding of the patterns in the space spanned by the $ k $ eigenvectors of a matrix derived from $W$.
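As a sketch of the definitions above, the unnormalized Laplacian can be assembled from a similarity matrix in a few lines; the Gaussian similarity and the toy data are illustrative choices:
```julia
using LinearAlgebra

n = 6
X = rand(2, n)                                                # toy patterns, one per column
W = [exp(-norm(X[:, i] - X[:, j])^2) for i in 1:n, j in 1:n]  # symmetric similarity matrix
d = W * ones(n)                                               # degree vector d = W * 1
L = Diagonal(d) - W                                           # unnormalized Laplacian L = D - W
```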
Currently the module implements the techniques described in:
- [On spectral clustering: Analysis and an algorithm.](#ng2002spectral)
- [Normalized cuts and image segmentation.](#shi2000normalized)
- [Understanding Popout through Repulsion.](#yu2001understanding)
- [Segmentation Given Partial Grouping Constraints](#yu2004segmentation)
# Examples
[Embedding examples](../../../notebooks/Embedding.html)
# Bibliography
```@eval
import Documenter.Documents.RawHTML
Base.include(@__MODULE__, "DocUtils.jl")
RawHTML(bibliography(["ng2002spectral","shi2000normalized","yu2001understanding", "yu2004segmentation","lee2007trajectory"]))
```
## Index
```@index
Modules=[SpectralClustering]
Pages=["man/embedding.md"]
```
## Content
```@autodocs
Modules=[SpectralClustering]
Pages=["src/Embedding.jl"]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 185 |
# Graphs
# Reference
## Index
```@index
Pages=["man/graph.md"]
Modules=[SpectralClustering]
```
## Content
```@autodocs
Pages=["Graph/Graphs.jl"]
Modules=[SpectralClustering]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 1424 | # Similarity graph creation
A weighted graph is an ordered pair $G=(V,E)$ composed of a set $V$ of vertices together with a set $E$ of edges $(i,j,w)$, with $i,j \in V$ and $w \in \mathbb{R}$. The number $w$, the weight, represents the similarity between $i$ and $j$.
In order to build a similarity graph, two elements have to be defined:
1. Which are the neighbors for a given vertex. For this, a concrete type that inherits from [`NeighborhoodConfig`](@ref SpectralClustering.VertexNeighborhood) has to be instantiated.
2. The similarity function between patterns. The function receives the element being evaluated and its neighbors, and returns a vector with the similarities between them. The signature of the function has to be the following: `function weight(i::Integer, j::Vector{Integer}, e1, e2)`, where `i::Integer` is the index of the pattern being evaluated, `j::Vector{Integer}` holds the indices of the neighbors of `i`, `e1` is the `i`-th pattern, and `e2` holds the neighbors' patterns. A sketch of such a function is shown after this list.
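For instance, a Gaussian-kernel weight function following that signature could look like the sketch below; the bandwidth `σ` and the column-wise layout of the neighbor patterns in `e2` are assumptions made for illustration:
```julia
using LinearAlgebra

const σ = 1.0  # illustrative bandwidth

# Gaussian similarity between pattern `e1` and each of its neighbors.
function weight(i::Integer, j::Vector{<:Integer}, e1, e2)
    # e2 is assumed to hold the neighbor patterns column-wise
    return [exp(-norm(e1 .- e2[:, k])^2 / (2σ^2)) for k in 1:length(j)]
end
```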
# Examples
[Graph creation examples](../../../notebooks/Graph creation.html)
# Bibliography
```@eval
import Documenter.Documents.RawHTML
Base.include(@__MODULE__, "DocUtils.jl")
RawHTML(bibliography(["Zelnik-manor04self-tuningspectral"]))
```
# Reference
## Index
```@index
Modules=[SpectralClustering]
Pages = ["man/graphcreation.md"]
```
## Content
```@autodocs
Modules=[SpectralClustering]
Pages=["Graph/Creation.jl"]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 34 | # Incremental Spectral Clustering
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 1197 | # Landmark Selection
In order to avoid the construction of a complete similarity matrix, some spectral clustering
methods compute the similarity function only between a subset of patterns. This module provides an
interface to sample points from different data structures.
Available methods:
- `RandomLandmarkSelection`. This selection method samples $k$ random points from a dataset.
- `EvenlySpacedLandmarkSelection`. This selection method samples points evenly spaced according to their index.
## Detailed Description
### Random Landmark Selection
```@example
using SpectralClustering
number_of_points = 20
dimension = 5
data = rand(dimension,number_of_points)
selector = RandomLandmarkSelection()
number_of_landmarks = 7
select_landmarks(selector, number_of_landmarks, data)
```
### Evenly Spaced Landmark Selection
```@example
using SpectralClustering
number_of_points = 20
dimension = 5
data = rand(dimension,number_of_points)
selector = EvenlySpacedLandmarkSelection()
number_of_landmarks = 5
select_landmarks(selector, number_of_landmarks, data)
```
## Index
```@index
Modules=[SpectralClustering]
Pages=["man/landmark_selection.md"]
```
## Content
```@autodocs
Modules=[SpectralClustering]
Pages=["src/LandmarkSelection.jl"]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 0.1.2 | 085361b986546da28d16474cfcb8b28479b80d2d | docs | 576 | # MultiView Embedding
When the dataset has more than one representation, each of them is called a view. In the context of spectral clustering,
co-regularization techniques attempt to encourage the similarity of the examples in the new representations generated
from the eigenvectors of each view.
# Examples
[MultiView Embedding examples](../../../notebooks/Multiview Embedding.html)
# Reference Index
```@index
Modules = [SpectralClustering]
Pages=["man/data_access.md"]
```
# Members Documentation
```@autodocs
Modules = [SpectralClustering]
Pages=["MultiView.jl"]
```
| SpectralClustering | https://github.com/lucianolorenti/SpectralClustering.jl.git |
|
[
"MIT"
]
| 1.0.13 | b7e11245178a57be72e47a69468dca6b73f192d8 | code | 5349 | # The implementations are inspired by MatrixMarket.jl
# https://github.com/JuliaSparse/MatrixMarket.jl
# The MatrixMarket.jl package is licensed under the MIT Expat License:
# Copyright (c) 2013: Viral B. Shah.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""julia
MatrixDepot
Give access to a wealth of sample and test matrices and accompanying data.
A set of matrices is generated locally (with arguments controlling the special case).
Another set is loaded from one of the publicly accessible matrix collections
`SuiteSparse Matrix Collection` (formerly `University of Florida Matrix Collection`)
and the `Matrix Market Collection`.
Access is like
using MatrixDepot
A = matrixdepot("hilb", 10) # locally generated hilbert matrix dimensions (10,10)
A = matrixdepot("HB/1138_bus") # named matrix of the SuiteSparse Collection
A = matrixdepot(sp(1)) # same matrix using numerical id
A = matrixdepot("Harwell*/*/1138_bus") # matrix from the Matrix Market Collection
md = mdopen("*/bfly") # named matrix with some extra data
A = md.A
co = md.coord
tx = md("Gname_10.txt")
md = mdopen("gravity", 10, false) # locally generated example with rhs and solution
A = md.A
b = md.b
x = md.x
###### commands:
mdinfo, listdir, listgroups, matrixdepot, mdopen, listdata, mdlist,
metasymbols, setgroup!, deletegroup!.
###### selector patterns:
strings, string-patterns (using "*", "?", "[]", "/", "**"), regular expressions: for names
builtin(42), user(3,5), sp(10:11,6,2833), mm(1), mm(:): to access by integer id or all
sp(pattern), mm(pattern) to access corresponding (alternative) matrix for other collection
###### predicate patterns:
isboolean, isinteger, isreal, iscomplex
isgeneral, issymmetric, ishermitian, isskew
isbuiltin, isuser, islocal, isremote, isloaded, isunloaded
issvdok
keyword(string expression), logical, hasdata(symbol), @pred(expression)
see also: "logical" for logical combinations of all kinds of patterns.
"""
module MatrixDepot
using LinearAlgebra, SparseArrays, Serialization
using Scratch
import Base: show
export matrixdepot
export listnames, listdir, listdata, listgroups, mdlist, mdinfo, metasymbols, mdopen
export @addgroup, @rmgroup, @modifygroup # deprecated
export setgroup!, deletegroup!
# exports for predicate functions in `logical.jl`
export builtin, user, sp, mm, logical
export isgeneral, issymmetric, isskew, ishermitian
export iscomplex, isreal, isinteger, isboolean
export isremote, islocal, isloaded, isunloaded, isbuiltin, isuser
export issvdok, isposdef
export @pred, keyword, hasdata, charfun
# The following functions are re-used as predicate functions / logical operators
import Base: isreal, isinteger
import LinearAlgebra: issymmetric, ishermitian
import SparseArrays: issparse
import Base: &, |, *, ~
include("types.jl") # common data type definitions
include("higham.jl") # test matrices
include("regu.jl") # regularization test problem
include("graph.jl") # adjacency matrices for graphs
include("data.jl") # global variables and matrix data
include("common.jl") # main functions
include("logical.jl") # operations on patterns and predicates
include("download.jl") # download data from the UF and MM sparse matrix collection
include("datareader.jl") # read matrix data from local storage
include("matrixmarket.jl") # read matrix data from local storage
include("markdown.jl") # construct MD objects
include("downloadmm.jl") # read metadata from MM database
include("downloadsp.jl") # read metadata from SS database
function init(;ignoredb::Bool=false)
GROUP = "group.jl"
GENERATOR = "generator.jl"
url_redirect() # env MATRIXDEPOT_URL_REDIRECT == "1"
if !isdir(data_dir()) # env MATRIXDEPOT_DATA
mkpath(data_dir())
end
@info("verify download of index files...")
downloadindices(MATRIX_DB, ignoredb=ignoredb)
@info("used remote sites are $(remote_name(preferred(SSRemoteType))) and $(remote_name(preferred(MMRemoteType)))")
nothing
end
# will be called automatically once after `using`, `import`, `require`.
function __init__()
try init() catch ex; @warn "exception during initialization: '$ex'"
end
end
end # end module
| MatrixDepot | https://github.com/JuliaLinearAlgebra/MatrixDepot.jl.git |
|
[
"MIT"
]
| 1.0.13 | b7e11245178a57be72e47a69468dca6b73f192d8 | code | 15546 | ########################
# helper functions
########################
argerr(s::AbstractString) = throw(ArgumentError(s))
daterr(s::AbstractString) = throw(DataError(s))
parserr(s::AbstractString) = throw(Meta.ParseError(s))
"""
listgroups()
Return a list of available groups (array of `Symbol`).
"""
function listgroups()
groups = Symbol[]
append!(groups, sort!(collect(keys(SUBSETS))))
append!(groups, sort!(collect(keys(MATRIXCLASS))))
append!(groups, sort!(collect(keys(USERMATRIXCLASS))))
groups
end
#######################
# matrix group
#######################
# write one property association
function propline(io::IO, propname, matnames)
write(io, repr(propname))
write(io, " => [")
for str in matnames
write(io, repr(str))
write(io, ", ")
end
write(io, "],\n")
end
# add, remove, or replace complete user group
function modgroup(prop::Symbol, mats::Union{Nothing,Pattern})
prop in keys(MATRIXCLASS) && daterr("$prop can not be modified.")
if mats !== nothing
USERMATRIXCLASS[prop] = mats
else
delete!(USERMATRIXCLASS, prop)
end
return nothing
end
"""
setgroup!(s::Symbol, p::Pattern)
Define user group. `s` must not be one of the predefined group names.
`p` may be any pattern, also a vector of matrix names.
"""
setgroup!(s::Symbol, p::Pattern) = modgroup(s, p)
"""
deletegroup!(s::Symbol)
Delete a previously defined user group.
"""
deletegroup!(s::Symbol) = modgroup(s, nothing)
"add a group to Matrix Depot"
macro addgroup(ex)
@warn("`@addgroup name = ex` is deprecated, use `setgroup!(:name, ex)`")
nn = QuoteNode(ex.args[1])
:( modgroup($(esc(nn)), $(esc(ex.args[2]))) )
end
"add or replace group in Matrix Depot"
macro modifygroup(ex)
@warn("`@modifygroup name = ex`` is deprecated, use `setgroup!(:name, ex)`")
nn = QuoteNode(ex.args[1])
:( modgroup($(esc(nn)), $(esc(ex.args[2]))) )
end
"remove an added group from Matrix Depot"
macro rmgroup(ex)
@warn("`@rmgroup name` is deprecated, use `deletegroup!(:name)`")
nn = QuoteNode(ex)
:( modgroup($(esc(nn)), nothing) )
end
################################
# user defined matrix generators
################################
abstract type MatrixGenerator end
abstract type FunctionName <: MatrixGenerator end
abstract type Group <: MatrixGenerator end
function include_generator(::Type{FunctionName}, fn::AbstractString, f::Function)
(haskey(MATRIXDICT, fn) ? MATRIXDICT : USERMATRIXDICT)[fn] = f
end
function addtogroup(dir::Dict, groupname::Symbol, f::Function)
if groupname in keys(dir)
fn = fname(f)
gr = dir[groupname]
fn in gr || push!(gr, fn)
true
else
false
end
end
function include_generator(::Type{Group}, groupname::Symbol, f::Function)
addtogroup(MATRIXCLASS, groupname, f) ||
addtogroup(USERMATRIXCLASS, groupname, f) ||
argerr("$(groupname) is not a group in MatrixDepot, use
`setgroup!` to add this group")
end
#a more lightweight alternative to calling `init` again after adding user-defined matrices.
function publish_user_generators()
insertlocal(MATRIX_DB, GeneratedMatrixData{:U}, USERMATRIXDICT)
#note that we do not call writedb because we don't serialize user matrix generators
end
"return the name of the function `f` as a string."
function fname(f::Function)
for (key, value) in MATRIXDICT
value == f && return key
end
for (key, value) in USERMATRIXDICT
value == f && return key
end
"unknown-function"
end
"""
listdir([db,] p::AbstractString)
List directories and the number of matrices contained in them.
Gives an overview of the count of names down the directory tree and
returns a list with summary information for directories in the matrix name space.
The input argument is split into 2 patterns by the first double slash `"//"`.
The whole string (with multiple slashes reduced to single slashes) determines
a subset of all matrix names. They are then grouped by the first pattern and
for each different group value the number of names in the subset is counted.
A final `/` is replaced by `"//**"`.
E.g.
+ `listdir("/*")` - count names without a `/`.
+ `listdir("/k*")` - count names without `/` starting with `k*`.
+ `listdir("*//*")` - count names with one directory part (sp-collection)
+ `listdir("*/*//*")` - count names with two directory parts (mm-collection)
+ `listdir("*//*/*")` - count names with two directory parts (mm-collection)
+ `listdir("Har*//*/*")` - restrict to directories starting with "Har"
+ `listdir("Har*/*//*")` - all subdirectoreis of the previous
"""
listdir(p::AbstractString, xp::Pattern=()) = listdir(MATRIX_DB, p, xp)
function listdir(db::MatrixDatabase, p::AbstractString, xp::Pattern)
check_symbols(xp)
r = findfirst(r"/+", p)
if r !== nothing && first(r) == 1
p = p[last(r)+1:end]
depth = 0
else
m = match(r"^(([^/]+/)+)(/|$)", p)
depth = m !== nothing ? count(x == '/' for x in m.captures[1]) : -1
end
p = replace(p, r"//+" => '/')
endswith(p, '/') && ( p = string(p, "**") )
r = shell_to_regex(p, false)
if depth >= 0
length(p) == 1 && ( p = ".*/" )
listdir(db, r & xp, depth)
else
argerr("pattern '$p' needs '//' in the middle or '/' at start or end")
end
end
function listdir(db::MatrixDatabase, r::Pattern, depth::Int)
result = Dict{AbstractString, Int}()
f(x, n) = string(join(x[1:n], '/'), "/*" ^ max(length(x) - n, 0))
for name in mdlist(r)
li = split(name, '/')
if length(li) >= depth
key = f(li, depth)
result[key] = get!(result, key, 0) + 1
end
end
sort!([string(k, " - (", v, ")") for (k,v) in result])
end
"""
listdata([db,] p::Pattern)
Return an array of `MatrixData` objects according to matched patterns.
"""
listdata(p::Pattern) = listdata(MATRIX_DB, p)
listdata(db::MatrixDatabase, p::Pattern) = mdata.(mdlist(db, p))
"""
mdlist([db,] p::Pattern)
return a vector of full matrix names where name or alias match given pattern.
`p` can be one of the following:
+ a plain string (without characters `*` and `?`) which must match exactly
+ a string containing `*` and `?` acting like a shell path pattern
+ a regular expression
+ an integer matching equivalent to the alias string `"#\$p"`
+ a range of integers
+ a group name expressed as a symbol e.g. `:local`, `:all`, `:illcond`, `:posdef`
+ the name of a predicate function `f(::MatrixData)::Bool`, e.g. `issymmetric`, `isposdef`, ...
+ a vector of patterns meaning the union (or `|`)
+ a tuple of patterns meaning the intersection (or `&`)
"""
mdlist(p::Pattern) = mdlist(MATRIX_DB, p)
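# Usage sketch (illustrative, not exhaustive):
#   mdlist("HB/*")                  # every matrix name in the "HB" directory
#   mdlist((:symmetric, :illcond))  # intersection of two predefined groups
#   mdlist(isposdef)                # names whose data satisfies the predicate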
is_all(res::Vector) = length(res) == 1 && res[1] == ""
function mdlist(db::MatrixDatabase, p::Pattern)
res = list!(db, [""], p)
is_all(res) ? list_all(db) : res
end
function list!(db::MatrixDatabase, res::Vector{String}, r::Regex)
isempty(res) && return res
if is_all(res)
empty!(res)
for name in keys(db.data)
if match(r, name) !== nothing
push!(res, name)
end
end
sort!(res)
else
for i = 1:length(res)
name = res[i]
if match(r, name) === nothing
res[i] = ""
end
end
for i = length(res):-1:1
if res[i] == ""
deleteat!(res, i)
end
end
end
res
end
function list!(db::MatrixDatabase, res::Vector{String}, p::Symbol)
isempty(res) && return res
x = if haskey(SUBSETS, p)
SUBSETS[p](db)
elseif haskey(MATRIXCLASS, p)
MATRIXCLASS[p]
elseif haskey(USERMATRIXCLASS, p)
list!(db, [""], USERMATRIXCLASS[p])
else
argerr("unknown group name '$p'")
# EMPTY_PATTERN
end
if is_all(res)
empty!(res)
append!(res, sort(x))
else
intersect!(res, x)
end
end
"""
shell_to_regex
return a regular expression if shell pattern characters `"*?]"` are contained in
string, otherwise return string.
If no `'/'` is contained in p and p is not "*", insert `"(.*/)?"` in regular expression.
"""
function shell_to_regex(p::AbstractString, retain_pure::Bool)
regex(p) = Regex(string('^', p, '$'))
p = replace(p, r"//+" => '/')
# p = p == "*" || '/' in p ? p : string("(**/)\x03", p)
if occursin(r"[*?.]", p)
p = replace(p, "**" => "\x01\x02")
p = replace(p, '*' => "[^/]*")
p = replace(p, '?' => "[^/]")
p = replace(p, '.' => "[.]")
p = replace(p, '\x01' => '.')
p = replace(p, '\x02' => '*')
p = replace(p, '\x03' => '?')
regex(p)
else
retain_pure ? p : regex(p)
end
end
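# Examples (illustrative):
#   shell_to_regex("HB/*", false)    == r"^HB/[^/]*$"  (shell chars -> regex)
#   shell_to_regex("1138_bus", true) == "1138_bus"     (pure name retained)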
function singlist!(db::MatrixDatabase, res::Vector{String}, p::AbstractString)
if is_all(res)
if haskey(db.data, p)
res[1] = p
res
else
empty!(res)
end
else
if p in res
empty!(res)
push!(res, p)
else
empty!(res)
end
end
end
function list!(db::MatrixDatabase, res::Vector{String}, p::AbstractString)
isempty(res) && return res
r = shell_to_regex(p, true)
r isa Regex ? list!(db, res, r) : singlist!(db, res, r)
end
list!(db::MatrixDatabase, res::Vector{String}, p::Alias) = list!(db, res, aliasresolve(db, p))
function list!(db::MatrixDatabase, res::Vector{String}, p::Alternate{R}) where R
xlate = R == MMRemoteType ? name2mm : name2ss
xpatterns = xlate.(mdlist(db, p.pattern))
list!(db, res, xpatterns)
end
# If res is symbolically [""], populate it with the set of all available names
function resall!(db::MatrixDatabase, res::Vector{String})
if is_all(res)
x = list_all(db)
resize!(res, length(x))
copyto!(res, x)
end
res
end
function list!(db::MatrixDatabase, res::Vector{String}, p::Not)
isempty(res) && return res
cres = list!(db, copy(res), p.pattern)
isempty(cres) && return res
resall!(db, res)
setdiff!(res, cres)
end
# logical OR
function list!(db::MatrixDatabase, res::Vector{String}, r::AbstractVector)
isempty(r) && return empty!(res)
check_symbols(r)
isempty(res) && return res
length(r) == 1 && return list!(db, res, r[1])
cres = copy(res)
list!(db, res, r[1])
for y in r[2:end]
union!(res, list!(db, copy(cres), y))
end
sort!(res)
end
list!(db::MatrixDatabase, res::Vector{String}, ::Tuple{}) = res
# logical AND
function list!(db::MatrixDatabase, res::Vector{String}, r::Tuple)
isempty(res) && return res
check_symbols(r)
for y in sort_by_type(r)
list!(db, res, y)
isempty(res) && break
end
res
end
function list!(db::MatrixDatabase, res::Vector{String}, pred::Function)
isempty(res) && return res
resall!(db, res)
filter!(k -> pred(db.data[k]), res)
end
# return a vector with re-arranged the contents of itr
# the elements of its must be of type Pattern
function sort_by_type(itr)
f(p::AbstractString) = true
f(p::Symbol) = true
f(p::AbstractVector) = all(f.(p))
f(p::Tuple) = all(f.(p))
f(p::Pattern) = false
vall = collect(itr)
vend = filter(!f, vall)
filter!(f, vall)
append!(vall, vend)
vall
end
## internal list special cases
list_all(db::MatrixDatabase) = sort!(String.(collect(keys(db.data))))
list_local(db::MatrixDatabase) = union(collect(keys(MATRIXDICT)), keys(USERMATRIXDICT))
list_builtin(db::MatrixDatabase) = collect(keys(MATRIXDICT))
list_user(db::MatrixDatabase) = collect(keys(USERMATRIXDICT))
const SUBSETS = Dict(
:local => list_local,
:builtin => list_builtin,
:user => list_user,
:all => list_all,
)
function verify_loaded(db::MatrixDatabase, data::RemoteMatrixData)
if isempty(data.metadata) || !isfile(matrixfile(data))
loadmatrix(data)
end
data
end
verify_loaded(db::MatrixDatabase, data::MatrixData) = data
function verify_loadinfo(data::RemoteMatrixData)
file = matrixfile(data)
if !isfile(file)
file = matrixinfofile(data)
if !isfile(file)
loadinfo(data)
end
end
file
end
mdatav(db::MatrixDatabase, p::Pattern) = verify_loaded(db, mdata(db, p))
"""
load([db,] pattern)
Load data from remote repository for all problems matching pattern.
Return the number of successfully loaded matrices.
"""
load(p::Pattern) = load(MATRIX_DB, p)
load(db::MatrixDatabase, p::Pattern) = _load(db, loadmatrix, p)
"""
loadinfo([db,] pattern)
Load the matrix header information for all problems matching pattern,
without downloading the full data files.
Return the number of successfully processed problems.
"""
loadinfo(p::Pattern) = loadinfo(MATRIX_DB, p)
loadinfo(db::MatrixDatabase, p::Pattern) = _load(db, loadinfo, p)
"""
loadsvd([db,] pattern)
Load singular value data for all problems matching pattern, if available.
Return the number of successfully processed problems.
"""
loadsvd(p::Pattern) = loadsvd(MATRIX_DB, p)
loadsvd(db::MatrixDatabase, p::Pattern) = _load(db, loadsvd, p)
function _load(db::MatrixDatabase, loadfunc::Function, p::Pattern)
check_symbols(p)
n = 0
for name in mdlist(p)
try
n += loadfunc(db.data[name])
catch ex
ex isa InterruptException && rethrow()
@warn "could not load $name: $ex"
end
end
n
end
"""
mdopen([db,] pattern)
mdopen(f, [db,] pattern)
Return `MatrixDescriptor` object, which can be used with data access functions.
Make sure that data files are loaded.
Keeps a cache of already delivered matrices and metadata.
If the pattern has not a unique resolution, an error is thrown.
"""
mdopen(p::Pattern, args...) = mdopen(MATRIX_DB, p, args...)
function mdopen(db::MatrixDatabase, p::Pattern, args...)
_mdopen(mdatav(db, p), args...)
end
mdopen(f::Function, p::Pattern, args...) = mdopen(f, MATRIX_DB, p, args...)
function mdopen(f::Function, db::MatrixDatabase, p::Pattern, args...)
data = _mdopen(mdatav(db, p), args...)
f(data)
end
"""
mdata(db, pattern)
Return unique `MatrixData` object according to pattern.
"""
mdata(p::Pattern) = mdata(MATRIX_DB, p)
function mdata(db::MatrixDatabase, p::Pattern)
check_symbols(p)
li = mdlist(db, p)
length(li) == 0 && daterr("no matrix according to $p found")
length(li) > 1 && daterr("pattern not unique: $p -> $li")
db.data[li[1]]
end
"""
getmeta([db, ], Union{MatrixDescriptor,MatrixData})
Return copy of list of metadata names.
"""
getmeta(mdesc::MatrixDescriptor) = getmeta(mdesc.data)
getmeta(data::RemoteMatrixData) = copy(data.metadata)
getmeta(data::MatrixData) = String[]
_mdopen(data::RemoteMatrixData)= MatrixDescriptor(data)
function _mdopen(data::GeneratedMatrixData, args...)
md = MatrixDescriptor(data, args...)
md.A # trigger generation of data and fill cache
md
end
###
# convenience API
###
"""
matrixdepot(p::Pattern, args...)
Return matrix according to pattern or local matrix according to name and arguments.
If not loaded, load remote matrix first.
`p` must be a unique pattern (match only one name). The presence of arguments makes
sense only if the pattern matches the name of a generated (=local) matrix.
Only the matrix part is delivered, also in the local cases, where the underlying
function returns a structure containing matrix and vectors.
Use `md = mdopen; md.A, md.b ...`
to access those objects.
"""
matrixdepot(p::Pattern, args...) = matrixdepot(MATRIX_DB, p, args...)
function matrixdepot(db::MatrixDatabase, p::Pattern, args...)
mdopen(db, p, args...) do md
md.A
end
end
| MatrixDepot | https://github.com/JuliaLinearAlgebra/MatrixDepot.jl.git |
|
[
"MIT"
]
| 1.0.13 | b7e11245178a57be72e47a69468dca6b73f192d8 | code | 6065 | """
user-defined matrix generators
to be populated with `include_generator` in user source code
"""
const USERMATRIXDICT = Dict{String,Function}()
"""
user-defined groups
"""
const USERMATRIXCLASS = Dict{Symbol,Pattern}()
"""
Associate names with matrix-generating functions
"""
const MATRIXDICT = Dict("hilb" => hilb, "hadamard" => hadamard,
"cauchy" => cauchy, "circul" => circul,
"dingdong" => dingdong, "frank" => frank,
"invhilb" => invhilb, "forsythe" => forsythe,
"magic" => magic, "grcar" => grcar,
"triw" => triw, "moler" => moler,
"pascal" => pascal, "kahan" => kahan,
"pei" => pei, "vand" => vand,
"invol" => invol, "chebspec" => chebspec,
"lotkin" => lotkin, "clement" => clement,
"fiedler" => fiedler, "minij" => minij,
"binomial" => binomialm, "tridiag" => tridiag,
"lehmer" => lehmer, "parter" => parter,
"chow" => chow, "randcorr" => randcorr,
"poisson" => poisson, "neumann" => neumann,
"rosser" => rosser, "sampling" => sampling,
"wilkinson" => wilkinson, "rando" => rando,
"randsvd" => randsvd, "rohess" => rohess,
"kms" => kms, "wathen" => wathen,
"oscillate" => oscillate, "toeplitz" => toeplitz,
"hankel" => hankel, "golub" => golub,
"companion" => companion,
"prolate" => prolate, "deriv2" => deriv2,
"shaw" => shaw, "wing" => wing,
"foxgood" => foxgood, "heat" => heat,
"baart" => baart, "phillips" => phillips,
"gravity" => gravity, "blur" => blur,
"spikes" => spikes, "ursell" => ursell,
"parallax" => parallax, "erdrey" => erdrey,
"gilbert" => gilbert, "smallworld" => smallworld
)
"""
predefined matrix classes (for the generated functions)
"""
const MATRIXCLASS = Dict(
:symmetric => ["hilb", "cauchy", "circul", "dingdong",
"invhilb", "moler", "pascal", "pei",
"clement", "fiedler", "minij",
"lehmer", "randcorr", "poisson", "wilkinson",
"kms", "wathen", "oscillate", "prolate",
"hankel"],
:inverse => ["hilb", "hadamard", "cauchy", "invhilb",
"forsythe", "magic", "triw", "moler", "pascal",
"kahan", "pei", "vand", "invol", "lotkin",
"clement", "fiedler", "minij", "tridiag",
"lehmer", "poisson", "kms" ],
:illcond => ["hilb", "cauchy", "frank", "invhilb",
"forsythe", "triw", "moler", "pascal",
"kahan","pei", "vand", "invol", "lotkin",
"tridiag", "rosser", "randsvd", "kms",
"oscillate", "prolate", "golub"],
:posdef => ["hilb", "cauchy", "circul", "invhilb",
"moler", "pascal", "pei", "minij", "tridiag",
"lehmer", "poisson", "kms", "wathen", "oscillate"],
:eigen => ["hadamard", "circul", "dingdong", "frank",
"forsythe", "grcar", "pascal", "invol","chebspec",
"lotkin", "clement", "fiedler", "minij",
"tridiag", "parter", "chow", "poisson", "neumann",
"rosser", "sampling", "wilkinson","wathen",
"oscillate"],
:sparse => ["poisson", "neumann", "wathen", "blur", "erdrey", "gilbert",
"smallworld"],
:random => ["rosser", "rando", "randcorr", "randsvd", "rohess",
"wathen", "oscillate", "golub", "erdrey", "gilbert", "smallworld"],
:regprob => ["deriv2", "shaw", "wing", "foxgood", "heat",
"baart", "phillips", "gravity", "blur",
"spikes", "ursell", "parallax"],
:graph => ["erdrey", "gilbert", "smallworld"]
)
# remote parameters for several data sources
const SS_REMOTE = SSRemoteType(RemoteParametersNew(
"https://sparse.tamu.edu",
"https://sparse.tamu.edu/MM",
"https://sparse.tamu.edu/files/ss_index.mat",
"https://sparse.tamu.edu/files/ssstats.csv",
".tar.gz"
))
const MM_REMOTE = MMRemoteType(RemoteParameters(
"https://math.nist.gov/MatrixMarket",
"https://math.nist.gov/pub/MatrixMarket2",
"https://math.nist.gov/MatrixMarket/matrices.html",
"""<TITLE>The Matrix Market Matrices by Name</TITLE>""",
("M", """<A HREF="/MatrixMarket/data/""", 2, ".html", 3, nothing),
".mtx.gz"
))
# return the single instance for the remote type
preferred(::Type{SSRemoteType}) = SS_REMOTE
preferred(::Type{MMRemoteType}) = MM_REMOTE
"""
The place to store all matrix data in process
"""
const MATRIX_DB = MatrixDatabase()
# local storage directory
const DATA_DIR = @get_scratch!("data")
data_dir() = get(ENV, "MATRIXDEPOT_DATA", DATA_DIR)
url_redirect() = URL_REDIRECT[] = get(ENV, "MATRIXDEPOT_URL_REDIRECT", "0") != "0"
const REDIRECT_DIR = abspath(dirname(@__FILE__), "..", "test", "data")
const URL_REDIRECT = Ref(false)
function redirect(url::AbstractString)
if URL_REDIRECT[]
urlpart = split(url, ":/", limit=2)[2]
if Sys.iswindows()
string("file:/", replace(REDIRECT_DIR, '\\' => '/'), urlpart)
else
string("file://", REDIRECT_DIR, urlpart)
end
else
url
end
end
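# Example (illustrative, non-Windows): with URL_REDIRECT[] set,
#   redirect("https://sparse.tamu.edu/MM/HB/1138_bus.tar.gz")
# yields "file://<REDIRECT_DIR>/sparse.tamu.edu/MM/HB/1138_bus.tar.gz".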
| MatrixDepot | https://github.com/JuliaLinearAlgebra/MatrixDepot.jl.git |