licenses (sequence, lengths 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2–8) | text (string, lengths 25–67.1M) | package_name (string, lengths 2–41) | repo (string, lengths 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 15698 | @eval module $(gensym())
using AbstractTrees:
IndexNode,
Leaves,
PostOrderDFS,
childindices,
children,
nodevalue,
nodevalues,
parent,
parentindex,
rootindex
using Dictionaries: Dictionary, Indices
using Graphs:
add_edge!,
add_vertex!,
edges,
edgetype,
has_edge,
inneighbors,
is_cyclic,
is_directed,
ne,
nv,
outneighbors,
rem_edge!,
vertices
using Graphs.SimpleGraphs:
SimpleDiGraph,
SimpleEdge,
SimpleGraph,
binary_tree,
cycle_digraph,
cycle_graph,
grid,
path_digraph,
path_graph
using NamedGraphs: NamedGraph
using NamedGraphs.GraphGenerators: binary_arborescence
using NamedGraphs.GraphsExtensions:
TreeGraph,
⊔,
add_edge,
add_edges,
add_edges!,
all_edges,
child_edges,
child_vertices,
convert_vertextype,
degrees,
directed_graph,
directed_graph_type,
disjoint_union,
distance_to_leaves,
has_edges,
has_leaf_neighbor,
has_vertices,
incident_edges,
indegrees,
is_arborescence,
is_binary_arborescence,
is_cycle_graph,
is_ditree,
is_leaf_edge,
is_leaf_vertex,
is_path_graph,
is_root_vertex,
is_rooted,
is_self_loop,
leaf_vertices,
minimum_distance_to_leaves,
next_nearest_neighbors,
non_leaf_edges,
outdegrees,
permute_vertices,
rem_edge,
rem_edges,
rem_edges!,
rename_vertices,
root_vertex,
subgraph,
tree_graph_node,
undirected_graph,
undirected_graph_type,
vertextype,
vertices_at_distance
using Test: @test, @test_broken, @test_throws, @testset
# TODO: Still need to test:
# - post_order_dfs_vertices
# - pre_order_dfs_vertices
# - post_order_dfs_edges
# - vertex_path
# - edge_path
# - parent_vertices
# - parent_vertex
# - parent_edges
# - parent_edge
# - mincut_partitions
# - eccentricities
# - decorate_graph_edges
# - decorate_graph_vertices
# - random_bfs_tree
@testset "NamedGraphs.GraphsExtensions" begin
# has_vertices
g = path_graph(4)
@test has_vertices(g, 1:3)
@test has_vertices(g, [2, 4])
@test !has_vertices(g, [2, 5])
# has_edges
g = path_graph(4)
@test has_edges(g, [1 => 2, 2 => 3, 3 => 4])
@test has_edges(g, [2 => 3])
@test !has_edges(g, [1 => 3])
@test !has_edges(g, [4 => 5])
# convert_vertextype
for g in (path_graph(4), path_digraph(4))
g_uint16 = convert_vertextype(UInt16, g)
@test g_uint16 == g
@test vertextype(g_uint16) == UInt16
@test issetequal(vertices(g_uint16), vertices(g))
@test issetequal(edges(g_uint16), edges(g))
end
# is_self_loop
@test is_self_loop(SimpleEdge(1, 1))
@test !is_self_loop(SimpleEdge(1, 2))
@test is_self_loop(1 => 1)
@test !is_self_loop(1 => 2)
# directed_graph_type
@test directed_graph_type(SimpleGraph{Int}) === SimpleDiGraph{Int}
@test directed_graph_type(SimpleGraph(4)) === SimpleDiGraph{Int}
# undirected_graph_type
@test undirected_graph_type(SimpleGraph{Int}) === SimpleGraph{Int}
@test undirected_graph_type(SimpleGraph(4)) === SimpleGraph{Int}
# directed_graph
@test directed_graph(path_digraph(4)) == path_digraph(4)
@test typeof(directed_graph(path_digraph(4))) === SimpleDiGraph{Int}
g = path_graph(4)
dig = directed_graph(g)
@test typeof(dig) === SimpleDiGraph{Int}
@test nv(dig) == 4
@test ne(dig) == 6
@test issetequal(
edges(dig), edgetype(dig).([1 => 2, 2 => 1, 2 => 3, 3 => 2, 3 => 4, 4 => 3])
)
# undirected_graph
@test undirected_graph(path_graph(4)) == path_graph(4)
@test typeof(undirected_graph(path_graph(4))) === SimpleGraph{Int}
dig = path_digraph(4)
g = undirected_graph(dig)
@test typeof(g) === SimpleGraph{Int}
@test g == path_graph(4)
# vertextype
for f in (path_graph, path_digraph)
for vtype in (Int, UInt64)
@test vertextype(f(vtype(4))) === vtype
@test vertextype(typeof(f(vtype(4)))) === vtype
end
end
# rename_vertices
vs = ["a", "b", "c", "d"]
g = rename_vertices(v -> vs[v], NamedGraph(path_graph(4)))
@test nv(g) == 4
@test ne(g) == 3
@test issetequal(vertices(g), vs)
@test issetequal(edges(g), edgetype(g).(["a" => "b", "b" => "c", "c" => "d"]))
@test g isa NamedGraph
# Not defined for AbstractSimpleGraph.
@test_throws ErrorException rename_vertices(v -> vs[v], path_graph(4))
# permute_vertices
g = path_graph(4)
g_perm = permute_vertices(g, [2, 1, 4, 3])
@test nv(g_perm) == 4
@test ne(g_perm) == 3
@test vertices(g_perm) == 1:4
@test has_edge(g_perm, 1 => 2)
@test has_edge(g_perm, 2 => 1)
@test has_edge(g_perm, 1 => 4)
@test has_edge(g_perm, 4 => 1)
@test has_edge(g_perm, 3 => 4)
@test has_edge(g_perm, 4 => 3)
@test !has_edge(g_perm, 2 => 3)
@test !has_edge(g_perm, 3 => 2)
g = path_digraph(4)
g_perm = permute_vertices(g, [2, 1, 4, 3])
@test nv(g_perm) == 4
@test ne(g_perm) == 3
@test vertices(g_perm) == 1:4
@test has_edge(g_perm, 2 => 1)
@test !has_edge(g_perm, 1 => 2)
@test has_edge(g_perm, 1 => 4)
@test !has_edge(g_perm, 4 => 1)
@test has_edge(g_perm, 4 => 3)
@test !has_edge(g_perm, 3 => 4)
# all_edges
g = path_graph(4)
@test issetequal(
all_edges(g), edgetype(g).([1 => 2, 2 => 1, 2 => 3, 3 => 2, 3 => 4, 4 => 3])
)
g = path_digraph(4)
@test issetequal(all_edges(g), edgetype(g).([1 => 2, 2 => 3, 3 => 4]))
# subgraph
g = subgraph(path_graph(4), 2:4)
@test nv(g) == 3
@test ne(g) == 2
# TODO: Should this preserve vertex names by
# converting to `NamedGraph` if indexed by
# something besides `Base.OneTo`?
@test vertices(g) == 1:3
@test issetequal(edges(g), edgetype(g).([1 => 2, 2 => 3]))
@test subgraph(v -> v ∈ 2:4, path_graph(4)) == g
# degrees
@test degrees(path_graph(4)) == [1, 2, 2, 1]
@test degrees(path_graph(4), 2:4) == [2, 2, 1]
@test degrees(path_digraph(4)) == [1, 2, 2, 1]
@test degrees(path_digraph(4), 2:4) == [2, 2, 1]
@test degrees(path_graph(4), Indices(2:4)) == Dictionary(2:4, [2, 2, 1])
# indegrees
@test indegrees(path_graph(4)) == [1, 2, 2, 1]
@test indegrees(path_graph(4), 2:4) == [2, 2, 1]
@test indegrees(path_digraph(4)) == [0, 1, 1, 1]
@test indegrees(path_digraph(4), 2:4) == [1, 1, 1]
# outdegrees
@test outdegrees(path_graph(4)) == [1, 2, 2, 1]
@test outdegrees(path_graph(4), 2:4) == [2, 2, 1]
@test outdegrees(path_digraph(4)) == [1, 1, 1, 0]
@test outdegrees(path_digraph(4), 2:4) == [1, 1, 0]
# TreeGraph
# Binary tree:
#
# 1
# / \
# / \
# 2 3
# / \ / \
# 4 5 6 7
#
# with vertex 1 as root.
g = binary_arborescence(3)
@test is_arborescence(g)
@test is_binary_arborescence(g)
@test is_ditree(g)
g′ = copy(g)
add_edge!(g′, 2 => 3)
@test !is_arborescence(g′)
@test !is_ditree(g′)
t = TreeGraph(g)
@test is_directed(t)
@test ne(t) == 6
@test nv(t) == 7
@test vertices(t) == 1:7
@test issetequal(outneighbors(t, 1), [2, 3])
@test issetequal(outneighbors(t, 2), [4, 5])
@test issetequal(outneighbors(t, 3), [6, 7])
@test isempty(inneighbors(t, 1))
for v in 2:3
@test only(inneighbors(t, v)) == 1
end
for v in 4:5
@test only(inneighbors(t, v)) == 2
end
for v in 6:7
@test only(inneighbors(t, v)) == 3
end
@test edgetype(t) === SimpleEdge{Int}
@test vertextype(t) == Int
@test nodevalue(t) == 1
for v in 1:7
@test tree_graph_node(g, v) == IndexNode(t, v)
end
@test rootindex(t) == 1
@test issetequal(nodevalue.(children(t)), 2:3)
@test issetequal(childindices(t, 1), 2:3)
@test issetequal(childindices(t, 2), 4:5)
@test issetequal(childindices(t, 3), 6:7)
for v in 4:7
@test isempty(childindices(t, v))
end
@test isnothing(parentindex(t, 1))
for v in 2:3
@test parentindex(t, v) == 1
end
for v in 4:5
@test parentindex(t, v) == 2
end
for v in 6:7
@test parentindex(t, v) == 3
end
@test IndexNode(t) == IndexNode(t, 1)
@test tree_graph_node(g) == tree_graph_node(g, 1)
for dfs_g in (
collect(nodevalues(PostOrderDFS(tree_graph_node(g, 1)))),
collect(nodevalues(PostOrderDFS(t))),
)
@test length(dfs_g) == 7
@test dfs_g == [4, 5, 2, 6, 7, 3, 1]
end
@test issetequal(nodevalue.(children(tree_graph_node(g, 1))), 2:3)
@test issetequal(nodevalue.(children(tree_graph_node(g, 2))), 4:5)
@test issetequal(nodevalue.(children(tree_graph_node(g, 3))), 6:7)
for v in 4:7
@test isempty(children(tree_graph_node(g, v)))
end
for n in (tree_graph_node(g), t)
@test issetequal(nodevalue.(Leaves(n)), 4:7)
end
@test issetequal(nodevalue.(Leaves(t)), 4:7)
@test isnothing(nodevalue(parent(tree_graph_node(g, 1))))
for v in 2:3
@test nodevalue(parent(tree_graph_node(g, v))) == 1
end
for v in 4:5
@test nodevalue(parent(tree_graph_node(g, v))) == 2
end
for v in 6:7
@test nodevalue(parent(tree_graph_node(g, v))) == 3
end
# disjoint_union, ⊔
g1 = NamedGraph(path_graph(3))
g2 = NamedGraph(path_graph(3))
for g in (
disjoint_union(g1, g2),
disjoint_union([g1, g2]),
disjoint_union([1 => g1, 2 => g2]),
disjoint_union(Dictionary([1, 2], [g1, g2])),
g1 ⊔ g2,
(1 => g1) ⊔ (2 => g2),
)
@test nv(g) == 6
@test ne(g) == 4
@test issetequal(vertices(g), [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2)])
end
for g in (
disjoint_union("x" => g1, "y" => g2),
disjoint_union(["x" => g1, "y" => g2]),
disjoint_union(Dictionary(["x", "y"], [g1, g2])),
("x" => g1) ⊔ ("y" => g2),
)
@test nv(g) == 6
@test ne(g) == 4
@test issetequal(
vertices(g), [(1, "x"), (2, "x"), (3, "x"), (1, "y"), (2, "y"), (3, "y")]
)
end
# is_path_graph
@test is_path_graph(path_graph(4))
@test !is_path_graph(cycle_graph(4))
# Only defined for undirected graphs at the moment.
@test_throws MethodError is_path_graph(path_digraph(4))
@test !is_path_graph(grid((3, 2)))
# is_cycle_graph
@test is_cycle_graph(cycle_graph(4))
@test !is_cycle_graph(path_graph(4))
# Only defined for undirected graphs at the moment.
@test_throws MethodError is_cycle_graph(cycle_digraph(4))
@test !is_cycle_graph(grid((3, 2)))
@test is_cycle_graph(grid((2, 2)))
# incident_edges
g = path_graph(4)
@test issetequal(incident_edges(g, 2), SimpleEdge.([2 => 1, 2 => 3]))
@test issetequal(incident_edges(g, 2; dir=:out), SimpleEdge.([2 => 1, 2 => 3]))
@test issetequal(incident_edges(g, 2; dir=:in), SimpleEdge.([1 => 2, 3 => 2]))
# TODO: Only output out edges?
@test issetequal(
incident_edges(g, 2; dir=:both), SimpleEdge.([2 => 1, 1 => 2, 2 => 3, 3 => 2])
)
# is_leaf_vertex
g = binary_tree(3)
for v in 1:3
@test !is_leaf_vertex(g, v)
end
for v in 4:7
@test is_leaf_vertex(g, v)
end
g = binary_arborescence(3)
for v in 1:3
@test !is_leaf_vertex(g, v)
end
for v in 4:7
@test is_leaf_vertex(g, v)
end
# child_vertices
g = binary_arborescence(3)
@test issetequal(child_vertices(g, 1), 2:3)
@test issetequal(child_vertices(g, 2), 4:5)
@test issetequal(child_vertices(g, 3), 6:7)
for v in 4:7
@test isempty(child_vertices(g, v))
end
# child_edges
g = binary_arborescence(3)
@test issetequal(child_edges(g, 1), SimpleEdge.([1 => 2, 1 => 3]))
@test issetequal(child_edges(g, 2), SimpleEdge.([2 => 4, 2 => 5]))
@test issetequal(child_edges(g, 3), SimpleEdge.([3 => 6, 3 => 7]))
for v in 4:7
@test isempty(child_edges(g, v))
end
# leaf_vertices
g = binary_tree(3)
@test issetequal(leaf_vertices(g), 4:7)
g = binary_arborescence(3)
@test issetequal(leaf_vertices(g), 4:7)
# is_leaf_edge
g = binary_tree(3)
for e in [1 => 2, 1 => 3]
@test !is_leaf_edge(g, e)
@test !is_leaf_edge(g, reverse(e))
end
for e in [2 => 4, 2 => 5, 3 => 6, 3 => 7]
@test is_leaf_edge(g, e)
@test is_leaf_edge(g, reverse(e))
end
g = binary_arborescence(3)
for e in [1 => 2, 1 => 3]
@test !is_leaf_edge(g, e)
@test !is_leaf_edge(g, reverse(e))
end
for e in [2 => 4, 2 => 5, 3 => 6, 3 => 7]
@test is_leaf_edge(g, e)
@test !is_leaf_edge(g, reverse(e))
end
# has_leaf_neighbor
for g in (binary_tree(3), binary_arborescence(3))
for v in [1; 4:7]
@test !has_leaf_neighbor(g, v)
end
for v in 2:3
@test has_leaf_neighbor(g, v)
end
end
# non_leaf_edges
g = binary_tree(3)
es = collect(non_leaf_edges(g))
es = [es; reverse.(es)]
for e in SimpleEdge.([1 => 2, 1 => 3])
@test e in es
@test reverse(e) in es
end
for e in SimpleEdge.([2 => 4, 2 => 5, 3 => 6, 3 => 7])
@test !(e in es)
@test !(reverse(e) in es)
end
g = binary_arborescence(3)
es = collect(non_leaf_edges(g))
for e in SimpleEdge.([1 => 2, 1 => 3])
@test e in es
@test !(reverse(e) in es)
end
for e in SimpleEdge.([2 => 4, 2 => 5, 3 => 6, 3 => 7])
@test !(e in es)
@test !(reverse(e) in es)
end
# distance_to_leaves
g = binary_tree(3)
d = distance_to_leaves(g, 3)
d_ref = Dict([4 => 3, 5 => 3, 6 => 1, 7 => 1])
for v in keys(d)
@test is_leaf_vertex(g, v)
@test d[v] == d_ref[v]
end
g = binary_arborescence(3)
d = distance_to_leaves(g, 3)
d_ref = Dict([4 => typemax(Int), 5 => typemax(Int), 6 => 1, 7 => 1])
for v in keys(d)
@test is_leaf_vertex(g, v)
@test d[v] == d_ref[v]
end
d = distance_to_leaves(g, 1)
d_ref = Dict([4 => 2, 5 => 2, 6 => 2, 7 => 2])
for v in keys(d)
@test is_leaf_vertex(g, v)
@test d[v] == d_ref[v]
end
# minimum_distance_to_leaves
for g in (binary_tree(3), binary_arborescence(3))
@test minimum_distance_to_leaves(g, 1) == 2
@test minimum_distance_to_leaves(g, 3) == 1
@test minimum_distance_to_leaves(g, 7) == 0
end
# is_root_vertex
g = binary_arborescence(3)
@test is_root_vertex(g, 1)
for v in 2:7
@test !is_root_vertex(g, v)
end
g = binary_tree(3)
for v in vertices(g)
@test_throws MethodError is_root_vertex(g, v)
end
# is_rooted
@test is_rooted(binary_arborescence(3))
g = binary_arborescence(3)
add_edge!(g, 2 => 3)
@test is_rooted(g)
g = binary_arborescence(3)
add_vertex!(g)
add_edge!(g, 8 => 3)
@test !is_rooted(g)
@test is_rooted(path_digraph(4))
@test_throws MethodError is_rooted(binary_tree(3))
# is_binary_arborescence
@test is_binary_arborescence(binary_arborescence(3))
g = binary_arborescence(3)
add_vertex!(g)
add_edge!(g, 3 => 8)
@test !is_binary_arborescence(g)
@test_throws MethodError is_binary_arborescence(binary_tree(3))
# root_vertex
@test root_vertex(binary_arborescence(3)) == 1
# No root vertex of cyclic graph.
g = binary_arborescence(3)
add_edge!(g, 7 => 1)
@test_throws ErrorException root_vertex(g)
@test_throws MethodError root_vertex(binary_tree(3))
# add_edge
g = SimpleGraph(4)
add_edge!(g, 1 => 2)
@test add_edge(SimpleGraph(4), 1 => 2) == g
# add_edges
@test add_edges(SimpleGraph(4), [1 => 2, 2 => 3, 3 => 4]) == path_graph(4)
# add_edges!
g = SimpleGraph(4)
add_edges!(g, [1 => 2, 2 => 3, 3 => 4])
@test g == path_graph(4)
# rem_edge
g = path_graph(4)
# https://github.com/JuliaGraphs/Graphs.jl/issues/364
rem_edge!(g, 2, 3)
@test rem_edge(path_graph(4), 2 => 3) == g
# rem_edges
g = path_graph(4)
# https://github.com/JuliaGraphs/Graphs.jl/issues/364
rem_edge!(g, 2, 3)
rem_edge!(g, 3, 4)
@test rem_edges(path_graph(4), [2 => 3, 3 => 4]) == g
# rem_edges!
g = path_graph(4)
# https://github.com/JuliaGraphs/Graphs.jl/issues/364
rem_edge!(g, 2, 3)
rem_edge!(g, 3, 4)
g′ = path_graph(4)
rem_edges!(g′, [2 => 3, 3 => 4])
@test g′ == g
#vertices at distance
L = 10
g = path_graph(L)
@test only(vertices_at_distance(g, 1, L - 1)) == L
@test only(next_nearest_neighbors(g, 1)) == 3
@test issetequal(vertices_at_distance(g, 5, 3), [2, 8])
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1186 | module Keys
"""
Key{K}
A key (index) type, used for unambiguously identifying
an object as a key or index of an indexable object
(`AbstractArray`, `AbstractDict`, etc.).
Useful for nested structures of indices, for example:
```julia
[Key([1, 2]), [Key([3, 4]), Key([5, 6])]]
```
which could represent a partitioning of the set of vertices
```julia
[Key([1, 2]), Key([3, 4]), Key([5, 6])]
```
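A minimal indexing sketch (illustrative only, relying on the `Base.to_index` and
`Base.getindex` overloads defined below in this file):
```julia
A = [10, 20, 30]
A[Key(2)]          # 20
d = Dict("a" => 1)
d[Key("a")]        # 1
```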
"""
struct Key{K}
I::K
end
Key(I...) = Key(I)
Base.show(io::IO, I::Key) = print(io, "Key(", I.I, ")")
## For indexing into `AbstractArray`
# This allows linear indexing `A[Key(2)]`.
# Overload of `Base.to_index`.
Base.to_index(I::Key) = I.I
# This allows cartesian indexing `A[Key(CartesianIndex(1, 2))]`.
# Overload of `Base.to_indices`.
Base.to_indices(A::AbstractArray, I::Tuple{Key{<:CartesianIndex}}) = I[1].I.I
# This would allow syntax like `A[Key(1, 2)]`, should we support that?
# Overload of `Base.to_indices`.
# to_indices(A::AbstractArray, I::Tuple{Key}) = I[1].I
Base.getindex(d::AbstractDict, I::Key) = d[I.I]
# Fix ambiguity error with Base
Base.getindex(d::Dict, I::Key) = d[I.I]
using Dictionaries: AbstractDictionary
Base.getindex(d::AbstractDictionary, I::Key) = d[I.I]
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 5682 | module NamedGraphGenerators
using Graphs:
IsDirected,
bfs_tree,
binary_tree,
grid,
inneighbors,
merge_vertices,
nv,
outneighbors,
path_digraph,
path_graph,
rem_vertex!
using Graphs.SimpleGraphs: AbstractSimpleGraph
using ..GraphGenerators: comb_tree
using ..GraphsExtensions: add_edges!, rem_vertices!
using ..NamedGraphs: NamedDiGraph, NamedGraph
using SimpleTraits: SimpleTraits, Not, @traitfn
## TODO: Bring this back in some form?
## TODO: Move to `GraphsExtensions`?
## @traitfn function parent(tree::AbstractSimpleGraph::IsDirected, v::Integer)
## return only(inneighbors(tree, v))
## end
## TODO: Move to `GraphsExtensions`?
@traitfn function children(tree::AbstractSimpleGraph::IsDirected, v::Integer)
return outneighbors(tree, v)
end
## TODO: Move to `GraphsExtensions`?
@traitfn function set_named_vertices!(
named_vertices::AbstractVector,
tree::AbstractSimpleGraph::IsDirected,
simple_parent::Integer,
named_parent;
child_name=identity,
)
simple_children = children(tree, simple_parent)
for n in 1:length(simple_children)
simple_child = simple_children[n]
named_child = (named_parent..., child_name(n))
named_vertices[simple_child] = named_child
set_named_vertices!(named_vertices, tree, simple_child, named_child; child_name)
end
return named_vertices
end
# TODO: Use vectors as vertex names?
# k = 3:
# 1 => (1,)
# 2 => (1, 1)
# 3 => (1, 2)
# 4 => (1, 1, 1)
# 5 => (1, 1, 2)
# 6 => (1, 2, 1)
# 7 => (1, 2, 2)
function named_bfs_tree_vertices(
simple_graph::AbstractSimpleGraph, source::Integer=1; source_name=1, child_name=identity
)
tree = bfs_tree(simple_graph, source)
named_vertices = Vector{Tuple}(undef, nv(simple_graph))
named_source = (source_name,)
named_vertices[source] = named_source
set_named_vertices!(named_vertices, tree, source, named_source; child_name)
return named_vertices
end
function named_bfs_tree(
simple_graph::AbstractSimpleGraph, source::Integer=1; source_name=1, child_name=identity
)
named_vertices = named_bfs_tree_vertices(simple_graph, source; source_name, child_name)
return NamedGraph(simple_graph, named_vertices)
end
function named_binary_tree(
k::Integer, source::Integer=1; source_name=1, child_name=identity
)
simple_graph = binary_tree(k)
return named_bfs_tree(simple_graph, source; source_name, child_name)
end
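# As a concrete illustration of the naming scheme described above, `named_binary_tree(3)`
# should have 7 vertices named (1,), (1, 1), (1, 2), (1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 2).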
function named_path_graph(dim::Integer)
return NamedGraph(path_graph(dim))
end
function named_path_digraph(dim::Integer)
return NamedDiGraph(path_digraph(dim))
end
function named_grid(dim::Integer; kwargs...)
simple_graph = grid((dim,); kwargs...)
return NamedGraph(simple_graph)
end
function named_grid(dims; kwargs...)
simple_graph = grid(dims; kwargs...)
return NamedGraph(simple_graph, Tuple.(CartesianIndices(Tuple(dims))))
end
function named_comb_tree(dims::Tuple)
simple_graph = comb_tree(dims)
return NamedGraph(simple_graph, Tuple.(CartesianIndices(Tuple(dims))))
end
function named_comb_tree(tooth_lengths::AbstractVector{<:Integer})
@assert all(>(0), tooth_lengths)
simple_graph = comb_tree(tooth_lengths)
nx = length(tooth_lengths)
ny = maximum(tooth_lengths)
vertices = filter(Tuple.(CartesianIndices((nx, ny)))) do (jx, jy)
jy <= tooth_lengths[jx]
end
return NamedGraph(simple_graph, vertices)
end
"""Generate a graph which corresponds to a hexagonal tiling of the plane. There are m rows and n columns of hexagons.
Based off of the generator in Networkx hexagonal_lattice_graph()"""
function named_hexagonal_lattice_graph(m::Integer, n::Integer; periodic=false)
M = 2 * m
rows = [i for i in 1:(M + 2)]
cols = [i for i in 1:(n + 1)]
if periodic && (n % 2 == 1 || m < 2 || n < 2)
error("Periodic Hexagonal Lattice needs m > 1, n > 1 and n even")
end
G = NamedGraph([(j, i) for i in cols for j in rows])
col_edges = [(j, i) => (j + 1, i) for i in cols for j in rows[1:(M + 1)]]
row_edges = [(j, i) => (j, i + 1) for i in cols[1:n] for j in rows if i % 2 == j % 2]
add_edges!(G, col_edges)
add_edges!(G, row_edges)
rem_vertex!(G, (M + 2, 1))
rem_vertex!(G, ((M + 1) * (n % 2) + 1, n + 1))
if periodic == true
for i in cols[1:n]
G = merge_vertices(G, [(1, i), (M + 1, i)])
end
for i in cols[2:(n + 1)]
G = merge_vertices(G, [(2, i), (M + 2, i)])
end
for j in rows[2:M]
G = merge_vertices(G, [(j, 1), (j, n + 1)])
end
rem_vertex!(G, (M + 1, n + 1))
end
return G
end
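# Sanity-check sketch (traced by hand from the construction above, worth verifying):
# `named_hexagonal_lattice_graph(1, 1)` should be a single hexagon, i.e. a 6-cycle on
# the vertices (1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2).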
"""Generate a graph which corresponds to a equilateral triangle tiling of the plane. There are m rows and n columns of triangles.
Based off of the generator in Networkx triangular_lattice_graph()"""
function named_triangular_lattice_graph(m::Integer, n::Integer; periodic=false)
N = floor(Int64, (n + 1) / 2.0)
rows = [i for i in 1:(m + 1)]
cols = [i for i in 1:(N + 1)]
if periodic && (n < 5 || m < 3)
error("Periodic Triangular Lattice needs m > 2, n > 4")
end
G = NamedGraph([(j, i) for i in cols for j in rows])
grid_edges1 = [(j, i) => (j, i + 1) for j in rows for i in cols[1:N]]
grid_edges2 = [(j, i) => (j + 1, i) for j in rows[1:m] for i in cols]
add_edges!(G, vcat(grid_edges1, grid_edges2))
diagonal_edges1 = [(j, i) => (j + 1, i + 1) for j in rows[2:2:m] for i in cols[1:N]]
diagonal_edges2 = [(j, i + 1) => (j + 1, i) for j in rows[1:2:m] for i in cols[1:N]]
add_edges!(G, vcat(diagonal_edges1, diagonal_edges2))
if periodic == true
for i in cols
G = merge_vertices(G, [(1, i), (m + 1, i)])
end
for j in rows[1:m]
G = merge_vertices(G, [(j, 1), (j, N + 1)])
end
elseif n % 2 == 1
rem_vertices!(G, [(j, N + 1) for j in rows[2:2:(m + 1)]])
end
return G
end
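# Sanity-check sketch (traced by hand from the construction above, worth verifying):
# `named_triangular_lattice_graph(1, 1)` should be a single triangle on the
# vertices (1, 1), (2, 1), (1, 2).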
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 122 | module OrderedDictionaries
include("orderedindices.jl")
include("ordereddictionary.jl")
include("ordinalindexing.jl")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2096 | using Dictionaries: AbstractDictionary
struct OrderedDictionary{I,T} <: AbstractDictionary{I,T}
indices::OrderedIndices{I}
values::Vector{T}
global function _OrderedDictionary(inds::OrderedIndices, values::Vector)
@assert length(values) == length(inds)
return new{eltype(inds),eltype(values)}(inds, values)
end
end
function OrderedDictionary(indices::OrderedIndices, values::Vector)
return _OrderedDictionary(indices, values)
end
function OrderedDictionary(indices, values)
return OrderedDictionary(OrderedIndices(indices), Vector(values))
end
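# Usage sketch (mirrors test/test_ordereddictionaries.jl):
#   d = OrderedDictionary(["x1", "x2", "x3"], [1, 2, 3])
#   d["x2"]      # 2
#   d[2th]       # 2, ordinal lookup (defined in `ordinalindexing.jl`)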
Base.values(dict::OrderedDictionary) = getfield(dict, :values)
# https://github.com/andyferris/Dictionaries.jl/tree/master?tab=readme-ov-file#abstractdictionary
Base.keys(dict::OrderedDictionary) = getfield(dict, :indices)
ordered_indices(dict::OrderedDictionary) = ordered_indices(keys(dict))
# https://github.com/andyferris/Dictionaries.jl/tree/master?tab=readme-ov-file#implementing-the-token-interface-for-abstractdictionary
Dictionaries.istokenizable(dict::OrderedDictionary) = Dictionaries.istokenizable(keys(dict))
Base.@propagate_inbounds function Dictionaries.gettokenvalue(
dict::OrderedDictionary, token::Int
)
return values(dict)[token]
end
function Dictionaries.istokenassigned(dict::OrderedDictionary, token::Int)
return isassigned(values(dict), token)
end
Dictionaries.issettable(dict::OrderedDictionary) = true
Base.@propagate_inbounds function Dictionaries.settokenvalue!(
dict::OrderedDictionary{<:Any,T}, token::Int, value::T
) where {T}
values(dict)[token] = value
return dict
end
Dictionaries.isinsertable(dict::OrderedDictionary) = true
Dictionaries.gettoken!(dict::OrderedDictionary, index) = error()
Dictionaries.deletetoken!(dict::OrderedDictionary, token) = error()
function Base.similar(indices::OrderedIndices, type::Type)
return OrderedDictionary(indices, Vector{type}(undef, length(indices)))
end
# Circumvents https://github.com/andyferris/Dictionaries.jl/pull/140
function Base.map(f, dict::OrderedDictionary)
return OrderedDictionary(keys(dict), map(f, values(dict)))
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 4131 | using Dictionaries: Dictionaries, AbstractIndices, Dictionary
# Represents a [set](https://en.wikipedia.org/wiki/Set_(mathematics)) of indices
# `I` whose elements/members are ordered in a sequence such that each element can be
# associated with a position which is a positive integer
# (1-based [natural numbers](https://en.wikipedia.org/wiki/Natural_number))
# which can be accessed through ordinal indexing (`I[4th]`).
# Related to an [indexed family](https://en.wikipedia.org/wiki/Indexed_family),
# [index set](https://en.wikipedia.org/wiki/Index_set), or
# [sequence](https://en.wikipedia.org/wiki/Sequence).
# In other words, it is a [bijection](https://en.wikipedia.org/wiki/Bijection)
# from a finite subset of 1-based natural numbers to a set of corresponding
# elements/members.
struct OrderedIndices{I} <: AbstractIndices{I}
ordered_indices::Vector{I}
index_positions::Dictionary{I,Int}
function OrderedIndices{I}(indices) where {I}
ordered_indices = collect(indices)
index_positions = Dictionary{I,Int}(copy(ordered_indices), undef)
for i in eachindex(ordered_indices)
index_positions[ordered_indices[i]] = i
end
return new{I}(ordered_indices, index_positions)
end
end
OrderedIndices(indices) = OrderedIndices{eltype(indices)}(indices)
OrderedIndices{I}(indices::OrderedIndices{I}) where {I} = copy(indices)
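# Usage sketch (mirrors test/test_ordereddictionaries.jl):
#   i = OrderedIndices(["x1", "x2", "x3", "x4"])
#   i[2th]           # "x2", ordinal lookup (defined in `ordinalindexing.jl`)
#   delete!(i, "x2")
#   collect(i)       # ["x1", "x4", "x3"]: deletion swaps in the last element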
ordered_indices(indices::OrderedIndices) = getfield(indices, :ordered_indices)
# TODO: Better name for this?
index_positions(indices::OrderedIndices) = getfield(indices, :index_positions)
# TODO: Better name for this?
parent_indices(indices::OrderedIndices) = keys(index_positions(indices))
# https://github.com/andyferris/Dictionaries.jl/tree/master?tab=readme-ov-file#abstractindices
function Dictionaries.iterate(indices::OrderedIndices, state...)
return Dictionaries.iterate(ordered_indices(indices), state...)
end
function Base.in(index::I, indices::OrderedIndices{I}) where {I}
return in(index, parent_indices(indices))
end
Base.length(indices::OrderedIndices) = length(ordered_indices(indices))
# https://github.com/andyferris/Dictionaries.jl/tree/master?tab=readme-ov-file#implementing-the-token-interface-for-abstractindices
Dictionaries.istokenizable(indices::OrderedIndices) = true
Dictionaries.tokentype(indices::OrderedIndices) = Int
function Dictionaries.iteratetoken(indices::OrderedIndices, state...)
return iterate(Base.OneTo(length(indices)), state...)
end
function Dictionaries.iteratetoken_reverse(indices::OrderedIndices, state...)
return iterate(reverse(Base.OneTo(length(indices))), state...)
end
function Dictionaries.gettoken(indices::OrderedIndices, key)
if !haskey(index_positions(indices), key)
return (false, 0)
end
return (true, index_positions(indices)[key])
end
function Dictionaries.gettokenvalue(indices::OrderedIndices, token)
return ordered_indices(indices)[token]
end
Dictionaries.isinsertable(indices::OrderedIndices) = true
function Dictionaries.gettoken!(indices::OrderedIndices{I}, key::I) where {I}
(hadtoken, token) = Dictionaries.gettoken(indices, key)
if hadtoken
return (true, token)
end
push!(ordered_indices(indices), key)
token = length(ordered_indices(indices))
insert!(index_positions(indices), key, token)
return (false, token)
end
function Dictionaries.deletetoken!(indices::OrderedIndices, token)
len = length(indices)
position = token
index = ordered_indices(indices)[position]
# Move the last vertex to the position of the deleted one.
if position < len
ordered_indices(indices)[position] = last(ordered_indices(indices))
end
last_index = pop!(ordered_indices(indices))
delete!(index_positions(indices), index)
if position < len
index_positions(indices)[last_index] = position
end
return indices
end
using Random: Random
function Dictionaries.randtoken(rng::Random.AbstractRNG, indices::OrderedIndices)
return rand(rng, Base.OneTo(length(indices)))
end
# Circumvents https://github.com/andyferris/Dictionaries.jl/pull/140
function Base.map(f, indices::OrderedIndices)
return OrderedDictionary(indices, map(f, ordered_indices(indices)))
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1146 | using ..OrdinalIndexing: OrdinalSuffixedInteger, cardinal, th
Base.@propagate_inbounds function Base.getindex(
indices::OrderedIndices, ordinal_index::OrdinalSuffixedInteger
)
return ordered_indices(indices)[cardinal(ordinal_index)]
end
Base.@propagate_inbounds function Base.setindex!(
indices::OrderedIndices, value, ordinal_index::OrdinalSuffixedInteger
)
old_value = indices[ordinal_index]
ordered_indices(indices)[cardinal(ordinal_index)] = value
delete!(index_positions(indices), old_value)
insert!(index_positions(indices), value, cardinal(ordinal_index))
return indices
end
each_ordinal_index(indices::OrderedIndices) = (Base.OneTo(length(indices))) * th
Base.@propagate_inbounds function Base.getindex(
dict::OrderedDictionary, ordinal_index::OrdinalSuffixedInteger
)
return dict[ordered_indices(dict)[cardinal(ordinal_index)]]
end
Base.@propagate_inbounds function Base.setindex!(
dict::OrderedDictionary, value, ordinal_index::OrdinalSuffixedInteger
)
index = keys(dict)[ordinal_index]
old_value = dict[index]
dict[index] = value
return dict
end
each_ordinal_index(dict::OrderedDictionary) = (Base.OneTo(length(dict))) * th
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3816 | @eval module $(gensym())
using Dictionaries: Dictionary
using NamedGraphs.OrderedDictionaries:
OrderedDictionaries, OrderedDictionary, OrderedIndices, each_ordinal_index
using NamedGraphs.OrdinalIndexing: th
using Test: @test, @testset
@testset "OrderedDictionaries" begin
@testset "OrderedIndices" begin
i = OrderedIndices(["x1", "x2", "x3", "x4"])
@test i isa OrderedIndices{String}
@test length(i) == 4
@test collect(i) == ["x1", "x2", "x3", "x4"]
@test eachindex(i) isa OrderedIndices{String}
@test keys(i) isa OrderedIndices{String}
@test issetequal(i, ["x1", "x2", "x3", "x4"])
@test keys(i) == OrderedIndices(["x1", "x2", "x3", "x4"])
@test issetequal(keys(i), ["x1", "x2", "x3", "x4"])
@test i["x1"] == "x1"
@test i["x2"] == "x2"
@test i["x3"] == "x3"
@test i["x4"] == "x4"
@test i[1th] == "x1"
@test i[2th] == "x2"
@test i[3th] == "x3"
@test i[4th] == "x4"
i = OrderedIndices(["x1", "x2", "x3", "x4"])
delete!(i, "x2")
@test length(i) == 3
@test collect(i) == ["x1", "x4", "x3"]
@test i["x1"] == "x1"
@test i["x3"] == "x3"
@test i["x4"] == "x4"
@test "x1" ∈ i
@test !("x2" ∈ i)
@test "x3" ∈ i
@test "x4" ∈ i
@test i[1th] == "x1"
@test i[2th] == "x4"
@test i[3th] == "x3"
@test OrderedDictionaries.ordered_indices(i) == ["x1", "x4", "x3"]
@test OrderedDictionaries.index_positions(i) ==
Dictionary(["x1", "x3", "x4"], [1, 3, 2])
# Test for deleting the last index, this is a special
# case in the code.
i = OrderedIndices(["x1", "x2", "x3", "x4"])
delete!(i, "x4")
@test length(i) == 3
@test collect(i) == ["x1", "x2", "x3"]
@test i["x1"] == "x1"
@test i["x2"] == "x2"
@test i["x3"] == "x3"
@test "x1" ∈ i
@test "x2" ∈ i
@test "x3" ∈ i
@test !("x4" ∈ i)
@test i[1th] == "x1"
@test i[2th] == "x2"
@test i[3th] == "x3"
@test OrderedDictionaries.ordered_indices(i) == ["x1", "x2", "x3"]
@test OrderedDictionaries.index_positions(i) ==
Dictionary(["x1", "x2", "x3"], [1, 2, 3])
i = OrderedIndices(["x1", "x2", "x3", "x4"])
d = Dictionary(["x1", "x2", "x3", "x4"], [:x1, :x2, :x3, :x4])
mapped_i = map(i -> d[i], i)
@test mapped_i == Dictionary(["x1", "x2", "x3", "x4"], [:x1, :x2, :x3, :x4])
@test mapped_i == OrderedDictionary(["x1", "x2", "x3", "x4"], [:x1, :x2, :x3, :x4])
@test mapped_i isa OrderedDictionary{String,Symbol}
@test mapped_i["x1"] === :x1
@test mapped_i["x2"] === :x2
@test mapped_i["x3"] === :x3
@test mapped_i["x4"] === :x4
i = OrderedIndices(["x1", "x2", "x3"])
insert!(i, "x4")
@test length(i) == 4
@test collect(i) == ["x1", "x2", "x3", "x4"]
@test i["x1"] == "x1"
@test i["x2"] == "x2"
@test i["x3"] == "x3"
@test i["x4"] == "x4"
@test i[1th] == "x1"
@test i[2th] == "x2"
@test i[3th] == "x3"
@test i[4th] == "x4"
i = OrderedIndices(["x1", "x2", "x3"])
ords = each_ordinal_index(i)
@test ords == (1:3)th
@test i[ords[1]] == "x1"
@test i[ords[2]] == "x2"
@test i[ords[3]] == "x3"
i = OrderedIndices(["x1", "x2", "x3"])
d = Dictionary(["x1", "x2", "x3"], zeros(Int, 3))
for _ in 1:50
r = rand(i)
@test r ∈ i
d[r] += 1
end
for k in i
@test d[k] > 0
end
end
@testset "OrderedDictionaries" begin
d = OrderedDictionary(["x1", "x2", "x3"], [1, 2, 3])
@test d["x1"] == 1
@test d["x2"] == 2
@test d["x3"] == 3
d = OrderedDictionary(["x1", "x2", "x3"], [1, 2, 3])
d["x2"] = 4
@test d["x1"] == 1
@test d["x2"] == 4
@test d["x3"] == 3
d = OrderedDictionary(["x1", "x2", "x3"], [1, 2, 3])
@test d[1th] == 1
@test d[2th] == 2
@test d[3th] == 3
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 82 | module OrdinalIndexing
include("one.jl")
include("ordinalsuffixedinteger.jl")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 329 | struct One <: Integer end
const 𝟏 = One()
Base.convert(type::Type{<:Number}, ::One) = one(type)
Base.promote_rule(type1::Type{One}, type2::Type{<:Number}) = type2
Base.:(*)(x::One, y::One) = 𝟏
# Needed for Julia 1.7.
Base.convert(::Type{One}, ::One) = One()
function Base.show(io::IO, ordinal::One)
return print(io, "𝟏")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3309 | struct OrdinalSuffixedInteger{T<:Integer} <: Integer
cardinal::T
function OrdinalSuffixedInteger{T}(cardinal::Integer) where {T<:Integer}
cardinal ≥ 0 || throw(ArgumentError("ordinal must be > 0"))
return new{T}(cardinal)
end
end
function OrdinalSuffixedInteger(cardinal::Integer)
return OrdinalSuffixedInteger{typeof(cardinal)}(cardinal)
end
function OrdinalSuffixedInteger{T}(ordinal::OrdinalSuffixedInteger) where {T<:Integer}
return OrdinalSuffixedInteger{T}(cardinal(ordinal))
end
cardinal(ordinal::OrdinalSuffixedInteger) = getfield(ordinal, :cardinal)
function cardinal_type(ordinal_type::Type{<:OrdinalSuffixedInteger})
return fieldtype(ordinal_type, :cardinal)
end
const th = OrdinalSuffixedInteger(𝟏)
const st = th
const nd = th
const rd = th
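# Illustration (consistent with test/test_ordinalindexing.jl): `1st`, `2nd`, `3rd` and `4th`
# all construct `OrdinalSuffixedInteger`s via juxtaposed multiplication, e.g.
# `2nd === OrdinalSuffixedInteger(2)`, and `(1:4)th` is a `UnitRange` of ordinals.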
function Base.widen(ordinal_type::Type{<:OrdinalSuffixedInteger})
return OrdinalSuffixedInteger{widen(cardinal_type(ordinal_type))}
end
Base.Int(ordinal::OrdinalSuffixedInteger) = Int(cardinal(ordinal))
function Base.:(*)(a::OrdinalSuffixedInteger, b::Integer)
return OrdinalSuffixedInteger(cardinal(a) * b)
end
function Base.:(*)(a::Integer, b::OrdinalSuffixedInteger)
return OrdinalSuffixedInteger(a * cardinal(b))
end
function Base.:(:)(
start::OrdinalSuffixedInteger{T}, stop::OrdinalSuffixedInteger{T}
) where {T<:Integer}
return UnitRange{OrdinalSuffixedInteger{T}}(start, stop)
end
function Base.:(*)(a::OrdinalSuffixedInteger, b::OrdinalSuffixedInteger)
return (cardinal(a) * cardinal(b)) * th
end
function Base.:(+)(a::OrdinalSuffixedInteger, b::OrdinalSuffixedInteger)
return (cardinal(a) + cardinal(b)) * th
end
function Base.:(+)(a::OrdinalSuffixedInteger, b::Integer)
return a + b * th
end
function Base.:(+)(a::Integer, b::OrdinalSuffixedInteger)
return a * th + b
end
function Base.:(-)(a::OrdinalSuffixedInteger, b::OrdinalSuffixedInteger)
return (cardinal(a) - cardinal(b)) * th
end
function Base.:(-)(a::OrdinalSuffixedInteger, b::Integer)
return a - b * th
end
function Base.:(-)(a::Integer, b::OrdinalSuffixedInteger)
return a * th - b
end
function Base.:(:)(a::Integer, b::OrdinalSuffixedInteger)
return (a * th):b
end
function Base.:(<)(a::OrdinalSuffixedInteger, b::OrdinalSuffixedInteger)
return (cardinal(a) < cardinal(b))
end
Base.:(<)(a::OrdinalSuffixedInteger, b::Integer) = (a < b * th)
Base.:(<)(a::Integer, b::OrdinalSuffixedInteger) = (a * th < b)
function Base.:(<=)(a::OrdinalSuffixedInteger, b::OrdinalSuffixedInteger)
return (cardinal(a) <= cardinal(b))
end
Base.:(<=)(a::OrdinalSuffixedInteger, b::Integer) = (a <= b * th)
Base.:(<=)(a::Integer, b::OrdinalSuffixedInteger) = (a * th <= b)
function Broadcast.broadcasted(
::Broadcast.DefaultArrayStyle{1},
::typeof(*),
r::UnitRange,
t::OrdinalSuffixedInteger{One},
)
return (first(r) * t):(last(r) * t)
end
function Broadcast.broadcasted(
::Broadcast.DefaultArrayStyle{1},
::typeof(*),
r::Base.OneTo,
t::OrdinalSuffixedInteger{One},
)
return Base.OneTo(last(r) * t)
end
function Base.show(io::IO, ordinal::OrdinalSuffixedInteger)
n = cardinal(ordinal)
m = n % 10
if m == 1
return print(io, n, n % 100 == 11 ? "th" : "st")
elseif m == 2
return print(io, n, n % 100 == 12 ? "th" : "nd")
elseif m == 3
return print(io, n, n % 100 == 13 ? "th" : "rd")
end
return print(io, n, "th")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1692 | @eval module $(gensym())
using NamedGraphs.OrdinalIndexing: One, 𝟏
using NamedGraphs.OrdinalIndexing: OrdinalSuffixedInteger, th
using Test: @test, @test_broken, @test_throws, @testset
@testset "OrdinalIndexing" begin
@testset "One" begin
@test One() === 𝟏
@test One() == 1
@test 𝟏 * 2 === 2
@test 2 * 𝟏 === 2
@test 2 + 𝟏 === 3
@test 𝟏 + 2 === 3
@test 2 - 𝟏 === 1
@test 𝟏 - 2 === -1
end
@testset "OrdinalSuffixedInteger" begin
@test th === OrdinalSuffixedInteger(𝟏)
@test 1th === OrdinalSuffixedInteger(1)
@test 2th === OrdinalSuffixedInteger(2)
@test_throws ArgumentError -1th
r = (2th):(4th)
@test r isa UnitRange{OrdinalSuffixedInteger{Int}}
@test r === (2:4)th
r = Base.OneTo(4th)
@test r isa Base.OneTo{OrdinalSuffixedInteger{Int}}
@test r === Base.OneTo(4)th
for r in ((1:4)th, Base.OneTo(4)th)
@test first(r) === 1th
@test step(r) === 1th
@test last(r) === 4th
@test length(r) === 4th
@test collect(r) == [1th, 2th, 3th, 4th]
end
@testset "$suffix1, $suffix2" for (suffix1, suffix2) in ((th, th), (th, 𝟏), (𝟏, th))
@test 2suffix1 + 3suffix2 === 5th
@test 4suffix1 - 2suffix2 === 2th
@test 2suffix1 * 3suffix2 === 6th
@test 2suffix1 < 3suffix2
@test !(2suffix1 < 2suffix2)
@test !(3suffix1 < 2suffix2)
@test !(2suffix1 > 3suffix2)
@test !(2suffix1 > 2suffix2)
@test 3suffix1 > 2suffix2
@test 2suffix1 <= 3suffix2
@test 2suffix1 <= 2suffix2
@test !(3suffix1 <= 2suffix2)
@test !(2suffix1 >= 3suffix2)
@test 2suffix1 >= 2suffix2
@test 3suffix1 >= 2suffix2
end
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 231 | module PartitionedGraphs
include("abstractpartitionvertex.jl")
include("abstractpartitionedge.jl")
include("abstractpartitionedgraph.jl")
include("partitionvertex.jl")
include("partitionedge.jl")
include("partitionedgraph.jl")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 435 | using Graphs: Graphs
using ..NamedGraphs: AbstractNamedEdge
abstract type AbstractPartitionEdge{V} <: AbstractNamedEdge{V} end
Base.parent(pe::AbstractPartitionEdge) = not_implemented()
Graphs.src(pe::AbstractPartitionEdge) = not_implemented()
Graphs.dst(pe::AbstractPartitionEdge) = not_implemented()
Base.reverse(pe::AbstractPartitionEdge) = not_implemented()
#Don't have the vertices wrapped. But wrap them with source and edge.
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 6626 | using Graphs:
Graphs,
AbstractEdge,
add_vertex!,
dst,
edgetype,
has_vertex,
is_directed,
rem_vertex!,
src,
vertices
using ..NamedGraphs: NamedGraphs, AbstractNamedGraph
using ..NamedGraphs.GraphsExtensions:
GraphsExtensions, add_vertices!, not_implemented, rem_vertices!
abstract type AbstractPartitionedGraph{V,PV} <: AbstractNamedGraph{V} end
#Needed for interface
partitioned_graph(pg::AbstractPartitionedGraph) = not_implemented()
unpartitioned_graph(pg::AbstractPartitionedGraph) = not_implemented()
function unpartitioned_graph_type(pg::Type{<:AbstractPartitionedGraph})
return not_implemented()
end
partitionvertex(pg::AbstractPartitionedGraph, vertex) = not_implemented()
partitionvertices(pg::AbstractPartitionedGraph, verts) = not_implemented()
partitionvertices(pg::AbstractPartitionedGraph) = not_implemented()
Base.copy(pg::AbstractPartitionedGraph) = not_implemented()
delete_from_vertex_map!(pg::AbstractPartitionedGraph, vertex) = not_implemented()
insert_to_vertex_map!(pg::AbstractPartitionedGraph, vertex) = not_implemented()
partitionedge(pg::AbstractPartitionedGraph, edge) = not_implemented()
partitionedges(pg::AbstractPartitionedGraph, edges) = not_implemented()
partitionedges(pg::AbstractPartitionedGraph) = not_implemented()
function unpartitioned_graph_type(pg::AbstractPartitionedGraph)
return typeof(unpartitioned_graph(pg))
end
function Graphs.edges(pg::AbstractPartitionedGraph, partitionedge::AbstractPartitionEdge)
return not_implemented()
end
function Graphs.vertices(pg::AbstractPartitionedGraph, pv::AbstractPartitionVertex)
return not_implemented()
end
function Graphs.vertices(
pg::AbstractPartitionedGraph, partitionverts::Vector{V}
) where {V<:AbstractPartitionVertex}
return not_implemented()
end
function GraphsExtensions.directed_graph_type(PG::Type{<:AbstractPartitionedGraph})
return not_implemented()
end
function GraphsExtensions.undirected_graph_type(PG::Type{<:AbstractPartitionedGraph})
return not_implemented()
end
# AbstractGraph interface.
function Graphs.is_directed(graph_type::Type{<:AbstractPartitionedGraph})
return is_directed(unpartitioned_graph_type(graph_type))
end
#Functions for the abstract type
Graphs.vertices(pg::AbstractPartitionedGraph) = vertices(unpartitioned_graph(pg))
function NamedGraphs.position_graph(pg::AbstractPartitionedGraph)
return NamedGraphs.position_graph(unpartitioned_graph(pg))
end
function NamedGraphs.vertex_positions(pg::AbstractPartitionedGraph)
return NamedGraphs.vertex_positions(unpartitioned_graph(pg))
end
function NamedGraphs.ordered_vertices(pg::AbstractPartitionedGraph)
return NamedGraphs.ordered_vertices(unpartitioned_graph(pg))
end
Graphs.edgetype(pg::AbstractPartitionedGraph) = edgetype(unpartitioned_graph(pg))
function Graphs.nv(pg::AbstractPartitionedGraph, pv::AbstractPartitionVertex)
return length(vertices(pg, pv))
end
function Graphs.has_vertex(
pg::AbstractPartitionedGraph, partitionvertex::AbstractPartitionVertex
)
return has_vertex(partitioned_graph(pg), parent(partitionvertex))
end
function Graphs.has_edge(pg::AbstractPartitionedGraph, partitionedge::AbstractPartitionEdge)
return has_edge(partitioned_graph(pg), parent(partitionedge))
end
function is_boundary_edge(pg::AbstractPartitionedGraph, edge::AbstractEdge)
p_edge = partitionedge(pg, edge)
return src(p_edge) == dst(p_edge)
end
function Graphs.add_edge!(pg::AbstractPartitionedGraph, edge::AbstractEdge)
add_edge!(unpartitioned_graph(pg), edge)
pg_edge = parent(partitionedge(pg, edge))
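# Only mirror the edge in the partitioned graph when it crosses two different partitions.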
if src(pg_edge) != dst(pg_edge)
add_edge!(partitioned_graph(pg), pg_edge)
end
return pg
end
function Graphs.rem_edge!(pg::AbstractPartitionedGraph, edge::AbstractEdge)
pg_edge = partitionedge(pg, edge)
if has_edge(partitioned_graph(pg), pg_edge)
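# Drop the partition-level edge only if this is the last unpartitioned edge backing it.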
g_edges = edges(pg, pg_edge)
if length(g_edges) == 1
rem_edge!(partitioned_graph(pg), pg_edge)
end
end
return rem_edge!(unpartitioned_graph(pg), edge)
end
function Graphs.rem_edge!(
pg::AbstractPartitionedGraph, partitionedge::AbstractPartitionEdge
)
return rem_edges!(pg, edges(pg, parent(partitionedge)))
end
#Vertex addition and removal. I think it's important not to allow addition of a vertex without specification of PV
function Graphs.add_vertex!(
pg::AbstractPartitionedGraph, vertex, partitionvertex::AbstractPartitionVertex
)
add_vertex!(unpartitioned_graph(pg), vertex)
add_vertex!(partitioned_graph(pg), parent(partitionvertex))
insert_to_vertex_map!(pg, vertex, partitionvertex)
return pg
end
function GraphsExtensions.add_vertices!(
pg::AbstractPartitionedGraph,
vertices::Vector,
partitionvertices::Vector{<:AbstractPartitionVertex},
)
@assert length(vertices) == length(partitionvertices)
for (v, pv) in zip(vertices, partitionvertices)
add_vertex!(pg, v, pv)
end
return pg
end
function GraphsExtensions.add_vertices!(
pg::AbstractPartitionedGraph, vertices::Vector, partitionvertex::AbstractPartitionVertex
)
add_vertices!(pg, vertices, fill(partitionvertex, length(vertices)))
return pg
end
function Graphs.rem_vertex!(pg::AbstractPartitionedGraph, vertex)
pv = partitionvertex(pg, vertex)
delete_from_vertex_map!(pg, pv, vertex)
rem_vertex!(unpartitioned_graph(pg), vertex)
if !haskey(partitioned_vertices(pg), parent(pv))
rem_vertex!(partitioned_graph(pg), parent(pv))
end
return pg
end
function Graphs.rem_vertex!(
pg::AbstractPartitionedGraph, partitionvertex::AbstractPartitionVertex
)
rem_vertices!(pg, vertices(pg, partitionvertex))
return pg
end
function GraphsExtensions.rem_vertex(
pg::AbstractPartitionedGraph, partitionvertex::AbstractPartitionVertex
)
pg_new = copy(pg)
rem_vertex!(pg_new, partitionvertex)
return pg_new
end
function Graphs.add_vertex!(pg::AbstractPartitionedGraph, vertex)
return error("Need to specify a partition where the new vertex will go.")
end
function Base.:(==)(pg1::AbstractPartitionedGraph, pg2::AbstractPartitionedGraph)
if unpartitioned_graph(pg1) != unpartitioned_graph(pg2) ||
partitioned_graph(pg1) != partitioned_graph(pg2)
return false
end
for v in vertices(pg1)
if partitionvertex(pg1, v) != partitionvertex(pg2, v)
return false
end
end
return true
end
function GraphsExtensions.subgraph(
pg::AbstractPartitionedGraph, partitionvertex::AbstractPartitionVertex
)
return first(induced_subgraph(unpartitioned_graph(pg), vertices(pg, [partitionvertex])))
end
function Graphs.induced_subgraph(
pg::AbstractPartitionedGraph, partitionvertex::AbstractPartitionVertex
)
return subgraph(pg, partitionvertex), nothing
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 155 | abstract type AbstractPartitionVertex{V} <: Any where {V} end
#Parent, wrap, unwrap, vertex?
Base.parent(pv::AbstractPartitionVertex) = not_implemented()
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 511 | using Graphs: Graphs, AbstractEdge, dst, src
struct PartitionEdge{V,E<:AbstractEdge{V}} <: AbstractPartitionEdge{V}
edge::E
end
Base.parent(pe::PartitionEdge) = getfield(pe, :edge)
Graphs.src(pe::PartitionEdge) = PartitionVertex(src(parent(pe)))
Graphs.dst(pe::PartitionEdge) = PartitionVertex(dst(parent(pe)))
PartitionEdge(p::Pair) = PartitionEdge(NamedEdge(first(p) => last(p)))
PartitionEdge(vsrc, vdst) = PartitionEdge(vsrc => vdst)
Base.reverse(pe::PartitionEdge) = PartitionEdge(reverse(parent(pe)))
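# Sketch: `PartitionEdge("a" => "b")` wraps a `NamedEdge("a" => "b")` (from the parent
# `NamedGraphs` module), and `src`/`dst` return `PartitionVertex("a")` and `PartitionVertex("b")`.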
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 6452 | using Dictionaries: Dictionary
using Graphs:
AbstractEdge, AbstractGraph, add_edge!, edges, has_edge, induced_subgraph, vertices
using .GraphsExtensions:
GraphsExtensions, boundary_edges, is_self_loop, partitioned_vertices
using ..NamedGraphs: NamedEdge, NamedGraph
# TODO: Parametrize `partitioned_vertices` and `which_partition`,
# see https://github.com/mtfishman/NamedGraphs.jl/issues/63.
struct PartitionedGraph{V,PV,G<:AbstractGraph{V},PG<:AbstractGraph{PV}} <:
AbstractPartitionedGraph{V,PV}
graph::G
partitioned_graph::PG
partitioned_vertices::Dictionary
which_partition::Dictionary
end
##Constructors.
function PartitionedGraph(g::AbstractGraph, partitioned_vertices)
pvs = keys(partitioned_vertices)
pg = NamedGraph(pvs)
# TODO: Make this type more specific.
which_partition = Dictionary()
for v in vertices(g)
v_pvs = Set(findall(pv -> v ∈ partitioned_vertices[pv], pvs))
@assert length(v_pvs) == 1
insert!(which_partition, v, first(v_pvs))
end
for e in edges(g)
pv_src, pv_dst = which_partition[src(e)], which_partition[dst(e)]
pe = NamedEdge(pv_src => pv_dst)
if pv_src != pv_dst && !has_edge(pg, pe)
add_edge!(pg, pe)
end
end
return PartitionedGraph(g, pg, Dictionary(partitioned_vertices), which_partition)
end
function PartitionedGraph(partitioned_vertices)
return PartitionedGraph(NamedGraph(keys(partitioned_vertices)), partitioned_vertices)
end
function PartitionedGraph(g::AbstractGraph; kwargs...)
partitioned_verts = partitioned_vertices(g; kwargs...)
return PartitionedGraph(g, partitioned_verts)
end
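# Construction sketch (the partition layout here is arbitrary, chosen for illustration):
#   g = NamedGraph(path_graph(4))               # vertices 1, 2, 3, 4
#   pg = PartitionedGraph(g, [[1, 2], [3, 4]])  # partition vertices are keyed 1 and 2
#   vertices(pg, PartitionVertex(1))            # [1, 2]
#   partitionedges(pg)                          # a single PartitionEdge 1 => 2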
#Needed for interface
partitioned_graph(pg::PartitionedGraph) = getfield(pg, :partitioned_graph)
unpartitioned_graph(pg::PartitionedGraph) = getfield(pg, :graph)
function unpartitioned_graph_type(graph_type::Type{<:PartitionedGraph})
return fieldtype(graph_type, :graph)
end
function GraphsExtensions.partitioned_vertices(pg::PartitionedGraph)
return getfield(pg, :partitioned_vertices)
end
which_partition(pg::PartitionedGraph) = getfield(pg, :which_partition)
function Graphs.vertices(pg::PartitionedGraph, partitionvert::PartitionVertex)
return partitioned_vertices(pg)[parent(partitionvert)]
end
function Graphs.vertices(pg::PartitionedGraph, partitionverts::Vector{<:PartitionVertex})
return unique(reduce(vcat, Iterators.map(pv -> vertices(pg, pv), partitionverts)))
end
function partitionvertex(pg::PartitionedGraph, vertex)
return PartitionVertex(which_partition(pg)[vertex])
end
function partitionvertices(pg::PartitionedGraph, verts)
return unique(partitionvertex(pg, v) for v in verts)
end
function partitionvertices(pg::PartitionedGraph)
return PartitionVertex.(vertices(partitioned_graph(pg)))
end
function partitionedge(pg::PartitionedGraph, edge::AbstractEdge)
return PartitionEdge(
parent(partitionvertex(pg, src(edge))) => parent(partitionvertex(pg, dst(edge)))
)
end
partitionedge(pg::PartitionedGraph, p::Pair) = partitionedge(pg, edgetype(pg)(p))
function partitionedges(pg::PartitionedGraph, edges::Vector)
return filter(!is_self_loop, unique([partitionedge(pg, e) for e in edges]))
end
function partitionedges(pg::PartitionedGraph)
return PartitionEdge.(edges(partitioned_graph(pg)))
end
function Graphs.edges(pg::PartitionedGraph, partitionedge::PartitionEdge)
psrc_vs = vertices(pg, PartitionVertex(src(partitionedge)))
pdst_vs = vertices(pg, PartitionVertex(dst(partitionedge)))
psrc_subgraph = subgraph(unpartitioned_graph(pg), psrc_vs)
pdst_subgraph = subgraph(pg, pdst_vs)
full_subgraph = subgraph(pg, vcat(psrc_vs, pdst_vs))
return setdiff(edges(full_subgraph), vcat(edges(psrc_subgraph), edges(pdst_subgraph)))
end
function Graphs.edges(pg::PartitionedGraph, partitionedges::Vector{<:PartitionEdge})
return unique(reduce(vcat, [edges(pg, pe) for pe in partitionedges]))
end
function boundary_partitionedges(pg::PartitionedGraph, partitionvertices; kwargs...)
return PartitionEdge.(
boundary_edges(partitioned_graph(pg), parent.(partitionvertices); kwargs...)
)
end
function boundary_partitionedges(
pg::PartitionedGraph, partitionvertex::PartitionVertex; kwargs...
)
return boundary_partitionedges(pg, [partitionvertex]; kwargs...)
end
function Base.copy(pg::PartitionedGraph)
return PartitionedGraph(
copy(unpartitioned_graph(pg)),
copy(partitioned_graph(pg)),
copy(partitioned_vertices(pg)),
copy(which_partition(pg)),
)
end
function insert_to_vertex_map!(
pg::PartitionedGraph, vertex, partitionvertex::PartitionVertex
)
pv = parent(partitionvertex)
if pv ∉ keys(partitioned_vertices(pg))
insert!(partitioned_vertices(pg), pv, [vertex])
else
partitioned_vertices(pg)[pv] = unique(vcat(vertices(pg, partitionvertex), [vertex]))
end
insert!(which_partition(pg), vertex, pv)
return pg
end
function delete_from_vertex_map!(pg::PartitionedGraph, vertex)
pv = partitionvertex(pg, vertex)
return delete_from_vertex_map!(pg, pv, vertex)
end
function delete_from_vertex_map!(
pg::PartitionedGraph, partitioned_vertex::PartitionVertex, vertex
)
vs = vertices(pg, partitioned_vertex)
delete!(partitioned_vertices(pg), parent(partitioned_vertex))
if length(vs) != 1
insert!(partitioned_vertices(pg), parent(partitioned_vertex), setdiff(vs, [vertex]))
end
delete!(which_partition(pg), vertex)
return partitioned_vertex
end
### PartitionedGraph Specific Functions
function partitionedgraph_induced_subgraph(pg::PartitionedGraph, vertices::Vector)
sub_pg_graph, _ = induced_subgraph(unpartitioned_graph(pg), vertices)
sub_partitioned_vertices = copy(partitioned_vertices(pg))
for pv in NamedGraphs.vertices(partitioned_graph(pg))
vs = intersect(vertices, sub_partitioned_vertices[pv])
if !isempty(vs)
sub_partitioned_vertices[pv] = vs
else
delete!(sub_partitioned_vertices, pv)
end
end
return PartitionedGraph(sub_pg_graph, sub_partitioned_vertices), nothing
end
function partitionedgraph_induced_subgraph(
pg::PartitionedGraph, partitionverts::Vector{<:PartitionVertex}
)
return induced_subgraph(pg, vertices(pg, partitionverts))
end
function Graphs.induced_subgraph(pg::PartitionedGraph, vertices)
return partitionedgraph_induced_subgraph(pg, vertices)
end
# Fixes ambiguity error with `Graphs.jl`.
function Graphs.induced_subgraph(pg::PartitionedGraph, vertices::Vector{<:Integer})
return partitionedgraph_induced_subgraph(pg, vertices)
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 130 | struct PartitionVertex{V} <: AbstractPartitionVertex{V}
vertex::V
end
Base.parent(pv::PartitionVertex) = getfield(pv, :vertex)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 107 | module SimilarType
similar_type(object) = similar_type(typeof(object))
similar_type(type::Type) = type
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 377 | @eval module $(gensym())
using Test: @testset
test_path = joinpath(@__DIR__)
test_files = filter(
file -> startswith(file, "test_") && endswith(file, ".jl"), readdir(test_path)
)
@testset "NamedGraphs.jl" begin
@testset "$(file)" for file in test_files
file_path = joinpath(test_path, file)
println("Running test $(file_path)")
include(file_path)
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3819 | @eval module $(gensym())
using Graphs: binary_tree, dfs_tree, edgetype, grid, path_graph
using NamedGraphs.GraphGenerators: comb_tree
using NamedGraphs.GraphsExtensions:
is_leaf_vertex,
is_path_graph,
edge_path,
leaf_vertices,
post_order_dfs_vertices,
pre_order_dfs_vertices,
vertex_path
using NamedGraphs.NamedGraphGenerators:
named_binary_tree, named_comb_tree, named_grid, named_path_graph
using Test: @test, @testset
@testset "Tree graph paths" begin
# undirected trees
g1 = comb_tree((3, 2))
et1 = edgetype(g1)
@test vertex_path(g1, 4, 5) == [4, 1, 2, 5]
@test edge_path(g1, 4, 5) == [et1(4, 1), et1(1, 2), et1(2, 5)]
@test vertex_path(g1, 6, 1) == [6, 3, 2, 1]
@test edge_path(g1, 6, 1) == [et1(6, 3), et1(3, 2), et1(2, 1)]
@test vertex_path(g1, 2, 2) == [2]
@test edge_path(g1, 2, 2) == et1[]
ng1 = named_comb_tree((3, 2))
net1 = edgetype(ng1)
@test vertex_path(ng1, (1, 2), (2, 2)) == [(1, 2), (1, 1), (2, 1), (2, 2)]
@test edge_path(ng1, (1, 2), (2, 2)) ==
[net1((1, 2), (1, 1)), net1((1, 1), (2, 1)), net1((2, 1), (2, 2))]
@test vertex_path(ng1, (3, 2), (1, 1)) == [(3, 2), (3, 1), (2, 1), (1, 1)]
@test edge_path(ng1, (3, 2), (1, 1)) ==
[net1((3, 2), (3, 1)), net1((3, 1), (2, 1)), net1((2, 1), (1, 1))]
@test vertex_path(ng1, (1, 2), (1, 2)) == [(1, 2)]
@test edge_path(ng1, (1, 2), (1, 2)) == net1[]
g2 = binary_tree(3)
et2 = edgetype(g2)
@test vertex_path(g2, 2, 6) == [2, 1, 3, 6]
@test edge_path(g2, 2, 6) == [et2(2, 1), et2(1, 3), et2(3, 6)]
@test vertex_path(g2, 5, 4) == [5, 2, 4]
@test edge_path(g2, 5, 4) == [et2(5, 2), et2(2, 4)]
ng2 = named_binary_tree(3)
net2 = edgetype(ng2)
@test vertex_path(ng2, (1, 1), (1, 2, 1)) == [(1, 1), (1,), (1, 2), (1, 2, 1)]
@test edge_path(ng2, (1, 1), (1, 2, 1)) ==
[net2((1, 1), (1,)), net2((1,), (1, 2)), net2((1, 2), (1, 2, 1))]
@test vertex_path(ng2, (1, 1, 2), (1, 1, 1)) == [(1, 1, 2), (1, 1), (1, 1, 1)]
@test edge_path(ng2, (1, 1, 2), (1, 1, 1)) ==
[net2((1, 1, 2), (1, 1)), net2((1, 1), (1, 1, 1))]
# Test DFS traversals
@test post_order_dfs_vertices(ng2, (1,)) ==
[(1, 1, 1), (1, 1, 2), (1, 1), (1, 2, 1), (1, 2, 2), (1, 2), (1,)]
@test pre_order_dfs_vertices(ng2, (1,)) ==
[(1,), (1, 1), (1, 1, 1), (1, 1, 2), (1, 2), (1, 2, 1), (1, 2, 2)]
# directed trees
dg1 = dfs_tree(g1, 5)
# same behavior if path exists
@test vertex_path(dg1, 4, 5) == [4, 1, 2, 5]
@test edge_path(dg1, 4, 5) == [et1(4, 1), et1(1, 2), et1(2, 5)]
# returns nothing if path does not exist
@test isnothing(vertex_path(dg1, 4, 6))
@test isnothing(edge_path(dg1, 4, 6))
dng1 = dfs_tree(ng1, (2, 2))
@test vertex_path(dng1, (1, 2), (2, 2)) == [(1, 2), (1, 1), (2, 1), (2, 2)]
@test edge_path(dng1, (1, 2), (2, 2)) ==
[net1((1, 2), (1, 1)), net1((1, 1), (2, 1)), net1((2, 1), (2, 2))]
@test isnothing(vertex_path(dng1, (1, 2), (3, 2)))
@test isnothing(edge_path(dng1, (1, 2), (3, 2)))
@test is_path_graph(path_graph(4))
@test is_path_graph(named_path_graph(4))
@test is_path_graph(grid((3,)))
@test is_path_graph(named_grid((3,)))
@test !is_path_graph(grid((3, 3)))
@test !is_path_graph(named_grid((3, 3)))
end
@testset "Tree graph leaf vertices" begin
# undirected trees
g = comb_tree((3, 2))
@test is_leaf_vertex(g, 4)
@test !is_leaf_vertex(g, 1)
@test issetequal(leaf_vertices(g), [4, 5, 6])
ng = named_comb_tree((3, 2))
@test is_leaf_vertex(ng, (1, 2))
@test is_leaf_vertex(ng, (2, 2))
@test !is_leaf_vertex(ng, (1, 1))
@test issetequal(leaf_vertices(ng), [(1, 2), (2, 2), (3, 2)])
# directed trees
dng = dfs_tree(ng, (2, 2))
@test is_leaf_vertex(dng, (1, 2))
@test !is_leaf_vertex(dng, (2, 2))
@test !is_leaf_vertex(dng, (1, 1))
@test issetequal(leaf_vertices(dng), [(1, 2), (3, 2)])
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 5958 | @eval module $(gensym())
using Dictionaries: Dictionary
using Graphs:
DiGraph, Graph, a_star, add_edge!, edges, grid, has_edge, has_vertex, rem_edge!, vertices
using NamedGraphs: NamedGraphs, NamedDiGraph, NamedGraph
using NamedGraphs.GraphsExtensions: rename_vertices
using NamedGraphs.NamedGraphGenerators: named_grid
using Test: @test, @testset
@testset "AbstractNamedGraph equality" begin
# NamedGraph
g = grid((2, 2))
vs = ["A", "B", "C", "D"]
ng1 = NamedGraph(g, vs)
# construct same NamedGraph with different underlying structure
ng2 = NamedGraph(Graph(4), vs[[1, 4, 3, 2]])
add_edge!(ng2, "A" => "B")
add_edge!(ng2, "A" => "C")
add_edge!(ng2, "B" => "D")
add_edge!(ng2, "C" => "D")
@test NamedGraphs.position_graph(ng1) != NamedGraphs.position_graph(ng2)
@test ng1 == ng2
rem_edge!(ng2, "B" => "A")
@test ng1 != ng2
# NamedGraph with tuple vertex names
dvs = [("X", 1), ("X", 2), ("Y", 1), ("Y", 2)]
ndg1 = NamedGraph(g, dvs)
# construct same NamedGraph from different underlying structure
ndg2 = NamedGraph(Graph(4), dvs[[1, 4, 3, 2]])
add_edge!(ndg2, ("X", 1) => ("X", 2))
add_edge!(ndg2, ("X", 1) => ("Y", 1))
add_edge!(ndg2, ("X", 2) => ("Y", 2))
add_edge!(ndg2, ("Y", 1) => ("Y", 2))
@test NamedGraphs.position_graph(ndg1) != NamedGraphs.position_graph(ndg2)
@test ndg1 == ndg2
rem_edge!(ndg2, ("Y", 1) => ("X", 1))
@test ndg1 != ndg2
# NamedDiGraph
nddg1 = NamedDiGraph(DiGraph(collect(edges(g))), dvs)
# construct same NamedDiGraph from different underlying structure
nddg2 = NamedDiGraph(DiGraph(4), dvs[[1, 4, 3, 2]])
add_edge!(nddg2, ("X", 1) => ("X", 2))
add_edge!(nddg2, ("X", 1) => ("Y", 1))
add_edge!(nddg2, ("X", 2) => ("Y", 2))
add_edge!(nddg2, ("Y", 1) => ("Y", 2))
@test NamedGraphs.position_graph(nddg1) != NamedGraphs.position_graph(nddg2)
@test nddg1 == nddg2
rem_edge!(nddg2, ("X", 1) => ("Y", 1))
add_edge!(nddg2, ("Y", 1) => ("X", 1))
@test nddg1 != nddg2
end
@testset "AbstractNamedGraph vertex renaming" begin
g = grid((2, 2))
integer_names = collect(1:4)
string_names = ["A", "B", "C", "D"]
tuple_names = [("X", 1), ("X", 2), ("Y", 1), ("Y", 2)]
function_name = x -> reverse(x)
# NamedGraph
ng = NamedGraph(g, string_names)
# rename to integers
vmap_int = Dictionary(vertices(ng), integer_names)
ng_int = rename_vertices(v -> vmap_int[v], ng)
@test isa(ng_int, NamedGraph{Int})
@test has_vertex(ng_int, 3)
@test has_edge(ng_int, 1 => 2)
@test has_edge(ng_int, 2 => 4)
# rename to tuples
vmap_tuple = Dictionary(vertices(ng), tuple_names)
ng_tuple = rename_vertices(v -> vmap_tuple[v], ng)
@test isa(ng_tuple, NamedGraph{Tuple{String,Int}})
@test has_vertex(ng_tuple, ("X", 1))
@test has_edge(ng_tuple, ("X", 1) => ("X", 2))
@test has_edge(ng_tuple, ("X", 2) => ("Y", 2))
# rename with name map function
ng_function = rename_vertices(function_name, ng_tuple)
@test isa(ng_function, NamedGraph{Tuple{Int,String}})
@test has_vertex(ng_function, (1, "X"))
@test has_edge(ng_function, (1, "X") => (2, "X"))
@test has_edge(ng_function, (2, "X") => (2, "Y"))
# NamedGraph from named_grid
ndg = named_grid((2, 2))
# rename to integers
vmap_int = Dictionary(vertices(ndg), integer_names)
ndg_int = rename_vertices(v -> vmap_int[v], ndg)
@test isa(ndg_int, NamedGraph{Int})
@test has_vertex(ndg_int, 1)
@test has_edge(ndg_int, 1 => 2)
@test has_edge(ndg_int, 2 => 4)
@test length(a_star(ndg_int, 1, 4)) == 2
# rename to strings
vmap_string = Dictionary(vertices(ndg), string_names)
ndg_string = rename_vertices(v -> vmap_string[v], ndg)
@test isa(ndg_string, NamedGraph{String})
@test has_vertex(ndg_string, "A")
@test has_edge(ndg_string, "A" => "B")
@test has_edge(ndg_string, "B" => "D")
@test length(a_star(ndg_string, "A", "D")) == 2
# rename to tuples
vmap_tuple = Dictionary(vertices(ndg), tuple_names)
ndg_tuple = rename_vertices(v -> vmap_tuple[v], ndg)
@test isa(ndg_tuple, NamedGraph{Tuple{String,Int}})
@test has_vertex(ndg_tuple, ("X", 1))
@test has_edge(ndg_tuple, ("X", 1) => ("X", 2))
@test has_edge(ndg_tuple, ("X", 2) => ("Y", 2))
@test length(a_star(ndg_tuple, ("X", 1), ("Y", 2))) == 2
# rename with name map function
ndg_function = rename_vertices(function_name, ndg_tuple)
@test isa(ndg_function, NamedGraph{Tuple{Int,String}})
@test has_vertex(ndg_function, (1, "X"))
@test has_edge(ndg_function, (1, "X") => (2, "X"))
@test has_edge(ndg_function, (2, "X") => (2, "Y"))
@test length(a_star(ndg_function, (1, "X"), (2, "Y"))) == 2
# NamedDiGraph
nddg = NamedDiGraph(DiGraph(collect(edges(g))), vertices(ndg))
# rename to integers
vmap_int = Dictionary(vertices(nddg), integer_names)
nddg_int = rename_vertices(v -> vmap_int[v], nddg)
@test isa(nddg_int, NamedDiGraph{Int})
@test has_vertex(nddg_int, 1)
@test has_edge(nddg_int, 1 => 2)
@test has_edge(nddg_int, 2 => 4)
# rename to strings
vmap_string = Dictionary(vertices(nddg), string_names)
nddg_string = rename_vertices(v -> vmap_string[v], nddg)
@test isa(nddg_string, NamedDiGraph{String})
@test has_vertex(nddg_string, "A")
@test has_edge(nddg_string, "A" => "B")
@test has_edge(nddg_string, "B" => "D")
@test !has_edge(nddg_string, "D" => "B")
# rename to tuples
vmap_tuple = Dictionary(vertices(nddg), tuple_names)
nddg_tuple = rename_vertices(v -> vmap_tuple[v], nddg)
@test isa(nddg_tuple, NamedDiGraph{Tuple{String,Int}})
@test has_vertex(nddg_tuple, ("X", 1))
@test has_edge(nddg_tuple, ("X", 1) => ("X", 2))
@test !has_edge(nddg_tuple, ("Y", 2) => ("X", 2))
# rename with name map function
nddg_function = rename_vertices(function_name, nddg_tuple)
@test isa(nddg_function, NamedDiGraph{Tuple{Int,String}})
@test has_vertex(nddg_function, (1, "X"))
@test has_edge(nddg_function, (1, "X") => (2, "X"))
@test has_edge(nddg_function, (2, "X") => (2, "Y"))
@test !has_edge(nddg_function, (2, "Y") => (2, "X"))
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 609 | @eval module $(gensym())
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: add_edges!, rem_edges!
using NamedGraphs.NamedGraphGenerators: named_grid
using Graphs: has_edge, is_connected
using Test: @test, @testset
@testset "Adding and Removing Edge Lists" begin
g = named_grid((2, 2))
rem_edges!(g, [(1, 1) => (1, 2), (1, 1) => (2, 1)])
@test !has_edge(g, (1, 1) => (1, 2))
@test !has_edge(g, (1, 1) => (2, 1))
@test has_edge(g, (1, 2) => (2, 2))
n = 10
g = NamedGraph([(i,) for i in 1:n])
add_edges!(g, [(i,) => (i + 1,) for i in 1:(n - 1)])
@test is_connected(g)
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 371 | @eval module $(gensym())
using NamedGraphs: NamedGraphs
using Suppressor: @suppress
using Test: @test, @testset
filenames = filter(endswith(".jl"), readdir(joinpath(pkgdir(NamedGraphs), "examples")))
@testset "Run examples: $filename" for filename in filenames
@test Returns(true)(
@suppress include(joinpath(pkgdir(NamedGraphs), "examples", filename))
)
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1990 | @eval module $(gensym())
using Graphs: a_star, edges, vertices
using NamedGraphs.GraphsExtensions: decorate_graph_edges, decorate_graph_vertices
using NamedGraphs.NamedGraphGenerators: named_grid, named_hexagonal_lattice_graph
using Test: @test, @testset
@testset "Decorated Graphs" begin
L = 4
g_2d = named_grid((L, L))
#Lieb lattice (loops are size 8)
g_2d_Lieb = decorate_graph_edges(g_2d)
#Heavier Lieb lattice (loops are size 16)
g_2d_Lieb_heavy = decorate_graph_edges(g_2d; edge_map=e -> named_grid((3,)))
#Another way to make the above graph
g_2d_Lieb_heavy_alt = decorate_graph_edges(g_2d_Lieb)
#Test they are the same (FUTURE: better way to test if two graphs are same with different vertex names - their adjacency matrices should be related by a permutation matrix)
@test length(vertices(g_2d_Lieb_heavy)) == length(vertices(g_2d_Lieb_heavy_alt))
@test length(edges(g_2d_Lieb_heavy)) == length(edges(g_2d_Lieb_heavy_alt))
#Test right number of edges
@test length(edges(g_2d_Lieb)) == 2 * length(edges(g_2d))
@test length(edges(g_2d_Lieb_heavy)) == 4 * length(edges(g_2d))
#Test new distances
@test length(a_star(g_2d, (1, 1), (2, 2))) == 2
@test length(a_star(g_2d_Lieb, (1, 1), (2, 2))) == 4
@test length(a_star(g_2d_Lieb_heavy, (1, 1), (2, 2))) == 8
#Create Hexagon (loops are size 6)
g_hexagon = named_hexagonal_lattice_graph(3, 6)
#Create Heavy Hexagon (loops are size 12)
g_heavy_hexagon = decorate_graph_edges(g_hexagon)
#Test heavy hexagon properties
@test length(vertices(g_heavy_hexagon)) == 125
@test length(a_star(g_hexagon, (1, 1), (2, 3))) == 3
@test length(a_star(g_heavy_hexagon, (1, 1), (2, 3))) == 6
#Create a comb
g_1d = named_grid((L, 1))
g_comb = decorate_graph_vertices(g_1d; vertex_map=v -> named_grid((5,)))
@test length(vertices(g_comb)) == 5 * length(vertices(g_1d))
@test length(a_star(g_1d, (1, 1), (L, 1))) ==
length(a_star(g_comb, ((1,), (1, 1)), ((1,), (L, 1))))
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 623 | @eval module $(gensym())
using Dictionaries: Dictionary
using NamedGraphs.Keys: Key
using Test: @test, @test_throws, @testset
@testset "Tree Base extensions" begin
@testset "Test Key indexing" begin
@test Key(1, 2) == Key((1, 2))
A = randn(2, 2)
@test A[1, 2] == A[Key(CartesianIndex(1, 2))]
@test A[2] == A[Key(2)]
@test_throws ErrorException A[Key(1, 2)]
A = randn(4)
@test A[2] == A[Key(2)]
@test A[2] == A[Key(CartesianIndex(2))]
A = Dict("X" => 2, "Y" => 3)
@test A["X"] == A[Key("X")]
A = Dictionary(["X", "Y"], [1, 2])
@test A["X"] == A[Key("X")]
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 440 | @eval module $(gensym())
using NamedGraphs: NamedGraphs
using Test: @testset
libs = [
#:GraphGenerators,
:GraphsExtensions,
#:Keys,
#:NamedGraphGenerators,
:OrderedDictionaries,
:OrdinalIndexing,
#:PartitionedGraphs,
#:SimilarType,
]
@testset "Test lib $lib" for lib in libs
path = joinpath(pkgdir(NamedGraphs), "src", "lib", String(lib), "test", "runtests.jl")
println("Runnint lib test $path")
include(path)
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3249 | @eval module $(gensym())
using Graphs: add_edge!, add_vertex!, grid, has_edge, has_vertex, ne, nv
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: ⊔, disjoint_union, subgraph
using Test: @test, @testset
@testset "NamedGraph" begin
position_graph = grid((2, 2))
vertices = [("X", 1), ("X", 2), ("Y", 1), ("Y", 2)]
g = NamedGraph(position_graph, vertices)
@test has_vertex(g, ("X", 1))
@test has_edge(g, ("X", 1) => ("X", 2))
@test !has_edge(g, ("X", 2) => ("Y", 1))
@test has_edge(g, ("X", 2) => ("Y", 2))
io = IOBuffer()
show(io, "text/plain", g)
@test String(take!(io)) isa String
g_sub = subgraph(g, [("X", 1)])
@test has_vertex(g_sub, ("X", 1))
@test !has_vertex(g_sub, ("X", 2))
@test !has_vertex(g_sub, ("Y", 1))
@test !has_vertex(g_sub, ("Y", 2))
g_sub = subgraph(g, [("X", 1), ("X", 2)])
@test has_vertex(g_sub, ("X", 1))
@test has_vertex(g_sub, ("X", 2))
@test !has_vertex(g_sub, ("Y", 1))
@test !has_vertex(g_sub, ("Y", 2))
# g_sub = g["X", :]
g_sub = subgraph(v -> v[1] == "X", g)
@test has_vertex(g_sub, ("X", 1))
@test has_vertex(g_sub, ("X", 2))
@test !has_vertex(g_sub, ("Y", 1))
@test !has_vertex(g_sub, ("Y", 2))
@test has_edge(g_sub, ("X", 1) => ("X", 2))
# g_sub = g[:, 2]
g_sub = subgraph(v -> v[2] == 2, g)
@test has_vertex(g_sub, ("X", 2))
@test has_vertex(g_sub, ("Y", 2))
@test !has_vertex(g_sub, ("X", 1))
@test !has_vertex(g_sub, ("Y", 1))
@test has_edge(g_sub, ("X", 2) => ("Y", 2))
g1 = NamedGraph(grid((2, 2)), Tuple.(CartesianIndices((2, 2))))
@test nv(g1) == 4
@test ne(g1) == 4
@test has_vertex(g1, (1, 1))
@test has_vertex(g1, (2, 1))
@test has_vertex(g1, (1, 2))
@test has_vertex(g1, (2, 2))
@test has_edge(g1, (1, 1) => (1, 2))
@test has_edge(g1, (1, 1) => (2, 1))
@test has_edge(g1, (1, 2) => (2, 2))
@test has_edge(g1, (2, 1) => (2, 2))
@test !has_edge(g1, (1, 1) => (2, 2))
g2 = NamedGraph(grid((2, 2)), Tuple.(CartesianIndices((2, 2))))
g = ("X" => g1) ⊔ ("Y" => g2)
@test nv(g) == 8
@test ne(g) == 8
@test has_vertex(g, ((1, 1), "X"))
@test has_vertex(g, ((1, 1), "Y"))
g3 = NamedGraph(grid((2, 2)), Tuple.(CartesianIndices((2, 2))))
g = disjoint_union("X" => g1, "Y" => g2, "Z" => g3)
@test nv(g) == 12
@test ne(g) == 12
@test has_vertex(g, ((1, 1), "X"))
@test has_vertex(g, ((1, 1), "Y"))
@test has_vertex(g, ((1, 1), "Z"))
# TODO: Need to drop the dimensions to make these equal
#@test issetequal(Graphs.vertices(g1), Graphs.vertices(g["X", :]))
#@test issetequal(edges(g1), edges(g["X", :]))
#@test issetequal(Graphs.vertices(g1), Graphs.vertices(g["Y", :]))
#@test issetequal(edges(g1), edges(g["Y", :]))
end
@testset "NamedGraph add vertices" begin
position_graph = grid((2, 2))
vertices = [("X", 1), ("X", 2), ("Y", 1), ("Y", 2)]
g = NamedGraph()
add_vertex!(g, ("X", 1))
add_vertex!(g, ("X", 2))
add_vertex!(g, ("Y", 1))
add_vertex!(g, ("Y", 2))
@test nv(g) == 4
@test ne(g) == 0
@test has_vertex(g, ("X", 1))
@test has_vertex(g, ("X", 2))
@test has_vertex(g, ("Y", 1))
@test has_vertex(g, ("Y", 2))
add_edge!(g, ("X", 1) => ("Y", 2))
@test ne(g) == 1
@test has_edge(g, ("X", 1) => ("Y", 2))
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 20193 | @eval module $(gensym())
using Dictionaries: Dictionary, Indices
using Graphs:
Edge,
δ,
Δ,
a_star,
add_edge!,
add_vertex!,
adjacency_matrix,
bellman_ford_shortest_paths,
bfs_parents,
bfs_tree,
boruvka_mst,
center,
common_neighbors,
connected_components,
degree,
degree_histogram,
desopo_pape_shortest_paths,
dfs_parents,
dfs_tree,
diameter,
dijkstra_shortest_paths,
dst,
eccentricity,
edges,
edgetype,
floyd_warshall_shortest_paths,
grid,
has_edge,
has_path,
has_self_loops,
has_vertex,
indegree,
is_connected,
is_cyclic,
is_directed,
is_ordered,
johnson_shortest_paths,
kruskal_mst,
merge_vertices,
ne,
neighborhood,
neighborhood_dists,
neighbors,
nv,
outdegree,
path_digraph,
path_graph,
periphery,
prim_mst,
radius,
rem_vertex!,
spfa_shortest_paths,
src,
steiner_tree,
topological_sort_by_dfs,
vertices,
yen_k_shortest_paths
using Graphs.SimpleGraphs: SimpleDiGraph, SimpleEdge
using GraphsFlows: GraphsFlows
using NamedGraphs: AbstractNamedEdge, NamedEdge, NamedDiGraph, NamedGraph
using NamedGraphs.GraphsExtensions:
GraphsExtensions,
⊔,
boundary_edges,
boundary_vertices,
convert_vertextype,
degrees,
eccentricities,
dijkstra_mst,
dijkstra_parents,
dijkstra_tree,
has_vertices,
incident_edges,
indegrees,
inner_boundary_vertices,
mincut_partitions,
outdegrees,
outer_boundary_vertices,
permute_vertices,
rename_vertices,
subgraph,
symrcm_perm,
symrcm_permute,
vertextype
using NamedGraphs.NamedGraphGenerators: named_binary_tree, named_grid, named_path_graph
using SymRCM: SymRCM
using Test: @test, @test_broken, @testset
@testset "NamedEdge" begin
@test NamedEdge(SimpleEdge(1, 2)) == NamedEdge(1, 2)
@test AbstractNamedEdge(SimpleEdge(1, 2)) == NamedEdge(1, 2)
@test is_ordered(NamedEdge("A", "B"))
@test !is_ordered(NamedEdge("B", "A"))
@test rename_vertices(NamedEdge("A", "B"), Dict(["A" => "C", "B" => "D"])) ==
NamedEdge("C", "D")
@test rename_vertices(SimpleEdge(1, 2), Dict([1 => "C", 2 => "D"])) == NamedEdge("C", "D")
@test rename_vertices(v -> Dict(["A" => "C", "B" => "D"])[v], NamedEdge("A", "B")) ==
NamedEdge("C", "D")
@test rename_vertices(v -> Dict([1 => "C", 2 => "D"])[v], SimpleEdge(1, 2)) ==
NamedEdge("C", "D")
end
@testset "NamedGraph" begin
@testset "Basics" begin
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
@test nv(g) == 4
@test ne(g) == 3
@test sum(g) == 3
@test has_vertex(g, "A")
@test has_vertex(g, "B")
@test has_vertex(g, "C")
@test has_vertex(g, "D")
@test has_edge(g, "A" => "B")
@test issetequal(common_neighbors(g, "A", "C"), ["B"])
@test isempty(common_neighbors(g, "A", "D"))
@test degree(g, "A") == 1
@test degree(g, "B") == 2
g = NamedGraph(grid((4,)), [2, 4, 6, 8])
g_t = convert_vertextype(UInt16, g)
@test g == g_t
@test nv(g_t) == 4
@test ne(g_t) == 3
@test vertextype(g_t) === UInt16
@test issetequal(vertices(g_t), UInt16[2, 4, 6, 8])
@test eltype(vertices(g_t)) === UInt16
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
zg = zero(g)
@test zg isa NamedGraph{String}
@test nv(zg) == 0
@test ne(zg) == 0
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
add_vertex!(g, "E")
@test has_vertex(g, "E")
@test nv(g) == 5
@test has_vertices(g, ["A", "B", "C", "D", "E"])
g = NamedGraph(grid((5,)), ["A", "B", "C", "D", "E"])
rem_vertex!(g, "E")
@test !has_vertex(g, "E")
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
add_vertex!(g, "E")
rem_vertex!(g, "E")
@test !has_vertex(g, "E")
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
for gc in (NamedGraph(g), convert(NamedGraph, g))
@test gc == g
@test gc isa NamedGraph{String}
@test vertextype(gc) === vertextype(g)
@test issetequal(vertices(gc), vertices(g))
@test issetequal(edges(gc), edges(g))
end
for gc in (NamedGraph{Any}(g), convert(NamedGraph{Any}, g))
@test gc == g
@test gc isa NamedGraph{Any}
@test vertextype(gc) === Any
@test issetequal(vertices(gc), vertices(g))
@test issetequal(edges(gc), edges(g))
end
io = IOBuffer()
show(io, "text/plain", g)
@test String(take!(io)) isa String
add_edge!(g, "A" => "C")
@test has_edge(g, "A" => "C")
@test issetequal(neighbors(g, "A"), ["B", "C"])
@test issetequal(neighbors(g, "B"), ["A", "C"])
g_sub = subgraph(g, ["A", "B"])
@test has_vertex(g_sub, "A")
@test has_vertex(g_sub, "B")
@test !has_vertex(g_sub, "C")
@test !has_vertex(g_sub, "D")
# Test Graphs.jl `getindex` syntax.
@test g_sub == g[["A", "B"]]
g = NamedGraph(["A", "B", "C", "D", "E"])
add_edge!(g, "A" => "B")
add_edge!(g, "B" => "C")
add_edge!(g, "D" => "E")
@test has_path(g, "A", "B")
@test has_path(g, "A", "C")
@test has_path(g, "D", "E")
@test !has_path(g, "A", "E")
g = named_path_graph(4)
@test degree(g, 1) == 1
@test indegree(g, 1) == 1
@test outdegree(g, 1) == 1
@test degree(g, 2) == 2
@test indegree(g, 2) == 2
@test outdegree(g, 2) == 2
@test Δ(g) == 2
@test δ(g) == 1
end
@testset "neighborhood" begin
g = named_grid((4, 4))
@test issetequal(neighborhood(g, (1, 1), nv(g)), vertices(g))
@test issetequal(neighborhood(g, (1, 1), 0), [(1, 1)])
@test issetequal(neighborhood(g, (1, 1), 1), [(1, 1), (2, 1), (1, 2)])
ns = [(1, 1), (2, 1), (1, 2), (3, 1), (2, 2), (1, 3)]
@test issetequal(neighborhood(g, (1, 1), 2), ns)
ns = [(1, 1), (2, 1), (1, 2), (3, 1), (2, 2), (1, 3), (4, 1), (3, 2), (2, 3), (1, 4)]
@test issetequal(neighborhood(g, (1, 1), 3), ns)
ns = [
(1, 1),
(2, 1),
(1, 2),
(3, 1),
(2, 2),
(1, 3),
(4, 1),
(3, 2),
(2, 3),
(1, 4),
(4, 2),
(3, 3),
(2, 4),
]
@test issetequal(neighborhood(g, (1, 1), 4), ns)
ns = [
(1, 1),
(2, 1),
(1, 2),
(3, 1),
(2, 2),
(1, 3),
(4, 1),
(3, 2),
(2, 3),
(1, 4),
(4, 2),
(3, 3),
(2, 4),
(4, 3),
(3, 4),
]
@test issetequal(neighborhood(g, (1, 1), 5), ns)
@test issetequal(neighborhood(g, (1, 1), 6), vertices(g))
ns_ds = [
((1, 1), 0),
((2, 1), 1),
((1, 2), 1),
((3, 1), 2),
((2, 2), 2),
((1, 3), 2),
((4, 1), 3),
((3, 2), 3),
((2, 3), 3),
((1, 4), 3),
]
@test issetequal(neighborhood_dists(g, (1, 1), 3), ns_ds)
# Test ambiguity with Graphs.jl AbstractGraph definition
g = named_path_graph(5)
@test issetequal(neighborhood(g, 3, 1), [2, 3, 4])
@test issetequal(neighborhood_dists(g, 3, 1), [(2, 1), (3, 0), (4, 1)])
end
@testset "Basics (directed)" begin
g = NamedDiGraph(["A", "B", "C", "D"])
add_edge!(g, "A" => "B")
add_edge!(g, "B" => "C")
@test has_edge(g, "A" => "B")
@test has_edge(g, "B" => "C")
@test !has_edge(g, "B" => "A")
@test !has_edge(g, "C" => "B")
@test indegree(g, "A") == 0
@test outdegree(g, "A") == 1
@test indegree(g, "B") == 1
@test outdegree(g, "B") == 1
@test indegree(g, "C") == 1
@test outdegree(g, "C") == 0
@test indegree(g, "D") == 0
@test outdegree(g, "D") == 0
@test degrees(g) == Dictionary(vertices(g), [1, 2, 1, 0])
@test degrees(g, ["B", "C"]) == [2, 1]
@test degrees(g, Indices(["B", "C"])) == Dictionary(["B", "C"], [2, 1])
@test indegrees(g) == Dictionary(vertices(g), [0, 1, 1, 0])
@test outdegrees(g) == Dictionary(vertices(g), [1, 1, 0, 0])
h = degree_histogram(g)
@test h[0] == 1
@test h[1] == 2
@test h[2] == 1
h = degree_histogram(g, indegree)
@test h[0] == 2
@test h[1] == 2
end
@testset "BFS traversal" begin
g = named_grid((3, 3))
t = bfs_tree(g, (1, 1))
@test is_directed(t)
@test t isa NamedDiGraph{Tuple{Int,Int}}
@test ne(t) == 8
edges = [
(1, 1) => (1, 2),
(1, 2) => (1, 3),
(1, 1) => (2, 1),
(2, 1) => (2, 2),
(2, 2) => (2, 3),
(2, 1) => (3, 1),
(3, 1) => (3, 2),
(3, 2) => (3, 3),
]
for e in edges
@test has_edge(t, e)
end
p = bfs_parents(g, (1, 1))
@test length(p) == 9
vertices_g = [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
parent_vertices = [
(1, 1), (1, 1), (2, 1), (1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2)
]
d = Dictionary(vertices_g, parent_vertices)
for v in vertices(g)
@test p[v] == d[v]
end
g = named_grid(3)
t = bfs_tree(g, 2)
@test is_directed(t)
@test t isa NamedDiGraph{Int}
@test ne(t) == 2
@test has_edge(t, 2 => 1)
@test has_edge(t, 2 => 3)
end
@testset "DFS traversal" begin
g = named_grid((3, 3))
t = dfs_tree(g, (1, 1))
@test is_directed(t)
@test t isa NamedDiGraph{Tuple{Int,Int}}
@test ne(t) == 8
edges = [
(1, 1) => (2, 1),
(2, 1) => (3, 1),
(3, 1) => (3, 2),
(3, 2) => (2, 2),
(2, 2) => (1, 2),
(1, 2) => (1, 3),
(1, 3) => (2, 3),
(2, 3) => (3, 3),
]
for e in edges
@test has_edge(t, e)
end
p = dfs_parents(g, (1, 1))
@test length(p) == 9
vertices_g = [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
parent_vertices = [
(1, 1), (1, 1), (2, 1), (2, 2), (3, 2), (3, 1), (1, 2), (1, 3), (2, 3)
]
d = Dictionary(vertices_g, parent_vertices)
for v in vertices(g)
@test p[v] == d[v]
end
g = named_grid(3)
t = dfs_tree(g, 2)
@test is_directed(t)
@test t isa NamedDiGraph{Int}
@test ne(t) == 2
@test has_edge(t, 2 => 1)
@test has_edge(t, 2 => 3)
end
@testset "Shortest paths" begin
g = named_grid((10, 10))
p = a_star(g, (1, 1), (10, 10))
@test length(p) == 18
@test eltype(p) == edgetype(g)
@test eltype(p) == NamedEdge{Tuple{Int,Int}}
ps = spfa_shortest_paths(g, (1, 1))
@test ps isa Dictionary{Tuple{Int,Int},Int}
@test length(ps) == 100
@test ps[(8, 1)] == 7
es, weights = boruvka_mst(g)
@test length(es) == 99
@test weights == 99
@test es isa Vector{NamedEdge{Tuple{Int,Int}}}
es = kruskal_mst(g)
@test length(es) == 99
@test es isa Vector{NamedEdge{Tuple{Int,Int}}}
es = prim_mst(g)
@test length(es) == 99
@test es isa Vector{NamedEdge{Tuple{Int,Int}}}
for f in (
bellman_ford_shortest_paths,
desopo_pape_shortest_paths,
dijkstra_shortest_paths,
floyd_warshall_shortest_paths,
johnson_shortest_paths,
yen_k_shortest_paths,
)
@test_broken f(g, "A")
end
end
@testset "Graph connectivity" begin
g = NamedGraph(2)
@test g isa NamedGraph{Int}
add_edge!(g, 1, 2)
@test !has_self_loops(g)
add_edge!(g, 1, 1)
@test has_self_loops(g)
g1 = named_grid((2, 2))
g2 = named_grid((2, 2))
g = g1 ⊔ g2
t = named_binary_tree(3)
@test is_cyclic(g1)
@test is_cyclic(g2)
@test is_cyclic(g)
@test !is_cyclic(t)
@test is_connected(g1)
@test is_connected(g2)
@test !is_connected(g)
@test is_connected(t)
cc = connected_components(g1)
@test length(cc) == 1
@test length(only(cc)) == nv(g1)
@test issetequal(only(cc), vertices(g1))
cc = connected_components(g)
@test length(cc) == 2
@test length(cc[1]) == nv(g1)
@test length(cc[2]) == nv(g2)
@test issetequal(cc[1], map(v -> (v, 1), vertices(g1)))
@test issetequal(cc[2], map(v -> (v, 2), vertices(g2)))
end
@testset "incident_edges" begin
g = grid((3, 3))
inc_edges = Edge.([2 => 1, 2 => 3, 2 => 5])
@test issetequal(incident_edges(g, 2), inc_edges)
@test issetequal(incident_edges(g, 2; dir=:in), reverse.(inc_edges))
@test issetequal(incident_edges(g, 2; dir=:out), inc_edges)
@test issetequal(incident_edges(g, 2; dir=:both), inc_edges ∪ reverse.(inc_edges))
g = named_grid((3, 3))
inc_edges = NamedEdge.([(2, 1) => (1, 1), (2, 1) => (3, 1), (2, 1) => (2, 2)])
@test issetequal(incident_edges(g, (2, 1)), inc_edges)
@test issetequal(incident_edges(g, (2, 1); dir=:in), reverse.(inc_edges))
@test issetequal(incident_edges(g, (2, 1); dir=:out), inc_edges)
@test issetequal(incident_edges(g, (2, 1); dir=:both), inc_edges ∪ reverse.(inc_edges))
g = path_digraph(4)
@test issetequal(incident_edges(g, 3), Edge.([3 => 4]))
@test issetequal(incident_edges(g, 3; dir=:in), Edge.([2 => 3]))
@test issetequal(incident_edges(g, 3; dir=:out), Edge.([3 => 4]))
@test issetequal(incident_edges(g, 3; dir=:both), Edge.([2 => 3, 3 => 4]))
g = NamedDiGraph(path_digraph(4), ["A", "B", "C", "D"])
@test issetequal(incident_edges(g, "C"), NamedEdge.(["C" => "D"]))
@test issetequal(incident_edges(g, "C"; dir=:in), NamedEdge.(["B" => "C"]))
@test issetequal(incident_edges(g, "C"; dir=:out), NamedEdge.(["C" => "D"]))
@test issetequal(
incident_edges(g, "C"; dir=:both), NamedEdge.(["B" => "C", "C" => "D"])
)
end
@testset "merge_vertices" begin
g = named_grid((3, 3))
mg = merge_vertices(g, [(2, 2), (2, 3), (3, 3)])
@test nv(mg) == 7
@test ne(mg) == 9
merged_vertices = [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3)]
for v in merged_vertices
@test has_vertex(mg, v)
end
merged_edges = [
(1, 1) => (2, 1),
(1, 1) => (1, 2),
(2, 1) => (3, 1),
(2, 1) => (2, 2),
(3, 1) => (3, 2),
(1, 2) => (2, 2),
(1, 2) => (1, 3),
(2, 2) => (3, 2),
(2, 2) => (1, 3),
]
for e in merged_edges
@test has_edge(mg, e)
end
sg = SimpleDiGraph(4)
g = NamedDiGraph(sg, ["A", "B", "C", "D"])
add_edge!(g, "A" => "B")
add_edge!(g, "B" => "C")
add_edge!(g, "C" => "D")
mg = merge_vertices(g, ["B", "C"])
@test ne(mg) == 2
@test has_edge(mg, "A" => "B")
@test has_edge(mg, "B" => "D")
sg = SimpleDiGraph(4)
g = NamedDiGraph(sg, ["A", "B", "C", "D"])
add_edge!(g, "B" => "A")
add_edge!(g, "C" => "B")
add_edge!(g, "D" => "C")
mg = merge_vertices(g, ["B", "C"])
@test ne(mg) == 2
@test has_edge(mg, "B" => "A")
@test has_edge(mg, "D" => "B")
end
@testset "mincut" begin
g = NamedGraph(path_graph(4), ["A", "B", "C", "D"])
part1, part2, flow = GraphsFlows.mincut(g, "A", "D")
@test "A" ∈ part1
@test "D" ∈ part2
@test flow == 1
part1, part2 = mincut_partitions(g, "A", "D")
@test "A" ∈ part1
@test "D" ∈ part2
part1, part2 = mincut_partitions(g)
@test issetequal(vcat(part1, part2), vertices(g))
weights_dict = Dict{Tuple{String,String},Float64}()
weights_dict["A", "B"] = 3
weights_dict["B", "C"] = 2
weights_dict["C", "D"] = 3
weights_dictionary = Dictionary(keys(weights_dict), values(weights_dict))
for weights in (weights_dict, weights_dictionary)
part1, part2, flow = GraphsFlows.mincut(g, "A", "D", weights)
@test issetequal(part1, ["A", "B"]) || issetequal(part1, ["C", "D"])
@test issetequal(vcat(part1, part2), vertices(g))
@test flow == 2
part1, part2 = mincut_partitions(g, "A", "D", weights)
@test issetequal(part1, ["A", "B"]) || issetequal(part1, ["C", "D"])
@test issetequal(vcat(part1, part2), vertices(g))
part1, part2 = mincut_partitions(g, weights)
@test issetequal(part1, ["A", "B"]) || issetequal(part1, ["C", "D"])
@test issetequal(vcat(part1, part2), vertices(g))
end
end
@testset "dijkstra" begin
g = named_grid((3, 3))
srcs = [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
dsts = [(2, 1), (2, 2), (2, 1), (2, 2), (2, 2), (2, 2), (2, 3), (2, 2), (2, 3)]
parents = Dictionary(srcs, dsts)
d = dijkstra_shortest_paths(g, [(2, 2)])
@test d.dists == Dictionary(vertices(g), [2, 1, 2, 1, 0, 1, 2, 1, 2])
@test d.parents == parents
@test d.pathcounts ==
Dictionary(vertices(g), [2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 2.0])
# Regression test
# https://github.com/mtfishman/NamedGraphs.jl/pull/34
vertex_map = v -> v[1] > 1 ? (v, 1) : v
g̃ = rename_vertices(vertex_map, g)
d = dijkstra_shortest_paths(g̃, [((2, 2), 1)])
@test d.dists == Dictionary(vertices(g̃), [2, 1, 2, 1, 0, 1, 2, 1, 2])
@test d.parents == Dictionary(map(vertex_map, srcs), map(vertex_map, dsts))
@test d.pathcounts ==
Dictionary(vertices(g̃), [2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 2.0])
t = dijkstra_tree(g, (2, 2))
@test nv(t) == 9
@test ne(t) == 8
@test issetequal(vertices(t), vertices(g))
for v in vertices(g)
if parents[v] ≠ v
@test has_edge(t, parents[v] => v)
end
end
p = dijkstra_parents(g, (2, 2))
@test p == parents
mst = dijkstra_mst(g, (2, 2))
@test length(mst) == 8
for e in mst
@test parents[src(e)] == dst(e)
end
g = named_grid(4)
srcs = [1, 2, 3, 4]
dsts = [2, 2, 2, 3]
parents = Dictionary(srcs, dsts)
d = dijkstra_shortest_paths(g, [2])
@test d.dists == Dictionary(vertices(g), [1, 0, 1, 2])
@test d.parents == parents
@test d.pathcounts == Dictionary(vertices(g), [1.0, 1.0, 1.0, 1.0])
end
@testset "distances" begin
g = named_grid((3, 3))
@test eccentricity(g, (1, 1)) == 4
@test eccentricities(g, [(1, 2), (2, 2)]) == [3, 2]
@test eccentricities(g, Indices([(1, 2), (2, 2)])) ==
Dictionary([(1, 2), (2, 2)], [3, 2])
@test eccentricities(g) == Dictionary(vertices(g), [4, 3, 4, 3, 2, 3, 4, 3, 4])
@test issetequal(center(g), [(2, 2)])
@test radius(g) == 2
@test diameter(g) == 4
@test issetequal(periphery(g), [(1, 1), (3, 1), (1, 3), (3, 3)])
end
@testset "Bandwidth minimization" begin
g₀ = NamedGraph(path_graph(5), ["A", "B", "C", "D", "E"])
p = [3, 1, 5, 4, 2]
g = permute_vertices(g₀, p)
@test g == g₀
gp = symrcm_permute(g)
@test g == gp
pp = symrcm_perm(g)
@test pp == reverse(invperm(p))
gp′ = permute_vertices(g, pp)
@test g == gp′
A = adjacency_matrix(gp)
for i in 1:nv(g)
for j in 1:nv(g)
if abs(i - j) == 1
@test A[i, j] == A[j, i] == 1
else
@test A[i, j] == 0
end
end
end
end
@testset "boundary" begin
g = named_grid((5, 5))
subgraph_vertices = [
(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4), (4, 2), (4, 3), (4, 4)
]
inner_vertices = setdiff(subgraph_vertices, [(3, 3)])
outer_vertices = setdiff(vertices(g), subgraph_vertices, periphery(g))
@test issetequal(boundary_vertices(g, subgraph_vertices), inner_vertices)
@test issetequal(inner_boundary_vertices(g, subgraph_vertices), inner_vertices)
@test issetequal(outer_boundary_vertices(g, subgraph_vertices), outer_vertices)
es = boundary_edges(g, subgraph_vertices)
@test length(es) == 12
@test eltype(es) <: NamedEdge
for v1 in inner_vertices
for v2 in outer_vertices
if has_edge(g, v1 => v2)
@test edgetype(g)(v1, v2) ∈ es
end
end
end
end
@testset "steiner_tree" begin
g = named_grid((3, 5))
terminal_vertices = [(1, 2), (1, 4), (3, 4)]
st = steiner_tree(g, terminal_vertices)
es = [(1, 2) => (1, 3), (1, 3) => (1, 4), (1, 4) => (2, 4), (2, 4) => (3, 4)]
@test ne(st) == 4
@test nv(st) == 5
@test !any(v -> iszero(degree(st, v)), vertices(st))
for e in es
@test has_edge(st, e)
end
end
@testset "topological_sort_by_dfs" begin
g = NamedDiGraph(["A", "B", "C", "D", "E", "F", "G"])
add_edge!(g, "A" => "D")
add_edge!(g, "B" => "D")
add_edge!(g, "B" => "E")
add_edge!(g, "C" => "E")
add_edge!(g, "D" => "F")
add_edge!(g, "D" => "G")
add_edge!(g, "E" => "G")
t = topological_sort_by_dfs(g)
for e in edges(g)
@test findfirst(x -> x == src(e), t) < findfirst(x -> x == dst(e), t)
end
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1509 | @eval module $(gensym())
using Graphs: edges, neighbors, vertices
using NamedGraphs.GraphsExtensions: is_cycle_graph
using NamedGraphs.NamedGraphGenerators:
named_hexagonal_lattice_graph, named_triangular_lattice_graph
using Test: @test, @testset
@testset "Named Graph Generators" begin
g = named_hexagonal_lattice_graph(1, 1)
#Should just be 1 hexagon
@test is_cycle_graph(g)
#Check consistency with the output of hexagonal_lattice_graph(7,7) in networkx
g = named_hexagonal_lattice_graph(7, 7)
@test length(vertices(g)) == 126
@test length(edges(g)) == 174
#Check all vertices have degree 3 in the periodic case
g = named_hexagonal_lattice_graph(6, 6; periodic=true)
degree_dist = [length(neighbors(g, v)) for v in vertices(g)]
@test all(d -> d == 3, degree_dist)
g = named_triangular_lattice_graph(1, 1)
#Should just be 1 triangle
@test is_cycle_graph(g)
g = named_hexagonal_lattice_graph(2, 1)
dims = maximum(vertices(g))
@test dims[1] > dims[2]
g = named_triangular_lattice_graph(2, 1)
dims = maximum(vertices(g))
@test dims[1] > dims[2]
#Check consistency with the output of triangular_lattice_graph(7,7) in networkx
g = named_triangular_lattice_graph(7, 7)
@test length(vertices(g)) == 36
@test length(edges(g)) == 84
#Check all vertices have degree 6 in the periodic case
g = named_triangular_lattice_graph(6, 6; periodic=true)
degree_dist = [length(neighbors(g, v)) for v in vertices(g)]
@test all(d -> d == 6, degree_dist)
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 6964 | @eval module $(gensym())
using Graphs:
center,
diameter,
edges,
has_vertex,
is_connected,
is_tree,
ne,
nv,
radius,
random_regular_graph,
rem_vertex!,
vertices
using Metis: Metis
using NamedGraphs: NamedEdge, NamedGraph
using NamedGraphs.GraphsExtensions:
add_edges!,
add_vertices!,
boundary_edges,
default_root_vertex,
forest_cover,
is_path_graph,
is_self_loop,
spanning_forest,
spanning_tree,
subgraph,
vertextype
using NamedGraphs.NamedGraphGenerators:
named_comb_tree, named_grid, named_triangular_lattice_graph
using NamedGraphs.PartitionedGraphs:
PartitionEdge,
PartitionedGraph,
PartitionVertex,
boundary_partitionedges,
partitioned_graph,
partitionedge,
partitionedges,
partitionvertex,
partitionvertices,
unpartitioned_graph
using Dictionaries: Dictionary, dictionary
using Pkg: Pkg
using Test: @test, @testset
@testset "Test Partitioned Graph Constructors" begin
nx, ny = 10, 10
g = named_grid((nx, ny))
#Partition it column-wise (into a 1D chain)
partitions = [[(i, j) for j in 1:ny] for i in 1:nx]
pg = PartitionedGraph(g, partitions)
@test vertextype(partitioned_graph(pg)) == Int64
@test vertextype(unpartitioned_graph(pg)) == vertextype(g)
@test isa(partitionvertices(pg), Dictionary{Int64,PartitionVertex{Int64}})
@test isa(partitionedges(pg), Vector{PartitionEdge{Int64,NamedEdge{Int64}}})
@test is_tree(partitioned_graph(pg))
@test nv(pg) == nx * ny
@test nv(partitioned_graph(pg)) == nx
pg_c = copy(pg)
@test pg_c == pg
#Same partitioning but with a dictionary constructor
partition_dict = Dictionary([first(partition) for partition in partitions], partitions)
pg = PartitionedGraph(g, partition_dict)
@test vertextype(partitioned_graph(pg)) == vertextype(g)
@test vertextype(unpartitioned_graph(pg)) == vertextype(g)
@test isa(
partitionvertices(pg),
Dictionary{Tuple{Int64,Int64},PartitionVertex{Tuple{Int64,Int64}}},
)
@test isa(
partitionedges(pg),
Vector{PartitionEdge{Tuple{Int64,Int64},NamedEdge{Tuple{Int64,Int64}}}},
)
@test is_tree(partitioned_graph(pg))
@test nv(pg) == nx * ny
@test nv(partitioned_graph(pg)) == nx
pg_c = copy(pg)
@test pg_c == pg
#Trivial partitioning: each partition contains just 1 vertex
pg = PartitionedGraph([i for i in 1:nx])
@test unpartitioned_graph(pg) == partitioned_graph(pg)
@test nv(pg) == nx
@test nv(partitioned_graph(pg)) == nx
@test ne(pg) == 0
@test ne(partitioned_graph(pg)) == 0
pg_c = copy(pg)
@test pg_c == pg
end
@testset "Test Partitioned Graph Partition Edge and Vertex Finding" begin
nx, ny, nz = 4, 4, 4
g = named_grid((nx, ny, nz))
#Partition it column-wise (into a square grid)
partitions = [[(i, j, k) for k in 1:nz] for i in 1:nx for j in 1:ny]
pg = PartitionedGraph(g, partitions)
@test Set(partitionvertices(pg)) == Set(partitionvertices(pg, vertices(g)))
@test Set(partitionedges(pg)) == Set(partitionedges(pg, edges(g)))
@test is_self_loop(partitionedge(pg, (1, 1, 1) => (1, 1, 2)))
@test !is_self_loop(partitionedge(pg, (1, 2, 1) => (1, 1, 1)))
@test partitionvertex(pg, (1, 1, 1)) == partitionvertex(pg, (1, 1, nz))
@test partitionvertex(pg, (2, 1, 1)) != partitionvertex(pg, (1, 1, nz))
@test partitionedge(pg, (1, 1, 1) => (2, 1, 1)) ==
partitionedge(pg, (1, 1, 2) => (2, 1, 2))
inter_column_edges = [(1, 1, i) => (2, 1, i) for i in 1:nz]
@test length(partitionedges(pg, inter_column_edges)) == 1
@test length(partitionvertices(pg, [(1, 2, i) for i in 1:nz])) == 1
boundary_sizes = [length(boundary_partitionedges(pg, pv)) for pv in partitionvertices(pg)]
#The partitions form a square grid, so each partition should have at most 4 and at least 2 boundary partition edges
@test maximum(boundary_sizes) == 4
@test minimum(boundary_sizes) == 2
@test isempty(boundary_partitionedges(pg, partitionvertices(pg)))
end
@testset "Test Partitioned Graph Vertex/Edge Addition and Removal" begin
nx, ny = 10, 10
g = named_grid((nx, ny))
partitions = [[(i, j) for j in 1:ny] for i in 1:nx]
pg = PartitionedGraph(g, partitions)
pv = PartitionVertex(5)
v_set = vertices(pg, pv)
edges_involving_v_set = boundary_edges(g, v_set)
#Strip the middle column from pg in place via its partition vertex
rem_vertex!(pg, pv)
@test !is_connected(unpartitioned_graph(pg)) && !is_connected(partitioned_graph(pg))
@test parent(pv) ∉ vertices(partitioned_graph(pg))
@test !has_vertex(pg, pv)
@test nv(pg) == (nx - 1) * ny
@test nv(partitioned_graph(pg)) == nx - 1
@test !is_tree(partitioned_graph(pg))
#Add the column back to the graph in place
add_vertices!(pg, v_set, pv)
add_edges!(pg, edges_involving_v_set)
@test is_connected(pg.graph) && is_path_graph(partitioned_graph(pg))
@test parent(pv) ∈ vertices(partitioned_graph(pg))
@test has_vertex(pg, pv)
@test is_tree(partitioned_graph(pg))
@test nv(pg) == nx * ny
@test nv(partitioned_graph(pg)) == nx
end
@testset "Test Partitioned Graph Subgraph Functionality" begin
n, z = 12, 4
g = NamedGraph(random_regular_graph(n, z))
partitions = dictionary([
1 => [1, 2, 3], 2 => [4, 5, 6], 3 => [7, 8, 9], 4 => [10, 11, 12]
])
pg = PartitionedGraph(g, partitions)
subgraph_partitioned_vertices = [1, 2]
subgraph_vertices = reduce(
vcat, [partitions[spv] for spv in subgraph_partitioned_vertices]
)
pg_1 = subgraph(pg, PartitionVertex.(subgraph_partitioned_vertices))
pg_2 = subgraph(pg, subgraph_vertices)
@test pg_1 == pg_2
@test nv(pg_1) == length(subgraph_vertices)
@test nv(partitioned_graph(pg_1)) == length(subgraph_partitioned_vertices)
subgraph_partitioned_vertex = 3
subgraph_vertices = partitions[subgraph_partitioned_vertex]
g_1 = subgraph(pg, PartitionVertex(subgraph_partitioned_vertex))
pg_1 = subgraph(pg, subgraph_vertices)
@test unpartitioned_graph(pg_1) == subgraph(g, subgraph_vertices)
@test g_1 == subgraph(g, subgraph_vertices)
end
@testset "Test NamedGraphs Functions on Partitioned Graph" begin
functions = (is_tree, default_root_vertex, center, diameter, radius)
gs = (
named_comb_tree((4, 4)),
named_grid((2, 2, 2)),
NamedGraph(random_regular_graph(12, 3)),
named_triangular_lattice_graph(7, 7),
)
for f in functions
for g in gs
pg = PartitionedGraph(g, [vertices(g)])
@test f(pg) == f(unpartitioned_graph(pg))
@test nv(pg) == nv(g)
@test nv(partitioned_graph(pg)) == 1
@test ne(pg) == ne(g)
@test ne(partitioned_graph(pg)) == 0
end
end
end
@testset "Graph partitioning" begin
g = named_grid((4, 4))
npartitions = 4
backends = ["metis"]
if !Sys.iswindows()
# `KaHyPar` doesn't work on Windows.
Pkg.add("KaHyPar"; io=devnull)
push!(backends, "kahypar")
end
for backend in backends
pg = PartitionedGraph(g; npartitions, backend="metis")
@test pg isa PartitionedGraph
@test nv(partitioned_graph(pg)) == npartitions
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 813 | @eval module $(gensym())
using Graphs: bfs_tree, edges, is_connected, vertices
using NamedGraphs.GraphsExtensions: random_bfs_tree
using NamedGraphs.NamedGraphGenerators: named_grid
using Random: Random
using Test: @test, @testset
@testset "Random Bfs Tree" begin
g = named_grid((10, 10))
s = (5, 5)
Random.seed!(1234)
g_randtree1 = random_bfs_tree(g, s)
g_nonrandtree1 = bfs_tree(g, s)
Random.seed!(1434)
g_randtree2 = random_bfs_tree(g, s)
g_nonrandtree2 = bfs_tree(g, s)
@test length(edges(g_randtree1)) == length(vertices(g_randtree1)) - 1 &&
is_connected(g_randtree1)
@test length(edges(g_randtree2)) == length(vertices(g_randtree2)) - 1 &&
is_connected(g_randtree2)
@test edges(g_randtree1) != edges(g_randtree2)
@test edges(g_nonrandtree1) == edges(g_nonrandtree2)
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1150 | @eval module $(gensym())
using Graphs: ne, neighbors, nv, vertices
using NamedGraphs.GraphGenerators: comb_tree
using NamedGraphs.NamedGraphGenerators: named_comb_tree
using Random: Random
using Test: @test, @testset
@testset "Comb tree constructors" begin
Random.seed!(1234)
# construct from tuple dimension
dim = (rand(2:5), rand(1:5))
ct1 = comb_tree(dim)
@test nv(ct1) == prod(dim)
@test ne(ct1) == prod(dim) - 1
nct1 = named_comb_tree(dim)
for v in vertices(nct1)
for n in neighbors(nct1, v)
if v[2] == 1
@test ((abs.(v .- n) == (1, 0)) ⊻ (abs.(v .- n) == (0, 1)))
else
@test (abs.(v .- n) == (0, 1))
end
end
end
# construct from random vector of tooth lengths
tooth_lengths = rand(1:5, rand(2:5))
ct2 = comb_tree(tooth_lengths)
@test nv(ct2) == sum(tooth_lengths)
@test ne(ct2) == sum(tooth_lengths) - 1
nct2 = named_comb_tree(tooth_lengths)
for v in vertices(nct2)
for n in neighbors(nct2, v)
if v[2] == 1
@test ((abs.(v .- n) == (1, 0)) ⊻ (abs.(v .- n) == (0, 1)))
else
@test (abs.(v .- n) == (0, 1))
end
end
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1402 | @eval module $(gensym())
using Test: @test, @testset
using Graphs: connected_components, edges, is_tree, vertices
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: GraphsExtensions, all_edges, forest_cover, spanning_tree
using NamedGraphs.NamedGraphGenerators:
named_comb_tree, named_grid, named_hexagonal_lattice_graph, named_triangular_lattice_graph
gs = [
("Chain", named_grid((6, 1))),
("Cubic Lattice", named_grid((3, 3, 3))),
("Hexagonal Grid", named_hexagonal_lattice_graph(6, 6)),
("Comb Tree", named_comb_tree((4, 4))),
("Square lattice", named_grid((10, 10))),
("Triangular Grid", named_triangular_lattice_graph(5, 5; periodic=true)),
]
algs = (GraphsExtensions.BFS(), GraphsExtensions.DFS(), GraphsExtensions.RandomBFS())
@testset "Test Spanning Trees $g_string, $alg" for (g_string, g) in gs, alg in algs
s_tree = spanning_tree(g; alg)
@test is_tree(s_tree)
@test issetequal(vertices(s_tree), vertices(g))
@test issubset(all_edges(s_tree), all_edges(g))
end
@testset "Test Forest Cover $g_string" for (g_string, g) in gs
cover = forest_cover(g)
cover_edges = reduce(vcat, edges.(cover))
@test issetequal(cover_edges, edges(g))
@test all(issetequal(vertices(forest), vertices(g)) for forest in cover)
for forest in cover
trees = NamedGraph[forest[vs] for vs in connected_components(forest)]
@test all(is_tree.(trees))
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | docs | 7899 | # NamedGraphs
[](https://mtfishman.github.io/NamedGraphs.jl/stable)
[](https://mtfishman.github.io/NamedGraphs.jl/dev)
[](https://github.com/mtfishman/NamedGraphs.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/mtfishman/NamedGraphs.jl)
[](https://github.com/invenia/BlueStyle)
## Installation
You can install the package using Julia's package manager:
```julia
julia> ] add NamedGraphs
```
## Introduction
This package introduces graph types with named vertices, built on top of the `Graph`/`SimpleGraph` type in the [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) package, which only has contiguous integer vertices (i.e. linear indexing). The vertex names can be strings, tuples of integers, or any other unique identifiers (anything that is hashable).
There is a supertype `AbstractNamedGraph` that defines an interface and fallback implementations of standard
Graphs.jl operations, and two implementations: `NamedGraph` and `NamedDiGraph`.
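The directed variant follows the same pattern. As a brief sketch (not generated output; the constructor usage mirrors this package's test suite):
```julia
using Graphs: add_edge!, has_edge
using NamedGraphs: NamedDiGraph

g = NamedDiGraph(["A", "B", "C"])
add_edge!(g, "A" => "B")
add_edge!(g, "B" => "C")
has_edge(g, "A" => "B")  # true
has_edge(g, "B" => "A")  # false, since edges are directed
```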
## `NamedGraph`
`NamedGraph` simply takes a set of names for the vertices of the graph. For example:
```julia
julia> using Graphs: grid, has_edge, has_vertex, neighbors
julia> using NamedGraphs: NamedGraph
julia> using NamedGraphs.GraphsExtensions: ⊔, disjoint_union, subgraph, rename_vertices
julia> g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
NamedGraphs.NamedGraph{String} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{String}
"A"
"B"
"C"
"D"
and 3 edge(s):
"A" => "B"
"B" => "C"
"C" => "D"
```
Common operations are defined as you would expect:
```julia
julia> has_vertex(g, "A")
true
julia> has_edge(g, "A" => "B")
true
julia> has_edge(g, "A" => "C")
false
julia> neighbors(g, "B")
2-element Vector{String}:
"A"
"C"
julia> subgraph(g, ["A", "B"])
NamedGraphs.NamedGraph{String} with 2 vertices:
2-element NamedGraphs.OrderedDictionaries.OrderedIndices{String}
"A"
"B"
and 1 edge(s):
"A" => "B"
```
Internally, this type wraps a `SimpleGraph`, and stores a `Dictionary` from the [Dictionaries.jl](https://github.com/andyferris/Dictionaries.jl) package that maps the vertex names to the linear indices of the underlying `SimpleGraph`.
Graph operations are implemented by mapping back and forth between the generalized named vertices and the linear index vertices of the `SimpleGraph`.
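As a rough sketch of that layout (not generated output; `NamedGraphs.position_graph` is an internal helper, used here in the same way as in this package's test suite):
```julia
using Graphs: nv
using NamedGraphs: NamedGraphs, NamedGraph

g = NamedGraph(["A", "B", "C"])
# The wrapped SimpleGraph, whose vertices are the positions 1, 2, 3:
pg = NamedGraphs.position_graph(g)
nv(pg)  # 3; operations on `g` are forwarded to `pg` through the name-to-position mapping
```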
It is natural to use tuples of integers as the names for the vertices of graphs with grid connectivities.
For example:
```julia
julia> dims = (2, 2)
(2, 2)
julia> g = NamedGraph(grid(dims), Tuple.(CartesianIndices(dims)))
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 1)
(1, 2)
(2, 2)
and 4 edge(s):
(1, 1) => (2, 1)
(1, 1) => (1, 2)
(2, 1) => (2, 2)
(1, 2) => (2, 2)
```
In the future we will provide a shorthand notation for this, such as `cartesian_graph(grid((2, 2)), (2, 2))`.
Internally the vertices are all stored as tuples with a label in each dimension.
Vertices can be referred to by their tuples:
```julia
julia> has_vertex(g, (1, 1))
true
julia> has_edge(g, (1, 1) => (2, 1))
true
julia> has_edge(g, (1, 1) => (2, 2))
false
julia> neighbors(g, (2, 2))
2-element Vector{Tuple{Int64, Int64}}:
(2, 1)
(1, 2)
```
You can use vertex names to get [induced subgraphs](https://juliagraphs.org/Graphs.jl/dev/core_functions/operators/#Graphs.induced_subgraph-Union{Tuple{T},%20Tuple{U},%20Tuple{T,%20AbstractVector{U}}}%20where%20{U%3C:Integer,%20T%3C:AbstractGraph}):
```julia
julia> subgraph(v -> v[1] == 1, g)
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 2 vertices:
2-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(1, 2)
and 1 edge(s):
(1, 1) => (1, 2)
julia> subgraph(v -> v[2] == 2, g)
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 2 vertices:
2-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 2)
(2, 2)
and 1 edge(s):
(1, 2) => (2, 2)
julia> subgraph(g, [(1, 1), (2, 2)])
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 2 vertices:
2-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 2)
and 0 edge(s):
```
You can also take [disjoint unions](https://en.wikipedia.org/wiki/Disjoint_union) or concatenations of graphs:
```julia
julia> g₁ = g
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 1)
(1, 2)
(2, 2)
and 4 edge(s):
(1, 1) => (2, 1)
(1, 1) => (1, 2)
(2, 1) => (2, 2)
(1, 2) => (2, 2)
julia> g₂ = g
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 1)
(1, 2)
(2, 2)
and 4 edge(s):
(1, 1) => (2, 1)
(1, 1) => (1, 2)
(2, 1) => (2, 2)
(1, 2) => (2, 2)
julia> disjoint_union(g₁, g₂)
NamedGraphs.NamedGraph{Tuple{Tuple{Int64, Int64}, Int64}} with 8 vertices:
8-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Tuple{Int64, Int64}, Int64}}
((1, 1), 1)
((2, 1), 1)
((1, 2), 1)
((2, 2), 1)
((1, 1), 2)
((2, 1), 2)
((1, 2), 2)
((2, 2), 2)
and 8 edge(s):
((1, 1), 1) => ((2, 1), 1)
((1, 1), 1) => ((1, 2), 1)
((2, 1), 1) => ((2, 2), 1)
((1, 2), 1) => ((2, 2), 1)
((1, 1), 2) => ((2, 1), 2)
((1, 1), 2) => ((1, 2), 2)
((2, 1), 2) => ((2, 2), 2)
((1, 2), 2) => ((2, 2), 2)
julia> g₁ ⊔ g₂ # Same as above
NamedGraphs.NamedGraph{Tuple{Tuple{Int64, Int64}, Int64}} with 8 vertices:
8-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Tuple{Int64, Int64}, Int64}}
((1, 1), 1)
((2, 1), 1)
((1, 2), 1)
((2, 2), 1)
((1, 1), 2)
((2, 1), 2)
((1, 2), 2)
((2, 2), 2)
and 8 edge(s):
((1, 1), 1) => ((2, 1), 1)
((1, 1), 1) => ((1, 2), 1)
((2, 1), 1) => ((2, 2), 1)
((1, 2), 1) => ((2, 2), 1)
((1, 1), 2) => ((2, 1), 2)
((1, 1), 2) => ((1, 2), 2)
((2, 1), 2) => ((2, 2), 2)
((1, 2), 2) => ((2, 2), 2)
```
The symbol `⊔` is just an alias for `disjoint_union` and can be typed in the terminal
or in your favorite [IDE with the appropriate Julia extension](https://julialang.org/) with `\sqcup<tab>`.
By default, this maps the vertices `v₁ ∈ vertices(g₁)` to `(v₁, 1)` and the vertices `v₂ ∈ vertices(g₂)`
to `(v₂, 2)`, so the resulting vertices of the unioned graph will always be unique.
The resulting graph will have no edges between vertices `(v₁, 1)` and `(v₂, 2)`; these would have to
be added manually.
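For instance, a cross edge between the two copies could be added by hand. The following is a small sketch (not generated output) reusing `g₁`, `g₂`, and the default `(v, i)` vertex labels from above:
```julia
using Graphs: add_edge!, has_edge

g = g₁ ⊔ g₂
add_edge!(g, ((1, 1), 1) => ((1, 1), 2))
has_edge(g, ((1, 1), 1) => ((1, 1), 2))  # now true
```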
The original graphs can be obtained from subgraphs:
```julia
julia> rename_vertices(first, subgraph(v -> v[2] == 1, g₁ ⊔ g₂))
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 1)
(1, 2)
(2, 2)
and 4 edge(s):
(1, 1) => (2, 1)
(1, 1) => (1, 2)
(2, 1) => (2, 2)
(1, 2) => (2, 2)
julia> rename_vertices(first, subgraph(v -> v[2] == 2, g₁ ⊔ g₂))
NamedGraphs.NamedGraph{Tuple{Int64, Int64}} with 4 vertices:
4-element NamedGraphs.OrderedDictionaries.OrderedIndices{Tuple{Int64, Int64}}
(1, 1)
(2, 1)
(1, 2)
(2, 2)
and 4 edge(s):
(1, 1) => (2, 1)
(1, 1) => (1, 2)
(2, 1) => (2, 2)
(1, 2) => (2, 2)
```
## Generating this README
This file was generated with [Weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands:
```julia
using NamedGraphs: NamedGraphs
using Weave: Weave
Weave.weave(
joinpath(pkgdir(NamedGraphs), "examples", "README.jl");
doctype="github",
out_path=pkgdir(NamedGraphs),
)
```
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | docs | 192 | ```@meta
CurrentModule = NamedGraphs
```
# NamedGraphs
Documentation for [NamedGraphs](https://github.com/mtfishman/NamedGraphs.jl).
```@index
```
```@autodocs
Modules = [NamedGraphs]
```
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2983 | using Documenter, FrankWolfe
using SparseArrays
using LinearAlgebra
using Literate, Test
EXAMPLE_DIR = joinpath(dirname(@__DIR__), "examples")
DOCS_EXAMPLE_DIR = joinpath(@__DIR__, "src", "examples")
DOCS_REFERENCE_DIR = joinpath(@__DIR__, "src", "reference")
function file_list(dir, extension)
return filter(file -> endswith(file, extension), sort(readdir(dir)))
end
# prepends an include of plot_utils to the example file before running it
function include_utils(content)
return """
import FrankWolfe; include(joinpath(dirname(pathof(FrankWolfe)), "../examples/plot_utils.jl")) # hide
""" * content
end
function literate_directory(jl_dir, md_dir)
for filename in file_list(md_dir, ".md")
filepath = joinpath(md_dir, filename)
rm(filepath)
end
for filename in file_list(jl_dir, ".jl")
filepath = joinpath(jl_dir, filename)
# Only convert example files whose names start with "docs" to
# Documenter-flavored markdown for the documentation build.
if startswith(filename, "docs")
Literate.markdown(
filepath, md_dir;
documenter=true, flavor=Literate.DocumenterFlavor(), preprocess=include_utils,
)
end
end
return nothing
end
literate_directory(EXAMPLE_DIR, DOCS_EXAMPLE_DIR)
cp(joinpath(EXAMPLE_DIR, "plot_utils.jl"), joinpath(DOCS_EXAMPLE_DIR, "plot_utils.jl"))
ENV["GKSwstype"] = "100"
generated_path = joinpath(@__DIR__, "src")
base_url = "https://github.com/ZIB-IOL/FrankWolfe.jl/blob/master/"
isdir(generated_path) || mkdir(generated_path)
open(joinpath(generated_path, "contributing.md"), "w") do io
# Point the `EditURL` at the source CONTRIBUTING.md
println(
io,
"""
```@meta
EditURL = "$(base_url)CONTRIBUTING.md"
```
""",
)
# Write the contents out below the meta block
for line in eachline(joinpath(dirname(@__DIR__), "CONTRIBUTING.md"))
println(io, line)
end
end
open(joinpath(generated_path, "index.md"), "w") do io
# Point the `EditURL` at the source README.md
println(
io,
"""
```@meta
EditURL = "$(base_url)README.md"
```
""",
)
# Write the contents out below the meta block
for line in eachline(joinpath(dirname(@__DIR__), "README.md"))
println(io, line)
end
end
makedocs(;
modules=[FrankWolfe],
sitename="FrankWolfe.jl",
format=Documenter.HTML(; prettyurls=get(ENV, "CI", nothing) == "true", collapselevel=1),
pages=[
"Home" => "index.md",
"How does it work?" => "basics.md",
"Advanced features" => "advanced.md",
"Examples" => [joinpath("examples", f) for f in file_list(DOCS_EXAMPLE_DIR, ".md")],
"API reference" =>
[joinpath("reference", f) for f in file_list(DOCS_REFERENCE_DIR, ".md")],
"Contributing" => "contributing.md",
],
)
deploydocs(; repo="github.com/ZIB-IOL/FrankWolfe.jl.git", push_preview=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 138 | import Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()
using TestEnv
Pkg.activate(dirname(@__DIR__))
Pkg.instantiate()
TestEnv.activate()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1707 | using FrankWolfe
import LinearAlgebra
include("../examples/plot_utils.jl")
n = Int(1e5)
k = 1000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = LinearAlgebra.norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
lmo = FrankWolfe.KSparseLMO(40, 1.0);
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
FrankWolfe.benchmark_oracles(x -> f(x), (str, x) -> grad!(str, x), () -> randn(n), lmo; k=100)
println("\n==> Short Step rule - if you know L.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_shortstep = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Adaptive if you do not know L.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_adaptive = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Agnostic if function is too expensive for adaptive.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_agnostic = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
data = [trajectory_shortstep, trajectory_adaptive, trajectory_agnostic]
label = ["short step", "adaptive", "agnostic"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1805 | using FrankWolfe
using Random
using LinearAlgebra
include("../examples/plot_utils.jl")
##############
# example to demonstrate additional numerical stability of first order adaptive line search
# try different sizes of n
##############
# n = Int(1e2)
# n = Int(3e2)
n = Int(5e2)
k = Int(1e3)
######
seed = 10
Random.seed!(seed)
const A = let
A = randn(n, n)
A' * A
end
@assert isposdef(A)
const y = Random.rand(Bool, n) * 0.6 .+ 0.3
function f(x)
d = x - y
return dot(d, A, d)
end
function grad!(storage, x)
mul!(storage, A, x)
return mul!(storage, A, y, -2, 2)
end
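# f(x) = (x - y)' A (x - y); grad! writes 2*A*x - 2*A*y into storage via two in-place mul! calls.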
# lmo = FrankWolfe.KSparseLMO(40, 1.0);
lmo = FrankWolfe.UnitSimplexOracle(1.0);
# lmo = FrankWolfe.ScaledBoundLInfNormBall(zeros(n),ones(n))
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
FrankWolfe.benchmark_oracles(x -> f(x), (str, x) -> grad!(str, x), () -> randn(n), lmo; k=100)
println("\n==> Adaptive (1-order) if you do not know L.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_adaptive_fo = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Adaptive (0-order) if you do not know L.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_adaptive_zo = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.AdaptiveZerothOrder(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
data = [trajectory_adaptive_zo, trajectory_adaptive_fo]
label = ["adaptive 0-order", "adaptive 1-order"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 837 | using FrankWolfe
n = Int(1e1)
xpi = rand(1:100, n)
total = sum(xpi)
xp = xpi .// total
f(x) = FrankWolfe.fast_dot(x - xp, x - xp)
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
vertices = 2 * rand(100, n) .- 1
lmo_nb = FrankWolfe.ScaledBoundL1NormBall(-ones(n), ones(n))
lmo_ball = FrankWolfe.KNormBallLMO(5, 1.0)
lmo_sparse = FrankWolfe.KSparseLMO(100, 1.0)
lmo_prob = FrankWolfe.ProbabilitySimplexOracle(1.0)
lmo_pairs = [(lmo_prob, lmo_sparse), (lmo_prob, lmo_ball), (lmo_prob, lmo_nb), (lmo_ball, lmo_nb)]
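# Run alternating linear minimization on each pair of feasible sets, using BPCG for the
# block-coordinate subproblems and updating all blocks in every round (FullUpdate).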
for pair in lmo_pairs
@time FrankWolfe.alternating_linear_minimization(
FrankWolfe.block_coordinate_frank_wolfe,
f,
grad!,
pair,
zeros(n);
update_order=FrankWolfe.FullUpdate(),
verbose=true,
update_step=FrankWolfe.BPCGStep(),
)
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1396 | using FrankWolfe
using LinearAlgebra
using JuMP
const MOI = JuMP.MOI
import GLPK
include("../examples/plot_utils.jl")
f(x) = 0.0
function grad!(storage, x)
@. storage = zero(x)
end
dim = 10
m = JuMP.Model(GLPK.Optimizer)
@variable(m, x[1:dim, 1:dim])
@constraint(m, sum(x * ones(dim, dim)) == 2)
@constraint(m, sum(x * I(dim)) <= 2)
@constraint(m, x .>= 0)
lmos = (FrankWolfe.SpectraplexLMO(1.0, dim), FrankWolfe.MathOptLMO(m.moi_backend))
x0 = (zeros(dim, dim), Matrix(I(dim) ./ dim))
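# Two blocks: the spectraplex (PSD matrices with unit trace) and the polytope modeled in JuMP above.
# Since f is identically zero, alternating linear minimization only reduces the distance between the
# block iterates, which is why the extra plot below is labeled "infeasibility".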
trajectories = []
for order in [FrankWolfe.FullUpdate(), FrankWolfe.CyclicUpdate(), FrankWolfe.StochasticUpdate(), FrankWolfe.DualGapOrder(), FrankWolfe.DualProgressOrder()]
_, _, _, _, _, traj_data = FrankWolfe.alternating_linear_minimization(
FrankWolfe.block_coordinate_frank_wolfe,
f,
grad!,
lmos,
x0;
update_order=order,
verbose=true,
trajectory=true,
update_step=FrankWolfe.BPCGStep(),
)
push!(trajectories, traj_data)
end
labels = ["Full", "Cyclic", "Stochastic", "DualGapOrder", "DualProgressOrder"]
println(trajectories[1][1])
fp = plot_trajectories(
trajectories,
labels,
legend_position=:best,
xscalelog=true,
reduce_size=true,
marker_shapes=[:dtriangle, :rect, :circle, :dtriangle, :rect, :circle],
extra_plot=true,
extra_plot_label="infeasibility",
)
display(fp)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2579 |
using FrankWolfe
using LinearAlgebra
n = Int(1e2);
k = n
f(x) = dot(x, x)
function grad!(storage, x)
@. storage = 2 * x
end
# pick feasible region
lmo = FrankWolfe.ProbabilitySimplexOracle{Rational{BigInt}}(1); # radius needs to be integer or rational
# compute some initial vertex
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
# benchmarking Oracles
FrankWolfe.benchmark_oracles(f, grad!, () -> rand(n), lmo; k=100)
# the algorithm runs in rational arithmetic even if the gradients and the function itself are not rational
# this is because the iterates are convex combinations of the rational vertices returned by the LMO, taken with rational step sizes
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
verbose=true,
memory_mode=FrankWolfe.OutplaceEmphasis(),
);
println("\nOutput type of solution: ", eltype(x))
# you can even run everything in rational arithmetic using the shortstep rule
# NOTE: in this case the gradient computation has to be rational as well
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2 // 1),
print_iter=k / 10,
verbose=true,
memory_mode=FrankWolfe.OutplaceEmphasis(),
);
println("\nOutput type of solution: ", eltype(x))
println("\nNote: the last step where we exactly close the gap. This is not an error. ")
fract = 1 // n
println(
"We have *exactly* computed the optimal solution with the $fract * (1, ..., 1) vector.\n",
)
println("x = $x")
####################################################################################################################
### APPROXIMATE CARATHEODORY WITH PLANTED SOLUTION
####################################################################################################################
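# The target xp is a rational point of the probability simplex, so the minimizer of ||x - xp||^2
# over the feasible region is xp itself and can be recovered exactly in rational arithmetic.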
rhs = 1
n = 40
k = 1e5
xpi = rand(big(1):big(100), n)
total = sum(xpi)
xp = xpi .// total
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
lmo = FrankWolfe.ProbabilitySimplexOracle{Rational{BigInt}}(rhs)
direction = rand(n)
x0 = FrankWolfe.compute_extreme_point(lmo, direction)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
)
println("\nOutput type of solution: ", eltype(x))
println("Computed solution: x = $x")
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1651 | using FrankWolfe
using LinearAlgebra
include("../examples/plot_utils.jl")
# n = Int(1e1)
n = Int(1e2)
k = Int(1e4)
xpi = rand(n);
total = sum(xpi);
const xp = xpi # ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
# test problem exercising active-set updates with the KSparseLMO
lmo = FrankWolfe.KSparseLMO(40, 1.0);
# lmo = FrankWolfe.ProbabilitySimplexOracle(1)
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
FrankWolfe.benchmark_oracles(f, grad!, () -> rand(n), lmo; k=100)
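# Three runs follow: vanilla FW, away-step FW (AFW), and AFW with gradient momentum (MAFW),
# the last one using out-of-place memory updates.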
x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
epsilon=1e-5,
trajectory=true,
);
x, v, primal, dual_gap, trajectory_away, active_set = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
epsilon=1e-5,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
away_steps=true,
trajectory=true,
);
x, v, primal, dual_gap, trajectory_away_outplace, active_set = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
epsilon=1e-5,
momentum=0.9,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=true,
away_steps=true,
trajectory=true,
);
data = [trajectory, trajectory_away, trajectory_away_outplace]
label = ["FW" "AFW" "MAFW"]
plot_trajectories(data, label)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1732 | using FrankWolfe
using LinearAlgebra
using Random
import GLPK
include("../examples/plot_utils.jl")
# s = rand(1:100)
s = 98
@info "Seed $s"
Random.seed!(s)
n = Int(1e2)
k = Int(2e4)
xpi = rand(n, n)
# total = sum(xpi)
const xp = xpi # / total
const normxp2 = dot(xp, xp)
# better for memory consumption as we do coordinate-wise ops
function cf(x, xp, normxp2)
return (normxp2 - 2dot(x, xp) + dot(x, x)) / n^2
end
function cgrad!(storage, x, xp)
return @. storage = 2 * (x - xp) / n^2
end
# BirkhoffPolytopeLMO via Hungarian Method
lmo_native = FrankWolfe.BirkhoffPolytopeLMO()
# BirkhoffPolytopeLMO realized via LP solver
lmo_moi = FrankWolfe.convert_mathopt(lmo_native, GLPK.Optimizer(), dimension=n)
# choose between lmo_native (= Hungarian Method) and lmo_moi (= LP formulation solved with GLPK)
lmo = lmo_native
# initial direction for first vertex
direction_mat = randn(n, n)
x0 = FrankWolfe.compute_extreme_point(lmo, direction_mat)
FrankWolfe.benchmark_oracles(
x -> cf(x, xp, normxp2),
(str, x) -> cgrad!(str, x, xp),
() -> randn(n, n),
lmo;
k=100,
)
# BPCG run
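# The quadratic active set encodes grad f(x) = A*x + b with A = 2I/n^2 and b = -2xp/n^2
# (matching cgrad! above), which lets BPCG reuse precomputed inner products with active vertices.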
@time x, v, primal, dual_gap, trajectoryBPCG, _ = FrankWolfe.blended_pairwise_conditional_gradient(
x -> cf(x, xp, normxp2),
(str, x) -> cgrad!(str, x, xp),
lmo,
FrankWolfe.ActiveSetQuadratic([(1.0, collect(x0))], 2I/n^2, -2xp/n^2); # surprisingly faster and more memory efficient with collect
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
verbose=true,
)
data = [trajectoryBPCG]
label = ["BPCG"]
plot_trajectories(data, label, reduce_size=true, marker_shapes=[:dtriangle])
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4131 | using FrankWolfe
using LinearAlgebra
using Random
using SparseArrays
import GLPK
# s = rand(1:100)
s = 98
@info "Seed $s"
Random.seed!(s)
n = Int(2e2)
k = Int(4e4)
# we artificially create a symmetric instance to illustrate the syntax
xpi = rand(n, n)
xpi .+= xpi'
xpi .+= reverse(xpi)
xpi ./= 4
const xp = xpi
const normxp2 = dot(xp, xp)
function cf(x, xp, normxp2)
return (normxp2 - 2dot(x, xp) + dot(x, x)) / n^2
end
function cgrad!(storage, x, xp)
return @. storage = 2 * (x - xp) / n^2
end
lmo_nat = FrankWolfe.BirkhoffPolytopeLMO()
x0 = FrankWolfe.compute_extreme_point(lmo_nat, randn(n, n))
@time x, v, primal, dual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
x -> cf(x, xp, normxp2),
(str, x) -> cgrad!(str, x, xp),
lmo_nat,
FrankWolfe.ActiveSetQuadratic([(1.0, x0)], 2I/n^2, -2xp/n^2);
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
print_iter=k / 10,
verbose=true,
)
# to accelerate the algorithm, we use the symmetry reduction described in example 12 of the documentation
# here the problem is invariant under mirror symmetry around the diagonal and the anti-diagonal
# each solution of the LMO can then be added to the active set together with its orbit
# on top of that, the effective dimension of the space is reduced
# the following function constructs the functions `reduce` and `inflate` needed for SymmetricLMO
# `reduce` maps a matrix to the invariant vector space
# `inflate` maps a vector in this space back to a matrix
# using `FrankWolfe.SymmetricArray` is a convenience to avoid reallocating the result of `inflate`
function build_reduce_inflate(p::Matrix{T}) where {T <: Number}
n = size(p, 1)
@assert n == size(p, 2) # square matrix
dimension = floor(Int, (n+1)^2 / 4) # reduced dimension
function reduce(A::AbstractMatrix{T}, lmo)
vec = Vector{T}(undef, dimension)
cnt = 0
@inbounds for i in 1:(n+1)÷2, j in i:n+1-i
cnt += 1
if i == j
if i + j == n+1
vec[cnt] = A[i, i]
else
vec[cnt] = (A[i, i] + A[n+1-i, n+1-i]) / sqrt(T(2))
end
else
if i + j == n+1
vec[cnt] = (A[i, j] + A[j, i]) / sqrt(T(2))
else
vec[cnt] = (A[i, j] + A[j, i] + A[n+1-i, n+1-j] + A[n+1-j, n+1-i]) / T(2)
end
end
end
return FrankWolfe.SymmetricArray(A, vec)
end
function inflate(x::FrankWolfe.SymmetricArray, lmo)
cnt = 0
@inbounds for i in 1:(n+1)÷2, j in i:n+1-i
cnt += 1
if i == j
if i + j == n+1
x.data[i, i] = x.vec[cnt]
else
x.data[i, i] = x.vec[cnt] / sqrt(T(2))
x.data[n+1-i, n+1-i] = x.data[i, j]
end
else
if i + j == n+1
x.data[i, j] = x.vec[cnt] / sqrt(T(2))
x.data[j, i] = x.data[i, j]
else
x.data[i, j] = x.vec[cnt] / 2
x.data[j, i] = x.data[i, j]
x.data[n+1-i, n+1-j] = x.data[i, j]
x.data[n+1-j, n+1-i] = x.data[i, j]
end
end
end
return x.data
end
return reduce, inflate
end
reduce, inflate = build_reduce_inflate(xpi)
const rxp = reduce(xpi, nothing)
@assert dot(rxp, rxp) ≈ normxp2 # should be correct thanks to the factors sqrt(2) and 2 in reduce and inflate
lmo_sym = FrankWolfe.SymmetricLMO(lmo_nat, reduce, inflate)
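# The symmetric LMO operates in the reduced coordinates: a reduced direction is inflated to a full
# matrix, the wrapped LMO is called, and the returned vertex is mapped back with `reduce`, which
# effectively averages it over its symmetry orbit.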
rx0 = FrankWolfe.compute_extreme_point(lmo_sym, reduce(sparse(randn(n, n)), nothing))
@time rx, rv, rprimal, rdual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
x -> cf(x, rxp, normxp2),
(str, x) -> cgrad!(str, x, rxp),
lmo_sym,
FrankWolfe.ActiveSetQuadratic([(1.0, rx0)], 2I/n^2, -2rxp/n^2);
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
print_iter=k / 10,
verbose=true,
)
println()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4492 | using FrankWolfe
using LinearAlgebra
using Random
using SparseArrays
include("../examples/plot_utils.jl")
n = 1000
k = 10000
s = rand(1:100)
@info "Seed $s"
# this seed produces numerical issues with Float64 with the k-sparse 100 lmo / for testing
s = 41
Random.seed!(s)
matrix = rand(n, n)
hessian = transpose(matrix) * matrix
linear = rand(n)
f(x) = dot(linear, x) + 0.5 * transpose(x) * hessian * x
function grad!(storage, x)
return storage .= linear + hessian * x
end
L = eigmax(hessian)
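# The largest Hessian eigenvalue is a valid smoothness constant for f and is passed below as the
# initial estimate of the adaptive line search.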
# Run over the probability simplex and call the LMO to get an initial feasible point
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0);
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
target_tolerance = 1e-5
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_accel_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=true,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=false,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_convex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
data = [trajectoryBCG_accel_simplex, trajectoryBCG_simplex, trajectoryBCG_convex]
label = ["BCG (accel simplex)", "BCG (simplex)", "BCG (convex)"]
plot_trajectories(data, label, xscalelog=true)
matrix = rand(n, n)
hessian = transpose(matrix) * matrix
linear = rand(n)
f(x) = dot(linear, x) + 0.5 * transpose(x) * hessian * x + 10
function grad!(storage, x)
return storage .= linear + hessian * x
end
L = eigmax(hessian)
# Run over the K-sparse polytope
lmo = FrankWolfe.KSparseLMO(100, 100.0)
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_accel_simplex,_ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=true,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=false,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_convex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBPCG, _ = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
)
data = [trajectoryBCG_accel_simplex, trajectoryBCG_simplex, trajectoryBCG_convex, trajectoryBPCG]
label = ["BCG (accel simplex)", "BCG (simplex)", "BCG (convex)", "BPCG"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3620 | #=
Example demonstrating sparsity control by means of the "K"-factor passed to the lazy AFW variant
A larger K >= 1 favors sparsity by favoring optimization over the current active set rather than
adding a new FW vertex.
The default for AFW is K = 2.0, exposed as the `lazy_tolerance` keyword argument; see the sketch after the lazy AFW run below.
=#
using FrankWolfe
using LinearAlgebra
using Random
include("../examples/plot_utils.jl")
n = Int(1e3)
k = 10000
s = rand(1:100)
@info "Seed $s"
Random.seed!(s)
xpi = rand(n);
total = sum(xpi);
# here the optimal solution lies in the interior if you want an optimal solution on a face and not the interior use:
# const xp = xpi;
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
const lmo = FrankWolfe.KSparseLMO(5, 1.0)
## other LMOs to try
# lmo_big = FrankWolfe.KSparseLMO(100, big"1.0")
# lmo = FrankWolfe.LpNormLMO{Float64,5}(1.0)
# lmo = FrankWolfe.ProbabilitySimplexOracle(1.0);
# lmo = FrankWolfe.UnitSimplexOracle(1.0);
const x00 = FrankWolfe.compute_extreme_point(lmo, rand(n))
## example with BirkhoffPolytopeLMO - uses square matrix.
# const lmo = FrankWolfe.BirkhoffPolytopeLMO()
# cost = rand(n, n)
# const x00 = FrankWolfe.compute_extreme_point(lmo, cost)
function build_callback(trajectory_arr)
return function callback(state, active_set, args...)
return push!(trajectory_arr, (FrankWolfe.callback_state(state)..., length(active_set)))
end
end
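# The callback records the standard state tuple plus the current active-set size, which is what
# plot_sparsity visualizes at the end of the script.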
FrankWolfe.benchmark_oracles(f, grad!, () -> randn(n), lmo; k=100)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_shortstep = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
trajectory_afw = []
callback = build_callback(trajectory_afw)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
callback=callback,
);
trajectory_lafw = []
callback = build_callback(trajectory_lafw)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
trajectory=true,
callback=callback,
);
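# The K-factor described in the header is exposed as the `lazy_tolerance` keyword; a minimal sketch
# (commented out, value chosen purely for illustration) that favors even sparser iterates:
#
# @time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
#     f, grad!, lmo, deepcopy(x00);
#     max_iteration=k, lazy=true, lazy_tolerance=4.0,
#     line_search=FrankWolfe.Adaptive(), memory_mode=FrankWolfe.InplaceEmphasis(),
# )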
trajectoryBPCG = []
callback = build_callback(trajectoryBPCG)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
callback=callback,
);
trajectoryLBPCG = []
callback = build_callback(trajectoryLBPCG)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
trajectory=true,
callback=callback,
);
# Reduction primal/dual error vs. sparsity of solution
dataSparsity = [trajectory_afw, trajectory_lafw, trajectoryBPCG, trajectoryLBPCG]
labelSparsity = ["AFW", "LAFW", "BPCG", "LBPCG"]
plot_sparsity(dataSparsity, labelSparsity, legend_position=:topright)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 657 | using FiniteDifferences
using LinearAlgebra: norm
"""
Check if the gradient using finite differences matches the grad! provided.
"""
function check_gradients(grad!, f, gradient, num_tests=10, tolerance=1.0e-5)
for i in 1:num_tests
random_point = similar(gradient)
random_point .= rand(length(gradient))
grad!(gradient, random_point)
if norm(grad(central_fdm(5, 1), f, random_point)[1] - gradient) > tolerance
@warn "There is a noticeable difference between the gradient provided and
the gradient computed using finite differences.:\n$(norm(grad(central_fdm(5, 1), f, random_point)[1] - gradient))"
end
end
end
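# A minimal usage sketch (the quadratic objective and names below are illustrative only):
#
# f_example(x) = sum(abs2, x)
# grad_example!(storage, x) = (storage .= 2 .* x)
# check_gradients(grad_example!, f_example, zeros(5))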
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4726 | # # Visualization of Frank-Wolfe running on a 2-dimensional polytope
# This example provides an intuitive view of the Frank-Wolfe algorithm
# by running it on a polyhedral set with a quadratic function.
# The Linear Minimization Oracle (LMO) corresponds to a call to a generic simplex solver from `MathOptInterface.jl` (MOI).
# ## Import and setup
# We first import the necessary packages, including Polyhedra to visualize the feasible set.
using LinearAlgebra
using FrankWolfe
import MathOptInterface
const MOI = MathOptInterface
using GLPK
using Polyhedra
using Plots
# We can then define the objective function,
# here the squared distance to a point in the plane, and its in-place gradient.
n = 2
y = [3.2, 0.5]
function f(x)
return 1 / 2 * norm(x - y)^2
end
function grad!(storage, x)
@. storage = x - y
end
# ## Custom callback
#
# FrankWolfe.jl lets users define custom callbacks to record information about each iteration.
# In that case, the callback will copy the current iterate `x`, the current vertex `v`, and the current step size `gamma`
# to an array thanks to a closure.
# We then declare the array and the callback over this array.
# Each iteration will then push to this array.
function build_callback(trajectory_arr)
return function callback(state, args...)
return push!(trajectory_arr, (copy(state.x), copy(state.v), state.gamma))
end
end
iterates_information_vector = []
callback = build_callback(iterates_information_vector)
# ## Creating the Linear Minimization Oracle
# The LMO is defined as a call to a linear optimization solver; each iteration resets the objective and calls the solver.
# The linear constraints must be defined only once at the beginning and remain identical along iterations.
# We use here MathOptInterface directly but the constraints could also be defined with JuMP or Convex.jl.
o = GLPK.Optimizer()
x = MOI.add_variables(o, n)
## −x + y ≤ 2
c1 = MOI.add_constraint(o, -1.0x[1] + x[2], MOI.LessThan(2.0))
## x + 2 y ≤ 4
c2 = MOI.add_constraint(o, x[1] + 2.0x[2], MOI.LessThan(4.0))
## −2 x − y ≤ 1
c3 = MOI.add_constraint(o, -2.0x[1] - x[2], MOI.LessThan(1.0))
## x − 2 y ≤ 2
c4 = MOI.add_constraint(o, x[1] - 2.0x[2], MOI.LessThan(2.0))
## x ≤ 2
c5 = MOI.add_constraint(o, x[1] + 0.0x[2], MOI.LessThan(2.0))
# The LMO is then built by wrapping the current MOI optimizer
lmo_moi = FrankWolfe.MathOptLMO(o)
# ## Calling Frank-Wolfe
# We can now compute an initial starting point from any direction
# and call the Frank-Wolfe algorithm.
# Note that we copy `x0` before passing it to the algorithm because it is modified in-place by `frank_wolfe`.
x0 = FrankWolfe.compute_extreme_point(lmo_moi, zeros(n))
xfinal, vfinal, primal_value, dual_gap, traj_data = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_moi,
copy(x0),
line_search=FrankWolfe.Adaptive(),
max_iteration=10,
epsilon=1e-8,
callback=callback,
verbose=true,
print_iter=1,
)
# We now collect the iterates and vertices across iterations.
iterates = Vector{Vector{Float64}}()
push!(iterates, x0)
vertices = Vector{Vector{Float64}}()
for s in iterates_information_vector
push!(iterates, s[1])
push!(vertices, s[2])
end
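# `iterates` now holds x_0 up to the final iterate and `vertices` the Frank-Wolfe vertex selected at each recorded iteration.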
# ## Plotting the algorithm run
# We define another method for `f` adapted to plot its contours.
function f(x1, x2)
x = [x1, x2]
return f(x)
end
xlist = collect(range(-1, 3, step=0.2))
ylist = collect(range(-1, 3, step=0.2))
X = repeat(reshape(xlist, 1, :), length(ylist), 1)
Y = repeat(ylist, 1, length(xlist))
# The feasible space is represented using Polyhedra.
h =
HalfSpace([-1, 1], 2) ∩ HalfSpace([1, 2], 4) ∩ HalfSpace([-2, -1], 1) ∩ HalfSpace([1, -2], 2) ∩
HalfSpace([1, 0], 2)
p = polyhedron(h)
p1 = contour(xlist, ylist, f, fill=true, line_smoothing=0.85)
plot(p1, opacity=0.5)
plot!(
p,
ratio=:equal,
opacity=0.5,
label="feasible region",
framestyle=:zerolines,
legend=true,
color=:blue,
);
# Finally, we add all iterates and vertices to the plot.
colors = ["gold", "purple", "darkorange2", "firebrick3"]
iterates = unique!(iterates)
for i in 1:3
scatter!(
[iterates[i][1]],
[iterates[i][2]],
label=string("x_", i - 1),
markersize=6,
color=colors[i],
)
end
scatter!(
[last(iterates)[1]],
[last(iterates)[2]],
label=string("x_", length(iterates) - 1),
markersize=6,
color=last(colors),
)
# plot chosen vertices
scatter!([vertices[1][1]], [vertices[1][2]], m=:diamond, markersize=6, color=colors[1], label="v_1")
scatter!(
[vertices[2][1]],
[vertices[2][2]],
m=:diamond,
markersize=6,
color=colors[2],
label="v_2",
legend=:outerleft,
colorbar=true,
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3863 | # # Comparison with MathOptInterface on a Probability Simplex
# In this example, we project a random point onto a probability simplex with the Frank-Wolfe algorithm using
# either the specialized LMO defined in the package or a generic LP formulation using `MathOptInterface.jl` (MOI) and
# `GLPK` as underlying LP solver.
# It can be found as Example 4.4 [in the paper](https://arxiv.org/abs/2104.06675).
using FrankWolfe
using LinearAlgebra
using LaTeXStrings
using Plots
using JuMP
const MOI = JuMP.MOI
import GLPK
n = Int(1e3)
k = 10000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
return nothing
end
lmo_radius = 2.5
lmo = FrankWolfe.ProbabilitySimplexOracle(lmo_radius)
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
gradient = collect(x00)
x_lmo, v, primal, dual_gap, trajectory_lmo = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
trajectory=true,
);
# Create a MathOptInterface Optimizer and build the same linear constraints:
o = GLPK.Optimizer()
x = MOI.add_variables(o, n)
for xi in x
MOI.add_constraint(o, xi, MOI.GreaterThan(0.0))
end
MOI.add_constraint(
o,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, x), 0.0),
MOI.EqualTo(lmo_radius),
)
lmo_moi = FrankWolfe.MathOptLMO(o)
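# Both oracles describe the same scaled probability simplex; only the implementation differs
# (closed-form oracle vs. a generic LP solved by GLPK).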
x, v, primal, dual_gap, trajectory_moi = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_moi,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
trajectory=true,
);
# Alternatively, we can use one of the modelling interfaces based on `MOI` to formulate the LP.
# The following example builds the same set of constraints using `JuMP`:
m = JuMP.Model(GLPK.Optimizer)
@variable(m, y[1:n] ≥ 0)
@constraint(m, sum(y) == lmo_radius)
lmo_jump = FrankWolfe.MathOptLMO(m.moi_backend)
x, v, primal, dual_gap, trajectory_jump = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_jump,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
trajectory=true,
);
x_lmo, v, primal, dual_gap, trajectory_lmo_blas = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x00,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=false,
trajectory=true,
);
x, v, primal, dual_gap, trajectory_jump_blas = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_jump,
x00,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=false,
trajectory=true,
);
# We can now plot the results
iteration_list = [[x[1] + 1 for x in trajectory_lmo], [x[1] + 1 for x in trajectory_moi]]
time_list = [[x[5] for x in trajectory_lmo], [x[5] for x in trajectory_moi]]
primal_gap_list = [[x[2] for x in trajectory_lmo], [x[2] for x in trajectory_moi]]
dual_gap_list = [[x[4] for x in trajectory_lmo], [x[4] for x in trajectory_moi]]
label = [L"\textrm{Closed-form LMO}", L"\textrm{MOI LMO}"]
plot_results(
[primal_gap_list, primal_gap_list, dual_gap_list, dual_gap_list],
[iteration_list, time_list, iteration_list, time_list],
label,
["", "", L"\textrm{Iteration}", L"\textrm{Time}"],
[L"\textrm{Primal Gap}", "", L"\textrm{Dual Gap}", ""],
xscalelog=[:log, :identity, :log, :identity],
yscalelog=[:log, :log, :log, :log],
legend_position=[:bottomleft, nothing, nothing, nothing],
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 10401 | # # Polynomial Regression
# The following example features the LMO for polynomial regression on the ``\ell_1`` norm ball. Given input/output pairs ``\{x_i,y_i\}_{i=1}^N`` and sparse coefficients ``c_j``, where
# ```math
# y_i=\sum_{j=1}^m c_j f_j(x_i)
# ```
# and ``f_j: \mathbb{R}^n\to\mathbb{R}``, the task is to recover those ``c_j`` that are non-zero alongside their corresponding values. Under certain assumptions,
# this problem can be convexified into
# ```math
# \min_{c\in\mathcal{C}}||y-Ac||^2
# ```
# for a convex set ``\mathcal{C}``. It can also be found as example 4.1 [in the paper](https://arxiv.org/pdf/2104.06675.pdf).
# In order to evaluate the polynomial, we generate 500 training points and 1000 test points
# ``\{x_i\}_{i=1}^N`` from centered multivariate Gaussians, with which we compute the output variables ``\{y_i\}_{i=1}^N``. The
# outputs are contaminated with additive Gaussian noise before fitting.
# We run the [`away_frank_wolfe`](@ref) and [`blended_conditional_gradient`](@ref) algorithms, and compare them to Projected Gradient Descent using a
# smoothness estimate. We will evaluate the output solution on test points drawn in a similar manner as the training points.
using FrankWolfe
using LinearAlgebra
import Random
using MultivariatePolynomials
using DynamicPolynomials
using Plots
using LaTeXStrings
const N = 10
DynamicPolynomials.@polyvar X[1:15]
const max_degree = 4
coefficient_magnitude = 10
noise_magnitude = 1
const var_monomials = MultivariatePolynomials.monomials(X, 0:max_degree)
Random.seed!(42)
const all_coeffs = map(var_monomials) do m
d = MultivariatePolynomials.degree(m)
return coefficient_magnitude * rand() .* (rand() .> 0.95 * d / max_degree)
end
const true_poly = dot(all_coeffs, var_monomials)
const training_data = map(1:500) do _
x = 0.1 * randn(N)
y = MultivariatePolynomials.subs(true_poly, Pair(X, x)) + noise_magnitude * randn()
return (x, y.a[1])
end
const extended_training_data = map(training_data) do (x, y)
x_ext = MultivariatePolynomials.coefficient.(MultivariatePolynomials.subs.(var_monomials, X => x))
return (x_ext, y)
end
const test_data = map(1:1000) do _
x = 0.4 * randn(N)
y = MultivariatePolynomials.subs(true_poly, Pair(X, x)) + noise_magnitude * randn()
return (x, y.a[1])
end
const extended_test_data = map(test_data) do (x, y)
x_ext = MultivariatePolynomials.coefficient.(MultivariatePolynomials.subs.(var_monomials, X => x))
return (x_ext, y)
end
function f(coefficients)
return 0.5 / length(extended_training_data) * sum(extended_training_data) do (x, y)
return (dot(coefficients, x) - y)^2
end
end
function f_test(coefficients)
return 0.5 / length(extended_test_data) * sum(extended_test_data) do (x, y)
return (dot(coefficients, x) - y)^2
end
end
function coefficient_errors(coeffs)
return 0.5 * sum(eachindex(all_coeffs)) do idx
return (all_coeffs[idx] - coeffs[idx])^2
end
end
function grad!(storage, coefficients)
storage .= 0
for (x, y) in extended_training_data
p_i = dot(coefficients, x) - y
@. storage += x * p_i
end
storage ./= length(training_data)
return nothing
end
function build_callback(trajectory_arr)
return function callback(state, args...)
return push!(
trajectory_arr,
(FrankWolfe.callback_state(state)..., f_test(state.x), coefficient_errors(state.x)),
)
end
end
gradient = similar(all_coeffs)
max_iter = 10000
random_initialization_vector = rand(length(all_coeffs))
lmo = FrankWolfe.LpNormLMO{1}(0.95 * norm(all_coeffs, 1))
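# The radius is set to 95% of the ℓ1 norm of the true coefficients, so the ground-truth
# coefficient vector itself lies slightly outside the feasible region.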
## Estimating smoothness parameter
num_pairs = 1000
L_estimate = -Inf
gradient_aux = similar(gradient)
for i in 1:num_pairs # hide
global L_estimate # hide
x = compute_extreme_point(lmo, randn(size(all_coeffs))) # hide
y = compute_extreme_point(lmo, randn(size(all_coeffs))) # hide
grad!(gradient, x) # hide
grad!(gradient_aux, y) # hide
new_L = norm(gradient - gradient_aux) / norm(x - y) # hide
if new_L > L_estimate # hide
L_estimate = new_L # hide
end # hide
end # hide
function projnorm1(x, τ)
n = length(x)
if norm(x, 1) ≤ τ
return x
end
u = abs.(x)
## simplex projection
bget = false
s_indices = sortperm(u, rev=true)
tsum = zero(τ)
@inbounds for i in 1:n-1
tsum += u[s_indices[i]]
tmax = (tsum - τ) / i
if tmax ≥ u[s_indices[i+1]]
bget = true
break
end
end
if !bget
tmax = (tsum + u[s_indices[n]] - τ) / n
end
@inbounds for i in 1:n
u[i] = max(u[i] - tmax, 0)
u[i] *= sign(x[i])
end
return u
end
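# `projnorm1` projects onto the ℓ1 ball of radius τ by applying the classical sort-based simplex
# projection to the absolute values and restoring the signs afterwards.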
xgd = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector) # hide
training_gd = Float64[] # hide
test_gd = Float64[] # hide
coeff_error = Float64[] # hide
time_start = time_ns() # hide
gd_times = Float64[] # hide
for iter in 1:max_iter # hide
global xgd # hide
grad!(gradient, xgd) # hide
xgd = projnorm1(xgd - gradient / L_estimate, lmo.right_hand_side) # hide
push!(training_gd, f(xgd)) # hide
push!(test_gd, f_test(xgd)) # hide
push!(coeff_error, coefficient_errors(xgd)) # hide
push!(gd_times, (time_ns() - time_start) * 1e-9) # hide
end # hide
x00 = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector) # hide
x0 = deepcopy(x00) # hide
trajectory_lafw = [] # hide
callback = build_callback(trajectory_lafw) # hide
x_lafw, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe( # hide
f, # hide
grad!, # hide
lmo, # hide
x0, # hide
max_iteration=max_iter, # hide
line_search=FrankWolfe.Adaptive(L_est=L_estimate), # hide
print_iter=max_iter ÷ 10, # hide
memory_mode=FrankWolfe.InplaceEmphasis(), # hide
verbose=false, # hide
lazy=true, # hide
gradient=gradient, # hide
callback=callback, # hide
) # hide
trajectory_bcg = [] # hide
callback = build_callback(trajectory_bcg) # hide
x0 = deepcopy(x00) # hide
x_bcg, v, primal, dual_gap, _, _ = FrankWolfe.blended_conditional_gradient( # hide
f, # hide
grad!, # hide
lmo, # hide
x0, # hide
max_iteration=max_iter, # hide
line_search=FrankWolfe.Adaptive(L_est=L_estimate), # hide
print_iter=max_iter ÷ 10, # hide
memory_mode=FrankWolfe.InplaceEmphasis(), # hide
verbose=false, # hide
weight_purge_threshold=1e-10, # hide
callback=callback, # hide
) # hide
x0 = deepcopy(x00) # hide
trajectory_lafw_ref = [] # hide
callback = build_callback(trajectory_lafw_ref) # hide
_, _, primal_ref, _, _ = FrankWolfe.away_frank_wolfe( # hide
f, # hide
grad!, # hide
lmo, # hide
x0, # hide
max_iteration=2 * max_iter, # hide
line_search=FrankWolfe.Adaptive(L_est=L_estimate), # hide
print_iter=max_iter ÷ 10, # hide
memory_mode=FrankWolfe.InplaceEmphasis(), # hide
verbose=false, # hide
lazy=true, # hide
gradient=gradient, # hide
callback=callback, # hide
) # hide
for i in 1:num_pairs
global L_estimate
x = compute_extreme_point(lmo, randn(size(all_coeffs)))
y = compute_extreme_point(lmo, randn(size(all_coeffs)))
grad!(gradient, x)
grad!(gradient_aux, y)
new_L = norm(gradient - gradient_aux) / norm(x - y)
if new_L > L_estimate
L_estimate = new_L
end
end
# We can now perform projected gradient descent:
xgd = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector)
training_gd = Float64[]
test_gd = Float64[]
coeff_error = Float64[]
time_start = time_ns()
gd_times = Float64[]
for iter in 1:max_iter
global xgd
grad!(gradient, xgd)
xgd = projnorm1(xgd - gradient / L_estimate, lmo.right_hand_side)
push!(training_gd, f(xgd))
push!(test_gd, f_test(xgd))
push!(coeff_error, coefficient_errors(xgd))
push!(gd_times, (time_ns() - time_start) * 1e-9)
end
x00 = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector)
x0 = deepcopy(x00)
trajectory_lafw = []
callback = build_callback(trajectory_lafw)
x_lafw, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
lazy=true,
gradient=gradient,
callback=callback,
)
trajectory_bcg = []
callback = build_callback(trajectory_bcg)
x0 = deepcopy(x00)
x_bcg, v, primal, dual_gap, _, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
weight_purge_threshold=1e-10,
callback=callback,
)
x0 = deepcopy(x00)
trajectory_lafw_ref = []
callback = build_callback(trajectory_lafw_ref)
_, _, primal_ref, _, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=2 * max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=false,
lazy=true,
gradient=gradient,
callback=callback,
)
iteration_list = [
[x[1] + 1 for x in trajectory_lafw],
[x[1] + 1 for x in trajectory_bcg],
collect(eachindex(training_gd)),
]
time_list = [[x[5] for x in trajectory_lafw], [x[5] for x in trajectory_bcg], gd_times]
primal_list = [
[x[2] - primal_ref for x in trajectory_lafw],
[x[2] - primal_ref for x in trajectory_bcg],
[x - primal_ref for x in training_gd],
]
test_list = [[x[6] for x in trajectory_lafw], [x[6] for x in trajectory_bcg], test_gd]
label = [L"\textrm{L-AFW}", L"\textrm{BCG}", L"\textrm{GD}"]
coefficient_error_values =
[[x[7] for x in trajectory_lafw], [x[7] for x in trajectory_bcg], coeff_error]
plot_results(
[primal_list, primal_list, test_list, test_list],
[iteration_list, time_list, iteration_list, time_list],
label,
[L"\textrm{Iteration}", L"\textrm{Time}", L"\textrm{Iteration}", L"\textrm{Time}"],
[L"\textrm{Primal Gap}", L"\textrm{Primal Gap}", L"\textrm{Test loss}", L"\textrm{Test loss}"],
xscalelog=[:log, :identity, :log, :identity],
legend_position=[:bottomleft, nothing, nothing, nothing],
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 8381 | # # Matrix Completion
# We present another example: matrix completion. The idea is, given a partially observed matrix ``Y\in\mathbb{R}^{m\times n}``, to find
# ``X\in\mathbb{R}^{m\times n}`` minimizing the sum of squared errors on the observed entries while 'completing' the matrix ``Y``, i.e. filling the unobserved
# entries to match ``Y`` as well as possible. A detailed explanation can be found in section 4.2 of
# [the paper](https://arxiv.org/pdf/2104.06675.pdf).
# We will try to solve
# ```math
# \min_{||X||_*\le \tau} \sum_{(i,j)\in\mathcal{I}} (X_{i,j}-Y_{i,j})^2,
# ```
# where ``\tau>0``, ``||X||_*`` is the nuclear norm, and ``\mathcal{I}`` denotes the indices of the observed entries. We will use [`FrankWolfe.NuclearNormLMO`](@ref) and compare our
# Frank-Wolfe implementation with a Projected Gradient Descent (PGD) algorithm which, after each gradient descent step, projects the iterates back onto the nuclear
# norm ball. We use a movielens dataset for comparison.
using FrankWolfe
using ZipFile, DataFrames, CSV
using Random
using Plots
using Profile
import Arpack
using SparseArrays, LinearAlgebra
using LaTeXStrings
temp_zipfile = download("http://files.grouplens.org/datasets/movielens/ml-latest-small.zip")
zarchive = ZipFile.Reader(temp_zipfile)
movies_file = zarchive.files[findfirst(f -> occursin("movies", f.name), zarchive.files)]
movies_frame = CSV.read(movies_file, DataFrame)
ratings_file = zarchive.files[findfirst(f -> occursin("ratings", f.name), zarchive.files)]
ratings_frame = CSV.read(ratings_file, DataFrame)
users = unique(ratings_frame[:, :userId])
movies = unique(ratings_frame[:, :movieId])
@assert users == eachindex(users)
movies_revert = zeros(Int, maximum(movies))
for (idx, m) in enumerate(movies)
movies_revert[m] = idx
end
movies_indices = [movies_revert[idx] for idx in ratings_frame[:, :movieId]]
const rating_matrix = sparse(
ratings_frame[:, :userId],
movies_indices,
ratings_frame[:, :rating],
length(users),
length(movies),
)
missing_rate = 0.05
Random.seed!(42)
const missing_ratings = Tuple{Int,Int}[]
const present_ratings = Tuple{Int,Int}[]
let
(I, J, V) = SparseArrays.findnz(rating_matrix)
for idx in eachindex(I)
if V[idx] > 0
if rand() <= missing_rate
push!(missing_ratings, (I[idx], J[idx]))
else
push!(present_ratings, (I[idx], J[idx]))
end
end
end
end
function f(X)
r = 0.0
for (i, j) in present_ratings
r += 0.5 * (X[i, j] - rating_matrix[i, j])^2
end
return r
end
function grad!(storage, X)
storage .= 0
for (i, j) in present_ratings
storage[i, j] = X[i, j] - rating_matrix[i, j]
end
return nothing
end
function test_loss(X)
r = 0.0
for (i, j) in missing_ratings
r += 0.5 * (X[i, j] - rating_matrix[i, j])^2
end
return r
end
function project_nuclear_norm_ball(X; radius=1.0)
U, sing_val, Vt = svd(X)
if (sum(sing_val) <= radius)
return X, -norm_estimation * U[:, 1] * Vt[:, 1]'
end
sing_val = FrankWolfe.projection_simplex_sort(sing_val, s=radius)
return U * Diagonal(sing_val) * Vt', -norm_estimation * U[:, 1] * Vt[:, 1]'
end
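# `project_nuclear_norm_ball` projects X onto the nuclear-norm ball of the given radius by projecting
# its singular values onto the simplex; the second return value is a scaled rank-one matrix built from
# the leading singular vectors.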
norm_estimation = 10 * Arpack.svds(rating_matrix, nsv=1, ritzvec=false)[1].S[1]
const lmo = FrankWolfe.NuclearNormLMO(norm_estimation)
const x0 = FrankWolfe.compute_extreme_point(lmo, ones(size(rating_matrix)))
const k = 10
gradient = spzeros(size(x0)...)
gradient_aux = spzeros(size(x0)...)
function build_callback(trajectory_arr)
return function callback(state, args...)
return push!(trajectory_arr, (FrankWolfe.callback_state(state)..., test_loss(state.x)))
end
end
# The smoothness constant is estimated:
num_pairs = 100
L_estimate = -Inf
for i in 1:num_pairs
global L_estimate
u1 = rand(size(x0, 1))
u1 ./= sum(u1)
u1 .*= norm_estimation
v1 = rand(size(x0, 2))
v1 ./= sum(v1)
x = FrankWolfe.RankOneMatrix(u1, v1)
u2 = rand(size(x0, 1))
u2 ./= sum(u2)
u2 .*= norm_estimation
v2 = rand(size(x0, 2))
v2 ./= sum(v2)
y = FrankWolfe.RankOneMatrix(u2, v2)
grad!(gradient, x)
grad!(gradient_aux, y)
new_L = norm(gradient - gradient_aux) / norm(x - y)
if new_L > L_estimate
L_estimate = new_L
end
end
# We can now perform projected gradient descent:
xgd = Matrix(x0)
function_values = Float64[]
timing_values = Float64[]
function_test_values = Float64[]
ls = FrankWolfe.Backtracking()
ls_storage = similar(xgd)
time_start = time_ns()
for _ in 1:k
f_val = f(xgd)
push!(function_values, f_val)
push!(function_test_values, test_loss(xgd))
push!(timing_values, (time_ns() - time_start) / 1e9)
@info f_val
grad!(gradient, xgd)
xgd_new, vertex = project_nuclear_norm_ball(xgd - gradient / L_estimate, radius=norm_estimation)
gamma = FrankWolfe.perform_line_search(
ls,
1,
f,
grad!,
gradient,
xgd,
xgd - xgd_new,
1.0,
ls_storage,
FrankWolfe.InplaceEmphasis(),
)
@. xgd -= gamma * (xgd - xgd_new)
end
trajectory_arr_fw = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_fw)
xfin, _, _, _, traj_data = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=10 * k,
print_iter=k / 10,
verbose=false,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
trajectory_arr_lazy = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_lazy)
xlazy, _, _, _, _ = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=10 * k,
print_iter=k / 10,
verbose=false,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
trajectory_arr_lazy_ref = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_lazy_ref)
xlazy, _, _, _, _ = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=50 * k,
print_iter=k / 10,
verbose=false,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
fw_test_values = getindex.(trajectory_arr_fw, 6)
lazy_test_values = getindex.(trajectory_arr_lazy, 6)
results = Dict(
"svals_gd" => svdvals(xgd),
"svals_fw" => svdvals(xfin),
"svals_lcg" => svdvals(xlazy),
"fw_test_values" => fw_test_values,
"lazy_test_values" => lazy_test_values,
"trajectory_arr_fw" => trajectory_arr_fw,
"trajectory_arr_lazy" => trajectory_arr_lazy,
"function_values_gd" => function_values,
"function_values_test_gd" => function_test_values,
"timing_values_gd" => timing_values,
"trajectory_arr_lazy_ref" => trajectory_arr_lazy_ref,
)
ref_optimum = results["trajectory_arr_lazy_ref"][end][2]
iteration_list = [
[x[1] + 1 for x in results["trajectory_arr_fw"]],
[x[1] + 1 for x in results["trajectory_arr_lazy"]],
collect(1:1:length(results["function_values_gd"])),
]
time_list = [
[x[5] for x in results["trajectory_arr_fw"]],
[x[5] for x in results["trajectory_arr_lazy"]],
results["timing_values_gd"],
]
primal_gap_list = [
[x[2] - ref_optimum for x in results["trajectory_arr_fw"]],
[x[2] - ref_optimum for x in results["trajectory_arr_lazy"]],
[x - ref_optimum for x in results["function_values_gd"]],
]
test_list =
[results["fw_test_values"], results["lazy_test_values"], results["function_values_test_gd"]]
label = [L"\textrm{FW}", L"\textrm{L-CG}", L"\textrm{GD}"]
plot_results(
[primal_gap_list, primal_gap_list, test_list, test_list],
[iteration_list, time_list, iteration_list, time_list],
label,
[L"\textrm{Iteration}", L"\textrm{Time}", L"\textrm{Iteration}", L"\textrm{Time}"],
[
L"\textrm{Primal Gap}",
L"\textrm{Primal Gap}",
L"\textrm{Test Error}",
L"\textrm{Test Error}",
],
xscalelog=[:log, :identity, :log, :identity],
legend_position=[:bottomleft, nothing, nothing, nothing],
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2655 | # # Exact Optimization with Rational Arithmetic
# This example can be found in section 4.3 [in the paper](https://arxiv.org/pdf/2104.06675.pdf).
# The package allows for exact optimization with rational arithmetic. For this, it suffices to set up the LMO
# to be rational and choose an appropriate step-size rule as detailed below. For the LMOs included in the
# package, this simply means initializing the radius with a rational-compatible element type, e.g., `1`, rather
# than a floating-point number, e.g., `1.0`. Given that numerators and denominators can become quite large in
# rational arithmetic, it is strongly advised to base the used rationals on extended-precision integer types such
# as `BigInt`, i.e., we use `Rational{BigInt}`.
# The second requirement ensuring that the computation runs in rational arithmetic is
# a rational-compatible step-size rule. The most basic step-size rule compatible with rational optimization is
# the agnostic step-size rule with ``\gamma_t = 2/(2 + t)``. With this step-size rule, the gradient does not even need to
# be rational as long as the atom computed by the LMO is of a rational type. Assuming these requirements are
# met, all iterates and the computed solution will then be rational.
using FrankWolfe
using LinearAlgebra
n = 100
k = n
x = fill(big(1) // 100, n)
f(x) = dot(x, x)
function grad!(storage, x)
@. storage = 2 * x
end
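# The objective is the squared Euclidean norm; over the probability simplex its unique minimizer is the
# barycenter (1/n, ..., 1/n), which is exactly representable in rational arithmetic.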
# pick feasible region
# radius needs to be integer or rational
lmo = FrankWolfe.ProbabilitySimplexOracle{Rational{BigInt}}(1)
# compute some initial vertex
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
verbose=true,
memory_mode=FrankWolfe.OutplaceEmphasis(),
);
println("\nOutput type of solution: ", eltype(x))
# Another possible step-size rule is the rational short step (used below as `Shortstep(2 // 1)`), which computes the step size by minimizing the
# smoothness inequality as ``\gamma_t=\frac{\langle \nabla f(x_t),x_t-v_t\rangle}{2L||x_t-v_t||^2}``. However, as this step size depends on an upper bound on the
# Lipschitz constant ``L`` as well as the inner product with the gradient ``\nabla f(x_t)``, both have to be of a rational type.
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2 // 1),
print_iter=k / 10,
verbose=true,
memory_mode=FrankWolfe.OutplaceEmphasis(),
);
# Note: at the last step, we exactly close the gap, finding the solution 1//n * ones(n)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 5003 | # # Blended Conditional Gradients
# The FW and AFW algorithms, and their lazy variants share one feature:
# they attempt to make primal progress over a reduced set of vertices. The AFW algorithm does this through
# away steps (which do not increase the cardinality of the active set), and the lazy variants do this through the
# use of previously exploited vertices. A third strategy that one can follow is to explicitly _blend_ Frank-Wolfe
# steps with gradient descent steps over the convex hull of the active set (note that this can be done without
# requiring a projection oracle over ``C``, thus making the algorithm projection-free). This results in the _Blended Conditional Gradient_
# (BCG) algorithm, which attempts to make as much progress as
# possible through the convex hull of the current active set ``S_t`` until it automatically detects that in order to
# make further progress it requires additional calls to the LMO.
# See also Blended Conditional Gradients: the unconditioning of conditional gradients, Braun et al, 2019, https://arxiv.org/abs/1805.07311
using FrankWolfe
using LinearAlgebra
using Random
using SparseArrays
n = 1000
k = 10000
Random.seed!(41)
matrix = rand(n, n)
hessian = transpose(matrix) * matrix
linear = rand(n)
f(x) = dot(linear, x) + 0.5 * transpose(x) * hessian * x
function grad!(storage, x)
return storage .= linear + hessian * x
end
L = eigmax(hessian)
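# `eigmax(hessian)` gives a smoothness constant for this quadratic; the Hessian itself is passed to BCG
# below so the (optionally accelerated) descent steps over the active set can exploit the quadratic structure.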
# We run over the probability simplex and call the LMO to get an initial feasible point:
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0);
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
target_tolerance = 1e-5
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_accel_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=true,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=false,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_convex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
data = [trajectoryBCG_accel_simplex, trajectoryBCG_simplex, trajectoryBCG_convex]
label = ["BCG (accel simplex)", "BCG (simplex)", "BCG (convex)"]
plot_trajectories(data, label, xscalelog=true)
matrix = rand(n, n)
hessian = transpose(matrix) * matrix
linear = rand(n)
f(x) = dot(linear, x) + 0.5 * transpose(x) * hessian * x + 10
function grad!(storage, x)
return storage .= linear + hessian * x
end
L = eigmax(hessian)
lmo = FrankWolfe.KSparseLMO(100, 100.0)
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_accel_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=true,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_simplex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
hessian=hessian,
memory_mode=FrankWolfe.InplaceEmphasis(),
accelerated=false,
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
x0 = deepcopy(x00)
x, v, primal, dual_gap, trajectoryBCG_convex, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
epsilon=target_tolerance,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=L),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
lazy_tolerance=1.0,
weight_purge_threshold=1e-10,
)
data = [trajectoryBCG_accel_simplex, trajectoryBCG_simplex, trajectoryBCG_convex]
label = ["BCG (accel simplex)", "BCG (simplex)", "BCG (convex)"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3288 | # # Spectrahedron
#
# This example shows an optimization problem over the spectraplex:
# ```math
# S = \{X \in \mathbb{S}_+^n, Tr(X) = 1\}
# ```
# with $\mathbb{S}_+^n$ the set of positive semidefinite matrices.
# Linear optimization with a symmetric objective $D$ over the spectraplex amounts to computing the
# leading eigenvector of $D$.
#
# The package also exposes `UnitSpectrahedronLMO` which corresponds to the feasible set:
# ```math
# S_u = \{X \in \mathbb{S}_+^n, Tr(X) \leq 1\}
# ```
using FrankWolfe
using LinearAlgebra
using Random
using SparseArrays
# The objective function will be the symmetric squared distance to a set of known or observed entries $Y_{ij}$ of the matrix.
# ```math
# f(X) = \sum_{(i,j) \in L} 1/2 (X_{ij} - Y_{ij})^2
# ```
# ## Setting up the input data, objective, and gradient
# Dimension, number of iterations and number of known entries:
n = 1500
k = 5000
n_entries = 1000
Random.seed!(41)
const entry_indices = unique!([minmax(rand(1:n, 2)...) for _ in 1:n_entries])
const entry_values = randn(length(entry_indices))
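# `minmax` orders each random index pair, so only entries with i ≤ j are sampled; the objective below
# penalizes both (i, j) and (j, i) to keep everything symmetric.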
function f(X)
r = zero(eltype(X))
for (idx, (i, j)) in enumerate(entry_indices)
r += 1 / 2 * (X[i, j] - entry_values[idx])^2
r += 1 / 2 * (X[j, i] - entry_values[idx])^2
end
return r / length(entry_values)
end
function grad!(storage, X)
storage .= 0
for (idx, (i, j)) in enumerate(entry_indices)
storage[i, j] += (X[i, j] - entry_values[idx])
storage[j, i] += (X[j, i] - entry_values[idx])
end
return storage ./= length(entry_values)
end
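# As a quick sanity check (a sketch, not part of the original example): at any symmetric point
# -- and the iterates of the algorithm are symmetric, being convex combinations of symmetric
# vertices -- the gradient above is itself symmetric, since every (i, j) contribution is
# mirrored at (j, i). This is what makes it safe to skip the symmetrization step in the LMO below.
let A = rand(n, n)
    X = (A + A') / 2      # symmetric test point, like the iterates of the algorithm
    G = zeros(n, n)
    grad!(G, X)
    @assert issymmetric(G)
end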
# Note the `ensure_symmetry = false` argument to `SpectraplexLMO`:
# it skips the additional step that symmetrizes the direction passed to the LMO.
# This step is not necessary when the gradient is a `LinearAlgebra.Symmetric` (or more rarely a `LinearAlgebra.Diagonal` or `LinearAlgebra.UniformScaling`), or, as here, symmetric by construction.
const lmo = FrankWolfe.SpectraplexLMO(1.0, n, false)
const x0 = FrankWolfe.compute_extreme_point(lmo, spzeros(n, n))
target_tolerance = 1e-8;
#src the following two calls are used only to precompile the functions
FrankWolfe.frank_wolfe( #src
f, #src
grad!, #src
lmo, #src
x0, #src
max_iteration=2, #src
line_search=FrankWolfe.MonotonicStepSize(), #src
) #src
FrankWolfe.lazified_conditional_gradient( #src
f, #src
grad!, #src
lmo, #src
x0, #src
max_iteration=2, #src
line_search=FrankWolfe.MonotonicStepSize(), #src
) #src
# ## Running standard and lazified Frank-Wolfe
Xfinal, Vfinal, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.MonotonicStepSize(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
epsilon=target_tolerance,
)
Xfinal, Vfinal, primal, dual_gap, trajectory_lazy = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.MonotonicStepSize(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
epsilon=target_tolerance,
);
# ## Plotting the resulting trajectories
data = [trajectory, trajectory_lazy]
label = ["FW", "LCG"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2289 | using FrankWolfe
using LinearAlgebra
using LaTeXStrings
using Plots
# # FrankWolfe for scaled, shifted ``\ell^1`` and ``\ell^{\infty}`` norm balls
# In this example, we run the vanilla Frank-Wolfe algorithm on a scaled and shifted ``\ell^1`` and ``\ell^{\infty}`` norm ball, using the `ScaledBoundL1NormBall`
# and `ScaledBoundLInfNormBall` LMOs. We shift both onto the point ``(1,0)`` and then scale them by a factor of ``2`` along the x-axis. We project the point ``(2,1)`` onto the polytopes.
n = 2
k = 1000
xp = [2.0, 1.0]
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
return nothing
end
lower = [-1.0, -1.0]
upper = [3.0, 1.0]
l1 = FrankWolfe.ScaledBoundL1NormBall(lower, upper)
linf = FrankWolfe.ScaledBoundLInfNormBall(lower, upper)
x1 = FrankWolfe.compute_extreme_point(l1, zeros(n))
gradient = collect(x1)
x_l1, v_1, primal_1, dual_gap_1, trajectory_1 = FrankWolfe.frank_wolfe(
f,
grad!,
l1,
collect(copy(x1)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=50,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\nFinal solution: ", x_l1)
x2 = FrankWolfe.compute_extreme_point(linf, zeros(n))
gradient = collect(x2)
x_linf, v_2, primal_2, dual_gap_2, trajectory_2 = FrankWolfe.frank_wolfe(
f,
grad!,
linf,
collect(copy(x2)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=50,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\nFinal solution: ", x_linf)
# We plot the polytopes alongside the solutions from above:
xcoord1 = [1, 3, 1, -1, 1]
ycoord1 = [-1, 0, 1, 0, -1]
xcoord2 = [3, 3, -1, -1, 3]
ycoord2 = [-1, 1, 1, -1, -1]
plot(
xcoord1,
ycoord1,
title="Visualization of scaled shifted norm balls",
lw=2,
label=L"\ell^1 \textrm{ norm}",
)
plot!(xcoord2, ycoord2, lw=2, label=L"\ell^{\infty} \textrm{ norm}")
plot!(
[x_l1[1]],
[x_l1[2]],
seriestype=:scatter,
lw=5,
color="blue",
label=L"\ell^1 \textrm{ solution}",
)
plot!(
[x_linf[1]],
[x_linf[2]],
seriestype=:scatter,
lw=5,
color="orange",
label=L"\ell^{\infty} \textrm{ solution}",
legend=:bottomleft,
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4410 | # # Tracking, counters and custom callbacks for Frank Wolfe
# In this example we will run the standard Frank-Wolfe algorithm while tracking the number of
# calls to the different oracles, namely objective evaluations, gradient evaluations, and LMO calls.
# In order to track each of these metrics, "tracking" versions of the objective, gradient, and LMO have to be supplied to
# the `frank_wolfe` algorithm; each one wraps the corresponding standard object.
using FrankWolfe
using Test
using LinearAlgebra
using FrankWolfe: ActiveSet
# ## The trackers for primal objective, gradient and LMO.
# In order to count the number of function calls, a `TrackingObjective` is built from a standard objective function `f`,
# which will act in the same way as the original function does, but with an additional `.counter` field which tracks the number of calls.
f(x) = norm(x)^2
tf = FrankWolfe.TrackingObjective(f)
@show tf.counter
tf(rand(3))
@show tf.counter
## Resetting the counter
tf.counter = 0;
# Similarly, the `tgrad!` function tracks the number of gradient calls:
function grad!(storage, x)
return storage .= 2x
end
tgrad! = FrankWolfe.TrackingGradient(grad!)
@show tgrad!.counter;
# The tracking LMO operates in a similar fashion and tracks the number of `compute_extreme_point` calls.
lmo_prob = FrankWolfe.ProbabilitySimplexOracle(1)
tlmo_prob = FrankWolfe.TrackingLMO(lmo_prob)
@show tlmo_prob.counter;
# The tracking LMO can be applied to all types of LMOs and even in a nested way, which can be useful to
# track the number of calls to a lazified oracle.
# We can now pass the tracking versions `tf`, `tgrad!` and `tlmo_prob` to `frank_wolfe`
# and display their call counts after the optimization process.
x0 = FrankWolfe.compute_extreme_point(tlmo_prob, ones(5))
fw_results = FrankWolfe.frank_wolfe(
tf,
tgrad!,
tlmo_prob,
x0,
max_iteration=1000,
line_search=FrankWolfe.Agnostic(),
callback=nothing,
)
@show tf.counter
@show tgrad!.counter
@show tlmo_prob.counter;
# ## Adding a custom callback
# A callback is a user-defined function called at every iteration
# of the algorithm with the current state passed as a named tuple.
#
# We can implement our own callback, for example with:
# - Extended trajectory logging, similar to the `trajectory = true` option
# - Stop criterion after a certain number of calls to the primal objective function
#
# To reuse the same tracking functions, let us first reset their counters:
tf.counter = 0
tgrad!.counter = 0
tlmo_prob.counter = 0;
# The `storage` array collects, at each iteration, the trajectory state together with
# the number of calls to each oracle.
storage = []
# Now we define our own trajectory logging function that extends
# the five default logged elements `(iterations, primal, dual, dual_gap, time)` with the `.counter` fields of the tracking functions.
function push_tracking_state(state, storage)
base_tuple = FrankWolfe.callback_state(state)
if state.lmo isa FrankWolfe.CachedLinearMinimizationOracle
complete_tuple = tuple(
base_tuple...,
state.gamma,
state.f.counter,
state.grad!.counter,
state.lmo.inner.counter,
)
else
complete_tuple = tuple(
base_tuple...,
state.gamma,
state.f.counter,
state.grad!.counter,
state.lmo.counter,
)
end
return push!(storage, complete_tuple)
end
# In case we want to stop the `frank_wolfe` algorithm prematurely after a certain condition is met,
# we can have the callback return the boolean `false`, which acts as a stop criterion.
# Here, we will implement a callback that terminates the algorithm if the primal objective function is evaluated more than 500 times.
function make_callback(storage)
return function callback(state, args...)
push_tracking_state(state, storage)
return state.f.counter < 500
end
end
callback = make_callback(storage)
# We can show the difference between this standard run and the
# lazified conditional gradient algorithm which does not call the LMO at each iteration.
FrankWolfe.lazified_conditional_gradient(
tf,
tgrad!,
tlmo_prob,
x0,
max_iteration=1000,
traj_data=storage,
line_search=FrankWolfe.Agnostic(),
callback=callback,
)
total_iterations = storage[end][1]
@show total_iterations
@show tf.counter
@show tgrad!.counter
@show tlmo_prob.counter;
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3091 | # # Extra-lazification
# Sometimes the Frank-Wolfe algorithm will be run multiple times
# with slightly different settings under which vertices collected
# in a previous run are still valid.
# The extra-lazification feature can be used for this purpose.
# It consists of a storage that can collect dropped vertices during a run,
# and the ability to use these vertices in another run, when they are not part
# of the current active set.
# The vertices that are part of the active set do not need to be duplicated in the extra-lazification storage.
# These extra vertices can be used instead of calling the LMO when the LMO is a relatively expensive operation.
using FrankWolfe
using Test
using LinearAlgebra
# We will use a parameterized objective function ``1/2 \|x - c\|^2``
# over the unit simplex.
const n = 100
const center0 = 5.0 .+ 3 * rand(n)
f(x) = 0.5 * norm(x .- center0)^2
function grad!(storage, x)
return storage .= x .- center0
end
# The `TrackingLMO` will let us count how many real calls to the LMO are performed
# by a single run of the algorithm.
lmo = FrankWolfe.UnitSimplexOracle(4.3)
tlmo = FrankWolfe.TrackingLMO(lmo)
x0 = FrankWolfe.compute_extreme_point(lmo, randn(n));
# ## Adding a vertex storage
# `FrankWolfe` offers a simple `FrankWolfe.DeletedVertexStorage` storage type
# which has as parameter `return_kth`, the number of good directions to find before returning the best.
# `return_kth` larger than the number of vertices means that the best-aligned vertex will be found.
# `return_kth = 1` means the first acceptable vertex (with the specified threshold) is returned.
#
# See [FrankWolfe.DeletedVertexStorage](@ref)
vertex_storage = FrankWolfe.DeletedVertexStorage(typeof(x0)[], 5)
tlmo.counter = 0
results = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
tlmo,
x0,
max_iteration=4000,
verbose=true,
lazy=true,
epsilon=1e-5,
add_dropped_vertices=true,
extra_vertex_storage=vertex_storage,
)
# The counter indicates the number of initial calls to the LMO.
# We will now construct different objective functions based on new centers,
# call the BPCG algorithm while accumulating vertices in the storage,
# in addition to warm-starting with the active set of the previous iteration.
# This allows for a "double-warmstarted" algorithm, reducing the number of LMO
# calls from one problem to the next.
active_set = results[end]
tlmo.counter
for iter in 1:10
center = 5.0 .+ 3 * rand(n)
f_i(x) = 0.5 * norm(x .- center)^2
function grad_i!(storage, x)
return storage .= x .- center
end
tlmo.counter = 0
FrankWolfe.blended_pairwise_conditional_gradient(
f_i,
grad_i!,
tlmo,
active_set,
max_iteration=4000,
lazy=true,
epsilon=1e-5,
add_dropped_vertices=true,
use_extra_vertex_storage=true,
extra_vertex_storage=vertex_storage,
verbose=false,
)
@info "Number of LMO calls in iter $iter: $(tlmo.counter)"
@info "Vertex storage size: $(length(vertex_storage.storage))"
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3683 | # # Alternating methods
# In this example we will compare [`FrankWolfe.alternating_linear_minimization`](@ref) and [`FrankWolfe.alternating_projections`](@ref) for a very simple feasibility problem.
# We consider the probability simplex
# ```math
# P = \{ x \in \mathbb{R}^n \colon \sum_{i=1}^n x_i = 1, x_i \geq 0 ~~ i=1,\dots,n\} ~.
# ```
# and a scaled, shifted ``\ell^{\infty}`` norm ball
# ```math
# Q = [-1,0]^n ~.
# ```
# The goal is to find a point that lies both in ``P`` and ``Q``. We do this by reformulating the problem first.
# Instead of finding a point in the intersection ``P \cap Q``, we search for a pair of points ``(x_P, x_Q)`` in the Cartesian product ``P \times Q`` which attains the minimal distance between ``P`` and ``Q``,
# ```math
# \|x_P - x_Q\|_2 = \min_{(x,y) \in P \times Q} \|x - y \|_2 ~.
# ```
using FrankWolfe
include("../examples/plot_utils.jl")
# ## Setting up objective, gradient and linear minimization oracles
# Alternating Linear Minimization (ALM) allows for an additional objective such that one can optimize over an intersection of sets instead of finding only feasible points.
# Since this example only considers feasibility, we set the objective function as well as the gradient to zero.
n = 20
f(x) = 0
function grad!(storage, x)
@. storage = zero(x)
end
lmo1 = FrankWolfe.ProbabilitySimplexOracle(1.0)
lmo2 = FrankWolfe.ScaledBoundLInfNormBall(-ones(n), zeros(n))
lmos = (lmo1, lmo2)
x0 = rand(n)
target_tolerance = 1e-6
trajectories = [];
# ## Running Alternating Linear Minimization
# The method [`FrankWolfe.alternating_linear_minimization`](@ref) is not a Frank-Wolfe method itself. It is a wrapper translating a problem over the intersection of multiple sets to a problem over the product space.
# ALM can be called with any FW method. The default choice, though, is [`FrankWolfe.block_coordinate_frank_wolfe`](@ref) as it allows the blocks to be updated separately.
# There are three different update orders implemented, `FullUpdate`, `CyclicUpdate` and `StochasticUpdate`.
# Accordingly, the blocks are updated either simultaneously, sequentially, or in random order.
for order in [FrankWolfe.FullUpdate(), FrankWolfe.CyclicUpdate(), FrankWolfe.StochasticUpdate()]
_, _, _, _, _, alm_trajectory = FrankWolfe.alternating_linear_minimization(
FrankWolfe.block_coordinate_frank_wolfe,
f,
grad!,
lmos,
x0,
update_order=order,
verbose=true,
trajectory=true,
epsilon=target_tolerance,
)
push!(trajectories, alm_trajectory)
end
# As an alternative to Block-Coordinate Frank-Wolfe (BCFW), one can also run alternating linear minimization with the standard Frank-Wolfe algorithm.
# These methods then perform the full (simultaneous) update at each iteration. In this example we also use [`FrankWolfe.away_frank_wolfe`](@ref).
_, _, _, _, _, afw_trajectory = FrankWolfe.alternating_linear_minimization(
FrankWolfe.away_frank_wolfe,
f,
grad!,
lmos,
x0,
verbose=true,
trajectory=true,
epsilon=target_tolerance,
)
push!(trajectories, afw_trajectory);
# ## Running Alternating Projections
# Unlike ALM, Alternating Projections (AP) is only suitable for feasibility problems. One omits the objective and gradient as parameters.
_, _, _, _, ap_trajectory = FrankWolfe.alternating_projections(
lmos,
x0,
trajectory=true,
verbose=true,
print_iter=100,
epsilon=target_tolerance,
)
push!(trajectories, ap_trajectory);
# ## Plotting the resulting trajectories
labels = ["BCFW - Full", "BCFW - Cyclic", "BCFW - Stochastic", "AFW", "AP"]
plot_trajectories(trajectories, labels, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3705 | # # Block-Coordinate Frank-Wolfe and Block-Vectors
# In this example, we demonstrate the usage of the [`FrankWolfe.block_coordinate_frank_wolfe`](@ref) and [`FrankWolfe.BlockVector`](@ref).
# We consider the problem of minimizing the squared Euclidean distance between two sets.
# We compare different update orders and different update steps.
# ## Import and setup
# We first import the necessary packages and include the code for plotting the results.
using FrankWolfe
using LinearAlgebra
include("plot_utils.jl")
# Next, we define the objective function and its gradient. The iterates `x` are instances of the [`FrankWolfe.BlockVector`](@ref) type.
# The different blocks of the vector can be accessed via the `blocks` field.
f(x) = dot(x.blocks[1] - x.blocks[2], x.blocks[1] - x.blocks[2])
function grad!(storage, x)
@. storage.blocks = [x.blocks[1] - x.blocks[2], x.blocks[2] - x.blocks[1]]
end
# In our example we consider the probability simplex and an L-infinity norm ball as the feasible sets.
n = 100
lmo1 = FrankWolfe.ScaledBoundLInfNormBall(-ones(n), zeros(n))
lmo2 = FrankWolfe.ProbabilitySimplexOracle(1.0)
prod_lmo = FrankWolfe.ProductLMO((lmo1, lmo2))
# We initialize the starting point `x0` as a [`FrankWolfe.BlockVector`](@ref) with two blocks.
# The two other arguments are the block sizes and the overall number of entries.
x0 = FrankWolfe.BlockVector([-ones(n), [i == 1 ? 1 : 0 for i in 1:n]], [(n,), (n,)], 2 * n);
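# A small illustration (a sketch, not in the original example): the individual blocks of the
# iterate can be accessed through the `blocks` field, e.g. to check the starting point.
@assert length(x0.blocks) == 2
@assert sum(x0.blocks[2]) ≈ 1.0  # the second block lies on the probability simplex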
# ## Running block-coordinate Frank-Wolfe with different update-orders
# In a first step, we compare different update orders. There are three different update orders implemented,
# [`FrankWolfe.FullUpdate`](@ref), [`FrankWolfe.CyclicUpdate`](@ref) and [`FrankWolfe.StochasticUpdate`](@ref).
# For creating a custom [`FrankWolfe.BlockCoordinateUpdateOrder`](@ref), one needs to implement the function `select_update_indices`.
struct CustomOrder <: FrankWolfe.BlockCoordinateUpdateOrder end
function FrankWolfe.select_update_indices(::CustomOrder, state::FrankWolfe.CallbackState, dual_gaps)
return [rand() < 1 / n ? 1 : 2 for _ in 1:length(state.lmo.lmos)]
end
# We run the block-coordinate Frank-Wolfe method with the different update orders and store the trajectories.
trajectories = []
for order in [
FrankWolfe.FullUpdate(),
FrankWolfe.CyclicUpdate(),
FrankWolfe.StochasticUpdate(),
CustomOrder(),
]
_, _, _, _, traj_data = FrankWolfe.block_coordinate_frank_wolfe(
f,
grad!,
prod_lmo,
x0;
verbose=true,
trajectory=true,
update_order=order,
)
push!(trajectories, traj_data)
end
# ### Plotting the results
labels = ["Full update", "Cyclic order", "Stochstic order", "Custom order"]
plot_trajectories(trajectories, labels, xscalelog=true)
# ## Running BCFW with different update methods
# As a second step, we compare different update steps. We consider the [`FrankWolfe.BPCGStep`](@ref) and the [`FrankWolfe.FrankWolfeStep`](@ref).
# One can either pass a tuple of [`FrankWolfe.UpdateStep`](@ref) to define for each block the update procedure or pass a single update step so that each block uses the same procedure.
trajectories = []
for us in [(FrankWolfe.BPCGStep(), FrankWolfe.FrankWolfeStep()), (FrankWolfe.FrankWolfeStep(), FrankWolfe.BPCGStep()), FrankWolfe.BPCGStep(), FrankWolfe.FrankWolfeStep()]
_, _, _, _, traj_data = FrankWolfe.block_coordinate_frank_wolfe(
f,
grad!,
prod_lmo,
x0;
verbose=true,
trajectory=true,
update_step=us,
)
push!(trajectories, traj_data)
end
# ### Plotting the results
labels = ["BPCG FW", "FW BPCG", "BPCG", "FW"]
plot_trajectories(trajectories, labels, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 13282 | # # Accelerations for quadratic functions and symmetric problems
# This example illustrates how to exploit symmetry to reduce the dimension of the problem via `SymmetricLMO`.
# Moreover, active set based algorithms can be accelerated by using the specialized structure `ActiveSetQuadratic`.
# The specific problem we consider here comes from quantum information and some context can be found [here](https://arxiv.org/abs/2302.04721).
# Formally, we want to find the distance between a tensor of size `m^N` and the `N`-partite local polytope which is defined by its vertices
# ```math
# d^{\vec{a}^{(1)}\ldots \vec{a}^{(N)}}_{x_1\ldots x_N}\coloneqq\prod_{n=1}^Na^{(n)}_{x_n}
# ```
# labeled by ``\vec{a}^{(n)}=a^{(n)}_1\ldots a^{(n)}_m`` for ``n\in[1,N]``, where ``a^{(n)}_x=\pm1``.
# In the bipartite case (`N=2`), this polytope is affinely equivalent to the cut polytope.
# ## Import and setup
# We first import the necessary packages.
import Combinatorics
import FrankWolfe
import LinearAlgebra
import Tullio
# Then we can define our custom LMO, together with the method `compute_extreme_point`,
# which simply enumerates the vertices ``d^{\vec{a}^{(1)}\ldots\vec{a}^{(N)}}`` defined above.
# This structure is specialized for the case `N=5` and contains pre-allocated fields used to accelerate the enumeration.
# Note that the output type (full tensor) is quite naive, but this is enough to illustrate the syntax in this toy example.
struct BellCorrelationsLMO{T} <: FrankWolfe.LinearMinimizationOracle
m::Int # size of the tensor
tmp1::Array{T, 1}
tmp2::Array{T, 2}
tmp3::Array{T, 3}
tmp4::Array{T, 4}
end
function FrankWolfe.compute_extreme_point(lmo::BellCorrelationsLMO{T}, A::Array{T, 5}; kwargs...) where {T <: Number}
ax = [ones(T, lmo.m) for n in 1:5]
sc1 = zero(T)
sc2 = one(T)
axm = [zeros(T, lmo.m) for n in 1:5]
scm = typemax(T)
L = 2^lmo.m
aux = zeros(Int, lmo.m)
for λa5 in 0:(L÷2)-1
digits!(aux, λa5, base=2)
ax[5] .= 2aux .- 1
Tullio.@tullio lmo.tmp4[x1, x2, x3, x4] = A[x1, x2, x3, x4, x5] * ax[5][x5]
for λa4 in 0:L-1
digits!(aux, λa4, base=2)
ax[4] .= 2aux .- 1
Tullio.@tullio lmo.tmp3[x1, x2, x3] = lmo.tmp4[x1, x2, x3, x4] * ax[4][x4]
for λa3 in 0:L-1
digits!(aux, λa3, base=2)
ax[3] .= 2aux .- 1
Tullio.@tullio lmo.tmp2[x1, x2] = lmo.tmp3[x1, x2, x3] * ax[3][x3]
for λa2 in 0:L-1
digits!(aux, λa2, base=2)
ax[2] .= 2aux .- 1
LinearAlgebra.mul!(lmo.tmp1, lmo.tmp2, ax[2])
for x1 in 1:lmo.m
ax[1][x1] = lmo.tmp1[x1] > zero(T) ? -one(T) : one(T)
end
sc = LinearAlgebra.dot(ax[1], lmo.tmp1)
if sc < scm
scm = sc
for n in 1:5
axm[n] .= ax[n]
end
end
end
end
end
end
return [axm[1][x1]*axm[2][x2]*axm[3][x3]*axm[4][x4]*axm[5][x5] for x1 in 1:lmo.m, x2 in 1:lmo.m, x3 in 1:lmo.m, x4 in 1:lmo.m, x5 in 1:lmo.m]
end
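# To make the vertex structure concrete, here is a tiny sketch (not used below): for `N=2`
# and `m=2`, the vertex associated with ``\vec{a}^{(1)}=(1,-1)`` and ``\vec{a}^{(2)}=(1,1)``
# is just the outer product of the two sign vectors, a rank-one ``\pm1`` matrix.
let a1 = [1, -1], a2 = [1, 1]
    d = [a1[x1] * a2[x2] for x1 in 1:2, x2 in 1:2]
    @assert d == a1 * a2'
end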
# Then we define our specific instance, coming from a GHZ state measured with measurements forming a regular polygon on the equator of the Bloch sphere.
# See [this article](https://arxiv.org/abs/2310.20677) for definitions and references.
function correlation_tensor_GHZ_polygon(::Type{T}, N::Int, m::Int) where {T <: Number}
res = zeros(T, m*ones(Int, N)...)
tab_cos = [cos(x*T(pi)/m) for x in 0:N*m]
tab_cos[abs.(tab_cos) .< Base.rtoldefault(T)] .= zero(T)
for ci in CartesianIndices(res)
res[ci] = tab_cos[sum(ci.I)-N+1]
end
return res
end
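# Small check (a sketch, not in the original example): the GHZ correlation tensor only depends
# on the sum of its indices, hence it is invariant under permuting the tensor dimensions; this
# is the symmetry exploited for the dimension reduction further below.
let q = correlation_tensor_GHZ_polygon(Float64, 3, 4)
    @assert q == permutedims(q, (2, 3, 1))
end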
T = Float64
verbose = true
max_iteration = 10^4
m = 5
p = 0.23correlation_tensor_GHZ_polygon(T, 5, m)
x0 = zeros(T, size(p))
println() #hide
# The objective function is simply ``\frac12\|x-p\|_2^2``, which we decompose into different terms for speed.
normp2 = LinearAlgebra.dot(p, p) / 2
f = let p = p, normp2 = normp2
x -> LinearAlgebra.dot(x, x) / 2 - LinearAlgebra.dot(p, x) + normp2
end
grad! = let p = p
(storage, x) -> begin
@inbounds for i in eachindex(x)
storage[i] = x[i] - p[i]
end
end
end
println() #hide
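# Quick sanity check (a sketch, not in the original example): with this decomposition,
# ``f(x) = \frac12\|x-p\|_2^2``, so in particular ``f(p) = 0``.
@assert abs(f(p)) < 1e-10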
# ## Naive run
# If we run the blended pairwise conditional gradient algorithm without modifications, convergence is not reached in 10000 iterations.
lmo_naive = BellCorrelationsLMO{T}(m, zeros(T, m), zeros(T, m, m), zeros(T, m, m, m), zeros(T, m, m, m, m))
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, FrankWolfe.ActiveSet([(one(T), x0)]); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
as_naive = FrankWolfe.ActiveSet([(one(T), x0)])
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, as_naive; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
# ## Faster active set for quadratic functions
# A first acceleration can be obtained by using the active set specialized for the quadratic objective function,
# whose gradient is here ``x-p``, explaining the Hessian and linear part provided as arguments.
# The speedup is obtained by pre-computing some scalar products in order to quickly obtain, in each iteration, the best and worst
# atoms currently in the active set.
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_naive = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, asq_naive; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
# In this small example, the acceleration is quite minimal, but as soon as one of the following conditions is met,
# significant speedups (factor ten at least) can be expected:
# - quite expensive scalar product between atoms, for instance, due to a high dimension (say, more than 10000),
# - high number of atoms in the active set (say, more than 1000),
# - high number of iterations (say, more than 100000), spending most of the time redistributing the weights in the active set.
# ## Dimension reduction via symmetrization
# ### Permutation of the tensor axes
# It is easy to see that our specific instance remains invariant under permutation of the dimensions of the tensor.
# This means that all computations can be performed in the symmetric subspace, which leads to an important speedup,
# owing to the reduced dimension (hence reduced size of the final active set and reduced number of iterations).
# The way to operate this in the `FrankWolfe` package is to use a symmetrized LMO, which basically does the following:
# - symmetrize the gradient, which is not necessary here as the gradient remains symmetric throughout the algorithm,
# - call the standard LMO,
# - symmetrize its output, which amounts to averaging over its orbit with respect to the group considered (here the symmetric group permuting the dimensions of the tensor).
function reynolds_permutedims(atom::Array{T, N}, lmo::BellCorrelationsLMO{T}) where {T <: Number, N}
res = zeros(T, size(atom))
for per in Combinatorics.permutations(1:N)
res .+= permutedims(atom, per)
end
res ./= factorial(N)
return res
end
println() #hide
# Note that the second argument `lmo` is not used here but could in principle be exploited to obtain
# a very small speedup by precomputing and storing `Combinatorics.permutations(1:N)`
# in a dedicated field of our custom LMO.
lmo_permutedims = FrankWolfe.SymmetricLMO(lmo_naive, reynolds_permutedims)
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_permutedims, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_permutedims = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_permutedims, asq_permutedims; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
# Now, convergence is reached within 10000 iterations, and the size of the final active set is
# considerably smaller than before, thanks to the reduced dimension.
# ### Uniqueness pattern
# In this specific case, there is a bigger symmetry group that we can exploit.
# Its action roughly allows us to work in the subspace respecting the structure of the objective point `p`, that is,
# to average over tensor entries that have the same value in `p`.
# Although quite general, this kind of symmetry is not always applicable, and great care has to be taken when using it, in particular,
# to ensure that there exists a suitable group action whose Reynolds operator corresponds to this averaging procedure.
# In our current case, the theoretical study enabling this further symmetrization can be found [here](https://arxiv.org/abs/2310.20677).
function build_reynolds_unique(p::Array{T, N}) where {T <: Number, N}
ptol = round.(p; digits=8)
ptol[ptol .== zero(T)] .= zero(T) # transform -0.0 into 0.0 as isequal(0.0, -0.0) is false
uniquetol = unique(ptol[:])
indices = [ptol .== u for u in uniquetol]
return function(A::Array{T, N}, lmo)
res = zeros(T, size(A))
for ind in indices
@view(res[ind]) .= sum(A[ind]) / sum(ind) # average over ind
end
return res
end
end
lmo_unique = FrankWolfe.SymmetricLMO(lmo_naive, build_reynolds_unique(p))
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_unique, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_unique = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_unique, asq_unique; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
# ### Reduction of the memory footprint of the iterate
# In the previous run, the dimension reduction is mathematically exploited to accelerate the algorithm,
# but it is not used to effectively work in a subspace of reduced dimension.
# Indeed, the iterate, although symmetric, was still a full tensor.
# As a last example of the speedup obtainable through symmetry reduction, we show how to map the computations
# into a space whose physical dimension is also reduced during the algorithm.
# This makes all in-place operations marginally faster, which can lead, in bigger instances, to significant
# accelerations, especially for active set based algorithms in the regime where many lazy iterations are performed.
# We refer to the example `symmetric.jl` for a small benchmark with symmetric matrices.
function build_reduce_inflate(p::Array{T, N}) where {T <: Number, N}
ptol = round.(p; digits=8)
ptol[ptol .== zero(T)] .= zero(T) # transform -0.0 into 0.0 as isequal(0.0, -0.0) is false
uniquetol = unique(ptol[:])
dim = length(uniquetol) # reduced dimension
indices = [ptol .== u for u in uniquetol]
mul = [sum(ind) for ind in indices] # multiplicities, used to have matching scalar products
sqmul = sqrt.(mul) # precomputed for speed
return function(A::Array{T, N}, lmo)
vec = zeros(T, dim)
for (i, ind) in enumerate(indices)
vec[i] = sum(A[ind]) / sqmul[i]
end
return FrankWolfe.SymmetricArray(A, vec)
end, function(x::FrankWolfe.SymmetricArray, lmo)
for (i, ind) in enumerate(indices)
@view(x.data[ind]) .= x.vec[i] / sqmul[i]
end
return x.data
end
end
reduce, inflate = build_reduce_inflate(p)
p_reduce = reduce(p, nothing)
x0_reduce = reduce(x0, nothing)
f_reduce = let p_reduce = p_reduce, normp2 = normp2
x -> LinearAlgebra.dot(x, x) / 2 - LinearAlgebra.dot(p_reduce, x) + normp2
end
grad_reduce! = let p_reduce = p_reduce
(storage, x) -> begin
@inbounds for i in eachindex(x)
storage[i] = x[i] - p_reduce[i]
end
end
end
println() #hide
# Note that the objective function and its gradient have to be explicitly rewritten.
# In this simple example, their shape remains unchanged, but in general this may need some
# reformulation, which falls to the user.
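# Consistency check (a sketch, not in the original example; it assumes `LinearAlgebra.dot` is
# defined on `FrankWolfe.SymmetricArray`, which the reduced objective below relies on anyway):
# the reduction is built to preserve scalar products, so objective values agree between the full
# and the reduced representations.
@assert f_reduce(x0_reduce) ≈ f(x0)
@assert LinearAlgebra.dot(p_reduce, p_reduce) ≈ LinearAlgebra.dot(p, p)
println() #hide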
lmo_reduce = FrankWolfe.SymmetricLMO(lmo_naive, reduce, inflate)
FrankWolfe.blended_pairwise_conditional_gradient(f_reduce, grad_reduce!, lmo_reduce, FrankWolfe.ActiveSetQuadratic([(one(T), x0_reduce)], LinearAlgebra.I, -p_reduce); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_reduce = FrankWolfe.ActiveSetQuadratic([(one(T), x0_reduce)], LinearAlgebra.I, -p_reduce)
@time FrankWolfe.blended_pairwise_conditional_gradient(f_reduce, grad_reduce!, lmo_reduce, asq_reduce; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1298 | #=
Running a very large-scale example; the dimension can be pushed to 1e9 variables (note this requires a lot of RAM; the script below uses n = 1e7).
Problem is quite simple: minimize || x - p ||^2 over the probability simplex
NOTE.
1. we run standard FW with the agnostic step-size here, as the overhead from line searches etc. is quite substantial
2. observe that the memory consumption is sub-linear in the number of iterations
=#
using FrankWolfe
using LinearAlgebra
n = Int(1e7)
k = 1000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
# better for memory consumption as we do coordinate-wise ops
function cf(x, xp)
return LinearAlgebra.norm(x .- xp)^2
end
function cgrad!(storage, x, xp)
return @. storage = 2 * (x - xp)
end
lmo = FrankWolfe.ProbabilitySimplexOracle(1);
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
FrankWolfe.benchmark_oracles(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
() -> randn(n),
lmo;
k=100,
)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
);
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1568 | using FrankWolfe
using LinearAlgebra
include("../examples/plot_utils.jl")
# n = Int(1e1)
n = Int(1e4)
k = 5 * Int(1e3)
number_nonzero = 40
xpi = rand(n);
total = sum(xpi);
const xp = xpi # ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
lmo = FrankWolfe.KSparseLMO(number_nonzero, 1.0);
## alternative lmo
# lmo = FrankWolfe.ProbabilitySimplexOracle(1)
x0 = FrankWolfe.compute_extreme_point(lmo, ones(n));
@time x, v, primal, dual_gap, trajectorylazy, active_set = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=1e-5,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
lazy=true,
);
@time x, v, primal, dual_gap, trajectoryAFW, active_set = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=1e-5,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
away_steps=true,
trajectory=true,
);
@time x, v, primal, dual_gap, trajectoryFW = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
epsilon=1e-5,
trajectory=true,
away_steps=false,
);
data = [trajectorylazy, trajectoryAFW, trajectoryFW]
label = ["LAFW" "AFW" "FW"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4346 | #=
Example demonstrating sparsity control by means of the `lazy_tolerance`-factor passed to the lazy AFW variant
A larger lazy_tolerance >= 1 favors sparsity by favoring optimization over the current active set rather than
adding a new FW vertex.
The default for AFW is lazy_tolerance = 2.0
=#
using FrankWolfe
using LinearAlgebra
using Random
include("../examples/plot_utils.jl")
n = Int(1e4)
k = 1000
s = rand(1:100)
@info "Seed $s"
Random.seed!(s)
xpi = rand(n);
total = sum(xpi);
# here the optimal solution lies in the interior; if you want an optimal solution on a face and not in the interior, use:
# const xp = xpi;
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
const lmo = FrankWolfe.KSparseLMO(100, 1.0)
## other LMOs to try
# lmo_big = FrankWolfe.KSparseLMO(100, big"1.0")
# lmo = FrankWolfe.LpNormLMO{Float64,5}(1.0)
# lmo = FrankWolfe.ProbabilitySimplexOracle(1.0);
# lmo = FrankWolfe.UnitSimplexOracle(1.0);
const x00 = FrankWolfe.compute_extreme_point(lmo, rand(n))
## example with BirkhoffPolytopeLMO - uses square matrix.
# const lmo = FrankWolfe.BirkhoffPolytopeLMO()
# cost = rand(n, n)
# const x00 = FrankWolfe.compute_extreme_point(lmo, cost)
function build_callback(trajectory_arr)
return function callback(state, active_set, args...)
return push!(trajectory_arr, (FrankWolfe.callback_state(state)..., length(active_set)))
end
end
FrankWolfe.benchmark_oracles(f, grad!, () -> randn(n), lmo; k=100)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_shortstep = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
trajectory_adaptive = []
callback = build_callback(trajectory_adaptive)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
callback=callback,
);
println("\n==> Lazy AFW.\n")
trajectory_adaptiveLoc15 = []
callback = build_callback(trajectory_adaptiveLoc15)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
lazy_tolerance=1.5,
trajectory=true,
callback=callback,
);
trajectory_adaptiveLoc2 = []
callback = build_callback(trajectory_adaptiveLoc2)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
lazy_tolerance=2.0,
trajectory=true,
callback=callback,
);
trajectory_adaptiveLoc4 = []
callback = build_callback(trajectory_adaptiveLoc4)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy_tolerance=4.0,
lazy=true,
trajectory=true,
callback=callback,
);
trajectory_adaptiveLoc10 = []
callback = build_callback(trajectory_adaptiveLoc10)
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
lazy=true,
lazy_tolerance=10.0,
verbose=true,
trajectory=true,
callback=callback,
);
# Reduction primal/dual error vs. sparsity of solution
dataSparsity = [
trajectory_adaptive,
trajectory_adaptiveLoc15,
trajectory_adaptiveLoc2,
trajectory_adaptiveLoc4,
trajectory_adaptiveLoc10,
]
labelSparsity = ["AFW", "LAFW-K-1.5", "LAFW-K-2.0", "LAFW-K-4.0", "LAFW-K-10.0"]
plot_sparsity(dataSparsity, labelSparsity, legend_position=:topright)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 975 | using FrankWolfe
using LinearAlgebra
n = Int(1e4)
k = 10000
xpi = rand(n);
total = sum(xpi);
const xp = xpi # ./ total;
f(x) = dot(x, x) - 2 * dot(x, xp)
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
lmo = FrankWolfe.KSparseLMO(100, 1.0)
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
# arbitrary cache
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
);
# fixed cache size
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
cache_size=500,
verbose=true,
);
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3940 | using FrankWolfe
using Plots
using LinearAlgebra
using Random
import GLPK
using JSON
include("../examples/plot_utils.jl")
n = 200
k = 3000
#k = 500
xpi = rand(n * n);
total = sum(xpi);
# next line needs to be commented out if we use the GLPK variants
xpi = reshape(xpi, n, n)
const xp = xpi # ./ total;
# better for memory consumption as we do coordinate-wise ops
function cf(x, xp)
return norm(x .- xp)^2 / n^2
end
function cgrad!(storage, x, xp)
return @. storage = 2 * (x - xp) / n^2
end
# initial direction for first vertex
direction_vec = Vector{Float64}(undef, n * n)
randn!(direction_vec)
direction_mat = reshape(direction_vec, n, n)
lmo = FrankWolfe.BirkhoffPolytopeLMO()
x00 = FrankWolfe.compute_extreme_point(lmo, direction_mat)
target_accuracy = 1e-7
# modify to GLPK variant
# o = GLPK.Optimizer()
# lmo_moi = FrankWolfe.convert_mathopt(lmo, o, dimension=n)
# x00 = FrankWolfe.compute_extreme_point(lmo, direction_vec)
FrankWolfe.benchmark_oracles(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
() -> randn(n, n),
lmo;
k=100,
)
# vanilla FW
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryFW = FrankWolfe.frank_wolfe(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=target_accuracy,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
verbose=true,
);
# arbitrary cache
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryLCG = FrankWolfe.lazified_conditional_gradient(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
epsilon=target_accuracy,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
verbose=true,
);
# fixed cache size
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryBLCG = FrankWolfe.lazified_conditional_gradient(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=target_accuracy,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
cache_size=500,
verbose=true,
);
# AFW run
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryLAFW = FrankWolfe.away_frank_wolfe(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=target_accuracy,
memory_mode=FrankWolfe.InplaceEmphasis(),
lazy=true,
trajectory=true,
verbose=true,
);
# BCG run
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryBCG, _ = FrankWolfe.blended_conditional_gradient(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=target_accuracy,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
verbose=true,
);
# BCG run (reference optimum)
x0 = copy(x00)
x, v, primal, dual_gap, trajectoryBCG_ref, _ = FrankWolfe.blended_conditional_gradient(
x -> cf(x, xp),
(str, x) -> cgrad!(str, x, xp),
lmo,
x0,
max_iteration=2 * k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
epsilon=target_accuracy / 10.0,
memory_mode=FrankWolfe.InplaceEmphasis(),
trajectory=true,
verbose=true,
);
open("lcg_expensive_data.json", "w") do f
return write(
f,
JSON.json((
FW=trajectoryFW,
LCG=trajectoryLCG,
BLCG=trajectoryBLCG,
LAFW=trajectoryLAFW,
BCG=trajectoryBCG,
reference_BCG_primal=primal,
)),
)
end
data = [trajectoryFW, trajectoryLCG, trajectoryBLCG, trajectoryLAFW, trajectoryBCG]
label = ["FW", "L-CG", "BL-CG", "L-AFW", "BCG"]
plot_trajectories(data, label, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1957 | # Plotting script for the expensive lazified conditional gradient example:
# it reads the results saved in `lcg_expensive_data.json` and reproduces the
# primal/dual gap plots in terms of iterations and time.
#
# To get accurate timings it is important to run the data-generating script twice so that the
# compile time of Julia for the first run is not tainting the results
using FrankWolfe
using JSON
using LaTeXStrings
include("plot_utils.jl") # provides `plot_results`; adjust the path if running from outside the examples folder
results = JSON.Parser.parsefile("lcg_expensive_data.json")
ref_optimum = results["reference_BCG_primal"]
iteration_list = [
[x[1] + 1 for x in results["FW"]],
[x[1] + 1 for x in results["LCG"]],
[x[1] + 1 for x in results["BLCG"]],
[x[1] + 1 for x in results["LAFW"]],
[x[1] + 1 for x in results["BCG"]],
]
time_list = [
[x[5] for x in results["FW"]],
[x[5] for x in results["LCG"]],
[x[5] for x in results["BLCG"]],
[x[5] for x in results["LAFW"]],
[x[5] for x in results["BCG"]],
]
primal_gap_list = [
[x[2] - ref_optimum for x in results["FW"]],
[x[2] - ref_optimum for x in results["LCG"]],
[x[2] - ref_optimum for x in results["BLCG"]],
[x[2] - ref_optimum for x in results["LAFW"]],
[x[2] - ref_optimum for x in results["BCG"]],
]
dual_gap_list = [
[x[4] for x in results["FW"]],
[x[4] for x in results["LCG"]],
[x[4] for x in results["BLCG"]],
[x[4] for x in results["LAFW"]],
[x[4] for x in results["BCG"]],
]
label = [L"\textrm{FW}", L"\textrm{L-CG}", L"\textrm{BL-CG}", L"\textrm{L-AFW}", L"\textrm{BCG}"]
plot_results(
[primal_gap_list, primal_gap_list, dual_gap_list, dual_gap_list],
[iteration_list, time_list, iteration_list, time_list],
label,
[L"\textrm{Iteration}", L"\textrm{Time}", L"\textrm{Iteration}", L"\textrm{Time}"],
[L"\textrm{Primal Gap}", L"\textrm{Primal Gap}", L"\textrm{Dual Gap}", L"\textrm{Dual Gap}"],
xscalelog=[:log, :identity, :log, :identity],
legend_position=[:bottomleft, nothing, nothing, nothing],
filename="lcg_expensive.pdf",
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4480 | """
Example of a L2-regularized linearized regression
using the stochastic version of Frank-Wolfe.
"""
using FrankWolfe
using Random
using LinearAlgebra
using Test
include("../examples/plot_utils.jl")
# user-provided loss function and gradient
function simple_reg_loss(θ, data_point)
(xi, yi) = data_point
(a, b) = (θ[1:end-1], θ[end])
pred = a ⋅ xi + b
return (pred - yi)^2 / 2
end
function ∇simple_reg_loss(storage, θ, data_point)
(xi, yi) = data_point
(a, b) = (θ[1:end-1], θ[end])
pred = a ⋅ xi + b
@. storage[1:end-1] += xi * (pred - yi)
storage[end] += pred - yi
return storage
end
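# Gradient sanity check (a small sketch, not in the original example): compare the analytic
# gradient against a central finite difference on one random data point.
let θ = randn(6), dp = (randn(5), randn()), h = 1e-6
    g = zeros(6)
    ∇simple_reg_loss(g, θ, dp)
    for i in 1:6
        e = zeros(6)
        e[i] = h
        fd = (simple_reg_loss(θ + e, dp) - simple_reg_loss(θ - e, dp)) / (2h)
        @assert isapprox(g[i], fd; atol=1e-5, rtol=1e-5)
    end
end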
xs = [10 * randn(5) for i in 1:20000]
params = rand(6) .- 1 # start params in (-1,0)
bias = 4π
params_perfect = [1:5; bias]
data_perfect = [(x, x ⋅ (1:5) + bias) for x in xs]
f_stoch =
FrankWolfe.StochasticObjective(simple_reg_loss, ∇simple_reg_loss, data_perfect, similar(params))
@test FrankWolfe.compute_value(f_stoch, params) > FrankWolfe.compute_value(f_stoch, params_perfect)
# Vanilla Stochastic Gradient Descent with reshuffling
storage = similar(params)
for idx in 1:1000
for data_point in shuffle!(data_perfect)
storage .= 0
∇simple_reg_loss(storage, params, data_point)
params .-= 0.05 * storage / length(data_perfect)
end
end
@test norm(params - params_perfect) <= 1e-6
# similar example with noisy data, Gaussian noise around the linear estimate
data_noisy = [(x, x ⋅ (1:5) + bias + 0.5 * randn()) for x in xs]
f_stoch_noisy =
FrankWolfe.StochasticObjective(simple_reg_loss, ∇simple_reg_loss, data_noisy, storage)
params = rand(6) .- 1 # start params in (-1,0)
@testset "true parameters yield a good error" begin
n1 = norm(FrankWolfe.compute_gradient(f_stoch_noisy, params_perfect))
@test n1 <= length(data_noisy) * 0.05
end
# test that gradient at true parameters has lower norm than at randomly initialized ones
@test norm(FrankWolfe.compute_gradient(f_stoch_noisy, params_perfect)) <
norm(FrankWolfe.compute_gradient(f_stoch_noisy, params))
# test that error at true parameters is lower than at randomly initialized ones
@test FrankWolfe.compute_value(f_stoch_noisy, params) >
FrankWolfe.compute_value(f_stoch_noisy, params_perfect)
# Vanilla Stochastic Gradient Descent with reshuffling
for idx in 1:1000
for data_point in shuffle!(data_perfect)
storage .= 0
params .-= 0.05 * ∇simple_reg_loss(storage, params, data_point) / length(data_perfect)
end
end
# test that SGD converged towards true parameters
@test norm(params - params_perfect) <= 1e-6
#####
# Stochastic Frank Wolfe version
# We constrain the argument in the L2-norm ball with a large-enough radius
lmo = FrankWolfe.LpNormLMO{2}(1.05 * norm(params_perfect))
params0 = rand(6) .- 1 # start params in (-1,0)
k = 10000
@time x, v, primal, dual_gap, trajectoryS = FrankWolfe.stochastic_frank_wolfe(
f_stoch_noisy,
lmo,
copy(params0),
verbose=true,
rng=Random.GLOBAL_RNG,
line_search=FrankWolfe.Nonconvex(),
max_iteration=k,
print_iter=k / 10,
batch_size=length(f_stoch_noisy.xs) ÷ 100 + 1,
trajectory=true,
)
@time x, v, primal, dual_gap, trajectory09 = FrankWolfe.stochastic_frank_wolfe(
f_stoch_noisy,
lmo,
copy(params0),
momentum=0.9,
verbose=true,
rng=Random.GLOBAL_RNG,
line_search=FrankWolfe.Nonconvex(),
max_iteration=k,
print_iter=k / 10,
batch_size=length(f_stoch_noisy.xs) ÷ 100 + 1,
trajectory=true,
)
@time x, v, primal, dual_gap, trajectory099 = FrankWolfe.stochastic_frank_wolfe(
f_stoch_noisy,
lmo,
copy(params0),
momentum=0.99,
verbose=true,
rng=Random.GLOBAL_RNG,
line_search=FrankWolfe.Nonconvex(),
max_iteration=k,
print_iter=k / 10,
batch_size=length(f_stoch_noisy.xs) ÷ 100 + 1,
trajectory=true,
)
ff(x) = sum(simple_reg_loss(x, data_point) for data_point in data_noisy)
function gradf(storage, x)
storage .= 0
for dp in data_noisy
∇simple_reg_loss(storage, x, dp)
end
end
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
ff,
gradf,
lmo,
params,
verbose=true,
line_search=FrankWolfe.Adaptive(L_est=10.0, relaxed_smoothness=true),
max_iteration=k,
print_iter=k / 10,
trajectory=true,
)
data = [trajectory, trajectoryS, trajectory09, trajectory099]
label = ["exact", "stochastic", "stochM 0.9", "stochM 0.99"]
plot_trajectories(data, label)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3176 | #=
Lower bound instance from
http://proceedings.mlr.press/v28/jaggi13.pdf
and
https://arxiv.org/abs/1309.5550
Example instance is to minimize || x ||^2 over the probability simplex conv(e_1, ..., e_n)
Then the primal gap is lower bounded by 1/k - 1/n in iteration k as the optimal solution has value 1/n attained by
the (1/n, ..., 1/n) vector and in iteration k we have picked up at most k vertices from the simplex lower bounding the
primal value by 1/k.
Here: slightly rewritten to consider || x - (1/n, ..., 1/n) ||^2 so that the function value becomes directly the
primal gap (squared)
Three runs are compared:
1. Frank-Wolfe with traditional step-size rule
2. Away-step Frank-Wolfe with adaptive step-size rule
3. Blended Conditional Gradients with adaptive step-size rule
NOTE:
1. ignore the timing graphs
2. as the primal gap lower bounds the dual gap we also plot the primal gap lower bound in the dual gap graph
3. AFW violates the bound in the very first round which is due to the initialization in AFW starting with two vertices
4. all methods that call an LMO at most once in an iteration are subject to this lower bound
5. the objective is strongly convex; this implies limitations also for strongly convex functions (see https://arxiv.org/abs/1906.07867 for a discussion)
=#
using FrankWolfe
using LinearAlgebra
include("../examples/plot_utils.jl")
# n = Int(1e1)
n = Int(1e2)
k = Int(1e3)
xp = 1 / n * ones(n);
# definition of objective
f(x) = norm(x - xp)^2
# definition of gradient
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
# define LMO and do initial call to obtain starting point
lmo = FrankWolfe.ProbabilitySimplexOracle(1)
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
# simple benchmarking of oracles to get an idea of how expensive each component is
FrankWolfe.benchmark_oracles(f, grad!, () -> rand(n), lmo; k=100)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
epsilon=1e-5,
trajectory=true,
);
@time x, v, primal, dual_gap, trajectoryAFW = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
epsilon=1e-5,
trajectory=true,
);
@time x, v, primal, dual_gap, trajectoryBCG, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
epsilon=1e-5,
trajectory=true,
);
# define lower bound
trajLowerbound = []
for i in 1:n-1
push!(trajLowerbound, (i, 1 / i - 1 / n, NaN, 1 / i - 1 / n, NaN))
end
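# Small numerical illustration (a sketch, not in the original example): the best point
# supported on k coordinates is (1/k, ..., 1/k, 0, ..., 0), whose objective value is
# exactly 1/k - 1/n, matching the lower bound pushed above.
let ktest = 10
    xk = zeros(n)
    xk[1:ktest] .= 1 / ktest
    @assert isapprox(f(xk), 1 / ktest - 1 / n; atol=1e-10)
end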
data = [trajectory, trajectoryAFW, trajectoryBCG, trajLowerbound]
label = ["FW", "AFW", "BCG", "Lowerbound"]
# ignore the timing plots - they are not relevant for this example
plot_trajectories(data, label, xscalelog=true, legend_position=:bottomleft)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3980 | # This example highlights the use of a linear minimization oracle
# using an LP solver defined in MathOptInterface
# we compare the performance of the closed-form and the MOI-based LMOs, in- and out-of-place
#
# to get accurate timings it is important to run twice so that the compile time of Julia for the first run
# is not tainting the results
using FrankWolfe
using LinearAlgebra
using LaTeXStrings
include("plot_utils.jl") # provides `plot_results`; adjust the path if running from outside the examples folder
using JuMP
const MOI = JuMP.MOI
import GLPK
n = Int(1e3)
k = 10000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
return nothing
end
lmo_radius = 2.5
lmo = FrankWolfe.ProbabilitySimplexOracle(lmo_radius)
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
gradient = collect(x00)
x_lmo, v, primal, dual_gap, trajectory_lmo = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
)
# create a MathOptInterface Optimizer and build the same linear constraints
o = GLPK.Optimizer()
x = MOI.add_variables(o, n)
# x_i ≥ 0
for xi in x
MOI.add_constraint(o, xi, MOI.GreaterThan(0.0))
end
# ∑ x_i == 1
MOI.add_constraint(
o,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, x), 0.0),
MOI.EqualTo(lmo_radius),
)
lmo_moi = FrankWolfe.MathOptLMO(o)
x, v, primal, dual_gap, trajectory_moi = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_moi,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
)
# formulate the LP using JuMP
m = JuMP.Model(GLPK.Optimizer)
@variable(m, y[1:n] ≥ 0)
# ∑ x_i == 1
@constraint(m, sum(y) == lmo_radius)
lmo_jump = FrankWolfe.MathOptLMO(m.moi_backend)
x, v, primal, dual_gap, trajectory_jump = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_jump,
collect(copy(x00)),
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
)
x_lmo, v, primal, dual_gap, trajectory_lmo_blas = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x00,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=true,
trajectory=true,
)
x, v, primal, dual_gap, trajectory_jump_blas = FrankWolfe.frank_wolfe(
f,
grad!,
lmo_jump,
x00,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=true,
trajectory=true,
)
# Define the x-axis for the series when plotting in terms of iterations.
iteration_list = [[x[1] + 1 for x in trajectory_lmo], [x[1] + 1 for x in trajectory_moi]]
# Define the x-axis for the series when plotting in terms of time.
time_list = [[x[5] for x in trajectory_lmo], [x[5] for x in trajectory_moi]]
# Define the y-axis for the series when plotting the primal gap.
primal_gap_list = [[x[2] for x in trajectory_lmo], [x[2] for x in trajectory_moi]]
# Define the y-axis for the series when plotting the dual gap.
dual_gap_list = [[x[4] for x in trajectory_lmo], [x[4] for x in trajectory_moi]]
# Define the labels for the series using LaTeX rendering.
label = [L"\textrm{Closed-form LMO}", L"\textrm{GLPK LMO}"]
plot_results(
[primal_gap_list, primal_gap_list, dual_gap_list, dual_gap_list],
[iteration_list, time_list, iteration_list, time_list],
label,
["", "", L"\textrm{Iteration}", L"\textrm{Time}"],
[L"\textrm{Primal Gap}", "", L"\textrm{Dual Gap}", ""],
xscalelog=[:log, :identity, :log, :identity],
yscalelog=[:log, :log, :log, :log],
legend_position=[:bottomleft, nothing, nothing, nothing],
filename="moi_compare.pdf",
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 8636 | using FrankWolfe
import Arpack
# download movielens data
using ZipFile, DataFrames, CSV
import JSON
using Random
using Profile
using SparseArrays, LinearAlgebra
temp_zipfile = download("http://files.grouplens.org/datasets/movielens/ml-latest-small.zip")
# temp_zipfile = download("http://files.grouplens.org/datasets/movielens/ml-25m.zip")
#temp_zipfile = download("http://files.grouplens.org/datasets/movielens/ml-latest.zip")
zarchive = ZipFile.Reader(temp_zipfile)
movies_file = zarchive.files[findfirst(f -> occursin("movies", f.name), zarchive.files)]
movies_frame = CSV.read(movies_file, DataFrame)
ratings_file = zarchive.files[findfirst(f -> occursin("ratings", f.name), zarchive.files)]
ratings_frame = CSV.read(ratings_file, DataFrame)
# ratings_frame has the columns userId, movieId, and rating;
# we construct a sparse matrix with users as rows and movies as columns,
# leaving non-rated movies as structural zeros
users = unique(ratings_frame[:, :userId])
movies = unique(ratings_frame[:, :movieId])
@assert users == eachindex(users)
movies_revert = zeros(Int, maximum(movies))
for (idx, m) in enumerate(movies)
movies_revert[m] = idx
end
movies_indices = [movies_revert[idx] for idx in ratings_frame[:, :movieId]]
const rating_matrix = sparse(
ratings_frame[:, :userId],
movies_indices,
ratings_frame[:, :rating],
length(users),
length(movies),
)
missing_rate = 0.05
Random.seed!(42)
const missing_ratings = Tuple{Int,Int}[]
const present_ratings = Tuple{Int,Int}[]
let
(I, J, V) = SparseArrays.findnz(rating_matrix)
for idx in eachindex(I)
if V[idx] > 0
if rand() <= missing_rate
push!(missing_ratings, (I[idx], J[idx]))
else
push!(present_ratings, (I[idx], J[idx]))
end
end
end
end
function f(X)
# note: we iterate over the rating_matrix indices,
# since it is sparse unlike X
r = 0.0
for (i, j) in present_ratings
r += 0.5 * (X[i, j] - rating_matrix[i, j])^2
end
return r
end
function grad!(storage, X)
storage .= 0
for (i, j) in present_ratings
storage[i, j] = X[i, j] - rating_matrix[i, j]
end
return nothing
end
function test_loss(X)
r = 0.0
for (i, j) in missing_ratings
r += 0.5 * (X[i, j] - rating_matrix[i, j])^2
end
return r
end
# project X onto the nuclear norm ball of the given radius
# (note: destructuring svd(X) yields U, S, V, so the variable named Vt below holds V,
# which is why the adjoint Vt' is used in the reconstruction)
function project_nuclear_norm_ball(X; radius=1.0)
U, sing_val, Vt = svd(X)
if (sum(sing_val) <= radius)
return X, -norm_estimation * U[:, 1] * Vt[:, 1]'
end
sing_val = FrankWolfe.projection_simplex_sort(sing_val, s=radius)
return U * Diagonal(sing_val) * Vt', -norm_estimation * U[:, 1] * Vt[:, 1]'
end
#norm_estimation = 400 * Arpack.svds(rating_matrix, nsv=1, ritzvec=false)[1].S[1]
norm_estimation = 10 * Arpack.svds(rating_matrix, nsv=1, ritzvec=false)[1].S[1]
const lmo = FrankWolfe.NuclearNormLMO(norm_estimation)
const x0 = FrankWolfe.compute_extreme_point(lmo, zero(rating_matrix))
const k = 100
# benchmark the oracles
FrankWolfe.benchmark_oracles(
f,
(str, x) -> grad!(str, x),
() -> randn(size(rating_matrix)),
lmo;
k=100,
)
gradient = spzeros(size(x0)...)
gradient_aux = spzeros(size(x0)...)
# callback pushing the five standard state entries plus the test loss at the current iterate to the trajectory
function build_callback(trajectory_arr)
return function callback(state, args...)
return push!(trajectory_arr, (FrankWolfe.callback_state(state)..., test_loss(state.x)))
end
end
#Estimate the smoothness constant.
num_pairs = 1000
L_estimate = -Inf
for i in 1:num_pairs
global L_estimate
# computing random rank-one matrices
u1 = rand(size(x0, 1))
u1 ./= sum(u1)
u1 .*= norm_estimation
v1 = rand(size(x0, 2))
v1 ./= sum(v1)
x = FrankWolfe.RankOneMatrix(u1, v1)
u2 = rand(size(x0, 1))
u2 ./= sum(u2)
u2 .*= norm_estimation
v2 = rand(size(x0, 2))
v2 ./= sum(v2)
y = FrankWolfe.RankOneMatrix(u2, v2)
grad!(gradient, x)
grad!(gradient_aux, y)
new_L = norm(gradient - gradient_aux) / norm(x - y)
if new_L > L_estimate
L_estimate = new_L
end
end
# PGD steps
xgd = Matrix(x0)
function_values = Float64[]
timing_values = Float64[]
function_test_values = Float64[]
ls = FrankWolfe.Backtracking()
ls_workspace = FrankWolfe.build_linesearch_workspace(ls, xgd, gradient)
time_start = time_ns()
for _ in 1:k
f_val = f(xgd)
push!(function_values, f_val)
push!(function_test_values, test_loss(xgd))
push!(timing_values, (time_ns() - time_start) / 1e9)
grad!(gradient, xgd)
xgd_new, vertex = project_nuclear_norm_ball(xgd - gradient / L_estimate, radius=norm_estimation)
gamma = FrankWolfe.perform_line_search(
ls,
1,
f,
grad!,
gradient,
xgd,
xgd - xgd_new,
1.0,
ls_workspace,
FrankWolfe.InplaceEmphasis(),
)
@. xgd -= gamma * (xgd - xgd_new)
end
trajectory_arr_fw = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_fw)
xfin, _, _, _, traj_data = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=10 * k,
print_iter=k / 10,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
trajectory_arr_lazy = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_lazy)
xlazy, _, _, _, _ = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=10 * k,
print_iter=k / 10,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
trajectory_arr_lazy_ref = Vector{Tuple{Int64,Float64,Float64,Float64,Float64,Float64}}()
callback = build_callback(trajectory_arr_lazy_ref)
xlazy, _, _, _, _ = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x0;
epsilon=1e-9,
max_iteration=50 * k,
print_iter=k / 10,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=gradient,
callback=callback,
)
@info "Gdescent test loss: $(test_loss(xgd))"
@info "FW test loss: $(test_loss(xfin))"
@info "LCG test loss: $(test_loss(xlazy))"
fw_test_values = getindex.(trajectory_arr_fw, 6)
lazy_test_values = getindex.(trajectory_arr_lazy, 6)
open(joinpath(@__DIR__, "movielens_result.json"), "w") do f
data = JSON.json((
svals_gd=svdvals(xgd),
svals_fw=svdvals(xfin),
svals_lcg=svdvals(xlazy),
fw_test_values=fw_test_values,
lazy_test_values=lazy_test_values,
trajectory_arr_fw=trajectory_arr_fw,
trajectory_arr_lazy=trajectory_arr_lazy,
function_values_gd=function_values,
function_values_test_gd=function_test_values,
timing_values_gd=timing_values,
trajectory_arr_lazy_ref=trajectory_arr_lazy_ref,
))
return write(f, data)
end
#Plot results w.r.t. iteration count
gr()
pit = plot(
getindex.(trajectory_arr_fw, 1),
getindex.(trajectory_arr_fw, 2),
label="FW",
xlabel="iterations",
ylabel="Objective function",
yaxis=:log,
yguidefontsize=8,
xguidefontsize=8,
legendfontsize=8,
legend=:bottomleft,
)
plot!(getindex.(trajectory_arr_lazy, 1), getindex.(trajectory_arr_lazy, 2), label="LCG")
plot!(eachindex(function_values), function_values, yaxis=:log, label="GD")
plot!(eachindex(function_test_values), function_test_values, label="GD_test")
plot!(getindex.(trajectory_arr_fw, 1), getindex.(trajectory_arr_fw, 6), label="FW_T")
plot!(getindex.(trajectory_arr_lazy, 1), getindex.(trajectory_arr_lazy, 6), label="LCG_T")
savefig(pit, "objective_func_vs_iteration.pdf")
#Plot results w.r.t. time
pit = plot(
getindex.(trajectory_arr_fw, 5),
getindex.(trajectory_arr_fw, 2),
label="FW",
ylabel="Objective function",
yaxis=:log,
xlabel="time (s)",
yguidefontsize=8,
xguidefontsize=8,
legendfontsize=8,
legend=:bottomleft,
)
plot!(getindex.(trajectory_arr_lazy, 5), getindex.(trajectory_arr_lazy, 2), label="LCG")
plot!(getindex.(trajectory_arr_lazy, 5), getindex.(trajectory_arr_lazy, 6), label="LCG_T")
plot!(getindex.(trajectory_arr_fw, 5), getindex.(trajectory_arr_fw, 6), label="FW_T")
plot!(timing_values, function_values, label="GD", yaxis=:log)
plot!(timing_values, function_test_values, label="GD_test")
savefig(pit, "objective_func_vs_time.pdf")
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1735 | # This script plots the results of the movielens matrix-completion example:
# it loads the JSON file written by that example and compares Frank-Wolfe,
# lazified conditional gradient, and gradient descent in terms of primal gap
# and test error, both against iterations and against time.
using FrankWolfe
using JSON
using LaTeXStrings
# plot_results is defined in the shared plotting utilities of the examples
# (this assumes plot_utils.jl sits next to this script)
include(joinpath(@__DIR__, "plot_utils.jl"))
results = JSON.Parser.parsefile(joinpath(@__DIR__, "movielens_result.json"))
ref_optimum = results["trajectory_arr_lazy_ref"][end][2]
iteration_list = [
[x[1] + 1 for x in results["trajectory_arr_fw"]],
[x[1] + 1 for x in results["trajectory_arr_lazy"]],
collect(1:1:length(results["function_values_gd"])),
]
time_list = [
[x[5] for x in results["trajectory_arr_fw"]],
[x[5] for x in results["trajectory_arr_lazy"]],
results["timing_values_gd"],
]
primal_gap_list = [
[x[2] - ref_optimum for x in results["trajectory_arr_fw"]],
[x[2] - ref_optimum for x in results["trajectory_arr_lazy"]],
[x - ref_optimum for x in results["function_values_gd"]],
]
test_list =
[results["fw_test_values"], results["lazy_test_values"], results["function_values_test_gd"]]
label = [L"\textrm{FW}", L"\textrm{L-CG}", L"\textrm{GD}"]
plot_results(
[primal_gap_list, primal_gap_list, test_list, test_list],
[iteration_list, time_list, iteration_list, time_list],
label,
[L"\textrm{Iteration}", L"\textrm{Time}", L"\textrm{Iteration}", L"\textrm{Time}"],
[
L"\textrm{Primal Gap}",
L"\textrm{Primal Gap}",
L"\textrm{Test Error}",
L"\textrm{Test Error}",
],
xscalelog=[:log, :identity, :log, :identity],
legend_position=[:bottomleft, nothing, nothing, nothing],
filename="movielens_result.pdf",
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 798 |
using FrankWolfe
using LinearAlgebra
using ReverseDiff
n = Int(1e3);
k = 1e5
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = 2 * norm(x - xp)^3 - norm(x)^2
# this is just for the example; in practice, it is better to define your gradient explicitly
grad!(storage, x) = ReverseDiff.gradient!(storage, f, x)
# pick feasible region
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0); # the radius needs to be a float
# compute some initial vertex
x0 = collect(FrankWolfe.compute_extreme_point(lmo, zeros(n)))
# benchmarking Oracles
FrankWolfe.benchmark_oracles(f, grad!, () -> randn(n), lmo; k=100)
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Nonconvex(),
print_iter=k / 10,
verbose=true,
);
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3632 | using FrankWolfe
import Random
using SparseArrays, LinearAlgebra
using Test
using Plots
const nfeat = 100 * 5
const nobs = 500
# rank of the real data
const r = 30
const Xreal = Matrix{Float64}(undef, nobs, nfeat)
const X_gen_cols = randn(nfeat, r)
const X_gen_rows = randn(r, nobs)
const svals = 100 * rand(r)
for i in 1:nobs
for j in 1:nfeat
Xreal[i, j] = sum(X_gen_cols[j, k] * X_gen_rows[k, i] * svals[k] for k in 1:r)
end
end
nucnorm(Xmat) = sum(abs(σi) for σi in LinearAlgebra.svdvals(Xmat))
@test rank(Xreal) == r
# mark a random subset of entries (roughly 4% of the 500×500 matrix) as missing
const missing_entries = unique!([(rand(1:nobs), rand(1:nfeat)) for _ in 1:10000])
const present_entries = [(i, j) for i in 1:nobs, j in 1:nfeat if (i, j) ∉ missing_entries]
f(X) = 0.5 * sum((X[i, j] - Xreal[i, j])^2 for (i, j) in present_entries)
function grad!(storage, X)
storage .= 0
for (i, j) in present_entries
storage[i, j] = X[i, j] - Xreal[i, j]
end
return nothing
end
const lmo = FrankWolfe.NuclearNormLMO(275_000.0)
const x0 = FrankWolfe.compute_extreme_point(lmo, zero(Xreal))
FrankWolfe.benchmark_oracles(f, grad!, () -> randn(size(Xreal)), lmo; k=100)
# gradient descent
gradient = similar(x0)
xgd = Matrix(x0)
for _ in 1:5000
@info f(xgd)
grad!(gradient, xgd)
xgd .-= 0.01 * gradient
if norm(gradient) ≤ sqrt(eps())
break
end
end
grad!(gradient, x0)
v0 = FrankWolfe.compute_extreme_point(lmo, gradient)
@test dot(v0 - x0, gradient) < 0
const k = 500
x00 = copy(x0)
xfin, vmin, _, _, traj_data = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x00;
epsilon=1e7,
max_iteration=k,
print_iter=k / 10,
trajectory=true,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=spzeros(size(x0)...),
)
xfinlcg, vmin, _, _, traj_data = FrankWolfe.lazified_conditional_gradient(
f,
grad!,
lmo,
x00;
epsilon=1e7,
max_iteration=k,
print_iter=k / 10,
trajectory=true,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
gradient=spzeros(size(x0)...),
)
x00 = copy(x0)
xfinAFW, vmin, _, _, traj_data = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x00;
epsilon=1e7,
max_iteration=k,
print_iter=k / 10,
trajectory=true,
verbose=true,
lazy=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
)
x00 = copy(x0)
xfinBCG, vmin, _, _, traj_data, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x00;
epsilon=1e7,
max_iteration=k,
print_iter=k / 10,
trajectory=true,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
)
xfinBPCG, vmin, _, _, traj_data = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
x00;
epsilon=1e7,
max_iteration=k,
print_iter=k / 10,
trajectory=true,
verbose=true,
line_search=FrankWolfe.Adaptive(),
memory_mode=FrankWolfe.InplaceEmphasis(),
# lazy=true,
)
pit = plot(svdvals(xfin), label="FW", width=3, yaxis=:log)
plot!(svdvals(xfinlcg), label="LCG", width=3, yaxis=:log)
plot!(svdvals(xfinAFW), label="LAFW", width=3, yaxis=:log)
plot!(svdvals(xfinBCG), label="BCG", width=3, yaxis=:log)
plot!(svdvals(xfinBPCG), label="BPCG", width=3, yaxis=:log)
plot!(svdvals(xgd), label="Gradient descent", width=3, yaxis=:log)
plot!(svdvals(Xreal), label="Real matrix", linestyle=:dash, width=3, color=:black)
title!("Singular values")
savefig(pit, "matrix_completion.pdf")
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1893 | using LinearAlgebra
using FrankWolfe
using Random
include("../examples/plot_utils.jl")
n = 3000
k = 5000
s = 97
@info "Seed $s"
Random.seed!(s)
epsilon=1e-10
# strongly convex set
xp2 = 10 * ones(n)
diag_term = 5 * rand(n)
covariance_matrix = zeros(n,n) + LinearAlgebra.Diagonal(diag_term)
lmo2 = FrankWolfe.EllipsoidLMO(covariance_matrix)
f2(x) = norm(x - xp2)^2
function grad2!(storage, x)
@. storage = 2 * (x - xp2)
end
x0 = FrankWolfe.compute_extreme_point(lmo2, randn(n))
res_2 = FrankWolfe.frank_wolfe(
f2,
grad2!,
lmo2,
copy(x0),
max_iteration=k,
line_search=FrankWolfe.Agnostic(2),
print_iter= k / 10,
epsilon=epsilon,
verbose=true,
trajectory=true,
)
res_4 = FrankWolfe.frank_wolfe(
f2,
grad2!,
lmo2,
copy(x0),
max_iteration=k,
line_search=FrankWolfe.Agnostic(4),
print_iter= k / 10,
epsilon=epsilon,
verbose=true,
trajectory=true,
)
res_6 = FrankWolfe.frank_wolfe(
f2,
grad2!,
lmo2,
copy(x0),
max_iteration=k,
line_search=FrankWolfe.Agnostic(6),
print_iter= k / 10,
epsilon=epsilon,
verbose=true,
trajectory=true,
)
res_log = FrankWolfe.frank_wolfe(
f2,
grad2!,
lmo2,
copy(x0),
max_iteration=k,
line_search=FrankWolfe.Agnostic(-1),
print_iter= k / 10,
epsilon=epsilon,
verbose=true,
trajectory=true,
)
res_adapt = FrankWolfe.frank_wolfe(
f2,
grad2!,
lmo2,
copy(x0),
max_iteration=k,
line_search=FrankWolfe.Adaptive(relaxed_smoothness=true),
print_iter=k / 10,
epsilon=epsilon,
verbose=true,
trajectory=true,
)
plot_trajectories([res_2[end], res_4[end], res_6[end], res_log[end], res_adapt[end]], ["ell = 2 (default)", "ell = 4", "ell = 6", "ell = log t", "adaptive"], marker_shapes=[:dtriangle, :rect, :circle, :pentagon, :octagon], xscalelog=true, reduce_size=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 7282 | ## Benchmark example
using FrankWolfe
using Random
using Distributions
using LinearAlgebra
using Statistics
using Test
# The Optimal Experiment Design Problem consists of choosing a subset of experiments
# maximising the information gain.
# The limit version of this problem (see below) is a continuous relaxation in which
# the number of allowed experiments goes to infinity.
# Thus, the solution can be interpreted as a probability distribution.
#
# min_x Φ(A^T diag(x) A)
# s.t. ∑ x_i = 1
# x ≥ 0
#
# A denotes the Experiment Matrix. We generate it randomly.
# Φ is a function from the PD cone into R.
# In our case, Φ is
# Trace(X^{-1}) (A-Optimal)
# and
# -logdet(X) (D-Optimal).
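# Quick illustration of the two criteria (hedged sketch on an arbitrary toy matrix,
# not data generated by build_data below):
let X_toy = [2.0 0.5; 0.5 1.0]
    @assert isposdef(X_toy)
    a_crit = tr(inv(X_toy))    # A-criterion: Trace(X^{-1})
    d_crit = -log(det(X_toy))  # D-criterion: -logdet(X)
    @assert a_crit > 0 && isfinite(d_crit)
end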
"""
build_data(m)
m - number of experiments.
Build the experiment matrix A.
"""
function build_data(m)
n = Int(floor(m/10))
B = rand(m,n)
B = B'*B
@assert isposdef(B)
D = MvNormal(randn(n),B)
A = rand(D, m)'
@assert rank(A) == n
return A
end
"""
Check whether a given point x is in the domain of f, i.e. whether
X = transpose(A) * diagm(x) * A is positive definite.
"""
function build_domain_oracle(A)
m, n = size(A)
return function domain_oracle(x)
S = findall(x-> !iszero(x),x)
#@show rank(A[S,:]) == n
return rank(A[S,:]) == n #&& sum(x .< 0) == 0
end
end
"""
Find n linearly independent rows of A to build the starting point.
"""
function linearly_independent_rows(A)
S = []
m, n = size(A)
for i in 1:m
S_i= vcat(S, i)
if rank(A[S_i,:])==length(S_i)
S=S_i
end
if length(S) == n # we only need n linearly independent rows
return S
end
end
return S
end
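# Hedged sketch (toy matrix, not experiment data): for a small full-column-rank matrix,
# the selected rows form an invertible square submatrix.
let A_toy = [1.0 0.0; 0.0 1.0; 1.0 1.0]
    S_toy = linearly_independent_rows(A_toy)
    @assert length(S_toy) == 2
    @assert rank(A_toy[S_toy, :]) == 2
end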
"""
Build start point used in Boscia in case of A-opt and D-opt.
The objective functions are self-concordant, so not every point in the feasible region
is in the domain of f and grad!.
"""
function build_start_point(A)
# Get n linearly independent rows of A
m, n = size(A)
S = linearly_independent_rows(A)
@assert length(S) == n
V = Vector{Float64}[]
for i in S
v = zeros(m)
v[i] = 1.0
push!(V, v)
end
x = sum(V .* 1/n)
active_set= FrankWolfe.ActiveSet(fill(1/n, n), V, x)
return x, active_set, S
end
# A Optimal
"""
Build function for the A-criterion.
"""
function build_a_criterion(A; μ=0.0, build_safe=true)
m, n = size(A)
a=m
domain_oracle = build_domain_oracle(A)
function f_a(x)
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X)
U = cholesky(X)
X_inv = U \ I
return LinearAlgebra.tr(X_inv)/a
end
function grad_a!(storage, x)
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X*X)
F = cholesky(X)
for i in 1:length(x)
storage[i] = LinearAlgebra.tr(- (F \ A[i,:]) * transpose(A[i,:]))/a
end
return storage #float.(storage) # in case of x .= BigFloat(x)
end
function f_a_safe(x)
if !domain_oracle(x)
return Inf
end
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X)
X_inv = LinearAlgebra.inv(X)
return LinearAlgebra.tr(X_inv)/a
end
function grad_a_safe!(storage, x)
if !domain_oracle(x)
return fill(Inf, length(x))
end
#x = BigFloat.(x) # this setting can be useful for numerically tricky problems
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X*X)
F = cholesky(X)
for i in 1:length(x)
storage[i] = LinearAlgebra.tr(- (F \ A[i,:]) * transpose(A[i,:]))/a
end
return storage #float.(storage) # in case of x .= BigFloat(x)
end
if build_safe
return f_a_safe, grad_a_safe!
end
return f_a, grad_a!
end
# D Optimal
"""
Build function for the D-criterion.
"""
function build_d_criterion(A; μ =0.0, build_safe=true)
m, n = size(A)
a=m
domain_oracle = build_domain_oracle(A)
function f_d(x)
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X)
return -log(det(X))/a
end
function grad_d!(storage, x)
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X= Symmetric(X)
F = cholesky(X)
for i in 1:length(x)
storage[i] = 1/a * LinearAlgebra.tr(-(F \ A[i,:] )*transpose(A[i,:]))
end
# https://stackoverflow.com/questions/46417005/exclude-elements-of-array-based-on-index-julia
return storage
end
function f_d_safe(x)
if !domain_oracle(x)
return Inf
end
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X = Symmetric(X)
return -log(det(X))/a
end
function grad_d_safe!(storage, x)
if !domain_oracle(x)
return fill(Inf, length(x))
end
X = transpose(A)*diagm(x)*A + Matrix(μ *I, n, n)
X= Symmetric(X)
F = cholesky(X)
for i in 1:length(x)
storage[i] = 1/a * LinearAlgebra.tr(-(F \ A[i,:] )*transpose(A[i,:]))
end
# https://stackoverflow.com/questions/46417005/exclude-elements-of-array-based-on-index-julia
return storage
end
if build_safe
return f_d_safe, grad_d_safe!
end
return f_d, grad_d!
end
m = 300
@testset "Limit Optimal Design Problem" begin
@testset "A-Optimal Design" begin
A = build_data(m)
f, grad! = build_a_criterion(A, build_safe=true)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0, active_set = build_start_point(A)
x, _, primal, dual_gap, traj_data, _ = FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set, verbose=true, trajectory=true)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
f, grad! = build_a_criterion(A, build_safe=false)
x0, active_set = build_start_point(A)
domain_oracle = build_domain_oracle(A)
x_s, _, primal, dual_gap, traj_data_s, _ = FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set, verbose=true, line_search=FrankWolfe.Secant(domain_oracle=domain_oracle), trajectory=true)
@test traj_data_s[end][1] < traj_data[end][1]
@test isapprox(f(x_s), f(x))
end
@testset "D-Optimal Design" begin
A = build_data(m)
f, grad! = build_d_criterion(A)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0, active_set = build_start_point(A)
x, _, primal, dual_gap, traj_data, _ = FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set, verbose=true, trajectory=true)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
f, grad! = build_d_criterion(A, build_safe=false)
x0, active_set = build_start_point(A)
domain_oracle = build_domain_oracle(A)
x_s, _, primal, dual_gap, traj_data_s, _ = FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set, verbose=true, line_search=FrankWolfe.Secant(domain_oracle=domain_oracle), trajectory=true)
@test traj_data_s[end][1] < traj_data[end][1]
@test isapprox(f(x_s), f(x))
end
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 13714 | using Plots
"""
plot_results
Given a series of lists, generate subplots.
list_data_y -> contains a list of a list of lists (where each list refers to a subplot, and a list of lists refers to the y-values of the series inside a subplot).
list_data_x -> contains a list of a list of lists (where each list refers to a subplot, and a list of lists refers to the x-values of the series inside a subplot).
So if we have one plot with two series, these might look like:
list_data_y = [[[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]]]
list_data_x = [[[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]]]
And if we have two plots, each with two series, these might look like:
list_data_y = [[[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12], [7, 8, 9, 10, 11, 12]]]
list_data_x = [[[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12], [7, 8, 9, 10, 11, 12]]]
list_label -> contains the labels for the series that will be plotted,
which has to have a length equal to the number of series that are being plotted:
list_label = ["Series 1", "Series 2"]
list_axis_x -> contains the labels for the x-axis that will be plotted,
which has to have a length equal to the number of subplots:
list_axis_x = ["x-axis plot 1", "x-axis plot 1"]
list_axis_y -> Same as list_axis_x but for the y-axis
xscalelog -> A list of values indicating the type of axes to use in each subplot,
its length must equal the number of subplots:
xscalelog = [:log, :identity]
yscalelog -> Same as xscalelog but for the y-axis
"""
function plot_results(
list_data_y,
list_data_x,
list_label,
list_axis_x,
list_axis_y;
filename=nothing,
xscalelog=nothing,
yscalelog=nothing,
legend_position=nothing,
list_style=fill(:solid, length(list_label)),
list_color=get_color_palette(:auto, plot_color(:white)),
list_markers=[
:circle,
:rect,
:utriangle,
:diamond,
:hexagon,
:+,
:x,
:star5,
:cross,
:xcross,
:dtriangle,
:rtriangle,
:ltriangle,
:pentagon,
:heptagon,
:octagon,
:star4,
:star6,
:star7,
:star8,
:vline,
:hline,
],
number_markers_per_line=10,
line_width=3.0,
marker_size=5.0,
transparency_markers=0.45,
font_size_axis=12,
font_size_legend=9,
)
gr()
plt = nothing
list_plots = Plots.Plot{Plots.GRBackend}[]
# Plot an appropriate number of subplots
for i in eachindex(list_data_x)
for j in eachindex(list_data_x[i])
if isnothing(xscalelog)
xscale = :identity
else
xscale = xscalelog[i]
end
if isnothing(yscalelog)
yscale = :log
else
yscale = yscalelog[i]
end
if isnothing(legend_position)
position_legend = :best
legend_display = true
else
position_legend = legend_position[i]
if isnothing(position_legend)
legend_display = false
else
legend_display = true
end
end
if j == 1
if legend_display
plt = plot(
list_data_x[i][j],
list_data_y[i][j],
label="",
xaxis=xscale,
yaxis=yscale,
ylabel=list_axis_y[i],
xlabel=list_axis_x[i],
legend=position_legend,
yguidefontsize=font_size_axis,
xguidefontsize=font_size_axis,
legendfontsize=font_size_legend,
width=line_width,
linestyle=list_style[j],
color=list_color[j],
grid=true,
)
else
plt = plot(
list_data_x[i][j],
list_data_y[i][j],
label="",
xaxis=xscale,
yaxis=yscale,
ylabel=list_axis_y[i],
xlabel=list_axis_x[i],
yguidefontsize=font_size_axis,
xguidefontsize=font_size_axis,
width=line_width,
linestyle=list_style[j],
color=list_color[j],
grid=true,
)
end
else
if legend_display
plot!(
list_data_x[i][j],
list_data_y[i][j],
label="",
width=line_width,
linestyle=list_style[j],
color=list_color[j],
legend=position_legend,
)
else
plot!(
list_data_x[i][j],
list_data_y[i][j],
label="",
width=line_width,
linestyle=list_style[j],
color=list_color[j],
)
end
end
if xscale == :log
indices =
round.(
Int,
10 .^ (range(
log10(1),
log10(length(list_data_x[i][j])),
length=number_markers_per_line,
)),
)
scatter!(
list_data_x[i][j][indices],
list_data_y[i][j][indices],
markershape=list_markers[j],
markercolor=list_color[j],
markersize=marker_size,
markeralpha=transparency_markers,
label=list_label[j],
legend=position_legend,
)
else
scatter!(
view(
list_data_x[i][j],
1:length(list_data_x[i][j])÷number_markers_per_line:length(
list_data_x[i][j],
),
),
view(
list_data_y[i][j],
1:length(list_data_y[i][j])÷number_markers_per_line:length(
list_data_y[i][j],
),
),
markershape=list_markers[j],
markercolor=list_color[j],
markersize=marker_size,
markeralpha=transparency_markers,
label=list_label[j],
legend=position_legend,
)
end
end
push!(list_plots, plt)
end
fp = plot(list_plots..., layout=length(list_plots))
plot!(size=(600, 400))
if filename !== nothing
savefig(fp, filename)
end
return fp
end
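# Hedged usage sketch (synthetic data, kept commented out so that including this
# utility file does not produce a figure): one subplot containing two series of
# 20 points each; the labels and axis names below are purely illustrative.
# plot_results(
#     [[collect(1.0:20.0), collect(2.0:21.0)]],  # y-values: one subplot, two series
#     [[collect(1:20), collect(1:20)]],          # x-values for the same series
#     ["series A", "series B"],                  # one label per series
#     ["iteration"],                             # one x-axis label per subplot
#     ["value"],                                 # one y-axis label per subplot
# )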
# Recipe for plotting markers in plot_trajectories
@recipe function f(::Type{Val{:samplemarkers}}, x, y, z; n_markers=10, log=false)
n = length(y)
# Choose datapoints for markers
if log
xmin = log10(x[1])
xmax = log10(x[end])
thresholds = collect(xmin:(xmax-xmin)/(n_markers-1):xmax)
indices = [argmin(i -> abs(t - log10(x[i])), eachindex(x)) for t in thresholds]
else
indices = 1:Int(ceil(length(x) / n_markers)):n
end
sx, sy = x[indices], y[indices]
# add an empty series with the correct type for legend markers
@series begin
seriestype := :path
markershape --> :auto
x := []
y := []
end
# add a series for the line
@series begin
primary := false # no legend entry
markershape := :none # ensure no markers
seriestype := :path
seriescolor := get(plotattributes, :seriescolor, :auto)
x := x
y := y
end
# return a series for the sampled markers
primary := false
seriestype := :scatter
markershape --> :auto
x := sx
y := sy
z_order := 1
end
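# Hedged usage note: the recipe above is invoked through its series type, mirroring
# how plot_trajectories below passes its marker arguments, e.g.
# plot(1:100, rand(100) .+ 1, st=:samplemarkers, n_markers=5, shape=:circle, log=false)
# (kept as a comment so that including this utility file produces no figure).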
function plot_trajectories(
data,
label;
filename=nothing,
xscalelog=false,
yscalelog=true,
legend_position=:topright,
lstyle=fill(:solid, length(data)),
marker_shapes=nothing,
n_markers=10,
reduce_size=false,
primal_offset=1e-8,
line_width=1.3,
empty_marker=false,
extra_plot=false,
extra_plot_label="",
)
# theme(:dark)
# theme(:vibrant)
Plots.gr()
x = []
y = []
offset = 2
function sub_plot(idx_x, idx_y; legend=false, xlabel="", ylabel="", y_offset=0)
fig = nothing
for (i, trajectory) in enumerate(data)
l = length(trajectory)
if reduce_size && l > 1000
indices = Int.(round.(collect(1:l/1000:l)))
trajectory = trajectory[indices]
end
x = [trajectory[j][idx_x] for j in offset:length(trajectory)]
y = [trajectory[j][idx_y] + y_offset for j in offset:length(trajectory)]
if marker_shapes !== nothing && n_markers >= 2
marker_args = Dict(
:st => :samplemarkers,
:n_markers => n_markers,
:shape => marker_shapes[i],
:log => xscalelog,
:markercolor => empty_marker ? :white : :match,
:markerstrokecolor => empty_marker ? i : :match,
)
else
marker_args = Dict()
end
if i == 1
fig = plot(
x,
y,
label=label[i],
xaxis=xscalelog ? :log : :identity,
yaxis=yscalelog ? :log : :identity,
xlabel=xlabel,
ylabel=ylabel,
legend=legend,
yguidefontsize=8,
xguidefontsize=8,
legendfontsize=8,
width=line_width,
linestyle=lstyle[i];
marker_args...,
)
else
plot!(x, y, label=label[i], width=line_width, linestyle=lstyle[i]; marker_args...)
end
end
return fig
end
pit = sub_plot(1, 2; legend=legend_position, ylabel="Primal", y_offset=primal_offset)
pti = sub_plot(5, 2; y_offset=primal_offset)
dit = sub_plot(1, 4; xlabel="Iterations", ylabel="FW gap")
dti = sub_plot(5, 4; xlabel="Time (s)")
if extra_plot
iit = sub_plot(1, 6; ylabel=extra_plot_label)
iti = sub_plot(5, 6)
fp = plot(pit, pti, iit, iti, dit, dti, layout=(3, 2)) # layout = @layout([A{0.01h}; [B C; D E]]))
plot!(size=(600, 600))
else
fp = plot(pit, pti, dit, dti, layout=(2, 2)) # layout = @layout([A{0.01h}; [B C; D E]]))
plot!(size=(600, 400))
end
if filename !== nothing
savefig(fp, filename)
end
return fp
end
function plot_sparsity(
data,
label;
filename=nothing,
xscalelog=false,
legend_position=:topright,
yscalelog=true,
lstyle=fill(:solid, length(data)),
marker_shapes=nothing,
n_markers=10,
empty_marker=false,
reduce_size=false,
)
Plots.gr()
xscale = xscalelog ? :log : :identity
yscale = yscalelog ? :log : :identity
offset = 2
function subplot(idx_x, idx_y, ylabel)
fig = nothing
for (i, trajectory) in enumerate(data)
l = length(trajectory)
if reduce_size && l > 1000
indices = Int.(round.(collect(1:l/1000:l)))
trajectory = trajectory[indices]
end
x = [trajectory[j][idx_x] for j in offset:length(trajectory)]
y = [trajectory[j][idx_y] for j in offset:length(trajectory)]
if marker_shapes !== nothing && n_markers >= 2
marker_args = Dict(
:st => :samplemarkers,
:n_markers => n_markers,
:shape => marker_shapes[i],
:log => xscalelog,
:startmark => 5 + 20 * (i - 1),
:markercolor => empty_marker ? :white : :match,
:markerstrokecolor => empty_marker ? i : :match,
)
else
marker_args = Dict()
end
if i == 1
fig = plot(
x,
y;
label=label[i],
xaxis=xscale,
yaxis=yscale,
ylabel=ylabel,
legend=legend_position,
yguidefontsize=8,
xguidefontsize=8,
legendfontsize=8,
linestyle=lstyle[i],
marker_args...,
)
else
plot!(x, y; label=label[i], linestyle=lstyle[i], marker_args...)
end
end
return fig
end
ps = subplot(6, 2, "Primal")
ds = subplot(6, 4, "FW gap")
fp = plot(ps, ds, layout=(1, 2)) # layout = @layout([A{0.01h}; [B C; D E]]))
plot!(size=(600, 200))
if filename !== nothing
savefig(fp, filename)
end
return fp
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1590 |
using FrankWolfe
using JSON
using LaTeXStrings
results = JSON.Parser.parsefile(joinpath(@__DIR__, "polynomial_result.json"))
iteration_list = [
[x[1] + 1 for x in results["trajectory_arr_lafw"]],
[x[1] + 1 for x in results["trajectory_arr_bcg"]],
collect(eachindex(results["function_values_gd"])),
]
time_list = [
[x[5] for x in results["trajectory_arr_lafw"]],
[x[5] for x in results["trajectory_arr_bcg"]],
results["gd_times"],
]
primal_list = [
[x[2] - results["ref_primal_value"] for x in results["trajectory_arr_lafw"]],
[x[2] - results["ref_primal_value"] for x in results["trajectory_arr_bcg"]],
[x - results["ref_primal_value"] for x in results["function_values_gd"]],
]
test_list = [
[x[6] for x in results["trajectory_arr_lafw"]],
[x[6] for x in results["trajectory_arr_bcg"]],
results["function_values_test_gd"],
]
label = [L"\textrm{L-AFW}", L"\textrm{BCG}", L"\textrm{GD}"]
coefficient_error_values = [
[x[7] for x in results["trajectory_arr_lafw"]],
[x[7] for x in results["trajectory_arr_bcg"]],
results["coefficient_error_gd"],
]
plot_results(
[primal_list, primal_list, test_list, test_list],
[iteration_list, time_list, iteration_list, time_list],
label,
[L"\textrm{Iteration}", L"\textrm{Time}", L"\textrm{Iteration}", L"\textrm{Time}"],
[L"\textrm{Primal Gap}", L"\textrm{Primal Gap}", L"\textrm{Test loss}", L"\textrm{Test loss}"],
xscalelog=[:log, :identity, :log, :identity],
legend_position=[:bottomleft, nothing, nothing, nothing],
filename="polynomial_result.pdf",
)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 7242 | using FrankWolfe
using LinearAlgebra
import Random
using MultivariatePolynomials
using DynamicPolynomials
using FiniteDifferences
import JSON
using Statistics
const N = 15
DynamicPolynomials.@polyvar X[1:15]
const max_degree = 4
coefficient_magnitude = 10
noise_magnitude = 1
const var_monomials = MultivariatePolynomials.monomials(X, 0:max_degree)
Random.seed!(42)
all_coeffs = map(var_monomials) do m
d = MultivariatePolynomials.degree(m)
return coefficient_magnitude * rand()
end
random_vector = rand(length(all_coeffs))
cutoff = quantile(random_vector, 0.95)
all_coeffs[findall(<(cutoff), random_vector)] .= 0.0
const true_poly = dot(all_coeffs, var_monomials)
function evaluate_poly(coefficients)
poly = dot(coefficients, var_monomials)
return function p(x)
return MultivariatePolynomials.subs(poly, Pair(X, x)).a[1]
end
end
const training_data = map(1:500) do _
x = 0.1 * randn(N)
y = MultivariatePolynomials.subs(true_poly, Pair(X, x)) + noise_magnitude * randn()
return (x, y.a[1])
end
const extended_training_data = map(training_data) do (x, y)
x_ext = MultivariatePolynomials.coefficient.(MultivariatePolynomials.subs.(var_monomials, X => x))
return (x_ext, y)
end
const test_data = map(1:1000) do _
x = 0.4 * randn(N)
y = MultivariatePolynomials.subs(true_poly, Pair(X, x)) + noise_magnitude * randn()
return (x, y.a[1])
end
const extended_test_data = map(test_data) do (x, y)
x_ext = MultivariatePolynomials.coefficient.(MultivariatePolynomials.subs.(var_monomials, X => x))
return (x_ext, y)
end
function f(coefficients)
return 0.5 / length(extended_training_data) * sum(extended_training_data) do (x, y)
return (dot(coefficients, x) - y)^2
end
end
function f_test(coefficients)
return 0.5 / length(extended_test_data) * sum(extended_test_data) do (x, y)
return (dot(coefficients, x) - y)^2
end
end
function coefficient_errors(coeffs)
return 0.5 * sum(eachindex(all_coeffs)) do idx
return (all_coeffs[idx] - coeffs[idx])^2
end
end
function grad!(storage, coefficients)
storage .= 0
for (x, y) in extended_training_data
p_i = dot(coefficients, x) - y
@. storage += x * p_i
end
storage ./= length(training_data)
return nothing
end
function build_callback(trajectory_arr)
return function callback(state, args...)
return push!(
trajectory_arr,
(FrankWolfe.callback_state(state)..., f_test(state.x), coefficient_errors(state.x)),
)
end
end
# allocate the gradient storage (FiniteDifferences is imported above and could be used to verify grad! if needed)
gradient = similar(all_coeffs)
max_iter = 100_000
random_initialization_vector = rand(length(all_coeffs))
#lmo = FrankWolfe.LpNormLMO{1}(100 * maximum(all_coeffs))
lmo = FrankWolfe.LpNormLMO{1}(0.95 * norm(all_coeffs, 1))
# L estimate
num_pairs = 10000
L_estimate = -Inf
gradient_aux = similar(gradient)
for i in 1:num_pairs
global L_estimate
x = compute_extreme_point(lmo, randn(size(all_coeffs)))
y = compute_extreme_point(lmo, randn(size(all_coeffs)))
grad!(gradient, x)
grad!(gradient_aux, y)
new_L = norm(gradient - gradient_aux) / norm(x - y)
if new_L > L_estimate
L_estimate = new_L
end
end
# L1 projection
# inspired by https://github.com/MPF-Optimization-Laboratory/ProjSplx.jl
function projnorm1(x, τ)
n = length(x)
if norm(x, 1) ≤ τ
return x
end
u = abs.(x)
# simplex projection
bget = false
s_indices = sortperm(u, rev=true)
tsum = zero(τ)
@inbounds for i in 1:n-1
tsum += u[s_indices[i]]
tmax = (tsum - τ) / i
if tmax ≥ u[s_indices[i+1]]
bget = true
break
end
end
if !bget
tmax = (tsum + u[s_indices[n]] - τ) / n
end
@inbounds for i in 1:n
u[i] = max(u[i] - tmax, 0)
u[i] *= sign(x[i])
end
return u
end
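# Minimal sanity check (illustrative sketch only): a vector with l1-norm larger than τ
# is projected exactly onto the l1-ball of radius τ, while a vector already inside the
# ball is returned unchanged.
let v_toy = [3.0, -1.0, 0.5], τ_toy = 2.0
    u_toy = projnorm1(v_toy, τ_toy)
    @assert isapprox(norm(u_toy, 1), τ_toy)
    @assert projnorm1([0.1, -0.2], 1.0) == [0.1, -0.2]
end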
# gradient descent
xgd = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector)
training_gd = Float64[]
test_gd = Float64[]
coeff_error = Float64[]
time_start = time_ns()
gd_times = Float64[]
for iter in 1:max_iter
global xgd
grad!(gradient, xgd)
xgd = projnorm1(xgd - gradient / L_estimate, lmo.right_hand_side)
push!(training_gd, f(xgd))
push!(test_gd, f_test(xgd))
push!(coeff_error, coefficient_errors(xgd))
push!(gd_times, (time_ns() - time_start) * 1e-9)
end
@info "Gradient descent training loss $(f(xgd))"
@info "Gradient descent test loss $(f_test(xgd))"
@info "Coefficient error $(coefficient_errors(xgd))"
x00 = FrankWolfe.compute_extreme_point(lmo, random_initialization_vector)
x0 = deepcopy(x00)
# lazy AFW
trajectory_lafw = []
callback = build_callback(trajectory_lafw)
@time x_lafw, v, primal, dual_gap, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
gradient=gradient,
callback=callback,
);
@info "Lazy AFW training loss $(f(x_lafw))"
@info "Test loss $(f_test(x_lafw))"
@info "Coefficient error $(coefficient_errors(x_lafw))"
trajectory_bcg = []
callback = build_callback(trajectory_bcg)
x0 = deepcopy(x00)
@time x_bcg, v, primal, dual_gap, _, _ = FrankWolfe.blended_conditional_gradient(
f,
grad!,
lmo,
x0,
max_iteration=max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
weight_purge_threshold=1e-10,
callback=callback,
)
@info "BCG training loss $(f(x_bcg))"
@info "Test loss $(f_test(x_bcg))"
@info "Coefficient error $(coefficient_errors(x_bcg))"
x0 = deepcopy(x00)
# compute reference solution using lazy AFW
trajectory_lafw_ref = []
callback = build_callback(trajectory_lafw_ref)
@time _, _, primal_ref, _, _ = FrankWolfe.away_frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=2 * max_iter,
line_search=FrankWolfe.Adaptive(L_est=L_estimate),
print_iter=max_iter ÷ 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
lazy=true,
gradient=gradient,
callback=callback,
);
open(joinpath(@__DIR__, "polynomial_result.json"), "w") do f
data = JSON.json((
trajectory_arr_lafw=trajectory_lafw,
trajectory_arr_bcg=trajectory_bcg,
function_values_gd=training_gd,
function_values_test_gd=test_gd,
coefficient_error_gd=coeff_error,
gd_times=gd_times,
ref_primal_value=primal_ref,
))
return write(f, data)
end
# Count missing/extra terms
print("\n Number of extra terms in GD: ", sum((all_coeffs .== 0) .* (xgd .!= 0)))
print("\n Number of missing terms in GD: ", sum((all_coeffs .!= 0) .* (xgd .== 0)))
print("\n Number of extra terms in BCG: ", sum((all_coeffs .== 0) .* (x_bcg .!= 0)))
print("\n Number of missing terms in BCG: ", sum((all_coeffs .!= 0) .* (x_bcg .== 0)))
print("\n Number of missing terms in Lazy AFW: ", sum((all_coeffs .== 0) .* (x_lafw .!= 0)))
print("\n Number of extra terms in Lazy AFW: ", sum((all_coeffs .!= 0) .* (x_lafw .== 0)))
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2711 | using Plots
using LinearAlgebra
using FrankWolfe
using JSON
using DelimitedFiles
# Set the tolerance
eps = 1e-5
# NOTE: the data are random normal matrices with mean 0.05, not 0.1 as indicated in their paper
# we also generated additional datasets at larger scale and log-normal revenues
# Specify an explicit problem instance
problem_instance = joinpath(@__DIR__, "syn_200_200.csv")
const W = readdlm(problem_instance, ',')
# Set the maximum number of iterations
max_iteration = 5000
function build_objective(W)
(n, p) = size(W)
function f(x)
return -sum(log(dot(x, @view(W[:, t]))) for t in 1:p)
end
function ∇f(storage, x)
storage .= 0
for t in 1:p
temp_rev = dot(x, @view(W[:, t]))
@. storage -= @view(W[:, t]) ./ temp_rev
end
return storage
end
return (f, ∇f)
end
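# Hedged sketch (toy revenue matrix, not the instance loaded above): with a 2x2 matrix W
# the objective is f(x) = -log(dot(x, W[:, 1])) - log(dot(x, W[:, 2])), which can be
# checked by hand together with its gradient.
let W_toy = [1.0 2.0; 3.0 4.0]
    (f_toy, ∇f_toy) = build_objective(W_toy)
    x_toy = [0.5, 0.5]
    @assert f_toy(x_toy) ≈ -(log(2.0) + log(3.0))
    g_toy = zeros(2)
    ∇f_toy(g_toy, x_toy)
    @assert g_toy ≈ [-1.0 / 2.0 - 2.0 / 3.0, -3.0 / 2.0 - 4.0 / 3.0]
end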
# lower bound on objective value
true_obj_value = -2
(f, ∇f) = build_objective(W)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0 = FrankWolfe.compute_extreme_point(lmo, rand(size(W, 1)))
storage = Vector{Float64}(undef, size(x0)...)
(x, v, primal_agnostic, dual_gap, traj_data_agnostic) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Agnostic(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xback, v, primal_back, dual_gap, traj_data_backtracking) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Adaptive(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xback, v, primal_back, dual_gap, traj_data_monotonic) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.MonotonicStepSize(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xsecant, v, primal_secant, dual_gap, traj_data_secant) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Secant(tol=1e-12),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
# Plotting the trajectories
labels = ["Agnostic", "Adaptive", "Monotonic", "Secant"]
data = [traj_data_agnostic, traj_data_backtracking, traj_data_monotonic, traj_data_secant]
plot_trajectories(data, labels, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2758 | using Plots
using LinearAlgebra
using FrankWolfe
using JSON
using DelimitedFiles
using MAT
# Set the tolerance
eps = 1e-5
# NOTE: the data are random normal matrices with mean 0.05, not 0.1 as indicated in their paper
# we also generated additional datasets at larger scale and log-normal revenues
#
# for large problem instances from https://zenodo.org/records/4836009
# see paper: https://arxiv.org/abs/2105.13913
problem_instance = joinpath(@__DIR__, "data/syn_1000_800_10_50_1.mat")
W = MAT.matread(problem_instance)["W"]
# Set the maximum number of iterations
max_iteration = 5000
function build_objective(W)
(n, p) = size(W)
function f(x)
return -sum(log(dot(x, @view(W[:, t]))) for t in 1:p)
end
function ∇f(storage, x)
storage .= 0
for t in 1:p
temp_rev = dot(x, @view(W[:, t]))
@. storage -= @view(W[:, t]) ./ temp_rev
end
return storage
end
return (f, ∇f)
end
# lower bound on objective value
true_obj_value = -10.0
(f, ∇f) = build_objective(W)
lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0 = FrankWolfe.compute_extreme_point(lmo, rand(size(W, 1)))
storage = Vector{Float64}(undef, size(x0)...)
(x, v, primal_agnostic, dual_gap, traj_data_agnostic) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Agnostic(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xback, v, primal_back, dual_gap, traj_data_backtracking) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Adaptive(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xback, v, primal_back, dual_gap, traj_data_monotonic) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.MonotonicStepSize(),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
(xsecant, v, primal_secant, dual_gap, traj_data_secant) = FrankWolfe.frank_wolfe(
x -> f(x) - true_obj_value,
∇f,
lmo,
x0,
verbose=true,
trajectory=true,
line_search=FrankWolfe.Secant(tol=1e-12),
max_iteration=max_iteration,
gradient=storage,
print_iter=max_iteration / 10,
epsilon=eps,
)
# Plotting the trajectories
labels = ["Agnostic", "Adaptive", "Monotonic", "Secant"]
data = [traj_data_agnostic, traj_data_backtracking, traj_data_monotonic, traj_data_secant]
plot_trajectories(data, labels, xscalelog=true)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3956 | using LinearAlgebra
using FrankWolfe
using Random
# Example of speedup using the quadratic active set
# This is exactly the same as in the literate example #12,
# but in the bipartite case and with a heuristic LMO
# The instance is therefore larger, which makes the acceleration more visible
struct BellCorrelationsLMOHeuristic{T} <: FrankWolfe.LinearMinimizationOracle
m::Int # number of inputs
tmp::Vector{T} # used to compute scalar products
end
function FrankWolfe.compute_extreme_point(
lmo::BellCorrelationsLMOHeuristic{T},
A::Array{T, 2};
kwargs...,
) where {T <: Number}
ax = [ones(T, lmo.m) for n in 1:2]
axm = [zeros(Int, lmo.m) for n in 1:2]
scm = typemax(T)
for i in 1:100
rand!(ax[1], [-1, 1])
sc1 = zero(T)
sc2 = one(T)
while sc1 < sc2
sc2 = sc1
mul!(lmo.tmp, A', ax[1])
for x2 in 1:length(ax[2])
ax[2][x2] = lmo.tmp[x2] > zero(T) ? -one(T) : one(T)
end
mul!(lmo.tmp, A, ax[2])
for x2 in 1:length(ax[1])
ax[1][x2] = lmo.tmp[x2] > zero(T) ? -one(T) : one(T)
end
sc1 = dot(ax[1], lmo.tmp)
end
if sc1 < scm
scm = sc1
for n in 1:2
axm[n] .= ax[n]
end
end
end
# returning a full tensor is naturally naive, but this is only a toy example
return [axm[1][x1]*axm[2][x2] for x1 in 1:lmo.m, x2 in 1:lmo.m]
end
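# Illustrative check (hedged sketch on a small random instance, not part of the
# benchmark below): the heuristic always returns a rank-one matrix with ±1 entries,
# i.e. an outer product of two sign vectors.
let lmo_toy = BellCorrelationsLMOHeuristic{Float64}(4, zeros(4))
    v_toy = FrankWolfe.compute_extreme_point(lmo_toy, randn(4, 4))
    @assert all(x -> x == 1 || x == -1, v_toy)
    @assert rank(v_toy) == 1
end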
function correlation_tensor_GHZ_polygon(N::Int, m::Int; type=Float64)
res = zeros(type, m*ones(Int, N)...)
tab_cos = [cos(x*type(pi)/m) for x in 0:N*m]
tab_cos[abs.(tab_cos) .< Base.rtoldefault(type)] .= zero(type)
for ci in CartesianIndices(res)
res[ci] = tab_cos[sum(ci.I)-N+1]
end
return res
end
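# Small illustration (sketch only): for N=2 parties and m=3 inputs, the entry at
# (x1, x2) equals cos((x1 + x2 - 2) * π / 3), with values numerically close to zero
# rounded to exactly zero.
let small_p = correlation_tensor_GHZ_polygon(2, 3)
    @assert size(small_p) == (3, 3)
    @assert small_p[1, 1] ≈ 1.0  # angle 0 for the (1, 1) entry
end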
function benchmark_Bell(p::Array{T, 2}, quadratic::Bool; fw_method=FrankWolfe.blended_pairwise_conditional_gradient, kwargs...) where {T <: Number}
Random.seed!(0)
normp2 = dot(p, p) / 2
# weird syntax to enable the compiler to correctly understand the type
f = let p = p, normp2 = normp2
x -> normp2 + dot(x, x) / 2 - dot(p, x)
end
grad! = let p = p
(storage, xit) -> begin
for x in eachindex(xit)
storage[x] = xit[x] - p[x]
end
end
end
lmo = BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1)))
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
if quadratic
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
else
active_set = FrankWolfe.ActiveSet([(one(T), x0)])
end
return fw_method(f, grad!, lmo, active_set; line_search=FrankWolfe.Shortstep(one(T)), kwargs...)
end
p = correlation_tensor_GHZ_polygon(2, 100)
max_iteration = 10^3 # speedups are way more important for more iterations
verbose = false
# the following kwarg passing might break for old julia versions
@time benchmark_Bell(p, false; verbose, max_iteration, lazy=false, fw_method=FrankWolfe.blended_pairwise_conditional_gradient) # 2.4s
@time benchmark_Bell(p, true; verbose, max_iteration, lazy=false, fw_method=FrankWolfe.blended_pairwise_conditional_gradient) # 0.8s
@time benchmark_Bell(p, false; verbose, max_iteration, lazy=true, fw_method=FrankWolfe.blended_pairwise_conditional_gradient) # 2.1s
@time benchmark_Bell(p, true; verbose, max_iteration, lazy=true, fw_method=FrankWolfe.blended_pairwise_conditional_gradient) # 0.4s
@time benchmark_Bell(p, false; verbose, max_iteration, lazy=false, fw_method=FrankWolfe.away_frank_wolfe) # 5.7s
@time benchmark_Bell(p, true; verbose, max_iteration, lazy=false, fw_method=FrankWolfe.away_frank_wolfe) # 2.3s
@time benchmark_Bell(p, false; verbose, max_iteration, lazy=true, fw_method=FrankWolfe.away_frank_wolfe) # 3s
@time benchmark_Bell(p, true; verbose, max_iteration, lazy=true, fw_method=FrankWolfe.away_frank_wolfe) # 0.7s
println()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1664 | using FrankWolfe
using Random
using LinearAlgebra
Random.seed!(0)
n = 5 # number of dimensions
p = 10^3 # number of points
k = 10^4 # number of iterations
T = Float64
function simple_reg_loss(θ, data_point)
(xi, yi) = data_point
(a, b) = (θ[1:end-1], θ[end])
pred = a ⋅ xi + b
return (pred - yi)^2 / 2
end
function ∇simple_reg_loss(storage, θ, data_point)
(xi, yi) = data_point
(a, b) = (θ[1:end-1], θ[end])
pred = a ⋅ xi + b
@. storage[1:end-1] += xi * (pred - yi)
storage[end] += pred - yi
return storage
end
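# Hedged sanity sketch (toy data point, unrelated to the run below): for θ = [a; b]
# the prediction is a ⋅ xi + b, the loss is (pred - yi)^2 / 2, and the gradient
# accumulates (pred - yi) * [xi; 1] into the storage.
let θ_toy = [2.0, 1.0], data_toy = ([3.0], 10.0)  # pred = 2*3 + 1 = 7
    @assert simple_reg_loss(θ_toy, data_toy) ≈ (7.0 - 10.0)^2 / 2
    storage_toy = zeros(2)
    ∇simple_reg_loss(storage_toy, θ_toy, data_toy)
    @assert storage_toy ≈ [(7.0 - 10.0) * 3.0, 7.0 - 10.0]
end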
xs = [10randn(T, n) for _ in 1:p]
bias = 4
params_perfect = [1:n; bias]
# similar example with noisy data, Gaussian noise around the linear estimate
data_noisy = [(x, x ⋅ (1:n) + bias + 0.5 * randn(T)) for x in xs]
f(x) = sum(simple_reg_loss(x, data_point) for data_point in data_noisy)
function gradf(storage, x)
storage .= 0
for dp in data_noisy
∇simple_reg_loss(storage, x, dp)
end
end
lmo = FrankWolfe.LpNormLMO{T, 2}(1.05 * norm(params_perfect))
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(T, n+1))
# standard active set
# active_set = FrankWolfe.ActiveSet([(1.0, x0)])
# specialized active set, automatically detecting the parameters A and b of the quadratic function f
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], gradf)
@time res = FrankWolfe.blended_pairwise_conditional_gradient(
# @time res = FrankWolfe.away_frank_wolfe(
f,
gradf,
lmo,
active_set;
verbose=true,
lazy=true,
line_search=FrankWolfe.Adaptive(L_est=10.0, relaxed_smoothness=true),
max_iteration=k,
print_iter=k / 10,
trajectory=true,
)
println()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 3799 | using LinearAlgebra
using FrankWolfe
# Example of speedup using the symmetry reduction
# See arxiv.org/abs/2302.04721 for the context
# and arxiv.org/abs/2310.20677 for further symmetrisation
# The symmetry exploited is the invariance of a tensor
# by exchange of the dimensions
struct BellCorrelationsLMO{T} <: FrankWolfe.LinearMinimizationOracle
m::Int # number of inputs
tmp::Vector{T} # used to compute scalar products
end
function FrankWolfe.compute_extreme_point(
lmo::BellCorrelationsLMO{T},
A::Array{T, 3};
kwargs...,
) where {T <: Number}
ax = [ones(T, lmo.m) for n in 1:3]
sc1 = zero(T)
sc2 = one(T)
axm = [zeros(Int, lmo.m) for n in 1:3]
scm = typemax(T)
L = 2^lmo.m
intax = zeros(Int, lmo.m)
for λa3 in 0:(L÷2)-1
digits!(intax, λa3, base=2)
ax[3][1:lmo.m] .= 2intax .- 1
for λa2 in 0:L-1
digits!(intax, λa2, base=2)
ax[2][1:lmo.m] .= 2intax .- 1
for x1 in 1:lmo.m
lmo.tmp[x1] = 0
for x2 in 1:lmo.m, x3 in 1:lmo.m
lmo.tmp[x1] += A[x1, x2, x3] * ax[2][x2] * ax[3][x3]
end
ax[1][x1] = lmo.tmp[x1] > zero(T) ? -one(T) : one(T)
end
sc = dot(ax[1], lmo.tmp)
if sc < scm
scm = sc
for n in 1:3
axm[n] .= ax[n]
end
end
end
end
# returning a full tensor is naturally naive, but this is only a toy example
return [axm[1][x1]*axm[2][x2]*axm[3][x3] for x1 in 1:lmo.m, x2 in 1:lmo.m, x3 in 1:lmo.m]
end
function correlation_tensor_GHZ_polygon(N::Int, m::Int; type=Float64)
res = zeros(type, m*ones(Int, N)...)
tab_cos = [cos(x*type(pi)/m) for x in 0:N*m]
tab_cos[abs.(tab_cos) .< Base.rtoldefault(type)] .= zero(type)
for ci in CartesianIndices(res)
res[ci] = tab_cos[sum(ci.I)-N+1]
end
return res
end
function benchmark_Bell(p::Array{T, 3}, sym::Bool; kwargs...) where {T <: Number}
normp2 = dot(p, p) / 2
# weird syntax to enable the compiler to correctly understand the type
f = let p = p, normp2 = normp2
x -> normp2 + dot(x, x) / 2 - dot(p, x)
end
grad! = let p = p
(storage, xit) -> begin
for x in eachindex(xit)
storage[x] = xit[x] - p[x]
end
end
end
function reynolds_permutedims(atom::Array{Int, 3}, lmo::BellCorrelationsLMO{T}) where {T <: Number}
res = zeros(T, size(atom))
for per in [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
res .+= permutedims(atom, per)
end
res ./= 6
return res
end
function reynolds_adjoint(gradient::Array{T, 3}, lmo::BellCorrelationsLMO{T}) where {T <: Number}
return gradient # we can spare symmetrising the gradient as it remains symmetric throughout the algorithm
end
lmo = BellCorrelationsLMO{T}(size(p, 1), zeros(T, size(p, 1)))
if sym
lmo = FrankWolfe.SymmetricLMO(lmo, reynolds_permutedims, reynolds_adjoint)
end
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
println("Output type of the LMO: ", typeof(x0))
active_set = FrankWolfe.ActiveSet([(one(T), x0)])
# active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
return FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set; lazy=true, line_search=FrankWolfe.Shortstep(one(T)), kwargs...)
end
p = 0.5correlation_tensor_GHZ_polygon(3, 8)
benchmark_Bell(p, true; verbose=true, max_iteration=10^6, print_iter=10^4) # 24_914 iterations and 89 atoms
println()
benchmark_Bell(p, false; verbose=true, max_iteration=10^6, print_iter=10^4) # 107_647 iterations and 379 atoms
println()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 824 | using Random
# for bug with display
ENV["GKSwstype"] = "100"
const example_files = filter(readdir(@__DIR__, join=true)) do f
return endswith(f, ".jl") &&
!occursin("large", f) &&
!occursin("result", f) &&
!occursin("activate.jl", f) &&
!occursin("plot_utils.jl", f)
end
example_shuffle = randperm(length(example_files))
if !haskey(ENV, "ALL_EXAMPLES")
example_shuffle = example_shuffle[1:2]
else
@info "Running all examples"
end
const activate_file = joinpath(@__DIR__, "activate.jl")
const plot_file = joinpath(@__DIR__, "plot_utils.jl")
for file in example_files[example_shuffle]
@info "Including example $file"
instruction = """include("$activate_file"); include("$plot_file"); include("$file")"""
run(`julia -e $instruction`)
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1652 | using FrankWolfe
import LinearAlgebra
include("../examples/plot_utils.jl")
n = Int(1e5)
k = 1000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = LinearAlgebra.norm(x - xp)^2
function grad!(storage, x)
@. storage = 2 * (x - xp)
end
lmo = FrankWolfe.ProbabilitySimplexOracle(1);
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
FrankWolfe.benchmark_oracles(x -> f(x), (str, x) -> grad!(str, x), () -> randn(n), lmo; k=100)
println("\n==> Monotonic Step Size.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_monotonic = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.MonotonicStepSize(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Backtracking.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_backtracking = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Backtracking(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Secant.\n")
x0 = deepcopy(x00)
@time x, v, primal, dual_gap, trajectory_secant = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Secant(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
data = [trajectory_monotonic, trajectory_backtracking, trajectory_secant]
label = ["monotonic", "backtracking", "secant"]
plot_trajectories(data, label, xscalelog=true) | FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1879 | # Simple versus stateless step
using FrankWolfe
using Plots
using LinearAlgebra
using Random
using Test
include("../examples/plot_utils.jl")
Random.seed!(48)
n = 1000
Q = Symmetric(randn(n,n))
e = eigen(Q)
evals = sort!(exp.(2 * randn(n)))
e.values .= evals
const A = Matrix(e)
lmo = FrankWolfe.LpNormLMO{1}(100.0)
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
const b = n * randn(n)
function f(x)
1/2 * dot(x, A, x) + dot(b, x) - 0.5 * log(sum(x)) + 278603
end
function grad!(storage, x)
mul!(storage, A, x)
storage .+= b
s = sum(x)
storage .-= 0.5 * inv(s)
end
gradient=collect(x0)
k = 40_000
line_search = FrankWolfe.MonotonicStepSize(x -> sum(x) > 0)
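# Note on the callable above (a reading of this example, not a library guarantee):
# the predicate x -> sum(x) > 0 matches the domain of the log(sum(x)) term in f,
# so the monotonic step-size rule can avoid candidate iterates on which f is undefined.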
x, v, primal, dual_gap, trajectory_simple = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(n)),
max_iteration=k,
line_search=line_search,
print_iter=k / 10,
verbose=true,
gradient=gradient,
trajectory=true,
);
line_search2 = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Agnostic(), x -> sum(x) > 0)
x, v, primal, dual_gap, trajectory_restart = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(n)),
max_iteration=k,
line_search=line_search2,
print_iter=k / 10,
verbose=true,
gradient=gradient,
trajectory=true,
);
plot_trajectories([trajectory_simple[1:end], trajectory_restart[1:end]], ["simple", "stateless"], legend_position=:topright)
# simple step iterations about 33% faster
@test line_search.factor <= 44
x, v, primal, dual_gap, trajectory_restart_highpres = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(BigFloat, n)),
max_iteration=10k,
line_search=line_search2,
print_iter=k / 10,
verbose=true,
gradient=big.(gradient),
trajectory=true,
);
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1836 | using FrankWolfe
using Plots
using LinearAlgebra
using Random
using Test
include("../examples/plot_utils.jl")
Random.seed!(42)
n = 30
Q = Symmetric(randn(n,n))
e = eigen(Q)
evals = sort!(exp.(2 * randn(n)))
e.values .= evals
const A = Matrix(e)
lmo = FrankWolfe.LpNormLMO{1}(100.0)
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
const b = n * randn(n)
function f(x)
1/2 * dot(x, A, x) + dot(b, x) - 0.5 * log(sum(x)) + 4000
end
function grad!(storage, x)
mul!(storage, A, x)
storage .+= b
s = sum(x)
storage .-= 0.5 * inv(s)
end
gradient=collect(x0)
k = 10_000
line_search = FrankWolfe.MonotonicStepSize(x -> sum(x) > 0)
x, v, primal, dual_gap, trajectory_simple = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(n)),
max_iteration=k,
line_search=line_search,
print_iter=k / 10,
verbose=true,
gradient=gradient,
trajectory=true,
);
line_search2 = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Agnostic(), x -> sum(x) > 0)
x, v, primal, dual_gap, trajectory_restart = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(n)),
max_iteration=k,
line_search=line_search2,
print_iter=k / 10,
verbose=true,
gradient=gradient,
trajectory=true,
);
plot_trajectories([trajectory_simple[1:end], trajectory_restart[1:end]], ["simple", "stateless"], legend_position=:topright)
# simple step iterations about 33% faster
@test line_search.factor == 8
x, v, primal, dual_gap, trajectory_restart_highpres = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
FrankWolfe.compute_extreme_point(lmo, zeros(BigFloat, n)),
max_iteration=10k,
line_search=line_search2,
print_iter=k / 10,
verbose=true,
gradient=gradient,
trajectory=true,
);
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 4355 | using LinearAlgebra
using FrankWolfe
using Random
# Example of speedup using the quadratic active set
# This is exactly the same as in the literate example #12,
# but in the bipartite case and with a heuristic LMO
# The size of the instance is then higher, making the acceleration more visible
struct BellCorrelationsLMOHeuristic{T} <: FrankWolfe.LinearMinimizationOracle
m::Int # number of inputs
tmp::Vector{T} # used to compute scalar products
end
function FrankWolfe.compute_extreme_point(
lmo::BellCorrelationsLMOHeuristic{T},
A::AbstractMatrix{T};
kwargs...,
) where {T <: Number}
ax = [ones(T, lmo.m) for n in 1:2]
axm = [zeros(T, lmo.m) for n in 1:2]
scm = typemax(T)
for i in 1:100
rand!(ax[1], [-one(T), one(T)])
sc1 = zero(T)
sc2 = one(T)
while sc1 < sc2
sc2 = sc1
mul!(lmo.tmp, A', ax[1])
for x2 in eachindex(ax[2])
ax[2][x2] = lmo.tmp[x2] > zero(T) ? -one(T) : one(T)
end
mul!(lmo.tmp, A, ax[2])
for x1 in eachindex(ax[1])
ax[1][x1] = lmo.tmp[x1] > zero(T) ? -one(T) : one(T)
end
sc1 = dot(ax[1], lmo.tmp)
end
if sc1 < scm
scm = sc1
for n in 1:2
axm[n] .= ax[n]
end
end
end
# returning a full tensor is naturally naive, but this is only a toy example
return [axm[1][x1]*axm[2][x2] for x1 in 1:lmo.m, x2 in 1:lmo.m]
end
function correlation_tensor_GHZ_polygon(N::Int, m::Int; type=Float64)
res = zeros(type, m*ones(Int, N)...)
tab_cos = [cos(x*type(pi)/m) for x in 0:N*m]
tab_cos[abs.(tab_cos) .< Base.rtoldefault(type)] .= zero(type)
for ci in CartesianIndices(res)
res[ci] = tab_cos[sum(ci.I)-N+1]
end
return res
end
function build_reduce_inflate_permutedims(p::Array{T, 2}) where {T <: Number}
n = size(p, 1)
@assert n == size(p, 2)
dimension = (n * (n + 1)) ÷ 2
sqrt2 = sqrt(T(2))
return function(A::AbstractArray{T, 2}, lmo)
vec = Vector{T}(undef, dimension)
cnt = 0
@inbounds for i in 1:n
vec[i] = A[i, i]
cnt += n - i
for j in i+1:n
vec[cnt+j] = (A[i, j] + A[j, i]) / sqrt2
end
end
return FrankWolfe.SymmetricArray(A, vec)
end, function(x::FrankWolfe.SymmetricArray, lmo)
cnt = 0
@inbounds for i in 1:n
x.data[i, i] = x.vec[i]
cnt += n - i
for j in i+1:n
x.data[i, j] = x.vec[cnt+j] / sqrt2
x.data[j, i] = x.data[i, j]
end
end
return x.data
end
end
function benchmark_Bell(p::Array{T, 2}, sym::Bool; fw_method=FrankWolfe.blended_pairwise_conditional_gradient, kwargs...) where {T <: Number}
Random.seed!(0)
if sym
reduce, inflate = build_reduce_inflate_permutedims(p)
lmo = FrankWolfe.SymmetricLMO(BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1))), reduce, inflate)
p = reduce(p, lmo)
else
lmo = BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1)))
end
normp2 = dot(p, p) / 2
# weird syntax to enable the compiler to correctly understand the type
f = let p = p, normp2 = normp2
x -> normp2 + dot(x, x) / 2 - dot(p, x)
end
grad! = let p = p
(storage, xit) -> begin
for x in eachindex(xit)
storage[x] = xit[x] - p[x]
end
end
end
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
res = fw_method(f, grad!, lmo, active_set; line_search=FrankWolfe.Shortstep(one(T)), lazy=true, verbose=false, max_iteration=10^2)
return fw_method(f, grad!, lmo, res[6]; line_search=FrankWolfe.Shortstep(one(T)), lazy=true, lazy_tolerance=10^6, kwargs...)
end
p = correlation_tensor_GHZ_polygon(2, 100)
max_iteration = 10^4
verbose = false
# the following kwarg passing might break for old julia versions
@time benchmark_Bell(p, false; verbose, max_iteration, fw_method=FrankWolfe.blended_pairwise_conditional_gradient)
@time benchmark_Bell(p, true; verbose, max_iteration, fw_method=FrankWolfe.blended_pairwise_conditional_gradient)
println()
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 2393 | using FrankWolfe
using LinearAlgebra
include("../examples/plot_utils.jl")
n = Int(1e5)
k = 10000
xpi = rand(n);
total = sum(xpi);
const xp = xpi ./ total;
f(x) = norm(x - xp)^2
function grad!(storage, x)
storage .= 2 * (x - xp)
return nothing
end
# better for memory consumption as we do coordinate-wise ops
function cf(x, xp)
return norm(x .- xp)^2
end
# lmo = FrankWolfe.KSparseLMO(100, 1.0)
lmo = FrankWolfe.LpNormLMO{Float64,1}(1.0)
# lmo = FrankWolfe.ProbabilitySimplexOracle(1.0);
# lmo = FrankWolfe.UnitSimplexOracle(1.0);
x00 = FrankWolfe.compute_extreme_point(lmo, zeros(n))
# print(x0)
gradient = similar(x00)
FrankWolfe.benchmark_oracles(f, grad!, () -> randn(n), lmo; k=100)
# 1/t *can be* better than short step
println("\n==> Short Step rule - if you know L.\n")
x0 = copy(x00)
@time x, v, primal, dual_gap, trajectory_shortstep = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Short Step rule with momentum - if you know L.\n")
x0 = copy(x00)
@time x, v, primal, dual_gap, trajectoryM = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Shortstep(2.0),
print_iter=k / 10,
memory_mode=FrankWolfe.OutplaceEmphasis(),
verbose=true,
trajectory=true,
momentum=0.9,
);
println("\n==> Adaptive if you do not know L.\n")
x0 = copy(x00)
@time x, v, primal, dual_gap, trajectory_adaptive = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Adaptive(L_est=100.0),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
println("\n==> Agnostic if function is too expensive for adaptive.\n")
x0 = copy(x00)
@time x, v, primal, dual_gap, trajectory_agnostic = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
max_iteration=k,
line_search=FrankWolfe.Agnostic(),
print_iter=k / 10,
memory_mode=FrankWolfe.InplaceEmphasis(),
verbose=true,
trajectory=true,
);
data = [trajectory_shortstep, trajectory_adaptive, trajectory_agnostic, trajectoryM]
label = ["short step" "adaptive" "agnostic" "momentum"]
plot_trajectories(data, label)
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 1257 | module FrankWolfe
using GenericSchur
using LinearAlgebra
using Printf
using ProgressMeter
using TimerOutputs
using SparseArrays: spzeros, SparseVector
import SparseArrays
import Random
using Setfield: @set
import MathOptInterface
const MOI = MathOptInterface
const MOIU = MOI.Utilities
# for Birkhoff polytope LMO
import Hungarian
import Arpack
export frank_wolfe, lazified_conditional_gradient, away_frank_wolfe
export blended_conditional_gradient, compute_extreme_point
include("abstract_oracles.jl")
include("defs.jl")
include("utils.jl")
include("linesearch.jl")
include("types.jl")
include("simplex_oracles.jl")
include("norm_oracles.jl")
include("polytope_oracles.jl")
include("moi_oracle.jl")
include("function_gradient.jl")
include("active_set.jl")
include("active_set_quadratic.jl")
include("blended_cg.jl")
include("afw.jl")
include("fw_algorithms.jl")
include("block_oracles.jl")
include("block_coordinate_algorithms.jl")
include("alternating_methods.jl")
include("blended_pairwise.jl")
include("pairwise.jl")
include("tracking.jl")
include("callback.jl")
# collecting most common data types etc and precompile
# min version req set to 1.5 to prevent stalling of julia 1
@static if VERSION >= v"1.5"
include("precompile.jl")
end
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 9117 |
"""
Supertype for linear minimization oracles.
All LMOs must implement `compute_extreme_point(lmo::LMO, direction)`
and return a vector `v` of the appropriate type.
"""
abstract type LinearMinimizationOracle end
"""
compute_extreme_point(lmo::LinearMinimizationOracle, direction; kwargs...)
Computes the point `argmin_{v ∈ C} v ⋅ direction`
with `C` the set represented by the LMO.
Most LMOs feature `v` as a keyword argument that allows for an in-place computation whenever `v` is dense.
All LMOs should accept keyword arguments that they can ignore.
"""
function compute_extreme_point end
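# Usage sketch (illustrative, not part of the package): a custom LMO only needs to subtype
# `LinearMinimizationOracle` and implement `compute_extreme_point`. The `MyBoxLMO` type and
# its `radius` field below are hypothetical names chosen for this example.
#
# struct MyBoxLMO <: LinearMinimizationOracle
#     radius::Float64
# end
#
# # over the box [-radius, radius]^n, ⟨v, direction⟩ is minimized coordinate-wise
# function compute_extreme_point(lmo::MyBoxLMO, direction; kwargs...)
#     return -lmo.radius .* sign.(direction)
# end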
"""
CachedLinearMinimizationOracle{LMO}
Oracle wrapping another one of type `LMO`.
Subtypes of `CachedLinearMinimizationOracle` contain a cache of
previous solutions.
By convention, the inner oracle is named `inner`.
Cached optimizers are expected to implement `Base.empty!` and `Base.length`.
"""
abstract type CachedLinearMinimizationOracle{LMO<:LinearMinimizationOracle} <:
LinearMinimizationOracle end
# by default do nothing and return the LMO itself
Base.empty!(lmo::CachedLinearMinimizationOracle) = lmo
Base.length(::CachedLinearMinimizationOracle) = 0
"""
    SingleLastCachedLMO{LMO, A}
Caches only the last result from an LMO and stores it in `last_vertex`.
Vertices returned by `LMO` have to be of type `A` if provided.
"""
mutable struct SingleLastCachedLMO{LMO,A} <: CachedLinearMinimizationOracle{LMO}
last_vertex::Union{Nothing,A}
inner::LMO
end
# initializes with no cache by default
SingleLastCachedLMO(lmo::LMO) where {LMO<:LinearMinimizationOracle} =
SingleLastCachedLMO{LMO,AbstractVector}(nothing, lmo)
function compute_extreme_point(
lmo::SingleLastCachedLMO,
direction;
v=nothing,
threshold=-Inf,
store_cache=true,
kwargs...,
)
if lmo.last_vertex !== nothing && isfinite(threshold)
if fast_dot(lmo.last_vertex, direction) ≤ threshold # cache is a sufficiently-decreasing direction
return lmo.last_vertex
end
end
    v = compute_extreme_point(lmo.inner, direction; kwargs...)
if store_cache
lmo.last_vertex = v
end
return v
end
function Base.empty!(lmo::SingleLastCachedLMO)
lmo.last_vertex = nothing
return lmo
end
Base.length(lmo::SingleLastCachedLMO) = Int(lmo.last_vertex !== nothing)
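# Usage sketch (illustrative): wrapping an existing LMO in a single-vertex cache.
# `ProbabilitySimplexOracle` is the simplex LMO defined elsewhere in this package,
# and `direction` stands for any gradient-like vector.
#
# cached_lmo = SingleLastCachedLMO(ProbabilitySimplexOracle(1.0))
# v1 = compute_extreme_point(cached_lmo, direction)                # calls the inner LMO
# v2 = compute_extreme_point(cached_lmo, direction; threshold=0.0) # may reuse the cached v1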
"""
MultiCacheLMO{N, LMO, A}
Cache for an LMO storing up to `N` vertices in the cache, removed in FIFO style.
`oldest_idx` keeps track of the oldest index in the tuple, i.e. the one to replace next.
`A`, if provided, must be the type of vertices returned by `LMO`.
"""
mutable struct MultiCacheLMO{N,LMO<:LinearMinimizationOracle,A} <:
CachedLinearMinimizationOracle{LMO}
vertices::NTuple{N,Union{A,Nothing}}
inner::LMO
oldest_idx::Int
end
function MultiCacheLMO{N,LMO,A}(lmo::LMO) where {N,LMO<:LinearMinimizationOracle,A}
return MultiCacheLMO{N,LMO,A}(ntuple(_ -> nothing, Val{N}()), lmo, 1)
end
function MultiCacheLMO{N}(lmo::LMO) where {N,LMO<:LinearMinimizationOracle}
return MultiCacheLMO{N,LMO,AbstractVector}(ntuple(_ -> nothing, Val{N}()), lmo, 1)
end
# arbitrary default to 10 points
function MultiCacheLMO(lmo::LMO) where {LMO<:LinearMinimizationOracle}
return MultiCacheLMO{10}(lmo)
end
# type-unstable
function MultiCacheLMO(n::Integer, lmo::LMO) where {LMO<:LinearMinimizationOracle}
return MultiCacheLMO{n}(lmo)
end
function Base.empty!(lmo::MultiCacheLMO{N}) where {N}
lmo.vertices = ntuple(_ -> nothing, Val{N}())
lmo.oldest_idx = 1
return lmo
end
Base.length(lmo::MultiCacheLMO) = count(!isnothing, lmo.vertices)
"""
Compute the extreme point with a multi-vertex cache.
`store_cache` indicates whether the newly-computed point should be stored in cache.
`greedy` determines if we should return the first point with dot-product
below `threshold` or look for the best one.
"""
function compute_extreme_point(
lmo::MultiCacheLMO{N},
direction;
v=nothing,
threshold=-Inf,
store_cache=true,
greedy=false,
kwargs...,
) where {N}
if isfinite(threshold)
best_idx = -1
best_val = Inf
best_v = nothing
# create an iteration order to visit most recent vertices first
iter_order = if lmo.oldest_idx > 1
Iterators.flatten((lmo.oldest_idx-1:-1:1, N:-1:lmo.oldest_idx))
else
N:-1:1
end
for idx in iter_order
if lmo.vertices[idx] !== nothing
v = lmo.vertices[idx]
new_val = fast_dot(v, direction)
if new_val ≤ threshold # cache is a sufficiently-decreasing direction
# if greedy, stop and return point
if greedy
# println("greedy cache sol")
return v
end
# otherwise, keep the index only if better than incumbent
if new_val < best_val
best_idx = idx
best_val = new_val
best_v = v
end
end
end
end
if best_idx > 0 # && fast_dot(best_v, direction) ≤ threshold
# println("cache sol")
return best_v
end
end
# no interesting point found, computing new
# println("LP sol")
    v = compute_extreme_point(lmo.inner, direction; kwargs...)
if store_cache
tup = Base.setindex(lmo.vertices, v, lmo.oldest_idx)
lmo.vertices = tup
# if oldest_idx was last, we get back to 1, otherwise we increment oldest index
lmo.oldest_idx = lmo.oldest_idx < N ? lmo.oldest_idx + 1 : 1
end
return v
end
"""
VectorCacheLMO{LMO, VT}
Cache for an LMO storing an unbounded number of vertices of type `VT` in the cache.
`VT`, if provided, must be the type of vertices returned by `LMO`.
"""
mutable struct VectorCacheLMO{LMO<:LinearMinimizationOracle,VT} <:
CachedLinearMinimizationOracle{LMO}
vertices::Vector{VT}
inner::LMO
end
function VectorCacheLMO{LMO,VT}(lmo::LMO) where {VT,LMO<:LinearMinimizationOracle}
return VectorCacheLMO{LMO,VT}(VT[], lmo)
end
function VectorCacheLMO(lmo::LMO) where {LMO<:LinearMinimizationOracle}
return VectorCacheLMO{LMO,Vector{Float64}}(AbstractVector[], lmo)
end
function Base.empty!(lmo::VectorCacheLMO)
empty!(lmo.vertices)
return lmo
end
Base.length(lmo::VectorCacheLMO) = length(lmo.vertices)
function compute_extreme_point(
lmo::VectorCacheLMO,
direction;
v=nothing,
threshold=-Inf,
store_cache=true,
greedy=false,
kwargs...,
)
if isempty(lmo.vertices)
v = compute_extreme_point(lmo.inner, direction)
if store_cache
push!(lmo.vertices, v)
end
return v
end
best_idx = -1
best_val = Inf
best_v = nothing
for idx in reverse(eachindex(lmo.vertices))
@inbounds v = lmo.vertices[idx]
new_val = fast_dot(v, direction)
if new_val ≤ threshold
# stop, store and return
if greedy
return v
end
# otherwise, compare to incumbent
if new_val < best_val
best_v = v
best_val = new_val
best_idx = idx
end
end
end
v = best_v
if best_idx < 0
v = compute_extreme_point(lmo.inner, direction)
if store_cache
            # note: we do not check for duplicates, hence the cache might end up with
            # additional copies of vertices (up to the number of dual steps many).
            # To reach this point, even if v was already in the cache, it must not have met
            # the threshold (otherwise we would have returned it) and it is the best vertex
            # possible, hence we will perform a dual step on the outside.
            #
            # note: another possibility would be to also test for duplicates in the if
            # statement above, but then the same vertex might be recomputed a few times.
            # Not checking the set for duplicates and potentially accepting #dualSteps many
            # duplicates is therefore considered the better tradeoff here.
push!(lmo.vertices, v)
end
end
return v
end
"""
SymmetricLMO{LMO, TR, TI}
Symmetric LMO for the reduction operator defined by `TR`
and the inflation operator defined by `TI`.
Computations are performed in the reduced subspace, and the
effective call of the LMO first inflates the gradient, then
use the non-symmetric LMO, and finally reduces the output.
"""
struct SymmetricLMO{LMO<:LinearMinimizationOracle,TR,TI} <: LinearMinimizationOracle
lmo::LMO
reduce::TR
inflate::TI
function SymmetricLMO(lmo::LMO, reduce, inflate=(x, lmo) -> x) where {LMO<:LinearMinimizationOracle}
return new{typeof(lmo),typeof(reduce),typeof(inflate)}( lmo, reduce, inflate)
end
end
function compute_extreme_point(sym::SymmetricLMO, direction; kwargs...)
return sym.reduce(compute_extreme_point(sym.lmo, sym.inflate(direction, sym.lmo)), sym.lmo)
end
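# Usage sketch (illustrative): `reduce` and `inflate` must map between the full space and the
# reduced (symmetric) subspace; the Bell correlation example script in this collection builds
# such a pair with `build_reduce_inflate_permutedims`. `inner_lmo` and `reduced_direction`
# are assumptions for this sketch.
#
# sym_lmo = SymmetricLMO(inner_lmo, reduce, inflate)
# v_reduced = compute_extreme_point(sym_lmo, reduced_direction)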
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 9960 |
"""
AbstractActiveSet{AT, R, IT}
Abstract type for an active set of atoms of type `AT` with weights of type `R` and iterate of type `IT`.
An active set is typically expected to have a field `weights`, a field `atoms`, and a field `x`.
Otherwise, all active set methods from `src/active_set.jl` can be overwritten.
"""
abstract type AbstractActiveSet{AT, R <: Real, IT} <: AbstractVector{Tuple{R,AT}} end
"""
ActiveSet{AT, R, IT}
Represents an active set of extreme vertices collected in a FW algorithm,
along with their coefficients `(λ_i, a_i)`.
`R` is the type of the `λ_i`, `AT` is the type of the atoms `a_i`.
The iterate `x = ∑λ_i a_i` is stored in x with type `IT`.
"""
struct ActiveSet{AT, R <: Real, IT} <: AbstractActiveSet{AT,R,IT}
weights::Vector{R}
atoms::Vector{AT}
x::IT
end
ActiveSet{AT,R}() where {AT,R} = ActiveSet{AT,R,Vector{float(eltype(AT))}}([], [], Vector{float(eltype(AT))}())
ActiveSet{AT}() where {AT} = ActiveSet{AT,Float64}()
function ActiveSet(tuple_values::AbstractVector{Tuple{R,AT}}) where {AT,R}
n = length(tuple_values)
weights = Vector{R}(undef, n)
atoms = Vector{AT}(undef, n)
@inbounds for idx in 1:n
weights[idx] = tuple_values[idx][1]
atoms[idx] = tuple_values[idx][2]
end
x = similar(atoms[1], float(eltype(atoms[1])))
as = ActiveSet{AT,R,typeof(x)}(weights, atoms, x)
compute_active_set_iterate!(as)
return as
end
function ActiveSet{AT,R}(tuple_values::AbstractVector{<:Tuple{<:Number,<:Any}}) where {AT,R}
n = length(tuple_values)
weights = Vector{R}(undef, n)
atoms = Vector{AT}(undef, n)
@inbounds for idx in 1:n
weights[idx] = tuple_values[idx][1]
atoms[idx] = tuple_values[idx][2]
end
x = similar(tuple_values[1][2], float(eltype(tuple_values[1][2])))
as = ActiveSet{AT,R,typeof(x)}(weights, atoms, x)
compute_active_set_iterate!(as)
return as
end
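# Usage sketch (illustrative): building an active set from (weight, atom) pairs whose weights
# sum to one; the iterate x = Σ λ_i a_i is computed at construction time.
#
# a1 = [1.0, 0.0]; a2 = [0.0, 1.0]
# as = ActiveSet([(0.25, a1), (0.75, a2)])
# get_active_set_iterate(as)   # ≈ [0.25, 0.75]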
Base.getindex(as::AbstractActiveSet, i) = (as.weights[i], as.atoms[i])
Base.size(as::AbstractActiveSet) = size(as.weights)
# these three functions do not update the active set iterate
function Base.push!(as::AbstractActiveSet, (λ, a))
push!(as.weights, λ)
push!(as.atoms, a)
return as
end
function Base.deleteat!(as::AbstractActiveSet, idx)
# WARNING assumes that idx is sorted
for (i, j) in enumerate(idx)
deleteat!(as, j-i+1)
end
return as
end
function Base.deleteat!(as::AbstractActiveSet, idx::Int)
deleteat!(as.atoms, idx)
deleteat!(as.weights, idx)
return as
end
function Base.empty!(as::AbstractActiveSet)
empty!(as.atoms)
empty!(as.weights)
as.x .= 0
return as
end
function Base.isempty(as::AbstractActiveSet)
return isempty(as.atoms)
end
"""
Copies an active set, the weight and atom vectors and the iterate.
Individual atoms are not copied.
"""
function Base.copy(as::AbstractActiveSet{AT,R,IT}) where {AT,R,IT}
return ActiveSet{AT,R,IT}(copy(as.weights), copy(as.atoms), copy(as.x))
end
"""
active_set_update!(active_set::AbstractActiveSet, lambda, atom)
Adds the atom to the active set with weight lambda or adds lambda to existing atom.
"""
function active_set_update!(
active_set::AbstractActiveSet{AT,R},
lambda, atom, renorm=true, idx=nothing;
weight_purge_threshold=weight_purge_threshold_default(R),
add_dropped_vertices=false,
vertex_storage=nothing,
) where {AT,R}
# rescale active set
active_set.weights .*= (1 - lambda)
# add value for new atom
if idx === nothing
idx = find_atom(active_set, atom)
end
if idx > 0
@inbounds active_set.weights[idx] += lambda
else
push!(active_set, (lambda, atom))
end
if renorm
add_dropped_vertices = add_dropped_vertices ? vertex_storage !== nothing : add_dropped_vertices
active_set_cleanup!(active_set; weight_purge_threshold=weight_purge_threshold, update=false, add_dropped_vertices=add_dropped_vertices, vertex_storage=vertex_storage)
active_set_renormalize!(active_set)
end
active_set_update_scale!(active_set.x, lambda, atom)
return active_set
end
"""
active_set_update_scale!(x, lambda, atom)
Operates `x ← (1-λ) x + λ a`.
"""
function active_set_update_scale!(x::IT, lambda, atom) where {IT}
@. x = x * (1 - lambda) + lambda * atom
return x
end
function active_set_update_scale!(x::IT, lambda, atom::SparseArrays.SparseVector) where {IT}
@. x *= (1 - lambda)
nzvals = SparseArrays.nonzeros(atom)
nzinds = SparseArrays.nonzeroinds(atom)
@inbounds for idx in eachindex(nzvals)
x[nzinds[idx]] += lambda * nzvals[idx]
end
return x
end
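# Worked sketch (illustrative) of the convex update above: with x = [0.25, 0.75],
# atom a = [1.0, 0.0] and λ = 0.2, `active_set_update_scale!(x, 0.2, a)` overwrites x
# with 0.8 .* x .+ 0.2 .* a == [0.4, 0.6].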
"""
active_set_update_iterate_pairwise!(active_set, x, lambda, fw_atom, away_atom)
Operates `x ← x + λ a_fw - λ a_aw`.
"""
function active_set_update_iterate_pairwise!(x::IT, lambda::Real, fw_atom::A, away_atom::A) where {IT, A}
@. x += lambda * fw_atom - lambda * away_atom
return x
end
function active_set_validate(active_set::AbstractActiveSet)
return sum(active_set.weights) ≈ 1.0 && all(≥(0), active_set.weights)
end
function active_set_renormalize!(active_set::AbstractActiveSet)
renorm = sum(active_set.weights)
active_set.weights ./= renorm
return active_set
end
function weight_from_atom(active_set::AbstractActiveSet, atom)
idx = find_atom(active_set, atom)
if idx > 0
return active_set.weights[idx]
else
return nothing
end
end
"""
get_active_set_iterate(active_set)
Return the current iterate corresponding to the active set. Does not recompute it.
"""
function get_active_set_iterate(active_set)
return active_set.x
end
"""
compute_active_set_iterate!(active_set::AbstractActiveSet) -> x
Recomputes from scratch the iterate `x` from the current weights and vertices of the active set.
Returns the iterate `x`.
"""
function compute_active_set_iterate!(active_set)
active_set.x .= 0
for (λi, ai) in active_set
@. active_set.x += λi * ai
end
return active_set.x
end
# specialized version for sparse vector
function compute_active_set_iterate!(active_set::AbstractActiveSet{<:SparseArrays.SparseVector})
active_set.x .= 0
for (λi, ai) in active_set
nzvals = SparseArrays.nonzeros(ai)
nzinds = SparseArrays.nonzeroinds(ai)
@inbounds for idx in eachindex(nzvals)
active_set.x[nzinds[idx]] += λi * nzvals[idx]
end
end
return active_set.x
end
function compute_active_set_iterate!(active_set::FrankWolfe.ActiveSet{<:SparseArrays.AbstractSparseMatrix})
active_set.x .= 0
for (λi, ai) in active_set
(I, J, V) = SparseArrays.findnz(ai)
@inbounds for idx in eachindex(I)
active_set.x[I[idx], J[idx]] += λi * V[idx]
end
end
return active_set.x
end
function active_set_cleanup!(
active_set::AbstractActiveSet{AT,R};
weight_purge_threshold=weight_purge_threshold_default(R),
update=true,
add_dropped_vertices=false,
vertex_storage=nothing,
) where {AT,R}
if add_dropped_vertices && vertex_storage !== nothing
for (weight, v) in zip(active_set.weights, active_set.atoms)
if weight ≤ weight_purge_threshold
push!(vertex_storage, v)
end
end
end
# one cannot use a generator as deleteat! modifies active_set in place
deleteat!(active_set, [idx for idx in eachindex(active_set) if active_set.weights[idx] ≤ weight_purge_threshold])
if update
compute_active_set_iterate!(active_set)
end
return nothing
end
function find_atom(active_set::AbstractActiveSet, atom)
@inbounds for idx in eachindex(active_set)
if _unsafe_equal(active_set.atoms[idx], atom)
return idx
end
end
return -1
end
"""
active_set_argmin(active_set::AbstractActiveSet, direction)
Computes the linear minimizer in the direction on the active set.
Returns `(λ_i, a_i, i)`
"""
function active_set_argmin(active_set::AbstractActiveSet, direction)
valm = typemax(eltype(direction))
idxm = -1
@inbounds for i in eachindex(active_set)
val = fast_dot(active_set.atoms[i], direction)
if val < valm
valm = val
idxm = i
end
end
if idxm == -1
error("Infinite minimum $valm in the active set. Does the gradient contain invalid (NaN / Inf) entries?")
end
return (active_set[idxm]..., idxm)
end
"""
active_set_argminmax(active_set::AbstractActiveSet, direction)
Computes the linear minimizer and maximizer in the direction on the active set.
Returns `(λ_min, a_min, i_min, val_min, λ_max, a_max, i_max, val_max, val_max-val_min ≥ Φ)`
"""
function active_set_argminmax(active_set::AbstractActiveSet, direction; Φ=0.5)
valm = typemax(eltype(direction))
valM = typemin(eltype(direction))
idxm = -1
idxM = -1
@inbounds for i in eachindex(active_set)
val = fast_dot(active_set.atoms[i], direction)
if val < valm
valm = val
idxm = i
end
if valM < val
valM = val
idxM = i
end
end
if idxm == -1 || idxM == -1
error("Infinite minimum $valm or maximum $valM in the active set. Does the gradient contain invalid (NaN / Inf) entries?")
end
return (active_set[idxm]..., idxm, valm, active_set[idxM]..., idxM, valM, valM - valm ≥ Φ)
end
"""
active_set_initialize!(as, v)
Resets the active set structure to a single vertex `v` with unit weight.
"""
function active_set_initialize!(as::AbstractActiveSet{AT,R}, v) where {AT,R}
empty!(as)
push!(as, (one(R), v))
compute_active_set_iterate!(as)
return as
end
function compute_active_set_iterate!(active_set::AbstractActiveSet{<:ScaledHotVector, <:Real, <:AbstractVector})
active_set.x .= 0
@inbounds for (λi, ai) in active_set
active_set.x[ai.val_idx] += λi * ai.active_val
end
return active_set.x
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 10697 |
"""
ActiveSetQuadratic{AT, R, IT}
Represents an active set of extreme vertices collected in a FW algorithm,
along with their coefficients `(λ_i, a_i)`.
`R` is the type of the `λ_i`, `AT` is the type of the atoms `a_i`.
The iterate `x = ∑λ_i a_i` is stored in x with type `IT`.
The objective function is assumed to be of the form `f(x)=½⟨x,Ax⟩+⟨b,x⟩+c`
so that the gradient is simply `∇f(x)=Ax+b`.
"""
struct ActiveSetQuadratic{AT, R <: Real, IT, H} <: AbstractActiveSet{AT,R,IT}
weights::Vector{R}
atoms::Vector{AT}
x::IT
A::H # Hessian matrix
b::IT # linear term
dots_x::Vector{R} # stores ⟨A * x, atoms[i]⟩
dots_A::Vector{Vector{R}} # stores ⟨A * atoms[j], atoms[i]⟩
dots_b::Vector{R} # stores ⟨b, atoms[i]⟩
weights_prev::Vector{R}
modified::BitVector
end
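# Infers the parameters (A, b) of an affine gradient ∇f(x) = A x + b from `grad!` by
# evaluating it at `x0` and at n random points and solving the resulting linear system.
# With `test=true`, the fit is verified at one extra random point and a warning is emitted
# if the function does not appear to be quadratic (or is too high-dimensional to estimate).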
function detect_quadratic_function(grad!, x0; test=true)
n = length(x0)
T = eltype(x0)
storage = collect(x0)
g0 = zeros(T, n)
grad!(storage, x0)
g0 .= storage
X = randn(T, n, n)
G = zeros(T, n, n)
for i in 1:n
grad!(storage, X[:, i])
X[:, i] .-= x0
G[:, i] .= storage .- g0
end
A = G * inv(X)
b = g0 - A * x0
if test
x_test = randn(T, n)
grad!(storage, x_test)
if norm(storage - (A * x_test + b)) ≥ Base.rtoldefault(T)
@warn "The function given is either not a quadratic or too high-dimensional for an accurate estimation of its parameters."
end
end
return A, b
end
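# Usage sketch (illustrative): recovering A and b from a known quadratic gradient.
#
# A0 = [2.0 0.0; 0.0 4.0]; b0 = [1.0, -1.0]
# grad_quad!(storage, x) = storage .= A0 * x .+ b0
# A_est, b_est = detect_quadratic_function(grad_quad!, zeros(2))
# # A_est ≈ A0 and b_est ≈ b0 up to numerical error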
function ActiveSetQuadratic(tuple_values::AbstractVector{Tuple{R,AT}}, grad!::Function) where {AT,R}
return ActiveSetQuadratic(tuple_values, detect_quadratic_function(grad!, tuple_values[1][2])...)
end
function ActiveSetQuadratic(tuple_values::AbstractVector{Tuple{R,AT}}, A::H, b) where {AT,R,H}
n = length(tuple_values)
weights = Vector{R}(undef, n)
atoms = Vector{AT}(undef, n)
dots_x = zeros(R, n)
dots_A = Vector{Vector{R}}(undef, n)
dots_b = Vector{R}(undef, n)
weights_prev = zeros(R, n)
modified = trues(n)
@inbounds for idx in 1:n
weights[idx] = tuple_values[idx][1]
atoms[idx] = tuple_values[idx][2]
dots_A[idx] = Vector{R}(undef, idx)
for idy in 1:idx
dots_A[idx][idy] = fast_dot(A * atoms[idx], atoms[idy])
end
dots_b[idx] = fast_dot(b, atoms[idx])
end
x = similar(b)
as = ActiveSetQuadratic{AT,R,typeof(x),H}(weights, atoms, x, A, b, dots_x, dots_A, dots_b, weights_prev, modified)
compute_active_set_iterate!(as)
return as
end
function ActiveSetQuadratic{AT,R}(tuple_values::AbstractVector{<:Tuple{<:Number,<:Any}}, grad!) where {AT,R}
return ActiveSetQuadratic{AT,R}(tuple_values, detect_quadratic_function(grad!, tuple_values[1][2])...)
end
function ActiveSetQuadratic{AT,R}(tuple_values::AbstractVector{<:Tuple{<:Number,<:Any}}, A::H, b) where {AT,R,H}
n = length(tuple_values)
weights = Vector{R}(undef, n)
atoms = Vector{AT}(undef, n)
dots_x = zeros(R, n)
dots_A = Vector{Vector{R}}(undef, n)
dots_b = Vector{R}(undef, n)
weights_prev = zeros(R, n)
modified = trues(n)
@inbounds for idx in 1:n
weights[idx] = tuple_values[idx][1]
atoms[idx] = tuple_values[idx][2]
dots_A[idx] = Vector{R}(undef, idx)
for idy in 1:idx
dots_A[idx][idy] = fast_dot(A * atoms[idx], atoms[idy])
end
dots_b[idx] = fast_dot(b, atoms[idx])
end
x = similar(b)
as = ActiveSetQuadratic{AT,R,typeof(x),H}(weights, atoms, x, A, b, dots_x, dots_A, dots_b, weights_prev, modified)
compute_active_set_iterate!(as)
return as
end
# custom dummy structure to handle identity hessian matrix
# required as LinearAlgebra.I does not work for general tensors
struct Identity{R <: Real}
λ::R
end
function Base.:*(a::Identity, b)
if a.λ == 1
return b
else
return a.λ * b
end
end
function ActiveSetQuadratic(tuple_values::AbstractVector{Tuple{R,AT}}, A::UniformScaling, b) where {AT,R}
return ActiveSetQuadratic(tuple_values, Identity(A.λ), b)
end
function ActiveSetQuadratic{AT,R}(tuple_values::AbstractVector{<:Tuple{<:Number,<:Any}}, A::UniformScaling, b) where {AT,R}
return ActiveSetQuadratic{AT,R}(tuple_values, Identity(A.λ), b)
end
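# Usage sketch (illustrative): for the distance objective f(x) = ½‖x - p‖² the gradient is
# ∇f(x) = I * x - p, so the quadratic active set can be built from `LinearAlgebra.I` and `-p`,
# as done in the Bell correlation example script of this collection. `lmo` and `p` are
# assumptions for this sketch.
#
# x0 = compute_extreme_point(lmo, -p)
# active_set = ActiveSetQuadratic([(1.0, x0)], I, -p)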
# these three functions do not update the active set iterate
function Base.push!(as::ActiveSetQuadratic{AT,R}, (λ, a)) where {AT,R}
dot_x = zero(R)
dot_A = Vector{R}(undef, length(as))
dot_b = fast_dot(as.b, a)
Aa = as.A * a
@inbounds for i in 1:length(as)
dot_A[i] = fast_dot(Aa, as.atoms[i])
as.dots_x[i] += λ * dot_A[i]
dot_x += as.weights[i] * dot_A[i]
end
push!(dot_A, fast_dot(Aa, a))
dot_x += λ * dot_A[end]
push!(as.weights, λ)
push!(as.atoms, a)
push!(as.dots_x, dot_x)
push!(as.dots_A, dot_A)
push!(as.dots_b, dot_b)
push!(as.weights_prev, λ)
push!(as.modified, true)
return as
end
function Base.deleteat!(as::ActiveSetQuadratic, idx::Int)
@inbounds for i in 1:idx-1
as.dots_x[i] -= as.weights_prev[idx] * as.dots_A[idx][i]
end
@inbounds for i in idx+1:length(as)
as.dots_x[i] -= as.weights_prev[idx] * as.dots_A[i][idx]
deleteat!(as.dots_A[i], idx)
end
deleteat!(as.weights, idx)
deleteat!(as.atoms, idx)
deleteat!(as.dots_x, idx)
deleteat!(as.dots_A, idx)
deleteat!(as.dots_b, idx)
deleteat!(as.weights_prev, idx)
deleteat!(as.modified, idx)
return as
end
function Base.empty!(as::ActiveSetQuadratic)
empty!(as.atoms)
empty!(as.weights)
as.x .= 0
empty!(as.dots_x)
empty!(as.dots_A)
empty!(as.dots_b)
empty!(as.weights_prev)
empty!(as.modified)
return as
end
function active_set_update!(
active_set::ActiveSetQuadratic{AT,R},
lambda, atom, renorm=true, idx=nothing;
weight_purge_threshold=weight_purge_threshold_default(R),
add_dropped_vertices=false,
vertex_storage=nothing,
) where {AT,R}
# rescale active set
active_set.weights .*= (1 - lambda)
active_set.weights_prev .*= (1 - lambda)
active_set.dots_x .*= (1 - lambda)
# add value for new atom
if idx === nothing
idx = find_atom(active_set, atom)
end
if idx > 0
@inbounds active_set.weights[idx] += lambda
@inbounds active_set.modified[idx] = true
else
push!(active_set, (lambda, atom))
end
if renorm
add_dropped_vertices = add_dropped_vertices ? vertex_storage !== nothing : add_dropped_vertices
active_set_cleanup!(active_set; weight_purge_threshold=weight_purge_threshold, update=false, add_dropped_vertices=add_dropped_vertices, vertex_storage=vertex_storage)
active_set_renormalize!(active_set)
end
active_set_update_scale!(active_set.x, lambda, atom)
return active_set
end
function active_set_renormalize!(active_set::ActiveSetQuadratic)
renorm = sum(active_set.weights)
active_set.weights ./= renorm
active_set.weights_prev ./= renorm
# WARNING: it might sometimes be necessary to recompute dots_x to prevent discrepancy due to numerical errors
active_set.dots_x ./= renorm
return active_set
end
function active_set_argmin(active_set::ActiveSetQuadratic, direction)
valm = typemax(eltype(direction))
idxm = -1
idx_modified = findall(active_set.modified)
@inbounds for idx in idx_modified
weights_diff = active_set.weights[idx] - active_set.weights_prev[idx]
for i in 1:idx
active_set.dots_x[i] += weights_diff * active_set.dots_A[idx][i]
end
for i in idx+1:length(active_set)
active_set.dots_x[i] += weights_diff * active_set.dots_A[i][idx]
end
end
@inbounds for i in eachindex(active_set)
val = active_set.dots_x[i] + active_set.dots_b[i]
if val < valm
valm = val
idxm = i
end
end
@inbounds for idx in idx_modified
active_set.weights_prev[idx] = active_set.weights[idx]
active_set.modified[idx] = false
end
if idxm == -1
error("Infinite minimum $valm in the active set. Does the gradient contain invalid (NaN / Inf) entries?")
end
active_set.modified[idxm] = true
return (active_set[idxm]..., idxm)
end
function active_set_argminmax(active_set::ActiveSetQuadratic, direction; Φ=0.5)
valm = typemax(eltype(direction))
valM = typemin(eltype(direction))
idxm = -1
idxM = -1
idx_modified = findall(active_set.modified)
@inbounds for idx in idx_modified
weights_diff = active_set.weights[idx] - active_set.weights_prev[idx]
for i in 1:idx
active_set.dots_x[i] += weights_diff * active_set.dots_A[idx][i]
end
for i in idx+1:length(active_set)
active_set.dots_x[i] += weights_diff * active_set.dots_A[i][idx]
end
end
@inbounds for i in eachindex(active_set)
# direction is not used and assumed to be Ax+b
val = active_set.dots_x[i] + active_set.dots_b[i]
# @assert abs(fast_dot(active_set.atoms[i], direction) - val) < Base.rtoldefault(eltype(direction))
if val < valm
valm = val
idxm = i
end
if valM < val
valM = val
idxM = i
end
end
@inbounds for idx in idx_modified
active_set.weights_prev[idx] = active_set.weights[idx]
active_set.modified[idx] = false
end
if idxm == -1 || idxM == -1
error("Infinite minimum $valm or maximum $valM in the active set. Does the gradient contain invalid (NaN / Inf) entries?")
end
active_set.modified[idxm] = true
active_set.modified[idxM] = true
return (active_set[idxm]..., idxm, valm, active_set[idxM]..., idxM, valM, valM - valm ≥ Φ)
end
# in-place warm-start of a quadratic active set for A and b
function update_active_set_quadratic!(warm_as::ActiveSetQuadratic{AT,R}, A::H, b) where {AT,R,H}
@inbounds for idx in eachindex(warm_as)
for idy in 1:idx
warm_as.dots_A[idx][idy] = fast_dot(A * warm_as.atoms[idx], warm_as.atoms[idy])
end
end
warm_as.A .= A
return update_active_set_quadratic!(warm_as, b)
end
# in-place warm-start of a quadratic active set for b
function update_active_set_quadratic!(warm_as::ActiveSetQuadratic{AT,R,IT,H}, b) where {AT,R,IT,H}
warm_as.dots_x .= 0
warm_as.weights_prev .= 0
warm_as.modified .= true
@inbounds for idx in eachindex(warm_as)
warm_as.dots_b[idx] = fast_dot(b, warm_as.atoms[idx])
end
warm_as.b .= b
compute_active_set_iterate!(warm_as)
return warm_as
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 16210 |
"""
away_frank_wolfe(f, grad!, lmo, x0; ...)
Frank-Wolfe with away steps.
The algorithm maintains the current iterate as a convex combination of vertices in the
[`FrankWolfe.ActiveSet`](@ref) data structure.
See [M. Besançon, A. Carderera and S. Pokutta 2021](https://arxiv.org/abs/2104.06675) for illustrations of away steps.
"""
function away_frank_wolfe(
f,
grad!,
lmo,
x0;
line_search::LineSearchMethod=Adaptive(),
lazy_tolerance=2.0,
epsilon=1e-7,
away_steps=true,
lazy=false,
momentum=nothing,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
gradient=nothing,
renorm_interval=1000,
callback=nothing,
traj_data=[],
timeout=Inf,
weight_purge_threshold=weight_purge_threshold_default(eltype(x0)),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
linesearch_workspace=nothing,
recompute_last_vertex=true,
)
# add the first vertex to active set from initialization
active_set = ActiveSet([(1.0, x0)])
# Call the method using an ActiveSet as input
return away_frank_wolfe(
f,
grad!,
lmo,
active_set,
line_search=line_search,
lazy_tolerance=lazy_tolerance,
epsilon=epsilon,
away_steps=away_steps,
lazy=lazy,
momentum=momentum,
max_iteration=max_iteration,
print_iter=print_iter,
trajectory=trajectory,
verbose=verbose,
memory_mode=memory_mode,
gradient=gradient,
renorm_interval=renorm_interval,
callback=callback,
traj_data=traj_data,
timeout=timeout,
weight_purge_threshold=weight_purge_threshold,
extra_vertex_storage=extra_vertex_storage,
add_dropped_vertices=add_dropped_vertices,
use_extra_vertex_storage=use_extra_vertex_storage,
linesearch_workspace=linesearch_workspace,
recompute_last_vertex=recompute_last_vertex,
)
end
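# Usage sketch (illustrative): minimizing a small quadratic over the probability simplex with
# away steps. The target vector `xp` is an assumption made for this example; the LMO is the
# probability simplex oracle defined elsewhere in this package.
#
# xp = [0.1, 0.2, 0.3, 0.4]
# f(x) = sum(abs2, x - xp)
# grad!(storage, x) = storage .= 2 .* (x .- xp)
# lmo = ProbabilitySimplexOracle(1.0)
# x0 = compute_extreme_point(lmo, zeros(4))
# x, v, primal, dual_gap, traj_data, active_set =
#     away_frank_wolfe(f, grad!, lmo, x0; max_iteration=1000, verbose=false)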
# step away FrankWolfe with the active set given as parameter
# note: in this case x0 is not needed, as the iterate is given by the active set; passing it might otherwise lead to confusion
function away_frank_wolfe(
f,
grad!,
lmo,
active_set::AbstractActiveSet{AT,R};
line_search::LineSearchMethod=Adaptive(),
lazy_tolerance=2.0,
epsilon=1e-7,
away_steps=true,
lazy=false,
momentum=nothing,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
gradient=nothing,
renorm_interval=1000,
callback=nothing,
traj_data=[],
timeout=Inf,
weight_purge_threshold=weight_purge_threshold_default(R),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
linesearch_workspace=nothing,
recompute_last_vertex=true,
) where {AT,R}
# format string for output of the algorithm
format_string = "%6s %13s %14e %14e %14e %14e %14e %14i\n"
headers = ("Type", "Iteration", "Primal", "Dual", "Dual Gap", "Time", "It/sec", "#ActiveSet")
function format_state(state, active_set)
rep = (
steptype_string[Symbol(state.step_type)],
string(state.t),
Float64(state.primal),
Float64(state.primal - state.dual_gap),
Float64(state.dual_gap),
state.time,
state.t / state.time,
length(active_set),
)
return rep
end
if isempty(active_set)
throw(ArgumentError("Empty active set"))
end
t = 0
dual_gap = Inf
primal = Inf
x = get_active_set_iterate(active_set)
step_type = ST_REGULAR
if trajectory
callback = make_trajectory_callback(callback, traj_data)
end
if verbose
callback = make_print_callback(callback, print_iter, headers, format_string, format_state)
end
time_start = time_ns()
d = similar(x)
if gradient === nothing
gradient = collect(x)
end
gtemp = if momentum !== nothing
similar(gradient)
else
nothing
end
if verbose
println("\nAway-step Frank-Wolfe Algorithm.")
NumType = eltype(x)
println(
"MEMORY_MODE: $memory_mode STEPSIZE: $line_search EPSILON: $epsilon MAXITERATION: $max_iteration TYPE: $NumType",
)
grad_type = typeof(gradient)
println(
"GRADIENTTYPE: $grad_type LAZY: $lazy lazy_tolerance: $lazy_tolerance MOMENTUM: $momentum AWAYSTEPS: $away_steps",
)
println("LMO: $(typeof(lmo))")
if (use_extra_vertex_storage || add_dropped_vertices) && extra_vertex_storage === nothing
@warn(
"use_extra_vertex_storage and add_dropped_vertices options are only usable with a extra_vertex_storage storage"
)
end
end
x = get_active_set_iterate(active_set)
primal = f(x)
v = active_set.atoms[1]
phi_value = convert(eltype(x), Inf)
gamma = one(phi_value)
if linesearch_workspace === nothing
linesearch_workspace = build_linesearch_workspace(line_search, x, gradient)
end
if extra_vertex_storage === nothing
use_extra_vertex_storage = add_dropped_vertices = false
end
while t <= max_iteration && phi_value >= max(eps(float(typeof(phi_value))), epsilon)
#####################
# managing time and Ctrl-C
#####################
time_at_loop = time_ns()
if t == 0
time_start = time_at_loop
end
# time is measured at beginning of loop for consistency throughout all algorithms
tot_time = (time_at_loop - time_start) / 1e9
if timeout < Inf
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
#####################
t += 1
# compute current iterate from active set
x = get_active_set_iterate(active_set)
if isnothing(momentum)
grad!(gradient, x)
else
grad!(gtemp, x)
@memory_mode(memory_mode, gradient = (momentum * gradient) + (1 - momentum) * gtemp)
end
if away_steps
if lazy
d, vertex, index, gamma_max, phi_value, away_step_taken, fw_step_taken, step_type =
lazy_afw_step(
x,
gradient,
lmo,
active_set,
phi_value,
epsilon,
d;
use_extra_vertex_storage=use_extra_vertex_storage,
extra_vertex_storage=extra_vertex_storage,
lazy_tolerance=lazy_tolerance,
memory_mode=memory_mode,
)
else
d, vertex, index, gamma_max, phi_value, away_step_taken, fw_step_taken, step_type =
afw_step(x, gradient, lmo, active_set, epsilon, d, memory_mode=memory_mode)
end
else
d, vertex, index, gamma_max, phi_value, away_step_taken, fw_step_taken, step_type =
fw_step(x, gradient, lmo, d, memory_mode=memory_mode)
end
gamma = 0.0
if fw_step_taken || away_step_taken
gamma = perform_line_search(
line_search,
t,
f,
grad!,
gradient,
x,
d,
gamma_max,
linesearch_workspace,
memory_mode,
)
gamma = min(gamma_max, gamma)
step_type = gamma ≈ gamma_max ? ST_DROP : step_type
# cleanup and renormalize every x iterations. Only for the fw steps.
renorm = mod(t, renorm_interval) == 0
if away_step_taken
active_set_update!(active_set, -gamma, vertex, true, index, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
else
if add_dropped_vertices && gamma == gamma_max
for vtx in active_set.atoms
if vtx != v
push!(extra_vertex_storage, vtx)
end
end
end
active_set_update!(active_set, gamma, vertex, renorm, index)
end
end
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
vertex,
d,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set) === false
break
end
end
if mod(t, renorm_interval) == 0
active_set_renormalize!(active_set)
x = compute_active_set_iterate!(active_set)
end
if (
(mod(t, print_iter) == 0 && verbose) ||
callback !== nothing ||
!(line_search isa Agnostic || line_search isa Nonconvex || line_search isa FixedStep)
)
primal = f(x)
dual_gap = phi_value
end
end
    # recompute everything once more for final verification / do not record to trajectory though for now!
    # this is important as some variants do not recompute f(x) and the dual_gap regularly but only when reporting
    # hence the final computation.
    # also clean up the active_set after the many operations performed on it
x = get_active_set_iterate(active_set)
grad!(gradient, x)
v = compute_extreme_point(lmo, gradient)
primal = f(x)
dual_gap = fast_dot(x, gradient) - fast_dot(v, gradient)
step_type = ST_LAST
tot_time = (time_ns() - time_start) / 1e9
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set)
end
active_set_renormalize!(active_set)
active_set_cleanup!(active_set; weight_purge_threshold=weight_purge_threshold, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
x = get_active_set_iterate(active_set)
grad!(gradient, x)
if recompute_last_vertex
v = compute_extreme_point(lmo, gradient)
primal = f(x)
dual_gap = fast_dot(x, gradient) - fast_dot(v, gradient)
end
step_type = ST_POSTPROCESS
tot_time = (time_ns() - time_start) / 1e9
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set)
end
return (x=x, v=v, primal=primal, dual_gap=dual_gap, traj_data=traj_data, active_set=active_set)
end
function lazy_afw_step(x, gradient, lmo, active_set, phi, epsilon, d; use_extra_vertex_storage=false, extra_vertex_storage=nothing, lazy_tolerance=2.0, memory_mode::MemoryEmphasis=InplaceEmphasis())
_, v, v_loc, _, a_lambda, a, a_loc, _, _ = active_set_argminmax(active_set, gradient)
#Do lazy FW step
grad_dot_lazy_fw_vertex = fast_dot(v, gradient)
grad_dot_x = fast_dot(x, gradient)
grad_dot_a = fast_dot(a, gradient)
if grad_dot_x - grad_dot_lazy_fw_vertex >= grad_dot_a - grad_dot_x &&
grad_dot_x - grad_dot_lazy_fw_vertex >= phi / lazy_tolerance &&
grad_dot_x - grad_dot_lazy_fw_vertex >= epsilon
step_type = ST_LAZY
gamma_max = one(a_lambda)
d = muladd_memory_mode(memory_mode, d, x, v)
vertex = v
away_step_taken = false
fw_step_taken = true
index = v_loc
else
#Do away step, as it promises enough progress.
if grad_dot_a - grad_dot_x > grad_dot_x - grad_dot_lazy_fw_vertex &&
grad_dot_a - grad_dot_x >= phi / lazy_tolerance
step_type = ST_AWAY
gamma_max = a_lambda / (1 - a_lambda)
d = muladd_memory_mode(memory_mode, d, a, x)
vertex = a
away_step_taken = true
fw_step_taken = false
index = a_loc
#Resort to calling the LMO
else
# optionally: try vertex storage
if use_extra_vertex_storage
lazy_threshold = fast_dot(gradient, x) - phi / lazy_tolerance
(found_better_vertex, new_forward_vertex) =
storage_find_argmin_vertex(extra_vertex_storage, gradient, lazy_threshold)
if found_better_vertex
@debug("Found acceptable lazy vertex in storage")
v = new_forward_vertex
step_type = ST_LAZYSTORAGE
else
v = compute_extreme_point(lmo, gradient)
step_type = ST_REGULAR
end
else
v = compute_extreme_point(lmo, gradient)
step_type = ST_REGULAR
end
# Real dual gap promises enough progress.
grad_dot_fw_vertex = fast_dot(v, gradient)
dual_gap = grad_dot_x - grad_dot_fw_vertex
if dual_gap >= phi / lazy_tolerance
gamma_max = one(a_lambda)
d = muladd_memory_mode(memory_mode, d, x, v)
vertex = v
away_step_taken = false
fw_step_taken = true
index = -1
#Lower our expectation for progress.
else
step_type = ST_DUALSTEP
phi = min(dual_gap, phi / 2.0)
gamma_max = zero(a_lambda)
vertex = v
away_step_taken = false
fw_step_taken = false
index = -1
end
end
end
return d, vertex, index, gamma_max, phi, away_step_taken, fw_step_taken, step_type
end
function afw_step(x, gradient, lmo, active_set, epsilon, d; memory_mode::MemoryEmphasis=InplaceEmphasis())
_, _, _, _, a_lambda, a, a_loc = active_set_argminmax(active_set, gradient)
v = compute_extreme_point(lmo, gradient)
grad_dot_x = fast_dot(x, gradient)
away_gap = fast_dot(a, gradient) - grad_dot_x
dual_gap = grad_dot_x - fast_dot(v, gradient)
if dual_gap >= away_gap && dual_gap >= epsilon
step_type = ST_REGULAR
gamma_max = one(a_lambda)
d = muladd_memory_mode(memory_mode, d, x, v)
vertex = v
away_step_taken = false
fw_step_taken = true
index = -1
elseif away_gap >= epsilon
step_type = ST_AWAY
gamma_max = a_lambda / (1 - a_lambda)
d = muladd_memory_mode(memory_mode, d, a, x)
vertex = a
away_step_taken = true
fw_step_taken = false
index = a_loc
else
step_type = ST_AWAY
gamma_max = zero(a_lambda)
vertex = a
away_step_taken = false
fw_step_taken = false
index = a_loc
end
return d, vertex, index, gamma_max, dual_gap, away_step_taken, fw_step_taken, step_type
end
function fw_step(x, gradient, lmo, d; memory_mode::MemoryEmphasis = InplaceEmphasis())
v = compute_extreme_point(lmo, gradient)
d = muladd_memory_mode(memory_mode, d, x, v)
return (
d,
v,
nothing,
1,
fast_dot(x, gradient) - fast_dot(v, gradient),
false,
true,
ST_REGULAR,
)
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 13037 | # Alternating Linear Minimization with a start direction instead of an initial point x0
# This is for the case where no feasible point is known.
function alternating_linear_minimization(
bc_method,
f,
grad!,
lmos::NTuple{N,LinearMinimizationOracle},
start_direction::T;
lambda=1.0,
verbose=false,
callback=nothing,
print_iter=1e3,
kwargs...,
) where {N,T<:AbstractArray}
x0 = compute_extreme_point(ProductLMO(lmos), tuple(fill(start_direction, N)...))
return alternating_linear_minimization(
bc_method,
f,
grad!,
lmos,
x0;
lambda=lambda,
verbose=verbose,
callback=callback,
print_iter=print_iter,
kwargs...,
)
end
"""
alternating_linear_minimization(bc_algo::BlockCoordinateMethod, f, grad!, lmos::NTuple{N,LinearMinimizationOracle}, x0; ...) where {N}
Alternating Linear Minimization minimizes the objective `f` over the intersections of the feasible domains specified by `lmos`.
The tuple `x0` defines the initial points for each domain.
Returns a tuple `(x, v, primal, dual_gap, infeas, traj_data)` with:
- `x` cartesian product of final iterates
- `v` cartesian product of last vertices of the LMOs
- `primal` primal value `f(x)`
- `dual_gap` final Frank-Wolfe gap
- `infeas` sum of squared, pairwise distances between iterates
- `traj_data` vector of trajectory information.
"""
function alternating_linear_minimization(
bc_method,
f,
grad!,
lmos::NTuple{N,LinearMinimizationOracle},
x0::Tuple{Vararg{Any,N}};
lambda::Union{Float64, Function}=1.0,
verbose=false,
trajectory=false,
callback=nothing,
max_iteration=10000,
print_iter = max_iteration / 10,
memory_mode=InplaceEmphasis(),
line_search::LS=Adaptive(),
epsilon=1e-7,
kwargs...,
) where {N, LS<:Union{LineSearchMethod,NTuple{N,LineSearchMethod}}}
x0_bc = BlockVector([x0[i] for i in 1:N], [size(x0[i]) for i in 1:N], sum(length, x0))
gradf = similar(x0_bc)
prod_lmo = ProductLMO(lmos)
λ0 = lambda isa Function ? 1.0 : lambda
function build_gradient()
λ = Ref(λ0)
return (storage, x) -> begin
for i in 1:N
grad!(gradf.blocks[i], x.blocks[i])
end
t = [2.0 * (N * b - sum(x.blocks)) for b in x.blocks]
return storage.blocks = λ[] * gradf.blocks + t
end
end
function dist2(x::BlockVector)
s = 0
for i=1:N
for j=1:i-1
diff = x.blocks[i] - x.blocks[j]
s += fast_dot(diff, diff)
end
end
return s
end
function build_objective()
λ = Ref(λ0)
return x -> begin
return λ[] * sum(f(x.blocks[i]) for i in 1:N) + dist2(x)
end
end
f_bc = build_objective()
grad_bc! = build_gradient()
dist2_data = []
if trajectory
function make_dist2_callback(callback)
return function callback_dist2(state, args...)
push!(dist2_data, dist2(state.x))
if callback === nothing
return true
end
return callback(state, args...)
end
end
callback = make_dist2_callback(callback)
end
if verbose
println("\nAlternating Linear Minimization (ALM).")
println("FW METHOD: $bc_method")
num_type = eltype(x0[1])
grad_type = eltype(gradf.blocks[1])
line_search_type = line_search isa Tuple ? [typeof(a) for a in line_search] : typeof(line_search)
println("MEMORY_MODE: $memory_mode STEPSIZE: $line_search_type EPSILON: $epsilon MAXITERATION: $max_iteration")
println("TYPE: $num_type GRADIENTTYPE: $grad_type")
println("LAMBDA: $lambda")
if memory_mode isa InplaceEmphasis
@info("In memory_mode memory iterates are written back into x0!")
end
# header and format string for output of the algorithm
headers = ["Type", "Iteration", "Primal", "Dual", "Dual Gap", "Time", "It/sec", "Dist2"]
format_string = "%6s %13s %14e %14e %14e %14e %14e %14e\n"
function format_state(state, args...)
rep = (
steptype_string[Symbol(state.step_type)],
string(state.t),
Float64(state.primal),
Float64(state.primal - state.dual_gap),
Float64(state.dual_gap),
state.time,
state.t / state.time,
Float64(dist2(state.x)),
)
if bc_method in
[away_frank_wolfe, blended_pairwise_conditional_gradient, pairwise_frank_wolfe]
add_rep = (length(args[1]))
elseif bc_method === blended_conditional_gradient
add_rep = (length(args[1]), args[2])
elseif bc_method === stochastic_frank_wolfe
add_rep = (args[1],)
else
add_rep = ()
end
return (rep..., add_rep...)
end
if bc_method in
[away_frank_wolfe, blended_pairwise_conditional_gradient, pairwise_frank_wolfe]
push!(headers, "#ActiveSet")
format_string = format_string[1:end-1] * " %14i\n"
elseif bc_method === blended_conditional_gradient
append!(headers, ["#ActiveSet", "#non-simplex"])
format_string = format_string[1:end-1] * " %14i %14i\n"
elseif bc_method === stochastic_frank_wolfe
push!(headers, "Batch")
format_string = format_string[1:end-1] * " %6i\n"
end
callback = make_print_callback(callback, print_iter, headers, format_string, format_state)
end
if lambda isa Function
        inner_callback = callback
        callback = function (state, args...)
            state.f.λ[] = lambda(state)
            state.grad!.λ[] = state.f.λ[]
            if inner_callback === nothing
                return true
            end
            # forward to the previously installed callback instead of the rebound
            # `callback` variable, which would otherwise recurse into itself
            return inner_callback(state, args...)
        end
end
x, v, primal, dual_gap, traj_data = bc_method(
f_bc,
grad_bc!,
prod_lmo,
x0_bc;
verbose=false, # Suppress inner verbose output
trajectory=trajectory,
callback=callback,
max_iteration=max_iteration,
print_iter=print_iter,
epsilon=epsilon,
memory_mode=memory_mode,
line_search=line_search,
kwargs...,
)
if trajectory
traj_data = [(t..., dist2_data[i]) for (i, t) in enumerate(traj_data)]
end
return x, v, primal, dual_gap, dist2(x), traj_data
end
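# Usage sketch (illustrative): minimizing a smooth objective over the intersection of a
# probability simplex and an L1 ball, using the block-coordinate Frank-Wolfe solver defined
# in this package as the inner method. Sizes and radii are assumptions for this sketch.
#
# f(x) = sum(abs2, x)
# grad!(storage, x) = storage .= 2 .* x
# lmos = (ProbabilitySimplexOracle(1.0), LpNormLMO{1}(1.0))
# x0 = (ones(5) ./ 5, zeros(5))
# x, v, primal, dual_gap, infeas, traj_data =
#     alternating_linear_minimization(block_coordinate_frank_wolfe, f, grad!, lmos, x0)

# Approximately projects `y` onto the feasible region of `lmo` by solving
# min ‖x - y‖² with the vanilla Frank-Wolfe algorithm.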
function ProjectionFW(y, lmo; max_iter=10000, eps=1e-3)
f(x) = sum(abs2, x - y)
grad!(storage, x) = storage .= 2 * (x - y)
x0 = FrankWolfe.compute_extreme_point(lmo, y)
x_opt, _ = FrankWolfe.frank_wolfe(
f,
grad!,
lmo,
x0,
epsilon=eps,
max_iteration=max_iter,
trajectory=true,
line_search=FrankWolfe.Adaptive(verbose=false, relaxed_smoothness=false),
)
return x_opt
end
"""
alternating_projections(lmos::NTuple{N,LinearMinimizationOracle}, x0; ...) where {N}
Computes a point in the intersection of feasible domains specified by `lmos`.
Returns a tuple `(x, v, dual_gap, infeas, traj_data)` with:
- `x` cartesian product of final iterates
- `v` cartesian product of last vertices of the LMOs
- `dual_gap` final Frank-Wolfe gap
- `infeas` sum of squared, pairwise distances between iterates
- `traj_data` vector of trajectory information.
"""
function alternating_projections(
lmos::NTuple{N,LinearMinimizationOracle},
x0;
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
callback=nothing,
traj_data=[],
timeout=Inf,
) where {N}
return alternating_projections(
ProductLMO(lmos),
x0;
epsilon,
max_iteration,
print_iter,
trajectory,
verbose,
memory_mode,
callback,
traj_data,
timeout,
)
end
function alternating_projections(
lmo::ProductLMO{N},
x0;
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
callback=nothing,
traj_data=[],
timeout=Inf,
) where {N}
# header and format string for output of the algorithm
headers = ["Type", "Iteration", "Dual Gap", "Infeas", "Time", "It/sec"]
format_string = "%6s %13s %14e %14e %14e %14e\n"
function format_state(state, infeas)
rep = (
steptype_string[Symbol(state.step_type)],
string(state.t),
Float64(state.dual_gap),
Float64(infeas),
state.time,
state.t / state.time,
)
return rep
end
t = 0
dual_gap = Inf
x = fill(x0, N)
v = similar(x)
step_type = ST_REGULAR
gradient = similar(x)
ndim = ndims(x)
infeasibility(x) = sum(
fast_dot(
selectdim(x, ndim, i) - selectdim(x, ndim, j),
selectdim(x, ndim, i) - selectdim(x, ndim, j),
) for i in 1:N for j in 1:i-1
)
partial_infeasibility(x) =
sum(fast_dot(x[mod(i - 2, N)+1] - x[i], x[mod(i - 2, N)+1] - x[i]) for i in 1:N)
function grad!(storage, x)
@. storage = [2 * (x[i] - x[mod(i - 2, N)+1]) for i in 1:N]
end
projection_step(x, i, t) = ProjectionFW(x, lmo.lmos[i]; eps=1 / (t^2 + 1))
if trajectory
callback = make_trajectory_callback(callback, traj_data)
end
if verbose
callback = make_print_callback(callback, print_iter, headers, format_string, format_state)
end
time_start = time_ns()
if verbose
println("\nAlternating Projections.")
num_type = eltype(x0[1])
println(
"MEMORY_MODE: $memory_mode EPSILON: $epsilon MAXITERATION: $max_iteration TYPE: $num_type",
)
grad_type = typeof(gradient)
println("GRADIENTTYPE: $grad_type")
if memory_mode isa InplaceEmphasis
@info("In memory_mode memory iterates are written back into x0!")
end
end
first_iter = true
while t <= max_iteration && dual_gap >= max(epsilon, eps(float(typeof(dual_gap))))
#####################
# managing time and Ctrl-C
#####################
time_at_loop = time_ns()
if t == 0
time_start = time_at_loop
end
# time is measured at beginning of loop for consistency throughout all algorithms
tot_time = (time_at_loop - time_start) / 1e9
if timeout < Inf
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
# Projection step:
for i in 1:N
# project the previous iterate on the i-th feasible region
x[i] = projection_step(x[mod(i - 2, N)+1], i, t)
end
# Update gradients
grad!(gradient, x)
# Update dual gaps
v = compute_extreme_point.(lmo.lmos, gradient)
dual_gap = fast_dot(x - v, gradient)
# go easy on the memory - only compute if really needed
if ((mod(t, print_iter) == 0 && verbose) || callback !== nothing)
infeas = infeasibility(x)
end
first_iter = false
t = t + 1
if callback !== nothing
state = CallbackState(
t,
infeas,
infeas - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
nothing,
nothing,
nothing,
lmo,
gradient,
step_type,
)
# @show state
if callback(state, infeas) === false
break
end
end
end
# recompute everything once for final verification / do not record to trajectory though for now!
# this is important as some variants do not recompute f(x) and the dual_gap regularly but only when reporting
# hence the final computation.
step_type = ST_LAST
infeas = infeasibility(x)
grad!(gradient, x)
v = compute_extreme_point.(lmo.lmos, gradient)
dual_gap = fast_dot(x - v, gradient)
tot_time = (time_ns() - time_start) / 1.0e9
if callback !== nothing
state = CallbackState(
t,
infeas,
infeas - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
nothing,
nothing,
nothing,
lmo,
gradient,
step_type,
)
callback(state, infeas)
end
return x, v, dual_gap, infeas, traj_data
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 36496 |
"""
blended_conditional_gradient(f, grad!, lmo, x0)
Entry point for the Blended Conditional Gradient algorithm.
See Braun, Gábor, et al. "Blended conditional gradients", ICML 2019.
The method works on an active set like [`FrankWolfe.away_frank_wolfe`](@ref),
performing gradient descent over the convex hull of active vertices,
removing vertices when their weight drops to 0 and adding new vertices
by calling the linear oracle in a lazy fashion.
"""
function blended_conditional_gradient(
f,
grad!,
lmo,
x0;
line_search::LineSearchMethod=Adaptive(),
line_search_inner::LineSearchMethod=Adaptive(),
hessian=nothing,
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
accelerated=false,
lazy_tolerance=2.0,
gradient=nothing,
callback=nothing,
traj_data=[],
timeout=Inf,
weight_purge_threshold=weight_purge_threshold_default(eltype(x0)),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
linesearch_workspace=nothing,
linesearch_inner_workspace=nothing,
renorm_interval=1000,
lmo_kwargs...,
)
# add the first vertex to active set from initialization
active_set = ActiveSet([(1.0, x0)])
return blended_conditional_gradient(
f,
grad!,
lmo,
active_set,
line_search=line_search,
line_search_inner=line_search_inner,
hessian=hessian,
epsilon=epsilon,
max_iteration=max_iteration,
print_iter=print_iter,
trajectory=trajectory,
verbose=verbose,
memory_mode=memory_mode,
accelerated=accelerated,
lazy_tolerance=lazy_tolerance,
gradient=gradient,
callback=callback,
traj_data=traj_data,
timeout=timeout,
weight_purge_threshold=weight_purge_threshold,
extra_vertex_storage=extra_vertex_storage,
add_dropped_vertices=add_dropped_vertices,
use_extra_vertex_storage=use_extra_vertex_storage,
linesearch_workspace=linesearch_workspace,
linesearch_inner_workspace=linesearch_inner_workspace,
renorm_interval=renorm_interval,
lmo_kwargs=lmo_kwargs,
)
end
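# Hypothetical usage sketch for the entry point above; the quadratic objective, the
# probability-simplex oracle, and the helper name are illustrative assumptions.
function _bcg_example(n=100)
    b = randn(n)
    f(x) = sum(abs2, x - b)
    grad!(storage, x) = storage .= 2 .* (x .- b)
    lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
    x0 = FrankWolfe.compute_extreme_point(lmo, b)
    res = blended_conditional_gradient(f, grad!, lmo, x0; epsilon=1e-6, verbose=false)
    # res is a named tuple; res.active_set holds the vertices spanning the solution
    return res.primal, res.dual_gap, length(res.active_set)
end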
function blended_conditional_gradient(
f,
grad!,
lmo,
active_set::AbstractActiveSet{AT,R};
line_search::LineSearchMethod=Adaptive(),
line_search_inner::LineSearchMethod=Adaptive(),
hessian=nothing,
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
accelerated=false,
lazy_tolerance=2.0,
gradient=nothing,
callback=nothing,
traj_data=[],
timeout=Inf,
weight_purge_threshold=weight_purge_threshold_default(R),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
linesearch_workspace=nothing,
linesearch_inner_workspace=nothing,
renorm_interval=1000,
lmo_kwargs...,
) where {AT,R}
# format string for output of the algorithm
format_string = "%6s %13s %14e %14e %14e %14e %14e %14i %14i\n"
headers = (
"Type",
"Iteration",
"Primal",
"Dual",
"Dual Gap",
"Time",
"It/sec",
"#ActiveSet",
"#non-simplex",
)
function format_state(state, active_set, non_simplex_iter)
rep = (
steptype_string[Symbol(state.step_type)],
string(state.t),
Float64(state.primal),
Float64(state.primal - state.dual_gap),
Float64(state.dual_gap),
state.time,
state.t / state.time,
length(active_set),
non_simplex_iter,
)
return rep
end
t = 0
primal = Inf
dual_gap = Inf
x = active_set.x
if gradient === nothing
gradient = collect(x)
end
d = similar(x)
primal = f(x)
grad!(gradient, x)
# initial gap estimate computation
vmax = compute_extreme_point(lmo, gradient)
phi = (fast_dot(gradient, x) - fast_dot(gradient, vmax)) / 2
dual_gap = phi
if trajectory
callback = make_trajectory_callback(callback, traj_data)
end
if verbose
callback = make_print_callback(callback, print_iter, headers, format_string, format_state)
end
step_type = ST_REGULAR
time_start = time_ns()
v = x
if line_search isa Agnostic || line_search isa Nonconvex
@error("Lazification is not known to converge with open-loop step size strategies.")
end
if verbose
println("\nBlended Conditional Gradients Algorithm.")
NumType = eltype(x)
println(
"MEMORY_MODE: $memory_mode STEPSIZE: $line_search EPSILON: $epsilon MAXITERATION: $max_iteration TYPE: $NumType",
)
grad_type = typeof(gradient)
println("GRADIENTTYPE: $grad_type lazy_tolerance: $lazy_tolerance")
println("LMO: $(typeof(lmo))")
if (use_extra_vertex_storage || add_dropped_vertices) && extra_vertex_storage === nothing
@warn(
"use_extra_vertex_storage and add_dropped_vertices options are only usable with a extra_vertex_storage storage"
)
end
end
# ensure x is a mutable type
if !isa(x, Union{Array,SparseArrays.AbstractSparseArray})
x = copyto!(similar(x), x)
end
non_simplex_iter = 0
force_fw_step = false
if linesearch_workspace === nothing
linesearch_workspace = build_linesearch_workspace(line_search, x, gradient)
end
if linesearch_inner_workspace === nothing
linesearch_inner_workspace = build_linesearch_workspace(line_search_inner, x, gradient)
end
if extra_vertex_storage === nothing
use_extra_vertex_storage = add_dropped_vertices = false
end
# this is never used and only defines gamma in the scope outside of the loop
gamma = NaN
while t <= max_iteration && (phi ≥ epsilon || t == 0) # do at least one iteration for consistency with other algos
#####################
# managing time and Ctrl-C
#####################
time_at_loop = time_ns()
if t == 0
time_start = time_at_loop
end
# time is measured at beginning of loop for consistency throughout all algorithms
tot_time = (time_at_loop - time_start) / 1e9
if timeout < Inf
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
#####################
t += 1
# TODO replace with single call interface from function_gradient.jl
# Minimize over the convex hull until the strong-Wolfe gap is below a given tolerance.
num_simplex_descent_steps = minimize_over_convex_hull!(
f,
grad!,
gradient,
active_set::AbstractActiveSet,
phi,
t,
time_start,
non_simplex_iter,
line_search_inner=line_search_inner,
verbose=verbose,
print_iter=print_iter,
hessian=hessian,
accelerated=accelerated,
max_iteration=max_iteration,
callback=callback,
timeout=timeout,
format_string=format_string,
linesearch_inner_workspace=linesearch_inner_workspace,
memory_mode=memory_mode,
renorm_interval=renorm_interval,
use_extra_vertex_storage=use_extra_vertex_storage,
extra_vertex_storage=extra_vertex_storage,
)
t += num_simplex_descent_steps
#Take a FW step.
x = get_active_set_iterate(active_set)
primal = f(x)
grad!(gradient, x)
# compute new atom
(v, value) = lp_separation_oracle(
lmo,
active_set,
gradient,
phi,
lazy_tolerance;
inplace_loop=(memory_mode isa InplaceEmphasis),
force_fw_step=force_fw_step,
use_extra_vertex_storage=use_extra_vertex_storage,
extra_vertex_storage=extra_vertex_storage,
phi=phi,
lmo_kwargs...,
)
force_fw_step = false
xval = fast_dot(x, gradient)
if value > xval - phi / lazy_tolerance
step_type = ST_DUALSTEP
# setting gap estimate as ∇f(x) (x - v_FW) / 2
phi = (xval - value) / 2
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set, non_simplex_iter) === false
break
end
end
else
step_type = ST_REGULAR
d = muladd_memory_mode(memory_mode, d, x, v)
gamma = perform_line_search(
line_search,
t,
f,
grad!,
gradient,
x,
d,
1.0,
linesearch_workspace,
memory_mode,
)
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
d,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set, non_simplex_iter) === false
break
end
end
if gamma == 1.0
if add_dropped_vertices
for vtx in active_set.atoms
if vtx != v
push!(extra_vertex_storage, vtx)
end
end
end
active_set_initialize!(active_set, v)
else
active_set_update!(active_set, gamma, v, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
end
end
x = get_active_set_iterate(active_set)
dual_gap = phi
non_simplex_iter += 1
end
## post-processing and cleanup after loop
# report last iteration
if callback !== nothing
x = get_active_set_iterate(active_set)
grad!(gradient, x)
v = compute_extreme_point(lmo, gradient)
primal = f(x)
dual_gap = fast_dot(x, gradient) - fast_dot(v, gradient)
tot_time = (time_ns() - time_start) / 1e9
step_type = ST_LAST
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set, non_simplex_iter)
end
# cleanup the active set, renormalize, and recompute values
active_set_cleanup!(active_set, weight_purge_threshold=weight_purge_threshold, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
active_set_renormalize!(active_set)
x = get_active_set_iterate(active_set)
grad!(gradient, x)
v = compute_extreme_point(lmo, gradient)
primal = f(x)
#dual_gap = 2phi
dual_gap = fast_dot(x, gradient) - fast_dot(v, gradient)
# report post-processed iteration
if callback !== nothing
step_type = ST_POSTPROCESS
tot_time = (time_ns() - time_start) / 1e9
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set, non_simplex_iter)
end
return (x=x, v=v, primal=primal, dual_gap=dual_gap, traj_data=traj_data, active_set=active_set)
end
"""
minimize_over_convex_hull!
Given a function f with gradient grad! and an active set
active_set, this function minimizes f over
the convex hull of the active set until the strong-Wolfe
gap over the active set is below the tolerance.
It will either directly minimize over the convex hull using
simplex gradient descent, or it will transform the problem
to barycentric coordinates and minimize over the unit
probability simplex using gradient descent or Nesterov's
accelerated gradient descent.
"""
function minimize_over_convex_hull!(
f,
grad!,
gradient,
active_set::AbstractActiveSet{AT,R},
tolerance,
t,
time_start,
non_simplex_iter;
line_search_inner=Adaptive(),
verbose=true,
print_iter=1000,
hessian=nothing,
weight_purge_threshold=weight_purge_threshold_default(R),
accelerated=false,
max_iteration,
callback,
timeout=Inf,
format_string=nothing,
linesearch_inner_workspace=nothing,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
renorm_interval=1000,
use_extra_vertex_storage=false,
extra_vertex_storage=nothing,
) where {AT,R}
#No hessian is known, use simplex gradient descent.
if hessian === nothing
number_of_steps = simplex_gradient_descent_over_convex_hull(
f,
grad!,
gradient,
active_set::AbstractActiveSet,
tolerance,
t,
time_start,
non_simplex_iter,
memory_mode,
line_search_inner=line_search_inner,
verbose=verbose,
print_iter=print_iter,
weight_purge_threshold=weight_purge_threshold,
max_iteration=max_iteration,
callback=callback,
timeout=timeout,
format_string=format_string,
linesearch_inner_workspace=linesearch_inner_workspace,
use_extra_vertex_storage=use_extra_vertex_storage,
extra_vertex_storage=extra_vertex_storage,
)
else
x = get_active_set_iterate(active_set)
grad!(gradient, x)
#Rewrite as problem over the simplex
M, b = build_reduced_problem(
active_set.atoms,
hessian,
active_set.weights,
gradient,
tolerance,
)
#Early exit if we have detected that the strong-Wolfe gap is below the desired tolerance while building the reduced problem.
if isnothing(M)
return 0
end
T = eltype(M)
S = schur(M)
L_reduced = maximum(S.values)::T
reduced_f(y) =
f(x) - fast_dot(gradient, x) +
0.5 * dot(x, hessian, x) +
fast_dot(b, y) +
0.5 * dot(y, M, y)
function reduced_grad!(storage, x)
return storage .= b + M * x
end
#Solve using Nesterov's AGD
if accelerated
mu_reduced = minimum(S.values)::T
if L_reduced / mu_reduced > 1.0
new_weights, number_of_steps =
accelerated_simplex_gradient_descent_over_probability_simplex(
active_set.weights,
reduced_f,
reduced_grad!,
tolerance,
t,
time_start,
active_set,
verbose=verbose,
L=L_reduced,
mu=mu_reduced,
max_iteration=max_iteration,
callback=callback,
timeout=timeout,
memory_mode=memory_mode,
non_simplex_iter=non_simplex_iter,
)
@. active_set.weights = new_weights
end
end
if !accelerated || L_reduced / mu_reduced == 1.0
#Solve using gradient descent.
new_weights, number_of_steps = simplex_gradient_descent_over_probability_simplex(
active_set.weights,
reduced_f,
reduced_grad!,
tolerance,
t,
time_start,
non_simplex_iter,
active_set,
verbose=verbose,
print_iter=print_iter,
L=L_reduced,
max_iteration=max_iteration,
callback=callback,
timeout=timeout,
)
@. active_set.weights = new_weights
end
end
active_set_cleanup!(active_set, weight_purge_threshold=weight_purge_threshold, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
# if we reached a renorm interval
if (t + number_of_steps) ÷ renorm_interval > t ÷ renorm_interval
active_set_renormalize!(active_set)
compute_active_set_iterate!(active_set)
end
return number_of_steps
end
"""
build_reduced_problem(atoms::AbstractVector{<:AbstractVector}, hessian, weights, gradient, tolerance)
Given an active set formed by vectors, a (constant)
Hessian, and a gradient, this constructs a quadratic problem
over the unit probability simplex that is equivalent to
minimizing the original function over the convex hull of the
active set. If λ are the barycentric coordinates of dimension
equal to the cardinality of the active set, the objective
function is:
f(λ) = reduced_linear^T λ + 0.5 * λ^T reduced_hessian λ
In the case where we find that the current iterate has a strong-Wolfe
gap over the convex hull of the active set that is below the tolerance
we return nothing (as there is nothing to do).
"""
function build_reduced_problem(
atoms::AbstractVector{<:ScaledHotVector},
hessian,
weights,
gradient,
tolerance,
)
n = length(atoms[1])
k = length(atoms)
reduced_linear = [fast_dot(gradient, a) for a in atoms]
if strong_frankwolfe_gap(reduced_linear) <= tolerance
return nothing, nothing
end
aux_matrix = zeros(eltype(atoms[1].active_val), n, k)
#Compute the intermediate matrix.
for i in 1:k
aux_matrix[:, i] .= atoms[i].active_val * hessian[atoms[i].val_idx, :]
end
#Compute the final matrix.
reduced_hessian = zeros(eltype(atoms[1].active_val), k, k)
for i in 1:k
reduced_hessian[:, i] .= atoms[i].active_val * aux_matrix[atoms[i].val_idx, :]
end
reduced_linear .-= reduced_hessian * weights
return reduced_hessian, reduced_linear
end
function build_reduced_problem(
atoms::AbstractVector{<:SparseArrays.AbstractSparseArray},
hessian,
weights,
gradient,
tolerance,
)
n = length(atoms[1])
k = length(atoms)
reduced_linear = [fast_dot(gradient, a) for a in atoms]
if strong_frankwolfe_gap(reduced_linear) <= tolerance
return nothing, nothing
end
#Construct the matrix of vertices.
vertex_matrix = spzeros(n, k)
for i in 1:k
vertex_matrix[:, i] .= atoms[i]
end
reduced_hessian = transpose(vertex_matrix) * hessian * vertex_matrix
reduced_linear .-= reduced_hessian * weights
return reduced_hessian, reduced_linear
end
function build_reduced_problem(
atoms::AbstractVector{<:AbstractVector},
hessian,
weights,
gradient,
tolerance,
)
n = length(atoms[1])
k = length(atoms)
reduced_linear = [fast_dot(gradient, a) for a in atoms]
if strong_frankwolfe_gap(reduced_linear) <= tolerance
return nothing, nothing
end
#Construct the matrix of vertices.
vertex_matrix = zeros(n, k)
for i in 1:k
vertex_matrix[:, i] .= atoms[i]
end
reduced_hessian = transpose(vertex_matrix) * hessian * vertex_matrix
reduced_linear .-= reduced_hessian * weights
return reduced_hessian, reduced_linear
end
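# Small numerical sketch of the reduction above (the helper name and random data are
# illustrative assumptions): with vertex matrix V whose columns are the atoms, x = V * w,
# and the quadratic model m(y) = g' * (y - x) + 0.5 * (y - x)' * H * (y - x), the reduced
# objective b' * λ + 0.5 * λ' * M * λ agrees with m(V * λ) up to the constant
# 0.5 * w' * M * w - g' * x.
function _reduced_problem_sketch(n=5, k=3)
    A = randn(n, n)
    H = A' * A + I
    atoms = [rand(n) for _ in 1:k]
    w = fill(1 / k, k)
    V = reduce(hcat, atoms)
    x = V * w
    g = randn(n)
    M, b = build_reduced_problem(atoms, H, w, g, -1.0)
    λ = normalize!(rand(k), 1)
    model = g' * (V * λ - x) + 0.5 * (V * λ - x)' * H * (V * λ - x)
    reduced = b' * λ + 0.5 * λ' * M * λ + 0.5 * w' * M * w - g' * x
    return model ≈ reduced
end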
"""
Checks the strong Frank-Wolfe gap for the reduced problem.
"""
function strong_frankwolfe_gap(gradient)
val_min = Inf
val_max = -Inf
for i in 1:length(gradient)
temp_val = gradient[i]
if temp_val < val_min
val_min = temp_val
end
if temp_val > val_max
val_max = temp_val
end
end
return val_max - val_min
end
"""
accelerated_simplex_gradient_descent_over_probability_simplex
Minimizes an objective function over the unit probability simplex
until the Strong-Wolfe gap is below tolerance using Nesterov's
accelerated gradient descent.
"""
function accelerated_simplex_gradient_descent_over_probability_simplex(
initial_point,
reduced_f,
reduced_grad!,
tolerance,
t,
time_start,
active_set::AbstractActiveSet;
verbose=false,
L=1.0,
mu=1.0,
max_iteration,
callback,
timeout=Inf,
memory_mode::MemoryEmphasis,
non_simplex_iter=0,
)
number_of_steps = 0
x = deepcopy(initial_point)
x_old = deepcopy(initial_point)
y = deepcopy(initial_point)
gradient_x = similar(x)
gradient_y = similar(x)
reduced_grad!(gradient_x, x)
reduced_grad!(gradient_y, x)
strong_wolfe_gap = strong_frankwolfe_gap_probability_simplex(gradient_x, x)
q = mu / L
# If the strong convexity constant is negligible, fall back to the accelerated algorithm for merely convex objective functions.
if mu < 1.0e-3
alpha = 0.0
alpha_old = 0.0
else
gamma = (1 - sqrt(q)) / (1 + sqrt(q))
end
while strong_wolfe_gap > tolerance && t + number_of_steps <= max_iteration
@. x_old = x
reduced_grad!(gradient_y, y)
x = projection_simplex_sort(y .- gradient_y / L)
if mu < 1.0e-3
alpha_old = alpha
alpha = 0.5 * (1 + sqrt(1 + 4 * alpha^2))
gamma = (alpha_old - 1.0) / alpha
end
diff = similar(x)
diff = muladd_memory_mode(memory_mode, diff, x, x_old)
y = muladd_memory_mode(memory_mode, y, x, -gamma, diff)
number_of_steps += 1
primal = reduced_f(x)
reduced_grad!(gradient_x, x)
strong_wolfe_gap = strong_frankwolfe_gap_probability_simplex(gradient_x, x)
step_type = ST_SIMPLEXDESCENT
if callback !== nothing
state = CallbackState(
t + number_of_steps,
primal,
primal - tolerance,
tolerance,
(time_ns() - time_start) / 1e9,
x,
y,
nothing,
gamma,
reduced_f,
reduced_grad!,
nothing,
gradient_x,
step_type,
)
if callback(state, active_set, non_simplex_iter) === false
break
end
end
if timeout < Inf
tot_time = (time_ns() - time_start) / 1e9
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
end
return x, number_of_steps
end
"""
simplex_gradient_descent_over_probability_simplex
Minimizes an objective function over the unit probability simplex
until the Strong-Wolfe gap is below tolerance using gradient descent.
"""
function simplex_gradient_descent_over_probability_simplex(
initial_point,
reduced_f,
reduced_grad!,
tolerance,
t,
time_start,
non_simplex_iter,
active_set::AbstractActiveSet;
verbose=false,
print_iter=1000,
L=1.0,
max_iteration,
callback,
timeout=Inf,
)
number_of_steps = 0
x = deepcopy(initial_point)
gradient = collect(x)
reduced_grad!(gradient, x)
strong_wolfe_gap = strong_frankwolfe_gap_probability_simplex(gradient, x)
while strong_wolfe_gap > tolerance && t + number_of_steps <= max_iteration
x = projection_simplex_sort(x .- gradient / L)
number_of_steps = number_of_steps + 1
primal = reduced_f(x)
reduced_grad!(gradient, x)
strong_wolfe_gap = strong_frankwolfe_gap_probability_simplex(gradient, x)
tot_time = (time_ns() - time_start) / 1e9
step_type = ST_SIMPLEXDESCENT
if callback !== nothing
state = CallbackState(
t + number_of_steps,
primal,
primal - tolerance,
tolerance,
tot_time,
x,
nothing,
nothing,
inv(L),
reduced_f,
reduced_grad!,
nothing,
gradient,
step_type,
)
if callback(state, active_set, non_simplex_iter) === false
break
end
end
if timeout < Inf
tot_time = (time_ns() - time_start) / 1e9
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
end
return x, number_of_steps
end
"""
projection_simplex_sort(x; s=1.0)
Perform a projection onto the probability simplex of radius `s`
using a sorting algorithm.
"""
function projection_simplex_sort(x; s=1.0)
n = length(x)
if sum(x) == s && all(>=(0.0), x)
return x
end
v = x .- maximum(x)
u = sort(v, rev=true)
cssv = cumsum(u)
rho = sum(u .* collect(1:1:n) .> (cssv .- s)) - 1
theta = (cssv[rho+1] - s) / (rho + 1)
w = clamp.(v .- theta, 0.0, Inf)
return w
end
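# Worked example (illustrative):
#     projection_simplex_sort([1.2, 0.3, -0.1]) ≈ [0.95, 0.05, 0.0]
# the result is nonnegative and sums to s = 1.0.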
"""
strong_frankwolfe_gap_probability_simplex
Compute the Strong-Wolfe gap over the unit probability simplex
given a gradient.
"""
function strong_frankwolfe_gap_probability_simplex(gradient, x)
val_min = Inf
val_max = -Inf
for i in 1:length(gradient)
if x[i] > 0
temp_val = gradient[i]
if temp_val < val_min
val_min = temp_val
end
if temp_val > val_max
val_max = temp_val
end
end
end
return val_max - val_min
end
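# Worked example (illustrative): only coordinates in the support of x are considered, so for
# gradient = [3.0, 1.0, 2.0] and x = [0.5, 0.0, 0.5] the function returns 3.0 - 2.0 = 1.0.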
"""
simplex_gradient_descent_over_convex_hull(f, grad!, gradient, active_set, tolerance, t, time_start, non_simplex_iter)
Minimizes an objective function over the convex hull of the active set
until the Strong-Wolfe gap is below tolerance using simplex gradient
descent.
"""
function simplex_gradient_descent_over_convex_hull(
f,
grad!,
gradient,
active_set::AbstractActiveSet{AT,R},
tolerance,
t,
time_start,
non_simplex_iter,
memory_mode::MemoryEmphasis=InplaceEmphasis();
line_search_inner=Adaptive(),
verbose=true,
print_iter=1000,
hessian=nothing,
weight_purge_threshold=weight_purge_threshold_default(R),
max_iteration,
callback,
timeout=Inf,
format_string=nothing,
linesearch_inner_workspace=build_linesearch_workspace(
line_search_inner,
active_set.x,
gradient,
),
use_extra_vertex_storage=false,
extra_vertex_storage=nothing,
) where {AT,R}
number_of_steps = 0
x = get_active_set_iterate(active_set)
if line_search_inner isa Adaptive
line_search_inner.L_est = Inf
end
while t + number_of_steps ≤ max_iteration
grad!(gradient, x)
#Check if strong Wolfe gap over the convex hull is small enough.
c = [fast_dot(gradient, a) for a in active_set.atoms]
if maximum(c) - minimum(c) <= tolerance || t + number_of_steps ≥ max_iteration
return number_of_steps
end
#Otherwise perform simplex steps until we get there.
k = length(active_set)
csum = sum(c)
c .-= (csum / k)
# name change to stay consistent with the paper, c is actually updated in-place
d = c
# NOTE: sometimes the direction is non-improving;
# the usual suspects are floating-point errors when multiplying atoms with near-zero weights.
# In that case, d is recomputed below in higher precision and, if it is still non-improving, we fall back to a FW step.
# Computing the quantity below is the same as computing <-\nabla f(x), direction>.
# If <-\nabla f(x), direction> >= 0, the direction is a descent direction.
descent_direction_product = fast_dot(d, d) + (csum / k) * sum(d)
@inbounds if descent_direction_product < eps(float(eltype(d))) * length(d)
current_iteration = t + number_of_steps
@warn "Non-improving d ($descent_direction_product) due to numerical instability in iteration $current_iteration. Temporarily upgrading precision to BigFloat for the current iteration."
# extended warning - we can discuss what to integrate
# If higher accuracy is required, consider using DoubleFloats.Double64 (still quite fast) and if that does not help BigFloat (slower) as type for the numbers.
# Alternatively, consider using AFW (with lazy = true) instead."
bdir = big.(gradient)
c = [fast_dot(bdir, a) for a in active_set.atoms]
csum = sum(c)
c .-= csum / k
d = c
descent_direction_product_inner = fast_dot(d, d) + (csum / k) * sum(d)
if descent_direction_product_inner < 0
@warn "d non-improving in large precision, forcing FW"
@warn "dot value: $descent_direction_product_inner"
return number_of_steps
end
end
η = eltype(d)(Inf)
rem_idx = -1
@inbounds for idx in eachindex(d)
if d[idx] > 0
max_val = active_set.weights[idx] / d[idx]
if η > max_val
η = max_val
rem_idx = idx
end
end
end
# TODO at some point avoid materializing both x and y
x = copy(active_set.x)
η = max(0, η)
@. active_set.weights -= η * d
y = copy(compute_active_set_iterate!(active_set))
number_of_steps += 1
gamma = NaN
if f(x) ≥ f(y)
active_set_cleanup!(active_set, weight_purge_threshold=weight_purge_threshold, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
else
if line_search_inner isa Adaptive
gamma = perform_line_search(
line_search_inner,
t,
f,
grad!,
gradient,
x,
x - y,
1.0,
linesearch_inner_workspace,
memory_mode,
)
#If the stepsize is that small we probably need to increase the accuracy of
#the types we are using.
if gamma < eps(float(gamma))
# @warn "Upgrading the accuracy of the adaptive line search."
gamma = perform_line_search(
line_search_inner,
t,
f,
grad!,
gradient,
x,
x - y,
1.0,
linesearch_inner_workspace,
memory_mode,
should_upgrade=Val{true}(),
)
end
else
gamma = perform_line_search(
line_search_inner,
t,
f,
grad!,
gradient,
x,
x - y,
1.0,
linesearch_inner_workspace,
memory_mode,
)
end
gamma = min(1, gamma)
# step back from y to x by (1 - γ) η d
# new point is x - γ η d
if gamma == 1.0
active_set_cleanup!(active_set, weight_purge_threshold=weight_purge_threshold, add_dropped_vertices=use_extra_vertex_storage, vertex_storage=extra_vertex_storage)
else
@. active_set.weights += η * (1 - gamma) * d
@. active_set.x = x + gamma * (y - x)
end
end
x = get_active_set_iterate(active_set)
primal = f(x)
dual_gap = tolerance
step_type = ST_SIMPLEXDESCENT
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
(time_ns() - time_start) / 1e9,
x,
y,
nothing,
η * (1 - gamma),
f,
grad!,
nothing,
gradient,
step_type,
)
callback(state, active_set, non_simplex_iter)
end
if timeout < Inf
tot_time = (time_ns() - time_start) / 1e9
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
end
return number_of_steps
end
"""
Returns either a tuple `(y, val)` with `y` an atom from the active set satisfying
the progress criterion and `val` the corresponding gap `dot(y, direction)`
or the same tuple with `y` from the LMO.
`inplace_loop` controls whether the iterate type allows in-place writes.
`kwargs` are passed on to the LMO oracle.
"""
function lp_separation_oracle(
lmo::LinearMinimizationOracle,
active_set::AbstractActiveSet,
direction,
min_gap,
lazy_tolerance;
inplace_loop=false,
force_fw_step::Bool=false,
use_extra_vertex_storage=false,
extra_vertex_storage=nothing,
phi=Inf,
kwargs...,
)
# if FW step forced, ignore active set
if !force_fw_step
ybest = active_set.atoms[1]
x = active_set.weights[1] * active_set.atoms[1]
if inplace_loop
if !isa(x, Union{Array,SparseArrays.AbstractSparseArray})
if x isa AbstractVector
x = convert(SparseVector{eltype(x)}, x)
else
x = convert(SparseArrays.SparseMatrixCSC{eltype(x)}, x)
end
end
end
val_best = fast_dot(direction, ybest)
for idx in 2:length(active_set)
y = active_set.atoms[idx]
if inplace_loop
x .+= active_set.weights[idx] * y
else
x += active_set.weights[idx] * y
end
val = fast_dot(direction, y)
if val < val_best
val_best = val
ybest = y
end
end
xval = fast_dot(direction, x)
if xval - val_best ≥ min_gap / lazy_tolerance
return (ybest, val_best)
end
end
# optionally: try vertex storage
if use_extra_vertex_storage && extra_vertex_storage !== nothing
lazy_threshold = fast_dot(direction, x) - phi / lazy_tolerance
(found_better_vertex, new_forward_vertex) =
storage_find_argmin_vertex(extra_vertex_storage, direction, lazy_threshold)
if found_better_vertex
@debug("Found acceptable lazy vertex in storage")
y = new_forward_vertex
else
# otherwise, call the LMO
y = compute_extreme_point(lmo, direction; kwargs...)
end
else
y = compute_extreme_point(lmo, direction; kwargs...)
end
# do not return nothing: return y and fast_dot(direction, y); the caller uses y for the step and updates phi as in LCG (lines 402 - 406)
return (y, fast_dot(direction, y))
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |
|
[
"MIT"
] | 0.4.1 | 6efeb9baf0fbec3f91d1cb985b8a7eb4151c446f | code | 17171 |
"""
blended_pairwise_conditional_gradient(f, grad!, lmo, x0; kwargs...)
Implements the BPCG algorithm from [Tsuji, Tanaka, Pokutta (2021)](https://arxiv.org/abs/2110.12650).
The method maintains an active set of current vertices.
Unlike the away-step algorithm, it transfers weight from an away vertex directly to another vertex of the active set.
"""
function blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
x0;
line_search::LineSearchMethod=Adaptive(),
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
gradient=nothing,
callback=nothing,
traj_data=[],
timeout=Inf,
renorm_interval=1000,
lazy=false,
linesearch_workspace=nothing,
lazy_tolerance=2.0,
weight_purge_threshold=weight_purge_threshold_default(eltype(x0)),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
recompute_last_vertex=true,
)
# add the first vertex to active set from initialization
active_set = ActiveSet([(1.0, x0)])
return blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
active_set,
line_search=line_search,
epsilon=epsilon,
max_iteration=max_iteration,
print_iter=print_iter,
trajectory=trajectory,
verbose=verbose,
memory_mode=memory_mode,
gradient=gradient,
callback=callback,
traj_data=traj_data,
timeout=timeout,
renorm_interval=renorm_interval,
lazy=lazy,
linesearch_workspace=linesearch_workspace,
lazy_tolerance=lazy_tolerance,
weight_purge_threshold=weight_purge_threshold,
extra_vertex_storage=extra_vertex_storage,
add_dropped_vertices=add_dropped_vertices,
use_extra_vertex_storage=use_extra_vertex_storage,
recompute_last_vertex=recompute_last_vertex,
)
end
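# Hypothetical usage sketch for the entry point above; the quadratic objective, the
# K-sparse polytope oracle, and the helper name are illustrative assumptions.
function _bpcg_example(n=100, k=5)
    b = randn(n)
    f(x) = sum(abs2, x - b)
    grad!(storage, x) = storage .= 2 .* (x .- b)
    lmo = FrankWolfe.KSparseLMO(k, 1.0)
    x0 = FrankWolfe.compute_extreme_point(lmo, b)
    res = blended_pairwise_conditional_gradient(f, grad!, lmo, x0; epsilon=1e-6, lazy=true)
    return res.primal, res.dual_gap, length(res.active_set)
end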
"""
blended_pairwise_conditional_gradient(f, grad!, lmo, active_set::AbstractActiveSet; kwargs...)
Warm-starts BPCG with a pre-defined `active_set`.
"""
function blended_pairwise_conditional_gradient(
f,
grad!,
lmo,
active_set::AbstractActiveSet{AT,R};
line_search::LineSearchMethod=Adaptive(),
epsilon=1e-7,
max_iteration=10000,
print_iter=1000,
trajectory=false,
verbose=false,
memory_mode::MemoryEmphasis=InplaceEmphasis(),
gradient=nothing,
callback=nothing,
traj_data=[],
timeout=Inf,
renorm_interval=1000,
lazy=false,
linesearch_workspace=nothing,
lazy_tolerance=2.0,
weight_purge_threshold=weight_purge_threshold_default(R),
extra_vertex_storage=nothing,
add_dropped_vertices=false,
use_extra_vertex_storage=false,
recompute_last_vertex=true,
) where {AT,R}
# format string for output of the algorithm
format_string = "%6s %13s %14e %14e %14e %14e %14e %14i\n"
headers = ("Type", "Iteration", "Primal", "Dual", "Dual Gap", "Time", "It/sec", "#ActiveSet")
function format_state(state, active_set, args...)
rep = (
steptype_string[Symbol(state.step_type)],
string(state.t),
Float64(state.primal),
Float64(state.primal - state.dual_gap),
Float64(state.dual_gap),
state.time,
state.t / state.time,
length(active_set),
)
return rep
end
if trajectory
callback = make_trajectory_callback(callback, traj_data)
end
if verbose
callback = make_print_callback(callback, print_iter, headers, format_string, format_state)
end
t = 0
compute_active_set_iterate!(active_set)
x = get_active_set_iterate(active_set)
primal = convert(eltype(x), Inf)
step_type = ST_REGULAR
time_start = time_ns()
d = similar(x)
if gradient === nothing
gradient = collect(x)
end
if verbose
println("\nBlended Pairwise Conditional Gradient Algorithm.")
NumType = eltype(x)
println(
"MEMORY_MODE: $memory_mode STEPSIZE: $line_search EPSILON: $epsilon MAXITERATION: $max_iteration TYPE: $NumType",
)
grad_type = typeof(gradient)
println("GRADIENTTYPE: $grad_type LAZY: $lazy lazy_tolerance: $lazy_tolerance")
println("LMO: $(typeof(lmo))")
if use_extra_vertex_storage && !lazy
@info("vertex storage only used in lazy mode")
end
if (use_extra_vertex_storage || add_dropped_vertices) && extra_vertex_storage === nothing
@warn(
"use_extra_vertex_storage and add_dropped_vertices options are only usable with a extra_vertex_storage storage"
)
end
end
grad!(gradient, x)
v = compute_extreme_point(lmo, gradient)
# if !lazy, phi is maintained as the global dual gap
phi = max(0, fast_dot(x, gradient) - fast_dot(v, gradient))
local_gap = zero(phi)
gamma = one(phi)
if linesearch_workspace === nothing
linesearch_workspace = build_linesearch_workspace(line_search, x, gradient)
end
if extra_vertex_storage === nothing
use_extra_vertex_storage = add_dropped_vertices = false
end
while t <= max_iteration && phi >= max(epsilon, eps(epsilon))
# managing time limit
time_at_loop = time_ns()
if t == 0
time_start = time_at_loop
end
# time is measured at beginning of loop for consistency throughout all algorithms
tot_time = (time_at_loop - time_start) / 1e9
if timeout < Inf
if tot_time ≥ timeout
if verbose
@info "Time limit reached"
end
break
end
end
#####################
t += 1
# compute current iterate from active set
x = get_active_set_iterate(active_set)
primal = f(x)
if t > 1
grad!(gradient, x)
end
_, v_local, v_local_loc, _, a_lambda, a, a_loc, _, _ =
active_set_argminmax(active_set, gradient)
dot_forward_vertex = fast_dot(gradient, v_local)
dot_away_vertex = fast_dot(gradient, a)
local_gap = dot_away_vertex - dot_forward_vertex
if !lazy
if t > 1
v = compute_extreme_point(lmo, gradient)
dual_gap = fast_dot(gradient, x) - fast_dot(gradient, v)
phi = dual_gap
end
end
# minor modification from original paper for improved sparsity
# (proof follows with minor modification when estimating the step)
if local_gap ≥ phi / lazy_tolerance
d = muladd_memory_mode(memory_mode, d, a, v_local)
vertex_taken = v_local
gamma_max = a_lambda
gamma = perform_line_search(
line_search,
t,
f,
grad!,
gradient,
x,
d,
gamma_max,
linesearch_workspace,
memory_mode,
)
gamma = min(gamma_max, gamma)
step_type = gamma ≈ gamma_max ? ST_DROP : ST_PAIRWISE
if callback !== nothing
state = CallbackState(
t,
primal,
primal - phi,
phi,
tot_time,
x,
vertex_taken,
d,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set, a) === false
break
end
end
# reached maximum of lambda -> dropping away vertex
if gamma ≈ gamma_max
active_set.weights[v_local_loc] += gamma
deleteat!(active_set, a_loc)
if add_dropped_vertices
push!(extra_vertex_storage, a)
end
else # transfer weight from away to local FW
active_set.weights[a_loc] -= gamma
active_set.weights[v_local_loc] += gamma
@assert active_set_validate(active_set)
end
active_set_update_iterate_pairwise!(active_set.x, gamma, v_local, a)
else # add to active set
if lazy # otherwise, v computed above already
# optionally try to use the storage
if use_extra_vertex_storage
lazy_threshold = fast_dot(gradient, x) - phi / lazy_tolerance
(found_better_vertex, new_forward_vertex) =
storage_find_argmin_vertex(extra_vertex_storage, gradient, lazy_threshold)
if found_better_vertex
if verbose
@debug("Found acceptable lazy vertex in storage")
end
v = new_forward_vertex
step_type = ST_LAZYSTORAGE
else
v = compute_extreme_point(lmo, gradient)
step_type = ST_REGULAR
end
else
# for t == 1, v is already computed before first iteration
if t > 1
v = compute_extreme_point(lmo, gradient)
end
step_type = ST_REGULAR
end
else # Set the correct flag step.
step_type = ST_REGULAR
end
vertex_taken = v
dual_gap = fast_dot(gradient, x) - fast_dot(gradient, v)
# if we are about to exit, compute dual_gap with the cleaned-up x
if dual_gap ≤ epsilon
active_set_renormalize!(active_set)
active_set_cleanup!(active_set; weight_purge_threshold=weight_purge_threshold)
compute_active_set_iterate!(active_set)
x = get_active_set_iterate(active_set)
grad!(gradient, x)
dual_gap = fast_dot(gradient, x) - fast_dot(gradient, v)
end
# Note: In the following, we differentiate between lazy and non-lazy updates.
# The reason is that the non-lazy version does not use phi but the lazy one heavily depends on it.
# It is important that the phi is only updated after dropping
# below phi / lazy_tolerance, as otherwise we simply have a "lagging" dual_gap estimate that just slows down convergence.
# The logic is as follows:
# - for non-lazy: we accept everything and there are no dual steps
# - for lazy: we also accept slightly weaker vertices, those satisfying phi / lazy_tolerance
# this should simplify the criterion.
# DO NOT CHANGE without good reason and talk to Sebastian first for the logic behind this.
if !lazy || dual_gap ≥ phi / lazy_tolerance
d = muladd_memory_mode(memory_mode, d, x, v)
gamma = perform_line_search(
line_search,
t,
f,
grad!,
gradient,
x,
d,
one(eltype(x)),
linesearch_workspace,
memory_mode,
)
if callback !== nothing
state = CallbackState(
t,
primal,
primal - phi,
phi,
tot_time,
x,
vertex_taken,
d,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set) === false
break
end
end
# dropping active set and restarting from singleton
if gamma ≈ 1.0
if add_dropped_vertices
for vtx in active_set.atoms
if vtx != v
push!(extra_vertex_storage, vtx)
end
end
end
active_set_initialize!(active_set, v)
else
renorm = mod(t, renorm_interval) == 0
active_set_update!(active_set, gamma, v, renorm, nothing)
end
else # dual step
# set to computed dual_gap for consistency between the lazy and non-lazy run.
# that is ok as we scale with the K = 2.0 default anyways
# we only update the dual gap if the step was regular (not lazy from discarded set)
if step_type != ST_LAZYSTORAGE
phi = dual_gap
@debug begin
@assert step_type == ST_REGULAR
v2 = compute_extreme_point(lmo, gradient)
g = dot(gradient, x - v2)
if abs(g - dual_gap) > 100 * sqrt(eps())
error("dual gap estimation error $g $dual_gap")
end
end
else
@info "useless step"
end
step_type = ST_DUALSTEP
if callback !== nothing
state = CallbackState(
t,
primal,
primal - phi,
phi,
tot_time,
x,
vertex_taken,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
if callback(state, active_set) === false
break
end
end
end
end
if mod(t, renorm_interval) == 0
active_set_renormalize!(active_set)
x = compute_active_set_iterate!(active_set)
end
if (
((mod(t, print_iter) == 0 || step_type == ST_DUALSTEP) && verbose) ||
callback !== nothing ||
!(line_search isa Agnostic || line_search isa Nonconvex || line_search isa FixedStep)
)
primal = f(x)
end
end
# recompute everything once more for final verification / do not record to trajectory though for now!
# this is important as some variants do not recompute f(x) and the dual_gap regularly but only when reporting
# hence the final computation.
# do also cleanup of active_set due to many operations on the same set
if verbose
compute_active_set_iterate!(active_set)
x = get_active_set_iterate(active_set)
grad!(gradient, x)
v = compute_extreme_point(lmo, gradient)
primal = f(x)
phi_new = fast_dot(x, gradient) - fast_dot(v, gradient)
phi = phi_new < phi ? phi_new : phi
step_type = ST_LAST
tot_time = (time_ns() - time_start) / 1e9
if callback !== nothing
state = CallbackState(
t,
primal,
primal - phi,
phi,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set)
end
end
active_set_renormalize!(active_set)
active_set_cleanup!(active_set; weight_purge_threshold=weight_purge_threshold)
compute_active_set_iterate!(active_set)
x = get_active_set_iterate(active_set)
grad!(gradient, x)
# otherwise values are maintained to last iteration
if recompute_last_vertex
v = compute_extreme_point(lmo, gradient)
primal = f(x)
dual_gap = fast_dot(x, gradient) - fast_dot(v, gradient)
end
step_type = ST_POSTPROCESS
tot_time = (time_ns() - time_start) / 1e9
if callback !== nothing
state = CallbackState(
t,
primal,
primal - dual_gap,
dual_gap,
tot_time,
x,
v,
nothing,
gamma,
f,
grad!,
lmo,
gradient,
step_type,
)
callback(state, active_set)
end
return (x=x, v=v, primal=primal, dual_gap=dual_gap, traj_data=traj_data, active_set=active_set)
end
| FrankWolfe | https://github.com/ZIB-IOL/FrankWolfe.jl.git |