| licenses (sequence, length 1–3) | version (677 string classes) | tree_hash (string, length 40) | path (1 string class) | type (2 string classes) | size (string, length 2–8) | text (string, length 25–67.1M) | package_name (string, length 2–41) | repo (string, length 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 204 | function pr_feedback(history)
return history[end].a == 1
end
sim = simulate(mdl1; feedback=pr_feedback, init_θ = (α = 1.0, β=100.0, Values = reshape([1.0, 0.0], 2, 1)))
@test mean(sim.data.r) == 1.0 | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | docs | 3223 | # AnimalBehavior.jl
[](https://github.com/sqwayer/AnimalBehavior.jl/actions)
[](http://codecov.io/github/sqwayer/AnimalBehavior.jl?branch=main)
[](https://sqwayer.github.io/AnimalBehavior.jl/stable)
[](https://sqwayer.github.io/AnimalBehavior.jl/dev)
Generative models of animal behavior rely on the same global structure:
- They are defined by a set of ***latent variables*** (either fixed parameters or evolving variables)
- Those latent variables evolve as a function of external observations by an ***evolution function***
- Actions are generated by sampling from distributions defined by an ***observation function*** of the latent variables
AnimalBehavior.jl takes advantage of this common structure to wrap some functionalities of the [Turing language for dynamic probabilistic programming](https://github.com/TuringLang), in order to simulate and fit behavioral models with a minimal set of specifications from the user.
## Create a model
First, you need to create a [DynamicPPL model](https://github.com/TuringLang) using the ```@model``` macro, returning all the latent variables of your model as a ```NamedTuple```:
```julia
@model Qlearning(na, ns) = begin
α ~ Beta()
logβ ~ Normal(1,1)
return (α=α, β=exp(logβ), Values = fill(1/na,na,ns))
end
MyModel = Qlearning(2,1)
```
Latent variables can be sampled from a prior distribution, and/or transformed by any arbitrary function.
Then you have to define evolution and observation functions with the macros ```@evolution``` and ```@observation``` respectively, using the following syntax:
```julia
@evolution MyModel begin
Values[a,s] += α * (r - Values[a,s]) # or : delta_rule!(s, a, r, Values, α)
end
@observation MyModel begin
Categorical(softmax(β * @views(Values[:,s])))
end
```
The expressions in the ```begin ... end``` blocks can use the **reserved variable names** ```s```, ```a``` and ```r``` for the current state, action and feedback respectively, and/or any latent variable defined earlier.
Moreover, the observation function must return a ```Distribution``` from the [Distributions.jl package](https://github.com/JuliaStats/Distributions.jl).
## Simulate behavior
```julia
# Simulation of a probabilistic reversal task
function pr_feedback(history) # Reverse the correct response every 20 trials
correct = mod(length(history)/20, 2) < 1 ? 1 : 2
return rand() < 0.9 ? history[end].a == correct : history[end].a ≠ correct
end
sim = simulate(MyModel; feedback=pr_feedback);
```
```simulate``` returns a ```Simulation``` structure with fields ```data``` and ```latent```.
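For example, after the simulation above (an illustrative sketch, assuming as in the package tests that ```sim.data``` exposes the feedbacks as field ```r```):
```julia
using Statistics
sim.data          # per-trial states, actions and feedbacks
sim.latent        # latent variables along the simulation
mean(sim.data.r)  # average feedback across trials
```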
## Inference
The package re-exports the ```sample``` function, which returns a [Chains](https://github.com/TuringLang/MCMCChains.jl) object, with the following syntax:
```julia
sample(model, data, args...; kwargs...)
```
e.g.:
```julia
chn = sample(MyModel, sim.data, NUTS(), 1000)
```
## Model comparison
WIP
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | docs | 65 | # AnimalBehavior.jl documentation
```@docs
simulate(mdl)
```
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2717 | using ProximalOperators
using BenchmarkTools
using LinearAlgebra
using SparseArrays
using Random
const SUITE = BenchmarkGroup()
k = "IndPSD"
SUITE[k] = BenchmarkGroup(["IndPSD"])
for T in [Float64, Complex{Float64}]
for n in [10, 20, 50]
SUITE[k][T, n] = @benchmarkable prox!(Y, f, X) setup=begin
f = IndPSD()
W = if $T <: Real
Symmetric
else
Hermitian
end
Random.seed!(0)
A = randn($T, $n, $n)
X = W((A + A')/2)
Y = similar(X)
end
end
end
k = "IndBox"
SUITE[k] = BenchmarkGroup(["IndBox"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
low = -ones($T, 10000)
upp = +ones($T, 10000)
f = IndBox(low, upp)
x = [-2*ones($T, 3000); zeros($T, 4000); ones($T, 3000)]
y = similar(x)
end
end
k = "IndNonnegative"
SUITE[k] = BenchmarkGroup(["IndNonnegative"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
f = IndNonnegative()
x = [-2*ones($T, 3000); zeros($T, 4000); ones($T, 3000)]
y = similar(x)
end
end
k = "NormL2"
SUITE[k] = BenchmarkGroup(["NormL2"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
f = NormL2()
x = ones($T, 10000)
y = similar(x)
end
end
k = "LeastSquares"
SUITE[k] = BenchmarkGroup(["LeastSquares"])
for (T, s, sparse, iterative) in Iterators.product(
[Float64, ComplexF64],
[(5, 11), (11, 5)],
[false, true],
[false, true],
)
SUITE[k][(T, s, sparse, iterative)] = @benchmarkable prox!(y, f, x) setup=begin
A = if $sparse sparse(ones($T, $s)) else ones($T, $s) end
b = ones($T, $(s[1]))
f = LeastSquares(A, b, iterative=$iterative)
x = ones($T, $(s[2]))
y = similar(x)
end
end
k = "IndExpPrimal"
SUITE[k] = BenchmarkGroup(["IndExpPrimal"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
f = IndExpPrimal()
x = collect($T, [0.537667139546100, 1.833885014595086, -2.258846861003648])
y = similar(x)
end
end
k = "IndSimplex"
SUITE[k] = BenchmarkGroup(["IndSimplex"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
f = IndSimplex()
x = collect($T, -0.5:0.001:2.0)
y = similar(x)
end
end
k = "IndBallL1"
SUITE[k] = BenchmarkGroup(["IndBallL1"])
for T in [Float32, Float64]
SUITE[k][T] = @benchmarkable prox!(y, f, x) setup=begin
f = IndBallL1()
x = collect($T, -2.0:0.001:0.5)
y = similar(x)
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1992 | # This file was adapted from Transducers.jl
# which is available under an MIT license (see LICENSE).
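# Usage sketch (assumption: this script lives in the package's benchmark/
# directory, with ArgParse, PkgBenchmark and Markdown in the active environment):
#
#     julia runbenchmarks.jl --target HEAD --baseline master [--retune]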
using ArgParse
using PkgBenchmark
using Markdown
function displayresult(result)
md = sprint(export_markdown, result)
md = replace(md, ":x:" => "❌")
md = replace(md, ":white_check_mark:" => "✅")
display(Markdown.parse(md))
end
function printnewsection(name)
println()
println()
println()
printstyled("▃" ^ displaysize(stdout)[2]; color=:blue)
println()
printstyled(name; bold=true)
println()
println()
end
function parse_commandline()
s = ArgParseSettings()
@add_arg_table! s begin
"--target"
help = "the branch/commit/tag to use as target"
default = "HEAD"
"--baseline"
help = "the branch/commit/tag to use as baseline"
default = "master"
"--retune"
help = "force re-tuning (ignore existing tuning data)"
action = :store_true
end
return parse_args(s)
end
function main()
parsed_args = parse_commandline()
mkconfig(; kwargs...) =
BenchmarkConfig(
env = Dict(
"JULIA_NUM_THREADS" => "1",
);
kwargs...
)
target = parsed_args["target"]
group_target = benchmarkpkg(
dirname(@__DIR__),
mkconfig(id = target),
resultfile = joinpath(@__DIR__, "result-$(target).json"),
retune = parsed_args["retune"],
)
baseline = parsed_args["baseline"]
group_baseline = benchmarkpkg(
dirname(@__DIR__),
mkconfig(id = baseline),
resultfile = joinpath(@__DIR__, "result-$(baseline).json"),
)
printnewsection("Target result")
displayresult(group_target)
printnewsection("Baseline result")
displayresult(group_baseline)
judgement = judge(group_target, group_baseline)
printnewsection("Judgement result")
displayresult(judgement)
end
main()
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1875 | # lasso.jl - Lasso solvers based on FISTA and ADMM using ProximalOperators
#
# minimize 0.5*||A*x - b||^2 + lam*||x||_1
#
using LinearAlgebra
using Random
using Test # for the final @test check below
using ProximalOperators
Random.seed!(0)
# Define solvers
function lasso_fista(A, b, lam, x; tol=1e-3, maxit=50000)
x_prev = copy(x)
g = NormL1(lam)
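# stepsize: norm(A) is the Frobenius norm, which upper-bounds the spectral
# norm, so 1/norm(A)^2 is a safe (if conservative) 1/L stepsize for the
# gradient of 0.5*||A*x - b||^2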
gam = 1.0/norm(A)^2
for it = 1:maxit
# extrapolation step
x_extr = x + (it-2)/(it+1)*(x - x_prev)
# compute least-squares residual
res = A*x_extr - b
# compute gradient (forward) step
y = x_extr - gam*(A'*res)
# store current iterate
x_prev .= x
# compute proximal (backward) step
prox!(x, g, y, gam)
# stopping criterion
if norm(x_extr-x, Inf)/gam <= tol*(1+norm(x, Inf))
break
end
end
return x
end
function lasso_admm(A, b, lam, x; tol=1e-8, maxit=50000)
u = zero(x)
z = copy(x)
f = LeastSquares(A, b)
g = NormL1(lam)
gam = 100.0/norm(A)^2
for it = 1:maxit
# perform f-update step
prox!(x, f, z - u, gam)
# perform g-update step
prox!(z, g, x + u, gam)
# stopping criterion
if norm(x-z, Inf) <= tol*(1+norm(u, Inf))
break
end
# dual update
u .+= x - z
end
return z
end
# Generate random problem
println("Generating random lasso problem")
m, n, k, sig = 500, 2500, 100, 1e-3
A = randn(m, n)
x_true = [randn(k)..., zeros(n-k)...]
b = A*x_true + sig*randn(m)
lam = 0.1*norm(A'*b, Inf)
# Call solvers
println("Calling solvers")
x_fista = lasso_fista(A, b, lam, zeros(n))
println("FISTA")
println(" nnz(x) = $(norm(x_fista, 0))")
println(" obj value = $(0.5*norm(A*x_fista-b)^2 + lam*norm(x_fista, 1))")
x_admm = lasso_admm(A, b, lam, zeros(n))
println("ADMM")
println(" nnz(x) = $(norm(x_admm, 0))")
println(" obj value = $(0.5*norm(A*x_admm-b)^2 + lam*norm(x_admm, 1))")
@test x_fista ≈ x_admm rtol=1e-3
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1711 | # rpca.jl - Robust PCA solvers using ProximalOperators
#
# minimize 0.5*||A - S - L||^2_F + lam1*||S||_1 + lam2*||L||_*
#
# See Parikh, Boyd "Proximal Algorithms", §7.2
using LinearAlgebra
using Random
using SparseArrays
using ProximalOperators
Random.seed!(0)
# Define solvers
function rpca_fista(A, lam1, lam2, S, L; tol=1e-3, maxit=50000)
S_prev = copy(S)
L_prev = copy(L)
g = SeparableSum(NormL1(lam1), NuclearNorm(lam2))
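# the gradient of 0.5*||A - S - L||^2 with respect to (S, L) is 2-Lipschitz
# (its Hessian is [I I; I I]), so gam = 0.5 is the standard 1/L stepsize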
gam = 0.5
for it = 1:maxit
# extrapolation step
S_extr = S + (it-2)/(it+1)*(S - S_prev)
L_extr = L + (it-2)/(it+1)*(L - L_prev)
# compute residual
res = A - S - L
# compute gradient (forward) step
y_S = S_extr + gam*res
y_L = L_extr + gam*res
# store current iterates
S_prev .= S
L_prev .= L
# compute proximal (backward) step
prox!((S, L), g, (y_S, y_L), gam)
# stopping criterion
fix_point_res = max(norm(S_extr-S, Inf), norm(L_extr-L, Inf))/gam
rel_fix_point_res = fix_point_res/(1+max(norm(S,Inf), norm(L,Inf)))
if rel_fix_point_res <= tol
break
end
end
return S, L
end
# Generate random problem
println("Generating random robust PCA problem")
m, n, r, p, sig = 200, 500, 4, 0.05, 1e-3
L1 = randn(m, r)
L2 = randn(r, n)
L = L1*L2
S = sprand(m, n, p)
V = sig*randn(m, n)
A = L + S + V
lam1 = 0.15*norm(A, Inf)
lam2 = 0.15*opnorm(A)
# Call solvers
println("Calling solvers")
S_fista, L_fista = rpca_fista(A, lam1, lam2, zeros(m, n), zeros(m, n))
println("FISTA")
println(" nnz(S) = $(count(!isequal(0), S_fista))")
println(" rank(L) = $(rank(L_fista))")
println(" ||A|| = $(norm(A, Inf))")
println(" ||A-S-L|| = $(norm(A - S_fista - L_fista, Inf))")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 465 | using Documenter, ProximalOperators, ProximalCore
makedocs(
modules = [ProximalOperators, ProximalCore],
sitename = "ProximalOperators.jl",
pages = [
"Home" => "index.md",
"Functions" => "functions.md",
"Calculus rules" => "calculus.md",
"Prox and gradient" => "operators.md",
"Demos" => "demos.md"
],
checkdocs=:none,
)
deploydocs(
repo = "github.com/JuliaFirstOrder/ProximalOperators.jl.git",
)
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 3202 | # ProximalOperators.jl - library of commonly used functions in optimization, and associated proximal mappings and gradients
module ProximalOperators
using LinearAlgebra
import ProximalCore: prox, prox!, gradient, gradient!
import ProximalCore: is_convex, is_generalized_quadratic
const RealOrComplex{R <: Real} = Union{R, Complex{R}}
const HermOrSym{T, S} = Union{Hermitian{T, S}, Symmetric{T, S}}
const RealBasedArray{R} = AbstractArray{C, N} where {C <: RealOrComplex{R}, N}
const TupleOfArrays{R} = Tuple{RealBasedArray{R}, Vararg{RealBasedArray{R}}}
const ArrayOrTuple{R} = Union{RealBasedArray{R}, TupleOfArrays{R}}
const TransposeOrAdjoint{M} = Union{Transpose{C,M} where C, Adjoint{C,M} where C}
const Maybe{T} = Union{T, Nothing}
export prox, prox!, gradient, gradient!
# Utilities
include("utilities/approx_inequality.jl")
include("utilities/linops.jl")
include("utilities/symmetricpacked.jl")
include("utilities/uniformarrays.jl")
include("utilities/normdiff.jl")
include("utilities/traits.jl")
# Basic functions
include("functions/cubeNormL2.jl")
include("functions/elasticNet.jl")
include("functions/huberLoss.jl")
include("functions/indAffine.jl")
include("functions/indBallL0.jl")
include("functions/indBallL1.jl")
include("functions/indBallL2.jl")
include("functions/indBallRank.jl")
include("functions/indBinary.jl")
include("functions/indBox.jl")
include("functions/indFree.jl")
include("functions/indGraph.jl")
include("functions/indHalfspace.jl")
include("functions/indHyperslab.jl")
include("functions/indNonnegative.jl")
include("functions/indNonpositive.jl")
include("functions/indPoint.jl")
include("functions/indPolyhedral.jl")
include("functions/indPSD.jl")
include("functions/indSimplex.jl")
include("functions/indSOC.jl")
include("functions/indSphereL2.jl")
include("functions/indStiefel.jl")
include("functions/indZero.jl")
include("functions/leastSquares.jl")
include("functions/linear.jl")
include("functions/logBarrier.jl")
include("functions/logisticLoss.jl")
include("functions/normL0.jl")
include("functions/normL1.jl")
include("functions/normL2.jl")
include("functions/normL21.jl")
include("functions/normL1plusL2.jl")
include("functions/nuclearNorm.jl")
include("functions/quadratic.jl")
include("functions/sqrNormL2.jl")
include("functions/sumPositive.jl")
include("functions/sqrHingeLoss.jl")
include("functions/crossEntropy.jl")
include("functions/totalVariation1D.jl")
# Calculus rules
include("calculus/conjugate.jl")
include("calculus/epicompose.jl")
include("calculus/distL2.jl")
include("calculus/moreauEnvelope.jl")
include("calculus/postcompose.jl")
include("calculus/precompose.jl")
include("calculus/precomposeDiagonal.jl")
include("calculus/regularize.jl")
include("calculus/separableSum.jl")
include("calculus/slicedSeparableSum.jl")
include("calculus/sqrDistL2.jl")
include("calculus/tilt.jl")
include("calculus/translate.jl")
include("calculus/sum.jl")
include("calculus/pointwiseMinimum.jl")
# Functions obtained from basic (as special cases or using calculus rules)
include("functions/hingeLoss.jl")
include("functions/indExp.jl")
include("functions/maximum.jl")
include("functions/normLinf.jl")
include("functions/sumLargest.jl")
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1716 | # Conjugate
export Conjugate
"""
Conjugate(f)
Return the convex conjugate (also known as Fenchel conjugate, or Fenchel-Legendre transform) of function `f`, that is
```math
f^*(x) = \\sup_y \\{ \\langle y, x \\rangle - f(y) \\}.
```
"""
struct Conjugate{T}
f::T
function Conjugate{T}(f::T) where T
if is_convex(f) == false
error("`f` must be convex")
end
new(f)
end
end
is_prox_accurate(::Type{Conjugate{T}}) where T = is_prox_accurate(T)
is_convex(::Type{Conjugate{T}}) where T = true
is_cone(::Type{Conjugate{T}}) where T = is_cone(T) && is_convex(T)
is_smooth(::Type{Conjugate{T}}) where T = is_strongly_convex(T)
is_strongly_convex(::Type{Conjugate{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{Conjugate{T}}) where T = is_generalized_quadratic(T)
is_set(::Type{Conjugate{T}}) where T = is_convex(T) && is_support(T)
is_positively_homogeneous(::Type{Conjugate{T}}) where T = is_convex(T) && is_set(T)
Conjugate(f::T) where T = Conjugate{T}(f)
# only prox! is provided here, call method would require being able to compute
# an element of the subdifferential of the conjugate
function prox!(y, g::Conjugate, x, gamma)
# Moreau identity
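# prox_{γ f*}(x) = x - γ * prox_{f/γ}(x/γ): the call below evaluates the prox
# of the original f at x/γ with stepsize 1/γ, and y is mapped back at the end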
v = prox!(y, g.f, x/gamma, 1/gamma)
if is_set(g)
v = real(eltype(x))(0)
else
v = real(dot(x, y)) - gamma * real(dot(y, y)) - v
end
y .= x .- gamma .* y
return v
end
# naive implementation
function prox_naive(g::Conjugate, x, gamma)
y, v = prox_naive(g.f, x/gamma, 1/gamma)
return x - gamma * y, if is_set(g) real(eltype(x))(0) else real(dot(x, y)) - gamma * real(dot(y, y)) - v end
end
# TODO: hard-code conjugation rules? E.g. precompose/epicompose
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1730 | # Euclidean distance from a set
export DistL2
"""
DistL2(ind_S)
Given `ind_S` the indicator function of a set ``S``, and an optional positive parameter `λ`, return the (weighted) Euclidean distance from ``S``, that is function
```math
g(x) = λ\\mathrm{dist}_S(x) = \\min \\{ λ\\|y - x\\| : y \\in S \\}.
```
"""
struct DistL2{R, T}
ind::T
lambda::R
function DistL2{R, T}(ind::T, lambda::R) where {R, T}
if !is_set(ind)
error("`ind` must be a convex set")
end
if lambda <= 0
error("parameter `λ` must be positive")
else
new(ind, lambda)
end
end
end
is_prox_accurate(::Type{DistL2{R, T}}) where {R, T} = is_prox_accurate(T)
is_convex(::Type{DistL2{R, T}}) where {R, T} = is_convex(T)
DistL2(ind::T, lambda::R=1) where {R, T} = DistL2{R, T}(ind, lambda)
function (f::DistL2)(x)
p, = prox(f.ind, x)
return f.lambda * normdiff(x, p)
end
function prox!(y, f::DistL2, x, gamma)
R = real(eltype(x))
prox!(y, f.ind, x)
d = normdiff(x, y)
gamlam = gamma * f.lambda
if gamlam < d
gamlamd = gamlam/d
y .= (1 - gamlamd) .*x .+ gamlamd .* y
return f.lambda * (d - gamlam)
end
return R(0)
end
function gradient!(y, f::DistL2, x)
prox!(y, f.ind, x) # Use y as temporary storage
dist = normdiff(x, y)
if dist > 0
y .= (f.lambda / dist) .* (x .- y)
else
y .= 0
end
return f.lambda * dist
end
function prox_naive(f::DistL2, x, gamma)
R = real(eltype(x))
p, = prox(f.ind, x)
d = norm(x - p)
gamlam = gamma * f.lambda
if d > gamlam
return x + gamlam/d * (p - x), f.lambda * (d - gamlam)
end
return p, R(0)
end
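# Illustrative usage sketch (IndBallL2, the indicator of the L2 ball, is
# defined elsewhere in this package):
#
# f = DistL2(IndBallL2(1.0))        # distance from the unit ball
# f([2.0, 0.0])                     # == 1.0
# y, fy = prox(f, [2.0, 0.0], 0.5)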
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1644 | # Epi-composition, also known as infimal postcomposition or image function.
# This is the dual operation to precomposition, see Rockafellar and Wets,
# "Variational Analysis", Theorem 11.23.
#
# Given a function f and a linear operator L, their epi-composition is:
#
# g(y) = (Lf)(y) = inf_x { f(x) : Lx = y }.
#
# Plugging g directly in the definition of prox, one has:
#
# prox_{\gamma g}(z) = argmin_y { (Lf)(y) + 1/(2\gamma)||y - z||^2 }
# = argmin_y { inf_x { f(x) : Lx = y } + 1/(2\gamma)||y - z||^2 }
# = L * argmin_x { f(x) + 1/(2\gamma)||Lx - z||^2 }.
#
# When L is such that L'*L = mu*Id, then this just requires prox_{\gamma f}.
#
# In some other cases the prox can be "easily" computed, such as when f is
# quadratic or extended-quadratic.
#
export Epicompose
"""
Epicompose(L, f, [mu])
Return the epi-composition of ``f`` with ``L``, also known as infimal
postcomposition or image function. Given a function f and a linear operator L,
their epi-composition is:
```math
g(y) = (Lf)(y) = \\inf_x \\{ f(x) : Lx = y \\}.
```
This is the dual operation to precomposition, see Rockafellar and Wets,
"Variational Analysis", Theorem 11.23.
If ``mu > 0`` is specified, then ``L`` is assumed to be such that ``L'*L == mu*I``,
and the proximal operator is computable for any convex ``f``. If ``mu`` is
not specified, then ``f`` must be of ``Quadratic`` type.
"""
abstract type Epicompose end
Epicompose(L, f, mu) = EpicomposeGramDiagonal(L, f, mu)
Epicompose(L, f::Quadratic) = EpicomposeQuadratic(L, f)
# TODO add properties
### INCLUDE CONCRETE TYPES
include("epicomposeGramDiagonal.jl")
include("epicomposeQuadratic.jl")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 706 | using LinearAlgebra
mutable struct EpicomposeGramDiagonal{M, P, R} <: Epicompose
L::M
f::P
mu::R
function EpicomposeGramDiagonal{M, P, R}(L::M, f::P, mu::R) where {M, P, R}
if mu <= 0
error("mu must be positive")
end
new(L, f, mu)
end
end
EpicomposeGramDiagonal(L, f, mu) = EpicomposeGramDiagonal{typeof(L), typeof(f), typeof(mu)}(L, f, mu)
function prox!(y, g::EpicomposeGramDiagonal, x, gamma)
z = (g.L'*x)/g.mu
p, v = prox(g.f, z, gamma/g.mu)
mul!(y, g.L, p)
return v
end
function prox_naive(g::EpicomposeGramDiagonal, x, gamma)
z = (g.L'*x)/g.mu
p, v = prox_naive(g.f, z, gamma/g.mu)
y = g.L*p
return y, v
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1527 | using LinearAlgebra
using SparseArrays
using SuiteSparse
mutable struct EpicomposeQuadratic{F, M, P, V, R} <: Epicompose
L::M
Q::P
q::V
gamma::R
fact::F
function EpicomposeQuadratic{F, M, P, V, R}(L::M, Q::P, q::V) where {F, M, P, V, R}
new(L, Q, q, -R(1))
end
end
function EpicomposeQuadratic(L, Q::P, q) where {R, P <: DenseMatrix{R}}
return EpicomposeQuadratic{
LinearAlgebra.Cholesky{R, P}, typeof(L), P, typeof(q), real(eltype(L))
}(L, Q, q)
end
function EpicomposeQuadratic(L, Q::P, q) where {R, P <: SparseMatrixCSC{R}}
return EpicomposeQuadratic{
SuiteSparse.CHOLMOD.Factor{R}, typeof(L), P, typeof(q), real(eltype(L))
}(L, Q, q)
end
# TODO: enable construction from other types of quadratics, e.g. LeastSquares
# TODO: probably some access methods are needed to obtain Hessian and linear term?
EpicomposeQuadratic(L, f::Quadratic) = EpicomposeQuadratic(L, f.Q, f.q)
function get_factorization!(g::EpicomposeQuadratic, gamma)
if !isapprox(gamma, g.gamma)
g.gamma = gamma
g.fact = cholesky(g.Q + (g.L' * g.L)/gamma)
end
return g.fact
end
function prox!(y, g::EpicomposeQuadratic, x, gamma)
fact = get_factorization!(g, gamma)
p = fact\((g.L' * x) / gamma - g.q)
fy = dot(p, g.Q * p)/2 + dot(p, g.q)
mul!(y, g.L, p)
return fy
end
function prox_naive(g::EpicomposeQuadratic, x, gamma)
S = g.Q + (g.L' * g.L) / gamma
p = S\((g.L' * x) / gamma - g.q)
fy = dot(p, g.Q * p)/2 + dot(p, g.q)
y = g.L * p
return y, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1973 | export MoreauEnvelope
"""
MoreauEnvelope(f, γ=1)
Return the Moreau envelope (also known as Moreau-Yosida regularization) of function `f` with parameter `γ` (positive), that is
```math
f^γ(x) = \\min_z \\left\\{ f(z) + \\tfrac{1}{2γ}\\|z-x\\|^2 \\right\\}.
```
If ``f`` is convex, then ``f^γ`` is a smooth, convex, lower approximation to ``f``, having the same minima as the original function.
"""
struct MoreauEnvelope{R, T}
g::T
lambda::R
function MoreauEnvelope{R, T}(g::T, lambda::R) where {R, T}
if lambda <= 0 error("parameter lambda must be positive") end
new(g, lambda)
end
end
MoreauEnvelope(g::T, lambda::R=1) where {R, T} = MoreauEnvelope{R, T}(g, lambda)
is_convex(::Type{MoreauEnvelope{R, T}}) where {R, T} = is_convex(T)
is_smooth(::Type{MoreauEnvelope{R, T}}) where {R, T} = is_convex(T)
is_generalized_quadratic(::Type{MoreauEnvelope{R, T}}) where {R, T} = is_generalized_quadratic(T)
is_strongly_convex(::Type{MoreauEnvelope{R, T}}) where {R, T} = is_strongly_convex(T)
function (f::MoreauEnvelope)(x)
R = eltype(x)
buf = similar(x)
g_prox = prox!(buf, f.g, x, f.lambda)
return g_prox + R(1) / (2 * f.lambda) * norm(buf .- x)^2
end
function gradient!(grad, f::MoreauEnvelope, x)
R = eltype(x)
g_prox = prox!(grad, f.g, x, f.lambda)
grad .= (x .- grad)./f.lambda
fx = g_prox + f.lambda / R(2) * norm(grad)^2
return fx
end
function prox!(u, f::MoreauEnvelope, x, gamma)
# See: Thm. 6.63 in A. Beck, "First-Order Methods in Optimization", MOS-SIAM Series on Optimization, SIAM, 2017
R = eltype(x)
gamma_lambda = gamma + f.lambda
y, fy = prox(f.g, x, gamma_lambda)
alpha = gamma / gamma_lambda
u .= ((1 - alpha) .* x) .+ (alpha .* y)
return fy + R(1) / (2 * f.lambda) * norm(u .- y)^2
end
# NOTE the following is just so we can use certain test helpers
# TODO properly implement the following
prox_naive(f::MoreauEnvelope, x, gamma) = prox(f, x, gamma)
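# Illustrative sketch: the Moreau envelope of the L1 norm is its smooth
# (Huber-like) approximation, so the gradient is defined everywhere:
#
# f = MoreauEnvelope(NormL1(), 0.1)
# f([1.0, -0.05])                   # finite, smooth approximation of the L1 norm
# g, fx = gradient(f, [1.0, -0.05])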
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1485 | export PointwiseMinimum
"""
PointwiseMinimum(f_1, ..., f_k)
Given functions `f_1` to `f_k`, return their pointwise minimum, that is function
```math
g(x) = \\min\\{f_1(x), ..., f_k(x)\\}
```
Note that `g` is a nonconvex function in general.
"""
struct PointwiseMinimum{T}
fs::T
end
PointwiseMinimum(fs...) = PointwiseMinimum{typeof(fs)}(fs)
component_types(::Type{PointwiseMinimum{T}}) where T = fieldtypes(T)
@generated is_set(::Type{T}) where T <: PointwiseMinimum = return all(is_set, component_types(T)) ? :(true) : :(false)
@generated is_cone(::Type{T}) where T <: PointwiseMinimum = return all(is_cone, component_types(T)) ? :(true) : :(false)
function (g::PointwiseMinimum{T})(x) where T
return minimum(f(x) for f in g.fs)
end
function prox!(y, g::PointwiseMinimum, x, gamma)
R = real(eltype(x))
y_temp = similar(y)
minimum_moreau_env = Inf
for f in g.fs
f_y_temp = prox!(y_temp, f, x, gamma)
moreau_env = f_y_temp + R(1)/(2*gamma)*norm(x - y_temp)^2
if moreau_env <= minimum_moreau_env
copyto!(y, y_temp)
minimum_moreau_env = moreau_env
end
end
return g(y)
end
function prox_naive(g::PointwiseMinimum, x, gamma)
R = real(eltype(x))
proxes = [prox_naive(f, x, gamma) for f in g.fs]
moreau_envs = [f_y + R(1)/(R(2)*gamma)*norm(x - y)^2 for (y, f_y) in proxes]
_, i_min = findmin(moreau_envs)
y = proxes[i_min][1]
return y, minimum(f(y) for f in g.fs)
end
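# Illustrative sketch: the prox of a pointwise minimum of two singleton
# indicators projects onto whichever point is closer (IndPoint is defined
# elsewhere in this package):
#
# g = PointwiseMinimum(IndPoint([1.0]), IndPoint([-1.0]))
# y, gy = prox(g, [0.3], 1.0)   # y == [1.0], the nearer point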
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1706 | # Postcompose with scaling and sum
export Postcompose
"""
Postcompose(f, a=1, b=0)
Return the function
```math
g(x) = a\\cdot f(x) + b.
```
"""
struct Postcompose{T, R, S}
f::T
a::R
b::S
function Postcompose{T,R,S}(f::T, a::R, b::S) where {T, R, S}
if a <= 0
error("parameter `a` must be positive")
else
new(f, a, b)
end
end
end
is_prox_accurate(::Type{<:Postcompose{T}}) where T = is_prox_accurate(T)
is_separable(::Type{<:Postcompose{T}}) where T = is_separable(T)
is_convex(::Type{<:Postcompose{T}}) where T = is_convex(T)
is_set(::Type{<:Postcompose{T}}) where T = is_set(T)
is_singleton(::Type{<:Postcompose{T}}) where T = is_singleton(T)
is_cone(::Type{<:Postcompose{T}}) where T = is_cone(T)
is_affine(::Type{<:Postcompose{T}}) where T = is_affine(T)
is_smooth(::Type{<:Postcompose{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:Postcompose{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:Postcompose{T}}) where T = is_strongly_convex(T)
Postcompose(f::T, a::R=1, b::S=0) where {T, R <: Real, S <: Real} = Postcompose{T, R, S}(f, a, b)
Postcompose(f::Postcompose{T, R, S}, a::R=1, b::S=0) where {T, R <: Real, S <: Real} = Postcompose{T, R, S}(f.f, a * f.a, b + a * f.b)
function (g::Postcompose)(x)
return g.a * g.f(x) + g.b
end
function gradient!(y, g::Postcompose, x)
v = gradient!(y, g.f, x)
y .*= g.a
return g.a * v + g.b
end
function prox!(y, g::Postcompose, x, gamma)
v = prox!(y, g.f, x, g.a * gamma)
return g.a * v + g.b
end
function prox_naive(g::Postcompose, x, gamma)
y, v = prox_naive(g.f, x, g.a * gamma)
return y, g.a * v + g.b
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2942 | # Precompose with a linear mapping + translation (= affine)
export Precompose
"""
Precompose(f, L, μ, b)
Return the function
```math
g(x) = f(Lx + b)
```
where ``f`` is a convex function and ``L`` is a linear mapping: this must
satisfy ``LL^* = μI`` for ``μ > 0``. Furthermore, either ``f`` is separable or
parameter `μ` is a scalar, for the `prox` of ``g`` to be computable.
Parameter `L` defines ``L`` through the `mul!` method. Therefore `L` can be an
`AbstractMatrix` for example, but not necessarily.
In this case, `prox` and `prox!` are computed according to Prop. 24.14 in
Bauschke, Combettes "Convex Analysis and Monotone Operator Theory in Hilbert
Spaces", 2nd edition, 2016. The same result is Prop. 23.32 in the 1st edition
of the same book.
"""
struct Precompose{T, M, U, V}
f::T
L::M
mu::U
b::V
function Precompose{T, M, U, V}(f::T, L::M, mu::U, b::V) where {T, M, U, V}
if !is_convex(f)
error("f must be convex")
end
if any(mu .<= 0)
error("elements of μ must be positive")
end
new(f, L, mu, b)
end
end
is_prox_accurate(::Type{<:Precompose{T}}) where T = is_prox_accurate(T)
is_convex(::Type{<:Precompose{T}}) where T = is_convex(T)
is_set(::Type{<:Precompose{T}}) where T = is_set(T)
is_singleton(::Type{<:Precompose{T}}) where T = is_singleton(T)
is_cone(::Type{<:Precompose{T}}) where T = is_cone(T)
is_affine(::Type{<:Precompose{T}}) where T = is_affine(T)
is_smooth(::Type{<:Precompose{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:Precompose{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:Precompose{T}}) where T = is_strongly_convex(T)
Precompose(f::T, L::M, mu::U, b::V) where {T, M, U, V} = Precompose{T, M, U, V}(f, L, mu, b)
Precompose(f::T, L::M, mu::U) where {T, M, U} = Precompose(f, L, mu, 0)
function (g::Precompose)(x)
return g.f(g.L * x .+ g.b)
end
function gradient!(y, g::Precompose, x)
res = g.L*x .+ g.b
gradres = similar(res)
v = gradient!(gradres, g.f, res)
mul!(y, adjoint(g.L), gradres)
return v
end
function prox!(y, g::Precompose, x, gamma)
# See Prop. 24.14 in Bauschke, Combettes
# "Convex Analysis and Monotone Operator Theory in Hilbert Spaces",
# 2nd ed., 2016.
#
# The same result is Prop. 23.32 in the 1st ed. of the same book.
#
# This case has an additional translation: if f(x) = h(x + b) then
# prox_f(x) = prox_h(x + b) - b
# Then one can apply the above mentioned result to g(x) = f(Lx).
#
res = g.L*x .+ g.b
proxres = similar(res)
v = prox!(proxres, g.f, res, g.mu.*gamma)
proxres .-= res
proxres ./= g.mu
mul!(y, adjoint(g.L), proxres)
y .+= x
return v
end
function prox_naive(g::Precompose, x, gamma)
res = g.L*x .+ g.b
proxres, v = prox_naive(g.f, res, g.mu .* gamma)
y = x + g.L'*((proxres .- res)./g.mu)
return y, v
end
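# Illustrative sketch: a permutation matrix P satisfies P*P' == I, so μ = 1:
#
# P = [0.0 1.0; 1.0 0.0]
# g = Precompose(NormL1(), P, 1.0)
# y, gy = prox(g, [1.0, -2.0], 0.5)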
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2589 | # Precompose with diagonal scaling and translation
export PrecomposeDiagonal
"""
PrecomposeDiagonal(f, a, b)
Return the function
```math
g(x) = f(\\mathrm{diag}(a)x + b)
```
Function ``f`` must be convex and separable, or `a` must be a scalar, for the
`prox` of ``g`` to be computable. Parameters `a` and `b` can be arrays of
multiple dimensions, according to the shape/size of the input `x` that will be
provided to the function: the way the above expression for ``g`` should be
thought of, is `g(x) = f(a.*x + b)`.
"""
struct PrecomposeDiagonal{T, R, S}
f::T
a::R
b::S
function PrecomposeDiagonal{T,R,S}(f::T, a::R, b::S) where {T, R, S}
if R <: AbstractArray && !(is_convex(f) && is_separable(f))
error("`f` must be convex and separable since `a` is of type $(R)")
end
if any(a .== 0)
error("elements of `a` must be nonzero")
else
new(f, a, b)
end
end
end
is_separable(::Type{<:PrecomposeDiagonal{T}}) where T = is_separable(T)
is_prox_accurate(::Type{<:PrecomposeDiagonal{T}}) where T = is_prox_accurate(T)
is_convex(::Type{<:PrecomposeDiagonal{T}}) where T = is_convex(T)
is_set(::Type{<:PrecomposeDiagonal{T}}) where T = is_set(T)
is_singleton(::Type{<:PrecomposeDiagonal{T}}) where T = is_singleton(T)
is_cone(::Type{<:PrecomposeDiagonal{T}}) where T = is_cone(T)
is_affine(::Type{<:PrecomposeDiagonal{T}}) where T = is_affine(T)
is_smooth(::Type{<:PrecomposeDiagonal{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:PrecomposeDiagonal{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:PrecomposeDiagonal{T}}) where T = is_strongly_convex(T)
PrecomposeDiagonal(f::T, a::S=1, b::S=0) where {T, S <: Real} = PrecomposeDiagonal{T, S, S}(f, a, b)
PrecomposeDiagonal(f::T, a::R, b::S=0) where {T, R <: AbstractArray, S <: Real} = PrecomposeDiagonal{T, R, S}(f, a, b)
PrecomposeDiagonal(f::T, a::R, b::S) where {T, R <: Union{AbstractArray, Real}, S <: AbstractArray} = PrecomposeDiagonal{T, R, S}(f, a, b)
function (g::PrecomposeDiagonal)(x)
return g.f(g.a .* x .+ g.b)
end
function gradient!(y, g::PrecomposeDiagonal, x)
z = g.a .* x .+ g.b
v = gradient!(y, g.f, z)
y .*= g.a
return v
end
function prox!(y, g::PrecomposeDiagonal, x, gamma)
z = g.a .* x .+ g.b
v = prox!(y, g.f, z, (g.a .* g.a) .* gamma)
y .-= g.b
y ./= g.a
return v
end
function prox_naive(g::PrecomposeDiagonal, x, gamma)
z = g.a .* x .+ g.b
y, fy = prox_naive(g.f, z, (g.a .* g.a) .* gamma)
return (y .- g.b)./g.a, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1768 | # Regularize
export Regularize
"""
Regularize(f, ρ=1.0, a=0.0)
Given function `f`, and optional parameters `ρ` (positive) and `a`, return
```math
g(x) = f(x) + \\tfrac{ρ}{2}\\|x-a\\|².
```
Parameter `a` can be either an array or a scalar, in which case it is subtracted component-wise from `x` in the above expression.
"""
struct Regularize{T, S, A}
f::T
rho::S
a::A
function Regularize{T,S,A}(f::T, rho::S, a::A) where {T, S, A}
if rho <= 0.0
error("parameter `ρ` must be positive")
else
new(f, rho, a)
end
end
end
is_separable(::Type{<:Regularize{T}}) where T = is_separable(T)
is_prox_accurate(::Type{<:Regularize{T}}) where T = is_prox_accurate(T)
is_convex(::Type{<:Regularize{T}}) where T = is_convex(T)
is_smooth(::Type{<:Regularize{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:Regularize{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:Regularize}) = true
Regularize(f::T, rho::S=1, a::A=0) where {T, S, A} = Regularize{T, S, A}(f, rho, a)
function (g::Regularize)(x)
return g.f(x) + g.rho/2*norm(x .- g.a)^2
end
function gradient!(y, g::Regularize, x)
v = gradient!(y, g.f, x)
y .+= g.rho * (x .- g.a)
return v + g.rho / 2 * norm(x .- g.a)^2
end
function prox!(y, g::Regularize, x, gamma)
R = real(eltype(x))
gr = g.rho * gamma
gr2 = R(1) ./ (R(1) .+ gr)
v = prox!(y, g.f, gr2 .* (x .+ gr .* g.a), gr2 .* gamma)
return v + g.rho / R(2) * norm(y .- g.a)^2
end
function prox_naive(g::Regularize, x, gamma)
R = real(eltype(x))
y, v = prox_naive(g.f, x./(R(1) .+ gamma.*g.rho) .+ g.a./(R(1)./(gamma.*g.rho) .+ R(1)), gamma./(R(1) .+ gamma.*g.rho))
return y, v + g.rho/R(2)*norm(y .- g.a)^2
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 3025 | # Separable sum, using tuples of arrays as variables
export SeparableSum
"""
SeparableSum(f_1, ..., f_k)
Given functions `f_1` to `f_k`, return their separable sum, that is
```math
g(x_1, ..., x_k) = \\sum_{i=1}^k f_i(x_i).
```
The object `g` constructed in this way can be evaluated at `Tuple`s of length
`k`. Likewise, the `prox` and `prox!` methods for `g` operate with
(input and output) `Tuple`s of length `k`.
Example:
f = SeparableSum(NormL1(), NuclearNorm()); # separable sum of two functions
x = randn(10); # some random vector
Y = randn(20, 30); # some random matrix
f_xY = f((x, Y)); # evaluates f at (x, Y)
(u, V), f_uV = prox(f, (x, Y), 1.3); # computes prox at (x, Y)
"""
struct SeparableSum{T}
fs::T
end
SeparableSum(fs::Vararg) = SeparableSum((fs...,))
component_types(::Type{SeparableSum{T}}) where T = fieldtypes(T)
@generated is_prox_accurate(::Type{T}) where T <: SeparableSum = return all(is_prox_accurate, component_types(T)) ? :(true) : :(false)
@generated is_convex(::Type{T}) where T <: SeparableSum = return all(is_convex, component_types(T)) ? :(true) : :(false)
@generated is_set(::Type{T}) where T <: SeparableSum = return all(is_set, component_types(T)) ? :(true) : :(false)
@generated is_singleton(::Type{T}) where T <: SeparableSum = return all(is_singleton, component_types(T)) ? :(true) : :(false)
@generated is_cone(::Type{T}) where T <: SeparableSum = return all(is_cone, component_types(T)) ? :(true) : :(false)
@generated is_affine(::Type{T}) where T <: SeparableSum = return all(is_affine, component_types(T)) ? :(true) : :(false)
@generated is_smooth(::Type{T}) where T <: SeparableSum = return all(is_smooth, component_types(T)) ? :(true) : :(false)
@generated is_generalized_quadratic(::Type{T}) where T <: SeparableSum = return all(is_generalized_quadratic, component_types(T)) ? :(true) : :(false)
@generated is_strongly_convex(::Type{T}) where T <: SeparableSum = return all(is_strongly_convex, component_types(T)) ? :(true) : :(false)
(g::SeparableSum)(xs::Tuple) = sum(f(x) for (f, x) in zip(g.fs, xs))
prox!(ys::Tuple, g::SeparableSum, xs::Tuple, gamma::Number) = sum(prox!(y, f, x, gamma) for (y, f, x) in zip(ys, g.fs, xs))
prox!(ys::Tuple, g::SeparableSum, xs::Tuple, gammas::Tuple) = sum(prox!(y, f, x, gamma) for (y, f, x, gamma) in zip(ys, g.fs, xs, gammas))
function prox(g::SeparableSum, xs::Tuple, gamma=1)
ys = similar.(xs)
fys = prox!(ys, g, xs, gamma)
return ys, fys
end
gradient!(grads::Tuple, g::SeparableSum, xs::Tuple) = sum(gradient!(grad, f, x) for (grad, f, x) in zip(grads, g.fs, xs))
function gradient(g::SeparableSum, xs::Tuple)
ys = similar.(xs)
fxs = gradient!(ys, g, xs)
return ys, fxs
end
function prox_naive(f::SeparableSum, xs::Tuple, gamma)
fys = real(eltype(xs[1]))(0)
ys = []
for k in eachindex(xs)
y, fy = prox_naive(f.fs[k], xs[k], typeof(gamma) <: Number ? gamma : gamma[k])
fys += fy
append!(ys, [y])
end
return Tuple(ys), fys
end
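# Illustrative sketch: prox! also accepts a tuple of stepsizes, one per block:
#
# f = SeparableSum(NormL1(), NormL2())
# xs = (randn(5), randn(5)); ys = similar.(xs)
# fys = prox!(ys, f, xs, (0.1, 1.0))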
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 4278 | # Separable sum, using slices of an array as variables
export SlicedSeparableSum
"""
SlicedSeparableSum((f_1, ..., f_k), (J_1, ..., J_k))
Return the function
```math
g(x) = \\sum_{i=1}^k f_i(x_{J_i}).
```
SlicedSeparableSum(f, (J_1, ..., J_k))
Analogous to the previous one, but apply the same function `f` to all slices
of the variable `x`:
```math
g(x) = \\sum_{i=1}^k f(x_{J_i}).
```
"""
struct SlicedSeparableSum{S <: Tuple, T <: AbstractArray, N}
fs::S # Tuple, where each element is a Vector with elements of the same type; the functions to prox on
# Example: S = Tuple{Array{ProximalOperators.NormL1{Float64},1}, Array{ProximalOperators.NormL2{Float64},1}}
idxs::T # Vector, where each element is a Vector containing the indices to prox on
# Example: T = Array{Array{Tuple{Colon,UnitRange{Int64}},1},1}
end
function SlicedSeparableSum(fs::Tuple, idxs::Tuple)
ftypes = DataType[]
fsarr = Array{Any,1}[]
indarr = Array{eltype(idxs),1}[]
for (i,f) in enumerate(fs)
t = typeof(f)
fi = findfirst(isequal(t), ftypes)
if fi === nothing
push!(ftypes, t)
push!(fsarr, Any[f])
push!(indarr, eltype(idxs)[idxs[i]])
else
push!(fsarr[fi], f)
push!(indarr[fi], idxs[i])
end
end
fsnew = ((Array{typeof(fs[1]),1}(fs) for fs in fsarr)...,)
@assert typeof(fsnew) == Tuple{(Array{ft,1} for ft in ftypes)...}
SlicedSeparableSum{typeof(fsnew),typeof(indarr),length(fsnew)}(fsnew, indarr)
end
# Constructor for the case where the same function is applied to all slices
SlicedSeparableSum(f::F, idxs::T) where {F, T <: Tuple} =
SlicedSeparableSum(Tuple(f for k in eachindex(idxs)), idxs)
# Unroll the loop over the different types of functions to evaluate
@generated function (f::SlicedSeparableSum{A, B, N})(x) where {A, B, N}
ex = :(v = 0.0)
for i = 1:N # For each function type
ex = quote $ex;
for k in eachindex(f.fs[$i]) # For each function of that type
v += f.fs[$i][k](view(x,f.idxs[$i][k]...))
end
end
end
ex = :($ex; return v)
end
# Unroll the loop over the different types of functions to prox on
@generated function prox!(y, f::SlicedSeparableSum{A, B, N}, x, gamma) where {A, B, N}
ex = :(v = 0.0)
for i = 1:N # For each function type
ex = quote $ex;
for k in eachindex(f.fs[$i]) # For each function of that type
g = prox!(view(y, f.idxs[$i][k]...), f.fs[$i][k], view(x,f.idxs[$i][k]...), gamma)
v += g
end
end
end
ex = :($ex; return v)
end
component_types(::Type{SlicedSeparableSum{S, T, N}}) where {S, T, N} = Tuple(A.parameters[1] for A in fieldtypes(S))
@generated is_prox_accurate(::Type{T}) where T <: SlicedSeparableSum = return all(is_prox_accurate, component_types(T)) ? :(true) : :(false)
@generated is_convex(::Type{T}) where T <: SlicedSeparableSum = return all(is_convex, component_types(T)) ? :(true) : :(false)
@generated is_set(::Type{T}) where T <: SlicedSeparableSum = return all(is_set, component_types(T)) ? :(true) : :(false)
@generated is_singleton(::Type{T}) where T <: SlicedSeparableSum = return all(is_singleton, component_types(T)) ? :(true) : :(false)
@generated is_cone(::Type{T}) where T <: SlicedSeparableSum = return all(is_cone, component_types(T)) ? :(true) : :(false)
@generated is_affine(::Type{T}) where T <: SlicedSeparableSum = return all(is_affine, component_types(T)) ? :(true) : :(false)
@generated is_smooth(::Type{T}) where T <: SlicedSeparableSum = return all(is_smooth, component_types(T)) ? :(true) : :(false)
@generated is_generalized_quadratic(::Type{T}) where T <: SlicedSeparableSum = return all(is_generalized_quadratic, component_types(T)) ? :(true) : :(false)
@generated is_strongly_convex(::Type{T}) where T <: SlicedSeparableSum = return all(is_strongly_convex, component_types(T)) ? :(true) : :(false)
function prox_naive(f::SlicedSeparableSum, x, gamma)
fy = 0
y = similar(x)
for t in eachindex(f.fs)
for k in eachindex(f.fs[t])
y[f.idxs[t][k]...], fy1 = prox_naive(f.fs[t][k], x[f.idxs[t][k]...], gamma)
fy += fy1
end
end
return y, fy
end
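# Illustrative sketch: L1-regularize the first five entries of x and
# L2-regularize the remaining ones:
#
# f = SlicedSeparableSum((NormL1(), NormL2()), ((1:5,), (6:10,)))
# x = randn(10); y = similar(x)
# fy = prox!(y, f, x, 1.0)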
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 455 | # squared Euclidean distance from a set
export SqrDistL2
"""
SqrDistL2(ind_S, λ=1)
Given `ind_S` the indicator function of a set ``S``, and an optional positive parameter `λ`, return the (weighted) squared Euclidean distance from ``S``, that is function
```math
g(x) = \\tfrac{λ}{2}\\mathrm{dist}_S^2(x) = \\min \\left\\{ \\tfrac{λ}{2}\\|y - x\\|^2 : y \\in S \\right\\}.
```
"""
SqrDistL2(ind, lambda=1) = Postcompose(MoreauEnvelope(ind), lambda)
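# Illustrative sketch (IndBox, the indicator of a box, is defined elsewhere
# in this package):
#
# f = SqrDistL2(IndBox(-1, 1), 2.0)   # (2/2)*dist²(x, [-1,1]ⁿ)
# f([2.0])                            # == 1.0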
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1821 | export Sum
"""
Sum(f_1, ..., f_k)
Given functions `f_1` to `f_k`, return their sum
```math
g(x) = \\sum_{i=1}^k f_i(x).
```
"""
struct Sum{T}
fs::T
end
Sum(fs::Vararg) = Sum((fs...,))
component_types(::Type{Sum{T}}) where T = fieldtypes(T)
# note: is_prox_accurate false because prox in general doesn't exist?
is_prox_accurate(::Type{<:Sum}) = false
@generated is_convex(::Type{T}) where T <: Sum = return all(is_convex, component_types(T)) ? :(true) : :(false)
@generated is_set(::Type{T}) where T <: Sum = return all(is_set, component_types(T)) ? :(true) : :(false)
@generated is_singleton(::Type{T}) where T <: Sum = return all(is_singleton, component_types(T)) ? :(true) : :(false)
@generated is_cone(::Type{T}) where T <: Sum = return all(is_cone, component_types(T)) ? :(true) : :(false)
@generated is_affine(::Type{T}) where T <: Sum = return all(is_affine, component_types(T)) ? :(true) : :(false)
@generated is_smooth(::Type{T}) where T <: Sum = return all(is_smooth, component_types(T)) ? :(true) : :(false)
@generated is_generalized_quadratic(::Type{T}) where T <: Sum = return all(is_generalized_quadratic, component_types(T)) ? :(true) : :(false)
@generated is_strongly_convex(::Type{T}) where T <: Sum = return (all(is_convex, component_types(T)) && any(is_strongly_convex, component_types(T))) ? :(true) : :(false)
function (sumobj::Sum)(x)
sum = real(eltype(x))(0)
for f in sumobj.fs
sum += f(x)
end
sum
end
function gradient!(grad, sumobj::Sum, x)
# gradient of sum is sum of gradients
val = real(eltype(x))(0)
# to keep track of this sum, i may not be able to
# avoid allocating an array
grad .= eltype(x)(0)
temp = similar(grad)
for f in sumobj.fs
val += gradient!(temp, f, x)
grad .+= temp
end
return val
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1138 | # Tilting (addition with affine function)
export Tilt
"""
Tilt(f, a, b=0.0)
Given function `f`, an array `a` and a constant `b` (optional), return function
```math
g(x) = f(x) + \\langle a, x \\rangle + b.
```
"""
struct Tilt{T, S, R}
f::T
a::S
b::R
end
is_separable(::Type{<:Tilt{T}}) where T = is_separable(T)
is_prox_accurate(::Type{<:Tilt{T}}) where T = is_prox_accurate(T)
is_convex(::Type{<:Tilt{T}}) where T = is_convex(T)
is_singleton(::Type{<:Tilt{T}}) where T = is_singleton(T)
is_smooth(::Type{<:Tilt{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:Tilt{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:Tilt{T}}) where T = is_strongly_convex(T)
Tilt(f::T, a::S) where {T, S} = Tilt{T, S, real(eltype(S))}(f, a, real(eltype(S))(0))
function (g::Tilt)(x)
return g.f(x) + real(dot(g.a, x)) + g.b
end
function prox!(y, g::Tilt, x, gamma)
v = prox!(y, g.f, x .- gamma .* g.a, gamma)
return v + real(dot(g.a, y)) + g.b
end
function prox_naive(g::Tilt, x, gamma)
y, v = prox_naive(g.f, x .- gamma .* g.a, gamma)
return y, v + real(dot(g.a, y)) + g.b
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1184 | export Translate
"""
Translate(f, b)
Return the translated function
```math
g(x) = f(x + b)
```
"""
struct Translate{T, V}
f::T
b::V
end
is_separable(::Type{<:Translate{T}}) where T = is_separable(T)
is_prox_accurate(::Type{<:Translate{T}}) where T = is_prox_accurate(T)
is_convex(::Type{<:Translate{T}}) where T = is_convex(T)
is_set(::Type{<:Translate{T}}) where T = is_set(T)
is_singleton(::Type{<:Translate{T}}) where T = is_singleton(T)
is_cone(::Type{<:Translate{T}}) where T = is_cone(T)
is_affine(::Type{<:Translate{T}}) where T = is_affine(T)
is_smooth(::Type{<:Translate{T}}) where T = is_smooth(T)
is_generalized_quadratic(::Type{<:Translate{T}}) where T = is_generalized_quadratic(T)
is_strongly_convex(::Type{<:Translate{T}}) where T = is_strongly_convex(T)
function (g::Translate)(x)
return g.f(x .+ g.b)
end
function gradient!(y, g::Translate, x)
z = x .+ g.b
v = gradient!(y, g.f, z)
return v
end
function prox!(y, g::Translate, x, gamma)
z = x .+ g.b
v = prox!(y, g.f, z, gamma)
y .-= g.b
return v
end
function prox_naive(g::Translate, x, gamma)
y, v = prox_naive(g.f, x .+ g.b, gamma)
return y - g.b, v
end
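# Illustrative sketch: translation shifts the prox, since g(x) = f(x + b)
# implies prox_g(x) = prox_f(x + b) - b:
#
# g = Translate(NormL1(), ones(3))
# y, gy = prox(g, zeros(3), 0.5)   # y == fill(-0.5, 3)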
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1050 | # Cross Entropy loss function
export CrossEntropy
"""
CrossEntropy(b)
Return the function
```math
f(x) = -\\frac{1}{N} \\sum_{i = 1}^{N} b_i \\log (x_i)+(1-b_i) \\log (1-x_i),
```
where `b` is an array of length `N` such that `0 ≤ b ≤ 1` component-wise.
"""
struct CrossEntropy{T}
b::T
function CrossEntropy{T}(b::T) where T
if !(all(0 .<= b .<= 1. ))
error("b must be 0 ≤ b ≤ 1 ")
else
new(b)
end
end
end
is_convex(f::Type{<:CrossEntropy}) = true
is_smooth(f::Type{<:CrossEntropy}) = true
CrossEntropy(b::T) where {T} = CrossEntropy{T}(b)
function (f::CrossEntropy)(x)
fsum = eltype(x)(0)
for i in eachindex(f.b)
fsum += f.b[i]*log(x[i])+(1-f.b[i])*log(1-x[i])
end
return -fsum/length(f.b)
end
function gradient!(y, f::CrossEntropy, x)
fsum = eltype(x)(0)
for i in eachindex(x)
y[i] = 1/length(f.b)*( - f.b[i]/x[i] + (1-f.b[i])/(1-x[i]) )
fsum += f.b[i]*log(x[i])+(1-f.b[i])*log(1-x[i])
end
return -1/length(f.b)*fsum
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1064 | # cubic L2 norm (times a constant)
export CubeNormL2
"""
CubeNormL2(λ=1)
With a nonnegative scalar `λ`, return the function
```math
f(x) = λ\\|x\\|^3.
```
"""
struct CubeNormL2{R}
lambda::R
function CubeNormL2{R}(lambda::R) where R
if lambda < 0
error("coefficient λ must be nonnegative")
else
new(lambda)
end
end
end
is_convex(f::Type{<:CubeNormL2}) = true
is_smooth(f::Type{<:CubeNormL2}) = true
CubeNormL2(lambda::R=1) where R = CubeNormL2{R}(lambda)
function (f::CubeNormL2)(x)
return f.lambda * norm(x)^3
end
function gradient!(y, f::CubeNormL2, x)
norm_x = norm(x)
y .= (3 * f.lambda * norm_x) .* x
return f.lambda * norm_x^3
end
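# The prox below solves min_y λ‖y‖³ + ‖y - x‖²/(2γ): the minimizer is a radial
# scaling y = s*x with 3γλ‖x‖s² + s - 1 = 0, whose positive root is
# s = 2/(1 + sqrt(1 + 12γλ‖x‖)), the `scale` computed here.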
function prox!(y, f::CubeNormL2, x, gamma)
norm_x = norm(x)
scale = 2 / (1 + sqrt(1 + 12 * gamma * f.lambda * norm_x))
y .= scale .* x
return f.lambda * (scale * norm_x)^3
end
function prox_naive(f::CubeNormL2, x, gamma)
y = 2 / (1 + sqrt(1 + 12 * gamma * f.lambda * norm(x))) * x
return y, f.lambda * norm(y)^3
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2852 | # elastic-net regularization
export ElasticNet
"""
ElasticNet(μ=1, λ=1)
Return the function
```math
f(x) = μ\\|x\\|_1 + (λ/2)\\|x\\|^2,
```
for nonnegative parameters `μ` and `λ`.
"""
struct ElasticNet{R, S}
mu::R
lambda::S
function ElasticNet{R, S}(mu::R, lambda::S) where {R, S}
if lambda < 0 || mu < 0
error("parameters `μ` and `λ` must be nonnegative")
else
new(mu, lambda)
end
end
end
is_separable(f::Type{<:ElasticNet}) = true
is_prox_accurate(f::Type{<:ElasticNet}) = true
is_convex(f::Type{<:ElasticNet}) = true
ElasticNet(mu::R=1, lambda::S=1) where {R, S} = ElasticNet{R, S}(mu, lambda)
function (f::ElasticNet)(x)
R = real(eltype(x))
return f.mu * norm(x, 1) + f.lambda / R(2) * norm(x, 2)^2
end
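# The prox below is soft-thresholding at level γμ (from the L1 term) followed
# by the shrinkage factor 1/(1 + γλ) coming from the quadratic term.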
function prox!(y, f::ElasticNet, x, gamma)
R = real(eltype(x))
sqnorm2x = R(0)
norm1x = R(0)
gm = gamma * f.mu
gl = gamma * f.lambda
for i in eachindex(x)
y[i] = (x[i] + (x[i] <= -gm ? gm : (x[i] >= gm ? -gm : -x[i])))/(1 + gl)
sqnorm2x += abs2(y[i])
norm1x += abs(y[i])
end
return f.mu * norm1x + f.lambda / R(2) * sqnorm2x
end
function prox!(y, f::ElasticNet, x, gamma::AbstractArray)
R = real(eltype(x))
sqnorm2x = R(0)
norm1x = R(0)
for i in eachindex(x)
gm = gamma[i] * f.mu
gl = gamma[i] * f.lambda
y[i] = (x[i] + (x[i] <= -gm ? gm : (x[i] >= gm ? -gm : -x[i])))/(1 + gl)
sqnorm2x += abs2(y[i])
norm1x += abs(y[i])
end
return f.mu * norm1x + f.lambda / R(2) * sqnorm2x
end
function prox!(y, f::ElasticNet, x::AbstractArray{<:Complex}, gamma)
R = real(eltype(x))
sqnorm2x = R(0)
norm1x = R(0)
gm = gamma * f.mu
gl = gamma * f.lambda
for i in eachindex(x)
y[i] = sign(x[i]) * max(0, abs(x[i]) - gm)/(1 + gl)
sqnorm2x += abs2(y[i])
norm1x += abs(y[i])
end
return f.mu * norm1x + f.lambda / R(2) * sqnorm2x
end
function prox!(y, f::ElasticNet, x::AbstractArray{<:Complex}, gamma::AbstractArray)
R = real(eltype(x))
sqnorm2x = R(0)
norm1x = R(0)
for i in eachindex(x)
gm = gamma[i] * f.mu
gl = gamma[i] * f.lambda
y[i] = sign(x[i]) * max(0, abs(x[i]) - gm)/(1 + gl)
sqnorm2x += abs2(y[i])
norm1x += abs(y[i])
end
return f.mu * norm1x + f.lambda / R(2) * sqnorm2x
end
function gradient!(y, f::ElasticNet, x)
R = real(eltype(x))
# Gradient of 1 norm
y .= f.mu .* sign.(x)
# Gradient of 2 norm
y .+= f.lambda .* x
return f.mu * norm(x, 1) + f.lambda / R(2) * norm(x, 2)^2
end
function prox_naive(f::ElasticNet, x, gamma)
R = real(eltype(x))
uz = max.(0, abs.(x) .- gamma .* f.mu)./(1 .+ f.lambda .* gamma)
return sign.(x) .* uz, f.mu * norm(uz, 1) + f.lambda / R(2) * norm(uz)^2
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 335 | # Hinge loss function
export HingeLoss
"""
HingeLoss(y, μ=1)
Return the function
```math
f(x) = μ⋅∑_i \\max\\{0, 1 - y_i ⋅ x_i\\},
```
where `y` is an array and `μ` is a positive parameter.
"""
HingeLoss(y) = PrecomposeDiagonal(SumPositive(), -y, 1)
HingeLoss(y, mu) = Postcompose(PrecomposeDiagonal(SumPositive(), -y, 1), mu)
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1920 | # Huber loss function
using LinearAlgebra
export HuberLoss
"""
HuberLoss(ρ=1, μ=1)
Return the function
```math
f(x) = \\begin{cases}
\\tfrac{μ}{2}\\|x\\|^2 & \\text{if}\\ \\|x\\| ⩽ ρ \\\\
ρμ(\\|x\\| - \\tfrac{ρ}{2}) & \\text{otherwise},
\\end{cases}
```
where `ρ` and `μ` are positive parameters.
"""
struct HuberLoss{R, S}
rho::R
mu::S
function HuberLoss{R, S}(rho::R, mu::S) where {R, S}
if rho <= 0 || mu <= 0
error("parameters rho and mu must be positive")
else
new(rho, mu)
end
end
end
is_convex(f::Type{<:HuberLoss}) = true
is_smooth(f::Type{<:HuberLoss}) = true
HuberLoss(rho::R=1, mu::S=1) where {R, S} = HuberLoss{R, S}(rho, mu)
function (f::HuberLoss)(x)
R = real(eltype(x))
normx = norm(x)
if normx <= f.rho
return f.mu / R(2) * normx^2
else
return f.rho * f.mu * (normx - f.rho / R(2))
end
end
function gradient!(y, f::HuberLoss, x)
R = real(eltype(x))
normx = norm(x)
if normx <= f.rho
y .= f.mu .* x
v = f.mu / R(2) * normx^2
else
y .= (f.mu * f.rho) / normx .* x
v = f.rho * f.mu * (normx - f.rho / R(2))
end
return v
end
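# The prox below is a radial shrinkage: in the quadratic region
# (norm(x) <= rho*(1 + mu*gamma)) it scales x by 1/(1 + mu*gamma); in the
# linear region it shrinks the norm of x by mu*gamma*rho.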
function prox!(y, f::HuberLoss, x, gamma)
R = real(eltype(x))
normx = norm(x)
mugam = f.mu * gamma
scal = (R(1) - min(mugam / (R(1) + mugam), mugam * f.rho / normx))
for k in eachindex(y)
y[k] = scal*x[k]
end
normy = scal*normx
if normy <= f.rho
return f.mu / R(2) * normy^2
else
return f.rho * f.mu * (normy - f.rho / R(2))
end
end
function prox_naive(f::HuberLoss, x, gamma)
R = real(eltype(x))
y = (R(1) - min(f.mu * gamma / (R(1) + f.mu * gamma), f.mu * gamma * f.rho / norm(x))) * x
if norm(y) <= f.rho
return y, f.mu / R(2) * norm(y)^2
else
return y, f.rho * f.mu * (norm(y) - f.rho / R(2))
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1281 | # indicator of an affine set
using LinearAlgebra
using SparseArrays
using SuiteSparse
export IndAffine
### ABSTRACT TYPE
abstract type IndAffine end
is_affine(f::Type{<:IndAffine}) = true
is_generalized_quadratic(f::Type{<:IndAffine}) = true
fun_name(f::IndAffine) = "Indicator of an affine subspace"
### CONSTRUCTORS
"""
IndAffine(A, b; iterative=false)
If `A` is a matrix (dense or sparse) and `b` is a vector, return the indicator function of the affine set
```math
S = \\{x : Ax = b\\}.
```
If `A` is a vector and `b` is a scalar, return the indicator function of the set
```math
S = \\{x : \\langle A, x \\rangle = b\\}.
```
By default, a direct method (QR factorization of matrix `A'`) is used to evaluate `prox!`.
If `iterative=true`, then `prox!` is evaluated approximately using an iterative method instead.
"""
function IndAffine(A::M, b::V; iterative=false) where {M, V}
if iterative == false
IndAffineDirect(A, b)
else
IndAffineIterative(A, b)
end
end
### INCLUDE CONCRETE TYPES
include("indAffineDirect.jl")
include("indAffineIterative.jl")
function prox_naive(f::IndAffine, x, gamma)
y = x + f.A'*((f.A*f.A')\(f.b - f.A*x))
return y, real(eltype(x))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2518 | ### CONCRETE TYPE: DIRECT PROX EVALUATION
# prox! is computed using a QR factorization of A'.
using LinearAlgebra: QRCompactWY
struct IndAffineDirect{M, V, F} <: IndAffine
A::M
b::V
fact::F
res::V
function IndAffineDirect{M, V, F}(A::M, b::V) where {M, V, F}
if size(A, 1) > size(A, 2)
error("A must be full row rank")
end
normrowsinv = 1 ./ vec(sqrt.(sum(abs2.(A); dims=2)))
A = normrowsinv.*A # normalize rows of A
b = normrowsinv.*b # and b accordingly
fact = qr(M(A'))
new(A, b, fact, similar(b))
end
end
IndAffineDirect(A::M, b::V) where {T, M <: DenseMatrix{T}, V} = IndAffineDirect{M, V, QRCompactWY{T, M}}(A, b)
IndAffineDirect(A::M, b::V) where {T, M <: SparseMatrixCSC{T}, V} = IndAffineDirect{M, V, SuiteSparse.SPQR.Factorization{T}}(A, b)
IndAffineDirect(a::V, b::T) where {T, V <: AbstractVector{T}} = IndAffineDirect(reshape(a,1,:), [b])
factorization_type(::IndAffineDirect{M, V, F}) where {M, V, F} = F
function (f::IndAffineDirect)(x)
R = real(eltype(x))
mul!(f.res, f.A, x)
f.res .= f.b .- f.res
# the tolerance in the following line should be customizable
if norm(f.res, Inf) <= sqrt(eps(R))
return R(0)
end
return typemax(R)
end
prox!(y, f::IndAffineDirect, x, gamma) = prox!(factorization_type(f), y, f, x, gamma)
function prox!(::Type{<:QRCompactWY}, y, f::IndAffineDirect, x, gamma)
mul!(f.res, f.A, x)
f.res .= f.b .- f.res
Rfact = view(f.fact.factors, 1:length(f.b), 1:length(f.b))
LinearAlgebra.LAPACK.trtrs!('U', 'C', 'N', Rfact, f.res)
LinearAlgebra.LAPACK.trtrs!('U', 'N', 'N', Rfact, f.res)
mul!(y, adjoint(f.A), f.res)
y .+= x
return real(eltype(x))(0)
end
function prox!(::Type{<:SuiteSparse.SPQR.Factorization}, y, f::IndAffineDirect, x, gamma)
mul!(f.res, f.A, x)
f.res .= f.b .- f.res
##############################################################################
# We need to solve for z: AA'z = res
# We have: QR=PA'S so A'=P'QRS' and AA'=SR'Q'PP'QRS'=SR'RS'
# So: z = S'\R\R'\S\res
# TODO: the following lines should be made more efficient
temp = f.res[f.fact.pcol]
temp = LowerTriangular(adjoint(f.fact.R))\temp
temp = UpperTriangular(f.fact.R)\temp
RRres = similar(temp)
RRres[f.fact.pcol] .= temp
##############################################################################
mul!(y, adjoint(f.A), RRres)
y .+= x
return real(eltype(x))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1453 | ### CONCRETE TYPE: ITERATIVE PROX EVALUATION
struct IndAffineIterative{M, V} <: IndAffine
A::M
b::V
res::V
function IndAffineIterative{M, V}(A::M, b::V) where {M, V}
if size(A,1) > size(A,2)
error("A must be full row rank")
end
normrowsinv = 1 ./ vec(sqrt.(sum(abs2.(A); dims=2)))
A = normrowsinv.*A # normalize rows of A
b = normrowsinv.*b # and b accordingly
new(A, b, similar(b))
end
end
is_prox_accurate(f::Type{<:IndAffineIterative}) = false
IndAffineIterative(A::M, b::V) where {M, V} = IndAffineIterative{M, V}(A, b)
function (f::IndAffineIterative{M, V})(x) where {M, V}
R = real(eltype(x))
mul!(f.res, f.A, x)
f.res .= f.b .- f.res
# the tolerance in the following line should be customizable
if norm(f.res, Inf) <= sqrt(eps(R))
return R(0)
end
return typemax(R)
end
function prox!(y, f::IndAffineIterative{M, V}, x, gamma) where {M, V}
# Von Neumann's alternating projections
R = real(eltype(x))
y .= x
for k = 1:1000
maxres = R(0)
for i in eachindex(f.b)
resi = f.b[i] - dot(f.A[i,:], y)
y .+= resi .* f.A[i,:] # no need to divide: rows of A are normalized
absresi = resi > 0 ? resi : -resi
maxres = absresi > maxres ? absresi : maxres
end
if maxres < sqrt(eps(R))
break
end
end
return R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1354 | # indicator of the L0 norm ball with given (integer) radius
export IndBallL0
"""
IndBallL0(r=1)
Return the indicator function of the ``L_0`` pseudo-norm ball
```math
S = \\{ x : \\mathrm{nnz}(x) \\leq r \\}.
```
Parameter `r` must be a positive integer.
"""
struct IndBallL0{I}
r::I
function IndBallL0{I}(r::I) where {I}
if r <= 0
error("parameter r must be a positive integer")
else
new(r)
end
end
end
is_set(f::Type{<:IndBallL0}) = true
IndBallL0(r::I) where {I} = IndBallL0{I}(r)
function (f::IndBallL0)(x)
R = real(eltype(x))
if count(!isequal(0), x) > f.r
return R(Inf)
end
return R(0)
end
function _get_top_k_abs_indices(x::AbstractVector, k)
range = firstindex(x):(firstindex(x) + k - 1)
return partialsortperm(x, range, by=abs, rev=true)
end
_get_top_k_abs_indices(x, k) = _get_top_k_abs_indices(x[:], k)
function prox!(y, f::IndBallL0, x, gamma)
T = eltype(x)
p = _get_top_k_abs_indices(x, f.r)
y .= T(0)
for i in eachindex(p)
y[p[i]] = x[p[i]]
end
return real(T)(0)
end
function prox_naive(f::IndBallL0, x, gamma)
T = eltype(x)
p = sortperm(abs.(x)[:], rev=true)
y = similar(x)
y[p[begin:begin+f.r-1]] .= x[p[begin:begin+f.r-1]]
y[p[begin+f.r:end]] .= T(0)
return y, real(T)(0)
end
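# Illustrative sketch (helper not part of the package API): projecting onto
# the set of vectors with at most two nonzeros keeps the two entries of
# largest magnitude and zeroes the rest; gamma plays no role for indicators.
function _ind_ball_l0_example()
    f = IndBallL0(2)
    y, _ = prox(f, [1.0, -3.0, 0.5, 2.0], 1.0) # y == [0.0, -3.0, 0.0, 2.0]
    return y
end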
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2008 | # indicator of the L1 norm ball with given radius
export IndBallL1
"""
IndBallL1(r=1.0)
Return the indicator function of the ``L_1`` norm ball
```math
S = \\left\\{ x : \\sum_i |x_i| \\leq r \\right\\}.
```
Parameter `r` must be positive.
"""
struct IndBallL1{R}
r::R
function IndBallL1{R}(r::R) where R
if r <= 0
error("parameter r must be positive")
else
new(r)
end
end
end
is_convex(f::Type{<:IndBallL1}) = true
is_set(f::Type{<:IndBallL1}) = true
is_prox_accurate(f::Type{<:IndBallL1}) = false
IndBallL1(r::R=1.0) where R = IndBallL1{R}(r)
function (f::IndBallL1)(x)
R = real(eltype(x))
if norm(x, 1) - f.r > f.r*eps(R)
return R(Inf)
end
return R(0)
end
function prox!(y, f::IndBallL1, x::AbstractArray{<:Real}, gamma)
R = eltype(x)
if norm(x, 1) <= f.r
y .= x
return R(0)
else # do a projection of abs(x) onto simplex then recover signs
abs_x = abs.(x)
simplex_proj_condat!(y, f.r, abs_x)
y .*= sign.(x)
return R(0)
end
end
function prox!(y, f::IndBallL1, x::AbstractArray{<:Complex}, gamma)
R = real(eltype(x))
if norm(x, 1) <= f.r
y .= x
return R(0)
else # do a projection of abs(x) onto simplex then recover signs
abs_x = real.(abs.(x))
y_temp = similar(abs_x)
simplex_proj_condat!(y_temp, f.r, abs_x)
y .= y_temp .* sign.(x)
return R(0)
end
end
function prox_naive(f::IndBallL1, x, gamma)
R = real(eltype(x))
# do a simple bisection (aka binary search) on λ
L = R(0)
U = maximum(abs, x)
λ = L
v = R(0)
maxit = 120
for _ in 1:maxit
λ = (L + U) / 2
v = sum(max.(abs.(x) .- λ, R(0)))
# modify lower or upper bound
(v < f.r) ? U = λ : L = λ
# exit condition
if abs(L - U) < (1 + abs(U))*eps(R)
break
end
end
return sign.(x) .* max.(R(0), abs.(x) .- λ), R(0)
end
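# Illustrative sketch (helper not part of the package API): x = [1.0, 1.0]
# has norm(x, 1) = 2 > 1, so the projection lands on the boundary of the
# unit L1 ball.
function _ind_ball_l1_example()
    f = IndBallL1(1.0)
    y, _ = prox(f, [1.0, 1.0], 1.0) # y == [0.5, 0.5]
    return y
end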
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1173 | # indicator of the L2 norm ball with given radius
export IndBallL2
"""
IndBallL2(r=1.0)
Return the indicator function of the Euclidean ball
```math
S = \\{ x : \\|x\\| \\leq r \\},
```
where ``\\|\\cdot\\|`` is the ``L_2`` (Euclidean) norm. Parameter `r` must be positive.
"""
struct IndBallL2{R}
r::R
function IndBallL2{R}(r::R) where {R}
if r <= 0
error("parameter r must be positive")
else
new(r)
end
end
end
is_convex(f::Type{<:IndBallL2}) = true
is_set(f::Type{<:IndBallL2}) = true
IndBallL2(r::R=1) where R = IndBallL2{R}(r)
function (f::IndBallL2)(x)
R = real(eltype(x))
if isapprox_le(norm(x), f.r, atol=eps(R), rtol=sqrt(eps(R)))
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndBallL2, x, gamma)
R = real(eltype(x))
scal = f.r/norm(x)
if scal > 1
y .= x
return R(0)
end
for k in eachindex(x)
y[k] = scal*x[k]
end
return R(0)
end
function prox_naive(f::IndBallL2, x, gamma)
normx = norm(x)
if normx > f.r
y = (f.r/normx)*x
else
y = x
end
return y, real(eltype(x))(0)
end
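# Illustrative sketch (helper not part of the package API): points outside
# the ball are rescaled onto the boundary, so [3.0, 4.0] (norm 5) maps to
# [0.6, 0.8] for r = 1; points inside are left unchanged.
function _ind_ball_l2_example()
    f = IndBallL2(1.0)
    y, _ = prox(f, [3.0, 4.0], 1.0) # y == [0.6, 0.8]
    return y
end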
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1538 | # indicator of the ball of matrices with (at most) a given rank
using LinearAlgebra
using TSVD
export IndBallRank
"""
IndBallRank(r=1)
Return the indicator function of the set of matrices of rank at most `r`:
```math
S = \\{ X : \\mathrm{rank}(X) \\leq r \\}.
```
Parameter `r` must be a positive integer.
"""
struct IndBallRank{I}
r::I
function IndBallRank{I}(r::I) where {I}
if r <= 0
error("parameter r must be a positive integer")
else
new(r)
end
end
end
is_set(f::Type{<:IndBallRank}) = true
is_prox_accurate(f::Type{<:IndBallRank}) = false
IndBallRank(r::I=1) where I = IndBallRank{I}(r)
function (f::IndBallRank)(x)
R = real(eltype(x))
maxr = minimum(size(x))
if maxr <= f.r return R(0) end
U, S, V = tsvd(x, f.r+1)
# the tolerance in the following line should be customizable
if S[end]/S[1] <= 1e-7
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndBallRank, x, gamma)
R = real(eltype(x))
maxr = minimum(size(x))
if maxr <= f.r
y .= x
return R(0)
end
U, S, V = tsvd(x, f.r)
# TODO: the order of the following matrix products should depend on the shape of x
M = S .* V'
mul!(y, U, M)
return R(0)
end
function prox_naive(f::IndBallRank, x, gamma)
R = real(eltype(x))
maxr = minimum(size(x))
if maxr <= f.r
y = x
return y, R(0)
end
F = svd(x)
y = F.U[:,1:f.r]*(Diagonal(F.S[1:f.r])*F.V[:,1:f.r]')
return y, R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1467 | # indicator of the Cartesian product of real binary sets
export IndBinary
"""
IndBinary(low, up)
Return the indicator function of the set
```math
S = \\{ x : x_i = low_i\\ \\text{or}\\ x_i = up_i \\}.
```
Parameters `low` and `up` can be either scalars or arrays of the same dimension as the space.
"""
struct IndBinary{T, S}
low::T
high::S
end
is_set(f::Type{<:IndBinary}) = true
IndBinary() = IndBinary(0, 1)
IndBinary_low(f::IndBinary{<: Number, S}, i) where S = f.low
IndBinary_low(f::IndBinary{T, S}, i) where {T, S} = f.low[i]
IndBinary_high(f::IndBinary{T, <: Number}, i) where T = f.high
IndBinary_high(f::IndBinary{T, S}, i) where {T, S} = f.high[i]
function (f::IndBinary)(x)
R = real(eltype(x))
for k in eachindex(x)
if x[k] != IndBinary_low(f, k) && x[k] != IndBinary_high(f, k)
return R(Inf)
end
end
return R(0)
end
function prox!(y, f::IndBinary, x, gamma)
for k in eachindex(x)
low = eltype(y)(IndBinary_low(f, k))
high = eltype(y)(IndBinary_high(f, k))
if abs(x[k] - low) < abs(x[k] - high)
y[k] = low
else
y[k] = high
end
end
return real(eltype(x))(0)
end
function prox_naive(f::IndBinary, x, gamma)
distlow = abs.(x .- f.low)
disthigh = abs.(x .- f.high)
indlow = distlow .< disthigh
indhigh = distlow .>= disthigh
y = f.low.*indlow + f.high.*indhigh
return y, real(eltype(x))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2123 | # indicator of a generic box
export IndBox, IndBallLinf
"""
IndBox(low, up)
Return the indicator function of the box
```math
S = \\{ x : low \\leq x \\leq up \\}.
```
Parameters `low` and `up` can be either scalars or arrays of the same dimension as the space: they must satisfy `low <= up`, and are allowed to take values `-Inf` and `+Inf` to indicate unbounded coordinates.
"""
struct IndBox{T, S}
lb::T
ub::S
function IndBox{T,S}(lb::T, ub::S) where {T, S}
if !(eltype(lb) <: Real && eltype(ub) <: Real)
error("`lb` and `ub` must be real")
end
if any(lb .> ub)
error("`lb` and `ub` must satisfy `lb <= ub`")
else
new(lb, ub)
end
end
end
is_separable(f::Type{<:IndBox}) = true
is_convex(f::Type{<:IndBox}) = true
is_set(f::Type{<:IndBox}) = true
compatible_bounds(::Real, ::Real) = true
compatible_bounds(::Real, ::AbstractArray) = true
compatible_bounds(::AbstractArray, ::Real) = true
compatible_bounds(lb::AbstractArray, ub::AbstractArray) = size(lb) == size(ub)
IndBox(lb, ub) = if compatible_bounds(lb, ub)
IndBox{typeof(lb), typeof(ub)}(lb, ub)
else
error("bounds must have the same dimensions, or at least one of them be scalar")
end
function (f::IndBox)(x)
R = eltype(x)
for k in eachindex(x)
if x[k] < get_kth_elem(f.lb, k) || x[k] > get_kth_elem(f.ub, k)
return R(Inf)
end
end
return R(0)
end
function prox!(y, f::IndBox, x, gamma)
for k in eachindex(x)
if x[k] < get_kth_elem(f.lb, k)
y[k] = get_kth_elem(f.lb, k)
elseif x[k] > get_kth_elem(f.ub, k)
y[k] = get_kth_elem(f.ub, k)
else
y[k] = x[k]
end
end
return eltype(x)(0)
end
"""
**Indicator of a ``L_∞`` norm ball**
IndBallLinf(r=1.0)
Return the indicator function of the set
```math
S = \\{ x : \\max (|x_i|) \\leq r \\}.
```
Parameter `r` must be positive.
"""
IndBallLinf(r::R=1) where R = IndBox(-r, r)
function prox_naive(f::IndBox, x, gamma)
y = min.(f.ub, max.(f.lb, x))
return y, eltype(x)(0)
end
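# Illustrative sketch (helper not part of the package API): projection onto a
# box clips each coordinate independently; IndBallLinf(r) is just
# IndBox(-r, r).
function _ind_box_example()
    f = IndBox(-1.0, 2.0)
    y, _ = prox(f, [3.0, -5.0, 0.5], 1.0) # y == [2.0, -1.0, 0.5]
    return y
end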
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 4829 | # indicator of the (primal) exponential cone
# the dual exponential cone is obtained through calculus rules
export IndExpPrimal, IndExpDual
"""
IndExpPrimal()
Return the indicator function of the primal exponential cone, that is
```math
C = \\mathrm{cl} \\{ (r,s,t) : s > 0, s⋅e^{r/s} \\leq t \\} \\subset \\mathbb{R}^3.
```
"""
struct IndExpPrimal end
is_convex(f::Type{<:IndExpPrimal}) = true
is_cone(f::Type{<:IndExpPrimal}) = true
"""
IndExpDual()
Return the indicator function of the dual exponential cone, that is
```math
C = \\mathrm{cl} \\{ (u,v,w) : u < 0, -u⋅e^{v/u} \\leq w⋅e \\} \\subset \\mathbb{R}^3.
```
"""
IndExpDual() = PrecomposeDiagonal(Conjugate(IndExpPrimal()), -1)
const EXP_PRIMAL_CALL_TOL = 1e-6
const EXP_POLAR_CALL_TOL = 1e-3
const EXP_PROJ_TOL = 1e-15
const EXP_PROJ_MAXIT = 100
function (::IndExpPrimal)(x)
R = real(eltype(x))
if (x[2] > 0 && x[2]*exp(x[1]/x[2]) <= x[3]+EXP_PRIMAL_CALL_TOL) ||
(x[1] <= EXP_PRIMAL_CALL_TOL && abs(x[2]) <= EXP_PRIMAL_CALL_TOL && x[3] >= -EXP_PRIMAL_CALL_TOL)
return R(0)
end
return R(Inf)
end
function (::Conjugate{IndExpPrimal})(x)
R = real(eltype(x))
if (x[1] > 0 && x[1]*exp(x[2]/x[1]) <= -exp(1)*x[3]+EXP_POLAR_CALL_TOL) ||
(abs(x[1]) <= EXP_POLAR_CALL_TOL && x[2] <= EXP_POLAR_CALL_TOL && x[3] <= EXP_POLAR_CALL_TOL)
return R(0)
end
return R(Inf)
end
# Projection onto the cone is performed as in SCS (https://github.com/cvxgrp/scs).
# See the following copyright and permission notices.
# The MIT License (MIT)
#
# Copyright (c) 2012 Brendan O'Donoghue ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
function prox!(y, ::IndExpPrimal, x, gamma)
R = real(eltype(x))
r = x[1]
s = x[2]
t = x[3]
if (s*exp(r/s) <= t && s > 0) || (r <= 0 && s == 0 && t >= 0)
# x in the cone
y .= x
elseif (-r < 0 && r*exp(s/r) <= -exp(1)*t) || (-r == 0 && -s >= 0 && -t >= 0)
# -x in the dual cone (x in the polar cone)
y .= R(0)
elseif r < 0 && s < 0
# analytical solution
y[1] = x[1]
y[2] = max(x[2], 0.0)
y[3] = max(x[3], 0.0)
else
v = x
ub, lb = getRhoUb(x)
for iter = 1:EXP_PROJ_MAXIT
rho = (ub + lb)/2
g, v = calcGrad(x,rho)
if g > 0
lb = rho
else
ub = rho
end
if ub - lb <= EXP_PROJ_TOL
break
end
end
y .= v
end
return R(0)
end
function getRhoUb(v)
lb = 0
rho = 2.0^(-3)
g, z = calcGrad(v, rho)
while g > 0
lb = rho
rho = rho*2
g, z = calcGrad(v, rho)
end
ub = rho
return ub, lb
end
function calcGrad(v, rho)
x = solve_with_rho(v, rho)
if x[2] == 0.0
g = x[1]
else
g = x[1] + x[2]*log(x[2]/x[3])
end
return g, x
end
function solve_with_rho(v, rho)
x = zeros(3)
x[3] = newton_exp_onz(rho, v[2], v[3])
x[2] = (1/rho)*(x[3] - v[3])*x[3]
x[1] = v[1] - rho
return x
end
function newton_exp_onz(rho, y_hat, z_hat)
t = max(-z_hat,EXP_PROJ_TOL)
for _ in 1:EXP_PROJ_MAXIT
f = (1.0/rho^2)*t*(t + z_hat) - y_hat/rho + log(t/rho) + 1.0
fp = (1.0/rho^2)*(2.0*t + z_hat) + 1.0/t
t = t - f/fp
if t <= -z_hat
t = -z_hat
break
elseif t <= 0
t = 0
break
elseif abs(f) <= EXP_PROJ_TOL
break
end
end
z = t + z_hat
return z
end
prox_naive(f::IndExpPrimal, x, gamma) = prox(f, x, gamma) # we don't have a much simpler way to do this yet
prox_naive(f::PrecomposeDiagonal{Conjugate{IndExpPrimal}}, x, gamma) = prox(f, x, gamma) # we don't have a much simpler way to do this yet
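# Illustrative sketch (helper not part of the package API): the point
# (r, s, t) = (0, 1, 1) satisfies s > 0 and s*exp(r/s) = 1 <= t, so it lies
# in the primal cone and is a fixed point of the projection.
function _ind_exp_primal_example()
    f = IndExpPrimal()
    y, _ = prox(f, [0.0, 1.0, 1.0], 1.0) # y == [0.0, 1.0, 1.0]
    return y
end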
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 779 | # indicator of the free cone
export IndFree
"""
IndFree()
Return the indicator function of the whole space, or "free cone", *i.e.*,
a function which is identically zero.
"""
struct IndFree end
is_separable(f::Type{<:IndFree}) = true
is_convex(f::Type{<:IndFree}) = true
is_affine(f::Type{<:IndFree}) = true
is_cone(f::Type{<:IndFree}) = true
is_smooth(f::Type{<:IndFree}) = true
is_generalized_quadratic(f::Type{<:IndFree}) = true
const Zero = IndFree
function (::IndFree)(x)
return real(eltype(x))(0)
end
function prox!(y, ::IndFree, x, gamma)
y .= x
return real(eltype(x))(0)
end
function gradient!(y, ::IndFree, x)
T = eltype(x)
y .= T(0)
return real(T)(0)
end
function prox_naive(::IndFree, x, gamma)
return x, real(eltype(x))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2035 | export IndGraph
abstract type IndGraph end
"""
IndGraph(A)
For a matrix `A` (dense or sparse), return the indicator function of its graph:
```math
G_A = \\{(x, y) : Ax = y\\}.
```
The evaluation of `prox!` uses direct methods based on LDLt (LL for dense cases) matrix factorization and backsolve.
The `prox!` method operates on pairs `(x, y)` as input/output. That is, if `f = IndGraph(A)` is the indicator of the graph ``G_A``,
and `(x, y)` and `(c, d)` are pairs of vectors of matching sizes, then
```
prox!((c, d), f, (x, y))
```
writes to `(c, d)` the projection onto ``G_A`` of `(x, y)`.
"""
function IndGraph(A::AbstractMatrix)
if issparse(A)
IndGraphSparse(A)
elseif size(A, 1) > size(A, 2)
IndGraphSkinny(A)
else
IndGraphFat(A)
end
end
is_convex(f::Type{<:IndGraph}) = true
is_set(f::Type{<:IndGraph}) = true
is_cone(f::Type{<:IndGraph}) = true
IndGraph(a::AbstractVector) = IndGraph(copy(a')) # materialize the adjoint row so the concrete constructors apply
# Auxiliary function to be used in fused input call
function splitinput(f::IndGraph, xy)
@assert length(xy) == f.m + f.n
x = view(xy, 1:f.n)
y = view(xy, (f.n + 1):(f.n + f.m))
return x, y
end
# call additional signatures
function (f::IndGraph)(xy::AbstractVector)
x, y = splitinput(f, xy)
return f(x, y)
end
(f::IndGraph)(xy::Tuple) = f(xy[1], xy[2])
# prox! additional signatures
function prox!(xy::AbstractVector, f::IndGraph, cd::AbstractVector, gamma)
x, y = splitinput(f, xy)
c, d = splitinput(f, cd)
prox!(x, y, f, c, d)
return real(eltype(cd))(0)
end
prox!(xy::Tuple, f::IndGraph, cd::Tuple, gamma) = prox!(xy[1], xy[2], f, cd[1], cd[2])
# prox_naive additional signatures
function prox_naive(f::IndGraph, cd::AbstractVector, gamma)
c, d = splitinput(f, cd)
x, y, fv = prox_naive(f, c, d, gamma)
return [x; y], fv
end
function prox_naive(f::IndGraph, cd::Tuple, gamma)
x, y, fv = prox_naive(f, cd[1], cd[2], gamma)
return (x, y), fv
end
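# Illustrative sketch (helper not part of the package API): for A = [1.0 1.0]
# the graph is {(x, y) : x_1 + x_2 = y}. Projecting c = [1, 1], d = [0] in the
# stacked-vector form gives y = (A*c)/(A*A' + 1) = 2/3 and
# x = c + A'*(d - y) = [1/3, 1/3].
function _ind_graph_example()
    f = IndGraph([1.0 1.0])
    xy, _ = prox(f, [1.0, 1.0, 0.0], 1.0) # xy ≈ [1/3, 1/3, 2/3]
    return xy
end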
include("indGraphSparse.jl")
include("indGraphFat.jl")
include("indGraphSkinny.jl")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1154 | # This implements the variant when A is dense and m < n
using LinearAlgebra
struct IndGraphFat{T} <: IndGraph
m::Int
n::Int
A::Matrix{T}
AA::Matrix{T}
tmp::Vector{T}
# tmpx::Vector{T}
F::Cholesky{T, Array{T, 2}} # LL factorization
end
function IndGraphFat(A::Matrix{T}) where T
m, n = size(A)
AA = A * A'
F = cholesky(AA + I)
#normrows = vec(sqrt.(sum(abs2.(A), 2)))
IndGraphFat(m, n, A, AA, Array{T, 1}(undef, m), F)
end
function (f::IndGraphFat)(x, y)
R = real(eltype(x))
# the tolerance in the following line should be customizable
mul!(f.tmp, f.A, x)
f.tmp .-= y
if norm(f.tmp, Inf) <= 1e-10
return R(0)
end
return R(Inf)
end
function prox!(x, y, f::IndGraphFat, c, d, gamma=1)
# y .= f.F \ (f.A * c + f.AA * d)
mul!(f.tmp, f.A, c)
mul!(y, f.AA, d)
y .+= f.tmp
ldiv!(f.F, y)
# f.A' * (d - y) + c # note: for complex the complex conjugate is used
copyto!(f.tmp, d)
f.tmp .-= y
mul!(x, adjoint(f.A), f.tmp)
x .+= c
return real(eltype(c))(0)
end
function prox_naive(f::IndGraphFat, c, d, gamma)
y = f.F \ (f.A * c + f.AA * d)
return c + f.A' * (d - y), y, real(eltype(c))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1277 | # This implements the variant when A is dense and m > n
using LinearAlgebra
struct IndGraphSkinny{T} <: IndGraph
m::Int
n::Int
A::Matrix{T}
AA::Matrix{T}
F::Cholesky{T, Matrix{T}} #LL factorization
tmp::Vector{T}
end
function IndGraphSkinny(A::Matrix{T}) where T
m, n = size(A)
AA = A' * A
F = cholesky(AA + I)
#normrows = vec(sqrt.(sum(abs2.(A), 2)))
# The tmp vector assumes that the difference between m and n is not drastic.
# If the solver were used with m >> n, tmp could be shrunk to the n entries
# needed for the prox computation, at the cost of a slightly less efficient
# indicator evaluation.
tmp = Vector{T}(undef, m)
IndGraphSkinny(m, n, A, AA, F, tmp)
end
function (f::IndGraphSkinny)(x, y)
R = real(eltype(x))
# the tolerance in the following line should be customizable
mul!(f.tmp, f.A, x)
f.tmp .-= y
if norm(f.tmp, Inf) <= 1e-10
return R(0)
end
return R(Inf)
end
function prox!(x, y, f::IndGraphSkinny, c, d, gamma=1)
# x[:] = f.F \ (c + f.A' * d)
mul!(x, adjoint(f.A), d)
x .+= c
ldiv!(f.F, x)
mul!(y, f.A, x)
return real(eltype(c))(0)
end
function prox_naive(f::IndGraphSkinny, c, d, gamma)
x = f.F \ (c + f.A' * d)
return x, f.A * x, real(eltype(c))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1555 | using LinearAlgebra
using SparseArrays
using SuiteSparse
struct IndGraphSparse{T, Ti} <: IndGraph
m::Int
n::Int
A::SparseMatrixCSC{T, Ti}
F::SuiteSparse.CHOLMOD.Factor{T} #LDL factorization
tmp::Vector{T}
tmpx::SubArray{T, 1, Vector{T}, Tuple{UnitRange{Int}}, true}
res::Vector{T}
end
function IndGraphSparse(A::SparseMatrixCSC{T,Ti}) where {T, Ti}
m, n = size(A)
K = [SparseMatrixCSC{T}(I, n, n) A'; A -SparseMatrixCSC{T}(I, m, m)]
F = ldlt(K)
tmp = Array{T, 1}(undef, m + n)
tmpx = view(tmp, 1:n)
tmpy = view(tmp, (n + 1):(n + m)) #second part is always zeros
fill!(tmpy, 0)
res = Array{T,1}(undef, m + n)
return IndGraphSparse(m, n, A, F, tmp, tmpx, res)
end
function (f::IndGraphSparse)(x, y)
R = real(eltype(x))
# the tolerance in the following line should be customizable
tmpy = view(f.res, 1:f.m) # the res is rewritten in prox!
mul!(tmpy, f.A, x)
tmpy .-= y
if norm(tmpy, Inf) <= 1e-12
return R(0)
end
return R(Inf)
end
function prox!(x, y, f::IndGraphSparse, c, d, gamma=1)
#instead of res = [c + f.A' * d; zeros(f.m)]
mul!(f.tmpx, adjoint(f.A), d)
f.tmpx .+= c
# in-place ldiv!(f.res, f.F, f.tmp) does not work here, so use backslash
f.res .= f.F \ f.tmp # note: f.tmp is the full (m + n)-vector
copyto!(x, 1, f.res, 1, f.n)
copyto!(y, 1, f.res, f.n + 1, f.m)
return real(eltype(c))(0)
end
function prox_naive(f::IndGraphSparse, c, d, gamma)
tmp = f.A'*d
tmp .+= c
res = [tmp; zeros(f.m)]
xy = f.F \ res
return xy[1:f.n], xy[f.n + 1:f.n + f.m], real(eltype(c))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1202 | # indicator of a halfspace
export IndHalfspace
"""
IndHalfspace(a, b)
For an array `a` and a scalar `b`, return the indicator of the half-space
```math
S = \\{x : \\langle a,x \\rangle \\leq b \\}.
```
"""
struct IndHalfspace{R, T}
a::T
b::R
norm_a::R
function IndHalfspace{R, T}(a::T, b::R) where {R, T}
norm_a = norm(a)
if isapprox(norm_a, 0) && b < 0
error("function is improper")
end
new(a, b, norm_a)
end
end
IndHalfspace(a::T, b::R) where {R, T} = IndHalfspace{R, T}(a, b)
is_convex(f::Type{<:IndHalfspace}) = true
is_set(f::Type{<:IndHalfspace}) = true
function (f::IndHalfspace)(x)
R = real(eltype(x))
if isapprox_le(dot(f.a, x), f.b, atol=eps(R), rtol=sqrt(eps(R)))
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndHalfspace, x, gamma)
R = real(eltype(x))
s = dot(f.a, x)
if s > f.b
y .= x .- ((s - f.b)/f.norm_a^2) .* f.a
else
copyto!(y, x)
end
return R(0)
end
function prox_naive(f::IndHalfspace, x, gamma)
R = real(eltype(x))
s = dot(f.a, x) - f.b
if s <= 0
return x, R(0)
end
return x - (s/norm(f.a)^2)*f.a, R(0)
end
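# Illustrative sketch (helper not part of the package API): [1.0, 1.0]
# violates x_1 + x_2 <= 1, so it is shifted along a by (s - b)/||a||^2 = 1/2.
function _ind_halfspace_example()
    f = IndHalfspace([1.0, 1.0], 1.0)
    y, _ = prox(f, [1.0, 1.0], 1.0) # y == [0.5, 0.5]
    return y
end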
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1658 | # indicator of a hyperslab
export IndHyperslab
"""
IndHyperslab(low, a, upp)
For an array `a` and scalars `low` and `upp`, return the indicator of the hyperslab
```math
S = \\{x : low \\leq \\langle a,x \\rangle \\leq upp \\}.
```
"""
struct IndHyperslab{R, T}
low::R
a::T
upp::R
norm_a::R
function IndHyperslab{R, T}(low::R, a::T, upp::R) where {R, T}
norm_a = norm(a)
if (norm_a == 0 && (upp < 0 || low > 0)) || upp < low
error("function is improper")
end
new(low, a, upp, norm_a)
end
end
IndHyperslab(low::R, a::T, upp::R) where {R, T} = IndHyperslab{R, T}(low, a, upp)
is_convex(f::Type{<:IndHyperslab}) = true
is_set(f::Type{<:IndHyperslab}) = true
function (f::IndHyperslab)(x)
R = real(eltype(x))
if iszero(f.norm_a)
return R(0)
end
s = dot(f.a, x)
tol = 100 * eps(R) * f.norm_a
if isapprox_le(f.low, s, atol=tol, rtol=tol) && isapprox_le(s, f.upp, atol=tol, rtol=tol)
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndHyperslab, x, gamma)
s = dot(f.a, x)
if s < f.low && f.norm_a > 0
y .= x .- ((s - f.low)/f.norm_a^2) .* f.a
elseif s > f.upp && f.norm_a > 0
y .= x .- ((s - f.upp)/f.norm_a^2) .* f.a
else
copyto!(y, x)
end
return real(eltype(x))(0)
end
function prox_naive(f::IndHyperslab, x, gamma)
R = real(eltype(x))
s = dot(f.a, x)
if s < f.low && f.norm_a > 0
return x - ((s - f.low)/norm(f.a)^2) * f.a, R(0)
elseif s > f.upp && f.norm_a > 0
return x - ((s - f.upp)/norm(f.a)^2) * f.a, R(0)
else
return x, R(0)
end
end
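# Illustrative sketch (helper not part of the package API): with a = [1, 0]
# the slab constrains only the first coordinate to [low, upp]; the second
# coordinate is untouched.
function _ind_hyperslab_example()
    f = IndHyperslab(0.0, [1.0, 0.0], 1.0)
    y, _ = prox(f, [2.0, 3.0], 1.0) # y == [1.0, 3.0]
    return y
end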
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 822 | # indicator of nonnegative orthant
export IndNonnegative
"""
IndNonnegative()
Return the indicator of the nonnegative orthant
```math
C = \\{ x : x \\geq 0 \\}.
```
"""
struct IndNonnegative end
is_separable(f::Type{<:IndNonnegative}) = true
is_convex(f::Type{<:IndNonnegative}) = true
is_cone(f::Type{<:IndNonnegative}) = true
function (::IndNonnegative)(x)
R = eltype(x)
for k in eachindex(x)
if x[k] < 0
return R(Inf)
end
end
return R(0)
end
function prox!(y, ::IndNonnegative, x, gamma)
R = eltype(x)
for k in eachindex(x)
if x[k] < 0
y[k] = R(0)
else
y[k] = x[k]
end
end
return R(0)
end
function prox_naive(::IndNonnegative, x, gamma)
R = eltype(x)
y = max.(R(0), x)
return y, R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 822 | # indicator of nonpositive orthant
export IndNonpositive
"""
IndNonpositive()
Return the indicator of the nonpositive orthant
```math
C = \\{ x : x \\leq 0 \\}.
```
"""
struct IndNonpositive end
is_separable(f::Type{<:IndNonpositive}) = true
is_convex(f::Type{<:IndNonpositive}) = true
is_cone(f::Type{<:IndNonpositive}) = true
function (::IndNonpositive)(x)
R = eltype(x)
for k in eachindex(x)
if x[k] > 0
return R(Inf)
end
end
return R(0)
end
function prox!(y, ::IndNonpositive, x, gamma)
R = eltype(x)
for k in eachindex(x)
if x[k] > 0
y[k] = R(0)
else
y[k] = x[k]
end
end
return R(0)
end
function prox_naive(::IndNonpositive, x, gamma)
R = eltype(x)
y = min.(R(0), x)
return y, R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 5381 | # indicator of a PSD
export IndPSD
"""
IndPSD(;scaling=false)
Return the indicator of the positive semi-definite cone
```math
C = \\{ X : X \\succeq 0 \\}.
```
The argument to the function can be either a `Symmetric`, `Hermitian`, or
`AbstractMatrix` object, or an object of type `AbstractVector{Float64}` holding
a symmetric matrix in (lower triangular) packed storage.
If `scaling = true` then the vectors `y` and `x` in
`prox!(y::AbstractVector{Float64}, f::IndPSD, x::AbstractVector{Float64}, args...)`
have their off-diagonal elements multiplied by `√2` to preserve inner products,
see Vandenberghe 2010: http://www.seas.ucla.edu/~vandenbe/publications/coneprog.pdf .
I.e. when `scaling=true`, let `X,Y` be matrices and
`x = (X_{1,1}, √2⋅X_{2,1}, ... ,√2⋅X_{n,1}, X_{2,2}, √2⋅X_{3,2}, ..., X_{n,n})`,
`y = (Y_{1,1}, √2⋅Y_{2,1}, ... ,√2⋅Y_{n,1}, Y_{2,2}, √2⋅Y_{3,2}, ..., Y_{n,n})`
then `prox!(Y, f, X)` is equivalent to `prox!(y, f, x)`.
"""
struct IndPSD
scaling::Bool
end
IndPSD(; scaling=false) = IndPSD(scaling)
function (::IndPSD)(X::Union{Symmetric, Hermitian})
R = real(eltype(X))
F = eigen(X)
for i in eachindex(F.values)
# Do we allow for some tolerance here?
if F.values[i] <= -100 * eps(R)
return R(Inf)
end
end
return R(0)
end
is_convex(f::Type{<:IndPSD}) = true
is_cone(f::Type{<:IndPSD}) = true
function prox!(Y::Union{Symmetric, Hermitian}, ::IndPSD, X::Union{Symmetric, Hermitian}, gamma)
R = real(eltype(X))
n = size(X, 1)
F = eigen(X)
for i in eachindex(F.values)
F.values[i] = max.(R(0), F.values[i])
end
for i = 1:n, j = i:n
Y.data[i, j] = R(0)
for k = 1:n
Y.data[i, j] += F.vectors[i, k] * F.values[k] * conj(F.vectors[j, k])
end
Y.data[j, i] = conj(Y.data[i, j])
end
return R(0)
end
function prox_naive(::IndPSD, X::Union{Symmetric, Hermitian}, gamma)
R = real(eltype(X))
F = eigen(X)
return F.vectors * Diagonal(max.(R(0), F.values)) * F.vectors', R(0)
end
"""
Scales the diagonal of `x` by `val`, where `x` is the lower triangular part
of a matrix, stored column by column.
"""
function scale_diagonal!(x, val)
n = Int(sqrt(1/4+2*length(x))-1/2)
k = -n
for i = 1:n
# Calculate indices of diagonal elements recursively (parallel faster?)
k += n - i + 2
# Scale diagonal
x[k] *= val
end
end
## Below: with AbstractVector argument
function (f::IndPSD)(x::AbstractVector{Float64})
y = copy(x)
# If scaling, scale diagonal (eigenvalues scaled by sqrt(2))
f.scaling && scale_diagonal!(y, sqrt(2))
Z = dspev!(:N, :L, y)
for i in eachindex(Z)
# Do we allow for some tolerance here?
if Z[i] <= -1e-14
return +Inf
end
end
return 0.0
end
function prox!(y::AbstractVector{Float64}, f::IndPSD, x::AbstractVector{Float64}, gamma)
# Copy x since dspev! corrupts input
y .= x
# If scaling, scale diagonal
f.scaling && scale_diagonal!(y, sqrt(2))
(W, Z) = dspevV!(:L, y)
# NonNeg eigenvalues
W = max.(W, 0.0)
# Equivalent to Z*diagm(W) without constructing W matrix
M = Z.*W'
# Now let M = Z*diagm(W)*Z'
M = M*Z'
n = length(W)
k = firstindex(y)
# Store lower diagonal of M in y
for j in 1:n, i in j:n
y[k] = M[i,j]
k = k+1
end
# If scaling, un-scale diagonal
f.scaling && scale_diagonal!(y, 1/sqrt(2))
return 0.0
end
function prox_naive(f::IndPSD, x::AbstractVector{Float64}, gamma)
# Formula for size of matrix
n = Int(sqrt(1/4+2*length(x))-1/2)
X = Matrix{Float64}(undef, n, n)
k = firstindex(x)
# Store x in X
for j = 1:n, i = j:n
# Lower half
X[i,j] = x[k]
if i != j
# Strictly upper half
X[j,i] = x[k]
end
k = k+1
end
# Scale diagonal elements by sqrt(2)
# See Vandenberghe 2010 http://www.seas.ucla.edu/~vandenbe/publications/coneprog.pdf
# It's equivalent to scaling off-diagonal by 1/sqrt(2) and working with sqrt(2)*X
if f.scaling
for i = 1:n
X[i,i] *= sqrt(2)
end
end
X, v = prox_naive(f, Symmetric(X), gamma)
# Scale diagonal elements back
if f.scaling
for i = 1:n
X[i,i] /= sqrt(2)
end
end
y = similar(x)
k = firstindex(y)
# Store Lower half of X in y
for j = 1:n, i = j:n
y[k] = X[i,j]
k = k+1
end
return y, 0.0
end
## Below: with AbstractMatrix argument (wrap in Symmetric or Hermitian)
function (f::IndPSD)(X::AbstractMatrix{R}) where R <: Real
f(Symmetric(X))
end
function prox!(y::AbstractMatrix{R}, f::IndPSD, x::AbstractMatrix{R}, gamma) where R <: Real
prox!(Symmetric(y), f, Symmetric(x), gamma)
end
function prox_naive(f::IndPSD, X::AbstractMatrix{R}, gamma) where R <: Real
prox_naive(f, Symmetric(X), gamma)
end
function (f::IndPSD)(X::AbstractMatrix{C}) where C <: Complex
f(Hermitian(X))
end
function prox!(y::AbstractMatrix{C}, f::IndPSD, x::AbstractMatrix{C}, gamma) where C <: Complex
prox!(Hermitian(y), f, Hermitian(x), gamma)
end
function prox_naive(f::IndPSD, X::AbstractMatrix{C}, gamma) where C <: Complex
prox_naive(f, Hermitian(X), gamma)
end
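# Illustrative sketch (helper not part of the package API): projecting onto
# the PSD cone clips negative eigenvalues at zero, so diag(1, -1) maps to
# diag(1, 0). The plain-matrix method above wraps the input in Symmetric.
function _ind_psd_example()
    f = IndPSD()
    X = [1.0 0.0; 0.0 -1.0]
    Y, _ = prox(f, X, 1.0) # Y ≈ [1.0 0.0; 0.0 0.0]
    return Y
end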
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 884 | # indicator of a point
export IndPoint
"""
IndPoint(p=0)
Return the indicator of the singleton set
```math
C = \\{p \\}.
```
Parameter `p` can be a scalar, in which case the unique element of `C` has uniform coefficients.
"""
struct IndPoint{T}
p::T
function IndPoint{T}(p::T) where {T}
new(p)
end
end
is_separable(f::Type{<:IndPoint}) = true
is_convex(f::Type{<:IndPoint}) = true
is_singleton(f::Type{<:IndPoint}) = true
is_affine(f::Type{<:IndPoint}) = true
IndPoint(p::T=0) where T = IndPoint{T}(p)
function (f::IndPoint)(x)
R = real(eltype(x))
if all(isapprox.(x, f.p))
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndPoint, x, gamma)
R = real(eltype(x))
y .= f.p
return R(0)
end
function prox_naive(f::IndPoint, x, gamma)
R = real(eltype(x))
y = similar(x)
y .= f.p
return y, R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 661 | export IndPolyhedral
abstract type IndPolyhedral end
is_convex(::Type{<:IndPolyhedral}) = true
is_set(::Type{<:IndPolyhedral}) = true
"""
IndPolyhedral([l,] A, [u, xmin, xmax])
Return the indicator function of the polyhedral set:
```math
S = \\{ x : x_\\min \\leq x \\leq x_\\max, l \\leq Ax \\leq u \\}.
```
Matrix `A` is a mandatory argument; when any of the bounds is not provided,
it is assumed to be (plus or minus) infinity.
"""
function IndPolyhedral(args...; solver=:osqp)
if solver == :osqp
IndPolyhedralOSQP(args...)
else
error("unknown solver")
end
end
# including concrete types
include("indPolyhedralOSQP.jl")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2611 | # IndPolyhedral: OSQP implementation
using OSQP
using LinearAlgebra
using SparseArrays
struct IndPolyhedralOSQP{R} <: IndPolyhedral
l::AbstractVector{R}
A::AbstractMatrix{R}
u::AbstractVector{R}
mod::OSQP.Model
function IndPolyhedralOSQP{R}(
l::AbstractVector{R}, A::AbstractMatrix{R}, u::AbstractVector{R}
) where R
m, n = size(A)
mod = OSQP.Model()
if !all(l .<= u)
error("function is improper (are some bounds inverted?)")
end
OSQP.setup!(mod; P=SparseMatrixCSC{R}(I, n, n), l=l, A=sparse(A), u=u, verbose=false,
eps_abs=eps(R), eps_rel=eps(R),
eps_prim_inf=eps(R), eps_dual_inf=eps(R))
new(l, A, u, mod)
end
end
# properties
is_prox_accurate(::Type{<:IndPolyhedralOSQP}) = false
# constructors
IndPolyhedralOSQP(
l::AbstractVector{R}, A::AbstractMatrix{R}, u::AbstractVector{R}
) where R =
IndPolyhedralOSQP{R}(l, A, u)
IndPolyhedralOSQP(
l::AbstractVector{R}, A::AbstractMatrix{R}, u::AbstractVector{R},
xmin::AbstractVector{R}, xmax::AbstractVector{R}
) where R =
IndPolyhedralOSQP([l; xmin], [A; I], [u; xmax])
IndPolyhedralOSQP(
l::AbstractVector{R}, A::AbstractMatrix{R}, args...
) where R =
IndPolyhedralOSQP(
l, SparseMatrixCSC(A), R(Inf).*ones(R, size(A, 1)), args...
)
IndPolyhedralOSQP(
A::AbstractMatrix{R}, u::AbstractVector{R}, args...
) where R =
IndPolyhedralOSQP(
R(-Inf).*ones(R, size(A, 1)), SparseMatrixCSC(A), u, args...
)
# function evaluation
function (f::IndPolyhedralOSQP)(x)
R = eltype(x)
Ax = f.A * x
return all(f.l .<= Ax .<= f.u) ? R(0) : R(Inf)
end
# prox
function prox!(y, f::IndPolyhedralOSQP, x, gamma)
R = eltype(x)
OSQP.update!(f.mod; q=-x)
results = OSQP.solve!(f.mod)
y .= results.x
return R(0)
end
# naive prox
# we want to compute the projection p of a point x
#
# primal problem is: minimize_p (1/2)||p-x||^2 + g(Ap)
# where g is the indicator of the box [l, u]
#
# dual problem is: minimize_y (1/2)||-A'y||^2 - x'A'y + g*(y)
# can solve with (fast) dual proximal gradient method
function prox_naive(f::IndPolyhedralOSQP, x, gamma)
R = eltype(x)
y = zeros(R, size(f.A, 1)) # dual vector
y1 = y
g = IndBox(f.l, f.u)
gstar = Conjugate(g)
gstar_y = R(0)
stepsize = R(1)/opnorm(Matrix(f.A*f.A'))
for it = 1:1_000_000
w = y + (it-1)/(it+2)*(y - y1)
y1 = y
z = w - stepsize * (f.A * (f.A'*w - x))
y, = prox(gstar, z, stepsize)
if norm(y-w)/(1+norm(w)) <= 1e-12 break end
end
p = -f.A'*y + x
return p, R(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 3178 | # indicator of second-order cones
export IndSOC, IndRotatedSOC
"""
IndSOC()
Return the indicator of the second-order cone (also known as ice-cream cone or Lorentz cone), that is
```math
C = \\left\\{ (t, x) : \\|x\\| \\leq t \\right\\}.
```
"""
struct IndSOC end
function (::IndSOC)(x)
T = eltype(x)
# the tolerance in the following line should be customizable
if isapprox_le(norm(x[2:end]), x[1], atol=eps(T), rtol=sqrt(eps(T)))
return T(0)
end
return T(Inf)
end
is_convex(f::Type{<:IndSOC}) = true
is_cone(f::Type{<:IndSOC}) = true
function prox!(y, ::IndSOC, x, gamma)
T = eltype(x)
@views nx = norm(x[2:end])
t = x[1]
if t <= -nx
y .= T(0)
elseif t >= nx
y .= x
else
r = T(0.5) * (T(1) + t / nx)
y[1] = r * nx
@views y[2:end] .= r .* x[2:end]
end
return T(0)
end
function prox_naive(::IndSOC, x, gamma)
T = eltype(x)
nx = norm(x[2:end])
t = x[1]
if t <= -nx
y = zero(x)
elseif t >= nx
y = x
else
y = zero(x)
r = T(0.5) * (T(1) + t / nx)
y[1] = r * nx
y[2:end] .= r .* x[2:end]
end
return y, T(0)
end
# ########################
# ROTATED SOC
# ########################
"""
**Indicator of the rotated second-order cone**
IndRotatedSOC()
Return the indicator of the *rotated* second-order cone (also known as ice-cream cone or Lorentz cone), that is
```math
C = \\left\\{ (p, q, x) : \\|x\\|^2 \\leq 2\\cdot pq, p \\geq 0, q \\geq 0 \\right\\}.
```
"""
struct IndRotatedSOC end
function (::IndRotatedSOC)(x)
T = eltype(x)
if isapprox_le(0, x[1], atol=eps(T), rtol=sqrt(eps(T))) &&
isapprox_le(0, x[2], atol=eps(T), rtol=sqrt(eps(T))) &&
isapprox_le(norm(x[3:end])^2, 2*x[1]*x[2], atol=eps(T), rtol=sqrt(eps(T)))
return T(0)
end
return T(Inf)
end
is_convex(f::Type{<:IndRotatedSOC}) = true
is_set(f::Type{<:IndRotatedSOC}) = true
function prox!(y, ::IndRotatedSOC, x, gamma)
T = eltype(x)
# sin(pi/4) = cos(pi/4) = 0.7071067811865475
# rotate x ccw by pi/4
x1 = 0.7071067811865475*x[1] + 0.7071067811865475*x[2]
x2 = 0.7071067811865475*x[1] - 0.7071067811865475*x[2]
# project rotated x onto SOC
@views nx = sqrt(x2^2+norm(x[3:end])^2)
t = x1
if t <= -nx
y .= T(0)
elseif t >= nx
y[1] = x1
y[2] = x2
@views y[3:end] .= x[3:end]
else
r = T(0.5) * (T(1) + t / nx)
y[1] = r * nx
y[2] = r * x2
@views y[3:end] = r .* x[3:end]
end
# rotate back y cw by pi/4
y1 = 0.7071067811865475*y[1] + 0.7071067811865475*y[2]
y2 = 0.7071067811865475*y[1] - 0.7071067811865475*y[2]
y[1] = y1
y[2] = y2
return T(0)
end
function prox_naive(::IndRotatedSOC, x, gamma)
g = IndSOC()
z = copy(x)
z[1] = 0.7071067811865475*x[1] + 0.7071067811865475*x[2]
z[2] = 0.7071067811865475*x[1] - 0.7071067811865475*x[2]
y, = prox_naive(g, z, gamma)
y1 = 0.7071067811865475*y[1] + 0.7071067811865475*y[2]
y2 = 0.7071067811865475*y[1] - 0.7071067811865475*y[2]
y[1] = y1
y[2] = y2
return y, eltype(x)(0)
end
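# Illustrative sketch (helper not part of the package API): (t, x) = (0, [1, 1])
# has ||x|| = sqrt(2) > t = 0 > -sqrt(2), so the projection lands on the
# boundary of the cone with scaling r = (1 + t/||x||)/2 = 0.5.
function _ind_soc_example()
    f = IndSOC()
    y, _ = prox(f, [0.0, 1.0, 1.0], 1.0) # y ≈ [sqrt(2)/2, 0.5, 0.5]
    return y
end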
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2313 | # indicator of a simplex
export IndSimplex
"""
IndSimplex(a=1.0)
Return the indicator of the simplex
```math
S = \\left\\{ x : x \\geq 0, \\sum_i x_i = a \\right\\}.
```
By default `a=1`, therefore ``S`` is the probability simplex.
"""
struct IndSimplex{R}
a::R
function IndSimplex{R}(a::R) where R
if a <= 0
error("parameter a must be positive")
else
new(a)
end
end
end
is_convex(f::Type{<:IndSimplex}) = true
is_set(f::Type{<:IndSimplex}) = true
IndSimplex(a::R=1) where R = IndSimplex{R}(a)
function (f::IndSimplex)(x)
R = eltype(x)
if all(x .>= 0) && sum(x) ≈ f.a
return R(0)
end
return R(Inf)
end
function simplex_proj_condat!(y, a, x)
# Implements algorithm proposed in:
# Condat, L. "Fast projection onto the simplex and the l1 ball",
# Mathematical Programming, 158:575–585, 2016.
R = eltype(x)
v = [x[1]]
v_tilde = R[]
rho = x[1] - a
N = length(x)
for k in 2:N
if x[k] > rho
rho += (x[k] - rho) / (length(v) + 1)
if rho > x[k] - a
push!(v, x[k])
else
append!(v_tilde, v)
v = [x[k]]
rho = x[k] - a
end
end
end
for z in v_tilde
if z > rho
push!(v, z)
rho += (z - rho) / length(v)
end
end
v_changed = true
while v_changed == true
v_changed = false
k = 1
while k <= length(v)
z = v[k]
if z <= rho
deleteat!(v, k)
v_changed = true
rho += (rho - z) / length(v)
else
k = k + 1
end
end
end
y .= max.(x .- rho, R(0))
end
function prox!(y, f::IndSimplex, x, gamma)
simplex_proj_condat!(y, f.a, x)
return eltype(x)(0)
end
function prox_naive(f::IndSimplex, x, gamma)
R = eltype(x)
low = minimum(x)
upp = maximum(x)
v = x
s = Inf
for i = 1:100
if abs(s) / f.a <= sqrt(eps(float(R))) # `≈ 0` only matches exact zero; use a tolerance
break
end
alpha = (low+upp)/2
v = max.(x .- alpha, R(0))
s = sum(v) - f.a
if s <= 0
upp = alpha
else
low = alpha
end
end
return v, R(0)
end
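# Illustrative sketch (helper not part of the package API): projecting
# [0.6, 0.3, -0.1] onto the probability simplex shifts the entries by the
# common offset -rho = 0.05 and clips at zero.
function _ind_simplex_example()
    f = IndSimplex()
    y, _ = prox(f, [0.6, 0.3, -0.1], 1.0) # y ≈ [0.65, 0.35, 0.0]
    return y
end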
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1376 | # indicator of the L2 norm sphere with given radius
export IndSphereL2
"""
IndSphereL2(r=1)
Return the indicator function of the Euclidean sphere
```math
S = \\{ x : \\|x\\| = r \\},
```
where ``\\|\\cdot\\|`` is the ``L_2`` (Euclidean) norm. Parameter `r` must be positive.
"""
struct IndSphereL2{R}
r::R
function IndSphereL2{R}(r::R) where R
if r <= 0
error("parameter r must be positive")
else
new(r)
end
end
end
is_set(f::Type{<:IndSphereL2}) = true
IndSphereL2(r::R=1) where R = IndSphereL2{R}(r)
function (f::IndSphereL2)(x)
R = real(eltype(x))
if isapprox(norm(x), f.r, atol=eps(R), rtol=sqrt(eps(R)))
return R(0)
end
return R(Inf)
end
function prox!(y, f::IndSphereL2, x, gamma)
R = real(eltype(x))
normx = norm(x)
if normx > 0 # for x == 0 the projection is not unique: pick a random point below
scal = f.r/normx
for k in eachindex(x)
y[k] = scal*x[k]
end
else
normy = R(0)
for k in eachindex(x)
y[k] = randn()
normy += y[k]*y[k]
end
normy = sqrt(normy)
y .*= f.r/normy
end
return R(0)
end
function prox_naive(f::IndSphereL2, x, gamma)
normx = norm(x)
if normx > 0
y = x*f.r/normx
else
y = randn(size(x))
y *= f.r/norm(y)
end
return y, real(eltype(x))(0)
end
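# Illustrative sketch (helper not part of the package API): any nonzero point
# is rescaled onto the sphere, so [3.0, 4.0] (norm 5) maps to [1.2, 1.6] for
# r = 2; the origin is sent to a random point of the sphere.
function _ind_sphere_l2_example()
    f = IndSphereL2(2.0)
    y, _ = prox(f, [3.0, 4.0], 1.0) # y == [1.2, 1.6]
    return y
end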
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 882 | # indicator of the Stiefel manifold
export IndStiefel
@doc raw"""
IndStiefel()
Return the indicator of the Stiefel manifold
```math
S_{n,p} = \left\{ X \in \mathbb{F}^{n \times p} : X^*X = I \right\}.
```
where ``\mathbb{F}`` is the real or complex field, and parameters ``n`` and ``p``
are inferred from the matrix provided as input.
"""
struct IndStiefel end
is_set(f::Type{<:IndStiefel}) = true
function (::IndStiefel)(X)
R = real(eltype(X))
F = svd(X)
if all(F.S .≈ R(1))
return R(0)
end
return R(Inf)
end
function prox!(Y, ::IndStiefel, X, gamma)
R = real(eltype(X))
n, p = size(X)
F = svd(X)
U_sliced = view(F.U, :, 1:p)
mul!(Y, U_sliced, F.Vt)
return R(0)
end
function prox_naive(::IndStiefel, X, gamma)
R = real(eltype(X))
n, p = size(X)
F = svd(X)
Y = F.U[:, 1:p] * F.Vt
return Y, R(0)
end
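# Illustrative sketch (helper not part of the package API): the projection
# replaces the singular values of X with ones (its polar factor), so a matrix
# with orthogonal columns of lengths 2 and 3 maps to one with orthonormal
# columns.
function _ind_stiefel_example()
    X = [2.0 0.0; 0.0 3.0; 0.0 0.0]
    Y, _ = prox(IndStiefel(), X, 1.0) # Y ≈ [1.0 0.0; 0.0 1.0; 0.0 0.0]
    return Y
end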
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 751 | # indicator of the zero cone
export IndZero
"""
IndZero()
Return the indicator function of the set containing the origin, the "zero cone".
"""
struct IndZero end
is_separable(f::Type{<:IndZero}) = true
is_convex(f::Type{<:IndZero}) = true
is_singleton(f::Type{<:IndZero}) = true
is_cone(f::Type{<:IndZero}) = true
is_affine(f::Type{<:IndZero}) = true
function (::IndZero)(x)
C = eltype(x)
for k in eachindex(x)
if x[k] != C(0)
return real(C)(Inf)
end
end
return real(C)(0)
end
function prox!(y, ::IndZero, x, gamma)
for k in eachindex(y)
y[k] = eltype(y)(0)
end
return real(eltype(x))(0)
end
function prox_naive(::IndZero, x, gamma)
return zero(x), real(eltype(x))(0)
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 978 | # least squares penalty
export LeastSquares
### ABSTRACT TYPE
abstract type LeastSquares end
is_smooth(::Type{<:LeastSquares}) = true
is_generalized_quadratic(::Type{<:LeastSquares}) = true
### CONSTRUCTORS
"""
LeastSquares(A, b, λ=1.0; iterative=false)
For a matrix `A`, a vector `b` and a scalar `λ`, return the function
```math
f(x) = \\tfrac{\\lambda}{2}\\|Ax - b\\|^2.
```
By default, a direct method (based on Cholesky factorization) is used to evaluate `prox!`.
If `iterative=true`, then `prox!` is evaluated approximately using an iterative method instead.
"""
function LeastSquares(A, b, lam=1; iterative=false)
if iterative == false
LeastSquaresDirect(A, b, lam)
else
LeastSquaresIterative(A, b, lam)
end
end
infer_shape_of_x(A, ::AbstractVector) = (size(A, 2), )
infer_shape_of_x(A, b::AbstractMatrix) = (size(A, 2), size(b, 2))
### INCLUDE CONCRETE TYPES
include("leastSquaresDirect.jl")
include("leastSquaresIterative.jl")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 4324 | ### CONCRETE TYPE: DIRECT PROX EVALUATION
# prox! is computed using a Cholesky factorization of A'A + I/(lambda*gamma)
# or AA' + I/(lambda*gamma), according to which matrix is smaller.
# The factorization is cached and recomputed whenever gamma changes
using LinearAlgebra
using SparseArrays
using SuiteSparse
mutable struct LeastSquaresDirect{N, R, C, M, V, F, IsConvex} <: LeastSquares
A::M # m-by-n
b::V # m (by-p)
lambda::R
lambdaAtb::V
gamma::R
shape::Symbol
S::M
res::Array{C, N} # m (by-p)
q::Array{C, N} # n (by-p)
fact::F
function LeastSquaresDirect{N, R, C, M, V, F, IsConvex}(A::M, b::V, lambda::R) where {N, R, C, M, V, F, IsConvex}
if size(A, 1) != size(b, 1)
error("A and b have incompatible dimensions")
end
m, n = size(A)
if m >= n
S = lambda * (A' * A)
shape = :Tall
else
S = lambda * (A * A')
shape = :Fat
end
x_shape = infer_shape_of_x(A, b)
new(A, b, lambda, lambda*(A'*b), R(-1), shape, S, zero(b), zeros(C, x_shape))
end
end
is_convex(::Type{LeastSquaresDirect{N, R, C, M, V, F, IsConvex}}) where {N, R, C, M, V, F, IsConvex} = IsConvex
function LeastSquaresDirect(A::M, b, lambda) where M <: DenseMatrix
C = eltype(M)
R = real(C)
LeastSquaresDirect{ndims(b), R, C, M, typeof(b), Cholesky{C, M}, lambda >= 0}(A, b, R(lambda))
end
function LeastSquaresDirect(A::M, b, lambda) where M <: SparseMatrixCSC
C = eltype(M)
R = real(C)
LeastSquaresDirect{ndims(b), R, C, M, typeof(b), SuiteSparse.CHOLMOD.Factor{C}, lambda >= 0}(A, b, R(lambda))
end
function LeastSquaresDirect(A::Union{Transpose, Adjoint}, b, lambda)
LeastSquaresDirect(copy(A), b, lambda)
end
function LeastSquaresDirect(A::M, b, lambda) where M
    C = eltype(A)
    R = real(C)
    @warn "Could not infer type of Factorization for $M in LeastSquaresDirect, this type will be type-unstable"
    LeastSquaresDirect{ndims(b), R, C, M, typeof(b), Factorization, lambda >= 0}(A, b, R(lambda))
end
function (f::LeastSquaresDirect)(x)
mul!(f.res, f.A, x)
f.res .-= f.b
return (f.lambda / 2) * norm(f.res, 2)^2
end
function prox!(y, f::LeastSquaresDirect, x, gamma)
# if gamma different from f.gamma then call factor_step!
if gamma != f.gamma
factor_step!(f, gamma)
end
solve_step!(y, f, x, gamma)
mul!(f.res, f.A, y)
f.res .-= f.b
return (f.lambda/2)*norm(f.res, 2)^2
end
function factor_step!(f::LeastSquaresDirect{N, R, C, M, V, F}, gamma) where {N, R, C, M, V, F}
f.fact = cholesky(f.S + I/gamma)
f.gamma = gamma
end
function factor_step!(f::LeastSquaresDirect{N, R, C, <:SparseMatrixCSC, V, F}, gamma) where {N, R, C, V, F}
f.fact = ldlt(f.S; shift = R(1)/gamma)
f.gamma = gamma
end
function solve_step!(y, f::LeastSquaresDirect{N, R, C, M, V, <:Cholesky}, x, gamma) where {N, R, C, M, V}
f.q .= f.lambdaAtb .+ x./gamma
# two cases: (1) tall A, (2) fat A
if f.shape == :Tall
# y .= f.fact\f.q
y .= f.q
LAPACK.trtrs!('U', 'C', 'N', f.fact.factors, y)
LAPACK.trtrs!('U', 'N', 'N', f.fact.factors, y)
else # f.shape == :Fat
# y .= gamma*(f.q - lambda*(f.A'*(f.fact\(f.A*f.q))))
mul!(f.res, f.A, f.q)
LAPACK.trtrs!('U', 'C', 'N', f.fact.factors, f.res)
LAPACK.trtrs!('U', 'N', 'N', f.fact.factors, f.res)
mul!(y, adjoint(f.A), f.res)
y .*= -f.lambda
y .+= f.q
y .*= gamma
end
end
function solve_step!(y, f::LeastSquaresDirect, x, gamma)
f.q .= f.lambdaAtb .+ x./gamma
# two cases: (1) tall A, (2) fat A
if f.shape == :Tall
y .= f.fact\f.q
else # f.shape == :Fat
# y .= gamma*(f.q - lambda*(f.A'*(f.fact\(f.A*f.q))))
mul!(f.res, f.A, f.q)
f.res .= f.fact\f.res
mul!(y, adjoint(f.A), f.res)
y .*= -f.lambda
y .+= f.q
y .*= gamma
end
end
function gradient!(y, f::LeastSquaresDirect, x)
mul!(f.res, f.A, x)
f.res .-= f.b
mul!(y, adjoint(f.A), f.res)
y .*= f.lambda
return (f.lambda / 2) * real(dot(f.res, f.res))
end
function prox_naive(f::LeastSquaresDirect, x, gamma)
lamgam = f.lambda*gamma
y = (f.A'*f.A + I/lamgam)\(f.A' * f.b + x/lamgam)
fy = (f.lambda/2)*norm(f.A*y-f.b)^2
return y, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2428 | ### CONCRETE TYPE: ITERATIVE PROX EVALUATION
# prox! is computed using CG on a system with matrix lambda*A'A + I/gamma
# or lambda*AA' + I/gamma, according to which matrix is smaller.
using LinearAlgebra
using IterativeSolvers # provides cg!/cg used below
struct LeastSquaresIterative{N, R, RC, M, V, O, IsConvex} <: LeastSquares
A::M # m-by-n operator
b::V # m (by-p)
lambda::R
lambdaAtb::V
shape::Symbol
S::O
res::Array{RC, N} # m (by-p)
res2::Array{RC, N} # m (by-p)
q::Array{RC, N} # n (by-p)
end
is_prox_accurate(f::Type{<:LeastSquaresIterative}) = false
is_convex(::Type{LeastSquaresIterative{N, R, RC, M, V, O, IsConvex}}) where {N, R, RC, M, V, O, IsConvex} = IsConvex
function LeastSquaresIterative(A::M, b, lambda) where M
if size(A, 1) != size(b, 1)
error("A and b have incompatible dimensions")
end
m, n = size(A)
x_shape = infer_shape_of_x(A, b)
shape, S, res2 = if m >= n
:Tall, AcA(A, x_shape), zero(b) # res2 is unused in the tall case; zero(b) matches the field type
else
:Fat, AAc(A, size(b)), zero(b)
end
RC = eltype(A)
R = real(RC)
LeastSquaresIterative{ndims(b), R, RC, M, typeof(b), typeof(S), lambda >= 0}(A, b, R(lambda), lambda*(A'*b), shape, S, zero(b), res2, zeros(RC, x_shape))
end
function (f::LeastSquaresIterative)(x)
mul!(f.res, f.A, x)
f.res .-= f.b
return (f.lambda/2)*norm(f.res, 2)^2
end
function prox!(y, f::LeastSquaresIterative, x, gamma)
f.q .= f.lambdaAtb .+ x./gamma
RC = eltype(f.S)
# two cases: (1) tall A, (2) fat A
if f.shape == :Tall
y .= x
op = ScaleShift(RC(f.lambda), f.S, RC(1)/gamma)
IterativeSolvers.cg!(y, op, f.q)
else # f.shape == :Fat
# y .= gamma*(f.q - lambda*(f.A'*(f.fact\(f.A*f.q))))
mul!(f.res, f.A, f.q)
op = ScaleShift(RC(f.lambda), f.S, RC(1)/gamma)
IterativeSolvers.cg!(f.res2, op, f.res)
mul!(y, adjoint(f.A), f.res2)
y .*= -f.lambda
y .+= f.q
y .*= gamma
end
mul!(f.res, f.A, y)
f.res .-= f.b
return (f.lambda/2)*norm(f.res, 2)^2
end
function gradient!(y, f::LeastSquaresIterative, x)
mul!(f.res, f.A, x)
f.res .-= f.b
mul!(y, adjoint(f.A), f.res)
y .*= f.lambda
return (f.lambda / 2) * real(dot(f.res, f.res))
end
function prox_naive(f::LeastSquaresIterative, x, gamma)
y = IterativeSolvers.cg(f.lambda*f.A'*f.A + I/gamma, f.lambda*f.A'*f.b + x/gamma)
fy = (f.lambda/2)*norm(f.A*y-f.b)^2
return y, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 594 | export Linear
"""
Linear(c)
Return the linear function
```math
f(x) = \\langle c, x \\rangle.
```
"""
struct Linear{A}
c::A
end
is_separable(f::Type{<:Linear}) = true
is_convex(f::Type{<:Linear}) = true
is_smooth(f::Type{<:Linear}) = true
function (f::Linear)(x)
return dot(f.c, x)
end
function prox!(y, f::Linear, x, gamma)
y .= x .- gamma .* f.c
fy = dot(f.c, y)
return fy
end
function gradient!(y, f::Linear, x)
y .= f.c
return dot(f.c, x)
end
function prox_naive(f::Linear, x, gamma)
y = x - gamma .* f.c
fy = dot(f.c, y)
return y, fy
end
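# Illustrative sketch (helper not part of the package API): the prox of a
# linear function is a constant shift, y = x - gamma*c.
function _linear_example()
    f = Linear([1.0, 2.0])
    y, fy = prox(f, [1.0, 1.0], 0.5) # y == [0.5, 0.0], fy == dot(f.c, y) == 0.5
    return y, fy
end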
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2007 | # logarithmic barrier function
export LogBarrier
"""
LogBarrier(a=1, b=0, μ=1)
Return the function
```math
f(x) = -μ⋅∑_i\\log(a⋅x_i+b),
```
for a positive parameter `μ`.
"""
struct LogBarrier{R, S, T}
a::R
b::S
mu::T
function LogBarrier{R, S, T}(a::R, b::S, mu::T) where {R, S, T}
if mu <= 0
error("parameter mu must be positive")
else
new(a, b, mu)
end
end
end
is_separable(f::Type{<:LogBarrier}) = true
is_convex(f::Type{<:LogBarrier}) = true
LogBarrier(a::R=1, b::S=0, mu::T=1) where {R, S, T} = LogBarrier{R, S, T}(a, b, mu)
function (f::LogBarrier)(x)
T = eltype(x)
sumf = T(0)
v = T(0)
for i in eachindex(x)
v = f.a * x[i] + f.b
if v <= T(0)
return T(Inf)
end
sumf += log(v)
end
return -f.mu * sumf
end
function prox!(y, f::LogBarrier, x, gamma)
T = eltype(x)
par = 4 * gamma * f.mu * f.a * f.a
sumf = T(0)
z = T(0)
v = T(0)
for i in eachindex(x)
z = f.a * x[i] + f.b
v = (z + sqrt(z * z + par)) / 2
y[i] = (v - f.b) / f.a
sumf += log(v)
end
return -f.mu * sumf
end
function prox!(y, f::LogBarrier, x, gamma::AbstractArray)
T = eltype(x)
par = 4 * f.mu * f.a * f.a
sumf = T(0)
z = T(0)
v = T(0)
for i in eachindex(x)
par_i = gamma[i] * par
z = f.a * x[i] + f.b
v = (z + sqrt(z * z + par_i)) / 2
y[i] = (v - f.b) / f.a
sumf += log(v)
end
return -f.mu * sumf
end
function gradient!(y, f::LogBarrier, x)
sumf = eltype(x)(0)
for i in eachindex(x)
logarg = f.a * x[i] + f.b
y[i] = -f.mu * f.a / logarg
sumf += log(logarg)
end
return -f.mu * sumf
end
function prox_naive(f::LogBarrier, x, gamma)
asqr = f.a * f.a
z = f.a * x .+ f.b
y = ((z .+ sqrt.(z .* z .+ 4 * gamma * f.mu * asqr)) / 2 .- f.b) / f.a
fy = -f.mu * sum(log.(f.a .* y .+ f.b))
return y, fy
end
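# Illustrative sketch (helper not part of the package API): for
# a = mu = gamma = 1 and b = 0 the prox has the closed form
# y_i = (x_i + sqrt(x_i^2 + 4))/2, so x = [0.0] maps to [1.0].
function _log_barrier_example()
    f = LogBarrier()
    y, fy = prox(f, [0.0], 1.0) # y == [1.0], fy == -log(1.0) == 0.0
    return y, fy
end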
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2370 | # Logistic loss function
export LogisticLoss
"""
LogisticLoss(y, μ=1)
Return the function
```math
f(x) = μ⋅∑_i log(1+exp(-y_i⋅x_i))
```
where `y` is an array and `μ` is a positive parameter.
"""
struct LogisticLoss{T, R}
y::T
mu::R
function LogisticLoss{T, R}(y::T, mu::R) where {T, R}
if mu <= R(0)
error("parameter mu must be positive")
end
new(y, mu)
end
end
LogisticLoss(y::T, mu::R=1) where {R, T} = LogisticLoss{T, R}(y, mu)
is_separable(f::Type{<:LogisticLoss}) = true
is_convex(f::Type{<:LogisticLoss}) = true
is_smooth(f::Type{<:LogisticLoss}) = true
is_prox_accurate(f::Type{<:LogisticLoss}) = false
# f(x) = mu log(1 + exp(-y x))
function (f::LogisticLoss)(x)
R = eltype(x)
val = R(0)
for k in eachindex(x)
expyx = exp(f.y[k] * x[k])
val += log(R(1) + R(1) / expyx)
end
return f.mu * val
end
# f'(x) = -mu y exp(-y x) / (1 + exp(-y x))
# = -mu y / (1 + exp(y x))
#
# The second derivative is mu y^2 exp(y x)/(1 + exp(y x))^2 <= mu y^2/4, so
# mu*max_i|y_i| upper-bounds the Lipschitz constant of the gradient whenever
# |y_i| <= 4 (e.g. for labels y_i in {-1, +1})
function gradient!(g, f::LogisticLoss, x)
R = eltype(x)
val = R(0)
for k in eachindex(x)
expyx = exp(f.y[k] * x[k])
g[k] = -f.mu * f.y[k] / (R(1) + expyx)
val += log(R(1) + R(1) / expyx)
end
return f.mu * val
end
# Computing proximal operator:
# z = prox(f, x, gamma)
# <==> f'(z) + (z - x)/gamma = 0
# <==> (z - x)/gamma - mu y / (1 + exp(y z)) = 0
# <==> z - x - mu gamma y / (1 + exp(y z)) = 0
#
# Indicating the above condition as F(z) = 0, then
# ==> F'(z) = 1 - (mu gamma y^2 exp(y z))/(1+exp(y z))^2
#
# Newton's method (no damping) to compute z reads:
# z_{k+1} = z_k - F(z_k)/F'(z_k)
#
# To ensure convergence of Newton's method a damping is required.
# The damping coefficient could be computed by backtracking.
#
# Alternatively we can use gradient methods with constant step size.
function prox!(z, f::LogisticLoss, x, gamma)
R = eltype(x)
c = R(1) / gamma # convexity modulus
L = maximum(abs, f.mu .* f.y) + c # Lipschitz constant
z .= x
expyz = similar(z)
Fz = similar(z)
for k = 1:20
expyz .= exp.(f.y .* z)
Fz .= z .- x .- f.mu * gamma * (f.y ./ (1 .+ expyz))
z .-= Fz ./ L
end
expyz .= exp.(f.y .* z)
val = R(0)
for k in eachindex(expyz)
val += log(R(1) + R(1)/expyz[k])
end
return f.mu * val
end
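# A minimal usage sketch (labels and stepsize below are illustrative
# assumptions). Note that is_prox_accurate is declared false above: prox!
# runs a fixed number of damped fixed-point iterations, so its output is
# approximate.
#
#   f = LogisticLoss([1.0, -1.0, 1.0], 0.5)   # labels y, weight mu = 0.5
#   x = [0.3, -0.2, 1.5]
#   fx = f(x)                                 # 0.5 * sum(log.(1 .+ exp.(-f.y .* x)))
#   z, fz = prox(f, x, 0.1)                   # approximate proximal point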
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 238 | # Max function
export Maximum
"""
Maximum(λ=1)
For a nonnegative parameter `λ`, return the function
```math
f(x) = \\lambda \\cdot \\max \\{x_i : i = 1,\\ldots, n \\}.
```
"""
Maximum(lambda=1) = SumLargest(one(Int32), lambda)
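# A minimal usage sketch (illustrative). Since Maximum(λ) is built as
# SumLargest(1, λ), evaluating it returns λ times the largest entry:
#
#   f = Maximum(2.0)
#   f([1.0, 3.0, 2.0])   # == 6.0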
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 947 | # L0 pseudo-norm (times a constant)
export NormL0
"""
NormL0(λ=1)
Return the ``L_0`` pseudo-norm function
```math
f(x) = λ\\cdot\\mathrm{nnz}(x)
```
for a nonnegative parameter `λ`.
"""
struct NormL0{R}
lambda::R
function NormL0{R}(lambda::R) where R
if lambda < 0
error("parameter λ must be nonnegative")
else
new(lambda)
end
end
end
NormL0(lambda::R=1) where R = NormL0{R}(lambda)
(f::NormL0)(x) = f.lambda * real(eltype(x))(count(!iszero, x))
function prox!(y, f::NormL0, x, gamma)
countnzy = real(eltype(x))(0)
gl = gamma * f.lambda
for i in eachindex(x)
over = abs(x[i]) > sqrt(2 * gl)
y[i] = over * x[i]
countnzy += over
end
return f.lambda * countnzy
end
function prox_naive(f::NormL0, x, gamma)
over = abs.(x) .> sqrt(2 * gamma * f.lambda)
y = x.*over
return y, f.lambda * real(eltype(x))(count(!iszero, y))
end
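# A minimal usage sketch (illustrative). The prox is hard thresholding at
# sqrt(2γλ): entries below the threshold in magnitude are zeroed, the rest
# are kept unchanged:
#
#   f = NormL0(1.0)
#   y, fy = prox(f, [0.5, -2.0, 1.0], 1.0)   # threshold sqrt(2) ≈ 1.414
#   # y == [0.0, -2.0, 0.0], fy == 1.0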
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 3864 | # L1 norm (times a constant, or weighted)
export NormL1
"""
NormL1(λ=1)
With a nonnegative scalar parameter λ, return the ``L_1`` norm
```math
f(x) = λ\\cdot∑_i|x_i|.
```
With a nonnegative array parameter λ, return the weighted ``L_1`` norm
```math
f(x) = ∑_i λ_i|x_i|.
```
"""
struct NormL1{T}
lambda::T
function NormL1{T}(lambda::T) where T
if !(eltype(lambda) <: Real)
error("λ must be real")
end
if any(lambda .< 0)
error("λ must be nonnegative")
else
new(lambda)
end
end
end
is_separable(f::Type{<:NormL1}) = true
is_convex(f::Type{<:NormL1}) = true
is_positively_homogeneous(f::Type{<:NormL1}) = true
NormL1(lambda::R=1) where R = NormL1{R}(lambda)
(f::NormL1)(x) = f.lambda * norm(x, 1)
(f::NormL1{<:AbstractArray})(x) = norm(f.lambda .* x, 1)
function prox!(y, f::NormL1{<:AbstractArray}, x::AbstractArray{<:Real}, gamma)
@assert length(y) == length(x) == length(f.lambda)
@inbounds @simd for i in eachindex(x)
gl = gamma * f.lambda[i]
y[i] = x[i] + (x[i] <= -gl ? gl : (x[i] >= gl ? -gl : -x[i]))
end
return sum(f.lambda .* abs.(y))
end
function prox!(y, f::NormL1{<:AbstractArray}, x::AbstractArray{<:Complex}, gamma)
@assert length(y) == length(x) == length(f.lambda)
@inbounds @simd for i in eachindex(x)
gl = gamma * f.lambda[i]
y[i] = sign(x[i]) * (abs(x[i]) <= gl ? 0 : abs(x[i]) - gl)
end
return sum(f.lambda .* abs.(y))
end
function prox!(y, f::NormL1, x::AbstractArray{<:Real}, gamma)
@assert length(y) == length(x)
n1y = eltype(x)(0)
gl = gamma * f.lambda
@inbounds @simd for i in eachindex(x)
y[i] = x[i] + (x[i] <= -gl ? gl : (x[i] >= gl ? -gl : -x[i]))
n1y += y[i] > 0 ? y[i] : -y[i]
end
return f.lambda * n1y
end
function prox!(y, f::NormL1, x::AbstractArray{<:Complex}, gamma)
@assert length(y) == length(x)
gl = gamma * f.lambda
n1y = real(eltype(x))(0)
@inbounds @simd for i in eachindex(x)
y[i] = sign(x[i]) * (abs(x[i]) <= gl ? 0 : abs(x[i]) - gl)
n1y += abs(y[i])
end
return f.lambda * n1y
end
function prox!(y, f::NormL1{<:AbstractArray}, x::AbstractArray{<:Real}, gamma::AbstractArray)
@assert length(y) == length(x) == length(f.lambda) == length(gamma)
@inbounds @simd for i in eachindex(x)
gl = gamma[i] * f.lambda[i]
y[i] = x[i] + (x[i] <= -gl ? gl : (x[i] >= gl ? -gl : -x[i]))
end
return sum(f.lambda .* abs.(y))
end
function prox!(y, f::NormL1{<:AbstractArray}, x::AbstractArray{<:Complex}, gamma::AbstractArray)
@assert length(y) == length(x) == length(f.lambda) == length(gamma)
@inbounds @simd for i in eachindex(x)
gl = gamma[i] * f.lambda[i]
y[i] = sign(x[i]) * (abs(x[i]) <= gl ? 0 : abs(x[i]) - gl)
end
return sum(f.lambda .* abs.(y))
end
function prox!(y, f::NormL1, x::AbstractArray{<:Real}, gamma::AbstractArray)
@assert length(y) == length(x) == length(gamma)
n1y = eltype(x)(0)
@inbounds @simd for i in eachindex(x)
gl = gamma[i] * f.lambda
y[i] = x[i] + (x[i] <= -gl ? gl : (x[i] >= gl ? -gl : -x[i]))
n1y += y[i] > 0 ? y[i] : -y[i]
end
return f.lambda * n1y
end
function prox!(y, f::NormL1, x::AbstractArray{<:Complex}, gamma::AbstractArray)
@assert length(y) == length(x) == length(gamma)
n1y = real(eltype(x))(0)
@inbounds @simd for i in eachindex(x)
gl = gamma[i] * f.lambda
y[i] = sign(x[i]) * (abs(x[i]) <= gl ? 0 : abs(x[i]) - gl)
n1y += abs(y[i])
end
return f.lambda * n1y
end
function gradient!(y, f::NormL1, x)
y .= f.lambda .* sign.(x)
return f(x)
end
function prox_naive(f::NormL1, x, gamma)
y = sign.(x).*max.(0, abs.(x) .- gamma .* f.lambda)
return y, norm(f.lambda .* y,1)
end
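# A minimal usage sketch (illustrative). Both the scalar and the weighted
# variants apply soft thresholding, coordinate-wise when λ is an array:
#
#   f = NormL1(0.5)
#   y, fy = prox(f, [1.0, -0.2, 2.0], 1.0)   # shrink by γλ = 0.5
#   # y == [0.5, 0.0, 1.5], fy == 0.5 * (0.5 + 0.0 + 1.5) == 1.0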
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1124 | # (weighted) sum of L2 norm and L1 norm
# for Group Lasso, this can be used together with src/calculus/slicedSeparableSum
export NormL1plusL2
"""
NormL1plusL2(λ_1=1, λ_2=1)
With two nonnegative scalars λ_1 and λ_2, return the function
```math
f(x) = λ_1 ∑_{i=1}^{n} |x_i| + λ_2 \\sqrt{x_1^2 + … + x_n^2}.
```
With nonnegative array λ_1 and nonnegative scalar λ_2, return the function
```math
f(x) = ∑_{i=1}^{n} {λ_1}_i |x_i| + λ_2 \\sqrt{x_1^2 + … + x_n^2}.
```
"""
struct NormL1plusL2{L1<:NormL1, L2 <: NormL2}
l1::L1
l2::L2
end
is_separable(f::Type{<:NormL1plusL2}) = false
is_convex(f::Type{<:NormL1plusL2}) = true
is_positively_homogeneous(f::Type{<:NormL1plusL2}) = true
NormL1plusL2(lambda1::L=1, lambda2::M=1) where {L, M} = NormL1plusL2(NormL1(lambda1), NormL2(lambda2))
(f::NormL1plusL2)(x) = f.l1(x) + f.l2(x)
function prox!(y, f::NormL1plusL2, x, gamma)
prox!(y, f.l1, x, gamma)
vl2 = prox!(y, f.l2, y, gamma)
return f.l1(y) + vl2
end
function prox_naive(f::NormL1plusL2, x, gamma)
y1, = prox_naive(f.l1, x, gamma)
y2, = prox_naive(f.l2, y1, gamma)
return y2, f(y2)
end
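# A minimal usage sketch (illustrative). The prox of this sum is computed
# exactly by composing the two proxes, as in prox! above: soft thresholding
# (NormL1) followed by block shrinkage (NormL2) of the result, a known
# property of this particular pair (the sparse group lasso penalty):
#
#   f = NormL1plusL2(1.0, 2.0)
#   y, fy = prox(f, randn(10), 0.1)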
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1143 | # L2 norm (times a constant)
export NormL2
"""
NormL2(λ=1)
With a nonnegative scalar parameter λ, return the ``L_2`` norm
```math
f(x) = λ\\cdot\\sqrt{x_1^2 + … + x_n^2}.
```
"""
struct NormL2{R}
lambda::R
function NormL2{R}(lambda::R) where R
if lambda < 0
error("parameter λ must be nonnegative")
else
new(lambda)
end
end
end
is_convex(f::Type{<:NormL2}) = true
is_positively_homogeneous(f::Type{<:NormL2}) = true
NormL2(lambda::R=1) where R = NormL2{R}(lambda)
(f::NormL2)(x) = f.lambda * norm(x)
function prox!(y, f::NormL2, x, gamma)
normx = norm(x)
scale = max(0, 1 - f.lambda * gamma / normx)
for i in eachindex(x)
y[i] = scale*x[i]
end
return f.lambda * scale * normx
end
function gradient!(y, f::NormL2, x)
fx = norm(x) # Value of f, without lambda
if fx == 0
y .= 0
else
y .= (f.lambda / fx) .* x
end
return f.lambda * fx
end
function prox_naive(f::NormL2, x, gamma)
normx = norm(x)
    scale = max(0, 1 - f.lambda * gamma / normx)
y = scale * x
return y, f.lambda * scale * normx
end
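# A minimal usage sketch (illustrative). The prox shrinks the whole vector
# toward the origin (block soft thresholding) and returns zero whenever
# norm(x) <= γλ:
#
#   f = NormL2(1.0)
#   y, fy = prox(f, [3.0, 4.0], 1.0)   # norm(x) == 5, scale == 1 - 1/5
#   # y == [2.4, 3.2], fy == 4.0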
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2459 | # L2,1 norm/Sum of norms of columns or rows (times a constant)
export NormL21
"""
NormL21(λ=1, dim=1)
Return the "sum of ``L_2`` norm" function
```math
f(X) = λ⋅∑_i\\|x_i\\|
```
for a nonnegative `λ`, where ``x_i`` is the ``i``-th column of ``X`` if `dim == 1`, and the ``i``-th row of ``X`` if `dim == 2`.
In words, it is the sum of the Euclidean norms of the columns or rows.
"""
struct NormL21{R, I}
lambda::R
dim::I
function NormL21{R,I}(lambda::R, dim::I) where {R, I}
if lambda < 0
error("parameter λ must be nonnegative")
else
new(lambda, dim)
end
end
end
is_convex(f::Type{<:NormL21}) = true
NormL21(lambda::R=1, dim::I=1) where {R, I} = NormL21{R, I}(lambda, dim)
function (f::NormL21)(X)
R = real(eltype(X))
nslice = R(0)
n21X = R(0)
if f.dim == 1
for j in axes(X, 2)
nslice = R(0)
for i in axes(X, 1)
nslice += abs(X[i, j])^2
end
n21X += sqrt(nslice)
end
elseif f.dim == 2
for i in axes(X, 1)
nslice = R(0)
for j in axes(X, 2)
nslice += abs(X[i, j])^2
end
n21X += sqrt(nslice)
end
end
return f.lambda * n21X
end
function prox!(Y, f::NormL21, X, gamma)
R = real(eltype(X))
gl = gamma * f.lambda
nslice = R(0)
n21X = R(0)
if f.dim == 1
for j in axes(X, 2)
nslice = R(0)
for i in axes(X, 1)
nslice += abs(X[i, j])^2
end
nslice = sqrt(nslice)
scal = 1 - gl / nslice
scal = scal <= 0 ? R(0) : scal
for i in axes(X, 1)
Y[i, j] = scal * X[i, j]
end
n21X += scal * nslice
end
elseif f.dim == 2
for i in axes(X, 1)
nslice = R(0)
for j in axes(X, 2)
nslice += abs(X[i, j])^2
end
nslice = sqrt(nslice)
            scal = 1 - gl / nslice
scal = scal <= 0 ? R(0) : scal
for j in axes(X, 2)
Y[i, j] = scal * X[i, j]
end
n21X += scal * nslice
end
end
return f.lambda * n21X
end
function prox_naive(f::NormL21, X, gamma)
Y = max.(0, 1 .- f.lambda * gamma ./ sqrt.(sum(abs.(X).^2, dims=f.dim))) .* X
return Y, f.lambda * sum(sqrt.(sum(abs.(Y).^2, dims=f.dim)))
end
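# A minimal usage sketch (illustrative). With dim == 1 each column is shrunk
# as a block, which is the group-lasso prox over columns:
#
#   f = NormL21(1.0, 1)
#   X = [3.0 0.3; 4.0 0.4]    # column norms are 5.0 and 0.5
#   Y, fY = prox(f, X, 1.0)
#   # first column scaled by 1 - 1/5, second column set to zero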
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 467 | # L-infinity norm
export NormLinf
"""
NormLinf(λ=1)
Return the ``L_∞`` norm
```math
f(x) = λ⋅\\max\\{|x_1|, …, |x_n|\\},
```
for a nonnegative parameter `λ`.
"""
NormLinf(lambda::T=1) where T = Conjugate(IndBallL1(lambda))
(f::Conjugate{<:IndBallL1})(x) = (f.f.r) * norm(x, Inf)
function gradient!(y, f::Conjugate{<:IndBallL1}, x)
absxi, i = findmax(abs.(x)) # Largest absolute value
y .= 0
y[i] = f.f.r * sign(x[i])
return f.f.r * absxi
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1330 | # nuclear Norm (times a constant)
export NuclearNorm
"""
NuclearNorm(λ=1)
Return the nuclear norm
```math
f(X) = λ\\|X\\|_* = λ ∑_i σ_i(X),
```
where `λ` is a nonnegative parameter and ``σ_i(X)`` is the ``i``-th singular value of the matrix ``X``.
"""
struct NuclearNorm{R}
lambda::R
function NuclearNorm{R}(lambda::R) where {R}
if lambda < 0
error("parameter λ must be nonnegative")
else
new(lambda)
end
end
end
is_convex(f::Type{<:NuclearNorm}) = true
NuclearNorm(lambda::R=1) where {R} = NuclearNorm{R}(lambda)
function (f::NuclearNorm)(X)
F = svd(X)
return f.lambda * sum(F.S)
end
function prox!(Y, f::NuclearNorm, X, gamma)
R = real(eltype(X))
F = svd(X)
S_thresh = max.(R(0), F.S .- f.lambda*gamma)
rankY = findfirst(S_thresh .== R(0))
if rankY === nothing
rankY = minimum(size(X))
end
Vt_thresh = view(F.Vt, 1:rankY, :)
U_thresh = view(F.U, :, 1:rankY)
# TODO: the order of the following matrix products should depend on the shape of x
M = S_thresh[1:rankY] .* Vt_thresh
mul!(Y, U_thresh, M)
return f.lambda * sum(S_thresh)
end
function prox_naive(f::NuclearNorm, X, gamma)
F = svd(X)
S = max.(0, F.S .- f.lambda*gamma)
Y = F.U * (Diagonal(S) * F.Vt)
return Y, f.lambda * sum(S)
end
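# A minimal usage sketch (illustrative). The prox is singular value
# thresholding: take the SVD, shrink the singular values by γλ, and
# reassemble from the components that survive:
#
#   f = NuclearNorm(0.1)
#   X = randn(5, 3)
#   Y, fY = prox(f, X, 1.0)   # fY is 0.1 times the sum of thresholded singular values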
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 964 | # quadratic function
export Quadratic
### ABSTRACT TYPE
abstract type Quadratic end
is_convex(f::Type{<:Quadratic}) = true
is_smooth(f::Type{<:Quadratic}) = true
is_generalized_quadratic(f::Type{<:Quadratic}) = true
fun_name(f::Quadratic) = "Quadratic function"
### CONSTRUCTORS
"""
Quadratic(Q, q; iterative=false)
For a matrix `Q` (dense or sparse, symmetric and positive semidefinite) and a vector `q`, return the quadratic function
```math
f(x) = \\tfrac{1}{2}\\langle Qx, x\\rangle + \\langle q, x \\rangle.
```
By default, a direct method (based on Cholesky factorization) is used to evaluate `prox!`.
If `iterative=true`, then `prox!` is evaluated approximately using an iterative method instead.
"""
function Quadratic(Q, q; iterative=false)
if iterative == false
QuadraticDirect(Q, q)
else
QuadraticIterative(Q, q)
end
end
### INCLUDE CONCRETE TYPES
include("quadraticDirect.jl")
include("quadraticIterative.jl")
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2364 | ### CONCRETE TYPE: DIRECT PROX EVALUATION
# prox! is computed using a Cholesky factorization of Q + I/gamma.
# The factorization is cached and recomputed whenever gamma changes
using LinearAlgebra
using SparseArrays
using SuiteSparse
mutable struct QuadraticDirect{R, M, V, F} <: Quadratic
Q::M
q::V
gamma::R
temp::V
fact::F
function QuadraticDirect{R, M, V, F}(Q::M, q::V) where {R, M, V, F}
if size(Q, 1) != size(Q, 2) || length(q) != size(Q, 2)
error("Q must be squared and q must be compatible with Q")
end
new(Q, q, -1, similar(q))
end
end
function QuadraticDirect(Q::M, q) where M <: SparseMatrixCSC
R = eltype(M)
QuadraticDirect{R, M, typeof(q), SuiteSparse.CHOLMOD.Factor{R}}(Q, q)
end
function QuadraticDirect(Q::M, q) where M <: DenseMatrix
R = eltype(M)
QuadraticDirect{R, M, typeof(q), Cholesky{R, M}}(Q, q)
end
function (f::QuadraticDirect)(x)
mul!(f.temp, f.Q, x)
return 0.5*dot(x, f.temp) + dot(x, f.q)
end
function prox!(y, f::QuadraticDirect{R, M, V, <:Cholesky}, x, gamma) where {R, M, V}
if gamma != f.gamma
factor_step!(f, gamma)
end
y .= x./gamma
y .-= f.q
    # (Q + I/gamma) y = U'U y = b, therefore y = U\(U'\b)
LAPACK.trtrs!('U', 'C', 'N', f.fact.factors, y)
LAPACK.trtrs!('U', 'N', 'N', f.fact.factors, y)
mul!(f.temp, f.Q, y)
fy = 0.5*dot(y, f.temp) + dot(y, f.q)
return fy
end
function prox!(y, f::QuadraticDirect{R, M, V, <:SuiteSparse.CHOLMOD.Factor}, x, gamma) where {R, M, V}
if gamma != f.gamma
factor_step!(f, gamma)
end
f.temp .= x./gamma
f.temp .-= f.q
y .= f.fact\f.temp
mul!(f.temp, f.Q, y)
fy = 0.5*dot(y, f.temp) + dot(y, f.q)
return fy
end
function factor_step!(f::QuadraticDirect{R, <:DenseMatrix, V, F}, gamma) where {R, V, F}
f.gamma = gamma
f.fact = cholesky(f.Q + I/gamma)
end
function factor_step!(f::QuadraticDirect{R, <:SparseMatrixCSC, V, F}, gamma) where {R, V, F}
f.gamma = gamma
f.fact = ldlt(f.Q; shift = 1/gamma)
end
function gradient!(y, f::QuadraticDirect{R, M, V, F}, x) where {R, M, V, F}
mul!(y, f.Q, x)
y .+= f.q
return 0.5*(dot(x, y) + dot(x, f.q))
end
function prox_naive(f::QuadraticDirect, x, gamma)
y = (gamma*f.Q + I)\(x - gamma*f.q)
fy = 0.5*dot(y, f.Q*y) + dot(y, f.q)
return y, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1175 | ### CONCRETE TYPE: ITERATIVE PROX EVALUATION
using LinearAlgebra
using IterativeSolvers
struct QuadraticIterative{M, V} <: Quadratic
Q::M
q::V
temp::V
end
is_prox_accurate(f::Type{<:QuadraticIterative}) = false
function QuadraticIterative(Q::M, q::V) where {M, V}
if size(Q, 1) != size(Q, 2) || length(q) != size(Q, 2)
error("Q must be squared and q must be compatible with Q")
end
QuadraticIterative{M, V}(Q, q, similar(q))
end
function (f::QuadraticIterative)(x)
mul!(f.temp, f.Q, x)
return 0.5*dot(x, f.temp) + dot(x, f.q)
end
function prox!(y, f::QuadraticIterative{M, V}, x, gamma) where {M, V}
R = eltype(M)
y .= x
f.temp .= x./gamma .- f.q
op = ScaleShift(R(1), f.Q, R(1)/gamma)
IterativeSolvers.cg!(y, op, f.temp)
mul!(f.temp, f.Q, y)
fy = 0.5*dot(y, f.temp) + dot(y, f.q)
return fy
end
function gradient!(y, f::QuadraticIterative, x)
mul!(y, f.Q, x)
y .+= f.q
return 0.5*(dot(x, y) + dot(x, f.q))
end
function prox_naive(f::QuadraticIterative, x, gamma)
y = IterativeSolvers.cg(gamma*f.Q + I, x - gamma*f.q)
fy = 0.5*dot(y, f.Q*y) + dot(y, f.q)
return y, fy
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1608 | # Squared hinge loss function
export SqrHingeLoss
"""
SqrHingeLoss(y, μ=1)
Return the squared Hinge loss
```math
f(x) = μ⋅∑_i \\max\\{0, 1 - y_i ⋅ x_i\\}^2,
```
where `y` is an array and `μ` is a positive parameter.
"""
struct SqrHingeLoss{R, T}
y::T
mu::R
function SqrHingeLoss{R, T}(y::T, mu::R) where {R, T}
if mu <= 0
error("parameter mu must be positive")
else
new(y, mu)
end
end
end
is_separable(f::Type{<:SqrHingeLoss}) = true
is_convex(f::Type{<:SqrHingeLoss}) = true
is_smooth(f::Type{<:SqrHingeLoss}) = true
SqrHingeLoss(b::T, mu::R=1) where {R, T} = SqrHingeLoss{R, T}(b, mu)
function (f::SqrHingeLoss)(x)
R = eltype(x)
return f.mu * sum(max.(R(0), (R(1) .- f.y .* x)).^2)
end
function gradient!(y, f::SqrHingeLoss, x)
    R = eltype(x)
    total = R(0)
    for i in eachindex(x)
        zz = 1 - f.y[i] * x[i]
        z = max(R(0), zz)
        y[i] = z > 0 ? -2 * f.mu * f.y[i] * zz : R(0)
        total += z^2
    end
    return f.mu * total
end
function prox!(z, f::SqrHingeLoss, x, gamma)
v = eltype(x)(0)
for k in eachindex(x)
if f.y[k] * x[k] >= 1
z[k] = x[k]
else
z[k] = (x[k] + 2 * f.mu * gamma * f.y[k]) / (1 + 2 * f.mu * gamma * f.y[k]^2)
v += (1 - f.y[k] * z[k])^2
end
end
return f.mu * v
end
function prox_naive(f::SqrHingeLoss, x, gamma)
flag = f.y .* x .<= 1
z = copy(x)
    z[flag] = (x[flag] .+ 2 .* f.mu .* gamma .* f.y[flag]) ./ (1 .+ 2 .* f.mu .* gamma .* f.y[flag].^2)
return z, f.mu * sum(max.(0, 1 .- f.y .* z).^2)
end
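# A minimal usage sketch (labels are illustrative assumptions). Coordinates
# already on the correct side of the margin (y_i * x_i >= 1) are left
# untouched by the prox; the others are pulled toward the margin:
#
#   f = SqrHingeLoss([1.0, -1.0], 2.0)
#   z, fz = prox(f, [2.0, -3.0], 0.5)
#   # z == [2.0, -3.0] and fz == 0.0, since f.y .* x == [2.0, 3.0] .>= 1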
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2824 | # squared L2 norm (times a constant, or weighted)
export SqrNormL2
"""
SqrNormL2(λ=1)
With a nonnegative scalar `λ`, return the squared Euclidean norm
```math
f(x) = \\tfrac{λ}{2}\\|x\\|^2.
```
With a nonnegative array `λ`, return the weighted squared Euclidean norm
```math
f(x) = \\tfrac{1}{2}∑_i λ_i x_i^2.
```
"""
struct SqrNormL2{T,SC}
lambda::T
function SqrNormL2{T,SC}(lambda::T) where {T,SC}
if any(lambda .< 0)
error("coefficients in λ must be nonnegative")
else
new(lambda)
end
end
end
is_convex(f::Type{<:SqrNormL2}) = true
is_smooth(f::Type{<:SqrNormL2}) = true
is_separable(f::Type{<:SqrNormL2}) = true
is_generalized_quadratic(f::Type{<:SqrNormL2}) = true
is_strongly_convex(f::Type{SqrNormL2{T,SC}}) where {T,SC} = SC
SqrNormL2(lambda::T=1) where T = SqrNormL2{T,all(lambda .> 0)}(lambda)
function (f::SqrNormL2{S})(x) where {S <: Real}
return f.lambda / real(eltype(x))(2) * norm(x)^2
end
function (f::SqrNormL2{<:AbstractArray})(x)
R = real(eltype(x))
sqnorm = R(0)
for k in eachindex(x)
sqnorm += f.lambda[k] * abs2(x[k])
end
return sqnorm / R(2)
end
function gradient!(y, f::SqrNormL2{<:Real}, x)
R = real(eltype(x))
sqnx = R(0)
for k in eachindex(x)
y[k] = f.lambda * x[k]
sqnx += abs2(x[k])
end
return f.lambda / R(2) * sqnx
end
function gradient!(y, f::SqrNormL2{<:AbstractArray}, x)
R = real(eltype(x))
sqnx = R(0)
for k in eachindex(x)
y[k] = f.lambda[k] * x[k]
sqnx += f.lambda[k] * abs2(x[k])
end
return sqnx / R(2)
end
function prox!(y, f::SqrNormL2{<:Real}, x, gamma::Number)
R = real(eltype(x))
gl = gamma * f.lambda
sqny = R(0)
for k in eachindex(x)
y[k] = x[k] / (1 + gl)
sqny += abs2(y[k])
end
return f.lambda / R(2) * sqny
end
function prox!(y, f::SqrNormL2{<:AbstractArray}, x, gamma::Number)
R = real(eltype(x))
wsqny = R(0)
for k in eachindex(x)
y[k] = x[k] / (1 + gamma * f.lambda[k])
wsqny += f.lambda[k] * abs2(y[k])
end
return wsqny / R(2)
end
function prox!(y, f::SqrNormL2{<:Real}, x, gamma::AbstractArray)
R = real(eltype(x))
sqny = R(0)
for k in eachindex(x)
y[k] = x[k] / (1 + gamma[k] * f.lambda)
sqny += abs2(y[k])
end
return f.lambda / R(2) * sqny
end
function prox!(y, f::SqrNormL2{<:AbstractArray}, x, gamma::AbstractArray)
R = real(eltype(x))
wsqny = R(0)
for k in eachindex(x)
y[k] = x[k] / (1 + gamma[k] * f.lambda[k])
wsqny += f.lambda[k] * abs2(y[k])
end
return wsqny / R(2)
end
function prox_naive(f::SqrNormL2, x, gamma)
R = real(eltype(x))
y = x./(R(1) .+ f.lambda .* gamma)
return y, real(dot(f.lambda .* y, y)) / R(2)
end
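# A minimal usage sketch (illustrative). In the scalar case the prox is the
# uniform rescaling x ./ (1 + γλ); with an array λ the scaling is applied
# per coordinate:
#
#   f = SqrNormL2(2.0)
#   y, fy = prox(f, [3.0, -3.0], 0.5)   # y == x ./ (1 + 0.5 * 2.0)
#   # y == [1.5, -1.5], fy == (2.0 / 2) * (1.5^2 + 1.5^2) == 4.5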
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1084 | # Sum of the largest k components
# export SumLargest
# TODO: SumLargest(r) is (the postcomposition of) the conjugate of
# (1) ind{0 <= x <= 1 : sum(x) = r},
# where instead IndSimplex(r) corresponds to
# (2) ind{0 <= x : sum(x) = r}.
# Therefore SumLargest(r) is currently only correct when r = 1.
# To make SumLargest correct we should extend IndSimplex to allow for that
# additional bound in its definition, by adding a second argument to the
# constructor. Then in the following line we should replace IndSimplex(k)
# with IndSimplex(k, 1.0). Note that (1) is proper only if x ∈ Rⁿ for n ⩾ r.
"""
SumLargest(k::Integer=1, λ::Real=1)
Return the function `g(x) = λ⋅sum(x_[1], ..., x_[k])`, for an integer k ⩾ 1 and `λ ⩾ 0`.
"""
SumLargest(k::I=1, lambda::R=1) where {I, R} = Postcompose(Conjugate(IndSimplex(k)), lambda)
function (f::Conjugate{<:IndSimplex})(x)
if f.f.a == 1
return maximum(x)
end
p = if ndims(x) == 1
partialsortperm(x, 1:f.f.a, rev=true)
else
partialsortperm(x[:], 1:f.f.a, rev=true)
end
return sum(x[p])
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1442 | # Sum of the positive components
export SumPositive
"""
SumPositive()
Return the function
```math
f(x) = ∑_i \\max\\{0, x_i\\}.
```
"""
struct SumPositive end
is_separable(f::Type{<:SumPositive}) = true
is_convex(f::Type{<:SumPositive}) = true
function (::SumPositive)(x)
return sum(xi -> max(xi, eltype(x)(0)), x)
end
function prox!(y, ::SumPositive, x, gamma)
R = eltype(x)
fsum = R(0)
for i in eachindex(x)
y[i] = x[i] < gamma ? (x[i] > 0 ? R(0) : x[i]) : x[i]-gamma
fsum += y[i] > 0 ? y[i] : R(0)
end
return fsum
end
function gradient!(y, ::SumPositive, x)
R = eltype(x)
y .= max.(0, sign.(x))
return sum(xi -> max(xi, R(0)), x)
end
function prox_naive(::SumPositive, x, gamma)
R = eltype(x)
y = copy(x)
indpos = x .> 0
y[indpos] = max.(R(0), x[indpos] .- gamma)
return y, sum(max.(R(0), y))
end
# ######################### #
# Prox with multiple gammas #
# ######################### #
function prox!(y, ::SumPositive, x, gamma::AbstractArray)
R = eltype(x)
fsum = R(0)
for i in eachindex(x)
y[i] = x[i] < gamma[i] ? (x[i] > 0 ? R(0) : x[i]) : x[i]-gamma[i]
fsum += y[i] > 0 ? y[i] : R(0)
end
return fsum
end
function prox_naive(::SumPositive, x, gamma::AbstractArray)
R = eltype(x)
y = copy(x)
indpos = x .> 0
y[indpos] = max.(R(0), x[indpos] .- gamma[indpos])
return y, sum(max.(R(0), y))
end
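# A minimal usage sketch (illustrative). Positive entries are shifted down
# by γ (but not past zero) while nonpositive entries are left unchanged:
#
#   y, fy = prox(SumPositive(), [-1.0, 0.5, 2.0], 1.0)
#   # y == [-1.0, 0.0, 1.0], fy == 1.0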
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2892 | # 1-dimensional Total Variation (times a constant)
export TotalVariation1D
"""
TotalVariation1D(λ=1)
With a nonnegative scalar parameter λ, return the 1D total variation
```math
f(x) = λ ∑_{i=2}^{n} |x_i - x_{i-1}|.
```
"""
struct TotalVariation1D{T}
lambda::T
function TotalVariation1D{T}(lambda::T) where T
if lambda < 0
error("parameter λ must be nonnegative")
else
new(lambda)
end
end
end
is_separable(f::Type{<:TotalVariation1D}) = false
is_convex(f::Type{<:TotalVariation1D}) = true
is_positively_homogeneous(f::Type{<:TotalVariation1D}) = true
TotalVariation1D(lambda::R=1) where R = TotalVariation1D{R}(lambda)
function (f::TotalVariation1D)(x)
return f.lambda * norm(x[2:end] - x[1:end-1], 1)
end
# Condat algorithm
# https://lcondat.github.io/publis/Condat-fast_TV-SPL-2013.pdf
function tvnorm_prox_condat(y, x, lambda)
# solves y = arg min_z lambda*sum_k |z_{k+1}-z_k| + 1/2 * ||z-x||^2
N = length(x)
k = k0 = kmin = kplus = 1
vmin = x[1] - lambda
vmax = x[1] + lambda
umin = lambda
umax = -lambda
    while true
while k == N
if umin < 0
y[k0:kmin] .= vmin
kmin += 1
k = k0 = kmin
vmin = x[k]
umin = lambda
umax = x[k] + lambda - vmax
elseif umax > 0
y[k0:kplus] .= vmax
                kplus += 1
k = k0 = kplus
vmax = x[k]
umax = -lambda
umin = x[k] - lambda - vmin
else
y[k0:N] .= vmin + umin/(k-k0+1)
return
end
if k==N
y[N] = vmin + umin
return
end
end
if x[k+1] + umin < vmin - lambda
y[k0:kmin] .= vmin
kmin += 1
k = k0 = kplus = kmin
vmin = x[k]
vmax = x[k] + 2*lambda
umin = lambda
umax = -lambda
elseif x[k+1] + umax > vmax + lambda
y[k0:kplus] .= vmax
kplus += 1
            k = k0 = kmin = kplus
vmin = x[k] - 2*lambda
vmax = x[k]
umin = lambda
umax = -lambda
else
k += 1
umin = umin + x[k] - vmin
umax = umax + x[k] - vmax
if umin >= lambda
vmin = vmin + (umin-lambda)/(k-k0+1)
umin = lambda
kmin = k
end
if umax <= -lambda
vmax += (umax+lambda)/(k-k0+1)
umax = -lambda
kplus = k
end
end
end
end
function prox!(y, f::TotalVariation1D, x, gamma)
a = gamma * f.lambda
tvnorm_prox_condat(y, x, a)
return f.lambda * norm(y[2:end] - y[1:end-1], 1)
end
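# A minimal usage sketch (illustrative). The prox solves the 1D total
# variation denoising problem (the fused lasso signal approximator) in a
# single forward pass over x:
#
#   f = TotalVariation1D(1.0)
#   x = [1.0, 1.1, 5.0, 5.2, 1.0]
#   y, fy = prox(f, x, 0.1)   # y leans piecewise constant, fy == f(y)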
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 343 | # This is adapted from Base.isapprox
# https://github.com/JuliaLang/julia/blob/381693d3dfc9b7072707f6d544f82f6637fc5e7c/base/floatfuncs.jl#L222-L291
function isapprox_le(x::Number, y::Number; atol::Real=0, rtol::Real=Base.rtoldefault(x,y,atol))
x <= y || (isfinite(x) && isfinite(y) && abs(x-y) <= max(atol, rtol*max(abs(x), abs(y))))
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2292 | # Utility operators for computing prox iteratively, e.g. using CG
import Base: *, size, eltype
import LinearAlgebra: mul!
abstract type LinOp end
infer_shape_of_y(Op, ::AbstractVector) = (size(Op, 1), )
infer_shape_of_y(Op, x::AbstractMatrix) = (size(Op, 1), size(x, 2))
function (*)(Op::LinOp, x)
y = zeros(promote_type(eltype(Op), eltype(x)), infer_shape_of_y(Op, x))
mul!(y, Op, x)
end
size(Op::LinOp, i::Integer) = i <= 2 ? size(Op)[i] : 1
# AAc (Gram matrix)
struct AAc{M, T} <: LinOp
A::M
buf::T
end
function AAc(A::M, input_shape::Tuple) where M
buffer_shape = (size(A, 2), input_shape[2:end]...)
buffer = zeros(eltype(A), buffer_shape)
AAc(A, buffer)
end
function mul!(y, Op::AAc, x)
if Op.buf === nothing
Op.buf = adjoint(Op.A) * x
else
mul!(Op.buf, adjoint(Op.A), x)
end
mul!(y, Op.A, Op.buf)
end
mul!(y, Op::Adjoint{AAc}, x) = mul!(y, adjoint(Op), x)
size(Op::AAc) = size(Op.A, 1), size(Op.A, 1)
eltype(Op::AAc) = eltype(Op.A)
# AcA (Covariance matrix)
struct AcA{M, T} <: LinOp
A::M
buf::T
end
function AcA(A::M, input_shape::Tuple) where M
buffer_shape = (size(A, 1), input_shape[2:end]...)
buffer = zeros(eltype(A), buffer_shape)
AcA(A, buffer)
end
function mul!(y, Op::AcA, x)
if Op.buf === nothing
Op.buf = Op.A * x
else
mul!(Op.buf, Op.A, x)
end
mul!(y, adjoint(Op.A), Op.buf)
end
mul!(y, Op::Adjoint{AcA}, x) = mul!(y, adjoint(Op), x)
size(Op::AcA) = size(Op.A, 2), size(Op.A, 2)
eltype(Op::AcA) = eltype(Op.A)
# Shifted symmetric linear operator
struct ScaleShift{M, T} <: LinOp
alpha::T
A::M
rho::T
function ScaleShift{M, T}(alpha::T, A::M, rho::T) where {M, T}
if eltype(A) != T
error("type of alpha, rho ($T) is different from that of A ($(eltype(A)))")
end
new(alpha, A, rho)
end
end
ScaleShift(alpha::T, A::M, rho::T) where {M, T} = ScaleShift{M, T}(alpha, A, rho)
function mul!(y, Op::ScaleShift, x)
mul!(y, Op.A, x)
y .*= Op.alpha
y .+= Op.rho .* x
end
function mul!(y, Op::Adjoint{ScaleShift}, x)
mul!(y, adjoint(Op.A), x)
y .*= Op.alpha
y .+= Op.rho .* x
end
size(Op::ScaleShift) = size(Op.A, 2), size(Op.A, 2)
eltype(Op::ScaleShift) = eltype(Op.A)
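# A minimal usage sketch (illustrative). ScaleShift represents alpha*A + rho*I
# without forming the shifted matrix; quadraticIterative.jl uses it to hand
# Q + I/gamma to the CG solver:
#
#   Q = [2.0 0.0; 0.0 3.0]
#   op = ScaleShift(1.0, Q, 0.5)   # acts as Q + 0.5*I
#   op * [1.0, 1.0]                # == [2.5, 3.5]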
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 353 | """
Fast (non-allocating) version of norm(x-y,2)^2
"""
function normdiff2(x::AbstractArray{C}, y::AbstractArray{C}) where {
R <: Real, C <: Union{R, Complex{R}}
}
s = R(0)
for i in eachindex(x)
s += abs2(x[i]-y[i])
end
return s
end
"""
Fast (non-allocating) version of norm(x-y,2)
"""
normdiff(x, y) = sqrt(normdiff2(x, y))
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2260 | using LinearAlgebra: BlasInt, chkstride1
using LinearAlgebra.LAPACK: chklapackerror
using LinearAlgebra.BLAS: @blasfunc
"""
dspev!(jobz::Symbol, uplo::Symbol, x::StridedVector{Float64})
Computes all the eigenvalues and optionally the eigenvectors of a real
symmetric `n×n` matrix `A` in packed storage. Will corrupt `x`.
Arguments:
`jobz`: `:N` if only eigenvalues, `:V` if eigenvalues and eigenvectors
`uplo`: `:L` if lower triangle of `A` is stored, `:U` if upper
`x`: `A` represented as vector of the lower (upper) n*(n+1)/2 elements, packed columnwise.
Returns:
`W,Z` if `jobz == :V` or: `W` if `jobz == :N` such that `A=Z*diagm(W)*Z'`
"""
function dspev!(jobz::Symbol, uplo::Symbol, A::StridedVector{Float64})
chkstride1(A)
vecN = length(A)
n = try
Int(sqrt(1/4+2*vecN)-1/2)
catch
throw(DimensionMismatch("A has length $vecN which is not N*(N+1)/2 for any integer N"))
end
W = similar(A, Float64, n)
Z = similar(A, Float64, n, n)
work = Array{Float64}(undef, 1)
lwork = BlasInt(3*n)
info = Ref{BlasInt}()
work = Array{Float64}(undef, lwork)
ccall((@blasfunc(dspev_), Base.liblapack_name), Cvoid,
(Ptr{UInt8}, Ptr{UInt8}, Ptr{BlasInt}, Ptr{Float64},
Ptr{Float64}, Ptr{Float64}, Ptr{BlasInt}, Ptr{Float64}, Ptr{BlasInt}),
jobz, uplo, Ref(n), A,
W, Z, Ref(n), work, info)
chklapackerror(info[])
jobz == :V ? (W, Z) : W
end
function dspevV!(uplo::Symbol, A::StridedVector{Float64})
jobz = :V
chkstride1(A)
vecN = length(A)
n = try
Int(sqrt(1/4+2*vecN)-1/2)
catch
throw(DimensionMismatch("A has length $vecN which is not N*(N+1)/2 for any integer N"))
end
W = similar(A, Float64, n)
Z = similar(A, Float64, n, n)
work = Array{Float64}(undef, 1)
lwork = BlasInt(3*n)
info = Ref{BlasInt}()
work = Array{Float64}(undef, lwork)
ccall((@blasfunc(dspev_), Base.liblapack_name), Cvoid,
(Ptr{UInt8}, Ptr{UInt8}, Ptr{BlasInt}, Ptr{Float64},
Ptr{Float64}, Ptr{Float64}, Ptr{BlasInt}, Ptr{Float64}, Ptr{BlasInt}),
jobz, uplo, Ref(n), A,
W, Z, Ref(n), work, info)
chklapackerror(info[])
return W, Z
end
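# A minimal usage sketch (illustrative). For the lower-triangle convention
# (uplo = :L) the packed vector stacks the columns of the lower triangle:
#
#   A = [2.0 1.0; 1.0 3.0]
#   AP = [2.0, 1.0, 3.0]              # packed lower triangle, columnwise
#   W, Z = dspev!(:V, :L, copy(AP))   # pass a copy, since dspev! corrupts its input
#   # A ≈ Z * Diagonal(W) * Z'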
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 942 | is_prox_accurate(::Type) = true
is_prox_accurate(::T) where T = is_prox_accurate(T)
is_separable(::Type) = false
is_separable(::T) where T = is_separable(T)
is_singleton(::Type) = false
is_singleton(::T) where T = is_singleton(T)
is_cone(::Type) = false
is_cone(::T) where T = is_cone(T)
is_affine(T::Type) = is_singleton(T)
is_affine(::T) where T = is_affine(T)
is_set(T::Type) = is_cone(T) || is_affine(T)
is_set(::T) where T = is_set(T)
is_positively_homogeneous(T::Type) = is_cone(T)
is_positively_homogeneous(::T) where T = is_positively_homogeneous(T)
is_support(T::Type) = is_convex(T) && is_positively_homogeneous(T)
is_support(::T) where T = is_support(T)
is_smooth(::Type) = false
is_smooth(::T) where T = is_smooth(T)
is_quadratic(T::Type) = is_generalized_quadratic(T) && is_smooth(T)
is_quadratic(::T) where T = is_quadratic(T)
is_strongly_convex(::Type) = false
is_strongly_convex(::T) where T = is_strongly_convex(T)
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 211 | # utilities to handle scalars as uniform arrays
# (instead writing multiple implementations of functions, proxes, gradients)
get_kth_elem(n::N, k) where {N <: Number} = n
get_kth_elem(n::T, k) where {T} = n[k]
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 4476 | using Test
using ProximalOperators
using ProximalOperators:
ArrayOrTuple,
is_prox_accurate,
is_separable,
is_convex,
is_singleton,
is_cone,
is_affine,
is_set,
is_smooth,
is_quadratic,
is_generalized_quadratic,
is_strongly_convex,
is_positively_homogeneous,
is_support
using Aqua
function call_test(f, x::ArrayOrTuple{R}) where R <: Real
try
fx = @inferred f(x)
@test typeof(fx) == R
return fx
catch e
if !isa(e, MethodError)
return nothing
end
end
end
Base.zero(xs::Tuple) = Base.zero.(xs)
# tests equality of the results of prox, prox! and prox_naive
function prox_test(f, x::ArrayOrTuple{R}, gamma=1) where R <: Real
y, fy = @inferred prox(f, x, gamma)
@test typeof(fy) == R
y_prealloc = zero(x)
fy_prealloc = prox!(y_prealloc, f, x, gamma)
@test typeof(fy_prealloc) == R
y_naive, fy_naive = ProximalOperators.prox_naive(f, x, gamma)
@test typeof(fy_naive) == R
rtol = if ProximalOperators.is_prox_accurate(f) sqrt(eps(R)) else 1e-4 end
if ProximalOperators.is_convex(f)
@test all(isapprox.(y_prealloc, y, rtol=rtol, atol=100*eps(R)))
@test all(isapprox.(y_naive, y, rtol=rtol, atol=100*eps(R)))
if ProximalOperators.is_set(f)
@test fy_prealloc == 0
end
@test isapprox(fy_prealloc, fy, rtol=rtol, atol=100*eps(R))
@test isapprox(fy_naive, fy, rtol=rtol, atol=100*eps(R))
end
if !ProximalOperators.is_set(f) || ProximalOperators.is_prox_accurate(f)
f_at_y = call_test(f, y)
if f_at_y !== nothing
@test isapprox(f_at_y, fy, rtol=rtol, atol=100*eps(R))
end
end
return y, fy
end
# tests the call to gradient and the type of the value it returns
function gradient_test(f, x::ArrayOrTuple{R}, gamma=R(1)) where R <: Real
grad_fx, fx = gradient(f, x)
@test typeof(fx) == R
return grad_fx, fx
end
# test predicates consistency
# i.e., that more specific properties imply less specific ones
# e.g., the indicator of a subspace is the indicator of a set in particular
function predicates_test(f)
preds = [
is_convex,
is_strongly_convex,
is_generalized_quadratic,
is_quadratic,
is_smooth,
is_singleton,
is_cone,
is_affine,
is_set,
is_positively_homogeneous,
is_support,
]
for pred in preds
# check that the value of the predicate can be inferred
@inferred (arg -> Val(pred(arg)))(f)
end
# quadratic => generalized_quadratic && smooth
@test !is_quadratic(f) || (is_generalized_quadratic(f) && is_smooth(f))
# (singleton || cone || affine) => set
@test !(is_singleton(f) || is_cone(f) || is_affine(f)) || is_set(f)
# cone => positively homogeneous
@test !is_cone(f) || is_positively_homogeneous(f)
# (convex && positively homogeneous) <=> (convex && support)
@test (is_convex(f) && is_positively_homogeneous(f)) == (is_convex(f) && is_support(f))
# strongly_convex => convex
@test !is_strongly_convex(f) || is_convex(f)
end
@testset "Aqua" begin
Aqua.test_all(ProximalOperators; ambiguities=false)
end
@testset "Utilities" begin
include("test_symmetricpacked.jl")
end
@testset "Functions" begin
include("test_cubeNormL2.jl")
include("test_huberLoss.jl")
include("test_indAffine.jl")
include("test_leastSquares.jl")
include("test_logisticLoss.jl")
include("test_quadratic.jl")
include("test_linear.jl")
include("test_indHyperslab.jl")
include("test_graph.jl")
include("test_normL1plusL2.jl")
end
include("test_calls.jl")
include("test_indPolyhedral.jl")
@testset "Gradients" begin
include("test_gradients.jl")
end
@testset "Calculus rules" begin
include("test_calculus.jl")
include("test_epicompose.jl")
include("test_moreauEnvelope.jl")
include("test_precompose.jl")
include("test_pointwiseMinimum.jl")
include("test_postcompose.jl")
include("test_regularize.jl")
include("test_separableSum.jl")
include("test_slicedSeparableSum.jl")
include("test_sum.jl")
end
@testset "Equivalences" begin
include("test_equivalences.jl")
end
include("test_optimality_conditions.jl")
@testset "Hardcoded" begin
include("test_results.jl")
end
@testset "Demos" begin
include("../demos/lasso.jl")
include("../demos/rpca.jl")
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 4786 | # Test equivalence of functions and prox mappings by means of calculus rules
using LinearAlgebra
using ProximalOperators
using Test
stuff = [
Dict( "funcs" => (IndBallLinf(), Conjugate(NormL1())),
"args" => ( randn(10), ),
"gammas" => ( 1.0, )
),
Dict( "funcs" => (lambda -> (NormL1(lambda), Conjugate(IndBallLinf(lambda))))(0.1 + 10.0*rand()),
"args" => ( 5.0*sign.(randn(10)) + 5.0*randn(10),
5.0*sign.(randn(20)) + 5.0*randn(20) ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (lambda -> (IndBallLinf(lambda), Conjugate(NormL1(lambda))))(0.1 + 10.0*rand()),
"args" => ( 5.0*sign.(randn(10)) + 5.0*randn(10),
5.0*sign.(randn(20)) + 5.0*randn(20) ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (lambda -> (NormL1(lambda), Conjugate(IndBox(-lambda,lambda))))(0.1 .+ 10.0*rand(30)),
"args" => ( 5.0*sign.(randn(30)) + 5.0*randn(30), ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (lambda -> (IndBox(-lambda,lambda), Conjugate(NormL1(lambda))))(0.1 .+ 10.0*rand(30)),
"args" => ( 5.0*sign.(randn(30)) + 5.0*randn(30), ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (lambda -> (NormL2(lambda), Conjugate(IndBallL2(lambda))))(0.1 .+ 10.0*rand()),
"args" => ( 5.0*sign.(randn(10)) + 5.0*randn(10),
5.0*sign.(randn(20)) + 5.0*randn(20) ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (lambda -> (IndBallL2(lambda), Conjugate(NormL2(lambda))))(0.1 .+ 10.0*rand()),
"args" => ( 5.0*sign.(randn(10)) + 5.0*randn(10),
5.0*sign.(randn(20)) + 5.0*randn(20) ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => ((a, b, mu) -> (LogBarrier(a, b, mu), Postcompose(PrecomposeDiagonal(LogBarrier(), a, b), mu)))(2.0, 0.5, 1.0),
"args" => ( rand(10), rand(10) ),
"gammas" => ( 0.5+rand(), 0.5+rand() )
),
Dict( "funcs" => (p -> (IndPoint(p), IndBox(p, p)))(randn(50)),
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => (IndZero(), IndBox(0, 0)),
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => (IndFree(), IndBox(-Inf, +Inf)) ,
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => (IndNonnegative(), IndBox(0.0, Inf)),
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => (IndNonpositive(), IndBox(-Inf, 0.0)),
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => (lambda -> (SqrNormL2(lambda), Conjugate(SqrNormL2(1.0/lambda))))(0.1 .+ 5.0*rand()),
"args" => ( randn(50), randn(50), randn(50) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => ((A, b) -> (LeastSquares(A, b), Tilt(LeastSquares(A, zeros(size(A, 1))), -A'*b, 0.5*dot(b, b))))(randn(10,20), randn(10)),
"args" => ( randn(20), randn(20), randn(20) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => ((lambda, rho) -> (ElasticNet(lambda,rho), Regularize(NormL1(lambda),rho)))(rand(), rand()),
"args" => ( randn(20), randn(20), randn(20) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => ((b, mu) -> (HingeLoss(b, mu), Postcompose(PrecomposeDiagonal(SumPositive(), -b, 1.0), mu)))([0.5 .+ rand(10); -0.5 .- rand(10)], 0.5+rand()),
"args" => ( randn(20), randn(20), randn(20) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
),
Dict( "funcs" => ((A, b) -> (Postcompose(LeastSquares(A, b), 15.0, 6.5), Postcompose(Postcompose(LeastSquares(A, b), 5.0, 1.5), 3.0, 2.0)))(randn(10, 20), randn(10)),
"args" => ( randn(20), randn(20), randn(20) ),
"gammas" => ( 1.0, rand(), 5.0*rand() )
)
]
@testset "$i" for i in eachindex(stuff)
f = stuff[i]["funcs"][1]
g = stuff[i]["funcs"][2]
for j in eachindex(stuff[i]["args"])
x = stuff[i]["args"][j]
gamma = stuff[i]["gammas"][j]
# compare the three versions (for f)
yf, fy = prox_test(f, x, gamma)
# compare the three versions (for g)
yg, gy = prox_test(g, x, gamma)
# compare results of f and g
@test isapprox(yf, yg, atol=1e-12, rtol=1e-8)
@test isapprox(fy, gy, atol=1e-12, rtol=1e-8)
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 17385 | using LinearAlgebra
using SparseArrays
using Random
using Test
Random.seed!(0)
# Simply test the call to functions and their prox
test_cases_spec = [
Dict(
"constr" => DistL2,
"wrong" => [
(IndBallL2(), -rand()),
],
"right" => [
( (IndSOC(),), randn(Float32, 10) ),
( (IndSOC(),), randn(Float64, 10) ),
( (IndNonnegative(), rand()), randn(Float64, 10) ),
( (IndZero(),), randn(Float64, 10) ),
( (IndBox(-1, 1),), randn(Float32, 10) ),
( (IndBox(-1, 1),), randn(Float64, 10) ),
],
),
Dict(
"constr" => SqrDistL2,
"wrong" => [
(IndBallL2(), -rand()),
],
"right" => [
( (IndSimplex(),), randn(Float32, 10) ),
( (IndSimplex(),), randn(Float64, 10) ),
( (IndNonnegative(), rand()), randn(Float64, 10) ),
( (IndZero(),), randn(Float64, 10) ),
( (IndBox(-1, 1),), randn(Float32, 10) ),
( (IndBox(-1, 1),), randn(Float64, 10) ),
],
),
Dict(
"constr" => ElasticNet,
"wrong" => [
(-rand()),
(-rand(), -rand()),
(-rand(), rand()),
(rand(), -rand())
],
"right" => [
( (), randn(Float64, 10) ),
( (rand(Float32),), randn(Float32, 10) ),
( (rand(Float64), rand(Float64)), randn(Float64, 10) ),
( (rand(Float64), rand(Float64)), rand(Complex{Float64}, 20) ),
],
),
Dict(
"constr" => HingeLoss,
"wrong" => [
(randn(10), -rand()),
],
"right" => [
( (Int.(sign.(randn(10))), ), randn(Float32, 10) ),
( (Int.(sign.(randn(10))), ), randn(Float64, 10) ),
( (Int.(sign.(randn(20))), 0.1f0 + rand(Float32)), randn(Float32, 20) ),
( (Int.(sign.(randn(20))), 0.1e0 + rand(Float64)), randn(Float64, 20) ),
],
),
Dict(
"constr" => IndAffine,
"right" => [
( (randn(10), randn()), randn(Float32, 10) ),
( (randn(10, 20), randn(10)), randn(20) ),
( (sprand(50,100, 0.1), randn(50)), randn(Float32, 100) ),
( (sprand(Complex{Float64}, 50, 100, 0.1), randn(50)+im*randn(50)), randn(100)+im*randn(100) ),
],
),
Dict(
"constr" => IndBallLinf,
"wrong" => [
(-rand(),),
],
"right" => [
( (rand(Float32),), randn(Float32, 10) ),
( (rand(Float64),), randn(Float64, 10) ),
],
),
Dict(
"constr" => IndBallL0,
"wrong" => [
(-4,),
],
"right" => [
( (5,), randn(25) ),
( (10, ), randn(Float32, 5, 5) ),
( (5, ), randn(Complex{Float64}, 15) ),
],
),
Dict(
"constr" => IndBallL1,
"wrong" => [
(-rand(),),
],
"right" => [
( (), [0.1, -0.2, 0.3, -0.39] ),
( (), Complex{Float64}[0.1, -0.2, 0.3, -0.39] ),
( (), rand(Float32, 5) ),
( (), rand(Float64, 5) ),
( (), rand(Complex{Float32}, 5) ),
( (Float32(3),), randn(Float32, 5) ),
( (Float64(5),), randn(Float64, 2, 3) ),
( (Float64(1),), randn(Complex{Float64}, 5) ),
],
),
Dict(
"constr" => IndBallL2,
"wrong" => [
(-rand(),),
],
"right" => [
( (), rand(Float32, 5) ),
( (), rand(Float64, 5) ),
( (), rand(Complex{Float32}, 5) ),
( (Float32(3),), randn(Float32, 5) ),
( (Float64(5),), randn(Float64, 2, 3) ),
( (Float64(1),), randn(Complex{Float64}, 5) ),
],
),
Dict(
"constr" => IndBallRank,
"wrong" => [
(-2,),
],
"right" => [
( (1+Int(round(10*rand())),), randn(20, 50) ),
( (10+Int(round(5*rand())),), rand(30, 8)*rand(8,70) ),
( (10+Int(round(5*rand())),), randn(Float32, 5, 8) ),
( (10+Int(round(5*rand())),), rand(Complex{Float64}, 20, 50) ),
( (10+Int(round(5*rand())),), rand(Complex{Float32}, 5, 8) ),
],
),
Dict(
"constr" => IndBox,
"wrong" => [
(+1, -1),
],
"right" => [
( (-rand(Float32), +rand(Float32, 10)), randn(Float32, 10) ),
( (-rand(Float32, 10), +rand(Float32)), randn(Float32, 10) ),
( (-rand(Float64), +rand(Float64)), randn(Float64, 20) ),
( (-rand(Float64, 30), +rand(Float64, 30)), randn(Float64, 30) ),
],
),
Dict(
"constr" => IndExpPrimal,
"right" => [
( (), randn(Float32, 3) ),
( (), randn(Float64, 3) ),
],
),
Dict(
"constr" => IndExpDual,
"right" => [
( (), randn(Float32, 3) ),
( (), randn(Float64, 3) ),
],
),
Dict(
"constr" => IndFree,
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 2, 3) ),
],
),
Dict(
"constr" => IndGraph,
"right" => [
( (sprand(50, 100, 0.2),), randn(150) ),
( (sprand(Complex{Float64}, 50, 100, 0.2),), randn(150)+im*randn(150) ),
( (rand(50, 100),), randn(150) ),
( (rand(Complex{Float64}, 50, 100),), randn(150)+im*randn(150) ),
( (rand(55, 50),), randn(105) ),
( (rand(Complex{Float64}, 55, 50),), randn(105)+im*randn(105) ),
],
),
Dict(
"constr" => IndPoint,
"right" => [
( (), randn(5) ),
( (randn(Float32, 5),), randn(Float32, 5) ),
( (randn(Float64, 5),), randn(Float32, 5) ),
( (randn(Float64, 5),), randn(Float64, 5) ),
( (randn(Complex{Float32}, 5),), randn(Complex{Float32}, 5) ),
( (randn(Complex{Float64}, 5),), randn(Complex{Float32}, 5) ),
( (randn(Complex{Float64}, 5),), randn(Complex{Float64}, 5) ),
],
),
Dict(
"constr" => IndStiefel,
"right" => [
( (), randn(Float32, 5, 3) ),
( (), randn(Float64, 5, 3) ),
( (), randn(Complex{Float32}, 5, 3) ),
( (), randn(Complex{Float64}, 5, 3) ),
],
),
Dict(
"constr" => IndZero,
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float32, 2, 3) ),
( (), randn(Float64, 5) ),
( (), randn(Float64, 2, 3) ),
( (), randn(Complex{Float32}, 5) ),
( (), randn(Complex{Float32}, 2, 3) ),
],
),
Dict(
"constr" => IndHalfspace,
"right" => [
( (-ones(5), -1.0), -rand(5) ),
( (-ones(5), 1.0), rand(5) ),
],
),
Dict(
"constr" => IndNonnegative,
"right" => [
( (), randn(10) ),
( (), randn(3, 5) ),
],
),
Dict(
"constr" => IndNonpositive,
"right" => [
( (), randn(10) ),
( (), randn(3, 5) ),
],
),
Dict(
"constr" => IndSimplex,
"wrong" => [
(-rand(),)
],
"right" => [
( (), randn(10) ),
( (), randn(3, 5) ),
( (5.0,), randn(10) ),
],
),
Dict(
"constr" => IndSOC,
"right" => [
( (), [rand(), -rand()] ),
( (), [-rand(), rand()] ),
( (), rand(5) ),
],
),
Dict(
"constr" => IndRotatedSOC,
"right" => [
( (), [rand(), -rand()] ),
( (), [-rand(), rand()] ),
( (), rand(5) ),
],
),
Dict(
"constr" => IndSphereL2,
"wrong" => [
(-rand(),),
],
"right" => [
( (rand(),), randn(10) ),
( (sqrt(20),), randn(20) ),
( (1,), 1e-3*randn(10) ),
( (1,), randn(Complex{Float64}, 10) ),
],
),
Dict(
"constr" => IndPSD,
"right" => [
( (), Symmetric(randn(Float32, 5, 5)) ),
( (), Symmetric(randn(Float64, 10, 10)) ),
( (), Hermitian(randn(Complex{Float32}, 5, 5)) ),
( (), Hermitian(randn(Complex{Float64}, 10, 10)) ),
( (), randn(Float32, 5, 5) ),
( (), randn(Float64, 10, 10) ),
( (), randn(Complex{Float32}, 5, 5) ),
( (), randn(Complex{Float64}, 10, 10) ),
( (), randn(15) ),
],
),
Dict(
"constr" => LogBarrier,
"wrong" => [
(1.0, 0.0, -rand()),
],
"right" => [
( (), rand(Float32, 5) ),
( (), rand(Float64, 5) ),
( (rand(Float32),), rand(Float32, 5) ),
( (rand(Float64), rand(Float64)), rand(Float64, 5) ),
( (rand(Float32), rand(Float32), rand(Float32)), rand(Float32, 5) ),
],
),
Dict(
"constr" => NormL0,
"wrong" => [
(-rand(),),
],
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 5) ),
        ( (rand(Float32),), randn(Complex{Float32}, 5) ),
],
),
Dict(
"constr" => NormL1,
"wrong" => [
(-rand(),),
(-rand(10),)
],
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 5) ),
( (), randn(Complex{Float32}, 5) ),
( (rand(Float32),), randn(Float32, 5) ),
( (rand(Float64, 5),), randn(Float64, 5) ),
( (rand(Float64, 5),), rand(Complex{Float64}, 5) ),
( (rand(Float32),), rand(Complex{Float32}, 5) ),
],
),
Dict(
"constr" => NormL2,
"right" => [
( (), randn(Float32, 5), ),
( (), randn(Float64, 5), ),
( (), randn(Complex{Float32}, 5), ),
( (rand(Float32),), randn(Float32, 5), ),
( (rand(Float64),), randn(Complex{Float64}, 5), ),
],
),
Dict(
"constr" => NormL1plusL2,
"wrong" => [
(-rand(), rand()),
(-rand(10),),
(-rand(10), rand()),
],
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 5) ),
( (), randn(Complex{Float32}, 5) ),
( (rand(Float32),), randn(Float32, 5) ),
( (rand(Float64, 5),), randn(Float64, 5) ),
( (rand(Float64, 5),), rand(Complex{Float64}, 5) ),
( (rand(Float32),), rand(Complex{Float32}, 5) ),
( (rand(Float32), rand(Float32)), randn(Float32, 5) ),
( (rand(Float64, 5), rand(Float64)), randn(Float64, 5) ),
( (rand(Float64, 5), rand(Float64)), rand(Complex{Float64}, 5) ),
( (rand(Float32), rand(Float32)), rand(Complex{Float32}, 5) ),
],
),
Dict(
"constr" => NormL21,
"right" => [
( (), randn(Float32, 3, 5) ),
( (), randn(Float64, 3, 5) ),
( (), randn(Complex{Float32}, 3, 5) ),
( (rand(Float32),), randn(Float32, 3, 5) ),
( (rand(Float64),), randn(Float64, 3, 5) ),
( (rand(Float32), 1), randn(Complex{Float32}, 3, 5) ),
( (rand(Float64), 2), randn(Complex{Float64}, 3, 5) ),
],
),
Dict(
"constr" => NormLinf,
"wrong" => [
(-rand(),),
],
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 5) ),
( (), randn(Complex{Float32}, 5) ),
( (rand(Float32),), randn(Float32, 5) ),
( (rand(Float32),), rand(Complex{Float32}, 5) ),
],
),
Dict(
"constr" => NuclearNorm,
"wrong" => [
(-rand(),),
],
"right" => [
( (), rand(Float32, 5, 5) ),
( (), rand(Float64, 3, 5) ),
( (), rand(Complex{Float32}, 5, 5) ),
( (rand(Float32),), rand(Float32, 5, 5) ),
( (rand(Float64),), rand(Float64, 3, 5) ),
( (rand(Float32),), rand(Complex{Float32}, 5, 5) ),
( (sqrt(10e0),), rand(Complex{Float64}, 3, 5) ),
( (sqrt(10f0),), rand(Complex{Float32}, 5, 3) ),
],
),
Dict(
"constr" => SqrNormL2,
"wrong" => [
(-rand(),),
],
"right" => [
( (), randn(Float32, 10) ),
( (), randn(Float64, 10) ),
( (rand(Float32),), randn(Float32, 10) ),
( (rand(Float64),), randn(Float64, 10) ),
( (Float64[1, 1, 1, 1, 0],), randn(Float64, 5) ),
( (rand(Float32, 20),), randn(Float32, 20) ),
( (rand(Float64, 20),), randn(Float64, 20) ),
( (rand(30),), rand(Complex{Float64}, 30) ),
( (rand(),), rand(Complex{Float64}, 50) ),
],
),
Dict(
"constr" => LeastSquares,
"wrong" => [
(randn(3, 5), randn(4), rand()),
],
"right" => [
( (randn(10, 25), randn(10)), randn(25) ),
( (randn(40, 13), randn(40), rand()), randn(13) ),
( (rand(Complex{Float64}, 25, 10), rand(Complex{Float64}, 25)), rand(Complex{Float64}, 10) ),
( (sprandn(10, 100, 0.15), randn(10), rand()), randn(100) ),
],
),
# Dict(
# "constr" => SumLargest,
# "wrong" => [
# (-1,), (0,), (1, -2.0), (2, 0.0), (2, -rand())
# ],
# "right" => [
# ( (), randn(10) ),
# ( (1,), randn(20) ),
# ( (5, 3.2), randn(5,10) ),
# ( (3, rand()), randn(8,17) ),
# ],
# ),
Dict(
"constr" => Maximum,
"wrong" => [
(-rand(),),
],
"right" => [
( (), randn(Float32, 10) ),
( (), randn(Float64, 10) ),
( (rand(Float64),), randn(Float64, 20) ),
( (), randn(Float32, 5,10) ),
        ( (rand(Float32),), randn(Float32, 8, 17) ),
],
),
Dict(
"constr" => IndBinary,
"right" => [
( (), randn(10) ),
( (randn(), randn()), randn(10) ),
( (randn(), randn(10)), randn(10) ),
( (randn(10), randn()), randn(10) ),
( (randn(10), randn(10)), randn(10) ),
( (randn(), randn(5,5)), randn(5,5) ),
],
),
Dict(
"constr" => Regularize,
"wrong" => [
(NormL1(), -rand()),
],
"right" => [
( (NormL1(), rand()), randn(10) ),
( (NormL1(rand()), rand()), randn(5,10) ),
( (NormL1(), rand(), randn(20)), randn(20) ),
( (NormL1(rand()), rand(), randn(20)), randn(20) ),
],
),
Dict(
"constr" => Tilt,
"right" => [
( (LeastSquares(randn(20, 10), randn(20)), randn(10)), randn(10) ),
],
),
Dict(
"constr" => HuberLoss,
"wrong" => [
(-rand(), ),
(rand(), -rand()),
(-rand(), rand()),
(-rand(), -rand())
],
"right" => [
( (), randn(Float32, 10) ),
( (), randn(Float64, 10) ),
( (rand(Float32), ), randn(Float32, 5, 8) ),
( (rand(Float64), ), randn(Float64, 5, 8) ),
( (rand(Float64), rand(Float64)), randn(Float64, 20) ),
( (rand(Float64), rand(Float64)), rand(Complex{Float64}, 8, 12) ),
],
),
Dict(
"constr" => SumPositive,
"right" => [
( (), randn(Float32, 5) ),
( (), randn(Float64, 5) ),
( (), randn(Float32, 3, 5) ),
( (), randn(Float64, 3, 5) ),
],
),
Dict(
"constr" => SeparableSum,
"right" => [
( ((NormL2(2.0), NormL1(1.5), NormL2(0.5)), ), (randn(5), randn(15), randn(10)) ),
],
),
Dict(
"constr" => SlicedSeparableSum,
"right" => [
( ((NormL2(2.0), NormL1(1.5), NormL2(0.5)), ((1:5,), (6:20,), (21:30,))), randn(30) ),
],
),
]
@testset "$(spec["constr"])" for spec in test_cases_spec
constr = spec["constr"]
if haskey(spec, "wrong")
for wrong in spec["wrong"]
@test_throws ErrorException constr(wrong...)
end
end
for right in spec["right"]
params, x = right
f = constr(params...)
predicates_test(f)
##### just call f
fx = call_test(f, x)
##### compute prox with default gamma
y, fy = prox_test(f, x)
##### compute prox with random gamma
T = if typeof(x) <: Array
eltype(x)
elseif typeof(x) <: Tuple
eltype(x[1])
else
Float64
end
gam = 5*rand(real(T))
y, fy = prox_test(f, x, gam)
##### compute prox with multiple random gammas
if ProximalOperators.is_separable(f)
gam = real(T)(0.5) .+ 2 .* rand(real(T), size(x))
y, fy = prox_test(f, x, gam)
end
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 556 | using Test
using Random
using ProximalOperators
@testset "CubeNormL2" begin
for R in [Float16, Float32, Float64]
for T in [R, Complex{R}]
for shape in [(5,), (3, 5), (3, 4, 5)]
lambda = R(0.1) + 5*rand(R)
f = CubeNormL2(lambda)
predicates_test(f)
x = randn(T, shape)
call_test(f, x)
gamma = R(0.5)+rand(R)
y, f_y = prox_test(f, x, gamma)
grad_f_y, f_y = gradient_test(f, y)
@test grad_f_y ≈ (x - y)/gamma
end
end
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 927 | using Test
using LinearAlgebra
using SparseArrays
using ProximalOperators
@testset "Epicompose (Gram-diagonal)" begin
n = 5
for R in [Float64] # TODO: enable Float32?
for T in [R, Complex{R}]
A = randn(T, n, n)
Q, _ = qr(A)
mu = R(2)
L = mu*Q
Lfs = [
(L, NormL1(R(1))),
(sparse(L), NormL1(R(1))),
]
for (L, f) in Lfs
g = Epicompose(L, f, mu)
x = randn(T, n)
prox_test(g, x, R(2))
end
end
end
end
@testset "Epicompose (Quadratic)" begin
n = 5
m = 3
for R in [Float64] # TODO: enable Float32?
W = randn(R, n, n)
Q = W' * W
q = randn(R, n)
L = randn(R, m, n)
Lfs = [
(L, Quadratic(Q, q)),
(sparse(L), Quadratic(sparse(Q), q)),
]
for (L, f) in Lfs
g = Epicompose(L, f)
x = randn(R, m)
prox_test(g, x, R(2))
end
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 3271 | # Test other equivalences of prox operations which are not covered by calculus rules
using Random
using SparseArrays
using ProximalOperators
using Test
################################################################################
### testing consistency of simplex/L1 ball
################################################################################
# Inspired by Condat, "Fast projection onto the simplex and the l1 ball", Mathematical Programming, 158:575–585, 2016.
# See Prop. 2.1 there and following remarks.
@testset "IndSimplex/IndBallL1" begin
n = 20
N = 10
# projecting onto the L1 ball
for i = 1:N
x = randn(n)
r = 5*rand()
f = IndSimplex(r)
g = IndBallL1(r)
y1, fy1 = prox(f, abs.(x))
y1 = sign.(x).*y1
y2, gy2 = prox(g, x)
@test y1 ≈ y2
end
# projecting onto the simplex
for i = 1:N
x = randn(n)
r = 5*rand()
f = IndSimplex(r)
g = IndBallL1(r)
y1, fy1 = prox(f, x)
y2, gy2 = prox(g, x .- minimum(x) .+ r./n)
@test y1 ≈ y2
end
end
################################################################################
### testing consistency of hinge loss/box indicator
################################################################################
@testset "HingeLoss/IndBox" begin
n = 20
N = 10
# test using Moreau identity: prox(f, x, gamma) = x - gamma*prox(f*, x/gamma, 1/gamma)
# and a couple of other calculus rules in the case b = ±ones(n)
#
# f(x) = max(0, x) is conjugate to h = IndBox(0,1)
# g(x) = HingeLoss(b, mu)(x) = mu*f(1-b.*x)
# prox(g, x, gamma) = (prox(f, 1-b.*x, mu*gamma) - 1)./(-b)
# = x + mu*gamma*prox(h, (1-b.*x)/(mu*gamma), 1/(mu*gamma))./b
b = sign.(randn(n))
mu = 0.1+rand()
g = HingeLoss(b, mu)
h = IndBox(0, 1)
for i = 1:N
x = randn(n)
gamma = 0.1 + rand()
y1, ~ = prox(g, x, gamma)
z, ~ = prox(h, (1 .- b.*x)./(mu*gamma), 1/(mu*gamma))
y2 = mu*gamma*(z./b) + x
@test y1 ≈ y2
end
end
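# The Moreau identity above can also be checked directly on a known conjugate
# pair: f = NormL1(1) has conjugate IndBox(-1, 1), the indicator of the unit
# L-infinity ball (quick standalone check).
let x = randn(20), gamma = 0.7
f = NormL1(1.0)
fconj = IndBox(-1.0, 1.0)
y, _ = prox(f, x, gamma)
z, _ = prox(fconj, x./gamma, 1/gamma)
@test y ≈ x .- gamma.*z
end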
################################################################################
### testing regularize
################################################################################
@testset "Regularize/ElasticNet" begin
lambda = rand()
rho = rand()
g = Regularize(NormL1(lambda),rho)
x = randn(10)
y,f = prox(g,x)
y2,f2 = prox(ElasticNet(lambda,rho),x)
@test f ≈ f2
@test y ≈ y2
end
################################################################################
### testing IndAffine
################################################################################
@testset "IndAffine (sparse/dense)" begin
A = sprand(50,100, 0.1)
b = randn(50)
g1 = IndAffine(A, b)
g2 = IndAffine(Matrix(A), b)
x = randn(100)
y1, f1 = prox(g1, x)
y2, f2 = prox(g2, x)
@test f1 ≈ f2
@test y1 ≈ y2
end
################################################################################
### testing NormL1plusL2 reduces to L1/L2
################################################################################
@testset "NormL1plusL2 special case" begin
g = NormL1(1.)
# λ_2 = 0
f = NormL1plusL2(1., 0.)
x = randn(100)
y1, f1 = prox(g, x)
y2, f2 = prox(f, x)
@test f1 ≈ f2
@test y1 ≈ y2
end
@testset "NormL1plusL2 special case" begin
g = NormL2(1.)
# λ_1 = 0
f = NormL1plusL2(0., 1.)
x = randn(100)
y1, f1 = prox(g, x)
y2, f2 = prox(f, x)
@test f1 ≈ f2
@test y1 ≈ y2
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 5924 | using LinearAlgebra
using Random
Random.seed!(0)
function gradient_fd(f,x) #compute gradient using finite differences
gradfd = zero(x)
xeps1 = zero(x)
xeps2 = zero(x)
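# central differences with step ~ sqrt(eps), balancing truncation and rounding error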
delta = sqrt(eps())
for i in eachindex(gradfd)
xeps1 .= x
xeps2 .= x
xeps1[i] -= delta
xeps2[i] += delta
gradfd[i] = (f(xeps2)-f(xeps1))/(2*delta)
end
return gradfd
end
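# Sanity check (illustrative): for f(x) = ‖x‖²/2 the analytic gradient is x
@assert isapprox(gradient_fd(SqrNormL2(1.0), ones(3)), ones(3); rtol=1e-6)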
stuff = [
Dict( "f" => ElasticNet(2.0, 3.0),
"x" => [-2., -1., 0., 1., 2., 3.],
"∇f(x)" => 2*[-1., -1., 0., 1., 1., 1.] + 3*[-2., -1., 0., 1., 2., 3.],
),
Dict( "f" => NormL1(2.0),
"x" => [-2., -1., 0., 1., 2., 3.],
"∇f(x)" => 2*[-1., -1., 0., 1., 1., 1.],
),
Dict( "f" => NormL2(2.0),
"x" => [-1., 0., 2.],
"∇f(x)" => 2/sqrt(5)*[-1., 0., 2.],
),
Dict( "f" => NormL2(2.0),
"x" => [-1., 0., 2.],
"∇f(x)" => 2/sqrt(5)*[-1., 0., 2.],
),
Dict( "f" => NormL2(2.0),
"x" => [0., 0., 0.],
"∇f(x)" => [0., 0., 0.],
),
# Dict( "f" => NormL21(),
# "x" => ,
# "∇f(x)" => ,
# ),
Dict( "f" => NormLinf(2.),
"x" => [-2., -1., 0., 1., 2., 3.],
"∇f(x)" => 2*[0., 0., 0., 0., 0., 1.],
),
Dict( "f" => NormLinf(2.),
"x" => [-4., -1., 0., 1., 2., 3.],
"∇f(x)" => 2*[-1., 0., 0., 0., 0., 0.],
),
# Dict( "f" => NuclearNorm(),
# "x" => ,
# "∇f(x)" => ,
# ),
Dict( "f" => SqrNormL2(2.0),
"x" => [-2., -1., 0., 1., 2., 3.],
"∇f(x)" => 2*[-2., -1., 0., 1., 2., 3.],
),
Dict( "f" => HingeLoss([1., 2., 1., 2., 1.], 2.0),
"x" => [-2., -1., 0., 2., 3.],
"∇f(x)" => -2*[1., 2., 1., 0., 0.],
),
Dict( "f" => HuberLoss(2., 3.),
"x" => [-1., 0.5],
"∇f(x)" => 3*[-1., 0.5],
),
Dict( "f" => HuberLoss(2., 3.),
"x" => [-2., 1.5],
"∇f(x)" => [-4.8, 3.6], # 3*2*[-2., 1.5]/norm([-2., 1.5]),
),
Dict( "f" => Linear([-1.,2.,3.]),
"x" => [1., 5., 3.14],
"∇f(x)" => [-1.,2.,3.],
),
Dict( "f" => LogBarrier(2.0, 1.0, 1.5),
"x" => [1.0, 2.0, 3.0],
"∇f(x)" => -1.5*2.0./(2.0*[1.0, 2.0, 3.0].+1.0),# -μ*a*/(a*x+b)
),
Dict( "f" => LeastSquares([1.0 2.0; 3.0 4.0], [0.1, 0.2], 2.0, iterative=false),
"x" => [-1., 2.],
"∇f(x)" => [34.6, 50.], #λ*(A'A*x-A'b),
),
Dict( "f" => LeastSquares([1.0 2.0; 3.0 4.0], [0.1, 0.2], 2.0, iterative=true),
"x" => [-1., 2.],
"∇f(x)" => [34.6, 50.], #λ*(A'A*x-A'b),
),
Dict( "f" => Quadratic([2. -1.; -1. 2.], [0.1, 0.2], iterative=false),
"x" => [3., 4.],
"∇f(x)" => [2.1, 5.2], #[2. -1.; -1. 2.]*[3., 4.]+[0.1, 0.2],
),
Dict( "f" => Quadratic([2. -1.; -1. 2.], [0.1, 0.2], iterative=true),
"x" => [3., 4.],
"∇f(x)" => [2.1, 5.2], #[2. -1.; -1. 2.]*[3., 4.]+[0.1, 0.2],
),
Dict( "f" => SumPositive(),
"x" => [-1., 1., 2.],
"∇f(x)" => [0., 1., 1.],
),
# Dict( "f" => Maximum(2.0),
# "x" => [-4., 2., 3.],
# "∇f(x)" => 2*[0., 0., 1.],
# ),
Dict( "f" => DistL2(IndZero(),2.0),
"x" => [1., -2],
"∇f(x)" => 2*[1., -2]./norm([1., -2]),
),
Dict( "f" => DistL2(IndBallL2(1.0),2.0),
"x" => [2., -2],
"∇f(x)" => [sqrt(2), -sqrt(2)],
),
Dict( "f" => SqrDistL2(IndZero(),2.0),
"x" => [1., -2],
"∇f(x)" => 2*[1., -2],
),
Dict( "f" => SqrDistL2(IndBallL2(1.0),2.0),
"x" => [2., -2],
"∇f(x)" => 2.0*([2., -2]-[1/sqrt(2), -1/(sqrt(2))]),
),
Dict( "f" => LogisticLoss([1.0, -1.0, 1.0, -1.0, 1.0], 1.5),
"x" => [-1.0, -2.0, 3.0, 2.0, 1.0],
"∇f(x)" => [-1.0965878679450072, 0.17880438303317633, -0.07113880976635019, 1.3211956169668235, -0.4034121320549927]
),
Dict( "f" => SqrHingeLoss([1.0, -1.0, 1.0, -1.0, 1.0], 1.5),
"x" => randn(MersenneTwister(0),Float64,5),
"∇f(x)" => gradient_fd(SqrHingeLoss([1.0, -1.0, 1.0, -1.0, 1.0], 1.5),randn(MersenneTwister(0),Float64,5))
),
Dict( "f" => CrossEntropy([1.0, 0., 1.0, 0., 1.0]),
"x" => rand(MersenneTwister(0),Float64,5),
"∇f(x)" => gradient_fd(CrossEntropy([1.0, 0., 1.0, 0., 1.0]),rand(MersenneTwister(0),Float64,5))
),
Dict( "f" => CrossEntropy([true, false, true, false, true]),
"x" => rand(MersenneTwister(0),Float64,5),
"∇f(x)" => gradient_fd(CrossEntropy([true, false, true, false, true]),rand(MersenneTwister(0),Float64,5))
),
Dict( "f" => CrossEntropy([true, false, true, false, true]),
"x" => rand(MersenneTwister(0),Float64,1,5),
"∇f(x)" => gradient_fd(CrossEntropy([true, false, true, false, true]),rand(MersenneTwister(0),Float64,1,5))
),
]
for i in eachindex(stuff)
f = stuff[i]["f"]
x = stuff[i]["x"]
ref_∇f = stuff[i]["∇f(x)"]
ref_fx = f(x)
∇f, fx = gradient_test(f, x)
@test fx ≈ ref_fx
@test ∇f ≈ ref_∇f
for j = 1:11
#For initial point x and 10 other random points
∇f, fx = gradient_test(f, x)
for k = 1:10
# Test conditions in different directions
if ProximalOperators.is_convex(f)
# Test ∇f is subgradient
if typeof(f) <: CrossEntropy
d = x.*(rand(Float64, size(x)).-1)./2 # ensures 0 <= x+d <= 1
else
d = randn(Float64, size(x))
end
@test ProximalOperators.isapprox_le(fx + dot(d, ∇f), f(x+d))
else
# Assume smooth function
d = randn(Float64, size(x))
d .*= 1e-6/norm(d) # shrink to ‖d‖ = 1e-6 so the first-order check below is within tolerance
@test f(x+d) ≈ fx + dot(d, ∇f) atol=1e-12
end
end
x = rand(Float64, size(x))
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 2748 | using LinearAlgebra
using SparseArrays
using Random
Random.seed!(0)
## Test IndGraph
m, n = (50, 100)
sm = n + div(n, 10)
function test_against_IndAffine(f, A, cd)
m, n = size(A)
if m > n
return # no variant for skinny case for now
end
T = eltype(A)
## TODO: remove this early return once IndAffine is revised; in the complex
## case the two mappings currently differ (though both are valid projections)
if T <: Complex
return
end
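# the graph {(x, y) : y = A*x} is the affine set {z : [A  -I]*z = 0}, which IndAffine can represent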
B = ifelse(issparse(A), [A -SparseMatrixCSC{T}(I, m, m)], [A -Matrix{T}(I, m, m)])
# INIT IndAffine for the case
faff = IndAffine(B, zeros(T, m))
xy_aff, fx_aff = prox(faff, cd)
@test faff(xy_aff) == 0.0
# INIT testing function
xy, fx = prox(f, cd)
@test f(xy) == 0.0
# test against IndAffine (the complex case already returned above)
@test xy ≈ xy_aff
return
end
## First, do common tests
stuff = [
Dict(
"constr" => IndGraph,
"params" => (
(sprand(m, n, 0.2),),
(sprand(Complex{Float64}, m, n, 0.2),),
(rand(m, n),),
(rand(Complex{Float64}, m, n),),
(rand(sm, n),),
(rand(Complex{Float64}, sm, n),),
),
"args" => (
randn(m + n),
randn(m + n)+im * randn(m + n),
randn(m + n),
randn(m + n)+im * randn(m + n),
randn(sm + n),
randn(sm + n)+im * randn(sm + n)
)
),
]
for i in eachindex(stuff)
constr = stuff[i]["constr"]
if haskey(stuff[i], "wrong")
for j in eachindex(stuff[i]["wrong"])
wrong = stuff[i]["wrong"][j]
@test_throws ErrorException constr(wrong...)
end
end
for j in eachindex(stuff[i]["params"])
params = stuff[i]["params"][j]
x = stuff[i]["args"][j]
f = constr(params...)
predicates_test(f)
##### argument split
c = view(x, 1:f.n)
d = view(x, f.n + 1:f.n + f.m)
ax = zero(c)
ay = zero(d)
##### just call f
fx = call_test(f, x)
##### compute prox with default gamma
y, fy = prox_test(f, x)
##### compute prox with random gamma
gam = 5*rand()
y, fy = prox_test(f, x, gam)
##### test calls to prox! with more signatures
prox!(ax, ay, f, c, d)
@test f(ax, ay) ≈ 0
ax_naive, ay_naive, fv_naive = ProximalOperators.prox_naive(f, c, d, 1)
@test f(ax_naive, ay_naive) ≈ 0
prox!((ax, ay), f, (c, d))
@test f((ax, ay)) ≈ 0
axy_naive, fv_naive = ProximalOperators.prox_naive(f, (c, d), 1)
@test f(axy_naive) ≈ 0
##### test against IndAffine
test_against_IndAffine(f, params[1], x)
end
end
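# Illustrative check: projecting (c, d) onto the graph {(x, y) : y = A*x}
# solves the normal equations (I + A'A)*x = c + A'*d, with y = A*x.
let A = randn(3, 4), c = randn(4), d = randn(3)
f = IndGraph(A)
x, y = zero(c), zero(d)
prox!(x, y, f, c, d)
@test x ≈ (I + A'A) \ (c + A'*d)
@test y ≈ A*x
end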
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 638 | using LinearAlgebra
using ProximalOperators
using Test
@testset "HuberLoss" begin
f = HuberLoss(1.5, 0.7)
predicates_test(f)
@test ProximalOperators.is_smooth(f) == true
@test ProximalOperators.is_quadratic(f) == false
@test ProximalOperators.is_set(f) == false
x = randn(10)
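# scale so that ‖x‖ = 1.6 > rho = 1.5: linear branch of the Huber loss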
x = 1.6*x/norm(x)
call_test(f, x)
prox_test(f, x, 1.3)
grad_fx, fx = gradient_test(f, x)
@test abs(fx - f(x)) <= 1e-12
@test norm(0.7*1.5*x/norm(x) - grad_fx, Inf) <= 1e-12
x = randn(10)
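# scale so that ‖x‖ = 1.4 < rho: quadratic branch, where ∇f(x) = mu*x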
x = 1.4*x/norm(x)
call_test(f, x)
prox_test(f, x, 0.9)
grad_fx, fx = gradient_test(f, x)
@test abs(fx - f(x)) <= 1e-12
@test norm(0.7*x - grad_fx, Inf) <= 1e-12
end | ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 833 | using LinearAlgebra
using SparseArrays
using Random
using ProximalOperators
using Test
@testset "IndAffine" begin
# Full matrix
m, n = 10, 30
A = randn(m, n)
b = randn(m)
f = IndAffine(A, b)
x = randn(n)
predicates_test(f)
@test ProximalOperators.is_smooth(f) == false
@test ProximalOperators.is_quadratic(f) == false
@test ProximalOperators.is_generalized_quadratic(f) == true
@test ProximalOperators.is_set(f) == true
call_test(f, x)
y, fy = prox_test(f, x)
@test f(y) == 0.0
# Sparse matrix
m, n = 10, 30
A = sprandn(m, n, 0.5)
b = randn(m)
f = IndAffine(A, b)
x = randn(n)
call_test(f, x)
y, fy = prox_test(f, x)
@test f(y) == 0.0
# Iterative version
m, n = 200, 500
A = sprandn(m, n, 0.5)
b = randn(m)
f = IndAffine(A, b; iterative=true)
x = randn(n)
call_test(f, x)
y, fy = prox_test(f, x)
@test f(y) == 0.0
end | ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
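# Illustrative check: for A with full row rank (almost surely the case for a
# random A), the projection onto {x : A*x = b} has the closed form
# y = x - A'*((A*A') \ (A*x - b)).
let A = randn(4, 9), b = randn(4), x = randn(9)
f = IndAffine(A, b)
y, _ = prox(f, x)
@test y ≈ x - A'*((A*A') \ (A*x - b))
end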
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 499 | using LinearAlgebra
using ProximalOperators
using Test
@testset "IndHyperslab" begin
for R in [Float32, Float64]
for shape in [(5,), (3, 5), (3, 4, 5)]
c = randn(R, shape)
x = randn(R, shape)
cx = dot(c, x)
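# three cases: ⟨c, x⟩ inside the slab, above its upper bound, and below its lower bound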
for (low, upp) in [(cx-R(1), cx+R(1)), (cx-R(2), cx-R(1)), (cx+R(1), cx+R(2))]
f = IndHyperslab(low, c, upp)
predicates_test(f)
call_test(f, x)
prox_test(f, x, R(0.5)+rand(R))
end
end
end
end
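# Geometric note: the projection moves x only along c, replacing ⟨c, x⟩ by its
# clipped value t = clamp(⟨c, x⟩, low, upp): y = x - ((⟨c, x⟩ - t)/‖c‖²)*c.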
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 977 | using ProximalOperators
using Test
@testset "IndPolyhedral" begin
# set dimensions
m, n = 25, 10
# pick random (nonempty) polyhedron
xmin = -ones(n)
xmax = +ones(n)
x0 = min.(xmax, max.(xmin, 10 .* rand(n) .- 5.0))
A = randn(m, n)
u = A*x0 .+ 0.1
l = A*x0 .- 0.1
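# x0 satisfies l ≤ A*x0 ≤ u by construction, so the polyhedron is nonempty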
# pick random point
x = 10 .* randn(n)
p = similar(x)
@testset "valid" for constr in [
() -> IndPolyhedral(l, A),
() -> IndPolyhedral(l, A, xmin, xmax),
() -> IndPolyhedral(A, u),
() -> IndPolyhedral(A, u, xmin, xmax),
() -> IndPolyhedral(l, A, u),
() -> IndPolyhedral(l, A, u, xmin, xmax),
]
f = constr()
@test ProximalOperators.is_convex(f) == true
@test ProximalOperators.is_set(f) == true
fx = call_test(f, x)
p, fp = prox_test(f, x)
end
@testset "invalid" for constr in [
() -> IndPolyhedral(l, A, xmax, xmin),
() -> IndPolyhedral(A, u, xmax, xmin),
() -> IndPolyhedral(l, A, u, xmax, xmin),
]
@test_throws ErrorException constr()
end
end
| ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
|
[
"MIT"
] | 0.16.1 | af4153db223c4b262747aaa656ed7b30b15c038c | code | 1670 | using LinearAlgebra
using SparseArrays
using ProximalOperators
using Test
@testset "LeastSquares" begin
@testset "$(T), $(s), $(matrix_type), $(mode)" for (T, s, matrix_type, mode) in Iterators.product(
[Float64, ComplexF64],
[(10, 29), (29, 10), (10, 29, 3), (29, 10, 3)],
[:dense, :sparse],
[:direct, :iterative],
)
if mode == :iterative && length(s) == 3
# FIXME this case is currently not supported due to cg! in IterativeSolvers
# See https://github.com/JuliaMath/IterativeSolvers.jl/issues/248
# The fix is simple, but affects a lot of solvers in IterativeSolvers
# Maybe we can use our own CG here and drop the dependency
continue
end
R = real(T)
shape_A = s[1:2]
shape_b = if length(s) == 2 s[1] else s[[1, 3]] end
shape_x = if length(s) == 2 s[2] else s[2:3] end
A = if matrix_type == :sparse sparse(randn(T, shape_A...)) else randn(T, shape_A...) end
b = randn(T, shape_b...)
x = randn(T, shape_x...)
f = LeastSquares(A, b, iterative=(mode == :iterative))
predicates_test(f)
@test ProximalOperators.is_smooth(f) == true
@test ProximalOperators.is_quadratic(f) == true
@test ProximalOperators.is_generalized_quadratic(f) == true
@test ProximalOperators.is_set(f) == false
grad_fx, fx = gradient_test(f, x)
lsres = A*x - b
@test fx ≈ 0.5*norm(lsres)^2
@test all(grad_fx .≈ (A'*lsres))
call_test(f, x)
prox_test(f, x)
prox_test(f, x, R(1.5))
lam = R(0.1) + rand(R)
f = LeastSquares(A, b, lam, iterative=(mode == :iterative))
predicates_test(f)
grad_fx, fx = gradient_test(f, x)
@test fx ≈ (lam/2)*norm(lsres)^2
@test all(grad_fx .≈ lam*(A'*lsres))
call_test(f, x)
prox_test(f, x)
prox_test(f, x, R(2.1))
end
end | ProximalOperators | https://github.com/JuliaFirstOrder/ProximalOperators.jl.git |
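# Illustrative check: for f(x) = (lam/2)*‖A*x - b‖², the prox has the closed
# form prox(f, x, gam) = (I + gam*lam*A'A) \ (x + gam*lam*A'*b).
let A = randn(6, 4), b = randn(6), x = randn(4), lam = 0.7, gam = 1.3
f = LeastSquares(A, b, lam)
y, _ = prox(f, x, gam)
@test y ≈ (I + gam*lam*(A'A)) \ (x + gam*lam*(A'*b))
end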