licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 152 | Dict{String,Any}("albums" => Dict{String,Any}("songs" => Any[Dict{String,Any}("name" => Dict{String,Any}("value" => "Glory Days","type" => "string"))])) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 524 | Dict{String,Any}("people" => Any[Dict{String,Any}("first_name" => Dict{String,Any}("value" => "Bruce","type" => "string"),"last_name" => Dict{String,Any}("value" => "Springsteen","type" => "string")), Dict{String,Any}("first_name" => Dict{String,Any}("value" => "Eric","type" => "string"),"last_name" => Dict{String,Any}("value" => "Clapton","type" => "string")), Dict{String,Any}("first_name" => Dict{String,Any}("value" => "Bob","type" => "string"),"last_name" => Dict{String,Any}("value" => "Seger","type" => "string"))]) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 632 | Dict{String,Any}("albums" => Any[Dict{String,Any}("name" => Dict{String,Any}("value" => "Born to Run","type" => "string"),"songs" => Any[Dict{String,Any}("name" => Dict{String,Any}("value" => "Jungleland","type" => "string")), Dict{String,Any}("name" => Dict{String,Any}("value" => "Meeting Across the River","type" => "string"))]), Dict{String,Any}("name" => Dict{String,Any}("value" => "Born in the USA","type" => "string"),"songs" => Any[Dict{String,Any}("name" => Dict{String,Any}("value" => "Glory Days","type" => "string")), Dict{String,Any}("name" => Dict{String,Any}("value" => "Dancing in the Dark","type" => "string"))])]) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 201 | Dict{String,Any}("people" => Any[Dict{String,Any}("first_name" => Dict{String,Any}("value" => "Bruce","type" => "string"),"last_name" => Dict{String,Any}("value" => "Springsteen","type" => "string"))]) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 270 | Dict{String,Any}("a" => Any[Dict{String,Any}("b" => Any[Dict{String,Any}("c" => Dict{String,Any}("d" => Dict{String,Any}("value" => "val0","type" => "string"))), Dict{String,Any}("c" => Dict{String,Any}("d" => Dict{String,Any}("value" => "val1","type" => "string")))])]) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 43 | Dict{String,Any}("a" => Dict{String,Any}()) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 47 | Dict{String,Any}("table" => Dict{String,Any}()) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 68 | Dict{String,Any}("a" => Dict{String,Any}("b" => Dict{String,Any}())) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 51 | Dict{String,Any}("valid key" => Dict{String,Any}()) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 162 | Dict{String,Any}("a" => Dict{String,Any}("\"b\"" => Dict{String,Any}("c" => Dict{String,Any}("answer" => Dict{String,Any}("value" => "42","type" => "integer"))))) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 116 | Dict{String,Any}("key#group" => Dict{String,Any}("answer" => Dict{String,Any}("value" => "42","type" => "integer"))) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 158 | Dict{String,Any}("a" => Dict{String,Any}("b" => Dict{String,Any}("c" => Dict{String,Any}("answer" => Dict{String,Any}("value" => "42","type" => "integer"))))) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 101 | Dict{String,Any}("electron_mass" => Dict{String,Any}("value" => "9.109109383e-31","type" => "float")) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 89 | Dict{String,Any}("million" => Dict{String,Any}("value" => "1000000","type" => "integer")) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 147 | Dict{String,Any}("answer8" => Dict{String,Any}("value" => "δ","type" => "string"),"answer4" => Dict{String,Any}("value" => "δ","type" => "string")) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 81 | Dict{String,Any}("answer" => Dict{String,Any}("value" => "δ","type" => "string")) | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | code | 544 | # This converts the ground-truth JSON files to the Julia repr format so
# we can use that without requiring a JSON parser during testing.
using JSON
const testfiles = joinpath(@__DIR__, "..", "testfiles")
# Convert each ground-truth .json file under testfiles/invalid and
# testfiles/valid into a Julia-`repr` text file (same basename, ".jl"
# extension) so the test suite can load the expected data without
# requiring a JSON parser at test time.
function convert_json_files()
    for folder in ("invalid", "valid")
        # join=true makes readdir return full paths, not bare filenames
        for file in readdir(joinpath(testfiles, folder); join=true)
            # skip everything that is not a .json ground-truth file
            endswith(file, ".json") || continue
            # open/close is handled by the function form of `open`
            d_json = open(JSON.parse, file)
            d_jl = repr(d_json)
            # write alongside the source file: foo.json -> foo.jl
            write(splitext(file)[1] * ".jl", d_jl)
        end
    end
end | TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | docs | 969 | # TOML.jl
A [TOML v1.0.0](https://github.com/toml-lang/toml) parser for Julia.
[](https://github.com/JuliaLang/TOML.jl/actions?query=workflow%3ACI) [](https://codecov.io/gh/JuliaLang/TOML.jl)
[](https://JuliaLang.github.io/TOML.jl/dev/)
*Note:* In Julia 1.6+, this is a standard library and does not need to be explicitly installed.
*Note:* A different TOML package was previously hosted in this package's URL. If you have the older package (UUID `191fdcea...`) installed in your environment from `https://github.com/JuliaLang/TOML.jl.git`, `update` and other `Pkg` operations may fail. To switch to this package, remove and re-add `TOML`. The old TOML package (which only supports TOML 0.4.0) may be found in `https://github.com/JuliaAttic/TOML.jl`.
| TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.0.3 | 44aaac2d2aec4a850302f9aa69127c74f0c3787e | docs | 2427 | # TOML
TOML.jl is a Julia standard library for parsing and writing [TOML
v1.0](https://toml.io/en/) files.
## Parsing TOML data
```jldoctest
julia> using TOML
julia> data = """
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
""";
julia> TOML.parse(data)
Dict{String, Any} with 1 entry:
"database" => Dict{String, Any}("server"=>"192.168.1.1", "ports"=>[8001, 8001…
```
To parse a file, use [`TOML.parsefile`](@ref). If the file has a syntax error,
an exception is thrown:
```jldoctest
julia> using TOML
julia> TOML.parse("""
value = 0.0.0
""")
ERROR: TOML Parser error:
none:1:16 error: failed to parse value
value = 0.0.0
^
[...]
```
There are other versions of the parse functions ([`TOML.tryparse`](@ref)
and [`TOML.tryparsefile`](@ref)) that, instead of throwing exceptions on parser error,
return a [`TOML.ParserError`](@ref) with information:
```jldoctest
julia> using TOML
julia> err = TOML.tryparse("""
value = 0.0.0
""");
julia> err.type
ErrGenericValueError::ErrorType = 14
julia> err.line
1
julia> err.column
16
```
## Exporting data to TOML file
The [`TOML.print`](@ref) function is used to print (or serialize) data into TOML
format.
```jldoctest
julia> using TOML
julia> data = Dict(
"names" => ["Julia", "Julio"],
"age" => [10, 20],
);
julia> TOML.print(data)
names = ["Julia", "Julio"]
age = [10, 20]
julia> fname = tempname();
julia> open(fname, "w") do io
TOML.print(io, data)
end
julia> TOML.parsefile(fname)
Dict{String, Any} with 2 entries:
"names" => ["Julia", "Julio"]
"age" => [10, 20]
```
Keys can be sorted according to some value
```jldoctest
julia> using TOML
julia> TOML.print(Dict(
"abc" => 1,
"ab" => 2,
"abcd" => 3,
); sorted=true, by=length)
ab = 2
abc = 1
abcd = 3
```
For custom structs, pass a function that converts the struct to a supported
type
```jldoctest
julia> using TOML
julia> struct MyStruct
a::Int
b::String
end
julia> TOML.print(Dict("foo" => MyStruct(5, "bar"))) do x
x isa MyStruct && return [x.a, x.b]
error("unhandled type $(typeof(x))")
end
foo = [5, "bar"]
```
## References
```@docs
TOML.parse
TOML.parsefile
TOML.tryparse
TOML.tryparsefile
TOML.print
TOML.Parser
TOML.ParserError
```
| TOML | https://github.com/JuliaLang/TOML.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 7064 | using Luxor, Colors, ColorSchemes
# Draw a diagnostic figure for a ColorScheme: a top row of discrete color
# swatches, a large middle panel with per-channel (R/G/B/alpha) level curves
# on a 0–1 grid, and a bottom strip sampling the scheme continuously.
# Returns the Luxor `Drawing` after writing it to `filename`.
function draw_rgb_levels(cs::ColorScheme, w = 800, h = 500, filename = "/tmp/rgb-levels.svg")
    # This function is a quick hack to draw swatches and curves in a documenter pass.
    # The diagrams are merely illustrative, not 100% technically precise :(
    dwg = Drawing(w, h, filename)
    origin()
    background("black")
    setlinejoin("bevel")
    # three rows (thin, fat, thin), one wide column
    table = Table([h / 6, 2h / 3, h / 6], w)
    l = length(cs.colors)
    # working bounding box, shrunk slightly inside the middle table cell
    bbox = BoundingBox(box(O, table.colwidths[1], table.rowheights[2], vertices = true)) * 0.85
    # axes and labels in main (second) cell of table
    @layer begin
        translate(table[2])
        setline(0.5)
        fontsize(7)
        box(bbox, :stroke)
        # horizontal lines
        div10 = boxheight(bbox) / 10
        for (ylabel, yy) in enumerate((boxtopcenter(bbox).y):div10:(boxbottomcenter(bbox).y))
            sethue("grey25")
            rule(Point(0, yy), boundingbox = bbox)
            sethue("grey85")
            # label runs 1.0 at the top down to 0.0 at the bottom
            text(string((11 - ylabel) / 10), Point(boxbottomleft(bbox).x - 10, yy), halign = :right, valign = :middle)
        end
        # vertical lines
        div10 = boxwidth(bbox) / 10
        for (xlabel, xx) in enumerate((boxtopleft(bbox).x):div10:(boxtopright(bbox).x))
            sethue("grey25")
            rule(Point(xx, 0), π / 2, boundingbox = bbox)
            sethue("grey85")
            # label runs 0.0 on the left up to 1.0 on the right
            text(string((xlabel - 1) / 10), Point(xx, boxbottomleft(bbox).y + 10), halign = :center, valign = :bottom)
        end
    end
    # middle, show 'curves'
    # 'curves'
    # run through color levels in scheme and sample/quantize
    @layer begin
        translate(table[2])
        redline = Point[]
        greenline = Point[]
        blueline = Point[]
        alphaline = Point[]
        verticalscale = boxheight(bbox)
        stepping = 0.0025
        # TODO better way to examine quantized color values
        for i in 0:stepping:1
            # nearest swatch index for this position along the scheme
            swatch = convert(Int, round(rescale(i, 0, 1, 1, l)))
            c = cs[swatch]
            r = red(c)
            g = green(c)
            b = blue(c)
            a = alpha(c)
            x = rescale(i, 0, 1, -boxwidth(bbox) / 2, boxwidth(bbox) / 2)
            push!(redline, Point(x, boxbottomcenter(bbox).y - verticalscale * r))
            push!(greenline, Point(x, boxbottomcenter(bbox).y - verticalscale * g))
            push!(blueline, Point(x, boxbottomcenter(bbox).y - verticalscale * b))
            push!(alphaline, Point(x, boxbottomcenter(bbox).y - verticalscale * a))
        end
        # the idea to make the lines different weights to assist reading overlaps may not be a good one
        setline(1)
        sethue("blue")
        poly(blueline, :stroke)
        setline(0.8)
        sethue("red")
        poly(redline, :stroke)
        setline(0.7)
        sethue("green")
        poly(greenline, :stroke)
        setline(0.6)
        sethue("grey80")
        poly(alphaline, :stroke)
    end
    # top tile, swatches
    @layer begin
        translate(table[1])
        # draw in a single pane
        panes = Tiler(boxwidth(bbox), table.rowheights[1], 1, 1, margin = 0)
        panewidth = panes.tilewidth
        paneheight = panes.tileheight
        # draw the swatches
        swatchwidth = panewidth / l
        for (i, p) in enumerate(cs.colors)
            swatchcenter = Point(boxtopleft(bbox).x - swatchwidth / 2 + (i * swatchwidth), O.y)
            setcolor(p)
            box(swatchcenter, swatchwidth - 1, table.rowheights[1] / 2 - 1, :fill)
            # grey outline drawn in its own layer so swatch color is untouched
            @layer begin
                setline(0.4)
                sethue("grey50")
                box(swatchcenter, swatchwidth - 1, table.rowheights[1] / 2 - 1, :stroke)
            end
        end
    end
    # third tile, continuous sampling
    @layer begin
        setline(0)
        translate(table[3])
        # draw blend
        stepping = 0.001
        boxw = panewidth * stepping
        for i in 0:stepping:1
            # `get` interpolates the scheme continuously, unlike the swatches above
            c = get(cs, i)
            setcolor(c)
            xpos = rescale(i, 0, 1, O.x - panewidth / 2, O.x + panewidth / 2 - boxw)
            box(Point(xpos + boxw / 2, O.y), boxw, table.rowheights[3] / 2, :fillstroke)
        end
    end
    finish()
    return dwg
end
# Draw a transparency-comparison figure: a grey checkerboard backdrop, a row
# of the opaque scheme `cs`, and a row of the alpha-carrying scheme `csa`
# so its transparency is visible against the checkers.
# Returns the Luxor `Drawing` after writing it to `filename`.
function draw_transparent(cs::ColorScheme, csa::ColorScheme,
        w = 800, h = 500, filename = "/tmp/transparency-levels.svg",
    )
    dwg = Drawing(w, h, filename)
    origin()
    background("black")
    setlinejoin("bevel")
    N = length(csa.colors) * 2
    # NOTE(review): this overwrites the `h` parameter — the drawn grid is
    # always w ÷ 4 tall regardless of the `h` argument; confirm intended.
    h = w ÷ 4
    backgroundtiles = Tiler(w, h, 4, N, margin = 0)
    setline(0)
    # checkerboard: alternate grey shades by (row + col) parity
    for (pos, n) in backgroundtiles
        if iseven(backgroundtiles.currentrow + backgroundtiles.currentcol)
            sethue("grey80")
        else
            sethue("grey90")
        end
        box(backgroundtiles, n, :fillstroke)
    end
    referencecolortiles = Tiler(w, h, 2, N ÷ 2, margin = 0)
    # first row: the opaque reference scheme
    for (pos, n) in referencecolortiles[1:(N ÷ 2)]
        setcolor(cs[n])
        box(referencecolortiles, n, :fillstroke)
    end
    # second row: the transparent scheme, indexed from 1 via enumerate
    for (i, (pos, n)) in enumerate(referencecolortiles[(N ÷ 2 + 1):end])
        setcolor(csa[i])
        box(referencecolortiles, n, :fillstroke)
    end
    finish()
    return dwg
end
# Draw an SVG showing the Lab lightness of `cs` sampled at 70 points:
# each sample is a filled dot placed vertically by its L value (0–100)
# inside an axis box, with tick labels and an optional title `name`.
function draw_lightness_swatch(cs::ColorScheme, width = 800, height = 150;
        name = "")
    @drawsvg begin
        hmargin = 30
        vmargin = 20
        bb = BoundingBox(Point(-width / 2 + hmargin, -height / 2 + vmargin), Point(width / 2 - hmargin, height / 2 - vmargin))
        background("black")
        fontsize(8)
        sethue("white")
        setline(0.5)
        box(bb, :stroke)
        # bottom axis: 0.0 … 1.0 position along the scheme
        tickline(boxbottomleft(bb), boxbottomright(bb), major = 9, axis = false,
            major_tick_function = (n, pos; startnumber, finishnumber, nticks) ->
                text(string(n / 10), pos + (0, 12), halign = :center),
        )
        # right axis: 0 … 100 lightness
        tickline(boxbottomright(bb), boxtopright(bb), major = 9, axis = false,
            major_tick_function = (n, pos; startnumber, finishnumber, nticks) ->
                text(string(10n), pos + (0, 20), angle = π / 2, halign = :right, valign = :middle),
        )
        text("lightness", boxtopleft(bb) + (10, 10), halign = :right, angle = -π / 2)
        fontsize(12)
        L = 70
        sw = width / L
        saved = Point[]
        for i in range(0.0, 1.0, length = L)
            pos = between(boxmiddleleft(bb), boxmiddleright(bb), i)
            color = get(cs, i)
            setcolor(color)
            # lightness comes from the Lab conversion's L channel (0–100)
            labcolor = convert(Lab, color)
            lightness = labcolor.l
            lightnesspos = pos + (0, boxheight(bb) / 2 - rescale(labcolor.l, 0, 100, 0, boxheight(bb)))
            push!(saved, lightnesspos)
            circle(lightnesspos, 5, :fill)
        end
        # setline(1)
        # sethue("black")
        # line(saved[1], saved[end], :stroke)
        # setline(0.8)
        # line(saved[1], saved[end], :stroke)
        sethue("white")
        text(name, boxtopcenter(bb) + (0, -6), halign = :center)
    end width height
end
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 909 | using Documenter, ColorSchemes, ColorSchemeTools, Luxor, Colors
# Build the documentation site for ColorSchemeTools with Documenter.jl.
makedocs(
    modules = [ColorSchemeTools],
    sitename = "ColorSchemeTools",
    # report doc-build problems as warnings instead of failing the build
    warnonly = true,
    format = Documenter.HTML(
        # no warning about large generated HTML pages
        size_threshold=nothing,
        # pretty URLs only on CI; local builds keep file://-friendly links
        prettyurls = get(ENV, "CI", nothing) == "true",
        assets = ["assets/colorschemetools-docs.css"],
        collapselevel=1,
    ),
    pages = [
        "Introduction" => "index.md",
        "Tools" => "tools.md",
        "Converting image colors" => "convertingimages.md",
        "Making colorschemes" => "makingschemes.md",
        "Saving colorschemes" => "output.md",
        "Equalizing colorschemes" => "equalizing.md",
        "Alphabetical function list" => "functionindex.md"
    ]
)
# Deploy the built site (PR previews enabled, history force-pushed).
deploydocs(
    push_preview = true,
    repo = "github.com/JuliaGraphics/ColorSchemeTools.jl.git",
    target = "build",
    forcepush=true,
)
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 23164 | """
This package provides some tools for working with ColorSchemes:
- `colorscheme_to_image()`: make an image from a colorscheme
- `colorscheme_to_text()`: save scheme as Julia code in text file
- `colorscheme_weighted()`: make new colorscheme from scheme and weights
- `equalize()`
- `convert_to_scheme()`: convert image to use only colorscheme colors
- `extract_weighted_colors()`: obtain colors weighted scheme from image
- `extract()`: obtain colors from image
- `image_to_swatch()`: extract scheme and save as swatch file
- `make_colorscheme()`: make new scheme
- `sortcolorscheme()`: sort scheme using color model
"""
module ColorSchemeTools
using Images, ColorSchemes, Colors, Clustering, FileIO, Interpolations
import Base.get
include("utils.jl")
include("equalize.jl")
export
colorscheme_to_image,
colorscheme_to_text,
colorscheme_weighted,
compare_colors,
equalize,
extract,
extract_weighted_colors,
convert_to_scheme,
image_to_swatch,
sortcolorscheme,
sineramp,
make_colorscheme,
add_alpha,
get_linear_segment_color,
get_indexed_list_color
"""
extract(imfile, n=10, i=10, tolerance=0.01; shrink=2)
`extract()` extracts the most common colors from an image from the image file
`imfile` by finding `n` dominant colors, using `i` iterations. You can (and
probably should) shrink larger images before running this function.
Returns a ColorScheme.
"""
function extract(imfile, n = 10, i = 10, tolerance = 0.01; kwargs...)
    # Delegate to extract_weighted_colors and keep only the ColorScheme,
    # discarding the per-cluster weights it also returns.
    scheme, _ = extract_weighted_colors(imfile, n, i, tolerance; kwargs...)
    return scheme
end
"""
extract_weighted_colors(imfile, n=10, i=10, tolerance=0.01; shrink = 2)
Extract colors and weights of the clusters of colors in an image file. Returns a
ColorScheme and weights.
Example:
```
pal, wts = extract_weighted_colors(imfile, n, i, tolerance; shrink = 2)
```
"""
# Load `imfile`, shrink it by `shrink`, then k-means cluster its pixels into
# `n` colors using at most `i` iterations with convergence `tolerance`.
# Returns `(ColorScheme, weights)` where `weights` are the normalized
# cluster sizes (they sum to 1).
function extract_weighted_colors(imfile, n = 10, i = 10, tolerance = 0.01; shrink = 2.0)
    img = load(imfile)
    # TODO this is the wrong way to do errors I'm told
    (!@isdefined img) && error("Can't load the image file \"$imfile\"")
    w, h = size(img)
    neww = round(Int, w / shrink)
    newh = round(Int, h / shrink)
    smaller_image = Images.imresize(img, (neww, newh))
    w, h = size(smaller_image)
    # channelview gives a channels-first numeric array for clustering
    imdata = convert(Array{Float64}, channelview(smaller_image))
    # require exactly three color channels (no alpha) for the 3×N reshape below
    !any(n -> n == 3, size(imdata)) && error("Image file \"$imfile\" doesn't have three color channels; perhaps it has an alpha channel as well?")
    d = reshape(imdata, 3, :)
    R = kmeans(d, n, maxiter = i, tol = tolerance)
    cols = RGB{Float64}[]
    # cluster centers are stored flat as r,g,b triples; note this loop's `i`
    # shadows the `i` (iterations) parameter, which is no longer needed here
    for i in 1:3:length(R.centers)
        push!(cols, RGB(R.centers[i], R.centers[i + 1], R.centers[i + 2]))
    end
    # KmeansResult::cweights is deprecated, use wcounts(clu::KmeansResult) as of Clustering v0.13.2
    return ColorScheme(cols), wcounts(R) / sum(wcounts(R))
end
"""
colorscheme_weighted(colorscheme, weights, length)
Returns a new ColorScheme of length `length` (default 50) where the proportion
of each color in `colorscheme` is represented by the associated weight of each
entry.
Examples:
```
colorscheme_weighted(extract_weighted_colors("hokusai.jpg")...)
colorscheme_weighted(extract_weighted_colors("filename00000001.jpg")..., 500)
```
"""
function colorscheme_weighted(cscheme::ColorScheme, weights, l = 50)
    # Integer count of output slots for each source color.
    counts = map(wt -> convert(Integer, round(wt * l)), weights)
    # Rounding can leave the total off by a few; nudge the smallest count up
    # or the largest count down until the counts sum exactly to `l`.
    while sum(counts) < l
        smallest, at = findmin(counts)
        counts[at] = smallest + 1
    end
    while sum(counts) > l
        biggest, at = findmax(counts)
        counts[at] = biggest - 1
    end
    # Expand each color according to its final count.
    expanded = RGB{Float64}[]
    for idx in 1:length(cscheme)
        append!(expanded, repeat([cscheme[idx]], counts[idx]))
    end
    return ColorScheme(expanded)
end
"""
compare_colors(color_a, color_b, field = :l)
Compare two colors, using the Luv colorspace. `field` defaults to luminance `:l`
but could be `:u` or `:v`. Return true if the specified field of `color_a` is
less than `color_b`.
"""
# Compare two colors by one field (:l, :u, or :v) of their Luv conversions;
# returns true when `color_a`'s field is less than `color_b`'s.
function compare_colors(color_a, color_b, field = :l)
    # Heuristic: a red component strictly between 1 and 255 is taken to mean
    # the colors use a 0–255 scale, so divide down to 0–1 before converting.
    # NOTE(review): only color_a is inspected, but the factor is applied to
    # both colors — assumes both use the same scale; confirm with callers.
    if 1 < color_a.r < 255
        fac = 255
    else
        fac = 1
    end
    luv1 = convert(Luv, RGB(color_a.r / fac, color_a.g / fac, color_a.b / fac))
    luv2 = convert(Luv, RGB(color_b.r / fac, color_b.g / fac, color_b.b / fac))
    return getfield(luv1, field) < getfield(luv2, field)
end
"""
sortcolorscheme(colorscheme::ColorScheme, field; kwargs...)
Sort (non-destructively) a colorscheme using a field of the LUV colorspace.
The less than function is `lt = (x,y) -> compare_colors(x, y, field)`.
The default is to sort by the luminance field `:l` but could be by `:u` or `:v`.
Returns a new ColorScheme.
"""
function sortcolorscheme(colorscheme::ColorScheme, field = :l; kwargs...)
    # Order colors by the chosen Luv field via compare_colors; the original
    # scheme is left untouched and a freshly built one is returned.
    ordering = (a, b) -> compare_colors(a, b, field)
    sorted_colors = sort(colorscheme.colors; lt = ordering, kwargs...)
    return ColorScheme(sorted_colors)
end
"""
convert_to_scheme(cscheme, img)
Converts `img` from its current color values to use only the colors defined in the ColorScheme `cscheme`.
```
image = nonTransparentImg
convert_to_scheme(ColorSchemes.leonardo, image)
convert_to_scheme(ColorSchemes.Paired_12, image)
```
"""
# For every pixel, find its closest position in `cscheme` (via getinverse)
# and replace it with the scheme color at that position.
convert_to_scheme(cscheme::ColorScheme, img) =
    map(c -> get(cscheme, getinverse(cscheme, c)), img)
"""
colorscheme_to_image(cs, nrows=50, tilewidth=5)
Make an image from a ColorScheme by repeating the colors in `nrows` rows, repeating each pixel `tilewidth` times.
Returns the image as an array.
Examples:
```
using FileIO
img = colorscheme_to_image(ColorSchemes.leonardo, 50, 200)
save("/tmp/cs_image.png", img)
save("/tmp/blackbody.png", colorscheme_to_image(ColorSchemes.blackbody, 10, 100))
```
"""
function colorscheme_to_image(cs::ColorScheme, nrows = 50, tilewidth = 5)
    # One column block of `tilewidth` pixels per scheme color,
    # repeated down `nrows` identical rows.
    ncols = tilewidth * length(cs)
    img = Array{RGB{Float64}}(undef, nrows, ncols)
    for col in 1:ncols
        colorindex = div(col - 1, tilewidth) + 1
        swatch = cs.colors[colorindex]
        for row in 1:nrows
            img[row, col] = swatch
        end
    end
    return img
end
"""
    image_to_swatch(imagefilepath, n, destinationpath;
        nrows=50,
        tilewidth=5)
Extract a ColorScheme with `n` colors from the image in `imagefilepath` and
save it as a swatch image PNG in
`destinationpath`. This just runs `sortcolorscheme()`, `colorscheme_to_image()`,
and `save()` in sequence.
Specify the number of colors. You can also specify the number of rows, and how
many times each color is repeated.
```
image_to_swatch("monalisa.jpg", 10, "/tmp/monalisaswatch.png")
```
"""
function image_to_swatch(imagefilepath, n::Int64, destinationpath;
        nrows = 50,
        tilewidth = 5)
    # extract dominant colors, sort by lightness, render, and save
    tempcs = sortcolorscheme(extract(imagefilepath, n))
    img = colorscheme_to_image(tempcs, nrows, tilewidth)
    save(destinationpath, img)
end
"""
colorscheme_to_text(cscheme::ColorScheme, schemename, filename;
category="dutch painters", # category
notes="it's not really lost" # notes
)
Write a ColorScheme to a Julia text file.
## Example
```
colorscheme_to_text(ColorSchemes.vermeer,
"the_lost_vermeer", # name
"/tmp/the_lost_vermeer.jl", # file
category="dutch painters", # category
notes="it's not really lost" # notes
)
```
and read it back in with:
```
include("/tmp/the_lost_vermeer.jl")
```
"""
# Write `cs` to `file` as a `loadcolorscheme(:name, [...])` Julia snippet.
# The emitted text is unchanged from before; the file is now opened with the
# do-block form so the handle is guaranteed to close even if a write throws
# (previously an exception mid-loop leaked the open handle).
function colorscheme_to_text(cs::ColorScheme, schemename::String, file::String;
        category = "",
        notes = "")
    open(file, "w") do fhandle
        write(fhandle, "loadcolorscheme(:$(schemename), [\n")
        for c in cs.colors
            # one `Colors.<colortype>(...)` entry per line
            write(fhandle, "\tColors.$(c), \n")
        end
        write(fhandle, "], \"$(category)\", \"$(notes)\")")
    end
end
"""
get_linear_segment_color(dict, n)
Get the RGB color for value `n` from a dictionary of linear color segments.
This following is a dictionary where red increases from 0 to 1 over the bottom
half, green does the same over the middle half, and blue over the top half:
```
cdict = Dict(:red => ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:green => ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:blue => ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)))
```
The value of RGB component at every value of `n` is defined by a set of tuples.
In each tuple, the first number is `x`. Colors are linearly interpolated in
bands between consecutive values of `x`; if the first tuple is given by `(Z, A, B)` and the second tuple by `(X, C, D)`, the color of a point `n` between Z and
X will be given by `(n - Z) / (X - Z) * (C - B) + B`.
For example, given an entry like this:
```
:red => ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0))
```
and if `n` = 0.75, we return 1.0; 0.75 is between the second and third segments,
but we'd already reached 1.0 (segment 2) when `n` was 0.5.
"""
# Return `[r, g, b]` for position `n` (clamped to 0–1) from a matplotlib-style
# linear-segment dictionary (see docstring above for the format).
# Fix: previously, if `n` exceeded every x-stop in a channel's segment list
# (a dict not ending at x = 1.0), `findfirst` returned `nothing` and
# `max(2, nothing)` raised a MethodError; we now fall back to the final segment.
function get_linear_segment_color(dict, n)
    result = Float64[]
    n = clamp(n, 0.0, 1.0)
    for channel in (:red, :green, :blue)
        segments = dict[channel]
        # first segment whose x-stop is >= n; last segment if none is
        found = findfirst(seg -> n <= first(seg), segments)
        upper = max(2, found === nothing ? lastindex(segments) : found)
        lower = max(1, upper - 1)
        Z, A, B = segments[lower]   # (x, y_left, y_right) of lower stop
        X, C, D = segments[upper]   # (x, y_left, y_right) of upper stop
        if X > Z
            # linear interpolation between B (at Z) and C (at X)
            color_at_n = (n - Z) / (X - Z) * (C - B) + B
        else
            # degenerate band (duplicate or non-increasing stops)
            color_at_n = 0.0
        end
        push!(result, color_at_n)
    end
    return result
end
"""
    lerp(x, from_min, from_max, to_min=0.0, to_max=1.0)

Linearly map `x` from the range `from_min`–`from_max` onto the range
`to_min`–`to_max`.

Example
```
ColorSchemeTools.lerp(128, 0, 256)
0.5
```
"""
function lerp(x, from_min, from_max, to_min = 0.0, to_max = 1.0)
    # Degenerate source range: avoid dividing by (nearly) zero.
    isapprox(from_max, from_min) && return from_max
    fraction = (x - from_min) / (from_max - from_min)
    return fraction * (to_max - to_min) + to_min
end
"""
get_indexed_list_color(indexedlist, n)
Get the color at a point `n` given an indexed list of triples like this:
```
gist_rainbow = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
```
To make a ColorScheme from this type of list, use:
```
make_colorscheme(gist_rainbow)
```
"""
# Return an `(r, g, b)` tuple for position `n` (clamped to 0–1), linearly
# interpolated between the two nearest stops of `indexedlist`
# (see the docstring above for the list format).
function get_indexed_list_color(indexedlist, n)
    m = clamp(n, 0.0, 1.0)
    # for sorting, convert to array
    stops = Float64[]
    rgbvalues = NTuple[]
    try
        for i in indexedlist
            push!(stops, first(i))
            push!(rgbvalues, convert.(Float64, last(i)))
        end
    catch e
        throw(error("ColorSchemeTools.get_indexed_list_colors(): error in the indexed list's format.\n
Format should be something like this:
(
(0.0, (1.00, 0.00, 0.16)),
(0.5, (0.00, 1.00, 1.00)),
(0.7, (0.00, 0.00, 1.00)),
(0.9, (1.00, 0.00, 1.00)),
(1.0, (1.00, 0.00, 0.75))
)"))
    end
    if length(stops) != length(rgbvalues)
        throw(error("ColorSchemeTools.get_indexed_list_colors(): error in the indexed list's format.\n
Format should be something like this:
(
(0.0, (1.00, 0.00, 0.16)),
(0.5, (0.00, 1.00, 1.00)),
(0.7, (0.00, 0.00, 1.00)),
(0.9, (1.00, 0.00, 1.00)),
(1.0, (1.00, 0.00, 0.75))
)"))
    end
    # NOTE(review): the check is `< 2` but the message says "3 or more" —
    # one of the two is presumably wrong; confirm intended minimum.
    if length(stops) < 2
        throw(error("ColorSchemeTools.get_indexed_list_colors(): should be 3 or more entries in list"))
    end
    # sort stops ascending and reorder the rgb triples to match
    p = sortperm(stops)
    sort!(stops)
    rgbvalues = rgbvalues[p] # needs Julia v1.6
    # find the first stop at or above m
    upper = findfirst(f -> f >= m, stops)
    if isnothing(upper) # use final value
        upper = length(stops)
    elseif upper == 0 || upper > length(stops)
        throw(error("ColorSchemeTools.get_indexed_list_colors(): error processing list"))
    end
    lower = max(1, upper - 1)
    # if m sits exactly on the first stop, interpolate over the first band
    if lower == upper
        upper += 1
    end
    lr, lg, lb = rgbvalues[lower]
    ur, ug, ub = rgbvalues[upper]
    # interpolate each channel between the bracketing stops
    r = ColorSchemeTools.lerp(m, stops[lower], stops[upper], lr, ur)
    g = ColorSchemeTools.lerp(m, stops[lower], stops[upper], lg, ug)
    b = ColorSchemeTools.lerp(m, stops[lower], stops[upper], lb, ub)
    return (r, g, b)
end
"""
make_colorscheme(dict;
length=100,
category="",
notes="")
Make a new ColorScheme from a dictionary of linear-segment information. Calls
`get_linear_segment_color(dict, n)` with `n` for every `length` value between 0 and 1.
"""
function make_colorscheme(dict::Dict;
        length = 100,
        category = "",
        notes = "")
    # Sample the linear-segment dictionary at `length` evenly spaced
    # points across 0–1 and collect the resulting RGB colors.
    samples = range(0, stop = 1, length = length)
    colors = [RGB(get_linear_segment_color(dict, pt)...) for pt in samples]
    return ColorScheme(colors, category, notes)
end
"""
make_colorscheme(indexedlist;
length=length(indexedlist),
category="",
notes="")
Make a ColorScheme using an 'indexed list' like this:
```
gist_rainbow = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
make_colorscheme(gist_rainbow)
```
The first element of each item is the point on the colorscheme.
Use `length` keyword to set the number of colors in the colorscheme.
"""
function make_colorscheme(indexedlist::Tuple;
        length = length(indexedlist),
        category = "",
        notes = "indexed list")
    # Sample the indexed list at `length` evenly spaced points across 0–1.
    samples = range(0, stop = 1, length = length)
    colors = [RGB(get_indexed_list_color(indexedlist, pt)...) for pt in samples]
    return ColorScheme(colors, category, notes)
end
"""
make_colorscheme(f1::Function, f2::Function, f3::Function;
model = :RGB,
length = 100,
category = "",
notes = "functional ColorScheme")
Make a colorscheme using functions. Each argument is a function that returns a
value for the red, green, and blue components for the values between 0
and 1.
`model` is the color model, and can be `:RGB`, `:HSV`, or `:LCHab`.
Use `length` keyword to set the number of colors in the colorscheme.
The default color model is `:RGB`, and the functions should return values in the
appropriate range:
- f1 - [0.0 - 1.0] - red
- f2 - [0.0 - 1.0] - green
- f3 - [0.0 - 1.0] - blue
For the `:HSV` color model:
- f1 - [0.0 - 360.0] - hue
- f2 - [0.0 - 1.0] - saturation
- f3 - [0.0 - 1.0] - value (brightness)
For the `:LCHab` color model:
- f1 - [0.0 - 100.0] - luminance
- f2 - [0.0 - 100.0] - chroma
- f3 - [0.0 - 360.0] - hue
### Example
Make a colorscheme with the red component defined as a sine curve running from
0 to π and back to 0, the green component is always 0, and the blue component starts at
π and goes to 0 at 0.5 (it's clamped to 0 after that).
```julia
make_colorscheme(n -> sin(n * π), n -> 0, n -> cos(n * π))
```
"""
# Build a ColorScheme by sampling the three component functions at `length`
# evenly spaced points of [0, 1]; `model` selects how the three values are
# interpreted (see the docstring above for per-model component ranges).
# Output colors are always RGB.
function make_colorscheme(f1::Function, f2::Function, f3::Function;
        model = :RGB,
        length = 100,
        category = "",
        notes = "functional ColorScheme")
    # component clamp ranges for each supported color model
    if model == :LCHab
        clamp1 = (0.0, 100.0)   # luminance
        clamp2 = (0.0, 100.0)   # chroma
        clamp3 = (0.0, 360.0)   # Hue is 0-360
    elseif model == :HSV
        clamp1 = (0.0, 360.0)   # Hue is 0-360
        clamp2 = (0.0, 1.0)     # saturation
        clamp3 = (0.0, 1.0)     # value (brightness)
    elseif model == :RGB
        clamp1 = (0.0, 1.0)
        clamp2 = (0.0, 1.0)
        clamp3 = (0.0, 1.0)
    else
        # previously an unknown model left the clamps undefined and failed
        # later with an UndefVarError; fail fast with a clear message
        throw(ArgumentError("model must be :RGB, :HSV, or :LCHab, got $(model)"))
    end
    cs = RGB[]
    for i in range(0.0, stop = 1.0, length = length)
        final1 = clamp(f1(i), clamp1...)
        final2 = clamp(f2(i), clamp2...)
        final3 = clamp(f3(i), clamp3...)
        if model == :LCHab
            push!(cs, convert(RGB, LCHab(final1, final2, final3)))
        elseif model == :RGB
            push!(cs, RGB(final1, final2, final3))
        elseif model == :HSV
            push!(cs, convert(RGB, HSV(final1, final2, final3)))
        end
    end
    return ColorScheme(cs, category, notes)
end
"""
make_colorscheme(f1::Function, f2::Function, f3::Function, f4::Function;
model = :RGBA,
length = 100,
category = "",
notes = "functional ColorScheme")
Make a colorscheme with transparency using functions. Each argument is a
function that returns a value for the red, green, blue, and alpha components for
the values between 0 and 1.
`model` is the color model, and can be `:RGBA`, `:HSVA`, or `:LCHabA`.
Use `length` keyword to set the number of colors in the colorscheme.
The default color model is `:RGBA`, and the functions should return values in the
appropriate range:
- f1 - [0.0 - 1.0] - red
- f2 - [0.0 - 1.0] - green
- f3 - [0.0 - 1.0] - blue
- f4 - [0.0 - 1.0] - alpha
For the `:HSVA` color model:
- f1 - [0.0 - 360.0] - hue
- f2 - [0.0 - 1.0] - saturation
- f3 - [0.0 - 1.0] - value (brightness)
- f4 - [0.0 - 1.0] - alpha
For the `:LCHabA` color model:
- f1 - [0.0 - 100.0] - luminance
- f2 - [0.0 - 100.0] - chroma
- f3 - [0.0 - 360.0] - hue
- f4 - [0.0 - 1.0] - alpha
## Examples
```julia
csa = make_colorscheme(
n -> red(get(ColorSchemes.leonardo, n)),
n -> green(get(ColorSchemes.leonardo, n)),
n -> blue(get(ColorSchemes.leonardo, n)),
n -> 1 - identity(n))
```
"""
function make_colorscheme(f1::Function, f2::Function, f3::Function, f4::Function;
model = :RGBA,
length = 100,
category = "",
notes = "functional ColorScheme")
# output is always RGBA
cs = RGBA[]
if model == :LCHabA
clamp1 = (0.0, 100.0) #
clamp2 = (0.0, 100.0) #
clamp3 = (0.0, 360.0) # Hue is 0-360
clamp4 = (0.0, 1.0)
elseif model == :HSVA
clamp1 = (0.0, 360.0) # Hue is 0-360
clamp2 = (0.0, 1.0) #
clamp3 = (0.0, 1.0) #
clamp4 = (0.0, 1.0)
elseif model == :RGBA
clamp1 = (0.0, 1.0) #
clamp2 = (0.0, 1.0) #
clamp3 = (0.0, 1.0) #
clamp4 = (0.0, 1.0)
end
counter = 0
for i in range(0.0, stop = 1.0, length = length)
final1 = clamp(f1(i), clamp1...)
final2 = clamp(f2(i), clamp2...)
final3 = clamp(f3(i), clamp3...)
final4 = clamp(f4(i), clamp4...)
if model == :LCHabA
push!(cs, convert(RGBA, LCHab(final1, final2, final3, final4)))
elseif model == :RGBA
push!(cs, RGBA(final1, final2, final3, final4))
elseif model == :HSVA
push!(cs, convert(RGBA, HSVA(final1, final2, final3, final4)))
end
end
return ColorScheme(cs, category, notes)
end
"""
    make_colorscheme(colorlist, steps)

Make a new colorscheme with `steps` colors, interpolated channel by channel
(linearly) through the colors in the array `colorlist`.

```
make_colorscheme([RGB(1, 0, 0), HSB(285, 0.7, 0.7), colorant"darkslateblue"], 20)
```
"""
function make_colorscheme(colorlist::Array, steps)
    rgbs = convert.(RGB, colorlist)
    # the supplied colors are spaced evenly over 0 .. 1
    nodes = (range(0.0, 1.0, length = length(rgbs)),)
    # one linear interpolator per channel
    reditp   = interpolate(nodes, convert.(Float64, getfield.(rgbs, :r)), Gridded(Linear()))
    greenitp = interpolate(nodes, convert.(Float64, getfield.(rgbs, :g)), Gridded(Linear()))
    blueitp  = interpolate(nodes, convert.(Float64, getfield.(rgbs, :b)), Gridded(Linear()))
    colors = Color[RGB(reditp(t), greenitp(t), blueitp(t)) for t in range(0, 1, length = steps)]
    return ColorScheme(colors, "interpolated gradient", "$steps")
end
"""
    add_alpha(cs::ColorScheme, alpha::Real=0.5)

Make a copy of the colorscheme `cs` in which every color carries the alpha
opacity value `alpha`.

### Example

Make a copy of the PuOr colorscheme with every element given alpha
opacity 0.5:

```julia
add_alpha(ColorSchemes.PuOr, 0.5)
```
"""
function add_alpha(cs::ColorScheme, alpha::Real = 0.5)
    # sample the original channels, and supply a constant opacity
    redf(n)      = red(get(cs, n))
    greenf(n)    = green(get(cs, n))
    bluef(n)     = blue(get(cs, n))
    constantf(n) = alpha
    return make_colorscheme(redf, greenf, bluef, constantf,
        model = :RGBA,
        length = length(cs.colors),
        category = cs.category,
        notes = cs.notes * " with alpha")
end
"""
    add_alpha(cs::ColorScheme, alpha::Vector)

Make a copy of the colorscheme `cs` with alpha opacity values linearly
interpolated through the values in the vector `alpha`.

### Example

Make a copy of the PuOr colorscheme, set the first element to have alpha
opacity 1.0, the last element to have opacity 0.0, with intermediate elements
taking values between 1.0 and 0.0.

```julia
add_alpha(ColorSchemes.PuOr, [1.0, 0.0])
```
"""
function add_alpha(cs::ColorScheme, alpha::Vector)
    isempty(alpha) && throw(error("add_alpha: alpha vector should not be empty"))
    if length(alpha) == 1
        # a single value is just a constant opacity; the scalar method handles it
        return add_alpha(cs, first(alpha))
    end
    alphanodes = (range(0, 1, length = length(alpha)),)
    # interpolate through *all* the supplied alpha values; the previous
    # implementation interpolated a linear ramp between alpha[1] and
    # alpha[end], silently ignoring intermediate values
    itpalpha = interpolate(alphanodes, float.(alpha), Gridded(Linear()))
    return make_colorscheme(
        n -> red(get(cs, n)),
        n -> green(get(cs, n)),
        n -> blue(get(cs, n)),
        n -> itpalpha(n),
        model = :RGBA,
        length = length(cs.colors),
        category = cs.category,
        notes = cs.notes * " with alpha",
    )
end
"""
    add_alpha(cs::ColorScheme, r::AbstractRange)

Make a copy of the colorscheme `cs` with alpha opacity values taken from the
range `r`.

### Example

Make a copy of the PuOr colorscheme, set the first element to have alpha
opacity 0.5, the last element to have opacity 1.0, with intermediate elements
taking values between 0.5 and 1.0.

```julia
add_alpha(ColorSchemes.PuOr, 0.5:0.1:1.0)
```
"""
function add_alpha(cs::ColorScheme, r::T where {T<:AbstractRange})
    if length(r) == 1
        # a one-step range gives the Vector method nothing to interpolate
        throw(error("add_alpha: range $(r) should have more than one step."))
    end
    return add_alpha(cs, collect(r))
end
"""
    add_alpha(cs::ColorScheme, f::Function)

Make a copy of the colorscheme `cs` whose alpha opacity at position `n`
(between 0 and 1) is given by `f(n)`.

### Example

Make a copy of the PuOr colorscheme where the opacity of each element is the
result of calling the function on its position; here the opacity is 1.0 at
value 0.5, but 0.0 at either end.

```julia
add_alpha(ColorSchemes.PuOr, (n) -> sin(n * π))
```
"""
function add_alpha(cs::ColorScheme, f::Function)
    # sample the original channels, and let `f` supply the opacity
    redf(n)   = red(get(cs, n))
    greenf(n) = green(get(cs, n))
    bluef(n)  = blue(get(cs, n))
    return make_colorscheme(redf, greenf, bluef, f,
        model = :RGBA,
        length = length(cs.colors),
        category = cs.category,
        notes = cs.notes * " with alpha")
end
end
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 6627 | # this code is adapted from peterkovesi/PerceptualColourMaps.jl
# https://github.com/peterkovesi/PerceptualColourMaps.jl/
# Because: Peter has retired, and is no longer updating his packages.
# all errors are mine!
# original copyright message
#=----------------------------------------------------------------------------
Copyright (c) 2015-2020 Peter Kovesi
peterkovesi.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind.
----------------------------------------------------------------------------=#
"""
    _equalizecolormap(colormodel, cmap, formula, W, sigma, cyclic, diagnostics)

Adjust the N×3 colormap `cmap` (rows are colors) so that successive entries are
separated by approximately equal perceptual contrast, returning a new N×3 RGB
array.

- `colormodel` is `:RGB` (values 0-1) or `:LAB` (CIELAB values)
- `formula` is "CIE76" or "CIEDE2000" (case-insensitive)
- `W` weights the lightness, chroma, and hue terms of the difference formula
- `sigma` > 0 applies Gaussian smoothing to the path in CIELAB space
- `cyclic` makes the smoothing wrap around the ends of the map
- `diagnostics` is currently unused — TODO confirm whether it should gate
  reporting of the `initial*` values recorded below
"""
function _equalizecolormap(
    colormodel::Symbol,
    cmap::AbstractMatrix{Float64},
    formula::String = "CIE76",
    W::Array = [1.0, 0.0, 0.0],
    sigma::Real = 0.0,
    cyclic::Bool = false,
    diagnostics::Bool = false)
    N = size(cmap, 1)
    # with the default sigma = 0.0, N / sigma is Inf, so no warning is issued
    if N / sigma < 25
        @warn "sigma shouldn't be larger than 1/25 of the colormap length"
    end
    formula = uppercase(formula)
    # sanity checks: the two color models live in very different numeric ranges
    if colormodel === :RGB && (maximum(cmap) > 1.01 || minimum(cmap) < -0.01)
        throw(error("_equalizecolormap(): If map is RGB values should be in the range 0-1"))
    elseif colormodel === :LAB && maximum(abs.(cmap)) < 10
        throw(error("_equalizecolormap(): If map is LAB magnitude of values are expected to be > 10"))
    end
    # If input is RGB, convert colormap to Lab. Also, ensure that we have both
    # RGB and Lab representations of the colormap. Assume the Colors.convert()
    # function uses a default white point of D65
    if colormodel === :RGB
        rgbmap = copy(cmap)
        labmap = _srgb_to_lab(cmap)
        L = labmap[:, 1]
        a = labmap[:, 2]
        b = labmap[:, 3]
    elseif colormodel === :LAB
        labmap = copy(cmap)
        rgbmap = _lab_to_srgb(cmap)
        L = cmap[:, 1]
        a = cmap[:, 2]
        b = cmap[:, 3]
    else
        throw(error("_equalizecolormap(): `colormodel` must be RGB or LAB, not $(colormodel)"))
    end
    # The following section of code computes the locations to interpolate into
    # the colormap in order to achieve equal steps of perceptual contrast.
    # The process is repeated recursively on its own output. This helps overcome
    # the approximations induced by using linear interpolation to estimate the
    # locations of equal perceptual contrast. This is mainly an issue for
    # colormaps with only a few entries.
    # (placeholders so the values recorded on the first iteration survive the loop)
    initialdeltaE = 0
    initialcumdE = 0
    initialequicumdE = 0
    initialnewN = 0
    for iter in 1:3
        # Compute perceptual colour difference values along the colormap using
        # the chosen formula and weighting vector.
        if formula == "CIE76"
            deltaE = _cie76(L, a, b, W)
        elseif formula == "CIEDE2000"
            deltaE = _ciede2000(L, a, b, W)
        else
            throw(error("_equalizecolormap(): Unknown colour difference formula in $(formula)"))
        end
        # Form cumulative sum of delta E values. However, first ensure all
        # values are larger than 0.001 to ensure the cumulative sum always
        # increases.
        deltaE[deltaE .< 0.001] .= 0.001
        cumdE = cumsum(deltaE, dims = 1)
        # Form an array of equal steps in cumulative contrast change.
        equicumdE = collect(0:(N - 1)) ./ (N - 1) .* (cumdE[end] - cumdE[1]) .+ cumdE[1]
        # Solve for the locations that would give equal Delta E values.
        newN = _interp1(cumdE, 1:N, equicumdE)
        # newN now represents the locations where we want to interpolate into the
        # colormap to obtain constant perceptual contrast
        Li = interpolate(L, BSpline(Linear()))
        L = [Li(v) for v in newN]
        ai = interpolate(a, BSpline(Linear()))
        a = [ai(v) for v in newN]
        bi = interpolate(b, BSpline(Linear()))
        b = [bi(v) for v in newN]
        # Record initial colour differences for evaluation at the end
        if iter == 1
            initialdeltaE = deltaE
            initialcumdE = cumdE
            initialequicumdE = equicumdE
            initialnewN = newN
        end
    end
    # Apply smoothing of the path in CIELAB space if requested. The aim is to
    # smooth out sharp lightness/colour changes that might induce the perception
    # of false features. In doing this there will be some cost to the
    # perceptual contrast at these points.
    if sigma > 0.0
        L = _smooth(L, sigma, cyclic)
        a = _smooth(a, sigma, cyclic)
        b = _smooth(b, sigma, cyclic)
    end
    # Convert map back to RGB
    newlabmap = [L a b]
    newrgbmap = _lab_to_srgb(newlabmap)
    return newrgbmap
end
"""
    equalize(cs::ColorScheme;
        colormodel::Symbol="RGB",
        formula::String="CIE76",
        W::Array=[1.0, 0.0, 0.0],
        sigma::Real=0.0,
        cyclic::Bool=false)

    equalize(ca::Array{Colorant, 1};
        # same keywords
        )

Equalize colors in the colorscheme `cs` or the array of colors `ca` so that they
are more perceptually uniform.

- `cs` is a ColorScheme
- `ca` is an array of colors
- `colormodel`` is `:RGB`or`:LAB`
- `formula` is "CIE76" or "CIEDE2000"
- `W` is a vector of three weights to be applied to the lightness, chroma, and hue
  components of the difference equation
- `sigma` is an optional Gaussian smoothing parameter
- `cyclic` is a Boolean flag indicating whether the colormap is cyclic

Returns a colorscheme with the colors adjusted.
"""
function equalize(ca::Array{T};
        colormodel::Symbol = :RGB,
        formula::String = "CIE76",
        W::Array = [1.0, 0.0, 0.0],
        sigma::Real = 0.0,
        cyclic::Bool = false) where {T<:Colorant}
    # choose the converter that matches the element type: RGBA colors
    # carry an alpha field that the plain RGB converter can't read
    tofloats = eltype(ca) <: Colors.RGBA ? _RGBA_to_FloatArray : _RGB_to_FloatArray
    rgbdata = _equalizecolormap(colormodel, tofloats(ca), formula, W,
        sigma, cyclic, false)
    adjusted = [RGB(row...) for row in eachrow(rgbdata)]
    return ColorScheme(adjusted)
end
# ColorScheme method: unwrap the color vector and forward every keyword
function equalize(cs::ColorScheme;
        colormodel::Symbol = :RGB,
        formula::String = "CIE76",
        W::Array = [1.0, 0.0, 0.0],
        sigma::Real = 0.0,
        cyclic::Bool = false)
    return equalize(cs.colors;
        colormodel = colormodel,
        formula = formula,
        W = W,
        sigma = sigma,
        cyclic = cyclic)
end
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 15110 | # this code is adapted from peterkovesi/PerceptualColourMaps.jl
# https://github.com/peterkovesi/PerceptualColourMaps.jl/
# Because: Peter has retired, and is no longer updating his packages.
# all errors are mine!
# original copyright message
#=----------------------------------------------------------------------------
Copyright (c) 2015-2020 Peter Kovesi
peterkovesi.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind.
----------------------------------------------------------------------------=#
"""
    _gaussfilt1d(s::Array, sigma::Real)

Apply a 1D Gaussian filter with standard deviation `sigma` to `s`.
Values beyond the ends of the array are treated as zero (zero padding).

Usage:

    sm = _gaussfilt1d(s::Array, sigma::Real)
"""
function _gaussfilt1d(s::Array, sigma::Real)
    N = length(s)
    r = ceil(Int, 3 * sigma)    # filter half-width: 3 standard deviations
    fw = 2 * r + 1              # full filter size
    # Construct normalized Gaussian kernel
    f = [exp(-x^2 / (2 * sigma^2)) for x in (-r):r]
    f = f / sum(f)
    sm = zeros(size(s))
    # A single pass with an explicit bounds check implements zero padding at
    # both ends. (The previous separate start/centre/end loops overlapped when
    # N <= 2r - 1, double-counting contributions for short arrays.)
    for i in 1:N, k in 1:fw
        ind = i + k - r - 1
        if 1 <= ind <= N
            sm[i] += f[k] * s[ind]
        end
    end
    return sm
end
"""
    _smooth(L::Array{T,1}, sigma::Real, cyclic::Bool) where {T<:Real}

Gaussian-smooth an array of values while ensuring the end values are not
distorted: for a cyclic map the smoothing wraps around the ends, otherwise the
array is extended at both ends at the same slope before filtering.

Assume input data is a column vector.
"""
function _smooth(L::Array{T,1}, sigma::Real, cyclic::Bool) where {T<:Real}
    n = length(L)
    if cyclic
        # filter three concatenated copies, then keep the centre copy
        tripled = [L; L; L]
        filtered = _gaussfilt1d(tripled, sigma)
        return filtered[(n + 1):(2 * n)]
    end
    # Non-cyclic colormap: pad out L at both ends by 3*sigma samples
    # continuing at the end slopes, to eliminate edge effects in the filtering
    extension = collect(1:ceil(3 * sigma))
    npad = length(extension)
    startslope = L[2] - L[1]
    endslope = L[end] - L[end - 1]
    padded = [L[1] .- reverse(startslope * extension, dims = 1); L; L[end] .+ endslope * extension]
    filtered = _gaussfilt1d(padded, sigma)
    # trim off the extensions
    return filtered[(npad + 1):(npad + n)]
end
"""
    _cie76(L::Array, a::Array, b::Array, W::Array)

Compute weighted Delta E between successive entries in a
colormap using the CIE76 formula, with weights `W` applied to the
lightness, a, and b difference terms.

Usage:

    deltaE = _cie76(L::Array, a::Array, b::Array, W::Array)
"""
function _cie76(L::Array, a::Array, b::Array, W::Array)
    # central-difference gradient, one-sided differences at the two ends
    function _grad(v)
        d = zeros(size(v))
        for i in 2:(length(v) - 1)
            d[i] = (v[i + 1] - v[i - 1]) / 2
        end
        d[1] = v[2] - v[1]
        d[end] = v[end] - v[end - 1]
        return d
    end
    dL = _grad(L)
    da = _grad(a)
    db = _grad(b)
    return sqrt.(W[1] * dL .^ 2 + W[2] * da .^ 2 + W[3] * db .^ 2)
end
"""
    _ciede2000(L::Array, a::Array, b::Array, W::Array)

Compute weighted Delta E between successive entries in a
colormap using the CIEDE2000 formula; the weights `W` enter the metric as
reciprocal kL, kC, kH factors.

Usage:

    deltaE = _ciede2000(L::Array, a::Array, b::Array, W::Array)
"""
function _ciede2000(L::Array, a::Array, b::Array, W::Array)
    n = length(L)
    deltaE = zeros(n, 1)
    # build the parameterized metric once and reuse it for every pair
    metric = Colors.DE_2000(1 / W[1], 1 / W[2], 1 / W[3])
    lab(i) = Colors.Lab(L[i], a[i], b[i])
    # central differences for the interior entries
    for i in 2:(n - 1)
        deltaE[i] = Colors.colordiff(lab(i + 1), lab(i - 1); metric = metric) / 2
    end
    # one-sided differences at the two end points
    deltaE[1] = Colors.colordiff(lab(2), lab(1); metric = metric)
    deltaE[n] = Colors.colordiff(lab(n), lab(n - 1); metric = metric)
    return deltaE
end
"""
    _srgb_to_lab(rgb::AbstractMatrix{T}) where {T}

Convert an Nx3 array of RGB values in a colormap to an Nx3 array of CIELAB
values. Note it appears that the Colors.convert() function uses a default
white point of D65.

Usage:

    lab = _srgb_to_lab(rgb)

Argument:

- rgb - an N x 3 array of RGB values

Returns:

- lab - an N x 3 array of Lab values

See also: _lab_to_srgb
"""
function _srgb_to_lab(rgb::AbstractMatrix{T}) where {T}
    nrows = size(rgb, 1)
    lab = zeros(nrows, 3)
    for row in 1:nrows
        labval = Colors.convert(Colors.Lab, Colors.RGB(rgb[row, 1], rgb[row, 2], rgb[row, 3]))
        lab[row, 1] = labval.l
        lab[row, 2] = labval.a
        lab[row, 3] = labval.b
    end
    return lab
end
"""
    _srgb_to_lab(rgb::Array{T, 3}) where {T}

Convert a 3 channel RGB image to a 3 channel CIELAB image.

Usage:

    lab = _srgb_to_lab(rgb)
"""
function _srgb_to_lab(rgb::Array{T,3}) where {T}
    nrows, ncols, _ = size(rgb)
    lab = zeros(size(rgb))
    for col in 1:ncols, row in 1:nrows
        labval = Colors.convert(Colors.Lab, Colors.RGB(rgb[row, col, 1], rgb[row, col, 2], rgb[row, col, 3]))
        lab[row, col, 1] = labval.l
        lab[row, col, 2] = labval.a
        lab[row, col, 3] = labval.b
    end
    return lab
end
"""
    _srgb_to_lab(rgb::Vector{T}) where {T}

Convert a vector of colorants to an Nx3 array of CIELAB values.
"""
function _srgb_to_lab(rgb::Vector{T}) where {T}
    lab = zeros(length(rgb), 3)
    for (row, color) in enumerate(rgb)
        labval = Colors.convert(Colors.Lab, color)
        lab[row, 1] = labval.l
        lab[row, 2] = labval.a
        lab[row, 3] = labval.b
    end
    return lab
end
"""
    _lab_to_srgb(lab::AbstractMatrix{T}) where {T}

Convert an Nx3 array of CIELAB values in a colormap to an Nx3 array of RGB
values. Note it appears that the Colors.convert() function uses a default
white point of D65.

Usage:

    rgb = _lab_to_srgb(lab)

Argument:

- lab - N x 3 array of CIELAB values

Returns:

- rgb - N x 3 array of RGB values

See also: _srgb_to_lab
"""
function _lab_to_srgb(lab::AbstractMatrix{T}) where {T}
    nrows = size(lab, 1)
    rgb = zeros(nrows, 3)
    for row in 1:nrows
        rgbval = Colors.convert(ColorTypes.RGB, ColorTypes.Lab(lab[row, 1], lab[row, 2], lab[row, 3]))
        rgb[row, 1] = rgbval.r
        rgb[row, 2] = rgbval.g
        rgb[row, 3] = rgbval.b
    end
    return rgb
end
"""
    _lab_to_srgb(lab::Array{T,3}) where {T}

Convert a 3 channel Lab image to a 3 channel RGB image.

Usage:

    rgb = _lab_to_srgb(lab)
"""
function _lab_to_srgb(lab::Array{T,3}) where {T}
    nrows, ncols, _ = size(lab)
    rgb = zeros(size(lab))
    for col in 1:ncols, row in 1:nrows
        rgbval = Colors.convert(ColorTypes.RGB, ColorTypes.Lab(lab[row, col, 1], lab[row, col, 2], lab[row, col, 3]))
        rgb[row, col, 1] = rgbval.r
        rgb[row, col, 2] = rgbval.g
        rgb[row, col, 3] = rgbval.b
    end
    return rgb
end
"""
    _RGBA_to_UInt32(rgb)

Convert an array of RGB values to an array of UInt32 values for use as a
colormap: each color's 8-bit red, green, and blue components are packed as
0x00RRGGBB (any alpha component is ignored).

Usage:

    uint32rgb = _RGBA_to_UInt32(rgbmap)

Argument:

- rgbmap - Vector of ColorTypes.RGBA values

Returns:

- uint32rgb, an array of UInt32 values packed with the 8 bit RGB values.
"""
function _RGBA_to_UInt32(rgb)
    packed = zeros(UInt32, length(rgb))
    for (i, col) in enumerate(rgb)
        rbits = round(UInt32, col.r * 255)
        gbits = round(UInt32, col.g * 255)
        bbits = round(UInt32, col.b * 255)
        packed[i] = (rbits << 16) + (gbits << 8) + bbits
    end
    return packed
end
"""
    _linearrgbmap(C::Array, N::Int=256)

Linear RGB colourmap from black to a specified color.

Usage:

    cmap = _linearrgbmap(C, N)

Arguments:

- C - 3-vector specifying RGB colour
- N - Number of colourmap elements, defaults to 256

Returns

- cmap - an N element ColorTypes.RGBA colourmap ranging from [0 0 0] to RGB colour C.

You should pass the result through equalize() to obtain uniform steps in
perceptual lightness.
"""
function _linearrgbmap(C::Array, N::Int = 256)
    length(C) == 3 || throw(error("_linearrgbmap(): Colour must be a 3 element array"))
    # each channel ramps linearly from 0 up to its target component
    ramp = (0:(N - 1)) / (N - 1)
    rgbmap = [ramp[row] * C[chan] for row in 1:N, chan in 1:3]
    return _FloatArray_to_RGBA(rgbmap)
end
"""
    _FloatArray_to_RGB(cmap)

Convert an N×3 Float64 array to an N-element array of `ColorTypes.RGB{Float64}`.
"""
function _FloatArray_to_RGB(cmap)
    (nrows, ncols) = size(cmap)
    ncols == 3 || throw(error("_FloatArray_to_RGB(): data must be N x 3"))
    rgbmap = Array{Colors.RGB{Float64},1}(undef, nrows)
    for row in 1:nrows
        rgbmap[row] = Colors.RGB(cmap[row, 1], cmap[row, 2], cmap[row, 3])
    end
    return rgbmap
end
"""
    _FloatArray_to_RGBA(cmap)

Convert an N×3 Float64 array to an N-element array of
`ColorTypes.RGBA{Float64}`, with alpha set to 1.0 (fully opaque).
"""
function _FloatArray_to_RGBA(cmap)
    (N, cols) = size(cmap)
    if cols != 3
        # the message previously named the wrong function (_FloatArray_to_RGB)
        throw(error("_FloatArray_to_RGBA(): data must be N x 3"))
    end
    rgbmap = Array{Colors.RGBA{Float64},1}(undef, N)
    for i in 1:N
        rgbmap[i] = Colors.RGBA(cmap[i, 1], cmap[i, 2], cmap[i, 3], 1.0)
    end
    return rgbmap
end
"""
    _RGB_to_FloatArray(rgbmap)

Convert an array of N RGB{Float64} colors to an N×3 Float64 array
(columns are red, green, blue).
"""
function _RGB_to_FloatArray(rgbmap)
    out = Array{Float64}(undef, length(rgbmap), 3)
    for (row, col) in enumerate(rgbmap)
        out[row, 1] = col.r
        out[row, 2] = col.g
        out[row, 3] = col.b
    end
    return out
end
"""
    _RGBA_to_FloatArray(rgbmap)

Convert an array of N RGBA{Float64} colors to an N×3 Float64 array
(columns are red, green, blue; the alpha channel is dropped).
"""
function _RGBA_to_FloatArray(rgbmap)
    out = Array{Float64}(undef, length(rgbmap), 3)
    for (row, col) in enumerate(rgbmap)
        out[row, 1] = col.r
        out[row, 2] = col.g
        out[row, 3] = col.b
    end
    return out
end
"""
    _interp1(x, y, xi)

Simple 1D linear interpolation of an array of data.

Usage:

    yi = _interp1(x, y, xi)

Arguments:

- x  - Array of coordinates at which y is defined
- y  - Array of values at coordinates x
- xi - Coordinate locations at which you wish to interpolate y values

Returns:

- yi - Values linearly interpolated from y at xi

Interpolates y, defined at values x, at locations xi and returns the
corresponding values as yi.

x is assumed increasing but not necessarily equi-spaced; a binary search
is used to locate each interval.
xi values do not need to be sorted.

If any xi are outside the range of x, then the corresponding value of
yi is set to the appropriate end value of y.
"""
function _interp1(x, y, xi)
    yi = zeros(size(xi))
    # vec() lets N×1 matrices (e.g. the result of cumsum(..., dims = 1) in
    # _equalizecolormap) be searched as vectors; it reshapes without copying
    xvec = vec(x)
    minx = minimum(xvec)
    maxx = maximum(xvec)
    for i in eachindex(xi)
        v = xi[i]
        if v <= minx
            yi[i] = y[1]
        elseif v >= maxx
            yi[i] = y[end]
        else
            # binary search: O(log n) per query instead of scanning the whole
            # array with findall for every xi.  `left` is the last index with
            # xvec[left] <= v, so xvec[left + 1] > v and the interval is valid.
            left = searchsortedlast(xvec, v)
            right = left + 1
            yi[i] = y[left] + (v - xvec[left]) / (xvec[right] - xvec[left]) * (y[right] - y[left])
        end
    end
    return yi
end
"""
    _normalize_array(img::Array)

Offsets and rescales elements of `img` so that the minimum value is 0 and the
maximum value is 1. A constant array is merely shifted to all zeros, which
avoids dividing by (almost) zero and producing NaNs.
"""
function _normalize_array(img::Array)
    lo, hi = extrema(img)
    shifted = img .- lo
    if isapprox(lo, hi)
        # constant input: nothing to scale
        return shifted
    end
    return shifted / maximum(shifted)
end
"""
    sineramp(rows, cols;
        amplitude = 12.5,
        wavelength = 8,
        p = 2)

Generate a `rows × cols` array of values which show a sine wave with
decreasing amplitude from top to bottom.

Usage:

```julia
using Images

scheme = ColorSchemes.dracula
img = Gray.(sineramp(256, 512, amplitude = 12.5, wavelength = 8, p = 2))
cimg = zeros(RGB, 256, 512)
for e in eachindex(img)
    cimg[e] = get(scheme, img[e])
end
cimg
```

The default wavelength is 8 pixels. On a computer monitor with a nominal pixel
pitch of 0.25mm this corresponds to a wavelength of 2mm. With a monitor viewing
distance of 600mm this corresponds to 0.19 degrees of viewing angle or
approximately 5.2 cycles per degree. This falls within the range of spatial
frequencies (3-7 cycles per degree) at which most people have maximal contrast
sensitivity to a sine wave grating (this varies with mean luminance). A
wavelength of 8 pixels is also sufficient to provide a reasonable discrete
representation of a sine wave. The aim is to present a stimulus that is well
matched to the performance of the human visual system so that what we are
primarily evaluating is the colorscheme's perceptual contrast and not the visual
performance of the viewer.

The default amplitude is set at 12.5, so that from peak to trough we have a
local feature of magnitude 25. This is approximately 10% of the 256 levels in a
typical colorscheme. It is not uncommon for colorschemes to have perceptual flat
spots that can hide features of this magnitude.

The width of the image is adjusted so that we have an integer number of cycles
of the sinewave. This helps should one be using the test image to evaluate a
cyclic colorscheme. However you will still see a slight cyclic discontinuity at
the top of the image, though this will disappear at the bottom.
"""
function sineramp(rows, cols;
        amplitude = 12.5,
        wavelength = 8,
        p = 2)
    # trim the width to a whole number of sine cycles
    cycles = round(cols / wavelength)
    cols = cycles * wavelength
    # Sine wave
    x = collect(0:(cols - 1))'
    fx = amplitude * sin.(1.0 / wavelength * 2 * pi * x)
    # Vertical modulating function: amplitude falls from 1 at the top
    # to 0 at the bottom, shaped by the power p
    A = (collect((rows - 1):-1:0)' / (rows - 1)) .^ float(p)
    img = A' * fx
    # Add ramp
    ramp = [c / (cols - 1) for r in 1:rows, c in 0:(cols - 1)]
    img = img + ramp * (255.0 - 2 * amplitude)
    # Now normalise each row so that it spans the full data range from 0 to 1.
    # Again, this is important for evaluation of cyclic colour maps though a
    # small cyclic discontinuity will remain at the top of the test image.
    for r in 1:rows
        img[r, :] = _normalize_array(img[r, :])
    end
    return img
end
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 1100 | using Test
using ColorSchemeTools
using ColorSchemes
using Colors
cs = ColorScheme([colorant"yellow", colorant"red"])
# basic dispatch
@test equalize(cs) isa ColorScheme
@test equalize(cs, W = [0, 0, 0]) isa ColorScheme
# with linear interpolation this is not perceptually uniform
get(cs, 0:0.01:1)
# so generate corrected colors
ncs = equalize(cs.colors, colormodel=:RGB, formula="CIEDE2000", W=[1, 0, 0])
get(ncs, 0:0.01:1)
# Generate an array in Lab space with an uneven
# ramp in lightness and check that this is corrected
labmap = zeros(256, 3)
labmap[1:127, 1] = range(0, stop=40, length=127)
labmap[128:256, 1] = range(40, stop=100, length=129)
rgb_lab_array = [convert(RGB, Lab(l...)) for l in eachrow(labmap)]
afterscheme = equalize(rgb_lab_array,
colormodel=:RGB,
formula="CIE76",
W=[1, 0, 0],
sigma = 1)
# Convert to Nx3 array and then back to lab space. Then check that dL
# is roughly constant
labmap2 = ColorSchemeTools._srgb_to_lab(afterscheme.colors)
dL = labmap2[2:end, 1] - labmap2[1:end-1, 1]
@test maximum(dL[2:end-1]) - minimum(dL[2:end-1]) < 1e-1
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | code | 7762 | using Test, ColorSchemes, ColorSchemeTools, FileIO, Colors
using Aqua
@static if Sys.isapple()
using QuartzImageIO
end
# Full test suite. Needs the image file hokusai.jpg next to this script, and
# writes colorscheme text files into the current directory (the caller cds
# into a temporary directory first).
function run_all_tests()
    # Aqua.test_all(ColorSchemeTools) # too many ImageCore errors for now
    @testset "basic functions" begin
        # load existing scheme from ColorSchemes.jl
        hok = ColorSchemes.hokusai
        @test length(hok) == 32
        # image file located here in the test directory
        # create a colorscheme from image file, default is 10
        hokusai_test = ColorSchemeTools.extract(dirname(@__FILE__) * "/hokusai.jpg")
        @test length(hokusai_test) == 10
        # extract colors and weights
        c, w = ColorSchemeTools.extract_weighted_colors(dirname(@__FILE__) * "/hokusai.jpg", 10, 10, 0.01; shrink=4)
        @test length(c) == 10
        @test length(w) == 10
        # test that sampling schemes yield different values
        @test get(hokusai_test, 0.0) != get(hokusai_test, 0.5)
        # test sort
        @test ColorSchemeTools.sortcolorscheme(hokusai_test, rev=true) != ColorSchemeTools.sortcolorscheme(hokusai_test)
        # create weighted palette; there is some unpredictability here... :)
        csw = colorscheme_weighted(c, w, 37)
        @test 36 <= length(csw) <= 38
        # default is 50
        csw = colorscheme_weighted(c, w)
        @test length(csw) == 50
        # save as Julia text file
        colorscheme_to_text(ColorSchemes.hokusai, "hokusai_test_version", "hokusai_as_text.jl")
        # and read it in as text
        open("hokusai_as_text.jl") do f
            lines = readlines(f)
            @test occursin("loadcolorscheme(", lines[1])
            @test occursin("Colors.RGB{Float64}(0.1112499123425623", lines[4])
        end
    end
    @testset "getinverse tests" begin
        getinverse(ColorSchemes.colorschemes[:leonardo], RGB(1, 0, 0))
        getinverse(ColorScheme([Colors.RGB(0, 0, 0), Colors.RGB(1, 1, 1)]), Colors.RGB(0.5, 0.5, 0.5))
        # the middle color of an evenly spaced black-to-white scheme maps back to 0.5
        cs = ColorScheme(range(Colors.RGB(0, 0, 0), stop=Colors.RGB(1, 1, 1), length=5))
        gi = getinverse(cs, cs[3])
        @test gi == 0.5
    end
    @testset "convert to scheme tests" begin
        # Add color to a grayscale image.
        # now using ColorScheme objects and .colors accessors
        red_cs = ColorScheme(range(RGB(0, 0, 0), stop=RGB(1, 0, 0), length=11))
        gray_cs = ColorScheme(range(RGB(0, 0, 0), stop=RGB(1, 1, 0), length=11))
        vs = [getinverse(gray_cs, p) for p in red_cs.colors]
        cs = ColorScheme([RGB(v, v, v) for v in vs])
        rcs = [get(red_cs, p) for p in vs]
        new_img = convert_to_scheme(red_cs, gray_cs.colors)
        # TODO
        # This is broken.. It should be way more specific. See next test.
        @test all(.≈(new_img, red_cs.colors, atol=0.5))
        # Should be able to uniquely match each increasing color with the next
        # increasing color in the new scale.
        red_cs = ColorScheme(range(RGB(0, 0, 0), stop=RGB(1, 1, 1)))
        blue_scale_img = range(RGB(0, 0, 0), stop=RGB(0, 0, 1))
        new_img = convert_to_scheme(red_cs, blue_scale_img)
        @test unique(new_img) == new_img
    end
    @testset "make_colorscheme tests" begin
        # interpolated gradient keeps its end colors
        cs = make_colorscheme([colorant"red", colorant"green", colorant"blue"], 20)
        @test cs[1] == RGB{Float64}(1.0, 0.0, 0.0)
        @test cs[end] == RGB{Float64}(0.0, 0.0, 1.0)
        colorscheme_to_text(cs, "rgb_scheme", "rgb_scheme.jl")
    end
    @testset "make_colorscheme tests" begin
        # check that list ordering isn't important
        alist2 = (
            (1.00, (1.00, 0.00, 0.75)),
            (0.000, (1.00, 0.00, 0.16)),
            (0.586, (0.00, 1.00, 1.00)),
        )
        alist2s = (
            (0.000, (1.00, 0.00, 0.16)),
            (0.586, (0.00, 1.00, 1.00)),
            (1.00, (1.00, 0.00, 0.75)),
        )
        s1 = make_colorscheme(alist2, length=5)
        s2 = make_colorscheme(alist2s, length=5)
        @test s1.colors == s2.colors
    end
    @testset "add_alpha tests" begin
        csa = add_alpha(ColorSchemes.plasma, 0.5)
        # all alpha = 0.5
        @test all(isequal(0.5), getfield.(csa.colors, :alpha)) == true
        csa = add_alpha(ColorSchemes.plasma, [0.8, 1.0])
        # all alpha between 0.8 and 1.0
        @test all(>=(0.8), getfield.(csa.colors, :alpha))
        csa = add_alpha(ColorSchemes.plasma, [1.0, 0.8])
        # all alpha > 0.8
        @test all(>=(0.8), getfield.(csa.colors, :alpha))
        csa = add_alpha(ColorSchemes.plasma, (n) -> sin(n * π))
        # all alphas close to sin(n π)
        for (i, a) in enumerate(csa.colors)
            a, b = sin(i / 256 * π), a.alpha
            @test abs(a - b) < 0.1
        end
    end
    @testset "equalize tests" begin
        include("equalize-tests.jl")
    end
end
# Smaller suite that avoids the image-extraction machinery; writes a
# colorscheme text file into the current directory.
function run_minimum_tests()
    @testset "basic minimum tests" begin
        # load scheme
        hok = ColorSchemes.hokusai
        @test length(hok) == 32
        # test sort
        @test ColorSchemeTools.sortcolorscheme(hok, rev=true) != ColorSchemeTools.sortcolorscheme(hok)
        # save as text
        ColorSchemeTools.colorscheme_to_text(hok, "hokusai_test_version", "hokusai_as_text.jl")
        @test filesize("hokusai_as_text.jl") > 2000
        open("hokusai_as_text.jl") do f
            lines = readlines(f)
            @test occursin("Colors.RGB{Float64}(0.1112499123425623", lines[4])
        end
        # convert an Array{T,2} to an RGB image
        tmp = get(ColorSchemes.leonardo, rand(10, 10))
        @test typeof(tmp) == Array{ColorTypes.RGB{Float64}, 2}
        # test conversion with default clamp
        x = [0.0 1.0 ; -1.0 2.0]
        y = get(ColorSchemes.leonardo, x)
        @test y[1,1] == y[2,1]
        @test y[1,2] == y[2,2]
        # test conversion with symbol clamp
        y2 = get(ColorSchemes.leonardo, x, :clamp)
        @test y2 == y
        # test conversion with symbol extrema
        y2 = get(ColorSchemes.leonardo, x, :extrema)
        @test y2[2,1] == y[1,1] # Minimum now becomes one edge of ColorScheme
        @test y2[2,2] == y[1,2] # Maximum now becomes other edge of ColorScheme
        @test y2[1,1] !== y2[2,1] # In-between values are now different
        # test conversion with manually supplied range
        y3 = get(ColorSchemes.leonardo, x, (-1.0, 2.0))
        @test y3 == y2
        # test with steplen (#17)
        r = range(0, stop=5, length=10)
        y = get(ColorSchemes.leonardo, r)
        y2 = get(ColorSchemes.leonardo, collect(r))
        @test y == y2
        # test for specific value
        val = 0.2
        y = get(ColorSchemes.leonardo, [val])
        y2 = get(ColorSchemes.leonardo, val)
        @test y2 == y[1]
    end
end
# Entry point: set ENV["ColorSchemeTools_KEEP_TEST_RESULTS"] = "true" to run
# the tests in a persistent temporary directory so the generated files can be
# inspected afterwards; otherwise everything runs in a throwaway mktempdir.
if get(ENV, "ColorSchemeTools_KEEP_TEST_RESULTS", false) == "true"
    # they changed mktempdir in v1.3
    if VERSION <= v"1.2"
        cd(mktempdir())
    else
        cd(mktempdir(cleanup=false))
    end
    @info("...Keeping the results in: $(pwd())")
    @info("..running minimum tests")
    run_minimum_tests()
    @info("..running all tests")
    run_all_tests()
    @info("Test images saved in: $(pwd())")
else
    mktempdir() do tmpdir
        cd(tmpdir) do
            @info("..running tests in: $(pwd())")
            @info("..but not keeping the results")
            @info("..because you didn't do: ENV[\"ColorSchemeTools_KEEP_TEST_RESULTS\"] = \"true\"")
            @info("..running minimum tests")
            run_minimum_tests()
            @info("..running all tests")
            run_all_tests()
            @info("..Test images weren't saved. To see the test images, next time do this before running:")
            @info(" ENV[\"ColorSchemeTools_KEEP_TEST_RESULTS\"] = \"true\"")
        end
    end
end
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 1416 | # Changelog
## [v1.6.0] - forthcoming
### Added
- `equalize()` and some other functions from PerceptualColorMaps.jl
### Changed
- Interpolations.jl compatibility
- Julia v1.9
### Removed
### Deprecated
## [v1.5.0] - 30 October 2023
### Added
- `add_alpha`
### Changed
- fixed indexed list handling (#10)
- minimum Julia version now 1.6
### Removed
### Deprecated
## [v1.4.0] - 2022-12-21
### Added
### Changed
- fixed indexed list handling (#10)
- minimum Julia version now 1.6
### Removed
### Deprecated
## [v1.3.0] - 2022-08-18
### Added
### Changed
- update deps
- broken tests now work
### Removed
### Deprecated
## [v1.2.0] - 2021-02-16
### Added
- make_colorscheme with array of colorants
### Changed
### Removed
### Deprecated
## [v1.1.0] - 2020-07-14
### Added
### Changed
- Project.toml versions
### Removed
### Deprecated
## [v1.0.0] - 2020-03-09
### Added
### Changed
- extract() modified to use wcounts()
### Removed
### Deprecated
## [v0.2.0] - 2019-06-01
### Added
### Changed
- Require => Project.toml
### Removed
### Deprecated
## [v0.1.0] - minor changes - 2019-02-15
### Added
### Changed
- make_colorscheme uses color models
### Removed
-
### Deprecated
-
## [v0.0.1] - first release - 2019-01-24
### Added
- all functions moved from ColorSchemes v2.0.0
- get_linear_segment_colors()
### Changed
-
### Removed
-
### Deprecated
-
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 2061 | | **Documentation** | **Build Status** |
|:--------------------------------------- |:----------------------------------------------------------------------------------------------- |
| [![][docs-stable-img]][docs-stable-url] [![][docs-latest-img]][docs-latest-url] | [![Build Status][ci-img]][ci-url] [![][appveyor-img]][appveyor-url] [![][codecov-img]][codecov-url] |
## ColorSchemeTools
This package provides tools for working with colorschemes and colormaps. It's a
companion to the
[ColorSchemes.jl](https://github.com/JuliaGraphics/ColorSchemes.jl) package.
For example, you can extract colorschemes from images, and replace an image
colorscheme with another. There are also functions for creating new color schemes
from lists, dictionaries, and functions.
This package relies on:
- [Colors.jl](https://github.com/JuliaGraphics/Colors.jl)
- [ColorSchemes.jl](https://github.com/JuliaGraphics/ColorSchemes.jl)
- [Images.jl](https://github.com/JuliaImages/Images.jl)
- [Clustering.jl](https://github.com/JuliaStats/Clustering.jl)
- [Interpolations.jl](https://github.com/JuliaMath/Interpolations.jl)
[docs-stable-img]: https://img.shields.io/badge/docs-stable%20release-blue.svg
[docs-stable-url]: https://JuliaGraphics.github.io/ColorSchemeTools.jl/stable/
[docs-latest-img]: https://img.shields.io/badge/docs-in_development-orange.svg
[docs-latest-url]: https://JuliaGraphics.github.io/ColorSchemeTools.jl/latest/
[appveyor-img]: https://ci.appveyor.com/api/projects/status/59hherf65c713iaw/branch/master?svg=true
[appveyor-url]: https://ci.appveyor.com/project/cormullion/colorschemetools-jl
[codecov-img]: https://codecov.io/gh/JuliaGraphics/ColorSchemeTools.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/JuliaGraphics/ColorSchemeTools.jl
[ci-img]: https://github.com/JuliaGraphics/ColorSchemeTools.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/JuliaGraphics/ColorSchemeTools.jl/actions?query=workflow%3ACI
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 939 | # Converting images
## Convert image from one scheme to another
It's possible to convert an image using one color scheme to use another.
The function [`convert_to_scheme()`](@ref) returns a new image in which each pixel from the provided image is mapped to its closest matching color in the provided scheme. See ColorSchemes's `getinverse()` function for more details on how this works.
In the following figure, the Julia logo is converted to use a ColorScheme with no black or white:
```julia
using FileIO, ColorSchemes, ColorSchemeTools, Images
img = load("julia-logo-square.png")
img_rgb = RGB.(img) # get rid of alpha channel
convertedimage = convert_to_scheme(ColorSchemes.PiYG_4, img_rgb)
save("original.png", img)
save("converted.png", convertedimage)
```

Notice how the white was matched by the color right at the boundary of the light purple and pale green.
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 8521 | ```@setup drawscheme
using ColorSchemeTools
include(joinpath(dirname(pathof(ColorSchemeTools)), "..", "docs/", "displayschemes.jl"))
#=
this provides defines:
- draw_rgb_levels(cs::ColorScheme, w=800, h=500, filename="/tmp/rgb-levels.svg")
- draw_transparent(cs::ColorScheme, csa::ColorScheme, w=800, h=500, filename="/tmp/transparency-levels.svg")
draw_lightness_swatch(cs::ColorScheme, width = 800, height = 150; name = "")
=#
```
# Equalizing color contrasts
The `equalize()` function equalizes the contrasts between colors of a colorscheme.
!!! note
This function is derived from the work of Peter Kovesi in
[PerceptualColorMaps](https://github.com/peterkovesi/PerceptualColourMaps.jl). You can find the original code there.
It's copied here because Peter has retired from coding, and the package is not being maintained.
In the following example, the first image is the original colorscheme sampled 101 times. The second image shows the colors after they've been passed through `equalize()`.
```julia
cs = ColorScheme([colorant"yellow", colorant"red"])
# sample
origcolors = get(cs, 0:0.01:1)
# return a new colorscheme based on the colors in cs
newcs = equalize(origcolors)
# sample
newcolors = get(newcs, 0:0.01:1)
```
```@example drawscheme
cs = ColorScheme([colorant"yellow", colorant"red"]) # hide
# linear interpolation, not perceptually uniform # hide
origcolors = get(cs, 0:0.01:1) # hide
```
```@example drawscheme
cs = ColorScheme([colorant"yellow", colorant"red"]) # hide
# linear interpolation, not perceptually uniform # hide
origcolors = get(cs, 0:0.01:1) # hide
# generate corrected colormap: # hide
newcs = equalize(origcolors, colormodel=:RGB, sigma=0.0, formula="CIEDE2000", W=[1, 0, 0]) # hide
newcolors = get(newcs, 0:0.01:1) # hide
```
You should be able to see the difference between the two images: the original
colorscheme (top) uses simple linear interpolation, the modified scheme (below)
shows the adjusted scheme, with smoother transitions in the red shades.
# Testing a colorscheme with `sineramp()`
Ideally, for a colorscheme to be effective, the perceptual contrast along the
colors should be constant. Some colorschemes are better than others!
Try testing your favourite colorscheme on the image generated with `sineramp()`. This function generates an array where the values consist of a sine wave superimposed on a ramp function. The amplitude of the sine wave is modulated from its full value at the top
of the array to 0 at the bottom.
When a colorscheme is used to render the array as a color image, we're hoping to see the sine wave uniformly visible across the image from left to right. We also want the contrast level, the distance down the image at which the sine wave remains discernible,
to be uniform across the image. At the very bottom of the image, where
the sine wave amplitude is 0, we just have a linear ramp which simply
reproduces the colors in the colorscheme. Here the underlying data is a
featureless ramp, so we should not perceive any identifiable features
across the bottom of the image.
Here's a comparison between the `jet` and the `rainbow_bgyr_35_85_c72_n256` colorschemes:
```@example
using Images, ColorSchemes, ColorSchemeTools # hide
scheme = ColorSchemes.jet
img = Gray.(sineramp(150, 800, amplitude = 12.5, wavelength=8, p=2))
cimg = zeros(RGB, 150, 800)
for e in eachindex(img)
cimg[e] = get(scheme, img[e])
end
cimg
```
```@example
using Images, ColorSchemes, ColorSchemeTools # hide
scheme = ColorSchemes.rainbow_bgyr_35_85_c72_n256
img = Gray.(sineramp(150, 800, amplitude = 12.5, wavelength=8, p=2))
cimg = zeros(RGB, 150, 800)
for e in eachindex(img)
cimg[e] = get(scheme, img[e])
end
cimg
```
You can hopefully see that the `jet` image is patchy; the `rainbow_bgyr_35_85_c72_n256` shows the sinuous rippling consistently.
# Options for `equalize()`
The `equalize` function's primary use is for the correction of colorschemes.
The perceptual contrast is very much dominated by the contrast in colour lightness
values along the map. This function attempts to equalise the chosen perceptual
contrast measure along a colorscheme by stretching and/or compressing sections
of the colorscheme.
There are limitations to what this function can correct. When applied to some colorschemes such as `jet`, `hsv`, and `cool`, you might see colour discontinuity artifacts, because these colorschemes have segments that are nearly constant in lightness.
However, the function can successfully fix up `hot`, `winter`, `spring` and `autumn`
colorschemes. If you do see colour discontinuities in the resulting colorscheme,
try changing W from [1, 0, 0] to [1, 1, 1], or some intermediate weighting of
[1, 0.5, 0.5], say.
The `equalize()` function takes either a ColorScheme argument or an array of colors. The following keyword arguments are available:
- `colormodel` is `:RGB` or `:LAB` indicating the type of data (use `:RGB` unless the ColorScheme contains LAB color definitions)
- `formula` is "CIE76" or "CIEDE2000"
- `W` is 3-vector of weights to be applied to the lightness, chroma and hue components of the difference equation
- `sigma` is an optional Gaussian smoothing parameter
- `cyclic` is a Boolean flag indicating whether the colormap is cyclic. This affects how smoothing is applied at the end points.
## Formulae
The CIE76 and CIEDE2000 colour difference formulae were developed for
much lower spatial frequencies than we are typically interested in.
Neither is ideal for our application. The main thing to note is that
at *fine* spatial frequencies perceptual contrast is dominated by
*lightness* difference, chroma and hue are relatively unimportant.
Neither CIE76 or CIEDE2000 difference measures are ideal
for the high spatial frequencies that we are interested in. Empirically I
find that CIEDE2000 seems to give slightly better results on colormaps where
there is a significant lightness gradient (this applies to most colormaps).
In this case you would be using a weighting vector W = [1, 0, 0]. For
isoluminant, or low lightness gradient colormaps where one is using a
weighting vector W = [1, 1, 1] CIE76 should be used as the CIEDE2000 chroma
correction is inappropriate for the spatial frequencies we are interested in.
# The Weighting vector W
The CIEDE2000 colour difference formula incorporates the
scaling parameters kL, kC, kH in the demonimator of the lightness, chroma, and
hue difference components respectively. The 3 components of W correspond to
the reciprocal of these 3 parameters. (I do not know why they chose to put
kL, kC, kH in the denominator. If you wanted to ignore, say, the chroma
component you would have to set kC to Inf, rather than setting W[2] to 0 which
seems more sensible to me). If you are using CIE76 then W[2] and W[3] are
applied to the differences in a and b. In this case you should ensure W[2] =
W[3].
In general, for the spatial frequencies of interest to us, lightness
differences are overwhelmingly more important than chroma or hue and W should
be set to [1, 0, 0]
For colormaps with a significant range of lightness, use:
- formula = "CIE76" or "CIEDE2000"
- W = [1, 0, 0] Only correct for lightness
- sigma = 5 - 7
For isoluminant or low lightness gradient colormaps use:
- formula = "CIE76"
- W = [1, 1, 1] Correct for colour and lightness
- sigma = 5 - 7
# Smoothing parameter sigma
The output will have lightness values of constant slope magnitude.
However, it is possible that the sign of the slope may change, for example at
the midpoint of a bilateral colorscheme. This slope discontinuity of lightness
can induce a false apparent feature in the colorscheme. A smaller effect is
also occurs for slope discontinuities in a and b. For such colorschemes it can
be useful to introduce a small amount of smoothing of the Lab values to soften
the transition of sign in the slope to remove this apparent feature. However
in doing this one creates a small region of suppressed luminance contrast in
the colorscheme which induces a 'blind spot' that compromises the visibility of
features should they fall in that data range. Accordingly the smoothing
should be kept to a minimum. A value of sigma in the range 5 to 7 in a 256
element colorscheme seems about right. As a guideline sigma should not be more
than about 1/25 of the number of entries in the colormap, preferably less.
Reference: Peter Kovesi. Good ColorMaps: How to Design Them. [arXiv:1509.03700 [cs.GR] 2015](https://arxiv.org/abs/1509.03700)
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 83 | # Index
```@autodocs
Modules = [ColorSchemeTools]
Order = [:function, :type]
``` | ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 991 | # Introduction to ColorSchemeTools
This package provides tools for working with color schemes - gradients and color maps - and is designed to work with ColorSchemes.jl.
You can extract colorschemes from images, and replace an image's color scheme with another. There are also functions for creating color schemes from pre-defined lists and Julia functions.
This package relies on:
- [Colors.jl](https://github.com/JuliaGraphics/Colors.jl)
- [ColorSchemes.jl](https://github.com/JuliaGraphics/ColorSchemes.jl)
- [Images.jl](https://github.com/JuliaImages/Images.jl)
- [Clustering.jl](https://github.com/JuliaStats/Clustering.jl)
- [Interpolations.jl](https://github.com/JuliaMath/Interpolations.jl)
and you might need image-capable Julia packages installed, depending on the OS.
## Installation and basic usage
Install the package as follows:
```
] add ColorSchemeTools
```
To use it:
```
using ColorSchemeTools
```
Original version by [cormullion](https://github.com/cormullion).
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 10033 | ```@setup drawscheme
using ColorSchemeTools
include(joinpath(dirname(pathof(ColorSchemeTools)), "..", "docs/", "displayschemes.jl"))
#=
ColorSchemeTools/docs/displayschemes.jl defines:
- draw_rgb_levels(cs::ColorScheme, w=800, h=500, filename="/tmp/rgb-levels.svg")
- draw_transparent(cs::ColorScheme, csa::ColorScheme, w=800, h=500, filename="/tmp/transparency-levels.svg")
=#
```
# Making colorschemes
!!! note
The diagrams in this section show: the colors of a colorscheme as individual swatches along the top; the changing RGBA curves in the middle; and a continuously-sampled gradient below.
## Making simple colorschemes
Colors.jl provides a method for `range()` that accepts colorants:
```@example drawscheme
using ColorSchemes, Colors # hide
cs = ColorScheme(range(RGB(1, 0, 0), stop = colorant"blue", length=15),
"gradient", "red to blue 15")
draw_rgb_levels(cs, 800, 200, :svg) # hide
```
You can make a new colorscheme by building an array of colors.
The ColorSchemeTools function [`make_colorscheme()`](@ref) lets you build more elaborate colorschemes. You can supply the color specifications using different methods, depending on the arguments you supply:
- a list of colors and a number specifying the length
- a dictionary of linear segments
- an 'indexed list' of RGB values
- a group of Julia functions that generate values between 0 and 1 for the RGB levels
## List of colors
Given a list of colors, use [`make_colorscheme()`](@ref) to create a new colorscheme with `n` steps.
For example, given an array of various colorants:
```
roygbiv = [
colorant"red",
colorant"orange",
colorant"yellow",
colorant"green",
colorant"blue",
colorant"indigo",
colorant"violet"
]
```
you can use `make_colorscheme(cols, 10)` to create a colorscheme with 10 steps:
```@example drawscheme
roygbiv = [ # hide
colorant"red", # hide
colorant"orange", # hide
colorant"yellow", # hide
colorant"green", # hide
colorant"blue", # hide
colorant"indigo", # hide
colorant"violet" # hide
] # hide
scheme = make_colorscheme(roygbiv, 10)
draw_rgb_levels(scheme, 800, 200, :svg) # hide
```
If you increase the number of steps, the interpolations are smoother. Here it is with 200 steps (shown in the top bar):
```@example drawscheme
roygbiv = [ # hide
colorant"red", # hide
colorant"orange", # hide
colorant"yellow", # hide
colorant"green", # hide
colorant"blue", # hide
colorant"indigo", # hide
colorant"violet" # hide
] # hide
scheme = make_colorscheme(roygbiv, 200)
draw_rgb_levels(scheme, 800, 200, :svg)
```
You can supply the colors in any format, as long as it's a Colorant:
```@example drawscheme
cols = Any[
RGB(0, 0, 1),
Gray(0.5),
HSV(50., 0.7, 1.),
Gray(0.4),
LCHab(54, 105, 40),
HSV(285., 0.9, 0.8),
colorant"#FFEEFF",
colorant"hotpink",
]
scheme = make_colorscheme(cols, 8)
draw_rgb_levels(scheme, 800, 200, :svg)
```
The `Any` array was necessary only because of the presence of the `Gray(0.5)` element. If all the elements are colorants, you can use `[]` or `Colorant[]`.
## Linearly-segmented colors
A linearly-segmented color dictionary looks like this:
```julia
cdict = Dict(:red => ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:green => ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:blue => ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)))
```
This specifies that red increases from 0 to 1 over the bottom half, green does the same over the middle half, and blue over the top half.
The triplets _aren't_ RGB values... For each channel, the first number in each tuple is a point on the 0 to 1 brightness scale; these points should gradually increase. The second and third values determine the intensity values at that point.
The change of color between point `p1` and `p2` is defined by `b` and `c`:
```julia
:red => (
...,
(p1, a, b),
(p2, c, d),
...
)
```
If `a` and `b` (or `c` and `d`) aren't the same, the color will abruptly jump. Notice that the very first `a` and the very last `d` aren't used.
To create a new colorscheme from a suitable dictionary in this format, run `make_colorscheme()`.
```@example drawscheme
cdict = Dict(:red => ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:green => ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
:blue => ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))) # hide
scheme = make_colorscheme(cdict)
draw_rgb_levels(scheme, 800, 200, :svg) # hide
```
## Indexed-list color schemes
The data to define an 'indexed list' colorscheme looks like this:
```julia
terrain = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0))
)
```
The first item of each element is the location between 0 and 1, the second specifies the RGB values at that point.
The `make_colorscheme(indexedlist)` function makes a new colorscheme from such an indexed list.
Use the `length` keyword to specify how many colors are used in the colorscheme.
For example:
```@example drawscheme
terrain_data = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0)))
terrain = make_colorscheme(terrain_data, length = 50)
draw_rgb_levels(terrain, 800, 200, :svg)
```
## Functional color schemes
The colors in a ‘functional’ colorscheme are produced by three functions that calculate the color values at each point on the colorscheme.
The [`make_colorscheme()`](@ref) function applies the first supplied function at each point on the colorscheme for the red values, the second function for the green values, and the third for the blue. You can use defined functions or supply anonymous ones.
Values produced by the functions are clamped to 0.0 and 1.0 before they’re converted to RGB values.
### Examples
The first example returns a smooth black to white gradient, because the `identity()` function gives back as good as it gets.
```@example drawscheme
fscheme = make_colorscheme(identity, identity, identity)
draw_rgb_levels(fscheme, 800, 200, :svg)
```
The next example uses the `sin()` function on values from 0 to π to control the red, and the `cos()` function from 0 to π to control the blue. The green channel is flat-lined.
```@example drawscheme
fscheme = make_colorscheme(n -> sin(n*π), n -> 0, n -> cos(n*π))
draw_rgb_levels(fscheme, 800, 200, :svg)
```
You can generate stepped gradients by controlling the numbers. Here, each point on the scheme is nudged to the nearest multiple of 0.1.
```@example drawscheme
fscheme = make_colorscheme(
n -> round(n, digits=1),
n -> round(n, digits=1),
n -> round(n, digits=1), length=10)
draw_rgb_levels(fscheme, 800, 200, :svg)
```
The next example sinusoidally sends the red channel from black to red and back again.
```@example drawscheme
fscheme = make_colorscheme(n -> sin(n * π), n -> 0, n -> 0)
draw_rgb_levels(fscheme, 800, 200, :svg)
```
The next example produces a striped colorscheme as the rippling sine waves continually change phase:
```@example drawscheme
ripple7(n) = sin(π * 7n)
ripple13(n) = sin(π * 13n)
ripple17(n) = sin(π * 17n)
fscheme = make_colorscheme(ripple7, ripple13, ripple17, length=80)
draw_rgb_levels(fscheme, 800, 200, :svg)
```
If you're creating a scheme by generating LCHab colors, your functions should convert values between 0 and 1 to values between 0 and 100 (luminance and chroma) or 0 to 360 (hue).
```@example drawscheme
f1(n) = 180 + 180sin(2π * n)
f2(n) = 50 + 20(0.5 - abs(n - 0.5))
fscheme = make_colorscheme(n -> 50, f2, f1,
length=80,
model=:LCHab)
draw_rgb_levels(fscheme, 800, 200, :svg)
```
## Alpha opacity colorschemes
Usually, colorschemes are RGB values with no alpha values.
Use [`add_alpha()`](@ref) to add alpha opacity values to the colors in the colorschemes.
In the illustrations, the top row shows the original colorscheme, the bottom row shows the modified colorscheme drawn over a checkerboard pattern to show the alpha opacity.
You can make a new colorscheme where every color now has a specific alpha opacity value:
```@example drawscheme
cs = ColorSchemes.PRGn_10
csa = add_alpha(cs, 0.8)
draw_transparent(cs, csa, 800, 200, :svg) # hide
```
```@example drawscheme
cs = ColorSchemes.PRGn_10 # hide
csa = add_alpha(cs, 0.8) # hide
draw_rgb_levels(csa, 800, 200, :svg) # hide
```
You can specify alpha values using a range:
```@example drawscheme
cs = ColorSchemes.lisbon10
csa = add_alpha(cs, 0.3:0.1:1.0)
draw_transparent(cs, csa, 800, 200, :svg) # hide
```
```@example drawscheme
cs = ColorSchemes.lisbon10 # hide
csa = add_alpha(cs, 0.3:0.1:1.0) # hide
draw_rgb_levels(csa, 800, 200, :svg) # hide
```
Or you can specify alpha values using a function that returns a value for every value between 0 and 1. In the next example the opacity varies from 1.0 to 0.0 and back to 1.0 again, as the colorscheme index goes from 0 to 1; at point 0.5, `abs(cos(0.5 * π))` is 0.0, so the colorscheme is completely transparent at that point.
```@example drawscheme
cs = ColorSchemes.PuOr
csa = add_alpha(cs, (n) -> abs(cos(n * π)))
draw_transparent(cs, csa, 700, 200, :svg)
```
```@example drawscheme
cs = ColorSchemes.PuOr # hide
csa = add_alpha(cs, (n) -> abs(cos(n * π))) # hide
draw_rgb_levels(csa, 800, 200, :svg) # hide
```
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 1567 | # Saving colorschemes
## Saving colorschemes as images
Sometimes you want to save a colorscheme, which is usually just a pixel thick, as a swatch or image. You can do this with [`colorscheme_to_image()`](@ref).
The second argument is the number of rows.
The third argument is the number of times each pixel is repeated in the row.
The function returns an image which you can save using FileIO's `save()`:
```julia
using FileIO, ColorSchemeTools, Images, Colors
# 20 pixels for each color, 150 rows
img = colorscheme_to_image(ColorSchemes.vermeer, 150, 20)
save("/tmp/cs_vermeer-150-20.png", img)
```

The [`image_to_swatch()`](@ref) function (a shortcut) extracts a `n`-color scheme from a supplied image and saves it as a swatch in a PNG.
```julia
image_to_swatch("/tmp/input.png", 10, "/tmp/output.png")
```
## Saving colorschemes to text files
You can save a ColorScheme as a (Julia) text file with the imaginatively-titled [`colorscheme_to_text()`](@ref) function.
Remember to make the name a Julia-friendly one, because it may eventually become a symbol and a dictionary key if the Julia file is `include`-d.
```julia
colorscheme_to_text(ColorSchemes.vermeer,
"the_lost_vermeer", # name
"/tmp/the_lost_vermeer.jl", # filename
category="dutch painters", # category
notes="it's not really lost" # notes
)
```
Of course, if you just want the color definitions, you can simply type:
```julia
map(println, ColorSchemes.vermeer.colors);
```
| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 1.6.0 | 4ff1579f2c7c5f52d741d519b4b93f5ff7bb040e | docs | 3383 | ```@meta
DocTestSetup = quote
include("displayschemes.jl")
using ColorSchemes, ColorSchemeTools, Colors
end
```
## Extracting colorschemes from images
You can extract a colorscheme from an image. For example, here's an image of a famous painting:

Use the [`extract()`](@ref) function to create a color scheme from the original image:
```
using ColorSchemeTools
monalisa = extract("monalisa.jpg", 10, 15, 0.01; shrink=2)
```
which in this example creates a 10-color ColorScheme object (using 15 iterations and with a tolerance of 0.01; the image can be reduced in size, here by 2, before processing, to save time).

```
ColorSchemes.ColorScheme(ColorTypes.RGB{Float64}[
RGB{Float64}(0.0406901,0.0412985,0.0423865),
RGB{Float64}(0.823493,0.611246,0.234261),
RGB{Float64}(0.374688,0.363066,0.182004),
RGB{Float64}(0.262235,0.239368,0.110915),
RGB{Float64}(0.614806,0.428448,0.112495),
RGB{Float64}(0.139384,0.124466,0.0715472),
RGB{Float64}(0.627381,0.597513,0.340734),
RGB{Float64}(0.955276,0.775304,0.37135),
RGB{Float64}(0.497517,0.4913,0.269587),
RGB{Float64}(0.880421,0.851357,0.538013),
RGB{Float64}(0.738879,0.709218,0.441082)
], "", "")
```
(Extracting color schemes from images may require you to install image importing and exporting abilities. These are platform-specific.)
## Sorting color schemes
Use [`sortcolorscheme()`](@ref) to sort a scheme non-destructively in the LUV color space:
```julia
using ColorSchemes
sortcolorscheme(ColorSchemes.leonardo)
sortcolorscheme(ColorSchemes.leonardo, rev=true)
```
The default is to sort colors by their LUV luminance value, but you could try specifying the `:u` or `:v` LUV fields instead (sorting colors is another problem domain not really addressed in this package...):
```julia
sortcolorscheme(ColorSchemes.leonardo, :u)
```
## Weighted colorschemes
Sometimes an image is dominated by some colors with others occurring less frequently. For example, there may be much more brown than yellow in a particular image. A *weighted* colorscheme derived from this image can reflect this. You can extract both a set of colors and a set of numerical values or weights that indicate the relative proportions of colors in the image.
```
cs, wts = extract_weighted_colors("monalisa.jpg", 10, 15, 0.01; shrink=2)
```
The ColorScheme is now in `cs`, and `wts` holds the various weights of each color:
```julia-term
wts
10-element Array{Float64,1}:
0.0521126446851636
0.20025391828582884
0.08954703056671294
0.09603605342678319
0.09507086696018234
0.119987526821047
0.08042973071297582
0.08863381567908292
0.08599068966285295
0.09193772319937041
```
With the ColorScheme and the weights, you can make a new color scheme in which the more common colors take up more space in the scheme. Use [`colorscheme_weighted()`](@ref):
```julia
len = 50
colorscheme_weighted(cs, wts, len)
```
Or in one go:
```julia
colorscheme_weighted(extract_weighted_colors("monalisa.jpg" # ...
```
Compare the weighted and unweighted versions of schemes extracted from the Hokusai image "The Great Wave":


| ColorSchemeTools | https://github.com/JuliaGraphics/ColorSchemeTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 255 | using Documenter, WorldOceanAtlasTools
# Build the HTML documentation site with Documenter.jl.
makedocs(
    sitename="WorldOceanAtlasTools Documentation",
    # options
    modules = [WorldOceanAtlasTools]
)
# Deploy the built site to GitHub Pages; push_preview=true also publishes
# preview builds for pull requests.
deploydocs(
    repo = "github.com/briochemc/WorldOceanAtlasTools.jl.git",
    push_preview = true
) | WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 309 | module WorldOceanAtlasTools
# Package dependencies.
using DataDeps
using Downloads
using NCDatasets
using DataFrames
using Match           # provides the @match macro, used in citations.jl
using Statistics
using StatsBase
using Unitful
using OceanGrids
using NearestNeighbors

# Source files, included in this order.
include("names.jl")               # NOTE(review): presumably defines my_varname/my_product_year used by citations.jl — confirm
include("citations.jl")           # citation strings for WOA/WOD data products
include("convert_to_Unitful.jl")
include("functions.jl")

end # module
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 4927 | function citation(tracer::String ; product_year=2018)
    # Canonicalize the product year ("18", "23", ...) and dispatch on the
    # canonical tracer name to the citation of the matching WOA volume.
    yr_str = my_product_year(product_year)
    @match my_varname(tracer) begin
        "DSi" || "DIP" || "DIN" => citation_Nutrients(yr_str)
        "Temp" => citation_Temperature(yr_str)
        "Salt" => citation_Salinity(yr_str)
        "O2" || "O2sat" || "AOU" => citation_Oxygen(yr_str)
        # Density/conductivity have no dedicated volume: cite the database itself
        "Dens" || "Cond" => citation(product_year)
        _ => error("Not sure what you are trying to cite.")
    end
end
# Citation of the World Ocean Database itself (per product year).
citation(product_year::Int) = @match my_product_year(product_year) begin
    "13" => "Boyer, T. P. et al. (2013): World Ocean Database 2013, NOAA Atlas NESDIS 72, S. Levitus, Ed., A. Mishonov, Technical Ed.; Silver Spring, MD, 209 pp., doi:10.7289/V5NZ85MT"
    "18" => "Boyer, T. P. et al. (2018): World Ocean Database 2018. A. V. Mishonov, Technical Editor, NOAA Atlas NESDIS 87."
    "23" => "Reagan, J. R. et al. (2024). World Ocean Atlas 2023. NOAA National Centers for Environmental Information. Dataset: NCEI Accession 0270533."
    _ => "I could not find a citation for WOD$(my_product_year(product_year)). Add it if you know where to find it!"
end
# Citation of WOA Volume 1 (Temperature), per product year.
citation_Temperature(product_year) = @match my_product_year(product_year) begin
    "09" => "Locarnini et al. (2010), World Ocean Atlas 2009, Volume 1: Temperature. S. Levitus, Ed. NOAA Atlas NESDIS 68, U.S. Government Printing Office, Washington, D.C., 184 pp."
    "13" => "Locarnini et al. (2013), World Ocean Atlas 2013, Volume 1: Temperature. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 73, 40 pp."
    "18" => "Locarnini et al. (2018), World Ocean Atlas 2018, Volume 1: Temperature. A. Mishonov Technical Ed.; in preparation."
    "23" => "Locarnini et al. (2023), World Ocean Atlas 2023, Volume 1: Temperature. A. Mishonov Technical Ed.; NOAA Atlas NESDIS 89, 52 pp, https://doi.org/10.25923/54bh-1613"
    _ => error("No citation for WOA$(my_product_year(product_year)) Temperature. Add it if it should be there!")
end
# Citation of WOA Volume 2 (Salinity), per product year.
citation_Salinity(product_year) = @match my_product_year(product_year) begin
    "09" => "Antonov et al. (2010), World Ocean Atlas 2009, Volume 2: Salinity. S. Levitus, Ed. NOAA Atlas NESDIS 69, U.S. Government Printing Office, Washington, D.C., 184 pp."
    "13" => "Zweng et al. (2013), World Ocean Atlas 2013, Volume 2: Salinity. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 74, 39 pp."
    "18" => "Zweng et al. (2018), World Ocean Atlas 2018, Volume 2: Salinity. A. Mishonov Technical Ed.; in preparation."
    "23" => "Reagan, et al. (2023), World Ocean Atlas 2023, Volume 2: Salinity. A. Mishonov Technical Ed.; NOAA Atlas NESDIS 90, 51pp. https://doi.org/10.25923/70qt-9574"
    _ => error("No citation for WOA$(my_product_year(product_year)) Salinity. Add it if it should be there!")
end
# Citation of WOA Volume 3 (Oxygen / AOU / O2 saturation), per product year.
citation_Oxygen(product_year) = @match my_product_year(product_year) begin
    "09" => "Garcia et al. (2010), World Ocean Atlas 2009, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. S. Levitus, Ed. NOAA Atlas NESDIS 70, U.S. Government Printing Office, Washington, D.C., 344 pp."
    "13" => "Garcia et al. (2014), World Ocean Atlas 2013, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 75, 27 pp."
    "18" => "Garcia et al. (2018), World Ocean Atlas 2018, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. A. Mishonov Technical Ed.; in preparation."
    "23" => "Garcia et al. (2023), World Ocean Atlas 2023, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. A. Mishonov Technical Ed.; NOAA Atlas NESDIS 91, 34pp, https://doi.org/10.25923/rb67-ns53"
    _ => error("No citation for WOA$(my_product_year(product_year)) AOU. Add it if it should be there!")
end
# Citation of WOA Volume 4 (Nutrients), per product year.
citation_Nutrients(product_year) = @match my_product_year(product_year) begin
    "09" => "Garcia et al. (2010), World Ocean Atlas 2009, Volume 4: Nutrients (phosphate, nitrate, silicate). S. Levitus, Ed. NOAA Atlas NESDIS 71, U.S. Government Printing Office, Washington, D.C., 398 pp."
    "13" => "Garcia et al. (2014), World Ocean Atlas 2013, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate, silicate). S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 76, 25 pp."
    "18" => "Garcia et al. (2018), World Ocean Atlas 2018, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; in preparation."
    "23" => "Garcia et al. (2023), World Ocean Atlas 2023, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; NOAA Atlas NESDIS 92, 34pp, https://doi.org/10.25923/39qw-7j08"
    _ => error("No citation for WOA$(my_product_year(product_year)) Nutrients. Add it if it should be there!")
end
# TODO Add other citations for other years!
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 534 | """
    convert_to_Unitful(v)

Converts the string from WOA's `units` attribute to a Unitful.jl unit.

Returns `nothing` when the unit string is not recognized.
"""
convert_to_Unitful(v::String) = @match v begin
    "micromoles_per_liter" => u"μmol/l"
    "micromoles_per_kilogram" => u"μmol/kg"
    "percent" => u"percent"
    "meters" => u"m"
    "degrees_north" => u"°"
    "degrees_east" => u"°"
    "degrees_celsius" => u"°C"
    _ => nothing
end
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 9091 |
"""
    get_3D_field(tracer; product_year=2018, period=0, resolution=1, field="an")

Downloads and returns the 3D field of a tracer from the World Ocean Atlas.

Tracers are either "phosphate", "nitrate", or "silicate".
Available product years are 2009, 2013, and 2018 (default: 2018).
Resolution is either "5°", "1°", or "0.25°" (default: 1°).
Fields are "mean" or "an" (for objectively analyzed climatology).
(default = "an".)
Note that WOA's nomenclature should work too, e.g.,
"p" for "phosphate", "mn" for "mean", and so on.
"""
function get_3D_field(tracer; product_year=2018, period=0, resolution=1, field="an")
    println("Getting the 3D field of WOA$(my_product_year(product_year)) $(my_averaging_period(period)) $(WOA_path_varname(tracer)) $(surface_grid_size(resolution)) data")
    ds = WOA_Dataset(tracer; product_year, period, resolution)
    try
        # Drop the (singleton) time dimension
        field3D = ds[WOA_varname(tracer, field)][:, :, :, 1]
        println(" Rearranging data")
        # WOA stores (lon, lat, depth); swap the first two dims to (lat, lon, depth)
        return permutedims(field3D, [2 1 3])
    finally
        close(ds) # do not leak the NetCDF file handle
    end
end
# Read the `field` statistic of `tracer` from an open WOA `Dataset` and return
# `(field3D, lat, lon, depth)` with the field permuted to (lat, lon, depth) and
# longitudes rearranged to the 0–360 convention.
function get_gridded_3D_field(ds, tracer, field)
    println(" Reading NetCDF file")
    field3D = ds[WOA_varname(tracer, field)][:, :, :, 1]
    lon = ds["lon"][:] .|> Float64
    lat = ds["lat"][:] .|> Float64
    depth = ds["depth"][:] .|> Float64
    println(" Rearranging data")
    # Reorder the variable index order (lat <-> lon from WOA to OCIM)
    field3D = permutedims(field3D, [2 1 3])
    # Rearrange longitude range (from WOA data, which is -180:180, to 0:360)
    lon = mod.(lon, 360)
    lon_reordering = sortperm(lon)
    lon = lon[lon_reordering]
    # RHS allocates a reordered copy first, so the in-place write is safe
    field3D .= field3D[:, lon_reordering, :]
    return field3D, lat, lon, depth
end
# Convenience method: download/open the WOA file for `tracer`, read the field,
# and close the file automatically via the `Dataset` do-block.
function get_gridded_3D_field(tracer, field; kwargs...)
    # `tracer` is a positional argument of `WOAfile`; passing it as a keyword
    # (as the previous `WOAfile(tracer=tracer; kwargs...)` did) raises a
    # MethodError because no zero-positional-argument method exists.
    return Dataset(WOAfile(tracer; kwargs...), "r") do ds
        get_gridded_3D_field(ds, tracer, field)
    end
end
"""
    mean_std_and_number_obs(ds, tracer)

Returns a `DataFrame` containing the following columns:
`:Latitude`, `:Longitude`, `:Depth`, `:Mean`, `:Std`, and `:n_obs`.
"""
function mean_std_and_number_obs(ds, tracer)
    println(" Reading NetCDF file")
    # Longitudes converted to the 0–360 convention
    lon = mod.(ds["lon"][:] .|> Float64, 360)
    lat = ds["lat"][:] .|> Float64
    depth = ds["depth"][:] .|> Float64
    # Statistical mean, standard deviation, and number of observations
    μ3D = ds[WOA_varname(tracer, "mn")][:, :, :, 1]
    σ3D = ds[WOA_varname(tracer, "sd")][:, :, :, 1]
    nobs3D = ds[WOA_varname(tracer, "dd")][:, :, :, 1]
    println(" Filtering missing data")
    CI = findall(@. !ismissing(μ3D) & !ismissing(nobs3D) & !iszero(nobs3D))
    # Arrays here are still in WOA (lon, lat, depth) order, hence I[1] = lon
    lon1D = lon[map(x -> x.I[1], CI)]
    lat1D = lat[map(x -> x.I[2], CI)]
    depth1D = depth[map(x -> x.I[3], CI)]
    μ1D = μ3D[CI] .|> Float64
    σ1D = σ3D[CI] .|> Float64
    nobs1D = nobs3D[CI] .|> Int64
    return DataFrame(
        :Latitude => lat1D,
        :Longitude => lon1D,
        :Depth => depth1D,
        :Mean => μ1D,
        :Std => σ1D,
        :n_obs => nobs1D,
    )
end
# Flatten a gridded (lat, lon, depth) field into vectors of values and
# coordinates, dropping missing entries and zeros. Also returns the Cartesian
# indices `CI` of the kept entries.
function filter_gridded_3D_field(field3D, lat, lon, depth)
    # Find where there is data for both mean and std
    println(" Filtering data")
    CI = findall(x -> !ismissing(x) && !iszero(x), field3D) # filter out fill-values and 0's
    fieldvec = field3D[CI]
    # `field3D` has already been permuted to (lat, lon, depth) upstream
    latvec = lat[map(x -> x.I[1], CI)]
    lonvec = lon[map(x -> x.I[2], CI)]
    depthvec = depth[map(x -> x.I[3], CI)]
    return fieldvec, latvec, lonvec, depthvec, CI
end
# Unit of the WOA variable as a Unitful unit (`nothing` if unrecognized).
get_unit(ds, tracer, field) = convert_to_Unitful(ds[WOA_varname(tracer, field)].attrib["units"])
# Bin WOA observations onto `grid` and return (mean, variance) arrays, with
# empty boxes set to μ = 0, σ² = ∞ and a floor imposed on σ².
function mean_and_variance_gridded_3d_field(grid::OceanGrid, field3D, lat, lon, depth)
    χ_3D, σ²_3D, n_3D = raw_mean_and_variance_gridded_3d_field(grid, field3D, lat, lon, depth)
    # Enforce μ = 0 and σ = ∞ where no observations
    # (Instead of NaNs)
    println(" Setting μ = 0 and σ² = ∞ where no obs")
    χ_3D[findall(n_3D .== 0)] .= 0.0
    σ²_3D[findall(n_3D .== 0)] .= Inf
    # Floor the variance at 1e-4 of the squared (obs-weighted) mean so that
    # no grid box is given an unrealistically small uncertainty
    println(" Setting a realistic minimum for σ²")
    meanχ = mean(χ_3D, weights(n_3D))
    σ²_3D .= max.(σ²_3D, 1e-4meanχ^2)
    return χ_3D, σ²_3D
end
# Same as above but without post-processing: returns (mean, variance, count)
# per grid box; boxes with no observations contain NaN (0/0).
function raw_mean_and_variance_gridded_3d_field(grid::OceanGrid, field3D, lat, lon, depth)
    fieldvec, latvec, lonvec, depthvec, CI = filter_gridded_3D_field(field3D, lat, lon, depth)
    println(" Averaging data over each grid box")
    χ_3D = zeros(size(grid))
    σ²_3D = zeros(size(grid))
    n_3D = zeros(size(grid))
    # Use NearestNeighbors to bin into OceanGrid
    gridbox_centers = [ustrip.(vec(grid.lat_3D)) ustrip.(vec(grid.lon_3D)) ustrip.(vec(grid.depth_3D))] # TODO Maybe add option for using iwet instead of full vec
    # KDTree expects points as columns
    gridbox_centers = permutedims(gridbox_centers, [2, 1])
    kdtree = KDTree(gridbox_centers)
    for i in eachindex(CI)
        # Nearest grid-box center to this observation (lat, lon, depth)
        idx = knn(kdtree, [latvec[i], lonvec[i], depthvec[i]], 1, true)[1][1]
        χ_3D[idx] += fieldvec[i] # μ = Σᵢ μᵢ / n
        σ²_3D[idx] += fieldvec[i]^2 # σ² = Σᵢ μᵢ² / n - μ²
        n_3D[idx] += 1
    end
    χ_3D .= χ_3D ./ n_3D # μ = Σᵢ μᵢ / n
    σ²_3D .= σ²_3D ./ n_3D .- χ_3D.^2 # σ² = Σᵢ μᵢ² / n - μ²
    return χ_3D, σ²_3D, n_3D
end
# Rescale mean and variance in place from the WOA file's unit to the preferred
# SI unit (variance scales with the square of the unit conversion factor).
function convert_to_SI_unit!(χ_3D, σ²_3D, ds, tracer, field)
    # Convert to SI units
    χ_unit = get_unit(ds, tracer, field)
    χ_3D .*= ustrip(upreferred(1.0χ_unit))
    σ²_3D .*= ustrip(upreferred(1.0χ_unit^2))
end
"""
    fit_to_grid(grid, tracer; product_year=2018, period=0, resolution=1, field="an")

Returns `χ_3D`, `σ²_3D` of "regridded" WOA data using a nearest neighbor approach.
Values are converted to SI units before being returned.
"""
function fit_to_grid(grid::OceanGrid, tracer; product_year=2018, period=0, resolution=1, field="an")
    ds = WOA_Dataset(tracer; product_year, period, resolution)
    try
        field3D, lat, lon, depth = get_gridded_3D_field(ds, tracer, field)
        χ_3D, σ²_3D = mean_and_variance_gridded_3d_field(grid, field3D, lat, lon, depth)
        convert_to_SI_unit!(χ_3D, σ²_3D, ds, tracer, field)
        return χ_3D, σ²_3D
    finally
        close(ds) # do not leak the NetCDF file handle
    end
end
# Like `fit_to_grid` but without post-processing: returns the raw per-box mean
# `χ_3D` (in SI units) and the per-box observation count `n_3D`.
function raw_to_grid(grid::OceanGrid, tracer; product_year=2018, period=0, resolution=1, field="an")
    ds = WOA_Dataset(tracer; product_year, period, resolution)
    try
        field3D, lat, lon, depth = get_gridded_3D_field(ds, tracer, field)
        χ_3D, σ²_3D, n_3D = raw_mean_and_variance_gridded_3d_field(grid, field3D, lat, lon, depth)
        convert_to_SI_unit!(χ_3D, σ²_3D, ds, tracer, field)
        return χ_3D, n_3D
    finally
        close(ds) # do not leak the NetCDF file handle
    end
end
#=========================================
observations functions return a DataFrame
=========================================#
# Build a DataFrame of non-fill-value observations of `tracer` from an open
# WOA `Dataset`, with one column per requested metadata key plus the tracer
# values (with Unitful units attached).
function observations(ds::Dataset, tracer::String; metadatakeys=("lat", "lon", "depth"))
    var, v, ikeep = indices_and_var(ds, tracer)
    u = _unit(var)
    # Translate user-facing keys ("latitude", ...) to WOA variable names
    WOAmetadatakeys = varname.(metadatakeys)
    metadata = (metadatakeyvaluepair(ds[k], ikeep) for k in WOAmetadatakeys)
    df = DataFrame(metadata..., Symbol(tracer)=>float.(view(v, ikeep))*u)
    return df
end
"""
    observations(tracer::String; metadatakeys=("lat", "lon", "depth"))

Returns observations of `tracer` with its metadata.

### Example

```
obs = observations("po4")
```
"""
function observations(tracer::String; metadatakeys=("lat", "lon", "depth"), kwargs...)
    return Dataset(WOAfile(tracer; kwargs...), "r") do ds
        observations(ds, tracer; metadatakeys)
    end
end
# Return the "mn" (statistical mean) variable, its raw values, and the indices
# of entries that are not the fill value.
function indices_and_var(ds::Dataset, tracer::String)
    var = ds[WOA_varname(tracer, "mn")]
    FV = _fillvalue(var)
    v = var.var[:,:,:,1]
    ikeep = findall(v .≠ FV)
    return var, v, ikeep
end
# Unit of a NetCDF variable (falls back to `nothing` if the attribute is absent
# or unrecognized by `convert_to_Unitful`).
_unit(v) = convert_to_Unitful(get(v.attrib, "units", "nothing"))
_fillvalue(v) = get(v.attrib, "_FillValue", NaN)
# Map a coordinate variable to a `Symbol => values` pair for the kept indices.
# Note: indices follow WOA (lon, lat, depth) storage order, hence I[1] = lon.
metadatakeyvaluepair(v, idx) = @match name(v) begin
    "lon" => (:lon => float.(v.var[:][[i.I[1] for i in idx]]))
    "lat" => (:lat => float.(v.var[:][[i.I[2] for i in idx]]))
    "depth" => (:depth => float.(v.var[:][[i.I[3] for i in idx]]) * u"m")
end
#==================================
Helper functions
==================================#
"""
    WOAfile(tracer; product_year=2018, period=0, resolution="1")

Registers the WOA data with DataDeps (downloading on first use) and returns
the local path of the corresponding NetCDF file. Warns with the citation of
the data product being used.
"""
function WOAfile(tracer; product_year=2018, period=0, resolution="1")
    println("Registering World Ocean Atlas data with DataDeps")
    # Fixed: the interpolation previously had a stray ")" that was printed
    # as literal text at the end of the citation.
    @warn """You are about to use World Ocean Atlas data $(my_product_year(product_year)).
    Please cite the following corresponding reference(s):
    $(citation(tracer; product_year))
    """
    register_WOA(tracer; product_year, period, resolution)
    return @datadep_str string(my_DataDeps_name(tracer; product_year, period, resolution),
                               "/",
                               WOA_NetCDF_filename(tracer; product_year, period, resolution))
end
# Open (downloading if necessary) the WOA NetCDF file for `tracer`.
# Returns an open `Dataset`; the caller is responsible for closing it.
function WOA_Dataset(tracer; product_year=2018, period=0, resolution=1)
    Dataset(WOAfile(tracer; product_year, period, resolution))
end
# Delete the locally cached DataDeps copy of the given WOA file.
function remove_DataDep(tracer; product_year=2018, period=0, resolution=1)
    println("Removing WOA$(my_product_year(product_year)) $(my_averaging_period(period)) $(WOA_path_varname(tracer)) $(surface_grid_size(resolution)) data")
    nc_file = @datadep_str string(my_DataDeps_name(tracer; product_year, period, resolution), "/", WOA_NetCDF_filename(tracer; product_year, period, resolution))
    rm(nc_file; recursive=true, force=true)
end
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 13007 | # This file "resolves" the variable names used by the World Ocean Database (WOD).
# The idea is that the WOD has a naming convention (plus some exceptions)
# that are taken care of by the code below.
# Fallback for using direct download
# DataDeps `fetch_method` fallback: download `remotepath` into `localdir` with
# `Downloads.download` and return the local path.
# Note: only works for URLs whose last path component is the filename.
function fallback_download(remotepath, localdir)
    # Explicit input validation instead of `@assert` (assertions may be
    # disabled and raise an unspecific AssertionError).
    isdir(localdir) || throw(ArgumentError("not a directory: $localdir"))
    filename = basename(remotepath) # only works for URLs with filename as last part of name
    localpath = joinpath(localdir, filename)
    Downloads.download(remotepath, localpath)
    return localpath
end
"""
    register_WOA(tracer; product_year=2018, period=0, resolution=1)

Registers a `datadep` for the variable `tracer` averaged over `period` at resolution `resolution`.
The registered DataDep carries the citation as its message and verifies the
download with a SHA-256 checksum.
"""
function register_WOA(tracer; product_year=2018, period=0, resolution=1)
    register(DataDep(
        my_DataDeps_name(tracer; product_year, period, resolution),
        string(citation(tracer; product_year)),
        url_WOA(tracer; product_year, period, resolution),
        sha2_256,
        # Plain HTTP download fallback (see `fallback_download`)
        fetch_method = fallback_download
    ))
    return nothing
end
#============================================================
DataDeps registering name
============================================================#
# Unique DataDeps identifier for a given WOA file,
# e.g. "WOA18_Annual_phosphate_360x180".
function my_DataDeps_name(tracer; product_year=2018, period=0, resolution=1)
    yr = my_product_year(product_year)
    avg = my_averaging_period(period)
    var = WOA_path_varname(tracer)
    siz = surface_grid_size(resolution)
    return "WOA$(yr)_$(avg)_$(var)_$(siz)"
end
#============================================================
WOA product_year of the data product
============================================================#
# Canonical two-digit year string for a WOA product year.
# Accepts 4-digit or 2-digit integers, and "2018"/"18"-style strings.
my_product_year(product_year) = @match product_year begin
    2005 || 05 || "2005" || "05" => "05"
    2009 || 09 || "2009" || "09" => "09"
    2013 || 13 || "2013" || "13" => "13"
    2018 || 18 || "2018" || "18" => "18"
    2023 || 23 || "2023" || "23" => "23"
    _ => error("Cannot register WOA data from year $product_year")
end
#============================================================
Variable names
============================================================#
# Error message listing the accepted tracer identifiers.
incorrect_varname(tracer) = """
    "$tracer" is an incorrect variable name.
    Use one of these `String`s:
    - "t" for Temperature
    - "s" for Salinity
    - "I" for Density
    - "C" for Conductivity
    - "o" for Dissolved Oxygen
    - "O" for Percent Oxygen Saturation
    - "A" for Apparent Oxygen Utilization
    - "i" for Silicate
    - "p" for Phosphate
    - "n" for Nitrate
    """
# Tracer name as used in WOA URL paths (e.g. "phosphate").
WOA_path_varname(tracer) = @match my_varname(tracer) begin
    "Temp" => "temperature"
    "Salt" => "salinity"
    "Dens" => "density"
    "O2" => "oxygen"
    "O2sat" => "o2sat"
    "AOU" => "AOU"
    "DSi" => "silicate"
    "DIP" => "phosphate"
    "DIN" => "nitrate"
    "Cond" => "conductivity"
end
# One-letter tracer code as used in WOA filenames (e.g. "p" for phosphate).
WOA_filename_varname(tracer) = @match my_varname(tracer) begin
    "Temp" => "t"
    "Salt" => "s"
    "Dens" => "I"
    "O2" => "o"
    "O2sat" => "O"
    "AOU" => "A"
    "DSi" => "i"
    "DIP" => "p"
    "DIN" => "n"
    "Cond" => "C"
end
# Canonical tracer name; accepts many aliases (WOA one-letter codes,
# chemical formulas, and plain English names).
my_varname(tracer) = @match tracer begin
    "t" || "T" || "Temperature" || "temperature" || "Temp" || "temp" => "Temp"
    "s" || "Salinity" || "salinity" || "Salt" || "salt" => "Salt"
    "I" || "Density" || "density" || "Dens" || "dens" || "σ" => "Dens"
    "o" || "O2" || "O₂" || "Oxygen" || "oxygen" || "Dissolved oxygen" => "O2"
    "O" || "o2sat" || "O₂sat" || "O2sat" || "O2Sat" || "oxygen saturation" || "Oxygen saturation" => "O2sat"
    "A" || "AOU" || "Apparent oxygen utilization" => "AOU"
    "i" || "silicate" || "DSi" || "Silicic Acid" || "Si(OH)4" || "SiOH4" || "sioh4" => "DSi"
    "p" || "phosphate" || "PO4" || "Phosphate" || "DIP" || "po4" || "PO₄" => "DIP"
    "n" || "nitrate" || "NO3" || "Nitrate" || "DIN" || "no3" || "NO₃" => "DIN"
    "C" || "conductivity" || "Conductivity" || "Cond" || "cond" => "Cond"
    _ => error(incorrect_varname(tracer))
end
# Full NetCDF variable name, e.g. "p_an" for objectively analyzed phosphate.
WOA_varname(tracer, field) = string(WOA_filename_varname(tracer), "_", my_field_type_code(field))
#============================================================
Averaging period
============================================================#
# Error message explaining WOA's averaging-period codes.
incorrect_averagingperiod(period) = """
    "$period" is an incorrect averaging period.
    The averaging period must fit the World Ocean Atlas naming convention.
    That is, it must be one of these:
    - "00" for annual statistics, all data used
    - "13" to "16" for seasonal statistics:
        - "13" for winter (first three months of the year - Jan–Mar)
        - "14" for spring (Apr–Jun)
        - "15" for summer (Jul–Sep)
        - "16" for autumn (Oct–Dec)
    - "01" to "12" for monthly statistics (starting with "01" = January, to "12" = December)
    """
# Canonical averaging-period name ("Annual", "Winter", "January", ...).
# Accepts WOA two-digit codes, bare numbers, and (case-insensitive) English
# names or three-letter month abbreviations.
# (Redundant duplicate alternatives, e.g. `"13" || "13"`, removed.)
my_averaging_period(period::String) = @match lowercase(period) begin
    "00" || "0" || "annual" => "Annual"
    "13" || "winter" => "Winter"
    "14" || "spring" => "Spring"
    "15" || "summer" => "Summer"
    "16" || "autumn" => "Autumn"
    "01" || "1" || "january" || "jan" => "January"
    "02" || "2" || "february" || "feb" => "February"
    "03" || "3" || "march" || "mar" => "March"
    "04" || "4" || "april" || "apr" => "April"
    "05" || "5" || "may" => "May"
    "06" || "6" || "june" || "jun" => "June"
    "07" || "7" || "july" || "jul" => "July"
    "08" || "8" || "august" || "aug" => "August"
    "09" || "9" || "september" || "sep" => "September"
    "10" || "october" || "oct" => "October"
    "11" || "november" || "nov" => "November"
    "12" || "december" || "dec" => "December"
    _ => error(incorrect_averagingperiod(period))
end
# Integer periods are stringified first (0 => "Annual", 1 => "January", ...).
my_averaging_period(period::Int) = my_averaging_period(string(period))
# WOA two-digit code for the averaging period (inverse of the naming above).
WOA_averaging_period(period) = @match my_averaging_period(period) begin
    "Annual" => "00"
    "Winter" => "13"
    "Spring" => "14"
    "Summer" => "15"
    "Autumn" => "16"
    "January" => "01"
    "February" => "02"
    "March" => "03"
    "April" => "04"
    "May" => "05"
    "June" => "06"
    "July" => "07"
    "August" => "08"
    "September" => "09"
    "October" => "10"
    "November" => "11"
    "December" => "12"
end
# Classify a period code into "annual", "seasonal", or "monthly"
# (used to pick the right WOA09 file name).
seasonal_annual_monthly(period) = @match WOA_averaging_period(period) begin
    "00" => "annual"
    "13" || "14" || "15" || "16" => "seasonal"
    _ => "monthly"
end
#============================================================
Field type
============================================================#
# Error message explaining the accepted "field type codes".
# Fixed: duplicated "must be", missing "dd" entry, and the wrong function name
# (`my_function` -> `my_field_type_code`) in the guidance line.
incorrect_field_type_code(field) = """
    "$field" is an incorrect "field type code".
    The "field type code" must be one of these:
    - "an" for the objectively analyzed climatology
    - "mn" for the statistical mean
    - "sd" for the standard deviation
    - "dd" for the number of observations
    You need to edit the `my_field_type_code` function to add more!
    """
# Canonical WOA field-type code; accepts the code itself or a descriptive
# (case-insensitive) English name.
my_field_type_code(field) = @match lowercase(field) begin
    "an" || "objectively analyzed climatology" => "an"
    "mn" || "mean" || "statistical mean" => "mn"
    "sd" || "std" || "standard deviation" => "sd"
    "dd" || "number of observations" => "dd"
    _ => error(incorrect_field_type_code(field))
end
#============================================================
Resolution names
============================================================#
# Error message listing the available grid resolutions.
# Fixed: the guidance line pointed to a nonexistent `myresolution` function
# (the actual function is `my_resolution`).
incorrect_resolution(resolution) = """
    "$resolution" is an incorrect resolution.
    Only these resolutions are available:
    - "1" for 1°×1° resolution
    - "5" for 5°×5° resolution
    - "0.25" for 0.25°×0.25° resolution
    You need to edit the `my_resolution` function to add more!
    """
# Resolution as used in WOA URL paths.
# NOTE(review): `product_year < 2023` assumes an integer year — presumably
# callers pass integers here; verify against `url_WOA`.
WOA_path_resolution(resolution; product_year=2018) = @match my_resolution(resolution) begin
    "0.25°" => "0.25"
    "1°" => "1.00"
    "5°" => (product_year < 2023 ? "5deg" : "5.00")
end
# Resolution as used in WOA NetCDF filenames.
WOA_filename_resolution(resolution) = @match my_resolution(resolution) begin
    "0.25°" => "04"
    "1°" => "01"
    "5°" => "5d"
end
# Horizontal grid size (lon x lat) for the given resolution.
surface_grid_size(resolution) = @match my_resolution(resolution) begin
    "0.25°" => "1440x720"
    "1°" => "360x180"
    "5°" => "73x36"
end
# Canonical resolution string; accepts numbers and many string aliases.
my_resolution(resolution) = @match resolution begin
    "0.25°×0.25°" || "04" || "0.25d" || "0.25" || "0.25°" || 0.25 => "0.25°"
    "1°×1°" || "01" || "1d" || "1" || "1°" || 1 => "1°"
    "5°×5°" || "05" || "5d" || "5" || "5°" || 5 => "5°"
    _ => error(incorrect_resolution(resolution))
end
# WOA09 uses its own resolution naming (only 1° and 5° files exist).
WOA09_file_resolution(resolution) = @match my_resolution(resolution) begin
    "1°" => "1deg"
    "5°" => "5deg"
    _ => error("No such resolution for WOA09 data")
end
#============================================================
Decade names
============================================================#
# "Decade" directory in WOA paths: physical tracers use the decadal-average
# ("decav") product, biogeochemical tracers use "all" data.
WOA_decade(tracer) = @match my_varname(tracer) begin
    "Temp" || "Salt" || "Dens" || "Cond" => "decav"
    "O2" || "O2sat" || "AOU" || "DSi" || "DIP" || "DIN" => "all"
    _ => error(incorrect_varname(tracer))
end
# Filename suffix: only salinity files carry a "v2" suffix.
WOA_v2(tracer) = @match my_varname(tracer) begin
    "Salt" => "v2"
    "Dens" || "Temp" || "Cond" || "O2" || "O2sat" || "AOU" || "DSi" || "DIP" || "DIN" => ""
    _ => error(incorrect_varname(tracer))
end
#============================================================
NetCDF file name
============================================================#
# Assemble the WOA NetCDF filename, e.g. "woa18_all_p00_01.nc".
function WOA_NetCDF_filename(tracer; product_year=2018, period=0, resolution=1)
    yr = my_product_year(product_year)
    decade = WOA_decade(tracer)
    var = WOA_filename_varname(tracer)
    per = WOA_averaging_period(period)
    res = WOA_filename_resolution(resolution)
    suffix = WOA_v2(tracer)
    return "woa$(yr)_$(decade)_$(var)$(per)_$(res)$(suffix).nc"
end
#============================================================
URLs
============================================================#
"""
    url_WOA(tracer; product_year=2018, period=0, resolution=1)

Returns the URL (`String`) for the NetCDF file of the World Ocean Atlas.
This URL `String` typically looks like

```
"https://data.nodc.noaa.gov/woa/WOA13/DATAv2/phosphate/netcdf/all/1.00/woa13_all_p00_01.nc"
```
"""
url_WOA(tracer; product_year=2018, period=0, resolution=1) = string("https://www.ncei.noaa.gov/data/oceans/woa/WOA",
    my_product_year(product_year), "/",
    url_DATA(product_year), "/",
    WOA_path_varname(tracer), "/netcdf/",
    WOA_decade(tracer), "/",
    WOA_path_resolution(resolution; product_year), "/",
    WOA_NetCDF_filename(tracer; product_year, period, resolution))
# THREDDS/OPeNDAP endpoint for WOA23 files.
url_WOA_THREDDS_23(tracer, period, resolution) = string("https://www.ncei.noaa.gov/thredds-ocean/dodsC/woa23/DATA/",
    WOA_path_varname(tracer), "/netcdf/",
    WOA_decade(tracer), "/",
    WOA_path_resolution(resolution, product_year=2023), "/",
    WOA_NetCDF_filename(tracer; product_year=2023, period, resolution))
# THREDDS/OPeNDAP endpoint for WOA18 files.
url_WOA_THREDDS_18(tracer, period, resolution) = string("https://www.ncei.noaa.gov/thredds-ocean/dodsC/ncei/woa/",
    WOA_path_varname(tracer), "/",
    WOA_decade(tracer), "/",
    WOA_path_resolution(resolution), "/",
    WOA_NetCDF_filename(tracer; product_year=2018, period, resolution))
# THREDDS/OPeNDAP endpoint for WOA09 files (different naming scheme).
url_WOA_THREDDS_09(tracer, period, resolution) = string("https://data.nodc.noaa.gov/thredds/dodsC/woa09/",
    WOA_path_varname(tracer), "_",
    seasonal_annual_monthly(period), "_",
    WOA09_file_resolution(resolution), ".nc")
# Dispatch to the THREDDS URL builder for the requested product year.
url_WOA_THREDDS(tracer; product_year=2018, period=0, resolution=1) = @match my_product_year(product_year) begin
    "18" => url_WOA_THREDDS_18(tracer, period, resolution)
    "09" => url_WOA_THREDDS_09(tracer, period, resolution)
    "23" => url_WOA_THREDDS_23(tracer, period, resolution)
    _ => "No THREDDS links for year $product_year"
end
# Name of the "DATA" directory in WOA URLs (only WOA13 uses "DATAv2").
url_DATA(product_year) = @match my_product_year(product_year) begin
    "09" => "DATA"
    "13" => "DATAv2"
    "18" => "DATA"
    "23" => "DATA"
end
"""
    varname(tracer)

Returns the World Ocean Data variable name that "matches" `tracer`.
Currently resolves only coordinate names (lat/lon/depth); returns `nothing`
for anything else.
"""
varname(tracer::String) = @match lowercase(tracer) begin
    "lat" || "latitude" => "lat"
    "lon" || "longitude" => "lon"
    "depth" || "depths" => "depth"
end
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 365 |
using Test, WorldOceanAtlasTools
using NCDatasets
using OceanGrids, Unitful
using Statistics
# Alias for short name
WOA = WorldOceanAtlasTools
# For CI, make sure the download does not hang on the accept prompt
ENV["DATADEPS_ALWAYS_ACCEPT"] = true
# include("test_urls.jl") # Only include locally, as its URLs randomly fail
include("test_functions.jl")
include("test_citations.jl")
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 573 | @testset "Citations" begin
    product_years = [2009, 2013, 2018, 2023]
    @testset "WOA$(WOA.my_product_year(product_year))" for product_year in product_years
        # All ten WOA one-letter tracer codes
        tracers = ["t", "s", "I", "C", "o", "O", "A", "i", "p", "n"]
        @testset "$(WOA.my_varname(tracer))" for tracer in tracers
            # A citation must exist (i.e. not error) for every tracer/year combo
            citation_str = WOA.citation(tracer; product_year)
            @test citation_str isa String
            println("Citation for $(WOA.my_varname(tracer)) from WOA$(WOA.my_product_year(product_year))")
            println(citation_str * "\n")
        end
    end
end
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 2418 |
@testset "Testing functions" begin
    # Use the coarsest (5°) annual phosphate field to keep the download small
    product_year = 2023
    tracer = "p"
    period = 0 # Annual, 1 month and 1 season — no need to test every month and season
    resolution = "5°"
    field = "mn"
    ds = WOA.WOA_Dataset(tracer; product_year, period, resolution)
    @test ds isa Dataset
    @testset "get_3D_field" begin
        field3D = WOA.get_3D_field(tracer; product_year, period, resolution, field)
        @test field3D isa Array{Union{Float32, Missing},3}
        # 5° grid: 36 lat x 72 lon x 102 depth levels
        @test size(field3D) == (36, 72, 102)
    end
    @testset "Citations erroring" begin
        # NOTE(review): these calls are unqualified (citation* are not
        # exported), hence marked broken
        @test_broken citation("What?", product_year=2000)
        @test_broken citation(product_year=2000)
        @test_broken citation_Temperature(product_year=2000)
        @test_broken citation_Salinity(product_year=2000)
        @test_broken citation_Oxygen(product_year=2000)
        @test_broken citation_Nutrients(product_year=2000)
    end
    @testset "get_gridded_3D_field" begin
        field3D, lat, lon, depth = WOA.get_gridded_3D_field(ds, tracer, field)
        @test field3D isa Array{Union{Float32, Missing},3}
        @test size(field3D) == (36, 72, 102)
        @test lat isa Vector
        @test length(lat) == 36
        @test lon isa Vector
        @test length(lon) == 72
        @test depth isa Vector
        @test length(depth) == 102
    end
    @testset "filter_gridded_3D_field" begin
        field3D, lat, lon, depth = WOA.get_gridded_3D_field(ds, tracer, field)
        fieldvec, latvec, lonvec, depthvec, CI = WOA.filter_gridded_3D_field(field3D, lat, lon, depth)
        @test fieldvec isa Vector
        @test latvec isa Vector
        @test lonvec isa Vector
        @test depthvec isa Vector
    end
    @testset "fit_to_grid" begin
        grid = OceanGrid(90,180,24)
        a, b = WOA.fit_to_grid(grid, tracer; product_year, period, resolution, field)
        println(typeof(a))
        println(typeof(b))
        @test a isa Array{Float64, 3}
        @test size(a) == size(grid)
        @test b isa Array{Float64, 3}
        @test size(b) == size(grid)
        # NOTE(review): `I` is unused below — leftover from a removed check
        I = findall(a .≠ 0)
    end
    # new functionality
    @testset "observations function" begin
        σSW = 1.035u"kg/L" # approximate mean sea water density to convert mol/kg to mol/m^3
        obs = WorldOceanAtlasTools.observations(tracer)
        obs.value = obs.p .* σSW .|> u"μM"
        # Sanity range for global-mean phosphate concentration
        @test 0.1u"μM" < mean(obs.value) < 10u"μM"
    end
end
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | code | 1070 |
# Exhaustive download smoke test: checks that every (year, grid, period,
# nutrient) WOA file can be fetched and contains the expected variables.
# Only run locally (downloads are large and URLs fail intermittently).
@testset "Testing URLs" begin
    years = [2009, 2013, 2018, 2023] # WOA product years
    vvs = ["p", "i", "n"] # nutrients only
    tts = [0, 1, 13] # Annual, 1 month and 1 season — no need to test every month and season
    ggs = ["5°", "1°", "0.25°"]
    # mean, analyzed, standard deviation, standard error
    ffs = ["mn", "an", "sd", "se"]
    @testset "WOA$(WOA.my_product_year(year))" for year in years
        @testset "$gg x $gg grid" for gg in ggs
            @testset "$(WOA.my_averaging_period(tt)) period" for tt in tts
                @testset "$(WOA.WOA_path_varname(vv)) tracer" for vv in vvs
                    ds = WOA.WOA_Dataset(vv; product_year=year, period=tt, resolution=gg)
                    @testset "$ff field" for ff in ffs
                        @test haskey(ds, WOA.WOA_varname(vv, ff))
                    end
                    @testset "dimensions" begin
                        @test haskey(ds, "lat")
                        @test haskey(ds, "lon")
                        @test haskey(ds, "depth")
                    end
                end
            end
        end
    end
end
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | docs | 5274 | <a href="https://github.com/briochemc/WorldOceanAtlasTools.jl">
<img src="https://user-images.githubusercontent.com/4486578/59411626-07e2ed00-8dff-11e9-8daf-e823f61124d9.png" width="100%" align="center">
</a>
# World Ocean Atlas Tools
<p>
<a href="https://github.com/briochemc/WorldOceanAtlasTools.jl/actions">
<img src="https://img.shields.io/github/actions/workflow/status/briochemc/WorldOceanAtlasTools.jl/mac.yml?label=OSX&logo=Apple&logoColor=white&style=flat-square">
</a>
<a href="https://github.com/briochemc/WorldOceanAtlasTools.jl/actions">
<img src="https://img.shields.io/github/actions/workflow/status/briochemc/WorldOceanAtlasTools.jl/linux.yml?label=Linux&logo=Linux&logoColor=white&style=flat-square">
</a>
<a href="https://github.com/briochemc/WorldOceanAtlasTools.jl/actions">
<img src="https://img.shields.io/github/actions/workflow/status/briochemc/WorldOceanAtlasTools.jl/windows.yml?label=Windows&logo=Windows&logoColor=white&style=flat-square">
</a>
<a href="https://codecov.io/gh/briochemc/WorldOceanAtlasTools.jl">
<img src="https://img.shields.io/codecov/c/github/briochemc/WorldOceanAtlasTools.jl/master?label=Codecov&logo=codecov&logoColor=white&style=flat-square">
</a>
</p>
<p>
<a href="https://briochemc.github.io/WorldOceanAtlasTools.jl/stable/">
<img src="https://img.shields.io/github/actions/workflow/status/briochemc/WorldOceanAtlasTools.jl/docs.yml?style=for-the-badge&label=Documentation&logo=Read%20the%20Docs&logoColor=white">
</a>
</p>
<p>
<a href="https://doi.org/10.5281/zenodo.2677666">
<img src="https://zenodo.org/badge/DOI/10.5281/zenodo.2677666.svg" alt="DOI">
</a>
<a href="https://github.com/briochemc/WorldOceanAtlasTools.jl/blob/master/LICENSE">
<img alt="License: MIT" src="https://img.shields.io/badge/License-MIT-yellow.svg">
</a>
</p>
### Simple usage
Just add WorldOceanAtlasTools like any other Julia package, and then you can grab the data with, e.g.,
```julia
julia> WorldOceanAtlasTools.observations("PO₄")
Registering World Ocean Atlas data with DataDeps
┌ Warning: You are about to use World Ocean Atlas data.
│ Please cite the following corresponding reference(s):
│ Garcia, H. E., K. Weathers, C. R. Paver, I. Smolyar, T. P. Boyer, R. A. Locarnini, M. M. Zweng, A. V. Mishonov, O. K. Baranova, D. Seidov, and J. R. Reagan, 2018. World Ocean Atlas 2018, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; in preparation.)
└ @ WorldOceanAtlasTools ~/.julia/dev/WorldOceanAtlasTools/src/functions.jl:218
1312523×4 DataFrame
Row │ lat lon depth PO₄
│ Float32 Float32 Quantity… Quantity…
─────────┼────────────────────────────────────────────────
1 │ -77.5 -178.5 0.0 m 1.35075 μmol kg⁻¹
2 │ -77.5 -177.5 0.0 m 1.28211 μmol kg⁻¹
3 │ -77.5 -176.5 0.0 m 1.37447 μmol kg⁻¹
⋮ │ ⋮ ⋮ ⋮ ⋮
1312521 │ 52.5 -165.5 5500.0 m 2.4566 μmol kg⁻¹
1312522 │ 52.5 -163.5 5500.0 m 2.42341 μmol kg⁻¹
1312523 │ 53.5 -158.5 5500.0 m 2.45224 μmol kg⁻¹
1312517 rows omitted
```
### Why this package?
[WorldOceanAtlasTools.jl](https://github.com/briochemc/WorldOceanAtlasTools.jl) was developed for the purpose of downloading and using data from the World Ocean Atlas (WOA) database to be used by the [AIBECS.jl](https://github.com/briochemc/AIBECS.jl) package.
The more generic ambition is for [WorldOceanAtlasTools.jl](https://github.com/briochemc/WorldOceanAtlasTools.jl) to provide an API that can fetch data from [this list](https://www.nodc.noaa.gov/OC5/indprod.html) of WOA data sets and products (located on the National Oceanic and Atmospheric Administration (NOAA) website) and fit it to any model's grid.
This is a work in progress, therefore PRs, suggestions, and generally help are, of course, more than welcome!
### How it works
[WorldOceanAtlasTools.jl](https://github.com/briochemc/WorldOceanAtlasTools.jl) essentially defines the nomenclature and URLs used by the WOA and then relies on the [DataDeps.jl](https://github.com/oxinabox/DataDeps.jl) package developed by [White et al. (2018)](https://arxiv.org/abs/1808.01091) to download the corresponding NetCDF files.
(NetCDF files are read using the [NCDatasets.jl](https://github.com/Alexander-Barth/NCDatasets.jl) package.)
In order to facilitate the use of WOA data in [AIBECS.jl](https://github.com/briochemc/AIBECS.jl), the [WorldOceanAtlasTools.jl](https://github.com/briochemc/WorldOceanAtlasTools.jl) package can use a `grid` from the [OceanGrids.jl](https://github.com/briochemc/OceanGrids.jl) package and bin a WOA tracer into that grid, and uses the [NearestNeighbors.jl](https://github.com/KristofferC/NearestNeighbors.jl) package to decide where to bin each observation.
But you can also use it as in the example snippet above by simply calling the function `observations`.
### Cite me if you use me!
If you use this package, please cite it using the [CITATION.bib](./CITATION.bib) file, and cite the WOA references using the `citation` function or use the corresponding bibtex entries in the [CITATION.bib](./CITATION.bib) file.
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.3 | 6a1d3bbb9e01dbbc873e32e964cee8cf8c5a4c9d | docs | 5694 | # WorldOceanAtlasTools.jl Documentation
This package provides tools for downloading and using data from the [World Ocean Atlas (WOA)](https://en.wikipedia.org/wiki/World_Ocean_Atlas).
Like with every Julia package, you must start with
```julia
julia> using WorldOceanAtlasTools
```
By default, the latest WOA18 data is used.
Below are examples.
## Get WOA observations
To get a list of observations as a table, simply call `observations(tracer)`:
```julia
julia> Pobs = WorldOceanAtlasTools.observations("PO4")
Registering World Ocean Atlas data with DataDeps
┌ Warning: You are about to use World Ocean Atlas data 18.
│
│ Please cite the following corresponding reference(s):
│ Garcia et al. (2018), World Ocean Atlas 2018, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; in preparation.)
└ @ WorldOceanAtlasTools ~/.julia/dev/WorldOceanAtlasTools/src/functions.jl:223
┌ Warning: Over-writing registration of the datadep
│ name = "WOA18_Annual_phosphate_360x180"
└ @ DataDeps ~/.julia/packages/DataDeps/ooWXe/src/registration.jl:15
1312523×4 DataFrame
Row │ lat lon depth PO4
│ Float32 Float32 Quantity… Quantity…
─────────┼────────────────────────────────────────────────
1 │ -77.5 -178.5 0.0 m 1.35075 μmol kg⁻¹
2 │ -77.5 -177.5 0.0 m 1.28211 μmol kg⁻¹
⋮ │ ⋮ ⋮ ⋮ ⋮
1312522 │ 52.5 -163.5 5500.0 m 2.42341 μmol kg⁻¹
1312523 │ 53.5 -158.5 5500.0 m 2.45224 μmol kg⁻¹
1312519 rows omitted
```
## Get gridded WOA data
To get the 3D field of the "statistical mean" climatological concentration of phosphate from the World Ocean Atlas 2018 product at 5-degree resolution, use `get_3D_field`:
```julia
julia> WorldOceanAtlasTools.get_3D_field("PO4"; field="mn", resolution="5°")
Getting the 3D field of WOA18 Annual phosphate 73x36 data
Registering World Ocean Atlas data with DataDeps
┌ Warning: You are about to use World Ocean Atlas data 18.
│
│ Please cite the following corresponding reference(s):
│ Garcia et al. (2018), World Ocean Atlas 2018, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; in preparation.)
└ @ WorldOceanAtlasTools ~/.julia/dev/WorldOceanAtlasTools/src/functions.jl:223
┌ Warning: Over-writing registration of the datadep
│ name = "WOA18_Annual_phosphate_73x36"
└ @ DataDeps ~/.julia/packages/DataDeps/ooWXe/src/registration.jl:15
Rearranging data
36×72×102 Array{Union{Missing, Float32}, 3}:
[:, :, 1] =
missing missing … missing missing
missing missing missing missing
⋮ ⋱ ⋮
0.563333 0.69 0.61 0.62
0.78 0.455 … 0.525 0.638659
[:, :, 2] =
missing missing … missing missing
missing missing missing missing
⋮ ⋱ ⋮
0.563014 0.69 0.612773 0.624986
0.781124 0.446186 … 0.525 0.77
;;; …
[:, :, 101] =
missing missing … missing missing
missing missing missing missing
⋮ ⋱ ⋮
missing missing missing missing
missing missing … missing missing
[:, :, 102] =
missing missing … missing missing
missing missing missing missing
⋮ ⋱ ⋮
missing missing missing missing
missing missing … missing missing
```
## Regridding WOA data to an AIBECS grid
To "regrid" WOA data (using a nearest-neighbour algorithm) to an AIBECS grid, you can use `fit_to_grid`:
```julia
julia> using AIBECS
julia> grd, _ = OCIM2.load();
┌ Info: You are about to use the OCIM2_CTL_He model.
│ If you use it for research, please cite:
│
│ - DeVries, T., & Holzer, M. (2019). Radiocarbon and helium isotope constraints on deep ocean ventilation and mantle‐³He sources. Journal of Geophysical Research: Oceans, 124, 3036–3057. https://doi.org/10.1029/2018JC014716
│
│ You can find the corresponding BibTeX entries in the CITATION.bib file
│ at the root of the AIBECS.jl package repository.
└ (Look for the "DeVries_Holzer_2019" key.)
julia> μPO43D, σPO43D = WorldOceanAtlasTools.fit_to_grid(grd, "PO₄");
Registering World Ocean Atlas data with DataDeps
┌ Warning: You are about to use World Ocean Atlas data 18.
│
│ Please cite the following corresponding reference(s):
│ Garcia et al. (2018), World Ocean Atlas 2018, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate and nitrate+nitrite, silicate). A. Mishonov Technical Ed.; in preparation.)
└ @ WorldOceanAtlasTools ~/.julia/dev/WorldOceanAtlasTools/src/functions.jl:223
┌ Warning: Over-writing registration of the datadep
│ name = "WOA18_Annual_phosphate_360x180"
└ @ DataDeps ~/.julia/packages/DataDeps/ooWXe/src/registration.jl:15
Reading NetCDF file
Rearranging data
Filtering data
Averaging data over each grid box
Setting μ = 0 and σ² = ∞ where no obs
Setting a realistic minimum for σ²
```
This will use the "objectively analyzed mean" (because it is more filled than the statistical mean), which is at 1° resolution.
You can check the size of `μPO43D` and rearrange it to work with model vectors:
```julia
julia> size(μPO43D), size(grd) # Check that the variable has the same size
((91, 180, 24), (91, 180, 24))
julia> PO4obs = vectorize(μPO43D, grd) # rearrange as a vector of wet-box values only
200160-element Vector{Float64}:
1.916053578257561e-6
1.9119117371737955e-6
1.8802444487810134e-6
⋮
1.5629494562745093e-6
1.5608462169766426e-6
1.544798031449318e-6
```
## Reference (function docstrings)
```@autodocs
Modules = [WorldOceanAtlasTools]
Order = [:function, :type]
```
| WorldOceanAtlasTools | https://github.com/briochemc/WorldOceanAtlasTools.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1919 | using Documenter
using Stopping
# Build the Stopping.jl documentation site with Documenter, then deploy it.
makedocs(
  sitename = "Stopping.jl",
  format = Documenter.HTML(
    assets = ["assets/style.css"],
    # pretty URLs only on CI; locally keep file:// friendly links
    prettyurls = get(ENV, "CI", nothing) == "true",
  ),
  modules = [Stopping],
  pages = [
    "Home" => "index.md",
    "API" => "api.md",
    "Stopping's ID" => "idcard.md",
    "State's ID" => "idcard-state.md",
    "Meta's ID" => "idcard-stoppingmeta.md",
    "Optimality in Stopping" => "howstopcheckoptimality.md",
    "Stopping in action" => "example-basic-Newton.md",
    "Stop remote control" => "idcard-stopremote.md",
    "Stopping workflow" => "stop-workflow.md",
    "Speak to stopping" => "speak-to-stopping.md",
    "NLPStopping" => "nlpstopping.md",
    "LAStopping" => "lastopping.md",
    "Readme" => "index_tuto.md",
    "How to State" => "howtostate.md",
    "How to State for NLPs" => "howtostate-nlp.md",
    "How to Stop" => "howtostop.md",
    "How to Stop 2" => "howtostop-2.md",
    "How to Stop for NLPs" => "howtostop-nlp.md",
    "Solve linear algebra" => "linear-algebra.md",
    "Use a buffer function" => "buffer.md",
    "A fixed point algorithm" => "fixed-point.md",
    "Backtracking linesearch algorithm" => "backls.md",
    "Unconstrained optimization algorithm" => "uncons.md",
    "Active set algorithm" => "active-set.md",
    "Quadratic penalty algorithm" => "penalty.md",
    "Run optimization algorithms" => "run-optimsolver.md",
    "Benchmark optimization algorithms" => "benchmark.md",
    "Overfitting" => "overfitting.md",
    "Checkpointing" => "checkpointing.md",
    "Mix algorithms" => "gradient-lbfgs.md",
  ],
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(repo = "github.com/SolverStoppingJulia/Stopping.jl", push_preview = true)
#https://juliadocs.github.io/Documenter.jl/stable/man/hosting/ ?
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2433 | using LinearAlgebra, Krylov, LinearOperators, SparseArrays, Stopping
#Krylov @kdot
# Stride-1 dot product helper: `@kdot(n, x, y)` becomes
# `Krylov.krylov_dot(n, x, 1, y, 1)` in the caller's scope (hence the esc).
macro kdot(len, u, v)
  dotcall = Expr(:call, :(Krylov.krylov_dot), len, u, 1, v, 1)
  return esc(dotcall)
end
using SolverTools, Logging
"""
Randomized coordinate descent
Sect. 3.7 in Gower, R. M., & Richtárik, P. (2015).
Randomized iterative methods for linear systems.
SIAM Journal on Matrix Analysis and Applications, 36(4), 1660-1690.
Using Stopping
"""
function StopRandomizedCD2(
A::AbstractMatrix,
b::AbstractVector{T};
is_zero_start::Bool = true,
x0::AbstractVector{T} = zeros(T, size(A, 2)),
atol::AbstractFloat = 1e-7,
rtol::AbstractFloat = 1e-15,
max_iter::Int = size(A, 2)^2,
verbose::Int = 100,
kwargs...,
) where {T <: AbstractFloat}
stp = LAStopping(
LinearSystem(A, b),
GenericState(x0, similar(b)),
max_cntrs = Stopping._init_max_counters_linear_operators(),
atol = atol,
rtol = rtol,
max_iter = max_iter,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
retol = false,
optimality_check = (pb, state) -> state.res,
kwargs...,
)
return StopRandomizedCD2(stp, is_zero_start = is_zero_start, verbose = verbose, kwargs...)
end
"""
    StopRandomizedCD2(stp; is_zero_start = true, verbose = 100)

Cyclic coordinate descent on the linear system stored in `stp.pb`, working
directly on the State's fields and using the cheaper `start!`/`cheap_stop!`
entry points of Stopping. Returns the (mutated) `stp`.
"""
function StopRandomizedCD2(
  stp::AbstractStopping;
  is_zero_start::Bool = true,
  verbose::Int = 100,
  kwargs...,
)
  state = stp.current_state
  A, b = stp.pb.A, stp.pb.b
  m, n = size(A)
  # For x == 0 the residual b - A*x is just b, so skip the matrix product.
  state.res = is_zero_start ? b : b - A * state.x
  OK = start!(stp, no_start_opt_check = true)
  stp.meta.optimality0 = norm(b)
  while !OK
    # Deterministic cyclic coordinate choice (cheaper than a random pick).
    i = mod(stp.meta.nb_of_stop, n) + 1
    Ai = A[:, i]
    Aires = @kdot(m, Ai, state.res)
    nAi = @kdot(m, Ai, Ai)
    if nAi != 0 # identically-zero column: nothing to update (avoid 0/0)
      # Exact minimization along coordinate i. Sign fixed: the previous
      # `x[i] -= ...; res += ...` pair made Ai'res double each visit instead
      # of vanishing, so the residual grew.
      state.x[i] += Aires / nAi
      state.res -= Ai * Aires / nAi
    end
    OK = cheap_stop!(stp) # cheap_stop! skips the optional/expensive checks
    if mod(stp.meta.nb_of_stop, verbose) == 0 # print every `verbose` iterations
      # logging intentionally disabled
    end
  end
  return stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1505 | using Krylov, Stopping
include("random_coordinate_descent_method.jl")
include("stop_random_coordinate_descent_method.jl")
include("stop_random_coordinate_descent_method_LS.jl")
n = 5000;
A = rand(n, n)
sol = rand(n)
b = A * sol
#@time stp = StopRandomizedCD(A,b, max_iter = 1000)
x0 = zeros(size(A, 2))
pb = LinearSystem(A, b)
s0 = GenericState(x0, similar(b))
mcnt = Stopping._init_max_counters_linear_operators()
#
@time meta = StoppingMeta(
max_cntrs = mcnt,
atol = 1e-7,
rtol = 1e-15,
max_iter = 99,
retol = false,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
optimality_check = (pb, state) -> state.res,
)
s0 = GenericState(x0, similar(b))
@time stp = LAStopping(pb, meta, cheap_stop_remote_control(), s0)
@time StopRandomizedCD(stp)
@time x, OK = RandomizedCD(A, b, max_iter = 100)
###############################################################################
#
# We compare the stopping_randomized_CD with the same version with an
# artificial substopping created and called at each iteration.
# It appears that the expansive part is to create the SubStopping, and in
# particular create the StoppingMeta.
#
@time meta = StoppingMeta(
max_cntrs = mcnt,
atol = 1e-7,
rtol = 1e-15,
max_iter = 99,
retol = false,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
optimality_check = (pb, state) -> state.res,
)
s0 = GenericState(x0, similar(b))
@time stp2 = LAStopping(pb, meta, cheap_stop_remote_control(), s0)
@time StopRandomizedCD_LS(stp2)
;
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4083 | using Krylov, Stopping
#https://github.com/JuliaSmoothOptimizers/Krylov.jl
# Benchmark script: compares the plain algorithm against several
# Stopping-driven variants (cheap_stop!, cheap/void remote controls, @instate).
include("random_coordinate_descent_method.jl")
include("stop_random_coordinate_descent_method.jl")
include("cheap_stop_random_coordinate_descent_method.jl")
include("instate_coordinate_descent_method.jl")
#using ProfileView,
using BenchmarkTools
# Random square system with a known solution.
n = 5000;
A = rand(n, n)
sol = rand(n)
b = A * sol
x0 = zeros(size(A, 2))
pb = LinearSystem(A, b)
s0 = GenericState(x0, similar(b))
mcnt = Stopping._init_max_counters_linear_operators()
###############################################################################
#
# The original algorithm
#
###############################################################################
@time x, OK, k = RandomizedCD(A, b, max_iter = 100)
###############################################################################
#
# Stopping with memory optimization using the State and with cheap_stop!
#
###############################################################################
@time meta3 = StoppingMeta(
  max_cntrs = mcnt,
  atol = 1e-7,
  rtol = 1e-15,
  max_iter = 100,
  retol = false,
  tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
  optimality_check = (pb, state) -> state.res,
)
s3 = GenericState(zeros(size(A, 2)), similar(b))
@time stp3 = LAStopping(pb, meta3, s3)
@time StopRandomizedCD2(stp3)
###############################################################################
#
# Stopping version of the algorithm
#
###############################################################################
@time meta2 = StoppingMeta(
  max_cntrs = mcnt,
  atol = 1e-7,
  rtol = 1e-15,
  max_iter = 100,
  retol = false,
  tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
  optimality_check = (pb, state) -> state.res,
)
s2 = GenericState(zeros(size(A, 2)), similar(b))
@time stp2 = LAStopping(pb, meta2, s2)
@time StopRandomizedCD(stp2)
###############################################################################
#
# Stopping version of the algorithm with cheap remote control
#
###############################################################################
@time meta1 = StoppingMeta(
  max_cntrs = mcnt,
  atol = 1e-7,
  rtol = 1e-15,
  max_iter = 100,
  retol = false,
  tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
  optimality_check = (pb, state) -> state.res,
)
s1 = GenericState(zeros(size(A, 2)), similar(b))
@time stp1 = LAStopping(pb, meta1, cheap_stop_remote_control(), s1)
@time StopRandomizedCD(stp1)
###############################################################################
#
# Stopping version of the algorithm with cheap remote control
#
###############################################################################
@time meta = StoppingMeta(
  max_cntrs = mcnt,
  atol = 1e-7,
  rtol = 1e-15,
  max_iter = 100,
  retol = false,
  tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
  optimality_check = (pb, state) -> state.res,
)
s0 = GenericState(zeros(size(A, 2)), similar(b))
@time stp = LAStopping(pb, meta, StopRemoteControl(), s0)
@time StopRandomizedCD(stp)
###############################################################################
#
# Stopping with memory optimization using the State and with cheap_stop!
# uses the @ macro instate.
#
#DOESN'T WORK ??
#
###############################################################################
#=
@time meta4 = StoppingMeta(max_cntrs = mcnt,
                           atol = 1e-7, rtol = 1e-15, max_iter = 100,
                           retol = false,
                           tol_check = (atol, rtol, opt0)->(atol + rtol * opt0),
                           optimality_check = (pb, state) -> state.res)
s4 = GenericState(zeros(size(A,2)), similar(b))
@time stp4 = LAStopping(pb, meta4, s4)
@time stp4 = StopRandomizedCD3(stp4)
=#
# All variants run the same iteration, so their final residual norms must agree.
Lnrm = [
  norm(stp.current_state.current_score),
  norm(stp1.current_state.current_score),
  norm(stp2.current_state.current_score),
  norm(stp3.current_state.current_score), #norm(stp4.current_state.current_score),
  norm(b - A * x),
]
using Test
@test Lnrm ≈ minimum(Lnrm) * ones(length(Lnrm)) atol = 1e-7
nothing;
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2149 | using LinearAlgebra, Krylov, LinearOperators, SparseArrays, Stopping
#Krylov @kdot
# Stride-1 dot product shortcut: `@kdot(n, x, y)` expands, in the caller's
# scope (esc), to `Krylov.krylov_dot(n, x, 1, y, 1)`.
macro kdot(n, x, y)
  return esc(:(Krylov.krylov_dot($n, $x, 1, $y, 1)))
end
using SolverTools, Logging
"""
Randomized coordinate descent
Sect. 3.7 in Gower, R. M., & Richtárik, P. (2015).
Randomized iterative methods for linear systems.
SIAM Journal on Matrix Analysis and Applications, 36(4), 1660-1690.
Using Stopping
"""
function StopRandomizedCD3(
A::AbstractMatrix,
b::AbstractVector{T};
is_zero_start::Bool = true,
x0::AbstractVector{T} = zeros(T, size(A, 2)),
atol::AbstractFloat = 1e-7,
rtol::AbstractFloat = 1e-15,
max_iter::Int = size(A, 2)^2,
verbose::Int = 100,
kwargs...,
) where {T <: AbstractFloat}
stp = LAStopping(
LinearSystem(A, b),
GenericState(x0, similar(b)),
max_cntrs = Stopping._init_max_counters_linear_operators(),
atol = atol,
rtol = rtol,
max_iter = max_iter,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
retol = false,
optimality_check = (pb, state) -> state.res,
kwargs...,
)
return StopRandomizedCD3(stp, is_zero_start = is_zero_start, verbose = verbose, kwargs...)
end
"""
    StopRandomizedCD3(stp; is_zero_start = true, verbose = 100)

Cyclic coordinate descent driven by `stp`, written with the `@instate` macro,
which rewrites bare names in the loop body (`x`, `res`) as fields of `state`.
Returns the (mutated) `stp`.
"""
function StopRandomizedCD3(
  stp::AbstractStopping;
  is_zero_start::Bool = true,
  verbose::Int = 100,
  kwargs...,
)
  state = stp.current_state
  A, b = stp.pb.A, stp.pb.b
  m, n = size(A)
  # For x == 0 the residual b - A*x is just b, so skip the matrix product.
  state.res = is_zero_start ? b : b - A * state.x
  OK = start!(stp, no_start_opt_check = true)
  stp.meta.optimality0 = norm(b)
  @instate state while !OK
    i = mod(stp.meta.nb_of_stop, n) + 1 # cyclic coordinate choice
    Ai = A[:, i]
    # Bug fix: the helper is the macro `@kdot`; a bare `kdot` is undefined.
    Aires = @kdot(m, Ai, res)
    nAi = @kdot(m, Ai, Ai)
    # Exact minimization along coordinate i (sign fixed: the former
    # `-=`/`+=` pair made the residual grow instead of shrink).
    x[i] += Aires / nAi
    res -= Ai * Aires / nAi
    OK = stop!(stp)
    if mod(stp.meta.nb_of_stop, verbose) == 0 # print every `verbose` iterations
      # logging intentionally disabled
    end
  end
  return stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2080 | using LinearAlgebra, Krylov, LinearOperators, SparseArrays, Stopping
#Krylov @kdot
# Stride-1 dot product shortcut: `@kdot(n, x, y)` expands, in the caller's
# scope (esc), to `Krylov.krylov_dot(n, x, 1, y, 1)`.
macro kdot(n, x, y)
  return esc(:(Krylov.krylov_dot($n, $x, 1, $y, 1)))
end
using SolverTools, Logging
"""
Randomized coordinate descent
Sect. 3.7 in Gower, R. M., & Richtárik, P. (2015).
Randomized iterative methods for linear systems.
SIAM Journal on Matrix Analysis and Applications, 36(4), 1660-1690.
"""
function RandomizedCD(
A::AbstractMatrix,
b::AbstractVector{T};
is_zero_start::Bool = true,
x0::AbstractVector{T} = zeros(T, size(A, 2)),
atol::AbstractFloat = 1e-7,
rtol::AbstractFloat = 1e-15,
max_iter::Int = size(A, 2)^2,
max_time::Float64 = 60.0,
max_cntrs = Stopping._init_max_counters_linear_operators(quick = 20000),
verbose::Int = 100,
kwargs...,
) where {T <: AbstractFloat}
m, n = size(A)
x = copy(x0)
res = is_zero_start ? b : b - A * x
nrm0 = norm(res)
time_init = time()
elapsed_time = time_init
cntrs = LACounters()
max_f = false
OK = nrm0 <= atol
k = 0
#@info log_header([:iter, :un, :time], [Int, T, T])
#@info log_row(Any[0, res[1], elapsed_time])
while !OK && (k <= max_iter) && (elapsed_time - time_init <= max_time) && !max_f
#rand a number between 1 and n
#224.662 ns (4 allocations: 79 bytes) - independent of the n
i = mod(k, n) + 1#Int(floor(rand() * n) + 1)
Ai = A[:, i]
#ei = zeros(n); ei[i] = 1.0 #unit vector in R^n
#xk = Ai == 0 ? x0 : x0 - dot(Ai,res)/norm(Ai,2)^2 * ei
Aires = @kdot(m, Ai, res)
nAi = @kdot(m, Ai, Ai)
x[i] -= Aires / nAi
#res = b - A*x
res += Ai * Aires / nAi
cntrs.nprod += 1
nrm = norm(res, Inf)
OK = nrm <= atol + nrm0 * rtol
sum, max_f = 0, false
for f in [:nprod, :ntprod, :nctprod]
ff = getfield(cntrs, f)
max_f = max_f || (ff > max_cntrs[f])
sum += ff
end
max_f = max_f || sum > max_cntrs[:neval_sum]
k += 1
elapsed_time = time()
if mod(k, verbose) == 0 #print every 20 iterations
# @info log_row(Any[k, res[1], elapsed_time])
end
end
return x, OK, k
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2376 | using LinearAlgebra, Krylov, LinearOperators, SparseArrays, Stopping
#Krylov @kdot
# Stride-1 dot product shortcut: `@kdot(n, x, y)` expands, in the caller's
# scope (esc), to `Krylov.krylov_dot(n, x, 1, y, 1)`.
macro kdot(n, x, y)
  return esc(:(Krylov.krylov_dot($n, $x, 1, $y, 1)))
end
using SolverTools, Logging
"""
Randomized coordinate descent
Sect. 3.7 in Gower, R. M., & Richtárik, P. (2015).
Randomized iterative methods for linear systems.
SIAM Journal on Matrix Analysis and Applications, 36(4), 1660-1690.
Using Stopping
"""
function StopRandomizedCD(
A::AbstractMatrix,
b::AbstractVector{T};
is_zero_start::Bool = true,
x0::AbstractVector{T} = zeros(T, size(A, 2)),
atol::AbstractFloat = 1e-7,
rtol::AbstractFloat = 1e-15,
max_iter::Int = size(A, 2)^2,
verbose::Int = 100,
kwargs...,
) where {T <: AbstractFloat}
stp = LAStopping(
LinearSystem(A, b),
GenericState(x0, similar(b)),
max_cntrs = Stopping._init_max_counters_linear_operators(),
atol = atol,
rtol = rtol,
max_iter = max_iter,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
retol = false,
optimality_check = (pb::LinearSystem, state::GenericState{Vector{T}, Vector{T}}) -> state.res,
kwargs...,
)
return StopRandomizedCD(stp, is_zero_start = is_zero_start, verbose = verbose, kwargs...)
end
"""
    StopRandomizedCD(stp; is_zero_start = true, verbose = 100)

Cyclic coordinate descent on the linear system stored in `stp.pb`, using the
standard `update_and_start!`/`update_and_stop!` Stopping workflow. Each
iteration exactly minimizes the residual along one coordinate of `x`, with
`res = b - A*x` maintained incrementally. Returns the (mutated) `stp`.
"""
function StopRandomizedCD(
  stp::AbstractStopping;
  is_zero_start::Bool = true,
  verbose::Int = 100,
  kwargs...,
)
  state = stp.current_state
  A, b = stp.pb.A, stp.pb.b
  m, n = size(A)
  x = state.x
  # For x == 0 the residual b - A*x is just b, so skip the matrix product.
  res = is_zero_start ? b : b - A * x
  OK = update_and_start!(stp, res = res)
  while !OK
    # Deterministic cyclic coordinate choice (cheaper than a random pick).
    i = mod(stp.meta.nb_of_stop, n) + 1
    Ai = A[:, i]
    Aires = @kdot(m, Ai, res)
    nAi = @kdot(m, Ai, Ai)
    if nAi != 0 # identically-zero column: nothing to update (avoid 0/0)
      # Exact minimization along coordinate i. Sign fixed: the previous
      # `x[i] -= ...; res += ...` pair made the residual grow instead of
      # shrink (Ai'res doubled at each visit instead of vanishing).
      x[i] += Aires / nAi
      res = res - Ai * Aires / nAi
    end
    OK = update_and_stop!(stp, x = x, res = res)
    if mod(stp.meta.nb_of_stop, verbose) == 0 # print every `verbose` iterations
      # logging intentionally disabled
    end
  end
  return stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 3266 | using LinearAlgebra, Krylov, LinearOperators, SparseArrays, Stopping
#Krylov @kdot
# Stride-1 dot product shortcut: `@kdot(n, x, y)` expands, in the caller's
# scope (esc), to `Krylov.krylov_dot(n, x, 1, y, 1)`.
macro kdot(n, x, y)
  return esc(:(Krylov.krylov_dot($n, $x, 1, $y, 1)))
end
using SolverTools, Logging
"""
Randomized coordinate descent
Sect. 3.7 in Gower, R. M., & Richtárik, P. (2015).
Randomized iterative methods for linear systems.
SIAM Journal on Matrix Analysis and Applications, 36(4), 1660-1690.
Using Stopping and a fake line search
"""
function StopRandomizedCD_LS(
A::AbstractMatrix,
b::AbstractVector{T};
is_zero_start::Bool = true,
x0::AbstractVector{T} = zeros(T, size(A, 2)),
atol::AbstractFloat = 1e-7,
rtol::AbstractFloat = 1e-15,
max_iter::Int = size(A, 2)^2,
verbose::Int = 100,
kwargs...,
) where {T <: AbstractFloat}
stp = LAStopping(
LinearSystem(A, b),
GenericState(x0, similar(b)),
max_cntrs = Stopping._init_max_counters_linear_operators(),
atol = atol,
rtol = rtol,
max_iter = max_iter,
tol_check = (atol, rtol, opt0) -> (atol + rtol * opt0),
retol = false,
optimality_check = (pb::LinearSystem, state::GenericState{Vector{T}, Vector{T}}) -> state.res,
kwargs...,
)
return StopRandomizedCD_LS(stp, is_zero_start = is_zero_start, verbose = verbose, kwargs...)
end
"""
    StopRandomizedCD_LS(stp; is_zero_start = true, verbose = 100)

Same cyclic coordinate descent as `StopRandomizedCD`, but it also builds an
artificial "line-search" sub-Stopping to measure the cost of creating one.
The expensive part is the `StoppingMeta` construction, avoided here via
`cheap_stop_remote_control()`. Returns the (mutated) `stp`.
"""
function StopRandomizedCD_LS(
  stp::AbstractStopping;
  is_zero_start::Bool = true,
  verbose::Int = 100,
  kwargs...,
)
  state = stp.current_state
  A, b = stp.pb.A, stp.pb.b
  m, n = size(A)
  # For x == 0 the residual b - A*x is just b, so skip the matrix product.
  state.res = is_zero_start ? b : b - A * state.x
  OK = start!(stp)
  # Fake line-search sub-stopping: shares the iterate and points back to the
  # main stopping through main_stp.
  substate = GenericState(state.x)
  substp = GenericStopping(stp.pb.b, cheap_stop_remote_control(), substate, main_stp = stp)
  while !OK
    # Deterministic cyclic coordinate choice (cheaper than a random pick).
    i = mod(stp.meta.nb_of_stop, n) + 1
    Ai = A[:, i]
    Aires = @kdot(m, Ai, state.res)
    nAi = @kdot(m, Ai, Ai)
    if nAi != 0 # identically-zero column: nothing to update (avoid 0/0)
      # Exact minimization along coordinate i. Sign fixed: the former
      # `-=`/`+=` pair made the residual grow instead of shrink.
      state.x[i] += Aires / nAi
      state.res -= Ai * Aires / nAi
    end
    # To time a sub-solve, reinit!(substp) and solve_fake_subproblem!(substp)
    # would be called here.
    OK = stop!(stp)
    if mod(stp.meta.nb_of_stop, verbose) == 0 # print every `verbose` iterations
      # logging intentionally disabled
    end
  end
  return stp
end
# Run one start!/stop! cycle on a sub-stopping; used (when enabled above) to
# time the overhead of "solving" an artificial subproblem.
function solve_fake_subproblem!(stp::GenericStopping)
  start!(stp)
  return stop!(stp)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 204 | function Armijo(nlp, f, g, x, d, τ₀)
# Simple Armijo backtracking
hp0 = g' * d
t = 1.0
ft = obj(nlp, x + t * d)
while ft > (f + τ₀ * t * hp0)
t /= 2.0
ft = obj(nlp, x + t * d)
end
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 764 | function Newton_Spectral(
nlp::AbstractNLPModel,
x₀::AbstractVector;
τ₀::Float64 = 0.0005,
ϵ::Float64 = 1e-6,
maxiter::Int = 200,
)
x = copy(x₀)
iter = 0
f, g = obj(nlp, x), grad(nlp, x)
while (norm(g, Inf) > ϵ) && (iter <= maxiter)
H = Matrix(Symmetric(hess(nlp, x), :L))
Δ, O = eigen(H)
# Boost negative values of Δ to 1e-8
D = Δ .+ max.((1e-8 .- Δ), 0.0)
d = -O * diagm(1.0 ./ D) * O' * g
# Simple Armijo backtracking
hp0 = g' * d
t = 1.0
ft = obj(nlp, x + t * d)
while ft > (f + τ₀ * t * hp0)
t /= 2.0
ft = obj(nlp, x + t * d)
end
x += t * d
f, g = ft, grad(nlp, x)
iter += 1
end
if iter > maxiter
@warn "Iteration limit"
end
return x, f, norm(g), iter
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 808 | function Newton_Stop(
  nlp::AbstractNLPModel,
  x₀::AbstractVector;
  τ₀::Float64 = 0.0005, # Armijo sufficient-decrease parameter
  stp::NLPStopping = NLPStopping(nlp, NLPAtX(x₀)),
  optimality_check = unconstrained_check, # NOTE(review): accepted but never used — wire into stp?
  atol = 1e-6, # NOTE(review): accepted but never used
  max_iter = 200, # NOTE(review): accepted but never used
)
  # Newton's method with spectral correction of the Hessian, driven by a
  # Stopping object. Relies on Armijo returning (t, f, g) at the new point.
  # Returns (x, f(x), ‖∇f(x)‖, stp).
  x = copy(x₀)
  f, g = obj(nlp, x), grad(nlp, x)
  OK = update_and_start!(stp, x = x, fx = f, gx = g)
  while !OK
    Hx = hess(nlp, x)
    H = Matrix(Symmetric(Hx, :L)) # densify the lower-triangular Hessian
    Δ, O = eigen(H)
    # Boost negative values of Δ to 1e-8
    D = Δ .+ max.((1e-8 .- Δ), 0.0)
    d = -O * diagm(1.0 ./ D) * O' * g # Newton direction on the corrected spectrum
    # Armijo backtracking
    t, f, g = Armijo(nlp, f, g, x, d, τ₀)
    x += t * d
    OK = update_and_stop!(stp, x = x, gx = g, Hx = H)
  end
  if !stp.meta.optimal
    @warn "Optimality not reached"
    @info status(stp, list = true)
  end
  return x, f, norm(g), stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 911 | function Newton_StopLS(
nlp::AbstractNLPModel,
x₀::AbstractVector;
τ₀::Float64 = 0.0005,
τ₁::Float64 = 0.99,
stp::NLPStopping = NLPStopping(nlp, NLPAtX(x₀)),
optimality_check = unconstrained_check,
atol = 1e-6,
max_iter = 200,
)
# Modified-Newton iteration whose Armijo-Wolfe line search is itself driven
# by a reusable sub-Stopping (built once by prepare_LS, rebased by
# linesearch). Returns (x, f, ‖g‖, stp).
# NOTE(review): `optimality_check`, `atol` and `max_iter` are accepted but
# unused in this body — TODO confirm they were meant to configure `stp`.
x = copy(x₀)
f, g = obj(nlp, x), grad(nlp, x)
OK = update_and_start!(stp, x = x, fx = f, gx = g)
d = similar(x)
# Build the 1D model and its sub-Stopping once, outside the loop.
ϕ, ϕstp = prepare_LS(stp, x, d, τ₀, f, g)
while !OK
Hx = hess(nlp, x)
H = Matrix(Symmetric(Hx, :L))
Δ, O = eigen(H)
# Boost negative values of Δ to 1e-8
D = Δ .+ max.((1e-8 .- Δ), 0.0)
d = -O * diagm(1.0 ./ D) * O' * g
# Simple line search call
# (linesearch returns (t, ft, gt, ϕstp); the trailing ϕstp is discarded here)
t, f, g = linesearch(ϕ, ϕstp, x, d, f, g, τ₀, τ₁)
x += t * d
OK = update_and_stop!(stp, x = x, gx = g, Hx = H)
end
if !stp.meta.optimal
@warn "Optimality not reached"
@info status(stp, list = true)
end
return x, f, norm(g), stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1041 | #linesearch is an intermediary between NewtonStopLS and a 1D solver.
#change the name?
function linesearch(
ϕ::LSModel,
ϕstp::AbstractStopping, # be more specific for stp
x₀::AbstractVector,
d::AbstractVector,
f₀::AbstractFloat,
∇f₀::AbstractVector,
τ₀::Real,
τ₁::Real,
)
# Run an Armijo-Wolfe line search along d from x₀ by minimizing the 1D
# model ϕ with the reusable sub-Stopping ϕstp. Returns (t, ft, gt, ϕstp).
# rebase the LSModel
g₀ = dot(∇f₀, d)
rebase!(ϕ, x₀, d, τ₀, f₀, g₀)
# convert the Armijo and Wolfe criteria to an asymmetric interval [α,β]
α = (τ₁ - τ₀) * g₀
β = -(τ₁ + τ₀) * g₀
# reuse the stopping
reinit!(ϕstp)
ϕstp.pb = ϕ
# redefine the optimality_check function using α and β
ϕstp.optimality_check = (p, s) -> optim_check_LS(p, s, α, β)
# optimize in the interval [0.0,Inf]
ϕstp = min_1D(ϕ, 0.0, Inf, α, β, stp = ϕstp)
# unpack the results
t = ϕstp.current_state.x #Tanj: Shouldn't we check if ϕstp.meta.optimal = true?
ft = ϕ.f # must rely on the solver (min_1D) so that the last evaluation was
gt = ϕ.∇f # at the optimal step, so that the stored value for f and ∇f are valid
return t, ft, gt, ϕstp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 662 | function prepare_LS(stp, x₀, d, τ₀, f₀, ∇f₀)
# Build the 1D line-search model and its sub-Stopping once, so both can be
# rebased/reinitialized at each outer iteration instead of reallocated.
# Returns (ϕ, ϕstp).
# extract the nlp
nlp = stp.pb
# construct the line search model, which will be rebased at each iteration'current data
ϕ = LSModel(nlp, x₀, d, τ₀, f₀, dot(∇f₀, d))
# instantiate stp, which will be adjusted at each iteration's current data
ϕstp = LS_Stopping(
ϕ,
LSAtT(0.0),
# NOTE(review): only a 4-argument optim_check_LS(p, s, α, β) is visible in
# this file; evaluating this 2-argument closure would raise a MethodError.
# linesearch overwrites optimality_check before use — TODO confirm safe.
optimality_check = (p, s) -> optim_check_LS(p, s), #Tanj: already set optim_check_LS(p,s,α,β)?
main_stp = stp,
max_iter = 40,
atol = 0.0, # to rely only on the Armijo-Wolfe conditions
rtol = 0.0, # otherwise, tolerance may prohibit convergence
unbounded_threshold = 1e100,
)
return ϕ, ϕstp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2582 | ################################################################################
#VERSION 1
#linesearch is an intermediary between NewtonStopLS and a 1D solver.
function preparelinesearch(
stp::NLPStopping,
x₀::AbstractVector,
d::AbstractVector,
f₀::AbstractFloat,
∇f₀::AbstractVector,
τ₀::Real,
τ₁::Real,
)
# VERSION 1: build the LSModel and its LS_Stopping from scratch at every
# call, solve the 1D problem with min_1D, and fall back to t = 0 on
# failure (flagging stp.meta.fail_sub_pb). Returns (t, ft, gt).
# initialize the LSModel
g₀ = dot(∇f₀, d)
ϕ = LSModel(stp.pb, x₀, d, τ₀, f₀, g₀)
# convert the Armijo and Wolfe criteria to an asymmetric interval [α,β]
α = (τ₁ - τ₀) * g₀
β = -(τ₁ + τ₀) * g₀
# reuse the stopping
ϕstp = LS_Stopping(
ϕ,
LSAtT(0.0, ht = f₀, gt = ∇f₀),
optimality_check = (p, s) -> optim_check_LS(p, s, α, β),
main_stp = stp,
max_iter = 40,
atol = 0.0, # to rely only on the Armijo-Wolfe conditions
rtol = 0.0, # otherwise, tolerance may prohibit convergence
unbounded_threshold = 1e100,
)
# optimize in the interval [0.0,Inf]
ϕstp = min_1D(ϕ, 0.0, Inf, α, β, stp = ϕstp)
# unpack the results
if ϕstp.meta.optimal
t, ft, gt = ϕstp.current_state.x, ϕstp.current_state.ht, ϕstp.current_state.gt
else
# Line search failed: keep the current point and flag the sub-problem failure.
t, ft, gt = 0.0, f₀, ∇f₀
stp.meta.fail_sub_pb = true
end
return t, ft, gt
end
# Score for the Armijo-Wolfe check: zero when the directional derivative
# `s.gt` lies inside the admissible interval [α, β], otherwise the distance
# to that interval.
function optim_check_LS(p, s::LSAtT, α::Float64, β::Float64)
    below = α - s.gt
    above = s.gt - β
    return max(below, above, 0.0)
end
################################################################################
#VERSION 2
#Not an equivalent way would be with tol_check:
function preparelinesearch(
stp::NLPStopping,
x₀::AbstractVector,
d::AbstractVector,
f₀::AbstractFloat,
∇f₀::AbstractVector,
τ₀::Real,
τ₁::Real,
)
# VERSION 2: same contract as VERSION 1, but encodes the Armijo-Wolfe
# interval [α,β] through tol_check/tol_check_neg of an NLPStopping instead
# of a custom optimality_check. Returns (t, ft, gt).
# NOTE(review): identical signature to VERSION 1 above — whichever
# definition is evaluated last silently overwrites the other.
# initialize the LSModel
g₀ = dot(∇f₀, d)
ϕ = LSModel(stp.pb, x₀, d, τ₀, f₀, g₀)
#Instead of redefining LSModel we can use the known ADNLPModel ? (not optimal though)
#ϕ = ADNLPModel(t -> (obj(stp.pb, x₀ + t*d) - f₀ - τ₀ * t * g₀), [0.0], lvar = [0.0], uvar = [Inf])
# convert the Armijo and Wolfe criteria to an asymmetric interval [α,β]
α = (τ₁ - τ₀) * g₀
β = -(τ₁ + τ₀) * g₀
# reuse the stopping
ϕstp = NLPStopping(
ϕ,
NLPAtX([0.0], fx = f₀, gx = ∇f₀),
optimality_check = unconstrained_check,
main_stp = stp,
max_iter = 40,
unbounded_threshold = 1e100,
tol_check = (a, b, c) -> β,
tol_check_neg = (a, b, c) -> α,
)
# optimize in the interval [0.0,Inf]
ϕstp = min_1D(ϕ, 0.0, Inf, stp = ϕstp)
# unpack the results
if ϕstp.meta.optimal
# NOTE(review): the state here is an NLPAtX, whose fields (as declared in
# this file) are fx/gx — `ht`/`gt` look like LSAtT field names and would
# raise an error on access; TODO confirm.
t, ft, gt = ϕstp.current_state.x, ϕstp.current_state.ht, ϕstp.current_state.gt
else
t, ft, gt = 0.0, f₀, ∇f₀
stp.meta.fail_sub_pb = true
end
return t, ft, gt
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 167 | using LinearAlgebra, NLPModels, Stopping
include("NewtonSolver.jl")
include("Armijo.jl")
include("NewtonStop.jl")
include("prepare_LS.jl")
include("NewtonStopLS.jl")
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 7267 | """
Module Stopping:
## Purpose
Tools to ease the uniformization of stopping criteria in iterative solvers.
When a solver is called on an optimization model, four outcomes may happen:
1. the approximate solution is obtained, the problem is considered solved
2. the problem is declared unsolvable (unboundedness, infeasibility ...)
3. the maximum available resources are not sufficient to compute the solution
4. some algorithm dependent failure happens
This tool eases the first three items above. It defines a type
mutable struct GenericStopping <: AbstractStopping
problem :: Any # an arbitrary instance of a problem
meta :: AbstractStoppingMeta # contains the used parameters
current_state :: AbstractState # the current state
The `StoppingMeta` provides default tolerances, maximum resources, ... as well as (boolean) information on the result.
### Your Stopping your way
The `GenericStopping` (with `GenericState`) provides a complete structure to handle stopping criteria.
Then, depending on the problem structure, you can specialize a new Stopping by
redefining a State and some functions specific to your problem.
See also `NLPStopping`, `NLPAtX`, `LS_Stopping`, `OneDAtX`
In these examples, the function `optimality_residual` computes the residual of the optimality conditions is an additional attribute of the types.
## Functions
The tool provides two main functions:
- `start!(stp :: AbstractStopping)` initializes the time and the tolerance at the starting point and check wether the initial guess is optimal.
- `stop!(stp :: AbstractStopping)` checks optimality of the current guess as well as failure of the system (unboundedness for instance) and maximum resources (number of evaluations of functions, elapsed time ...)
Stopping uses the informations furnished by the State to evaluate its functions. Communication between the two can be done through the following functions:
- `update_and_start!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call start!.
- `update_and_stop!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call stop!.
- `fill_in!(stp :: AbstractStopping, x :: T)` a function that fill in all the State with all the informations required to correctly evaluate the stopping functions. This can reveal useful, for instance, if the user do not trust the informations furnished by the algorithm in the State.
- `reinit!(stp :: AbstractStopping)` reinitialize the entries of
the Stopping to reuse for another call.
"""
module Stopping
using LinearAlgebra, LinearOperators, SparseArrays
using DataFrames, LLSModels, NLPModels, Printf
"""
AbstractState:
Abstract type, if specialized state were to be implemented they would need to
be subtypes of `AbstractState`.
"""
abstract type AbstractState{S, T} end
include("State/GenericStatemod.jl")
include("State/OneDAtXmod.jl")
include("State/NLPAtXmod.jl")
export scoretype, xtype
export AbstractState, GenericState, update!, copy, compress_state!, copy_compress_state
export OneDAtX, update!
export NLPAtX, update!
export set_x!, set_d!, set_res!, set_current_score!, set_fx!, set_gx!, set_cx!, set_lambda!, set_mu!
include("State/ListOfStates.jl")
export AbstractListofStates, ListofStates, VoidListofStates
export add_to_list!, length, print, getindex, state_type
# Rewrite a bare symbol `es` into a state-prefixed name when `es` matches a
# field of GenericState (used by the @instate macro).
# NOTE(review): Symbol(stt, ".$t") builds a single dotted symbol (e.g.
# Symbol("stp.x")), not a field-access Expr — TODO confirm intended.
function _instate(stt::Symbol, es::Symbol)
for t in fieldnames(GenericState)
if es == t
es = esc(Symbol(stt, ".$t"))
end
end
es
end
# Non-symbol, non-expression leaves (literals, etc.) pass through unchanged.
function _instate(state::Symbol, a::Any)
a
end
# Recursively rewrite every argument of an expression tree in place.
function _instate(state::Symbol, ex::Expr)
for i = 1:length(ex.args)
ex.args[i] = _instate(state, ex.args[i])
end
ex
end
"""
`@instate state expression`
Macro that set the prefix state. to all the variables whose name belong to the
field names of the state.
"""
macro instate(state::Symbol, ex)
# Only expressions need rewriting; bare symbols/literals are returned as-is.
if typeof(ex) == Expr
ex = _instate(state, ex)
end
ex
end
export @instate
include("Stopping/StopRemoteControl.jl")
export AbstractStopRemoteControl, StopRemoteControl, cheap_stop_remote_control
"""
AbstractStoppingMeta
Abstract type, if specialized meta for stopping were to be implemented they
would need to be subtypes of AbstractStoppingMeta
"""
abstract type AbstractStoppingMeta end
"""
AbstractStopping
Abstract type, if specialized stopping were to be implemented they would need to
be subtypes of AbstractStopping
"""
abstract type AbstractStopping{
Pb <: Any,
M <: AbstractStoppingMeta,
SRC <: AbstractStopRemoteControl,
T <: AbstractState,
MStp <: Any, #AbstractStopping
LoS <: AbstractListofStates,
} end
# Auto-generate, document and export a get_<field> accessor stub for each
# conceptual Stopping component; only `function $meth end` is declared here,
# concrete methods are expected elsewhere.
for field in [:pb, :meta, :remote, :state, :main_stp, :list_of_states, :user_struct]
meth = Symbol("get_", field)
@eval begin
@doc """
$($meth)(stp::AbstractStopping)
Return the value $($(QuoteNode(field))) from `stp`.
"""
function $meth end
end
@eval export $meth
end
include("Stopping/StoppingMetamod.jl")
export AbstractStoppingMeta, StoppingMeta, tol_check, update_tol!, OK_check
# Placeholder Stopping used where "no stopping" is needed (e.g. as main_stp).
struct VoidStopping{Pb, M, SRC, T, MStp, LoS} <: AbstractStopping{Pb, M, SRC, T, MStp, LoS} end
# Convenience constructor with fully generic/default type parameters.
function VoidStopping()
return VoidStopping{
Any,
StoppingMeta,
StopRemoteControl,
GenericState,
Nothing,
VoidListofStates,
}()
end
export AbstractStopping, VoidStopping
import Base.show
# A VoidStopping carries no data: just print its concrete type.
show(io::IO, stp::VoidStopping) = println(io, typeof(stp))
# Multi-line summary of a Stopping: type, main_stp, state-history policy,
# problem, and user structure.
function show(io::IO, stp::AbstractStopping)
println(io, typeof(stp))
#print(io, stp.meta) #we can always print stp.meta
#print(io, stp.stop_remote) #we can always print stp.stop_remote
#print(io, stp.current_state) #we can always print stp.current_state
if !(typeof(stp.main_stp) <: VoidStopping)
println(io, "It has a main_stp $(typeof(stp.main_stp))")
else
println(io, "It has no main_stp.")
end
# NOTE(review): accesses stp.listofstates and stp.stopping_user_struct below,
# while the generated getters use the names list_of_states/user_struct —
# TODO confirm concrete subtypes define fields with these exact names.
if typeof(stp.listofstates) != VoidListofStates
nmax = stp.listofstates.n == -1 ? Inf : stp.listofstates.n
println(io, "It handles a list of states $(typeof(stp.listofstates)) of maximum length $(nmax)")
else
println(io, "It doesn't keep track of the state history.")
end
# Fall back to a type-only message when the problem cannot be shown.
try
print(io, "Problem is ")
show(io, stp.pb)
print(io, " ")
catch
print(io, "Problem is $(typeof(stp.pb)). ")
end
if stp.stopping_user_struct != Dict()
try
print(io, "The user-defined structure is ")
show(io, stp.stopping_user_struct)
catch
print(io, "The user-defined structure is of type $(typeof(stp.stopping_user_struct)).\n")
end
else
print(io, "No user-defined structure is furnished.\n")
end
end
include("Stopping/GenericStoppingmod.jl")
include("Stopping/NLPStoppingmod.jl")
export GenericStopping, start!, stop!, cheap_stop!, update_and_start!
export update_and_stop!, cheap_update_and_stop!, cheap_update_and_start!
export fill_in!, reinit!, status, elapsed_time
export NLPStopping, unconstrained_check, max_evals!
export optim_check_bounded, KKT, init_max_counters, init_max_counters_NLS
include("Stopping/LinearAlgebraStopping.jl")
export LAStopping, LinearSystem, LACounters, linear_system_check, normal_equation_check
export init_max_counters_linear_operators
end # end of module
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 11245 | _init_field(t::Type) = _init_field(Val{t}())
# Canonical "uninitialized" value for each supported field type: empty
# containers for arrays/operators, NaN for floats, sentinels otherwise.
_init_field(::Val{T}) where {T <: AbstractMatrix} = zeros(eltype(T), 0, 0)
_init_field(::Val{T}) where {T <: LinearOperator} =
LinearOperator(eltype(T), 0, 0, true, true, (res, v, α, β) -> zero(eltype(T)))
_init_field(::Val{T}) where {T <: SparseMatrixCSC} = sparse(zeros(eltype(T), 0, 0))
_init_field(::Val{T}) where {T <: AbstractVector} = zeros(eltype(T), 0)
_init_field(::Val{T}) where {T <: SparseVector} = sparse(zeros(eltype(T), 0))
_init_field(::Val{BigFloat}) = BigFloat(NaN)
_init_field(::Val{Float64}) = NaN
_init_field(::Val{Float32}) = NaN32
_init_field(::Val{Float16}) = NaN16
_init_field(::Val{Missing}) = missing
_init_field(::Val{Nothing}) = nothing
_init_field(::Val{Bool}) = false #unfortunately no way to differentiate
# Integers have no NaN: use typemin as the sentinel.
_init_field(::Val{T}) where {T <: Number} = typemin(T)
_init_field(::Val{Counters}) = Counters()
"""
Type: `GenericState`
Methods: `update!`, `reinit!`
A generic State to describe the state of a problem at a point x.
Tracked data include:
- x : current iterate
- d [opt] : search direction
- res [opt] : residual
- current_time : time
- current_score : score
Constructors:
`GenericState(:: T, :: S; d :: T = _init_field(T), res :: T = _init_field(T), current_time :: Float64 = NaN) where {S, T <:AbstractVector}`
`GenericState(:: T; d :: T = _init_field(T), res :: T = _init_field(T), current_time :: Float64 = NaN, current_score :: Union{T,eltype(T)} = _init_field(eltype(T))) where T <:AbstractVector`
Note:
- By default, unknown entries are set using `_init_field`.
- By default the type of `current_score` is `eltype(x)` and cannot be changed once the State is created.
To have a vectorized `current_score` of length n, try something like `GenericState(x, Array{eltype(x),1}(undef, n))`.
Examples:
`GenericState(x)`
`GenericState(x, Array{eltype(x),1}(undef, length(x)))`
`GenericState(x, current_time = 1.0)`
`GenericState(x, current_score = 1.0)`
See also: `Stopping`, `NLPAtX`
"""
mutable struct GenericState{S, T <: Union{AbstractFloat, AbstractVector}} <: AbstractState{S, T}
x::T # current iterate
d::T # search direction (may be uninitialized, see _init_field)
res::T # residual (may be uninitialized)
#Current time
current_time::Float64
#Current score
current_score::S
# Inner constructor with the score passed positionally.
function GenericState(
x::T,
current_score::S;
d::T = _init_field(T),
res::T = _init_field(T),
current_time::Float64 = NaN,
) where {S, T <: AbstractVector}
return new{S, T}(x, d, res, current_time, current_score)
end
end
"""
    GenericState(x; d, res, current_time, current_score)

Keyword-only convenience constructor: forwards to the positional
`GenericState(x, current_score; ...)` form, defaulting every optional
entry through `_init_field`.
"""
function GenericState(
    x::T;
    d::T = _init_field(T),
    res::T = _init_field(T),
    current_time::Float64 = NaN,
    current_score::Union{T, eltype(T)} = _init_field(eltype(T)),
) where {T <: AbstractVector}
    return GenericState(x, current_score; d = d, res = res, current_time = current_time)
end
# Type parameters of a state: S = score type, T = iterate type.
scoretype(typestate::AbstractState{S, T}) where {S, T} = S
xtype(typestate::AbstractState{S, T}) where {S, T} = T
# Auto-generate, document and export a get_<field> accessor for every
# GenericState field.
for field in fieldnames(GenericState)
meth = Symbol("get_", field)
@eval begin
@doc """
$($meth)(state)
Return the value $($(QuoteNode(field))) from the state.
"""
$meth(state::GenericState) = getproperty(state, $(QuoteNode(field)))
end
@eval export $meth
end
# Vector-valued score: overwrite in place (broadcast keeps the same storage).
function set_current_score!(state::GenericState{S, T}, current_score::S) where {S, T}
state.current_score .= current_score
return state
end
# Scalar scores are immutable: rebind the field instead.
function set_current_score!(state::GenericState{S, T}, current_score::S) where {S <: Number, T}
state.current_score = current_score
return state
end
# In-place setters for the iterate, direction and residual: when the stored
# vector already has the right length the new values are broadcast into it
# (no allocation); otherwise the field is rebound to the given vector.

function set_x!(state::GenericState{S, T}, x::T) where {S, T}
    length(state.x) == length(x) ? (state.x .= x) : (state.x = x)
    return state
end

function set_d!(state::GenericState{S, T}, d::T) where {S, T}
    length(state.d) == length(d) ? (state.d .= d) : (state.d = d)
    return state
end

function set_res!(state::GenericState{S, T}, res::T) where {S, T}
    length(state.res) == length(res) ? (state.res .= res) : (state.res = res)
    return state
end
"""
`update!(:: AbstractState; convert = false, kwargs...)`
Generic update function for the State
The function compares the kwargs and the entries of the State.
If the type of the kwargs is the same as the entry, then
it is updated.
Set kargs `convert` to true to update even incompatible types.
Examples:
`update!(state1)`
`update!(state1, current_time = 2.0)`
`update!(state1, convert = true, current_time = 2.0)`
See also: `GenericState`, `reinit!`, `update_and_start!`, `update_and_stop!`
"""
function update!(stateatx::T; convert::Bool = false, kwargs...) where {T <: AbstractState}
fnames = fieldnames(T)
for k ∈ keys(kwargs)
#check if k is in fieldnames and type compatibility
# (`convert = true` skips the type check; setfield! may still throw on a
# truly incompatible assignment)
if (k ∈ fnames) && (convert || typeof(kwargs[k]) <: typeof(getfield(stateatx, k)))
setfield!(stateatx, k, kwargs[k])
end
end
return stateatx
end
#Ca serait cool d'avoir un shortcut en repérant certains keywords
#ou si il n'y a aucun keyword!
#function update!(stateatx :: T; x :: TT = stateatx.x) where {TS, TT, T <: AbstractState{TS,TT}}
# setfield!(stateatx, :x, x)
# return stateatx
#end
"""
`_smart_update!(:: AbstractState; kwargs...)`
Generic update function for the State without Type verification.
The function works exactly as update! without type and field verifications.
So, affecting a value to nothing or a different type will return an error.
See also: `update!`, `GenericState`, `reinit!`, `update_and_start!`, `update_and_stop!`
"""
function _smart_update!(stateatx::T; kwargs...) where {T <: AbstractState}
    # No field-name or type checks: setfield! errors on any mismatch.
    for (key, val) in pairs(kwargs)
        setfield!(stateatx, key, val)
    end
    return stateatx
end
#https://github.com/JuliaLang/julia/blob/f3252bf50599ba16640ef08eb1e43c632eacf264/base/Base.jl#L34
# Fast path: write only the current_time field, no checks.
function _update_time!(stateatx::T, current_time::Float64) where {T <: AbstractState}
setfield!(stateatx, :current_time, current_time)
return stateatx
end
"""
`reinit!(:: AbstractState, :: T; kwargs...)`
Function that set all the entries at `_init_field` except the mandatory `x`.
Note: If `x` is given as a kargs it will be prioritized over
the second argument.
Examples:
`reinit!(state2, zeros(2))`
`reinit!(state2, zeros(2), current_time = 1.0)`
There is a shorter version of reinit! reusing the `x` in the state
`reinit!(:: AbstractState; kwargs...)`
Examples:
`reinit!(state2)`
`reinit!(state2, current_time = 1.0)`
"""
function reinit!(stateatx::St, x::T; kwargs...) where {S, T, St <: AbstractState{S, T}}
#for k not in the kwargs
# Reset every field not explicitly overridden (except :x) to its
# uninitialized value, then install the new iterate.
for k ∈ setdiff(fieldnames(St), keys(kwargs))
if k != :x
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
end
setfield!(stateatx, :x, x)
if length(kwargs) == 0
return stateatx #save the update! call if no other kwargs than x
end
return update!(stateatx; kwargs...)
end
# Variant that keeps the current iterate and resets everything else.
function reinit!(stateatx::T; kwargs...) where {T <: AbstractState}
for k ∈ setdiff(fieldnames(T), keys(kwargs))
if k != :x
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
end
return update!(stateatx; kwargs...)
end
"""
`_domain_check(:: AbstractState; kwargs...)`
Returns true if there is a `NaN` or a `Missing` in the state entries (short-circuiting), false otherwise.
Note:
- The fields given as keys in kwargs are not checked.
Examples:
`_domain_check(state1)`
`_domain_check(state1, x = true)`
"""
function _domain_check(stateatx::T; kwargs...) where {T <: AbstractState}
    # Short-circuit over the fields: true as soon as one non-excluded entry
    # contains a NaN or a missing value.
    excluded = keys(kwargs)
    return any(
        k -> !(k in excluded) && Stopping._check_nan_miss(getfield(stateatx, k)),
        fieldnames(T),
    )
end
# Fallback for field types with no NaN/missing notion (Nothing, Counters, ...).
_check_nan_miss(field::Any) = false #Nothing or Counters
# Only scan stored nonzeros of a sparse matrix.
_check_nan_miss(field::SparseMatrixCSC) = any(isnan, field.nzval) #because checking in sparse matrices is too slow
_check_nan_miss(field::Union{AbstractVector, AbstractMatrix}) = any(isnan, field)
#We don't check for NaN's in Float as they are the _init_field
_check_nan_miss(field::AbstractFloat) = ismissing(field)
import Base.copy
# _genobj(T) creates an uninitialized instance of T via `new`
# (generated through eval so it works for any concrete state type).
ex = :(_genobj(typ) = $(Expr(:new, :typ)));
eval(ex);
# Deep copy of a state: allocate an uninitialized instance, then deepcopy
# every field into it.
function copy(state::T) where {T <: AbstractState}
#ex=:(_genobj(typ)=$(Expr(:new, :typ))); eval(ex)
cstate = _genobj(T)
#cstate = $(Expr(:new, typeof(state)))
for k ∈ fieldnames(T)
setfield!(cstate, k, deepcopy(getfield(state, k)))
end
return cstate
end
"""
`compress_state!(:: AbstractState; save_matrix :: Bool = false, max_vector_size :: Int = length(stateatx.x), pnorm :: Real = Inf, keep :: Bool = false, kwargs...)`
compress_state!: compress State with the following rules.
- If it contains matrices and save_matrix is false, then the corresponding entries
are set to _init_field(typeof(getfield(stateatx, k)).
- If it contains vectors with length greater than max_vector_size, then the
corresponding entries are replaced by a vector of size 1 containing its pnorm-norm.
- If keep is true, then only the entries given in kwargs will be saved (the others are set to _init_field(typeof(getfield(stateatx, k))).
- If keep is false and an entry in the State is in the kwargs list, then it is put as _init_field(typeof(getfield(stateatx, k)) if possible.
see also: `copy`, `copy_compress_state`, `ListofStates`
"""
function compress_state!(
stateatx::T;
save_matrix::Bool = false,
max_vector_size::Int = length(stateatx.x),
pnorm::Real = Inf,
keep::Bool = false,
kwargs...,
) where {T <: AbstractState}
for k ∈ fieldnames(T)
# keep = false: kwargs name the fields to DROP; keep = true: the fields
# to RETAIN. The try/catch silently skips fields that cannot be reset.
if k ∈ keys(kwargs) && !keep
try
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
catch
#nothing happens
end
end
if k ∉ keys(kwargs) && keep
try
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
catch
#nothing happens
end
end
# Long vectors are summarized by their pnorm-norm (length-1 vector).
if typeof(getfield(stateatx, k)) <: AbstractVector
katt = getfield(stateatx, k)
if (length(katt) > max_vector_size)
setfield!(stateatx, k, [norm(katt, pnorm)])
end
elseif typeof(getfield(stateatx, k)) <: Union{AbstractArray, AbstractMatrix}
# Matrices are either norm-summarized (save_matrix) or dropped entirely.
if save_matrix
katt = getfield(stateatx, k)
if maximum(size(katt)) > max_vector_size
setfield!(stateatx, k, norm(getfield(stateatx, k)) * ones(1, 1))
end
else #save_matrix is false
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
else
#nothing happens
end
end
return stateatx
end
"""
`copy_compress_state(:: AbstractState; save_matrix :: Bool = false, max_vector_size :: Int = length(stateatx.x), pnorm :: Real = Inf, kwargs...)`
Copy the State and then compress it.
see also: copy, compress_state!, ListofStates
"""
function copy_compress_state(
    stateatx::AbstractState;
    save_matrix::Bool = false,
    max_vector_size::Int = length(stateatx.x),
    pnorm::Real = Inf,
    kwargs...,
)
    # Compress a duplicate so the caller's state is left untouched.
    duplicate = copy(stateatx)
    compress_state!(
        duplicate;
        save_matrix = save_matrix,
        max_vector_size = max_vector_size,
        pnorm = pnorm,
        kwargs...,
    )
    return duplicate
end
import Base.show
# Compact one-line description of any state: its concrete type plus the
# iterate and score type parameters.
function show(io::IO, state::AbstractState)
    summary_line = string(
        typeof(state),
        " with an iterate of type ",
        xtype(state),
        " and a score of type ",
        scoretype(state),
        ".",
    )
    println(io, summary_line)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4607 | abstract type AbstractListofStates end
struct VoidListofStates <: AbstractListofStates end
"""
Type: list of States
Constructor:
`ListofStates(:: AbstractState)`
`ListofStates(n :: Int, :: Val{AbstractState})`
`ListofStates(n :: Int, list :: Array{AbstractState,1})`
`ListofStates(state :: S; n :: Int = -1, kwargs...)`
Note:
- If `n != -1`, then it stores at most n `AbstractState`.
- `ListofStates` recursively handles sub-list of states as the attribute list is
an array of pair whose first component is a, `AbstractState` and the second
component is a `ListofStates` (or `VoidListofStates`).
Examples:
`ListofStates(state)`
`ListofStates(state, n = 2)`
`ListofStates(-1, Val{NLPAtX}())`
`ListofStates(-1, [(state1, VoidListofStates), (state2, VoidListofStates)], 2)`
"""
mutable struct ListofStates{S <: AbstractState, T} <: AbstractListofStates
n::Int #If length of the list is known, -1 if unknown
i::Int #current index in the list/length
# Pairs (state, sub-list); the sub-list is VoidListofStates when unused.
list::Array{Tuple{S, T}, 1}
#Tanj: \TODO Tuple instead of an Array would be better, I think
end
# Element type of the stored states.
state_type(::ListofStates{S, T}) where {S <: AbstractState, T} = S
# Empty list with capacity n and a state type given as Val.
function ListofStates(n::T, ::Val{S}) where {T <: Int, S <: AbstractState}
list = Array{Tuple{S, VoidListofStates}, 1}(undef, 0)
i = 0
return ListofStates(n, i, list)
end
# Wrap a plain vector of states, pairing each with an empty sub-list.
function ListofStates(n::Ti, list::Array{S, 1}) where {S <: AbstractState, Ti <: Int}
i = length(list)
tuple_list = Array{Tuple{S, VoidListofStates}, 1}(undef, 0)
for j = 1:i
push!(tuple_list, (list[j], VoidListofStates()))
end
return ListofStates(n, i, tuple_list)
end
# Vector of (state, sub-list) pairs taken as-is.
function ListofStates(n::Ti, list::Array{Tuple{S, T}, 1}) where {S <: AbstractState, T, Ti <: Int}
i = length(list)
return ListofStates(n, i, list)
end
# Single-state list; the state is compressed before storage.
function ListofStates(state::S; n::Int = -1, kwargs...) where {S <: AbstractState}
i = 1
list = [(copy_compress_state(state; kwargs...), VoidListofStates())]
return ListofStates(n, i, list)
end
"""
add\\_to\\_list!: add a State to the list of maximal size n.
If a n+1-th State is added, the first one in the list is removed.
The given is State is compressed before being added in the list (via State.copy\\_compress\\_state).
`add_to_list!(:: AbstractListofStates, :: AbstractState; kwargs...)`
Note:
- kwargs are passed to the compress_state call.
- does nothing for `VoidListofStates`
see also: ListofStates, State.compress\\_state, State.copy\\_compress\\_state
"""
function add_to_list!(list::AbstractListofStates, state::AbstractState; kwargs...)
if typeof(list.n) <: Int && list.n > 0 #If n is a natural number
# Bounded list: drop the oldest entry once the capacity n is reached.
if list.i + 1 > list.n
popfirst!(list.list) #remove the first item
list.i = list.n
else
list.i += 1
end
cstate = copy_compress_state(state; kwargs...)
push!(list.list, (cstate, VoidListofStates()))
else
# Unbounded list (n <= 0): always append.
push!(list.list, (copy_compress_state(state; kwargs...), VoidListofStates()))
list.i += 1
end
return list
end
# The void list silently ignores additions.
function add_to_list!(list::VoidListofStates, state::AbstractState; kwargs...)
return list
end
import Base.length
"""
length: return the number of States in the list.
`length(:: ListofStates)`
see also: print, add_to_list!, ListofStates
"""
# The running index i is the number of stored states.
length(list::AbstractListofStates) = list.i
import Base.print
"""
print: output formatting. return a DataFrame.
`print(:: ListofStates; verbose :: Bool = true, print_sym :: Union{Nothing,Array{Symbol,1}})`
Note:
- set `verbose` to false to avoid printing.
- if `print_sym` is an Array of Symbol, only those symbols are printed. Note that
the returned DataFrame still contains all the columns.
- More information about DataFrame: http://juliadata.github.io/DataFrames.jl
see also: add\\_to\\_list!, length, ListofStates
"""
function print(
list::AbstractListofStates;
verbose::Bool = true,
print_sym::Union{Nothing, Array{Symbol, 1}} = nothing,
)
# Build a table with one row per tracked field and one column per stored
# state, then wrap it in a DataFrame (always returned, printed if verbose).
# NOTE(review): typeof(list.list[1, 1]) is the (state, sublist) Tuple type,
# not the state type — verify the intended fields are enumerated here.
tab = zeros(0, length(list.list))#Array{Any,2}(undef, length(fieldnames(typeof(list.list[1,1]))))
for k in fieldnames(typeof(list.list[1, 1]))
tab = vcat(tab, [getfield(i[1], k) for i in list.list]')
end
df = DataFrame(tab, :auto)
if isnothing(print_sym)
verbose && print(df)
else
verbose && print(df[!, print_sym])
end
return df
end
import Base.getindex
"""
`getindex(:: ListofStates, :: Int)`
`getindex(:: ListofStates, :: Int, :: Int)`
Example:
stop_lstt.listofstates.list[3]
stop_lstt.listofstates.list[3,1]
"""
# Index into the history: [i] yields the i-th (state, sublist) pair,
# [i, j] selects within that pair (j = 1 state, j = 2 sublist).
getindex(list::AbstractListofStates, i::Int) = list.list[i]
getindex(list::AbstractListofStates, i::Int, j::Int) = list.list[i][j]
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 10517 | """
Type: NLPAtX
Methods: update!, reinit!
NLPAtX contains the information concerning a nonlinear optimization model at
the iterate x.
min_{x ∈ ℜⁿ} f(x) subject to lcon <= c(x) <= ucon, lvar <= x <= uvar.
Tracked data include:
- x : the current iterate
- fx [opt] : function evaluation at x
- gx [opt] : gradient evaluation at x
- Hx [opt] : hessian evaluation at x
- mu [opt] : Lagrange multiplier of the bounds constraints
- cx [opt] : evaluation of the constraint function at x
- Jx [opt] : jacobian matrix of the constraint function at x
- lambda : Lagrange multiplier of the constraints
- d [opt] : search direction
- res [opt] : residual
- current_time : time
- current_score : score
(import the type NLPModels.Counters)
Constructors:
`NLPAtX(:: T, :: T, :: S; fx :: eltype(T) = _init_field(eltype(T)), gx :: T = _init_field(T), Hx :: Matrix{eltype(T)} = _init_field(Matrix{eltype(T)}), mu :: T = _init_field(T), cx :: T = _init_field(T), Jx :: SparseMatrixCSC{eltype(T), Int64} = _init_field(SparseMatrixCSC{eltype(T), Int64}), d :: T = _init_field(T), res :: T = _init_field(T), current_time :: Float64 = NaN) where {S, T <: AbstractVector}`
`NLPAtX(:: T; fx :: eltype(T) = _init_field(eltype(T)), gx :: T = _init_field(T), Hx :: Matrix{eltype(T)} = _init_field(Matrix{eltype(T)}), mu :: T = _init_field(T), current_time :: Float64 = NaN, current_score :: Union{T,eltype(T)} = _init_field(eltype(T))) where {T <: AbstractVector}`
`NLPAtX(:: T, :: T; fx :: eltype(T) = _init_field(eltype(T)), gx :: T = _init_field(T), Hx :: Matrix{eltype(T)} = _init_field(Matrix{eltype(T)}), mu :: T = _init_field(T), cx :: T = _init_field(T), Jx :: SparseMatrixCSC{eltype(T), Int64} = _init_field(SparseMatrixCSC{eltype(T), Int64}), d :: T = _init_field(T), res :: T = _init_field(T), current_time :: Float64 = NaN, current_score :: Union{T,eltype(T)} = _init_field(eltype(T))) where T <: AbstractVector`
Note:
- By default, unknown entries are set using `_init_field`.
- By default the type of `current_score` is `eltype(x)` and cannot be changed once the State is created.
To have a vectorized `current_score` of length n, try something like `GenericState(x, Array{eltype(x),1}(undef, n))`.
- All these information (except for `x` and `lambda`) are optionnal and need to be update when
required. The update is done through the `update!` function.
- `x` and `lambda` are mandatory entries. If no constraints `lambda = []`.
- The constructor check the size of the entries.
See also: `GenericState`, `update!`, `update_and_start!`, `update_and_stop!`, `reinit!`
"""
mutable struct NLPAtX{Score, S, T <: AbstractVector} <: AbstractState{S, T}
#Unconstrained State
x::T # current point
fx::S # objective function
gx::T # gradient size: x
# NOTE(review): untyped field (abstract container) — consider parametrizing.
Hx # hessian size: |x| x |x|
#Bounds State
mu::T # Lagrange multipliers with bounds size of |x|
#Constrained State
cx::T # vector of constraints lc <= c(x) <= uc
# NOTE(review): untyped field (abstract container) — consider parametrizing.
Jx # jacobian matrix, size: |lambda| x |x|
lambda::T # Lagrange multipliers
d::T #search direction
res::T #residual
#Resources State
current_time::Float64
current_score::Score
# Inner constructor: validates entry dimensions via _size_check.
function NLPAtX(
x::T,
lambda::T,
current_score::Score;
fx::eltype(T) = _init_field(eltype(T)),
gx::T = _init_field(T),
Hx = _init_field(Matrix{eltype(T)}),
mu::T = _init_field(T),
cx::T = _init_field(T),
Jx = _init_field(SparseMatrixCSC{eltype(T), Int64}),
d::T = _init_field(T),
res::T = _init_field(T),
current_time::Float64 = NaN,
) where {Score, T <: AbstractVector}
_size_check(x, lambda, fx, gx, Hx, mu, cx, Jx)
return new{Score, eltype(T), T}(
x,
fx,
gx,
Hx,
mu,
cx,
Jx,
lambda,
d,
res,
current_time,
current_score,
)
end
end
# Convenience constructor: `current_score` given as a keyword, defaulting to
# an unknown scalar score of type `eltype(T)`.
function NLPAtX(
  x::T,
  lambda::T;
  fx::eltype(T) = _init_field(eltype(T)),
  gx::T = _init_field(T),
  Hx = _init_field(Matrix{eltype(T)}),
  mu::T = _init_field(T),
  cx::T = _init_field(T),
  Jx = _init_field(SparseMatrixCSC{eltype(T), Int64}),
  d::T = _init_field(T),
  res::T = _init_field(T),
  current_time::Float64 = NaN,
  current_score::Union{T, eltype(T)} = _init_field(eltype(T)),
) where {T <: AbstractVector}
  # NOTE(review): sizes are checked here and again in the inner constructor;
  # the second check is redundant but harmless.
  _size_check(x, lambda, fx, gx, Hx, mu, cx, Jx)
  return NLPAtX(
    x,
    lambda,
    current_score,
    fx = fx,
    gx = gx,
    Hx = Hx,
    mu = mu,
    cx = cx,
    Jx = Jx,
    d = d,
    res = res,
    current_time = current_time,
  )
end
# Unconstrained convenience constructor: `lambda` is set to an empty vector,
# so no `cx`/`Jx` keywords are exposed.
function NLPAtX(
  x::T;
  fx::eltype(T) = _init_field(eltype(T)),
  gx::T = _init_field(T),
  Hx = _init_field(Matrix{eltype(T)}),
  mu::T = _init_field(T),
  d::T = _init_field(T),
  res::T = _init_field(T),
  current_time::Float64 = NaN,
  current_score::Union{T, eltype(T)} = _init_field(eltype(T)),
) where {T <: AbstractVector}
  # Empty lambda/cx/Jx stand for "no constraints" in the size check.
  _size_check(
    x,
    zeros(eltype(T), 0),
    fx,
    gx,
    Hx,
    mu,
    _init_field(T),
    _init_field(SparseMatrixCSC{eltype(T), Int64}),
  )
  return NLPAtX(
    x,
    zeros(eltype(T), 0),
    current_score,
    fx = fx,
    gx = gx,
    Hx = Hx,
    mu = mu,
    d = d,
    res = res,
    current_time = current_time,
  )
end
# Auto-generate, document and export one accessor `get_<field>` per field of
# `NLPAtX` (e.g. `get_x`, `get_fx`, `get_Hx`, ...).
for field in fieldnames(NLPAtX)
  meth = Symbol("get_", field)
  @eval begin
    @doc """
    $($meth)(state)
    Return the value $($(QuoteNode(field))) from the state.
    """
    $meth(state::NLPAtX) = getproperty(state, $(QuoteNode(field)))
  end
  @eval export $meth
end
# Store the score in-place when the container lengths match (avoids an
# allocation), otherwise rebind the field.
function set_current_score!(state::NLPAtX{Score, S, T}, current_score::Score) where {Score, S, T}
  if length(state.current_score) == length(current_score)
    state.current_score .= current_score
  else
    state.current_score = current_score
  end
  return state
end
# Scalar scores are simply rebound.
# NOTE(review): this method is qualified with `Stopping.` while the one above
# is not — presumably both extend the same function; confirm module context.
function Stopping.set_current_score!(
  state::NLPAtX{Score, S, T},
  current_score::Score,
) where {Score <: Number, S, T}
  state.current_score = current_score
  return state
end
# Store the current iterate, writing into the existing buffer when the
# lengths agree and rebinding the field otherwise. Return `state`.
function set_x!(state::NLPAtX{Score, S, T}, x::T) where {Score, S, T}
  same_length = length(state.x) == length(x)
  same_length ? (state.x .= x) : (state.x = x)
  return state
end
# Store the search direction, reusing the existing buffer when possible.
function set_d!(state::NLPAtX{Score, S, T}, d::T) where {Score, S, T}
  if length(state.d) != length(d)
    state.d = d
  else
    state.d .= d
  end
  return state
end
# Store the residual, reusing the existing buffer when possible.
function set_res!(state::NLPAtX{Score, S, T}, res::T) where {Score, S, T}
  same_length = length(state.res) == length(res)
  same_length ? (state.res .= res) : (state.res = res)
  return state
end
# Store the Lagrange multipliers, reusing the existing buffer when possible.
function set_lambda!(state::NLPAtX{Score, S, T}, lambda::T) where {Score, S, T}
  if length(state.lambda) != length(lambda)
    state.lambda = lambda
  else
    state.lambda .= lambda
  end
  return state
end
# Store the bound multipliers, reusing the existing buffer when possible.
function set_mu!(state::NLPAtX{Score, S, T}, mu::T) where {Score, S, T}
  same_length = length(state.mu) == length(mu)
  same_length ? (state.mu .= mu) : (state.mu = mu)
  return state
end
# Objective value is a scalar: plain rebinding, nothing to reuse in-place.
function set_fx!(state::NLPAtX{Score, S, T}, fx::S) where {Score, S, T}
  state.fx = fx
  return state
end
# Store the gradient, reusing the existing buffer when possible.
function set_gx!(state::NLPAtX{Score, S, T}, gx::T) where {Score, S, T}
  if length(state.gx) != length(gx)
    state.gx = gx
  else
    state.gx .= gx
  end
  return state
end
# Store the constraint values, reusing the existing buffer when possible.
function set_cx!(state::NLPAtX{Score, S, T}, cx::T) where {Score, S, T}
  same_length = length(state.cx) == length(cx)
  same_length ? (state.cx .= cx) : (state.cx = cx)
  return state
end
# Return `true` as soon as one entry of the state contains a NaN or a
# `missing` value. Setting the keyword `x` (resp. `current_score`) to `true`
# skips the check of that entry.
function Stopping._domain_check(
  stateatx::NLPAtX{Score, S, T};
  current_score = false,
  x = false,
) where {Score, S, T}
  !x && Stopping._check_nan_miss(get_x(stateatx)) && return true
  !current_score && Stopping._check_nan_miss(get_current_score(stateatx)) && return true
  # Remaining entries are always inspected, in the same order as before.
  for accessor in (get_d, get_res, get_fx, get_gx, get_mu, get_cx, get_lambda, get_Jx, get_Hx)
    Stopping._check_nan_miss(accessor(stateatx)) && return true
  end
  return false
end
"""
reinit!: function that set all the entries at void except the mandatory x
`reinit!(:: NLPAtX, x :: AbstractVector, l :: AbstractVector; kwargs...)`
`reinit!(:: NLPAtX; kwargs...)`
Note: if `x` or `lambda` are given as keyword arguments they will be
prioritized over the existing `x`, `lambda` and the default `Counters`.
"""
function reinit!(stateatx::NLPAtX{Score, S, T}, x::T, l::T; kwargs...) where {Score, S, T}
for k ∈ fieldnames(NLPAtX)
if k ∉ [:x, :lambda]
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
end
setfield!(stateatx, :x, x)
setfield!(stateatx, :lambda, l)
if length(kwargs) == 0
return stateatx #save the update! call if no other kwargs than x
end
return update!(stateatx; kwargs...)
end
function reinit!(stateatx::NLPAtX{Score, S, T}, x::T; kwargs...) where {Score, S, T}
for k ∈ fieldnames(NLPAtX)
if k ∉ [:x, :lambda]
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
end
setfield!(stateatx, :x, x)
if length(kwargs) == 0
return stateatx #save the update! call if no other kwargs than x
end
return update!(stateatx; kwargs...)
end
function reinit!(stateatx::NLPAtX; kwargs...)
for k ∈ fieldnames(NLPAtX)
if k ∉ [:x, :lambda]
setfield!(stateatx, k, _init_field(typeof(getfield(stateatx, k))))
end
end
return update!(stateatx; kwargs...)
end
"""
_size_check!: check the size of the entries in the State
`_size_check(x, lambda, fx, gx, Hx, mu, cx, Jx)`
"""
function _size_check(x, lambda, fx, gx, Hx, mu, cx, Jx)
if length(gx) != 0 && length(gx) != length(x)
throw(error("Wrong size of gx in the NLPAtX."))
end
if size(Hx) != (0, 0) && size(Hx) != (length(x), length(x))
throw(error("Wrong size of Hx in the NLPAtX."))
end
if length(mu) != 0 && length(mu) != length(x)
throw(error("Wrong size of mu in the NLPAtX."))
end
if length(lambda) != 0
if length(cx) != 0 && length(cx) != length(lambda)
throw(error("Wrong size of cx in the NLPAtX."))
end
if size(Jx) != (0, 0) && size(Jx) != (length(lambda), length(x))
throw(error("Wrong size of Jx in the NLPAtX."))
end
end
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 3422 | """
Type: OneDAtX
Methods: update!, reinit!, copy
A structure designed to track line search information from one iteration to
another. Given f : ℜⁿ → ℜ, define h(θ) = f(x + θ*d) where x and d
are vectors of same dimension and θ is a scalar, more specifically the step size.
Tracked data can include:
- x : the current step size
- fx [opt] : h(θ) at the current iteration
- gx [opt] : h'(θ)
- f₀ [opt] : h(0)
- g₀ [opt] : h'(0)
- d [opt] : search direction
- res [opt] : residual
- current_time : the time at which the line search algorithm started.
- current_score : the score at which the line search algorithm started.
Constructors:
`OneDAtX(:: T, :: S; fx :: T = _init_field(T), gx :: T = _init_field(T), f₀ :: T = _init_field(T), g₀ :: T = _init_field(T), current_time :: Float64 = NaN) where {S, T <: Number}`
`OneDAtX(:: T; fx :: T = _init_field(T), gx :: T = _init_field(T), f₀ :: T = _init_field(T), g₀ :: T = _init_field(T), current_time :: Float64 = NaN, current_score :: T = _init_field(T)) where T <: Number`
Note:
- By default, unknown entries are set using `_init_field`.
- By default the type of `current_score` is `eltype(x)` and cannot be changed once the State is created.
To have a vectorized `current_score` of length n, use `OneDAtX(x, Array{eltype(x),1}(undef, n))`.
"""
# Line-search state for h(θ) = f(x + θ d): `x` holds the scalar step size θ.
mutable struct OneDAtX{S, T <: Number} <: AbstractState{S, T}
  x::T # current step size θ
  fx::T # h(θ)
  gx::T # h'(θ)
  f₀::T # h(0)
  g₀::T # h'(0)
  d::T # search direction (scalar representation)
  res::T # residual
  current_time::Float64
  current_score::S
  # Inner constructor: step size and score are mandatory; other entries
  # default to the "unknown" value from `_init_field`.
  function OneDAtX(
    t::T,
    current_score::S;
    fx::T = _init_field(T),
    gx::T = _init_field(T),
    f₀::T = _init_field(T),
    g₀::T = _init_field(T),
    d::T = _init_field(T),
    res::T = _init_field(T),
    current_time::Float64 = NaN,
  ) where {S, T <: Number}
    return new{S, T}(t, fx, gx, f₀, g₀, d, res, current_time, current_score)
  end
end
# Convenience constructor with `current_score` as a keyword argument,
# defaulting to an unknown scalar of the same type as the step size.
function OneDAtX(
  t::T;
  fx::T = _init_field(T),
  gx::T = _init_field(T),
  f₀::T = _init_field(T),
  g₀::T = _init_field(T),
  d::T = _init_field(T),
  res::T = _init_field(T),
  current_time::Float64 = NaN,
  current_score::T = _init_field(T),
) where {T <: Number}
  return OneDAtX(
    t,
    current_score,
    fx = fx,
    gx = gx,
    f₀ = f₀,
    g₀ = g₀,
    d = d,
    res = res,
    current_time = current_time,
  )
end
# Auto-generate, document and export one accessor `get_<field>` per field of
# `OneDAtX` (same pattern as for `NLPAtX`).
for field in fieldnames(OneDAtX)
  meth = Symbol("get_", field)
  @eval begin
    @doc """
    $($meth)(state)
    Return the value $($(QuoteNode(field))) from the state.
    """
    $meth(state::OneDAtX) = getproperty(state, $(QuoteNode(field)))
  end
  @eval export $meth
end
# Container scores are written in-place; scalar scores (method below, more
# specific for S <: Number) are rebound.
function set_current_score!(state::OneDAtX{S, T}, current_score::S) where {S, T}
  state.current_score .= current_score
  return state
end
function set_current_score!(state::OneDAtX{S, T}, current_score::S) where {S <: Number, T}
  state.current_score = current_score
  return state
end
"""
    set_x!(state::OneDAtX, x)

Set the current step size stored in `state.x` and return `state`.

Bug fix: the previous implementation branched on
`length(state.x) == length(x)` and then did `state.x .= x`; since the struct
constrains `T <: Number` both lengths are always 1, so the broadcast branch
was always taken and broadcasting into a scalar field throws a `MethodError`.
Plain assignment is the correct behavior.
"""
function set_x!(state::OneDAtX{S, T}, x::T) where {S, T}
  state.x = x
  return state
end
"""
    set_d!(state::OneDAtX, d)

Set the search-direction entry `state.d` and return `state`.

Bug fix: `T <: Number` in `OneDAtX`, so the former length check was always
true and `state.d .= d` broadcast-assigned into a scalar field, which throws
a `MethodError`. Plain assignment is the correct behavior.
"""
function set_d!(state::OneDAtX{S, T}, d::T) where {S, T}
  state.d = d
  return state
end
"""
    set_res!(state::OneDAtX, res)

Set the residual entry `state.res` and return `state`.

Bug fix: `T <: Number` in `OneDAtX`, so the former length check was always
true and `state.res .= res` broadcast-assigned into a scalar field, which
throws a `MethodError`. Plain assignment is the correct behavior.
"""
function set_res!(state::OneDAtX{S, T}, res::T) where {S, T}
  state.res = res
  return state
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 23084 | """
Type: `GenericStopping`
Methods: `start!`, `stop!`, `update_and_start!`, `update_and_stop!`, `fill_in!`, `reinit!`, `status`
A generic Stopping to solve instances with respect to some
optimality conditions. Optimality is decided by computing a score, which is then
tested to zero.
Tracked data include:
- `pb` : A problem
- `current_state` : The information relative to the problem, see `GenericState`.
- (opt) `meta` : Metadata relative to a stopping criteria, see `StoppingMeta`.
- (opt) `main_stp` : Stopping of the main loop in case we consider a Stopping
of a subproblem.
If not a subproblem, then `VoidStopping`.
- (opt) `listofstates` : `ListofStates` designed to store the history of States.
- (opt) `stopping_user_struct` : Contains a structure designed by the user.
Constructors:
- `GenericStopping(pb, meta::AbstractStoppingMeta, stop_remote::AbstractStopRemoteControl, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The default constructor.
- `GenericStopping(pb, meta::AbstractStoppingMeta, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The one passing the `kwargs` to the `stop_remote`.
- `GenericStopping(pb, state::AbstractState; stop_remote::AbstractStopRemoteControl = StopRemoteControl(), main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The one passing the `kwargs` to the `meta`.
- `GenericStopping(pb, stop_remote::AbstractStopRemoteControl, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The one passing the `kwargs` to the `meta`.
- `GenericStopping(pb, x; n_listofstates=, kwargs...)`
The one setting up a default state using x, and initializing the list of states if `n_listofstates>0`.
Note: Metadata can be provided by the user or created with the Stopping
constructor via kwargs. If a specific StoppingMeta is given and
kwargs are provided, the kwargs have priority.
Examples:
`GenericStopping(pb, GenericState(ones(2)), rtol = 1e-1)`
Besides optimality conditions, we consider classical emergency exit:
- domain error (for instance: NaN in x)
- unbounded problem (not implemented)
- unbounded x (x is too large)
- tired problem (time limit attained)
- resources exhausted (not implemented)
- stalled problem (not implemented)
- iteration limit (maximum number of iteration (i.e. nb of stop) attained)
- main_pb limit (tired or resources of main problem exhausted)
There is an additional default constructor which creates a Stopping with a default State.
`GenericStopping(:: Any, :: Union{Number, AbstractVector}; kwargs...)`
Note: Keywords arguments are forwarded to the classical constructor.
Examples:
`GenericStopping(pb, x0, rtol = 1e-1)`
"""
# Generic stopping criterion container; see the docstring above for the
# meaning of each field.
mutable struct GenericStopping{Pb, M, SRC, T, MStp, LoS} <:
               AbstractStopping{Pb, M, SRC, T, MStp, LoS}
  # Problem
  pb::Pb
  # Problem stopping criterion
  meta::M
  stop_remote::SRC
  # Current information on the problem
  current_state::T
  # Stopping of the main problem, or nothing
  main_stp::MStp
  # History of states
  listofstates::LoS
  # User-specific structure
  # NOTE(review): abstractly-typed field; accesses to it are not type-stable.
  stopping_user_struct::AbstractDict
end
# Field accessors for `GenericStopping`.
get_pb(stp::GenericStopping) = stp.pb
get_meta(stp::GenericStopping) = stp.meta
get_remote(stp::GenericStopping) = stp.stop_remote
get_state(stp::GenericStopping) = stp.current_state
get_main_stp(stp::GenericStopping) = stp.main_stp
get_list_of_states(stp::GenericStopping) = stp.listofstates
get_user_struct(stp::GenericStopping) = stp.stopping_user_struct
# Full constructor: everything is provided explicitly.
# NOTE(review): trailing `kwargs...` are accepted but silently discarded here.
function GenericStopping(
  pb::Pb,
  meta::M,
  stop_remote::SRC,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: Any, M <: AbstractStoppingMeta, SRC <: AbstractStopRemoteControl, T <: AbstractState}
  return GenericStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Meta given: kwargs configure the remote control.
function GenericStopping(
  pb::Pb,
  meta::M,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: Any, M <: AbstractStoppingMeta, T <: AbstractState}
  stop_remote = StopRemoteControl(; kwargs...) #main_stp == VoidStopping() ? StopRemoteControl() : cheap_stop_remote_control()
  return GenericStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Only the state given: kwargs configure the meta.
function GenericStopping(
  pb::Pb,
  current_state::T;
  stop_remote::AbstractStopRemoteControl = StopRemoteControl(), #main_stp == VoidStopping() ? StopRemoteControl() : cheap_stop_remote_control(),
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: Any, T <: AbstractState}
  meta = StoppingMeta(; kwargs...)
  return GenericStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Remote control given positionally: kwargs configure the meta.
function GenericStopping(
  pb::Pb,
  stop_remote::SRC,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: Any, SRC <: AbstractStopRemoteControl, T <: AbstractState}
  meta = StoppingMeta(; kwargs...)
  return GenericStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
"""
`GenericStopping(pb :: Any, x :: T; n_listofstates :: Int = 0, kwargs...)`
Setting the keyword argument `n_listofstates > 0` initialize a ListofStates of length `n_listofstates`.
"""
function GenericStopping(pb::Any, x::T; n_listofstates::Int = 0, kwargs...) where {T}
state = GenericState(x)
if n_listofstates > 0 && :list ∉ keys(kwargs)
list = ListofStates(n_listofstates, Val{typeof(state)}())
return GenericStopping(pb, state, list = list; kwargs...)
end
return GenericStopping(pb, state; kwargs...)
end
"""
`update!(stp::AbstractStopping; kwargs...)`
update!: generic update function for the Stopping
Shortcut for update!(stp.current_state; kwargs...)
"""
function update!(stp::AbstractStopping; kwargs...)
return update!(stp.current_state; kwargs...)
end
"""
`fill_in!(stp::AbstractStopping, x::T) where {T}`
fill_in!: fill in the unspecified values of the AbstractState.
Note: NotImplemented for Abstract/Generic-Stopping.
"""
function fill_in!(stp::AbstractStopping, x::AbstractVector)
return throw(error("NotImplemented function"))
end
"""
`update_and_start!(stp::AbstractStopping; no_opt_check::Bool = false, kwargs...)`
Update values in the State and initialize the Stopping.
Returns the optimality status of the problem as a boolean.
Note:
- Kwargs are forwarded to the `update!` call.
- `no_opt_check` skip optimality check in `start!` (`false` by default).
"""
function update_and_start!(stp::AbstractStopping; no_opt_check::Bool = false, kwargs...)
if stp.stop_remote.cheap_check
_smart_update!(stp.current_state; kwargs...)
else
update!(stp; kwargs...)
end
OK = start!(stp, no_opt_check = no_opt_check)
return OK
end
"""
`start!(stp::AbstractStopping; no_opt_check::Bool = false, kwargs...)`
Update the Stopping and return `true` if we must stop.
Purpose is to know if there is a need to even perform an optimization algorithm
or if we are at an optimal solution from the beginning.
Set `no_opt_check` to `true` avoid checking optimality and domain errors.
The function `start!` successively calls: `_domain_check(stp, x)`, `_optimality_check!(stp, x)`, `_null_test(stp, x)` and `_user_check!(stp, x, true)`.
Note: - `start!` initializes `stp.meta.start_time` (if not done before),
`stp.current_state.current_time` and `stp.meta.optimality0`
(if `no_opt_check` is false).
- Keywords argument are passed to the `_optimality_check!` call.
- Compatible with the `StopRemoteControl`.
"""
function start!(stp::AbstractStopping; no_opt_check::Bool = false, kwargs...)
state = stp.current_state
src = stp.stop_remote
#Initialize the time counter
if src.tired_check && isnan(stp.meta.start_time)
stp.meta.start_time = time()
end
#and synchornize with the State
if src.tired_check && isnan(state.current_time)
_update_time!(state, stp.meta.start_time)
end
if !no_opt_check
stp.meta.domainerror = if src.domain_check
#don't check current_score
_domain_check(stp.current_state, current_score = true)
else
stp.meta.domainerror
end
if src.optimality_check
optimality0 = _optimality_check!(stp; kwargs...)
norm_optimality0 = norm(optimality0, Inf)
if src.domain_check && isnan(norm_optimality0)
stp.meta.domainerror = true
elseif norm_optimality0 == Inf
stp.meta.optimality0 = one(typeof(norm_optimality0))
else
stp.meta.optimality0 = norm_optimality0
end
if _null_test(stp, optimality0)
stp.meta.optimal = true
end
end
end
src.user_start_check && _user_check!(stp, state.x, true)
OK = OK_check(stp.meta)
#do nothing if typeof(stp.listofstates) == VoidListofStates
add_to_list!(stp.listofstates, stp.current_state)
return OK
end
"""
`reinit!(:: AbstractStopping; rstate :: Bool = false, kwargs...)`
Reinitialize the meta-data in the Stopping.
Note:
- If `rstate` is set as `true` it reinitializes the current State
(with the kwargs).
- If `rlist` is set as true the list of states is also reinitialized, either
set as a `VoidListofStates` if `rstate` is `true` or a list containing only the current
state otherwise.
"""
function reinit!(stp::AbstractStopping; rstate::Bool = false, rlist::Bool = true, kwargs...)
stp.meta.start_time = NaN
stp.meta.optimality0 = 1.0
#reinitialize the boolean status
reinit!(stp.meta)
#reinitialize the counter of stop
stp.meta.nb_of_stop = 0
#reinitialize the list of states
if rlist && (typeof(stp.listofstates) != VoidListofStates)
#TODO: Warning we cannot change the type of ListofStates
stp.listofstates = rstate ? VoidListofStates() : ListofStates(stp.current_state)
end
#reinitialize the state
if rstate
reinit!(stp.current_state; kwargs...)
end
return stp
end
"""
`update_and_stop!(stp :: AbstractStopping; kwargs...)`
Update the values in the state and return the optimality status of the problem as a boolean.
Note: Kwargs are forwarded to the `update!` call.
"""
function update_and_stop!(stp::AbstractStopping; kwargs...)
if stp.stop_remote.cheap_check
_smart_update!(stp.current_state; kwargs...)
OK = cheap_stop!(stp)
else
update!(stp; kwargs...)
OK = stop!(stp)
end
return OK
end
"""
`stop!(:: AbstractStopping; kwargs...)`
Update the Stopping and return a boolean true if we must stop.
It serves the same purpose as `start!` in an algorithm; telling us if we
stop the algorithm (because we have reached optimality or we loop infinitely,
etc).
The function `stop!` successively calls: `_domain_check`, `_optimality_check`,
`_null_test`, `_unbounded_check!`, `_tired_check!`, `_resources_check!`,
`_stalled_check!`, `_iteration_check!`, `_main_pb_check!`, `add_to_list!`
Note:
- kwargs are sent to the `_optimality_check!` call.
- If `listofstates != VoidListofStates`, call `add_to_list!`.
"""
function stop!(stp::AbstractStopping; no_opt_check::Bool = false, kwargs...)
x = stp.current_state.x
src = stp.stop_remote
src.unbounded_and_domain_x_check && _unbounded_and_domain_x_check!(stp, x)
stp.meta.domainerror = if src.domain_check
#don't check x and current_score
_domain_check(stp.current_state, x = true, current_score = true)
else
stp.meta.domainerror
end
if !no_opt_check
# Optimality check
if src.optimality_check
score = _optimality_check!(stp; kwargs...)
if src.domain_check && any(isnan, score)
stp.meta.domainerror = true
end
if _null_test(stp, score)
stp.meta.optimal = true
end
end
src.infeasibility_check && _infeasibility_check!(stp, x)
src.unbounded_problem_check && _unbounded_problem_check!(stp, x)
src.tired_check && _tired_check!(stp, x)
src.resources_check && _resources_check!(stp, x)
src.stalled_check && _stalled_check!(stp, x)
src.iteration_check && _iteration_check!(stp, x)
src.main_pb_check && _main_pb_check!(stp, x)
src.user_check && _user_check!(stp, x)
end
OK = OK_check(stp.meta)
_add_stop!(stp)
#do nothing if typeof(stp.listofstates) == VoidListofStates
add_to_list!(stp.listofstates, stp.current_state)
return OK
end
"""
`cheap_stop!(:: AbstractStopping; kwargs...)`
Update the Stopping and return a boolean true if we must stop.
It serves the same purpose as `stop!`, but avoids any potentially expensive checks.
We no longer browse `x` and `res` in the State, and no check on the `main_stp`.
Check only the updated entries in the meta.
The function `cheap_stop!` successively calls:
`_null_test`, `_unbounded_check!`, `_tired_check!`, `_resources_check!`,
`_stalled_check!`, `_iteration_check!`, `add_to_list!`
Note:
- kwargs are sent to the `_optimality_check!` call.
- If `listofstates != VoidListofStates`, call `add_to_list!`.
"""
function cheap_stop!(stp::AbstractStopping; kwargs...)
x = stp.current_state.x
src = stp.stop_remote
# Optimality check
if src.optimality_check
score = _optimality_check!(stp; kwargs...) #update state.current_score
if _null_test(stp, score)
stp.meta.optimal = true
end
end
OK = stp.meta.optimal
OK = OK || (src.infeasibility_check && _infeasibility_check!(stp, x))
OK = OK || (src.unbounded_problem_check && _unbounded_problem_check!(stp, x))
OK = OK || (src.tired_check && _tired_check!(stp, x))
OK = OK || (src.resources_check && _resources_check!(stp, x))
OK = OK || (src.iteration_check && _iteration_check!(stp, x))
OK = OK || (src.user_check && _user_check!(stp, x))
_add_stop!(stp)
#do nothing if typeof(stp.listofstates) == VoidListofStates
add_to_list!(stp.listofstates, stp.current_state)
return OK
end
"""
`_add_stop!(:: AbstractStopping)`
Increment a counter of stop.
Fonction called everytime `stop!` is called. In theory should be called once
every iteration of an algorithm.
Note: update `meta.nb_of_stop`.
"""
function _add_stop!(stp::AbstractStopping)
stp.meta.nb_of_stop += 1
return stp
end
"""
`_iteration_check!(:: AbstractStopping, :: T)`
Check if the optimization algorithm has reached the
iteration limit.
Note: Compare `meta.iteration_limit` with `meta.nb_of_stop`.
"""
function _iteration_check!(stp::AbstractStopping, x::T) where {T}
max_iter = stp.meta.nb_of_stop >= stp.meta.max_iter
if max_iter
stp.meta.iteration_limit = true
end
return stp.meta.iteration_limit
end
"""
`_stalled_check!(:: AbstractStopping, :: T)`
Check if the optimization algorithm is stalling.
Note: Do nothing by default for AbstractStopping.
"""
function _stalled_check!(stp::AbstractStopping, x::T) where {T}
return stp.meta.stalled
end
"""
`_tired_check!(:: AbstractStopping, :: T)`
Check if the optimization algorithm has been running for too long.
Note:
- Return `false` if `meta.start_time` is `NaN` (by default).
- Update `meta.tired`.
"""
function _tired_check!(stp::AbstractStopping, x::T) where {T}
stime = stp.meta.start_time #can be NaN
ctime = time()
#Keep the current_state updated
_update_time!(stp.current_state, ctime)
elapsed_time = ctime - stime
max_time = elapsed_time > stp.meta.max_time #NaN > 1. is false
if max_time
stp.meta.tired = true
end
return stp.meta.tired
end
function _tired_check!(stp::VoidStopping, x::T) where {T}
return false
end
"""
`_resources_check!(:: AbstractStopping, :: T)`
Check if the optimization algorithm has exhausted the resources.
Note: Do nothing by default `meta.resources` for AbstractStopping.
"""
function _resources_check!(stp::AbstractStopping, x::T) where {T}
return stp.meta.resources
end
function _resources_check!(stp::VoidStopping, x::T) where {T}
return false
end
"""
`_main_pb_check!(:: AbstractStopping, :: T)`
Check the resources and the time of the upper problem if `main_stp != VoidStopping`.
Note: - Modify the meta of the `main_stp`.
- return `false` for `VoidStopping`.
"""
function _main_pb_check!(stp::AbstractStopping, x::T) where {T}
max_time = _tired_check!(stp.main_stp, x)
resources = _resources_check!(stp.main_stp, x)
main_main_pb = _main_pb_check!(stp.main_stp, x)
check = max_time || resources || main_main_pb
if check
stp.meta.main_pb = true
end
return stp.meta.main_pb
end
function _main_pb_check!(stp::VoidStopping, x::T) where {T}
return false
end
"""
`_unbounded_and_domain_x_check!(:: AbstractStopping, :: T)`
Check if x gets too big, and if it has NaN or missing values.
Note:
- compare `||x||_∞` with `meta.unbounded_x` and update `meta.unbounded`.
- it also checks `NaN` and `missing` and update `meta.domainerror`.
- short-circuiting if one of the two is `true`.
"""
function _unbounded_and_domain_x_check!(stp::AbstractStopping, x::T) where {T}
bigX(z::eltype(T)) = (abs(z) >= stp.meta.unbounded_x)
(stp.meta.unbounded, stp.meta.domainerror) = _large_and_domain_check(bigX, x)
return stp.meta.unbounded || stp.meta.domainerror
end
# Scan `itr` once and return a `(too_large, domain_error)` pair:
# - `(false, true)` at the first missing/NaN entry,
# - `(true, false)` at the first entry satisfying the predicate `pred`,
# - `(false, false)` when neither occurs.
function _large_and_domain_check(pred, itr)
  for val in itr
    is_large = pred(val)
    is_bad = ismissing(val) || isnan(val)
    is_bad && return (false, true)
    is_large && return (true, false)
  end
  return (false, false)
end
"""
`_unbounded_problem_check!(:: AbstractStopping, :: T)`
Check if problem relative informations are unbounded
Note: Do nothing by default.
"""
function _unbounded_problem_check!(stp::AbstractStopping, x::T) where {T}
return stp.meta.unbounded_pb
end
"""
`_infeasibility_check!(:: AbstractStopping, :: T)`
Check if problem is infeasible.
Note: `meta.infeasible` is `false` by default.
"""
function _infeasibility_check!(stp::AbstractStopping, x::T) where {T}
return stp.meta.infeasible
end
"""
`_optimality_check!(:: AbstractStopping; kwargs...)`
Compute the optimality score.
"""
function _optimality_check!(
stp::AbstractStopping{Pb, M, SRC, T, MStp, LoS};
kwargs...,
) where {Pb, M, SRC, T, MStp, LoS}
set_current_score!(
stp.current_state,
stp.meta.optimality_check(stp.pb, stp.current_state; kwargs...),
)
return stp.current_state.current_score
end
"""
`_null_test(:: AbstractStopping, :: T)`
Check if the score is close enough to zero (up to some precisions found in the meta).
Note:
- the second argument is compared with
`meta.tol_check(meta.atol, meta.rtol, meta.optimality0)`,
and `meta.tol_check_neg(meta.atol, meta.rtol, meta.optimality0)`.
- Compatible sizes is not verified.
"""
function _null_test(stp::AbstractStopping, optimality::T) where {T}
check_pos, check_neg = tol_check(stp.meta)
optimal = _inequality_check(optimality, check_pos, check_neg)
return optimal
end
# Decide whether a score lies within [check_neg, check_pos].
# Scalar case: plain interval membership (NaN fails both comparisons).
_inequality_check(opt::Number, check_pos::Number, check_neg::Number) =
  check_neg <= opt <= check_pos
# Iterable score against scalar tolerances: every entry must be non-missing
# and must not violate either bound (NaN entries violate neither inequality
# and therefore pass, matching the original `!any` formulation).
_inequality_check(opt, check_pos::Number, check_neg::Number)::Bool =
  all(z -> !ismissing(z) && !(z > check_pos) && !(z < check_neg), opt)
# Elementwise case: score and both tolerance containers share one type and,
# when `size` applies, must have matching dimensions.
function _inequality_check(opt::T, check_pos::T, check_neg::T) where {T}
  mismatched = try
    dims = size(opt)
    dims != size(check_pos) || dims != size(check_neg)
  catch
    false
  end
  if mismatched
    throw(
      ErrorException(
        "Error: incompatible size in _null_test wrong size of optimality, tol_check and tol_check_neg",
      ),
    )
  end
  for (score, hi, lo) in zip(opt, check_pos, check_neg)
    if score > hi || score < lo
      return false
    end
  end
  return true
end
"""
`_user_check!( :: AbstractStopping, x :: T, start :: Bool)`
Nothing by default.
Call the `user_check_func!(:: AbstractStopping, :: Bool)` from the meta.
The boolean `start` is `true` when called from the `start!` function.
"""
function _user_check!(stp::AbstractStopping, x::T, start::Bool) where {T}
#callback function furnished by the user
stp.meta.user_check_func!(stp, start)
return stp.meta.stopbyuser
end
function _user_check!(stp::AbstractStopping, x::T) where {T}
return _user_check!(stp, x, false)
end
# Map from user-facing status symbol to the boolean field of the meta that
# encodes it; used by `status` below.
const status_meta_list = Dict([
  (:Optimal, :optimal),
  (:SubProblemFailure, :fail_sub_pb),
  (:SubOptimal, :suboptimal),
  (:Unbounded, :unbounded),
  (:UnboundedPb, :unbounded_pb),
  (:Stalled, :stalled),
  (:IterationLimit, :iteration_limit),
  (:TimeLimit, :tired),
  (:EvaluationLimit, :resources),
  (:ResourcesOfMainProblemExhausted, :main_pb),
  (:Infeasible, :infeasible),
  (:StopByUser, :stopbyuser),
  (:Exception, :exception),
  (:DomainError, :domainerror),
])
"""
`status(:: AbstractStopping; list = false)`
Returns the status of the algorithm:
The different statuses are:
- `Optimal`: reached an optimal solution.
- `SubProblemFailure`
- `SubOptimal`: reached an acceptable solution.
- `Unbounded`: current iterate too large in norm.
- `UnboundedPb`: unbouned problem.
- `Stalled`: stalled algorithm.
- `IterationLimit`: too many iterations of the algorithm.
- `TimeLimit`: time limit.
- `EvaluationLimit`: too many ressources used,
i.e. too many functions evaluations.
- `ResourcesOfMainProblemExhausted`: in the case of a substopping, EvaluationLimit or TimeLimit
for the main stopping.
- `Infeasible`: default return value, if nothing is done the problem is
considered feasible.
- `StopByUser`: stopped by the user.
- `DomainError`: there is a NaN somewhere.
- `Exception`: unhandled exception
- `Unknwon`: if stopped for reasons unknown by Stopping.
Note:
- Set keyword argument `list` to true, to get an `Array` with all the statuses.
- The different statuses correspond to boolean values in the meta.
"""
function status(stp::AbstractStopping; list = false)
if list
list_status = findall(x -> getfield(stp.meta, x), status_meta_list)
if list_status == zeros(0)
list_status = [:Unknown]
end
else
list_status = findfirst(x -> getfield(stp.meta, x), status_meta_list)
if isnothing(list_status)
list_status = :Unknown
end
end
return list_status
end
"""
`elapsed_time(:: AbstractStopping)`

Return the time elapsed since `start!`, i.e. the difference between the
current time stored in the state and the starting time stored in the meta.
`current_time` and `start_time` are NaN if not initialized, in which case
NaN is returned.
"""
function elapsed_time(stp::AbstractStopping)
  t_now = stp.current_state.current_time
  t_start = stp.meta.start_time
  return t_now - t_start
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 11387 | """
Type: LAStopping
Methods: `start!`, `stop!`, `update_and_start!`, `update_and_stop!`, `fill_in!`, `reinit!`, `status`,
`linear_system_check`, `normal_equation_check`
Specialization of GenericStopping. Stopping structure for linear algebra
solving either
``Ax = b``
or
```math
min\\_{x} \\tfrac{1}{2}\\|Ax - b\\|^2
```
Attributes:
- `pb` : a problem using, for instance, either `LLSModel` (designed for linear least square problem, see https://github.com/JuliaSmoothOptimizers/LLSModels.jl ) or `LinearSystem`.
- `current_state` : The information relative to the problem, see `GenericState`.
- (opt) `meta` : Metadata relative to stopping criteria, see `StoppingMeta`.
- (opt) `main_stp` : Stopping of the main loop in case we consider a Stopping
of a subproblem.
If not a subproblem, then `VoidStopping`.
- (opt) `listofstates` : ListofStates designed to store the history of States.
- (opt) `stopping_user_struct` : Contains a structure designed by the user.
Constructors:
- `LAStopping(pb, meta::AbstractStoppingMeta, stop_remote::AbstractStopRemoteControl, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), zero_start::Bool = false)`
The default constructor.
- `LAStopping(pb, meta::AbstractStoppingMeta, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), zero_start::Bool = false, kwargs...)`
The one passing the `kwargs` to the `stop_remote`.
- `LAStopping(pb, state::AbstractState; stop_remote::AbstractStopRemoteControl = StopRemoteControl(), main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), zero_start::Bool = false, kwargs...)`
The one passing the `kwargs` to the `meta`.
- `LAStopping(:: Union{AbstractLinearOperator, AbstractMatrix}, :: AbstractVector; sparse::Bool = true, n_listofstates::Int = 0, kwargs...)`
The one setting up a default problem (`sparse ? LLSModel(A, b) : LinearSystem(A, b)`), a default `GenericState` using x, and initializing the list of states if `n_listofstates>0`.
- `LAStopping(:: Union{AbstractLinearOperator, AbstractMatrix}, :: AbstractVector, :: AbstractState; sparse::Bool = true, kwargs...)`
The one setting up a default problem (`sparse ? LLSModel(A, b) : LinearSystem(A, b)`).
Notes:
- No specific State targeted
- State don't necessarily keep track of evals
- Evals are checked only for `pb.A` being a LinearOperator
- `zero_start` is true if 0 is the initial guess (not check automatically)
- `LLSModel` counter follow `NLSCounters` (see `init_max_counters_NLS`)
- By default, `meta.max_cntrs` is initialized with an NLSCounters
See also `GenericStopping`, `NLPStopping`, `linear_system_check`, `normal_equation_check`
"""
mutable struct LAStopping{Pb, M, SRC, T, MStp, LoS} <: AbstractStopping{Pb, M, SRC, T, MStp, LoS}
  # problem: typically an `LLSModel` or a `LinearSystem`
  pb::Pb
  # Common parameters: tolerances, limits and status flags (see `StoppingMeta`)
  meta::M
  # remote control to de/activate individual checks in `start!`/`stop!`
  stop_remote::SRC
  # current state of the problem (iterate, residual, ...)
  current_state::T
  # Stopping of the main problem, or `VoidStopping()` if not a subproblem
  main_stp::MStp
  # History of states (`VoidListofStates()` if no history is kept)
  listofstates::LoS
  # User-specific structure
  stopping_user_struct::AbstractDict
  #zero is initial point (not checked automatically)
  zero_start::Bool
end
# Accessors for the components of an `LAStopping`.
function get_pb(stp::LAStopping)
  return stp.pb
end
function get_meta(stp::LAStopping)
  return stp.meta
end
function get_remote(stp::LAStopping)
  return stp.stop_remote
end
function get_state(stp::LAStopping)
  return stp.current_state
end
function get_main_stp(stp::LAStopping)
  return stp.main_stp
end
function get_list_of_states(stp::LAStopping)
  return stp.listofstates
end
function get_user_struct(stp::LAStopping)
  return stp.stopping_user_struct
end
# Default constructor: every component is supplied explicitly.
function LAStopping(
  pb::Pb,
  meta::M,
  stop_remote::SRC,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  zero_start::Bool = false,
) where {Pb <: Any, M <: AbstractStoppingMeta, SRC <: AbstractStopRemoteControl, T <: AbstractState}
  return LAStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct, zero_start)
end
# Constructor building the remote control from the remaining keyword arguments.
function LAStopping(
  pb::Pb,
  meta::M,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  zero_start::Bool = false,
  kwargs...,
) where {Pb <: Any, M <: AbstractStoppingMeta, T <: AbstractState}
  # the remaining keyword arguments parametrize the remote control
  stop_remote = StopRemoteControl(; kwargs...) #main_stp == VoidStopping() ? StopRemoteControl() : cheap_stop_remote_control()
  return LAStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct, zero_start)
end
# Constructor building the meta from the remaining keyword arguments.
function LAStopping(
  pb::Pb,
  current_state::T;
  stop_remote::AbstractStopRemoteControl = StopRemoteControl(), #main_stp == VoidStopping() ? StopRemoteControl() : cheap_stop_remote_control()
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  user_struct::AbstractDict = Dict(),
  zero_start::Bool = false,
  kwargs...,
) where {Pb <: Any, T <: AbstractState}
  # default evaluation limits: NLS counters for an `LLSModel`,
  # linear-operator counters otherwise (unless `max_cntrs` is given)
  if :max_cntrs in keys(kwargs)
    mcntrs = kwargs[:max_cntrs]
  elseif Pb <: LLSModel
    mcntrs = init_max_counters_NLS()
  else
    mcntrs = init_max_counters_linear_operators()
  end
  # default optimality measure: the residual norm ||Ax - b||
  if :optimality_check in keys(kwargs)
    oc = kwargs[:optimality_check]
  else
    oc = linear_system_check
  end
  meta = StoppingMeta(; max_cntrs = mcntrs, optimality_check = oc, kwargs...)
  return LAStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct, zero_start)
end
# Convenience constructor from the data (A, b) of the linear system.
function LAStopping(
  A::TA,
  b::Tb;
  x::Tb = zeros(eltype(Tb), size(A, 2)),
  sparse::Bool = true,
  n_listofstates::Int = 0,
  kwargs...,
) where {TA <: Any, Tb <: AbstractVector}
  # wrap (A, b) in a problem structure: `LLSModel` by default,
  # otherwise a minimal `LinearSystem`
  pb = sparse ? LLSModel(A, b) : LinearSystem(A, b)
  state = GenericState(x)
  mcntrs = sparse ? init_max_counters_NLS() : init_max_counters_linear_operators()
  # optionally keep a bounded history of states (unless `list` was given)
  if n_listofstates > 0 && :list ∉ keys(kwargs)
    list = ListofStates(n_listofstates, Val{typeof(state)}())
    return LAStopping(pb, state, max_cntrs = mcntrs, list = list; kwargs...)
  end
  return LAStopping(pb, state, max_cntrs = mcntrs; kwargs...)
end
# Same as the (A, b) constructor, but with a user-provided state.
function LAStopping(
  A::TA,
  b::Tb,
  state::S;
  sparse::Bool = true,
  kwargs...,
) where {TA <: Any, Tb <: AbstractVector, S <: AbstractState}
  # wrap (A, b): `LLSModel` by default, minimal `LinearSystem` otherwise
  pb = sparse ? LLSModel(A, b) : LinearSystem(A, b)
  mcntrs = sparse ? init_max_counters_NLS() : init_max_counters_linear_operators()
  return LAStopping(pb, state, max_cntrs = mcntrs; kwargs...)
end
"""
Type: LACounters

Track the number of operator-vector products (`nprod`), transposed products
(`ntprod`), conjugate-transposed products (`nctprod`) and a running total
(`sum`) for a linear-algebra problem.
"""
mutable struct LACounters{T <: Integer}
  nprod::T
  ntprod::T
  nctprod::T
  sum::T
  # Generalization: the constraint was `T <: Int`, which (together with the
  # Int64-typed keyword constructor) could not construct counters on 32-bit
  # platforms where `Int == Int32`. `Integer` is backward compatible.
  function LACounters(nprod::T, ntprod::T, nctprod::T, sum::T) where {T <: Integer}
    return new{T}(nprod, ntprod, nctprod, sum)
  end
end

# Keyword constructor; `promote` guarantees a common integer type `T`.
function LACounters(; nprod::Integer = 0, ntprod::Integer = 0, nctprod::Integer = 0, sum::Integer = 0)
  return LACounters(promote(nprod, ntprod, nctprod, sum)...)
end
"""
init\\_max\\_counters\\_linear\\_operators: counters for LinearOperator
`init_max_counters_linear_operators(; allevals :: T = 20000, nprod = allevals, ntprod = allevals, nctprod = allevals, sum = 11 * allevals)`
"""
function init_max_counters_linear_operators(;
  allevals::T = 20000,
  nprod::T = allevals,
  ntprod::T = allevals,
  nctprod::T = allevals,
  sum::T = allevals * 11,
) where {T <: Int}
  # Build the dictionary of evaluation limits directly from the keywords.
  return Dict{Symbol, T}(
    :nprod => nprod,
    :ntprod => ntprod,
    :nctprod => nctprod,
    :neval_sum => sum,
  )
end
"""
LinearSystem: Minimal structure to store linear algebra problems
`LinearSystem(:: Union{AbstractLinearOperator, AbstractMatrix}, :: AbstractVector)`
Note:
Another option is to convert the `LinearSystem` as an `LLSModel`.
"""
mutable struct LinearSystem{
  TA <: Union{AbstractLinearOperator, AbstractMatrix},
  Tb <: AbstractVector,
}
  A::TA # system matrix or operator
  b::Tb # right-hand side
  counters::LACounters # product counters, updated by the check functions
  function LinearSystem(
    A::TA,
    b::Tb;
    counters::LACounters = LACounters(),
    kwargs...,
  ) where {TA <: Union{AbstractLinearOperator, AbstractMatrix}, Tb <: AbstractVector}
    return new{TA, Tb}(A, b, counters)
  end
end
# Specialized constructor for `LinearOperator` problems: build a default
# `GenericState` from `x` and forward the keyword arguments.
function LAStopping(
  A::TA,
  b::Tb;
  x::Tb = zeros(eltype(Tb), size(A, 2)),
  kwargs...,
) where {TA <: AbstractLinearOperator, Tb <: AbstractVector}
  # Bug fix: `kwargs` must be splatted as keyword arguments (after `;`);
  # the former positional splat raised a MethodError for nonempty kwargs.
  return LAStopping(A, b, GenericState(x); kwargs...)
end
# Specialized constructor for `LinearOperator` problems with a given state:
# wrap (A, b) in a `LinearSystem` and set linear-operator evaluation limits.
function LAStopping(
  A::TA,
  b::Tb,
  state::AbstractState;
  kwargs...,
) where {TA <: AbstractLinearOperator, Tb <: AbstractVector}
  # Bug fix: `kwargs` must be splatted in the keyword section (after `;`);
  # the former positional splat raised a MethodError for nonempty kwargs.
  return LAStopping(
    LinearSystem(A, b),
    state;
    max_cntrs = init_max_counters_linear_operators(),
    kwargs...,
  )
end
"""
\\_resources\\_check!: check if the optimization algorithm has
exhausted the resources. This is the Linear Algebra specialized version.
Note:
- function does _not_ keep track of the evals in the state
- check `:nprod`, `:ntprod`, and `:nctprod` in the `LinearOperator` entries
"""
function _resources_check!(stp::LAStopping, x::T) where {T}
  #GenericState has no field evals.
  #_smart_update!(stp.current_state, evals = cntrs)
  # check all the entries in the counter
  # global user limit diagnostic
  # delegate the per-counter comparisons to `_counters_loop!`
  stp.meta.resources = _counters_loop!(stp.pb.counters, stp.meta.max_cntrs)
  return stp.meta.resources
end
# Check an `LACounters` against the limits in `max_cntrs`: true if any
# individual product counter, or the total number of products, exceeds
# its bound.
function _counters_loop!(cntrs::LACounters{T}, max_cntrs::Dict{Symbol, T}) where {T}
  total = 0
  exceeded = false
  for key in (:nprod, :ntprod, :nctprod)
    val = getfield(cntrs, key)
    exceeded |= val > max_cntrs[key]
    total += val
  end
  return exceeded || (total > max_cntrs[:neval_sum])
end
# Check an `NLSCounters` against the limits in `max_cntrs`.
# Bug fix: the running total was never accumulated (it stayed 0), so the
# `:neval_sum` bound was dead code; accumulate every bounded counter as the
# `LACounters` method does.
function _counters_loop!(cntrs::NLSCounters, max_cntrs::Dict{Symbol, T}) where {T}
  sum, max_f = 0, false
  # residual-specific counters (skip the nested `counters` field)
  for f in intersect(fieldnames(NLSCounters), keys(max_cntrs))
    f == :counters && continue
    val = getfield(cntrs, f)
    max_f = max_f || (val > max_cntrs[f])
    sum += val
  end
  # plain `Counters` entries, stored in the nested `counters` field
  for f in intersect(fieldnames(Counters), keys(max_cntrs))
    val = getfield(cntrs.counters, f)
    max_f = max_f || (val > max_cntrs[f])
    sum += val
  end
  return max_f || (sum > max_cntrs[:neval_sum])
end
"""
linear\\_system\\_check: return ||Ax-b||_p
`linear_system_check(:: Union{LinearSystem, LLSModel}, :: AbstractState; pnorm :: Real = Inf, kwargs...)`
Note:
- Returns the p-norm of state.res
- state.res is filled in if nothing.
"""
function linear_system_check(pb::LinearSystem, state::AbstractState; pnorm::Real = Inf, kwargs...)
  # account for the operator-vector product performed below
  pb.counters.nprod += 1
  # lazily compute the residual Ax - b when not already stored
  if isempty(state.res)
    update!(state, res = pb.A * state.x - pb.b)
  end
  return norm(state.res, pnorm)
end
# LLSModel version: the residual is evaluated through NLPModels' API.
function linear_system_check(pb::LLSModel, state::AbstractState; pnorm::Real = Inf, kwargs...)
  # lazily compute the residual Ax - b; keep it sparse when x is sparse
  if length(state.res) == 0
    Axmb = if xtype(state) <: SparseVector
      sparse(residual(pb, state.x))
    else
      residual(pb, state.x)
    end
    update!(state, res = Axmb)
  end
  return norm(state.res, pnorm)
end
"""
normal\\_equation\\_check: return ||A'Ax-A'b||_p
`normal_equation_check(:: Union{LinearSystem, LLSModel}, :: AbstractState; pnorm :: Real = Inf, kwargs...)`
Note: pb must have A and b entries
"""
function normal_equation_check(pb::LinearSystem, state::AbstractState; pnorm::Real = Inf, kwargs...)
  # one direct and one transposed product are performed below
  pb.counters.nprod += 1
  pb.counters.ntprod += 1
  normal_residual = pb.A' * (pb.A * state.x) - pb.A' * pb.b
  return norm(normal_residual, pnorm)
end
# LLSModel version: rely on NLPModels' residual and Jᵀ-product.
function normal_equation_check(pb::LLSModel, state::AbstractState; pnorm::Real = Inf, kwargs...)
  r = residual(pb, state.x)
  return norm(jtprod_residual(pb, state.x, r), pnorm)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 17641 | """
Type: NLPStopping
Methods: `start!`, `stop!`, `update_and_start!`, `update_and_stop!`, `fill_in!`, `reinit!`, `status`,
`KKT`, `unconstrained_check`, `optim_check_bounded`
Specialization of `GenericStopping`. Stopping structure for non-linear optimization models using `NLPModels` ( https://github.com/JuliaSmoothOptimizers/NLPModels.jl ).
Attributes:
- `pb` : An `AbstractNLPModel`.
- `current_state` : The information relative to the problem, see `GenericState` or `NLPAtX`.
- (opt) `meta` : Metadata relative to stopping criteria, see `StoppingMeta`.
- (opt) `main_stp` : Stopping of the main loop in case we consider a Stopping
of a subproblem.
If not a subproblem, then `VoidStopping`.
- (opt) `listofstates` : ListofStates designed to store the history of States.
- (opt) `stopping_user_struct` : Contains any structure designed by the user.
Constructors:
- `NLPStopping(pb::AbstractNLPModel, meta::AbstractStoppingMeta, stop_remote::AbstractStopRemoteControl, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The default constructor.
- `NLPStopping(pb::AbstractNLPModel, meta::AbstractStoppingMeta, state::AbstractState; main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The one passing the `kwargs` to the `stop_remote`.
- `GenericStopping(pb::AbstractNLPModel, state::AbstractState; stop_remote::AbstractStopRemoteControl = StopRemoteControl(), main_stp::AbstractStopping=VoidStopping(), list::AbstractListofStates = VoidListofStates(), user_struct::AbstractDict = Dict(), kwargs...)`
The one passing the `kwargs` to the `meta`.
- `GenericStopping(pb::AbstractNLPModel; n_listofstates=, kwargs...)`
The one setting up a default state `NLPAtX` using `pb.meta.x0`, and initializing the list of states if `n_listofstates>0`. The optimality function is the function `KKT` unless `optimality_check` is in the `kwargs`.
Notes:
- Designed for `NLPAtX` State. Constructor checks that the State has the required entries.
"""
mutable struct NLPStopping{Pb, M, SRC, T, MStp, LoS} <: AbstractStopping{Pb, M, SRC, T, MStp, LoS}
  # problem: an `AbstractNLPModel`
  pb::Pb
  # Common parameters: tolerances, limits and status flags (see `StoppingMeta`)
  meta::M
  # remote control to de/activate individual checks in `start!`/`stop!`
  stop_remote::SRC
  # current state of the problem (see `NLPAtX`)
  current_state::T
  # Stopping of the main problem, or `VoidStopping()` if not a subproblem
  main_stp::MStp
  # History of states (`VoidListofStates()` if no history is kept)
  listofstates::LoS
  # User-specific structure
  stopping_user_struct::AbstractDict
end
# Accessors for the components of an `NLPStopping`.
function get_pb(stp::NLPStopping)
  return stp.pb
end
function get_meta(stp::NLPStopping)
  return stp.meta
end
function get_remote(stp::NLPStopping)
  return stp.stop_remote
end
function get_state(stp::NLPStopping)
  return stp.current_state
end
function get_main_stp(stp::NLPStopping)
  return stp.main_stp
end
function get_list_of_states(stp::NLPStopping)
  return stp.listofstates
end
function get_user_struct(stp::NLPStopping)
  return stp.stopping_user_struct
end
# Default constructor: every component is supplied explicitly.
function NLPStopping(
  pb::Pb,
  meta::M,
  stop_remote::SRC,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  n_listofstates::Integer = 0,
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {
  Pb <: AbstractNLPModel,
  M <: AbstractStoppingMeta,
  SRC <: AbstractStopRemoteControl,
  T <: AbstractState,
}
  # a positive `n_listofstates` overrides `list` with a bounded history
  if n_listofstates > 0
    list = ListofStates(n_listofstates, Val{T}())
  end
  return NLPStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Constructor building the remote control from the remaining keyword arguments.
function NLPStopping(
  pb::Pb,
  meta::M,
  current_state::T;
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  n_listofstates::Integer = 0,
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: AbstractNLPModel, M <: AbstractStoppingMeta, T <: AbstractState}
  # the remaining keyword arguments parametrize the remote control
  stop_remote = StopRemoteControl(; kwargs...) #main_stp == VoidStopping() ? StopRemoteControl() : cheap_stop_remote_control()
  # a positive `n_listofstates` overrides `list` with a bounded history
  if n_listofstates > 0
    list = ListofStates(n_listofstates, Val{T}())
  end
  return NLPStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Constructor building the meta from the remaining keyword arguments.
function NLPStopping(
  pb::Pb,
  current_state::T;
  stop_remote::AbstractStopRemoteControl = StopRemoteControl(),
  main_stp::AbstractStopping = VoidStopping(),
  list::AbstractListofStates = VoidListofStates(),
  n_listofstates::Integer = 0,
  user_struct::AbstractDict = Dict(),
  kwargs...,
) where {Pb <: AbstractNLPModel, T <: AbstractState}
  # default evaluation limits: NLS counters for least-squares models
  mcntrs = if :max_cntrs in keys(kwargs)
    kwargs[:max_cntrs]
  elseif Pb <: AbstractNLSModel
    init_max_counters_NLS()
  else
    init_max_counters()
  end
  # default optimality measure: the KKT residual
  if :optimality_check in keys(kwargs)
    oc = kwargs[:optimality_check]
  else
    oc = KKT
  end
  # a positive `n_listofstates` overrides `list` with a bounded history
  if n_listofstates > 0
    list = ListofStates(n_listofstates, Val{T}())
  end
  meta = StoppingMeta(; max_cntrs = mcntrs, optimality_check = oc, kwargs...)
  return NLPStopping(pb, meta, stop_remote, current_state, main_stp, list, user_struct)
end
# Convenience constructor from the model alone: builds a default `NLPAtX`
# from the model's starting point (and multipliers when constrained).
function NLPStopping(pb::AbstractNLPModel; n_listofstates::Integer = 0, kwargs...)
  #Create a default NLPAtX
  initial_guess = copy(pb.meta.x0)
  if get_ncon(pb) > 0
    # constrained problem: also store the initial Lagrange multipliers
    initial_lag = copy(pb.meta.y0)
    nlp_at_x = NLPAtX(initial_guess, initial_lag)
  else
    nlp_at_x = NLPAtX(initial_guess)
  end
  # optionally keep a bounded history of states (unless `list` was given)
  if n_listofstates > 0 && :list ∉ keys(kwargs)
    list = ListofStates(n_listofstates, Val{typeof(nlp_at_x)}())
    return NLPStopping(pb, nlp_at_x, list = list, optimality_check = KKT; kwargs...)
  end
  return NLPStopping(pb, nlp_at_x, optimality_check = KKT; kwargs...)
end
"""
init\\_max\\_counters:
initialize the maximum number of evaluations on each of
the functions present in the NLPModels.Counters, e.g.
`init_max_counters(; allevals :: T = typemax(T), obj = allevals, grad = allevals, cons = allevals, jcon = allevals, jgrad = allevals, jac = allevals, jprod = allevals, jtprod = allevals, hess = allevals, hprod = allevals, jhprod = allevals, sum = 11 * allevals, kwargs...)`
`:neval_sum` defaults to `typemax(T)` unless the keyword `sum` is given.
"""
function init_max_counters(; allevals::T = typemax(Int), kwargs...) where {T <: Integer}
  # Strip the `neval_` prefix to recover the keyword name of each counter.
  # Bug fix: splitting on '_' and taking the 2nd piece truncated multi-word
  # counters (e.g. `neval_cons_lin` became `:cons`, colliding with `:cons`);
  # split on the "neval_" prefix instead, as `init_max_counters_NLS` does.
  entries = [Meta.parse(split("$(f)", "neval_")[2]) for f in fieldnames(Counters)]
  lim_fields = keys(kwargs)
  cntrs = Dict{Symbol, T}([
    (Meta.parse("neval_$(t)"), t in lim_fields ? kwargs[t] : allevals) for t in entries
  ])
  push!(cntrs, (:neval_sum => :sum in lim_fields ? kwargs[:sum] : typemax(T)))
  return cntrs
end
# Reset `meta.max_cntrs` with a uniform evaluation limit `allevals`,
# choosing the NLS variant when the problem is an `AbstractNLSModel`.
function max_evals!(
  stp::NLPStopping{Pb, M, SRC, T, MStp, LoS},
  allevals::Integer,
) where {Pb, M, SRC, T, MStp, LoS}
  if Pb <: AbstractNLSModel
    stp.meta.max_cntrs = init_max_counters_NLS(allevals = allevals)
  else
    stp.meta.max_cntrs = init_max_counters(allevals = allevals)
  end
  return stp
end
# Keyword version: per-counter limits can be passed through `kwargs`.
function max_evals!(
  stp::NLPStopping{Pb, M, SRC, T, MStp, LoS};
  allevals::I = typemax(Int),
  kwargs...,
) where {Pb, M, SRC, T, MStp, LoS, I <: Integer}
  # choose the NLS variant when the problem is an `AbstractNLSModel`
  stp.meta.max_cntrs = if Pb <: AbstractNLSModel
    init_max_counters_NLS(allevals = allevals; kwargs...)
  else
    init_max_counters(allevals = allevals; kwargs...)
  end
  return stp
end
"""
init\\_max\\_counters\\_NLS:
initialize the maximum number of evaluations on each of
the functions present in the `NLPModels.NLSCounters`, e.g.
`init_max_counters_NLS(; allevals = typemax(T), residual = allevals, jac_residual = allevals, jprod_residual = allevals, jtprod_residual = allevals, hess_residual = allevals, jhess_residual = allevals, hprod_residual = allevals, kwargs...)`
"""
function init_max_counters_NLS(; allevals::T = typemax(Int), kwargs...) where {T <: Integer}
  # limits for the underlying `Counters` entries first
  cntrs_nlp = init_max_counters(; allevals = allevals, kwargs...)
  # then the residual-specific counters of `NLSCounters`
  entries =
    [Meta.parse(split("$(f)", "neval_")[2]) for f in setdiff(fieldnames(NLSCounters), [:counters])]
  lim_fields = keys(kwargs)
  cntrs = Dict{Symbol, T}([
    (Meta.parse("neval_$(t)"), t in lim_fields ? kwargs[t] : allevals) for t in entries
  ])
  return merge(cntrs_nlp, cntrs)
end
"""
fill_in!: (NLPStopping version) a function that fill in the required values in the `NLPAtX`.
`fill_in!( :: NLPStopping, :: Union{AbstractVector, Nothing}; fx :: Union{AbstractVector, Nothing} = nothing, gx :: Union{AbstractVector, Nothing} = nothing, Hx :: Union{MatrixType, Nothing} = nothing, cx :: Union{AbstractVector, Nothing} = nothing, Jx :: Union{MatrixType, Nothing} = nothing, lambda :: Union{AbstractVector, Nothing} = nothing, mu :: Union{AbstractVector, Nothing} = nothing, matrix_info :: Bool = true, kwargs...)`
"""
function fill_in!(
  stp::NLPStopping{Pb, M, SRC, NLPAtX{Score, S, T}, MStp, LoS},
  x::T;
  fx::Union{eltype(T), Nothing} = nothing,
  gx::Union{T, Nothing} = nothing,
  Hx = nothing,
  cx::Union{T, Nothing} = nothing,
  Jx = nothing,
  lambda::Union{T, Nothing} = nothing,
  mu::Union{T, Nothing} = nothing,
  matrix_info::Bool = true,
  convert::Bool = true,
  kwargs...,
) where {
  Pb,
  M <: AbstractStoppingMeta,
  SRC <: AbstractStopRemoteControl,
  MStp,
  LoS <: AbstractListofStates,
  Score,
  S,
  T <: AbstractVector,
}
  # evaluate only the quantities that were not supplied by the caller
  gfx = isnothing(fx) ? obj(stp.pb, x) : fx
  ggx = isnothing(gx) ? grad(stp.pb, x) : gx
  # Hessian: only computed when `matrix_info` is true
  if isnothing(Hx) && matrix_info
    gHx = hess(stp.pb, x).data
  else
    gHx = isnothing(Hx) ? zeros(eltype(T), 0, 0) : Hx
  end
  # constraints and Jacobian are only relevant for constrained problems;
  # the Jacobian representation (operator vs matrix) follows the state's
  if stp.pb.meta.ncon > 0
    gJx = if !isnothing(Jx)
      Jx
    elseif typeof(stp.current_state.Jx) <: LinearOperator
      jac_op(stp.pb, x)
    else # typeof(stp.current_state.Jx) <: SparseArrays.SparseMatrixCSC
      jac(stp.pb, x)
    end
    gcx = isnothing(cx) ? cons(stp.pb, x) : cx
  else
    gJx = stp.current_state.Jx
    gcx = stp.current_state.cx
  end
  #update the Lagrange multiplier if one of the 2 is asked
  if (stp.pb.meta.ncon > 0 || has_bounds(stp.pb)) && (isnothing(lambda) || isnothing(mu))
    lb, lc = _compute_mutliplier(stp.pb, x, ggx, gcx, gJx; kwargs...)
  else
    # otherwise use the provided multipliers (or empty/zero defaults)
    lb = if isnothing(mu) & has_bounds(stp.pb)
      zeros(eltype(T), get_nvar(stp.pb))
    elseif isnothing(mu) & !has_bounds(stp.pb)
      zeros(eltype(T), 0)
    else
      mu
    end
    lc = isnothing(lambda) ? zeros(eltype(T), get_ncon(stp.pb)) : lambda
  end
  return update!(
    stp,
    x = x,
    fx = gfx,
    gx = ggx,
    Hx = gHx,
    cx = gcx,
    Jx = gJx,
    mu = lb,
    lambda = lc,
    convert = convert,
  )
end
# 1D (line-search) version of fill_in!: fill the objective and derivative
# at the step `x` and at 0 (the current iterate of the outer algorithm).
function fill_in!(
  stp::NLPStopping{Pb, M, SRC, OneDAtX{S, T}, MStp, LoS},
  x::T;
  fx::Union{T, Nothing} = nothing,
  gx::Union{T, Nothing} = nothing,
  f₀::Union{T, Nothing} = nothing,
  g₀::Union{T, Nothing} = nothing,
  convert::Bool = true,
  kwargs...,
) where {
  Pb,
  M <: AbstractStoppingMeta,
  SRC <: AbstractStopRemoteControl,
  MStp,
  LoS <: AbstractListofStates,
  S,
  T,
}
  # evaluate only the quantities that were not supplied by the caller
  gfx = isnothing(fx) ? obj(stp.pb, x) : fx
  ggx = isnothing(gx) ? grad(stp.pb, x) : gx
  gf₀ = isnothing(f₀) ? obj(stp.pb, 0.0) : f₀
  gg₀ = isnothing(g₀) ? grad(stp.pb, 0.0) : g₀
  return update!(
    stp.current_state,
    x = x,
    fx = gfx,
    gx = ggx,
    f₀ = gf₀,
    g₀ = gg₀,
    convert = convert,
  )
end
"""
For NLPStopping, `rcounters` set as true also reinitialize the counters.
"""
function reinit!(
  stp::NLPStopping;
  rstate::Bool = false,
  rlist::Bool = true,
  rcounters::Bool = false,
  kwargs...,
)
  stp.meta.start_time = NaN
  stp.meta.optimality0 = 1.0
  #reinitialize the boolean status
  reinit!(stp.meta)
  #reinitialize the counter of stop
  stp.meta.nb_of_stop = 0
  #reinitialize the list of states
  if rlist && (typeof(stp.listofstates) != VoidListofStates)
    #TODO: Warning we cannot change the type of ListofStates
    stp.listofstates = rstate ? VoidListofStates() : ListofStates(stp.current_state)
  end
  #reinitialize the state (the kwargs are passed to the state's reinit!)
  if rstate
    reinit!(stp.current_state; kwargs...)
  end
  #reinitialize the NLPModel Counters
  if rcounters && typeof(stp.pb) <: AbstractNLPModel
    NLPModels.reset!(stp.pb)
  end
  return stp
end
"""
`_resources_check!`: check if the optimization algorithm has exhausted the resources.
This is the NLP specialized version that takes into account
the evaluation of the functions following the `sum_counters`
structure from NLPModels.
_resources_check!(::NLPStopping, ::T)
Note:
- function uses counters in `stp.pb`, and update the counters in the state.
- function is compatible with `Counters`, `NLSCounters`, and any type whose entries match the entries of `max_cntrs`.
"""
function _resources_check!(
  stp::NLPStopping{Pb, M, SRC, T, MStp, LoS},
  x::S,
) where {Pb <: AbstractNLPModel, M, SRC, T, MStp, LoS, S}
  max_cntrs = stp.meta.max_cntrs
  # nothing to check if no limit was set
  if length(max_cntrs) == 0
    return stp.meta.resources
  end
  # check all the entries in the counter
  max_f = check_entries_counters(stp.pb, max_cntrs)
  # Maximum number of function and derivative(s) computation.
  # Bug fix: `max_evals` was previously only assigned inside the
  # `:neval_sum` branch and raised an UndefVarError when that key was
  # absent from `max_cntrs`.
  max_evals = (:neval_sum in keys(max_cntrs)) && (sum_counters(stp.pb) > max_cntrs[:neval_sum])
  # global user limit diagnostic
  if max_evals || max_f
    stp.meta.resources = true
  end
  return stp.meta.resources
end
# Return true if any `Counters` entry of `nlp` exceeds its limit in `max_cntrs`.
# `eval(f)(nlp)` resolves the NLPModels accessor (e.g. `neval_obj(nlp)`).
function check_entries_counters(nlp::AbstractNLPModel, max_cntrs)
  for f in keys(max_cntrs)
    if f in fieldnames(Counters)
      if eval(f)(nlp)::Int > max_cntrs[f]
        return true
      end
    end
  end
  return false
end
# NLS version: also check the residual-specific counters of `NLSCounters`
# (the nested `:counters` field itself is skipped).
function check_entries_counters(nlp::AbstractNLSModel, max_cntrs)
  for f in keys(max_cntrs)
    if (f in fieldnames(NLSCounters)) && (f != :counters)
      if eval(f)(nlp)::Int > max_cntrs[f]
        return true
      end
    elseif f in fieldnames(Counters)
      if eval(f)(nlp)::Int > max_cntrs[f]
        return true
      end
    end
  end
  return false
end
"""
`_unbounded_problem_check!`: This is the NLP specialized version that takes into account
that the problem might be unbounded if the objective or the
constraint function are unbounded.
`_unbounded_problem_check!(:: NLPStopping, :: AbstractVector)`
Note:
- evaluate the objective function if `state.fx` for NLPAtX or `state.fx` for OneDAtX is `_init_field` and store in `state`.
- if minimize problem (i.e. nlp.meta.minimize is true) check if `state.fx <= - meta.unbounded_threshold`, otherwise check `state.fx ≥ meta.unbounded_threshold`.
"""
function _unbounded_problem_check!(
  stp::NLPStopping{Pb, M, SRC, NLPAtX{Score, S, T}, MStp, LoS},
  x::AbstractVector,
) where {Pb, M, SRC, MStp, LoS, Score, S, T}
  # evaluate the objective if not yet stored in the state
  if isnan(get_fx(stp.current_state))
    stp.current_state.fx = obj(stp.pb, x)
  end
  # the direction of the threshold depends on min/max problems
  if stp.pb.meta.minimize
    f_too_large = get_fx(stp.current_state) <= -stp.meta.unbounded_threshold
  else
    f_too_large = get_fx(stp.current_state) >= stp.meta.unbounded_threshold
  end
  if f_too_large
    stp.meta.unbounded_pb = true
  end
  return stp.meta.unbounded_pb
end
# 1D (line-search) version of the unboundedness check.
function _unbounded_problem_check!(
  stp::NLPStopping{Pb, M, SRC, OneDAtX{S, T}, MStp, LoS},
  x::Union{AbstractVector, Number},
) where {Pb, M, SRC, MStp, LoS, S, T}
  # evaluate the objective if not yet stored in the state
  if isnan(get_fx(stp.current_state))
    stp.current_state.fx = obj(stp.pb, x)
  end
  # the direction of the threshold depends on min/max problems
  if stp.pb.meta.minimize
    f_too_large = get_fx(stp.current_state) <= -stp.meta.unbounded_threshold
  else
    f_too_large = get_fx(stp.current_state) >= stp.meta.unbounded_threshold
  end
  # Bug fix: `f_too_large` was computed but `meta.unbounded_pb` was never
  # set (contrary to the NLPAtX method), so the check had no effect.
  if f_too_large
    stp.meta.unbounded_pb = true
  end
  return stp.meta.unbounded_pb
end
"""
\\_infeasibility\\_check!: This is the NLP specialized version.
Note:
- check whether the `current_score` contains Inf.
- check the feasibility of an optimization problem in the spirit of a convex
indicator function.
"""
function _infeasibility_check!(stp::NLPStopping, x::T) where {T}
  #=
  #- evaluate the constraint function if `state.cx` is `nothing` and store in `state`.
  #- check the Inf-norm of the violation ≤ stp.meta.atol
  if stp.pb.meta.ncon != 0 #if the problems has constraints, check |c(x)|
    cx = stp.current_state.cx
    if cx == _init_field(typeof(stp.current_state.cx))
      cx = cons(stp.pb, x)
    end
    vio = max.(max.(cx - stp.pb.meta.ucon, 0.), max.(stp.pb.meta.lcon - cx, 0.))
    tol = Inf #stp.meta.atol
    stp.meta.infeasible = _inequality_check(vio, stp.meta.atol, 0.) ? true : stp.meta.infeasible
  end
  =#
  # an infinite score in the direction of optimization flags infeasibility
  if stp.pb.meta.minimize
    vio = any(z -> z == Inf, stp.current_state.current_score)
    if vio
      stp.meta.infeasible = true
    end
  else
    vio = any(z -> z == -Inf, stp.current_state.current_score)
    if vio
      stp.meta.infeasible = true
    end
  end
  return stp.meta.infeasible
end
################################################################################
# Nonlinear problems admissibility functions
# Available: unconstrained_check(...), optim_check_bounded(...), KKT
################################################################################
include("nlp_admissible_functions.jl")
################################################################################
# line search admissibility functions
#
# TODO: change the ls_admissible_functions and use tol_check et tol_check_neg to
# handle the inequality instead of a max.
################################################################################
include("ls_admissible_functions.jl")
#=
"""
"""
function feasibility_optim_check(pb, state; kwargs...)
vio = _feasibility(pb, state)
tol = Inf #stp.meta.atol
return _inequality_check(vio, tol, 0.)
end
=#
################################################################################
# Functions computing Lagrange multipliers of a nonlinear problem
# Available: _compute_mutliplier(...)
################################################################################
include("nlp_compute_multiplier.jl")
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2568 | abstract type AbstractStopRemoteControl end
"""
Turn a boolean to false to cancel this check in the functions stop! and start!.
"""
struct StopRemoteControl <: AbstractStopRemoteControl
  unbounded_and_domain_x_check::Bool # unboundedness/NaN check of the iterate x
  domain_check::Bool # NaN check on the state entries
  optimality_check::Bool # optimality criterion check
  infeasibility_check::Bool # infeasibility check
  unbounded_problem_check::Bool # unboundedness check of the problem
  tired_check::Bool # time-limit check
  resources_check::Bool # evaluation-limit check
  stalled_check::Bool # stalled-algorithm check
  iteration_check::Bool # iteration-limit check
  main_pb_check::Bool # main problem's limits check (for substoppings)
  user_check::Bool # user-defined check in `stop!`
  user_start_check::Bool # user-defined check in `start!`
  cheap_check::Bool #`stop!` and `start!` stop whenever one check worked
  #not used now
end
# Keyword constructor: every check is activated by default.
# The O(n) annotations mark checks whose cost grows with the problem size.
function StopRemoteControl(;
  unbounded_and_domain_x_check::Bool = true, #O(n)
  domain_check::Bool = true, #O(n)
  optimality_check::Bool = true,
  infeasibility_check::Bool = true,
  unbounded_problem_check::Bool = true, #O(n)
  tired_check::Bool = true,
  resources_check::Bool = true,
  stalled_check::Bool = true,
  iteration_check::Bool = true,
  main_pb_check::Bool = true, #O(n)
  user_check::Bool = true,
  user_start_check::Bool = true,
  cheap_check::Bool = false,
)
  return StopRemoteControl(
    unbounded_and_domain_x_check,
    domain_check,
    optimality_check,
    infeasibility_check,
    unbounded_problem_check,
    tired_check,
    resources_check,
    stalled_check,
    iteration_check,
    main_pb_check,
    user_check,
    user_start_check,
    cheap_check,
  )
end
"""
Return a StopRemoteControl with the most expensive checks at false (the O(n))
by default in Stopping when it has a main_stp.
"""
function cheap_stop_remote_control(;
  unbounded_and_domain_x_check::Bool = false,
  domain_check::Bool = false,
  optimality_check::Bool = true,
  infeasibility_check::Bool = true,
  unbounded_problem_check::Bool = false,
  tired_check::Bool = true,
  resources_check::Bool = true,
  stalled_check::Bool = true,
  iteration_check::Bool = true,
  main_pb_check::Bool = false,
  user_check::Bool = true,
  user_start_check::Bool = true,
  cheap_check::Bool = true,
)
  return StopRemoteControl(
    unbounded_and_domain_x_check,
    domain_check,
    optimality_check,
    infeasibility_check,
    unbounded_problem_check,
    tired_check,
    resources_check,
    stalled_check,
    iteration_check,
    main_pb_check,
    user_check,
    user_start_check,
    cheap_check,
  )
end
import Base.show
# Pretty-print a `StopRemoteControl`: one aligned line per check flag.
function show(io::IO, src::StopRemoteControl)
  header = "$(typeof(src)) controls the following checks \n"
  rows = [@sprintf("%28s: %s \n", field, getfield(src, field)) for field in fieldnames(typeof(src))]
  println(io, string(header, rows...))
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 13439 | """
Type: StoppingMeta
Methods: no methods.
Attributes:
- `atol`: absolute tolerance.
- `rtol`: relative tolerance.
- `optimality0`: optimality score at the initial guess.
- `tol_check`: Function of `atol`, `rtol` and `optimality0` testing a score to zero.
- `tol_check_neg`: Function of `atol`, `rtol` and `optimality0` testing a score to zero.
- `check_pos`: pre-allocation for positive tolerance
- `check_neg`: pre-allocation for negative tolerance
- `recomp_tol`: true if tolerances are updated
- `optimality_check`: a stopping criterion via an admissibility function
- `unbounded_threshold`: threshold for unboundedness of the problem.
- `unbounded_x`: threshold for unboundedness of the iterate.
- `max_f`: maximum number of function (and derivatives) evaluations.
- `max_cntrs`: Dict contains the maximum number of evaluations
- `max_eval`: maximum number of function (and derivatives) evaluations.
- `max_iter`: threshold on the number of stop! call/number of iteration.
- `max_time`: time limit to let the algorithm run.
- `nb_of_stop`: keep track of the number of stop! call/iteration.
- `start_time`: keep track of the time at the beginning.
- `fail_sub_pb`: status.
- `unbounded`: status.
- `unbounded_pb`: status.
- `tired`: status.
- `stalled`: status.
- `iteration_limit`: status.
- `resources`: status.
- `optimal`: status.
- `infeasible`: status.
- `main_pb`: status.
- `domainerror`: status.
- `suboptimal`: status.
- `stopbyuser`: status.
- `exception`: status.
- `meta_user_struct`: Any
- `user_check_func!`: Function (AbstractStopping, Bool) -> callback.
`StoppingMeta(;atol :: Number = 1.0e-6, rtol :: Number = 1.0e-15, optimality0 :: Number = 1.0, tol_check :: Function = (atol,rtol,opt0) -> max(atol,rtol*opt0), tol_check_neg :: Function = (atol,rtol,opt0) -> -max(atol,rtol*opt0), unbounded_threshold :: Number = 1.0e50, unbounded_x :: Number = 1.0e50, max_f :: Int = typemax(Int), max_eval :: Int = 20000, max_iter :: Int = 5000, max_time :: Number = 300.0, start_time :: Float64 = NaN, meta_user_struct :: Any = nothing, kwargs...)`
an alternative with constant tolerances:
`StoppingMeta(tol_check :: T, tol_check_neg :: T;atol :: Number = 1.0e-6, rtol :: Number = 1.0e-15, optimality0 :: Number = 1.0, unbounded_threshold :: Number = 1.0e50, unbounded_x :: Number = 1.0e50, max_f :: Int = typemax(Int), max_eval :: Int = 20000, max_iter :: Int = 5000, max_time :: Number = 300.0, start_time :: Float64 = NaN, meta_user_struct :: Any = nothing, kwargs...)`
Note:
- It is a mutable struct, therefore we can modify elements of a `StoppingMeta`.
- The `nb_of_stop` is incremented everytime `stop!` or `update_and_stop!` is called
- The `optimality0` is modified once at the beginning of the algorithm (`start!`)
- The `start_time` is modified once at the beginning of the algorithm (`start!`)
if not precised before.
- The different status: `fail_sub_pb`, `unbounded`, `unbounded_pb`, `tired`, `stalled`,
`iteration_limit`, `resources`, `optimal`, `main_pb`, `domainerror`, `suboptimal`, `infeasible`
- `fail_sub_pb`, `suboptimal`, and `infeasible` are modified by the algorithm.
- `optimality_check` takes two inputs (`AbstractNLPModel`, `NLPAtX`)
and returns a `Number` or an `AbstractVector` to be compared to `0`.
- `optimality_check` does not necessarily fill in the State.
Examples: `StoppingMeta()`, `StoppingMeta(1., -1.)`
"""
mutable struct StoppingMeta{
  TolType <: Number,
  CheckType, #Type of the tol_check output
  MUS, #Meta User Struct
  Ftol <: Union{Function, CheckType},
  Ftolneg <: Union{Function, CheckType},
  Opt <: Function,
} <: AbstractStoppingMeta
  # problem tolerances
  atol::TolType # absolute tolerance
  rtol::TolType # relative tolerance
  optimality0::TolType # value of the optimality residual at starting point
  tol_check::Ftol #function of atol, rtol and optimality0
  #by default: tol_check = max(atol, rtol * optimality0)
  #other example: atol + rtol * optimality0
  tol_check_neg::Ftolneg # function of atol, rtol and optimality0
  check_pos::CheckType #pre-allocation for positive tolerance
  check_neg::CheckType #pre-allocation for negative tolerance
  optimality_check::Opt # stopping criterion
  # Function of (pb, state; kwargs...)
  #return type :: Union{Number, eltype(stp.meta)}
  recomp_tol::Bool #true if tolerances are updated
  unbounded_threshold::TolType # beyond this value, the problem is declared unbounded
  unbounded_x::TolType # beyond this value, ||x||_\infty is unbounded
  # fine grain control on resources
  max_f::Int # max function evaluations allowed TODO: used?
  max_cntrs::Dict{Symbol, Int} #contains the detailed max number of evaluations
  # global control on resources
  max_eval::Int # max evaluations (f+g+H+Hv) allowed TODO: used?
  max_iter::Int # max iterations allowed
  max_time::Float64 # max elapsed time allowed
  #intern Counters
  nb_of_stop::Int
  #intern start_time
  start_time::Float64
  # stopping properties (status of the problem)
  fail_sub_pb::Bool
  unbounded::Bool
  unbounded_pb::Bool
  tired::Bool
  stalled::Bool
  iteration_limit::Bool
  resources::Bool
  optimal::Bool
  infeasible::Bool
  main_pb::Bool
  domainerror::Bool
  suboptimal::Bool
  stopbyuser::Bool
  exception::Bool
  meta_user_struct::MUS
  user_check_func!::Function
end
"""
    StoppingMeta(tol_check, tol_check_neg; kwargs...)

Constructor of a `StoppingMeta` with pre-evaluated (constant) tolerances:
`tol_check` and `tol_check_neg` are stored directly in `check_pos`/`check_neg`,
and `recomp_tol` defaults to `false` so they are never reevaluated.
"""
function StoppingMeta(
  tol_check::CheckType,
  tol_check_neg::CheckType;
  atol::Number = 1.0e-6,
  rtol::Number = 1.0e-15,
  optimality0::Number = 1.0,
  optimality_check::Function = (a, b) -> 1.0,
  recomp_tol::Bool = false,
  unbounded_threshold::Number = 1.0e50, #typemax(Float64)
  unbounded_x::Number = 1.0e50,
  max_f::Int = typemax(Int),
  max_cntrs::Dict{Symbol, Int} = Dict{Symbol, Int}(),
  max_eval::Int = 20000,
  max_iter::Int = 5000,
  max_time::Float64 = 300.0,
  start_time::Float64 = NaN,
  meta_user_struct::Any = nothing,
  user_check_func!::Function = (stp::AbstractStopping, start::Bool) -> nothing,
  kwargs...,
) where {CheckType}
  # Every tolerance is stored with the type of `atol`.
  Tol = typeof(atol)
  # Call the default (positional) constructor: the constant tolerances serve
  # directly as check_pos/check_neg, all statuses start false, counter at 0.
  return StoppingMeta(
    atol,
    Tol(rtol),
    Tol(optimality0),
    tol_check,
    tol_check_neg,
    tol_check, # check_pos
    tol_check_neg, # check_neg
    optimality_check,
    recomp_tol,
    Tol(unbounded_threshold),
    Tol(unbounded_x),
    max_f,
    max_cntrs,
    max_eval,
    max_iter,
    max_time,
    0, # nb_of_stop
    start_time,
    false, # fail_sub_pb
    false, # unbounded
    false, # unbounded_pb
    false, # tired
    false, # stalled
    false, # iteration_limit
    false, # resources
    false, # optimal
    false, # infeasible
    false, # main_pb
    false, # domainerror
    false, # suboptimal
    false, # stopbyuser
    false, # exception
    meta_user_struct,
    user_check_func!,
  )
end
"""
    StoppingMeta(; kwargs...)

Keyword-only constructor of a `StoppingMeta`: `tol_check`/`tol_check_neg` are
functions of `(atol, rtol, optimality0)`, evaluated once here and reevaluated
at each `stop!` since `recomp_tol` defaults to `true`.
"""
function StoppingMeta(;
  atol::Number = 1.0e-6,
  rtol::Number = 1.0e-15,
  optimality0::Number = 1.0,
  tol_check::Function = (atol::Number, rtol::Number, opt0::Number) -> max(atol, rtol * opt0),
  tol_check_neg::Function = (atol::Number, rtol::Number, opt0::Number) ->
    -tol_check(atol, rtol, opt0),
  optimality_check::Function = (a, b) -> 1.0,
  recomp_tol::Bool = true,
  unbounded_threshold::Number = 1.0e50, #typemax(Float64)
  unbounded_x::Number = 1.0e50,
  max_f::Int = typemax(Int),
  max_cntrs::Dict{Symbol, Int} = Dict{Symbol, Int}(),
  max_eval::Int = 20000,
  max_iter::Int = 5000,
  max_time::Float64 = 300.0,
  start_time::Float64 = NaN,
  meta_user_struct::Any = nothing,
  user_check_func!::Function = (stp::AbstractStopping, start::Bool) -> nothing,
  kwargs...,
)
  # Every tolerance is stored with the type of `atol`; convert before
  # evaluating the tolerance functions so they see homogeneous inputs.
  Tol = typeof(atol)
  rtol = Tol(rtol)
  optimality0 = Tol(optimality0)
  check_pos = tol_check(atol, rtol, optimality0)
  check_neg = tol_check_neg(atol, rtol, optimality0)
  # Call the default (positional) constructor: all statuses start false,
  # counter at 0.
  return StoppingMeta(
    atol,
    rtol,
    optimality0,
    tol_check,
    tol_check_neg,
    check_pos,
    check_neg,
    optimality_check,
    recomp_tol,
    Tol(unbounded_threshold),
    Tol(unbounded_x),
    max_f,
    max_cntrs,
    max_eval,
    max_iter,
    max_time,
    0, # nb_of_stop
    start_time,
    false, # fail_sub_pb
    false, # unbounded
    false, # unbounded_pb
    false, # tired
    false, # stalled
    false, # iteration_limit
    false, # resources
    false, # optimal
    false, # infeasible
    false, # main_pb
    false, # domainerror
    false, # suboptimal
    false, # stopbyuser
    false, # exception
    meta_user_struct,
    user_check_func!,
  )
end
# Symbols of all the status booleans of `StoppingMeta`, used by `reinit!` and
# `show`. Note: this list includes `:exception`, which `OK_check` does not test.
const meta_statuses = [
  :fail_sub_pb,
  :unbounded,
  :unbounded_pb,
  :tired,
  :stalled,
  :iteration_limit,
  :resources,
  :optimal,
  :suboptimal,
  :main_pb,
  :domainerror,
  :infeasible,
  :stopbyuser,
  :exception,
]
"""
`OK_check(meta :: StoppingMeta)`
Return true if one of the decision boolean is true.
"""
function OK_check(
meta::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt}
#13 checks
OK =
meta.optimal ||
meta.tired ||
meta.iteration_limit ||
meta.resources ||
meta.unbounded ||
meta.unbounded_pb ||
meta.main_pb ||
meta.domainerror ||
meta.suboptimal ||
meta.fail_sub_pb ||
meta.stalled ||
meta.infeasible ||
meta.stopbyuser
return OK
end
"""
`tol_check(meta :: StoppingMeta)`
Return the pair of tolerances, recomputed if `meta.recomp_tol` is `true`.
"""
function tol_check(
meta::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt}
if meta.recomp_tol
atol, rtol, opt0 = meta.atol, meta.rtol, meta.optimality0
setfield!(meta, :check_pos, meta.tol_check(atol, rtol, opt0))
setfield!(meta, :check_neg, meta.tol_check_neg(atol, rtol, opt0))
end
return (meta.check_pos, meta.check_neg)
end
"""
`update_tol!(meta :: StoppingMeta; atol = meta.atol, rtol = meta.rtol, optimality0 = meta.optimality0)`
Update the tolerances parameters. Set `meta.recomp_tol` as `true`.
"""
function update_tol!(
meta::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt};
atol::TolType = meta.atol,
rtol::TolType = meta.rtol,
optimality0::TolType = meta.optimality0,
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt}
setfield!(meta, :recomp_tol, true)
setfield!(meta, :atol, atol)
setfield!(meta, :rtol, rtol)
setfield!(meta, :optimality0, optimality0)
return meta
end
function reinit!(
  meta::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt}
  # reset every status boolean (including :exception) to false
  foreach(s -> setfield!(meta, s, false), meta_statuses)
  return meta
end
# Return the `CheckType` parameter of the meta (type of the tol_check output).
checktype(
  ::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt} = CheckType
# Return the `TolType` parameter of the meta (numeric type of the tolerances).
toltype(
  ::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt} = TolType
# Return the `MUS` parameter of the meta (type of the user-defined structure).
metausertype(
  ::StoppingMeta{TolType, CheckType, MUS, Ftol, Ftolneg, Opt},
) where {TolType, CheckType, MUS, Ftol, Ftolneg, Opt} = MUS
import Base.show

"""
    show(io::IO, meta::AbstractStoppingMeta)

Display a `StoppingMeta`: the currently true statuses, the return type of the
tolerance checks, the main tolerance/limit values and whether a user-defined
structure is stored in the meta.
"""
function show(io::IO, meta::AbstractStoppingMeta)
  varlines = "$(typeof(meta)) has"
  if OK_check(meta)
    # list every true status boolean, comma/newline-separated
    ntrue = 0
    for f in meta_statuses
      if getfield(meta, f) && ntrue == 0
        ntrue += 1
        varlines = string(varlines, @sprintf(" %s", f))
      elseif getfield(meta, f)
        ntrue += 1
        varlines = string(varlines, @sprintf(",\n %s", f))
      end
    end
    varlines =
      ntrue == 1 ? string(varlines, " as only true status.\n") :
      string(varlines, " as true statuses.\n")
  else
    varlines = string(varlines, " now no true statuses.\n")
  end
  varlines = string(varlines, "The return type of tol check functions is $(checktype(meta))")
  if meta.recomp_tol
    varlines = string(varlines, ", and these functions are reevaluated at each stop!.\n")
  else
    varlines = string(varlines, ".\n")
  end
  varlines = string(varlines, "Current tolerances are: \n")
  # main numerical tolerances and resource limits, one aligned line each
  for k in [
    :atol,
    :rtol,
    :optimality0,
    :unbounded_threshold,
    :unbounded_x,
    :max_f,
    :max_eval,
    :max_iter,
    :max_time,
  ]
    varlines = string(
      varlines,
      @sprintf("%19s: %s (%s) \n", k, getfield(meta, k), typeof(getfield(meta, k)))
    )
  end
  if metausertype(meta) != Nothing
    varlines =
      string(varlines, "The user defined structure in the meta is a $(metausertype(meta)).\n")
  else
    varlines = string(varlines, "There is no user defined structure in the meta.\n")
  end
  println(io, varlines)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4128 | export armijo, wolfe, armijo_wolfe, shamanskii_stop, goldstein
"""
`armijo(h::Any, h_at_t::OneDAtX{S, T}; τ₀::T = T(0.01), kwargs...) where {S, T}`
Check if a step size is admissible according to the Armijo criterion.
Armijo criterion: `f(x + θd) - f(x) - τ₀ θ ∇f(x+θd)d < 0`
This function returns the maximum between the left-hand side and 0.
Note: `fx`, `f₀` and `g₀` are required in the `OneDAtX`.
See also `wolfe`, `armijo_wolfe`, `shamanskii_stop`, `goldstein`
"""
function armijo(h::Any, h_at_t::OneDAtX{S, T}; τ₀::T = T(0.01), kwargs...) where {S, T}
if isnan(h_at_t.fx) || isnan(h_at_t.f₀) || isnan(h_at_t.g₀)
return throw(error("fx, f₀ and g₀ are mandatory in the state."))
else
fact = -T(0.8)
Eps = T(1e-10)
hgoal = h_at_t.fx - h_at_t.f₀ - h_at_t.g₀ * h_at_t.x * τ₀
# Armijo = (h_at_t.fx <= hgoal)# || ((h_at_t.fx <= h_at_t.f₀ + Eps * abs(h_at_t.f₀)) & (h_at_t.gx <= fact * h_at_t.g₀))
# positive = h_at_t.x > 0.0 # positive step
return max(hgoal, zero(T))
end
end
"""
`wolfe(h::Any, h_at_t::OneDAtX{S, T}; τ₁::T = T(0.99), kwargs...) where {S, T}`
Check if a step size is admissible according to the Wolfe criterion.
Strong Wolfe criterion: `|∇f(x+θd)| - τ₁||∇f(x)|| < 0`.
This function returns the maximum between the left-hand side and 0.
Note: `gx` and `g₀` are required in the `OneDAtX`.
See also `armijo`, `armijo_wolfe`, `shamanskii_stop`, `goldstein`
"""
function wolfe(h::Any, h_at_t::OneDAtX{S, T}; τ₁::T = T(0.99), kwargs...) where {S, T}
if isnan(h_at_t.g₀) || isnan(h_at_t.gx)
return throw(error("gx and g₀ are mandatory in the state."))
else
wolfe = abs(h_at_t.gx) - τ₁ * abs(h_at_t.g₀)
#positive = h_at_t.x > 0.0 # positive step
return max(wolfe, zero(T))
end
end
"""
`armijo_wolfe(h::Any, h_at_t::OneDAtX{S, T}; τ₀::T = T(0.01), τ₁::T = T(0.99), kwargs...) where {S, T}`
Check if a step size is admissible according to the Armijo and Wolfe criteria.
Note: `fx`, `f₀`, `gx` and `g₀` are required in the `OneDAtX`.
See also `armijo`, `wolfe`, `shamanskii_stop`, `goldstein`
"""
function armijo_wolfe(
h::Any,
h_at_t::OneDAtX{S, T};
τ₀::T = T(0.01),
τ₁::T = T(0.99),
kwargs...,
) where {S, T}
if isnan(h_at_t.fx) || isnan(h_at_t.gx) || isnan(h_at_t.f₀) || isnan(h_at_t.g₀)
return throw(error("fx, f₀, gx and g₀ are mandatory."))
else
wolfe = abs(h_at_t.gx) - τ₁ * abs(h_at_t.g₀)
armijo = h_at_t.fx - h_at_t.f₀ - h_at_t.g₀ * h_at_t.x * τ₀
return max(armijo, wolfe, zero(T))
end
end
"""
`shamanskii_stop(h :: Any, h_at_t :: OneDAtX; γ :: Float64 = 1.0e-09, kwargs...)`
Check if a step size is admissible according to the "Shamanskii" criteria.
This criteria was proposed in:
> Lampariello, F., & Sciandrone, M. (2001).
> Global convergence technique for the Newton method with periodic Hessian evaluation.
> Journal of optimization theory and applications, 111(2), 341-358.
Note:
- `h.d` accessible (specific `LineModel`).
- `fx`, `f₀` are required in the `OneDAtX`.
See also `armijo`, `wolfe`, `armijo_wolfe`, `goldstein`
"""
function shamanskii_stop(h::Any, h_at_t::OneDAtX{S, T}; γ::T = T(1.0e-09), kwargs...) where {S, T}
admissible = h_at_t.fx - h_at_t.f₀ - γ * (h_at_t.x)^3 * norm(h.d)^3
return max(admissible, zero(T))
end
"""
`goldstein(h::Any, h_at_t::OneDAtX{S, T}; τ₀::T = T(0.0001), τ₁::T = T(0.9999), kwargs...) where {S, T}`
Check if a step size is admissible according to the Goldstein criteria.
Note: `fx`, `f₀` and `g₀` are required in the `OneDAtX`.
See also `armijo`, `wolfe`, `armijo_wolfe`, `shamanskii_stop`
"""
function goldstein(
h::Any,
h_at_t::OneDAtX{S, T};
τ₀::T = T(0.0001),
τ₁::T = T(0.9999),
kwargs...,
) where {S, T}
if isnan(h_at_t.fx) || isnan(h_at_t.gx) || isnan(h_at_t.f₀) || isnan(h_at_t.g₀)
return throw(error("fx, f₀, gx and g₀ are mandatory."))
else
goldstein = max(
h_at_t.f₀ + h_at_t.x * (1 - τ₀) * h_at_t.g₀ - h_at_t.fx,
h_at_t.fx - (h_at_t.f₀ + h_at_t.x * τ₀ * h_at_t.g₀),
)
# positive = h_at_t.x > 0.0 # positive step
return max(goldstein, zero(T)) #&& positive
end
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4182 | import NLPModels: grad, cons, jac
"""
`unconstrained_check( :: AbstractNLPModel, :: NLPAtX; pnorm :: Real = Inf, kwargs...)`
Return the `pnorm`-norm of the gradient of the objective function.
Require `state.gx` (filled if not provided).
See also `optim_check_bounded`, `KKT`
"""
function unconstrained_check(
pb::AbstractNLPModel,
state::NLPAtX{S, T};
pnorm::eltype(T) = eltype(T)(Inf),
kwargs...,
) where {S, T}
if length(state.gx) == 0 # should be filled if empty
update!(state, gx = grad(pb, state.x))
end
return norm(state.gx, pnorm)
end
"""
`optim_check_bounded( :: AbstractNLPModel, :: NLPAtX; pnorm :: Real = Inf, kwargs...)`
Check the `pnorm`-norm of the gradient of the objective function projected over the bounds.
Require `state.gx` (filled if not provided).
See also `unconstrained_check`, `KKT`
"""
function optim_check_bounded(
pb::AbstractNLPModel,
state::NLPAtX{S, T};
pnorm::eltype(T) = eltype(T)(Inf),
kwargs...,
) where {S, T}
if length(state.gx) == 0 # should be filled if void
update!(state, gx = grad(pb, state.x))
end
proj = max.(min.(state.x - state.gx, pb.meta.uvar), pb.meta.lvar)
gradproj = state.x - proj
return norm(gradproj, pnorm)
end
"""
constrained: return the violation of the KKT conditions
length(lambda) > 0
"""
function _grad_lagrangian(pb::AbstractNLPModel, state::NLPAtX{S, T}) where {S, T}
if (pb.meta.ncon == 0) & !has_bounds(pb)
return state.gx
elseif pb.meta.ncon == 0
return state.gx + state.mu
elseif !has_bounds(pb)
return state.gx + state.Jx' * state.lambda
else
return state.gx + state.mu + state.Jx' * state.lambda
end
end
# Complementarity/sign residual for the bound multipliers `mu`
# (empty vector when the problem has no bounds).
function _sign_multipliers_bounds(pb::AbstractNLPModel, state::NLPAtX{S, T}) where {S, T}
  has_bounds(pb) || return zeros(eltype(T), 0)
  z = zero(eltype(T))
  upper = min.(max.(state.mu, z), -state.x + pb.meta.uvar)
  lower = min.(max.(-state.mu, z), state.x - pb.meta.lvar)
  return vcat(upper, lower)
end
# Complementarity/sign residual for the constraint multipliers `lambda`
# (empty vector when the problem has no nonlinear constraints).
function _sign_multipliers_nonlin(pb::AbstractNLPModel, state::NLPAtX{S, T}) where {S, T}
  pb.meta.ncon == 0 && return zeros(eltype(T), 0)
  z = zero(eltype(T))
  upper = min.(max.(state.lambda, z), -state.cx + pb.meta.ucon)
  lower = min.(max.(-state.lambda, z), state.cx - pb.meta.lcon)
  return vcat(upper, lower)
end
# Primal feasibility residual: violations of the constraints (if any) followed
# by the violations of the bounds.
function _feasibility(pb::AbstractNLPModel, state::NLPAtX{S, T}) where {S, T}
  z = zero(eltype(T))
  box_viol = vcat(
    max.(state.x - pb.meta.uvar, z),
    max.(-state.x + pb.meta.lvar, z),
  )
  pb.meta.ncon == 0 && return box_viol
  cons_viol = vcat(
    max.(state.cx - pb.meta.ucon, z),
    max.(-state.cx + pb.meta.lcon, z),
  )
  return vcat(cons_viol, box_viol)
end
"""
`KKT( :: AbstractNLPModel, :: NLPAtX; pnorm :: Real = Inf, kwargs...)`
Check the KKT conditions.
Note: `state.gx` is mandatory + if bounds `state.mu` + if constraints `state.cx`, `state.Jx`, `state.lambda`.
See also `unconstrained_check`, `optim_check_bounded`
"""
function KKT(
pb::AbstractNLPModel,
state::NLPAtX{S, T};
pnorm::eltype(T) = eltype(T)(Inf),
kwargs...,
) where {S, T}
if unconstrained(pb) && length(state.gx) == 0
@warn "KKT needs stp.current_state.gx to be filled-in."
return eltype(T)(Inf)
elseif has_bounds(pb) && length(state.mu) == 0
@warn "KKT needs stp.current_state.mu to be filled-in."
return eltype(T)(Inf)
elseif get_ncon(pb) > 0 &&
(length(state.cx) == 0 || size(state.Jx) == (0, 0) || length(state.lambda) == 0)
@warn "KKT needs stp.current_state.cx, stp.current_state.Jx and stp.current_state.lambda to be filled-in."
return eltype(T)(Inf)
end
#Check the gradient of the Lagrangian
gLagx = _grad_lagrangian(pb, state)
#Check the complementarity condition for the bounds
dual_res_bounds = _sign_multipliers_bounds(pb, state)
#Check the complementarity condition for the constraints
res_nonlin = _sign_multipliers_nonlin(pb, state)
#Check the feasibility
feas = _feasibility(pb, state)
res = vcat(gLagx, feas, dual_res_bounds, res_nonlin)
return norm(res, pnorm)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1084 | """
_compute_mutliplier: Additional function to estimate Lagrange multiplier of the problems
(guarantee if LICQ holds)
`_compute_mutliplier(pb :: AbstractNLPModel, x :: T, gx :: T, cx :: T, Jx :: MT; active_prec_c :: Real = 1e-6, active_prec_b :: Real = 1e-6)`
"""
function _compute_mutliplier(
  pb::AbstractNLPModel,
  x::T,
  gx::T,
  cx::T,
  Jx::MT;
  active_prec_c::Real = 1e-6,
  active_prec_b::Real = 1e-6,
) where {MT, T}
  n = length(x)
  nc = length(cx)
  # active bounds
  # BUG FIX: `min` must be broadcast (`min.`) — `min(::AbstractArray, ::AbstractArray)`
  # throws a MethodError in Julia 1.x, so this function errored for vector `x`.
  Ib = findall(v -> (norm(v) <= active_prec_b), min.(abs.(x - pb.meta.lvar), abs.(x - pb.meta.uvar)))
  if nc != 0
    # active constraints
    Ic = findall(
      v -> (norm(v) <= active_prec_c),
      min.(abs.(cx - pb.meta.ucon), abs.(cx - pb.meta.lcon)),
    )
    Jc = hcat(Matrix(1.0I, n, n)[:, Ib], Jx'[:, Ic])
  else
    Ic = []
    Jc = hcat(Matrix(1.0I, n, n)[:, Ib])
  end
  mu, lambda = zeros(eltype(T), n), zeros(eltype(T), nc)
  # least-squares estimate of the multipliers on the active set
  # (guaranteed if LICQ holds)
  if !(isempty(Ib) && isempty(Ic))
    l = Jc \ (-gx)
    mu[Ib], lambda[Ic] = l[1:length(Ib)], l[(length(Ib) + 1):length(l)]
  end
  return mu, lambda
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1244 | using Test
using ADNLPModels, DataFrames, LinearAlgebra, LLSModels, NLPModels, Printf, SparseArrays
using NLPModelsModifiers
using Stopping
using Stopping: _init_field
using SolverTools: LineModel
#"State tests...\n"
include("test-state/unit-test-GenericStatemod.jl")
include("test-state/unit-test-OneDAtXmod.jl")
include("test-state/unit-test-NLPAtXmod.jl")
include("test-state/unit-test-ListOfStates.jl")
include("test-stopping/unit-test-voidstopping.jl")
include("test-stopping/test-users-struct-function.jl")
include("test-stopping/unit-test-stopping-meta.jl")
include("test-stopping/unit-test-remote-control.jl")
#"Stopping tests...\n"
include("test-stopping/test-unitaire-generic-stopping.jl")
include("test-stopping/test-unitaire-ls-stopping.jl")
include("test-stopping/unit-test-line-model.jl")
include("test-stopping/test-unitaire-nlp-stopping.jl")
include("test-stopping/test-unitaire-nlp-evals.jl") #not in an environment
include("test-stopping/test-unitaire-nlp-stopping_2.jl")
include("test-stopping/strong-epsilon-check.jl")
include("test-stopping/test-unitaire-linearalgebrastopping.jl")
#"HowTo tests..."
include("examples/runhowto.jl")
#printstyled("Run OptimSolver tests...\n")
#include("examples/run-optimsolver.jl")
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 807 | @testset "Test @instate" begin
function algo(stp::AbstractStopping, n::Int)
OK = start!(stp)
x = stp.current_state.x
while !OK
x = x .+ 1
OK = update_and_stop!(stp, x = x)
end
return stp
end
function instatealgo(stp::AbstractStopping, n::Int)
state = stp.current_state
x = state.x
OK = start!(stp)
@instate state while !OK
x = x .+ 1
OK = stop!(stp)
end
return stp
end
n = 10
stp1 = GenericStopping(x -> 0.0, zeros(n), max_iter = n, rtol = 0.0)
stp2 = GenericStopping(x -> 0.0, zeros(n), max_iter = n, rtol = 0.0)
stp1 = algo(stp1, n)
stp2 = algo(stp2, n)
@test stp1.current_state.x == stp2.current_state.x
#=
Suggestions:
- It would be better to say stp.current_state instead of state.
=#
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 3952 | ##############################################################################
#
# In this test problem we consider an active-set method.
#
# Note that there is no optimization of the evaluations here.
#
# Note the use of a structure for the algorithmic parameters which is
# forwarded to all the 3 steps. If a parameter is not mentioned, then the default
# entry in the algorithm will be taken.
#
#############################################################################
#include("penalty.jl")
##############################################################################
#
# First, we create a subtype of AbstractNLPModel to represent the unconstrained
# subproblem we "solve" at each iteration of the activeset.
#
import NLPModels: obj, grad, hess, hprod
# Subtype of AbstractNLPModel representing the unconstrained subproblem solved
# at each iteration of the active-set method: the variables indexed by `I` are
# frozen at their value in `x0`, the optimization acts on the indices `Ic`.
mutable struct ActifNLP <: AbstractNLPModel
  nlp::AbstractNLPModel # original (bound-constrained) problem
  x0::Vector #reference vector (aliased by the outer algorithm's iterate)
  I::Vector #set of active indices
  Ic::Vector #set of inactive indices
  meta::AbstractNLPModelMeta
  counters::Counters
end
# Objective of the subproblem: evaluate the full objective with the inactive
# components replaced by `x`.
# NOTE(review): `t = anlp.x0` aliases (does not copy) the reference vector, so
# `t[anlp.Ic] = x` mutates `anlp.x0` in place; the outer active-set loop
# appears to rely on this side effect — confirm before changing to `copy`.
function obj(anlp::ActifNLP, x::Vector)
  t = anlp.x0
  t[anlp.Ic] = x
  return obj(anlp.nlp, t)
end
# Gradient of the subproblem, restricted to the inactive indices.
# NOTE(review): like `obj`, this mutates `anlp.x0` through aliasing.
function grad(anlp::ActifNLP, x::Vector)
  t = anlp.x0
  t[anlp.Ic] = x
  return grad(anlp.nlp, t)[anlp.Ic]
end
# Hessian of the subproblem: the block of the full Hessian corresponding to the
# inactive indices. NOTE(review): mutates `anlp.x0` through aliasing (see `obj`).
function hess(anlp::ActifNLP, x::Vector)
  t = anlp.x0
  t[anlp.Ic] = x
  return hess(anlp.nlp, t)[anlp.Ic, anlp.Ic]
end
# Hessian-vector product for the subproblem, computed by forming the reduced
# Hessian explicitly (simple but not optimized; `y` is ignored).
function hprod(anlp::ActifNLP, x::Vector, v::Vector, y::Vector)
  return hess(anlp, x) * v
end
##############################################################################
#
# Active-set algorithm for bound constraints optimization
# fill_in! used instead of update! (works but usually more costly in evaluations)
# subproblems are solved via Newton method
#
#############################################################################
"""
    activeset(stp::NLPStopping; active = ..., prms = nothing)

Active-set method for bound-constrained optimization: the problem `stp.pb` is
solved through a sequence of unconstrained subproblems over the inactive
variables, each solved with `global_newton`. `active` is the tolerance used to
declare a bound active; `prms` is forwarded to the subproblem solver.
Return the (stopped) `stp`.
"""
function activeset(
  stp::NLPStopping;
  active::Float64 = stp.meta.tol_check(stp.meta.atol, stp.meta.rtol, stp.meta.optimality0),
  prms = nothing,
)
  xt = stp.current_state.x
  n = length(xt)
  all = findall(xt .== xt) # all the indices (xt .== xt is entry-wise true, NaN excepted)
  if maximum(vcat(max.(xt - stp.pb.meta.uvar, 0.0), max.(-xt + stp.pb.meta.lvar, 0.0))) > 0.0
    # xt is not feasible: project it onto the box [lvar, uvar]
    xt = max.(min.(stp.current_state.x, stp.pb.meta.uvar), stp.pb.meta.lvar)
  end

  fill_in!(stp, xt)
  OK = start!(stp)

  # indices of the (nearly) active lower/upper bounds
  Il = findall(abs.(-xt + stp.pb.meta.lvar) .<= active)
  Iu = findall(abs.(xt - stp.pb.meta.uvar) .<= active)
  I = union(Il, Iu)
  Ic = setdiff(all, I)
  nI = max(0, length(xt) - length(Il) - length(Iu)) #lvar_i != uvar_i

  @show xt, I

  while !OK
    # Subproblem over the inactive variables.
    # BUG FIX: use the problem stored in the Stopping (`stp.pb`), not the
    # global variable `nlp`.
    subpb = ActifNLP(stp.pb, xt, I, Ic, NLPModelMeta(nI), Counters())
    # the subproblem stops when the unconstrained nlp is solved or the iterate is infeasible
    feas(x, y) =
      maximum(vcat(max.(y.x - stp.pb.meta.uvar[Ic], 0.0), max.(-y.x + stp.pb.meta.lvar[Ic], 0.0)))
    check_func(x, y) = feas(x, y) > 0.0 ? 0.0 : unconstrained_check(x, y)
    substp = NLPStopping(subpb, NLPAtX(xt[Ic]), main_stp = stp, optimality_check = check_func)

    # solve the unconstrained subproblem:
    global_newton(substp, prms)

    @show status(substp, list = true)

    if feas(substp.pb, substp.current_state) > 0.0 # new iterate is infeasible
      # project it back onto the box
      xt[Ic] = max.(min.(substp.current_state.x, stp.pb.meta.uvar[Ic]), stp.pb.meta.lvar[Ic])
      # keep track of the newly activated indices
      # BUG FIX: the second findall previously used the global `x0` instead of `xt`.
      Inew = setdiff(
        union(
          findall(abs.(-xt + stp.pb.meta.lvar) .<= active),
          findall(abs.(xt - stp.pb.meta.uvar) .<= active),
        ),
        I,
      )
    else
      Inew = []
    end

    fill_in!(stp, xt) # the lazy update
    OK = update_and_stop!(stp, evals = stp.pb.counters)

    if !OK # relaxation rule based on an approximation of the Lagrange multipliers
      Irmv = findall(stp.current_state.mu .< 0.0)
      I = union(setdiff(I, Irmv), Inew)
      Ic = setdiff(all, I)
      # NOTE(review): `nI` is not recomputed here although `Ic` changes; confirm
      # whether NLPModelMeta(nI) should track length(Ic) across iterations.
    end

    @show xt, I
  end #end of main loop

  return stp
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4231 | ##############################################################################
#
# In this test problem, we consider a backtracking algorithm for 1D optimization.
# The scenario considers three different stopping criterion to solve a specific
# problem.
#
# This example illustrates how to use a "structure" to handle the algorithmic
# parameters and unify the input. The function
# backtracking_ls(stp :: LS_Stopping, prms)
# serves as a buffer for the real algorithm in the function
# backtracking_ls(stp :: LS_Stopping; back_update :: Float64 = 0.5, prms = nothing)
#
# It also shows that obsolete information in the State (after an update of x)
# must be removed by the algorithm. Otherwise, the optimality_check function
# cannot make the difference between valid and invalid entries.
#############################################################################
##############################################################################
#
# We create a basic structure to handle 1D optimization.
#
# We can also use the LineModel available in
# https://github.com/JuliaSmoothOptimizers/SolverTools.jl
# Minimal structure representing a one-dimensional optimization problem:
# `f` is the scalar objective t -> f(t) and `g` its derivative t -> f'(t).
mutable struct onedoptim
  f::Function
  g::Function
end
#############################################################################
#
# We specialize three optimality_check functions for 1D optimization to the
# onedoptim type of problem.
#
# The default functions do not fill in automatically the necessary entries.
#
import Stopping: armijo, wolfe, armijo_wolfe
# Armijo criterion for a `onedoptim` problem: the required entries of the
# state (ht, h₀, g₀) are filled lazily before computing the violation.
function armijo(h::onedoptim, h_at_t::LSAtT; τ₀::Float64 = 0.01, kwargs...)
  isnan(h_at_t.ht) && (h_at_t.ht = h.f(h_at_t.x))
  isnan(h_at_t.h₀) && (h_at_t.h₀ = h.f(0))
  isnan(h_at_t.g₀) && (h_at_t.g₀ = h.g(0))
  violation = h_at_t.ht - h_at_t.h₀ - h_at_t.g₀ * h_at_t.x * τ₀
  return max(violation, 0.0)
end
# Strong Wolfe curvature criterion for a `onedoptim` problem: |h'(t)| ≤ τ₁|h'(0)|.
# BUG FIX: the previous expression `τ₁ * g₀ - abs(gt)` is ≤ 0 for any descent
# direction (g₀ < 0), i.e. the criterion was vacuously satisfied; rewritten to
# match the package's own strong Wolfe check.
function wolfe(h::onedoptim, h_at_t::LSAtT; τ₁::Float64 = 0.99, kwargs...)
  h_at_t.gt = isnan(h_at_t.gt) ? h.g(h_at_t.x) : h_at_t.gt
  h_at_t.g₀ = isnan(h_at_t.g₀) ? h.g(0) : h_at_t.g₀
  wolfe = abs(h_at_t.gt) - τ₁ * abs(h_at_t.g₀)
  return max(wolfe, 0.0)
end
# Combined Armijo + Wolfe criterion for a `onedoptim` problem: fill the
# required state entries lazily, then return the largest violation.
function armijo_wolfe(
  h::onedoptim,
  h_at_t::LSAtT;
  τ₀::Float64 = 0.01,
  τ₁::Float64 = 0.99,
  kwargs...,
)
  isnan(h_at_t.ht) && (h_at_t.ht = h.f(h_at_t.x))
  isnan(h_at_t.h₀) && (h_at_t.h₀ = h.f(0))
  isnan(h_at_t.gt) && (h_at_t.gt = h.g(h_at_t.x))
  isnan(h_at_t.g₀) && (h_at_t.g₀ = h.g(0))
  return max(armijo(h, h_at_t, τ₀ = τ₀), wolfe(h, h_at_t, τ₁ = τ₁), 0.0)
end
##############################################################################
#
# backtracking LineSearch
# !! The problem (stp.pb) is the 1d objective function
#
# Requirement: g0 and h0 have been filled in the State.
#
#############################################################################
# Backtracking line search driven by the Stopping: shrink the step by
# `back_update` until `stop!` declares it admissible.
# Requirement: g₀ and h₀ have been filled in the State.
function backtracking_ls(stp::LS_Stopping; back_update::Float64 = 0.5, prms = nothing)
  lsstate = stp.current_state
  t = lsstate.x
  # first call to stopping
  OK = start!(stp)
  while !OK
    t *= back_update
    # after the update, the entries of the State are obsolete (except h₀, g₀)
    reinit!(lsstate, t, h₀ = stp.current_state.h₀, g₀ = stp.current_state.g₀)
    OK = stop!(stp)
  end
  return stp
end
##############################################################################
#
# Buffer to handle a structure containing the algorithmic parameters.
#
#############################################################################
# Buffer version handling a structure of algorithmic parameters: forward the
# structure's `back_update` field when it exists, else use the default 0.5.
function backtracking_ls(stp::LS_Stopping, prms)
  has_bu = :back_update ∈ fieldnames(typeof(prms))
  bu = has_bu ? prms.back_update : 0.5
  return backtracking_ls(stp, back_update = bu, prms = prms)
end
##############################################################################
#
# Scenario: optimization of the rosenbrock function at x0 along the opposite
# of the gradient.
#
# We also store all the algorithmic parameters in a structure.
# Structure gathering the algorithmic parameters of the 1D minimization,
# consumed by the buffer version of `backtracking_ls`.
mutable struct ParamLS
  #parameters of the 1d minimization
  back_update::Float64 #backtracking step reduction factor
  # keyword constructor with a default reduction factor of 0.1
  function ParamLS(; back_update::Float64 = 0.1)
    return new(back_update)
  end
end
#############################################################################
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 3095 | using LinearAlgebra, NLPModels, Stopping
include("backls.jl")
include("uncons.jl")
#https://juliasmoothoptimizers.github.io/SolverBenchmark.jl/latest/tutorial/
#In this tutorial we illustrate the main uses of SolverBenchmark.
using DataFrames, Printf, SolverBenchmark
#CUTEst is a collection of test problems
using CUTEst
problems_unconstrained = CUTEst.select(contype = "unc")
n = length(problems_unconstrained) #240
#problems_boundconstrained = CUTEst.select(contype="bounds")
#n = length(problems_boundconstrained) #124
printstyled("Benchmark solvers: \n", color = :green)
n = min(n, 3)
#Names of 3 solvers:
names = [:armijo, :wolfe, :armijo_wolfe]
p1 = PrmUn();
p2 = PrmUn(ls_func = wolfe);
p3 = PrmUn(ls_func = armijo_wolfe);
paramDict = Dict(:armijo => p1, :wolfe => p2, :armijo_wolfe => p3)
#Initialization of the DataFrame for n problems.
stats = Dict(
name => DataFrame(
:id => 1:n,
:name => [@sprintf("prob%s", problems_unconstrained[i]) for i = 1:n],
:nvar => zeros(Int64, n),
:status => [:Unknown for i = 1:n],
:f => NaN * ones(n),
:t => NaN * ones(n),
:iter => zeros(Int64, n),
:eval_f => zeros(Int64, n),
:eval_g => zeros(Int64, n),
:eval_H => zeros(Int64, n),
:score => NaN * ones(n),
) for name in names
)
# Main benchmark loop: solve each CUTEst problem with each line-search variant
# and record the results carried by the Stopping into the DataFrames.
for i = 1:n
  nlp_cutest = CUTEst.CUTEstModel(problems_unconstrained[i])
  @show i, problems_unconstrained[i], nlp_cutest.meta.nvar
  #update the stopping with the new problem
  stop_nlp = NLPStopping(
    nlp_cutest,
    NLPAtX(nlp_cutest.meta.x0),
    max_iter = 20,
    optimality_check = unconstrained_check,
  )

  for name in names
    #solve the problem
    global_newton(stop_nlp, paramDict[name])

    #update the stats from the Stopping
    stats[name].nvar[i] = nlp_cutest.meta.nvar
    stats[name].status[i] = status(stop_nlp)
    stats[name].f[i] = stop_nlp.current_state.fx
    stats[name].t[i] = stop_nlp.current_state.current_time - stop_nlp.meta.start_time
    stats[name].iter[i] = stop_nlp.meta.nb_of_stop
    stats[name].score[i] = unconstrained_check(nlp_cutest, stop_nlp.current_state)
    stats[name].eval_f[i] = getfield(stop_nlp.current_state.evals, :neval_obj)
    stats[name].eval_g[i] = getfield(stop_nlp.current_state.evals, :neval_grad)
    stats[name].eval_H[i] = getfield(stop_nlp.current_state.evals, :neval_hess)

    #reinitialize the Stopping and the nlp before the next solver
    reinit!(stop_nlp, rstate = true, x = nlp_cutest.meta.x0)
    reset!(stop_nlp.pb)
  end
  #finalize nlp (mandatory for CUTEst models)
  finalize(nlp_cutest)
end #end of main loop
for name in names
@show stats[name]
end
#You can export the table in Latex
#latex_table(stdout, stats[:armijo])
#or run a performance profile:
#using Plots
#pyplot()
#cost(df) = (df.status != :Optimal) * Inf + df.t
#p = performance_profile(stats, cost)
#Plots.svg(p, "profile2")
#or a profile wall:
#solved(df) = (def.status .== :Optimal)
#costs = [df -> .!sovled(df) * Inf + df.t, df -> .!sovled(df) * Inf + df.iter]
#costnames = ["Time", "Iterations"]
#p = profile_solvers(stats, costs, costnames)
#Plots.svg(p, "profile3")
printstyled("The End.", color = :green)
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2949 | ###############################################################################
#
# We already illustrated the use of Stopping for optimization algorithm,
# however, in the case where one algorithm/solver is not Stopping-compatible,
# a buffer solver is required to unify the formalism.
# We illustrate this situation here with the Ipopt solver.
#
# Remark in the buffer function: in case the solver stops with success
# but the stopping condition is not satisfied, one option is to iterate
# and reduce the various tolerances.
#
# Documentation for Ipopt options can be found here:
# https://coin-or.github.io/Ipopt/OPTIONS.html#OPTIONS_REF
###############################################################################
using Ipopt, NLPModels, NLPModelsIpopt, Stopping
include("../test-stopping/rosenbrock.jl")
x0 = 1.5 * ones(6)
nlp = ADNLPModel(rosenbrock, x0)
#The traditional way to solve an optimization problem using NLPModelsIpopt
#https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl
printstyled("Oth scenario:\n")
stats = ipopt(nlp, print_level = 0, x0 = x0)
#Use y0 (general), zL (lower bound), zU (upper bound)
#for initial guess of Lagrange multipliers.
@show stats.solution, stats.status
#Using Stopping, the idea is to create a buffer function
#Buffer function: wraps the NLPModelsIpopt solver in the Stopping formalism.
#It solves `stp.pb`, translates Ipopt's exit status into the Stopping meta
#booleans, fills in the State at the returned solution and returns `stp`.
function solveIpopt(stp::NLPStopping)
  #Solve the problem stored in the Stopping.
  #Bug fix: the previous version called `ipopt(nlp, ...)` on the *global*
  #variable `nlp`, ignoring `stp.pb`; it only worked because both coincided.
  stats = ipopt(
    stp.pb,
    print_level = 0,
    tol = stp.meta.rtol,
    x0 = stp.current_state.x,
    max_iter = stp.meta.max_iter,
    max_cpu_time = stp.meta.max_time,
    dual_inf_tol = stp.meta.atol,
    constr_viol_tol = stp.meta.atol,
    compl_inf_tol = stp.meta.atol,
  )
  #Update the meta boolean with the output message.
  #The statuses are mutually exclusive, hence a single if/elseif chain.
  if stats.status in (:first_order, :acceptable)
    stp.meta.suboptimal = true
  elseif stats.status == :infeasible
    stp.meta.infeasible = true
  elseif stats.status == :small_step
    stp.meta.stalled = true
  elseif stats.status == :max_iter
    stp.meta.iteration_limit = true
  elseif stats.status == :max_time
    stp.meta.tired = true
  end
  #Report the number of iterations performed by Ipopt (stats.elapsed_time
  #is also available if needed).
  stp.meta.nb_of_stop = stats.iter
  x = stats.solution
  #Not mandatory, but in case some entries of the State are used to stop
  fill_in!(stp, x)
  stop!(stp)
  return stp
end
#Prepare the Stopping: the State carries the iterate used by the buffer solver.
nlp_at_x = NLPAtX(x0)
stop = NLPStopping(nlp, nlp_at_x, optimality_check = unconstrained_check)
#1st scenario, we solve again the problem with the buffer solver
printstyled("1st scenario:\n")
solveIpopt(stop)
@show stop.current_state.x, status(stop)
#keep the iteration count: used to force an early stop in the 2nd scenario
nbiter = stop.meta.nb_of_stop
#2nd scenario: we check that we control the maximum iterations.
printstyled("2nd scenario:\n")
#rstate is set as true to allow reinit! modifying the State
reinit!(stop, rstate = true, x = x0)
stop.meta.max_iter = max(nbiter - 4, 1)
solveIpopt(stop)
#Final status is :IterationLimit
@show stop.current_state.x, status(stop)
printstyled("The End.\n")
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2409 | ###############################################################################
#
# Stopping can also be used for fixed point methods
# Example here concerns the AlternatingDirections Algorithm to find
# a feasible point in the intersection of 2 convex sets A and B.
# This algorithm relies on a fixed point argument, hence it stopped if it finds
# a fixed point.
#
# Example:
# A={ (x,y) | x=y} and B = {(x,y) | y=0}
# Clearly the unique intersection point is (0,0)
#
# Note that in this case the projection on A and the projection on B are trivial
#
# Takeaway: the 2nd scenario illustrates a situation where the algorithm stalls
# as it reached a personal success. (optimal_sub_pb is true)
#
###############################################################################
using LinearAlgebra, NLPModels, Stopping, Test
#Main algorithm
#Alternating projections on A = {x : x[1] = x[2]} and B = {x : x[2] = 0},
#monitored by a Stopping. Declares `suboptimal` when a fixed point is reached.
function AlternatingDirections(stp)
  xk = stp.current_state.x
  #Bug fix: evaluate the constraints at the current iterate `xk`
  #(the previous code referenced the global variable `x0`).
  OK = update_and_start!(stp, cx = cons(stp.pb, xk))
  @show OK, xk
  while !OK
    #First projection (on A): both coordinates set to their mean
    xk1 = 0.5 * (xk[1] + xk[2]) * ones(2)
    #Second projection (on B): zero out the second coordinate
    xk2 = [xk1[1], 0.0]
    #check if we have a fixed point
    Fix = dot(xk - xk2, xk - xk2)
    if Fix <= min(eps(Float64), stp.meta.atol)
      stp.meta.suboptimal = true
    end
    #call the stopping
    OK = update_and_stop!(stp, x = xk2, cx = cons(stp.pb, xk2))
    xk = xk2
    @show OK, xk
  end
  return stp
end
# We model the problem using the NLPModels without objective function
#Formulate the problem with NLPModels
c(x) = [x[1] - x[2], x[2]]
lcon = [0.0, 0.0]
ucon = [0.0, 0.0]
nlp = ADNLPModel(x -> 0.0, zeros(2), c, lcon, ucon)
#1st scenario: we solve the problem
printstyled("1st scenario:\n")
#Prepare the Stopping
x0 = [0.0, 5.0]
state = NLPAtX(x0)
#Recall that for the optimality_check function x is the pb and y is the state
#Here we take the infinite norm of the residual.
stop = NLPStopping(nlp, state, optimality_check = (x, y) -> norm(y.cx, Inf))
AlternatingDirections(stop)
@show status(stop)
@test status(stop) == :Optimal
#2nd scenario: the user gives an unrealistic optimality condition
printstyled("2nd scenario:\n")
#reset the State (rstate = true) and restart from the same initial point
reinit!(stop, rstate = true, x = x0)
stop.meta.optimality_check = (x, y) -> norm(y.cx, Inf) + 0.5
AlternatingDirections(stop)
#In this scenario, the algorithm stops because it attains a fixed point
#Hence, status is :SubOptimal.
@show status(stop)
@test status(stop) == :SubOptimal
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 9397 | ###############################################################################
#
# # ListofStates tutorial
#
# We illustrate here the use of ListofStates in dealing with a warm start
# procedure.
#
# ListofStates can also provide the user history over the iteration process.
#
# We compare the resolution of a convex unconstrained problem with 3 variants:
# - a steepest descent method
# - an inverse-BFGS method
# - a mix with 5 steps of steepest descent and then switching to BFGS with
#the history (using the strength of the ListofStates).
#
###############################################################################
using Stopping, NLPModels, LinearAlgebra, Printf
import Stopping.armijo
"""
    armijo(xk, dk, fk, slope, f)

Backtracking line search along direction `dk` from the point `xk`, where
`fk = f(xk)` and `slope` is the directional derivative `dot(∇f(xk), dk)`
(expected negative for a descent direction).

Return `(t, fk_new)` with `t` the first step in `{1, 1/1.5, 1/1.5², …}`
satisfying the Armijo condition `f(xk + t * dk) ≤ fk + 1.0e-4 * t * slope`,
and `fk_new = f(xk + t * dk)`.
"""
function armijo(xk, dk, fk, slope, f)
  t = 1.0
  fk_new = f(xk + t * dk)
  #Reuse fk_new in the sufficient-decrease test: the previous version
  #evaluated f twice per backtracking iteration (once in the condition,
  #once for fk_new).
  while fk_new > fk + 1.0e-4 * t * slope
    t /= 1.5
    fk_new = f(xk + t * dk)
  end
  return t, fk_new
end
#Steepest descent method for optimization:
#Steepest descent with Armijo backtracking, monitored by a Stopping.
#Prints one line per iteration: counter, f(x), score norm, step, slope.
function steepest_descent(stp::NLPStopping)
  xk = stp.current_state.x
  fk, gk = objgrad(stp.pb, xk)
  OK = update_and_start!(stp, fx = fk, gx = gk)
  @printf "%2s %9s %7s %7s %7s\n" "k" "fk" "||∇f(x)||" "t" "λ"
  @printf "%2d %7.1e %7.1e\n" stp.meta.nb_of_stop fk norm(stp.current_state.current_score)
  while !OK
    dk = -gk #steepest descent direction
    slope = dot(dk, gk)
    t, fk = armijo(xk, dk, fk, slope, x -> obj(stp.pb, x))
    xk += t * dk
    #armijo already returned fk = f(xk): only the gradient is needed here.
    #(The previous objgrad call re-evaluated the objective needlessly;
    #using grad only is also consistent with bfgs_quasi_newton_armijo.)
    gk = grad(stp.pb, xk)
    OK = update_and_stop!(stp, x = xk, fx = fk, gx = gk)
    @printf "%2d %9.2e %7.1e %7.1e %7.1e\n" stp.meta.nb_of_stop fk norm(
      stp.current_state.current_score,
    ) t slope
  end
  return stp
end
#Inverse-BFGS quasi-Newton method with Armijo backtracking, monitored by a
#Stopping. The keyword `Hk` allows warm-starting the inverse Hessian
#approximation (e.g. built from a ListofStates history); by default it
#starts from the identity. The final Hk is stored in stp.stopping_user_struct.
function bfgs_quasi_newton_armijo(stp::NLPStopping; Hk = nothing)
  xk = stp.current_state.x
  fk, gk = objgrad(stp.pb, xk)
  gm = gk
  dk, t = similar(gk), 1.0
  if isnothing(Hk)
    Hk = I #start from identity matrix
  end
  OK = update_and_start!(stp, fx = fk, gx = gk)
  @printf "%2s %7s %7s %7s %7s\n" "k" "fk" "||∇f(x)||" "t" "cos"
  @printf "%2d %7.1e %7.1e\n" stp.meta.nb_of_stop fk norm(stp.current_state.current_score)
  while !OK
    #Skip the BFGS update at the very first iteration: no previous step yet.
    if stp.meta.nb_of_stop != 0
      sk = t * dk
      yk = gk - gm
      ρk = 1 / dot(yk, sk)
      #we need yk'*sk > 0 for instance yk'*sk ≥ 1.0e-2 * sk' * Hk * sk
      #If the curvature condition fails (ρk ≤ 0), keep the previous Hk.
      Hk = ρk ≤ 0.0 ? Hk : (I - ρk * sk * yk') * Hk * (I - ρk * yk * sk') + ρk * sk * sk'
      #Step too small to make progress: leave the loop.
      if norm(sk) ≤ 1e-14
        break
      end
      #H2 = Hk + sk * sk' * (dot(sk,yk) + yk'*Hk*yk )*ρk^2 - ρk*(Hk * yk * sk' + sk * yk'*Hk)
    end
    dk = -Hk * gk
    slope = dot(dk, gk) # ≤ -1.0e-4 * norm(dk) * gnorm
    t, fk = armijo(xk, dk, fk, slope, x -> obj(stp.pb, x))
    xk = xk + t * dk
    #Keep the previous gradient to build yk = gk - gm at the next iteration.
    gm = copy(gk)
    gk = grad(stp.pb, xk)
    OK = update_and_stop!(stp, x = xk, fx = fk, gx = gk)
    @printf "%2d %7.1e %7.1e %7.1e %7.1e\n" stp.meta.nb_of_stop fk norm(
      stp.current_state.current_score,
    ) t slope
  end
  #Expose the final inverse Hessian approximation for possible warm starts.
  stp.stopping_user_struct = Dict(:Hk => Hk)
  return stp
end
using Test
############ PROBLEM TEST #############################################
#Himmelblau's function: (x₂ + x₁² - 11)² + (x₁ + x₂² - 7)².
fH(x) = (x[2] + x[1] .^ 2 - 11) .^ 2 + (x[1] + x[2] .^ 2 - 7) .^ 2
nlp = ADNLPModel(fH, [10.0, 20.0])
stp =
  NLPStopping(nlp, optimality_check = unconstrained_check, atol = 1e-6, rtol = 0.0, max_iter = 100)
#Run 1: plain steepest descent from x0.
reinit!(stp, rstate = true, x = nlp.meta.x0)
steepest_descent(stp)
@test status(stp) == :Optimal
@test stp.listofstates == VoidListofStates()
@show elapsed_time(stp)
@show nlp.counters
#Run 2: inverse-BFGS from the same point (rcounters = true resets the counters).
reinit!(stp, rstate = true, x = nlp.meta.x0, rcounters = true)
bfgs_quasi_newton_armijo(stp)
@test status(stp) == :Optimal
@test stp.listofstates == VoidListofStates()
@show elapsed_time(stp)
@show nlp.counters
NLPModels.reset!(nlp)
#Run 3: 5 steepest-descent steps, storing the history in a ListofStates.
stp_warm = NLPStopping(
  nlp,
  optimality_check = unconstrained_check,
  atol = 1e-6,
  rtol = 0.0,
  max_iter = 5,
  n_listofstates = 5,
) #shortcut for list = ListofStates(5, Val{NLPAtX{Float64,Array{Float64,1},Array{Float64,2}}}()))
steepest_descent(stp_warm)
@test status(stp_warm) == :IterationLimit
@test length(stp_warm.listofstates) == 5
#Build a warm-start inverse-BFGS matrix from the 5 stored states.
Hwarm = I
for i = 2:5
  sk = stp_warm.listofstates.list[i][1].x - stp_warm.listofstates.list[i - 1][1].x
  yk = stp_warm.listofstates.list[i][1].gx - stp_warm.listofstates.list[i - 1][1].gx
  ρk = 1 / dot(yk, sk)
  #only apply the update when the curvature condition ρk > 0 holds
  if ρk > 0.0
    global Hwarm = (I - ρk * sk * yk') * Hwarm * (I - ρk * yk * sk') + ρk * sk * sk'
  end
end
reinit!(stp_warm)
stp_warm.meta.max_iter = 100
#Switch to BFGS warm-started with the history-built Hwarm.
bfgs_quasi_newton_armijo(stp_warm, Hk = Hwarm)
status(stp_warm)
@show elapsed_time(stp_warm)
@show nlp.counters
#=
k fk ||∇f(x)|| t λ
0 1.7e+05 3.2e+04
1 2.73e+04 8.6e+03 1.0e-03 -1.1e+09
2 1.80e+03 1.1e+03 2.3e-03 -7.3e+07
3 1.24e+03 7.9e+02 1.2e-02 -1.3e+06
4 6.37e+01 2.4e+01 1.2e-02 -6.3e+05
5 1.34e+01 5.8e+01 2.0e-01 -8.3e+02
6 5.87e+00 2.5e+01 1.3e-01 -3.5e+03
7 2.88e+00 2.4e+01 2.6e-02 -6.7e+02
8 2.42e+00 1.8e+01 1.7e-02 -6.1e+02
9 6.58e-01 1.2e+01 1.2e-02 -6.1e+02
10 1.64e-01 5.3e+00 1.2e-02 -1.7e+02
11 4.96e-02 3.2e+00 1.2e-02 -4.4e+01
12 1.44e-02 1.6e+00 1.2e-02 -1.3e+01
13 4.35e-03 9.2e-01 1.2e-02 -3.9e+00
14 1.29e-03 5.0e-01 1.2e-02 -1.2e+00
15 3.87e-04 2.7e-01 1.2e-02 -3.5e-01
16 1.15e-04 1.5e-01 1.2e-02 -1.0e-01
17 3.45e-05 8.2e-02 1.2e-02 -3.1e-02
18 1.03e-05 4.5e-02 1.2e-02 -9.2e-03
19 3.08e-06 2.4e-02 1.2e-02 -2.8e-03
20 9.21e-07 1.3e-02 1.2e-02 -8.2e-04
21 2.75e-07 7.3e-03 1.2e-02 -2.5e-04
22 8.23e-08 4.0e-03 1.2e-02 -7.4e-05
23 2.46e-08 2.2e-03 1.2e-02 -2.2e-05
24 7.35e-09 1.2e-03 1.2e-02 -6.6e-06
25 2.20e-09 6.5e-04 1.2e-02 -2.0e-06
26 6.57e-10 3.6e-04 1.2e-02 -5.9e-07
27 1.96e-10 1.9e-04 1.2e-02 -1.8e-07
28 5.87e-11 1.1e-04 1.2e-02 -5.3e-08
29 1.75e-11 5.8e-05 1.2e-02 -1.6e-08
30 5.24e-12 3.2e-05 1.2e-02 -4.7e-09
31 1.57e-12 1.7e-05 1.2e-02 -1.4e-09
32 4.68e-13 9.5e-06 1.2e-02 -4.2e-10
33 1.40e-13 5.2e-06 1.2e-02 -1.3e-10
34 4.18e-14 2.8e-06 1.2e-02 -3.7e-11
35 1.25e-14 1.6e-06 1.2e-02 -1.1e-11
36 3.74e-15 8.5e-07 1.2e-02 -3.3e-12
elapsed_time(stp) = 0.7508440017700195
nlp.counters = Counters:
obj: ████████████████████ 889 grad: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 37 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
k fk ||∇f(x)|| t cos
0 1.7e+05 3.2e+04
1 2.7e+04 8.6e+03 1.0e-03 -1.1e+09
2 1.8e+04 4.5e+03 1.2e-02 -1.8e+06
3 2.5e+03 1.3e+03 1.0e+00 -7.1e+04
4 1.2e+03 8.5e+02 1.0e+00 -1.7e+03
5 3.2e+02 3.3e+02 1.0e+00 -1.4e+03
6 9.8e+01 1.4e+02 1.0e+00 -3.2e+02
7 2.7e+01 6.0e+01 1.0e+00 -1.1e+02
8 6.4e+00 2.4e+01 1.0e+00 -3.0e+01
9 9.9e-01 7.9e+00 1.0e+00 -8.2e+00
10 6.3e-02 1.9e+00 1.0e+00 -1.5e+00
11 8.7e-04 3.2e-01 1.0e+00 -1.1e-01
12 3.6e-05 7.9e-02 1.0e+00 -1.6e-03
13 1.4e-05 4.2e-02 1.0e+00 -2.9e-05
14 2.0e-07 3.4e-03 1.0e+00 -2.6e-05
15 4.1e-09 4.9e-04 1.0e+00 -3.6e-07
16 2.9e-12 2.5e-05 1.0e+00 -8.1e-09
17 2.5e-15 6.3e-07 1.0e+00 -5.6e-12
elapsed_time(stp) = 0.017869949340820312
nlp.counters = Counters:
obj: ████████████████████ 91 grad: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 18 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
k fk ||∇f(x)|| t λ
0 1.7e+05 3.2e+04
1 2.73e+04 8.6e+03 1.0e-03 -1.1e+09
2 1.80e+03 1.1e+03 2.3e-03 -7.3e+07
3 1.24e+03 7.9e+02 1.2e-02 -1.3e+06
4 6.37e+01 2.4e+01 1.2e-02 -6.3e+05
5 1.34e+01 5.8e+01 2.0e-01 -8.3e+02
6 5.87e+00 2.5e+01 1.3e-01 -3.5e+03
k fk ||∇f(x)|| t cos
0 5.9e+00 2.5e+01
1 3.8e+00 2.7e+01 1.7e-02 -1.1e+03
2 2.8e+00 2.4e+01 4.4e-01 -1.1e+01
3 1.4e+00 1.2e+01 3.0e-01 -3.0e+01
4 1.1e-02 1.3e+00 1.0e+00 -2.5e+00
5 9.0e-05 9.2e-02 1.0e+00 -2.5e-02
6 7.9e-08 3.9e-03 1.0e+00 -1.8e-04
7 7.7e-10 3.8e-04 1.0e+00 -1.4e-07
8 1.3e-19 4.2e-09 1.0e+00 -1.5e-09
elapsed_time(stp_warm) = 0.01520395278930664
nlp.counters = Counters:
obj: ████████████████████ 192 grad: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
Counters:
obj: ████████████████████ 192 grad: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
=#
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4059 | @testset "Test How to State NLP" begin
###############################################################################
#
# The data used through the algorithmic process in the Stopping framework
# are stored in a State.
# We illustrate here the NLPAtX which is a specialization of the State for
# non-linear programming.
#
###############################################################################
#using Test, NLPModels, Stopping
include("../test-stopping/rosenbrock.jl")
#Formulate the problem with NLPModels
x0 = ones(6)
y0 = ones(1)
c(x) = [x[1] - x[2]]
lcon = [0.0]
ucon = [0.0]
#We can create a NLPAtX for constrained optimization.
#Here we provide y0 = [1.0]
#Note that the default value is [0.0]
nlp = ADNLPModel(rosenbrock, x0, zeros(6), Inf * ones(6), c, lcon, ucon, y0 = y0)
#We can create a NLPAtX for bounds-constrained optimization:
nlp2 = ADNLPModel(rosenbrock, x0, zeros(6), Inf * ones(6))
#We can create a NLPAtX for unconstrained optimization:
nlp3 = ADNLPModel(x -> rosenbrock(x), x0)
###############################################################################
#I. Initialize a NLPAtX:
#
#There are two main constructor for the States.
#The unconstrained:
state_unc = NLPAtX(x0)
#The constrained:
state_con = NLPAtX(x0, y0)
#By default, all the values in the State are set to nothing except x and lambda
#In the unconstrained case lambda is a vector of length 0
@test !(state_unc.lambda == nothing)
#From the default initialization, all the other entries are void:
@test state_unc.mu == [] && state_con.mu == []
@test isnan(state_unc.fx) && isnan(state_con.fx)
#Note that the constructor proceeds to a size checking on gx, Hx, mu, cx, Jx.
#It returns an error if this test fails.
try
NLPAtX(x0, Jx = ones(1, 1))
@test false
catch
#printstyled("NLPAtX(x0, Jx = ones(1,1)) is invalid as length(lambda)=0\n")
@test true
end
###############################################################################
#II. Update the entries
#
#At the creation of a NLPAtX, keyword arguments populate the state:
state_bnd = NLPAtX(x0, mu = zeros(6))
@test state_bnd.mu == zeros(6) #initialize multipliers with bounds constraints
#The NLPAtX has two functions: update! and reinit!
#The update! has the same behavior as in the GenericState:
update!(state_bnd, fx = 1.0, blah = 1) #update! ignores unnecessary keywords
@test state_bnd.mu == zeros(6) && state_bnd.fx == 1.0 && state_bnd.x == x0
#reinit! by default reuse x and lambda and reset all the entries at their
#default values (void or empty Counters):
reinit!(state_bnd, mu = ones(6))
@test state_bnd.mu == ones(6) && isnan(state_bnd.fx)
@test state_bnd.x == x0 && state_bnd.lambda == zeros(0)
#However, we can specify both entries
reinit!(state_bnd, 2 * ones(6), zeros(0))
@test state_bnd.x == 2 * ones(6) && state_bnd.lambda == zeros(0)
@test state_bnd.mu == []
###############################################################################
#III. Domain Error
#Similar to the GenericState we can use _domain_check to verify there are no NaN
@test Stopping._domain_check(state_bnd) == false
update!(state_bnd, mu = [NaN])
@test Stopping._domain_check(state_bnd) == true
###############################################################################
#IV. Use the NLPAtX
#
#For algorithmic use, it might be convenient to fill in all the entries of the
#State. In this case, we can use the Stopping:
stop = NLPStopping(nlp, state_unc, optimality_check = (x, y) -> unconstrained_check(x, y))
#Note that the fill_in! can receive known informations via keywords.
#If we don't want to store the hessian matrix, we turn the keyword
#matrix_info as false.
fill_in!(stop, x0, matrix_info = false)
#printstyled("Hx has not been updated: ",stop.current_state.Hx == nothing,"\n")
@test stop.current_state.Hx == zeros(0, 0)
# We can now use the updated step in the algorithmic procedure
@test start!(stop)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2964 | @testset "Test How to State" begin
###############################################################################
#
# The data used through the algorithmic process in the Stopping framework
# are stored in a State.
# We illustrate here the GenericState and its features
#
###############################################################################
#using Test, Stopping
###############################################################################
#The GenericState contains only two entries:
# a vector x, and a Float current_time
state1 = GenericState(ones(2)) #takes a Vector as a mandatory input
state2 = GenericState(ones(2), current_time = 1.0)
#By default if a non-mandatory entry is not specified it is void:
@test isnan(state1.current_time)
@test state2.current_time == 1.0
###############################################################################
#The GenericState has two functions: update! and reinit!
#update! is used to update entries of the State:
update!(state1, current_time = 1.0)
@test state1.current_time == 1.0
#Note that the update select the relevant entries
update!(state1, fx = 1.0) #does nothing as there are no fx entry
@test state1.current_time == 1.0 && state1.x == ones(2)
#The update! can be done only if the new entry is void or has the same type
#as the existing one.
update!(state1, current_time = 2) #does nothing as it is the wrong type
@test state1.current_time == 1.0
#An advanced user can force the update even if the type is not the same by
#turning the keyword convert as true (it is false by default).
#update!(state1, convert = true, current_time = 2) NON!!!
#@test state1.current_time == 2
#Non-required entry in the State can always be set as void without convert
update!(state1, current_time = NaN)
@test isnan(state1.current_time)
#A shorter way to empty the State is to use the reinit! function.
#This function is particularly useful, when there are many entries.
reinit!(state2)
@test state2.x == ones(2) && isnan(state2.current_time)
#If we want to reinit! with a different value of the mandatory entry:
reinit!(state2, zeros(2))
@test state2.x == zeros(2) && isnan(state2.current_time)
#After reinitializing the State reinit! can update entries passed as keywords.
#either in the default call:
reinit!(state2, current_time = 1.0)
@test state2.x == zeros(2) && state2.current_time == 1.0
#or in the one changing x:
reinit!(state2, ones(2), current_time = 1.0)
@test state2.x == ones(2) && state2.current_time == 1.0
###############################################################################
#The State has also a private function guaranteeing there are no NaN
OK = Stopping._domain_check(state1) #function returns a boolean
@test OK == false #no NaN
@test Stopping._domain_check(state2) == false
update!(state2, x = [NaN, 0.0])
@test Stopping._domain_check(state2) == true
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2408 | @testset "Test How to Stop II" begin
###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
# We illustrate here the features of Stopping when the algorithm is used a
# subStopping.
#
###############################################################################
#using Test, Stopping
#Assume we want to solve "pb" starting from "x0" and solving at each step
#of the algorithm the subproblem "subpb".
# We can use this additional info to improve the stopping criterion.
x0 = ones(2)
pb = nothing
subpb = nothing
subsubpb = nothing
###############################################################################
#Initialize a Stopping for the main pb
main_stop = GenericStopping(pb, x0)
#We can then, initialize another stopping to the subproblem, and providing
#the main_stop as a keyword argument:
sub_stop =
GenericStopping(subpb, x0, main_stp = main_stop, tol_check = (atol, rtol, opt0) -> atol)
#Note that by default main_stp is void
@test main_stop.main_stp == VoidStopping()
#The only difference appears in the event of a call to stop!, which now also
#check the time and resources of the main_pb.
OK = start!(sub_stop)
@test OK == false #no reason to stop just yet.
#Assume time is exhausted for the main_stop
main_stop.meta.start_time = 0.0 #force a timing failure in the main problem
stop!(sub_stop)
@test status(sub_stop, list = true) == [:ResourcesOfMainProblemExhausted]
@test sub_stop.meta.tired == false
@test sub_stop.meta.main_pb == true
#The same applies if there is now a third subproblem
reinit!(main_stop)
reinit!(sub_stop)
subsub_stop =
GenericStopping(subsubpb, x0, main_stp = sub_stop, tol_check = (atol, rtol, opt0) -> atol)
main_stop.meta.start_time = 0.0 #force a timing failure in the main problem
stop!(subsub_stop)
@test status(subsub_stop, list = true) == [:ResourcesOfMainProblemExhausted]
@test subsub_stop.meta.tired == false
@test subsub_stop.meta.main_pb == true
@test status(sub_stop, list = true) == [:ResourcesOfMainProblemExhausted]
@test sub_stop.meta.tired == false
@test sub_stop.meta.main_pb == true
@test status(main_stop, list = true) == [:TimeLimit]
@test main_stop.meta.tired == true
@test main_stop.meta.main_pb == false
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4741 | @testset "How to stop NLP" begin
###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
#
# We illustrate here the basic features of NLPStopping, which is a
# specialized version of the Stopping to the case where:
# pb is an AbstractNLPModel
# state shares the structure of the NLPAtX.
#
# NLPModels is a package to handle non-linear (constrained) optimization.
# NLPStopping is following this approach.
#
###############################################################################
#using Test, NLPModels, Stopping
#We first create a toy problem
f(x) = sum(x .^ 2)
x0 = zeros(5)
nlp = ADNLPModel(f, x0)
nlp2 = ADNLPModel(f, x0, zeros(5), Inf * ones(5))
nlp_at_x = NLPAtX(x0)
x1 = ones(5)
###############################################################################
#1) Initialize the NLPStopping.
#The specificity here is that the NLPStopping requires another mandatory input:
#optimality_check which is the function later used to compute the score.
#Recall that the score is then tested at 0, to declare optimality.
#Stopping provides a default KKT function.
stop_nlp = NLPStopping(nlp, nlp_at_x, optimality_check = (x, y) -> KKT(x, y))
#Another approach is to use the lazy way:
stop_nlp_lazy = NLPStopping(nlp2) #use nlp.meta.x0 as initial point
@test stop_nlp_lazy.current_state.x == nlp2.meta.x0
###############################################################################
#2) Fill in
#Before calling start! and stop! one should fill in current information in the
#State. -> the optimality_check then exploits this knowledge.
#As seen before, we by hand use update_and_start and update_and_stop.
#Another way is to call the fill_in! function:
fill_in!(stop_nlp, x1, matrix_info = false)
@test stop_nlp.current_state.x == x1
@test stop_nlp.current_state.fx == 5.0
@test stop_nlp.current_state.Hx == zeros(0, 0)
#Note that since there are no constraints, c(x) and J(x) are not called:
@test stop_nlp.current_state.Jx == zeros(0, 0)
@test stop_nlp.current_state.cx == zeros(0)
#Since there are no bounds on x, the Lagrange multiplier is not updated:
@test stop_nlp.current_state.mu == zeros(0)
#would give Hx if matrix_info = true
fill_in!(stop_nlp_lazy, x1)
@test stop_nlp_lazy.current_state.Hx != zeros(0, 0)
#stop_nlp_lazy.pb has bounds, so mu is a vector of size x
@test size(x0) == size(stop_nlp_lazy.current_state.mu)
###############################################################################
#3) Evaluations
#Another particularity is that the NLPModels has a counter keeping track of
#the evaluations of each function.
#Similarly the NLPStopping has a dictionary keeping all the maximum number of
#evaluations:
@test typeof(stop_nlp.meta.max_cntrs) <: Dict
#For instance the limit in evaluations of objective and gradient:
@test stop_nlp.meta.max_cntrs[:neval_obj] == typemax(Int)
@test stop_nlp.meta.max_cntrs[:neval_grad] == typemax(Int)
#Limit can be set using init_max_counters function:
stop_nlp.meta.max_cntrs = init_max_counters(obj = 3, grad = 0, hess = 0)
@test stop_nlp.meta.max_cntrs[:neval_obj] == 3
@test stop_nlp.meta.max_cntrs[:neval_grad] == 0
OK = update_and_stop!(stop_nlp, evals = stop_nlp.pb.counters)
@test OK == true
@test stop_nlp.meta.resources == true
@test status(stop_nlp) == :EvaluationLimit
###############################################################################
#4) Unbounded problem
#An additional feature of the NLPStopping is to provide an _unbounded_problem_check
#whenever \|c(x)\| or -f(x) become too large.
stop_nlp.meta.unbounded_threshold = -6.0 #by default 1.0e50
stop!(stop_nlp)
@test stop_nlp.meta.unbounded_pb == true
@test stop_nlp.current_state.fx > stop_nlp.meta.unbounded_threshold
@test stop_nlp.meta.resources == true #still true as the state has not changed
###############################################################################
#An advanced feature is the possibility to send keywords to optimality_check:
optimality_fct_test = (x, y; a = 1.0) -> a
#In this case, the optimality_check function used to compute the score may
#depend on a parameter (algorithm-dependent for instance)
stop_nlp_2 = NLPStopping(nlp, nlp_at_x, optimality_check = optimality_fct_test)
fill_in!(stop_nlp_2, x0)
OK = stop!(stop_nlp_2, a = 0.0)
@test OK == true
@test stop_nlp_2.meta.optimal == true
#However, note that the same cannot be achieved with update_and_stop!:
reinit!(stop_nlp_2)
OK = update_and_stop!(stop_nlp_2, a = 0.0)
@test OK == false
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 6528 | @testset "Test How to Stop I" begin
###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
# We illustrate here the basic features of Stopping.
#
# -> the case where a Stopping is a sub-Stopping is treated in the next tuto.
#
###############################################################################
#using Test, Stopping
x0 = ones(2)
pb = nothing
###############################################################################
#I. Initialize a Stopping
#The lazy way to initialize the stopping is to provide an initial point:
stop1 = GenericStopping(pb, x0, rtol = 1e-1)
#The more sophisticated way is to first build a State:
state1 = GenericState(ones(2))
#then, use it to create a Stopping:
stop2 = GenericStopping(pb, state1, rtol = 1e-1)
#Both ways give the same result:
@test stop1.current_state.x == stop2.current_state.x
@test isnan(stop1.current_state.current_time) && isnan(stop2.current_state.current_time)
#Keywords given in the Stopping creator are forwarded to the StoppingMeta.
@test stop1.meta.rtol == 1e-1
###############################################################################
#II. Check the status
#To ask the Stopping what is the current situation, we have the status function:
@test status(stop1) == :Unknown #nothing happened yet.
#The status function check the boolean values in the Meta:
#unbounded, unbounded_pb, tired, stalled, iteration_limit, resources, optimal,
#infeasible, main_pb, domainerror, suboptimal
stop1.meta.unbounded = true
stop1.meta.suboptimal = true
#By default the status function prioritizes a status:
@test status(stop1) == :SubOptimal
#while you can access the list of status by turning the keyword list as true:
@test status(stop1, list = true) == [:SubOptimal, :Unbounded]
###############################################################################
#III. Analyze the situation: start!
#Two functions are designed to ask Stopping to analyze the current situation
#mainly described by the State: start!, stop!
#start! is designed to be used right at the beginning of the algorithm:
start!(stop1) #we will compare with stop2
#this call initializes a few entries:
#a) start_time in the META
@test isnan(stop2.meta.start_time)
@test !isnan(stop1.meta.start_time)
#b) optimality0 in the META (used to check the relative error)
@test stop2.meta.optimality0 == 1.0 #default value was 1.0
@test stop1.meta.optimality0 == 1.0 #GenericStopping has no specified measure, but get 1. if optimality is Inf
#c) the time measured is also updated in the State (if void)
@test stop1.current_state.current_time != nothing
#d) in the case where optimality0 is NaN, meta.domainerror becomes true
@test stop1.meta.domainerror == false
#e) the problem would be already solved if optimality0 pass a _null_test
#Since optimality0 is 1., any value would pass the relative error check:
@test Stopping._null_test(stop1, Inf) == false
@test stop1.meta.optimal == false
@test :SubOptimal in status(stop1, list = true)
#The Stopping determines the optimality by testing a score at zero.
#The test at zero is controlled by the function meta.tol_check which
#takes 3 arguments: atol, rtol, optimality0. By default it check if the score
#is less than: max(atol, rtol * opt0)
#This can be determined in the initialization of the Stopping
stop3 = GenericStopping(pb, state1, tol_check = (atol, rtol, opt0) -> atol)
@test Stopping._null_test(stop3, Inf) == false
#The function _optimality_check providing the score returns Inf by default
#and must be specialized for specialized Stopping.
#If State entries have to be specified before the start!, you can use the
#function update_and_start! instead of a update! and then a start!
update_and_start!(stop3, x = zeros(2), current_time = -1.0)
@test stop3.meta.optimal == false
@test stop3.current_state.current_time == -1.0
@test stop3.meta.start_time != nothing
@test stop3.current_state.x == zeros(2)
###############################################################################
#Once the iterations begins #stop! is the main function.
#if needed an update is needed first, we can use update_and_stop!
OK = stop!(stop3) #update the Stopping and return a boolean
@test OK == false #no reason to stop just yet!
#The stop! call check the following:
#1) meta.domainerror: check if the score is NaN
#2) meta.optimal: the score passes the _null_test
#3) meta.unbounded: check if state.x is too large
#4) meta.unbounded_pb: false by default
#5) meta.tired: check if time is exhausted
#6) meta.resources: false by default
#7) meta.iteration_limit: check the number of iterations
#8) meta.stalled: false by default
#9) meta.main_pb: false by default -> see Stopping as a subproblem tutorial
# Note that 1 and 2 are also done by start!.
#1) check unboundedness of x:
@test update_and_stop!(stop3, x = (stop3.meta.unbounded_x + 1.0) * x0)
@test stop3.meta.unbounded == true
#5) check time
stop3.meta.start_time = 0.0 #too force the time limit.
stop!(stop3)
@test stop3.meta.tired == true
#7) Stopping the number of iterations by the number of calls to stop!
@test stop3.meta.nb_of_stop == 3 #We called stop3 3 times already
stop3.meta.max_iter = 3
stop!(stop3)
@test stop3.meta.iteration_limit == true #as stop3.meta.nb_of_stop > 3.
#Overall we activated three flags:
@test status(stop3, list = true) == [:TimeLimit, :Unbounded, :IterationLimit]
###############################################################################
#Once we are done with an algorithm and want to reuse a stopping, we need to
#reinitialize all the entries.
reinit!(stop3)
#the status boolean are back to false
@test !stop3.meta.iteration_limit && !stop3.meta.tired && !stop3.meta.unbounded
#reinitialize also the entries updated by the start!
@test isnan(stop3.meta.start_time) && (stop3.meta.optimality0 == 1.0)
@test stop3.meta.nb_of_stop == 0 #and the counter of stop
#Note that by default reinit! does not reinitialize the current_state.
#This can be done by switching the keyword rstate to true.
#In this case, keywords are forwarded to the reinit! of current_state.
reinit!(stop3, rstate = true, x = zeros(2))
@test isnan(stop3.current_state.current_time)
@test stop3.current_state.x == zeros(2)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 2605 | ###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
#
# The following examples illustrate solver for linear algebra:
# Ax = b with A an m x n matrix.
#
# This tutorial illustrates the different step in preparing the resolution of a
# new problem.
# - we create a LinearAlgebraPRoblem (that stores A, b)
# - we use the GennericState storing x and the current_time
# - we create a LinearAlgebraProblem (that stores A, b)
# - we use the GenericState storing x and the current_time
# - we create a LinearAlgebraStopping
# - the optimality function linear_system_check
#
###############################################################################
using LinearAlgebra, Stopping, Test
m, n = 400, 200 #size of A: m x n
A = 100 * rand(m, n)
xref = 100 * rand(n)
b = A * xref
#Our initial guess
x0 = zeros(n)
mutable struct LinearAlgebraProblem
A::Any #matrix type
b::Vector
end
la_pb = LinearAlgebraProblem(A, b)
la_state = GenericState(xref)
@test norm(la_pb.A * xref - la_pb.b) <= 1e-6
mutable struct LinearAlgebraStopping <: AbstractStopping
# problem
pb::LinearAlgebraProblem
# Common parameters
meta::AbstractStoppingMeta
# current state of the problem
current_state::AbstractState
# Stopping of the main problem, or nothing
main_stp::Union{AbstractStopping, Nothing}
function LinearAlgebraStopping(pb::LinearAlgebraProblem, current_state::AbstractState; kwargs...)
return new(
pb,
linear_system_check!,
StoppingMeta(; optimality_check = linear_system_check, kwargs...),
la_state,
nothing,
)
end
end
function linear_system_check(pb::LinearAlgebraProblem, state::AbstractState; kwargs...)
return norm(pb.A * state.x - pb.b)
end
@test linear_system_check(la_pb, la_state) == 0.0
update!(la_state, x = x0)
@test linear_system_check(la_pb, la_state) != 0.0
la_stop = LinearAlgebraStopping(
la_pb,
la_state,
max_iter = 150000,
rtol = 1e-6,
optimality_check = linear_system_check,
)
"""
Randomized block Kaczmarz
"""
function RandomizedBlockKaczmarz(stp::AbstractStopping; kwargs...)
#A,b = stp.current_state.Jx, stp.current_state.cx
A, b = stp.pb.A, stp.pb.b
x0 = stp.current_state.x
m, n = size(A)
xk = x0
OK = start!(stp)
while !OK
i = Int(floor(rand() * m) + 1) #rand a number between 1 and m
Ai = A[i, :]
xk = Ai == 0 ? x0 : x0 - (dot(Ai, x0) - b[i]) / dot(Ai, Ai) * Ai
OK = update_and_stop!(stp, x = xk)
x0 = xk
end
return stp
end
RandomizedBlockKaczmarz(la_stop)
@test status(la_stop) == :Optimal
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1626 | ###############################################################################
#
# We illustrate here the use of Stopping in a classical algorithm,
# the Newton method for unconstrained optimization.
#
###############################################################################
using LinearAlgebra, NLPModels, Stopping, Test
# We create a quadratic test function, and create an NLPModels
A = rand(5, 5);
Q = A' * A
f(x) = 0.5 * x' * Q * x
nlp = ADNLPModel(f, ones(5))
#We now initialize the NLPStopping:
nlp_at_x = NLPAtX(ones(5)) #First create a State
#We use unconstrained_check as an optimality function (src/Stopping/nlp_admissible_functions.jl)
stop_nlp = NLPStopping(nlp, nlp_at_x, optimality_check = unconstrained_check)
function newton(stp::NLPStopping)
#Notations
pb = stp.pb
state = stp.current_state
#Initialization
xt = state.x
#First, call start! to check optimality and set an initial configuration
#(start the time counter, set relative error ...)
OK = update_and_start!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt))
while !OK
#Compute the Newton direction
d = Symmetric(state.Hx, :L) \ (-state.gx)
#Update the iterate
xt = xt + d
#Update the State and call the Stopping with stop!
OK = update_and_stop!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt))
end
return stp
end #end of function newton
stop_nlp = newton(stop_nlp)
#We can then ask stop_nlp the final status
@test :Optimal in status(stop_nlp, list = true)
#Explore the final values in stop_nlp.current_state
printstyled("Final solution is $(stop_nlp.current_state.x)", color = :green)
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4152 | ##############################################################################
#
# In this test problem we consider a quadratic penalty method.
# This example features an algorithm with the 3 steps:
# the penalization - the unconstrained min - the 1d min
#
# Note that there is no optimization of the evaluations here.
# The penalization gives an approximation of the gradients, multipliers...
#
# Note the use of a structure for the algorithmic parameters which is
# forwarded to all the 3 steps. If a parameter is not mentioned, then the default
# entry in the algorithm will be taken.
#
#############################################################################
#include("uncons.jl")
##############################################################################
#
# Quadratic penalty algorithm
# fill_in! used instead of update! (works but usually more costly in evaluations)
# subproblems are solved via Newton method
#
#############################################################################
function penalty(stp::NLPStopping; rho0 = 1.0, rho_min = 1e-10, rho_update = 0.5, prms = nothing)
#algorithm's parameters
rho = rho0
#First call to the stopping
#Becareful here, some entries must be filled in first.
fill_in!(stp, stp.current_state.x)
OK = start!(stp)
#prepare the subproblem stopping:
sub_nlp_at_x = NLPAtX(stp.current_state.x)
sub_pb = ADNLPModel(
x ->
obj(stp.pb, x) +
1 / rho * norm(max.(cons(stp.pb, x) - stp.pb.meta.ucon, 0.0))^2 +
1 / rho * norm(max.(-cons(stp.pb, x) + stp.pb.meta.lcon, 0.0))^2,
x0,
)
sub_stp =
NLPStopping(sub_pb, sub_nlp_at_x, main_stp = stp, optimality_check = unconstrained_check)
#main loop
while !OK
#solve the subproblem
reinit!(sub_stp)
sub_stp.meta.atol = min(rho, sub_stp.meta.atol)
global_newton(sub_stp, prms)
#Update all the entries of the State
fill_in!(stp, sub_stp.current_state.x)
#Either stop! is true OR the penalty parameter is too small
if rho < rho_min
stp.meta.suboptimal = true
end
OK = stop!(stp)
@show stp.meta.nb_of_stop, OK, rho
#update the penalty parameter if necessary
if !OK
rho = rho * rho_update
sub_stp.pb = ADNLPModel(
x ->
obj(stp.pb, x) +
1 / rho * norm(max.(cons(stp.pb, x) - stp.pb.meta.ucon, 0.0))^2 +
1 / rho * norm(max.(-cons(stp.pb, x) + stp.pb.meta.lcon, 0.0))^2,
x0,
)
end
end
return stp
end
##############################################################################
#
# Quadratic penalty algorithm: buffer function
#
#############################################################################
function penalty(stp::NLPStopping, prms)
#extract required values in the prms file
r0 = :rho0 ∈ fieldnames(typeof(prms)) ? prms.rho0 : 1.0
rm = :rho_min ∈ fieldnames(typeof(prms)) ? prms.rho_min : 1e-10
ru = :rho_update ∈ fieldnames(typeof(prms)) ? prms.rho_update : 0.5
return penalty(stp, rho0 = r0, rho_min = rm, ru = 0.5, prms = prms)
end
##############################################################################
#
# Algorithmic parameters structure
#
#############################################################################
mutable struct Param
#parameters for the penalty
rho0::Float64 #initial value of the penalty parameter
rho_min::Float64 #smallest possible parameter
rho_update::Float64 #update of the penalty parameter
#parameters of the unconstrained minimization
armijo_prm::Float64 #Armijo parameter
wolfe_prm::Float64 #Wolfe parameter
onedsolve::Function #1D solver
ls_func::Function
#parameters of the 1d minimization
back_update::Float64 #backtracking update
function Param(;
rho0::Float64 = 1.0,
rho_min::Float64 = sqrt(eps(Float64)),
rho_update::Float64 = 0.5,
armijo_prm::Float64 = 0.01,
wolfe_prm::Float64 = 0.99,
onedsolve::Function = backtracking_ls,
ls_func::Function = (x, y) -> armijo(x, y, τ₀ = armijo_prm),
back_update::Float64 = 0.5,
)
return new(rho0, rho_min, rho_update, armijo_prm, wolfe_prm, onedsolve, ls_func, back_update)
end
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 5401 | ###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
#
# The following examples illustrate solver for optimization:
# - a backtracking 1D optimization solver
# - a globalized Newton for unconstrained optimization solver
# - a bound constraint active-set algorithm
# - a quadratic penalty algorithm for non-linear optimization
#
###############################################################################
using LinearAlgebra, NLPModels, Stopping, Test
include("../test-stopping/rosenbrock.jl")
##############################################################################
#
# Part 1/4
#
#############################################################################
printstyled("How to solve 1D optim problem: \n", color = :red)
include("backls.jl")
printstyled("1D Optimization: backtracking tutorial.\n", color = :green)
x0 = 1.5 * ones(6)
nlp = ADNLPModel(rosenbrock, x0)
g0 = grad(nlp, x0)
h = onedoptim(x -> obj(nlp, x0 - x * g0), x -> -dot(g0, grad(nlp, x0 - x * g0)))
#SCENARIO:
#We create 3 stopping:
#Define the LSAtT with mandatory entries g₀ and h₀.
lsatx = LSAtT(1.0, h₀ = obj(nlp, x0), g₀ = -dot(grad(nlp, x0), grad(nlp, x0)))
lsstp = LS_Stopping(h, lsatx, optimality_check = (x, y) -> armijo(x, y, τ₀ = 0.01))
lsatx2 = LSAtT(1.0, h₀ = obj(nlp, x0), g₀ = -dot(grad(nlp, x0), grad(nlp, x0)))
lsstp2 = LS_Stopping(h, lsatx2, optimality_check = (x, y) -> wolfe(x, y, τ₁ = 0.99))
lsatx3 = LSAtT(1.0, h₀ = obj(nlp, x0), g₀ = -dot(grad(nlp, x0), grad(nlp, x0)))
lsstp3 =
LS_Stopping(h, lsatx3, optimality_check = (x, y) -> armijo_wolfe(x, y, τ₀ = 0.01, τ₁ = 0.99))
parameters = ParamLS(back_update = 0.5)
printstyled("backtracking line search with Armijo:\n", color = :green)
backtracking_ls(lsstp, parameters)
@show status(lsstp)
@show lsstp.meta.nb_of_stop
@show lsstp.current_state.x
printstyled("backtracking line search with Wolfe:\n", color = :green)
backtracking_ls(lsstp2, parameters)
@show status(lsstp2)
@show lsstp2.meta.nb_of_stop
@show lsstp2.current_state.x
printstyled("backtracking line search with Armijo-Wolfe:\n", color = :green)
backtracking_ls(lsstp3, parameters)
@show status(lsstp3)
@show lsstp3.meta.nb_of_stop
@show lsstp3.current_state.x
printstyled("The End.\n", color = :green)
printstyled("passed ✓ \n", color = :green)
##############################################################################
#
# Part 2/4
#
#############################################################################
printstyled("How to solve unconstrained optim problem: \n", color = :red)
include("uncons.jl")
printstyled("Unconstrained Optimization: globalized Newton.\n", color = :green)
x0 = 1.5 * ones(6)
nlp = ADNLPModel(rosenbrock, x0)
# We use the default builder using the KKT optimality function (which does not
# automatically fill in the State)
stop_nlp = NLPStopping(nlp)
parameters = PrmUn()
printstyled("Newton method with Armijo linesearch.\n", color = :green)
global_newton(stop_nlp, parameters)
@show status(stop_nlp)
#We can check afterwards, the score
@show Stopping.KKT(stop_nlp.pb, stop_nlp.current_state)
@show stop_nlp.meta.nb_of_stop
printstyled("Newton method with Armijo-Wolfe linesearch.\n", color = :green)
reinit!(stop_nlp, rstate = true, x = x0)
reset!(stop_nlp.pb) #reinitialize the counters of the NLP
parameters.ls_func =
(x, y) -> armijo_wolfe(x, y, τ₀ = parameters.armijo_prm, τ₁ = parameters.wolfe_prm)
global_newton(stop_nlp, parameters)
@show status(stop_nlp)
#We can check afterwards, the score
@show Stopping.KKT(stop_nlp.pb, stop_nlp.current_state)
@show stop_nlp.meta.nb_of_stop
printstyled("The End.\n", color = :green)
printstyled("passed ✓ \n", color = :green)
##############################################################################
#
# Part 3/4
#
#############################################################################
printstyled("How to solve bound constrained optim problem: \n", color = :red)
include("activeset.jl")
printstyled("Constrained optimization: active-set algorithm tutorial.\n", color = :green)
x0 = 1.5 * ones(6);
x0[6] = 1.0;
nlp_bnd = ADNLPModel(rosenbrock, x0, fill(-10.0, size(x0)), fill(1.5, size(x0)))
nlp_bnd_at_x = NLPAtX(x0)
stop_nlp_c = NLPStopping(nlp_bnd, max_iter = 10)
activeset(stop_nlp_c)
@show status(stop_nlp_c)
printstyled("The End.\n", color = :green)
printstyled("passed ✓ \n", color = :green)
##############################################################################
#
# Part 4/4
#
#############################################################################
printstyled("How to solve nonlinear optim problem: \n", color = :red)
include("penalty.jl")
printstyled("Constrained optimization: quadratic penalty tutorial.\n", color = :green)
x0 = 1.5 * ones(6)
c(x) = [sum(x)]
nlp2 = ADNLPModel(rosenbrock, x0, fill(-10.0, size(x0)), fill(10.0, size(x0)), c, [-Inf], [5.0])
nlp_at_x_c = NLPAtX(x0, zeros(nlp2.meta.ncon))
stop_nlp_c = NLPStopping(
nlp2,
nlp_at_x_c,
atol = 1e-3,
max_cntrs = init_max_counters(obj = 400000, cons = 800000, sum = 1000000),
optimality_check = (x, y) -> KKT(x, y),
)
penalty(stop_nlp_c)
@show status(stop_nlp_c)
#We can check afterwards, the score
@show KKT(stop_nlp_c.pb, stop_nlp_c.current_state)
printstyled("The End.\n", color = :green)
printstyled("passed ✓ \n", color = :green)
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 892 | ###############################################################################
#
# The Stopping structure eases the implementation of algorithms and the
# stopping criterion.
#
# The following examples illustre the various possibilities offered by Stopping
#
###############################################################################
using Test, NLPModels, Stopping
#printstyled("How to State ")
include("howtostate.jl")
#printstyled("passed ✓ \n", color = :green)
#printstyled("How to State for NLP ")
include("howtostate-nlp.jl")
#printstyled("passed ✓ \n", color = :green)
#printstyled("How to Stop ")
include("howtostop.jl")
#printstyled("passed ✓ \n", color = :green)
#printstyled("How to Stop II ")
include("howtostop-2.jl")
#printstyled("passed ✓ \n", color = :green)
#printstyled("How to Stop for NLP ")
include("howtostop-nlp.jl")
#printstyled("passed ✓ \n", color = :green)
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 3890 | ##############################################################################
#
# In this test problem, we consider a globalized Newton method.
# The scenario considers two different stopping criteria to solve the linesearch.
#
# i) This example illustrates how the "structure" handling the algorithmic
# parameters can be passed to the solver of the subproblem.
#
# ii) This algorithm handles a sub-stopping defined by passing the stopping as a
# keyword argument. Note that when a stopping is used multiple times, it has to
# be reinitialized.
#
# iii) It also shows how we can reuse the information and avoid unnecessary evals.
# Here, the objective function of the main problem and sub-problem are the same.
# Warning: the structure onedoptim however does not allow keeping the gradient
# of the main problem. This issue can be corrected by using a specialized State.
#
#############################################################################
#include("backls.jl")
#contains the rosenbrock and backtracking_ls functions, and the onedoptim struct
#using LinearAlgebra, NLPModels, Stopping
##############################################################################
#
# Newton method with LineSearch
#
#############################################################################
"""
    global_newton(stp::NLPStopping, onedsolve::Function, ls_func::Function; prms = nothing)

Newton method globalized by a linesearch. `onedsolve` solves the 1D problem,
`ls_func` is the acceptance criterion given to the linesearch Stopping, and
`prms` is forwarded to `onedsolve`. Returns the (updated) Stopping.
"""
function global_newton(stp::NLPStopping, onedsolve::Function, ls_func::Function; prms = nothing)
  #Notations
  state = stp.current_state
  nlp = stp.pb
  #Initialization
  xt = state.x
  d = zeros(size(xt))
  #First call: fill in f, g, H and check optimality at the initial point
  OK = update_and_start!(stp, x = xt, fx = obj(nlp, xt), gx = grad(nlp, xt), Hx = hess(nlp, xt))
  #Initialize the sub-Stopping with the main Stopping as keyword argument.
  #The 1D problem h(t) = f(xt + t*d) shares its objective with the main nlp.
  h = onedoptim(x -> obj(nlp, xt + x * d), x -> dot(d, grad(nlp, xt + x * d)))
  lsstp = LS_Stopping(h, LSAtT(1.0), main_stp = stp, optimality_check = ls_func)
  #main loop
  while !OK
    #Compute the Newton direction from the lower triangle of the Hessian
    d = Symmetric(state.Hx, :L) \ (-state.gx)
    #Prepare the substopping.
    #We reinitialize the stopping before each new use;
    #rstate = true forces a reinitialization of the State as well.
    #NOTE(review): g₀ is set to -dot(gx, d), the opposite sign of the
    #convention used in the backtracking tutorial (g₀ = dot(d, g) < 0) —
    #confirm the intended sign.
    reinit!(lsstp, rstate = true, x = 1.0, g₀ = -dot(state.gx, d), h₀ = state.fx)
    lsstp.pb = onedoptim(x -> obj(nlp, xt + x * d), x -> dot(d, grad(nlp, xt + x * d)))
    #solve subproblem
    onedsolve(lsstp, prms)
    if status(lsstp) == :Optimal
      alpha = lsstp.current_state.x
      #update the iterate with the accepted step length
      xt = xt + alpha * d
      #Since the onedoptim and the nlp have the same objective function,
      #we save one evaluation by reusing the linesearch value h(alpha).
      update!(stp.current_state, fx = lsstp.current_state.ht)
    else
      #the linesearch failed: flag it on the main Stopping
      stp.meta.fail_sub_pb = true
    end
    OK = update_and_stop!(stp, x = xt, gx = grad(nlp, xt), Hx = hess(nlp, xt))
  end
  return stp
end
##############################################################################
#
# Newton method with LineSearch
# Buffer version
#
#############################################################################
"""
    global_newton(stp::NLPStopping, prms)

Buffer version: pull the linesearch criterion and the 1D solver from the
parameter structure `prms` when available (defaults otherwise) and dispatch
to the main `global_newton`.
"""
function global_newton(stp::NLPStopping, prms)
  criterion = hasfield(typeof(prms), :ls_func) ? prms.ls_func : armijo
  solver1d = hasfield(typeof(prms), :onedsolve) ? prms.onedsolve : backtracking_ls
  return global_newton(stp, solver1d, criterion; prms = prms)
end
##############################################################################
#
#
#
"""
    PrmUn(; armijo_prm = 0.01, wolfe_prm = 0.99, onedsolve = backtracking_ls,
            ls_func = (x, y) -> armijo(x, y, τ₀ = armijo_prm), back_update = 0.5)

Algorithmic parameters of the globalized Newton method and of its 1D solver.
Fields left unspecified at construction take the defaults above.
"""
mutable struct PrmUn
  #parameters of the unconstrained minimization
  armijo_prm::Float64 #Armijo parameter
  wolfe_prm::Float64 #Wolfe parameter
  onedsolve::Function #1D solver
  ls_func::Function #linesearch acceptance criterion
  #parameters of the 1d minimization
  back_update::Float64 #backtracking update factor
  function PrmUn(;
    armijo_prm::Float64 = 0.01,
    wolfe_prm::Float64 = 0.99,
    onedsolve::Function = backtracking_ls,
    ls_func::Function = (x, y) -> armijo(x, y, τ₀ = armijo_prm),
    back_update::Float64 = 0.5,
  )
    return new(armijo_prm, wolfe_prm, onedsolve, ls_func, back_update)
  end
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1776 | ###############################################################################
#
# # ListofStates tutorial : 1/2
#
# We illustrate here the use of ListofStates in dealing with a warm start
# procedure.
#
# ListofStates can also prove the user history over the iteration process.
#
###############################################################################
using NLPModels, Stopping, Test
# Random search in [0,1] of the global minimum for unconstrained optimization
function algo_rand(stp::NLPStopping)
x0 = stp.current_state.x
n = length(x0)
OK = start!(stp)
while !OK
x = rand(n)
OK = update_and_stop!(stp, x = x, fx = obj(nlp, x), gx = grad(nlp, x))
end
return stp
end
include("../test-stopping/rosenbrock.jl")
x0 = 1.5 * ones(6)
nlp = ADNLPModel(rosenbrock, x0, zeros(6), ones(6))
state = NLPAtX(x0)
stop_lstt = NLPStopping(
nlp,
state,
list = ListofStates(state),
max_iter = 10,
optimality_check = optim_check_bounded,
)
algo_rand(stop_lstt)
print(stop_lstt.listofstates, print_sym = [:fx, :x])
@test length(stop_lstt.listofstates.list) == 12
#Note the difference if the length of the ListofStates is limited
reinit!(stop_lstt, rstate = true, x = x0)
stop_lstt.listofstates = ListofStates(state, n = 5)
algo_rand(stop_lstt)
print(stop_lstt.listofstates, print_sym = [:fx, :x])
@test length(stop_lstt.listofstates.list) == 5
#Take the best out of 5:
bestfx, best = findmax([stop_lstt.listofstates[i].fx for i = 1:length(stop_lstt.listofstates)])
best_state = copy(stop_lstt.listofstates[best])
reinit!(stop_lstt)
stop_lstt.current_state = best_state
stop_lstt.listofstates = ListofStates(best_state, n = 5)
algo_rand(stop_lstt)
print(stop_lstt.listofstates, print_sym = [:fx, :x])
@test length(stop_lstt.listofstates.list) == 5
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1709 | #unitary test GenericStatemod
x0 = ones(6)
state0 = GenericState(x0)
io = IOBuffer()
show(io, state0)
@test scoretype(state0) == Float64
@test xtype(state0) == Array{Float64, 1}
@test isnan(state0.current_time) #Default value of start_time is void
@test isnan(state0.current_score)
x1 = [1.0]
update!(state0, x = x1) #Check the update of state0
@test state0.x == x1
@test isnan(state0.current_time) #start_time must be unchanged
@test isnan(state0.current_score)
update!(state0, current_time = 1.0)
@test state0.x == x1 #must be unchanged
@test state0.current_time == 1.0
@test isnan(state0.current_score)
reinit!(state0, x0)
@test state0.x == x0
@test isnan(state0.current_time)
@test isnan(state0.current_score)
update!(state0, x = x1)
reinit!(state0, current_time = 0.5)
@test state0.x == x1
@test state0.current_time == 0.5
@test isnan(state0.current_score)
#Test _init_field
@test _init_field(typeof(zeros(2, 2))) == zeros(0, 0)
@test _init_field(SparseMatrixCSC{Float64, Int64}) == spzeros(0, 0)
@test _init_field(typeof(zeros(2))) == zeros(0)
@test _init_field(typeof(sparse(zeros(2)))) == spzeros(0)
@test isnan(_init_field(BigFloat))
@test isnan(_init_field(typeof(1.0)))
@test isnan(_init_field(Float32))
@test isnan(_init_field(Float16))
@test _init_field(Nothing) == nothing
@test ismissing(Main.Stopping._init_field(Missing))
@test !_init_field(typeof(true))
@test _init_field(typeof(1)) == -9223372036854775808
#_check_nan_miss
@test !Stopping._check_nan_miss(nothing)
@test !Stopping._check_nan_miss(Counters())
@test !Stopping._check_nan_miss(spzeros(0))
@test !Stopping._check_nan_miss(zeros(0))
@test !Stopping._check_nan_miss(missing)
@test !Stopping._check_nan_miss(spzeros(0, 0))
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1668 | @testset "List of States" begin
s0 = GenericState(zeros(50))
s1 = GenericState(ones(10))
s2 = GenericState(NaN * ones(10), current_time = 1.0, current_score = 0.0)
@test typeof(ListofStates(s0)) <: AbstractListofStates
@test typeof(ListofStates(-1, Val{GenericState}())) <: AbstractListofStates
@test typeof(ListofStates(1, Val{GenericState}())) <: AbstractListofStates
#@test typeof(ListofStates(-1, 3, [])) <: AbstractListofStates
#@test typeof(ListofStates(-1, [])) <: AbstractListofStates
stest = ListofStates(s0, max_vector_size = 2, pnorm = Inf)
@test state_type(stest) == GenericState{Float64, Array{Float64, 1}}
add_to_list!(stest, s1, max_vector_size = 2, pnorm = Inf)
add_to_list!(stest, s2, max_vector_size = 2, pnorm = Inf)
@test length(stest) == 3
stest2 = ListofStates(s0, n = 2, max_vector_size = 2, pnorm = Inf)
add_to_list!(stest2, s1, max_vector_size = 2, pnorm = Inf)
add_to_list!(stest2, s2, max_vector_size = 2, pnorm = Inf)
@test length(stest2) == 2
df1 = print(stest, verbose = false)
df2 = print(stest2, verbose = false)
df3 = print(stest2, verbose = false, print_sym = [:x])
@test typeof(df2) <: DataFrame
stest3 = ListofStates(
-1,
3,
[(s0, VoidListofStates()), (s1, VoidListofStates()), (s2, VoidListofStates())],
)
@test stest3[2, 1] == s1
stest4 =
ListofStates(-1, [(s0, VoidListofStates()), (s1, VoidListofStates()), (s2, VoidListofStates())])
@test length(stest4) == 3
#nested lists
stest5 = ListofStates(-1, [(s0, stest3)])
df5 = print(stest5[1, 2], verbose = false)
stest7 = ListofStates(-1, [s0, s1, s2])
@test length(stest7) == 3
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 4201 | @testset "NLPAtX" begin
#Test unconstrained NLPAtX
uncons_nlp_at_x = NLPAtX(zeros(10))
@test uncons_nlp_at_x.x == zeros(10)
@test isnan(uncons_nlp_at_x.fx)
@test uncons_nlp_at_x.gx == zeros(0)
@test uncons_nlp_at_x.Hx == zeros(0, 0)
@test uncons_nlp_at_x.mu == zeros(0)
@test uncons_nlp_at_x.cx == zeros(0)
@test uncons_nlp_at_x.Jx == zeros(0, 0)
@test uncons_nlp_at_x.lambda == zeros(0)
@test isnan(uncons_nlp_at_x.current_time)
@test isnan(uncons_nlp_at_x.current_score)
#check constrained NLPAtX
cons_nlp_at_x = NLPAtX(zeros(10), zeros(10))
@test cons_nlp_at_x.x == zeros(10)
@test isnan(cons_nlp_at_x.fx)
@test cons_nlp_at_x.gx == zeros(0)
@test cons_nlp_at_x.Hx == zeros(0, 0)
@test cons_nlp_at_x.mu == zeros(0)
@test cons_nlp_at_x.cx == zeros(0)
@test cons_nlp_at_x.Jx == zeros(0, 0)
@test (false in (cons_nlp_at_x.lambda .== 0.0)) == false
@test isnan(cons_nlp_at_x.current_time)
@test isnan(cons_nlp_at_x.current_score)
update!(cons_nlp_at_x, Hx = ones(20, 20), gx = ones(2), lambda = zeros(2))
compress_state!(cons_nlp_at_x, max_vector_size = 5, lambda = zeros(0), gx = true)
@test cons_nlp_at_x.Hx == zeros(0, 0)
@test cons_nlp_at_x.x == [0.0]
@test cons_nlp_at_x.lambda == zeros(0)
@test cons_nlp_at_x.gx == zeros(0)
# On vérifie que la fonction update! fonctionne
update!(uncons_nlp_at_x, x = ones(10), fx = 1.0, gx = ones(10))
update!(uncons_nlp_at_x, lambda = ones(10), current_time = 1.0)
update!(uncons_nlp_at_x, Hx = ones(10, 10), mu = ones(10), cx = ones(10), Jx = ones(10, 10))
@test (false in (uncons_nlp_at_x.x .== 1.0)) == false #assez bizarre comme test...
@test uncons_nlp_at_x.fx == 1.0
@test (false in (uncons_nlp_at_x.gx .== 1.0)) == false
@test (false in (uncons_nlp_at_x.Hx .== 1.0)) == false
@test uncons_nlp_at_x.mu == ones(10)
@test uncons_nlp_at_x.cx == ones(10)
@test (false in (uncons_nlp_at_x.Jx .== 1.0)) == false
@test (false in (uncons_nlp_at_x.lambda .== 1.0)) == false
@test uncons_nlp_at_x.current_time == 1.0
@test isnan(uncons_nlp_at_x.current_score)
reinit!(uncons_nlp_at_x)
@test uncons_nlp_at_x.x == ones(10)
@test isnan(uncons_nlp_at_x.fx)
reinit!(uncons_nlp_at_x, x = zeros(10))
@test uncons_nlp_at_x.x == zeros(10)
@test isnan(uncons_nlp_at_x.fx)
reinit!(uncons_nlp_at_x, zeros(10))
@test uncons_nlp_at_x.x == zeros(10)
@test isnan(uncons_nlp_at_x.fx)
reinit!(uncons_nlp_at_x, zeros(10), l = zeros(0))
@test uncons_nlp_at_x.x == zeros(10)
@test isnan(uncons_nlp_at_x.fx)
c_uncons_nlp_at_x = copy_compress_state(uncons_nlp_at_x, max_vector_size = 5)
@test c_uncons_nlp_at_x != uncons_nlp_at_x
@test c_uncons_nlp_at_x.x == [0.0]
@test c_uncons_nlp_at_x.lambda == [1.0]
uncons_nlp_at_x.Hx = zeros(10, 10)
zip_uncons_nlp_at_x =
compress_state!(uncons_nlp_at_x, keep = true, save_matrix = true, max_vector_size = 5, Hx = 1)
zip_uncons_nlp_at_x.Hx == 0.0
nlp_64 = NLPAtX(ones(10))
nlp_64.x = ones(10)
nlp_64.fx = 1.0
nlp_64.gx = ones(10)
# nlp_32 = convert_nlp(Float32, nlp_64)
# @test typeof(nlp_32.x[1]) == Float32
# @test typeof(nlp_32.fx[1]) == Float32
# @test typeof(nlp_32.gx[1]) == Float32
# @test isnan(nlp_32.mu[1])
# @test isnan(nlp_32.current_time)
#
# @test typeof(nlp_64.x[1]) == Float64
# @test typeof(nlp_64.fx[1]) == Float64
# @test typeof(nlp_64.gx[1]) == Float64
# @test isnan(nlp_64.mu[1])
# @test isnan(nlp_64.current_time)
#Test the _size_check:
try
NLPAtX(ones(5), gx = zeros(4))
@test false
catch
@test true
end
try
NLPAtX(ones(5), mu = zeros(4))
@test false
catch
@test true
end
try
NLPAtX(ones(5), Hx = zeros(4, 4))
@test false
catch
@test true
end
try
NLPAtX(ones(5), zeros(1), cx = zeros(2))
@test false
catch
@test true
end
# Test matrix types
state = NLPAtX(ones(5), ones(2), Jx = spzeros(2, 5), Hx = spzeros(5, 5))
@test typeof(spzeros(2, 5)) == typeof(state.Jx)
@test typeof(spzeros(5, 5)) == typeof(state.Hx)
state = NLPAtX(ones(5), ones(2), Jx = spzeros(2, 5))
@test typeof(spzeros(2, 5)) == typeof(state.Jx)
@test typeof(zeros(5, 5)) == typeof(state.Hx)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
|
[
"MIT"
] | 0.6.5 | 96a43d53d1fb7db5e33406e96ab59256636bba1e | code | 1377 | @testset "OneDAtX" begin
# On vérifie que le constructeur par défaut fonctionne
ls_at_t = OneDAtX(0.0)
@test scoretype(ls_at_t) == Float64
@test xtype(ls_at_t) == Float64
@test ls_at_t.x == 0.0
@test isnan(ls_at_t.fx)
@test isnan(ls_at_t.gx)
@test isnan(ls_at_t.f₀)
@test isnan(ls_at_t.g₀)
@test isnan(ls_at_t.current_time)
@test isnan(ls_at_t.current_score)
# On test la fonction update!(...)
update!(ls_at_t, x = 1.0, fx = 1.0, gx = 1.0, f₀ = 1.0)
update!(ls_at_t, g₀ = 1.0, current_time = 0.0, current_score = 0.0)
@test ls_at_t.x == 1.0
@test ls_at_t.fx == 1.0
@test ls_at_t.gx == 1.0
@test ls_at_t.f₀ == 1.0
@test ls_at_t.g₀ == 1.0
@test ls_at_t.current_time == 0.0
@test ls_at_t.current_score == 0.0
# on vérifie que la fonction copy fonctionne
ls_at_t_2 = copy(ls_at_t)
@test scoretype(ls_at_t_2) == Float64
@test xtype(ls_at_t_2) == Float64
@test ls_at_t_2.x == 1.0
@test ls_at_t_2.fx == 1.0
@test ls_at_t_2.gx == 1.0
@test ls_at_t_2.f₀ == 1.0
@test ls_at_t_2.g₀ == 1.0
@test ls_at_t_2.current_time == 0.0
@test ls_at_t_2.current_score == 0.0
ls_64 = OneDAtX(0.0)
@test scoretype(ls_64) == Float64
@test xtype(ls_64) == Float64
update!(ls_64, x = 1.0, fx = 1.0, gx = 1.0, f₀ = 1.0)
reinit!(ls_64)
@test ls_64.x == 1.0
@test isnan(ls_64.fx)
@test isnan(ls_64.current_time)
end
| Stopping | https://github.com/SolverStoppingJulia/Stopping.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.