licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.3 | 9986eabab3dcfd90cf3a041b7ec5a64fca4aa508 | docs | 1816 | # UnionArrays: storage-agnostic array type with `Union` elements
[Documentation — stable](https://juliafolds.github.io/UnionArrays.jl/stable)
[Documentation — dev](https://juliafolds.github.io/UnionArrays.jl/dev)
[CI: Run tests](https://github.com/JuliaFolds/UnionArrays.jl/actions?query=workflow%3ARun+tests)
[Code coverage](https://codecov.io/gh/JuliaFolds/UnionArrays.jl)
[GitHub repository](https://github.com/JuliaFolds/UnionArrays.jl)
UnionArrays.jl provides an array type with `Union` element types that is
generic over the data storage type.
```julia
julia> using UnionArrays
julia> xs = UnionVector(undef, Vector, Union{Float32,Tuple{},UInt8}, 3);
julia> fill!(xs, ());
julia> xs[1]
()
julia> xs[2] = 1.0f0;
julia> xs[3] = UInt8(2);
julia> collect(xs)
3-element Vector{Union{Tuple{}, Float32, UInt8}}:
()
1.0f0
0x02
```
For example, it can be used for bringing `Union` element types to GPU:
```julia
julia> using CUDA
julia> xs = UnionVector(undef, CuVector, Union{Float32,Nothing}, 3);
julia> fill!(xs, nothing);
```
Packages like [Transducers.jl](https://github.com/JuliaFolds/Transducers.jl)
and [Folds.jl](https://github.com/JuliaFolds/Folds.jl) support computations
with `UnionArray`s on GPU:
```julia
julia> using Folds, FoldsCUDA
julia> Folds.all(==(nothing), xs)
true
julia> CUDA.@allowscalar begin
xs[2] = 1.0f0
xs[3] = 2.0f0
end;
julia> Folds.sum(x -> x === nothing ? 0.0f0 : x, xs; init = 0.0f0)
3.0f0
```
| UnionArrays | https://github.com/JuliaFolds/UnionArrays.jl.git |
|
[
"MIT"
] | 0.1.3 | 9986eabab3dcfd90cf3a041b7ec5a64fca4aa508 | docs | 96 | # UnionArrays.jl
```@index
```
```@autodocs
Modules = [UnionArrays, UnionArrays.Abstract]
```
| UnionArrays | https://github.com/JuliaFolds/UnionArrays.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 282 | module DashBasePlotlyBaseExt
using DashBase
isdefined(Base, :get_extension) ? (using PlotlyBase) : (using ..PlotlyBase)
const JSON = PlotlyBase.JSON
# Lower the PlotlyBase figure to its JSON-serializable form and remove the
# `:config` entry before handing it to Dash.
function DashBase.to_dash(p::PlotlyBase.Plot)
    fig = JSON.lower(p)
    pop!(fig, :config, nothing)
    return fig
end
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 203 | module DashBasePlotlyJSExt
using DashBase
isdefined(Base, :get_extension) ? (using PlotlyJS) : (using ..PlotlyJS)
# A PlotlyJS SyncPlot wraps a PlotlyBase plot; delegate to that plot's method.
DashBase.to_dash(p::PlotlyJS.SyncPlot) = DashBase.to_dash(p.plot)
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 849 | module DashBasePlotsExt
using DashBase
isdefined(Base, :get_extension) ? (using Plots) : (using ..Plots)
# Plots.jl with the `plotly()` backend: when both PlotlyBase and PlotlyKaleido
# are already loaded we can build a full SyncPlot and reuse its conversion;
# otherwise fall back to Plots' own series/layout serialization.
function DashBase.to_dash(p::Plots.Plot{Plots.PlotlyBackend})
    return if haskey(Base.loaded_modules, Base.PkgId(Base.UUID("a03496cd-edff-5a9b-9e67-9cda94a718b5"), "PlotlyBase")) &&
        haskey(Base.loaded_modules, Base.PkgId(Base.UUID("f2990250-8cf9-495f-b13a-cce12b45703c"), "PlotlyKaleido"))
        # Note: technically it would be sufficient if PlotlyBase is loaded, but that's how it is currently handled by Plots.jl
        sp = Plots.plotlybase_syncplot(p)
        DashBase.to_dash(sp)
    else
        Dict(:data => Plots.plotly_series(p), :layout => Plots.plotly_layout(p))
    end
end
# The `plotlyjs()` backend always has a SyncPlot available: convert and reuse
# the SyncPlot method.
DashBase.to_dash(p::Plots.Plot{Plots.PlotlyJSBackend}) =
    DashBase.to_dash(Plots.plotlyjs_syncplot(p))
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 887 | module DashBase
import JSON3
include("components.jl")
include("registry.jl")
export Component, push_prop!, get_name, get_type, get_namespace,
ResourcePkg, Resource, ResourcesRegistry, isdynamic, register_package!, main_registry,
get_dash_dependencies, get_dash_renderer_pkg, get_componens_pkgs,
has_relative_path, has_dev_path, has_external_url, get_type,
get_external_url, get_dev_path, get_relative_path
@static if !isdefined(Base, :get_extension)
using Requires
end
# On Julia versions without package extensions (no `Base.get_extension`),
# fall back to Requires.jl: load each extension file when the corresponding
# plotting package is first imported.
function __init__()
    @static if !isdefined(Base, :get_extension)
        @require PlotlyBase = "a03496cd-edff-5a9b-9e67-9cda94a718b5" include("../ext/DashBasePlotlyBaseExt.jl")
        @require PlotlyJS = "f0f68f2c-4968-5e81-91da-67840de0976a" include("../ext/DashBasePlotlyJSExt.jl")
        @require Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" include("../ext/DashBasePlotsExt.jl")
    end
end
end # module
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 3069 | to_dash(t::Any) = t
"""
    Component(name, type, namespace, props, wildcard_props; kwargs...)

A Dash component: JavaScript component `type` from package `namespace`, with
a fixed set of allowed prop names plus optional wildcard prefixes (e.g.
`Symbol("data-")`). Keyword arguments are validated against the allowed
props and stored as initial values.
"""
struct Component
    name ::String                           # component constructor name, e.g. "html_div"
    type ::String                           # JS component type, e.g. "Div"
    namespace ::String                      # JS package providing the component
    props ::Dict{Symbol, Any}               # currently-set prop values
    available_props ::Set{Symbol}           # explicitly declared prop names
    wildcard_regex ::Union{Nothing, Regex}  # matches wildcard-prefixed prop names, if any
    function Component(name::String, type::String, namespace::String,
        props::Vector{Symbol}, wildcard_props::Vector{Symbol}; kwargs...)
        available_props = Set{Symbol}(props)
        wildcard_regex::Union{Nothing, Regex} = nothing
        if !isempty(wildcard_props)
            # Join the wildcard prefixes into a single alternation regex
            wildcard_regex = Regex(join(string.(wildcard_props), "|"))
        end
        component = new(name, type, namespace, Dict{Symbol, Any}(), available_props, wildcard_regex)
        # Route initial values through setproperty! so they are validated and
        # converted via `to_dash`
        for (prop, value) in kwargs
            Base.setproperty!(component, prop, value)
        end
        return component
    end
end
# Field accessors. `getproperty` is overloaded below for prop access, so raw
# struct fields must be read with `getfield` through these helpers.
get_name(comp::Component) = getfield(comp, :name)
get_type(comp::Component) = getfield(comp, :type)
get_namespace(comp::Component) = getfield(comp, :namespace)
get_available_props(comp::Component) = getfield(comp, :available_props)
get_wildcard_regex(comp::Component) = getfield(comp, :wildcard_regex)
get_props(comp::Component) = getfield(comp, :props)
# Collections of components that support recursive id lookup.
const VecChildTypes = Union{NTuple{N, Component} where {N}, Vector{<:Component}}

# Recursive lookup of a descendant component by `id` in `component`'s subtree;
# returns `nothing` when neither the component nor any descendant matches.
function Base.getindex(component::Component, id::AbstractString)
    component.id == id && return component
    hasproperty(component, :children) || return nothing
    cc = component.children
    return if cc isa Union{VecChildTypes, DashBase.Component}
        cc[id]
    elseif cc isa AbstractVector
        # Mixed children (components interleaved with e.g. strings): keep only
        # the elements that can carry an `id`, and narrow the eltype with
        # `identity.(...)` so the result dispatches to the VecChildTypes method.
        fcc = identity.(filter(x->hasproperty(x, :id), cc))
        isempty(fcc) ? nothing : fcc[id]
    else
        nothing
    end
end
# Depth-first search through a collection of children: return the first child
# whose own `id` matches, otherwise recurse into each child's subtree.
# Returns `nothing` when no match is found.
function Base.getindex(children::VecChildTypes, id::AbstractString)
    for child in children
        child.id == id && return child
        found = child[id]
        found === nothing || return found
    end
    return nothing
end
# Prop access: a declared-but-unset prop reads as `nothing`; an undeclared,
# non-wildcard prop is an error.
function Base.getproperty(comp::Component, prop::Symbol)
    Base.hasproperty(comp, prop) ||
        error("Component $(get_name(comp)) has no property $(prop)")
    return get(get_props(comp), prop, nothing)
end
# Prop assignment: validate the name, lower the value via `to_dash`, and
# store it in the props dictionary.
function Base.setproperty!(comp::Component, prop::Symbol, value)
    Base.hasproperty(comp, prop) ||
        error("Component $(get_name(comp)) has no property $(prop)")
    stored = get_props(comp)
    stored[prop] = to_dash(value)
    return stored
end
# A name matches the wildcard set when a wildcard regex was built and the
# name's string form starts with one of the configured prefixes.
# (Name keeps the historical "whild" spelling; callers depend on it.)
function check_whild(wildcard_regex::Union{Nothing, Regex}, name::Symbol)
    wildcard_regex === nothing && return false
    return startswith(String(name), wildcard_regex)
end
# A prop is valid when it is explicitly declared, or matches one of the
# wildcard prefixes.
function Base.hasproperty(comp::Component, prop::Symbol)
    prop in get_available_props(comp) && return true
    return check_whild(get_wildcard_regex(comp), prop)
end
# Only explicitly declared props are advertised; wildcard props are open-ended.
Base.propertynames(comp::Component) = collect(get_available_props(comp))
# Unvalidated prop insertion (bypasses the `hasproperty` check), still
# lowering the value via `to_dash`.
# Fix: the previous `component.props` went through the overloaded
# `Base.getproperty`, which errors for any component that does not declare a
# literal `:props` prop; the raw field must be read with `get_props`.
push_prop!(component::Component, prop::Symbol, value) = push!(get_props(component), prop=>to_dash(value))
# JSON serialization: emit as a plain struct, omitting internal bookkeeping
# fields so only :type, :namespace and :props appear.
JSON3.StructTypes.StructType(::Type{DashBase.Component}) = JSON3.StructTypes.Struct()
JSON3.StructTypes.excludes(::Type{DashBase.Component}) = (:name, :available_props, :wildcard_regex)
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 2944 | struct Resource
    relative_package_path::Union{Nothing, Vector{String}}  # path(s) inside the installed package
    dev_package_path::Union{Nothing, Vector{String}}       # path(s) used during development
    external_url::Union{Nothing, Vector{String}}           # external URL(s), e.g. unpkg (see tests)
    type::Symbol                                           # :js or :css
    async::Symbol # :none, :eager, or :lazy (maybe this should be an enum)
    # `dynamic` (legacy Bool) and `async` (:false / :eager / :lazy) are two
    # mutually-exclusive spellings of the same loading policy; both are
    # normalized into the single `async` field.
    function Resource(;relative_package_path, dev_package_path = nothing, external_url = nothing, type = :js, dynamic = nothing, async=nothing)
        (!isnothing(dynamic) && !isnothing(async)) && throw(ArgumentError("Can't have both 'dynamic' and 'async'"))
        !in(type, [:js, :css]) && throw(ArgumentError("type must be `:js` or `:css`"))
        async_symbol = :none
        if !isnothing(dynamic)
            dynamic == true && (async_symbol = :lazy)
        # NOTE: `:false` is quoting of the Bool literal, i.e. `async != false`
        elseif !isnothing(async) && async != :false
            async_symbol = async == :lazy ? :lazy : :eager
        end
        # Single-string paths are normalized to one-element vectors
        return new(_path_to_vector(relative_package_path), _path_to_vector(dev_package_path), _path_to_vector(external_url), type, async_symbol)
    end
end
# Normalize path arguments: `nothing` passes through, a single string becomes
# a one-element vector, a vector is returned as-is.
_path_to_vector(s::Nothing) = nothing
_path_to_vector(s::String) = [s]
_path_to_vector(s::Vector{String}) = s
# Presence predicates and accessors for the three optional path kinds.
has_relative_path(r::Resource) = !isnothing(r.relative_package_path)
has_dev_path(r::Resource) = !isnothing(r.dev_package_path)
has_external_url(r::Resource) = !isnothing(r.external_url)
get_type(r::Resource) = r.type
get_external_url(r::Resource) = r.external_url
get_dev_path(r::Resource) = r.dev_package_path
get_relative_path(r::Resource) = r.relative_package_path
# A resource loads dynamically when marked :lazy, or when marked :eager while
# eager loading is disabled.
isdynamic(resource::Resource, eager_loading::Bool) = resource.async == :lazy || (resource.async == :eager && !eager_loading)
# A versioned bundle of resources shipped by one JS package (`namespace`),
# rooted at `path` on disk.
struct ResourcePkg
    namespace ::String
    path ::String
    resources ::Vector{Resource}
    version ::String
    ResourcePkg(namespace, path, resources = Resource[]; version = "") = new(namespace, path, resources, version)
end
# Registry of all known component packages (keyed by namespace), plus the
# special dash dependency (dev/prod pair) and the dash renderer package.
mutable struct ResourcesRegistry
    components ::Dict{String, ResourcePkg}
    dash_dependency ::Union{Nothing, NamedTuple{(:dev, :prod), Tuple{ResourcePkg,ResourcePkg}}}
    dash_renderer ::Union{Nothing, ResourcePkg}
    ResourcesRegistry(;dash_dependency = nothing, dash_renderer = nothing) = new(Dict{String, ResourcePkg}(), dash_dependency, dash_renderer)
end
# Select the dev or prod flavor of the dash dependency package.
get_dash_dependencies(registry::ResourcesRegistry, prop_check::Bool) = prop_check ?
    registry.dash_dependency[:dev] :
    registry.dash_dependency[:prod]
# NOTE: name keeps the historical "componens" spelling — it is exported API.
get_componens_pkgs(registry::ResourcesRegistry) = values(registry.components)
get_dash_renderer_pkg(registry::ResourcesRegistry) = registry.dash_renderer
# Register (or replace) the package registered under its namespace.
function register_package!(registry::ResourcesRegistry, pkg::ResourcePkg)
    registry.components[pkg.namespace] = pkg
end
# Process-global default registry plus convenience wrappers around it.
const resources_registry = ResourcesRegistry()
register_package(pkg::ResourcePkg) = register_package!(resources_registry, pkg)
main_registry() = resources_registry
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 515 | using Aqua
using DashBase
using Test
# skip `project_toml_formatting` for older version, as
# the Project.toml formatting generated after `Pkg.update` changed
project_toml_formatting = VERSION >= v"1.8.0"
# skip `stale_deps` for newer version, as
# we no longer load Requires
stale_deps = VERSION < v"1.9.0"
Aqua.test_all(DashBase; stale_deps, project_toml_formatting)
if !stale_deps
    # On Julia >= 1.9 Requires is declared but never loaded (package
    # extensions are used instead), so exclude it from the staleness check.
    @testset "test_stale_deps except for Requires" begin
        Aqua.test_stale_deps(DashBase; ignore=[:Requires])
    end
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 2568 | using DashBase
using JSON3
@testset "components creation" begin
test_comp = Component("html_div", "Div", "dash_html_components",
[:children, :id, :n_clicks],
[Symbol("data-"), Symbol("aria-")];
id=10,
n_clicks=1
)
@test get_name(test_comp) == "html_div"
@test get_type(test_comp) == "Div"
@test get_namespace(test_comp) == "dash_html_components"
@test test_comp.id == 10
@test test_comp.n_clicks == 1
test_comp.var"data-id" = 20
@test test_comp.var"data-id" == 20
@test sort(propertynames(test_comp)) == sort([:children, :id, :n_clicks])
@test isnothing(test_comp.children)
#anavailable property
@test_throws ErrorException test_comp.key
@test_throws ErrorException test_comp.key = 20
@test_throws ErrorException Component("html_div", "Div", "dash_html_components",
[:children, :id, :n_clicks],
[Symbol("data-"), Symbol("aria-")];
id=10,
key=1
)
json = JSON3.write(test_comp)
res = JSON3.read(json)
@test all(keys(res) .== [:type, :namespace, :props])
@test sort(collect(keys(res.props))) == sort([:id, :n_clicks, Symbol("data-id")])
@test res.props.id == 10
@test res.props.n_clicks == 1
@test res.props.var"data-id" == 20
end
@testset "empty wilds" begin
test_comp = Component("html_div", "Div", "dash_html_components",
[:children, :id, :n_clicks],
Symbol[];
id=10,
n_clicks=1
)
@test get_name(test_comp) == "html_div"
@test get_type(test_comp) == "Div"
@test get_namespace(test_comp) == "dash_html_components"
@test test_comp.id == 10
@test test_comp.n_clicks == 1
@test_throws ErrorException test_comp.var"data-id" = 20
@test_throws ErrorException test_comp.aaaaa
end
# Build a throwaway html Div component carrying the full standard prop set;
# keyword arguments are forwarded straight to the `Component` constructor.
function test_component(; kwargs...)
    standard_props = [:dir, :n_clicks, :key, :loading_state, :contentEditable,
                      :contextMenu, :n_clicks_timestamp, :draggable, :accessKey,
                      :hidden, :style, :children, :id, :title, :role, :lang,
                      :spellCheck, :tabIndex, :disable_n_clicks, :className]
    return Component("html_div", "Div", "dash_html_components", standard_props, Symbol[]; kwargs...)
end
@testset "Index by id" begin
layout = test_component(children=[
test_component(id="my-id", n_clicks=0, title="text"),
test_component(id="my-div", children=[
test_component(children=[]),
"string",
test_component(id="target")
]),
test_component(id="my-div2")
])
@test layout["target"].id == "target"
@test layout["ups"] === nothing
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 2178 | using DashBase
@testset "lazy + dynamic" begin
test_resource = Resource(
relative_package_path = "test.js",
dynamic = true
)
@test test_resource.async == :lazy
@test DashBase.isdynamic(test_resource, true)
@test DashBase.isdynamic(test_resource, false)
test_resource = Resource(
relative_package_path = "test.js",
dynamic = false
)
@test test_resource.async == :none
@test !DashBase.isdynamic(test_resource, true)
@test !DashBase.isdynamic(test_resource, false)
@test_throws ArgumentError test_resource = Resource(
relative_package_path = "test.js",
dynamic = false,
async = :false
)
test_resource = Resource(
relative_package_path = "test.js",
async = :lazy
)
@test test_resource.async == :lazy
@test DashBase.isdynamic(test_resource, true)
@test DashBase.isdynamic(test_resource, false)
test_resource = Resource(
relative_package_path = "test.js",
async = :eager
)
@test test_resource.async == :eager
@test !DashBase.isdynamic(test_resource, true)
@test DashBase.isdynamic(test_resource, false)
end
@testset "register" begin
DashBase.register_package(
ResourcePkg(
"dash_html_components",
"path",
version = "1.2.3",
[
Resource(
relative_package_path = "dash_html_components.min.js",
external_url = "https://unpkg.com/[email protected]/dash_html_components/dash_html_components.min.js",
dynamic = nothing,
async = nothing,
type = :js
),
Resource(
relative_package_path = "dash_html_components.min.js.map",
external_url = "https://unpkg.com/[email protected]/dash_html_components/dash_html_components.min.js.map",
dynamic = true,
async = nothing,
type = :js
)
]
)
)
@test length(DashBase.main_registry().components) == 1
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 101 | using Test
include("components.jl")
include("test_ext.jl")
include("registry.jl")
include("aqua.jl")
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | code | 754 | using Test
using DashBase
import PlotlyBase
import PlotlyJS
import Plots
# Shared extension checks: `to_dash` must yield a plain Dict carrying the
# plot's data series, keep :layout and :frames, and strip :config.
function run_assertions(pl)
    obj = @test_nowarn DashBase.to_dash(pl)
    @test obj isa Dict{Symbol, Any}
    @test obj[:data][1][:y] == [1, 2, 3, 4, 5]
    @test haskey(obj, :layout)
    @test haskey(obj, :frames)
    @test !haskey(obj, :config)
end
@testset "DashBasePlotlyBaseExt" begin
pl = PlotlyBase.Plot(1:5)
run_assertions(pl)
end
@testset "DashBasePlotsJSExt" begin
pl = PlotlyJS.plot(1:5)
run_assertions(pl)
end
@testset "DashBasePlotsExt + plotlyjs()" begin
Plots.plotlyjs()
pl = Plots.plot(1:5)
run_assertions(pl)
end
@testset "DashBasePlotsExt + plotly()" begin
Plots.plotly()
pl = Plots.plot(1:5)
run_assertions(pl)
end
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 1.0.0 | f56a284687c4f7a67a1341a275baf733c99149ba | docs | 152 | # DashBase
The package that defines base `Component` and `Resource` structure for `Dash.jl`
and Dash components packages. Not intended for direct use.
| DashBase | https://github.com/plotly/DashBase.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 566 | using Documenter, Revise
# Build the Revise.jl documentation with Documenter.
makedocs(
    modules = [Revise],
    clean = false,
    # Pretty URLs only on CI; plain files are easier to browse locally
    format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
    sitename = "Revise.jl",
    authors = "Tim Holy",
    # Pass "skiplinks" on the command line to skip link checking
    linkcheck = !("skiplinks" in ARGS),
    pages = [
        "Home" => "index.md",
        "config.md",
        "cookbook.md",
        "limitations.md",
        "debugging.md",
        "internals.md",
        "user_reference.md",
        "dev_reference.md",
    ],
)
# Publish to gh-pages, including previews for pull requests.
deploydocs(
    repo = "github.com/timholy/Revise.jl.git",
    push_preview = true,
)
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 2038 | """
Revise.jl tracks source code changes and incorporates the changes to a running Julia session.
Revise.jl works behind-the-scenes. To track a package, e.g. `Example`:
```julia
(@v1.6) pkg> dev Example # make a development copy of the package
[...pkg output omitted...]
julia> using Revise # this must come before the package under development
julia> using Example
[...develop the package...] # Revise.jl will automatically update package functionality to match code changes
```
Functions in Revise.jl that may come handy in special circumstances:
- `Revise.track`: track updates to `Base` Julia itself or `Core.Compiler`
- `includet`: load a file and track future changes. Intended for small, quick works
- `entr`: call an additional function whenever code updates
- `revise`: evaluate any changes in `Revise.revision_queue` or every definition in a module
- `Revise.retry`: perform previously-failed revisions. Useful in cases of order-dependent errors
- `Revise.errors`: report the errors represented in `Revise.queue_errors`
"""
module Revise
# We use a code structure where all `using` and `import`
# statements in the package that load anything other than
# a Julia base or stdlib package are located in this file here.
# Nothing else should appear in this file here, apart from
# the `include("packagedef.jl")` statement, which loads what
# we would normally consider the bulk of the package code.
# This somewhat unusual structure is in place to support
# the VS Code extension integration.
using OrderedCollections, CodeTracking, JuliaInterpreter, LoweredCodeUtils
using CodeTracking: PkgFiles, basedir, srcfiles, line_is_decl, basepath
using JuliaInterpreter: whichtt, is_doc_expr, step_expr!, finish_and_return!, get_return,
@lookup, moduleof, scopeof, pc_expr, is_quotenode_egal,
linetable, codelocs, LineTypes, isassign, isidentical
using LoweredCodeUtils: next_or_nothing!, trackedheads, callee_matches
include("packagedef.jl")
end # module
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 5377 | # Globals needed to support `entr` and other callbacks
"""
Revise.revision_event
This `Condition` is used to notify `entr` that one of the watched files has changed.
"""
const revision_event = Condition()
"""
Revise.user_callbacks_queue
Global variable, `user_callbacks_queue` holds `key` values for which the
file has changed but the user hooks have not yet been called.
"""
const user_callbacks_queue = Set{Any}()
"""
Revise.user_callbacks_by_file
Global variable, maps files (identified by their absolute path) to the set of
callback keys registered for them.
"""
const user_callbacks_by_file = Dict{String, Set{Any}}()
"""
Revise.user_callbacks_by_key
Global variable, maps callback keys to user hooks.
"""
const user_callbacks_by_key = Dict{Any, Any}()
"""
key = Revise.add_callback(f, files, modules=nothing; key=gensym())
Add a user-specified callback, to be executed during the first run of
`revise()` after a file in `files` or a module in `modules` is changed on the
file system. If `all` is set to `true`, also execute the callback whenever any
file already monitored by Revise changes. In an interactive session like the
REPL, Juno or Jupyter, this means the callback executes immediately before
executing a new command / cell.
You can use the return value `key` to remove the callback later
(`Revise.remove_callback`) or to update it using another call
to `Revise.add_callback` with `key=key`.
"""
function add_callback(f, files, modules=nothing; all=false, key=gensym())
    fix_trailing(path) = isdir(path) ? joinpath(path, "") : path # insert a trailing '/' if missing, see https://github.com/timholy/Revise.jl/issues/470#issuecomment-633298553
    # Re-registering under an existing key replaces the previous callback
    remove_callback(key)
    files = map(fix_trailing, map(abspath, files))
    init_watching(files)
    # in case the `all` kwarg was set:
    # add all files which are already known to Revise
    if all
        for pkgdata in values(pkgdatas)
            append!(files, joinpath.(Ref(basedir(pkgdata)), srcfiles(pkgdata)))
        end
    end
    if modules !== nothing
        for mod in modules
            track(mod) # Potentially needed for modules like e.g. Base
            id = PkgId(mod)
            pkgdata = pkgdatas[id]
            # Register every source file belonging to the module's package
            for file in srcfiles(pkgdata)
                absname = joinpath(basedir(pkgdata), file)
                push!(files, absname)
            end
        end
    end
    # There might be duplicate entries in `files`, but it shouldn't cause any
    # problem with the sort of things we do here
    for file in files
        cb = get!(Set, user_callbacks_by_file, file)
        push!(cb, key)
    end
    user_callbacks_by_key[key] = f
    return key
end
"""
Revise.remove_callback(key)
Remove a callback previously installed by a call to `Revise.add_callback(...)`.
See its docstring for details.
"""
function remove_callback(key)
    # Drop the key from every per-file registration, from the pending queue,
    # and from the key -> hook map.
    foreach(registered -> delete!(registered, key), values(user_callbacks_by_file))
    delete!(user_callbacks_queue, key)
    delete!(user_callbacks_by_key, key)
    # We deliberately keep watching these files: Julia has no ergonomic task
    # cancellation yet (see
    # https://github.com/JuliaLang/Juleps/blob/master/StructuredConcurrency.md),
    # so tearing down the background watchers here isn't practical. In
    # pathological cases this may exhaust inotify resources.
    nothing
end
# Run the user hooks whose keys are queued in `keys`. Errors from the hooks
# are either rethrown (`throw=true`) or logged as a warning; the queue is
# emptied in both cases.
function process_user_callbacks!(keys = user_callbacks_queue; throw=false)
    try
        # use (a)sync so any exceptions get nicely collected into CompositeException
        @sync for key in keys
            f = user_callbacks_by_key[key]
            # invokelatest: the hook may have been redefined since registration
            @async Base.invokelatest(f)
        end
    catch err
        if throw
            rethrow(err)
        else
            @warn "[Revise] Ignoring callback errors" err
        end
    finally
        empty!(keys)
    end
end
"""
entr(f, files; all=false, postpone=false, pause=0.02)
entr(f, files, modules; all=false, postpone=false, pause=0.02)
Execute `f()` whenever files or directories listed in `files`, or code in `modules`, updates.
If `all` is `true`, also execute `f()` as soon as code updates are detected in
any module tracked by Revise.
`entr` will process updates (and block your command line) until you press Ctrl-C.
Unless `postpone` is `true`, `f()` will be executed also when calling `entr`,
regardless of file changes. The `pause` is the period (in seconds) that `entr`
will wait between being triggered and actually calling `f()`, to handle
clusters of modifications, such as those produced by saving files in certain
text editors.
# Example
```julia
entr(["/tmp/watched.txt"], [Pkg1, Pkg2]) do
println("update")
end
```
This will print "update" every time `"/tmp/watched.txt"` or any of the code defining
`Pkg1` or `Pkg2` gets updated.
"""
function entr(f::Function, files, modules=nothing; all=false, postpone=false, pause=0.02)
    yield()  # NOTE(review): presumably lets pending tasks run before the initial f() — confirm intent
    postpone || f()
    # Register f to run on changes; `pause` debounces clustered save events
    key = add_callback(files, modules; all=all) do
        sleep(pause)
        f()
    end
    try
        # Block until Ctrl-C, processing revisions as files change
        while true
            wait(revision_event)
            revise(throw=true)
        end
    catch err
        # Ctrl-C is the normal way to leave entr; anything else propagates
        isa(err, InterruptException) || rethrow(err)
    finally
        remove_callback(key)
    end
    nothing
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 2433 | """
repo, repo_path = git_repo(path::AbstractString)
Return the `repo::LibGit2.GitRepo` containing the file or directory `path`.
`path` does not necessarily need to be the top-level directory of the
repository. Also returns the `repo_path` of the top-level directory for the
repository.
"""
# Walk up the directory tree from `path` until a `.git` entry is found,
# returning the opened `LibGit2.GitRepo` and the repository's top-level
# directory. If the walk reaches the filesystem root without finding one,
# return `(nothing, root)`.
function git_repo(path::AbstractString)
    dir = isfile(path) ? dirname(path) : path
    while true
        if ispath(joinpath(dir, ".git"))
            return LibGit2.GitRepo(dir), dir
        end
        parent = dirname(dir)
        # dirname is a fixed point at the filesystem root
        parent == dir && return nothing, dir
        dir = parent
    end
end
# Obtain the GitTree for `commit` (default HEAD); "<rev>^{tree}" is git
# revision syntax for "the tree object of this commit".
function git_tree(repo::LibGit2.GitRepo, commit="HEAD")
    return LibGit2.GitTree(repo, "$commit^{tree}")
end
# Convenience: locate the repository containing `path` first.
function git_tree(path::AbstractString, commit="HEAD")
    repo, _ = git_repo(path)
    return git_tree(repo, commit)
end
"""
files = git_files(repo)
Return the list of files checked into `repo`.
"""
function git_files(repo::LibGit2.GitRepo)
    # Include unmodified entries so the full checked-in file list is returned,
    # not just changed files
    status = LibGit2.GitStatus(repo;
        status_opts=LibGit2.StatusOptions(flags=LibGit2.Consts.STATUS_OPT_INCLUDE_UNMODIFIED))
    files = String[]
    for i = 1:length(status)
        e = status[i]
        # StatusEntry holds raw libgit2 pointers; load the head-to-index diff
        # delta and copy out its path
        dd = unsafe_load(e.head_to_index)
        push!(files, unsafe_string(dd.new_file.path))
    end
    return files
end
# List a tree's tracked files via its owning repository.
Base.keys(tree::LibGit2.GitTree) = git_files(tree.owner)
"""
Revise.git_source(file::AbstractString, reference)
Read the source-text for `file` from a git commit `reference`.
The reference may be a string, Symbol, or `LibGit2.Tree`.
# Example:
Revise.git_source("/path/to/myfile.jl", "HEAD")
Revise.git_source("/path/to/myfile.jl", :abcd1234) # by commit SHA
"""
function git_source(file::AbstractString, reference)
    fullfile = abspath(file)
    tree = git_tree(fullfile, reference)
    # git uses Unix-style paths even on Windows
    filepath = replace(relpath(fullfile, LibGit2.path(tree.owner)),
        Base.Filesystem.path_separator_re=>'/')
    return git_source(filepath, tree)
end
# Read `file`'s blob content from `tree`; a missing blob means the file is
# not checked in at that reference.
function git_source(file::AbstractString, tree::LibGit2.GitTree)
    local blob
    blob = tree[file]
    if blob === nothing
        # assume empty tree when tracking new files
        src = ""
    else
        src = LibGit2.content(blob)
    end
    return src
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 4080 | const badfile = (nothing, nothing, nothing, UInt128(0))
"""
    cachepath, includes, requirements, buildid = pkg_fileinfo(id::PkgId)

Extract precompile-cache metadata for package `id`. Returns `badfile`
(`(nothing, nothing, nothing, UInt128(0))`) whenever `id` has no origin, no
cache file, an invalid/unparsable cache header, or a cache that does not
actually provide `id`.
"""
function pkg_fileinfo(id::PkgId)
    origin = get(Base.pkgorigins, id, nothing)
    origin === nothing && return badfile
    cachepath = origin.cachepath
    cachepath === nothing && return badfile
    local checksum
    provides, includes_requires, required_modules = try
        ret = @static if VERSION ≥ v"1.11.0-DEV.683" # https://github.com/JuliaLang/julia/pull/49866
            io = open(cachepath, "r")
            checksum = Base.isvalid_cache_header(io)
            iszero(checksum) && (close(io); return badfile)
            provides, (_, includes_srcfiles_only, requires), required_modules, _... =
                Base.parse_cache_header(io, cachepath)
            close(io)
            provides, (includes_srcfiles_only, requires), required_modules
        else
            checksum = UInt64(0) # Buildid prior to v"1.12.0-DEV.764", and the `srcfiles_only` API does not take `io`
            Base.parse_cache_header(cachepath, srcfiles_only = true)
        end
        ret
    catch
        # A corrupt or incompatible cache header is treated as "no cache"
        return badfile
    end
    includes, _ = includes_requires
    # The header can list several provided packages; match ours by uuid + name
    for (pkgid, buildid) in provides
        if pkgid.uuid === id.uuid && pkgid.name == id.name
            return cachepath, includes, first.(required_modules), (UInt128(checksum) << 64 | buildid)
        end
    end
    # Fix: previously the function fell off the loop here and implicitly
    # returned `nothing`, but callers destructure a 4-tuple — return `badfile`
    # when the cache does not provide `id`.
    return badfile
end
"""
    pkgdata = parse_pkg_files(id::PkgId)

Collect per-file metadata for package `id`: from its precompile cache when
compiled modules are in use and a cache is available, otherwise from the
files recorded by the `include` callbacks.
"""
function parse_pkg_files(id::PkgId)
    pkgdata = get(pkgdatas, id, nothing)
    if pkgdata === nothing
        pkgdata = PkgData(id)
    end
    modsym = Symbol(id.name)  # NOTE(review): appears unused below — confirm before removing
    if use_compiled_modules()
        cachefile, includes, reqs, buildid = pkg_fileinfo(id)
        if cachefile !== nothing
            @assert includes !== nothing
            @assert reqs !== nothing
            pkgdata.requirements = reqs
            for chi in includes
                # Resolve the module this file belongs to, preferring the
                # exact precompiled copy when it is recorded
                if isdefined(Base, :loaded_precompiles) && haskey(Base.loaded_precompiles, id => buildid)
                    mod = Base.loaded_precompiles[id => buildid]
                else
                    mod = Base.root_module(id)
                end
                # Walk down the submodule path recorded in the cache header
                for mpath in chi.modpath
                    mod = getfield(mod, Symbol(mpath))::Module
                end
                fname = relpath(chi.filename, pkgdata)
                # For precompiled packages, we can read the source later (whenever we need it)
                # from the *.ji cachefile.
                push!(pkgdata, fname=>FileInfo(mod, cachefile))
            end
            CodeTracking._pkgfiles[id] = pkgdata.info
            return pkgdata
        end
    end
    # Non-precompiled package(s). Here we rely on the `include` callbacks to have
    # already populated `included_files`; all we have to do is collect the relevant
    # files.
    # To reduce compiler latency, use runtime dispatch for `queue_includes!`.
    # `queue_includes!` requires compilation of the whole parsing/expression-splitting infrastructure,
    # and it's better to wait to compile it until we actually need it.
    Base.invokelatest(queue_includes!, pkgdata, id)
    return pkgdata
end
"""
    parentfile, included_files = modulefiles(mod::Module)

Return the root source file of `mod` (the file defining its `eval` method)
and the files it includes. Returns `(nothing, nothing)` when the information
is unavailable (non-precompiled packages).
"""
function modulefiles(mod::Module)
    # Map a raw filename to its canonical key in Revise's bookkeeping
    function keypath(filename)
        filename = fixpath(filename)
        return get(src_file_key, filename, filename)
    end
    # The file that defines `eval` is taken to be the module's parent file
    parentfile = String(first(methods(getfield(mod, :eval))).file)
    id = PkgId(mod)
    if id.name == "Base" || Symbol(id.name) ∈ stdlib_names
        # Base/stdlib: translate build-machine paths to the local installation
        parentfile = normpath(Base.find_source_file(parentfile))
        if !startswith(parentfile, juliadir)
            parentfile = replace(parentfile, fallback_juliadir()=>juliadir)
        end
        filedata = Base._included_files
        included_files = filter(mf->mf[1] == mod, filedata)
        return keypath(parentfile), [keypath(mf[2]) for mf in included_files]
    end
    use_compiled_modules() || return nothing, nothing # FIXME: support non-precompiled packages
    _, filedata, reqs = pkg_fileinfo(id)
    filedata === nothing && return nothing, nothing
    # Keep only the include entries belonging to this package
    included_files = filter(mf->mf.id == id, filedata)
    return keypath(parentfile), [keypath(mf.filename) for mf in included_files]
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 5015 | using Base.CoreLogging
using Base.CoreLogging: Info, Debug
# One captured log event; the fields mirror the positional arguments of
# `CoreLogging.handle_message`, plus the keyword pairs.
struct LogRecord
    level
    message
    group
    id
    file
    line
    kwargs
end
# Convenience: build from positional args, capturing keywords as `kwargs`.
LogRecord(args...; kwargs...) = LogRecord(args..., kwargs)
# Logger that accumulates Revise's own log events in memory for inspection.
mutable struct ReviseLogger <: AbstractLogger
    logs::Vector{LogRecord}
    min_level::LogLevel
end
ReviseLogger(; min_level=Info) = ReviseLogger(LogRecord[], min_level)
# Gate on the configured minimum level.
CoreLogging.min_enabled_level(logger::ReviseLogger) = logger.min_level
# Record only events originating from the Revise module itself.
CoreLogging.shouldlog(logger::ReviseLogger, level, _module, group, id) = _module == Revise
# Store every event; additionally echo Info-and-above to stderr, with special
# formatting for lowered-code evaluation failures.
function CoreLogging.handle_message(logger::ReviseLogger, level, msg, _module,
    group, id, file, line; kwargs...)
    rec = LogRecord(level, msg, group, id, file, line, kwargs)
    push!(logger.logs, rec)
    if level >= Info
        if group == "lowered" && haskey(kwargs, :mod) && haskey(kwargs, :ex) && haskey(kwargs, :exception)
            # Evaluation error: show the exception plus the offending
            # expression and its module
            ex, bt = kwargs[:exception]
            printstyled(stderr, msg; color=:red)
            print(stderr, "\n ")
            showerror(stderr, ex, bt; backtrace = bt!==nothing)
            println(stderr, "\nwhile evaluating\n", kwargs[:ex], "\nin module ", kwargs[:mod])
        else
            show(stderr, rec)
        end
    end
end
# Let exceptions raised while logging propagate rather than being swallowed.
CoreLogging.catch_exceptions(::ReviseLogger) = false
# Two rendering modes: `verbose=true` prints a `LogRecord(...)`-style dump of
# all fields; `verbose=false` prints a short user-facing summary. The
# `:exception` kwarg is extracted and rendered via `showerror` in both modes.
function Base.show(io::IO, l::LogRecord; verbose::Bool=true)
    if verbose
        print(io, LogRecord)
        print(io, '(', l.level, ", ", l.message, ", ", l.group, ", ", l.id, ", \"", l.file, "\", ", l.line)
    else
        printstyled(io, "Revise ", l.message, '\n'; color=Base.error_color())
    end
    exc = nothing
    if !isempty(l.kwargs)
        verbose && print(io, ", (")
        prefix = ""
        for (kw, val) in l.kwargs
            # :exception is held back and rendered separately below
            kw === :exception && (exc = val; continue)
            verbose && print(io, prefix, kw, "=", val)
            prefix = ", "
        end
        verbose && print(io, ')')
    end
    if exc !== nothing
        ex, bt = exc
        showerror(io, ex, bt; backtrace = bt!==nothing)
        verbose || println(io)
    end
    verbose && println(io, ')')
end
const _debug_logger = ReviseLogger()
"""
logger = Revise.debug_logger(; min_level=Debug)
Turn on [debug logging](https://docs.julialang.org/en/v1/stdlib/Logging/)
(if `min_level` is set to `Debug` or better) and return the logger object.
`logger.logs` contains a list of the logged events. The items in this list are of type `Revise.LogRecord`,
with the following relevant fields:
- `group`: the event category. Revise currently uses the following groups:
+ "Action": a change was implemented, of type described in the `message` field.
+ "Parsing": a "significant" event in parsing. For these, examine the `message` field
for more information.
+ "Watching": an indication that Revise determined that a particular file needed to be
examined for possible code changes. This is typically done on the basis of `mtime`,
the modification time of the file, and does not necessarily indicate that there were
any changes.
- `message`: a string containing more information. Some examples:
+ For entries in the "Action" group, `message` can be `"Eval"` when modifying
old methods or defining new ones, "DeleteMethod" when deleting a method,
and "LineOffset" to indicate that the line offset for a method
was updated (the last only affects the printing of stacktraces upon error,
it does not change how code runs)
+ Items with group "Parsing" and message "Diff" contain sets `:newexprs` and `:oldexprs`
that contain the expression unique to post- or pre-revision, respectively.
- `kwargs`: a pairs list of any other data. This is usually specific to particular `group`/`message`
combinations.
See also [`Revise.actions`](@ref) and [`Revise.diffs`](@ref).
"""
function debug_logger(; min_level=Debug)
    # Reconfigure the shared singleton and hand it back to the caller.
    logger = _debug_logger
    logger.min_level = min_level
    return logger
end
"""
actions(logger; line=false)
Return a vector of all log events in the "Action" group. "LineOffset" events are returned
only if `line=true`; by default the returned items are the events that modified
methods in your session.
"""
function actions(logger::ReviseLogger; line=false)
    # Keep "Action" events; "LineOffset" entries are included only on request.
    wanted(rec) = rec.group == "Action" && (line || rec.message != "LineOffset")
    return filter(wanted, logger.logs)
end
"""
diffs(logger)
Return a vector of all log events that encode a (non-empty) diff between two versions of a file.
"""
function diffs(logger::ReviseLogger)
    # A diff event comes from the "Parsing" group with message "Diff" and must
    # carry at least one changed expression on either side.
    isdiff(rec) = rec.message == "Diff" && rec.group == "Parsing" &&
                  (!isempty(rec.kwargs[:newexprs]) || !isempty(rec.kwargs[:oldexprs]))
    return filter(isdiff, logger.logs)
end
## Make the logs portable
"""
MethodSummary(method)
Create a portable summary of a method. In particular, a MethodSummary can be saved to a JLD2 file.
"""
struct MethodSummary
    name::Symbol
    modulename::Symbol   # name of the module in which the method was defined
    file::Symbol
    line::Int32
    sig::Type            # the method's signature type
end
# Build a summary from a live Method; `nameof` turns the module into a serializable Symbol.
MethodSummary(m::Method) = MethodSummary(m.name, nameof(m.module), m.file, m.line, m.sig)
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 25459 | ## Analyzing lowered code
"""
    add_docexpr!(docexprs, mod::Module, ex)

Append the documentation expression `ex` to the collection stored for `mod` in
`docexprs`, creating an empty collection (of the dict's value type `V`) on first
use. Returns `docexprs`.
"""
function add_docexpr!(docexprs::AbstractDict{Module,V}, mod::Module, ex) where V
    # `get!` performs a single hash lookup instead of the get/setindex!/push!
    # triple lookup; `V` is callable as a zero-arg constructor for the default.
    push!(get!(V, docexprs, mod), ex)
    return docexprs
end
# Record `value` as the SSA result of the frame's current statement, mimicking
# what the interpreter would store had it executed that statement itself.
function assign_this!(frame, value)
    frame.framedata.ssavalues[frame.pc] = value
end
# This defines the API needed to store signatures using methods_by_execution!
# This default version is simple and only used for testing purposes.
# The "real" one is CodeTrackingMethodInfo in Revise.jl.
const MethodInfo = IdDict{Type,LineNumberNode}
# Record that signature `sig` was defined at line `ln`.
add_signature!(methodinfo::MethodInfo, @nospecialize(sig), ln) = push!(methodinfo, sig=>ln)
# The remaining hooks are no-ops in this default implementation.
push_expr!(methodinfo::MethodInfo, mod::Module, ex::Expr) = methodinfo
pop_expr!(methodinfo::MethodInfo) = methodinfo
add_dependencies!(methodinfo::MethodInfo, be::CodeEdges, src, isrequired) = methodinfo
add_includes!(methodinfo::MethodInfo, mod::Module, filename) = methodinfo
"""
    is_some_include(f) -> Bool

Return `true` if the callee `f` looks like an `include` function: a `Symbol` or
`GlobalRef` named `:include`, or (possibly wrapped in a `QuoteNode`) a `Function`
that is identical to the `include` binding of its defining module.
"""
function is_some_include(@nospecialize(f))
    isa(f, GlobalRef) && return f.name === :include
    isa(f, Symbol) && return f === :include
    if isa(f, QuoteNode)
        f = f.value
    end
    if isa(f, Function)
        mod = Base.typename(typeof(f)).module
        if isdefined(mod, :include)
            # `getglobal` exists on newer Julia; fall back to `getfield` otherwise
            inc = @isdefined(getglobal) ? getglobal(mod, :include) : getfield(mod, :include)
            return f === inc
        end
    end
    return false
end
# This is not generally used, see `is_method_or_eval` instead
# This is not generally used, see `is_method_or_eval` instead
# Returns `(isrequired, haseval)`: whether `stmt` contains a "tracked" expression
# (type-body/supertype setup calls, `include` calls, nested thunks containing one,
# or one of the `heads` tracked by LoweredCodeUtils), and whether it calls `eval`.
function hastrackedexpr(stmt; heads=LoweredCodeUtils.trackedheads)
    haseval = false
    if isa(stmt, Expr)
        haseval = matches_eval(stmt)
        if stmt.head === :call
            f = stmt.args[1]
            callee_matches(f, Core, :_typebody!) && return true, haseval
            callee_matches(f, Core, :_setsuper!) && return true, haseval
            is_some_include(f) && return true, haseval
        elseif stmt.head === :thunk
            # recurse into nested lowered code
            any(s->any(hastrackedexpr(s; heads=heads)), (stmt.args[1]::Core.CodeInfo).code) && return true, haseval
        elseif stmt.head ∈ heads
            return true, haseval
        end
    end
    return false, haseval
end
# Return `true` if `stmt` is a call to `eval` in any of its syntactic guises:
# a bare `:eval` symbol, `Base.getproperty(..., :eval)` access, a `GlobalRef`
# named `eval`, or a direct reference to `Core.eval`.
function matches_eval(stmt::Expr)
    stmt.head === :call || return false
    f = stmt.args[1]
    return f === :eval ||
           (callee_matches(f, Base, :getproperty) && is_quotenode_egal(stmt.args[end], :eval)) ||
           (isa(f, GlobalRef) && f.name === :eval) || is_quotenode_egal(f, Core.eval)
end
"""
    categorize_stmt(stmt) -> ismeth, haseval, isinclude, isnamespace, istoplevel

Classify a lowered statement by the roles relevant to signature extraction:
method definition, `eval` call, `include` call, namespace operation
(`export`/`import`/`using`), or nested `:toplevel` block. Non-`Expr` statements
yield all-`false`.
"""
function categorize_stmt(@nospecialize(stmt))
    ismeth = haseval = isinclude = isnamespace = istoplevel = false
    if isa(stmt, Expr)
        head = stmt.head
        haseval = matches_eval(stmt)
        # a thunk whose body is just a method definition also counts (issue #792)
        ismeth = head === :method || (head === :thunk && defines_function(only(stmt.args)))
        istoplevel = head === :toplevel
        isnamespace = head === :export || head === :import || head === :using
        isinclude = head === :call && is_some_include(stmt.args[1])
    end
    return ismeth, haseval, isinclude, isnamespace, istoplevel
end
# Check for thunks that define functions (fixes #792)
# Check for thunks that define functions (fixes #792): a CodeInfo whose single
# statement returns a `:method` expression.
function defines_function(@nospecialize(ci))
    ci isa CodeInfo || return false
    code = ci.code
    length(code) == 1 || return false
    stmt = code[1]
    return stmt isa Core.ReturnNode && isexpr(stmt.val, :method)
end
"""
isrequired, evalassign = minimal_evaluation!([predicate,] methodinfo, src::Core.CodeInfo, mode::Symbol)
Mark required statements in `src`: `isrequired[i]` is `true` if `src.code[i]` should be evaluated.
Statements are analyzed by `isreq, haseval = predicate(stmt)`, and `predicate` defaults
to `Revise.is_method_or_eval`.
`haseval` is true if the statement came from `@eval` or `eval(...)` call.
Since the contents of such expression are difficult to analyze, it is generally
safest to execute all such evals.
"""
function minimal_evaluation!(@nospecialize(predicate), methodinfo, mod::Module, src::Core.CodeInfo, mode::Symbol)
    edges = CodeEdges(mod, src)
    # LoweredCodeUtils.print_with_code(stdout, src, edges)
    isrequired = fill(false, length(src.code))
    # tracks `const`-declared globals and whether an assignment to them was seen
    namedconstassigned = Dict{GlobalRef,Bool}()
    evalassign = false
    for (i, stmt) in enumerate(src.code)
        if !isrequired[i]
            isrequired[i], haseval = predicate(stmt)::Tuple{Bool,Bool}
            if haseval # line `i` may be the equivalent of `f = Core.eval`, so...
                isrequired[edges.succs[i]] .= true # ...require each stmt that calls `eval` via `f(expr)`
                isrequired[i] = true
            end
        end
        if isexpr(stmt, :const)
            name = stmt.args[1]
            if isa(name, Symbol)
                name = GlobalRef(mod, name)
            end
            namedconstassigned[name::GlobalRef] = false
        elseif LoweredCodeUtils.is_assignment_like(stmt)
            lhs = (stmt::Expr).args[1]
            if isa(lhs, Symbol)
                lhs = GlobalRef(mod, lhs)
            end
            if isa(lhs, GlobalRef)
                if haskey(namedconstassigned, lhs)
                    namedconstassigned[lhs] = true
                end
            end
            if mode === :evalassign
                evalassign = isrequired[i] = true
                if isa(lhs, GlobalRef)
                    isrequired[edges.byname[lhs].succs] .= true # mark any `const` statements or other "uses" in this block
                end
            end
        end
    end
    if mode === :sigs
        for (name, isassigned) in namedconstassigned
            isassigned || continue
            if isdefined(name.mod, name.name)
                empty!(edges.byname[name].succs) # avoid redefining `consts` in `:sigs` mode (fixes #789)
            end
        end
    end
    # Check for docstrings: lowering places the `Base.Docs.doc!` call second-to-last
    if length(src.code) > 1 && mode !== :sigs
        stmt = src.code[end-1]
        if isexpr(stmt, :call) && (stmt::Expr).args[1] === Base.Docs.doc!
            isrequired[end-1] = true
        end
    end
    # All tracked expressions are marked. Now add their dependencies.
    # LoweredCodeUtils.print_with_code(stdout, src, isrequired)
    lines_required!(isrequired, src, edges;)
                    # norequire=mode===:sigs ? LoweredCodeUtils.exclude_named_typedefs(src, edges) : ())
    # LoweredCodeUtils.print_with_code(stdout, src, isrequired)
    add_dependencies!(methodinfo, edges, src, isrequired)
    return isrequired, evalassign
end
# Frame-based overload: dispatch to the CodeInfo method using the frame's module
# and lowered source.
@noinline minimal_evaluation!(@nospecialize(predicate), methodinfo, frame::JuliaInterpreter.Frame, mode::Symbol) =
    minimal_evaluation!(predicate, methodinfo, moduleof(frame), frame.framecode.src, mode)
# Default predicate: method definitions, `include` calls, and `:toplevel` blocks
# are always required; namespace operations (`export`/`import`/`using`) are
# additionally required except in `:sigs` mode.
function minimal_evaluation!(methodinfo, frame::JuliaInterpreter.Frame, mode::Symbol)
    minimal_evaluation!(methodinfo, frame, mode) do @nospecialize(stmt)
        ismeth, haseval, isinclude, isnamespace, istoplevel = categorize_stmt(stmt)
        isreq = ismeth | isinclude | istoplevel
        return mode === :sigs ? (isreq, haseval) : (isreq | isnamespace, haseval)
    end
end
# Non-mutating entry point: allocate fresh caches, analyze `ex` in `mod`, and
# return `(methodinfo, docexprs, frame)`.
function methods_by_execution(mod::Module, ex::Expr; kwargs...)
    minfo = MethodInfo()
    dexprs = DocExprs()
    _, frame = methods_by_execution!(JuliaInterpreter.Compiled(), minfo, dexprs, mod, ex; kwargs...)
    return minfo, dexprs, frame
end
"""
methods_by_execution!(recurse=JuliaInterpreter.Compiled(), methodinfo, docexprs, mod::Module, ex::Expr;
mode=:eval, disablebp=true, skip_include=mode!==:eval, always_rethrow=false)
Evaluate or analyze `ex` in the context of `mod`.
Depending on the setting of `mode` (see the Extended help), it supports full evaluation or just the minimal
evaluation needed to extract method signatures.
`recurse` controls JuliaInterpreter's evaluation of any non-intercepted statement;
likely choices are `JuliaInterpreter.Compiled()` or `JuliaInterpreter.finish_and_return!`.
`methodinfo` is a cache for storing information about any method definitions (see [`CodeTrackingMethodInfo`](@ref)).
`docexprs` is a cache for storing documentation expressions; obtain an empty one with `Revise.DocExprs()`.
# Extended help
The action depends on `mode`:
- `:eval` evaluates the expression in `mod`, similar to `Core.eval(mod, ex)` except that `methodinfo` and `docexprs`
will be populated with information about any signatures or docstrings. This mode is used to implement `includet`.
- `:sigs` analyzes `ex` and extracts signatures of methods and docstrings (specifically, statements flagged by
[`Revise.minimal_evaluation!`](@ref)), but does not evaluate `ex` in the traditional sense.
It will selectively execute statements needed to form the signatures of defined methods.
It will also expand any `@eval`ed expressions, since these might contain method definitions.
- `:evalmeth` analyzes `ex` and extracts signatures and docstrings like `:sigs`, but takes the additional step of
evaluating any `:method` statements.
- `:evalassign` acts similarly to `:evalmeth`, and also evaluates assignment statements.
When selectively evaluating an expression, Revise will incorporate required dependencies, even for
minimal-evaluation modes like `:sigs`. For example, the method definition
max_values(T::Union{map(X -> Type{X}, Base.BitIntegerSmall_types)...}) = 1 << (8*sizeof(T))
found in `base/abstractset.jl` requires that it create the anonymous function in order to compute the
signature.
The other keyword arguments are more straightforward:
- `disablebp` controls whether JuliaInterpreter's breakpoints are disabled before stepping through the code.
They are restored on exit.
- `skip_include` prevents execution of `include` statements, instead inserting them into `methodinfo`'s
cache. This defaults to `true` unless `mode` is `:eval`.
- `always_rethrow`, if true, causes an error to be thrown if evaluating `ex` triggered an error.
If false, the error is logged with `@error`. `InterruptException`s are always rethrown.
This is primarily useful for debugging.
"""
function methods_by_execution!(@nospecialize(recurse), methodinfo, docexprs, mod::Module, ex::Expr;
                               mode::Symbol=:eval, disablebp::Bool=true, always_rethrow::Bool=false, kwargs...)
    mode ∈ (:sigs, :eval, :evalmeth, :evalassign) || error("unsupported mode ", mode)
    lwr = Meta.lower(mod, ex)
    isa(lwr, Expr) || return nothing, nothing
    if lwr.head === :error || lwr.head === :incomplete
        error("lowering returned an error, ", lwr)
    end
    if lwr.head !== :thunk
        # non-thunk lowered code (e.g. imports) carries no methods to analyze
        mode === :sigs && return nothing, nothing
        return Core.eval(mod, lwr), nothing
    end
    frame = JuliaInterpreter.Frame(mod, lwr.args[1]::CodeInfo)
    mode === :eval || LoweredCodeUtils.rename_framemethods!(recurse, frame)
    # Determine whether we need interpreted mode
    isrequired, evalassign = minimal_evaluation!(methodinfo, frame, mode)
    # LoweredCodeUtils.print_with_code(stdout, frame.framecode.src, isrequired)
    if !any(isrequired) && (mode===:eval || !evalassign)
        # We can evaluate the entire expression in compiled mode
        if mode===:eval
            ret = try
                Core.eval(mod, ex)
            catch err
                (always_rethrow || isa(err, InterruptException)) && rethrow(err)
                loc = location_string(whereis(frame))
                bt = trim_toplevel!(catch_backtrace())
                throw(ReviseEvalException(loc, err, Any[(sf, 1) for sf in stacktrace(bt)]))
            end
        else
            ret = nothing
        end
    else
        # Use the interpreter
        local active_bp_refs
        if disablebp
            # We have to turn off all active breakpoints, https://github.com/timholy/CodeTracking.jl/issues/27
            bp_refs = JuliaInterpreter.BreakpointRef[]
            for bp in JuliaInterpreter.breakpoints()
                append!(bp_refs, bp.instances)
            end
            active_bp_refs = filter(bp->bp[].isactive, bp_refs)
            foreach(disable, active_bp_refs)
        end
        ret = try
            methods_by_execution!(recurse, methodinfo, docexprs, frame, isrequired; mode=mode, kwargs...)
        catch err
            # re-enable breakpoints before rethrowing (only set when disablebp was true)
            (always_rethrow || isa(err, InterruptException)) && (disablebp && foreach(enable, active_bp_refs); rethrow(err))
            loc = location_string(whereis(frame))
            sfs = [] # crafted for interaction with Base.show_backtrace
            frame = JuliaInterpreter.leaf(frame)
            while frame !== nothing
                push!(sfs, (Base.StackTraces.StackFrame(frame), 1))
                frame = frame.caller
            end
            throw(ReviseEvalException(loc, err, sfs))
        end
        if disablebp
            foreach(enable, active_bp_refs)
        end
    end
    return ret, lwr
end
# Convenience overload: run non-intercepted statements in compiled mode.
methods_by_execution!(methodinfo, docexprs, mod::Module, ex::Expr; kwargs...) =
    methods_by_execution!(JuliaInterpreter.Compiled(), methodinfo, docexprs, mod, ex; kwargs...)
# Core interpreter loop: step through `frame`, executing only the statements marked
# in `isrequired` (subject to `mode`), intercepting method definitions, nested
# toplevel/thunk blocks, `eval` calls, `include` calls, and docstring registration.
function methods_by_execution!(@nospecialize(recurse), methodinfo, docexprs, frame::Frame, isrequired::AbstractVector{Bool}; mode::Symbol=:eval, skip_include::Bool=true)
    # accept a LineNumberNode as usable if either its line or file is meaningful
    isok(lnn::LineTypes) = !iszero(lnn.line) || lnn.file !== :none # might fail either one, but accept anything
    mod = moduleof(frame)
    # Hoist this lookup for performance. Don't throw even when `mod` is a baremodule:
    modinclude = isdefined(mod, :include) ? getfield(mod, :include) : nothing
    signatures = [] # temporary for method signature storage
    pc = frame.pc
    while true
        JuliaInterpreter.is_leaf(frame) || (@warn("not a leaf"); break)
        stmt = pc_expr(frame, pc)
        # skip statements that aren't required (unless mode forces their execution)
        if !isrequired[pc] && mode !== :eval && !(mode === :evalassign && LoweredCodeUtils.is_assignment_like(stmt))
            pc = next_or_nothing!(frame)
            pc === nothing && break
            continue
        end
        if isa(stmt, Expr)
            head = stmt.head
            if head === :toplevel
                # recurse into each contained expression
                local value
                for ex in stmt.args
                    ex isa Expr || continue
                    value = methods_by_execution!(recurse, methodinfo, docexprs, mod, ex; mode=mode, disablebp=false, skip_include=skip_include)
                end
                isassign(frame, pc) && assign_this!(frame, value)
                pc = next_or_nothing!(frame)
            elseif head === :thunk && defines_function(only(stmt.args))
                # thunk wrapping a lone method definition (issue #792)
                mode !== :sigs && Core.eval(mod, stmt)
                pc = next_or_nothing!(frame)
            # elseif head === :thunk && isanonymous_typedef(stmt.args[1])
            #     # Anonymous functions should just be defined anew, since there does not seem to be a practical
            #     # way to find them within the already-defined module.
            #     # They may be needed to define later signatures.
            #     # Note that named inner methods don't require special treatment.
            #     pc = step_expr!(recurse, frame, stmt, true)
            elseif head === :method
                empty!(signatures)
                ret = methoddef!(recurse, signatures, frame, stmt, pc; define=mode!==:sigs)
                if ret === nothing
                    # This was just `function foo end` or similar.
                    # However, it might have been followed by a thunk that defined a
                    # method (issue #435), so we still need to check for additions.
                    if !isempty(signatures)
                        loc = whereis(frame.framecode, pc)
                        if loc !== nothing
                            file, line = loc
                            lnn = LineNumberNode(Int(line), Symbol(file))
                            for sig in signatures
                                add_signature!(methodinfo, sig, lnn)
                            end
                        end
                    end
                    pc = next_or_nothing!(frame)
                else
                    pc, pc3 = ret
                    # Get the line number from the body
                    stmt3 = pc_expr(frame, pc3)::Expr
                    lnn = nothing
                    if line_is_decl
                        sigcode = @lookup(frame, stmt3.args[2])::Core.SimpleVector
                        lnn = sigcode[end]
                        if !isa(lnn, LineNumberNode)
                            lnn = nothing
                        end
                    end
                    if lnn === nothing
                        bodycode = stmt3.args[end]
                        if !isa(bodycode, CodeInfo)
                            bodycode = @lookup(frame, bodycode)
                        end
                        if isa(bodycode, CodeInfo)
                            lnn = linetable(bodycode, 1)
                            if !isok(lnn)
                                lnn = nothing
                                if length(bodycode.code) > 1
                                    # This may be a kwarg method. Mimic LoweredCodeUtils.bodymethod,
                                    # except without having a method
                                    stmt = bodycode.code[end-1]
                                    if isa(stmt, Expr) && length(stmt.args) > 1
                                        stmt = stmt::Expr
                                        a = stmt.args[1]
                                        nargs = length(stmt.args)
                                        hasself = let stmt = stmt, slotnames::Vector{Symbol} = bodycode.slotnames
                                            any(i->LoweredCodeUtils.is_self_call(stmt, slotnames, i), 2:nargs)
                                        end
                                        if isa(a, Core.SlotNumber)
                                            a = bodycode.slotnames[a.id]
                                        end
                                        if hasself && (isa(a, Symbol) || isa(a, GlobalRef))
                                            thismod, thisname = isa(a, Symbol) ? (mod, a) : (a.mod, a.name)
                                            if isdefined(thismod, thisname)
                                                f = getfield(thismod, thisname)
                                                mths = methods(f)
                                                if length(mths) == 1
                                                    mth = first(mths)
                                                    lnn = LineNumberNode(Int(mth.line), mth.file)
                                                end
                                            end
                                        end
                                    end
                                end
                                if lnn === nothing
                                    # Just try to find *any* line number
                                    for lnntmp in linetable(bodycode)
                                        lnntmp = lnntmp::LineTypes
                                        if isok(lnntmp)
                                            lnn = lnntmp
                                            break
                                        end
                                    end
                                end
                            end
                        elseif isexpr(bodycode, :lambda)
                            bodycode = bodycode::Expr
                            lnntmp = bodycode.args[end][1]::LineTypes
                            if isok(lnntmp)
                                lnn = lnntmp
                            end
                        end
                    end
                    if lnn === nothing
                        # fall back on scanning the frame's line table backwards
                        i = codelocs(frame, pc3)
                        while i > 0
                            lnntmp = linetable(frame, i)
                            if isok(lnntmp)
                                lnn = lnntmp
                                break
                            end
                            i -= 1
                        end
                    end
                    if lnn !== nothing && isok(lnn)
                        for sig in signatures
                            add_signature!(methodinfo, sig, lnn)
                        end
                    end
                end
            elseif LoweredCodeUtils.is_assignment_like(stmt)
                # If we're here, either isrequired[pc] is true, or the mode forces us to eval assignments
                pc = step_expr!(recurse, frame, stmt, true)
            elseif head === :call
                f = @lookup(frame, stmt.args[1])
                if f === Core.eval
                    # an @eval or eval block: this may contain method definitions, so intercept it.
                    evalmod = @lookup(frame, stmt.args[2])::Module
                    evalex = @lookup(frame, stmt.args[3])
                    value = nothing
                    for (newmod, newex) in ExprSplitter(evalmod, evalex)
                        if is_doc_expr(newex)
                            add_docexpr!(docexprs, newmod, newex)
                            newex = newex.args[4]
                        end
                        newex = unwrap(newex)
                        push_expr!(methodinfo, newmod, newex)
                        value = methods_by_execution!(recurse, methodinfo, docexprs, newmod, newex; mode=mode, skip_include=skip_include, disablebp=false)
                        pop_expr!(methodinfo)
                    end
                    assign_this!(frame, value)
                    pc = next_or_nothing!(frame)
                elseif skip_include && (f === modinclude || f === Core.include)
                    # include calls need to be managed carefully from several standpoints, including
                    # path management and parsing new expressions
                    if length(stmt.args) == 2
                        add_includes!(methodinfo, mod, @lookup(frame, stmt.args[2]))
                    else
                        error("include(mapexpr, path) is not supported") # TODO (issue #634)
                    end
                    assign_this!(frame, nothing) # FIXME: the file might return something different from `nothing`
                    pc = next_or_nothing!(frame)
                elseif skip_include && f === Base.include
                    if length(stmt.args) == 2
                        add_includes!(methodinfo, mod, @lookup(frame, stmt.args[2]))
                    else # either include(module, path) or include(mapexpr, path)
                        mod_or_mapexpr = @lookup(frame, stmt.args[2])
                        if isa(mod_or_mapexpr, Module)
                            add_includes!(methodinfo, mod_or_mapexpr, @lookup(frame, stmt.args[3]))
                        else
                            error("include(mapexpr, path) is not supported")
                        end
                    end
                    assign_this!(frame, nothing) # FIXME: the file might return something different from `nothing`
                    pc = next_or_nothing!(frame)
                elseif f === Base.Docs.doc! # && mode !== :eval
                    # register the docstring ourselves to avoid redefinition warnings
                    fargs = JuliaInterpreter.collect_args(recurse, frame, stmt)
                    popfirst!(fargs)
                    length(fargs) == 3 && push!(fargs, Union{}) # add the default sig
                    dmod::Module, b::Base.Docs.Binding, str::Base.Docs.DocStr, sig = fargs
                    if isdefined(b.mod, b.var)
                        tmpvar = getfield(b.mod, b.var)
                        if isa(tmpvar, Module)
                            dmod = tmpvar
                        end
                    end
                    # Workaround for julia#38819 on older Julia versions
                    if !isdefined(dmod, Base.Docs.META)
                        Base.Docs.initmeta(dmod)
                    end
                    m = get!(Base.Docs.meta(dmod), b, Base.Docs.MultiDoc())::Base.Docs.MultiDoc
                    if haskey(m.docs, sig)
                        currentstr = m.docs[sig]::Base.Docs.DocStr
                        redefine = currentstr.text != str.text
                    else
                        push!(m.order, sig)
                        redefine = true
                    end
                    # (Re)assign without the warning
                    if redefine
                        m.docs[sig] = str
                        str.data[:binding] = b
                        str.data[:typesig] = sig
                    end
                    assign_this!(frame, Base.Docs.doc(b, sig))
                    pc = next_or_nothing!(frame)
                else
                    # A :call Expr we don't want to intercept
                    pc = step_expr!(recurse, frame, stmt, true)
                end
            else
                # An Expr we don't want to intercept
                frame.pc = pc
                pc = step_expr!(recurse, frame, stmt, true)
            end
        else
            # A statement we don't want to intercept
            pc = step_expr!(recurse, frame, stmt, true)
        end
        pc === nothing && break
    end
    return isrequired[frame.pc] ? get_return(frame) : nothing
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 54023 | @eval Base.Experimental.@optlevel 1
using FileWatching, REPL, Distributed, UUIDs
import LibGit2
using Base: PkgId
using Base.Meta: isexpr
using Core: CodeInfo
export revise, includet, entr, MethodSummary
"""
Revise.watching_files[]
Returns `true` if we watch files rather than their containing directory.
FreeBSD and NFS-mounted systems should watch files, otherwise we prefer to watch
directories.
"""
const watching_files = Ref(Sys.KERNEL === :FreeBSD)
"""
Revise.polling_files[]
Returns `true` if we should poll the filesystem for changes to the files that define
loaded code. It is preferable to avoid polling, instead relying on operating system
notifications via `FileWatching.watch_file`. However, NFS-mounted
filesystems (and perhaps others) do not support file-watching, so for code stored
on such filesystems you should turn polling on.
See the documentation for the `JULIA_REVISE_POLL` environment variable.
"""
const polling_files = Ref(false)
# Block until `file` changes, using polling or OS file-watching depending on the
# `polling_files` setting. On Linux, translate inotify exhaustion (ENOSPC) into
# an actionable warning before rethrowing.
function wait_changed(file)
    watch = polling_files[] ? poll_file : watch_file
    try
        watch(file)
    catch err
        if Sys.islinux() && err isa Base.IOError && err.code == -28 # ENOSPC
            @warn """Your operating system has run out of inotify capacity.
            Check the current value with `cat /proc/sys/fs/inotify/max_user_watches`.
            Set it to a higher level with, e.g., `echo 65536 | sudo tee -a /proc/sys/fs/inotify/max_user_watches`.
            This requires having administrative privileges on your machine (or talk to your sysadmin).
            See https://github.com/timholy/Revise.jl/issues/26 for more information."""
        end
        rethrow(err)
    end
    return nothing
end
"""
Revise.tracking_Main_includes[]
Returns `true` if files directly included from the REPL should be tracked.
The default is `false`. See the documentation regarding the `JULIA_REVISE_INCLUDE`
environment variable to customize it.
"""
const tracking_Main_includes = Ref(false)
include("relocatable_exprs.jl")
include("types.jl")
include("utils.jl")
include("parsing.jl")
include("lowered.jl")
include("pkgs.jl")
include("git.jl")
include("recipes.jl")
include("logging.jl")
include("callbacks.jl")
### Globals to keep track of state
"""
Revise.watched_files
Global variable, `watched_files[dirname]` returns the collection of files in `dirname`
that we're monitoring for changes. The returned value has type [`Revise.WatchList`](@ref).
This variable allows us to watch directories rather than files, reducing the burden on
the OS.
"""
const watched_files = Dict{String,WatchList}()
"""
Revise.watched_manifests
Global variable, a set of `Manifest.toml` files from the active projects used during this session.
"""
const watched_manifests = Set{String}()
# Locks access to `revision_queue` to prevent race conditions, see issues #837 and #845
const revise_lock = ReentrantLock()
"""
Revise.revision_queue
Global variable, `revision_queue` holds `(pkgdata,filename)` pairs that we need to revise, meaning
that these files have changed since we last processed a revision.
This list gets populated by callbacks that watch directories for updates.
"""
const revision_queue = Set{Tuple{PkgData,String}}()
"""
Revise.queue_errors
Global variable, maps `(pkgdata, filename)` pairs that errored upon last revision to
`(exception, backtrace)`.
"""
const queue_errors = Dict{Tuple{PkgData,String},Tuple{Exception, Any}}()
"""
Revise.NOPACKAGE
Global variable; default `PkgId` used for files which do not belong to any
package, but still have to be watched because user callbacks have been
registered for them.
"""
const NOPACKAGE = PkgId(nothing, "")
"""
Revise.pkgdatas
`pkgdatas` is the core information that tracks the relationship between source code
and julia objects, and allows re-evaluation of code in the proper module scope.
It is a dictionary indexed by PkgId:
`pkgdatas[id]` returns a value of type [`Revise.PkgData`](@ref).
"""
const pkgdatas = Dict{PkgId,PkgData}(NOPACKAGE => PkgData(NOPACKAGE))
const moduledeps = Dict{Module,DepDict}()
"""
    get_depdict(mod::Module) -> DepDict

Return the dependency dictionary for `mod` from `moduledeps`, creating and
storing an empty `DepDict` on first access.
"""
function get_depdict(mod::Module)
    # `get!` performs a single hash lookup instead of haskey + getindex + setindex!.
    return get!(DepDict, moduledeps, mod)
end
"""
Revise.included_files
Global variable, `included_files` gets populated by callbacks we register with `include`.
It's used to track non-precompiled packages and, optionally, user scripts (see docs on
`JULIA_REVISE_INCLUDE`).
"""
const included_files = Tuple{Module,String}[] # (module, filename)
"""
Revise.basesrccache
Full path to the running Julia's cache of source code defining `Base`.
"""
const basesrccache = normpath(joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia", "base.cache"))
"""
Revise.basebuilddir
Julia's top-level directory when Julia was built, as recorded by the entries in
`Base._included_files`.
"""
const basebuilddir = begin
    # Recover the build tree root from the recorded path of sysimg.jl
    # (two levels up from base/sysimg.jl).
    sysimg = filter(x->endswith(x[2], "sysimg.jl"), Base._included_files)[1][2]
    dirname(dirname(sysimg))
end
# Locate a plausible Julia source root for binary installs: start at the shipped
# share/julia directory and walk upward until a directory containing `base` (or a
# nested `share/julia/base`) is found, stopping at the filesystem root.
function fallback_juliadir()
    candidate = joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia")
    if !isdir(joinpath(candidate, "base"))
        while true
            isdir(joinpath(candidate, "base")) && break
            if isdir(joinpath(candidate, "share", "julia", "base"))
                candidate = joinpath(candidate, "share", "julia")
                break
            end
            parent = dirname(candidate)
            parent == candidate && break   # reached the filesystem root
            candidate = parent
        end
    end
    return normpath(candidate)
end
"""
Revise.juliadir
Constant specifying full path to julia top-level source directory.
This should be reliable even for local builds, cross-builds, and binary installs.
"""
const juliadir = normpath(
if isdir(joinpath(basebuilddir, "base"))
basebuilddir
else
fallback_juliadir() # Binaries probably end up here. We fall back on Sys.BINDIR
end
)
const cache_file_key = Dict{String,String}() # corrected=>uncorrected filenames
const src_file_key = Dict{String,String}() # uncorrected=>corrected filenames
"""
Revise.dont_watch_pkgs
Global variable, use `push!(Revise.dont_watch_pkgs, :MyPackage)` to prevent Revise
from tracking changes to `MyPackage`. You can do this from the REPL or from your
`.julia/config/startup.jl` file.
See also [`Revise.silence`](@ref).
"""
const dont_watch_pkgs = Set{Symbol}()
const silence_pkgs = Set{Symbol}()
const depsdir = joinpath(dirname(@__DIR__), "deps")
const silencefile = Ref(joinpath(depsdir, "silence.txt")) # Ref so that tests don't clobber
##
## The inputs are sets of expressions found in each file.
## Some of those expressions will generate methods which are identified via their signatures.
## From "old" expressions we know their corresponding signatures, but from "new"
## expressions we have not yet computed them. This makes old and new asymmetric.
##
## Strategy:
## - For every old expr not found in the new ones,
## + delete the corresponding methods (using the signatures we've previously computed)
## + remove the sig entries from CodeTracking.method_info (")
## Best to do all the deletion first (across all files and modules) in case a method is
## simply being moved from one file to another.
## - For every new expr found among the old ones,
## + update the location info in CodeTracking.method_info
## - For every new expr not found in the old ones,
## + eval the expr
## + extract signatures
## + add to the ModuleExprsSigs
## + add to CodeTracking.method_info
##
## Interestingly, the ex=>sigs link may not be the same as the sigs=>ex link.
## Consider a conditional block,
## if Sys.islinux()
## f() = 1
## g() = 2
## else
## g() = 3
## end
## From the standpoint of Revise's diff-and-patch functionality, we should look for
## diffs in this entire block. (Really good backedge support---or a variant of `lower` that
## links back to the specific expression---might change this, but for
## now this is the right strategy.) From the standpoint of CodeTracking, we should
## link the signature to the actual method-defining expression (either :(f() = 1) or :(g() = 2)).
get_method_from_match(mm::Core.MethodMatch) = mm.method
# For every expression in `exs_sigs_old` that is absent from `exs_sigs_new`,
# delete the methods corresponding to its stored signatures (locally and on
# workers) and purge the associated CodeTracking/JuliaInterpreter caches.
# If a signature has multiple recorded definition locations, only the closest
# location entry is removed and the method itself is kept.
function delete_missing!(exs_sigs_old::ExprsSigs, exs_sigs_new)
    with_logger(_debug_logger) do
        for (ex, sigs) in exs_sigs_old
            haskey(exs_sigs_new, ex) && continue
            # ex was deleted
            sigs === nothing && continue
            for sig in sigs
                @static if VERSION ≥ v"1.10.0-DEV.873"
                    ret = Base._methods_by_ftype(sig, -1, Base.get_world_counter())
                else
                    ret = Base._methods_by_ftype(sig, -1, typemax(UInt))
                end
                success = false
                if !isempty(ret)
                    m = get_method_from_match(ret[end]) # the last method returned is the least-specific that matches, and thus most likely to be type-equal
                    methsig = m.sig
                    if sig <: methsig && methsig <: sig
                        locdefs = get(CodeTracking.method_info, sig, nothing)
                        if isa(locdefs, Vector{Tuple{LineNumberNode,Expr}})
                            if length(locdefs) > 1
                                # Just delete this reference but keep the method
                                line = firstline(ex)
                                ld = map(pr->linediff(line, pr[1]), locdefs)
                                idx = argmin(ld)
                                @assert ld[idx] < typemax(eltype(ld))
                                deleteat!(locdefs, idx)
                                continue
                            else
                                @assert length(locdefs) == 1
                            end
                        end
                        @debug "DeleteMethod" _group="Action" time=time() deltainfo=(sig, MethodSummary(m))
                        # Delete the corresponding methods
                        for p in workers()
                            try # guard against serialization errors if the type isn't defined on the worker
                                remotecall(Core.eval, p, Main, :(delete_method_by_sig($sig)))
                            catch
                            end
                        end
                        Base.delete_method(m)
                        # Remove the entries from CodeTracking data
                        delete!(CodeTracking.method_info, sig)
                        # Remove frame from JuliaInterpreter, if applicable. Otherwise debuggers
                        # may erroneously work with outdated code (265-like problems)
                        if haskey(JuliaInterpreter.framedict, m)
                            delete!(JuliaInterpreter.framedict, m)
                        end
                        if isdefined(m, :generator)
                            # defensively delete all generated functions
                            empty!(JuliaInterpreter.genframedict)
                        end
                        success = true
                    end
                end
                if !success
                    @debug "FailedDeletion" _group="Action" time=time() deltainfo=(sig,)
                end
            end
        end
    end
    return exs_sigs_old
end
const empty_exs_sigs = ExprsSigs()
# Module-level driver: apply expression-level `delete_missing!` for each module,
# treating modules absent from the new collection as having no expressions.
function delete_missing!(mod_exs_sigs_old::ModuleExprsSigs, mod_exs_sigs_new)
    for (mod, old) in mod_exs_sigs_old
        delete_missing!(old, get(mod_exs_sigs_new, mod, empty_exs_sigs))
    end
    return mod_exs_sigs_old
end
# Process one (possibly revised) expression `rex` against the old collection:
# if it is new, evaluate it (also broadcasting to workers) and collect its
# signatures/includes/dependencies; if it matches an old expression, only update
# the recorded line numbers of its method definitions in CodeTracking.
function eval_rex(rex::RelocatableExpr, exs_sigs_old::ExprsSigs, mod::Module; mode::Symbol=:eval)
    return with_logger(_debug_logger) do
        sigs, includes = nothing, nothing
        rexo = getkey(exs_sigs_old, rex, nothing)
        # extract the signatures and update the line info
        if rexo === nothing
            ex = rex.ex
            # ex is not present in old
            @debug "Eval" _group="Action" time=time() deltainfo=(mod, ex)
            sigs, deps, includes, thunk = eval_with_signatures(mod, ex; mode=mode) # All signatures defined by `ex`
            if !isexpr(thunk, :thunk)
                thunk = ex
            end
            if myid() == 1
                for p in workers()
                    p == myid() && continue
                    try # don't error if `mod` isn't defined on the worker
                        remotecall(Core.eval, p, mod, thunk)
                    catch
                    end
                end
            end
            storedeps(deps, rex, mod)
        else
            sigs = exs_sigs_old[rexo]
            # Update location info
            ln, lno = firstline(unwrap(rex)), firstline(unwrap(rexo))
            if sigs !== nothing && !isempty(sigs) && ln != lno
                ln, lno = ln::LineNumberNode, lno::LineNumberNode
                @debug "LineOffset" _group="Action" time=time() deltainfo=(sigs, lno=>ln)
                for sig in sigs
                    locdefs = CodeTracking.method_info[sig]::AbstractVector
                    ld = let lno=lno
                        map(pr->linediff(lno, pr[1]), locdefs)
                    end
                    # pick the stored definition closest to the old location
                    idx = argmin(ld)
                    if ld[idx] === typemax(eltype(ld))
                        # println("Missing linediff for $lno and $(first.(locdefs)) with ", rex.ex)
                        idx = length(locdefs)
                    end
                    methloc, methdef = locdefs[idx]
                    locdefs[idx] = (newloc(methloc, ln, lno), methdef)
                end
            end
        end
        return sigs, includes
    end
end
# These are typically bypassed in favor of expression-by-expression evaluation to
# allow handling of new `include` statements.
# Evaluate every expression in `exs_sigs_new` via `eval_rex` (which handles
# worker mirroring and location updates), recording the resulting signatures
# in-place and accumulating any `include`s that were encountered.
function eval_new!(exs_sigs_new::ExprsSigs, exs_sigs_old, mod::Module; mode::Symbol=:eval)
    includes = Vector{Pair{Module,String}}()
    for rex in keys(exs_sigs_new)
        sigs, newincludes = eval_rex(rex, exs_sigs_old, mod; mode=mode)
        sigs === nothing || (exs_sigs_new[rex] = sigs)
        newincludes === nothing || append!(includes, newincludes)
    end
    return exs_sigs_new, includes
end
# Module-level driver for `eval_new!`: evaluates each module's expressions
# against its old state and concatenates the `include`s found along the way.
function eval_new!(mod_exs_sigs_new::ModuleExprsSigs, mod_exs_sigs_old; mode::Symbol=:eval)
    includes = Vector{Pair{Module,String}}()
    for (mod, exs_sigs_new) in mod_exs_sigs_new
        # Allow packages to override the supplied mode. (The override deliberately
        # persists for later modules in iteration order, matching longstanding behavior.)
        if isdefined(mod, :__revise_mode__)
            mode = getfield(mod, :__revise_mode__)::Symbol
        end
        old_exsigs = get(mod_exs_sigs_old, mod, empty_exs_sigs)
        _, newincludes = eval_new!(exs_sigs_new, old_exsigs, mod; mode=mode)
        append!(includes, newincludes)
    end
    return mod_exs_sigs_new, includes
end
"""
CodeTrackingMethodInfo(ex::Expr)
Create a cache for storing information about method definitions.
Adding signatures to such an object inserts them into `CodeTracking.method_info`,
which maps signature Tuple-types to `(lnn::LineNumberNode, ex::Expr)` pairs.
Because method signatures are unique within a module, this is the foundation for
identifying methods in a manner independent of source-code location.
It also has the following fields:
- `exprstack`: used when descending into `@eval` statements (via `push_expr` and `pop_expr!`)
`ex` (used in creating the `CodeTrackingMethodInfo` object) is the first entry in the stack.
- `allsigs`: a list of all method signatures defined by a given expression
- `deps`: list of top-level named objects (`Symbol`s and `GlobalRef`s) that method definitions
in this block depend on. For example, `if Sys.iswindows() f() = 1 else f() = 2 end` would
store `Sys.iswindows` here.
- `includes`: a list of `module=>filename` for any `include` statements encountered while the
expression was parsed.
"""
struct CodeTrackingMethodInfo
    exprstack::Vector{Expr}                # stack of enclosing expressions (grows on `@eval` descent)
    allsigs::Vector{Any}                   # every method signature defined by the tracked expression
    deps::Set{Union{GlobalRef,Symbol}}     # top-level names the method definitions depend on
    includes::Vector{Pair{Module,String}}  # `module => filename` for `include`s seen during parsing
end
CodeTrackingMethodInfo(ex::Expr) = CodeTrackingMethodInfo([ex], Any[], Set{Union{GlobalRef,Symbol}}(), Pair{Module,String}[])
# Record signature `sig`, defined at line `ln` by the expression on top of the
# stack, both in `CodeTracking.method_info` and in `methodinfo.allsigs`.
# Duplicate (line, expression) entries are not stored twice.
function add_signature!(methodinfo::CodeTrackingMethodInfo, @nospecialize(sig), ln)
    locdefs = CodeTracking.invoked_get!(Vector{Tuple{LineNumberNode,Expr}}, CodeTracking.method_info, sig)
    newdef = unwrap(methodinfo.exprstack[end])
    newdef === nothing && return methodinfo
    rexnew = RelocatableExpr(newdef)
    already_stored = any(locdefs) do (defln, defex)
        defln == ln && isequal(RelocatableExpr(defex), rexnew)
    end
    already_stored || push!(locdefs, (fixpath(ln), newdef))
    push!(methodinfo.allsigs, sig)
    return methodinfo
end
# Track descent into nested `@eval` blocks: the expression stack records the
# chain of enclosing expressions so that signatures are attributed to the
# outermost (source-level) definition.
function push_expr!(methodinfo::CodeTrackingMethodInfo, mod::Module, ex::Expr)
    push!(methodinfo.exprstack, ex)
    return methodinfo
end
function pop_expr!(methodinfo::CodeTrackingMethodInfo)
    pop!(methodinfo.exprstack)
    return methodinfo
end
# Record top-level names that gate method definitions in `src`.
# This is a targeted hack for issue #249: when the lowered code begins with a
# conditional on a global name (e.g. `if Sys.iswindows() ... end`), and the
# guarded statements define tracked methods, that name becomes a dependency so
# the block is re-evaluated when the name's definition changes.
function add_dependencies!(methodinfo::CodeTrackingMethodInfo, edges::CodeEdges, src, musteval)
    isempty(src.code) && return methodinfo
    stmt1 = first(src.code)
    if isa(stmt1, Core.GotoIfNot)
        dep = stmt1.cond
        if isa(dep, Union{GlobalRef,Symbol})
            for (stmt, me) in zip(src.code, musteval)
                me || continue
                if hastrackedexpr(stmt)[1]
                    push!(methodinfo.deps, dep)
                    break
                end
            end
        end
    end
    return methodinfo
end
# Record that `mod` executed `include(filename)` while the current expression
# was being parsed/evaluated, so the included file can also be tracked.
function add_includes!(methodinfo::CodeTrackingMethodInfo, mod::Module, filename)
    push!(methodinfo.includes, mod=>filename)
    return methodinfo
end
# Eval and insert into CodeTracking data
# Evaluate `ex` in `mod` while collecting (via `CodeTrackingMethodInfo`) the
# method signatures it defines, the top-level names those definitions depend
# on, and any `include`s encountered.
# Returns `(allsigs, deps, includes, frame)`.
function eval_with_signatures(mod, ex::Expr; mode=:eval, kwargs...)
    methodinfo = CodeTrackingMethodInfo(ex)
    docexprs = DocExprs()
    ret = methods_by_execution!(finish_and_return!, methodinfo, docexprs, mod, ex; mode=mode, kwargs...)
    frame = ret[2]
    return methodinfo.allsigs, methodinfo.deps, methodinfo.includes, frame
end
# Fill in the signatures for every stored expression in `modexsigs`
# (by default without re-evaluating definitions, `mode=:sigs`), skipping
# docstring expressions, and register each expression's dependencies.
function instantiate_sigs!(modexsigs::ModuleExprsSigs; mode=:sigs, kwargs...)
    for (mod, exsigs) in modexsigs
        for rex in keys(exsigs)
            # Doc expressions define no methods of their own
            is_doc_expr(rex.ex) && continue
            signatures, dependencies, _ = eval_with_signatures(mod, rex.ex; mode=mode, kwargs...)
            exsigs[rex] = signatures
            storedeps(dependencies, rex, mod)
        end
    end
    return modexsigs
end
# For each dependency of `rex`, record that `(mod, rex)` must be reconsidered
# when that name changes. A `GlobalRef` is filed under its home module (only if
# that module is tracked); a plain `Symbol` is filed under `mod` itself.
function storedeps(deps, rex, mod)
    for dep in deps
        if isa(dep, GlobalRef)
            haskey(moduledeps, dep.mod) || continue
            ddict, sym = get_depdict(dep.mod), dep.name
        else
            ddict, sym = get_depdict(mod), dep
        end
        entries = get!(Set{DepDictVals}, ddict, sym)
        push!(entries, (mod, rex))
    end
    return rex
end
# This is intended for testing purposes, but not general use. The key problem is
# that it doesn't properly handle methods that move from one file to another; there is the
# risk you could end up deleting the method altogether depending on the order in which you
# process these.
# See `revise` for the proper approach.
# Testing-only helper: delete removed methods, evaluate the new definitions,
# then (re)compute signatures. Deletion must come first so stale methods cannot
# shadow new ones. Does not handle methods that migrate between files.
function eval_revised(mod_exs_sigs_new, mod_exs_sigs_old)
    delete_missing!(mod_exs_sigs_old, mod_exs_sigs_new)
    eval_new!(mod_exs_sigs_new, mod_exs_sigs_old) # note: drops `includes`
    instantiate_sigs!(mod_exs_sigs_new)
end
"""
Revise.init_watching(files)
Revise.init_watching(pkgdata::PkgData, files)
For every filename in `files`, monitor the filesystem for updates. When the file is
updated, either [`Revise.revise_dir_queued`](@ref) or [`Revise.revise_file_queued`](@ref) will
be called.
Use the `pkgdata` version if the files are supplied using relative paths.
"""
function init_watching(pkgdata::PkgData, files=srcfiles(pkgdata))
    # Directories whose (new) watch tasks still need to be started
    udirs = Set{String}()
    for file in files
        file = String(file)::String
        dir, basename = splitdir(file)
        dirfull = joinpath(basedir(pkgdata), dir)
        already_watching_dir = haskey(watched_files, dirfull)
        already_watching_dir || (watched_files[dirfull] = WatchList())
        watchlist = watched_files[dirfull]
        current_id = get(watchlist.trackedfiles, basename, nothing)
        new_id = pkgdata.info.id
        if new_id != NOPACKAGE || current_id === nothing
            # Allow the package id to be updated
            push!(watchlist, basename=>pkgdata)
            # Per-file watching (e.g. polling/BSD) spawns one task per file;
            # otherwise we watch whole directories, one task per new directory.
            if watching_files[]
                fwatcher = TaskThunk(revise_file_queued, (pkgdata, file))
                schedule(Task(fwatcher))
            else
                already_watching_dir || push!(udirs, dir)
            end
        end
    end
    # Start directory watchers for directories not previously watched
    for dir in udirs
        dirfull = joinpath(basedir(pkgdata), dir)
        updatetime!(watched_files[dirfull])
        if !watching_files[]
            dwatcher = TaskThunk(revise_dir_queued, (dirfull,))
            schedule(Task(dwatcher))
        end
    end
    return nothing
end
# Watch free-standing files (not belonging to any package) by filing them
# under the NOPACKAGE pseudo-package.
function init_watching(files)
    return init_watching(pkgdatas[NOPACKAGE], files)
end
"""
revise_dir_queued(dirname)
Wait for one or more of the files registered in `Revise.watched_files[dirname]` to be
modified, and then queue the corresponding files on [`Revise.revision_queue`](@ref).
This is generally called via a [`Revise.TaskThunk`](@ref).
"""
@noinline function revise_dir_queued(dirname)
    @assert isabspath(dirname)
    if !isdir(dirname)
        sleep(0.1) # in case git has done a delete/replace cycle
    end
    stillwatching = true
    while stillwatching
        if !isdir(dirname)
            with_logger(SimpleLogger(stderr)) do
                @warn "$dirname is not an existing directory, Revise is not watching"
            end
            break
        end
        latestfiles, stillwatching = watch_files_via_dir(dirname) # will block here until file(s) change
        for (file, id) in latestfiles
            key = joinpath(dirname, file)
            # Queue any user callbacks registered for this file
            if key in keys(user_callbacks_by_file)
                union!(user_callbacks_queue, user_callbacks_by_file[key])
                notify(revision_event)
            end
            # Files belonging to a tracked package go on the revision queue
            if id != NOPACKAGE
                pkgdata = pkgdatas[id]
                lock(revise_lock) do
                    if hasfile(pkgdata, key) # issue #228
                        push!(revision_queue, (pkgdata, relpath(key, pkgdata)))
                        notify(revision_event)
                    end
                end
            end
        end
    end
    return
end
# See #66.
"""
revise_file_queued(pkgdata::PkgData, filename)
Wait for modifications to `filename`, and then queue the corresponding files on [`Revise.revision_queue`](@ref).
This is generally called via a [`Revise.TaskThunk`](@ref).
This is used only on platforms (like BSD) which cannot use [`Revise.revise_dir_queued`](@ref).
"""
function revise_file_queued(pkgdata::PkgData, file)
    if !isabspath(file)
        file = joinpath(basedir(pkgdata), file)
    end
    if !file_exists(file)
        sleep(0.1) # in case git has done a delete/replace cycle
    end
    dirfull, basename = splitdir(file)
    stillwatching = true
    while stillwatching
        if !file_exists(file) && !isdir(file)
            let file=file
                with_logger(SimpleLogger(stderr)) do
                    @warn "$file is not an existing file, Revise is not watching"
                end
            end
            notify(revision_event)
            break
        end
        try
            wait_changed(file) # will block here until the file changes
        catch e
            # issue #459
            (isa(e, InterruptException) && throwto_repl(e)) || throw(e)
        end
        # Queue any user callbacks registered for this file
        if file in keys(user_callbacks_by_file)
            union!(user_callbacks_queue, user_callbacks_by_file[file])
            notify(revision_event)
        end
        # Check to see if we're still watching this file
        stillwatching = haskey(watched_files, dirfull)
        # Files belonging to a tracked package go on the revision queue
        if PkgId(pkgdata) != NOPACKAGE
            lock(revise_lock) do
                push!(revision_queue, (pkgdata, relpath(file, pkgdata)))
            end
        end
    end
    return
end
# Because we delete first, we have to make sure we've parsed the file
# Diff `file` against its previously-evaluated state and delete any methods
# whose definitions have disappeared. Returns `(mexsnew, mexsold)`, the parsed
# new state (or `nothing` on parse failure) and the prior state.
function handle_deletions(pkgdata, file)
    # Ensure the previously-evaluated state of `file` is available for diffing
    fi = maybe_parse_from_cache!(pkgdata, file)
    maybe_extract_sigs!(fi)
    mexsold = fi.modexsigs
    idx = fileindex(pkgdata, file)
    filep = pkgdata.info.files[idx]
    if isa(filep, AbstractString)
        if file ≠ "."
            filep = normpath(basedir(pkgdata), file)
        else
            filep = normpath(basedir(pkgdata))
        end
    end
    topmod = first(keys(mexsold))
    fileok = file_exists(String(filep)::String)
    # A deleted file parses to an empty map, so all its methods get deleted below
    mexsnew = fileok ? parse_source(filep, topmod) : ModuleExprsSigs(topmod)
    if mexsnew !== nothing
        delete_missing!(mexsold, mexsnew)
    end
    if !fileok
        @warn("$filep no longer exists, deleted all methods")
        # Stop tracking the vanished file
        deleteat!(pkgdata.fileinfos, idx)
        deleteat!(pkgdata.info.files, idx)
        wl = get(watched_files, basedir(pkgdata), nothing)
        if isa(wl, WatchList)
            delete!(wl.trackedfiles, file)
        end
    end
    return mexsnew, mexsold
end
"""
Revise.revise_file_now(pkgdata::PkgData, file)
Process revisions to `file`. This parses `file` and computes an expression-level diff
between the current state of the file and its most recently evaluated state.
It then deletes any removed methods and re-evaluates any changed expressions.
Note that generally it is better to use [`revise`](@ref) as it properly handles methods
that move from one file to another.
`id` must be a key in [`Revise.pkgdatas`](@ref), and `file` a key in
`Revise.pkgdatas[id].fileinfos`.
"""
function revise_file_now(pkgdata::PkgData, file)
    # `file` is a path relative to `basedir(pkgdata)` (see the docstring)
    i = fileindex(pkgdata, file)
    if i === nothing
        println("Revise is currently tracking the following files in $(PkgId(pkgdata)): ", srcfiles(pkgdata))
        error(file, " is not currently being tracked.")
    end
    # Delete removed methods first, then evaluate the changed expressions
    mexsnew, mexsold = handle_deletions(pkgdata, file)
    # NOTE: use `!==` for `nothing` comparisons (egal check), consistent with
    # the rest of this file; `handle_deletions` returns `nothing` on parse failure
    if mexsnew !== nothing
        _, includes = eval_new!(mexsnew, mexsold)
        fi = fileinfo(pkgdata, i)
        pkgdata.fileinfos[i] = FileInfo(mexsnew, fi)
        # Newly-discovered `include`s must also be tracked (and evaluated now)
        maybe_add_includes_to_pkgdata!(pkgdata, file, includes; eval_now=true)
    end
    nothing
end
"""
Revise.errors()
Report the errors represented in [`Revise.queue_errors`](@ref).
Errors are automatically reported the first time they are encountered, but this function
can be used to report errors again.
"""
# Report each queued revision error. Each `(pkgdata, file)` pair is reported
# at most once even if it appears multiple times in `revision_errors`.
function errors(revision_errors=keys(queue_errors))
    reported = Set{eltype(revision_errors)}()
    for item in revision_errors
        item ∈ reported && continue
        push!(reported, item)
        pkgdata, file = item
        err, bt = queue_errors[(pkgdata, file)]
        fullpath = joinpath(basedir(pkgdata), file)
        if isa(err, ReviseEvalException)
            # Revise-evaluated errors carry their own location; no backtrace needed
            @error "Failed to revise $fullpath" exception=err
        else
            @error "Failed to revise $fullpath" exception=(err, trim_toplevel!(bt))
        end
    end
end
"""
Revise.retry()
Attempt to perform previously-failed revisions. This can be useful in cases of order-dependent errors.
"""
# Re-queue every file that previously failed to revise, then run `revise()`.
function retry()
    lock(revise_lock) do
        for key in keys(queue_errors)
            push!(revision_queue, key)
        end
    end
    revise()
end
"""
revise(; throw=false)
`eval` any changes in the revision queue. See [`Revise.revision_queue`](@ref).
If `throw` is `true`, throw any errors that occur during revision or callback;
otherwise these are only logged.
"""
function revise(; throw=false)
    sleep(0.01) # in case the file system isn't quite done writing out the new files
    lock(revise_lock) do
        have_queue_errors = !isempty(queue_errors)
        # Do all the deletion first. This ensures that a method that moved from one file to another
        # won't get redefined first and deleted second.
        revision_errors = Tuple{PkgData,String}[]
        queue = sort!(collect(revision_queue); lt=pkgfileless)
        finished = eltype(revision_queue)[]
        mexsnews = ModuleExprsSigs[]
        interrupt = false
        # Phase 1: parse each queued file and delete vanished methods
        for (pkgdata, file) in queue
            try
                push!(mexsnews, handle_deletions(pkgdata, file)[1])
                push!(finished, (pkgdata, file))
            catch err
                throw && Base.throw(err)
                interrupt |= isa(err, InterruptException)
                push!(revision_errors, (pkgdata, file))
                queue_errors[(pkgdata, file)] = (err, catch_backtrace())
            end
        end
        # Do the evaluation
        for ((pkgdata, file), mexsnew) in zip(finished, mexsnews)
            defaultmode = PkgId(pkgdata).name == "Main" ? :evalmeth : :eval
            i = fileindex(pkgdata, file)
            i === nothing && continue # file was deleted by `handle_deletions`
            fi = fileinfo(pkgdata, i)
            modsremaining = Set(keys(mexsnew))
            changed, err = true, nothing
            # Fixed-point iteration over modules: a module that fails (e.g. due
            # to cross-module evaluation-order dependencies) is retried as long
            # as some other module succeeded in the previous sweep.
            while changed
                changed = false
                for (mod, exsnew) in mexsnew
                    mod ∈ modsremaining || continue
                    try
                        mode = defaultmode
                        # Allow packages to override the supplied mode
                        if isdefined(mod, :__revise_mode__)
                            mode = getfield(mod, :__revise_mode__)::Symbol
                        end
                        mode ∈ (:sigs, :eval, :evalmeth, :evalassign) || error("unsupported mode ", mode)
                        exsold = get(fi.modexsigs, mod, empty_exs_sigs)
                        for rex in keys(exsnew)
                            sigs, includes = eval_rex(rex, exsold, mod; mode=mode)
                            if sigs !== nothing
                                exsnew[rex] = sigs
                            end
                            if includes !== nothing
                                maybe_add_includes_to_pkgdata!(pkgdata, file, includes; eval_now=true)
                            end
                        end
                        delete!(modsremaining, mod)
                        changed = true
                    catch _err
                        err = _err
                    end
                end
            end
            # Only commit the new state if every module evaluated cleanly
            if isempty(modsremaining)
                pkgdata.fileinfos[i] = FileInfo(mexsnew, fi)
                delete!(queue_errors, (pkgdata, file))
            else
                throw && Base.throw(err)
                interrupt |= isa(err, InterruptException)
                push!(revision_errors, (pkgdata, file))
                queue_errors[(pkgdata, file)] = (err, catch_backtrace())
            end
        end
        # On interrupt, keep errored files queued so they are retried next time
        if interrupt
            for pkgfile in finished
                haskey(queue_errors, pkgfile) || delete!(revision_queue, pkgfile)
            end
        else
            empty!(revision_queue)
        end
        errors(revision_errors)
        if !isempty(queue_errors)
            if !have_queue_errors # only print on the first time errors occur
                io = IOBuffer()
                println(io, "\n") # better here than in the triple-quoted literal, see https://github.com/JuliaLang/julia/issues/34105
                for (pkgdata, file) in keys(queue_errors)
                    println(io, " ", joinpath(basedir(pkgdata), file))
                end
                str = String(take!(io))
                @warn """The running code does not match the saved version for the following files:$str
                If the error was due to evaluation order, it can sometimes be resolved by calling `Revise.retry()`.
                Use Revise.errors() to report errors again. Only the first error in each file is shown.
                Your prompt color may be yellow until the errors are resolved."""
                maybe_set_prompt_color(:warn)
            end
        else
            maybe_set_prompt_color(:ok)
        end
        tracking_Main_includes[] && queue_includes(Main)
        process_user_callbacks!(throw=throw)
    end
    nothing
end
# REPL-backend hook form; the backend argument is accepted for callback
# compatibility but otherwise unused.
function revise(backend::REPL.REPLBackend)
    return revise()
end
"""
revise(mod::Module; force::Bool=true)
Revise all files that define `mod`.
If `force=true`, reevaluate every definition in `mod`, whether it was changed or not. This is useful
to propagate an updated macro definition, or to force recompiling generated functions.
Be warned, however, that this invalidates all the compiled code in your session that depends on `mod`,
and can lead to long recompilation times.
"""
function revise(mod::Module; force::Bool=true)
    mod == Main && error("cannot revise(Main)")
    id = PkgId(mod)
    pkgdata = pkgdatas[id]
    # Queue every file of the package and process any pending changes
    for file in pkgdata.info.files
        push!(revision_queue, (pkgdata, file))
    end
    revise()
    force || return true
    # With `force`, additionally re-evaluate every stored definition, skipping
    # `include` calls (which would re-run whole-file loading)
    for (i, file) in enumerate(srcfiles(pkgdata))
        fi = fileinfo(pkgdata, i)
        for (mod, exsigs) in fi.modexsigs
            for def in keys(exsigs)
                ex = def.ex
                exuw = unwrap(ex)
                isexpr(exuw, :call) && is_some_include(exuw.args[1]) && continue
                try
                    Core.eval(mod, ex)
                catch err
                    # Surface which module/expression failed before rethrowing
                    @show mod
                    display(ex)
                    rethrow(err)
                end
            end
        end
    end
    return true # fixme try/catch?
end
"""
Revise.track(mod::Module, file::AbstractString)
Revise.track(file::AbstractString)
Watch `file` for updates and [`revise`](@ref) loaded code with any
changes. `mod` is the module into which `file` is evaluated; if omitted,
it defaults to `Main`.
If this produces many errors, check that you specified `mod` correctly.
"""
function track(mod::Module, file; mode=:sigs, kwargs...)
    isfile(file) || error(file, " is not a file")
    # Determine whether we're already tracking this file
    id = Base.moduleroot(mod) == Main ? PkgId(mod, string(mod)) : PkgId(mod) # see #689 for `Main`
    if haskey(pkgdatas, id)
        pkgdata = pkgdatas[id]
        relfile = relpath(abspath(file), pkgdata)
        hasfile(pkgdata, relfile) && return nothing
        # Use any "fixes" provided by relpath
        file = joinpath(basedir(pkgdata), relfile)
    else
        # Check whether `track` was called via a @require. Ref issue #403 & #431.
        st = stacktrace(backtrace())
        if any(sf->sf.func === :listenpkg && endswith(String(sf.file), "require.jl"), st)
            nameof(mod) === :Plots || Base.depwarn("[email protected] or higher automatically handles `include` statements in `@require` expressions.\nPlease do not call `Revise.track` from such blocks.", :track)
            return nothing
        end
        file = abspath(file)
    end
    # Set up tracking
    fm = parse_source(file, mod; mode=mode)
    if fm !== nothing
        if mode === :includet
            mode = :sigs # we already handled evaluation in `parse_source`
        end
        instantiate_sigs!(fm; mode=mode, kwargs...)
        if !haskey(pkgdatas, id)
            # Wait a bit to see if `mod` gets initialized
            sleep(0.1)
        end
        pkgdata = get(pkgdatas, id, nothing)
        if pkgdata === nothing
            pkgdata = PkgData(id, pathof(mod))
        end
        if !haskey(CodeTracking._pkgfiles, id)
            CodeTracking._pkgfiles[id] = pkgdata.info
        end
        # Register the file's parsed state and start watching it for changes
        push!(pkgdata, relpath(file, pkgdata)=>FileInfo(fm))
        init_watching(pkgdata, (String(file)::String,))
        pkgdatas[id] = pkgdata
    end
    return nothing
end
# Track `file` into `Main`. Files inside the Julia installation must instead be
# tracked via `Revise.track(Base)` or `Revise.track(<stdlib module>)`.
function track(file; kwargs...)
    if startswith(file, juliadir)
        error("use Revise.track(Base) or Revise.track(<stdlib module>)")
    end
    return track(Main, file; kwargs...)
end
"""
includet(filename)
Load `filename` and track future changes. `includet` is intended for quick "user scripts"; larger or more
established projects are encouraged to put the code in one or more packages loaded with `using`
or `import` instead of using `includet`. See https://timholy.github.io/Revise.jl/stable/cookbook/
for tips about setting up the package workflow.
By default, `includet` only tracks modifications to *methods*, not *data*. See the extended help for details.
Note that this differs from packages, which evaluate all changes by default.
This default behavior can be overridden; see [Configuring the revise mode](@ref).
# Extended help
## Behavior and justification for the default revision mode (`:evalmeth`)
`includet` uses a default `__revise_mode__ = :evalmeth`. The consequence is that if you change
```
a = [1]
f() = 1
```
to
```
a = [2]
f() = 2
```
then Revise will update `f` but not `a`.
This is the default choice for `includet` because such files typically mix method definitions and data-handling.
Data often has many untracked dependencies; later in the same file you might `push!(a, 22)`, but Revise cannot
determine whether you wish it to re-run that line after redefining `a`.
Consequently, the safest default choice is to leave the user in charge of data.
## Workflow tips
If you have a series of computations that you want to run when you redefine your methods, consider separating
your method definitions from your computations:
- method definitions go in a package, or a file that you `includet` *once*
- the computations go in a separate file, that you re-`include` (no "t" at the end) each time you want to rerun
your computations.
This can be automated using [`entr`](@ref).
## Internals
`includet` is essentially shorthand for
Revise.track(Main, filename; mode=:includet, skip_include=true)
Do *not* use `includet` for packages, as those should be handled by `using` or `import`.
If `using` and `import` aren't working, you may have packages in a non-standard location;
try fixing it with something like `push!(LOAD_PATH, "/path/to/my/private/repos")`.
(If you're working with code in Base or one of Julia's standard libraries, use
`Revise.track(mod)` instead, where `mod` is the module.)
`includet` is deliberately non-recursive, so if `filename` loads any other files,
they will not be automatically tracked.
(See [`Revise.track`](@ref) to set it up manually.)
"""
function includet(mod::Module, file)
    prev = Base.source_path(nothing)
    # Resolve `file` relative to the including file, mirroring `include`
    file = prev === nothing ? abspath(file) : normpath(joinpath(dirname(prev), file))
    tls = task_local_storage()
    tls[:SOURCE_PATH] = file
    # Restore the task-local source path to its pre-call state
    restore_source_path() =
        prev === nothing ? delete!(tls, :SOURCE_PATH) : (tls[:SOURCE_PATH] = prev)
    try
        track(mod, file; mode=:includet, skip_include=true)
        restore_source_path()
    catch err
        restore_source_path()
        if isa(err, ReviseEvalException)
            # Print a friendly error without blaming Revise internals
            printstyled(stderr, "ERROR: "; color=Base.error_color())
            showerror(stderr, err; blame_revise=false)
            println(stderr, "\nin expression starting at ", err.loc)
        else
            throw(err)
        end
    end
    return nothing
end
# Convenience method: load and track into `Main`.
function includet(file)
    return includet(Main, file)
end
"""
Revise.silence(pkg)
Silence warnings about not tracking changes to package `pkg`.
"""
function silence(pkg::Symbol)
    push!(silence_pkgs, pkg)
    # Persist the silenced set so it survives across Julia sessions
    isdir(depsdir) || mkpath(depsdir)
    open(silencefile[], "w") do io
        foreach(p -> println(io, p), silence_pkgs)
    end
    nothing
end
silence(pkg::AbstractString) = silence(Symbol(pkg))
## Utilities
"""
success = get_def(method::Method)
As needed, load the source file necessary for extracting the code defining `method`.
The source-file defining `method` must be tracked.
If it is in Base, this will execute `track(Base)` if necessary.
This is a callback function used by `CodeTracking.jl`'s `definition`.
"""
function get_def(method::Method; modified_files=revision_queue)
    yield() # magic bug fix for the OSX test failures. TODO: figure out why this works (prob. Julia bug)
    if method.file === :none && String(method.name)[1] == '#'
        # This is likely to be a kwarg method, try to find something with location info
        method = bodymethod(method)
    end
    filename = fixpath(String(method.file))
    if startswith(filename, "REPL[")
        # Method defined at the REPL: parse the corresponding history entry
        isdefined(Base, :active_repl) || return false
        fi = add_definitions_from_repl(filename)
        hassig = false
        for (mod, exs) in fi.modexsigs
            for sigs in values(exs)
                hassig |= !isempty(sigs)
            end
        end
        return hassig
    end
    # Start tracking the method's package if needed
    id = get_tracked_id(method.module; modified_files=modified_files)
    id === nothing && return false
    pkgdata = pkgdatas[id]
    filename = relpath(filename, pkgdata)
    if hasfile(pkgdata, filename)
        def = get_def(method, pkgdata, filename)
        def !== nothing && return true
    end
    # Lookup can fail for macro-defined methods, see https://github.com/JuliaLang/julia/issues/31197
    # We need to find the right file.
    if method.module == Base || method.module == Core || method.module == Core.Compiler
        @warn "skipping $method to avoid parsing too much code"
        CodeTracking.invoked_setindex!(CodeTracking.method_info, method.sig, missing)
        return false
    end
    # Try the module's entry-point file and all files it includes
    parentfile, included_files = modulefiles(method.module)
    if parentfile !== nothing
        def = get_def(method, pkgdata, relpath(parentfile, pkgdata))
        def !== nothing && return true
        for modulefile in included_files
            def = get_def(method, pkgdata, relpath(modulefile, pkgdata))
            def !== nothing && return true
        end
    end
    # As a last resort, try every file in the package
    for file in srcfiles(pkgdata)
        def = get_def(method, pkgdata, file)
        def !== nothing && return true
    end
    @warn "$(method.sig) was not found"
    # So that we don't call it again, store missingness info in CodeTracking
    CodeTracking.invoked_setindex!(CodeTracking.method_info, method.sig, missing)
    return false
end
# Ensure `filename`'s signatures have been parsed and extracted, then look up
# the stored definition for `method` (or `nothing` if still unknown).
function get_def(method, pkgdata, filename)
    fi = maybe_parse_from_cache!(pkgdata, filename)
    maybe_extract_sigs!(fi)
    return get(CodeTracking.method_info, method.sig, nothing)
end
# Return `id` once it is tracked, starting tracking for Base/stdlib packages
# on demand; returns `nothing` if tracking is impossible (Core) or failed.
function get_tracked_id(id::PkgId; modified_files=revision_queue)
    haskey(pkgdatas, id) && return id
    # Methods from Base or the stdlibs may require that we start tracking
    recipe = id.name === "Compiler" ? :Compiler : Symbol(id.name)
    recipe === :Core && return nothing
    _track(id, recipe; modified_files=modified_files)
    @info "tracking $recipe"
    if !haskey(pkgdatas, id)
        @warn "despite tracking $recipe, $id was not found"
        return nothing
    end
    return id
end
# Convenience method accepting a module rather than a PkgId.
function get_tracked_id(mod::Module; modified_files=revision_queue)
    return get_tracked_id(PkgId(mod); modified_files=modified_files)
end
# Callback for CodeTracking: return the module=>expressions=>signatures map
# for `filename` in package `id`, starting tracking if necessary.
function get_expressions(id::PkgId, filename)
    get_tracked_id(id)
    fi = maybe_parse_from_cache!(pkgdatas[id], filename)
    maybe_extract_sigs!(fi)
    return fi.modexsigs
end
# Parse a single REPL history entry (a filename of the form "REPL[n]") and
# register its expressions/signatures under the synthetic "@REPL" package.
function add_definitions_from_repl(filename::String)
    histidx = parse(Int, filename[6:end-1]) # strip the "REPL[" prefix and trailing "]"
    repl = Base.active_repl::REPL.LineEditREPL
    hp = repl.interface.modes[1].hist::REPL.REPLHistoryProvider
    src = hp.history[hp.start_idx+histidx]
    id = PkgId(nothing, "@REPL")
    pkgdata = pkgdatas[id]
    mexs = ModuleExprsSigs(Main::Module)
    parse_source!(mexs, src, filename, Main::Module)
    instantiate_sigs!(mexs)
    fi = FileInfo(mexs)
    push!(pkgdata, filename=>fi)
    return fi
end
# Narrow any AbstractString to String so the method above can assume `String`.
add_definitions_from_repl(filename::AbstractString) = add_definitions_from_repl(convert(String, filename)::String)
# Rewrite the line numbers in `trace` in-place so they reflect the current
# (revised) source locations. Entries may be bare StackFrames or
# `(frame, nrep)` pairs produced by backtrace repetition-compression.
function update_stacktrace_lineno!(trace)
    local nrep
    for i = 1:length(trace)
        t = trace[i]
        # Unwrap `(frame, nrep)` pairs, remembering the repetition count
        has_nrep = !isa(t, StackTraces.StackFrame)
        if has_nrep
            t, nrep = t
        end
        t = t::StackTraces.StackFrame
        if t.linfo isa Core.MethodInstance
            m = t.linfo.def
            sigt = m.sig
            # Why not just call `whereis`? Because that forces tracking. This is being
            # clever by recognizing that these entries exist only if there have been updates.
            updated = get(CodeTracking.method_info, sigt, nothing)
            if updated !== nothing
                lnn = updated[1][1] # choose the first entry by default
                lineoffset = lnn.line - m.line
                t = StackTraces.StackFrame(t.func, lnn.file, t.line+lineoffset, t.linfo, t.from_c, t.inlined, t.pointer)
                trace[i] = has_nrep ? (t, nrep) : t
            end
        end
    end
    return trace
end
# Return `(file, line)` for `method`, preferring the updated location stored by
# Revise. Why not just call `whereis`? Because that forces tracking; entries in
# `CodeTracking.method_info` exist only if there have been updates.
function method_location(method::Method)
    updated = get(CodeTracking.method_info, method.sig, nothing)
    updated === nothing && return method.file, method.line
    lnn = updated[1][1]
    return lnn.file, lnn.line
end
# Set the prompt color to indicate the presence of unhandled revision errors
const original_repl_prefix = Ref{Union{String,Function,Nothing}}(nothing)
# Turn the REPL prompt prefix yellow while unresolved revision errors exist
# (`color === :warn`), and restore the user's saved prefix otherwise.
function maybe_set_prompt_color(color)
    isdefined(Base, :active_repl) || return nothing
    repl = Base.active_repl
    isa(repl, REPL.LineEditREPL) || return nothing
    if color === :warn
        # Save the user's original prefix (only once) before overriding it
        if original_repl_prefix[] === nothing
            original_repl_prefix[] = repl.mistate.current_mode.prompt_prefix
        end
        repl.mistate.current_mode.prompt_prefix = "\e[33m" # yellow
    else
        saved = original_repl_prefix[]
        saved === nothing && return nothing
        repl.mistate.current_mode.prompt_prefix = saved
        original_repl_prefix[] = nothing
    end
    return nothing
end
# `revise_first` gets called by the REPL prior to executing the next command (by having been pushed
# onto the `ast_transform` list).
# This uses invokelatest not for reasons of world age but to ensure that the call is made at runtime.
# This allows `revise_first` to be compiled without compiling `revise` itself, and greatly
# reduces the overhead of using Revise.
# AST transform applied to each REPL input: wrap `ex` so that any queued
# revisions are processed (at runtime, via `invokelatest`) before `ex` runs.
function revise_first(ex)
    # Special-case `exit()` (issue #562)
    if isa(ex, Expr)
        exu = unwrap(ex)
        isa(exu, Expr) && exu.head === :call && length(exu.args) == 1 && exu.args[1] === :exit && return ex
    end
    # Check for queued revisions, and if so call `revise` first before executing the expression
    return Expr(:toplevel, :($isempty($revision_queue) || $(Base.invokelatest)($revise)), ex)
end
# Deprecated shims: REPL-backend "stealing" is no longer needed. These remain
# only to warn users whose startup.jl still calls them.
steal_repl_backend(args...) = @warn "`steal_repl_backend` has been removed from Revise, please update your `~/.julia/config/startup.jl`.\nSee https://timholy.github.io/Revise.jl/stable/config/"
wait_steal_repl_backend() = steal_repl_backend()
async_steal_repl_backend() = steal_repl_backend()
"""
Revise.init_worker(p)
Define methods on worker `p` that Revise needs in order to perform revisions on `p`.
Revise itself does not need to be running on `p`.
"""
function init_worker(p)
    # Define, in Main on worker `p`, the helpers Revise invokes via `remotecall`
    # when deleting methods (see the worker loop in `delete_missing!`).
    # `whichtt` finds the method whose signature is type-equal to `sig`.
    remotecall(Core.eval, p, Main, quote
        function whichtt(@nospecialize sig)
            @static if VERSION ≥ v"1.10.0-DEV.873"
                ret = Base._methods_by_ftype(sig, -1, Base.get_world_counter())
            else
                ret = Base._methods_by_ftype(sig, -1, typemax(UInt))
            end
            isempty(ret) && return nothing
            m = ret[end][3]::Method # the last method returned is the least-specific that matches, and thus most likely to be type-equal
            methsig = m.sig
            (sig <: methsig && methsig <: sig) || return nothing
            return m
        end
        function delete_method_by_sig(@nospecialize sig)
            m = whichtt(sig)
            isa(m, Method) && Base.delete_method(m)
        end
    end)
end
# True once the REPL's backend exists, so ast-transform hooks can be installed on it.
function active_repl_backend_available()
    return isdefined(Base, :active_repl_backend) && Base.active_repl_backend !== nothing
end
function __init__()
ccall(:jl_generating_output, Cint, ()) == 1 && return nothing
run_on_worker = get(ENV, "JULIA_REVISE_WORKER_ONLY", "0")
if !(myid() == 1 || run_on_worker == "1")
return nothing
end
# Check Julia paths (issue #601)
if !isdir(juliadir)
major, minor = Base.VERSION.major, Base.VERSION.minor
@warn """Expected non-existent $juliadir to be your Julia directory.
Certain functionality will be disabled.
To fix this, try deleting Revise's cache files in ~/.julia/compiled/v$major.$minor/Revise, then restart Julia and load Revise.
If this doesn't fix the problem, please report an issue at https://github.com/timholy/Revise.jl/issues."""
end
if isfile(silencefile[])
pkgs = readlines(silencefile[])
for pkg in pkgs
push!(silence_pkgs, Symbol(pkg))
end
end
polling = get(ENV, "JULIA_REVISE_POLL", "0")
if polling == "1"
polling_files[] = watching_files[] = true
end
rev_include = get(ENV, "JULIA_REVISE_INCLUDE", "0")
if rev_include == "1"
tracking_Main_includes[] = true
end
# Correct line numbers for code moving around
Base.update_stackframes_callback[] = update_stacktrace_lineno!
if isdefined(Base, :methodloc_callback)
Base.methodloc_callback[] = method_location
end
# Add `includet` to the compiled_modules (fixes #302)
for m in methods(includet)
push!(JuliaInterpreter.compiled_methods, m)
end
# Set up a repository for methods defined at the REPL
id = PkgId(nothing, "@REPL")
pkgdatas[id] = pkgdata = PkgData(id, nothing)
# Set the lookup callbacks
CodeTracking.method_lookup_callback[] = get_def
CodeTracking.expressions_callback[] = get_expressions
# Watch the manifest file for changes
mfile = manifest_file()
if mfile !== nothing
push!(watched_manifests, mfile)
wmthunk = TaskThunk(watch_manifest, (mfile,))
schedule(Task(wmthunk))
end
push!(Base.include_callbacks, watch_includes)
push!(Base.package_callbacks, watch_package_callback)
mode = get(ENV, "JULIA_REVISE", "auto")
if mode == "auto"
if isdefined(Main, :IJulia)
Main.IJulia.push_preexecute_hook(revise)
else
pushfirst!(REPL.repl_ast_transforms, revise_first)
# #664: once a REPL is started, it no longer interacts with REPL.repl_ast_transforms
if active_repl_backend_available()
push!(Base.active_repl_backend.ast_transforms, revise_first)
else
# wait for active_repl_backend to exist
# #719: do this async in case Revise is being loaded from startup.jl
t = @async begin
iter = 0
while !active_repl_backend_available() && iter < 20
sleep(0.05)
iter += 1
end
if active_repl_backend_available()
push!(Base.active_repl_backend.ast_transforms, revise_first)
end
end
isdefined(Base, :errormonitor) && Base.errormonitor(t)
end
end
if isdefined(Main, :Atom)
Atom = getfield(Main, :Atom)
if Atom isa Module && isdefined(Atom, :handlers)
setup_atom(Atom)
end
end
end
return nothing
end
# Revise's own package id, used to skip self-notification in the loading callback
const REVISE_ID = Base.PkgId(Base.UUID("295af30f-e4ad-537b-8983-00126c2a3abe"), "Revise")

"""
    watch_package_callback(id::PkgId)

Callback registered with `Base.package_callbacks`; fires immediately after a
module finishes initialization. Revise itself is skipped, and `watch_package`
is called through `invokelatest` so its compilation is deferred past module
load, improving perceived startup time.
"""
function watch_package_callback(id::PkgId)
    id == REVISE_ID || Base.invokelatest(watch_package, id)
    return
end
"""
    setup_atom(atommod::Module)

Wrap Atom's evaluation handlers so that `revise()` runs before each evaluation.
Only handlers that actually exist in `atommod.handlers` are wrapped.
"""
function setup_atom(atommod::Module)::Nothing
    hdlrs = getfield(atommod, :handlers)
    for name in ("eval", "evalall", "evalshow", "evalrepl")
        haskey(hdlrs, name) || continue
        prev = hdlrs[name]
        Main.Atom.handle(name) do data
            revise()
            prev(data)
        end
    end
    return nothing
end
# Track Revise's own dependency packages so that edits to them are also revised.
function add_revise_deps()
    # Populate CodeTracking data for dependencies and initialize watching on code that Revise depends on
    for mod in (CodeTracking, OrderedCollections, JuliaInterpreter, LoweredCodeUtils, Revise)
        id = PkgId(mod)
        pkgdata = parse_pkg_files(id)
        init_watching(pkgdata, srcfiles(pkgdata))
        pkgdatas[id] = pkgdata
    end
    return nothing
end
# Define and immediately run the precompile workload (see precompile.jl) so the
# directives are baked into Revise's precompile cache.
include("precompile.jl")
_precompile_()
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 3623 | """
mexs = parse_source(filename::AbstractString, mod::Module)
Parse the source `filename`, returning a [`ModuleExprsSigs`](@ref) `mexs`.
`mod` is the "parent" module for the file (i.e., the one that `include`d the file);
if `filename` defines more module(s) then these will all have separate entries in `mexs`.
If parsing `filename` fails, `nothing` is returned.
"""
# Convenience wrapper: start from an empty ModuleExprsSigs rooted at `mod`.
function parse_source(filename, mod::Module; kwargs...)
    return parse_source!(ModuleExprsSigs(mod), filename, mod; kwargs...)
end
"""
    parse_source!(mexs::ModuleExprsSigs, filename, mod::Module)

Top-level parsing of `filename` as included into module `mod`.
Successfully-parsed expressions are added to `mexs`; returns `mexs` on
success, or `nothing` (with a warning) if `filename` is not a file.

See also [`Revise.parse_source`](@ref).
"""
function parse_source!(mod_exprs_sigs::ModuleExprsSigs, filename::AbstractString, mod::Module; kwargs...)
    isfile(filename) || (@warn "$filename is not a file, omitting from revision tracking"; return nothing)
    return parse_source!(mod_exprs_sigs, read(filename, String), filename, mod; kwargs...)
end
"""
    success = parse_source!(mod_exprs_sigs::ModuleExprsSigs, src::AbstractString, filename::AbstractString, mod::Module)

Parse a string `src` obtained by reading `filename` as a single string.
A leading `# REVISE: DO NOT PARSE` marker opts the file out of parsing.
Throws `LoadError` (with the line of the first bad expression) on parse failure.

See also [`Revise.parse_source`](@ref).
"""
function parse_source!(mod_exprs_sigs::ModuleExprsSigs, src::AbstractString, filename::AbstractString, mod::Module; kwargs...)
    startswith(src, "# REVISE: DO NOT PARSE") && return nothing
    ex = Base.parse_input_line(src; filename=filename)
    ex === nothing && return mod_exprs_sigs   # empty source
    if isexpr(ex, :error) || isexpr(ex, :incomplete)
        if Base.VERSION >= v"1.10"
            eval(ex) # this will throw, so the statements below will not execute
        end
        # Pre-1.10 fallback: re-parse incrementally to locate the failure point
        prevex, pos = first_bad_position(src)
        ln = count(isequal('\n'), SubString(src, 1, min(pos, length(src)))) + 1
        throw(LoadError(filename, ln, ex.args[1]))
    end
    return process_source!(mod_exprs_sigs, ex, filename, mod; kwargs...)
end
# Walk the parsed top-level expression, splitting it into (module, expr) pairs
# and recording each expression in `mod_exprs_sigs` under its module.
# With `mode=:includet` each expression is also evaluated as it is encountered.
function process_source!(mod_exprs_sigs::ModuleExprsSigs, ex, filename, mod::Module; mode::Symbol=:sigs)
    # NOTE: `mod` and `ex` are deliberately shadowed here — ExprSplitter yields
    # the module each sub-expression actually belongs to (e.g. nested modules).
    for (mod, ex) in ExprSplitter(mod, ex)
        if mode === :includet
            try
                Core.eval(mod, ex)
            catch err
                # Re-throw with a user-friendly location, trimming Revise frames
                bt = trim_toplevel!(catch_backtrace())
                lnn = firstline(ex)
                loc = location_string((lnn.file, lnn.line))
                throw(ReviseEvalException(loc, err, Any[(sf, 1) for sf in stacktrace(bt)]))
            end
        end
        exprs_sigs = get(mod_exprs_sigs, mod, nothing)
        if exprs_sigs === nothing
            mod_exprs_sigs[mod] = exprs_sigs = ExprsSigs()
        end
        if ex.head === :toplevel
            # Pair each sub-expression with its preceding LineNumberNode
            lnn = nothing
            for a in ex.args
                if isa(a, LineNumberNode)
                    lnn = a
                else
                    pushex!(exprs_sigs, Expr(:toplevel, lnn, a))
                end
            end
        else
            pushex!(exprs_sigs, ex)
        end
    end
    return mod_exprs_sigs
end
# Pre-1.10 helper: incrementally re-parse `str` to find the (expr, byte-position)
# of the first :error/:incomplete expression. Only called after a failed parse,
# so finishing without finding one is itself an error.
if Base.VERSION < v"1.10"
    function first_bad_position(str)
        ex, pos, n = nothing, 1, length(str)
        while pos < n
            ex, pos = Meta.parse(str, pos; greedy=true, raise=false)
            if isexpr(ex, :error) || isexpr(ex, :incomplete)
                return ex, pos
            end
        end
        error("expected an error, finished without one")
    end
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 18917 | using Base: PkgId
include("loading.jl")
"""
parse_pkg_files(id::PkgId)
This function gets called by `watch_package` and runs when a package is first loaded.
Its job is to organize the files and expressions defining the module so that later we can
detect and process revisions.
"""
parse_pkg_files(id::PkgId)
"""
parentfile, included_files = modulefiles(mod::Module)
Return the `parentfile` in which `mod` was defined, as well as a list of any
other files that were `include`d to define `mod`. If this operation is unsuccessful,
`(nothing, nothing)` is returned.
All files are returned as absolute paths.
"""
modulefiles(mod::Module)
# This is primarily used to parse non-precompilable packages.
# These lack a cache header that lists the files that constitute the package;
# they also lack the source cache, and so have to parsed immediately or
# we won't be able to compute a diff when a file is modified (we don't have a record
# of what the source was before the modification).
#
# The main trick here is that since `using` is recursive, `included_files`
# might contain files associated with many different packages. We have to figure
# out which correspond to a particular module `mod`, which we do by:
# - checking the module in which each file is evaluated. This suffices to
# detect "supporting" files, i.e., those `included` within the module
# definition.
# - checking the filename. Since the "top level" file is evaluated into Main,
# we can't use the module-of-evaluation to find it. Here we hope that the
# top-level filename follows convention and matches the module. TODO?: it's
# possible that this needs to be supplemented with parsing.
# Move entries of `included_files` that belong to package `id` into `pkgdata`,
# parsing their source immediately (non-precompilable packages have no cache).
# Matching is by module-name prefix or `<PkgName>.jl` filename suffix (see the
# explanatory comment block above).
function queue_includes!(pkgdata::PkgData, id::PkgId)
    modstring = id.name
    delids = Int[]
    for i = 1:length(included_files)
        mod, fname = included_files[i]
        if mod == Base.__toplevel__
            mod = Main
        end
        modname = String(Symbol(mod))
        if startswith(modname, modstring) || endswith(fname, modstring*".jl")
            modexsigs = parse_source(fname, mod)
            if modexsigs !== nothing
                fname = relpath(fname, pkgdata)
                push!(pkgdata, fname=>FileInfo(modexsigs))
            end
            # Claimed by this package: remove from the shared list either way
            push!(delids, i)
        end
    end
    deleteat!(included_files, delids)
    CodeTracking._pkgfiles[id] = pkgdata.info
    return pkgdata
end
# Public entry point: collect `include`d files for `mod` into its PkgData
# (creating one if needed) and begin watching them when paths are writable.
function queue_includes(mod::Module)
    id = PkgId(mod)
    pkgdata = get(pkgdatas, id, nothing)
    if pkgdata === nothing
        pkgdata = PkgData(id)
    end
    queue_includes!(pkgdata, id)
    if has_writable_paths(pkgdata)
        init_watching(pkgdata)
    end
    pkgdatas[id] = pkgdata
    return pkgdata
end
# A near-duplicate of some of the functionality of queue_includes!
# This gets called for silenced packages, to make sure they don't "contaminate"
# included_files
"""
    remove_from_included_files(modsym::Symbol)

Drop from `included_files` every entry belonging to module `modsym`
(matched by module-name prefix or by a `modsym.jl` filename suffix).
Called for silenced packages so they don't "contaminate" `included_files`.
"""
function remove_from_included_files(modsym::Symbol)
    modstring = string(modsym)
    filter!(included_files) do (mod, fname)
        modname = String(Symbol(mod))
        !(startswith(modname, modstring) || endswith(fname, modstring*".jl"))
    end
    return nothing
end
# Retrieve the source text of `file` from its precompile cache. Files baked
# into the Base source cache need their build-time path looked up first.
function read_from_cache(pkgdata::PkgData, file::AbstractString)
    fi = fileinfo(pkgdata, file)
    filep = joinpath(basedir(pkgdata), file)
    if fi.cachefile == basesrccache
        # Get the original path
        filec = get(cache_file_key, filep, filep)
        return open(basesrccache) do io
            Base._read_dependency_src(io, filec)
        end
    end
    Base.read_dependency_src(fi.cachefile, filep)
end
# Lazily parse `file`'s source (deferred since package load) from its
# precompile cache, folding in any cached @require expressions. Returns the
# (possibly freshly parsed) FileInfo. REPL pseudo-files are handled separately.
function maybe_parse_from_cache!(pkgdata::PkgData, file::AbstractString)
    if startswith(file, "REPL[")
        return add_definitions_from_repl(file)
    end
    fi = fileinfo(pkgdata, file)
    if (isempty(fi.modexsigs) && !fi.parsed[]) && (!isempty(fi.cachefile) || !isempty(fi.cacheexprs))
        # Source was never parsed, get it from the precompile cache
        src = read_from_cache(pkgdata, file)
        filep = joinpath(basedir(pkgdata), file)
        filec = get(cache_file_key, filep, filep)
        topmod = first(keys(fi.modexsigs))
        if parse_source!(fi.modexsigs, src, filec, topmod) === nothing
            @error "failed to parse cache file source text for $file"
        end
        # Merge expressions queued before parsing (e.g. from @require blocks)
        add_modexs!(fi, fi.cacheexprs)
        empty!(fi.cacheexprs)
        fi.parsed[] = true
    end
    return fi
end
"""
    add_modexs!(fi::FileInfo, modexs)

Record each `(module, expr)` pair from `modexs` into `fi.modexsigs`,
creating the per-module `ExprsSigs` on first use. Returns `fi`.
"""
function add_modexs!(fi::FileInfo, modexs)
    for (mod, rex) in modexs
        pushex!(get!(ExprsSigs, fi.modexsigs, mod), rex)
    end
    return fi
end
# Lazily extract method signatures from a FileInfo's parsed expressions;
# `fi.extracted[]` guards against doing the (expensive) extraction twice.
function maybe_extract_sigs!(fi::FileInfo)
    if !fi.extracted[]
        instantiate_sigs!(fi.modexsigs)
        fi.extracted[] = true
    end
    return fi
end
# Convenience overload keyed by (pkgdata, file)
maybe_extract_sigs!(pkgdata::PkgData, file::AbstractString) = maybe_extract_sigs!(fileinfo(pkgdata, file))
# Register newly-discovered `include`d files (as (module, relative-path) pairs,
# relative to `file`) with `pkgdata`, parse them, optionally evaluate their
# signatures now, and start watching them. Already-known files are skipped.
function maybe_add_includes_to_pkgdata!(pkgdata::PkgData, file::AbstractString, includes; eval_now::Bool=false)
    for (mod, inc) in includes
        # `inc` is relative to the including file's directory
        inc = joinpath(splitdir(file)[1], inc)
        incrp = relpath(inc, pkgdata)
        hasfile = false
        for srcfile in srcfiles(pkgdata)
            if srcfile == incrp
                hasfile = true
                break
            end
        end
        if !hasfile
            # Add the file to pkgdata
            push!(pkgdata.info.files, incrp)
            fi = FileInfo(mod)
            push!(pkgdata.fileinfos, fi)
            # Parse the source of the new file
            fullfile = joinpath(basedir(pkgdata), incrp)
            if isfile(fullfile)
                parse_source!(fi.modexsigs, fullfile, mod)
                if eval_now
                    # Use runtime dispatch to reduce latency
                    Base.invokelatest(instantiate_sigs!, fi.modexsigs; mode=:eval)
                end
            end
            # Add to watchlist
            init_watching(pkgdata, (incrp,))
            yield()
        end
    end
end
# Use locking to prevent races between inner and outer @require blocks
const requires_lock = ReentrantLock()

"""
    add_require(sourcefile, modcaller, idmod, modname, expr)

Handle a fired `Requires.@require` block: record `expr` under a synthetic
`"<file>__@require__"` entry of `modcaller`'s PkgData. Simple blocks (literal
`include`s, no `eval`) are deferred; complex ones are evaluated immediately
via `eval_require_now`.
"""
function add_require(sourcefile::String, modcaller::Module, idmod::String, modname::String, expr::Expr)
    id = PkgId(modcaller)
    # If this fires when the module is first being loaded (because the dependency
    # was already loaded), Revise may not yet have the pkgdata for this package.
    if !haskey(pkgdatas, id)
        watch_package(id)
    end
    lock(requires_lock)
    try
        # Get/create the FileInfo specifically for tracking @require blocks
        pkgdata = pkgdatas[id]
        filekey = relpath(sourcefile, pkgdata) * "__@require__"
        fileidx = fileindex(pkgdata, filekey)
        if fileidx === nothing
            files = srcfiles(pkgdata)
            fileidx = length(files) + 1
            push!(files, filekey)
            push!(pkgdata.fileinfos, FileInfo(modcaller))
        end
        fi = pkgdata.fileinfos[fileidx]
        # Tag the expr to ensure it is unique
        expr = Expr(:block, copy(expr))
        push!(expr.args, :(__pkguuid__ = $idmod))
        # Add the expression to the fileinfo
        complex = true # is this too complex to delay?
        if !fi.extracted[]
            # If we haven't yet extracted signatures, do our best to avoid it now in case the
            # signature-extraction code has not yet been compiled (latency reduction)
            includes, complex = deferrable_require(expr)
            if !complex
                # [(modcaller, inc) for inc in includes] but without precompiling a Generator
                modincludes = Tuple{Module,String}[]
                for inc in includes
                    push!(modincludes, (modcaller, inc))
                end
                maybe_add_includes_to_pkgdata!(pkgdata, filekey, modincludes)
                if isempty(fi.modexsigs)
                    # Source has not even been parsed
                    push!(fi.cacheexprs, (modcaller, expr))
                else
                    add_modexs!(fi, [(modcaller, expr)])
                end
            end
        end
        if complex
            Base.invokelatest(eval_require_now, pkgdata, fileidx, filekey, sourcefile, modcaller, expr)
        end
    finally
        unlock(requires_lock)
    end
end
"""
    includes, complex = deferrable_require(expr)

Scan a `@require` block `expr`, collecting literal `include` targets into
`includes`; `complex` is `true` when the block must be evaluated immediately
rather than deferred.
"""
function deferrable_require(expr)
    includes = String[]
    return includes, deferrable_require!(includes, expr)
end
"""
    complex = deferrable_require!(includes, expr::Expr)

Recursively walk `expr` (the body of a `@require` block), pushing the file
names of literal `include("...")` calls onto `includes`. Return `true` when
the block is too complex to defer — a non-literal `include` argument, a call
to `eval`/`Mod.eval`, or an `@eval` macrocall — so the caller must evaluate it
immediately.
"""
function deferrable_require!(includes, expr::Expr)
    if expr.head === :call
        callee = expr.args[1]
        if is_some_include(callee)
            if isa(expr.args[2], AbstractString)
                push!(includes, expr.args[2])
            else
                # non-literal include target: can't know the file without evaluating
                return true
            end
        elseif callee === :eval || (isa(callee, Expr) && callee.head === :. && is_quotenode_egal(callee.args[2], :eval))
            # Any eval statement is suspicious and requires immediate action
            # BUGFIX: was `return false`, which wrongly marked eval-containing
            # blocks as deferrable (and skipped recursing into their arguments)
            return true
        end
    end
    expr.head === :macrocall && expr.args[1] === Symbol("@eval") && return true
    for a in expr.args
        a isa Expr || continue
        deferrable_require!(includes, a) && return true
    end
    return false
end
# Evaluate a @require block immediately, with SOURCE_PATH temporarily pointing
# at the block's source file so relative `include`s resolve correctly, then
# fold the resulting methods/includes back into `pkgdata`.
function eval_require_now(pkgdata::PkgData, fileidx::Int, filekey::String, sourcefile::String, modcaller::Module, expr::Expr)
    fi = pkgdata.fileinfos[fileidx]
    exsnew = ExprsSigs()
    exsnew[RelocatableExpr(expr)] = nothing
    mexsnew = ModuleExprsSigs(modcaller=>exsnew)
    # Before executing the expression we need to set the load path appropriately
    prev = Base.source_path(nothing)
    tls = task_local_storage()
    tls[:SOURCE_PATH] = sourcefile
    # Now execute the expression
    mexsnew, includes = try
        eval_new!(mexsnew, fi.modexsigs)
    finally
        # Always restore (or clear) SOURCE_PATH, even if evaluation throws
        if prev === nothing
            delete!(tls, :SOURCE_PATH)
        else
            tls[:SOURCE_PATH] = prev
        end
    end
    # Add any new methods or `include`d files to tracked objects
    pkgdata.fileinfos[fileidx] = FileInfo(mexsnew, fi)
    ret = maybe_add_includes_to_pkgdata!(pkgdata, filekey, includes; eval_now=true)
    return ret
end
# Block until something inside `dirname` changes, then return
# `(latestfiles, stillwatching)`: the tracked `file=>PkgId` pairs that appear
# modified (or deleted), and whether `dirname` is still on the watch list.
function watch_files_via_dir(dirname)
    try
        wait_changed(dirname)  # this will block until there is a modification
    catch e
        # issue #459
        (isa(e, InterruptException) && throwto_repl(e)) || throw(e)
    end
    latestfiles = Pair{String,PkgId}[]
    # Check to see if we're still watching this directory
    stillwatching = haskey(watched_files, dirname)
    if stillwatching
        wf = watched_files[dirname]
        for (file, id) in wf.trackedfiles
            fullpath = joinpath(dirname, file)
            if isdir(fullpath)
                # Detected a modification in a directory that we're watching in
                # itself (not as a container for watched files)
                push!(latestfiles, file=>id)
                continue
            elseif !file_exists(fullpath)
                # File may have been deleted. But be very sure.
                sleep(0.1)
                if !file_exists(fullpath)
                    push!(latestfiles, file=>id)
                    continue
                end
            end
            if newer(mtime(fullpath), wf.timestamp)
                push!(latestfiles, file=>id)
            end
        end
        isempty(latestfiles) || updatetime!(wf) # ref issue #341
    end
    return latestfiles, stillwatching
end
# Serializes concurrent package-watch setup (e.g. parallel `using` on tasks)
const wplock = ReentrantLock()

"""
    watch_package(id::Base.PkgId)

Start watching a package for changes to the files that define it.
This function gets called via a callback registered with `Base.require`, at the completion
of module-loading by `using` or `import`. Returns the package's `PkgData`, or
`nothing` for packages excluded from watching.
"""
function watch_package(id::PkgId)
    # we may have switched environments, so make sure we're watching the right manifest
    active_project_watcher()
    pkgdata = get(pkgdatas, id, nothing)
    pkgdata !== nothing && return pkgdata
    lock(wplock)
    try
        modsym = Symbol(id.name)
        if modsym ∈ dont_watch_pkgs
            if modsym ∉ silence_pkgs
                @warn "$modsym is excluded from watching by Revise. Use Revise.silence(\"$modsym\") to quiet this warning."
            end
            remove_from_included_files(modsym)
            return nothing
        end
        pkgdata = parse_pkg_files(id)
        if has_writable_paths(pkgdata)
            init_watching(pkgdata, srcfiles(pkgdata))
        end
        pkgdatas[id] = pkgdata
    finally
        unlock(wplock)
    end
    return pkgdata
end
# True when at least one source file of `pkgdata` is writable (i.e., worth
# watching for edits). A missing base directory conservatively returns `true`.
function has_writable_paths(pkgdata::PkgData)
    dir = basedir(pkgdata)
    isdir(dir) || return true
    haswritable = false
    # Compatibility note:
    # The following can be written in cd(dir) do ... end block
    # but that would trigger Julia to crash for some corner cases.
    # This is identified on Julia 1.7.3 + modified ubuntu 18.04, and it is
    # verified that doesn't happen for Julia 1.9.2 on the same machine.
    current_dir = pwd()
    try
        cd(dir)
        for file in srcfiles(pkgdata)
            haswritable |= iswritable(file)
        end
    finally
        # always restore the caller's working directory
        cd(current_dir)
    end
    return haswritable
end
# Callback for `Base.include_callbacks`: record every `include`d file together
# with the module it was evaluated into, path normalized to absolute form.
watch_includes(mod::Module, fn::AbstractString) =
    push!(included_files, (mod, normpath(abspath(fn))))
## Working with Pkg and code-loading
# Much of this is adapted from base/loading.jl
"""
    manifest_file(project_file) -> Union{String,Nothing}
    manifest_file()

Return the path of the manifest associated with `project_file` (by default the
active project), or `nothing` when there is no usable project/manifest.
"""
function manifest_file(project_file)
    project_file isa String && isfile(project_file) || return nothing
    mpath = Base.project_file_manifest_path(project_file)
    return mpath isa String ? mpath : nothing
end
manifest_file() = manifest_file(Base.active_project())
# Populate `pkgpaths` with `PkgId => package-directory` for every entry of
# `manifest_file`, resolving each entry's path via Base's manifest machinery.
function manifest_paths!(pkgpaths::Dict, manifest_file::String)
    d = if isdefined(Base, :get_deps) # `get_deps` is present in versions that support new manifest formats
        Base.get_deps(Base.parsed_toml(manifest_file))
    else
        Base.parsed_toml(manifest_file)
    end
    for (name, entries) in d
        entries::Vector{Any}
        for entry in entries
            id = PkgId(UUID(entry["uuid"]::String), name)
            path = Base.explicit_manifest_entry_path(manifest_file, id, entry)
            if path isa String
                if isfile(path)
                    # Workaround for #802: entry path may point at the entry
                    # file itself; normalize to the package root directory
                    path = dirname(dirname(path))
                end
                pkgpaths[id] = path
            end
        end
    end
    return pkgpaths
end
# Fresh-dict convenience wrapper
manifest_paths(manifest_file::String) =
    manifest_paths!(Dict{PkgId,String}(), manifest_file)
"""
    watch_manifest(mfile)

Task body (run via `TaskThunk`): loop forever waiting for the manifest `mfile`
to change. When a tracked package's directory moves (e.g. `Pkg.develop`/
version switch), stop watching the old location, queue its files for revision,
and restart watching at the new location. Errors are logged, never thrown.
"""
function watch_manifest(mfile)
    while true
        try
            wait_changed(mfile)
        catch e
            # issue #459
            (isa(e, InterruptException) && throwto_repl(e)) || throw(e)
        end
        manifest_file() == mfile || continue # process revisions only if this is the active manifest
        try
            with_logger(_debug_logger) do
                @debug "Pkg" _group="manifest_update" manifest_file=mfile
                isfile(mfile) || return nothing
                pkgdirs = manifest_paths(mfile)
                for (id, pkgdir) in pkgdirs
                    if haskey(pkgdatas, id)
                        pkgdata = pkgdatas[id]
                        if pkgdir != basedir(pkgdata)
                            ## The package directory has changed
                            @debug "Pkg" _group="pathswitch" oldpath=basedir(pkgdata) newpath=pkgdir
                            # Stop all associated watching tasks
                            for dir in unique_dirs(srcfiles(pkgdata))
                                @debug "Pkg" _group="unwatch" dir=dir
                                delete!(watched_files, joinpath(basedir(pkgdata), dir))
                                # Note: if the file is revised, the task(s) will run one more time.
                                # However, because we've removed the directory from the watch list this will be a no-op,
                                # and then the tasks will be dropped.
                            end
                            # Revise code as needed
                            files = String[]
                            mustnotify = false
                            for file in srcfiles(pkgdata)
                                fi = try
                                    maybe_parse_from_cache!(pkgdata, file)
                                catch err
                                    # https://github.com/JuliaLang/julia/issues/42404
                                    # Get the source-text from the package source instead
                                    fi = fileinfo(pkgdata, file)
                                    if isempty(fi.modexsigs) && (!isempty(fi.cachefile) || !isempty(fi.cacheexprs))
                                        filep = joinpath(basedir(pkgdata), file)
                                        src = read(filep, String)
                                        topmod = first(keys(fi.modexsigs))
                                        if parse_source!(fi.modexsigs, src, filep, topmod) === nothing
                                            @error "failed to parse source text for $filep"
                                        end
                                        add_modexs!(fi, fi.cacheexprs)
                                        empty!(fi.cacheexprs)
                                        fi.parsed[] = true
                                    end
                                    fi
                                end
                                maybe_extract_sigs!(fi)
                                push!(revision_queue, (pkgdata, file))
                                push!(files, file)
                                mustnotify = true
                            end
                            mustnotify && notify(revision_event)
                            # Update the directory
                            pkgdata.info.basedir = pkgdir
                            # Restart watching, if applicable
                            if has_writable_paths(pkgdata)
                                init_watching(pkgdata, files)
                            end
                        end
                    end
                end
            end
        catch err
            @error "Error watching manifest" exception=(err, trim_toplevel!(catch_backtrace()))
        end
    end
end
# Ensure the currently-active manifest is being watched: if it is new, record
# it and spawn a `watch_manifest` task for it. Safe to call repeatedly.
function active_project_watcher()
    mfile = manifest_file()
    if !isnothing(mfile) && mfile ∉ watched_manifests
        push!(watched_manifests, mfile)
        wmthunk = TaskThunk(watch_manifest, (mfile,))
        schedule(Task(wmthunk))
    end
    return
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 6076 | # COV_EXCL_START
# `@warnpcfail precompile(...)`: evaluate the precompile directive in the
# caller's module and emit a warning (with the original file/line) if it
# returns false, so broken directives are noticed rather than silently ignored.
macro warnpcfail(ex::Expr)
    modl = __module__
    file = __source__.file === nothing ? "?" : String(__source__.file)
    line = __source__.line
    quote
        $(esc(ex)) || @warn """precompile directive
     $($(Expr(:quote, ex)))
 failed. Please report an issue in $($modl) (after checking for duplicates) or remove this directive.""" _file=$file _line=$line
    end
end
# Explicit precompile workload covering Revise's hot entry points (watching,
# revision, parsing, and signature extraction). Runs only while generating
# output (i.e., during precompilation).
function _precompile_()
    ccall(:jl_generating_output, Cint, ()) == 1 || return nothing
    @warnpcfail precompile(Tuple{TaskThunk})
    @warnpcfail precompile(Tuple{typeof(wait_changed), String})
    @warnpcfail precompile(Tuple{typeof(watch_package), PkgId})
    @warnpcfail precompile(Tuple{typeof(watch_includes), Module, String})
    @warnpcfail precompile(Tuple{typeof(watch_manifest), String})
    @warnpcfail precompile(Tuple{typeof(revise_dir_queued), String})
    @warnpcfail precompile(Tuple{typeof(revise_file_queued), PkgData, String})
    @warnpcfail precompile(Tuple{typeof(init_watching), PkgData, Vector{String}})
    @warnpcfail precompile(Tuple{typeof(add_revise_deps)})
    @warnpcfail precompile(Tuple{typeof(watch_package_callback), PkgId})
    @warnpcfail precompile(Tuple{typeof(revise)})
    @warnpcfail precompile(Tuple{typeof(revise_first), Expr})
    @warnpcfail precompile(Tuple{typeof(includet), String})
    @warnpcfail precompile(Tuple{typeof(track), Module, String})
    # setindex! doesn't fully precompile, but it's still beneficial to do it
    # (it shaves off a bit of the time)
    # See https://github.com/JuliaLang/julia/pull/31466
    @warnpcfail precompile(Tuple{typeof(setindex!), ExprsSigs, Nothing, RelocatableExpr})
    @warnpcfail precompile(Tuple{typeof(setindex!), ExprsSigs, Vector{Any}, RelocatableExpr})
    @warnpcfail precompile(Tuple{typeof(setindex!), ModuleExprsSigs, ExprsSigs, Module})
    @warnpcfail precompile(Tuple{typeof(setindex!), Dict{PkgId,PkgData}, PkgData, PkgId})
    @warnpcfail precompile(Tuple{Type{WatchList}})
    @warnpcfail precompile(Tuple{typeof(setindex!), Dict{String,WatchList}, WatchList, String})
    MI = CodeTrackingMethodInfo
    @warnpcfail precompile(Tuple{typeof(minimal_evaluation!), Any, MI, Module, Core.CodeInfo, Symbol})
    @warnpcfail precompile(Tuple{typeof(methods_by_execution!), Any, MI, DocExprs, Module, Expr})
    @warnpcfail precompile(Tuple{typeof(methods_by_execution!), Any, MI, DocExprs, JuliaInterpreter.Frame, Vector{Bool}})
    @warnpcfail precompile(Tuple{typeof(Core.kwfunc(methods_by_execution!)),
                                 NamedTuple{(:mode,),Tuple{Symbol}},
                                 typeof(methods_by_execution!), Function, MI, DocExprs, Module, Expr})
    @warnpcfail precompile(Tuple{typeof(Core.kwfunc(methods_by_execution!)),
                                 NamedTuple{(:skip_include,),Tuple{Bool}},
                                 typeof(methods_by_execution!), Function, MI, DocExprs, Module, Expr})
    @warnpcfail precompile(Tuple{typeof(Core.kwfunc(methods_by_execution!)),
                                 NamedTuple{(:mode, :skip_include),Tuple{Symbol,Bool}},
                                 typeof(methods_by_execution!), Function, MI, DocExprs, Module, Expr})
    @warnpcfail precompile(Tuple{typeof(Core.kwfunc(methods_by_execution!)),
                                 NamedTuple{(:mode,),Tuple{Symbol}},
                                 typeof(methods_by_execution!), Function, MI, DocExprs, Frame, Vector{Bool}})
    @warnpcfail precompile(Tuple{typeof(Core.kwfunc(methods_by_execution!)),
                                 NamedTuple{(:mode, :skip_include),Tuple{Symbol,Bool}},
                                 typeof(methods_by_execution!), Function, MI, DocExprs, Frame, Vector{Bool}})
    mex = which(methods_by_execution!, (Function, MI, DocExprs, Module, Expr))
    mbody = bodymethod(mex)
    # use `typeof(pairs(NamedTuple()))` here since it actually differs between Julia versions
    @warnpcfail precompile(Tuple{mbody.sig.parameters[1], Symbol, Bool, Bool, typeof(pairs(NamedTuple())), typeof(methods_by_execution!), Any, MI, DocExprs, Module, Expr})
    @warnpcfail precompile(Tuple{mbody.sig.parameters[1], Symbol, Bool, Bool, Iterators.Pairs{Symbol,Bool,Tuple{Symbol},NamedTuple{(:skip_include,),Tuple{Bool}}}, typeof(methods_by_execution!), Any, MI, DocExprs, Module, Expr})
    mfr = which(methods_by_execution!, (Function, MI, DocExprs, Frame, Vector{Bool}))
    mbody = bodymethod(mfr)
    @warnpcfail precompile(Tuple{mbody.sig.parameters[1], Symbol, Bool, typeof(methods_by_execution!), Any, MI, DocExprs, Frame, Vector{Bool}})
    @warnpcfail precompile(Tuple{typeof(hastrackedexpr), Expr})
    @warnpcfail precompile(Tuple{typeof(get_def), Method})
    @warnpcfail precompile(Tuple{typeof(parse_pkg_files), PkgId})
    if isdefined(Revise, :filter_valid_cachefiles)
        @warnpcfail precompile(Tuple{typeof(filter_valid_cachefiles), String, Vector{String}})
    end
    @warnpcfail precompile(Tuple{typeof(pkg_fileinfo), PkgId})
    @warnpcfail precompile(Tuple{typeof(push!), WatchList, Pair{String,PkgId}})
    @warnpcfail precompile(Tuple{typeof(pushex!), ExprsSigs, Expr})
    @warnpcfail precompile(Tuple{Type{ModuleExprsSigs}, Module})
    @warnpcfail precompile(Tuple{Type{FileInfo}, Module, String})
    @warnpcfail precompile(Tuple{Type{PkgData}, PkgId})
    @warnpcfail precompile(Tuple{typeof(Base._deleteat!), Vector{Tuple{Module,String,Float64}}, Vector{Int}})
    @warnpcfail precompile(Tuple{typeof(add_require), String, Module, String, String, Expr})
    @warnpcfail precompile(Tuple{Core.kwftype(typeof(maybe_add_includes_to_pkgdata!)),NamedTuple{(:eval_now,), Tuple{Bool}},typeof(maybe_add_includes_to_pkgdata!),PkgData,String,Vector{Pair{Module, String}}})
    for TT in (Tuple{Module,Expr}, Tuple{DataType,MethodSummary})
        @warnpcfail precompile(Tuple{Core.kwftype(typeof(Base.CoreLogging.handle_message)),NamedTuple{(:time, :deltainfo), Tuple{Float64, TT}},typeof(Base.CoreLogging.handle_message),ReviseLogger,LogLevel,String,Module,String,Symbol,String,Int})
    end
    return nothing
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 8523 | """
Revise.track(Base)
Revise.track(Core.Compiler)
Revise.track(stdlib)
Track updates to the code in Julia's `base` directory, `base/compiler`, or one of its
standard libraries.
"""
function track(mod::Module; modified_files=revision_queue)
id = PkgId(mod)
modname = nameof(mod)
return _track(id, modname; modified_files=modified_files)
end
# "vMAJOR.MINOR" component used in versioned stdlib paths
const vstring = "v$(VERSION.major).$(VERSION.minor)"

"""
    inpath(path, dirs) -> Bool

Return `true` if the components of `path` contain the sequence `dirs`
consecutively, allowing an optional intervening version component (`vstring`)
between entries (e.g. `stdlib/v1.x/Dates`).
"""
function inpath(path, dirs)
    parts = splitpath(path)
    pos = findfirst(isequal(first(dirs)), parts)
    pos === nothing && return false
    for k = 2:length(dirs)
        pos += 1
        pos <= length(parts) || return false
        pos += parts[pos] == vstring ? 1 : 0   # skip version component
        parts[pos] == dirs[k] || return false
    end
    return true
end
# Implementation behind `Revise.track(mod)`: locate the on-disk sources for
# Base, a stdlib, or Core.Compiler, register them with CodeTracking, queue any
# files modified since the build, and start watching them.
function _track(id, modname; modified_files=revision_queue)
    haskey(pkgdatas, id) && return nothing # already tracked
    isbase = modname === :Base
    isstdlib = !isbase && modname ∈ stdlib_names
    if isbase || isstdlib
        # Test whether we know where to find the files
        if isbase
            srcdir = fixpath(joinpath(juliadir, "base"))
            dirs = ["base"]
        else
            stdlibv = joinpath("stdlib", vstring, String(modname))
            srcdir = fixpath(joinpath(juliadir, stdlibv))
            if !isdir(srcdir)
                srcdir = fixpath(joinpath(juliadir, "stdlib", String(modname)))
            end
            if !isdir(srcdir)
                # This can happen for Pkg, since it's developed out-of-tree
                srcdir = joinpath(juliadir, "usr", "share", "julia", stdlibv) # omit fixpath deliberately
            end
            dirs = ["stdlib", String(modname)]
        end
        if !isdir(srcdir)
            @error "unable to find path containing source for $modname, tracking is not possible"
        end
        # Determine when the basesrccache was built
        mtcache = mtime(basesrccache)
        # Initialize expression-tracking for files, and
        # note any modified since Base was built
        pkgdata = get(pkgdatas, id, nothing)
        if pkgdata === nothing
            pkgdata = PkgData(id, srcdir)
        end
        lock(revise_lock) do
            for (submod, filename) in Iterators.drop(Base._included_files, 1) # stepping through sysimg.jl rebuilds Base, omit it
                ffilename = fixpath(filename)
                inpath(ffilename, dirs) || continue
                keypath = ffilename[1:last(findfirst(dirs[end], ffilename))]
                rpath = relpath(ffilename, keypath)
                fullpath = joinpath(basedir(pkgdata), rpath)
                if fullpath != filename
                    # remember the build-time <-> install-time path mapping
                    cache_file_key[fullpath] = filename
                    src_file_key[filename] = fullpath
                end
                push!(pkgdata, rpath=>FileInfo(submod, basesrccache))
                if mtime(ffilename) > mtcache
                    with_logger(_debug_logger) do
                        @debug "Recipe for Base/StdLib" _group="Watching" filename=filename mtime=mtime(filename) mtimeref=mtcache
                    end
                    push!(modified_files, (pkgdata, rpath))
                end
            end
        end
        # Add files to CodeTracking pkgfiles
        CodeTracking._pkgfiles[id] = pkgdata.info
        # Add the files to the watch list
        init_watching(pkgdata, srcfiles(pkgdata))
        # Save the result (unnecessary if already in pkgdatas, but doesn't hurt either)
        pkgdatas[id] = pkgdata
    elseif modname === :Compiler
        compilerdir = normpath(joinpath(juliadir, "base", "compiler"))
        pkgdata = get(pkgdatas, id, nothing)
        if pkgdata === nothing
            pkgdata = PkgData(id, compilerdir)
        end
        track_subdir_from_git!(pkgdata, compilerdir; modified_files=modified_files)
        # insertion into pkgdatas is done by track_subdir_from_git!
    else
        error("no Revise.track recipe for module ", modname)
    end
    return nothing
end
# Fix paths to files that define Julia (base and stdlibs)
"""
    fixpath(filename::AbstractString; badpath=basebuilddir, goodpath=juliadir)

Map a source path recorded at Julia build time (under `badpath`) to the
corresponding path in the local Julia installation (under `goodpath`).
Paths outside `badpath` are just normalized. If the remapped file does not
exist but the original does, the original (normalized) path is returned.
"""
function fixpath(filename::AbstractString; badpath=basebuilddir, goodpath=juliadir)
    startswith(filename, badpath) || return normpath(filename)
    relfilename = relpath(filename, badpath)
    relfilename0 = relfilename
    for strippath in (#joinpath("usr", "share", "julia", "stdlib", "v$(VERSION.major).$(VERSION.minor)"),
                      joinpath("usr", "share", "julia"),)
        if startswith(relfilename, strippath)
            relfilename = relpath(relfilename, strippath)
            # stripping "usr/share/julia" must not lose the "stdlib" component
            if occursin("stdlib", relfilename0) && !occursin("stdlib", relfilename)
                relfilename = joinpath("stdlib", relfilename)
            end
        end
    end
    ffilename = normpath(joinpath(goodpath, relfilename))
    # Fixed: short-circuit && (was non-short-circuit &), so the second stat is
    # skipped when the original file doesn't exist; dropped unused local `filec`.
    if isfile(filename) && !isfile(ffilename)
        ffilename = normpath(filename)
    end
    return ffilename
end
# Shared implementation: rewrite a line-node's file through `fixpath`
_fixpath(lnn; kwargs...) = LineNumberNode(lnn.line, Symbol(fixpath(String(lnn.file); kwargs...)))
fixpath(lnn::LineNumberNode; kwargs...) = _fixpath(lnn; kwargs...)
fixpath(lnn::Core.LineInfoNode; kwargs...) = _fixpath(lnn; kwargs...)
# For tracking subdirectories of Julia itself (base/compiler, stdlibs)
"""
    track_subdir_from_git!(pkgdata::PkgData, subdir; commit=Base.GIT_VERSION_INFO.commit, modified_files=revision_queue)

Track the `.jl` files in `subdir` of a Julia git checkout, using the git tree at
`commit` (the commit used to build Julia) as the reference source. Files whose
on-disk contents differ from the git source are queued in `modified_files`.
On success, `pkgdata` is registered in `pkgdatas` and its files are watched.
Throws `GitRepoException` when `subdir` is not inside a git repository.
"""
function track_subdir_from_git!(pkgdata::PkgData, subdir::AbstractString; commit=Base.GIT_VERSION_INFO.commit, modified_files=revision_queue)
    # diff against files at the same commit used to build Julia
    repo, repo_path = git_repo(subdir)
    if repo === nothing   # idiomatic `===` comparison with nothing (was `==`)
        throw(GitRepoException(subdir))
    end
    prefix = relpath(subdir, repo_path)   # git-relative path of this subdir
    tree = git_tree(repo, commit)
    files = Iterators.filter(file->startswith(file, prefix) && endswith(file, ".jl"), keys(tree))
    ccall((:giterr_clear, :libgit2), Cvoid, ()) # necessary to avoid errors like "the global/xdg file 'attributes' doesn't exist: No such file or directory"
    lock(revise_lock) do
        for file in files
            fullpath = joinpath(repo_path, file)
            rpath = relpath(fullpath, pkgdata) # this might undo the above, except for Core.Compiler
            local src
            try
                src = git_source(file, tree)
            catch err
                if err isa KeyError
                    @warn "skipping $file, not found in repo"
                    continue
                end
                rethrow(err)
            end
            fmod = get(juliaf2m, fullpath, Core.Compiler) # Core.Compiler is not cached
            if fmod === Core.Compiler
                endswith(fullpath, "compiler.jl") && continue # defines the module, skip
                @static if isdefined(Core.Compiler, :EscapeAnalysis)
                    # after https://github.com/JuliaLang/julia/pull/43800
                    if contains(fullpath, "compiler/ssair/EscapeAnalysis")
                        fmod = Core.Compiler.EscapeAnalysis
                    end
                end
            end
            # Queue for revision if the working tree differs from the git source
            if src != read(fullpath, String)
                push!(modified_files, (pkgdata, rpath))
            end
            fi = FileInfo(fmod)
            if parse_source!(fi.modexsigs, src, file, fmod) === nothing
                @warn "failed to parse Git source text for $file"
            else
                instantiate_sigs!(fi.modexsigs)
            end
            push!(pkgdata, rpath=>fi)
        end
    end
    if !isempty(pkgdata.fileinfos)
        id = PkgId(pkgdata)
        CodeTracking._pkgfiles[id] = pkgdata.info
        init_watching(pkgdata, srcfiles(pkgdata))
        pkgdatas[id] = pkgdata
    end
    return nothing
end
# For tracking Julia's own stdlibs
# Names of the standard libraries shipped with Julia (used to recognize stdlib modules)
const stdlib_names = Set([
    :Base64, :CRC32c, :Dates, :DelimitedFiles, :Distributed,
    :FileWatching, :Future, :InteractiveUtils, :Libdl,
    :LibGit2, :LinearAlgebra, :Logging, :Markdown, :Mmap,
    :OldPkg, :Pkg, :Printf, :Profile, :Random, :REPL,
    :Serialization, :SHA, :SharedArrays, :Sockets, :SparseArrays,
    :Statistics, :SuiteSparse, :Test, :Unicode, :UUIDs,
    :TOML, :Artifacts, :LibCURL_jll, :LibCURL, :MozillaCACerts_jll,
    :Downloads, :Tar, :ArgTools, :NetworkOptions])

# This replacement is needed because the path written during compilation differs from
# the git source path
const stdlib_rep = joinpath("usr", "share", "julia", "stdlib", "v$(VERSION.major).$(VERSION.minor)") => "stdlib"

# Map from each source file used to build Julia to the module it was included into
const juliaf2m = Dict(normpath(replace(file, stdlib_rep))=>mod
                      for (mod,file) in Base._included_files)
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 5155 | # We will need to detect new function bodies, compare function bodies
# to see if they've changed, etc. This has to be done "blind" to the
# line numbers at which the functions are defined.
#
# Now, we could just discard line numbers from expressions, but that
# would have a very negative effect on the quality of backtraces. So
# we keep them, but introduce machinery to compare expressions without
# concern for line numbers.

"""
A `RelocatableExpr` wraps an `Expr` to ensure that comparisons
between `RelocatableExpr`s ignore line numbering information.
This allows one to detect that two expressions are the same no matter
where they appear in a file.
"""
struct RelocatableExpr
    ex::Expr   # the wrapped expression; line-number nodes are kept, only comparisons skip them
end
# `Expr` and its relocatable wrapper are frequently interchangeable
const ExLike = Union{Expr,RelocatableExpr}

Base.convert(::Type{Expr}, rex::RelocatableExpr) = rex.ex
Base.convert(::Type{RelocatableExpr}, ex::Expr) = RelocatableExpr(ex)
# Expr(rex::RelocatableExpr) = rex.ex   # too costly (inference invalidation)

Base.copy(rex::RelocatableExpr) = RelocatableExpr(copy(rex.ex))
# Implement the required comparison functions. `hash` is needed for Dicts.
function Base.:(==)(ra::RelocatableExpr, rb::RelocatableExpr)
    a, b = ra.ex, rb.ex
    # If only one of the two expressions is wrapped in a `:block`, unwrap it so
    # the underlying expressions get compared directly.
    if a.head == b.head
    elseif a.head === :block
        a = unwrap(a)
    elseif b.head === :block
        b = unwrap(b)
    end
    # Compare heads and argument lists, skipping all line-number information
    return a.head == b.head && isequal(LineSkippingIterator(a.args), LineSkippingIterator(b.args))
end
# Seed distinguishing RelocatableExpr hashes from those of other types
const hashrex_seed = UInt == UInt64 ? 0x7c4568b6e99c82d9 : 0xb9c82fd8
# Must be consistent with `==` above: line numbers are skipped
Base.hash(x::RelocatableExpr, h::UInt) = hash(LineSkippingIterator(x.ex.args),
                                              hash(x.ex.head, h + hashrex_seed))
# Display without line-number noise (strip a copy; never mutate the stored expression)
function Base.show(io::IO, rex::RelocatableExpr)
    show(io, striplines!(copy(rex.ex)))
end
# Rebuild `ex` with line-number information removed, recursively. Used for display only.
function striplines!(ex::Expr)
    if ex.head === :macrocall
        # for macros, the show method in Base assumes the line number is there,
        # so don't strip it (replace it with `nothing` instead)
        args3 = [a isa ExLike ? striplines!(a) : a for a in ex.args[3:end]]
        return Expr(ex.head, ex.args[1], nothing, args3...)
    end
    args = [a isa ExLike ? striplines!(a) : a for a in ex.args]
    fargs = collect(LineSkippingIterator(args))
    return Expr(ex.head, fargs...)
end
striplines!(rex::RelocatableExpr) = RelocatableExpr(striplines!(rex.ex))
# We could just collect all the non-line statements to a Vector, but
# doing things in-place will be more efficient.

"""
    LineSkippingIterator(args)

Iterate over an `Expr`'s argument list `args`, skipping every entry that
encodes line-number information.
"""
struct LineSkippingIterator
    args::Vector{Any}
end

# Length is unknown up-front because an arbitrary number of entries may be skipped
Base.IteratorSize(::Type{LineSkippingIterator}) = Base.SizeUnknown()
function Base.iterate(iter::LineSkippingIterator, i=0)
    # advance to the next entry that isn't line-number metadata
    i = skip_to_nonline(iter.args, i+1)
    i > length(iter.args) && return nothing
    return (iter.args[i], i)
end
# Return the index of the first entry of `args`, at position >= `i`, that does not
# encode line-number information; an out-of-bounds index is returned if none remains.
function skip_to_nonline(args, i)
    n = length(args)
    while i <= n
        item = args[i]
        isline = (isa(item, Expr) && item.head === :line) ||
                 isa(item, LineNumberNode) ||
                 (isa(item, Pair) && (item::Pair).first === :linenumber) ||   # used in the doc system
                 (isa(item, Base.RefValue) && !isdefined(item, :x))           # also in the doc system
        isline || return i
        i += 1
    end
    return i
end
function Base.isequal(itera::LineSkippingIterator, iterb::LineSkippingIterator)
    # We could use `zip` here except that we want to insist that the
    # iterators also have the same length.
    reta, retb = iterate(itera), iterate(iterb)
    while true
        reta === nothing && retb === nothing && return true    # both exhausted together
        (reta === nothing || retb === nothing) && return false # differing lengths
        vala, ia = reta::Tuple{Any,Int}
        valb, ib = retb::Tuple{Any,Int}
        if isa(vala, Expr) && isa(valb, Expr)
            vala, valb = vala::Expr, valb::Expr
            vala.head == valb.head || return false
            # recurse into the arguments, again skipping line numbers
            isequal(LineSkippingIterator(vala.args), LineSkippingIterator(valb.args)) || return false
        elseif isa(vala, Symbol) && isa(valb, Symbol)
            vala, valb = vala::Symbol, valb::Symbol
            # two gensymed symbols do not need to match
            sa, sb = String(vala), String(valb)
            (startswith(sa, '#') && startswith(sb, '#')) || isequal(vala, valb) || return false
        elseif isa(vala, Number) && isa(valb, Number)
            # `===` so value AND type must agree (e.g., 1 vs 1.0 differ); issue #233
            vala === valb || return false # issue #233
        else
            isequal(vala, valb) || return false
        end
        reta, retb = iterate(itera, ia), iterate(iterb, ib)
    end
end
const hashlsi_seed = UInt === UInt64 ? 0x533cb920dedccdae : 0x2667c89b
# Hash consistent with `isequal` above: line numbers are skipped, all gensymmed
# symbols hash alike, and numbers hash type-sensitively.
function Base.hash(iter::LineSkippingIterator, h::UInt)
    h += hashlsi_seed
    for x in iter
        if x isa Expr
            h += hash(LineSkippingIterator(x.args), hash(x.head, h + hashrex_seed))
        elseif x isa Symbol
            xs = String(x)
            if startswith(xs, '#') # all gensymmed symbols are treated as identical
                h += hash("gensym", h)
            else
                h += hash(x, h)
            end
        elseif x isa Number
            h += hash(typeof(x), hash(x, h))::UInt   # type-sensitive, matching isequal
        else
            h += hash(x, h)::UInt
        end
    end
    h
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 9136 | """
    Revise.WatchList

A struct for holding files that live inside a directory.
Some platforms (OSX) have trouble watching too many files. So we
watch parent directories, and keep track of which files in them
should be tracked.

Fields:
- `timestamp`: mtime of last update
- `trackedfiles`: Set of filenames, generally expressed as a relative path
"""
mutable struct WatchList
    timestamp::Float64 # unix time of last revision
    trackedfiles::Dict{String,PkgId}   # filename => id of the package that owns it
end
# Type aliases used throughout Revise:
const DocExprs = Dict{Module,Vector{Expr}}
# expression => list of signatures derived from it (or `nothing` when uncached)
const ExprsSigs = OrderedDict{RelocatableExpr,Union{Nothing,Vector{Any}}}
const DepDictVals = Tuple{Module,RelocatableExpr}
# name => set of (module, expression) pairs that depend on that name
const DepDict = Dict{Symbol,Set{DepDictVals}}
function Base.show(io::IO, exsigs::ExprsSigs)
    compact = get(io, :compact, false)
    if compact
        # Compact form: just counts of expressions and signatures
        n = 0
        for (rex, sigs) in exsigs
            sigs === nothing && continue
            n += length(sigs)
        end
        print(io, "ExprsSigs(<$(length(exsigs)) expressions>, <$n signatures>)")
    else
        # Verbose form: list the expressions themselves
        print(io, "ExprsSigs with the following expressions: ")
        for def in keys(exsigs)
            print(io, "\n  ")
            Base.show_unquoted(io, RelocatableExpr(unwrap(def)), 2)
        end
    end
end
"""
ModuleExprsSigs
For a particular source file, the corresponding `ModuleExprsSigs` is a mapping
`mod=>exprs=>sigs` of the expressions `exprs` found in `mod` and the signatures `sigs`
that arise from them. Specifically, if `mes` is a `ModuleExprsSigs`, then `mes[mod][ex]`
is a list of signatures that result from evaluating `ex` in `mod`. It is possible that
this returns `nothing`, which can mean either that `ex` does not define any methods
or that the signatures have not yet been cached.
The first `mod` key is guaranteed to be the module into which this file was `include`d.
To create a `ModuleExprsSigs` from a source file, see [`Revise.parse_source`](@ref).
"""
const ModuleExprsSigs = OrderedDict{Module,ExprsSigs}
# Prefix container-element display with the fully-qualified name of this type
function Base.typeinfo_prefix(io::IO, mexs::ModuleExprsSigs)
    tname = typeof(mexs).name
    prefix = string(tname.module, '.', tname.name)
    return prefix, true
end
"""
fm = ModuleExprsSigs(mod::Module)
Initialize an empty `ModuleExprsSigs` for a file that is `include`d into `mod`.
"""
ModuleExprsSigs(mod::Module) = ModuleExprsSigs(mod=>ExprsSigs())
Base.isempty(fm::ModuleExprsSigs) = length(fm) == 1 && isempty(first(values(fm)))
"""
FileInfo(mexs::ModuleExprsSigs, cachefile="")
Structure to hold the per-module expressions found when parsing a
single file.
`mexs` holds the [`Revise.ModuleExprsSigs`](@ref) for the file.
Optionally, a `FileInfo` can also record the path to a cache file holding the original source code.
This is applicable only for precompiled modules and `Base`.
(This cache file is distinct from the original source file that might be edited by the
developer, and it will always hold the state
of the code when the package was precompiled or Julia's `Base` was built.)
When a cache is available, `mexs` will be empty until the file gets edited:
the original source code gets parsed only when a revision needs to be made.
Source cache files greatly reduce the overhead of using Revise.
"""
struct FileInfo
modexsigs::ModuleExprsSigs
cachefile::String
cacheexprs::Vector{Tuple{Module,Expr}} # "unprocessed" exprs, used to support @require
extracted::Base.RefValue{Bool} # true if signatures have been processed from modexsigs
parsed::Base.RefValue{Bool} # true if modexsigs have been parsed from cachefile
end
FileInfo(fm::ModuleExprsSigs, cachefile="") = FileInfo(fm, cachefile, Tuple{Module,Expr}[], Ref(false), Ref(false))
"""
FileInfo(mod::Module, cachefile="")
Initialize an empty FileInfo for a file that is `include`d into `mod`.
"""
FileInfo(mod::Module, cachefile::AbstractString="") = FileInfo(ModuleExprsSigs(mod), cachefile)
FileInfo(fm::ModuleExprsSigs, fi::FileInfo) = FileInfo(fm, fi.cachefile, copy(fi.cacheexprs), Ref(fi.extracted[]), Ref(fi.parsed[]))
# Render a FileInfo as "FileInfo(Mod=>exprs, ..., with cachefile path)"
function Base.show(io::IO, fi::FileInfo)
    print(io, "FileInfo(")
    for (mod, exsigs) in fi.modexsigs
        show(io, mod)
        print(io, "=>")
        show(io, exsigs)
        print(io, ", ")
    end
    isempty(fi.cachefile) || print(io, "with cachefile ", fi.cachefile)
    print(io, ')')
end
"""
PkgData(id, path, fileinfos::Dict{String,FileInfo})
A structure holding the data required to handle a particular package.
`path` is the top-level directory defining the package,
and `fileinfos` holds the [`Revise.FileInfo`](@ref) for each file defining the package.
For the `PkgData` associated with `Main` (e.g., for files loaded with [`includet`](@ref)),
the corresponding `path` entry will be empty.
"""
mutable struct PkgData
info::PkgFiles
fileinfos::Vector{FileInfo}
requirements::Vector{PkgId}
end
PkgData(id::PkgId, path) = PkgData(PkgFiles(id, path), FileInfo[], PkgId[])
PkgData(id::PkgId, ::Nothing) = PkgData(id, "")
function PkgData(id::PkgId)
bp = basepath(id)
if !isempty(bp)
bp = normpath(bp)
end
PkgData(id, bp)
end
# Abstraction interface for PkgData
Base.PkgId(pkgdata::PkgData) = PkgId(pkgdata.info)
CodeTracking.basedir(pkgdata::PkgData) = basedir(pkgdata.info)
CodeTracking.srcfiles(pkgdata::PkgData) = srcfiles(pkgdata.info)

# Compare file identifiers after conversion to String (handles non-String path types)
is_same_file(a, b) = String(a) == String(b)
# Return the index of `file` within `info`'s source-file list, or `nothing` if untracked.
function fileindex(info, file)
    srcs = srcfiles(info)
    for idx in eachindex(srcs)
        if is_same_file(srcs[idx], file)
            return idx
        end
    end
    return nothing
end
# Does `info` track `file`? Absolute paths are first made relative to the package root.
function hasfile(info, file)
    if isabspath(file)
        file = relpath(file, info)
    end
    fileindex(info, file) !== nothing
end
# Look up the FileInfo for `file` (a package-relative path); errors if untracked.
function fileinfo(pkgdata::PkgData, file::String)
    idx = fileindex(pkgdata, file)
    if idx === nothing
        error("file ", file, " not found")
    end
    return pkgdata.fileinfos[idx]
end
fileinfo(pkgdata::PkgData, i::Int) = pkgdata.fileinfos[i]
# Register a new file=>FileInfo pair; the file list and fileinfos stay in lockstep
function Base.push!(pkgdata::PkgData, pr::Pair{<:Any,FileInfo})
    push!(srcfiles(pkgdata), pr.first)
    push!(pkgdata.fileinfos, pr.second)
    return pkgdata
end
function Base.show(io::IO, pkgdata::PkgData)
    compact = get(io, :compact, false)
    print(io, "PkgData(")
    if compact
        # One-line summary: basedir plus aggregate counts across all files
        print(io, '"', pkgdata.info.basedir, "\", ")
        nexs, nsigs, nparsed = 0, 0, 0
        for fi in pkgdata.fileinfos
            thisnexs, thisnsigs = 0, 0
            for (mod, exsigs) in fi.modexsigs
                for (rex, sigs) in exsigs
                    thisnexs += 1
                    sigs === nothing && continue
                    thisnsigs += length(sigs)
                end
            end
            nexs += thisnexs
            nsigs += thisnsigs
            if thisnexs > 0
                nparsed += 1   # a file counts as "parsed" when any expressions were recorded
            end
        end
        print(io, nparsed, '/', length(pkgdata.fileinfos), " parsed files, ", nexs, " expressions, ", nsigs, " signatures)")
    else
        # Verbose form: one line per tracked file
        show(io, pkgdata.info.id)
        println(io, ", basedir \"", pkgdata.info.basedir, "\":")
        for (f, fi) in zip(pkgdata.info.files, pkgdata.fileinfos)
            print(io, "  \"", f, "\": ")
            show(IOContext(io, :compact=>true), fi)
            print(io, '\n')
        end
    end
end
# Order (pkgdata, file) pairs: a package sorts before any package that requires it,
# and within one package files sort by their position in the source-file list.
function pkgfileless((pkgdata1,file1)::Tuple{PkgData,String}, (pkgdata2,file2)::Tuple{PkgData,String})
    # implements a partial order
    PkgId(pkgdata1) ∈ pkgdata2.requirements && return true
    PkgId(pkgdata1) == PkgId(pkgdata2) && return fileindex(pkgdata1, file1) < fileindex(pkgdata2, file2)
    return false
end
"""
ReviseEvalException(loc::String, exc::Exception, stacktrace=nothing)
Provide additional location information about `exc`.
When running via the interpreter, the backtraces point to interpreter code rather than the original
culprit. This makes it possible to use `loc` to provide information about the frame backtrace,
and even to supply a fake backtrace.
If `stacktrace` is supplied it must be a `Vector{Any}` containing `(::StackFrame, n)` pairs where `n`
is the recursion count (typically 1).
"""
struct ReviseEvalException <: Exception
loc::String
exc::Exception
stacktrace::Union{Nothing,Vector{Any}}
end
ReviseEvalException(loc::AbstractString, exc::Exception) = ReviseEvalException(loc, exc, nothing)
# Show the wrapped exception, its (possibly synthetic) backtrace, and — unless
# `blame_revise=false` — a note pointing at the location Revise was evaluating.
function Base.showerror(io::IO, ex::ReviseEvalException; blame_revise::Bool=true)
    showerror(io, ex.exc)
    st = ex.stacktrace
    if st !== nothing
        Base.show_backtrace(io, st)
    end
    if blame_revise
        println(io, "\nRevise evaluation error at ", ex.loc)
    end
end
# Thrown when a git checkout is needed (e.g., to track Julia's own sources) but is absent
struct GitRepoException <: Exception
    filename::String
end
function Base.showerror(io::IO, ex::GitRepoException)
    print(io, "no repository at ", ex.filename, " to track stdlibs you must build Julia from source")
end
"""
thunk = TaskThunk(f, args)
To facilitate precompilation and reduce latency, we avoid creation of anonymous thunks.
`thunk` can be used as an argument in `schedule(Task(thunk))`.
"""
struct TaskThunk
f # deliberately untyped
args # deliberately untyped
end
@noinline (thunk::TaskThunk)() = thunk.f(thunk.args...)
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
# Like `relpath`, but returns `path` unchanged when `startpath` is empty.
function relpath_safe(path, startpath)
    if isempty(startpath)
        return path
    end
    return relpath(path, startpath)
end
# Convert `filename` to a path relative to the package root of `pkgdata`.
function Base.relpath(filename, pkgdata::PkgData)
    if isabspath(filename)
        # `Base.locate_package`, which is how `pkgdata` gets initialized, might strip pieces of the path.
        # For example, on Travis macOS the paths returned by `abspath`
        # can be preceded by "/private" which is not present in the value returned by `Base.locate_package`.
        idx = findfirst(basedir(pkgdata), filename)
        if idx !== nothing
            idx = first(idx)
            if idx > 1
                filename = filename[idx:end]   # drop any extra prefix (e.g., "/private")
            end
            filename = relpath_safe(filename, basedir(pkgdata))
        end
    elseif startswith(filename, "compiler")
        # Core.Compiler's pkgid includes "compiler/" in the path
        filename = relpath(filename, "compiler")
    end
    return filename
end
# Check the owner-write bit of `file`'s permission mask.
# (Note: this shadows the Base definition, but Revise doesn't need Base's.)
function iswritable(file::AbstractString)
    st = stat(file)
    return (uperm(st) & 0x02) != 0x00
end
"""
    unique_dirs(iter)

Return the `Set` of parent directories of the paths in `iter`.
"""
function unique_dirs(iter)
    udirs = Set{String}()
    for file in iter
        # take only the directory part; avoid destructuring into an unused
        # local that shadowed `Base.basename`
        dir = first(splitdir(file))
        push!(udirs, dir)
    end
    return udirs
end
# Like `isfile`, but also consults `cache_file_key` for an alternate spelling of the path
function file_exists(filename)
    filename = normpath(filename)
    isfile(filename) && return true
    alt = get(cache_file_key, filename, nothing)
    alt === nothing && return false
    return isfile(alt)
end
# True unless Julia was started with compiled modules disabled
use_compiled_modules() = Base.JLOptions().use_compiled_modules != 0
# Depth-first search for the first LineNumberNode inside `ex`; `nothing` if absent.
function firstline(ex::Expr)
    for arg in ex.args
        if isa(arg, LineNumberNode)
            return arg
        elseif isa(arg, Expr)
            ln = firstline(arg)
            ln === nothing || return ln
        end
    end
    return nothing
end
firstline(rex::RelocatableExpr) = firstline(rex.ex)

# Pick a corrected location for a method: remap `ln` through `fixpath`
newloc(methloc::LineNumberNode, ln, lno) = fixpath(ln)
# Render a (file, line) location as "abspath:line"; `nothing` means unknown.
location_string((file, line)::Tuple{AbstractString, Any},) = string(abspath(file), ':', line)
location_string((file, line)::Tuple{Symbol, Any},) = location_string((string(file), line))
location_string(::Nothing) = "unknown location"
# Distance in lines between two locations in the same file; `typemax(Int)`
# when the files differ or either file is not a Symbol.
function linediff(la::LineNumberNode, lb::LineNumberNode)
    fa, fb = la.file, lb.file
    if fa isa Symbol && fb isa Symbol && fa === fb
        return abs(la.line - lb.line)
    end
    return typemax(Int)
end
# Return the only non-trivial expression in ex, or ex itself
# (returns `nothing` when a block contains no non-trivial entries at all)
function unwrap(ex::Expr)
    if ex.head === :block || ex.head === :toplevel
        for (i, a) in enumerate(ex.args)
            if isa(a, Expr)
                # only unwrappable if everything after this Expr is trivial
                for j = i+1:length(ex.args)
                    istrivial(ex.args[j]) || return ex
                end
                return unwrap(a)
            elseif !istrivial(a)
                return ex
            end
        end
        return nothing
    end
    return ex
end
unwrap(rex::RelocatableExpr) = RelocatableExpr(unwrap(rex.ex))

# "Trivial" entries are ignorable when unwrapping: `nothing` or line-number info
istrivial(a) = a === nothing || isa(a, LineNumberNode)
# Strip any number of `where` wrappers, returning the enclosed expression
function unwrap_where(ex::Expr)
    while isexpr(ex, :where)
        ex = ex.args[1]
    end
    return ex
end
# Record `ex` (keyed as a RelocatableExpr) in `exsigs` with no signatures yet.
# For documented expressions, the documented body is also recorded separately.
function pushex!(exsigs::ExprsSigs, ex::Expr)
    uex = unwrap(ex)
    if is_doc_expr(uex)
        body = uex.args[4]
        # Don't trigger for exprs where the documented expression is just a signature
        # (e.g. `"docstr" f(x::Int)`, `"docstr" f(x::T) where T` etc.)
        if isa(body, Expr) && unwrap_where(body).head !== :call
            exsigs[RelocatableExpr(body)] = nothing
        end
        # Ensure the doc expression's 5th argument exists and is `false`
        # (NOTE(review): presumably controls doc-macro redefinition behavior — confirm)
        if length(uex.args) < 5
            push!(uex.args, false)
        else
            uex.args[5] = false
        end
    end
    exsigs[RelocatableExpr(ex)] = nothing
    return exsigs
end
## WatchList utilities

# Record the time of the most recent revision for this watch list
function updatetime!(wl::WatchList)
    wl.timestamp = time()
end
# Track a file; the convenience signatures all reduce to filename=>PkgId
Base.push!(wl::WatchList, filenameid::Pair{<:AbstractString,PkgId}) =
    push!(wl.trackedfiles, filenameid)
Base.push!(wl::WatchList, filenameid::Pair{<:AbstractString,PkgFiles}) =
    push!(wl, filenameid.first=>filenameid.second.id)
Base.push!(wl::WatchList, filenameid::Pair{<:AbstractString,PkgData}) =
    push!(wl, filenameid.first=>filenameid.second.info)
WatchList() = WatchList(time(), Dict{String,PkgId}())
Base.in(file, wl::WatchList) = haskey(wl.trackedfiles, file)

@static if Sys.isapple()
    # HFS+ rounds time to seconds, see #22
    # https://developer.apple.com/library/archive/technotes/tn/tn1150.html#HFSPlusDates
    newer(mtime, timestamp) = ceil(mtime) >= floor(timestamp)
else
    newer(mtime, timestamp) = mtime >= timestamp
end
"""
success = throwto_repl(e::Exception)
Try throwing `e` from the REPL's backend task. Returns `true` if the necessary conditions
were met and the throw can be expected to succeed. The throw is generated from another
task, so a `yield` will need to occur before it happens.
"""
function throwto_repl(e::Exception)
if isdefined(Base, :active_repl_backend) &&
!isnothing(Base.active_repl_backend) &&
Base.active_repl_backend.backend_task.state === :runnable &&
isempty(Base.Workqueue) &&
Base.active_repl_backend.in_eval
@async Base.throwto(Base.active_repl_backend.backend_task, e)
return true
end
return false
end
"""
    printf_maxsize(f::Function, io::IO, args...; maxchars=500, maxlines=20)

Print `args` to `io` line-by-line via `f` (e.g., `println`), truncating the output
to at most `maxchars` characters (middle replaced by '…') and at most `maxlines`
lines (middle replaced by a '⋮' line).
"""
function printf_maxsize(f::Function, io::IO, args...; maxchars::Integer=500, maxlines::Integer=20)
    # This is dumb but certain to work
    iotmp = IOBuffer()
    for a in args
        print(iotmp, a)
    end
    print(iotmp, '\n')
    seek(iotmp, 0)
    str = read(iotmp, String)
    if length(str) > maxchars
        # keep the beginning and end, elide the middle
        str = first(str, (maxchars+1)÷2) * "…" * last(str, maxchars - (maxchars+1)÷2)
    end
    lines = split(str, '\n')
    if length(lines) <= maxlines
        for line in lines
            f(io, line)
        end
        return
    end
    # Too many lines: print the first half, a vertical-ellipsis line, then the tail
    half = (maxlines+1) ÷ 2
    for i = 1:half
        f(io, lines[i])
    end
    maxlines > 1 && f(io, "⋮")   # BUGFIX: was a bare `⋮`, an undefined identifier
    for i = length(lines) - (maxlines-half) + 1:length(lines)
        f(io, lines[i])
    end
end
# Convenience wrappers around `printf_maxsize` that print with `println`
println_maxsize(args...; kwargs...) = println_maxsize(stdout, args...; kwargs...)
println_maxsize(io::IO, args...; kwargs...) = printf_maxsize(println, io, args...; kwargs...)
"""
trim_toplevel!(bt)
Truncate a list of instruction pointers, as obtained from `backtrace()` or `catch_backtrace()`,
at the first "top-level" call (e.g., as executed from the REPL prompt) or the
first entry corresponding to a method in Revise or its dependencies.
This is used to make stacktraces obtained with Revise more similar to those obtained
without Revise, while retaining one entry to reveal Revise's involvement.
"""
function trim_toplevel!(bt)
# return bt # uncomment this line if you're debugging Revise itself
n = itoplevel = length(bt)
for (i, t) in enumerate(bt)
sfs = StackTraces.lookup(t)
for sf in sfs
if sf.func === Symbol("top-level scope") || (let mi = sf.linfo
mi isa Core.MethodInstance && (let def = mi.def
def isa Method && def.module ∈ (JuliaInterpreter, LoweredCodeUtils, Revise)
end) end)
itoplevel = i
break
end
end
itoplevel < n && break
end
deleteat!(bt, itoplevel+1:length(bt))
return bt
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 2996 | using Revise, Test
using Revise.JuliaInterpreter: Frame
using Base.Meta: isexpr
isdefined(@__MODULE__, :do_test) || include("common.jl")
# Scratch module into which the test sources below get "loaded"
module BackEdgesTest
using Test
flag = false # this needs to be defined for the conditional part to work
end
do_test("Backedges") && @testset "Backedges" begin
frame = Frame(Base, :(max_values(T::Union{map(X -> Type{X}, BitIntegerSmall_types)...}) = 1 << (8*sizeof(T))))
src = frame.framecode.src
# Find the inner struct def for the anonymous function
idtype = findall(stmt->isexpr(stmt, :thunk) && isa(stmt.args[1], Core.CodeInfo), src.code)[end]
src2 = src.code[idtype].args[1]
methodinfo = Revise.MethodInfo()
isrequired = Revise.minimal_evaluation!(methodinfo, frame, :sigs)[1]
@test sum(isrequired) == length(src.code)-1 # skips the `return` at the end
src = """
# issue #249
flag = false
if flag
f() = 1
else
f() = 2
end
# don't do work in the interpreter that isn't needed for function definitions
# inspired by #300
const planetdiameters = Dict("Mercury" => 4_878)
planetdiameters["Venus"] = 12_104
function getdiameter(name)
return planetdiameters[name]
end
"""
mexs = Revise.parse_source!(Revise.ModuleExprsSigs(BackEdgesTest), src, "backedges_test.jl", BackEdgesTest)
Revise.moduledeps[BackEdgesTest] = Revise.DepDict()
Revise.instantiate_sigs!(mexs)
@test isempty(methods(BackEdgesTest.getdiameter))
@test !isdefined(BackEdgesTest, :planetdiameters)
@test length(Revise.moduledeps[BackEdgesTest]) == 1
@test Revise.moduledeps[BackEdgesTest][:flag] == Set([(BackEdgesTest, first(Iterators.drop(mexs[BackEdgesTest], 1))[1])])
# issue #399
src = """
for jy in ("j","y"), nu in (0,1)
jynu = Expr(:quote, Symbol(jy,nu))
jynuf = Expr(:quote, Symbol(jy,nu,"f"))
bjynu = Symbol("bessel",jy,nu)
if jy == "y"
@eval begin
\$bjynu(x::Float64) = nan_dom_err(ccall((\$jynu,libm), Float64, (Float64,), x), x)
\$bjynu(x::Float32) = nan_dom_err(ccall((\$jynuf,libm), Float32, (Float32,), x), x)
\$bjynu(x::Float16) = Float16(\$bjynu(Float32(x)))
end
else
@eval begin
\$bjynu(x::Float64) = ccall((\$jynu,libm), Float64, (Float64,), x)
\$bjynu(x::Float32) = ccall((\$jynuf,libm), Float32, (Float32,), x)
\$bjynu(x::Float16) = Float16(\$bjynu(Float32(x)))
end
end
@eval begin
\$bjynu(x::Real) = \$bjynu(float(x))
\$bjynu(x::Complex) = \$(Symbol("bessel",jy))(\$nu,x)
end
end
"""
ex = Meta.parse(src)
@test Revise.methods_by_execution(BackEdgesTest, ex) isa Tuple
# Issue #428
src = """
@testset for i in (1, 2)
@test i == i
end
"""
ex = Meta.parse(src)
@test Revise.methods_by_execution(BackEdgesTest, ex) isa Tuple
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 203 | module CalleeError
inner(A, i) = A[i]
function outer(A)
    s = zero(eltype(A))
    # NOTE(review): the loop deliberately runs one index past the end, so
    # `inner` throws a BoundsError from inside a callee — presumably the
    # point of this fixture; confirm against the test that includes it.
    for i = 1:length(A)+1
        s += inner(A, i)
    end
    return s
end
s = outer([1,2,3])   # errors at module-load time (see note above)
foo(x::Float32) = 1
end
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 2763 | using Random
using Base.Meta: isexpr
# Testsets will reset the default RNG after each testset to make
# tests more reproducible, but we need to be able to create new random
# directories (see julia #24445)
const RNG = copy(Random.default_rng())
const to_remove = String[]
randtmp() = joinpath(tempdir(), randstring(RNG, 10))
function newtestdir()
testdir = randtmp()
mkdir(testdir)
push!(to_remove, testdir)
push!(LOAD_PATH, testdir)
return testdir
end
@static if Sys.isapple()
const mtimedelay = 3.1 # so the defining files are old enough not to trigger mtime criterion
else
const mtimedelay = 0.1
end
yry() = (sleep(mtimedelay); revise(); sleep(mtimedelay))
function collectexprs(rex::Revise.RelocatableExpr)
items = []
for item in Revise.LineSkippingIterator(rex.ex.args)
push!(items, isa(item, Expr) ? Revise.RelocatableExpr(item) : item)
end
items
end
function get_docstring(obj)
while !isa(obj, AbstractString)
fn = fieldnames(typeof(obj))
if :content ∈ fn
obj = obj.content[1]
elseif :code ∈ fn
obj = obj.code
else
error("unknown object ", obj)
end
end
return obj
end
function get_code(f, typ)
# Julia 1.5 introduces ":code_coverage_effect" exprs
ci = code_typed(f, typ)[1].first
code = copy(ci.code)
while !isempty(code) && isexpr(code[1], :code_coverage_effect)
popfirst!(code)
end
return code
end
function do_test(name)
runtest = isempty(ARGS) || name in ARGS
# Sometimes we get "no output received for 10 minutes" on CI,
# to debug this it may be useful to know what test is being run.
runtest && haskey(ENV, "CI") && println("Starting test ", name)
return runtest
end
# Delete any precompile cache file for `pkgname` across all depots
function rm_precompile(pkgname::AbstractString)
    filepath = Base.cache_file_entry(Base.PkgId(pkgname))
    isa(filepath, Tuple) && (filepath = filepath[1]*filepath[2]) # Julia 1.3+
    for depot in DEPOT_PATH
        fullpath = joinpath(depot, filepath)
        isfile(fullpath) && rm(fullpath)
    end
end
# True if IR statement `stmt` is a `return` of exactly `val`
function isreturning(stmt, val)
    stmt isa Core.ReturnNode || return false
    return stmt.val == val
end

# True if `stmt` returns the slot/argument numbered `val`
function isreturning_slot(stmt, val)
    stmt isa Core.ReturnNode || return false
    v = stmt.val
    if v isa Core.SlotNumber
        return v.id == val
    elseif v isa Core.Argument
        return v.n == val
    end
    return false
end
# Optional mode: force per-file watching for the test run, then strip the flag
# so it isn't mistaken for a test name by `do_test`.
if !isempty(ARGS) && "REVISE_TESTS_WATCH_FILES" ∈ ARGS
    Revise.watching_files[] = true
    println("Running tests with `Revise.watching_files[] = true`")
    idx = findall(isequal("REVISE_TESTS_WATCH_FILES"), ARGS)
    deleteat!(ARGS, idx)
end
# Extract a message string from a ParseError or pass a raw string through
function errmsg(err::Base.Meta.ParseError)
    return err.msg
end
function errmsg(err::AbstractString)
    return err
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 306 | using Revise, Test
# This test should only be run if you have a very small inotify limit
@testset "inotify" begin
logs, _ = Test.collect_test_logs() do
Revise.track("revisetest.jl")
end
sleep(0.1)
@test !isempty(logs)
@test any(rec->occursin("inotify", rec.message), logs)
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 295 | using Revise, InteractiveUtils, Test
@eval Revise juliadir = ARGS[1]
@test Revise.juliadir != Revise.basebuilddir
@test Revise.juliadir != Revise.fallback_juliadir()
@show Revise.juliadir
# https://github.com/timholy/Revise.jl/issues/697
@test Revise.definition(@which(Float32(π))) isa Expr
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 1859 | # Test that one can overload `Revise.parse_source!` and several Base methods to allow revision of
# non-Julia code.
using Revise
using Test
struct MyFile
file::String
end
Base.abspath(file::MyFile) = MyFile(Base.abspath(file.file))
Base.isabspath(file::MyFile) = Base.isabspath(file.file)
Base.joinpath(str::String, file::MyFile) = MyFile(Base.joinpath(str, file.file))
Base.normpath(file::MyFile) = MyFile(Base.normpath(file.file))
Base.isfile(file::MyFile) = Base.isfile(file.file)
Base.findfirst(str::String, file::MyFile) = Base.findfirst(str, file.file)
Base.String(file::MyFile) = file.file
function make_module(file::MyFile)
exprs = []
for line in eachline(file.file)
val, name = split(line, '=')
push!(exprs, :(function $(Symbol(name))() $val end))
end
Expr(:toplevel, :(baremodule fake_lang
$(exprs...)
end), :(using .fake_lang))
end
function Base.include(mod::Module, file::MyFile)
Core.eval(mod, make_module(file))
end
Base.include(file::MyFile) = Base.include(Core.Main, file)
function Revise.parse_source!(mod_exprs_sigs::Revise.ModuleExprsSigs, file::MyFile, mod::Module; kwargs...)
ex = make_module(file)
Revise.process_source!(mod_exprs_sigs, ex, file, mod; kwargs...)
end
@testset "non-jl revisions" begin
path = joinpath(@__DIR__, "test.program")
try
cp(joinpath(@__DIR__, "fake_lang", "test.program"), path, force=true)
sleep(mtimedelay)
m=MyFile(path)
includet(m)
yry() # comes from test/common.jl
@test fake_lang.y() == "2"
@test fake_lang.x() == "1"
sleep(mtimedelay)
cp(joinpath(@__DIR__, "fake_lang", "new_test.program"), path, force=true)
yry()
@test fake_lang.x() == "2"
@test_throws MethodError fake_lang.y()
finally
rm(path, force=true)
end
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 916 | using Revise
using Test
include("common.jl")
@testset "Polling" begin
@test Revise.polling_files[]
testdir = randtmp()
mkdir(testdir)
push!(LOAD_PATH, testdir)
dn = joinpath(testdir, "Polling", "src")
mkpath(dn)
srcfile = joinpath(dn, "Polling.jl")
joinpath(dn, "Polling.jl")
open(srcfile, "w") do io
println(io, """
__precompile__(false)
module Polling
f() = 1
end
""")
end
sleep(0.5) # let the source file age a bit
@eval using Polling
@test Polling.f() == 1
# I'm not sure why 2 sleeps are better than one, but here it seems to make a difference
sleep(0.1)
sleep(0.1)
open(srcfile, "w") do io
println(io, """
__precompile__(false)
module Polling
f() = 2
end
""")
end
# Wait through the polling interval
yry()
sleep(7)
yry()
@test Polling.f() == 2
rm(testdir; force=true, recursive=true)
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 571 | # This runs only on CI. The goal is to populate the `.julia/compiled/v*` directory
# with some additional files, so that `filter_valid_cachefiles` has to run.
# This is to catch problems like #460.
using Pkg
using Test
Pkg.add(PackageSpec(name="EponymTuples", version="0.2.0"))
using EponymTuples # force compilation
id = Base.PkgId(EponymTuples)
paths = Base.find_all_in_cache_path(id)
Pkg.rm("EponymTuples") # we don't need it anymore
path = first(paths)
base, ext = splitext(path)
mv(path, base*"blahblah"*ext; force=true)
Pkg.add(PackageSpec(name="EponymTuples"))
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 307 | __precompile__(false)
module ReviseTest
square(x) = x^2
cube(x) = x^4 # should be x^3, but this simulates a mistake
module Internal
mult2(x) = 2*x
mult3(x) = 4*x # oops
mult4(x) = -x
"""
This has a docstring
"""
unchanged(x) = x
unchanged2(@nospecialize(x)) = x
end # Internal
end # ReviseTest
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 297 | __precompile__(false)
module ReviseTest  # error-throwing variant of the fixture; tests assert backtrace file/line info from this version
square(x) = x^2
cube(x) = error("cube") # deliberately throws so tests can inspect the backtrace
fourth(x) = x^4 # this is an addition to the file
module Internal
mult2(x) = error("mult2") # deliberately throws so tests can inspect the backtrace
mult3(x) = 3*x
"""
This has a docstring
"""
unchanged(x) = x
unchanged2(@nospecialize(x)) = x
end # Internal
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 276 | __precompile__(false)
module ReviseTest  # corrected variant of the fixture; revising to this version fixes cube/mult3, adds fourth, and drops mult4
square(x) = x^2
cube(x) = x^3
fourth(x) = x^4 # this is an addition to the file
module Internal
mult2(x) = 2*x
mult3(x) = 3*x
"""
This has a docstring
"""
unchanged(x) = x
unchanged2(@nospecialize(x)) = x
end # Internal
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 131755 | # REVISE: DO NOT PARSE # For people with JULIA_REVISE_INCLUDE=1
using Revise
using Revise.CodeTracking
using Revise.JuliaInterpreter
using Test

# Revise's methods should introduce no ambiguities; check before any
# test-local definitions are added.
@test isempty(detect_ambiguities(Revise))

using Pkg, Unicode, Distributed, InteractiveUtils, REPL, UUIDs
import LibGit2
using Revise.OrderedCollections: OrderedSet
using Test: collect_test_logs
using Base.CoreLogging: Debug,Info
using Revise.CodeTracking: line_is_decl

# In addition to using this for the "More arg-modifying macros" test below,
# this package is used on CI to test what happens when you have multiple
# *.ji files for the package.
using EponymTuples

include("common.jl")
throwing_function(bt) = bt[2]
# A junk module that we can evaluate into
module ReviseTestPrivate

# Simple concrete type used by signature-related tests
struct Inner
    x::Float64
end

# Macro that discards its arguments and expands to the literal 1
macro changeto1(args...)
    return 1
end

# Macro that expands to its argument unchanged (escaped into caller scope)
macro donothing(ex)
    esc(ex)
end

# Macro that annotates its argument expression with `::Int`
macro addint(ex)
    :($(esc(ex))::$(esc(Int)))
end

# Macro that declares an empty generic function with the given name
macro empty_function(name)
    return esc(quote
        function $name end
    end)
end

# The following two submodules are for testing #199
module A
f(x::Int) = 1
end
module B
f(x::Int) = 1
module Core end   # defines a local `Core` that shadows Base's inside B
end

end
# Create a fresh, anonymously named submodule inside ReviseTestPrivate that
# tests can evaluate scratch definitions into without polluting shared
# namespaces.  Returns the new module.
function private_module()
    name = gensym()
    return Core.eval(ReviseTestPrivate, :(module $name end))
end
sig_type_exprs(ex) = Revise.sig_type_exprs(Main, ex) # just for testing purposes
# How does `Pair` print in compact mode on this Julia version?  Render a
# sample Dict compactly and slice out the text between the key and the value
# (the `=>` operator plus any surrounding spaces), so expected-output strings
# elsewhere can adapt to Dict-printing changes across Julia versions.
const pair_op_compact = let
    rendered = sprint(Dict(1 => 2)) do io, d
        print(IOContext(io, :compact => true), d)
    end
    rendered[7:end-2]
end
const issue639report = []
@testset "Revise" begin
do_test("PkgData") && @testset "PkgData" begin
    # Related to #358
    # A PkgData built for the top-level module `Main` has no package
    # directory on disk, so its base directory should be empty.
    id = Base.PkgId(Main)
    pd = Revise.PkgData(id)
    @test isempty(Revise.basedir(pd))
end
do_test("Package contents") && @testset "Package contents" begin
    # For a loaded package, `pkg_fileinfo` should report a source path that
    # lies inside the package's own directory.
    id = Base.PkgId(EponymTuples)
    path, mods_files_mtimes = Revise.pkg_fileinfo(id)
    @test occursin("EponymTuples", path)
end
do_test("LineSkipping") && @testset "LineSkipping" begin
rex = Revise.RelocatableExpr(quote
f(x) = x^2
g(x) = sin(x)
end)
@test length(rex.ex.args) == 4 # including the line number expressions
exs = collectexprs(rex)
@test length(exs) == 2
@test isequal(exs[1], Revise.RelocatableExpr(:(f(x) = x^2)))
@test hash(exs[1]) == hash(Revise.RelocatableExpr(:(f(x) = x^2)))
@test !isequal(exs[2], Revise.RelocatableExpr(:(f(x) = x^2)))
@test isequal(exs[2], Revise.RelocatableExpr(:(g(x) = sin(x))))
@test !isequal(exs[1], Revise.RelocatableExpr(:(g(x) = sin(x))))
@test string(rex) == """
quote
f(x) = begin
x ^ 2
end
g(x) = begin
sin(x)
end
end"""
end
do_test("Equality and hashing") && @testset "Equality and hashing" begin
    # issue #233
    # Literals that differ in type (1 vs 1.0) or value must compare (and
    # hash) as distinct; identical expressions must be equal with equal hashes.
    @test isequal(Revise.RelocatableExpr(:(x = 1)), Revise.RelocatableExpr(:(x = 1)))
    @test !isequal(Revise.RelocatableExpr(:(x = 1)), Revise.RelocatableExpr(:(x = 1.0)))
    @test hash(Revise.RelocatableExpr(:(x = 1))) == hash(Revise.RelocatableExpr(:(x = 1)))
    @test hash(Revise.RelocatableExpr(:(x = 1))) != hash(Revise.RelocatableExpr(:(x = 1.0)))
    @test hash(Revise.RelocatableExpr(:(x = 1))) != hash(Revise.RelocatableExpr(:(x = 2)))
end
do_test("Parse errors") && @testset "Parse errors" begin
    md = Revise.ModuleExprsSigs(Main)
    # The exception type for parse failures changed in Julia 1.10.
    errtype = Base.VERSION < v"1.10" ? LoadError : Base.Meta.ParseError
    # `h{x)` is malformed, so parse_source! must throw; the leading empty
    # block exercises issue #109 (blocks should parse fine).
    @test_throws errtype Revise.parse_source!(md, """
    begin # this block should parse correctly, cf. issue #109
    end
    f(x) = 1
    g(x) = 2
    h{x) = 3 # error
    k(x) = 4
    """, "test", Main)
    # Issue #448
    testdir = newtestdir()
    file = joinpath(testdir, "badfile.jl")
    # Truncated definition: `while` with no matching `end`.
    write(file, """
    function g()
    while t
    c =
    k
    end
    """)
    try
        includet(file)
    catch err
        @test isa(err, errtype)
        if Base.VERSION < v"1.10"
            @test err.file == file
            @test endswith(err.error, "requires end")
        else
            @test occursin("Expected `end`", err.msg)
        end
    end
end
do_test("REPL input") && @testset "REPL input" begin
    # issue #573
    # `revise_first` wraps a REPL input expression (presumably so revisions
    # run before evaluating it — see Revise's REPL integration); wrapping
    # `nothing` should yield a 2-element toplevel block whose final (user)
    # expression is still `nothing`.
    retex = Revise.revise_first(nothing)
    @test retex.head === :toplevel
    @test length(retex.args) == 2 && retex.args[end] === nothing
end
do_test("Signature extraction") && @testset "Signature extraction" begin
jidir = dirname(dirname(pathof(JuliaInterpreter)))
scriptfile = joinpath(jidir, "test", "toplevel_script.jl")
modex = :(module Toplevel include($scriptfile) end)
mod = eval(modex)
mexs = Revise.parse_source(scriptfile, mod)
Revise.instantiate_sigs!(mexs)
nms = names(mod; all=true)
modeval, modinclude = getfield(mod, :eval), getfield(mod, :include)
failed = []
n = 0
for fsym in nms
f = getfield(mod, fsym)
isa(f, Base.Callable) || continue
(f === modeval || f === modinclude) && continue
for m in methods(f)
# MyInt8 brings in lots of number & type machinery, which leads
# to wandering through Base files. At this point we just want
# to test whether we have the basics down, so for now avoid
# looking in any file other than the script
string(m.file) == scriptfile || continue
isa(definition(m), Expr) || push!(failed, m.sig)
n += 1
end
end
@test isempty(failed)
@test n > length(nms)/2
# Method expressions with bad line number info
ex = quote
function nolineinfo(x)
y = x^2 + 2x + 1
@warn "oops"
return y
end
end
ex2 = ex.args[end].args[end]
for (i, arg) in enumerate(ex2.args)
if isa(arg, LineNumberNode)
ex2.args[i] = LineNumberNode(0, :none)
end
end
mexs = Revise.ModuleExprsSigs(ReviseTestPrivate)
mexs[ReviseTestPrivate][Revise.RelocatableExpr(ex)] = nothing
logs, _ = Test.collect_test_logs() do
Revise.instantiate_sigs!(mexs; mode=:eval)
end
@test isempty(logs)
@test isdefined(ReviseTestPrivate, :nolineinfo)
end
do_test("Comparison and line numbering") && @testset "Comparison and line numbering" begin
# We'll also use these tests to try out the logging system
rlogger = Revise.debug_logger()
fl1 = joinpath(@__DIR__, "revisetest.jl")
fl2 = joinpath(@__DIR__, "revisetest_revised.jl")
fl3 = joinpath(@__DIR__, "revisetest_errors.jl")
# Copy the files to a temporary file. This is to ensure that file name doesn't change
# in docstring macros and backtraces.
tmpfile = joinpath(tempdir(), randstring(10))*".jl"
push!(to_remove, tmpfile)
cp(fl1, tmpfile)
include(tmpfile) # So the modules are defined
# test the "mistakes"
@test ReviseTest.cube(2) == 16
@test ReviseTest.Internal.mult3(2) == 8
@test ReviseTest.Internal.mult4(2) == -2
# One method will be deleted, for log testing we need to grab it while we still have it
delmeth = first(methods(ReviseTest.Internal.mult4))
mmult3 = @which ReviseTest.Internal.mult3(2)
mexsold = Revise.parse_source(tmpfile, Main)
Revise.instantiate_sigs!(mexsold)
mcube = @which ReviseTest.cube(2)
cp(fl2, tmpfile; force=true)
mexsnew = Revise.parse_source(tmpfile, Main)
mexsnew = Revise.eval_revised(mexsnew, mexsold)
@test ReviseTest.cube(2) == 8
@test ReviseTest.Internal.mult3(2) == 6
@test length(mexsnew) == 3
@test haskey(mexsnew, ReviseTest) && haskey(mexsnew, ReviseTest.Internal)
dvs = collect(mexsnew[ReviseTest])
@test length(dvs) == 3
(def, val) = dvs[1]
@test isequal(Revise.unwrap(def), Revise.RelocatableExpr(:(square(x) = x^2)))
@test val == [Tuple{typeof(ReviseTest.square),Any}]
@test Revise.firstline(Revise.unwrap(def)).line == 5
m = @which ReviseTest.square(1)
@test m.line == 5
@test whereis(m) == (tmpfile, 5)
@test Revise.RelocatableExpr(definition(m)) == Revise.unwrap(def)
(def, val) = dvs[2]
@test isequal(Revise.unwrap(def), Revise.RelocatableExpr(:(cube(x) = x^3)))
@test val == [Tuple{typeof(ReviseTest.cube),Any}]
m = @which ReviseTest.cube(1)
@test m.line == 7
@test whereis(m) == (tmpfile, 7)
@test Revise.RelocatableExpr(definition(m)) == Revise.unwrap(def)
(def, val) = dvs[3]
@test isequal(Revise.unwrap(def), Revise.RelocatableExpr(:(fourth(x) = x^4)))
@test val == [Tuple{typeof(ReviseTest.fourth),Any}]
m = @which ReviseTest.fourth(1)
@test m.line == 9
@test whereis(m) == (tmpfile, 9)
@test Revise.RelocatableExpr(definition(m)) == Revise.unwrap(def)
dvs = collect(mexsnew[ReviseTest.Internal])
@test length(dvs) == 5
(def, val) = dvs[1]
@test isequal(Revise.unwrap(def), Revise.RelocatableExpr(:(mult2(x) = 2*x)))
@test val == [Tuple{typeof(ReviseTest.Internal.mult2),Any}]
@test Revise.firstline(Revise.unwrap(def)).line == 13
m = @which ReviseTest.Internal.mult2(1)
@test m.line == 11
@test whereis(m) == (tmpfile, 13)
@test Revise.RelocatableExpr(definition(m)) == Revise.unwrap(def)
(def, val) = dvs[2]
@test isequal(Revise.unwrap(def), Revise.RelocatableExpr(:(mult3(x) = 3*x)))
@test val == [Tuple{typeof(ReviseTest.Internal.mult3),Any}]
m = @which ReviseTest.Internal.mult3(1)
@test m.line == 14
@test whereis(m) == (tmpfile, 14)
@test Revise.RelocatableExpr(definition(m)) == Revise.unwrap(def)
@test_throws MethodError ReviseTest.Internal.mult4(2)
function cmpdiff(record, msg; kwargs...)
record.message == msg
for (kw, val) in kwargs
logval = record.kwargs[kw]
for (v, lv) in zip(val, logval)
isa(v, Expr) && (v = Revise.RelocatableExpr(v))
isa(lv, Expr) && (lv = Revise.RelocatableExpr(Revise.unwrap(lv)))
@test lv == v
end
end
return nothing
end
logs = filter(r->r.level==Debug && r.group=="Action", rlogger.logs)
@test length(logs) == 9
cmpdiff(logs[1], "DeleteMethod"; deltainfo=(Tuple{typeof(ReviseTest.cube),Any}, MethodSummary(mcube)))
cmpdiff(logs[2], "DeleteMethod"; deltainfo=(Tuple{typeof(ReviseTest.Internal.mult3),Any}, MethodSummary(mmult3)))
cmpdiff(logs[3], "DeleteMethod"; deltainfo=(Tuple{typeof(ReviseTest.Internal.mult4),Any}, MethodSummary(delmeth)))
cmpdiff(logs[4], "Eval"; deltainfo=(ReviseTest, :(cube(x) = x^3)))
cmpdiff(logs[5], "Eval"; deltainfo=(ReviseTest, :(fourth(x) = x^4)))
stmpfile = Symbol(tmpfile)
cmpdiff(logs[6], "LineOffset"; deltainfo=(Any[Tuple{typeof(ReviseTest.Internal.mult2),Any}], LineNumberNode(11,stmpfile)=>LineNumberNode(13,stmpfile)))
cmpdiff(logs[7], "Eval"; deltainfo=(ReviseTest.Internal, :(mult3(x) = 3*x)))
cmpdiff(logs[8], "LineOffset"; deltainfo=(Any[Tuple{typeof(ReviseTest.Internal.unchanged),Any}], LineNumberNode(18,stmpfile)=>LineNumberNode(19,stmpfile)))
cmpdiff(logs[9], "LineOffset"; deltainfo=(Any[Tuple{typeof(ReviseTest.Internal.unchanged2),Any}], LineNumberNode(20,stmpfile)=>LineNumberNode(21,stmpfile)))
@test length(Revise.actions(rlogger)) == 6 # by default LineOffset is skipped
@test length(Revise.actions(rlogger; line=true)) == 9
@test_broken length(Revise.diffs(rlogger)) == 2
io = PipeBuffer()
foreach(rec -> show(io, rec), rlogger.logs)
foreach(rec -> show(io, rec; verbose=false), rlogger.logs)
@test count("Revise.LogRecord", read(io, String)) > 8
empty!(rlogger.logs)
# Backtraces. Note this doesn't test the line-number correction
# because both of these are revised definitions.
cp(fl3, tmpfile; force=true)
mexsold = mexsnew
mexsnew = Revise.parse_source(tmpfile, Main)
mexsnew = Revise.eval_revised(mexsnew, mexsold)
try
ReviseTest.cube(2)
@test false
catch err
@test isa(err, ErrorException) && err.msg == "cube"
bt = throwing_function(stacktrace(catch_backtrace()))
@test bt.func === :cube && bt.file == Symbol(tmpfile) && bt.line == 7
end
try
ReviseTest.Internal.mult2(2)
@test false
catch err
@test isa(err, ErrorException) && err.msg == "mult2"
bt = throwing_function(stacktrace(catch_backtrace()))
@test bt.func === :mult2 && bt.file == Symbol(tmpfile) && bt.line == 13
end
logs = filter(r->r.level==Debug && r.group=="Action", rlogger.logs)
@test length(logs) == 4
cmpdiff(logs[3], "Eval"; deltainfo=(ReviseTest, :(cube(x) = error("cube"))))
cmpdiff(logs[4], "Eval"; deltainfo=(ReviseTest.Internal, :(mult2(x) = error("mult2"))))
# Turn off future logging
Revise.debug_logger(; min_level=Info)
# Gensymmed symbols
rex1 = Revise.RelocatableExpr(macroexpand(Main, :(t = @elapsed(foo(x)))))
rex2 = Revise.RelocatableExpr(macroexpand(Main, :(t = @elapsed(foo(x)))))
@test isequal(rex1, rex2)
@test hash(rex1) == hash(rex2)
rex3 = Revise.RelocatableExpr(macroexpand(Main, :(t = @elapsed(bar(x)))))
@test !isequal(rex1, rex3)
@test hash(rex1) != hash(rex3)
sym1, sym2 = gensym(:hello), gensym(:hello)
rex1 = Revise.RelocatableExpr(:(x = $sym1))
rex2 = Revise.RelocatableExpr(:(x = $sym2))
@test isequal(rex1, rex2)
@test hash(rex1) == hash(rex2)
sym3 = gensym(:world)
rex3 = Revise.RelocatableExpr(:(x = $sym3))
@test isequal(rex1, rex3)
@test hash(rex1) == hash(rex3)
# coverage
rex = convert(Revise.RelocatableExpr, :(a = 1))
@test Revise.striplines!(rex) isa Revise.RelocatableExpr
@test copy(rex) !== rex
end
do_test("Display") && @testset "Display" begin
io = IOBuffer()
show(io, Revise.RelocatableExpr(:(@inbounds x[2])))
str = String(take!(io))
@test str == ":(@inbounds x[2])"
mod = private_module()
file = joinpath(@__DIR__, "revisetest.jl")
Base.include(mod, file)
mexs = Revise.parse_source(file, mod)
Revise.instantiate_sigs!(mexs)
# io = IOBuffer()
print(IOContext(io, :compact=>true), mexs)
str = String(take!(io))
@test str == "OrderedCollections.OrderedDict($mod$(pair_op_compact)ExprsSigs(<1 expressions>, <0 signatures>), $mod.ReviseTest$(pair_op_compact)ExprsSigs(<2 expressions>, <2 signatures>), $mod.ReviseTest.Internal$(pair_op_compact)ExprsSigs(<6 expressions>, <5 signatures>))"
exs = mexs[getfield(mod, :ReviseTest)]
# io = IOBuffer()
print(IOContext(io, :compact=>true), exs)
@test String(take!(io)) == "ExprsSigs(<2 expressions>, <2 signatures>)"
print(IOContext(io, :compact=>false), exs)
str = String(take!(io))
@test str == "ExprsSigs with the following expressions: \n :(square(x) = begin\n x ^ 2\n end)\n :(cube(x) = begin\n x ^ 4\n end)"
sleep(0.1) # wait for EponymTuples to hit the cache
pkgdata = Revise.pkgdatas[Base.PkgId(EponymTuples)]
file = first(Revise.srcfiles(pkgdata))
Revise.maybe_parse_from_cache!(pkgdata, file)
print(io, pkgdata)
str = String(take!(io))
@test occursin("EponymTuples.jl\": FileInfo", str)
@test occursin(r"with cachefile.*EponymTuples.*ji", str)
print(IOContext(io, :compact=>true), pkgdata)
str = String(take!(io))
@test occursin("1/1 parsed files", str)
end
do_test("File paths") && @testset "File paths" begin
testdir = newtestdir()
for wf in (Revise.watching_files[] ? (true,) : (true, false))
for (pcflag, fbase) in ((true, "pc"), (false, "npc"),) # precompiled & not
modname = uppercase(fbase) * (wf ? "WF" : "WD")
fbase = fbase * (wf ? "wf" : "wd")
pcexpr = pcflag ? "" : :(__precompile__(false))
# Create a package with the following structure:
# src/PkgName.jl # PC.jl = precompiled, NPC.jl = nonprecompiled
# src/file2.jl
# src/subdir/file3.jl
# src/subdir/file4.jl
# exploring different ways of expressing the `include` statement
dn = joinpath(testdir, modname, "src")
mkpath(dn)
write(joinpath(dn, modname*".jl"), """
$pcexpr
module $modname
export $(fbase)1, $(fbase)2, $(fbase)3, $(fbase)4, $(fbase)5, using_macro_$(fbase)
$(fbase)1() = 1
include("file2.jl")
include("subdir/file3.jl")
include(joinpath(@__DIR__, "subdir", "file4.jl"))
otherfile = "file5.jl"
include(otherfile)
# Update order check: modifying `some_macro_` to return -6 doesn't change the
# return value of `using_macro_` (issue #20) unless `using_macro_` is also updated,
# *in this order*:
# 1. update the `@some_macro_` definition
# 2. update the `using_macro_` definition
macro some_macro_$(fbase)()
return 6
end
using_macro_$(fbase)() = @some_macro_$(fbase)()
end
""")
write(joinpath(dn, "file2.jl"), "$(fbase)2() = 2")
mkdir(joinpath(dn, "subdir"))
write(joinpath(dn, "subdir", "file3.jl"), "$(fbase)3() = 3")
write(joinpath(dn, "subdir", "file4.jl"), "$(fbase)4() = 4")
write(joinpath(dn, "file5.jl"), "$(fbase)5() = 5")
sleep(mtimedelay)
@eval using $(Symbol(modname))
sleep(mtimedelay)
fn1, fn2 = Symbol("$(fbase)1"), Symbol("$(fbase)2")
fn3, fn4 = Symbol("$(fbase)3"), Symbol("$(fbase)4")
fn5 = Symbol("$(fbase)5")
fn6 = Symbol("using_macro_$(fbase)")
@eval @test $(fn1)() == 1
@eval @test $(fn2)() == 2
@eval @test $(fn3)() == 3
@eval @test $(fn4)() == 4
@eval @test $(fn5)() == 5
@eval @test $(fn6)() == 6
m = @eval first(methods($fn1))
rex = Revise.RelocatableExpr(definition(m))
@test rex == Revise.RelocatableExpr(:( $fn1() = 1 ))
# Check that definition returns copies
rex2 = deepcopy(rex)
rex.ex.args[end].args[end] = 2
@test Revise.RelocatableExpr(definition(m)) == rex2
@test Revise.RelocatableExpr(definition(m)) != rex
# CodeTracking methods
m3 = first(methods(eval(fn3)))
m3file = joinpath(dn, "subdir", "file3.jl")
@test whereis(m3) == (m3file, 1)
@test signatures_at(m3file, 1) == [m3.sig]
@test signatures_at(eval(Symbol(modname)), joinpath("src", "subdir", "file3.jl"), 1) == [m3.sig]
id = Base.PkgId(eval(Symbol(modname))) # for testing #596
pkgdata = Revise.pkgdatas[id]
# Change the definition of function 1 (easiest to just rewrite the whole file)
write(joinpath(dn, modname*".jl"), """
$pcexpr
module $modname
export $(fbase)1, $(fbase)2, $(fbase)3, $(fbase)4, $(fbase)5, using_macro_$(fbase)
$(fbase)1() = -1
include("file2.jl")
include("subdir/file3.jl")
include(joinpath(@__DIR__, "subdir", "file4.jl"))
otherfile = "file5.jl"
include(otherfile)
macro some_macro_$(fbase)()
return -6
end
using_macro_$(fbase)() = @some_macro_$(fbase)()
end
""") # just for fun we skipped the whitespace
yry()
fi = pkgdata.fileinfos[1]
@test fi.extracted[] # issue 596
@eval @test $(fn1)() == -1
@eval @test $(fn2)() == 2
@eval @test $(fn3)() == 3
@eval @test $(fn4)() == 4
@eval @test $(fn5)() == 5
@eval @test $(fn6)() == 6 # because it hasn't been re-macroexpanded
@test revise(eval(Symbol(modname)))
@eval @test $(fn6)() == -6
# Redefine function 2
write(joinpath(dn, "file2.jl"), "$(fbase)2() = -2")
yry()
@eval @test $(fn1)() == -1
@eval @test $(fn2)() == -2
@eval @test $(fn3)() == 3
@eval @test $(fn4)() == 4
@eval @test $(fn5)() == 5
@eval @test $(fn6)() == -6
write(joinpath(dn, "subdir", "file3.jl"), "$(fbase)3() = -3")
yry()
@eval @test $(fn1)() == -1
@eval @test $(fn2)() == -2
@eval @test $(fn3)() == -3
@eval @test $(fn4)() == 4
@eval @test $(fn5)() == 5
@eval @test $(fn6)() == -6
write(joinpath(dn, "subdir", "file4.jl"), "$(fbase)4() = -4")
yry()
@eval @test $(fn1)() == -1
@eval @test $(fn2)() == -2
@eval @test $(fn3)() == -3
@eval @test $(fn4)() == -4
@eval @test $(fn5)() == 5
@eval @test $(fn6)() == -6
write(joinpath(dn, "file5.jl"), "$(fbase)5() = -5")
yry()
@eval @test $(fn1)() == -1
@eval @test $(fn2)() == -2
@eval @test $(fn3)() == -3
@eval @test $(fn4)() == -4
@eval @test $(fn5)() == -5
@eval @test $(fn6)() == -6
# Check that the list of files is complete
pkgdata = Revise.pkgdatas[Base.PkgId(modname)]
for file = [joinpath("src", modname*".jl"), joinpath("src", "file2.jl"),
joinpath("src", "subdir", "file3.jl"),
joinpath("src", "subdir", "file4.jl"),
joinpath("src", "file5.jl")]
@test Revise.hasfile(pkgdata, file)
end
end
end
# Remove the precompiled file(s)
rm_precompile("PCWF")
Revise.watching_files[] || rm_precompile("PCWD")
# Submodules (issue #142)
srcdir = joinpath(testdir, "Mysupermodule", "src")
subdir = joinpath(srcdir, "Mymodule")
mkpath(subdir)
write(joinpath(srcdir, "Mysupermodule.jl"), """
module Mysupermodule
include("Mymodule/Mymodule.jl")
end
""")
write(joinpath(subdir, "Mymodule.jl"), """
module Mymodule
include("filesub.jl")
end
""")
write(joinpath(subdir, "filesub.jl"), "func() = 1")
sleep(mtimedelay)
@eval using Mysupermodule
sleep(mtimedelay)
@test Mysupermodule.Mymodule.func() == 1
write(joinpath(subdir, "filesub.jl"), "func() = 2")
yry()
@test Mysupermodule.Mymodule.func() == 2
rm_precompile("Mymodule")
rm_precompile("Mysupermodule")
# Test files paths that can't be statically parsed
dn = joinpath(testdir, "LoopInclude", "src")
mkpath(dn)
write(joinpath(dn, "LoopInclude.jl"), """
module LoopInclude
export li_f, li_g
for fn in ("file1.jl", "file2.jl")
include(fn)
end
end
""")
write(joinpath(dn, "file1.jl"), "li_f() = 1")
write(joinpath(dn, "file2.jl"), "li_g() = 2")
sleep(mtimedelay)
@eval using LoopInclude
sleep(mtimedelay)
@test li_f() == 1
@test li_g() == 2
write(joinpath(dn, "file1.jl"), "li_f() = -1")
yry()
@test li_f() == -1
rm_precompile("LoopInclude")
# Multiple packages in the same directory (issue #228)
write(joinpath(testdir, "A228.jl"), """
module A228
using B228
export f228
f228(x) = 3 * g228(x)
end
""")
write(joinpath(testdir, "B228.jl"), """
module B228
export g228
g228(x) = 4x + 2
end
""")
sleep(mtimedelay)
using A228
sleep(mtimedelay)
@test f228(3) == 42
write(joinpath(testdir, "B228.jl"), """
module B228
export g228
g228(x) = 4x + 1
end
""")
yry()
@test f228(3) == 39
rm_precompile("A228")
rm_precompile("B228")
# uncoupled packages in the same directory (issue #339)
write(joinpath(testdir, "A339.jl"), """
module A339
f() = 1
end
""")
write(joinpath(testdir, "B339.jl"), """
module B339
f() = 1
end
""")
sleep(mtimedelay)
using A339, B339
sleep(mtimedelay)
@test A339.f() == 1
@test B339.f() == 1
sleep(mtimedelay)
write(joinpath(testdir, "A339.jl"), """
module A339
f() = 2
end
""")
yry()
@test A339.f() == 2
@test B339.f() == 1
sleep(mtimedelay)
write(joinpath(testdir, "B339.jl"), """
module B339
f() = 2
end
""")
yry()
@test A339.f() == 2
@test B339.f() == 2
rm_precompile("A339")
rm_precompile("B339")
# Combining `include` with empty functions (issue #758)
write(joinpath(testdir, "Issue758.jl"), """
module Issue758
global gvar = true
function f end
include("Issue758helperfile.jl")
end
""")
write(joinpath(testdir, "Issue758helperfile.jl"), "")
sleep(mtimedelay)
using Issue758
sleep(mtimedelay)
@test_throws MethodError Issue758.f()
sleep(mtimedelay)
write(joinpath(testdir, "Issue758.jl"), """
module Issue758
global gvar = true
function f end
f() = 1
include("Issue758helperfile.jl")
end
""")
yry()
@test Issue758.f() == 1
rm_precompile("Issue758")
pop!(LOAD_PATH)
end
# issue #131
do_test("Base & stdlib file paths") && @testset "Base & stdlib file paths" begin
    @test isfile(Revise.basesrccache)
    targetfn = Base.Filesystem.path_separator * joinpath("good", "path", "mydir", "myfile.jl")
    # `fixpath` must remap a build-machine prefix onto the local one,
    # whether or not the bad prefix carries a trailing separator.
    @test Revise.fixpath("/some/bad/path/mydir/myfile.jl"; badpath="/some/bad/path", goodpath="/good/path") == targetfn
    @test Revise.fixpath("/some/bad/path/mydir/myfile.jl"; badpath="/some/bad/path/", goodpath="/good/path") == targetfn
    @test isfile(Revise.fixpath(Base.find_source_file("array.jl")))
    # Every file recorded as included into Base should resolve to a real
    # on-disk file after path fixing; collect any failures for diagnosis.
    failedfiles = Tuple{String,String}[]
    for (_, file) in Base._included_files   # the module component is unused here
        fixedfile = Revise.fixpath(file)
        isfile(fixedfile) || push!(failedfiles, (file, fixedfile))
    end
    # Show the failing (original, fixed) pairs before asserting, so CI logs
    # are informative on failure.
    isempty(failedfiles) || display(failedfiles)
    @test isempty(failedfiles)
end
do_test("Namespace") && @testset "Namespace" begin
# Issues #579, #239, and #627
testdir = newtestdir()
dn = joinpath(testdir, "Namespace", "src")
mkpath(dn)
write(joinpath(dn, "Namespace.jl"), """
module Namespace
struct X end
cos(::X) = 20
end
""")
sleep(mtimedelay)
@eval using Namespace
@test Namespace.cos(Namespace.X()) == 20
@test_throws MethodError Base.cos(Namespace.X())
sleep(mtimedelay)
write(joinpath(dn, "Namespace.jl"), """
module Namespace
struct X end
sin(::Int) = 10
Base.cos(::X) = 20
# From #627
module Foos
struct Foo end
end
using .Foos: Foo
end
""")
yry()
@test Namespace.sin(0) == 10
@test Base.sin(0) == 0
@test Base.cos(Namespace.X()) == 20
@test_throws MethodError Namespace.cos(Namespace.X())
rm_precompile("Namespace")
pop!(LOAD_PATH)
end
do_test("Multiple definitions") && @testset "Multiple definitions" begin
# This simulates a copy/paste/save "error" from one file to another
# ref https://github.com/timholy/CodeTracking.jl/issues/55
testdir = newtestdir()
dn = joinpath(testdir, "Multidef", "src")
mkpath(dn)
write(joinpath(dn, "Multidef.jl"), """
module Multidef
include("utils.jl")
end
""")
write(joinpath(dn, "utils.jl"), "repeated(x) = x+1")
sleep(mtimedelay)
@eval using Multidef
@test Multidef.repeated(3) == 4
sleep(mtimedelay)
write(joinpath(dn, "Multidef.jl"), """
module Multidef
include("utils.jl")
repeated(x) = x+1
end
""")
yry()
@test Multidef.repeated(3) == 4
sleep(mtimedelay)
write(joinpath(dn, "utils.jl"), "\n")
yry()
@test Multidef.repeated(3) == 4
rm_precompile("Multidef")
pop!(LOAD_PATH)
end
do_test("Recursive types (issue #417)") && @testset "Recursive types (issue #417)" begin
    testdir = newtestdir()
    fn = joinpath(testdir, "recursive.jl")
    # A struct whose field type mentions the struct itself; `includet` must
    # handle the self-reference without erroring (issue #417).
    write(fn, """
    module RecursiveTypes
    struct Foo
    x::Vector{Foo}
    Foo() = new(Foo[])
    end
    end
    """)
    sleep(mtimedelay)
    includet(fn)
    @test isa(RecursiveTypes.Foo().x, Vector{RecursiveTypes.Foo})
    pop!(LOAD_PATH)
end
# issue #318
do_test("Cross-module extension") && @testset "Cross-module extension" begin
testdir = newtestdir()
dnA = joinpath(testdir, "CrossModA", "src")
mkpath(dnA)
write(joinpath(dnA, "CrossModA.jl"), """
module CrossModA
foo(x) = "default"
end
""")
dnB = joinpath(testdir, "CrossModB", "src")
mkpath(dnB)
write(joinpath(dnB, "CrossModB.jl"), """
module CrossModB
import CrossModA
CrossModA.foo(x::Int) = 1
end
""")
sleep(mtimedelay)
@eval using CrossModA, CrossModB
@test CrossModA.foo("") == "default"
@test CrossModA.foo(0) == 1
sleep(mtimedelay)
write(joinpath(dnB, "CrossModB.jl"), """
module CrossModB
import CrossModA
CrossModA.foo(x::Int) = 2
end
""")
yry()
@test CrossModA.foo("") == "default"
@test CrossModA.foo(0) == 2
write(joinpath(dnB, "CrossModB.jl"), """
module CrossModB
import CrossModA
CrossModA.foo(x::Int) = 3
end
""")
yry()
@test CrossModA.foo("") == "default"
@test CrossModA.foo(0) == 3
rm_precompile("CrossModA")
rm_precompile("CrossModB")
pop!(LOAD_PATH)
end
# issue #36
do_test("@__FILE__") && @testset "@__FILE__" begin
testdir = newtestdir()
dn = joinpath(testdir, "ModFILE", "src")
mkpath(dn)
write(joinpath(dn, "ModFILE.jl"), """
module ModFILE
mf() = @__FILE__, 1
end
""")
sleep(mtimedelay)
@eval using ModFILE
sleep(mtimedelay)
@test ModFILE.mf() == (joinpath(dn, "ModFILE.jl"), 1)
write(joinpath(dn, "ModFILE.jl"), """
module ModFILE
mf() = @__FILE__, 2
end
""")
yry()
@test ModFILE.mf() == (joinpath(dn, "ModFILE.jl"), 2)
rm_precompile("ModFILE")
pop!(LOAD_PATH)
end
do_test("Revision order") && @testset "Revision order" begin
testdir = newtestdir()
dn = joinpath(testdir, "Order1", "src")
mkpath(dn)
write(joinpath(dn, "Order1.jl"), """
module Order1
include("file1.jl")
include("file2.jl")
end
""")
write(joinpath(dn, "file1.jl"), "# a comment")
write(joinpath(dn, "file2.jl"), "# a comment")
sleep(mtimedelay)
@eval using Order1
sleep(mtimedelay)
# we want Revise to process files the order file1.jl, file2.jl, but let's save them in the opposite order
write(joinpath(dn, "file2.jl"), "f(::Ord1) = 1")
sleep(mtimedelay)
write(joinpath(dn, "file1.jl"), "struct Ord1 end")
yry()
@test Order1.f(Order1.Ord1()) == 1
# A case in which order cannot be determined solely from file order
dn = joinpath(testdir, "Order2", "src")
mkpath(dn)
write(joinpath(dn, "Order2.jl"), """
module Order2
include("file.jl")
end
""")
write(joinpath(dn, "file.jl"), "# a comment")
sleep(mtimedelay)
@eval using Order2
sleep(mtimedelay)
write(joinpath(dn, "Order2.jl"), """
module Order2
include("file.jl")
f(::Ord2) = 1
end
""")
sleep(mtimedelay)
write(joinpath(dn, "file.jl"), "struct Ord2 end")
# TODO: remove also the log messages check when this test is fixed
@test_logs (:error, r"Failed to revise") (:warn, r"The running code does not match the saved version") yry()
@test_broken Order2.f(Order2.Ord2()) == 1
# Resolve it with retry
Revise.retry()
@test Order2.f(Order2.Ord2()) == 1
# Cross-module dependencies
dn3 = joinpath(testdir, "Order3", "src")
mkpath(dn3)
write(joinpath(dn3, "Order3.jl"), """
module Order3
using Order2
end
""")
sleep(mtimedelay)
@eval using Order3
sleep(mtimedelay)
write(joinpath(dn3, "Order3.jl"), """
module Order3
using Order2
g(::Order2.Ord2a) = 1
end
""")
sleep(mtimedelay)
write(joinpath(dn, "file.jl"), """
struct Ord2 end
struct Ord2a end
""")
yry()
@test Order3.g(Order2.Ord2a()) == 1
rm_precompile("Order1")
rm_precompile("Order2")
pop!(LOAD_PATH)
end
# issue #8 and #197
do_test("Module docstring") && @testset "Module docstring" begin
testdir = newtestdir()
dn = joinpath(testdir, "ModDocstring", "src")
mkpath(dn)
write(joinpath(dn, "ModDocstring.jl"), """
" Ahoy! "
module ModDocstring
include("dependency.jl")
f() = 1
end
""")
write(joinpath(dn, "dependency.jl"), "")
sleep(mtimedelay)
@eval using ModDocstring
sleep(mtimedelay)
@test ModDocstring.f() == 1
ds = @doc(ModDocstring)
@test get_docstring(ds) == "Ahoy! "
write(joinpath(dn, "ModDocstring.jl"), """
" Ahoy! "
module ModDocstring
include("dependency.jl")
f() = 2
end
""")
yry()
@test ModDocstring.f() == 2
ds = @doc(ModDocstring)
@test get_docstring(ds) == "Ahoy! "
write(joinpath(dn, "ModDocstring.jl"), """
" Hello! "
module ModDocstring
include("dependency.jl")
f() = 3
end
""")
yry()
@test ModDocstring.f() == 3
ds = @doc(ModDocstring)
@test get_docstring(ds) == "Hello! "
rm_precompile("ModDocstring")
# issue #197
dn = joinpath(testdir, "ModDocstring2", "src")
mkpath(dn)
write(joinpath(dn, "ModDocstring2.jl"), """
"docstring"
module ModDocstring2
"docstring for .Sub"
module Sub
end
end
""")
sleep(mtimedelay)
@eval using ModDocstring2
sleep(mtimedelay)
ds = @doc(ModDocstring2)
@test get_docstring(ds) == "docstring"
ds = @doc(ModDocstring2.Sub)
@test get_docstring(ds) == "docstring for .Sub"
write(joinpath(dn, "ModDocstring2.jl"), """
"updated docstring"
module ModDocstring2
"updated docstring for .Sub"
module Sub
end
end
""")
yry()
ds = @doc(ModDocstring2)
@test get_docstring(ds) == "updated docstring"
ds = @doc(ModDocstring2.Sub)
@test get_docstring(ds) == "updated docstring for .Sub"
rm_precompile("ModDocstring2")
pop!(LOAD_PATH)
end
# Verify that Revise updates docstrings when a tracked file changes, via both
# the ordinary revision path and a manual interpreter-based evaluation.
# Also covers issue #583 (adding a docstring to a previously undocumented function).
do_test("Changing docstrings") && @testset "Changing docstring" begin
# Compiled mode covers most docstring changes, so we have to go to
# special effort to test the older interpreter-based solution.
testdir = newtestdir()
dn = joinpath(testdir, "ChangeDocstring", "src")
mkpath(dn)
write(joinpath(dn, "ChangeDocstring.jl"), """
module ChangeDocstring
"f" f() = 1
g() = 1
end
""")
# sleeps around writes/loads let file mtimes differ so revisions are detected
sleep(mtimedelay)
@eval using ChangeDocstring
sleep(mtimedelay)
@test ChangeDocstring.f() == 1
ds = @doc(ChangeDocstring.f)
@test get_docstring(ds) == "f"
@test ChangeDocstring.g() == 1
ds = @doc(ChangeDocstring.g)
# message differs across Julia versions, hence the two accepted strings
@test get_docstring(ds) in ("No documentation found.", "No documentation found for private symbol.")
# Ordinary route
write(joinpath(dn, "ChangeDocstring.jl"), """
module ChangeDocstring
"h" f() = 1
"g" g() = 1
end
""")
yry()
ds = @doc(ChangeDocstring.f)
@test get_docstring(ds) == "h"
ds = @doc(ChangeDocstring.g)
@test get_docstring(ds) == "g"
# Now manually change the docstring
ex = quote "g" f() = 1 end
lwr = Meta.lower(ChangeDocstring, ex)
frame = Frame(ChangeDocstring, lwr.args[1])
methodinfo = Revise.MethodInfo()
docexprs = Revise.DocExprs()
# drive the interpreter-based path directly (mode=:sigs), bypassing file watching
ret = Revise.methods_by_execution!(JuliaInterpreter.finish_and_return!, methodinfo,
docexprs, frame, trues(length(frame.framecode.src.code)); mode=:sigs)
ds = @doc(ChangeDocstring.f)
@test get_docstring(ds) == "g"
rm_precompile("ChangeDocstring")
# Test for #583
dn = joinpath(testdir, "FirstDocstring", "src")
mkpath(dn)
write(joinpath(dn, "FirstDocstring.jl"), """
module FirstDocstring
g() = 1
end
""")
sleep(mtimedelay)
@eval using FirstDocstring
sleep(mtimedelay)
@test FirstDocstring.g() == 1
ds = @doc(FirstDocstring.g)
@test get_docstring(ds) in ("No documentation found.", "No documentation found for private symbol.")
write(joinpath(dn, "FirstDocstring.jl"), """
module FirstDocstring
"g" g() = 1
end
""")
yry()
ds = @doc(FirstDocstring.g)
@test get_docstring(ds) == "g"
rm_precompile("FirstDocstring")
pop!(LOAD_PATH)
end
# Check that docstrings attached to bare method signatures (no body) are parsed
# as doc expressions and their "bodies" are not treated as standalone expressions.
do_test("doc expr signature") && @testset "Docstring attached to signatures" begin
md = Revise.ModuleExprsSigs(Main)
Revise.parse_source!(md, """
module DocstringSigsOnly
function f end
"basecase" f(x)
"basecase with type" f(x::Int)
"basecase no varname" f(::Float64)
"where" f(x::T) where T <: Int8
"where no varname" f(::T) where T <: String
end
""", "test2", Main)
# Simply test that the "bodies" of the doc exprs are not included as
# standalone expressions.
@test length(md[Main.DocstringSigsOnly]) == 6 # 1 func + 5 doc exprs
end
# Parse a Base source file with many multi-target docstrings ("""str""" func1, func2)
# twice and check the two parses produce the same expression keys.
do_test("Undef in docstrings") && @testset "Undef in docstrings" begin
fn = Base.find_source_file("abstractset.jl") # has lots of examples of """str""" func1, func2
mexsold = Revise.parse_source(fn, Base)
mexsnew = Revise.parse_source(fn, Base)
odict = mexsold[Base]
ndict = mexsnew[Base]
for (k, v) in odict
@test haskey(ndict, k)
end
end
# Issue #309: docstrings generated inside macro expansions must survive revision
# of an unrelated method in the same module.
do_test("Macro docstrings (issue #309)") && @testset "Macro docstrings (issue #309)" begin
testdir = newtestdir()
dn = joinpath(testdir, "MacDocstring", "src")
mkpath(dn)
write(joinpath(dn, "MacDocstring.jl"), """
module MacDocstring
macro myconst(name, val)
quote
\"\"\"
mydoc
\"\"\"
const \$(esc(name)) = \$val
end
end
@myconst c 1.2
f() = 1
end # module
""")
sleep(mtimedelay)
@eval using MacDocstring
sleep(mtimedelay)
@test MacDocstring.f() == 1
ds = @doc(MacDocstring.c)
@test strip(get_docstring(ds)) == "mydoc"
# only f() changes; the macro-generated docstring for c must remain intact
write(joinpath(dn, "MacDocstring.jl"), """
module MacDocstring
macro myconst(name, val)
quote
\"\"\"
mydoc
\"\"\"
const \$(esc(name)) = \$val
end
end
@myconst c 1.2
f() = 2
end # module
""")
yry()
@test MacDocstring.f() == 2
ds = @doc(MacDocstring.c)
@test strip(get_docstring(ds)) == "mydoc"
rm_precompile("MacDocstring")
pop!(LOAD_PATH)
end
# issue #165
# Verify that adding/removing @inline/@noinline on a method takes effect after
# revision, checked by inspecting the lowered code of small wrapper functions:
# inlined callees collapse to a single slot-returning statement, @noinline ones
# leave an :invoke call behind.
do_test("Changing @inline annotations") && @testset "Changing @inline annotations" begin
testdir = newtestdir()
dn = joinpath(testdir, "PerfAnnotations", "src")
mkpath(dn)
write(joinpath(dn, "PerfAnnotations.jl"), """
module PerfAnnotations
@inline hasinline(x) = x
check_hasinline(x) = hasinline(x)
@noinline hasnoinline(x) = x
check_hasnoinline(x) = hasnoinline(x)
notannot1(x) = x
check_notannot1(x) = notannot1(x)
notannot2(x) = x
check_notannot2(x) = notannot2(x)
end
""")
sleep(mtimedelay)
@eval using PerfAnnotations
sleep(mtimedelay)
@test PerfAnnotations.check_hasinline(3) == 3
@test PerfAnnotations.check_hasnoinline(3) == 3
@test PerfAnnotations.check_notannot1(3) == 3
@test PerfAnnotations.check_notannot2(3) == 3
code = get_code(PerfAnnotations.check_hasinline, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
code = get_code(PerfAnnotations.check_hasnoinline, Tuple{Int})
@test length(code) == 2 && code[1].head === :invoke
code = get_code(PerfAnnotations.check_notannot1, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
code = get_code(PerfAnnotations.check_notannot2, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
# swap the annotations: strip them from the first pair, add them to the second
write(joinpath(dn, "PerfAnnotations.jl"), """
module PerfAnnotations
hasinline(x) = x
check_hasinline(x) = hasinline(x)
hasnoinline(x) = x
check_hasnoinline(x) = hasnoinline(x)
@inline notannot1(x) = x
check_notannot1(x) = notannot1(x)
@noinline notannot2(x) = x
check_notannot2(x) = notannot2(x)
end
""")
yry()
@test PerfAnnotations.check_hasinline(3) == 3
@test PerfAnnotations.check_hasnoinline(3) == 3
@test PerfAnnotations.check_notannot1(3) == 3
@test PerfAnnotations.check_notannot2(3) == 3
code = get_code(PerfAnnotations.check_hasinline, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
code = get_code(PerfAnnotations.check_hasnoinline, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
code = get_code(PerfAnnotations.check_notannot1, Tuple{Int})
@test length(code) == 1 && isreturning_slot(code[1], 2)
code = get_code(PerfAnnotations.check_notannot2, Tuple{Int})
@test length(code) == 2 && code[1].head === :invoke
rm_precompile("PerfAnnotations")
pop!(LOAD_PATH)
end
# Covers several macro-related revision bugs:
#   #174 - changing a macro definition requires an explicit revise(Module)
#          before call sites expanded with the old macro pick up the change;
#   #435 - macro-generated signatures must be registered with CodeTracking;
#   #568 - macro *execution* with line-number manipulation (via MacroTools.@q);
#   #792 - macros that create empty functions.
do_test("Revising macros") && @testset "Revising macros" begin
# issue #174
testdir = newtestdir()
dn = joinpath(testdir, "MacroRevision", "src")
mkpath(dn)
write(joinpath(dn, "MacroRevision.jl"), """
module MacroRevision
macro change(foodef)
foodef.args[2].args[2] = 1
esc(foodef)
end
@change foo(x) = 0
end
""")
sleep(mtimedelay)
@eval using MacroRevision
sleep(mtimedelay)
@test MacroRevision.foo("hello") == 1
write(joinpath(dn, "MacroRevision.jl"), """
module MacroRevision
macro change(foodef)
foodef.args[2].args[2] = 2
esc(foodef)
end
@change foo(x) = 0
end
""")
yry()
# yry() alone does not re-expand @change foo(x); an explicit revise() is needed
@test MacroRevision.foo("hello") == 1
revise(MacroRevision)
@test MacroRevision.foo("hello") == 2
write(joinpath(dn, "MacroRevision.jl"), """
module MacroRevision
macro change(foodef)
foodef.args[2].args[2] = 3
esc(foodef)
end
@change foo(x) = 0
end
""")
yry()
@test MacroRevision.foo("hello") == 2
revise(MacroRevision)
@test MacroRevision.foo("hello") == 3
rm_precompile("MacroRevision")
# issue #435
dn = joinpath(testdir, "MacroSigs", "src")
mkpath(dn)
write(joinpath(dn, "MacroSigs.jl"), """
module MacroSigs
end
""")
sleep(mtimedelay)
@eval using MacroSigs
sleep(mtimedelay)
write(joinpath(dn, "MacroSigs.jl"), """
module MacroSigs
macro testmac(fname)
esc(quote
function some_fun end
\$fname() = 1
end)
end
@testmac blah
end
""")
yry()
@test MacroSigs.blah() == 1
# the macro-generated method must be registered with CodeTracking
@test haskey(CodeTracking.method_info, (@which MacroSigs.blah()).sig)
rm_precompile("MacroSigs")
# Issue #568 (a macro *execution* bug)
dn = joinpath(testdir, "MacroLineNos568", "src")
mkpath(dn)
write(joinpath(dn, "MacroLineNos568.jl"), """
module MacroLineNos568
using MacroTools: @q
function my_fun end
macro some_macro(value)
return esc(@q \$MacroLineNos568.my_fun() = \$value)
end
@some_macro 20
end
""")
sleep(mtimedelay)
@eval using MacroLineNos568
sleep(mtimedelay)
@test MacroLineNos568.my_fun() == 20
write(joinpath(dn, "MacroLineNos568.jl"), """
module MacroLineNos568
using MacroTools: @q
function my_fun end
macro some_macro(value)
return esc(@q \$MacroLineNos568.my_fun() = \$value)
end
@some_macro 30
end
""")
yry()
@test MacroLineNos568.my_fun() == 30
rm_precompile("MacroLineNos568")
# Macros that create empty functions (another macro *execution* bug, issue #792)
file = tempname()
write(file, "@empty_function issue792f1\n")
sleep(mtimedelay)
includet(ReviseTestPrivate, file)
sleep(mtimedelay)
@test isempty(methods(ReviseTestPrivate.issue792f1))
open(file, "a") do f
println(f, "@empty_function issue792f2")
end
yry()
@test isempty(methods(ReviseTestPrivate.issue792f2))
rm(file)
pop!(LOAD_PATH)
end
# Issue #183: macros that appear in *argument position* (EponymTuples'
# @eponymargs rewrites the signature) must still allow the method body to be
# revised; the revision[] counter tracks which version of the body ran.
do_test("More arg-modifying macros") && @testset "More arg-modifying macros" begin
# issue #183
testdir = newtestdir()
dn = joinpath(testdir, "ArgModMacros", "src")
mkpath(dn)
write(joinpath(dn, "ArgModMacros.jl"), """
module ArgModMacros
using EponymTuples
const revision = Ref(0)
function hyper_loglikelihood(@eponymargs(μ, σ, LΩ), @eponymargs(w̃s, α̃s, β̃s))
revision[] = 1
loglikelihood_normal(@eponymtuple(μ, σ, LΩ), vcat(w̃s, α̃s, β̃s))
end
loglikelihood_normal(@eponymargs(μ, σ, LΩ), stuff) = stuff
end
""")
sleep(mtimedelay)
@eval using ArgModMacros
sleep(mtimedelay)
@test ArgModMacros.hyper_loglikelihood((μ=1, σ=2, LΩ=3), (w̃s=4, α̃s=5, β̃s=6)) == [4,5,6]
@test ArgModMacros.revision[] == 1
write(joinpath(dn, "ArgModMacros.jl"), """
module ArgModMacros
using EponymTuples
const revision = Ref(0)
function hyper_loglikelihood(@eponymargs(μ, σ, LΩ), @eponymargs(w̃s, α̃s, β̃s))
revision[] = 2
loglikelihood_normal(@eponymtuple(μ, σ, LΩ), vcat(w̃s, α̃s, β̃s))
end
loglikelihood_normal(@eponymargs(μ, σ, LΩ), stuff) = stuff
end
""")
yry()
@test ArgModMacros.hyper_loglikelihood((μ=1, σ=2, LΩ=3), (w̃s=4, α̃s=5, β̃s=6)) == [4,5,6]
@test ArgModMacros.revision[] == 2
rm_precompile("ArgModMacros")
pop!(LOAD_PATH)
end
# Issue #27: when a method body changes but its position in the file does not,
# the recorded method file/line info must stay consistent after revision.
do_test("Line numbers") && @testset "Line numbers" begin
# issue #27
testdir = newtestdir()
modname = "LineNumberMod"
dn = joinpath(testdir, modname, "src")
mkpath(dn)
write(joinpath(dn, modname*".jl"), "module $modname include(\"incl.jl\") end")
# the leading filler lines push the definitions away from line 1
write(joinpath(dn, "incl.jl"), """
0
0
1
2
3
4
5
6
7
8
foo(x) = x+5
foo(y::Int) = y-51
""")
sleep(mtimedelay)
@eval using LineNumberMod
sleep(mtimedelay)
lines = Int[]
files = String[]
for m in methods(LineNumberMod.foo)
push!(files, String(m.file))
push!(lines, m.line)
end
@test all(f->endswith(string(f), "incl.jl"), files)
# change only the body of foo(x); line numbers must be preserved
write(joinpath(dn, "incl.jl"), """
0
0
1
2
3
4
5
6
7
8
foo(x) = x+6
foo(y::Int) = y-51
""")
yry()
for m in methods(LineNumberMod.foo)
@test endswith(string(m.file), "incl.jl")
@test m.line ∈ lines
end
rm_precompile("LineNumberMod")
pop!(LOAD_PATH)
end
# After a revision shifts code down by a line, stacktraces and method tables
# must report the *updated* line numbers (via Revise.update_stacktrace_lineno!).
do_test("Line numbers in backtraces and warnings") && @testset "Line numbers in backtraces and warnings" begin
filename = randtmp() * ".jl"
write(filename, """
function triggered(iserr::Bool, iswarn::Bool)
iserr && error("error")
iswarn && @warn "Information"
return nothing
end
""")
sleep(mtimedelay)
includet(filename)
sleep(mtimedelay)
try
triggered(true, false)
@test false
catch err
st = stacktrace(catch_backtrace())
Revise.update_stacktrace_lineno!(st)
bt = throwing_function(st)
# error("error") is on line 2 of the original file
@test bt.file == Symbol(filename) && bt.line == 2
end
io = IOBuffer()
if isdefined(Base, :methodloc_callback)
print(io, methods(triggered))
# line_is_decl: some Julia versions report the declaration line, others the body
mline = line_is_decl ? 1 : 2
@test occursin(filename * ":$mline", String(take!(io)))
end
# prepend a comment so every line number shifts down by one
write(filename, """
# A comment to change the line numbers
function triggered(iserr::Bool, iswarn::Bool)
iserr && error("error")
iswarn && @warn "Information"
return nothing
end
""")
yry()
try
triggered(true, false)
@test false
catch err
bt = throwing_function(Revise.update_stacktrace_lineno!(stacktrace(catch_backtrace())))
@test bt.file == Symbol(filename) && bt.line == 3
end
st = try
triggered(true, false)
@test false
catch err
stacktrace(catch_backtrace())
end
targetstr = basename(filename * ":3")
Base.show_backtrace(io, st)
@test occursin(targetstr, String(take!(io)))
# Long stacktraces take a different path, test this too
while length(st) < 100
st = vcat(st, st)
end
Base.show_backtrace(io, st)
@test occursin(targetstr, String(take!(io)))
if isdefined(Base, :methodloc_callback)
print(io, methods(triggered))
mline = line_is_decl ? 2 : 3
@test occursin(basename(filename * ":$mline"), String(take!(io)))
end
push!(to_remove, filename)
end
# Issue #43
# A submodule added to an already-loaded package must be evaluated on revision.
do_test("New submodules") && @testset "New submodules" begin
testdir = newtestdir()
dn = joinpath(testdir, "Submodules", "src")
mkpath(dn)
write(joinpath(dn, "Submodules.jl"), """
module Submodules
f() = 1
end
""")
sleep(mtimedelay)
@eval using Submodules
sleep(mtimedelay)
@test Submodules.f() == 1
write(joinpath(dn, "Submodules.jl"), """
module Submodules
f() = 1
module Sub
g() = 2
end
end
""")
yry()
@test Submodules.f() == 1
@test Submodules.Sub.g() == 2
rm_precompile("Submodules")
pop!(LOAD_PATH)
end
# Issue #718: a global defined in a submodule declared in the same file as the
# parent module must be updated when its value changes.
do_test("Submodule in same file (#718)") && @testset "Submodule in same file (#718)" begin
testdir = newtestdir()
dn = joinpath(testdir, "TestPkg718", "src")
mkpath(dn)
write(joinpath(dn, "TestPkg718.jl"), """
module TestPkg718
module TestModule718
export _VARIABLE_UNASSIGNED
global _VARIABLE_UNASSIGNED = -84.0
end
using .TestModule718
end
""")
sleep(mtimedelay)
@eval using TestPkg718
sleep(mtimedelay)
@test TestPkg718._VARIABLE_UNASSIGNED == -84.0
write(joinpath(dn, "TestPkg718.jl"), """
module TestPkg718
module TestModule718
export _VARIABLE_UNASSIGNED
global _VARIABLE_UNASSIGNED = -83.0
end
using .TestModule718
end
""")
yry()
@test TestPkg718._VARIABLE_UNASSIGNED == -83.0
rm_precompile("TestPkg718")
pop!(LOAD_PATH)
end
# Issue #341: writing a *different* file in the package directory must not
# trigger a revision; only replacing the tracked file itself does.
do_test("Timing (issue #341)") && @testset "Timing (issue #341)" begin
testdir = newtestdir()
dn = joinpath(testdir, "Timing", "src")
mkpath(dn)
write(joinpath(dn, "Timing.jl"), """
module Timing
f(x) = 1
end
""")
sleep(mtimedelay)
@eval using Timing
sleep(mtimedelay)
@test Timing.f(nothing) == 1
tmpfile = joinpath(dn, "Timing_temp.jl")
# writing the temp file must not affect the loaded module
write(tmpfile, """
module Timing
f(x) = 2
end
""")
yry()
@test Timing.f(nothing) == 1
# moving the temp file over the tracked file is what triggers the revision
mv(tmpfile, pathof(Timing), force=true)
yry()
@test Timing.f(nothing) == 2
rm_precompile("Timing")
end
# Deleting a method from the source file must delete it from the running
# session, across a wide range of signature forms: keyword args, default args,
# @nospecialize, argument destructuring, Unions, gensymmed type params,
# Varargs-style recursion, @eval-generated methods, @generated functions, and
# methods added to another module (Base.revisefoo). Also checks that deletion
# respects method specificity (delete_missing! should not delete the more
# general method that remains in the new source).
do_test("Method deletion") && @testset "Method deletion" begin
Core.eval(Base, :(revisefoo(x::Float64) = 1)) # to test cross-module method scoping
testdir = newtestdir()
dn = joinpath(testdir, "MethDel", "src")
mkpath(dn)
write(joinpath(dn, "MethDel.jl"), """
__precompile__(false) # "clean" Base doesn't have :revisefoo
module MethDel
f(x) = 1
f(x::Int) = 2
g(x::Vector{T}, y::T) where T = 1
g(x::Array{T,N}, y::T) where N where T = 2
g(::Array, ::Any) = 3
h(x::Array{T}, y::T) where T = g(x, y)
k(::Int; badchoice=1) = badchoice
Base.revisefoo(x::Int) = 2
struct Private end
Base.revisefoo(::Private) = 3
dfltargs(x::Int8, y::Int=0, z::Float32=1.0f0) = x+y+z
hasmacro1(@nospecialize(x)) = x
hasmacro2(@nospecialize(x::Int)) = x
hasmacro3(@nospecialize(x::Int), y::Float64) = x
hasdestructure1(x, (count, name)) = name^count
hasdestructure2(x, (count, name)::Tuple{Int,Any}) = name^count
struct A end
struct B end
checkunion(a::Union{Nothing, A}) = 1
methgensym(::Vector{<:Integer}) = 1
mapf(fs, x) = (fs[1](x), mapf(Base.tail(fs), x)...)
mapf(::Tuple{}, x) = ()
for T in (Int, Float64, String)
@eval mytypeof(x::\$T) = \$T
end
@generated function firstparam(A::AbstractArray)
T = A.parameters[1]
return :(\$T)
end
end
""")
sleep(mtimedelay)
@eval using MethDel
sleep(mtimedelay)
@test MethDel.f(1.0) == 1
@test MethDel.f(1) == 2
@test MethDel.g(rand(3), 1.0) == 1
@test MethDel.g(rand(3, 3), 1.0) == 2
@test MethDel.g(Int[], 1.0) == 3
@test MethDel.h(rand(3), 1.0) == 1
@test MethDel.k(1) == 1
@test MethDel.k(1; badchoice=2) == 2
@test MethDel.hasmacro1(1) == 1
@test MethDel.hasmacro2(1) == 1
@test MethDel.hasmacro3(1, 0.0) == 1
@test MethDel.hasdestructure1(0, (3, "hi")) == "hihihi"
@test MethDel.hasdestructure2(0, (3, "hi")) == "hihihi"
@test Base.revisefoo(1.0) == 1
@test Base.revisefoo(1) == 2
@test Base.revisefoo(MethDel.Private()) == 3
@test MethDel.dfltargs(Int8(2)) == 3.0f0
@test MethDel.dfltargs(Int8(2), 5) == 8.0f0
@test MethDel.dfltargs(Int8(2), 5, -17.0f0) == -10.0f0
@test MethDel.checkunion(nothing) == 1
@test MethDel.methgensym([1]) == 1
@test_throws MethodError MethDel.methgensym([1.0])
@test MethDel.mapf((x->x+1, x->x+0.1), 3) == (4, 3.1)
@test MethDel.mytypeof(1) === Int
@test MethDel.mytypeof(1.0) === Float64
@test MethDel.mytypeof("hi") === String
@test MethDel.firstparam(rand(2,2)) === Float64
# rewrite with many methods removed or their signatures changed
write(joinpath(dn, "MethDel.jl"), """
module MethDel
f(x) = 1
g(x::Array{T,N}, y::T) where N where T = 2
h(x::Array{T}, y::T) where T = g(x, y)
k(::Int; goodchoice=-1) = goodchoice
dfltargs(x::Int8, yz::Tuple{Int,Float32}=(0,1.0f0)) = x+yz[1]+yz[2]
struct A end
struct B end
checkunion(a::Union{Nothing, B}) = 2
methgensym(::Vector{<:Real}) = 1
mapf(fs::F, x) where F = (fs[1](x), mapf(Base.tail(fs), x)...)
mapf(::Tuple{}, x) = ()
for T in (Int, String)
@eval mytypeof(x::\$T) = \$T
end
end
""")
yry()
@test MethDel.f(1.0) == 1
@test MethDel.f(1) == 1
@test MethDel.g(rand(3), 1.0) == 2
@test MethDel.g(rand(3, 3), 1.0) == 2
@test_throws MethodError MethDel.g(Int[], 1.0)
@test MethDel.h(rand(3), 1.0) == 2
@test_throws MethodError MethDel.k(1; badchoice=2)
@test MethDel.k(1) == -1
@test MethDel.k(1; goodchoice=10) == 10
@test_throws MethodError MethDel.hasmacro1(1)
@test_throws MethodError MethDel.hasmacro2(1)
@test_throws MethodError MethDel.hasmacro3(1, 0.0)
@test_throws MethodError MethDel.hasdestructure1(0, (3, "hi"))
@test_throws MethodError MethDel.hasdestructure2(0, (3, "hi"))
@test Base.revisefoo(1.0) == 1
@test_throws MethodError Base.revisefoo(1)
@test_throws MethodError Base.revisefoo(MethDel.Private())
@test MethDel.dfltargs(Int8(2)) == 3.0f0
@test MethDel.dfltargs(Int8(2), (5,-17.0f0)) == -10.0f0
@test_throws MethodError MethDel.dfltargs(Int8(2), 5) == 8.0f0
@test_throws MethodError MethDel.dfltargs(Int8(2), 5, -17.0f0) == -10.0f0
@test MethDel.checkunion(nothing) == 2
@test MethDel.methgensym([1]) == 1
@test MethDel.methgensym([1.0]) == 1
@test length(methods(MethDel.methgensym)) == 1
@test MethDel.mapf((x->x+1, x->x+0.1), 3) == (4, 3.1)
@test length(methods(MethDel.mapf)) == 2
@test MethDel.mytypeof(1) === Int
@test_throws MethodError MethDel.mytypeof(1.0)
@test MethDel.mytypeof("hi") === String
@test_throws MethodError MethDel.firstparam(rand(2,2))
Base.delete_method(first(methods(Base.revisefoo)))
# Test for specificity in deletion
ex1 = :(methspecificity(x::Int) = 1)
ex2 = :(methspecificity(x::Integer) = 2)
Core.eval(ReviseTestPrivate, ex1)
Core.eval(ReviseTestPrivate, ex2)
exsig1 = Revise.RelocatableExpr(ex1)=>[Tuple{typeof(ReviseTestPrivate.methspecificity),Int}]
exsig2 = Revise.RelocatableExpr(ex2)=>[Tuple{typeof(ReviseTestPrivate.methspecificity),Integer}]
f_old, f_new = Revise.ExprsSigs(exsig1, exsig2), Revise.ExprsSigs(exsig2)
Revise.delete_missing!(f_old, f_new)
m = @which ReviseTestPrivate.methspecificity(1)
@test m.sig.parameters[2] === Integer
# second call must be a no-op (idempotent deletion)
Revise.delete_missing!(f_old, f_new)
m = @which ReviseTestPrivate.methspecificity(1)
@test m.sig.parameters[2] === Integer
end
# Exercise Revise.revise_file_now directly: untracked filenames error with an
# informative message, and tracked (relative) paths revise immediately.
do_test("revise_file_now") && @testset "revise_file_now" begin
# Very rarely this is used for debugging
testdir = newtestdir()
dn = joinpath(testdir, "ReviseFileNow", "src")
mkpath(dn)
fn = joinpath(dn, "ReviseFileNow.jl")
write(fn, """
module ReviseFileNow
f(x) = 1
end
""")
sleep(mtimedelay)
@eval using ReviseFileNow
@test ReviseFileNow.f(0) == 1
sleep(mtimedelay)
pkgdata = Revise.pkgdatas[Base.PkgId(ReviseFileNow)]
write(fn, """
module ReviseFileNow
f(x) = 2
end
""")
try
# "foo" is not a tracked file, so this should throw
Revise.revise_file_now(pkgdata, "foo")
catch err
@test isa(err, ErrorException)
@test occursin("not currently being tracked", err.msg)
end
Revise.revise_file_now(pkgdata, relpath(fn, pkgdata))
@test ReviseFileNow.f(0) == 2
rm_precompile("ReviseFileNow")
end
# A top-level `@eval using ...` that pulls in a dependency chain (A -> B -> C)
# must not break revision of the package that contains it.
do_test("Evaled toplevel") && @testset "Evaled toplevel" begin
testdir = newtestdir()
dnA = joinpath(testdir, "ToplevelA", "src"); mkpath(dnA)
dnB = joinpath(testdir, "ToplevelB", "src"); mkpath(dnB)
dnC = joinpath(testdir, "ToplevelC", "src"); mkpath(dnC)
write(joinpath(dnA, "ToplevelA.jl"), """
module ToplevelA
@eval using ToplevelB
g() = 2
end""")
write(joinpath(dnB, "ToplevelB.jl"), """
module ToplevelB
using ToplevelC
end""")
write(joinpath(dnC, "ToplevelC.jl"), """
module ToplevelC
export f
f() = 1
end""")
sleep(mtimedelay)
using ToplevelA
sleep(mtimedelay)
@test ToplevelA.ToplevelB.f() == 1
@test ToplevelA.g() == 2
write(joinpath(dnA, "ToplevelA.jl"), """
module ToplevelA
@eval using ToplevelB
g() = 3
end""")
yry()
@test ToplevelA.ToplevelB.f() == 1
@test ToplevelA.g() == 3
rm_precompile("ToplevelA")
rm_precompile("ToplevelB")
rm_precompile("ToplevelC")
end
# Issue #599: functions defined *inside* a struct body (used by an inner
# constructor) must not prevent revision of other methods in the module.
do_test("struct inner functions") && @testset "struct inner functions" begin
# issue #599
testdir = newtestdir()
dn = joinpath(testdir, "StructInnerFuncs", "src"); mkpath(dn)
write(joinpath(dn, "StructInnerFuncs.jl"), """
module StructInnerFuncs
mutable struct A
x::Int
A(x) = new(f(x))
f(x) = x^2
end
g(x) = 1
end""")
sleep(mtimedelay)
using StructInnerFuncs
sleep(mtimedelay)
@test StructInnerFuncs.A(2).x == 4
@test StructInnerFuncs.g(3) == 1
# only g changes; the struct definition (unchanged) must not cause an error
write(joinpath(dn, "StructInnerFuncs.jl"), """
module StructInnerFuncs
mutable struct A
x::Int
A(x) = new(f(x))
f(x) = x^2
end
g(x) = 2
end""")
yry()
@test StructInnerFuncs.A(2).x == 4
@test StructInnerFuncs.g(3) == 2
rm_precompile("StructInnerFuncs")
end
# Issue #606: revising a function that contains multiple closures with
# `where`-clauses and return-type annotations must work.
do_test("Issue 606") && @testset "Issue 606" begin
# issue #606
testdir = newtestdir()
dn = joinpath(testdir, "Issue606", "src"); mkpath(dn)
write(joinpath(dn, "Issue606.jl"), """
module Issue606
function convert_output_relations()
function add_default_zero!(dict::Dict{K, V})::Dict{K, V} where
{K <: Tuple, V}
if K == Tuple{} && isempty(dict)
dict[()] = 0.0
end
return dict
end
function convert_to_sorteddict(
relation::Union{Dict{K, Tuple{Float64}}}
) where K <: Tuple
return add_default_zero!(Dict{K, Float64}((k, v[1]) for (k, v) in relation))
end
function convert_to_sorteddict(relation::Dict{<:Tuple, Float64})
return add_default_zero!(relation)
end
return "HELLO"
end
end""")
sleep(mtimedelay)
using Issue606
sleep(mtimedelay)
@test Issue606.convert_output_relations() == "HELLO"
# only the returned string changes; the closures are identical
write(joinpath(dn, "Issue606.jl"), """
module Issue606
function convert_output_relations()
function add_default_zero!(dict::Dict{K, V})::Dict{K, V} where
{K <: Tuple, V}
if K == Tuple{} && isempty(dict)
dict[()] = 0.0
end
return dict
end
function convert_to_sorteddict(
relation::Union{Dict{K, Tuple{Float64}}}
) where K <: Tuple
return add_default_zero!(Dict{K, Float64}((k, v[1]) for (k, v) in relation))
end
function convert_to_sorteddict(relation::Dict{<:Tuple, Float64})
return add_default_zero!(relation)
end
return "HELLO2"
end
end""")
yry()
@test Issue606.convert_output_relations() == "HELLO2"
rm_precompile("Issue606")
end
# Error handling during revision: parse errors and runtime errors in a revised
# file must be reported once (not repeatedly), be retrievable via
# Revise.errors(), and clear once the file is fixed. Covers issues #421, #541,
# #301, and errors raised from a callee during includet.
do_test("Revision errors") && @testset "Revision errors" begin
testdir = newtestdir()
dn = joinpath(testdir, "RevisionErrors", "src")
mkpath(dn)
fn = joinpath(dn, "RevisionErrors.jl")
write(fn, """
module RevisionErrors
f(x) = 1
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 1
end
""")
sleep(mtimedelay)
@eval using RevisionErrors
sleep(mtimedelay)
@test RevisionErrors.f(0) == 1
# introduce a syntax error: f{x) is malformed
write(fn, """
module RevisionErrors
f{x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 1
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
# helper: validate the logged revision failure (type, message, location)
function check_revision_error(rec, ErrorType, msg, line)
@test rec.message == "Failed to revise $fn"
exc = rec.kwargs[:exception]
if exc isa Revise.ReviseEvalException
exc, st = exc.exc, exc.stacktrace
else
exc, bt = exc
st = stacktrace(bt)
end
@test exc isa ErrorType
if ErrorType === LoadError
@test exc.file == fn
@test exc.line == line
@test occursin(msg, errmsg(exc.error))
elseif ErrorType === Base.Meta.ParseError
@test occursin(msg, exc.msg)
elseif ErrorType === UndefVarError
@test msg == exc.var
end
@test length(st) == 1
end
# test errors are reported the first time
# (error type and message wording changed with the Julia 1.10 parser)
check_revision_error(logs[1], Base.VERSION < v"1.10" ? LoadError : Base.Meta.ParseError,
Base.VERSION < v"1.10" ? "missing comma or }" : "Expected `}`", 2 + (Base.VERSION >= v"1.10"))
# Check that there's an informative warning
rec = logs[2]
@test startswith(rec.message, "The running code does not match")
@test occursin("RevisionErrors.jl", rec.message)
# test errors are not re-reported
logs, _ = Test.collect_test_logs() do
yry()
end
@test isempty(logs)
# test error re-reporting
logs,_ = Test.collect_test_logs() do
Revise.errors()
end
check_revision_error(logs[1], Base.VERSION < v"1.10" ? LoadError : Base.Meta.ParseError,
Base.VERSION < v"1.10" ? "missing comma or }" : "Expected `}`", 2 + (Base.VERSION >= v"1.10"))
# fix the file; the error state must clear silently
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 1
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
@test isempty(logs)
@test RevisionErrors.f(0) == 2
# issue #421
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
function g(x) = 1
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
delim = Base.VERSION < v"1.10" ? '"' : '`'
check_revision_error(logs[1], Base.VERSION < v"1.10" ? LoadError : Base.Meta.ParseError,
"unexpected $delim=$delim", 6 + (Base.VERSION >= v"1.10")*2)
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 1
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
@test isempty(logs)
# a runtime error during revision: T is undefined in foo's signature
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 1
foo(::Vector{T}) = 3
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
check_revision_error(logs[1], UndefVarError, :T, 6)
# issue #541
sleep(mtimedelay)
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x} = 2
end
""")
@test try
revise(throw=true)
false
catch err
if Base.VERSION < v"1.10"
isa(err, LoadError) && occursin("""unexpected "}" """, errmsg(err.error))
else
isa(err, Base.Meta.ParseError) && occursin("Expected `)`", err.msg)
end
end
sleep(mtimedelay)
write(joinpath(dn, "RevisionErrors.jl"), """
module RevisionErrors
f(x) = 2
struct Vec{N, T <: Union{Float32,Float64}}
data::NTuple{N, T}
end
g(x) = 2
end
""")
yry()
@test RevisionErrors.g(0) == 2
rm_precompile("RevisionErrors")
empty!(Revise.queue_errors)
# issue #301: an error raised at module top level during includet should
# produce a readable stderr report with accurate file/line info
testfile = joinpath(testdir, "Test301.jl")
write(testfile, """
module Test301
mutable struct Struct301
x::Int
unset
Struct301(x::Integer) = new(x)
end
f(s) = s.unset
const s = Struct301(1)
if f(s)
g() = 1
else
g() = 2
end
end
""")
logfile = joinpath(tempdir(), randtmp()*".log")
open(logfile, "w") do io
redirect_stderr(io) do
includet(testfile)
end
end
sleep(mtimedelay)
lines = readlines(logfile)
@test lines[1] == "ERROR: UndefRefError: access to undefined reference"
@test any(str -> occursin(r"f\(.*Test301\.Struct301\)", str), lines)
@test any(str -> endswith(str, "Test301.jl:10"), lines)
logfile = joinpath(tempdir(), randtmp()*".log")
open(logfile, "w") do io
redirect_stderr(io) do
includet("callee_error.jl")
end
end
sleep(mtimedelay)
lines = readlines(logfile)
@test lines[1] == "ERROR: BoundsError: attempt to access 3-element $(Vector{Int}) at index [4]"
@test any(str -> endswith(str, "callee_error.jl:12"), lines)
@test_throws UndefVarError CalleeError.foo(0.1f0)
end
# Issue #418: when a revision is interrupted (InterruptException thrown at
# module top level), the next revision must retry rather than silently drop
# the change. Tested in both compiled and interpreted (eval'd) mode.
do_test("Retry on InterruptException") && @testset "Retry on InterruptException" begin
# helper: validate the logged failure is an InterruptException for this file
function check_revision_interrupt(logs)
rec = logs[1]
@test rec.message == "Failed to revise $fn"
exc = rec.kwargs[:exception]
if exc isa Revise.ReviseEvalException
exc, st = exc.exc, exc.stacktrace
else
exc, bt = exc
st = stacktrace(bt)
end
@test exc isa InterruptException
if length(logs) > 1
rec = logs[2]
@test startswith(rec.message, "The running code does not match")
end
end
testdir = newtestdir()
dn = joinpath(testdir, "RevisionInterrupt", "src")
mkpath(dn)
fn = joinpath(dn, "RevisionInterrupt.jl")
write(fn, """
module RevisionInterrupt
f(x) = 1
end
""")
sleep(mtimedelay)
@eval using RevisionInterrupt
sleep(mtimedelay)
@test RevisionInterrupt.f(0) == 1
# Interpreted & compiled mode
n = 1
for errthrow in ("throw(InterruptException())", """
eval(quote # this forces interpreted mode
throw(InterruptException())
end)""")
n += 1
write(fn, """
module RevisionInterrupt
$errthrow
f(x) = $n
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
check_revision_interrupt(logs)
# This method gets deleted because it's redefined to f(x) = 2,
# but the error prevents it from getting that far.
# @test RevisionInterrupt.f(0) == 1
# Check that InterruptException triggers a retry (issue #418)
logs, _ = Test.collect_test_logs() do
yry()
end
check_revision_interrupt(logs)
# @test RevisionInterrupt.f(0) == 1
write(fn, """
module RevisionInterrupt
f(x) = $n
end
""")
logs, _ = Test.collect_test_logs() do
yry()
end
@test isempty(logs)
@test RevisionInterrupt.f(0) == n
end
end
# Adding a new value to an existing @enum must be picked up on revision,
# including Base.instances reflecting all three values.
do_test("Modify @enum") && @testset "Modify @enum" begin
testdir = newtestdir()
dn = joinpath(testdir, "ModifyEnum", "src")
mkpath(dn)
write(joinpath(dn, "ModifyEnum.jl"), """
module ModifyEnum
@enum Fruit apple=1 orange=2
end
""")
sleep(mtimedelay)
@eval using ModifyEnum
sleep(mtimedelay)
@test Int(ModifyEnum.apple) == 1
@test ModifyEnum.apple isa ModifyEnum.Fruit
@test_throws UndefVarError Int(ModifyEnum.kiwi)
write(joinpath(dn, "ModifyEnum.jl"), """
module ModifyEnum
@enum Fruit apple=1 orange=2 kiwi=3
end
""")
yry()
@test Int(ModifyEnum.kiwi) == 3
@test Base.instances(ModifyEnum.Fruit) === (ModifyEnum.apple, ModifyEnum.orange, ModifyEnum.kiwi)
rm_precompile("ModifyEnum")
pop!(LOAD_PATH)
end
# CodeTracking's definition() must return the source expression for a method,
# including the most specific match among overlapping methods, methods found
# via a stacktrace, and @irrational-defined methods whose module differs from
# where the macro lives.
do_test("get_def") && @testset "get_def" begin
testdir = newtestdir()
dn = joinpath(testdir, "GetDef", "src")
mkpath(dn)
write(joinpath(dn, "GetDef.jl"), """
module GetDef
f(x) = 1
f(v::AbstractVector) = 2
f(v::AbstractVector{<:Integer}) = 3
foo(x::T, y::Integer=1; kw1="hello", kwargs...) where T<:Number = error("stop")
bar(x) = foo(x; kw1="world")
end
""")
sleep(mtimedelay)
@eval using GetDef
sleep(mtimedelay)
@test GetDef.f(1.0) == 1
@test GetDef.f([1.0]) == 2
@test GetDef.f([1]) == 3
m = @which GetDef.f([1])
ex = Revise.RelocatableExpr(definition(m))
@test ex isa Revise.RelocatableExpr
@test isequal(ex, Revise.RelocatableExpr(:(f(v::AbstractVector{<:Integer}) = 3)))
# recover the definition of a kwarg-carrying method via its stacktrace frame
st = try GetDef.bar(5.0) catch err stacktrace(catch_backtrace()) end
m = st[2].linfo.def
def = Revise.RelocatableExpr(definition(m))
@test def == Revise.RelocatableExpr(:(foo(x::T, y::Integer=1; kw1="hello", kwargs...) where T<:Number = error("stop")))
rm_precompile("GetDef")
# This method identifies itself as originating from @irrational, defined in Base, but
# the module of the method is listed as Base.MathConstants.
m = @which Float32(π)
@test definition(m) isa Expr
end
# Packages listed in dont_watch_pkgs must not be tracked, and Revise.silence
# must record the package name in the silence file.
do_test("Pkg exclusion") && @testset "Pkg exclusion" begin
push!(Revise.dont_watch_pkgs, :Example)
push!(Revise.silence_pkgs, :Example)
@eval import Example
id = Base.PkgId(Example)
@test !haskey(Revise.pkgdatas, id)
# Ensure that silencing works
sfile = Revise.silencefile[] # remember the original
try
sfiletemp = tempname()
Revise.silencefile[] = sfiletemp
Revise.silence("GSL")
@test isfile(sfiletemp)
pkgs = readlines(sfiletemp)
@test any(p->p=="GSL", pkgs)
rm(sfiletemp)
finally
# always restore the real silence file, even if the checks above fail
Revise.silencefile[] = sfile
end
pop!(LOAD_PATH)
end
do_test("Manual track") && @testset "Manual track" begin
srcfile = joinpath(tempdir(), randtmp()*".jl")
write(srcfile, "revise_f(x) = 1")
sleep(mtimedelay)
includet(srcfile)
sleep(mtimedelay)
@test revise_f(10) == 1
@test length(signatures_at(srcfile, 1)) == 1
write(srcfile, "revise_f(x) = 2")
yry()
@test revise_f(10) == 2
push!(to_remove, srcfile)
# Do it again with a relative path
curdir = pwd()
cd(tempdir())
srcfile = randtmp()*".jl"
write(srcfile, "revise_floc(x) = 1")
sleep(mtimedelay)
include(joinpath(pwd(), srcfile))
@test revise_floc(10) == 1
Revise.track(srcfile)
sleep(mtimedelay)
write(srcfile, "revise_floc(x) = 2")
yry()
@test revise_floc(10) == 2
# Call track again & make sure it doesn't track twice
Revise.track(srcfile)
id = Base.PkgId(Main)
pkgdata = Revise.pkgdatas[id]
@test count(isequal(srcfile), pkgdata.info.files) == 1
push!(to_remove, joinpath(tempdir(), srcfile))
cd(curdir)
# Empty files (issue #253)
srcfile = joinpath(tempdir(), randtmp()*".jl")
write(srcfile, "\n")
sleep(mtimedelay)
includet(srcfile)
sleep(mtimedelay)
@test basename(srcfile) ∈ Revise.watched_files[dirname(srcfile)]
push!(to_remove, srcfile)
# Double-execution (issue #263)
srcfile = joinpath(tempdir(), randtmp()*".jl")
write(srcfile, "println(\"executed\")")
sleep(mtimedelay)
logfile = joinpath(tempdir(), randtmp()*".log")
open(logfile, "w") do io
redirect_stdout(io) do
includet(srcfile)
end
end
sleep(mtimedelay)
lines = readlines(logfile)
@test length(lines) == 1 && chomp(lines[1]) == "executed"
# In older versions of Revise, it would do the work again when the file
# changed. Starting with 3.0, Revise modifies methods and docstrings but
# does not "do work."
write(srcfile, "println(\"executed again\")")
open(logfile, "w") do io
redirect_stdout(io) do
yry()
end
end
lines = readlines(logfile)
@test isempty(lines)
# tls path (issue #264)
srcdir = joinpath(tempdir(), randtmp())
mkpath(srcdir)
push!(to_remove, srcdir)
srcfile1 = joinpath(srcdir, randtmp()*".jl")
srcfile2 = joinpath(srcdir, randtmp()*".jl")
write(srcfile1, "includet(\"$(basename(srcfile2))\")")
write(srcfile2, "f264() = 1")
sleep(mtimedelay)
include(srcfile1)
sleep(mtimedelay)
@test f264() == 1
write(srcfile2, "f264() = 2")
yry()
@test f264() == 2
# recursive `includet`s (issue #302)
testdir = newtestdir()
srcfile1 = joinpath(testdir, "Test302.jl")
write(srcfile1, """
module Test302
struct Parameters{T}
control::T
end
function Parameters(control = nothing; kw...)
Parameters(control)
end
function (p::Parameters)(; kw...)
p
end
end
""")
srcfile2 = joinpath(testdir, "test2.jl")
write(srcfile2, """
includet(joinpath(@__DIR__, "Test302.jl"))
using .Test302
""")
sleep(mtimedelay)
includet(srcfile2)
sleep(mtimedelay)
p = Test302.Parameters{Int}(3)
@test p() == p
write(srcfile1, """
module Test302
struct Parameters{T}
control::T
end
function Parameters(control = nothing; kw...)
Parameters(control)
end
function (p::Parameters)(; kw...)
0
end
end
""")
yry()
@test p() == 0
# Double-execution prevention (issue #639)
empty!(issue639report)
srcfile1 = joinpath(testdir, "file1.jl")
srcfile2 = joinpath(testdir, "file2.jl")
write(srcfile1, """
include(joinpath(@__DIR__, "file2.jl"))
push!($(@__MODULE__).issue639report, '1')
""")
write(srcfile2, "push!($(@__MODULE__).issue639report, '2')")
sleep(mtimedelay)
includet(srcfile1)
@test issue639report == ['2', '1']
# Non-included dependency (issue #316)
testdir = newtestdir()
dn = joinpath(testdir, "LikePlots", "src"); mkpath(dn)
write(joinpath(dn, "LikePlots.jl"), """
module LikePlots
plot() = 0
backend() = include(joinpath(@__DIR__, "backends/backend.jl"))
end
""")
sd = joinpath(dn, "backends"); mkpath(sd)
write(joinpath(sd, "backend.jl"), "f() = 1")
sleep(mtimedelay)
@eval using LikePlots
@test LikePlots.plot() == 0
@test_throws UndefVarError LikePlots.f()
sleep(mtimedelay)
Revise.track(LikePlots, joinpath(sd, "backend.jl"))
LikePlots.backend()
@test LikePlots.f() == 1
sleep(2*mtimedelay)
write(joinpath(sd, "backend.jl"), "f() = 2")
yry()
@test LikePlots.f() == 2
pkgdata = Revise.pkgdatas[Base.PkgId(LikePlots)]
@test joinpath("src", "backends", "backend.jl") ∈ Revise.srcfiles(pkgdata)
# No duplications from Revise.track with either relative or absolute paths
Revise.track(LikePlots, joinpath(sd, "backend.jl"))
@test length(Revise.srcfiles(pkgdata)) == 2
cd(dn) do
Revise.track(LikePlots, joinpath("backends", "backend.jl"))
@test length(Revise.srcfiles(pkgdata)) == 2
end
rm_precompile("LikePlots")
# Issue #475
srcfile = joinpath(tempdir(), randtmp()*".jl")
write(srcfile, """
a475 = 0.8
a475 = 0.7
a475 = 0.8
""")
includet(srcfile)
@test a475 == 0.8
end
do_test("Auto-track user scripts") && @testset "Auto-track user scripts" begin
# Scripts loaded with plain `include` are revised only when the user opts in
# via `Revise.tracking_Main_includes[]`; verify both the off and on behavior.
srcfile = joinpath(tempdir(), randtmp()*".jl")
push!(to_remove, srcfile)
write(srcfile, "revise_g() = 1")
sleep(mtimedelay)
# By default user scripts are not tracked
# issue #358: but if the user is tracking all includes...
user_track_includes = Revise.tracking_Main_includes[]
Revise.tracking_Main_includes[] = false
include(srcfile)
yry()
@test revise_g() == 1
write(srcfile, "revise_g() = 2")
yry()
# Tracking disabled: the on-disk edit must NOT take effect.
@test revise_g() == 1
# Turn on tracking of user scripts
empty!(Revise.included_files) # don't track files already loaded (like this one)
Revise.tracking_Main_includes[] = true
try
srcfile = joinpath(tempdir(), randtmp()*".jl")
push!(to_remove, srcfile)
write(srcfile, "revise_g() = 1")
sleep(mtimedelay)
include(srcfile)
yry()
@test revise_g() == 1
write(srcfile, "revise_g() = 2")
yry()
# Tracking enabled: the on-disk edit IS picked up now.
@test revise_g() == 2
# issue #257
logs, _ = Test.collect_test_logs() do # just to prevent noisy warning
# `include` of missing files must not corrupt Revise's state; each
# failed include is followed by a revision cycle to prove that.
try include("nonexistent1.jl") catch end
yry()
try include("nonexistent2.jl") catch end
yry()
end
finally
Revise.tracking_Main_includes[] = user_track_includes # restore old behavior
end
end
do_test("Distributed") && @testset "Distributed" begin
# Verify that revisions propagate to Distributed worker processes: new method
# bodies are applied on every worker, and deleted methods are removed there too.
# The d31474 test below is from
# https://discourse.julialang.org/t/how-do-i-make-revise-jl-work-in-multiple-workers-environment/31474
newprocs = addprocs(2)
newproc = newprocs[end]
Revise.init_worker.(newprocs)
allworkers = [myid(); newprocs]
dirname = randtmp()
mkdir(dirname)
@everywhere push_LOAD_PATH!(dirname) = push!(LOAD_PATH, dirname) # Don't want to share this LOAD_PATH
for p in allworkers
remotecall_wait(push_LOAD_PATH!, p, dirname)
end
push!(to_remove, dirname)
modname = "ReviseDistributed"
dn = joinpath(dirname, modname, "src")
mkpath(dn)
# d31474 spawns work on a specific worker; its body is interpolated into the
# package source so the worker id is baked in.
s31474 = """
function d31474()
r = @spawnat $newproc sqrt(4)
fetch(r)
end
"""
write(joinpath(dn, modname*".jl"), """
module ReviseDistributed
using Distributed

f() = π
g(::Int) = 0

$s31474
end
""")
sleep(mtimedelay)
using ReviseDistributed
sleep(mtimedelay)
@everywhere using ReviseDistributed
# Baseline: all workers see the original definitions.
for p in allworkers
@test remotecall_fetch(ReviseDistributed.f, p) == π
@test remotecall_fetch(ReviseDistributed.g, p, 1) == 0
end
@test ReviseDistributed.d31474() == 2.0
# The revision changes f(), deletes g(), and alters d31474's spawned work.
s31474 = """
function d31474()
r = @spawnat $newproc sqrt(9)
fetch(r)
end
"""
write(joinpath(dn, modname*".jl"), """
module ReviseDistributed

f() = 3.0

$s31474
end
""")
yry()
# g was deleted: it must be gone locally (MethodError) and remotely
# (RemoteException wrapping the MethodError).
@test_throws MethodError ReviseDistributed.g(1)
for p in allworkers
@test remotecall_fetch(ReviseDistributed.f, p) == 3.0
@test_throws RemoteException remotecall_fetch(ReviseDistributed.g, p, 1)
end
@test ReviseDistributed.d31474() == 3.0
rmprocs(allworkers[2:3]...; waitfor=10)
rm_precompile("ReviseDistributed")
pop!(LOAD_PATH)
end
do_test("Distributed on worker") && @testset "Distributed on worker" begin
# With JULIA_REVISE_WORKER_ONLY=1, each worker runs its own independent Revise
# instance; revising on one worker must not affect another.
# https://github.com/timholy/Revise.jl/pull/527
favorite_proc, boring_proc = addprocs(2)
Distributed.remotecall_eval(Main, [favorite_proc, boring_proc], :(ENV["JULIA_REVISE_WORKER_ONLY"] = "1"))

dirname = randtmp()
mkdir(dirname)
push!(to_remove, dirname)
@everywhere push_LOAD_PATH!(dirname) = push!(LOAD_PATH, dirname) # Don't want to share this LOAD_PATH
remotecall_wait(push_LOAD_PATH!, favorite_proc, dirname)

modname = "ReviseDistributedOnWorker"
dn = joinpath(dirname, modname, "src")
mkpath(dn)

s527_old = """
module ReviseDistributedOnWorker

f() = π
g(::Int) = 0
end
"""
write(joinpath(dn, modname*".jl"), s527_old)

# In the first tests, we only load Revise on our favorite process. The other (boring) process should be unaffected by the upcoming tests.
Distributed.remotecall_eval(Main, [favorite_proc], :(using Revise))

sleep(mtimedelay)
Distributed.remotecall_eval(Main, [favorite_proc], :(using ReviseDistributedOnWorker))
sleep(mtimedelay)

@test Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.f())) == π
@test Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.g(1))) == 0

# we only loaded ReviseDistributedOnWorker on our favorite process
@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.f()))
@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.g(1)))

# The revision redefines f() and deletes g().
s527_new = """
module ReviseDistributedOnWorker

f() = 3.0
end
"""
write(joinpath(dn, modname*".jl"), s527_new)
sleep(mtimedelay)

Distributed.remotecall_eval(Main, [favorite_proc], :(Revise.revise()))
sleep(mtimedelay)

# Only the favorite process observes the revision.
@test Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.f())) == 3.0
@test_throws RemoteException Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.g(1)))

@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.f()))
@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.g(1)))

# In the second part, we'll also load Revise on the boring process, which should have no effect.
Distributed.remotecall_eval(Main, [boring_proc], :(using Revise))

write(joinpath(dn, modname*".jl"), s527_old)
sleep(mtimedelay)

# The boring process never loaded the package, so its revision queue stays
# empty even though the file changed.
@test !Distributed.remotecall_eval(Main, favorite_proc, :(Revise.revision_queue |> isempty))
@test Distributed.remotecall_eval(Main, boring_proc, :(Revise.revision_queue |> isempty))

Distributed.remotecall_eval(Main, [favorite_proc, boring_proc], :(Revise.revise()))
sleep(mtimedelay)

@test Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.f())) == π
@test Distributed.remotecall_eval(Main, favorite_proc, :(ReviseDistributedOnWorker.g(1))) == 0

@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.f()))
@test_throws RemoteException Distributed.remotecall_eval(Main, boring_proc, :(ReviseDistributedOnWorker.g(1)))

rmprocs(favorite_proc, boring_proc; waitfor=10)
end
do_test("Git") && @testset "Git" begin
# Test Revise's git integration: reading historical file contents from a repo
# (git_source), listing tracked files (git_files), and tracking a file that is
# committed to the repo but not yet `include`d by the package (issue #135).
loc = Base.find_package("Revise")
if occursin("dev", loc)
repo, path = Revise.git_repo(loc)
# Identity comparison with nothing is the idiomatic form (was `!=`).
@test repo !== nothing
files = Revise.git_files(repo)
@test "README.md" ∈ files
# Fetch file contents at fixed historical commits of Revise itself.
src = Revise.git_source(loc, "946d588328c2eb5fe5a56a21b4395379e41092e0")
@test startswith(src, "__precompile__")
src = Revise.git_source(loc, "eae5e000097000472280e6183973a665c4243b94") # 2nd commit in Revise's history
@test src == "module Revise\n\n# package code goes here\n\nend # module\n"
else
@warn "skipping git tests because Revise is not under development"
end
# Issue #135
if !Sys.iswindows()
randdir = randtmp()
modname = "ModuleWithNewFile"
push!(to_remove, randdir)
push!(LOAD_PATH, randdir)
randdir = joinpath(randdir, modname)
mkpath(joinpath(randdir, "src"))
mainjl = joinpath(randdir, "src", modname*".jl")
# Create a fresh git repo containing only the (empty) top-level module file.
LibGit2.with(LibGit2.init(randdir)) do repo
write(mainjl, """
module $modname
end
""")
LibGit2.add!(repo, joinpath("src", modname*".jl"))
test_sig = LibGit2.Signature("TEST", "[email protected]", round(time(); digits=0), 0)
LibGit2.commit(repo, "New file test"; author=test_sig, committer=test_sig)
end
sleep(mtimedelay)
@eval using $(Symbol(modname))
sleep(mtimedelay)
mod = @eval $(Symbol(modname))
id = Base.PkgId(mod)
# Add a new file that is staged in git but absent from the committed tree
# at HEAD; track_subdir_from_git! should skip it with a warning.
extrajl = joinpath(randdir, "src", "extra.jl")
write(extrajl, "println(\"extra\")")
write(mainjl, """
module $modname
include("extra.jl")
end
""")
sleep(mtimedelay)
repo = LibGit2.GitRepo(randdir)
LibGit2.add!(repo, joinpath("src", "extra.jl"))
pkgdata = Revise.pkgdatas[id]
logs, _ = Test.collect_test_logs() do
Revise.track_subdir_from_git!(pkgdata, joinpath(randdir, "src"); commit="HEAD")
end
yry()
@test Revise.hasfile(pkgdata, mainjl)
# The skip warning may not be the last log record, so check the last two.
@test startswith(logs[end].message, "skipping src/extra.jl") || startswith(logs[end-1].message, "skipping src/extra.jl")
rm_precompile("ModuleWithNewFile")
pop!(LOAD_PATH)
end
end
do_test("Recipes") && @testset "Recipes" begin
# Test the "tracking recipes": Revise.track for Base, the stdlibs, and
# Core.Compiler, plus `signatures_at`/`definition` lookups into tracked code.
# https://github.com/JunoLab/Juno.jl/issues/257#issuecomment-473856452
meth = @which gcd(10, 20)
sigs = signatures_at(Base.find_source_file(String(meth.file)), meth.line) # this should track Base

# Tracking Base
# issue #250
@test_throws ErrorException("use Revise.track(Base) or Revise.track(<stdlib module>)") Revise.track(joinpath(Revise.juliadir, "base", "intfuncs.jl"))

id = Base.PkgId(Base)
pkgdata = Revise.pkgdatas[id]
@test any(k->endswith(k, "number.jl"), Revise.srcfiles(pkgdata))
# Exactly one "file.jl" should be tracked (no duplicates).
@test length(filter(k->endswith(k, "file.jl"), Revise.srcfiles(pkgdata))) == 1
m = @which show([1,2,3])
@test definition(m) isa Expr
m = @which redirect_stdout()
@test definition(m).head ∈ (:function, :(=))

# Tracking stdlibs
Revise.track(Unicode)
id = Base.PkgId(Unicode)
pkgdata = Revise.pkgdatas[id]
@test any(k->endswith(k, "Unicode.jl"), Revise.srcfiles(pkgdata))
m = first(methods(Unicode.isassigned))
@test definition(m) isa Expr
@test isfile(whereis(m)[1])

# Submodule of Pkg (note that package is developed outside the
# Julia repo, this tests new cases)
id = Revise.get_tracked_id(Pkg.Types)
pkgdata = Revise.pkgdatas[id]
@test definition(first(methods(Pkg.API.add))) isa Expr

# Test that we skip over files that don't end in ".jl"
logs, _ = Test.collect_test_logs() do
Revise.track(REPL)
end
@test isempty(logs)

Revise.get_tracked_id(Core) # just test that this doesn't error

if !haskey(ENV, "BUILDKITE") # disable on buildkite, see discussion in https://github.com/JuliaCI/julia-buildkite/pull/372#issuecomment-2262840304
# Determine whether a git repo is available. Travis & Appveyor do not have this.
repo, path = Revise.git_repo(Revise.juliadir)
# Identity comparison with nothing is the idiomatic form (was `!=`).
if repo !== nothing && isfile(joinpath(path, "VERSION")) && isdir(joinpath(path, "base"))
# Tracking Core.Compiler
Revise.track(Core.Compiler)
id = Base.PkgId(Core.Compiler)
pkgdata = Revise.pkgdatas[id]
@test any(k->endswith(k, "optimize.jl"), Revise.srcfiles(pkgdata))
m = first(methods(Core.Compiler.typeinf_code))
@test definition(m) isa Expr
else
# Without a full julia git checkout, tracking Core.Compiler must fail
# with a specific exception rather than silently doing nothing.
@test_throws Revise.GitRepoException Revise.track(Core.Compiler)
@warn "skipping Core.Compiler tests due to lack of git repo"
end
end
end
do_test("CodeTracking #48") && @testset "CodeTracking #48" begin
# Regression test for CodeTracking issue #48: `whereis` must resolve a Base
# method with keyword arguments (`sum(...; dims)`) to its true source location.
meth = @which sum([1]; dims=1)
srcfile, srcline = whereis(meth)
@test endswith(srcfile, "reducedim.jl") && srcline > 1
end
do_test("Methods at REPL") && @testset "Methods at REPL" begin
# Methods defined at the REPL have file == "REPL[n]"; verify that Revise can
# find their signatures and definitions via the REPL history, including after
# redefinition (CodeTracking issue #38). Requires an active REPL.
if isdefined(Base, :active_repl) && !isnothing(Base.active_repl)
hp = Base.active_repl.interface.modes[1].hist
fstr = "__fREPL__(x::Int16) = 0"
# Mimic what the REPL does: parse with filename "REPL[histidx]" and push
# the source string onto the history.
histidx = length(hp.history) + 1 - hp.start_idx
ex = Base.parse_input_line(fstr; filename="REPL[$histidx]")
f = Core.eval(Main, ex)
if ex.head === :toplevel
ex = ex.args[end]
end
push!(hp.history, fstr)
m = first(methods(f))
@test !isempty(signatures_at(String(m.file), m.line))
@test isequal(Revise.RelocatableExpr(definition(m)), Revise.RelocatableExpr(ex))
@test definition(String, m)[1] == fstr

# Test that revisions work (https://github.com/timholy/CodeTracking.jl/issues/38)
fstr = "__fREPL__(x::Int16) = 1"
histidx = length(hp.history) + 1 - hp.start_idx
ex = Base.parse_input_line(fstr; filename="REPL[$histidx]")
f = Core.eval(Main, ex)
if ex.head === :toplevel
ex = ex.args[end]
end
push!(hp.history, fstr)
m = first(methods(f))
# After redefinition, lookups must reflect the NEW source string.
@test isequal(Revise.RelocatableExpr(definition(m)), Revise.RelocatableExpr(ex))
@test definition(String, m)[1] == fstr
@test !isempty(signatures_at(String(m.file), m.line))

# Clean up the two entries we pushed onto the shared REPL history.
pop!(hp.history)
pop!(hp.history)
else
@warn "REPL tests skipped"
end
end
do_test("baremodule") && @testset "baremodule" begin
# Revise must handle `baremodule` definitions, including a revision that
# changes the module keyword from `baremodule` to `module`.
testdir = newtestdir()
dn = joinpath(testdir, "Baremodule", "src")
mkpath(dn)
write(joinpath(dn, "Baremodule.jl"), """
baremodule Baremodule
f() = 1
end
""")
sleep(mtimedelay)
@eval using Baremodule
sleep(mtimedelay)
@test Baremodule.f() == 1
write(joinpath(dn, "Baremodule.jl"), """
module Baremodule
f() = 2
end
""")
yry()
@test Baremodule.f() == 2
rm_precompile("Baremodule")
pop!(LOAD_PATH)
end
do_test("module style 2-argument includes (issue #670)") && @testset "module style 2-argument includes (issue #670)" begin
# Revise must handle `Base.include(mod, path)` (the 2-argument form), which
# evaluates a file's contents into an explicitly named module.
testdir = newtestdir()
dn = joinpath(testdir, "B670", "src")
mkpath(dn)
# A670.jl defines bindings that will be include'd INTO module B670.
write(joinpath(dn, "A670.jl"), """
x = 6
y = 7
""")
sleep(mtimedelay)
write(joinpath(dn, "B670.jl"), """
module B670
x = 5
end
""")
sleep(mtimedelay)
# C670.jl uses the 2-argument include to splice A670.jl into B670,
# overwriting x and adding y.
write(joinpath(dn, "C670.jl"), """
using B670
Base.include(B670, "A670.jl")
""")
sleep(mtimedelay)
@eval using B670
path = joinpath(dn, "C670.jl")
@eval include($path)
@test B670.x == 6
@test B670.y == 7
rm_precompile("B670")
end
end
do_test("Utilities") && @testset "Utilities" begin
# `println_maxsize` truncates long output around an ellipsis; it is used by
# Rebugger but still lives in Revise.
buf = IOBuffer()
longline = "a"^100
Revise.println_maxsize(buf, longline; maxchars=50)
printed = String(take!(buf))
# Both ends of the original string survive, joined by "…" in the middle.
@test startswith(printed, "a"^25)
@test endswith(chomp(chomp(printed)), "a"^24)
@test occursin("…", printed)
end
do_test("Switching free/dev") && @testset "Switching free/dev" begin
# Switching a package between a read-only ("free"/added) copy and a `dev`ed
# copy must start/stop watching appropriately. Uses a throwaway depot and an
# emptied registry list so no network or global state is touched.
function make_a2d(path, val, mode="r"; generate=true)
# Create a new "read-only package" (which mimics how Pkg works when you `add` a package)
cd(path) do
pkgpath = normpath(joinpath(path, "A2D"))
srcpath = joinpath(pkgpath, "src")
if generate
Pkg.generate("A2D")
else
mkpath(srcpath)
end
filepath = joinpath(srcpath, "A2D.jl")
write(filepath, """
module A2D
f() = $val
end
""")
# mode=="r" makes the file read-only, mimicking a Pkg-installed copy.
chmod(filepath, mode=="r" ? 0o100444 : 0o100644)
return pkgpath
end
end
# Create a new package depot
depot = mktempdir()
old_depots = copy(DEPOT_PATH)
empty!(DEPOT_PATH)
push!(DEPOT_PATH, depot)
# Skip cloning the General registry since that is slow and unnecessary
ENV["JULIA_PKG_SERVER"] = ""
registries = isdefined(Pkg.Types, :DEFAULT_REGISTRIES) ? Pkg.Types.DEFAULT_REGISTRIES : Pkg.Registry.DEFAULT_REGISTRIES
old_registries = copy(registries)
empty!(registries)
# Ensure we start fresh with no dependencies
old_project = Base.ACTIVE_PROJECT[]
Base.ACTIVE_PROJECT[] = joinpath(depot, "environments", "v$(VERSION.major).$(VERSION.minor)", "Project.toml")
mkpath(dirname(Base.ACTIVE_PROJECT[]))
write(Base.ACTIVE_PROJECT[], "[deps]")
ropkgpath = make_a2d(depot, 1)
Pkg.develop(PackageSpec(path=ropkgpath))
sleep(mtimedelay)
@eval using A2D
sleep(mtimedelay)
@test Base.invokelatest(A2D.f) == 1
# Read-only package: none of its directories should be watched.
for dir in keys(Revise.watched_files)
@test !startswith(dir, ropkgpath)
end
devpath = joinpath(depot, "dev")
mkpath(devpath)
mfile = Revise.manifest_file()
# Start the manifest watcher so Pkg.develop/add switches are noticed.
schedule(Task(Revise.TaskThunk(Revise.watch_manifest, (mfile,))))
sleep(mtimedelay)
pkgdevpath = make_a2d(devpath, 2, "w"; generate=false)
cp(joinpath(ropkgpath, "Project.toml"), joinpath(devpath, "A2D/Project.toml"))
Pkg.develop(PackageSpec(path=pkgdevpath))
yry()
# After switching to the dev copy, the new definition takes effect...
@test Base.invokelatest(A2D.f) == 2
Pkg.develop(PackageSpec(path=ropkgpath))
yry()
# ...and switching back restores the original and stops watching.
@test Base.invokelatest(A2D.f) == 1
for dir in keys(Revise.watched_files)
@test !startswith(dir, ropkgpath)
end

# Restore internal Pkg data
empty!(DEPOT_PATH)
append!(DEPOT_PATH, old_depots)
for pr in old_registries
push!(registries, pr)
end
Base.ACTIVE_PROJECT[] = old_project
push!(to_remove, depot)
end
do_test("Switching environments") && @testset "Switching environments" begin
# Updating a git-sourced package via `Pkg.add(url=...)` to a new commit must
# trigger revision of the loaded code, without a Julia restart.
old_project = Base.active_project()
function generate_package(path, val)
cd(path) do
pkgpath = normpath(joinpath(path, "TestPackage"))
srcpath = joinpath(pkgpath, "src")
if !isdir(srcpath)
Pkg.generate("TestPackage")
end
filepath = joinpath(srcpath, "TestPackage.jl")
write(filepath, """
module TestPackage
f() = $val
end
""")
return pkgpath
end
end
try
Pkg.activate(; temp=true)
# generate a package
root = mktempdir()
pkg = generate_package(root, 1)
# Commit version 1 so Pkg.add can install from the git repo.
LibGit2.with(LibGit2.init(pkg)) do repo
LibGit2.add!(repo, "Project.toml")
LibGit2.add!(repo, "src/TestPackage.jl")
test_sig = LibGit2.Signature("TEST", "[email protected]", round(time(); digits=0), 0)
LibGit2.commit(repo, "version 1"; author=test_sig, committer=test_sig)
end
# install the package
Pkg.add(url=pkg)
sleep(mtimedelay)
@eval using TestPackage
sleep(mtimedelay)
@test Base.invokelatest(TestPackage.f) == 1
# update the package
generate_package(root, 2)
LibGit2.with(LibGit2.GitRepo(pkg)) do repo
LibGit2.add!(repo, "src/TestPackage.jl")
test_sig = LibGit2.Signature("TEST", "[email protected]", round(time(); digits=0), 0)
LibGit2.commit(repo, "version 2"; author=test_sig, committer=test_sig)
end
# install the update
Pkg.add(url=pkg)
sleep(mtimedelay)
revise()
# The environment switch (new package version) is picked up by revise().
@test Base.invokelatest(TestPackage.f) == 2
finally
Pkg.activate(old_project)
end
end
# in v1.8 and higher, a package can't be loaded at all when its precompilation failed
@static if Base.VERSION < v"1.8.0-DEV.1451"
do_test("Broken dependencies (issue #371)") && @testset "Broken dependencies (issue #371)" begin
# A package with an undeclared dependency precompiles with a warning on
# pre-1.8 Julia; Revise must still be able to revise it.
testdir = newtestdir()
srcdir = joinpath(testdir, "DepPkg371", "src")
filepath = joinpath(srcdir, "DepPkg371.jl")
cd(testdir) do
Pkg.generate("DepPkg371")
write(filepath, """
module DepPkg371
using OrderedCollections   # undeclared dependency
greet() = "Hello world!"
end
""")
end
sleep(mtimedelay)
@info "A warning about not having OrderedCollection in dependencies is expected"
@eval using DepPkg371
@test DepPkg371.greet() == "Hello world!"
sleep(mtimedelay)
write(filepath, """
module DepPkg371
using OrderedCollections   # undeclared dependency
greet() = "Hello again!"
end
""")
yry()
# Revision succeeds despite the undeclared dependency.
@test DepPkg371.greet() == "Hello again!"
rm_precompile("DepPkg371")
pop!(LOAD_PATH)
end
end # @static if VERSION ≤ v"1.7"
do_test("Non-jl include_dependency (issue #388)") && @testset "Non-jl include_dependency (issue #388)" begin
# Files pulled in via `include_dependency` that are not Julia source (here, a
# .txt file) must be excluded from the set of tracked source files.
push!(LOAD_PATH, joinpath(@__DIR__, "pkgs"))
@eval using ExcludeFile
sleep(0.01)
pkgdata = Revise.pkgdatas[Base.PkgId(UUID("b915cca1-7962-4ffb-a1c7-2bbdb2d9c14c"), "ExcludeFile")]
files = Revise.srcfiles(pkgdata)
@test length(files) == 2
@test joinpath("src", "ExcludeFile.jl") ∈ files
@test joinpath("src", "f.jl") ∈ files
@test joinpath("deps", "dependency.txt") ∉ files
end
do_test("New files & Requires.jl") && @testset "New files & Requires.jl" begin
# Covers (a) adding/removing files from a package during a session (issues
# #107 and related), and (b) tracking code loaded conditionally through
# Requires.jl `@require` blocks (issues #431, #442, #477 and the linked
# Discourse thread).
# Issue #107
testdir = newtestdir()
dn = joinpath(testdir, "NewFile", "src")
mkpath(dn)
write(joinpath(dn, "NewFile.jl"), """
module NewFile
f() = 1
module SubModule
struct NewType end
end
end
""")
sleep(mtimedelay)
@eval using NewFile
@test NewFile.f() == 1
@test_throws UndefVarError NewFile.g()
sleep(mtimedelay)
# Add a brand-new file g.jl and include it; revision should pick it up.
write(joinpath(dn, "g.jl"), "g() = 2")
write(joinpath(dn, "NewFile.jl"), """
module NewFile
include("g.jl")
f() = 1
module SubModule
struct NewType end
end
end
""")
yry()
@test NewFile.f() == 1
@test NewFile.g() == 2
# Add a file in a new subdirectory, included from a submodule.
sd = joinpath(dn, "subdir")
mkpath(sd)
write(joinpath(sd, "h.jl"), "h(::NewType) = 3")
write(joinpath(dn, "NewFile.jl"), """
module NewFile
include("g.jl")
f() = 1
module SubModule
struct NewType end
include("subdir/h.jl")
end
end
""")
yry()
@test NewFile.f() == 1
@test NewFile.g() == 2
@test NewFile.SubModule.h(NewFile.SubModule.NewType()) == 3

# Deleting a file (and its include) must delete the methods it defined.
dn = joinpath(testdir, "DeletedFile", "src")
mkpath(dn)
write(joinpath(dn, "DeletedFile.jl"), """
module DeletedFile
include("g.jl")
f() = 1
end
""")
write(joinpath(dn, "g.jl"), "g() = 1")
sleep(mtimedelay)
@eval using DeletedFile
@test DeletedFile.f() == DeletedFile.g() == 1
sleep(mtimedelay)
write(joinpath(dn, "DeletedFile.jl"), """
module DeletedFile
f() = 1
end
""")
rm(joinpath(dn, "g.jl"))
yry()
@test DeletedFile.f() == 1
@test_throws MethodError DeletedFile.g()

rm_precompile("NewFile")
rm_precompile("DeletedFile")

# https://discourse.julialang.org/t/revise-with-requires/19347
# TrackRequires exercises several @require styles: a begin/end block with an
# include, a non-block single expression, an @eval into a submodule, an
# explicit nested-@require Revise.track, and a plain include of a subdir file.
dn = joinpath(testdir, "TrackRequires", "src")
mkpath(dn)
write(joinpath(dn, "TrackRequires.jl"), """
module TrackRequires
using Requires
const called_onearg = Ref(false)
onearg(x) = called_onearg[] = true
module SubModule
abstract type SuperType end
end
function __init__()
@require EndpointRanges="340492b5-2a47-5f55-813d-aca7ddf97656" begin
export testfunc
include("testfile.jl")
end
@require CatIndices="aafaddc9-749c-510e-ac4f-586e18779b91" onearg(1)
@require IndirectArrays="9b13fd28-a010-5f03-acff-a1bbcff69959" @eval SubModule include("st.jl")
@require RoundingIntegers="d5f540fe-1c90-5db3-b776-2e2f362d9394" begin
fn = joinpath(@__DIR__, "subdir", "anotherfile.jl")
include(fn)
@require Revise="295af30f-e4ad-537b-8983-00126c2a3abe" Revise.track(TrackRequires, fn)
end
@require UnsafeArrays="c4a57d5a-5b31-53a6-b365-19f8c011fbd6" begin
fn = joinpath(@__DIR__, "subdir", "yetanotherfile.jl")
include(fn)
end
end
end # module
""")
write(joinpath(dn, "testfile.jl"), "testfunc() = 1")
write(joinpath(dn, "st.jl"), """
struct NewType <: SuperType end
h(::NewType) = 3
""")
sd = mkpath(joinpath(dn, "subdir"))
write(joinpath(sd, "anotherfile.jl"), "ftrack() = 1")
write(joinpath(sd, "yetanotherfile.jl"), "fauto() = 1")
sleep(mtimedelay)
@eval using TrackRequires
# Requires must support notifications for Revise to track @require blocks;
# `notified` gates the assertions that depend on it.
notified = isdefined(TrackRequires.Requires, :withnotifications)
notified || @warn "Requires does not support notifications"
@test_throws UndefVarError TrackRequires.testfunc()
@test_throws UndefVarError TrackRequires.SubModule.h(TrackRequires.SubModule.NewType())
@eval using EndpointRanges  # to trigger Requires
sleep(mtimedelay)
notified && @test TrackRequires.testfunc() == 1
write(joinpath(dn, "testfile.jl"), "testfunc() = 2")
yry()
notified && @test TrackRequires.testfunc() == 2
@test_throws UndefVarError TrackRequires.SubModule.h(TrackRequires.SubModule.NewType())
# Issue #477
@eval using IndirectArrays
sleep(mtimedelay)
notified && @test TrackRequires.SubModule.h(TrackRequires.SubModule.NewType()) == 3
# Check a non-block expression
warnfile = randtmp()
open(warnfile, "w") do io
redirect_stderr(io) do
@eval using CatIndices
sleep(0.5)
end
end
notified && @test TrackRequires.called_onearg[]
# The non-block @require must run without emitting warnings.
@test isempty(read(warnfile, String))
# Issue #431
@test_throws UndefVarError TrackRequires.ftrack()
if !(get(ENV, "CI", nothing) == "true" && Base.VERSION.major == 1 && Base.VERSION.minor == 8)  # circumvent CI hang
@eval using RoundingIntegers
sleep(2)  # allow time for the @async in all @require blocks to finish
if notified
@test TrackRequires.ftrack() == 1
id = Base.PkgId(TrackRequires)
pkgdata = Revise.pkgdatas[id]
sf = Revise.srcfiles(pkgdata)
# The @require-loaded file is tracked exactly once, with a relative path.
@test count(name->occursin("@require", name), sf) == 1
@test count(name->occursin("anotherfile", name), sf) == 1
@test !any(isequal("."), sf)
idx = findfirst(name->occursin("anotherfile", name), sf)
@test !isabspath(sf[idx])
end
end
@test_throws UndefVarError TrackRequires.fauto()
@eval using UnsafeArrays
sleep(2)  # allow time for the @async in all @require blocks to finish
if notified
@test TrackRequires.fauto() == 1
id = Base.PkgId(TrackRequires)
pkgdata = Revise.pkgdatas[id]
sf = Revise.srcfiles(pkgdata)
@test count(name->occursin("@require", name), sf) == 1
@test count(name->occursin("yetanotherfile", name), sf) == 1
@test !any(isequal("."), sf)
idx = findfirst(name->occursin("yetanotherfile", name), sf)
@test !isabspath(sf[idx])
end

# Ensure it also works if the Requires dependency is pre-loaded
dn = joinpath(testdir, "TrackRequires2", "src")
mkpath(dn)
write(joinpath(dn, "TrackRequires2.jl"), """
module TrackRequires2
using Requires
function __init__()
@require EndpointRanges="340492b5-2a47-5f55-813d-aca7ddf97656" begin
export testfunc
include("testfile.jl")
end
@require MappedArrays="dbb5928d-eab1-5f90-85c2-b9b0edb7c900" begin
export othertestfunc
include("testfile2.jl")
end
end
end # module
""")
write(joinpath(dn, "testfile.jl"), "testfunc() = 1")
write(joinpath(dn, "testfile2.jl"), "othertestfunc() = -1")
sleep(mtimedelay)
@eval using TrackRequires2
sleep(mtimedelay)
# EndpointRanges is already loaded from above, so its @require fires at once.
notified && @test TrackRequires2.testfunc() == 1
@test_throws UndefVarError TrackRequires2.othertestfunc()
write(joinpath(dn, "testfile.jl"), "testfunc() = 2")
yry()
notified && @test TrackRequires2.testfunc() == 2
@test_throws UndefVarError TrackRequires2.othertestfunc()
@eval using MappedArrays
@test TrackRequires2.othertestfunc() == -1
sleep(mtimedelay)
write(joinpath(dn, "testfile2.jl"), "othertestfunc() = -2")
yry()
notified && @test TrackRequires2.othertestfunc() == -2

# Issue #442
push!(LOAD_PATH, joinpath(@__DIR__, "pkgs"))
@eval using Pkg442
sleep(0.01)
@test check442()
@test Pkg442.check442A()
@test Pkg442.check442B()
@test Pkg442.Dep442B.has442A()
pop!(LOAD_PATH)
rm_precompile("TrackRequires")
rm_precompile("TrackRequires2")
pop!(LOAD_PATH)
end
do_test("entr") && @testset "entr" begin
# Test `entr`: running a callback whenever watched files/directories change.
# The callback raising an exception is the documented way to terminate `entr`,
# so each scenario ends by forcing an error and unwrapping it from the nested
# Composite/TaskFailed/Captured exception layers.
srcfile1 = joinpath(tempdir(), randtmp()*".jl"); push!(to_remove, srcfile1)
srcfile2 = joinpath(tempdir(), randtmp()*".jl"); push!(to_remove, srcfile2)
revise(throw=true)   # force compilation
write(srcfile1, "Core.eval(Main, :(__entr__ = 1))")
touch(srcfile2)
Core.eval(Main, :(__entr__ = 0))
sleep(mtimedelay)
try
@sync begin
@test Main.__entr__ == 0
@async begin
entr([srcfile1, srcfile2]; pause=0.5) do
include(srcfile1)
end
end
sleep(1)
@test Main.__entr__ == 1  # callback should have been run (postpone=false)

# File modification
write(srcfile1, "Core.eval(Main, :(__entr__ = 2))")
sleep(1)
@test Main.__entr__ == 2  # callback should have been called

# Two events in quick succession (w.r.t. the `pause` argument)
write(srcfile1, "Core.eval(Main, :(__entr__ += 1))")
sleep(0.1)
touch(srcfile2)
sleep(1)
@test Main.__entr__ == 3  # callback should have been called only once

# Including a file that errors terminates the entr task.
write(srcfile1, "error(\"stop\")")
sleep(mtimedelay)
end
@test false
catch err
# Unwrap the async exception layers to reach the original LoadError.
while err isa CompositeException
err = err.exceptions[1]
if err isa TaskFailedException
err = err.task.exception
end
if err isa CapturedException
err = err.ex
end
end
@test isa(err, LoadError)
@test err.error.msg == "stop"
end

# Callback should have been removed
@test isempty(Revise.user_callbacks_by_file[srcfile1])

# Watch directories (#470): creation, modification, and deletion of files
# inside a watched directory each trigger the callback exactly once.
try
@sync let
srcdir = joinpath(tempdir(), randtmp())
mkdir(srcdir)
trigger = joinpath(srcdir, "trigger.txt")
counter = Ref(0)
stop = Ref(false)

@async begin
entr([srcdir]; pause=0.5) do
counter[] += 1
stop[] && error("stop watching directory")
end
end
sleep(1)
@test length(readdir(srcdir)) == 0 # directory should still be empty
@test counter[] == 1               # postpone=false

# File creation
touch(trigger)
sleep(1)
@test counter[] == 2

# File modification
touch(trigger)
sleep(1)
@test counter[] == 3

# File deletion -> the directory should be empty again
rm(trigger)
sleep(1)
@test length(readdir(srcdir)) == 0
@test counter[] == 4

# Two events in quick succession (w.r.t. the `pause` argument)
touch(trigger)  # creation
sleep(0.1)
touch(trigger)  # modification
sleep(1)
@test counter[] == 5 # Callback should have been called only once

# Stop
stop[] = true
touch(trigger)
end

# `entr` should have errored by now
@test false
catch err
# Same unwrapping as above, but the terminal error is a plain ErrorException.
while err isa CompositeException
err = err.exceptions[1]
if err isa TaskFailedException
err = err.task.exception
end
if err isa CapturedException
err = err.ex
end
end

@test isa(err, ErrorException)
@test err.msg == "stop watching directory"
end
end
# Holds the result observed inside the entr callback below; must be a global
# `const Ref` so the async callback and the test body share it.
const A354_result = Ref(0)

# issue #354
do_test("entr with modules") && @testset "entr with modules" begin
# `entr([], [A354])` (no explicit files, one module) must watch the module's
# source files and rerun the callback when they change.
testdir = newtestdir()
modname = "A354"
srcfile = joinpath(testdir, modname * ".jl")

setvalue(x) = write(srcfile, "module $modname test() = $x end")

setvalue(1)
# these sleeps may not be needed...
sleep(mtimedelay)
@eval using A354
sleep(mtimedelay)

A354_result[] = 0

# The edit happens asynchronously while entr blocks in the main task.
@async begin
sleep(mtimedelay)
setvalue(2)
# belt and suspenders -- make sure we trigger entr:
sleep(mtimedelay)
touch(srcfile)
sleep(mtimedelay)
end

try
entr([], [A354], postpone=true) do
A354_result[] = A354.test()
error()   # raising an error is how the entr loop is terminated
end
catch err
end

@test A354_result[] == 2

rm_precompile(modname)
end
# issue #469
do_test("entr with all files") && @testset "entr with all files" begin
# `entr([]; all=true)` watches every file Revise knows about; touching a
# tracked module file must fire the callback.
testdir = newtestdir()
modname = "A469"
srcfile = joinpath(testdir, modname * ".jl")
write(srcfile, "module $modname test() = 469 end")

sleep(mtimedelay)
@eval using A469
sleep(mtimedelay)

result = Ref(0)
try
@sync begin
@async begin
# Watch all files known to Revise
# (including `srcfile`)
entr([]; all=true, postpone=true) do
result[] = 1
error("stop")   # terminate entr after the first trigger
end
end
sleep(mtimedelay)
# postpone=true: the callback must NOT have run yet.
@test result[] == 0

# Trigger the callback
touch(srcfile)
end
@test false
catch err
# Unwrap async exception layers to reach the deliberate "stop" error.
while err isa CompositeException
err = err.exceptions[1]
if err isa TaskFailedException
err = err.task.exception
end
if err isa CapturedException
err = err.ex
end
end

@test isa(err, ErrorException)
@test err.msg == "stop"
end

# If we got to this point, the callback should have been triggered. But
# let's check nonetheless
@test result[] == 1

rm_precompile(modname)
end
do_test("callbacks") && @testset "callbacks" begin
# Test Revise.add_callback/remove_callback for (a) plain files, (b) modules,
# and (c) files tracked ad hoc with includet (issue #574). After removal the
# callback must no longer fire.

append(path, x...) = open(path, append=true) do io
write(io, x...)
end

# (a) File callback: fires on each revision while registered.
mktemp() do path, _
contents = Ref("")
key = Revise.add_callback([path]) do
contents[] = read(path, String)
end
sleep(mtimedelay)
append(path, "abc")
sleep(mtimedelay)
revise()
@test contents[] == "abc"

sleep(mtimedelay)
append(path, "def")
sleep(mtimedelay)
revise()
@test contents[] == "abcdef"

Revise.remove_callback(key)
sleep(mtimedelay)
append(path, "ghi")
sleep(mtimedelay)
revise()
# Callback removed: contents must not have been updated.
@test contents[] == "abcdef"
end

# (b) Module callback: fires when any of the module's files change.
testdir = newtestdir()
modname = "A355"
srcfile = joinpath(testdir, modname * ".jl")
setvalue(x) = write(srcfile, "module $modname test() = $x end")
setvalue(1)
sleep(mtimedelay)
@eval using A355
sleep(mtimedelay)
A355_result = Ref(0)
Revise.add_callback([], [A355]) do
A355_result[] = A355.test()
end
sleep(mtimedelay)
setvalue(2)
# belt and suspenders -- make sure we trigger entr:
sleep(mtimedelay)
touch(srcfile)
yry()
@test A355_result[] == 2
rm_precompile(modname)

# (c) Issue 574 - ad-hoc revision of a file, combined with add_callback()
A574_path = joinpath(testdir, "A574.jl")
set_foo_A574(x) = write(A574_path, "foo_574() = $x")
set_foo_A574(1)
includet(@__MODULE__, A574_path)
@test Base.invokelatest(foo_574) == 1

foo_A574_result = Ref(0)
key = Revise.add_callback([A574_path]) do
foo_A574_result[] = foo_574()
end
sleep(mtimedelay)
set_foo_A574(2)
sleep(mtimedelay)
revise()
@test Base.invokelatest(foo_574) == 2
@test foo_A574_result[] == 2
Revise.remove_callback(key)
sleep(mtimedelay)
set_foo_A574(3)
sleep(mtimedelay)
revise()
# The method itself is still revised; only the callback stops firing.
@test Base.invokelatest(foo_574) == 3
@test foo_A574_result[] == 2 # <- callback removed - no longer updated
end
# Issue #689: `includet(mod, path)` must evaluate the file into `mod`, so two
# sibling modules that includet the same file each get their own copy.
do_test("includet with mod arg (issue #689)") && @testset "includet with mod arg (issue #689)" begin
testdir = newtestdir()
# A shared source file defining module Common; it is includet-ed into both
# Routines and Codes below.
common = joinpath(testdir, "common.jl")
write(common, """
module Common
const foo = 2
end
""")
routines = joinpath(testdir, "routines.jl")
write(routines, """
module Routines
using Revise
includet(@__MODULE__, raw"$common")
using .Common
end
""")
codes = joinpath(testdir, "codes.jl")
write(codes, """
module Codes
using Revise
includet(@__MODULE__, raw"$common")
using .Common
end
""")
# Driver pulls both siblings into one tree.
driver = joinpath(testdir, "driver.jl")
write(driver, """
module Driver
using Revise
includet(@__MODULE__, raw"$routines")
using .Routines
includet(@__MODULE__, raw"$codes")
using .Codes
end
""")
includet(@__MODULE__, driver)
# Each copy of Common must be parented by the module that included it,
# while both remain rooted in Main.
@test parentmodule(Driver.Routines.Common) == Driver.Routines
@test Base.moduleroot(Driver.Routines.Common) == Main
@test parentmodule(Driver.Codes.Common) == Driver.Codes
@test Base.moduleroot(Driver.Codes.Common) == Main
@test Driver.Routines.Common.foo == 2
@test Driver.Codes.Common.foo == 2
end
# Smoke tests for rarely-hit code paths, purely to exercise them for coverage.
do_test("misc - coverage") && @testset "misc - coverage" begin
@test Revise.ReviseEvalException("undef", UndefVarError(:foo)).loc isa String
@test !Revise.throwto_repl(UndefVarError(:foo))
@test endswith(Revise.fallback_juliadir(), "julia")
@test isnothing(Revise.revise(REPL.REPLBackend()))
end
# Removed entry points must warn (not error) when called.
do_test("deprecated") && @testset "deprecated" begin
@test_logs (:warn, r"`steal_repl_backend` has been removed.*") Revise.async_steal_repl_backend()
@test_logs (:warn, r"`steal_repl_backend` has been removed.*") Revise.wait_steal_repl_backend()
end
println("beginning cleanup")
GC.gc(); GC.gc()
# Tear down: delete every temporary directory recorded in `to_remove`, drop it
# from LOAD_PATH, and let Revise process the resulting deletion events.
# stderr is captured to a file so we can assert on the expected warning text.
@testset "Cleanup" begin
logs, _ = Test.collect_test_logs() do
warnfile = randtmp()
open(warnfile, "w") do io
redirect_stderr(io) do
for name in to_remove
try
rm(name; force=true, recursive=true)
deleteat!(LOAD_PATH, findall(LOAD_PATH .== name))
catch
end
end
# Several revision passes so Revise notices all the deletions.
for i = 1:3
yry()
GC.gc()
end
end
end
# The exact warning depends on whether files or directories are watched.
msg = Revise.watching_files[] ? "is not an existing file" : "is not an existing directory"
isempty(ARGS) && !Sys.isapple() && @test occursin(msg, read(warnfile, String))
rm(warnfile)
end
end
GC.gc(); GC.gc(); GC.gc() # work-around for https://github.com/JuliaLang/julia/issues/28306
# See issue #532: loading Revise in a session whose active environment has no
# Project.toml used to raise an InitError.
function load_in_empty_project_test()
# Launch a fresh julia process with an empty environment (missing
# Project.toml) and check that `using Revise` does not throw an InitError.
julia = Base.julia_cmd()
revise_proj = escape_string(Base.active_project())
@assert isfile(revise_proj)
src = """
import Pkg
Pkg.activate("fake_env")
@assert !isfile(Base.active_project())
# force to load the package env Revise version
empty!(LOAD_PATH)
push!(LOAD_PATH, "$revise_proj")
@info "A warning about no Manifest.toml file found is expected"
try; using Revise
catch err
# just fail for this error (see #532)
err isa InitError && rethrow(err)
end
"""
cmd = `$julia --project=@. -E $src`
# `run` throws on a nonzero exit status, so reaching `true` means the
# subprocess completed without an InitError.
@test begin
wait(run(cmd))
true
end
end
do_test("Import in empty environment (issue #532)") && @testset "Import in empty environment (issue #532)" begin
load_in_empty_project_test();
end
# Additional test files run in this session.
include("backedges.jl")
include("non_jl_test.jl")
do_test("Base signatures") && @testset "Base signatures" begin
println("beginning signatures tests")
# Using the extensive repository of code in Base as a testbed
include("sigtest.jl")
end
# Run this test in a separate julia process, since it messes with projects, and we don't want to have to
# worry about making sure it resets cleanly.
do_test("Switch Versions") && @test success(pipeline(`$(Base.julia_cmd()) switch_version.jl`, stderr=stderr))
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 4334 | using Revise, Test
using Revise.CodeTracking
using Revise.LoweredCodeUtils
"""
    isdefinedmod(mod::Module) -> Bool

Return `true` if `mod` is reachable from `Main` by walking its `fullname`
path, i.e. every intermediate module binding is defined.  Not all modules
(e.g. `LibGit2`) are reachable without loading the corresponding stdlib.
"""
function isdefinedmod(mod::Module)
    cur = Main
    for sym in fullname(mod)
        isdefined(cur, sym) || return false
        cur = getfield(cur, sym)
    end
    return true
end
"""
    reljpath(path)

Convert an absolute Julia-source path into one relative to the repository
root (`"base/..."`, `"stdlib/..."`, or `"test/..."`).  Paths under none of
those directories are returned unchanged.
"""
function reljpath(path)
    for prefix in ("base/", "stdlib/", "test/")
        pieces = split(path, prefix)
        length(pieces) == 2 && return prefix * pieces[end]
    end
    return path
end
"""
    filepredicate(file, reffiles)

Return `true` if `file` resolves (via `Base.find_source_file`) to a source
file whose repo-relative path is a member of `reffiles`.
"""
function filepredicate(file, reffiles)
    srcpath = Base.find_source_file(file)
    srcpath === nothing && return false   # e.g. methods whose file is "none"
    return reljpath(srcpath) in reffiles
end
"""
    signature_diffs(mod::Module, signatures; filepredicate=nothing) -> failed, extras, nmethods

Compare the methods of every callable bound in `mod` against the collection
`signatures` (keyed by method signature type).

Returns:
- `failed`: signatures of methods present in `mod` but absent from
  `signatures` (restricted to files accepted by `filepredicate`, if given);
- `extras`: the entries of `signatures` that matched no method in `mod`;
- `nmethods`: the total number of methods examined.
"""
function signature_diffs(mod::Module, signatures; filepredicate=nothing)
    extras = copy(signatures)
    modeval, modinclude = getfield(mod, :eval), getfield(mod, :include)
    failed = []
    nmethods = 0
    for sym in names(mod; all=true)
        isdefined(mod, sym) || continue
        obj = getfield(mod, sym)
        obj isa Base.Callable || continue
        # `eval`/`include` are auto-defined in every module; skip them.
        (obj === modeval || obj === modinclude) && continue
        for m in methods(obj)
            nmethods += 1
            if haskey(signatures, m.sig)
                delete!(extras, m.sig)
            elseif filepredicate === nothing || filepredicate(String(m.file))
                # Signature found only in selected files and missing from
                # `signatures`: record it as a failure.
                push!(failed, m.sig)
            end
        end
    end
    return failed, extras, nmethods
end
"""
    extracttype(T)

Given a `Type{X}` signature element, return the underlying type: `X` itself
when it is a `Type`, or its upper bound when it is a `TypeVar`.  Anything
else is an error.
"""
function extracttype(T)
    param = T.parameters[1]
    if param isa Type
        return param
    elseif param isa TypeVar
        return param.ub
    end
    error("unrecognized type ", T)
end
# `Vararg` acquired its own type tag (`Core.TypeofVararg`) in newer Julia
# versions; on older versions that name does not exist, so fall back to a
# constant-`false` predicate there.
if isdefined(Core, :TypeofVararg)
istva(T) = isa(T, Core.TypeofVararg)
else
istva(T) = false
end
"""
    in_module_or_core(T, mod::Module) -> Bool

Return `true` if every type mentioned by the signature element `T` is owned
by `mod` or by `Core`.  Recurses through `TypeVar` upper bounds, `UnionAll`
wrappers, `Union` branches, `Vararg` element types, and `Type{...}` wrappers.
"""
function in_module_or_core(T, mod::Module)
    T isa TypeVar && return in_module_or_core(T.ub, mod)
    if T isa UnionAll
        T = Base.unwrap_unionall(T)
    end
    T === Union{} && return true
    if T isa Union
        # Both halves of the union must qualify.
        return in_module_or_core(T.a, mod) && in_module_or_core(T.b, mod)
    end
    if istva(T)
        # An unbound Vararg has no element type to inspect.
        return isdefined(T, :T) ? in_module_or_core(T.T, mod) : true
    end
    tname = T.name
    tname.name === :Type && return in_module_or_core(extracttype(T), mod)
    owner = tname.module
    return owner === mod || owner === Core
end
# Empty scratch module into which the test expression below is evaluated.
module Lowering end
# Revise must recover method signatures even from definitions that lower to
# `:lambda` expressions, e.g. inner constructors with keyword arguments.
@testset ":lambda expressions" begin
ex = quote
mutable struct InnerC
x::Int
valid::Bool
function InnerC(x; notvalid::Bool=false)
return new(x, !notvalid)
end
end
end
# At least two signatures are expected for the keyword constructor
# (presumably the entry point plus the keyword body -- see eval_with_signatures).
sigs, _ = Revise.eval_with_signatures(Lowering, ex)
@test length(sigs) >= 2
end
# Parse every file that went into building Base, extract method signatures via
# Revise, then compare against the methods actually present in Base.
basefiles = Set{String}()
@time for (i, (mod, file)) in enumerate(Base._included_files)
(endswith(file, "sysimg.jl") || endswith(file, "Base.jl")) && continue
Base.VERSION < v"1.7" && Sys.iswindows() && endswith(file, "RNGs.jl") && continue # invalid redefinition of constant RandomDevice
file = Revise.fixpath(file)
push!(basefiles, reljpath(file))
mexs = Revise.parse_source(file, mod)
Revise.instantiate_sigs!(mexs; always_rethrow=true)
end
# Methods in Base whose signature Revise failed to extract, restricted to the
# files scanned above.
failed, extras, nmethods = signature_diffs(Base, CodeTracking.method_info; filepredicate = fn->filepredicate(fn, basefiles))
# In some cases, the above doesn't really select the file-of-origin. For example, anything
# defined with an @enum gets attributed to Enum.jl rather than the file in which @enum is used.
realfailed = similar(failed, 0)
for sig in failed
ft = Base.unwrap_unionall(sig).parameters[1]
match(r"^getfield\(Base, Symbol\(\"##\d", string(ft)) === nothing || continue # exclude anonymous functions
all(T->in_module_or_core(T, Base), Base.unwrap_unionall(sig).parameters[2:end]) || continue # only Base/Core argument types count
push!(realfailed, sig)
end
if false # change to true to see the failures
world = Base.get_world_counter()
for tt in realfailed
println(tt)
mms = Base._methods_by_ftype(tt, -1, world)
for mm in mms
println(mm.method)
end
end
end
@test length(realfailed) < 60 # big enough for some cushion in case new "difficult" methods get added
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 649 | # For this test, Julia should be started without Revise and then it should be added to the running session
# Catches #664
using Test
t = @async(
VERSION >= v"1.12.0-DEV.612" ? Base.run_main_repl(true, true, :no, true) :
VERSION >= v"1.11.0-DEV.222" ? Base.run_main_repl(true, true, :no, true, false) :
Base.run_main_repl(true, true, false, true, false))
isdefined(Base, :errormonitor) && Base.errormonitor(t)
while !isdefined(Base, :active_repl_backend) || isnothing(Base.active_repl_backend)
sleep(0.5)
end
using Revise
@test Revise.revise_first ∈ Base.active_repl_backend.ast_transforms
exit()
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 1415 | using Revise, Pkg, Test
mktempdir() do thisdir
Pkg.activate(thisdir)
Pkg.develop(path = joinpath(dirname(@__FILE__), "pkgs", "PkgChange_v1"))
# This is only needed on Pkg versions that don't notify
Revise.active_project_watcher()
# Back to toplevel
@eval begin
using PkgChange
@test_throws UndefVarError somemethod() # not present in v1
# From a different process, switch the active version of ExponentialUtilities
v2_cmd = """using Pkg; Pkg.activate("."); Pkg.develop(path = joinpath("$(escape_string(dirname(@__FILE__)))", "pkgs", "PkgChange_v2"))"""
t = @async run(pipeline(Cmd(`$(Base.julia_cmd()) -e $v2_cmd`; dir=$thisdir); stderr, stdout))
isdefined(Base, :errormonitor) && Base.errormonitor(t)
wait(Revise.revision_event)
revise()
@test somemethod() === 1 # present in v2
# ...and then switch back (check that it's bidirectional and also to reset state)
v1_cmd = """using Pkg; Pkg.activate("."); Pkg.develop(path = joinpath("$(escape_string(dirname(@__FILE__)))", "pkgs", "PkgChange_v1"))"""
t = @async run(pipeline(Cmd(`$(Base.julia_cmd()) -e $v1_cmd`; dir=$thisdir); stderr, stdout))
isdefined(Base, :errormonitor) && Base.errormonitor(t)
wait(Revise.revision_event)
revise()
@test_throws MethodError somemethod() # not present in v1
end
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 67 | module Dep442A
export check442A
check442A() = true
end # module
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 281 | module Dep442B
using Requires
export check442B
check442B() = true
function link_442A()
@debug "Loading 442A support into 442B"
include("support_442A.jl")
end
function __init__()
@require Dep442A="76238f47-ed95-4e4a-a4d9-95a3fb1630ea" link_442A()
end
end # module
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 124 | module ExcludeFile
include_dependency(joinpath(dirname(@__DIR__), "deps", "dependency.txt"))
include("f.jl")
end # module
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 93 | module Pkg442
using Dep442A
using Dep442B
export check442
check442() = true
end # module
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 121 | module PkgChange
# Deliberately empty. This test also tests revising from an empty file to a file
# with contents.
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | code | 58 | module PkgChange
export somemethod
somemethod() = 1
end
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 5590 | # News
This file describes only major changes, and does not include bug fixes,
cleanups, or minor enhancements.
## Revise 3.3
* Upgrade to JuliaInterpreter 0.9 and drop support for Julia prior to 1.6 (the new LTS).
## Revise 3.2
* Switch to synchronous processing of new packages and `@require` blocks.
This is motivated by changes in Julia designed to make code-loading threadsafe.
There are small (100-200ms) increases in latency on first use, but more guarantees that
Revise's workqueue will finish before new operations commence.
## Revise 3.0
* Latencies at startup and upon first subsequent package load are greatly reduced.
* Support for selective evaluation: by default, `includet` will use a mode in which only
method definitions, not "data," are revised. By default, packages still
re-evaluate every changed expression, but packages can opt out of this behavior
by defining `__revise_mode__ = :evalmeth`. See the documentation for details.
This change should make `includet` more resistant to long latencies and other bad behavior.
* Evaluations now happen in order of dependency: if PkgA depends on PkgB,
PkgB's evaluations will occur before PkgA's. Likewise, if a package loads `"file1.jl"` before
`"file2.jl"`, `"file1.jl`"'s evaluations will be processed first.
* Duplicating a method and then deleting one copy no longer risks deleting the method from your
session--method deletion happens only when the final copy is removed.
* Error handling has been extensively reworked. Messages and stacktraces should be more consistent
with the error reporting of Julia itself. Only the first error in each file is shown.
Users are reminded of outstanding revision errors only by changing the prompt color to yellow.
* By default, Revise no longer tracks its own code or that of its dependencies.
Call `Revise.add_revise_deps()` (before making any changes) if you want Revise to track its
own code.
## Revise 2.7
* Add framework for user callbacks
* Faster startup and revision, depending on Julia version
## Revise 2.6
* Starting with Julia 1.5 it will be possible to run Revise with just `using Revise`
in your `startup.jl` file. Older Julia versions will still need the
backend-stealing code.
## Revise 2.5
* Allow previously reported errors to be re-reported with `Revise.errors()`
## Revise 2.4
* Automatic tracking of methods and included files in `@require` blocks
(needs Requires 1.0.0 or higher)
## Revise 2.3
* When running code (e.g., with `includet`), execute lines that "do work" rather than
"define methods" using the compiler. The greatly improves performance in
work-intensive cases.
* When analyzing code to compute method signatures, omit expressions that don't contribute
to signatures. By skipping initialization code this leads to improved safety and
performance.
* Switch to an O(N) algorithm for renaming frame methods to match their running variants.
* Support addition and deletion of source files.
* Improve handling and printing of errors.
## Revise 2.2
* Revise now warns you when the source files are not synchronized with running code.
(https://github.com/timholy/Revise.jl/issues/317)
## Revise 2.1
New features:
* Add `entr` for re-running code any time a set of dependent files and/or
packages change.
## Revise 2.0
Revise 2.0 is a major rewrite with
[JuliaInterpreter](https://github.com/JuliaDebug/JuliaInterpreter.jl)
at its foundation.
Breaking changes:
* Most of the internal data structures have changed
* The ability to revise code in Core.Compiler has regressed until technical
issues are resolved in JuliaInterpreter.
* In principle, code that cannot be evaluated twice (e.g., library initialization)
could be problematic.
New features:
* Revise now (re)evaluates top-level code to extract method signatures. This allows
Revise to identify methods defined by code, e.g., by an `@eval` block.
Moreover, Revise can identify important changes external to the definition, e.g.,
if
```julia
for T in (Float16, Float32, Float32)
@eval foo(::Type{$T}) = 1
end
```
gets revised to
```julia
for T in (Float32, Float32)
@eval foo(::Type{$T}) = 1
end
```
then Revise correctly deletes the `Float16` method of `foo`. ([#243])
* Revise handles all method deletions before enacting any new definitions.
As a consequence, moving methods from one file to another is more robust.
([#243])
* Revise was split, with a new package
[CodeTracking](https://github.com/timholy/CodeTracking.jl)
designed to be the "query" interface for Revise. ([#245])
* Line numbers in method lists are corrected for moving code (requires Julia 1.2 or higher)
([#278])
## Revise 1.0 (changes compared to the 0.7 branch)
Breaking changes:
* The internal structure has changed from using absolute paths for
individual files to a package-level organization that uses
`Base.PkgId` keys and relative paths ([#217]).
New features:
* Integration with Julia package manager. Revise now follows switches
from `dev`ed packages to `free`d packages, and also follows
version-upgrades of `free`d packages ([#217]).
* Tracking code in Julia's standard libraries even for users who
download Julia binaries. Users of Rebugger will be able to step into
such methods ([#222]).
[#217]: https://github.com/timholy/Revise.jl/pull/217
[#222]: https://github.com/timholy/Revise.jl/pull/222
[#243]: https://github.com/timholy/Revise.jl/pull/243
[#245]: https://github.com/timholy/Revise.jl/pull/245
[#278]: https://github.com/timholy/Revise.jl/pull/278
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 2069 | <div align="center"> <img src="images/revise-wordmark.svg" alt="Revise.jl"></img></div>
[](https://github.com/timholy/Revise.jl/actions/workflows/ci.yml)
[](http://codecov.io/github/timholy/Revise.jl?branch=master)
`Revise.jl` allows you to modify code and use the changes without restarting Julia.
With Revise, you can be in the middle of a session and then update packages, switch git branches,
and/or edit the source code in the editor of your choice; any changes will typically be incorporated
into the very next command you issue from the REPL.
This can save you the overhead of restarting Julia, loading packages, and waiting for code to JIT-compile.
See the [documentation](https://timholy.github.io/Revise.jl/stable):
[](https://timholy.github.io/Revise.jl/stable)
In particular, most users will probably want to alter their `.julia/config/startup.jl` file
to run Revise automatically, as described in the [Configuration section](https://timholy.github.io/Revise.jl/stable/config/#Using-Revise-by-default-1) of the documentation.
## Credits
Revise became possible because of Jameson Nash's fix of [Julia issue 265](https://github.com/JuliaLang/julia/issues/265).
[Julia for VSCode](https://www.julia-vscode.org/) and [Juno](http://junolab.org/) are IDEs that offer an editor-based mechanism for achieving a subset of
Revise's aims.
## Major releases
- Both the current 3.x and 2.x release cycles use JuliaInterpreter to step through your module-defining code.
- The 1.x release cycle does not use JuliaInterpreter, but does integrate with Pkg.jl. Try this if the more recent releases give you trouble. (But please report the problems first!)
- For Julia 0.6 [see this branch](https://github.com/timholy/Revise.jl/tree/v0.6). However, you really shouldn't be using Julia 0.6 anymore!
See the [NEWS](NEWS.md) for additional information.
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 7824 | # Configuration
!!! compat
These instructions are applicable only for Julia 1.5 and higher. If you are running an older version of Julia, upgrading to at least 1.6 is recommended. If you cannot upgrade, see the documentation for Revise 3.2.x or earlier.
## Using Revise by default
If you like Revise, you can ensure that every Julia session uses it by
launching it from your `~/.julia/config/startup.jl` file.
Note that using Revise adds a small latency at Julia startup, generally about 0.7s when you first launch Julia and another 0.25s for your first package load.
Users should weigh this penalty against whatever benefit they may derive from not having to restart their entire session.
This can be as simple as adding
```julia
using Revise
```
as the first line in your `startup.jl`. If you have a Unix terminal available, simply run
```bash
mkdir -p ~/.julia/config/ && echo "using Revise" >> ~/.julia/config/startup.jl
```
If you use different package environments and do not always have Revise available,
```julia
try
using Revise
catch e
@warn "Error initializing Revise" exception=(e, catch_backtrace())
end
```
is recommended instead.
### Using Revise automatically within Jupyter/IJulia
If you want Revise to launch automatically within IJulia, then you should also create a `.julia/config/startup_ijulia.jl` file with the contents
```julia
try
@eval using Revise
catch e
@warn "Error initializing Revise" exception=(e, catch_backtrace())
end
```
or simply run
```bash
mkdir -p ~/.julia/config/ && tee -a ~/.julia/config/startup_ijulia.jl << END
try
@eval using Revise
catch e
@warn "Error initializing Revise" exception=(e, catch_backtrace())
end
END
```
## Configuring the revise mode
By default, in packages all changes are tracked, but with `includet` only method definitions are tracked.
This behavior can be overridden by defining a variable `__revise_mode__` in the module(s) containing
your methods and/or data. `__revise_mode__` must be a `Symbol` taking one of the following values:
- `:eval`: evaluate everything (the default for packages)
- `:evalmeth`: evaluate changes to method definitions (the default for `includet`)
This should work even for quite complicated method definitions, such as those that might
be made within a `for`-loop and `@eval` block.
- `:evalassign`: evaluate method definitions and assignment statements. A top-level expression
`a = Int[]` would be evaluated, but `push!(a, 1)` would not because the latter is not an assignment.
- `:sigs`: do not implement any changes, only scan method definitions for their signatures so that
their location can be updated as changes to the file(s) are made.
If you're using `includet` from the REPL, you can enter `__revise_mode__ = :eval` to set
it throughout `Main`. `__revise_mode__` can be set independently in each module.
## Optional global configuration
Revise can be configured by setting environment variables. These variables have to be
set before you execute `using Revise`, because these environment variables are parsed
only during execution of Revise's `__init__` function.
There are several ways to set these environment variables:
- If you are [Using Revise by default](@ref) then you can include statements like
`ENV["JULIA_REVISE"] = "manual"` in your `.julia/config/startup.jl` file prior to
the line containing `using Revise`.
- On Unix systems, you can set variables in your shell initialization script
(e.g., put lines like `export JULIA_REVISE=manual` in your
[`.bashrc` file](http://www.linuxfromscratch.org/blfs/view/svn/postlfs/profile.html)
if you use `bash`).
- On Unix systems, you can launch Julia from the Unix prompt as `$ JULIA_REVISE=manual julia`
to set options for just that session.
The function of specific environment variables is described below.
### Manual revision: JULIA_REVISE
By default, Revise processes any modified source files every time you enter
a command at the REPL.
However, there might be times where you'd prefer to exert manual control over
the timing of revisions. `Revise` looks for an environment variable
`JULIA_REVISE`, and if it is set to anything other than `"auto"` it
will require that you manually call `revise()` to update code.
### User scripts: JULIA\_REVISE\_INCLUDE
By default, `Revise` only tracks files that have been required as a consequence of
a `using` or `import` statement; files loaded by `include` are not
tracked, unless you explicitly use `includet` or `Revise.track(filename)`. However, you can turn on
automatic tracking by setting the environment variable `JULIA_REVISE_INCLUDE` to the
string `"1"` (e.g., `JULIA_REVISE_INCLUDE=1` in a bash script).
!!! note
Most users should avoid setting `JULIA_REVISE_INCLUDE`.
Try `includet` instead.
## Configurations for fixing errors
### No space left on device
!!! note
This applies only to Linux
Revise needs to be notified by your filesystem about changes to your code,
which means that the files that define your modules need to be watched for updates.
Some systems impose limits on the number of files and directories that can be
watched simultaneously; if such a limit is hit, on Linux this can result in Revise silently ceasing to work
(albeit with unit tests failing) or in a fairly cryptic error like
```sh
ERROR: start_watching (File Monitor): no space left on device (ENOSPC)
```
The cure is to investigate and possibly increase the number of files that can be watched.
Invoking
```sh
$ sysctl fs.inotify
```
at the linux prompt may e.g. result in
```
fs.inotify.max_queued_events = 16384
fs.inotify.max_user_instances = 128
fs.inotify.max_user_watches = 524288
```
For Revise usage, `max_user_watches >= 65536` is recommended, and more can be helpful; the value of 524288 above is common on modern systems. One can set higher values as needed, e.g.,
```
$ sudo sysctl fs.inotify.max_user_instances=2048
```
After changing these values, it is advised to run Revise's unit tests to see if they pass.
This change can be made [permanent](https://www.suse.com/de-de/support/kb/doc/?id=000020048).
For more information see issues [#26](https://github.com/timholy/Revise.jl/issues/26)
and [#778](https://github.com/timholy/Revise.jl/issues/778).
### Polling and NFS-mounted code directories: JULIA\_REVISE\_POLL
!!! note
This applies only to Unix systems with code on network-mounted drives
`Revise` works by monitoring your filesystem for changes to the files that define your code.
On most operating systems, Revise can work "passively" and wait to be signaled
that one or more watched directories has changed.
Unfortunately, a few file systems (notably, the Unix-based Network File System NFS) don't support this approach. In such cases, Revise needs to "actively" check each file periodically to see whether it has changed since the last check. This active process is called [polling](https://en.wikipedia.org/wiki/Polling_(computer_science)).
You turn on polling by setting the environment variable `JULIA_REVISE_POLL` to the
string `"1"` (e.g., `JULIA_REVISE_POLL=1` in a bash script).
!!! warning
If you're using polling, you may have to wait several seconds before changes take effect.
Polling is *not* recommended unless you have no other alternative.
!!! note
NFS stands for [Network File System](https://en.wikipedia.org/wiki/Network_File_System) and is typically only used to mount shared network drives on *Unix* file systems.
Despite similarities in the acronym, NTFS, the standard [filesystem on Windows](https://en.wikipedia.org/wiki/NTFS), is completely different from NFS; Revise's default configuration should work fine on Windows without polling.
However, WSL2 users currently need polling due to [this bug](https://github.com/JuliaLang/julia/issues/37029).
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 7152 | # Revise usage: a cookbook
## Package-centric usage
For code that might be useful more than once, it's often a good idea to put it in
a package.
Revise cooperates with the package manager to enforce its distinction between
["versioned" and "under development" packages](https://julialang.github.io/Pkg.jl/v1/managing-packages/);
packages that you want to modify and have tracked by `Revise` should be `dev`ed rather than `add`ed.
!!! note
You should never modify package files in your `.julia/packages` directory,
because this breaks the "contract" that such package files correspond to registered versions of the code.
In recent versions of Julia, the source files in `.julia/packages` are read-only,
and you should leave them this way.
In keeping with this spirit, Revise is designed to avoid tracking changes in such files.
The correct way to make and track modifications is to `dev` the package.
For creating packages, the author recommends [PkgTemplates.jl](https://github.com/invenia/PkgTemplates.jl).
A fallback is to use "plain" `Pkg` commands.
Both options are described below.
### PkgTemplates
!!! note
Because PkgTemplates integrates nicely with [`git`](https://git-scm.com/),
this approach might require you to do some configuration.
(Once you get things set up, you shouldn't have to do this part ever again.)
PkgTemplates needs you to configure your `git` user name and email.
Some instructions on configuration are [here](https://docs.github.com/en/github/getting-started-with-github/set-up-git)
and [here](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup).
It's also helpful to sign up for a [GitHub account](https://github.com/)
and set git's `github.user` variable.
The [PkgTemplates documentation](https://juliaci.github.io/PkgTemplates.jl/stable/)
may also be useful.
If you struggle with this part, consider trying the "plain" `Pkg` variant below.
!!! note
If the current directory in your Julia session is itself a package folder, PkgTemplates
will use it as the parent environment (project) for your new package.
To reduce confusion, before trying the commands below it may help to first ensure you're in a
a "neutral" directory, for example by typing `cd()` at the Julia prompt.
Let's create a new package, `MyPkg`, to play with.
```julia
julia> using PkgTemplates
julia> t = Template()
Template:
→ User: timholy
→ Host: github.com
→ License: MIT (Tim Holy <[email protected]> 2019)
→ Package directory: ~/.julia/dev
→ Minimum Julia version: v1.0
→ SSH remote: No
→ Add packages to main environment: Yes
→ Commit Manifest.toml: No
→ Plugins: None
julia> t("MyPkg")
Generating project MyPkg:
/home/tim/.julia/dev/MyPkg/Project.toml
/home/tim/.julia/dev/MyPkg/src/MyPkg.jl
[lots more output suppressed]
```
In the first few lines you can see the location of your new package, here
the directory `/home/tim/.julia/dev/MyPkg`.
Press `]` to enter the [Pkg REPL](https://pkgdocs.julialang.org/v1/getting-started/#Basic-Usage).
Then add the new package to your current environment with the `dev` command.
```julia
(<environment>) pkg> dev MyPkg # the dev command will look in the ~/.julia/dev folder automatically
```
Press the backspace key to return to the Julia REPL.
Now let's try it out:
```julia
julia> using Revise # you must do this before loading any revisable packages
julia> using MyPkg
[ Info: Precompiling MyPkg [102b5b08-597c-4d40-b98a-e9249f4d01f4]
```
(It's perfectly fine if you see a different string of digits and letters after the "Precompiling MyPkg" message.)
You'll note that Julia found your package without you having to take any extra steps.
*Without* quitting this Julia session, open the `MyPkg.jl` file in an editor.
You might be able to open it with
```julia
julia> edit(pathof(MyPkg))
```
although that might require [configuring your EDITOR environment variable](https://askubuntu.com/questions/432524/how-do-i-find-and-set-my-editor-environment-variable).
You should see something like this:
```julia
module MyPkg
# Write your package code here.
end
```
This is the basic package created by PkgTemplates.
Let's create a simple `greet` function to return a message:
```julia
module MyPkg
greet() = print("Hello World!")
end # module
```
Now go back to that same Julia session, and try calling `greet`.
After a pause (while Revise's internal code compiles), you should see
```julia
julia> MyPkg.greet()
Hello World!
```
From this point forward, revisions should be fast. You can modify `MyPkg.jl`
quite extensively without quitting the Julia session, although there are some [Limitations](@ref).
### Using Pkg
[Pkg](https://julialang.github.io/Pkg.jl/v1/) works similarly to `PkgTemplates`,
but requires less configuration while also doing less on your behalf.
Let's create a blank `MyPkg` using `Pkg`. (If you tried the `PkgTemplates` version
above, you might first have to delete the package with `Pkg.rm("MyPkg")` followed by
a complete removal from your `dev` directory.)
```julia
julia> using Revise, Pkg
julia> cd(Pkg.devdir()) # take us to the standard "development directory"
(v1.2) pkg> generate MyPkg
Generating project MyPkg:
MyPkg/Project.toml
MyPkg/src/MyPkg.jl
(v1.2) pkg> dev MyPkg
[ Info: resolving package identifier `MyPkg` as a directory at `~/.julia/dev/MyPkg`.
...
```
For the line starting `(v1.2) pkg>`, hit the `]` key at the beginning of the line,
then type `generate MyPkg`.
The next line, `dev MyPkg`, is necessary to tell `Pkg` about the existence of this new package.
Now you can do the following:
```julia
julia> using MyPkg
[ Info: Precompiling MyPkg [efe7ebfe-4313-4388-9b6c-3590daf47143]
julia> edit(pathof(MyPkg))
```
and the rest should be similar to what's above under `PkgTemplates`.
Note that with this approach, `MyPkg` has not been set up for version
control.
!!! note
If you `add` instead of `dev` the package, the package manager will make a copy of the `MyPkg` files in your `.julia/packages` directory.
This will be the "official" version of the files, and Revise will not track changes.
## `includet` usage
The alternative to creating packages is to manually load individual source files.
This approach is intended for early stages of development;
if you want to track multiple files and/or have some files include other files,
you should consider switching to the package style above.
Open your editor and create a file like this:
```julia
mygreeting() = "Hello, world!"
```
Save it as `mygreet.jl` in some directory. Here we will assume it's being saved in `/tmp/`.
Now load the code with `includet`, which stands for "include and track":
```julia
julia> using Revise
julia> includet("/tmp/mygreet.jl")
julia> mygreeting()
"Hello, world!"
```
Now, in your editor modify `mygreeting` to do this:
```julia
mygreeting() = "Hello, revised world!"
```
and then try it in the same session:
```julia
julia> mygreeting()
"Hello, revised world!"
```
As described above, the first revision you make may be very slow, but later revisions
should be fast.
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 11468 | # Debugging Revise
## Handling errors
Revise attempts to make error reports mimic Julia's own stacktraces, and as a consequence it has
to prevent stacktraces from containing lots of lines pointing to Revise's own code.
If you're trying to debug a Revise error, you'd probably prefer to see the entire stacktrace.
You can uncomment the obvious commented-out line in [`Revise.trim_toplevel!`](@ref).
## The logging framework
If Revise isn't behaving the way you expect it to, it can be useful to examine the
decisions it made.
Revise supports Julia's [Logging framework](https://docs.julialang.org/en/v1/stdlib/Logging/)
and can optionally record its decisions in a format suitable for later inspection.
What follows is a simple series of steps you can use to turn on logging, capture messages,
and then submit them with a bug report.
Alternatively, more advanced developers may want to examine the logs themselves to determine
the source of Revise's error, and for such users a few tips about interpreting the log
messages are also provided below.
### Turning on logging
Currently, the best way to turn on logging is within a running Julia session:
```jldoctest; setup=(using Revise)
julia> rlogger = Revise.debug_logger()
Revise.ReviseLogger(Revise.LogRecord[], Debug)
```
You'll use `rlogger` at the end to retrieve the logs.
Now carry out the series of julia commands and code edits that reproduces the problem.
### Capturing the logs and submitting them with your bug report
Once all the revisions have been triggered and the mistake has been reproduced,
it's time to capture the logs.
To capture all the logs, use
```julia
julia> using Base.CoreLogging: Debug
julia> logs = filter(r->r.level==Debug, rlogger.logs);
```
You can capture just the changes that Revise made to running code with
```julia
julia> logs = Revise.actions(rlogger)
```
You can either let these print to the console and copy/paste the text output into the
issue, or if they are extensive you can save `logs` to a file:
```julia
open("/tmp/revise.logs", "w") do io
for log in logs
println(io, log)
end
end
```
Then you can upload the logs somewhere (e.g., https://gist.github.com/) and link the url in your bug report.
To assist in the resolution of the bug, please also specify additional relevant information such as the name of the function that was misbehaving after revision and/or any error messages that you received.
See also [A complete debugging demo](@ref) below.
### Logging by default
If you suspect a bug in Revise but have difficulty isolating it, you can include the lines
```julia
# Turn on logging
Revise.debug_logger()
```
within the `Revise` block of your `~/.julia/config/startup.jl` file.
This will ensure that you always log Revise's actions.
Then carry out your normal Julia development.
If a Revise-related problem arises, executing these lines
```julia
rlogger = Revise.debug_logger()
using Base.CoreLogging: Debug
logs = filter(r->r.level==Debug, rlogger.logs)
open("/tmp/revise.logs", "w") do io
for log in logs
println(io, log)
end
end
```
within the same session will generate the `/tmp/revise.logs` file that
you can submit with your bug report.
(What makes this possible is that a second call to `Revise.debug_logger()` returns
the same logger object created by the first call--it is not necessary to hold
on to `rlogger`.)
### The structure of the logs
For those who want to do a little investigating on their own, it may be helpful to
know that Revise's core decisions are captured in the group called "Action," and they come in three
flavors:
- log entries with message `"Eval"` signify a call to `eval`; for these events,
keyword `:deltainfo` has value `(mod, expr)` where `mod` is the module of evaluation
and `expr` is a [`Revise.RelocatableExpr`](@ref) containing the expression
that was evaluated.
- log entries with message `"DeleteMethod"` signify a method deletion; for these events,
keyword `:deltainfo` has value `(sigt, methsummary)` where `sigt` is the signature of the
method that Revise *intended* to delete and `methsummary` is a [`MethodSummary`](@ref) of the
method that Revise actually found to delete.
- log entries with message `"LineOffset"` correspond to updates to Revise's own internal
estimates of how far a given method has become displaced from the line number it
occupied when it was last evaluated. For these events, `:deltainfo` has value
`(sigt, newlineno, oldoffset=>newoffset)`.
If you're debugging mistakes in method creation/deletion, the `"LineOffset"` events
may be distracting; by default [`Revise.actions`](@ref) excludes these events.
Note that Revise records the time of each revision, which can sometimes be useful in
determining which revisions occur in conjunction with which user actions.
If you want to make use of this, it can be handy to capture the start time with `tstart = time()`
before commencing on a session.
See [`Revise.debug_logger`](@ref) for information on groups besides "Action."
### A complete debugging demo
From within Revise's `test/` directory, try the following:
```julia
julia> rlogger = Revise.debug_logger();
shell> cp revisetest.jl /tmp/
julia> includet("/tmp/revisetest.jl")
julia> ReviseTest.cube(3)
81
shell> cp revisetest_revised.jl /tmp/revisetest.jl
julia> ReviseTest.cube(3)
27
julia> rlogger.logs
9-element Array{Revise.LogRecord,1}:
Revise.LogRecord(Debug, DeleteMethod, Action, Revise_4ac0f476, "/home/tim/.julia/dev/Revise/src/Revise.jl", 226, (time=1.557996459055345e9, deltainfo=(Tuple{typeof(Main.ReviseTest.cube),Any}, MethodSummary(:cube, :ReviseTest, Symbol("/tmp/revisetest.jl"), 7, Tuple{typeof(Main.ReviseTest.cube),Any}))))
Revise.LogRecord(Debug, DeleteMethod, Action, Revise_4ac0f476, "/home/tim/.julia/dev/Revise/src/Revise.jl", 226, (time=1.557996459167895e9, deltainfo=(Tuple{typeof(Main.ReviseTest.Internal.mult3),Any}, MethodSummary(:mult3, :Internal, Symbol("/tmp/revisetest.jl"), 12, Tuple{typeof(Main.ReviseTest.Internal.mult3),Any}))))
Revise.LogRecord(Debug, DeleteMethod, Action, Revise_4ac0f476, "/home/tim/.julia/dev/Revise/src/Revise.jl", 226, (time=1.557996459167956e9, deltainfo=(Tuple{typeof(Main.ReviseTest.Internal.mult4),Any}, MethodSummary(:mult4, :Internal, Symbol("/tmp/revisetest.jl"), 13, Tuple{typeof(Main.ReviseTest.Internal.mult4),Any}))))
Revise.LogRecord(Debug, Eval, Action, Revise_9147188b, "/home/tim/.julia/dev/Revise/src/Revise.jl", 276, (time=1.557996459259605e9, deltainfo=(Main.ReviseTest, :(cube(x) = begin
#= /tmp/revisetest.jl:7 =#
x ^ 3
end))))
Revise.LogRecord(Debug, Eval, Action, Revise_9147188b, "/home/tim/.julia/dev/Revise/src/Revise.jl", 276, (time=1.557996459330512e9, deltainfo=(Main.ReviseTest, :(fourth(x) = begin
#= /tmp/revisetest.jl:9 =#
x ^ 4
end))))
Revise.LogRecord(Debug, LineOffset, Action, Revise_fb38a7f7, "/home/tim/.julia/dev/Revise/src/Revise.jl", 296, (time=1.557996459331061e9, deltainfo=(Any[Tuple{typeof(mult2),Any}], :(#= /tmp/revisetest.jl:11 =#) => :(#= /tmp/revisetest.jl:13 =#))))
Revise.LogRecord(Debug, Eval, Action, Revise_9147188b, "/home/tim/.julia/dev/Revise/src/Revise.jl", 276, (time=1.557996459391182e9, deltainfo=(Main.ReviseTest.Internal, :(mult3(x) = begin
#= /tmp/revisetest.jl:14 =#
3x
end))))
Revise.LogRecord(Debug, LineOffset, Action, Revise_fb38a7f7, "/home/tim/.julia/dev/Revise/src/Revise.jl", 296, (time=1.557996459391642e9, deltainfo=(Any[Tuple{typeof(unchanged),Any}], :(#= /tmp/revisetest.jl:18 =#) => :(#= /tmp/revisetest.jl:19 =#))))
Revise.LogRecord(Debug, LineOffset, Action, Revise_fb38a7f7, "/home/tim/.julia/dev/Revise/src/Revise.jl", 296, (time=1.557996459391695e9, deltainfo=(Any[Tuple{typeof(unchanged2),Any}], :(#= /tmp/revisetest.jl:20 =#) => :(#= /tmp/revisetest.jl:21 =#))))
```
You can see that Revise started by deleting three methods, followed by evaluating three new versions of those methods. Interspersed are various changes to the line numbering.
In rare cases it might be helpful to independently record the sequence of edits to the file.
You can make copies `cp editedfile.jl /tmp/version1.jl`, edit code, `cp editedfile.jl /tmp/version2.jl`,
etc.
`diff version1.jl version2.jl` can be used to capture a compact summary of the changes
and pasted into the bug report.
## Debugging problems with paths
During certain types of usage you might receive messages like
```julia
Warning: /some/system/path/stdlib/v1.0/SHA/src is not an existing directory, Revise is not watching
```
Unless you've just deleted that directory, this indicates that some of Revise's functionality is broken.
In the majority of cases, failures come down to Revise having trouble locating source
code on your drive.
This problem should be fixable, because Revise includes functionality
to update its links to source files, as long as it knows what to do.
One of the best approaches is to run Revise's own tests via `pkg> test Revise`.
Here are some possible test warnings and errors, and steps you might take to fix them:
- `Base & stdlib file paths: Test Failed at /some/path... Expression: isfile(Revise.basesrccache)`
This failure is quite serious, and indicates that you will be unable to access code in `Base`.
To fix this, look for a file called `"base.cache"` somewhere in your Julia install
or build directory (for the author, it is at `/home/tim/src/julia-1.0/usr/share/julia/base.cache`).
Now compare this with the value of `Revise.basesrccache`.
(If you're getting this failure, presumably they are different.)
An important "top level" directory is `Sys.BINDIR`; if they differ already at this level,
consider adding a symbolic link from the location pointed at by `Sys.BINDIR` to the
corresponding top-level directory in your actual Julia installation.
You'll know you've succeeded in specifying it correctly when, after restarting
Julia, `Revise.basesrccache` points to the correct file and `Revise.juliadir`
points to the directory that contains `base/`.
If this workaround is not possible or does not succeed, please
[file an issue](https://github.com/timholy/Revise.jl/issues) with a description of
why you can't use it and/or
+ details from `versioninfo` and information about how you obtained your Julia installation;
+ the values of `Revise.basesrccache` and `Revise.juliadir`, and the actual paths to `base.cache`
and the directory containing the running Julia's `base/`;
+ what you attempted when trying to fix the problem;
+ if possible, your best understanding of why this failed to fix it.
- `skipping Core.Compiler tests due to lack of git repo`: this likely indicates
that you downloaded a Julia binary rather than building Julia from source.
While Revise should be able to access the code in `Base` and standard libraries,
at the current time it is not possible for Revise to access julia's Core.Compiler module
unless you clone Julia's repository and build it from source.
- `skipping git tests because Revise is not under development`: this warning should be
harmless. Revise has built-in functionality for extracting source code using `git`,
and it uses itself (i.e., its own git repository) for testing purposes.
These tests run only if you have checked out Revise for development (`pkg> dev Revise`)
or on the continuous integration servers (Travis and Appveyor).
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 3585 | # Developer reference
## Internal global variables
### Configuration-related variables
These are set during execution of Revise's `__init__` function.
```@docs
Revise.watching_files
Revise.polling_files
Revise.tracking_Main_includes
```
### Path-related variables
```@docs
Revise.juliadir
Revise.basesrccache
Revise.basebuilddir
```
### Internal state management
```@docs
Revise.pkgdatas
Revise.watched_files
Revise.revision_queue
Revise.NOPACKAGE
Revise.queue_errors
Revise.included_files
Revise.watched_manifests
```
The following are specific to user callbacks (see [`Revise.add_callback`](@ref)) and
the implementation of [`entr`](@ref):
```@docs
Revise.revision_event
Revise.user_callbacks_queue
Revise.user_callbacks_by_file
Revise.user_callbacks_by_key
```
## Types
```@docs
Revise.RelocatableExpr
Revise.ModuleExprsSigs
Revise.FileInfo
Revise.PkgData
Revise.WatchList
Revise.TaskThunk
Revise.ReviseEvalException
MethodSummary
```
## Function reference
### Functions called when you load a new package
```@docs
Revise.watch_package
Revise.parse_pkg_files
Revise.init_watching
```
### Monitoring for changes
These functions get called on each directory or file that you monitor for revisions.
These block execution until the file(s) are updated, so you should only call them from
within an `@async` block.
They work recursively: once an update has been detected and execution resumes,
they schedule a revision (see [`Revise.revision_queue`](@ref)) and
then call themselves on the same directory or file to wait for the next set of changes.
```@docs
Revise.revise_dir_queued
Revise.revise_file_queued
```
The following functions support user callbacks, and are used in the implementation of `entr`
but can be used more broadly:
```@docs
Revise.add_callback
Revise.remove_callback
```
### Evaluating changes (revising) and computing diffs
[`revise`](@ref) is the primary entry point for implementing changes. Additionally,
```@docs
Revise.revise_file_now
```
### Caching the definition of methods
```@docs
Revise.get_def
```
### Parsing source code
```@docs
Revise.parse_source
Revise.parse_source!
```
### Lowered source code
Much of the "brains" of Revise comes from doing analysis on lowered code.
This part of the package is not as well documented.
```@docs
Revise.minimal_evaluation!
Revise.methods_by_execution!
Revise.CodeTrackingMethodInfo
```
### Modules and paths
```@docs
Revise.modulefiles
```
### Handling errors
```@docs
Revise.trim_toplevel!
```
In current releases of Julia, hitting Ctrl-C from the REPL can stop tasks running in the background.
This risks stopping Revise's ability to watch for changes in files and directories.
Revise has a work-around for this problem.
```@docs
Revise.throwto_repl
```
### Git integration
```@docs
Revise.git_source
Revise.git_files
Revise.git_repo
```
### Distributed computing
```@docs
Revise.init_worker
```
## Teaching Revise about non-julia source codes
Revise can be made to work for transpilers from non-Julia languages to Julia with a little effort.
For example, if you wrote a transpiler from C to Julia, you could define a `struct CFile`
that overrides enough of the common `String` methods (`abspath`, `isabspath`, `joinpath`, `normpath`, `isfile`, `findfirst`, and `String`);
it will then be supported by Revise if you define a method like
```
function Revise.parse_source!(mod_exprs_sigs::Revise.ModuleExprsSigs, file::CFile, mod::Module; kwargs...)
ex = # julia Expr returned from running transpiler
Revise.process_source!(mod_exprs_sigs, ex, file, mod; kwargs...)
end
```
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 8927 | # Introduction to Revise
`Revise.jl` may help you keep your Julia sessions running longer, reducing the
need to restart when you make changes to code.
With Revise, you can be in the middle of a session and then edit source code,
update packages, switch git branches, and/or stash/unstash code;
typically, the changes will be incorporated into the very next command you issue from the REPL.
This can save you the overhead of restarting, loading packages, and waiting for code to JIT-compile.
Using Revise also improves your experience when using the
[debuggers](https://julialang.org/blog/2019/03/debuggers/).
Revise will keep track of changed locations of your methods in file, and ensure that the
debugger displays the source code of what you're actually debugging.
!!! note "Automatically loading Revise"
Many users automatically load Revise on startup.
On versions of Julia older than 1.5, this is slightly more involved
than just adding `using Revise` to `.julia/config/startup.jl`: see
[Using Revise by default](@ref) for details.
## Installation
You can obtain Revise using Julia's Pkg REPL-mode (hitting `]` as the first character of the command prompt):
```julia
(v1.0) pkg> add Revise
```
or with `using Pkg; Pkg.add("Revise")`.
## Usage example
We'll make changes to Julia's "Example" package (a trivial package designed to
illustrate the file and directory organization of typical packages).
We have to "develop" it in order to make changes:
```julia
(v1.0) pkg> dev Example
[...output related to installation...]
```
Now we load Revise (if we haven't already done so) and Example:
```julia
julia> using Revise # importantly, this must come before `using Example`
julia> using Example
julia> hello("world")
"Hello, world"
```
Now we're going to check that the `Example` module currently lacks a function named `f`:
```julia
julia> Example.f()
ERROR: UndefVarError: f not defined
```
But say we really want `f`, so let's add it.
You can either navigate to the source code (at `.julia/dev/Example/src/Example.jl`)
in an editor manually, or you can use Julia to open it for you:
```julia
julia> edit(hello) # opens Example.jl in the editor you have configured
```
Now, add a function `f() = π` and save the file.
Go back to the REPL (the *same* REPL, don't restart Julia) and try this:
```julia
julia> Example.f()
π = 3.1415926535897...
```
Voila! Even though we'd loaded Example before adding this function,
Revise noticed the change and inserted it into our running session.
!!! warning
Revise's first revision has latency of several seconds--it's compiling all of its internal code, which includes a complete [Julia interpreter](https://github.com/JuliaDebug/JuliaInterpreter.jl) and all of Revise's parse/diff/patch/cache machinery.
After your first revision, future revisions will generally be fast enough that they will seem nearly instantaneous. (There are exceptions, but they occur
only in specific circumstances, for example when Revise's own code gets [invalidated](https://julialang.org/blog/2020/08/invalidations/) by your changes.)
Now suppose we realize we've made a horrible mistake: that `f` method will mess up everything, because it's part of a more complicated dispatch process and incorrectly intercepts certain `f` calls.
No problem, just delete `f` in your editor, save the file, and you're back to this:
```julia
julia> Example.f()
ERROR: UndefVarError: f not defined
```
all without restarting Julia.
While you can evaluate *new* methods without Revise using [inline evaluation](https://www.julia-vscode.org/docs/stable/userguide/runningcode/#Julia:-Execute-Code-Block-(AltEnter)-1) through your IDE,
method *deletion* is just one example of a change that can only be made easily by Revise.
If you need more examples, see [Revise usage: a cookbook](@ref).
## Other key features of Revise
Revise updates its internal paths when you change versions of a package.
To try this yourself, first re-insert that definition of `f` in the `dev` version of
`Example` and save the file.
Now try toggling back and forth between the `dev` and released versions of `Example`:
```julia
(v1.0) pkg> free Example # switch to the released version of Example
julia> Example.f()
ERROR: UndefVarError: f not defined
(v1.0) pkg> dev Example
julia> Example.f()
π = 3.1415926535897...
```
Revise is not tied to any particular editor.
(The [EDITOR or JULIA_EDITOR](https://docs.julialang.org/en/v1/manual/environment-variables/#JULIA_EDITOR) environment variables can be used to specify your preference for which editor gets launched by Julia's `edit` function.)
If you don't want to have to remember to say `using Revise` each time you start
Julia, see [Using Revise by default](@ref).
## What Revise can track
Revise is fairly ambitious: if all is working, subject to a few [Limitations](@ref) you should be able to track changes to
- any package that you load with `import` or `using`
- any script you load with [`includet`](@ref) (see [Configuring the revise mode](@ref) for important default restrictions on `includet`)
- any file defining `Base` julia itself (with `Revise.track(Base)`)
- any of Julia's standard libraries (with, e.g., `using Unicode; Revise.track(Unicode)`)
- any file defining `Core.Compiler` (with `Revise.track(Core.Compiler)`)
The last one requires that you clone Julia and build it yourself from source.
## Secrets of Revise "wizards"
Revise can assist with methodologies like
[test-driven development](https://en.wikipedia.org/wiki/Test-driven_development).
While it's often desirable to write the test first, sometimes when fixing a bug
it's very difficult to write a good test until you understand the bug better.
Often that means basically fixing the bug before your write the test.
With Revise, you can
- fix the bug while simultaneously developing a high-quality test
- verify that your test passes with the fixed code
- `git stash` your fix and check that your new test fails on the old code,
thus verifying that your test captures the essence of the former bug (if it doesn't fail,
you need a better test!)
- `git stash pop`, test again, commit, and submit
all without restarting your Julia session.
## Other Revise workflows
Revise can be used to perform work when files update.
For example, let's say you want to regenerate a set of web pages whenever your code changes.
Suppose you've placed your Julia code in a package called `MyWebCode`,
and the pages depend on "file.js" and all files in the "assets/" directory; then
```julia
entr(["file.js", "assets"], [MyWebCode]) do
build_webpages(args...)
end
```
will execute `build_webpages(args...)` whenever you save updates to the listed files
or `MyWebCode`.
If you want to regenerate the web page as soon as any change is detected, not
only in `MyWebCode` but also in any package tracked by Revise, you can provide
the `all` keyword argument to [`entr`](@ref):
```julia
entr(["file.js", "assets"]; all=true) do
build_webpages(args...)
end
```
## Taking advantage of Revise in other packages
To make it easier for other packages to benefit from Revise without needing to add it
as a dependency or understand Revise's internals, Revise interfaces with
[CodeTracking](https://github.com/timholy/CodeTracking.jl),
which is a small package acting as Revise's "query" interface.
## What else do I need to know?
Except in cases of problems (see below), that's it!
Revise is a tool that runs in the background, and when all is well it should be
essentially invisible, except that you don't have to restart Julia so often.
Revise can also be used as a "library" by developers who want to add other new capabilities
to Julia; the sections [How Revise works](@ref) and [Developer reference](@ref) are
particularly relevant for them.
## If Revise doesn't work as expected
If Revise isn't working for you, here are some steps to try:
- See [Configuration](@ref) for information on customization options.
In particular, some file systems (like [NFS](https://en.wikipedia.org/wiki/Network_File_System)) and current users of [WSL2](https://devblogs.microsoft.com/commandline/announcing-wsl-2/) might require special options.
- Revise can't handle all kinds of code changes; for more information,
see the section on [Limitations](@ref).
- Try running `test Revise` from the Pkg REPL-mode.
If tests pass, check the documentation to make sure you understand how Revise should work.
If they fail (especially if it mirrors functionality that you need and isn't working), see
[Debugging problems with paths](@ref) for one set of suggestions.
If you still encounter problems, please [file an issue](https://github.com/timholy/Revise.jl/issues).
Especially if you think Revise is making mistakes in adding or deleting methods, please
see the page on [Debugging Revise](@ref) for information about how to attach logs
to your bug report.
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 19765 | # How Revise works
In addition to the material below, see these talks:
- [JuliaCon 2018](https://www.youtube.com/watch?v=KuM0AGaN09s)
- [JuliaCon 2019](https://www.youtube.com/watch?v=gXDI4DSp04c)
Revise is based on the fact that you can change functions even when
they are defined in other modules.
Here's an example showing how you do that manually (without using Revise):
```julia
julia> convert(Float64, π)
3.141592653589793
julia> # That's too hard, let's make life easier for students
julia> @eval Base convert(::Type{Float64}, x::Irrational{:π}) = 3.0
convert (generic function with 714 methods)
julia> convert(Float64, π)
3.0
```
Revise removes some of the tedium of manually copying and pasting code
into `@eval` statements.
To decrease the amount of re-JITting
required, Revise avoids reloading entire modules; instead, it takes care
to `eval` only the *changes* in your package(s), much as you would if you were
doing it manually.
Importantly, changes are detected in a manner that is independent of the specific
line numbers in your code, so that you don't have to re-evaluate just
because code moves around within the same file.
(One unfortunate side effect is that line numbers may become inaccurate in backtraces,
but Revise takes pains to correct these, see below.)
Conceptually, Revise implements
[`diff` and `patch`](https://acloudguru.com/blog/engineering/introduction-using-diff-and-patch/)
for a running Julia session. Schematically, Revise's inner loop (`revise()`) looks like this:
```julia
for def in setdiff(oldexprs, newexprs)
# `def` is an expression that defines a method.
# It was in `oldexprs`, but is no longer present in `newexprs`--delete the method.
delete_methods_corresponding_to_defexpr(mod, def)
end
for def in setdiff(newexprs, oldexprs)
# `def` is an expression for a new or modified method. Instantiate it.
Core.eval(mod, def)
end
```
In somewhat greater detail, Revise uses the following overall strategy:
- add callbacks to Base so that Revise gets notified when new
packages are loaded or new files `include`d
- prepare source-code caches for every new file. These caches
will allow Revise to detect changes when files are updated. For precompiled
packages this happens on an as-needed basis, using the cached
source in the `*.ji` file. For non-precompiled packages, Revise parses
the source for each `include`d file immediately so that the initial state is
known and changes can be detected.
- monitor the file system for changes to any of the dependent files;
it immediately appends any updates to a list of file names that need future
processing
- intercept the REPL's backend to ensure that the list of
files-to-be-revised gets processed each time you execute a new
command at the REPL
- when a revision is triggered, the source file(s) are re-parsed, and
a diff between the cached version and the new version is
created. `eval` the diff in the appropriate module(s).
- replace the cached version of each source file with the new version, so that
further changes are `diff`ed against the most recent update.
## The structure of Revise's internal representation

**Figure notes**: Nodes represent primary objects in Julia's compilation pipeline.
Arrows and their labels represent functions or data structures that allow you to move from one node to another.
Red ("destructive") paths force recompilation of dependent functions.
Revise bridges between text files (your source code) and compiled code.
Revise consequently maintains data structures that parallel Julia's own internal
processing of code.
When dealing with a source-code file, you start with strings, parse them to obtain Julia
expressions, evaluate them to obtain Julia objects, and (where appropriate,
e.g., for methods) compile them to machine code.
This will be called the *forward workflow*.
Revise sets up a few key structures that allow it to progress from files to modules
to Julia expressions and types.
Revise also sets up a *backward workflow*, proceeding from compiled code to Julia
types back to Julia expressions.
This workflow is useful, for example, when dealing with errors: the stack traces
displayed by Julia link from the compiled code back to the source files.
To make this possible, Julia builds "breadcrumbs" into compiled code that store the
filename and line number at which each expression was found.
However, these links are static, meaning they are set up once (when the code is compiled)
and are not updated when the source file changes.
Because trivial manipulations to source files (e.g., the insertion of blank lines
and/or comments) can change the line number of an expression without necessitating
its recompilation, Revise implements a way of correcting these line numbers before
they are displayed to the user.
The same problem presents when using a [debugger](https://julialang.org/blog/2019/03/debuggers/), in that one wants the debugger to display the correct code (at the correct line number) even after modifications have been made to the file.
This capability requires that Revise proceed backward from the compiled objects to
something resembling the original text file.
### Terminology
A few convenience terms are used throughout: *definition*,
*signature-expression*, and *signature-type*.
These terms are illustrated using the following example:
```@raw html
<p><pre><code class="language-julia">function <mark>print_item(io::IO, item, ntimes::Integer=1, pre::String="")</mark>
print(io, pre)
for i = 1:ntimes
print(io, item)
end
end</code></pre></p>
```
This represents the *definition* of a method.
Definitions are stored as expressions, using a [`Revise.RelocatableExpr`](@ref).
The highlighted portion is the *signature-expression*, specifying the name, argument names
and their types, and (if applicable) type-parameters of the method.
From the signature-expression we can generate one or more *signature-types*.
Since this function has two default arguments, this signature-expression generates
three signature-types, each corresponding to a different valid way of calling
this method:
```julia
Tuple{typeof(print_item),IO,Any} # print_item(io, item)
Tuple{typeof(print_item),IO,Any,Integer} # print_item(io, item, 2)
Tuple{typeof(print_item),IO,Any,Integer,String} # print_item(io, item, 2, " ")
```
In Revise's internal code, a definition is often represented with a variable `def`, and a signature-type with `sigt`.
Recent versions of Revise do not make extensive use of signature expressions.
### Computing signatures
Since version 2.0, Revise works primarily with lowered-code representations, specifically using the lowered code to compute method signatures (if you don't know about lowered code, see [this tutorial](https://juliadebug.github.io/JuliaInterpreter.jl/stable/ast/)).
There are several reasons that make this an attractive approach, of which the most important are:
- keyword-argument methods get "expanded" to multiple methods handling various ways of populating the arguments. The lowered code lists all of them, which ensures that Revise knows about them all. (There are some challenges regarding "gensymmed" names, see [LoweredCodeUtils](https://github.com/JuliaDebug/LoweredCodeUtils.jl) and [julia#30908](https://github.com/JuliaLang/julia/issues/30908), but in short LoweredCodeUtils "fixes" those difficulties.)
- for methods generated by code, the only really reliable mechanism to compute all the signatures is to step through the code that generates the methods. That is performed using [JuliaInterpreter](https://github.com/JuliaDebug/JuliaInterpreter.jl).
As an example, suppose the following code is part of your module definition:
```
for T in (Float16, Float32, Float64)
@eval sizefloat(x::$T) = sizeof($T)
end
```
!!! clarification
This is equivalent to the following explicit definitions:
```
sizefloat(x::Float16) = 2
sizefloat(x::Float32) = 4
sizefloat(x::Float64) = 8
```
If you replace the loop with `for T in (Float32, Float64)`, then Revise should delete the method for `Float16`. But this implies that Revise can deduce all the method-signatures created by this block, which essentially requires "simulating" the block that defines the methods. (In simple cases there are other approaches, but for [complex cases](https://github.com/JuliaLang/julia/blob/c7e4b9929b3b6ee89d47ce1320ef2de14c4ecf85/base/atomics.jl#L415-L430) stepping through the code seems to be the only viable answer.)
Because lowered code is far simpler than ordinary Julia code, it is much easier to interpret. Let's look briefly at a method definition:
```
floatwins(x::AbstractFloat, y::Integer) = x
```
which has lowered representation approximately equal to
```
CodeInfo(
│ $(Expr(:method, :floatwins))
│ %2 = Core.Typeof(floatwins)
│ %3 = Core.svec(%2, AbstractFloat, Integer)
│ %4 = Core.svec()
│ %5 = Core.svec(%3, %4)
│ $(Expr(:method, :floatwins, :(%5), CodeInfo(quote
return x
end)))
└── return floatwins
)
```
(I've edited this lightly for clarity.) As one steps through this, the first line tells us we're about to define a method for the function `floatwins`. Lines 2-5 compute the signature, in the representation `svec(sig, params)`, where here `sig = svec(typeof(floatwins), AbstractFloat, Integer)` and `params = svec()`.
(This example has no type parameters, which is why `params` is empty.)
What Revise does is steps through the first 5 of these lines, and when it encounters the `Expr(:method, :floatwins, :(%5), CodeInfo(...))` statement,
it pulls out the signature (the `%5`, which refers to the result computed on the 5th line) and records this as a method generated by this block of code. (It does not, however, evaluate the `Expr(:method, ...)` expression as a whole, because that would force it to be recompiled.) Stepping through this code ensures that Revise can compute the exact signature, no matter how this method is defined at the level of ordinary Julia code.
Unfortunately, modules sometimes contain code blocks that perhaps shouldn't be interpreted:
```julia
init_c_library() # library crashes if we call this twice
```
Starting with version 2.3, Revise attempts to avoid interpreting any code not necessary for signature computation.
If you are just tracking changes, Revise will skip over such blocks; if you're loading a file with `includet` for the first time, Revise will execute such blocks in compiled mode.
Revise achieves this by computing [backedges](https://juliadebug.github.io/LoweredCodeUtils.jl/stable/edges/), essentially a set of links encoding the dependencies among different lines of the lowered code.
For the `floatwins` example above, the backedges would represent the fact that line 2 has one direct dependent, line 3 (which uses `%2`), that lines 3 and 4 both have line 5 as their dependent, and line 5 has line 6 as a dependent. As a consequence, to (nearly) execute line 6, we have to execute lines 2-5, because they set up the signature. If an interdependent block doesn't contain any `:method` or related (`:struct_type`, `:eval`) expressions, then Revise doesn't need to interpret the block at all.
As should be evident, the lowered code makes it much easier to analyze the graph of these dependencies. There are, however, a few tricky cases.
For example, any code inside an `@eval` might, or might not, expand into lowered code that contains a `:method` expression. Because Revise can't reliably predict what it will look like after expansion, Revise will execute any code in (or needed for) an `@eval` block. As a consequence, even after version 2.3 Revise may sometimes interpret more code than is strictly necessary.
!!! note
If Revise executes code that still shouldn't be run twice, one good solution is to put all initialization inside your module's [`__init__` function](https://docs.julialang.org/en/v1/manual/modules/#Module-initialization-and-precompilation-1).
For files that you track with `includet`, you can also split "code that defines methods" into a separate file from "code that does work," and have Revise track only the method-defining file.
However, starting with version 2.3 Revise should be fairly good at doing this on its own; such manual interventions should not be necessary in most cases.
### Core data structures and representations
Most of Revise's magic comes down to just three internal variables:
- [`Revise.watched_files`](@ref): encodes information used by the filesystem (`FileWatching`)
to detect changes in source files.
- [`Revise.revision_queue`](@ref): a list of "work to do," containing the files that have been
modified since the last code update.
- [`Revise.pkgdatas`](@ref): the central repository of parsed code, used to "diff" for changes
and then "patch" the running session.
Two "maps" are central to Revise's inner workings: `ExprsSigs` maps link
definition=>signature-types (the forward workflow), while `CodeTracking` (specifically,
its internal variable `method_info`) links from
signature-type=>definition (the backward workflow).
Concretely, `CodeTracking.method_info` is just an `IdDict` mapping `sigt=>(locationinfo, def)`.
Of note, a stack frame typically contains a link to a method, which stores the equivalent
of `sigt`; consequently, this information allows one to look up the corresponding
`locationinfo` and `def`. (When methods move, the location information stored by CodeTracking
gets updated by Revise.)
Some additional notes about Revise's `ExprsSigs` maps:
- For expressions that do not define a method, it is just `def=>nothing`
- For expressions that do define a method, it is `def=>[sigt1, ...]`.
`[sigt1, ...]` is the list of signature-types generated from `def` (often just one,
but more in the case of methods with default arguments or keyword arguments).
- They are represented as an `OrderedDict` so as to preserve the sequence in which expressions
occur in the file.
This can be important particularly for updating macro definitions, which affect the
expansion of later code.
The order is maintained so as to match the current ordering of the source-file,
which is not necessarily the same as the ordering when these expressions were last
`eval`ed.
- Each key in the map (the definition `RelocatableExpr`) is the most recently
`eval`ed version of the expression.
This has an important consequence: the line numbers in the `def` (which are still present,
even though not used for equality comparisons) correspond to the ones in compiled code.
Any discrepancy with the current line numbers in the file is handled through updates to
the location information stored by `CodeTracking`.
`ExprsSigs` are organized by module and then file, so that one can map
`filename`=>`module`=>`def`=>`sigts`.
Importantly, single-file modules can be "reconstructed" from the keys of the corresponding
`ExprsSigs` (and multi-file modules from a collection of such items), since they hold
the complete ordered set of expressions that would be `eval`ed to define the module.
The global variable that holds all this information is [`Revise.pkgdatas`](@ref), organized
into a dictionary of [`Revise.PkgData`](@ref) objects indexed by Base Julia's `PkgId`
(a unique identifier for packages).
### An example
Consider a module, `Items`, defined by the following two source files:
`Items.jl`:
```julia
__precompile__(false)
module Items
include("indents.jl")
function print_item(io::IO, item, ntimes::Integer=1, pre::String=indent(item))
print(io, pre)
for i = 1:ntimes
print(io, item)
end
end
end
```
`indents.jl`:
```julia
indent(::UInt16) = 2
indent(::UInt8) = 4
```
If you create this as a mini-package and then say `using Revise, Items`, you can start
examining internal variables in the following manner:
```julia
julia> id = Base.PkgId(Items)
Items [b24a5932-55ed-11e9-2a88-e52f99e65a0d]
julia> pkgdata = Revise.pkgdatas[id]
PkgData(Items [b24a5932-55ed-11e9-2a88-e52f99e65a0d]:
"src/Items.jl": FileInfo(Main=>ExprsSigs(<1 expressions>, <0 signatures>), Items=>ExprsSigs(<2 expressions>, <3 signatures>), )
"src/indents.jl": FileInfo(Items=>ExprsSigs(<2 expressions>, <2 signatures>), )
```
(Your specific UUID may differ.)
Path information is stored in `pkgdata.info`:
```julia
julia> pkgdata.info
PkgFiles(Items [b24a5932-55ed-11e9-2a88-e52f99e65a0d]):
basedir: "/tmp/pkgs/Items"
files: ["src/Items.jl", "src/indents.jl"]
```
`basedir` is the only part using absolute paths; everything else is encoded relative
to that location. This facilitates, e.g., switching between `develop` and `add` mode in the
package manager.
`src/indents.jl` is particularly simple:
```julia
julia> pkgdata.fileinfos[2]
FileInfo(Items=>ExprsSigs with the following expressions:
:(indent(::UInt16) = begin
2
end)
:(indent(::UInt8) = begin
4
end), )
```
This is just a summary; to see the actual `def=>sigts` map, do the following:
```julia
julia> pkgdata.fileinfos[2].modexsigs[Items]
OrderedCollections.OrderedDict{Revise.RelocatableExpr,Union{Nothing, Array{Any,1}}} with 2 entries:
:(indent(::UInt16) = begin… => Any[Tuple{typeof(indent),UInt16}]
:(indent(::UInt8) = begin… => Any[Tuple{typeof(indent),UInt8}]
```
These are populated now because we specified `__precompile__(false)`, which forces
Revise to defensively parse all expressions in the package in case revisions are made
at some future point.
For precompiled packages, each `pkgdata.fileinfos[i]` can instead rely on the `cachefile`
(another field stored in the [`Revise.FileInfo`](@ref)) as a record of the state of the file
at the time the package was loaded; as a consequence, Revise can defer parsing the source
file(s) until they are updated.
`Items.jl` is represented with a bit more complexity,
`"Items.jl"=>Dict(Main=>map1, Items=>map2)`.
This is because `Items.jl` contains one expression (the `__precompile__` statement)
that is `eval`ed in `Main`,
and other expressions that are `eval`ed in `Items`.
### Revisions and computing diffs
When the file system notifies Revise that a file has been modified, Revise re-parses
the file and assigns the expressions to the appropriate modules, creating a
[`Revise.ModuleExprsSigs`](@ref) `mexsnew`.
It then compares `mexsnew` against `mexsref`, the reference object that is synchronized to
code as it was `eval`ed.
The following actions are taken:
- if a `def` entry in `mexsref` is equal to one in `mexsnew`, the expression is "unchanged"
except possibly for line number. The `locationinfo` in `CodeTracking` is updated as needed.
- if a `def` entry in `mexsref` is not present in `mexsnew`, that entry is deleted and
any corresponding methods are also deleted.
- if a `def` entry in `mexsnew` is not present in `mexsref`, it is `eval`ed and then added to
`mexsref`.
Technically, a new `mexsref` is generated every time to ensure that the expressions are
ordered as in `mexsnew`; however, conceptually this is better thought of as an updating of
`mexsref`, after which `mexsnew` is discarded.
Note that one consequence is that modifying a method causes two actions, the deletion of
the original followed by `eval`ing a new version.
During revision, all method deletions are performed first, followed by all the new `eval`ed methods.
This ensures that if a method gets moved from `fileB.jl` to `fileA.jl`, Revise doesn't mistakenly
redefine and then delete the method simply because `fileA.jl` got processed before `fileB.jl`.
### Internal API
You can find more detail about Revise's inner workings in the [Developer reference](@ref).
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 5271 | # Limitations
There are some kinds of changes that Revise (or often, Julia itself) cannot incorporate into a running Julia session:
- changes to type definitions or `const`s
- conflicts between variables and functions sharing the same name
- removal of `export`s
These kinds of changes require that you restart your Julia session.
During early stages of development, it's quite common to want to change type definitions. You can work around Julia's/Revise's limitations by temporary renaming. We'll illustrate this below, using `write` to be explicit about when updates to the file happen. But in ordinary usage, these are changes you'd likely make with your editor.
```julia
julia> using Pkg, Revise
julia> Pkg.generate("MyPkg")
Generating project MyPkg:
MyPkg/Project.toml
MyPkg/src/MyPkg.jl
Dict{String, Base.UUID} with 1 entry:
"MyPkg" => UUID("69940cda-0c72-4a1a-ae0b-fd3109336fe8")
julia> cd("MyPkg")
julia> write("src/MyPkg.jl","""
module MyPkg
export FooStruct, processFoo
abstract type AbstractFooStruct end
struct FooStruct1 <: AbstractFooStruct
bar::Int
end
FooStruct = FooStruct1
function processFoo(foo::AbstractFooStruct)
@info foo.bar
end
end
""")
230
julia> Pkg.activate(".")
Activating project at `~/blah/MyPkg`
julia> using MyPkg
No Changes to `~/blah/MyPkg/Project.toml`
No Changes to `~/blah/MyPkg/Manifest.toml`
Precompiling MyPkg
1 dependency successfully precompiled in 2 seconds
julia> processFoo(FooStruct(1))
[ Info: 1
julia> write("src/MyPkg.jl","""
module MyPkg
export FooStruct, processFoo
abstract type AbstractFooStruct end
struct FooStruct2 <: AbstractFooStruct # change version number
bar::Float64 # change type of the field
end
FooStruct = FooStruct2 # update alias reference
function processFoo(foo::AbstractFooStruct)
@info foo.bar
end
end
""")
234
julia> FooStruct # make sure FooStruct refers to FooStruct2
MyPkg.FooStruct2
julia> processFoo(FooStruct(3.5))
[ Info: 3.5
```
Here, note that we made two changes: we updated the "version number" of FooStruct when we changed something about its fields, and we also re-assigned FooStruct to alias the new version. We did not change the definition of any methods that have been typed AbstractFooStruct.
This works as long as the new type name doesn't conflict with an existing name; within a session you need to change the name each time you change the definition.
Once your development has converged on a solution, it's best to switch to the "permanent" name: in the example above, `FooStruct` is a non-constant global variable, and if used internally in a function there will be consequent performance penalties. Switching to the permanent name will force you to restart your session.
```julia
julia> isconst(MyPkg, :FooStruct)
false
julia> write("src/MyPkg.jl","""
module MyPkg
export FooStruct, processFoo
abstract type AbstractFooStruct end # this could be removed
struct FooStruct <: AbstractFooStruct # change to just FooStruct
bar::Float64
end
function processFoo(foo::AbstractFooStruct) # consider changing to FooStruct
@info foo.bar
end
end
""")
julia> run(Base.julia_cmd()) # start a new Julia session, alternatively exit() and restart julia
julia> using Pkg, Revise # NEW Julia Session
julia> Pkg.activate(".")
Activating project at `~/blah/MyPkg`
julia> using MyPkg
Precompiling MyPkg
1 dependency successfully precompiled in 2 seconds
julia> isconst(MyPkg, :FooStruct)
true
```
In addition, some situations may require special handling:
### Macros and generated functions
If you change a macro definition or methods that get called by `@generated` functions
outside their `quote` block, these changes will not be propagated to functions that have
already evaluated the macro or generated function.
You may explicitly call `revise(MyModule)` to force reevaluating every definition in module
`MyModule`.
Note that when a macro changes, you have to revise all of the modules that *use* it.
### Distributed computing (multiple workers) and anonymous functions
Revise supports changes to code in worker processes.
The code must be loaded in the main process in which Revise is running.
Revise cannot handle changes in anonymous functions used in `remotecall`s.
Consider the following module definition:
```julia
module ParReviseExample
using Distributed
greet(x) = println("Hello, ", x)
foo() = for p in workers()
remotecall_fetch(() -> greet("Bar"), p)
end
end # module
```
Changing the remotecall to `remotecall_fetch((x) -> greet("Bar"), p, 1)` will fail,
because the new anonymous function is not defined on all workers.
The workaround is to write the code to use named functions, e.g.,
```julia
module ParReviseExample
using Distributed
greet(x) = println("Hello, ", x)
greetcaller() = greet("Bar")
foo() = for p in workers()
remotecall_fetch(greetcaller, p)
end
end # module
```
and the corresponding edit to the code would be to modify it to `greetcaller(x) = greet("Bar")`
and `remotecall_fetch(greetcaller, p, 1)`.
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 3.6.0 | 0a20a01fbb3a9531f3325a94b6dcf95c404a1658 | docs | 664 | # User reference
There are really only six functions that most users would be expected to call manually:
`revise`, `includet`, `Revise.track`, `entr`, `Revise.retry`, and `Revise.errors`.
Other user-level constructs might apply if you want to debug Revise or
prevent it from watching specific packages, or for fine-grained handling of callbacks.
```@docs
revise
Revise.track
includet
entr
Revise.retry
Revise.errors
```
### Revise logs (debugging Revise)
```@docs
Revise.debug_logger
Revise.actions
Revise.diffs
```
### Prevent Revise from watching specific packages
```@docs
Revise.dont_watch_pkgs
Revise.silence
```
### Revise module
```@docs
Revise
```
| Revise | https://github.com/timholy/Revise.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 330 | using PythonCall
using CondaPkg
try
pyimport("andes")
pyimport("kvxopt")
catch
@warn "PyCall is not configured to an existing Python env."
@warn "Andes.jl will use Conda for PyCall and install andes."
ENV["PYTHON"] = Conda.PYTHONDIR
Pkg.build("PyCall")
Conda.add("andes", channel="conda-forge")
end
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 133 | using Documenter, Andes
makedocs(modules = [Andes], sitename = "Andes.jl")
deploydocs(repo = "github.com/cuihantao/Andes.jl.git")
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 339 | __precompile__()
module Andes
using PythonCall
const py = PythonCall.pynew()
include("kvxopt_pythoncall.jl")
function __init__()
PythonCall.pycopy!(py, pyimport("andes"))
PythonCall.pyconvert_add_rule("kvxopt.base:spmatrix",
AbstractSparseMatrixCSC,
spmatrix_to_julia,
)
end
export pyconvert, pytype
end | Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 804 | using CondaPkg
using SparseArrays: AbstractSparseMatrixCSC, SparseMatrixCSC
using PythonCall: pyconvert_add_rule
"""
    spmatrix_to_julia(S, x)

`pyconvert` rule turning a KVXOPT `spmatrix` (compressed-column storage)
into a Julia `SparseMatrixCSC`. Throws `ArgumentError` when `x` is not a
KVXOPT `spmatrix`.
"""
function spmatrix_to_julia(S::Type{T}, x::Py) where {T<:AbstractSparseMatrixCSC}
    # Only KVXOPT sparse matrices are supported by this rule.
    Bool(pytype(x) != pyimport("kvxopt").spmatrix) &&
        throw(ArgumentError("x must be a KVXOPT spmatrix"))

    nrow, ncol = pyconvert(Tuple{Int64,Int64}, x.size)
    # KVXOPT exposes CCS as (column pointers, row indices, values), all
    # 0-based; shift the index arrays by one for Julia's 1-based storage.
    colptr = pyconvert(Vector{Int64}, x.CCS[0]) .+ 1
    rowval = pyconvert(Vector{Int64}, x.CCS[1]) .+ 1
    nzval = pyconvert(Vector{Float64}, x.CCS[2])

    return PythonCall.pyconvert_return(SparseMatrixCSC(nrow, ncol, colptr, rowval, nzval))
end
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 48 | using Test
include("test_andes_pythoncall.jl")
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | code | 654 | using Test
using Andes
using PythonCall
using SparseArrays
@testset "Test Andes functionalities" begin
@testset "SparseMatrixCSC conversion from Andes system example" begin
ss = Andes.py.system.example()
converted_matrix = pyconvert(SparseMatrixCSC, ss.dae.gy)
@test converted_matrix isa SparseMatrixCSC
@test size(converted_matrix) == (34, 34)
end
@testset "Power flow run" begin
Andes.py.config_logger(40)
kundur = Andes.py.utils.paths.get_case("kundur/kundur_full.xlsx")
system = Andes.py.run(kundur, no_output=true)
@test Bool(system.PFlow.converged == true)
end
end
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | docs | 3351 | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | docs | 2895 |
# Andes.jl
The Julia API for [ANDES](https://github.com/cuihantao/andes).
[](https://travis-ci.com/cuihantao/Andes.jl)
## Introduction
`Andes.jl` is the Julia API for ANDES, a power system simulation tool for symbolic modeling and numerical simulation.
`Andes.jl` provides APIs through `PyCall` and `Conda` for calling ANDES from Julia.
## Installation
Install `Andes.jl` with
```julia
using Pkg
Pkg.add("Andes")
```
### Customize Python Environment
If you have installed andes in an existing Python environment and do not want to reinstall it from conda, you can configure it in Pycall.
You can manually set the Python path with the following commands in Julia:
```
ENV["PYTHON"] = "... path of the python executable ..."
# ENV["PYTHON"] = "C:\\Python37-x64\\python.exe" # example for Windows
# ENV["PYTHON"] = "/usr/bin/python3.7" # example for *nix
# ENV["PYTHON"] = "/home/name/miniconda3/envs/andes" # example for conda
Pkg.build("PyCall")
```
Check out the [documentation](https://github.com/JuliaPy/PyCall.jl#specifying-the-python-version) of `PyCall.jl` for more details.
## Usage
`Andes.jl` exposes all Python APIs under `Andes.py`. Use the package with
```julia
using Andes
[ Info: Precompiling andes [93a26e3f-343a-4ab9-b467-a68c67574964]
```
All subsequent usages can be made to `andes.py` in the same way as in Python.
For example, to run power flow for `kundur_full.xlsx` (assume exists in the current directory), run
```julia
julia> system = Andes.py.run("kundur_full.xlsx")
Parsing input file <kundur_full.xlsx>
Input file kundur_full.xlsx parsed in 0.0768 second.
-> Power flow calculation with Newton Raphson method:
Power flow initialized.
0: |F(x)| = 14.9283
1: |F(x)| = 3.60859
2: |F(x)| = 0.170093
3: |F(x)| = 0.00203827
4: |F(x)| = 3.76414e-07
Converged in 5 iterations in 0.0063 second.
Report saved to <kundur_full_out.txt> in 0.0007 second.
-> Single process finished in 0.1666 second.
PyObject <andes.system.System object at 0x1522910b8>
```
Visit [ANDES Documentation](https://andes.readthedocs.io) for tutorial and API details
## Development
Contributions to Andes.jl are welcome. Please see [CONTRIBUTING.md](https://github.com/cuihantao/Andes.jl/blob/master/CONTRIBUTING.md) for code contribution guidelines.
## License
`Andes.jl` (the ANDES Julia interface only) is released under [MIT license](https://github.com/cuihantao/Andes.jl/blob/master/LICENSE).
Andes.jl has been developed as part of the Large Scale Testbed (LTB)
project at the Center for Ultra-Wide-Area Resilient Electric Energy Transmission Networks ([CURENT](https://curent.utk.edu/)), a National Science Foundation Engineering Research Center that is jointly supported by NSF (National Science Foundation) and the DoE (Department of Energy) of the United States.
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.0 | dcf9c9d045f1ce989f62d83658bc7a4d267f5110 | docs | 2906 |
# Andes.jl
The Julia interface for ANDES.
[](https://travis-ci.com/cuihantao/Andes.jl)
## Introduction
`Andes.jl` is the Julia interface for ANDES, a power system simulation tool for symbolic modeling and numerical simulation.
`Andes.jl` provides APIs through `PyCall` and `Conda` for calling ANDES from Julia.
## Installation
Install `Andes.jl` with
```julia
using Pkg
Pkg.add("Andes")
```
### Customize Python Environment
If you have installed andes in an existing Python environment and do not want to reinstall it from conda, you can configure it in Pycall.
You can manually set the Python path with the following commands in Julia:
```
ENV["PYTHON"] = "... path of the python executable ..."
# ENV["PYTHON"] = "C:\\Python37-x64\\python.exe" # example for Windows
# ENV["PYTHON"] = "/usr/bin/python3.7" # example for *nix
# ENV["PYTHON"] = "/home/name/miniconda3/envs/andes" # example for conda
Pkg.build("PyCall")
```
Check out the [documentation](https://github.com/JuliaPy/PyCall.jl#specifying-the-python-version) of `PyCall.jl` for more details.
## Usage
`Andes.jl` exposes all Python APIs under `Andes.py`. Use the package with
```julia
using Andes
[ Info: Precompiling andes [93a26e3f-343a-4ab9-b467-a68c67574964]
```
All subsequent usages can be made to `andes.py` in the same way as in Python.
For example, to run power flow for `kundur_full.xlsx` (assume exists in the current directory), run
```julia
julia> system = Andes.py.run("kundur_full.xlsx")
Parsing input file <kundur_full.xlsx>
Input file kundur_full.xlsx parsed in 0.0768 second.
-> Power flow calculation with Newton Raphson method:
Power flow initialized.
0: |F(x)| = 14.9283
1: |F(x)| = 3.60859
2: |F(x)| = 0.170093
3: |F(x)| = 0.00203827
4: |F(x)| = 3.76414e-07
Converged in 5 iterations in 0.0063 second.
Report saved to <kundur_full_out.txt> in 0.0007 second.
-> Single process finished in 0.1666 second.
PyObject <andes.system.System object at 0x1522910b8>
```
Visit [ANDES Documentation](https://andes.readthedocs.io) for tutorial and API details
## Development
Contributions to Andes.jl are welcome. Please see [CONTRIBUTING.md](https://github.com/cuihantao/Andes.jl/blob/master/CONTRIBUTING.md) for code contribution guidelines.
## License
`Andes.jl` (the ANDES Julia interface only) is released under [MIT license](https://github.com/cuihantao/Andes.jl/blob/master/LICENSE).
Andes.jl has been developed as part of the Large Scale Testbed (LTB)
project at the Center for Ultra-Wide-Area Resilient Electric Energy Transmission Networks ([CURENT](https://curent.utk.edu/)), a National Science Foundation Engineering Research Center that is jointly supported by NSF (National Science Foundation) and the DoE (Department of Energy) of the United States.
```@autodocs
Modules = [Andes]
```
| Andes | https://github.com/cuihantao/Andes.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | code | 190 | module MapTiles
using GeoInterface: GeoInterface, Extent, extent
using GeoFormatTypes: EPSG, CoordinateReferenceSystemFormat
import Extents
export Tile, TileGrid
include("tiles.jl")
end
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | code | 6537 | """
Determine the number of meters per pixel
### Parameters
* `lat`: latitude in radians
* `z`: zoom level
Source: http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Resolution_and_Scale
"""
function resolution(lat::Real, z::Integer)
meter_per_pixel = 156543.03 # For zoom = 0 at equator
meter_per_pixel * cos(lat) / (2^z)
end
const R2D = 180 / pi       # radians-to-degrees conversion factor
const RE = 6378137.0       # WGS84 equatorial earth radius in meters
const CE = 2 * pi * RE     # earth circumference at the equator in meters
const EPSILON = 1e-14      # tolerance when snapping fractional tile coordinates
const LL_EPSILON = 1e-11   # tolerance in degrees for bbox edge handling
# represent the two major CRS so we can dispatch on them
struct WGS84 <: CoordinateReferenceSystemFormat end
struct WebMercator <: CoordinateReferenceSystemFormat end
Base.convert(::Type{EPSG}, ::WebMercator) = EPSG(3857)
Base.convert(::Type{EPSG}, ::WGS84) = EPSG(4326)
Base.convert(::Type{String}, ::WebMercator) = "EPSG:3857"
Base.convert(::Type{String}, ::WGS84) = "EPSG:4326"
Base.convert(::Type{Int}, ::WebMercator) = 3857
Base.convert(::Type{Int}, ::WGS84) = 4326
"Projecting between two identical CRS types is the identity: return `point` unchanged."
project(point, ::T, ::T) where T = point
"Convert web mercator x, y to longitude and latitude"
function project(point, from::WebMercator, to::WGS84)
    easting = GeoInterface.x(point)
    northing = GeoInterface.y(point)
    # Invert the spherical mercator forward mapping.
    lng = easting * R2D / RE
    lat = ((pi / 2) - 2.0 * atan(exp(-northing / RE))) * R2D
    return lng, lat
end
"Convert longitude and latitude to web mercator x, y"
function project(point, from::WGS84, to::WebMercator)
    lng = GeoInterface.x(point)
    lat = GeoInterface.y(point)
    x = RE * deg2rad(lng)
    # The spherical mercator northing diverges at the poles, so latitudes
    # at or beyond ±90° clamp to ±Inf.
    y = if lat >= 90
        Inf
    elseif lat <= -90
        -Inf
    else
        RE * log(tan((pi / 4) + (0.5 * deg2rad(lat))))
    end
    return x, y
end
"Project an `Extent` from CRS `from` to CRS `to` by projecting its two corners."
function project_extent(bbox::Extent, from::CoordinateReferenceSystemFormat, to::CoordinateReferenceSystemFormat)
    lower = project((bbox.X[1], bbox.Y[1]), from, to)
    upper = project((bbox.X[2], bbox.Y[2]), from, to)
    return Extent(X=(lower[1], upper[1]), Y=(lower[2], upper[2]))
end
# Singleton instances for convenient use as CRS arguments.
const web_mercator = WebMercator()
const wgs84 = WGS84()
"""
    Tile(x, y, z)

A single slippy-map tile: `x` and `y` are the 0-based column and row
indices at zoom level `z`.
"""
struct Tile
    x::Int
    y::Int
    z::Int
end

# Construct a Tile from a CartesianIndex (x, y) at the given zoom level.
Tile(index::CartesianIndex{2}, zoom::Integer) = Tile(index[1], index[2], zoom)
"Get the tile containing a longitude and latitude"
function Tile(point, zoom::Integer, crs::WGS84)
    lng = GeoInterface.x(point)
    lat = GeoInterface.y(point)
    # Fraction of the world width east of -180°, in [0, 1].
    x = lng / 360.0 + 0.5
    sinlat = sin(deg2rad(lat))
    # Fraction of the world height south of the top edge, via the
    # spherical mercator forward mapping, in [0, 1].
    y = 0.5 - 0.25 * log((1.0 + sinlat) / (1.0 - sinlat)) / pi
    Z2 = 2^zoom  # number of tiles along each axis at this zoom level

    # Clamp out-of-range fractions to the first/last tile.
    xtile = if x <= 0
        0
    elseif x >= 1
        Z2 - 1
    else
        # To address loss of precision in round-tripping between tile
        # and lng/lat, points within EPSILON of the right side of a tile
        # are counted in the next tile over.
        floor(Int, (x + EPSILON) * Z2)
    end

    ytile = if y <= 0
        0
    elseif y >= 1
        Z2 - 1
    else
        # Same EPSILON nudge as for xtile, toward the bottom tile edge.
        floor(Int, (y + EPSILON) * Z2)
    end
    return Tile(xtile, ytile, zoom)
end
"""
    TileGrid(grid, z)

A rectangular block of tiles at a single zoom level `z`, stored as the
`CartesianIndices` of the tiles' x and y indices.
"""
struct TileGrid
    grid::CartesianIndices{2, Tuple{UnitRange{Int}, UnitRange{Int}}}
    z::Int
end
# A TileGrid covering exactly one tile.
TileGrid(tile::Tile) = TileGrid(CartesianIndices((tile.x:tile.x, tile.y:tile.y)), tile.z)
"Get the tiles overlapped by a geographic bounding box"
function TileGrid(bbox::Extent, zoom::Int, crs::WGS84)
    # Mercantile splits the bbox in two along the antimeridian if this happens.
    # Decide if that case should be handled here or before, also considering
    # antimeridian discussion in https://github.com/rafaqz/Extents.jl/issues/4
    # Validate with an ArgumentError rather than @assert, which can be
    # compiled out and is not meant for input validation.
    bbox.X[1] < bbox.X[2] ||
        throw(ArgumentError("bbox.X must be an increasing interval, got $(bbox.X)"))
    # Clamp bounding values to the web mercator latitude/longitude domain.
    max_bbox = Extent(X = (-180.0, 180.0), Y = (-85.051129, 85.051129))
    clamped = Extents.intersection(bbox, max_bbox)
    # Extents.intersection returns nothing for disjoint extents; fail with a
    # clear message instead of a downstream `nothing` property error.
    clamped === nothing &&
        throw(ArgumentError("bbox does not overlap the web mercator domain"))
    # Upper-left / lower-right corner tiles; shrink by LL_EPSILON so the
    # bbox edges do not spill into neighboring tiles.
    ul_tile = Tile((clamped.X[1], clamped.Y[1]), zoom, crs)
    lr_tile = Tile((clamped.X[2] - LL_EPSILON, clamped.Y[2] + LL_EPSILON), zoom, crs)
    grid = CartesianIndices((ul_tile.x:lr_tile.x, lr_tile.y:ul_tile.y))
    return TileGrid(grid, zoom)
end
"Get the tiles overlapped by a web mercator bounding box"
function TileGrid(bbox::Extent, zoom::Int, crs::WebMercator)
    # Reproject to lng/lat and reuse the WGS84 method.
    bbox = project_extent(bbox, crs, wgs84)
    return TileGrid(bbox, zoom, wgs84)
end
# Indexing and size queries forward to the wrapped CartesianIndices;
# getindex wraps the raw index back into a Tile at the grid's zoom level.
Base.length(tilegrid::TileGrid) = length(tilegrid.grid)
Base.size(tilegrid::TileGrid, dims...) = size(tilegrid.grid, dims...)
Base.getindex(tilegrid::TileGrid, i) = Tile(tilegrid.grid[i], tilegrid.z)
Base.firstindex(tilegrid::TileGrid) = firstindex(tilegrid.grid)
Base.lastindex(tilegrid::TileGrid) = lastindex(tilegrid.grid)
# Iterate the grid by linear state index, yielding one Tile per element.
function Base.iterate(tilegrid::TileGrid, state=1)
    state > length(tilegrid) && return nothing
    return (tilegrid[state], state + 1)
end
"Returns the bounding box of a tile in lng lat"
function Extents.extent(tile::Tile, crs::WGS84)
    n = 2^tile.z
    # Longitude is linear in tile x; latitude is the inverse spherical
    # mercator mapping of tile y.
    lon_at(x) = x / n * 360.0 - 180.0
    lat_at(y) = rad2deg(atan(sinh(pi * (1 - 2 * y / n))))
    west = lon_at(tile.x)
    east = lon_at(tile.x + 1)
    north = lat_at(tile.y)
    south = lat_at(tile.y + 1)
    return Extent(X = (west, east), Y = (south, north))
end
"Get the web mercator bounding box of a tile"
function Extents.extent(tile::Tile, crs::WebMercator)
    # Each tile spans CE / 2^z meters; the origin (x=0, y=0 tile corner)
    # is the upper-left of the world at (-CE/2, +CE/2).
    tile_size = CE / 2^tile.z
    west = tile.x * tile_size - CE / 2
    north = CE / 2 - tile.y * tile_size
    return Extent(X = (west, west + tile_size), Y = (north - tile_size, north))
end
"Returns the bounding box of a tile in lng lat"
function Extents.extent(tilegrid::TileGrid, crs::WGS84)
    n = 2^tilegrid.z
    # Corner tiles of the grid: first index is upper-left, last lower-right.
    ul = tilegrid.grid[begin]
    lr = tilegrid.grid[end]
    lon_at(x) = x / n * 360.0 - 180.0
    lat_at(y) = rad2deg(atan(sinh(pi * (1 - 2 * y / n))))
    west = lon_at(ul[1])
    east = lon_at(lr[1] + 1)
    north = lat_at(ul[2])
    south = lat_at(lr[2] + 1)
    return Extent(X = (west, east), Y = (south, north))
end
"Get the web mercator bounding box of a tile"
function Extents.extent(tilegrid::TileGrid, crs::WebMercator)
    tile_size = CE / 2^tilegrid.z
    # Anchor at the grid's upper-left tile and extend by the grid size.
    ul = tilegrid.grid[begin]
    nx, ny = size(tilegrid)
    west = ul[1] * tile_size - CE / 2
    north = CE / 2 - ul[2] * tile_size
    return Extent(X = (west, west + tile_size * nx), Y = (north - tile_size * ny, north))
end
# Forward GeoInterface.extent to the Extents.extent methods defined above.
function GeoInterface.extent(tile::Union{Tile, TileGrid}, crs::Union{WGS84, WebMercator})
    return Extents.extent(tile, crs)
end
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | code | 54 | module vector_tile
include("vector_tile_pb.jl")
end
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | code | 3303 | # syntax: proto2
using ProtoBuf
export Tile_GeomType, Tile_Value, Tile_Feature, Tile_Layer, Tile
# Machine-generated by ProtoBuf.jl from vector_tile.proto; do not edit by hand.
# Enum of vector tile geometry types: UNKNOWN=0, POINT=1, LINESTRING=2, POLYGON=3.
struct __enum_Tile_GeomType <: ProtoEnum
    UNKNOWN::Int32
    POINT::Int32
    LINESTRING::Int32
    POLYGON::Int32
    __enum_Tile_GeomType() = new(0,1,2,3)
end #struct __enum_Tile_GeomType
const Tile_GeomType = __enum_Tile_GeomType()
# Machine-generated by ProtoBuf.jl from vector_tile.proto; do not edit by hand.
# A typed attribute value; per the spec a value carries exactly one of
# these optional fields.
struct Tile_Value
    string_value::AbstractString
    float_value::Float32
    double_value::Float64
    int_value::Int64
    uint_value::UInt64
    sint_value::Int64
    bool_value::Bool
    Tile_Value(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #struct Tile_Value
# sint_value uses zigzag (sint64) wire encoding.
const __wtype_Tile_Value = Dict(:sint_value => :sint64)
ProtoBuf.meta(t::Type{Tile_Value}) = ProtoBuf.meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, ProtoBuf.DEF_VAL, true, ProtoBuf.DEF_PACK, __wtype_Tile_Value, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES)
Base.hash(v::Tile_Value) = ProtoBuf.protohash(v)
Base.isequal(v1::Tile_Value, v2::Tile_Value) = ProtoBuf.protoisequal(v1, v2)
Base.==(v1::Tile_Value, v2::Tile_Value) = ProtoBuf.protoeq(v1, v2)
# Machine-generated by ProtoBuf.jl from vector_tile.proto; do not edit by hand.
# A feature: `tags` are key/value index pairs into the parent layer's keys
# and values lists, `_type` is a Tile_GeomType, and `geometry` holds the
# encoded command/parameter integer stream.
struct Tile_Feature
    id::UInt64
    tags::Array{UInt32,1}
    _type::Int32
    geometry::Array{UInt32,1}
    Tile_Feature(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #struct Tile_Feature
const __val_Tile_Feature = Dict(:id => 0, :_type => Tile_GeomType.UNKNOWN)
# tags and geometry use packed repeated encoding.
const __pack_Tile_Feature = Symbol[:tags,:geometry]
ProtoBuf.meta(t::Type{Tile_Feature}) = ProtoBuf.meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, __val_Tile_Feature, true, __pack_Tile_Feature, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES)
Base.hash(v::Tile_Feature) = ProtoBuf.protohash(v)
Base.isequal(v1::Tile_Feature, v2::Tile_Feature) = ProtoBuf.protoisequal(v1, v2)
Base.==(v1::Tile_Feature, v2::Tile_Feature) = ProtoBuf.protoeq(v1, v2)
# Machine-generated by ProtoBuf.jl from vector_tile.proto; do not edit by hand.
# A named layer: features plus the shared keys/values attribute pools and
# the tile extent (defaults to 4096 coordinate units).
struct Tile_Layer
    version::UInt32
    name::AbstractString
    features::Array{Tile_Feature,1}
    keys::Array{AbstractString,1}
    values::Array{Tile_Value,1}
    extent::UInt32
    Tile_Layer(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #struct Tile_Layer
# version and name are required fields in the .proto schema.
const __req_Tile_Layer = Symbol[:version,:name]
const __val_Tile_Layer = Dict(:version => 1, :extent => 4096)
# Wire field numbers for the fields above, in declaration order.
const __fnum_Tile_Layer = Int[15,1,2,3,4,5]
ProtoBuf.meta(t::Type{Tile_Layer}) = ProtoBuf.meta(t, __req_Tile_Layer, __fnum_Tile_Layer, __val_Tile_Layer, true, ProtoBuf.DEF_PACK, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES)
Base.hash(v::Tile_Layer) = ProtoBuf.protohash(v)
Base.isequal(v1::Tile_Layer, v2::Tile_Layer) = ProtoBuf.protoisequal(v1, v2)
Base.==(v1::Tile_Layer, v2::Tile_Layer) = ProtoBuf.protoeq(v1, v2)
# Machine-generated by ProtoBuf.jl from vector_tile.proto; do not edit by hand.
# Top-level message: a vector tile is a set of named layers.
struct Tile
    layers::Array{Tile_Layer,1}
    Tile(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #struct Tile
# layers is wire field number 3.
const __fnum_Tile = Int[3]
ProtoBuf.meta(t::Type{Tile}) = ProtoBuf.meta(t, ProtoBuf.DEF_REQ, __fnum_Tile, ProtoBuf.DEF_VAL, true, ProtoBuf.DEF_PACK, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES)
Base.hash(v::Tile) = ProtoBuf.protohash(v)
Base.isequal(v1::Tile, v2::Tile) = ProtoBuf.protoisequal(v1, v2)
Base.==(v1::Tile, v2::Tile) = ProtoBuf.protoeq(v1, v2)
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | code | 2701 | import MapTiles as MT
using GeoInterface: Extent, extent
using MapTiles
using Test
import Aqua
import HTTP, ImageMagick
using TileProviders
# Shorthand aliases for the pixel types produced by ImageMagick.readblob.
RGB = ImageMagick.ColorTypes.RGB
N0f8 = ImageMagick.FixedPointNumbers.N0f8
@testset "MapTiles" begin
    @testset "Tile" begin
        point_wgs = (-105.0, 40.0)
        tile = Tile(point_wgs, 1, MT.wgs84)
        @test tile === Tile(0, 0, 1)

        bbox = extent(tile, MT.wgs84)
        # The components of the lng/lat extent are asserted individually
        # below (the Y upper bound only approximately).
        @test bbox isa Extent
        @test bbox.X === (-180.0, 0.0)
        @test bbox.Y[1] === 0.0
        @test bbox.Y[2] ≈ 85.0511287798066

        bbox = extent(tile, MT.web_mercator)
        @test bbox isa Extent
        @test bbox.X[1] ≈ -2.0037508342789244e7
        @test bbox.X[2] === 0.0
        @test bbox.Y[1] === 0.0
        @test bbox.Y[2] ≈ 2.0037508342789244e7
    end

    @testset "TileGrid" begin
        point_wgs = (-105.0, 40.0)
        tile = Tile(point_wgs, 1, MT.wgs84)
        bbox = extent(tile, MT.web_mercator)
        @test TileGrid(tile) === TileGrid(CartesianIndices((0:0, 0:0)), 1)
        @test TileGrid(bbox, 0, MT.wgs84) === TileGrid(CartesianIndices((0:0, 0:0)), 0)
        tilegrid = TileGrid(bbox, 3, MT.wgs84)
        @test tilegrid === TileGrid(CartesianIndices((0:3, 0:4)), 3)

        bbox = Extent(X = (-1.23, 5.65), Y = (-5.68, 4.77))
        tilegrid = TileGrid(bbox, 8, MT.wgs84)
        @test size(tilegrid) === (6, 9)
        @test length(tilegrid) === 54

        # creating a TileGrid from a web mercator extent
        webbox = MT.project_extent(bbox, MT.wgs84, MT.web_mercator)
        @test tilegrid === TileGrid(webbox, 8, MT.web_mercator)
    end

    @testset "project" begin
        # point
        point_wgs = (-105.0, 40.0)
        point_web = MT.project(point_wgs, MT.wgs84, MT.web_mercator)
        @test point_web[1] ≈ -1.1688546533293726e7
        @test point_web[2] ≈ 4.865942279503176e6
        point_wgs2 = MT.project(point_web, MT.web_mercator, MT.wgs84)
        # These round-trip comparisons were previously computed but never
        # asserted; wrap them in @test so failures are reported.
        @test point_wgs[1] ≈ point_wgs2[1]
        @test point_wgs[2] ≈ point_wgs2[2]

        # extent
        bbox = Extent(X = (-180.0, 0.0), Y = (0.0, 85.0511287798066))
        webbox = MT.project_extent(bbox, MT.wgs84, MT.web_mercator)
        @test webbox.X[1] ≈ -2.0037508342789244e7
        @test webbox.X[2] == 0.0
        @test webbox.Y[1] ≈ -7.081154551613622e-10
        @test webbox.Y[2] ≈ 2.0037508342789244e7
    end

    @testset "get tile image" begin
        provider = OpenStreetMap()
        # get the most zoomed out image of the whole world
        tile = Tile(0, 0, 0)
        url = geturl(provider, tile.x, tile.y, tile.z)
        result = HTTP.get(url)
        @test result.status == 200
        @test HTTP.header(result, "Content-Type") == "image/png"
        img = ImageMagick.readblob(result.body)
        @test img isa Matrix{RGB{N0f8}}
        @test size(img) == (256, 256)
    end

    Aqua.test_all(MapTiles)
end
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | docs | 2696 | # MapTiles
[](https://github.com/JuliaGeo/MapTiles.jl/actions?query=workflow%3ACI)
[](http://codecov.io/github/JuliaGeo/MapTiles.jl?branch=master)
MapTiles is a [Julia](https://julialang.org/) package for working with
[tiled web maps](https://en.wikipedia.org/wiki/Tiled_web_map), also known as slippy maps.
It mainly concerns itself with getting a set of tile indices based on a given area of
interest and zoom level, specified in WGS84 longitude/latitude or Web Mercator.
It does not download any tile images, but can be used together with
[TileProviders.jl](https://github.com/JuliaGeo/TileProviders.jl) to create URIs for tiles,
which can then be downloaded and plotted. [Tyler.jl](https://github.com/MakieOrg/Tyler.jl)
is a [Makie](http://makie.org/) package that uses MapTiles and TileProviders to plot
interactive web maps, for instance as a background layer to plot geospatial data on top of.
## Usage
```julia
using MapTiles, TileProviders
import HTTP, ImageMagick
using GeoInterface: Extent, extent
# get a single Tile with x, y and z index from a point and zoom level
point_wgs = (-105.0, 40.0)
tile = Tile(point_wgs, 8, MapTiles.wgs84)
# -> Tile(53, 96, 8)
# get the extent of a Tile in Web Mercator coordinates
bbox = extent(tile, MapTiles.web_mercator)
# -> Extent(X = (-1.1740727e7, -1.1584184e7), Y = (4.8528340e6, 5.0093770e6))
# get a TileGrid from an Extent and zoom level
bbox = Extent(X = (-1.23, 5.65), Y = (-5.68, 4.77))
tilegrid = TileGrid(bbox, 8, MapTiles.wgs84)
# -> TileGrid(CartesianIndices((127:132, 124:132)), 8)
# load the zoom 0 OpenStreetMap tile into an image
provider = OpenStreetMap()
tile = Tile(0, 0, 0)
url = geturl(provider, tile.x, tile.y, tile.z)
result = HTTP.get(url)
img = ImageMagick.readblob(result.body)
# -> 256×256 Array{RGB{N0f8},2}
```

## Packages in other Languages
If you're coming from Python or R, you might be interested in the following packages instead:
- [mercantile: Spherical mercator tile and coordinate utilities](https://github.com/mapbox/mercantile)
- The design of this package is largely based on mercantile.
- [Smopy: OpenStreetMap Image Tiles in Python](https://github.com/rossant/smopy)
- [Rio-tiler: Rasterio plugin to serve tiles from AWS S3 hosted files](https://github.com/mapbox/rio-tiler)
- [ggmap: makes it easy to retrieve raster map tiles from popular online mapping services](https://github.com/dkahle/ggmap)
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MIT"
] | 1.0.1 | 2849482a22772e789f684ac78c16db1f9132c892 | docs | 23489 | # Vector Tile Specification
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in
this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
## 1. Purpose
This document specifies a space-efficient encoding format for tiled geographic vector data. It is designed to be used in browsers or server-side applications for fast rendering or lookups of feature data.
## 2. File Format
The Vector Tile format uses [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) as an encoding format. Protocol Buffers are a language-neutral, platform-neutral extensible mechanism for serializing structured data.
### 2.1. File Extension
The filename extension for Vector Tile files SHOULD be `mvt`. For example, a file might be named `vector.mvt`.
### 2.2. Multipurpose Internet Mail Extensions (MIME)
When serving Vector Tiles the MIME type SHOULD be `application/vnd.mapbox-vector-tile`.
## 3. Projection and Bounds
A Vector Tile represents data based on a square extent within a projection. A Vector Tile SHOULD NOT contain information about its bounds and projection. The file format assumes that the decoder knows the bounds and projection of a Vector Tile before decoding it.
[Web Mercator](https://en.wikipedia.org/wiki/Web_Mercator) is the projection of reference, and [the Google tile scheme](http://www.maptiler.org/google-maps-coordinates-tile-bounds-projection/) is the tile extent convention of reference. Together, they provide a 1-to-1 relationship between a specific geographical area, at a specific level of detail, and a path such as `https://example.com/17/65535/43602.mvt`.
Vector Tiles MAY be used to represent data with any projection and tile extent scheme.
## 4. Internal Structure
This specification describes the structure of data within a Vector Tile. The reader should have an understanding of the [Vector Tile protobuf schema document](vector_tile.proto) and the structures it defines.
### 4.1. Layers
A Vector Tile consists of a set of named layers. A layer contains geometric features and their metadata. The layer format is designed so that the data required for a layer is contiguous in memory, and so that layers can be appended to a Vector Tile without modifying existing data.
A Vector Tile SHOULD contain at least one layer. A layer SHOULD contain at least one feature.
A layer MUST contain a `version` field with the major version number of the Vector Tile specification to which the layer adheres. For example, a layer adhering to version 2.1 of the specification contains a `version` field with the integer value `2`. The `version` field SHOULD be the first field within the layer. Decoders SHOULD parse the `version` first to ensure that they are capable of decoding each layer. When a Vector Tile consumer encounters a Vector Tile layer with an unknown version, it MAY make a best-effort attempt to interpret the layer, or it MAY skip the layer. In either case it SHOULD continue to process subsequent layers in the Vector Tile.
A layer MUST contain a `name` field. A Vector Tile MUST NOT contain two or more layers whose `name` values are byte-for-byte identical. Prior to appending a layer to an existing Vector Tile, an encoder MUST check the existing `name` fields in order to prevent duplication.
Each feature in a layer (see below) may have one or more key-value pairs as its metadata. The keys and values are indices into two lists, `keys` and `values`, that are shared across the layer's features.
Each element in the `keys` field of the layer is a string. The `keys` include all the keys of features used in the layer, and each key may be referenced by its positional index in this set of `keys`, with the first key having an index of 0. The set of `keys` SHOULD NOT contain two or more values which are byte-for-byte identical.
Each element in the `values` field of the layer encodes a value of any of several types (see below). The `values` represent all the values of features used in the layer, and each value may be referenced by its positional index in this set of `values`, with the first value having an index of 0. The set of `values` SHOULD NOT contain two or more values of the same type which are byte-for-byte identical.
In order to support values of varying string, boolean, integer, and floating point types, the protobuf encoding of the `value` field consists of a set of `optional` fields. A value MUST contain exactly one of these optional fields.
A layer MUST contain an `extent` that describes the width and height of the tile in integer coordinates. The geometries within the Vector Tile MAY extend past the bounds of the tile's area as defined by the `extent`. Geometries that extend past the tile's area as defined by `extent` are often used as a buffer for rendering features that overlap multiple adjacent tiles.
For example, if a tile has an `extent` of 4096, coordinate units within the tile refer to 1/4096th of its square dimensions. A coordinate of 0 is on the top or left edge of the tile, and a coordinate of 4096 is on the bottom or right edge. Coordinates from 1 through 4095 inclusive are fully within the extent of the tile, and coordinates less than 0 or greater than 4096 are fully outside the extent of the tile. A point at `(1,10)` or `(4095,10)` is within the extent of the tile. A point at `(0,10)` or `(4096,10)` is on the edge of the extent. A point at `(-1,10)` or `(4097,10)` is outside the extent of the tile.
### 4.2. Features
A feature MUST contain a `geometry` field.
A feature MUST contain a `type` field as described in the Geometry Types section.
A feature MAY contain a `tags` field. Feature-level metadata, if any, SHOULD be stored in the `tags` field.
A feature MAY contain an `id` field. If a feature has an `id` field, the value of the `id` SHOULD be unique among the features of the parent layer.
### 4.3. Geometry Encoding
Geometry data in a Vector Tile is defined in a screen coordinate system. The upper left corner of the tile (as displayed by default) is the origin of the coordinate system. The X axis is positive to the right, and the Y axis is positive downward. Coordinates within a geometry MUST be integers.
A geometry is encoded as a sequence of 32 bit unsigned integers in the `geometry` field of a feature. Each integer is either a `CommandInteger` or a `ParameterInteger`. A decoder interprets these as an ordered series of operations to generate the geometry.
Commands refer to positions relative to a "cursor", which is a redefinable point. For the first command in a feature, the cursor is at `(0,0)` in the coordinate system. Some commands move the cursor, affecting subsequent commands.
#### 4.3.1. Command Integers
A `CommandInteger` indicates a command to be executed, as a command ID, and the number of times that the command will be executed, as a command count.
A command ID is encoded as an unsigned integer in the least significant 3 bits of the `CommandInteger`, and is in the range 0 through 7, inclusive. A command count is encoded as an unsigned integer in the remaining 29 bits of a `CommandInteger`, and is in the range `0` through `pow(2, 29) - 1`, inclusive.
A command ID, a command count, and a `CommandInteger` are related by these bitwise operations:
```javascript
CommandInteger = (id & 0x7) | (count << 3)
```
```javascript
id = CommandInteger & 0x7
```
```javascript
count = CommandInteger >> 3
```
A command ID specifies one of the following commands:
| Command | Id | Parameters | Parameter Count |
| ------------ |:----:| ------------- | --------------- |
| MoveTo | `1` | `dX`, `dY` | 2 |
| LineTo | `2` | `dX`, `dY` | 2 |
| ClosePath | `7` | No parameters | 0 |
##### Example Command Integers
| Command | ID | Count | CommandInteger | Binary Representation `[Count][Id]` |
| --------- |:----:|:-----:|:--------------:|:----------------------------------------:|
| MoveTo | `1` | `1` | `9` | `[00000000 00000000 0000000 00001][001]` |
| MoveTo | `1` | `120` | `961` | `[00000000 00000000 0000011 11000][001]` |
| LineTo | `2` | `1` | `10` | `[00000000 00000000 0000000 00001][010]` |
| LineTo | `2` | `3` | `26` | `[00000000 00000000 0000000 00011][010]` |
| ClosePath | `7` | `1` | `15` | `[00000000 00000000 0000000 00001][111]` |
#### 4.3.2. Parameter Integers
Commands requiring parameters are followed by a `ParameterInteger` for each parameter required by that command. The number of `ParameterIntegers` that follow a `CommandInteger` is equal to the parameter count of a command multiplied by the command count of the `CommandInteger`. For example, a `CommandInteger` with a `MoveTo` command with a command count of 3 will be followed by 6 `ParameterIntegers`.
A `ParameterInteger` is [zigzag](https://developers.google.com/protocol-buffers/docs/encoding#types) encoded so that small negative and positive values are both encoded as small integers. To encode a parameter value to a `ParameterInteger` the following formula is used:
```javascript
ParameterInteger = (value << 1) ^ (value >> 31)
```
Parameter values greater than `pow(2,31) - 1` or less than `-1 * (pow(2,31) - 1)` are not supported.
The following formula is used to decode a `ParameterInteger` to a value:
```javascript
value = ((ParameterInteger >> 1) ^ (-(ParameterInteger & 1)))
```
#### 4.3.3. Command Types
For all descriptions of commands the initial position of the cursor shall be described to be at the coordinates `(cX, cY)` where `cX` is the position of the cursor on the X axis and `cY` is the position of the `cursor` on the Y axis.
##### 4.3.3.1. MoveTo Command
A `MoveTo` command with a command count of `n` MUST be immediately followed by `n` pairs of `ParameterInteger`s. Each pair `(dX, dY)`:
1. Defines the coordinate `(pX, pY)`, where `pX = cX + dX` and `pY = cY + dY`.
* Within POINT geometries, this coordinate defines a new point.
* Within LINESTRING geometries, this coordinate defines the starting vertex of a new line.
* Within POLYGON geometries, this coordinate defines the starting vertex of a new linear ring.
2. Moves the cursor to `(pX, pY)`.
##### 4.3.3.2. LineTo Command
A `LineTo` command with a command count of `n` MUST be immediately followed by `n` pairs of `ParameterInteger`s. Each pair `(dX, dY)`:
1. Defines a segment beginning at the cursor `(cX, cY)` and ending at the coordinate `(pX, pY)`, where `pX = cX + dX` and `pY = cY + dY`.
* Within LINESTRING geometries, this segment extends the current line.
* Within POLYGON geometries, this segment extends the current linear ring.
2. Moves the cursor to `(pX, pY)`.
For any pair of `(dX, dY)` the `dX` and `dY` MUST NOT both be `0`.
##### 4.3.3.3. ClosePath Command
A `ClosePath` command MUST have a command count of 1 and no parameters. The command closes the current linear ring of a POLYGON geometry via a line segment beginning at the cursor `(cX, cY)` and ending at the starting vertex of the current linear ring.
This command does not change the cursor position.
#### 4.3.4. Geometry Types
The `geometry` field is described in each feature by the `type` field which must be a value in the enum `GeomType`. The following geometry types are supported:
* UNKNOWN
* POINT
* LINESTRING
* POLYGON
Geometry collections are not supported.
##### 4.3.4.1. Unknown Geometry Type
The specification purposefully leaves an unknown geometry type as an option. This geometry type encodes experimental geometry types that an encoder MAY choose to implement. Decoders MAY ignore any features of this geometry type.
##### 4.3.4.2. Point Geometry Type
The `POINT` geometry type encodes a point or multipoint geometry. The geometry command sequence for a point geometry MUST consist of a single `MoveTo` command with a command count greater than 0.
If the `MoveTo` command for a `POINT` geometry has a command count of 1, then the geometry MUST be interpreted as a single point; otherwise the geometry MUST be interpreted as a multipoint geometry, wherein each pair of `ParameterInteger`s encodes a single point.
##### 4.3.4.3. Linestring Geometry Type
The `LINESTRING` geometry type encodes a linestring or multilinestring geometry. The geometry command sequence for a linestring geometry MUST consist of one or more repetitions of the following sequence:
1. A `MoveTo` command with a command count of 1
2. A `LineTo` command with a command count greater than 0
If the command sequence for a `LINESTRING` geometry type includes only a single `MoveTo` command then the geometry MUST be interpreted as a single linestring; otherwise the geometry MUST be interpreted as a multilinestring geometry, wherein each `MoveTo` signals the beginning of a new linestring.
##### 4.3.4.4. Polygon Geometry Type
The `POLYGON` geometry type encodes a polygon or multipolygon geometry, each polygon consisting of exactly one exterior ring that contains zero or more interior rings. The geometry command sequence for a polygon consists of one or more repetitions of the following sequence:
1. An `ExteriorRing`
2. Zero or more `InteriorRing`s
Each `ExteriorRing` and `InteriorRing` MUST consist of the following sequence:
1. A `MoveTo` command with a command count of 1
2. A `LineTo` command with a command count greater than 1
3. A `ClosePath` command
An exterior ring is DEFINED as a linear ring having a positive area as calculated by applying the [surveyor's formula](https://en.wikipedia.org/wiki/Shoelace_formula) to the vertices of the polygon in tile coordinates. In the tile coordinate system (with the Y axis positive down and X axis positive to the right) this makes the exterior ring's winding order appear clockwise.
An interior ring is DEFINED as a linear ring having a negative area as calculated by applying the [surveyor's formula](https://en.wikipedia.org/wiki/Shoelace_formula) to the vertices of the polygon in tile coordinates. In the tile coordinate system (with the Y axis positive down and X axis positive to the right) this makes the interior ring's winding order appear counterclockwise.
If the command sequence for a `POLYGON` geometry type includes only a single exterior ring then the geometry MUST be interpreted as a single polygon; otherwise the geometry MUST be interpreted as a multipolygon geometry, wherein each exterior ring signals the beginning of a new polygon. If a polygon has interior rings they MUST be encoded directly after the exterior ring of the polygon to which they belong.
Linear rings MUST be geometric objects that have no anomalous geometric points, such as self-intersection or self-tangency. The position of the cursor before calling the `ClosePath` command of a linear ring SHALL NOT repeat the same position as the first point in the linear ring as this would create a zero-length line segment. A linear ring SHOULD NOT have an area calculated by the surveyor's formula equal to zero, as this would signify a ring with anomalous geometric points.
Polygon geometries MUST NOT have any interior rings that intersect and interior rings MUST be enclosed by the exterior ring.
#### 4.3.5. Example Geometry Encodings
##### 4.3.5.1. Example Point
An example encoding of a point located at:
* (25,17)
This would require a single command:
* MoveTo(+25, +17)
```
Encoded as: [ 9 50 34 ]
| | `> Decoded: ((34 >> 1) ^ (-(34 & 1))) = +17
| `> Decoded: ((50 >> 1) ^ (-(50 & 1))) = +25
| ===== relative MoveTo(+25, +17) == create point (25,17)
`> [00001 001] = command id 1 (MoveTo), command count 1
```
##### 4.3.5.2. Example Multi Point
An example encoding of two points located at:
* (5,7)
* (3,2)
This would require two commands:
* MoveTo(+5,+7)
* MoveTo(-2,-5)
```
Encoded as: [ 17 10 14 3 9 ]
| | | | `> Decoded: ((9 >> 1) ^ (-(9 & 1))) = -5
| | | `> Decoded: ((3 >> 1) ^ (-(3 & 1))) = -2
| | | === relative MoveTo(-2, -5) == create point (3,2)
  | |    |  `> Decoded: ((14 >> 1) ^ (-(14 & 1))) = +7
  | |    `> Decoded: ((10 >> 1) ^ (-(10 & 1))) = +5
| ===== relative MoveTo(+5, +7) == create point (5,7)
`> [00010 001] = command id 1 (MoveTo), command count 2
```
##### 4.3.5.3. Example Linestring
An example encoding of a line with the points:
* (2,2)
* (2,10)
* (10,10)
This would require three commands:
* MoveTo(+2,+2)
* LineTo(+0,+8)
* LineTo(+8,+0)
```
Encoded as: [ 9 4 4 18 0 16 16 0 ]
| | ==== relative LineTo(+8, +0) == Line to Point (10, 10)
| | ==== relative LineTo(+0, +8) == Line to Point (2, 10)
| `> [00010 010] = command id 2 (LineTo), command count 2
| === relative MoveTo(+2, +2)
`> [00001 001] = command id 1 (MoveTo), command count 1
```
##### 4.3.5.4. Example Multi Linestring
An example encoding of two lines with the points:
* Line 1:
* (2,2)
* (2,10)
* (10,10)
* Line 2:
* (1,1)
* (3,5)
This would require the following commands:
* MoveTo(+2,+2)
* LineTo(+0,+8)
* LineTo(+8,+0)
* MoveTo(-9,-9)
* LineTo(+2,+4)
```
Encoded as: [ 9 4 4 18 0 16 16 0 9 17 17 10 4 8 ]
| | | | === relative LineTo(+2, +4) == Line to Point (3,5)
| | | `> [00001 010] = command id 2 (LineTo), command count 1
| | | ===== relative MoveTo(-9, -9) == Start new line at (1,1)
| | `> [00001 001] = command id 1 (MoveTo), command count 1
| | ==== relative LineTo(+8, +0) == Line to Point (10, 10)
| | ==== relative LineTo(+0, +8) == Line to Point (2, 10)
| `> [00010 010] = command id 2 (LineTo), command count 2
| === relative MoveTo(+2, +2)
`> [00001 001] = command id 1 (MoveTo), command count 1
```
##### 4.3.5.5. Example Polygon
An example encoding of a polygon feature that has the points:
* (3,6)
* (8,12)
* (20,34)
* (3,6) *Path Closing as Last Point*
Would encoded by using the following commands:
* MoveTo(3, 6)
* LineTo(5, 6)
* LineTo(12, 22)
* ClosePath
```
Encoded as: [ 9 6 12 18 10 12 24 44 15 ]
| | `> [00001 111] command id 7 (ClosePath), command count 1
| | ===== relative LineTo(+12, +22) == Line to Point (20, 34)
| | ===== relative LineTo(+5, +6) == Line to Point (8, 12)
| `> [00010 010] = command id 2 (LineTo), command count 2
| ==== relative MoveTo(+3, +6)
`> [00001 001] = command id 1 (MoveTo), command count 1
```
##### 4.3.5.6. Example Multi Polygon
An example of a more complex encoding of two polygons, one with a hole. The position of the points for the polygons are shown below. The winding order of the polygons is VERY important in this example as it signifies the difference between interior rings and a new polygon.
* Polygon 1:
* Exterior Ring:
* (0,0)
* (10,0)
* (10,10)
* (0,10)
* (0,0) *Path Closing as Last Point*
* Polygon 2:
* Exterior Ring:
* (11,11)
* (20,11)
* (20,20)
* (11,20)
* (11,11) *Path Closing as Last Point*
* Interior Ring:
* (13,13)
* (13,17)
* (17,17)
* (17,13)
* (13,13) *Path Closing as Last Point*
This polygon would be encoded with the following set of commands:
* MoveTo(+0,+0)
* LineTo(+10,+0)
* LineTo(+0,+10)
* LineTo(-10,+0) // Cursor at 0,10 after this command
* ClosePath // End of Polygon 1
* MoveTo(+11,+1) // NOTE THAT THIS IS RELATIVE TO LAST LINETO!
* LineTo(+9,+0)
* LineTo(+0,+9)
* LineTo(-9,+0) // Cursor at 11,20 after this command
* ClosePath // This is a new polygon because area is positive!
* MoveTo(+2,-7) // NOTE THAT THIS IS RELATIVE TO LAST LINETO!
* LineTo(+0,+4)
* LineTo(+4,+0)
* LineTo(+0,-4) // Cursor at 17,13
* ClosePath // This is an interior ring because area is negative!
```
Encoded as: [ 9 0 0 26 20 0 0 20 19 0 15 9 22 2 26 18 0 0 18 17 0 15 9 4 13 26 0 8 8 0 0 7 15 ]
| | | | | | | | `> [00001 111] (ClosePath)
| | | | | | | `> [00011 010] = (LineTo), command count 3
| | | | | | `> [00001 001] = command id 1 (MoveTo), command count 1
| | | | | `> [00001 111] (ClosePath)
| | | | `> [00011 010] = (LineTo), command count 3
| | | `> [00001 001] = command id 1 (MoveTo), command count 1
| | `> [00001 111] (ClosePath)
| `> [00011 010] = (LineTo), command count 3
`> [00001 001] = command id 1 (MoveTo), command count 1
```
### 4.4. Feature Attributes
Feature attributes are encoded as pairs of integers in the `tag` field of a feature. The first integer in each pair represents the zero-based index of the key in the `keys` set of the `layer` to which the feature belongs. The second integer in each pair represents the zero-based index of the value in the `values` set of the `layer` to which the feature belongs. Every key index MUST be unique within that feature such that no other attribute pair within that feature has the same key index. A feature MUST have an even number of `tag` fields. A feature `tag` field MUST NOT contain a key index or value index greater than or equal to the number of elements in the layer's `keys` or `values` set, respectively.
### 4.5. Example
For example, a GeoJSON feature like:
```json
{
"type": "FeatureCollection",
"features": [
{
"geometry": {
"type": "Point",
"coordinates": [
-8247861.1000836585,
4970241.327215323
]
},
"type": "Feature",
"properties": {
"hello": "world",
"h": "world",
"count": 1.23
}
},
{
"geometry": {
"type": "Point",
"coordinates": [
-8247861.1000836585,
4970241.327215323
]
},
"type": "Feature",
"properties": {
"hello": "again",
"count": 2
}
}
]
}
```
Could be structured like:
```js
layers {
version: 2
name: "points"
features: {
id: 1
tags: 0
tags: 0
tags: 1
tags: 0
tags: 2
tags: 1
type: Point
geometry: 9
geometry: 2410
geometry: 3080
}
features {
id: 2
tags: 0
tags: 2
tags: 2
tags: 3
type: Point
geometry: 9
geometry: 2410
geometry: 3080
}
keys: "hello"
keys: "h"
keys: "count"
values: {
string_value: "world"
}
values: {
double_value: 1.23
}
values: {
string_value: "again"
}
values: {
int_value: 2
}
extent: 4096
}
```
Keep in mind the exact values for the geometry would differ based on the projection and extent of the tile.
| MapTiles | https://github.com/JuliaGeo/MapTiles.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 18425 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module MultiObjectiveAlgorithms
import Combinatorics
import MathOptInterface as MOI
"""
    SolutionPoint

One point on the (approximate) Pareto frontier: `x` maps each decision
variable to its primal value, and `y` is the corresponding vector of
objective values.
"""
struct SolutionPoint
    x::Dict{MOI.VariableIndex,Float64}
    y::Vector{Float64}
end

# Solution points are compared by their objective vectors only; the variable
# values in `x` are intentionally ignored.
function Base.isapprox(a::SolutionPoint, b::SolutionPoint; kwargs...)
    return isapprox(a.y, b.y; kwargs...)
end

# NOTE(review): `==` is overloaded without a matching `Base.hash` method, so
# `SolutionPoint`s should not be used as `Dict` keys or `Set` elements.
Base.:(==)(a::SolutionPoint, b::SolutionPoint) = a.y == b.y
"""
    dominates(sense, a::SolutionPoint, b::SolutionPoint)

Return `true` if point `a` dominates point `b`: `a` is no worse than `b` in
every objective (with respect to `sense`) and the two objective vectors are
not identical.
"""
function dominates(sense, a::SolutionPoint, b::SolutionPoint)
    # Identical objective vectors never dominate one another.
    a.y == b.y && return false
    if sense == MOI.MIN_SENSE
        return all(ai <= bi for (ai, bi) in zip(a.y, b.y))
    end
    return all(ai >= bi for (ai, bi) in zip(a.y, b.y))
end
"""
    filter_nondominated(sense, solutions::Vector{SolutionPoint})

Return the subset of `solutions` that are not dominated by any other solution,
sorted by objective vector, with approximate duplicates removed.
"""
function filter_nondominated(sense, solutions::Vector{SolutionPoint})
    ordered = sort(solutions; by = s -> s.y)
    kept = SolutionPoint[]
    for candidate in ordered
        is_dominated = any(other -> dominates(sense, other, candidate), ordered)
        is_duplicate = any(other -> other.y ≈ candidate.y, kept)
        if !is_dominated && !is_duplicate
            push!(kept, candidate)
        end
    end
    return kept
end
# `_scalarise(f, w)` returns the scalar function `w' * f`, that is, the
# weighted sum of the rows of the vector-valued objective `f`. One method per
# MOI vector function type.

# Weighted sum of a vector of variables: sum(w[i] * x[i]).
function _scalarise(f::MOI.VectorOfVariables, w::Vector{Float64})
    @assert MOI.output_dimension(f) == length(w)
    return MOI.ScalarAffineFunction(
        [MOI.ScalarAffineTerm(w[i], f.variables[i]) for i in 1:length(w)],
        0.0,
    )
end

# Weighted sum of an affine function: scale each term by the weight of the
# output row it belongs to, and collapse the constants.
function _scalarise(f::MOI.VectorAffineFunction, w::Vector{Float64})
    @assert MOI.output_dimension(f) == length(w)
    constant = sum(w[i] * f.constants[i] for i in 1:length(w))
    terms = MOI.ScalarAffineTerm{Float64}[
        MOI.ScalarAffineTerm(
            w[term.output_index] * term.scalar_term.coefficient,
            term.scalar_term.variable,
        ) for term in f.terms
    ]
    return MOI.ScalarAffineFunction(terms, constant)
end

# Weighted sum of a quadratic function: scale quadratic and affine terms by
# the weight of their output row.
function _scalarise(f::MOI.VectorQuadraticFunction, w::Vector{Float64})
    @assert MOI.output_dimension(f) == length(w)
    quad_terms = MOI.ScalarQuadraticTerm{Float64}[
        MOI.ScalarQuadraticTerm(
            w[term.output_index] * term.scalar_term.coefficient,
            term.scalar_term.variable_1,
            term.scalar_term.variable_2,
        ) for term in f.quadratic_terms
    ]
    affine_terms = MOI.ScalarAffineTerm{Float64}[
        MOI.ScalarAffineTerm(
            w[term.output_index] * term.scalar_term.coefficient,
            term.scalar_term.variable,
        ) for term in f.affine_terms
    ]
    constant = sum(w[i] * f.constants[i] for i in 1:length(w))
    return MOI.ScalarQuadraticFunction(quad_terms, affine_terms, constant)
end

# Weighted sum of a nonlinear function: build the expression +(w[1]*f[1], ...).
function _scalarise(f::MOI.VectorNonlinearFunction, w::Vector{Float64})
    scalars = map(zip(w, f.rows)) do (wi, fi)
        return MOI.ScalarNonlinearFunction(:*, Any[wi, fi])
    end
    return MOI.ScalarNonlinearFunction(:+, scalars)
end
# Supertype of all MOA solution algorithms (Dichotomy, Chalmet, etc.).
abstract type AbstractAlgorithm end

# Algorithms carry no variable indices, so index mapping is a no-op.
MOI.Utilities.map_indices(::Function, x::AbstractAlgorithm) = x

"""
    Optimizer(optimizer_factory)

A multi-objective optimizer that wraps a single-objective `inner` solver
created from `optimizer_factory` and solves a sequence of scalar subproblems
according to the chosen algorithm.
"""
mutable struct Optimizer <: MOI.AbstractOptimizer
    inner::MOI.AbstractOptimizer                 # single-objective solver
    algorithm::Union{Nothing,AbstractAlgorithm}  # nothing => use default()
    f::Union{Nothing,MOI.AbstractVectorFunction} # vector objective, if set
    solutions::Vector{SolutionPoint}             # Pareto points found
    termination_status::MOI.TerminationStatusCode
    time_limit_sec::Union{Nothing,Float64}
    solve_time::Float64                          # NaN until optimize! runs
    ideal_point::Vector{Float64}                 # per-objective best values
    function Optimizer(optimizer_factory)
        return new(
            MOI.instantiate(optimizer_factory; with_cache_type = Float64),
            nothing,
            nothing,
            SolutionPoint[],
            MOI.OPTIMIZE_NOT_CALLED,
            nothing,
            NaN,
            Float64[],
        )
    end
end
# Clear the inner model and all cached solve state. `algorithm` and
# `time_limit_sec` are optimizer-level settings and are deliberately left
# untouched (consistent with `MOI.is_empty`, which does not inspect them).
function MOI.empty!(model::Optimizer)
    MOI.empty!(model.inner)
    model.f = nothing
    empty!(model.solutions)
    model.termination_status = MOI.OPTIMIZE_NOT_CALLED
    model.solve_time = NaN
    empty!(model.ideal_point)
    return
end

function MOI.is_empty(model::Optimizer)
    return MOI.is_empty(model.inner) &&
           model.f === nothing &&
           isempty(model.solutions) &&
           model.termination_status == MOI.OPTIMIZE_NOT_CALLED &&
           isnan(model.solve_time) &&
           isempty(model.ideal_point)
end

MOI.supports_incremental_interface(::Optimizer) = true

function MOI.copy_to(dest::Optimizer, src::MOI.ModelLike)
    return MOI.Utilities.default_copy_to(dest, src)
end
### TimeLimitSec

# The time limit applies to the whole multi-objective solve, not to a single
# scalar subproblem, so it is stored here rather than forwarded directly.
function MOI.supports(model::Optimizer, attr::MOI.TimeLimitSec)
    return MOI.supports(model.inner, attr)
end

MOI.get(model::Optimizer, ::MOI.TimeLimitSec) = model.time_limit_sec

function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, value::Real)
    model.time_limit_sec = Float64(value)
    return
end

function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, ::Nothing)
    model.time_limit_sec = nothing
    return
end

# Return `true` if the overall time budget is exhausted. As a side effect,
# when time remains, forward the remaining budget to the inner solver so the
# next scalar subproblem cannot overrun the total limit.
function _time_limit_exceeded(model::Optimizer, start_time::Float64)
    time_limit = MOI.get(model, MOI.TimeLimitSec())
    if time_limit === nothing
        return false
    end
    time_remaining = time_limit - (time() - start_time)
    if time_remaining <= 0
        return true
    end
    if MOI.supports(model.inner, MOI.TimeLimitSec())
        MOI.set(model.inner, MOI.TimeLimitSec(), time_remaining)
    end
    return false
end

### SolveTimeSec

function MOI.get(model::Optimizer, ::MOI.SolveTimeSec)
    return model.solve_time
end
### ObjectiveFunction

# MOA only accepts vector-valued objectives; scalar objectives are rejected.
function MOI.supports(
    ::Optimizer,
    ::MOI.ObjectiveFunction{<:MOI.AbstractScalarFunction},
)
    return false
end

# A vector objective is supported if the inner solver supports both the
# scalar type of each row (`G`) and the type produced by summing two rows
# (`H`), which the algorithms need for weighted-sum subproblems.
function MOI.supports(
    model::Optimizer,
    ::MOI.ObjectiveFunction{F},
) where {F<:MOI.AbstractVectorFunction}
    G = MOI.Utilities.scalar_type(F)
    H = MOI.Utilities.promote_operation(+, Float64, G, G)
    return MOI.supports(model.inner, MOI.ObjectiveFunction{G}()) &&
           MOI.supports(model.inner, MOI.ObjectiveFunction{H}())
end

# Attribute kinds that are forwarded verbatim to the inner optimizer.
const _ATTRIBUTES = Union{
    MOI.AbstractConstraintAttribute,
    MOI.AbstractModelAttribute,
    MOI.AbstractOptimizerAttribute,
    MOI.AbstractVariableAttribute,
}
### Algorithm

"""
    Algorithm <: MOI.AbstractOptimizerAttribute

An attribute to control the algorithm used by MOA.
"""
struct Algorithm <: MOI.AbstractOptimizerAttribute end

MOI.supports(::Optimizer, ::Algorithm) = true

MOI.get(model::Optimizer, ::Algorithm) = model.algorithm

function MOI.set(model::Optimizer, ::Algorithm, alg::AbstractAlgorithm)
    model.algorithm = alg
    return
end

# Algorithm used when the user does not set one explicitly.
default(::Algorithm) = Lexicographic()

### AbstractAlgorithmAttribute

"""
    AbstractAlgorithmAttribute <: MOI.AbstractOptimizerAttribute

A super-type for MOA-specific optimizer attributes.
"""
abstract type AbstractAlgorithmAttribute <: MOI.AbstractOptimizerAttribute end

# Algorithms may override this to provide algorithm-specific defaults.
default(::AbstractAlgorithm, attr::AbstractAlgorithmAttribute) = default(attr)

# Algorithm attributes are delegated to the currently selected algorithm.
function MOI.supports(model::Optimizer, attr::AbstractAlgorithmAttribute)
    return MOI.supports(model.algorithm, attr)
end

function MOI.set(model::Optimizer, attr::AbstractAlgorithmAttribute, value)
    MOI.set(model.algorithm, attr, value)
    return
end

function MOI.get(model::Optimizer, attr::AbstractAlgorithmAttribute)
    return MOI.get(model.algorithm, attr)
end
"""
    SolutionLimit <: AbstractAlgorithmAttribute -> Int

Terminate the algorithm once the set number of solutions have been found.

Defaults to `typemax(Int)`.
"""
struct SolutionLimit <: AbstractAlgorithmAttribute end

default(::SolutionLimit) = typemax(Int)

"""
    ObjectivePriority(index::Int) <: AbstractAlgorithmAttribute -> Int

Assign an `Int` priority to objective number `index`. This is most commonly
used to group the objectives into sets of equal priorities. Greater numbers
indicate higher priority.

Defaults to `0`.
"""
struct ObjectivePriority <: AbstractAlgorithmAttribute
    index::Int
end

default(::ObjectivePriority) = 0

"""
    ObjectiveWeight(index::Int) <: AbstractAlgorithmAttribute -> Float64

Assign a `Float64` weight to objective number `index`. This is most commonly
used to scalarize a set of objectives using their weighted sum.

Defaults to `1.0`.
"""
struct ObjectiveWeight <: AbstractAlgorithmAttribute
    index::Int
end

default(::ObjectiveWeight) = 1.0

"""
    ObjectiveRelativeTolerance(index::Int) <: AbstractAlgorithmAttribute -> Float64

Assign a `Float64` tolerance to objective number `index`. This is most commonly
used to constrain an objective to a range relative to the optimal objective
value of that objective.

Defaults to `0.0`.
"""
struct ObjectiveRelativeTolerance <: AbstractAlgorithmAttribute
    index::Int
end

default(::ObjectiveRelativeTolerance) = 0.0

"""
    ObjectiveAbsoluteTolerance(index::Int) <: AbstractAlgorithmAttribute -> Float64

Assign a `Float64` tolerance to objective number `index`. This is most commonly
used to constrain an objective to a range in absolute terms to the optimal
objective value of that objective.

Defaults to `0.0`.
"""
struct ObjectiveAbsoluteTolerance <: AbstractAlgorithmAttribute
    index::Int
end

default(::ObjectiveAbsoluteTolerance) = 0.0

"""
    EpsilonConstraintStep <: AbstractAlgorithmAttribute -> Float64

The step `ε` to use in epsilon-constraint methods.

Defaults to `1.0`.
"""
struct EpsilonConstraintStep <: AbstractAlgorithmAttribute end

default(::EpsilonConstraintStep) = 1.0

"""
    LexicographicAllPermutations <: AbstractAlgorithmAttribute -> Bool

Controls whether to return the lexicographic solution for all permutations of
the scalar objectives (when `true`), or only the solution corresponding to the
lexicographic solution of the original objective function (when `false`).

Defaults to `true`.
"""
struct LexicographicAllPermutations <: AbstractAlgorithmAttribute end

default(::LexicographicAllPermutations) = true
### RawOptimizerAttribute

# Raw solver options are passed straight through to the inner solver.
function MOI.supports(model::Optimizer, attr::MOI.RawOptimizerAttribute)
    return MOI.supports(model.inner, attr)
end

function MOI.set(model::Optimizer, attr::MOI.RawOptimizerAttribute, value)
    MOI.set(model.inner, attr, value)
    return
end

function MOI.get(model::Optimizer, attr::MOI.RawOptimizerAttribute)
    return MOI.get(model.inner, attr)
end

### AbstractOptimizerAttribute

function MOI.supports(model::Optimizer, arg::MOI.AbstractOptimizerAttribute)
    return MOI.supports(model.inner, arg)
end

function MOI.set(model::Optimizer, attr::MOI.AbstractOptimizerAttribute, value)
    MOI.set(model.inner, attr, value)
    return
end

function MOI.get(model::Optimizer, attr::MOI.AbstractOptimizerAttribute)
    return MOI.get(model.inner, attr)
end

# Report both the algorithm (or the default, if unset) and the inner solver.
function MOI.get(model::Optimizer, ::MOI.SolverName)
    alg = typeof(something(model.algorithm, default(Algorithm())))
    inner = MOI.get(model.inner, MOI.SolverName())
    return "MOA[algorithm=$alg, optimizer=$inner]"
end

### AbstractModelAttribute

function MOI.supports(model::Optimizer, arg::MOI.AbstractModelAttribute)
    return MOI.supports(model.inner, arg)
end
### AbstractVariableAttribute

function MOI.is_valid(model::Optimizer, x::MOI.VariableIndex)
    return MOI.is_valid(model.inner, x)
end

function MOI.supports(
    model::Optimizer,
    arg::MOI.AbstractVariableAttribute,
    ::Type{MOI.VariableIndex},
)
    return MOI.supports(model.inner, arg, MOI.VariableIndex)
end

# Vectorized setter: broadcast onto the scalar method below (via _ATTRIBUTES).
function MOI.set(
    model::Optimizer,
    attr::MOI.AbstractVariableAttribute,
    indices::Vector{<:MOI.VariableIndex},
    args::Vector{T},
) where {T}
    MOI.set.(model, attr, indices, args)
    return
end

### AbstractConstraintAttribute

function MOI.is_valid(model::Optimizer, ci::MOI.ConstraintIndex)
    return MOI.is_valid(model.inner, ci)
end

function MOI.supports(
    model::Optimizer,
    arg::MOI.AbstractConstraintAttribute,
    ::Type{MOI.ConstraintIndex{F,S}},
) where {F<:MOI.AbstractFunction,S<:MOI.AbstractSet}
    return MOI.supports(model.inner, arg, MOI.ConstraintIndex{F,S})
end

# Vectorized setter: broadcast onto the scalar method below (via _ATTRIBUTES).
function MOI.set(
    model::Optimizer,
    attr::MOI.AbstractConstraintAttribute,
    indices::Vector{<:MOI.ConstraintIndex},
    args::Vector{T},
) where {T}
    MOI.set.(model, attr, indices, args)
    return
end

# Catch-all forwarding for all variable/constraint/model/optimizer attributes.
function MOI.set(model::Optimizer, attr::_ATTRIBUTES, args...)
    return MOI.set(model.inner, attr, args...)
end

function MOI.get(model::Optimizer, attr::_ATTRIBUTES, args...)
    return MOI.get(model.inner, attr, args...)
end

function MOI.get(model::Optimizer, attr::_ATTRIBUTES, arg::Vector{T}) where {T}
    return MOI.get.(model, attr, arg)
end

function MOI.get(model::Optimizer, ::Type{MOI.VariableIndex}, args...)
    return MOI.get(model.inner, MOI.VariableIndex, args...)
end

function MOI.get(model::Optimizer, T::Type{<:MOI.ConstraintIndex}, args...)
    return MOI.get(model.inner, T, args...)
end
# Variable and constraint creation is forwarded to the inner optimizer.
MOI.add_variable(model::Optimizer) = MOI.add_variable(model.inner)

MOI.add_variables(model::Optimizer, n::Int) = MOI.add_variables(model.inner, n)

function MOI.supports_constraint(
    model::Optimizer,
    F::Type{<:MOI.AbstractFunction},
    S::Type{<:MOI.AbstractSet},
)
    return MOI.supports_constraint(model.inner, F, S)
end

function MOI.add_constraint(
    model::Optimizer,
    f::MOI.AbstractFunction,
    s::MOI.AbstractSet,
)
    return MOI.add_constraint(model.inner, f, s)
end

# The vector objective is cached here; the algorithms set scalarized versions
# of it on the inner solver during `optimize!`.
function MOI.set(
    model::Optimizer,
    ::MOI.ObjectiveFunction{F},
    f::F,
) where {F<:MOI.AbstractVectorFunction}
    model.f = f
    return
end

MOI.get(model::Optimizer, ::MOI.ObjectiveFunctionType) = typeof(model.f)

MOI.get(model::Optimizer, ::MOI.ObjectiveFunction) = model.f

# Include the cached objective in the attribute list, since the inner model
# does not know about it.
function MOI.get(model::Optimizer, attr::MOI.ListOfModelAttributesSet)
    ret = MOI.get(model.inner, attr)
    if model.f !== nothing
        F = MOI.get(model, MOI.ObjectiveFunctionType())
        push!(ret, MOI.ObjectiveFunction{F}())
    end
    return ret
end

function MOI.delete(model::Optimizer, x::MOI.VariableIndex)
    # Variables cannot be removed from a nonlinear expression tree.
    if model.f isa MOI.VectorNonlinearFunction
        throw(MOI.DeleteNotAllowed(x))
    end
    MOI.delete(model.inner, x)
    if model.f !== nothing
        # Keep the cached objective consistent with the inner model; drop it
        # entirely if no objective rows remain.
        model.f = MOI.Utilities.remove_variable(model.f, x)
        if MOI.output_dimension(model.f) == 0
            model.f = nothing
        end
    end
    return
end

function MOI.delete(model::Optimizer, ci::MOI.ConstraintIndex)
    MOI.delete(model.inner, ci)
    return
end
# Entry point of the solve:
#   1. reset cached results;
#   2. compute the ideal point by optimizing each scalar objective separately;
#   3. delegate to the selected algorithm (or the default);
#   4. restore the inner solver's time limit and record the solve time.
function MOI.optimize!(model::Optimizer)
    start_time = time()
    empty!(model.solutions)
    model.termination_status = MOI.OPTIMIZE_NOT_CALLED
    if model.f === nothing
        # No multi-objective function was set.
        model.termination_status = MOI.INVALID_MODEL
        return
    end
    objectives = MOI.Utilities.eachscalar(model.f)
    model.ideal_point = fill(NaN, length(objectives))
    for (i, f) in enumerate(objectives)
        # Solve the single-objective problem for row i; entries stay NaN if
        # the scalar solve is not optimal.
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f)}(), f)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if _is_scalar_status_optimal(status)
            model.ideal_point[i] = MOI.get(model.inner, MOI.ObjectiveValue())
        end
    end
    algorithm = something(model.algorithm, default(Algorithm()))
    status, solutions = optimize_multiobjective!(algorithm, model)
    model.termination_status = status
    if solutions !== nothing
        model.solutions = solutions
    end
    # Undo any per-subproblem time limit set by `_time_limit_exceeded`.
    if MOI.supports(model.inner, MOI.TimeLimitSec())
        MOI.set(model.inner, MOI.TimeLimitSec(), nothing)
    end
    model.solve_time = time() - start_time
    return
end
# Each Pareto point is exposed as one MOI "result".
MOI.get(model::Optimizer, ::MOI.ResultCount) = length(model.solutions)

function MOI.get(model::Optimizer, ::MOI.RawStatusString)
    n = MOI.get(model, MOI.ResultCount())
    return "Solve complete. Found $n solution(s)"
end

function MOI.get(
    model::Optimizer,
    attr::MOI.VariablePrimal,
    x::MOI.VariableIndex,
)
    sol = model.solutions[attr.result_index]
    return sol.x[x]
end

# The objective value of a result is the full objective vector.
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
    return model.solutions[attr.result_index].y
end

# The ideal point acts as a (componentwise) bound on the frontier.
MOI.get(model::Optimizer, ::MOI.ObjectiveBound) = model.ideal_point

MOI.get(model::Optimizer, ::MOI.TerminationStatus) = model.termination_status

function MOI.get(model::Optimizer, attr::MOI.PrimalStatus)
    if 1 <= attr.result_index <= length(model.solutions)
        return MOI.FEASIBLE_POINT
    end
    return MOI.NO_SOLUTION
end

# Dual solutions are not meaningful for the multi-objective wrapper.
MOI.get(::Optimizer, ::MOI.DualStatus) = MOI.NO_SOLUTION
# Query the inner solver's current primal solution and evaluate the (possibly
# vector-valued) function `f` at it. Returns `(X, Y)` where `X` maps variables
# to values and `Y` is the value of `f`.
function _compute_point(
    model::Optimizer,
    variables::Vector{MOI.VariableIndex},
    f,
)
    X = Dict{MOI.VariableIndex,Float64}(
        x => MOI.get(model.inner, MOI.VariablePrimal(), x) for x in variables
    )
    Y = MOI.Utilities.eval_variables(Base.Fix1(getindex, X), model, f)
    return X, Y
end
# Predicates that classify the status of a scalar subproblem solve.

function _is_scalar_status_feasible_point(status::MOI.ResultStatusCode)
    return status == MOI.FEASIBLE_POINT
end

# A scalar subproblem counts as successfully solved when the inner solver
# reports either global or local optimality.
function _is_scalar_status_optimal(status::MOI.TerminationStatusCode)
    return status in (MOI.OPTIMAL, MOI.LOCALLY_SOLVED)
end

function _is_scalar_status_optimal(model::Optimizer)
    return _is_scalar_status_optimal(
        MOI.get(model.inner, MOI.TerminationStatus()),
    )
end
# Warn the user that objective `index` has an unbounded anti-ideal point,
# which makes `algorithm` inapplicable, and suggest how to bound it.
function _warn_on_nonfinite_anti_ideal(algorithm, sense, index)
    alg = string(typeof(algorithm))
    # For minimization the anti-ideal is an upper bound on the objective; for
    # maximization it is a lower bound.
    direction = sense == MOI.MIN_SENSE ? "above" : "below"
    bound = sense == MOI.MIN_SENSE ? "upper" : "lower"
    @warn(
        "Unable to solve the model using the `$alg` algorithm because the " *
        "anti-ideal point of objective $index is not bounded $direction, and the " *
        "algorithm requires a finitely bounded objective domain. The easiest " *
        "way to fix this is to add objective $index as a constraint with a " *
        # Fixed typo in the user-facing message: "Alteratively" -> "Alternatively".
        "finite $bound. Alternatively, ensure that all of your decision " *
        "variables have finite lower and upper bounds."
    )
    return
end
# Drop coordinate `axis` from `x`, projecting the point onto the remaining
# axes. An out-of-range `axis` returns a copy of `x` unchanged.
function _project(x::Vector{Float64}, axis::Int)
    return x[setdiff(eachindex(x), axis)]
end
# Load every algorithm implementation from the `algorithms/` subdirectory.
for file in readdir(joinpath(@__DIR__, "algorithms"))
    # The check for .jl is necessary because some users may have other files
    # like .cov from running code coverage. See JuMP.jl#3746.
    if endswith(file, ".jl")
        include(joinpath(@__DIR__, "algorithms", file))
    end
end
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 4455 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
    Chalmet()

`Chalmet` implements the algorithm of:

Chalmet, L.G., and Lemonidis, L., and Elzinga, D.J. (1986). An algorithm for the
bi-criterion integer programming problem. European Journal of Operational
Research. 25(2), 292-300

## Supported optimizer attributes

 * `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
   list of current solutions.
"""
mutable struct Chalmet <: AbstractAlgorithm end
# Solve min f1 + f2 subject to fi <= rhs[i] - 1 for each objective, i.e.
# search for a feasible point strictly inside the rectangle bounded by `rhs`.
# The temporary constraints are removed before returning. Returns
# `(status, point)` where `point === nothing` if the solve was not optimal.
function _solve_constrained_model(
    model::Optimizer,
    ::Chalmet,
    rhs::Vector{Float64},
)
    f = MOI.Utilities.scalarize(model.f)
    g = sum(1.0 * fi for fi in f)
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(g)}(), g)
    # `- 1` forces strict improvement; this assumes integer-valued objectives
    # (the Chalmet algorithm targets bi-criterion integer programs).
    sets = MOI.LessThan.(rhs .- 1)
    c = MOI.Utilities.normalize_and_add_constraint.(model.inner, f, sets)
    MOI.optimize!(model.inner)
    MOI.delete.(model, c)
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    X, Y = _compute_point(model, variables, model.f)
    return status, SolutionPoint(X, Y)
end
"""
    optimize_multiobjective!(algorithm::Chalmet, model::Optimizer)

Run the Chalmet et al. (1986) bi-objective algorithm on `model`.

Maximization problems are handled by negating the objective, solving the
resulting minimization problem, and negating the objective values of the
solutions.

Returns `(status, solutions)` where `solutions::Vector{SolutionPoint}`, or
`(status, nothing)` if a scalar subproblem failed.
"""
function optimize_multiobjective!(algorithm::Chalmet, model::Optimizer)
    start_time = time()
    if MOI.output_dimension(model.f) != 2
        error("Chalmet requires exactly two objectives")
    end
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    if sense == MOI.MAX_SENSE
        # Solve the negated MIN problem, then flip the signs back.
        old_obj, neg_obj = copy(model.f), -model.f
        MOI.set(model, MOI.ObjectiveFunction{typeof(neg_obj)}(), neg_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
        status, solutions = optimize_multiobjective!(algorithm, model)
        MOI.set(model, MOI.ObjectiveFunction{typeof(old_obj)}(), old_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        if solutions !== nothing
            solutions = [SolutionPoint(s.x, -s.y) for s in solutions]
        end
        return status, solutions
    end
    solutions = SolutionPoint[]
    E = Tuple{Int,Int}[]  # pairs proven to have no solution between them
    Q = Tuple{Int,Int}[]  # pairs of solution indices still to explore
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    f1, f2 = MOI.Utilities.scalarize(model.f)
    y1, y2 = zeros(2), zeros(2)
    # First anchor: lexicographically minimize f2, then f1 subject to f2's
    # optimal value.
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f2)}(), f2)
    MOI.optimize!(model.inner)
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    _, y1[2] = _compute_point(model, variables, f2)
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f1)}(), f1)
    y1_constraint = MOI.Utilities.normalize_and_add_constraint(
        model.inner,
        f2,
        MOI.LessThan(y1[2]),
    )
    MOI.optimize!(model.inner)
    # Robustness fix: check the subproblem status before querying the point
    # (the original code assumed optimality here).
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        MOI.delete(model.inner, y1_constraint)
        return status, nothing
    end
    x1, y1[1] = _compute_point(model, variables, f1)
    MOI.delete(model.inner, y1_constraint)
    push!(solutions, SolutionPoint(x1, y1))
    # Second anchor: lexicographically minimize f1, then f2.
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f1)}(), f1)
    MOI.optimize!(model.inner)
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    _, y2[1] = _compute_point(model, variables, f1)
    if y2[1] ≈ solutions[1].y[1]
        # The anchors coincide: the frontier is a single point.
        # Bug fix: this used to `return MOI.OPTIMAL, [solutions]`, which
        # wrongly produced a Vector{Vector{SolutionPoint}}.
        return MOI.OPTIMAL, [solutions[1]]
    end
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f2)}(), f2)
    y2_constraint = MOI.Utilities.normalize_and_add_constraint(
        model.inner,
        f1,
        MOI.LessThan(y2[1]),
    )
    MOI.optimize!(model.inner)
    # Robustness fix: same status check as for the first anchor.
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        MOI.delete(model.inner, y2_constraint)
        return status, nothing
    end
    x2, y2[2] = _compute_point(model, variables, f2)
    MOI.delete(model.inner, y2_constraint)
    push!(solutions, SolutionPoint(x2, y2))
    push!(Q, (1, 2))
    t = 3  # index that the next discovered solution will occupy
    while !isempty(Q)
        if _time_limit_exceeded(model, start_time)
            return MOI.TIME_LIMIT, solutions
        end
        r, s = pop!(Q)
        yr, ys = solutions[r].y, solutions[s].y
        # Search strictly inside the rectangle spanned by solutions r and s.
        rhs = [max(yr[1], ys[1]), max(yr[2], ys[2])]
        status, solution = _solve_constrained_model(model, algorithm, rhs)
        if !_is_scalar_status_optimal(status)
            push!(E, (r, s))  # no new point exists between r and s
            continue
        end
        push!(solutions, solution)
        append!(Q, [(r, t), (t, s)])
        t += 1
    end
    return MOI.OPTIMAL, solutions
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 4107 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
    Dichotomy()

A solver that implements the algorithm of:

Y. P. Aneja, K. P. K. Nair, (1979) Bicriteria Transportation Problem. Management
Science 25(1), 73-78.

## Supported optimizer attributes

 * `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
   list of current solutions.

 * `MOA.SolutionLimit()`: terminate once this many solutions have been found.
"""
mutable struct Dichotomy <: AbstractAlgorithm
    # `nothing` means "use the attribute default" (see `MOI.get` below).
    solution_limit::Union{Nothing,Int}

    Dichotomy() = new(nothing)
end

"""
    NISE()

A solver that implements the Non-Inferior Set Estimation algorithm of:

Cohon, J. L., Church, R. L., & Sheer, D. P. (1979). Generating multiobjective
trade‐offs: An algorithm for bicriterion problems. Water Resources Research,
15(5), 1001-1010.

!!! note
    This algorithm is identical to `Dichotomy()`, and it may be removed in a
    future release.

## Supported optimizer attributes

 * `MOA.SolutionLimit()`
"""
NISE() = Dichotomy()

MOI.supports(::Dichotomy, ::SolutionLimit) = true

function MOI.set(alg::Dichotomy, ::SolutionLimit, value)
    alg.solution_limit = value
    return
end

function MOI.get(alg::Dichotomy, attr::SolutionLimit)
    return something(alg.solution_limit, default(alg, attr))
end
# Convenience method: a single scalar weight `w` means the weight vector
# `[w, 1 - w]` for a bi-objective problem.
function _solve_weighted_sum(model::Optimizer, alg::Dichotomy, weight::Float64)
    return _solve_weighted_sum(model, alg, [weight, 1 - weight])
end

# Solve the scalarized problem `min/max weights' * f`. Returns
# `(status, point)` where `point === nothing` if the solve was not optimal.
function _solve_weighted_sum(
    model::Optimizer,
    ::Dichotomy,
    weights::Vector{Float64},
)
    f = _scalarise(model.f, weights)
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model.inner)
    status = MOI.get(model.inner, MOI.TerminationStatus())
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    X, Y = _compute_point(model, variables, model.f)
    return status, SolutionPoint(X, Y)
end
# Recursively bisect the weight space [0, 1]: solve the two extreme weighted
# sums, then repeatedly solve the weight at which the supporting lines of two
# adjacent solutions intersect, until no new solutions are found.
function optimize_multiobjective!(algorithm::Dichotomy, model::Optimizer)
    start_time = time()
    if MOI.output_dimension(model.f) > 2
        error("Only scalar or bi-objective problems supported.")
    end
    if MOI.output_dimension(model.f) == 1
        # Degenerate single-objective case: one weighted-sum solve suffices.
        status, solution = _solve_weighted_sum(model, algorithm, [1.0])
        return status, [solution]
    end
    solutions = Dict{Float64,SolutionPoint}()
    # Anchor solutions at the extreme weights 0 and 1.
    for w in (0.0, 1.0)
        status, solution = _solve_weighted_sum(model, algorithm, w)
        if !_is_scalar_status_optimal(status)
            return status, nothing
        end
        solutions[w] = solution
    end
    queue = Tuple{Float64,Float64}[]
    if !(solutions[0.0] ≈ solutions[1.0])
        push!(queue, (0.0, 1.0))
    end
    limit = MOI.get(algorithm, SolutionLimit())
    status = MOI.OPTIMAL
    while length(queue) > 0 && length(solutions) < limit
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        (a, b) = popfirst!(queue)
        # Weight at which the objectives of solutions `a` and `b` trade off
        # equally (intersection of their supporting hyperplanes).
        y_d = solutions[a].y .- solutions[b].y
        w = y_d[2] / (y_d[2] - y_d[1])
        status, solution = _solve_weighted_sum(model, algorithm, w)
        if !_is_scalar_status_optimal(status)
            # Exit the solve with some error.
            return status, nothing
        elseif solution ≈ solutions[a] || solution ≈ solutions[b]
            # We have found an existing solution. We're free to prune (a, b)
            # from the search space.
        else
            # Solution is distinct from `a` and `b`, so search the domains
            # (a, w) and (w, b), and add it as a new Pareto-optimal solution!
            push!(queue, (a, w))
            push!(queue, (w, b))
            solutions[w] = solution
        end
    end
    # Sort by decreasing weight so solutions run along the frontier.
    solution_list =
        [solutions[w] for w in sort(collect(keys(solutions)); rev = true)]
    return status, solution_list
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 7249 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
    DominguezRios()

`DominguezRios` implements the algorithm of:

Dominguez-Rios, M.A. & Chicano, F., & Alba, E. (2021). Effective anytime
algorithm for multiobjective combinatorial optimization problems. Information
Sciences, 565(7), 210-228.

## Supported optimizer attributes

 * `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
   list of current solutions.
"""
mutable struct DominguezRios <: AbstractAlgorithm end
# An axis-aligned box in objective space, used by the Dominguez-Rios
# algorithm to track the regions that may still contain nondominated points.
mutable struct _DominguezRiosBox
    l::Vector{Float64}  # lower corner of the box
    u::Vector{Float64}  # upper corner of the box
    priority::Float64   # scaled volume used to select the next box to probe
    function _DominguezRiosBox(
        l::Vector{Float64},
        u::Vector{Float64},
        p::Float64 = 0.0,
    )
        # Validate with an explicit exception instead of `@assert`, which can
        # be disabled at higher optimization levels.
        if length(l) != length(u)
            throw(DimensionMismatch("Dimension mismatch between l and u"))
        end
        return new(l, u, p)
    end
end
# Priority of the box `[l, u]`: its volume scaled by the bounding box
# `[yI, yN]`. For the last axis (`i == length(z)`), the volume of the
# dominated corner `[l, z]` is subtracted.
function _reduced_scaled_priority(
    l::Vector{Float64},
    u::Vector{Float64},
    i::Int,
    z::Vector{Float64},
    yI::Vector{Float64},
    yN::Vector{Float64},
)
    span = yN - yI
    scaled_volume = prod((u - l) ./ span)
    if i != length(z)
        return scaled_volume
    end
    return scaled_volume - prod((z - l) ./ span)
end
# Split box `B` around the point `z` (clamped into the box as `ẑ`) into one
# sub-box per objective dimension, assigning each its reduced scaled priority.
function _p_partition(
    B::_DominguezRiosBox,
    z::Vector{Float64},
    yI::Vector{Float64},
    yN::Vector{Float64},
)
    ẑ = max.(z, B.l)
    ret = _DominguezRiosBox[]
    for i in 1:length(z)
        # Sub-box i keeps B's bounds on axes 1:i and ẑ's values elsewhere;
        # its upper bound on axis i is cut down to ẑ[i].
        new_l = vcat(B.l[1:i], ẑ[i+1:end])
        new_u = vcat(B.u[1:i-1], ẑ[i], B.u[i+1:end])
        new_priority = _reduced_scaled_priority(new_l, new_u, i, ẑ, yI, yN)
        push!(ret, _DominguezRiosBox(new_l, new_u, new_priority))
    end
    return ret
end
# Pick the next box to probe: advance `k` cyclically to the next non-empty
# list in `L`, and return `(i, k)` where `i` is the index of the
# highest-priority box in `L[k]`.
function _select_next_box(L::Vector{Vector{_DominguezRiosBox}}, k::Int)
    p = length(L)
    # Bug fix: the original left `i` undefined when every list was empty,
    # raising an `UndefVarError` on `return i, k`. Fail with an explicit
    # error instead. (Callers guard with `any(!isempty, L)`, so this does not
    # trigger in normal operation.)
    if all(isempty, L)
        throw(ArgumentError("_select_next_box requires a non-empty list in L"))
    end
    k = k % p + 1
    while isempty(L[k])
        k = k % p + 1
    end
    i = argmax([B.priority for B in L[k]])
    return i, k
end
# Merge box `B` into box `A` (valid only when A.u <= B.u componentwise): the
# result spans from the componentwise-minimum lower corner to B's upper
# corner, with a freshly computed priority.
function _join(
    A::_DominguezRiosBox,
    B::_DominguezRiosBox,
    i::Int,
    z::Vector{Float64},
    yI::Vector{Float64},
    yN::Vector{Float64},
)
    lᵃ, uᵃ, lᵇ, uᵇ = A.l, A.u, B.l, B.u
    @assert all(uᵃ .<= uᵇ) "`join` operation not valid. (uᵃ ≰ uᵇ)"
    lᶜ, uᶜ = min.(lᵃ, lᵇ), uᵇ
    ẑ = max.(z, lᶜ)
    priority = _reduced_scaled_priority(lᶜ, uᶜ, i, ẑ, yI, yN)
    return _DominguezRiosBox(lᶜ, uᶜ, priority)
end

# A box is "empty" when it has (approximately) zero width along any axis.
function Base.isempty(B::_DominguezRiosBox)
    return any(isapprox(B.l[i], B.u[i]) for i in 1:length(B.u))
end
# Incorporate a newly found point `z` into the box lists `L` (one list per
# objective): split every box whose interior contains `z`, then merge boxes
# whose upper corners are comparable to keep the lists small.
function _update!(
    L::Vector{Vector{_DominguezRiosBox}},
    z::Vector{Float64},
    yI::Vector{Float64},
    yN::Vector{Float64},
)
    # Phase 1: partition every box that strictly contains `z` below its upper
    # corner; unaffected boxes carry over unchanged.
    T = [_DominguezRiosBox[] for _ in 1:length(L)]
    for j in 1:length(L)
        for B in L[j]
            if all(z .< B.u)
                for (i, Bᵢ) in enumerate(_p_partition(B, z, yI, yN))
                    if !isempty(Bᵢ)
                        push!(T[i], Bᵢ)
                    end
                end
            else
                push!(T[j], B)
            end
        end
    end
    L .= T
    # Phase 2: within each list, join boxes whose upper corners are
    # componentwise comparable, removing the absorbed boxes.
    # NOTE(review): `i`/`N` bookkeeping after `deleteat!` assumes the removed
    # indices are all > i (true because j ranges over i:N, j != i) — confirm
    # if modifying this loop.
    for k in 1:length(L)
        i = 1
        N = length(L[k])
        while i < N
            index_to_remove = Int[]
            for j in i:N
                if i != j
                    if all(L[k][i].u .<= L[k][j].u)
                        L[k][i] = _join(L[k][i], L[k][j], k, z, yI, yN)
                        push!(index_to_remove, j)
                    elseif all(L[k][i].u .>= L[k][j].u)
                        L[k][i] = _join(L[k][j], L[k][i], k, z, yI, yN)
                        push!(index_to_remove, j)
                    end
                end
            end
            i += 1
            N -= length(index_to_remove)
            deleteat!(L[k], index_to_remove)
        end
    end
    return
end
# Main loop of the Dominguez-Rios anytime algorithm:
#   1. reduce MAX problems to MIN by negation;
#   2. estimate the ideal point `yI` and (shifted) nadir point `yN`;
#   3. repeatedly pick the highest-priority box, solve a weighted Chebyshev
#      scalarization restricted to it, and either record the point and split
#      the boxes, or discard the box.
function optimize_multiobjective!(algorithm::DominguezRios, model::Optimizer)
    start_time = time()
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    if sense == MOI.MAX_SENSE
        # Solve the negated MIN problem, then flip the signs back.
        old_obj, neg_obj = copy(model.f), -model.f
        MOI.set(model, MOI.ObjectiveFunction{typeof(neg_obj)}(), neg_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
        status, solutions = optimize_multiobjective!(algorithm, model)
        MOI.set(model, MOI.ObjectiveFunction{typeof(old_obj)}(), old_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        if solutions !== nothing
            solutions = [SolutionPoint(s.x, -s.y) for s in solutions]
        end
        return status, solutions
    end
    n = MOI.output_dimension(model.f)
    L = [_DominguezRiosBox[] for i in 1:n]
    scalars = MOI.Utilities.scalarize(model.f)
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    yI, yN = zeros(n), zeros(n)
    # Ideal and Nadir point estimation
    for (i, f_i) in enumerate(scalars)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f_i)}(), f_i)
        MOI.set(model.inner, MOI.ObjectiveSense(), sense)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yI[i] = Y
        # The anti-ideal (reverse-sense optimum) must be finite for the
        # algorithm's bounding box to exist.
        rev_sense = sense == MOI.MIN_SENSE ? MOI.MAX_SENSE : MOI.MIN_SENSE
        MOI.set(model.inner, MOI.ObjectiveSense(), rev_sense)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            _warn_on_nonfinite_anti_ideal(algorithm, sense, i)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yN[i] = Y + 1  # shift by one so the box strictly contains the frontier
    end
    MOI.set(model.inner, MOI.ObjectiveSense(), sense)
    ϵ = 1 / (2 * n * (maximum(yN - yI) - 1))
    push!(L[1], _DominguezRiosBox(yI, yN, 0.0))
    # Auxiliary variable for the Chebyshev (min-max) scalarization.
    t_max = MOI.add_variable(model.inner)
    solutions = SolutionPoint[]
    k = 0
    status = MOI.OPTIMAL
    while any(!isempty(l) for l in L)
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        i, k = _select_next_box(L, k)
        B = L[k][i]
        w = 1 ./ max.(1, B.u - yI)
        # t_max >= w[i] * (f_i - yI[i]) for all i, i.e. t_max is the weighted
        # Chebyshev distance from the ideal point.
        constraints = [
            MOI.Utilities.normalize_and_add_constraint(
                model.inner,
                t_max - (w[i] * (scalars[i] - yI[i])),
                MOI.GreaterThan(0.0),
            ) for i in 1:n
        ]
        # Augmented objective: ϵ-term breaks ties toward Pareto-optimality.
        new_f = t_max + ϵ * sum(w[i] * (scalars[i] - yI[i]) for i in 1:n)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(new_f)}(), new_f)
        MOI.optimize!(model.inner)
        if _is_scalar_status_optimal(model)
            X, Y = _compute_point(model, variables, model.f)
            obj = MOI.get(model.inner, MOI.ObjectiveValue())
            if (obj < 1) && all(yI .< B.u)
                # A point inside the box: record it and re-partition the boxes.
                push!(solutions, SolutionPoint(X, Y))
                _update!(L, Y, yI, yN)
            else
                # No improving point inside this box: discard it.
                deleteat!(L[k], i)
            end
        end
        MOI.delete.(model.inner, constraints)
    end
    MOI.delete(model.inner, t_max)
    return status, solutions
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 4865 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
EpsilonConstraint()
`EpsilonConstraint` implements the epsilon-constraint algorithm for
bi-objective programs.
## Supported optimizer attributes
* `MOA.EpsilonConstraintStep()`: `EpsilonConstraint` uses this value
as the epsilon by which it partitions the first-objective's space. The
default is `1`, so that for a pure integer program this algorithm will
enumerate all non-dominated solutions.
* `MOA.SolutionLimit()`: if this attribute is set then, instead of using the
`MOA.EpsilonConstraintStep`, with a slight abuse of notation,
`EpsilonConstraint` divides the width of the first-objective's domain in
objective space by `SolutionLimit` to obtain the epsilon to use when
iterating. Thus, there can be at most `SolutionLimit` solutions returned, but
there may be fewer.
"""
mutable struct EpsilonConstraint <: AbstractAlgorithm
solution_limit::Union{Nothing,Int}
atol::Union{Nothing,Float64}
EpsilonConstraint() = new(nothing, nothing)
end
# Attribute plumbing: each option is stored as `nothing` until explicitly set,
# and `MOI.get` falls back to the package-level default.
MOI.supports(::EpsilonConstraint, ::SolutionLimit) = true
function MOI.set(alg::EpsilonConstraint, ::SolutionLimit, value)
    alg.solution_limit = value
    return
end
function MOI.get(alg::EpsilonConstraint, attr::SolutionLimit)
    return something(alg.solution_limit, default(alg, attr))
end
MOI.supports(::EpsilonConstraint, ::EpsilonConstraintStep) = true
function MOI.set(alg::EpsilonConstraint, ::EpsilonConstraintStep, value)
    alg.atol = value
    return
end
function MOI.get(alg::EpsilonConstraint, attr::EpsilonConstraintStep)
    return something(alg.atol, default(alg, attr))
end
# Deprecated alias kept for backwards compatibility: `ObjectiveAbsoluteTolerance`
# forwards to `EpsilonConstraintStep` with a warning.
MOI.supports(::EpsilonConstraint, ::ObjectiveAbsoluteTolerance) = true
function MOI.set(alg::EpsilonConstraint, ::ObjectiveAbsoluteTolerance, value)
    @warn("This attribute is deprecated. Use `EpsilonConstraintStep` instead.")
    MOI.set(alg, EpsilonConstraintStep(), value)
    return
end
function MOI.get(alg::EpsilonConstraint, ::ObjectiveAbsoluteTolerance)
    @warn("This attribute is deprecated. Use `EpsilonConstraintStep` instead.")
    return MOI.get(alg, EpsilonConstraintStep())
end
function optimize_multiobjective!(
    algorithm::EpsilonConstraint,
    model::Optimizer,
)
    start_time = time()
    if MOI.output_dimension(model.f) != 2
        error("EpsilonConstraint requires exactly two objectives")
    end
    # Compute the bounding box of the objectives using Hierarchical().
    alg = Hierarchical()
    MOI.set.(Ref(alg), ObjectivePriority.(1:2), [1, 0])
    status, solution_1 = optimize_multiobjective!(alg, model)
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    MOI.set(alg, ObjectivePriority(2), 2)
    status, solution_2 = optimize_multiobjective!(alg, model)
    if !_is_scalar_status_optimal(status)
        return status, nothing
    end
    # `left`/`right` bracket the attainable range of the first objective.
    a, b = solution_1[1].y[1], solution_2[1].y[1]
    left, right = min(a, b), max(a, b)
    # Compute the epsilon that we will be incrementing by each iteration
    ε = MOI.get(algorithm, EpsilonConstraintStep())
    n_points = MOI.get(algorithm, SolutionLimit())
    if n_points != default(algorithm, SolutionLimit())
        # A user-provided SolutionLimit overrides the step: divide the range
        # into `n_points - 1` equal intervals.
        ε = abs(right - left) / (n_points - 1)
    end
    solutions = SolutionPoint[]
    f1, f2 = MOI.Utilities.eachscalar(model.f)
    MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f2)}(), f2)
    # Add epsilon constraint
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    SetType, bound = if sense == MOI.MIN_SENSE
        MOI.LessThan{Float64}, right
    else
        MOI.GreaterThan{Float64}, left
    end
    constant = MOI.constant(f1, Float64)
    ci = MOI.Utilities.normalize_and_add_constraint(
        model,
        f1,
        SetType(bound);
        allow_modify_function = true,
    )
    # normalize_and_add_constraint moved `constant` into the set, so track the
    # bound in the normalized space from here on.
    bound -= constant
    status = MOI.OPTIMAL
    for _ in 1:n_points
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        MOI.set(model, MOI.ConstraintSet(), ci, SetType(bound))
        MOI.optimize!(model.inner)
        if !_is_scalar_status_optimal(model)
            break
        end
        X, Y = _compute_point(model, variables, model.f)
        # Skip a point that is (approximately) identical to the previous one.
        if isempty(solutions) || !(Y ≈ solutions[end].y)
            push!(solutions, SolutionPoint(X, Y))
        end
        # Tighten the bound past the point just found, by at least ε.
        if sense == MOI.MIN_SENSE
            bound = min(Y[1] - constant - ε, bound - ε)
        else
            bound = max(Y[1] - constant + ε, bound + ε)
        end
    end
    MOI.delete(model, ci)
    return status, filter_nondominated(sense, solutions)
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 4581 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
Hierarchical()
`Hierarchical` implements an algorithm that returns a single point via an
iterative scheme.
First, it partitions the objectives into sets according to
`MOA.ObjectivePriority`. Then, in order of decreasing priority, it formulates a
single-objective problem by scalarizing all of the objectives with the same
priority using `MOA.ObjectiveWeight`. Next, it constrains those objectives such
that they can be at most `MOA.ObjectiveRelativeTolerance` worse than optimal in
future solves. Finally, it steps to the next set of prioritized objectives.
The solution is a single point that trades off the various objectives. It does
not record the partial solutions that were found along the way.
## Supported optimizer attributes
* `MOA.ObjectivePriority`
* `MOA.ObjectiveWeight`
* `MOA.ObjectiveRelativeTolerance`
"""
mutable struct Hierarchical <: AbstractAlgorithm
    priorities::Vector{Int}   # `ObjectivePriority`, indexed by objective
    weights::Vector{Float64}  # `ObjectiveWeight`, indexed by objective
    rtol::Vector{Float64}     # `ObjectiveRelativeTolerance`, indexed by objective
    Hierarchical() = new(Int[], Float64[], Float64[])
end
MOI.supports(::Hierarchical, ::ObjectivePriority) = true
function MOI.get(alg::Hierarchical, attr::ObjectivePriority)
    # Fall back to the default when no priority was set for this index.
    return get(alg.priorities, attr.index, default(alg, attr))
end
# Pad `x` with default attribute values until `attr.index` is a valid index.
function _append_default(
    alg::Hierarchical,
    attr::AbstractAlgorithmAttribute,
    x::Vector,
)
    while length(x) < attr.index
        push!(x, default(alg, attr))
    end
    return
end
# Setters grow the backing vector with defaults (via `_append_default`) before
# writing, so attributes may be set in any index order.
function MOI.set(alg::Hierarchical, attr::ObjectivePriority, value)
    _append_default(alg, attr, alg.priorities)
    alg.priorities[attr.index] = value
    return
end
MOI.supports(::Hierarchical, ::ObjectiveWeight) = true
function MOI.get(alg::Hierarchical, attr::ObjectiveWeight)
    return get(alg.weights, attr.index, default(alg, attr))
end
function MOI.set(alg::Hierarchical, attr::ObjectiveWeight, value)
    _append_default(alg, attr, alg.weights)
    alg.weights[attr.index] = value
    return
end
MOI.supports(::Hierarchical, ::ObjectiveRelativeTolerance) = true
function MOI.get(alg::Hierarchical, attr::ObjectiveRelativeTolerance)
    return get(alg.rtol, attr.index, default(alg, attr))
end
function MOI.set(alg::Hierarchical, attr::ObjectiveRelativeTolerance, value)
    _append_default(alg, attr, alg.rtol)
    alg.rtol[attr.index] = value
    return
end
# Group objective indices by priority level, highest priority first.
# For example, `[1, 2, 2, 1]` maps to `[[2, 3], [1, 4]]`.
function _sorted_priorities(priorities::Vector{Int})
    levels = sort!(unique(priorities); rev = true)
    return [findall(==(level), priorities) for level in levels]
end
function optimize_multiobjective!(algorithm::Hierarchical, model::Optimizer)
    objectives = MOI.Utilities.eachscalar(model.f)
    N = length(objectives)
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    # Find list of objectives with same priority
    constraints = Any[]
    priorities = [MOI.get(algorithm, ObjectivePriority(i)) for i in 1:N]
    weights = [MOI.get(algorithm, ObjectiveWeight(i)) for i in 1:N]
    objective_subsets = _sorted_priorities(priorities)
    for (round, indices) in enumerate(objective_subsets)
        # Solve weighted sum
        new_vector_f = objectives[indices]
        new_f = _scalarise(new_vector_f, weights[indices])
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(new_f)}(), new_f)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            return status, nothing
        end
        # After the last priority level there is nothing left to constrain.
        if round == length(objective_subsets)
            break
        end
        # Add tolerance constraints so later rounds cannot degrade this
        # round's objectives by more than the relative tolerance.
        X, Y = _compute_point(model, variables, new_vector_f)
        sense = MOI.get(model.inner, MOI.ObjectiveSense())
        for (i, fi) in enumerate(MOI.Utilities.eachscalar(new_vector_f))
            rtol = MOI.get(algorithm, ObjectiveRelativeTolerance(i))
            set = if sense == MOI.MIN_SENSE
                MOI.LessThan(Y[i] + rtol * abs(Y[i]))
            else
                MOI.GreaterThan(Y[i] - rtol * abs(Y[i]))
            end
            ci = MOI.Utilities.normalize_and_add_constraint(model, fi, set)
            push!(constraints, ci)
        end
    end
    X, Y = _compute_point(model, variables, model.f)
    # Remove tolerance constraints
    for c in constraints
        MOI.delete(model, c)
    end
    return MOI.OPTIMAL, [SolutionPoint(X, Y)]
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 6943 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
KirlikSayin()
`KirlikSayin` implements the algorithm of:
Kirlik, G., & Sayın, S. (2014). A new algorithm for generating all nondominated
solutions of multiobjective discrete optimization problems. European Journal of
Operational Research, 232(3), 479-488.
This is an algorithm to generate all nondominated solutions for multi-objective
discrete optimization problems. The algorithm maintains `(p-1)`-dimensional
rectangle regions in the solution space, and a two-stage optimization problem
is solved for each rectangle.
## Supported optimizer attributes
* `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
list of current solutions.
"""
mutable struct KirlikSayin <: AbstractAlgorithm end  # no fields: the algorithm has no tunable options
# An axis-aligned rectangle in objective space, defined by its lower-left
# corner `l` and upper-right corner `u`.
struct _Rectangle
    l::Vector{Float64}
    u::Vector{Float64}
    function _Rectangle(lower::Vector{Float64}, upper::Vector{Float64})
        @assert length(lower) == length(upper) "Dimension mismatch between l and u"
        return new(lower, upper)
    end
end

# Volume of `r` measured from the corner `l`; used to pick the largest
# remaining search zone.
_volume(r::_Rectangle, l::Vector{Float64}) = prod(r.u .- l)

function Base.issubset(x::_Rectangle, y::_Rectangle)
    @assert length(x.l) == length(y.l) "Dimension mismatch"
    return all(x.l[i] >= y.l[i] && x.u[i] <= y.u[i] for i in eachindex(x.l))
end

# Drop every rectangle in `L` that is contained in `R`.
function _remove_rectangle(L::Vector{_Rectangle}, R::_Rectangle)
    filter!(r -> !issubset(r, R), L)
    return
end

# Cut `r` along `axis` at coordinate `f`, returning the two halves.
function _split_rectangle(r::_Rectangle, axis::Int, f::Float64)
    lower, upper = copy(r.l), copy(r.u)
    lower[axis] = f
    upper[axis] = f
    return _Rectangle(r.l, upper), _Rectangle(lower, r.u)
end
# Refine the rectangle list `L` with the new nondominated point `f`: every
# rectangle whose interior contains `f` along some coordinate is split at
# `f` along that coordinate, so the union of the rectangles is preserved.
function _update_list(L::Vector{_Rectangle}, f::Vector{Float64})
    L_new = _Rectangle[]
    for Rᵢ in L
        lᵢ, uᵢ = Rᵢ.l, Rᵢ.u
        # `T` holds the pieces of `Rᵢ` produced so far.
        T = [Rᵢ]
        for j in 1:length(f)
            if lᵢ[j] < f[j] < uᵢ[j]
                # Split every current piece along coordinate `j` at `f[j]`.
                T̄ = _Rectangle[]
                for Rₜ in T
                    a, b = _split_rectangle(Rₜ, j, f[j])
                    push!(T̄, a)
                    push!(T̄, b)
                end
                T = T̄
            end
        end
        append!(L_new, T)
    end
    return L_new
end
function optimize_multiobjective!(algorithm::KirlikSayin, model::Optimizer)
    start_time = time()
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    if sense == MOI.MAX_SENSE
        # Solve the equivalent minimization problem and negate the objective
        # values of the solutions on the way out.
        old_obj, neg_obj = copy(model.f), -model.f
        MOI.set(model, MOI.ObjectiveFunction{typeof(neg_obj)}(), neg_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
        status, solutions = optimize_multiobjective!(algorithm, model)
        MOI.set(model, MOI.ObjectiveFunction{typeof(old_obj)}(), old_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        if solutions !== nothing
            solutions = [SolutionPoint(s.x, -s.y) for s in solutions]
        end
        return status, solutions
    end
    solutions = SolutionPoint[]
    # Problem with p objectives.
    # Set k = 1, meaning the nondominated points will get projected
    # down to the objective {2, 3, ..., p}
    k = 1
    YN = Vector{Float64}[]
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    n = MOI.output_dimension(model.f)
    yI, yN = zeros(n), zeros(n)
    δ = sense == MOI.MIN_SENSE ? -1 : 1
    scalars = MOI.Utilities.scalarize(model.f)
    # Ideal and Nadir point estimation
    for (i, f_i) in enumerate(scalars)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f_i)}(), f_i)
        MOI.set(model.inner, MOI.ObjectiveSense(), sense)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yI[i] = Y + 1
        # Solve in the opposite sense to estimate the anti-ideal point.
        MOI.set(
            model.inner,
            MOI.ObjectiveSense(),
            sense == MOI.MIN_SENSE ? MOI.MAX_SENSE : MOI.MIN_SENSE,
        )
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            _warn_on_nonfinite_anti_ideal(algorithm, sense, i)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yN[i] = Y
    end
    # Reset the sense after modifying it.
    MOI.set(model.inner, MOI.ObjectiveSense(), sense)
    # Initial search region: one rectangle spanning the projected bounds.
    L = [_Rectangle(_project(yI, k), _project(yN, k))]
    SetType = ifelse(
        sense == MOI.MIN_SENSE,
        MOI.LessThan{Float64},
        MOI.GreaterThan{Float64},
    )
    status = MOI.OPTIMAL
    while !isempty(L)
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        # Pick the rectangle with the largest volume relative to the ideal.
        Rᵢ = L[argmax([_volume(Rᵢ, _project(yI, k)) for Rᵢ in L])]
        lᵢ, uᵢ = Rᵢ.l, Rᵢ.u
        # Solving the first stage model: P_k(ε)
        # Set ε := uᵢ
        ε = insert!(copy(uᵢ), k, 0.0)
        ε_constraints = Any[]
        MOI.set(
            model.inner,
            MOI.ObjectiveFunction{typeof(scalars[k])}(),
            scalars[k],
        )
        for (i, f_i) in enumerate(scalars)
            if i != k
                ci = MOI.Utilities.normalize_and_add_constraint(
                    model.inner,
                    f_i,
                    SetType(ε[i] + δ),
                )
                push!(ε_constraints, ci)
            end
        end
        MOI.optimize!(model.inner)
        if !_is_scalar_status_optimal(model)
            # P_k(ε) is infeasible: discard this region and its subsets.
            _remove_rectangle(L, _Rectangle(_project(yI, k), uᵢ))
            MOI.delete.(model, ε_constraints)
            continue
        end
        zₖ = MOI.get(model.inner, MOI.ObjectiveValue())
        # Solving the second stage model: Q_k(ε, zₖ)
        # Set objective sum(model.f)
        sum_f = sum(1.0 * s for s in scalars)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(sum_f)}(), sum_f)
        # Constraint to eliminate weak dominance
        zₖ_constraint = MOI.Utilities.normalize_and_add_constraint(
            model.inner,
            scalars[k],
            MOI.EqualTo(zₖ),
        )
        MOI.optimize!(model.inner)
        MOI.delete.(model, ε_constraints)
        MOI.delete(model, zₖ_constraint)
        if !_is_scalar_status_optimal(model)
            _remove_rectangle(L, _Rectangle(_project(yI, k), uᵢ))
            continue
        end
        X, Y = _compute_point(model, variables, model.f)
        Y_proj = _project(Y, k)
        if !(Y in YN)
            # New nondominated point: record it and refine the region list.
            push!(solutions, SolutionPoint(X, Y))
            push!(YN, Y)
            L = _update_list(L, Y_proj)
        end
        _remove_rectangle(L, _Rectangle(Y_proj, uᵢ))
    end
    return status, solutions
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 4648 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
Lexicographic()
`Lexicographic()` implements a lexigographic algorithm that returns a single
point on the frontier, corresponding to solving each objective in order.
## Supported optimizer attributes
* `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
current best solutions.
* `MOA.LexicographicAllPermutations()`: Controls whether to return the
lexicographic solution for all permutations of the scalar objectives (when
`true`), or only the solution corresponding to the lexicographic solution of
the original objective function (when `false`).
* `MOA.ObjectiveRelativeTolerance(index)`: after solving objective `index`, a
constraint is added such that the relative degradation in the objective value
of objective `index` is less than this tolerance.
"""
mutable struct Lexicographic <: AbstractAlgorithm
    rtol::Vector{Float64}   # `ObjectiveRelativeTolerance`, indexed by objective
    all_permutations::Bool  # value of `LexicographicAllPermutations()`
    function Lexicographic(; all_permutations::Union{Nothing,Bool} = nothing)
        # The keyword was removed in v1.0; warn rather than error so old code
        # still constructs the algorithm.
        if all_permutations !== nothing
            @warn(
                "The `all_permutations` argument to `Lexicographic` was " *
                "removed in v1.0. Set the `MOA.LexicographicAllPermutations()` " *
                "option to `$all_permutations` instead.",
            )
        end
        return new(Float64[], default(LexicographicAllPermutations()))
    end
end
MOI.supports(::Lexicographic, ::ObjectiveRelativeTolerance) = true

# Read the tolerance for objective `attr.index`, falling back to the default
# when it was never set.
function MOI.get(alg::Lexicographic, attr::ObjectiveRelativeTolerance)
    return get(alg.rtol, attr.index, default(alg, attr))
end

function MOI.set(alg::Lexicographic, attr::ObjectiveRelativeTolerance, value)
    # Pad with defaults so `attr.index` is a valid index, then overwrite it.
    while length(alg.rtol) < attr.index
        push!(alg.rtol, default(alg, attr))
    end
    alg.rtol[attr.index] = value
    return
end
# `LexicographicAllPermutations` is stored directly on the struct.
MOI.supports(::Lexicographic, ::LexicographicAllPermutations) = true
function MOI.get(alg::Lexicographic, ::LexicographicAllPermutations)
    return alg.all_permutations
end
function MOI.set(alg::Lexicographic, ::LexicographicAllPermutations, val::Bool)
    alg.all_permutations = val
    return
end
function optimize_multiobjective!(algorithm::Lexicographic, model::Optimizer)
    start_time = time()
    sequence = 1:MOI.output_dimension(model.f)
    if !MOI.get(algorithm, LexicographicAllPermutations())
        # Single solve in the objective order given by the user.
        return _solve_in_sequence(algorithm, model, sequence, start_time)
    end
    solutions = SolutionPoint[]
    status = MOI.OPTIMAL
    # One lexicographic solve per permutation of the objectives.
    for sequence in Combinatorics.permutations(sequence)
        status, solution =
            _solve_in_sequence(algorithm, model, sequence, start_time)
        if !isempty(solution)
            push!(solutions, solution[1])
        end
        if !_is_scalar_status_optimal(status)
            break
        end
    end
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    return status, filter_nondominated(sense, solutions)
end
# Solve the objectives one at a time in the order given by `sequence`. After
# each solve, a constraint bounds the relative degradation of that objective
# in subsequent solves. Returns `(status, solution)` where `solution` is a
# 0- or 1-element vector of `SolutionPoint`s.
function _solve_in_sequence(
    algorithm::Lexicographic,
    model::Optimizer,
    sequence::AbstractVector{Int},
    start_time::Float64,
)
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    constraints = Any[]
    scalars = MOI.Utilities.eachscalar(model.f)
    solution = SolutionPoint[]
    status = MOI.OPTIMAL
    for i in sequence
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        f = scalars[i]
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f)}(), f)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        primal_status = MOI.get(model.inner, MOI.PrimalStatus())
        # Keep the incumbent even if the solve was not proven optimal, so a
        # time-limited run still returns its best point.
        if _is_scalar_status_feasible_point(primal_status)
            X, Y = _compute_point(model, variables, model.f)
            solution = [SolutionPoint(X, Y)]
        end
        if !_is_scalar_status_optimal(status)
            break
        end
        # Bound the degradation of objective `i` for the remaining solves.
        X, Y = _compute_point(model, variables, f)
        rtol = MOI.get(algorithm, ObjectiveRelativeTolerance(i))
        set = if MOI.get(model.inner, MOI.ObjectiveSense()) == MOI.MIN_SENSE
            MOI.LessThan(Y + rtol * abs(Y))
        else
            MOI.GreaterThan(Y - rtol * abs(Y))
        end
        ci = MOI.Utilities.normalize_and_add_constraint(model, f, set)
        push!(constraints, ci)
    end
    # Restore the model by removing all temporary tolerance constraints.
    for c in constraints
        MOI.delete(model, c)
    end
    return status, solution
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 7566 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
TambyVanderpooten()
`TambyVanderpooten` implements the algorithm of:
Satya Tamby, Daniel Vanderpooten (2021) Enumeration of the Nondominated Set
of Multiobjective Discrete Optimization Problems. INFORMS Journal on
Computing 33(1):72-85.
This is an algorithm to generate all nondominated solutions for multi-objective
discrete optimization problems. The algorithm maintains upper bounds (for
minimization problems) and their associated defining points. At each iteration,
one of the objectives and an upper bound is picked and the single objective
reformulation is solved using one of the defining points as a starting solution.
## Supported optimizer attributes
* `MOI.TimeLimitSec()`: terminate if the time limit is exceeded and return the
list of current solutions.
"""
mutable struct TambyVanderpooten <: AbstractAlgorithm end  # no fields: the algorithm has no tunable options
# Update the set of upper bounds `U_N` (bound => defining points, one list per
# objective) after finding the new nondominated point `y`. Bounds strictly
# dominated by `y` are replaced by their children; otherwise `y` is added as a
# defining point where applicable. `yN` is the anti-ideal point.
function _update_search_region(
    U_N::Dict{Vector{Float64},Vector{Vector{Vector{Float64}}}},
    y::Vector{Float64},
    yN::Vector{Float64},
)
    bounds_to_remove = Vector{Float64}[]
    p = length(y)
    for u in keys(U_N)
        if all(y .< u)
            # `u` is strictly dominated: replace it with its p children.
            push!(bounds_to_remove, u)
            for l in 1:p
                u_l = _get_child(u, y, l)
                N = [
                    k != l ? [yi for yi in U_N[u][k] if yi[l] < y[l]] : [y]
                    for k in 1:p
                ]
                # Keep the child only if every non-trivial component still
                # has at least one defining point.
                if all(!isempty(N[k]) for k in 1:p if u_l[k] ≠ yN[k])
                    U_N[u_l] = N
                end
            end
        else
            # `y` may define `u` on a component where they coincide.
            for k in 1:p
                if (y[k] == u[k]) && all(_project(y, k) .< _project(u, k))
                    push!(U_N[u][k], y)
                end
            end
        end
    end
    for bound_to_remove in bounds_to_remove
        delete!(U_N, bound_to_remove)
    end
    return
end
# Return a copy of `u` with its `k`-th coordinate replaced by `y[k]`.
function _get_child(u::Vector{Float64}, y::Vector{Float64}, k::Int)
    @assert length(u) == length(y)
    child = copy(u)
    child[k] = y[k]
    return child
end
# Pick the most promising (objective index, upper bound) pair: the one whose
# projected box relative to the ideal point `yI` has the largest volume.
function _select_search_zone(
    U_N::Dict{Vector{Float64},Vector{Vector{Vector{Float64}}}},
    yI::Vector{Float64},
)
    # `argmax(...).I` unpacks the CartesianIndex of the best (k, u) entry.
    i, j =
        argmax([
            prod(_project(u, k) - _project(yI, k)) for k in 1:length(yI),
            u in keys(U_N)
        ]).I
    return i, collect(keys(U_N))[j]
end
function optimize_multiobjective!(
    algorithm::TambyVanderpooten,
    model::Optimizer,
)
    start_time = time()
    sense = MOI.get(model.inner, MOI.ObjectiveSense())
    if sense == MOI.MAX_SENSE
        # Solve the equivalent minimization problem and negate the objective
        # values of the solutions on the way out.
        old_obj, neg_obj = copy(model.f), -model.f
        MOI.set(model, MOI.ObjectiveFunction{typeof(neg_obj)}(), neg_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
        status, solutions = optimize_multiobjective!(algorithm, model)
        MOI.set(model, MOI.ObjectiveFunction{typeof(old_obj)}(), old_obj)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        if solutions !== nothing
            solutions = [SolutionPoint(s.x, -s.y) for s in solutions]
        end
        return status, solutions
    end
    warm_start_supported = false
    if MOI.supports(model, MOI.VariablePrimalStart(), MOI.VariableIndex)
        warm_start_supported = true
    end
    solutions = Dict{Vector{Float64},Dict{MOI.VariableIndex,Float64}}()
    YN = Vector{Float64}[]
    variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
    n = MOI.output_dimension(model.f)
    yI, yN = zeros(n), zeros(n)
    scalars = MOI.Utilities.scalarize(model.f)
    # Estimate the ideal (yI) and anti-ideal (yN) points per objective.
    for (i, f_i) in enumerate(scalars)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f_i)}(), f_i)
        MOI.set(model.inner, MOI.ObjectiveSense(), sense)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yI[i] = Y + 1
        MOI.set(model.inner, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        MOI.optimize!(model.inner)
        status = MOI.get(model.inner, MOI.TerminationStatus())
        if !_is_scalar_status_optimal(status)
            _warn_on_nonfinite_anti_ideal(algorithm, sense, i)
            return status, nothing
        end
        _, Y = _compute_point(model, variables, f_i)
        yN[i] = Y
    end
    # Reset the sense after estimating the anti-ideal point above.
    MOI.set(model.inner, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    # U_N: upper bound => one list of defining points per objective.
    U_N = Dict{Vector{Float64},Vector{Vector{Vector{Float64}}}}()
    # V[k]: history of (bound, point) pairs investigated for objective k.
    V = [Tuple{Vector{Float64},Vector{Float64}}[] for k in 1:n]
    U_N[yN] = [[_get_child(yN, yI, k)] for k in 1:n]
    status = MOI.OPTIMAL
    while !isempty(U_N)
        if _time_limit_exceeded(model, start_time)
            status = MOI.TIME_LIMIT
            break
        end
        k, u = _select_search_zone(U_N, yI)
        # First stage: minimize objective k subject to the other objectives
        # being strictly better than the bound `u`.
        MOI.set(
            model.inner,
            MOI.ObjectiveFunction{typeof(scalars[k])}(),
            scalars[k],
        )
        ε_constraints = Any[]
        for (i, f_i) in enumerate(scalars)
            if i != k
                ci = MOI.Utilities.normalize_and_add_constraint(
                    model.inner,
                    f_i,
                    MOI.LessThan{Float64}(u[i] - 1),
                )
                push!(ε_constraints, ci)
            end
        end
        if u[k] ≠ yN[k]
            # Warm start from a known solution defining this bound.
            if warm_start_supported
                variables_start = solutions[first(U_N[u][k])]
                for x_i in variables
                    MOI.set(
                        model.inner,
                        MOI.VariablePrimalStart(),
                        x_i,
                        variables_start[x_i],
                    )
                end
            end
        end
        MOI.optimize!(model.inner)
        if !_is_scalar_status_optimal(model)
            return status, nothing
        end
        y_k = MOI.get(model.inner, MOI.ObjectiveValue())
        # Second stage: among solutions with objective k fixed at y_k,
        # minimize the sum of all objectives to avoid weakly dominated points.
        sum_f = sum(1.0 * s for s in scalars)
        MOI.set(model.inner, MOI.ObjectiveFunction{typeof(sum_f)}(), sum_f)
        y_k_constraint = MOI.Utilities.normalize_and_add_constraint(
            model.inner,
            scalars[k],
            MOI.EqualTo(y_k),
        )
        MOI.optimize!(model.inner)
        if !_is_scalar_status_optimal(model)
            return status, nothing
        end
        X, Y = _compute_point(model, variables, model.f)
        MOI.delete.(model, ε_constraints)
        MOI.delete(model, y_k_constraint)
        push!(V[k], (u, Y))
        if Y ∉ U_N[u][k]
            _update_search_region(U_N, Y, yN)
            solutions[Y] = X
        end
        # Prune bounds that cannot contain further nondominated points.
        bounds_to_remove = Vector{Float64}[]
        for u_i in keys(U_N)
            for k in 1:n
                if u_i[k] == yI[k]
                    push!(bounds_to_remove, u_i)
                else
                    for (u_j, y_j) in V[k]
                        if all(_project(u_i, k) .<= _project(u_j, k)) &&
                           (y_j[k] == u_i[k])
                            push!(bounds_to_remove, u_i)
                        end
                    end
                end
            end
        end
        if !isempty(bounds_to_remove)
            for bound_to_remove in bounds_to_remove
                delete!(U_N, bound_to_remove)
            end
        end
    end
    solutions = [SolutionPoint(X, Y) for (Y, X) in solutions]
    return status, solutions
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 550 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
using Test
# Run every algorithm-specific test file, then every `test_*.jl` file in this
# directory. Each included file is self-contained and runs its own testsets.
@testset "$file" for file in readdir(joinpath(@__DIR__, "algorithms"))
    include(joinpath(@__DIR__, "algorithms", file))
end
@testset "$file" for file in readdir(@__DIR__)
    if startswith(file, "test_") && endswith(file, ".jl")
        include(joinpath(@__DIR__, file))
    end
end
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 3609 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestModel
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
# Run every `test_*` function defined in this module inside its own testset.
function run_tests()
    for name in names(@__MODULE__; all = true)
        if startswith("$name", "test_")
            @testset "$name" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end
# A cache-backed mock optimizer used to exercise the MOI API without solving.
function _mock_optimizer()
    return MOI.Utilities.MockOptimizer(
        MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
    )
end
function test_moi_runtests()
    MOI.Test.runtests(
        MOA.Optimizer(_mock_optimizer),
        MOI.Test.Config(; exclude = Any[MOI.optimize!]);
        exclude = String[
            # Skipped because of UniversalFallback in _mock_optimizer
            "test_attribute_Silent",
            "test_attribute_after_empty",
            "test_model_copy_to_UnsupportedAttribute",
            "test_model_copy_to_UnsupportedConstraint",
            "test_model_supports_constraint_ScalarAffineFunction_EqualTo",
            "test_model_supports_constraint_VariableIndex_EqualTo",
            "test_model_supports_constraint_VectorOfVariables_Nonnegatives",
        ],
    )
    return
end
# x₁ + x₂ ≤ -1 with x ≥ 0 is infeasible; check the reported statuses.
function test_infeasible()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
# Maximizing unbounded-above variables must report DUAL_INFEASIBLE.
function test_unbounded()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
# TimeLimitSec accepts integers (stored as Float64) and can be unset.
function test_time_limit()
    model = MOA.Optimizer(HiGHS.Optimizer)
    @test MOI.supports(model, MOI.TimeLimitSec())
    @test MOI.get(model, MOI.TimeLimitSec()) === nothing
    MOI.set(model, MOI.TimeLimitSec(), 2)
    @test MOI.get(model, MOI.TimeLimitSec()) === 2.0
    MOI.set(model, MOI.TimeLimitSec(), nothing)
    @test MOI.get(model, MOI.TimeLimitSec()) === nothing
    return
end
# SolveTimeSec is NaN before a solve and non-negative afterwards.
function test_solve_time()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    @test isnan(MOI.get(model, MOI.SolveTimeSec()))
    MOI.optimize!(model)
    @test MOI.get(model, MOI.SolveTimeSec()) >= 0
    return
end
end
TestModel.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 3863 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestUtilities
using Test
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
# Run every `test_*` function defined in this module inside its own testset.
function run_tests()
    for name in names(@__MODULE__; all = true)
        if startswith("$name", "test_")
            @testset "$name" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end
# Two mutually nondominated points must both survive filtering.
function test_filter_nondominated()
    x = Dict{MOI.VariableIndex,Float64}()
    solutions = [MOA.SolutionPoint(x, [0, 1]), MOA.SolutionPoint(x, [1, 0])]
    @test MOA.filter_nondominated(MOI.MIN_SENSE, solutions) == solutions
    @test MOA.filter_nondominated(MOI.MAX_SENSE, solutions) == solutions
    return
end
# The output is sorted regardless of the input ordering.
function test_filter_nondominated_sort_in_order()
    x = Dict{MOI.VariableIndex,Float64}()
    solutions = [MOA.SolutionPoint(x, [0, 1]), MOA.SolutionPoint(x, [1, 0])]
    r_solutions = reverse(solutions)
    @test MOA.filter_nondominated(MOI.MIN_SENSE, r_solutions) == solutions
    @test MOA.filter_nondominated(MOI.MAX_SENSE, r_solutions) == solutions
    return
end
# Duplicate points collapse to a single entry.
function test_filter_nondominated_remove_duplicates()
    x = Dict{MOI.VariableIndex,Float64}()
    solutions = [MOA.SolutionPoint(x, [0, 1]), MOA.SolutionPoint(x, [1, 0])]
    trial = solutions[[1, 1]]
    @test MOA.filter_nondominated(MOI.MIN_SENSE, trial) == [solutions[1]]
    @test MOA.filter_nondominated(MOI.MAX_SENSE, trial) == [solutions[1]]
    return
end
# Weakly dominated points are removed; which ones depends on the sense.
function test_filter_nondominated_weakly_dominated()
    x = Dict{MOI.VariableIndex,Float64}()
    solutions = [
        MOA.SolutionPoint(x, [0, 1]),
        MOA.SolutionPoint(x, [0.5, 1]),
        MOA.SolutionPoint(x, [1, 0]),
    ]
    @test MOA.filter_nondominated(MOI.MIN_SENSE, solutions) == solutions[[1, 3]]
    @test MOA.filter_nondominated(MOI.MAX_SENSE, solutions) == solutions[[2, 3]]
    solutions = [
        MOA.SolutionPoint(x, [0, 1]),
        MOA.SolutionPoint(x, [0.5, 1]),
        MOA.SolutionPoint(x, [0.75, 1]),
        MOA.SolutionPoint(x, [0.8, 0.5]),
        MOA.SolutionPoint(x, [0.9, 0.5]),
        MOA.SolutionPoint(x, [1, 0]),
    ]
    @test MOA.filter_nondominated(MOI.MIN_SENSE, solutions) ==
          solutions[[1, 4, 6]]
    @test MOA.filter_nondominated(MOI.MAX_SENSE, solutions) ==
          solutions[[3, 5, 6]]
    return
end
# Three-objective case with duplicates; same survivors in either sense.
function test_filter_nondominated_knapsack()
    x = Dict{MOI.VariableIndex,Float64}()
    solutions = [
        MOA.SolutionPoint(x, [0, 1, 1]),
        MOA.SolutionPoint(x, [0, 1, 1]),
        MOA.SolutionPoint(x, [1, 0, 1]),
        MOA.SolutionPoint(x, [1, 1, 0]),
        MOA.SolutionPoint(x, [1, 1, 0]),
    ]
    result = solutions[[1, 3, 4]]
    @test MOA.filter_nondominated(MOI.MIN_SENSE, solutions) == result
    @test MOA.filter_nondominated(MOI.MAX_SENSE, solutions) == result
    return
end
# Filtering must be invariant under permutations of the objective axes.
function test_filter_nondominated_triple()
    x = Dict{MOI.VariableIndex,Float64}()
    for p in MOA.Combinatorics.permutations(1:3)
        solutions = [
            MOA.SolutionPoint(x, [0, 1, 1][p]),
            MOA.SolutionPoint(x, [0, 2, 0][p]),
            MOA.SolutionPoint(x, [1, 1, 1][p]),
        ]
        # The permutation can change the ordering of the solutions that are
        # returned, so we can't use `@test min_sol == solutions[1:2]`
        min_sol = MOA.filter_nondominated(MOI.MIN_SENSE, solutions)
        @test solutions[1] in min_sol && solutions[2] in min_sol
        @test length(min_sol) == 2
        max_sol = MOA.filter_nondominated(MOI.MAX_SENSE, solutions)
        @test solutions[2] in max_sol && solutions[3] in max_sol
        @test length(max_sol) == 2
    end
    return
end
end
TestUtilities.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 6710 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestChalmet
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
for name in names(@__MODULE__; all = true)
if startswith("$name", "test_")
@testset "$name" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
# Bi-objective 0-1 knapsack solved with the Chalmet algorithm, posed as a
# minimization by negating the profit matrix `C`.
function test_knapsack_min()
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:2 for j in 1:n
        ],
        [0.0, 0.0],
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors.
    X_E = Float64[
        0 0 1 1 1 0 1 1 1 1
        1 0 1 1 1 0 1 1 0 1
        0 1 1 1 1 0 1 0 1 1
    ]
    Y_N = Float64[
        -2854 -4636
        -3394 -3817
        -3042 -4627
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    @test isapprox(x_sol, X_E'; atol = 1e-6)
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)
    @test isapprox(y_sol, Y_N'; atol = 1e-6)
    return
end
# Maximization form of the same bi-objective knapsack instance.  The first
# objective carries a `1.0` constant, so `Y_N`'s first column is the pure
# profit plus one (compare with `test_knapsack_min`).
function test_knapsack_max()
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:2 for j in 1:n
        ],
        [1.0, 0.0],
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors (first column shifted by the +1 constant).
    X_E = Float64[
        0 0 1 1 1 0 1 1 1 1
        1 0 1 1 1 0 1 1 0 1
        0 1 1 1 1 0 1 0 1 1
    ]
    Y_N = Float64[
        2855 4636
        3395 3817
        3043 4627
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    @test isapprox(x_sol, X_E'; atol = 1e-6)
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)
    @test isapprox(y_sol, Y_N'; atol = 1e-6)
    return
end
# A zero-second time limit must make Chalmet stop immediately with
# TIME_LIMIT, while still returning at least one (incumbent) solution.
function test_time_limit()
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    # Zero limit: the algorithm is out of time before the first iteration ends.
    MOI.set(model, MOI.TimeLimitSec(), 0.0)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:2 for j in 1:n
        ],
        [0.0, 0.0],
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
    @test MOI.get(model, MOI.ResultCount()) > 0
    return
end
# Maximizing two nonnegative variables with no upper bounds is unbounded:
# Chalmet must report DUAL_INFEASIBLE with no primal or dual solution.
function test_unbounded()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
# The feasible region is empty (v >= 0 but v1 + v2 <= -1), so Chalmet must
# report INFEASIBLE with no primal or dual solution.
function test_infeasible()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * v[1] + 1.0 * v[2], MOI.LessThan(-1.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
# A `VectorOfVariables` objective must be accepted (via bridging) by Chalmet
# and solve to optimality.
function test_vector_of_variables_objective()
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.Chalmet())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: this comparison was previously a bare expression whose Bool
    # result was discarded, so the check never ran.  Wrap it in `@test`.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestChalmet.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 14503 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestDichotomy
using Test
import HiGHS
import Ipopt
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
"""
    run_tests()

Run every `test_`-prefixed function defined in this module, each inside its
own `@testset` named after the function.
"""
function run_tests()
    test_names =
        filter(n -> startswith(string(n), "test_"), names(@__MODULE__; all = true))
    for name in test_names
        @testset "$name" begin
            getfield(@__MODULE__, name)()
        end
    end
    return
end
# `MOA.SolutionLimit` must be supported both by the `Dichotomy` algorithm and
# by the wrapping optimizer, default to its documented value, and round-trip
# through `MOI.set`/`MOI.get`.
function test_Dichotomy_SolutionLimit()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    attr = MOA.SolutionLimit()
    @test MOI.supports(MOA.Dichotomy(), attr)
    @test MOI.supports(model, attr)
    @test MOI.get(model, attr) == MOA.default(attr)
    MOI.set(model, attr, 1)
    @test MOI.get(model, attr) == 1
    return
end
# Bi-objective LP: Dichotomy should find the three supported extreme points of
# the efficient frontier, and `ObjectiveBound` reports the ideal point (the
# component-wise best objective values).
function test_moi_bolp_1()
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    MOI.Utilities.loadfromstring!(
        model,
        """
variables: x, y
minobjective: [2 * x + y + 1, x + 3 * y]
c1: x + y >= 1.0
c2: 0.5 * x + y >= 0.75
c3: x >= 0.0
c4: y >= 0.25
""",
    )
    x = MOI.get(model, MOI.VariableIndex, "x")
    y = MOI.get(model, MOI.VariableIndex, "y")
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(model, MOI.ResultCount()) == 3
    # Expected (x, y) solutions and objective vectors, one per result index.
    X = [[0.0, 1.0], [0.5, 0.5], [1.0, 0.25]]
    Y = [[2.0, 3.0], [2.5, 2.0], [3.25, 1.75]]
    for i in 1:3
        @test MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        @test MOI.get(model, MOI.DualStatus(i)) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.ObjectiveValue(i)) == Y[i]
        @test MOI.get(model, MOI.VariablePrimal(i), x) == X[i][1]
        @test MOI.get(model, MOI.VariablePrimal(i), y) == X[i][2]
    end
    @test MOI.get(model, MOI.ObjectiveBound()) == [2.0, 1.75]
    return
end
# Maximization form of `test_moi_bolp_1` (objectives negated, plus a 0.5
# constant on the second objective); the solution set is unchanged.
function test_moi_bolp_1_maximize()
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    MOI.Utilities.loadfromstring!(
        model,
        """
variables: x, y
maxobjective: [-2.0 * x + -1.0 * y, -1.0 * x + -3.0 * y + 0.5]
c1: x + y >= 1.0
c2: 0.5 * x + y >= 0.75
c3: x >= 0.0
c4: y >= 0.25
""",
    )
    x = MOI.get(model, MOI.VariableIndex, "x")
    y = MOI.get(model, MOI.VariableIndex, "y")
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(model, MOI.ResultCount()) == 3
    # Expected (x, y) solutions and objective vectors, one per result index.
    X = [[0.0, 1.0], [0.5, 0.5], [1.0, 0.25]]
    Y = [-[1.0, 2.5], -[1.5, 1.5], -[2.25, 1.25]]
    for i in 1:3
        @test MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        @test MOI.get(model, MOI.DualStatus(i)) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.ObjectiveValue(i)) == Y[i]
        @test MOI.get(model, MOI.VariablePrimal(i), x) == X[i][1]
        @test MOI.get(model, MOI.VariablePrimal(i), y) == X[i][2]
    end
    @test MOI.get(model, MOI.ObjectiveBound()) == -[1.0, 1.25]
    return
end
# Same LP as `test_moi_bolp_1` with the two objectives swapped: the results
# come back in the reverse order (and with objective components reversed).
function test_moi_bolp_1_reversed()
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    MOI.Utilities.loadfromstring!(
        model,
        """
variables: x, y
minobjective: [x + 3 * y, 2 * x + y]
c1: x + y >= 1.0
c2: 0.5 * x + y >= 0.75
c3: x >= 0.0
c4: y >= 0.25
""",
    )
    x = MOI.get(model, MOI.VariableIndex, "x")
    y = MOI.get(model, MOI.VariableIndex, "y")
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(model, MOI.ResultCount()) == 3
    # Expected solutions, stated in the original (unswapped) order and then
    # reversed to match this model's result ordering.
    X = reverse([[0.0, 1.0], [0.5, 0.5], [1.0, 0.25]])
    Y = reverse([[1.0, 3.0], [1.5, 2.0], [2.25, 1.75]])
    for i in 1:3
        @test MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        @test MOI.get(model, MOI.DualStatus(i)) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.ObjectiveValue(i)) == reverse(Y[i])
        @test MOI.get(model, MOI.VariablePrimal(i), x) == X[i][1]
        @test MOI.get(model, MOI.VariablePrimal(i), y) == X[i][2]
    end
    @test MOI.get(model, MOI.ObjectiveBound()) == reverse([1.0, 1.75])
    return
end
# After solving the bi-objective LP, replace the objective with a
# one-dimensional vector function: Dichotomy must then return a single point.
function test_moi_bolp_1_scalar()
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    MOI.Utilities.loadfromstring!(
        model,
        """
variables: x, y
minobjective: [2 * x + y, x + 3 * y]
c1: x + y >= 1.0
c2: 0.5 * x + y >= 0.75
c3: x >= 0.0
c4: y >= 0.25
""",
    )
    x = MOI.get(model, MOI.VariableIndex, "x")
    y = MOI.get(model, MOI.VariableIndex, "y")
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(model, MOI.ResultCount()) == 3
    # Expected (x, y) solutions and objective vectors for the bi-objective
    # phase, one per result index.
    X = [[0.0, 1.0], [0.5, 0.5], [1.0, 0.25]]
    Y = [[1.0, 3.0], [1.5, 2.0], [2.25, 1.75]]
    for i in 1:3
        @test MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        @test MOI.get(model, MOI.DualStatus(i)) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.ObjectiveValue(i)) == Y[i]
        @test MOI.get(model, MOI.VariablePrimal(i), x) == X[i][1]
        @test MOI.get(model, MOI.VariablePrimal(i), y) == X[i][2]
    end
    @test MOI.get(model, MOI.ObjectiveBound()) == [1.0, 1.75]
    # Re-solve with a single vectorized scalar objective.
    f = MOI.Utilities.operate(vcat, Float64, 2.0 * x + 1.0 * y)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(model, MOI.ResultCount()) == 1
    X = [[0.0, 1.0]]
    Y = [[1.0]]
    for i in 1:1
        @test MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        @test MOI.get(model, MOI.DualStatus(i)) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.ObjectiveValue(i)) == Y[i]
        @test MOI.get(model, MOI.VariablePrimal(i), x) == X[i][1]
        @test MOI.get(model, MOI.VariablePrimal(i), y) == X[i][2]
    end
    @test MOI.get(model, MOI.ObjectiveBound()) == [1.0]
    return
end
# Bi-objective knapsack with profit vectors `p1`/`p2` and weights `w`.
# `results` maps each expected nondominated objective vector to the indices of
# the items selected in the corresponding efficient solution.
function test_biobjective_knapsack()
    p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
    p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
    w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, length(w))
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    f = MOI.Utilities.operate(
        vcat,
        Float64,
        [sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
    )
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(
        model,
        sum(1.0 * w[i] * x[i] for i in 1:length(w)),
        MOI.LessThan(900.0),
    )
    MOI.optimize!(model)
    results = Dict(
        [955.0, 906.0] => [2, 3, 5, 6, 9, 10, 11, 14, 15, 16, 17],
        [948.0, 939.0] => [1, 2, 3, 5, 6, 8, 10, 11, 15, 16, 17],
        [934.0, 971.0] => [2, 3, 5, 6, 8, 10, 11, 12, 15, 16, 17],
        [918.0, 983.0] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
    )
    for i in 1:MOI.get(model, MOI.ResultCount())
        x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
        # Recover the selected item indices from the (binary) primal values.
        X = findall(elt -> elt > 0.9, x_sol)
        Y = MOI.get(model, MOI.ObjectiveValue(i))
        @test results[Y] == X
    end
    return
end
# A zero-second time limit must make Dichotomy stop with TIME_LIMIT while
# still returning at least one solution.
function test_time_limit()
    p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
    p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
    w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
    f = MOI.OptimizerWithAttributes(
        () -> MOA.Optimizer(HiGHS.Optimizer),
        MOA.Algorithm() => MOA.Dichotomy(),
    )
    model = MOI.instantiate(f)
    MOI.set(model, MOI.Silent(), true)
    # Zero limit: the algorithm is out of time before the first iteration ends.
    MOI.set(model, MOI.TimeLimitSec(), 0.0)
    x = MOI.add_variables(model, length(w))
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    f = MOI.Utilities.operate(
        vcat,
        Float64,
        [sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
    )
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(
        model,
        sum(1.0 * w[i] * x[i] for i in 1:length(w)),
        MOI.LessThan(900.0),
    )
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
    @test MOI.get(model, MOI.ResultCount()) > 0
    return
end
# The feasible region is empty (v >= 0 but v1 + v2 <= -1), so Dichotomy must
# report INFEASIBLE with no primal or dual solution.
function test_infeasible()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * v[1] + 1.0 * v[2], MOI.LessThan(-1.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
# Maximizing two nonnegative variables with no upper bounds is unbounded:
# Dichotomy must report DUAL_INFEASIBLE with no primal or dual solution.
function test_unbounded()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
# Bi-criteria transportation problem (NISE literature instance): `c` and `d`
# are the two cost matrices, `a` the demands (per column) and `b` the supplies
# (per row).  `Y_N` holds the expected supported nondominated points.
function test_bicriteria_transportation_nise()
    m, n = 3, 4
    c = Float64[1 2 7 7; 1 9 3 4; 8 9 4 6]
    d = Float64[4 4 3 4; 5 8 9 10; 6 2 5 1]
    a = Float64[11, 3, 14, 16]
    b = Float64[8, 19, 17]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOI.Silent(), true)
    x = [MOI.add_variable(model) for i in 1:m, j in 1:n]
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    # Each column must ship exactly the demand a[j].
    for j in 1:n
        terms = [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:m]
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(terms, 0.0),
            MOI.EqualTo(a[j]),
        )
    end
    # Each row must ship exactly the supply b[i].
    for i in 1:m
        terms = [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n]
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(terms, 0.0),
            MOI.EqualTo(b[i]),
        )
    end
    f = MOI.Utilities.vectorize([
        sum(c[i, j] * x[i, j] for i in 1:m, j in 1:n),
        sum(d[i, j] * x[i, j] for i in 1:m, j in 1:n),
    ])
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(model)
    N = MOI.get(model, MOI.ResultCount())
    y_sol = hcat(MOI.get.(model, MOI.ObjectiveValue.(1:N))...)
    Y_N = Float64[143 156 176 186 208; 265 200 175 171 167]
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
# `MOA.NISE` is a deprecated alias for `MOA.Dichotomy`: both constructors must
# produce the same type with the same default `solution_limit`.
function test_deprecated()
    legacy = MOA.NISE()
    current = MOA.Dichotomy()
    @test legacy isa typeof(current)
    @test legacy.solution_limit === current.solution_limit
    return
end
# Dichotomy only handles scalar and bi-objective problems: a three-objective
# model must throw an informative error at `optimize!`.
function test_three_objective()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOI.Silent(), true)
    MOI.Utilities.loadfromstring!(
        model,
        """
variables: x
maxobjective: [1.0 * x, -1.0 * x, 2.0 * x + 2.0]
""",
    )
    @test_throws(
        ErrorException("Only scalar or bi-objective problems supported."),
        MOI.optimize!(model),
    )
    return
end
# Mean-variance portfolio problem with a quadratic first objective, solved via
# Ipopt with a `SolutionLimit` of 10: each returned point must reproduce its
# objective vector from the primal weights.
function test_quadratic()
    # Expected asset returns and covariance matrix.
    μ = [0.05470748600000001, 0.18257110599999998]
    Q = [0.00076204 0.00051972; 0.00051972 0.00546173]
    N = 2
    model = MOA.Optimizer(Ipopt.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOA.SolutionLimit(), 10)
    MOI.set(model, MOI.Silent(), true)
    w = MOI.add_variables(model, N)
    MOI.add_constraint.(model, w, MOI.GreaterThan(0.0))
    MOI.add_constraint.(model, w, MOI.LessThan(1.0))
    # Portfolio weights sum to one.
    MOI.add_constraint(model, sum(1.0 * w[i] for i in 1:N), MOI.EqualTo(1.0))
    # Minimize variance and (negated) expected return simultaneously.
    var = sum(Q[i, j] * w[i] * w[j] for i in 1:N, j in 1:N)
    mean = sum(-μ[i] * w[i] for i in 1:N)
    f = MOI.Utilities.operate(vcat, Float64, var, mean)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.ResultCount()) == 10
    for i in 1:MOI.get(model, MOI.ResultCount())
        w_sol = MOI.get(model, MOI.VariablePrimal(i), w)
        y = MOI.get(model, MOI.ObjectiveValue(i))
        @test y ≈ [w_sol' * Q * w_sol, -μ' * w_sol]
    end
    return
end
# A `VectorOfVariables` objective must be accepted (via bridging) by Dichotomy
# and solve to optimality.
function test_vector_of_variables_objective()
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.Dichotomy())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: this comparison was previously a bare expression whose Bool
    # result was discarded, so the check never ran.  Wrap it in `@test`.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestDichotomy.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 19585 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestDominguezRios
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
"""
    run_tests()

Run every `test_`-prefixed function defined in this module, each inside its
own `@testset`.  Skipped entirely on 32-bit platforms, where HiGHS fails.
"""
function run_tests()
    if Sys.WORD_SIZE == 32
        return  # Skip on 32-bit because HiGHS fails
    end
    test_names =
        filter(n -> startswith(string(n), "test_"), names(@__MODULE__; all = true))
    for name in test_names
        @testset "$name" begin
            getfield(@__MODULE__, name)()
        end
    end
    return
end
# Tri-objective 0-1 knapsack (minimization form, profits negated).  The
# objective carries a constant of `ones(p)`, so the expected nondominated
# values `Y_N` are shifted by +1 below to match.
function test_knapsack_min_p3()
    p = 3
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:p for j in 1:n
        ],
        ones(p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors.
    X_E = Float64[
        0 1 1 1 1 0 1 0 1 1
        0 0 1 1 1 0 1 1 1 1
        0 1 1 1 1 0 0 1 0 1
        1 0 1 1 1 0 1 1 0 1
        1 1 1 1 1 0 0 0 1 0
        1 1 1 1 1 0 0 0 0 1
        1 0 1 1 1 0 0 1 1 0
    ]
    Y_N = Float64[
        -3042 -4627 -3189
        -2854 -4636 -3076
        -2854 -3570 -3714
        -3394 -3817 -3408
        -2706 -3857 -3304
        -2997 -3539 -3509
        -2518 -3866 -3191
    ]
    # Account for the `ones(p)` objective constant.
    Y_N .+= 1
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# Maximization form of the tri-objective knapsack instance in
# `test_knapsack_min_p3` (zero objective constant, unshifted `Y_N`).
function test_knapsack_max_p3()
    p = 3
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors.
    X_E = Float64[
        0 1 1 1 1 0 1 0 1 1
        0 0 1 1 1 0 1 1 1 1
        0 1 1 1 1 0 0 1 0 1
        1 0 1 1 1 0 1 1 0 1
        1 1 1 1 1 0 0 0 1 0
        1 1 1 1 1 0 0 0 0 1
        1 0 1 1 1 0 0 1 1 0
    ]
    Y_N = Float64[
        3042 4627 3189
        2854 4636 3076
        2854 3570 3714
        3394 3817 3408
        2706 3857 3304
        2997 3539 3509
        2518 3866 3191
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# Four-objective 0-1 knapsack (minimization form, profits negated) with known
# efficient set `X_E` and nondominated values `Y_N`.
function test_knapsack_min_p4()
    p = 4
    n = 10
    W = 2653.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
        557 898 148 63 78 964 246 662 386 272
    ]
    w = Float64[979 448 355 955 426 229 9 695 322 889]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors.
    X_E = Float64[
        0 1 1 0 1 1 1 1 1 0
        0 1 1 1 1 0 1 0 1 0
        0 1 1 0 1 0 1 0 1 1
        0 0 1 0 1 1 1 0 1 1
        1 1 1 0 1 1 1 0 0 0
        0 0 1 1 0 1 1 1 1 0
        0 1 1 1 1 1 1 0 0 0
        0 1 1 1 0 1 1 0 1 0
        0 1 0 0 0 1 1 1 1 1
        0 0 1 1 1 1 1 0 1 0
    ]
    Y_N = Float64[
        -3152 -3232 -3596 -3382
        -2725 -4064 -2652 -1819
        -2862 -3648 -3049 -2028
        -2435 -3618 -2282 -2094
        -3269 -2320 -3059 -2891
        -1904 -3253 -2530 -2469
        -2883 -3237 -2535 -2397
        -2092 -3244 -2643 -2705
        -2146 -1944 -2947 -3428
        -2298 -4034 -1885 -1885
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# Maximization form of the four-objective knapsack instance in
# `test_knapsack_min_p4`; the nondominated values are simply negated.
function test_knapsack_max_p4()
    p = 4
    n = 10
    W = 2653.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
        557 898 148 63 78 964 246 662 386 272
    ]
    w = Float64[979 448 355 955 426 229 9 695 322 889]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: w'x <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions (one per row) and the corresponding
    # nondominated objective vectors.
    X_E = Float64[
        0 1 1 0 1 1 1 1 1 0
        0 1 1 1 1 0 1 0 1 0
        0 1 1 0 1 0 1 0 1 1
        0 0 1 0 1 1 1 0 1 1
        1 1 1 0 1 1 1 0 0 0
        0 0 1 1 0 1 1 1 1 0
        0 1 1 1 1 1 1 0 0 0
        0 1 1 1 0 1 1 0 1 0
        0 1 0 0 0 1 1 1 1 1
        0 0 1 1 1 1 1 0 1 0
    ]
    Y_N = Float64[
        3152 3232 3596 3382
        2725 4064 2652 1819
        2862 3648 3049 2028
        2435 3618 2282 2094
        3269 2320 3059 2891
        1904 3253 2530 2469
        2883 3237 2535 2397
        2092 3244 2643 2705
        2146 1944 2947 3428
        2298 4034 1885 1885
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# Tri-objective 5x5 assignment problem.  The flat cost matrix is reshaped and
# permuted into a p-by-n-by-n tensor `C[k, i, j]` (cost of assigning i to j
# under objective k).  `X_E` rows are the known efficient assignment matrices,
# flattened with `vec`, and `Y_N` the matching nondominated cost vectors.
function test_assignment_min_p3()
    p = 3
    n = 5
    C = Float64[
        6 1 20 2 3
        2 6 9 10 18
        1 6 20 5 9
        6 8 6 9 6
        7 10 10 6 2
        17 20 8 8 20
        10 13 1 10 15
        4 11 1 13 1
        19 13 7 18 17
        15 3 5 1 11
        10 7 1 19 12
        2 15 12 10 3
        11 20 16 12 9
        10 15 20 11 7
        1 9 20 7 6
    ]
    C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Each worker i is assigned exactly one task.
    for i in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Each task j is assigned exactly one worker.
    for j in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    X_E = Float64[
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
        1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
        0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
    ]
    Y_N = Float64[
        23 43 44
        38 33 53
        40 47 37
        20 52 54
        45 33 34
        43 51 31
        28 33 58
        29 29 59
        35 49 39
        50 40 32
        16 61 47
        37 55 36
        39 43 41
        18 47 67
        24 39 45
        28 66 39
        34 60 42
        22 37 63
        22 54 47
        17 43 71
        35 38 56
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol =
        hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# Maximization form of `test_assignment_min_p3`: the cost coefficients are
# negated in the objective, so the expected `Y_N` values are the negatives of
# the minimization instance while `X_E` is unchanged.
function test_assignment_max_p3()
    p = 3
    n = 5
    C = Float64[
        6 1 20 2 3
        2 6 9 10 18
        1 6 20 5 9
        6 8 6 9 6
        7 10 10 6 2
        17 20 8 8 20
        10 13 1 10 15
        4 11 1 13 1
        19 13 7 18 17
        15 3 5 1 11
        10 7 1 19 12
        2 15 12 10 3
        11 20 16 12 9
        10 15 20 11 7
        1 9 20 7 6
    ]
    C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Each worker i is assigned exactly one task.
    for i in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Each task j is assigned exactly one worker.
    for j in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(-C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    X_E = Float64[
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
        1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
        0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
    ]
    Y_N = Float64[
        -23 -43 -44
        -38 -33 -53
        -40 -47 -37
        -20 -52 -54
        -45 -33 -34
        -43 -51 -31
        -28 -33 -58
        -29 -29 -59
        -35 -49 -39
        -50 -40 -32
        -16 -61 -47
        -37 -55 -36
        -39 -43 -41
        -18 -47 -67
        -24 -39 -45
        -28 -66 -39
        -34 -60 -42
        -22 -37 -63
        -22 -54 -47
        -17 -43 -71
        -35 -38 -56
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol =
        hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)
    # `sort(...; dims = 1)` makes the comparison independent of the order in
    # which the solver returns solutions.
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
# The feasible region is empty (v >= 0 but v1 + v2 <= -1), so DominguezRios
# must report INFEASIBLE with no primal or dual solution.
function test_infeasible()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * v[1] + 1.0 * v[2], MOI.LessThan(-1.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
# Maximizing two nonnegative variables with no upper bounds is unbounded:
# DominguezRios must report DUAL_INFEASIBLE with no primal or dual solution.
function test_unbounded()
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    v = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, v, MOI.GreaterThan(0.0))
    obj = MOI.Utilities.operate(vcat, Float64, 1.0 .* v...)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    for status in (MOI.PrimalStatus(), MOI.DualStatus())
        @test MOI.get(model, status) == MOI.NO_SOLUTION
    end
    return
end
function test_no_bounding_box()
    # Minimizing x (x .>= 0 only) leaves the objectives unbounded in the
    # direction needed to compute a bounding box for DominguezRios; the call
    # to `optimize!` is expected to emit a single warning and terminate with
    # DUAL_INFEASIBLE, exposing no primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    # Exactly one `:warn`-level log record is expected during the solve.
    @test_logs (:warn,) MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_time_limit()
    # A tri-objective knapsack with a zero-second time limit: the solver must
    # stop immediately with TIME_LIMIT and return no solutions.
    p = 3       # number of objectives
    n = 10      # number of items
    W = 2137.0  # knapsack capacity
    # C[i, j] is the value of item j under objective i.
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    # w[j] is the weight of item j.
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    # Zero time limit forces an immediate TIME_LIMIT termination.
    MOI.set(model, MOI.TimeLimitSec(), 0.0)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity constraint: sum(w[j] * x[j]) <= W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    # Minimize the negated values, which is equivalent to maximizing C * x.
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
    @test MOI.get(model, MOI.ResultCount()) == 0
    return
end
function test_vector_of_variables_objective()
    # Checks that a raw `VectorOfVariables` objective is bridged and solved:
    # minimize (x[1], x[2]) over binary x with x[1] + x[2] >= 1.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.DominguezRios())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: the original line was a bare `==` comparison whose result was
    # discarded, so the status was never actually checked; wrap it in @test.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestDominguezRios.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 18732 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestEpsilonConstraint
using Test
import HiGHS
import Ipopt
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
    # Run every function in this module whose name begins with `test_`, each
    # inside its own testset so failures are reported independently.
    all_names = names(@__MODULE__; all = true)
    for test_name in filter(n -> startswith("$n", "test_"), all_names)
        @testset "$test_name" begin
            getfield(@__MODULE__, test_name)()
        end
    end
    return
end
function test_biobjective_knapsack()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOA.SolutionLimit(), 100)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
f.constants[1] = 1.0
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
results = Dict(
[956, 906] => [2, 3, 5, 6, 9, 10, 11, 14, 15, 16, 17],
[950, 915] => [1, 2, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[949, 939] => [1, 2, 3, 5, 6, 8, 10, 11, 15, 16, 17],
[944, 940] => [2, 3, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[937, 942] => [1, 2, 3, 5, 6, 10, 11, 12, 15, 16, 17],
[936, 947] => [2, 5, 6, 8, 9, 10, 11, 12, 15, 16, 17],
[935, 971] => [2, 3, 5, 6, 8, 10, 11, 12, 15, 16, 17],
[928, 972] => [2, 3, 5, 6, 8, 9, 10, 11, 12, 16, 17],
[919, 983] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
)
@test MOI.get(model, MOI.ResultCount()) == 9
for i in 1:MOI.get(model, MOI.ResultCount())
x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
X = findall(elt -> elt > 0.9, x_sol)
Y = MOI.get(model, MOI.ObjectiveValue(i))
@test results[round.(Int, Y)] == X
end
return
end
function test_biobjective_knapsack_atol()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
results = Dict(
[955, 906] => [2, 3, 5, 6, 9, 10, 11, 14, 15, 16, 17],
[949, 915] => [1, 2, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[948, 939] => [1, 2, 3, 5, 6, 8, 10, 11, 15, 16, 17],
[943, 940] => [2, 3, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[936, 942] => [1, 2, 3, 5, 6, 10, 11, 12, 15, 16, 17],
[935, 947] => [2, 5, 6, 8, 9, 10, 11, 12, 15, 16, 17],
[934, 971] => [2, 3, 5, 6, 8, 10, 11, 12, 15, 16, 17],
[927, 972] => [2, 3, 5, 6, 8, 9, 10, 11, 12, 16, 17],
[918, 983] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
)
@test MOI.get(model, MOI.ResultCount()) == 9
for i in 1:MOI.get(model, MOI.ResultCount())
x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
X = findall(elt -> elt > 0.9, x_sol)
Y = MOI.get(model, MOI.ObjectiveValue(i))
@test results[round.(Int, Y)] == X
end
return
end
function test_biobjective_knapsack_atol_large()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
@test MOI.supports(model, MOA.EpsilonConstraintStep())
MOI.set(model, MOA.EpsilonConstraintStep(), 10.0)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
results = Dict(
[948, 939] => [1, 2, 3, 5, 6, 8, 10, 11, 15, 16, 17],
[934, 971] => [2, 3, 5, 6, 8, 10, 11, 12, 15, 16, 17],
[918, 983] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
)
@test MOI.get(model, MOI.ResultCount()) == 3
for i in 1:MOI.get(model, MOI.ResultCount())
x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
X = findall(elt -> elt > 0.9, x_sol)
Y = MOI.get(model, MOI.ObjectiveValue(i))
@test results[round.(Int, Y)] == X
end
return
end
function test_biobjective_knapsack_min()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOA.SolutionLimit(), 100)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(-1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
results = Dict(
[955, 906] => [2, 3, 5, 6, 9, 10, 11, 14, 15, 16, 17],
[949, 915] => [1, 2, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[948, 939] => [1, 2, 3, 5, 6, 8, 10, 11, 15, 16, 17],
[943, 940] => [2, 3, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[936, 942] => [1, 2, 3, 5, 6, 10, 11, 12, 15, 16, 17],
[935, 947] => [2, 5, 6, 8, 9, 10, 11, 12, 15, 16, 17],
[934, 971] => [2, 3, 5, 6, 8, 10, 11, 12, 15, 16, 17],
[927, 972] => [2, 3, 5, 6, 8, 9, 10, 11, 12, 16, 17],
[918, 983] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
)
@test MOI.get(model, MOI.ResultCount()) == 9
for i in 1:MOI.get(model, MOI.ResultCount())
x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
X = findall(elt -> elt > 0.9, x_sol)
Y = MOI.get(model, MOI.ObjectiveValue(i))
@test results[-round.(Int, Y)] == X
end
return
end
function test_biobjective_knapsack_min_solution_limit()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
@test MOI.supports(model, MOA.SolutionLimit())
MOI.set(model, MOA.SolutionLimit(), 3)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
results = Dict(
[943, 940] => [2, 3, 5, 6, 8, 9, 10, 11, 15, 16, 17],
[918, 983] => [2, 3, 4, 5, 6, 8, 10, 11, 12, 16, 17],
)
@test MOI.get(model, MOI.ResultCount()) == 2
for i in 1:MOI.get(model, MOI.ResultCount())
x_sol = MOI.get(model, MOI.VariablePrimal(i), x)
X = findall(elt -> elt > 0.9, x_sol)
Y = MOI.get(model, MOI.ObjectiveValue(i))
@test results[round.(Int, Y)] == X
end
return
end
function test_infeasible()
    # An infeasible model (x .>= 0 together with x[1] + x[2] <= -1) must
    # terminate with INFEASIBLE and expose no primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_unbounded()
    # Maximizing objectives unbounded above (x .>= 0, no upper bounds) must
    # terminate with DUAL_INFEASIBLE and no primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_unbounded_second()
    # Here only the second objective is unbounded (vars[1] <= 1 bounds the
    # first); the solve must still report DUAL_INFEASIBLE with no solutions.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOI.Silent(), true)
    MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
    vars = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, vars, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, vars[1], MOI.LessThan(1.0))
    objective = MOI.Utilities.operate(vcat, Float64, 1.0 .* vars...)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(objective)}(), objective)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_deprecated()
    # `ObjectiveAbsoluteTolerance` is still supported for backwards
    # compatibility, but both setting and getting it must emit a warning.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
    @test MOI.supports(model, MOA.ObjectiveAbsoluteTolerance(1))
    @test_logs (:warn,) MOI.set(model, MOA.ObjectiveAbsoluteTolerance(1), 1.0)
    @test_logs (:warn,) MOI.get(model, MOA.ObjectiveAbsoluteTolerance(1))
    return
end
function test_quadratic()
μ = [0.05470748600000001, 0.18257110599999998]
Q = [0.00076204 0.00051972; 0.00051972 0.00546173]
N = 2
model = MOA.Optimizer(Ipopt.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOA.SolutionLimit(), 10)
MOI.set(model, MOI.Silent(), true)
w = MOI.add_variables(model, N)
MOI.add_constraint.(model, w, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, w, MOI.LessThan(1.0))
MOI.add_constraint(model, sum(1.0 * w[i] for i in 1:N), MOI.EqualTo(1.0))
var = sum(Q[i, j] * w[i] * w[j] for i in 1:N, j in 1:N)
mean = sum(-μ[i] * w[i] for i in 1:N)
f = MOI.Utilities.operate(vcat, Float64, var, mean)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) == 10
for i in 1:MOI.get(model, MOI.ResultCount())
w_sol = MOI.get(model, MOI.VariablePrimal(i), w)
y = MOI.get(model, MOI.ObjectiveValue(i))
@test y ≈ [w_sol' * Q * w_sol, -μ' * w_sol]
end
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
return
end
function test_poor_numerics()
μ = [0.006898463772627643, -0.02972609131603086]
Q = [0.030446 0.00393731; 0.00393731 0.00713285]
N = 2
model = MOA.Optimizer(Ipopt.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOA.SolutionLimit(), 10)
MOI.set(model, MOI.Silent(), true)
w = MOI.add_variables(model, N)
sharpe = MOI.add_variable(model)
MOI.add_constraint.(model, w, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, w, MOI.LessThan(1.0))
MOI.add_constraint(model, sum(1.0 * w[i] for i in 1:N), MOI.EqualTo(1.0))
variance = Expr(:call, :+)
for i in 1:N, j in 1:N
push!(variance.args, Expr(:call, :*, Q[i, j], w[i], w[j]))
end
nlp = MOI.Nonlinear.Model()
MOI.Nonlinear.add_constraint(
nlp,
:(($(μ[1]) * $(w[1]) + $(μ[2]) * $(w[2])) / sqrt($variance) - $sharpe),
MOI.EqualTo(0.0),
)
evaluator = MOI.Nonlinear.Evaluator(
nlp,
MOI.Nonlinear.SparseReverseMode(),
[w; sharpe],
)
MOI.set(model, MOI.NLPBlock(), MOI.NLPBlockData(evaluator))
f = MOI.Utilities.operate(vcat, Float64, μ' * w, sharpe)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) == 1
for i in 1:MOI.get(model, MOI.ResultCount())
w_sol = MOI.get(model, MOI.VariablePrimal(i), w)
sharpe_sol = MOI.get(model, MOI.VariablePrimal(i), sharpe)
y = MOI.get(model, MOI.ObjectiveValue(i))
@test y ≈ [μ' * w_sol, sharpe_sol]
end
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
return
end
function test_vectornonlinearfunction()
μ = [0.006898463772627643, -0.02972609131603086]
Q = [0.030446 0.00393731; 0.00393731 0.00713285]
N = 2
model = MOA.Optimizer(Ipopt.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOA.SolutionLimit(), 10)
MOI.set(model, MOI.Silent(), true)
w = MOI.add_variables(model, N)
MOI.add_constraint.(model, w, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, w, MOI.LessThan(1.0))
MOI.add_constraint(model, sum(1.0 * w[i] for i in 1:N), MOI.EqualTo(1.0))
f = MOI.VectorNonlinearFunction([
μ' * w,
MOI.ScalarNonlinearFunction(
:/,
Any[μ'*w, MOI.ScalarNonlinearFunction(:sqrt, Any[w'*Q*w])],
),
])
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) >= 1
for i in 1:MOI.get(model, MOI.ResultCount())
w_sol = MOI.get(model, MOI.VariablePrimal(i), w)
y = MOI.get(model, MOI.ObjectiveValue(i))
@test y ≈ [μ' * w_sol, (μ' * w_sol) / sqrt(w_sol' * Q * w_sol)]
end
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
return
end
function test_time_limit()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOI.TimeLimitSec(), 0.0)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
@test MOI.get(model, MOI.ResultCount()) == 0
return
end
function test_time_limit_large()
p1 = [77, 94, 71, 63, 96, 82, 85, 75, 72, 91, 99, 63, 84, 87, 79, 94, 90]
p2 = [65, 90, 90, 77, 95, 84, 70, 94, 66, 92, 74, 97, 60, 60, 65, 97, 93]
w = [80, 87, 68, 72, 66, 77, 99, 85, 70, 93, 98, 72, 100, 89, 67, 86, 91]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
MOI.set(model, MOI.TimeLimitSec(), 1.0)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, length(w))
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(
vcat,
Float64,
[sum(1.0 * p[i] * x[i] for i in 1:length(w)) for p in [p1, p2]]...,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(
model,
sum(1.0 * w[i] * x[i] for i in 1:length(w)),
MOI.LessThan(900.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) >= 0
return
end
function test_vector_of_variables_objective()
    # Checks that a raw `VectorOfVariables` objective is bridged and solved:
    # minimize (x[1], x[2]) over binary x with x[1] + x[2] >= 1.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.EpsilonConstraint())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: the original line was a bare `==` comparison whose result was
    # discarded, so the status was never actually checked; wrap it in @test.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestEpsilonConstraint.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 5154 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestHierarchical
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
    # Run every function in this module whose name begins with `test_`, each
    # inside its own testset so failures are reported independently.
    all_names = names(@__MODULE__; all = true)
    for test_name in filter(n -> startswith("$n", "test_"), all_names)
        @testset "$test_name" begin
            getfield(@__MODULE__, test_name)()
        end
    end
    return
end
function test_sorted_priorities()
    # Table of priority vectors and the expected objective groupings, ordered
    # from highest priority group to lowest.
    cases = [
        [0, 0, 0] => [[1, 2, 3]],
        [1, 0, 0] => [[1], [2, 3]],
        [0, 1, 0] => [[2], [1, 3]],
        [0, 0, 1] => [[3], [1, 2]],
        [0, 1, 1] => [[2, 3], [1]],
        [0, 2, 1] => [[2], [3], [1]],
    ]
    for (priorities, expected) in cases
        @test MOA._sorted_priorities(priorities) == expected
    end
    return
end
function test_knapsack()
P = Float64[1 0 0 0; 0 1 1 0; 0 0 1 1; 0 1 0 0]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.Hierarchical())
MOI.set.(model, MOA.ObjectivePriority.(1:4), [2, 1, 1, 0])
MOI.set.(model, MOA.ObjectiveWeight.(1:4), [1, 0.5, 0.5, 1])
MOI.set(model, MOA.ObjectiveRelativeTolerance(1), 0.1)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, 4)
MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, x, MOI.LessThan(1.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
f = MOI.Utilities.operate(vcat, Float64, P * x...)
f.constants[4] = 1_000.0
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(model, sum(1.0 * x[i] for i in 1:4), MOI.LessThan(2.0))
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) == 1
x_sol = MOI.get(model, MOI.VariablePrimal(), x)
@test ≈(x_sol, [0.9, 0, 0.9, 0.2]; atol = 1e-3)
y_sol = MOI.get(model, MOI.ObjectiveValue())
@test ≈(y_sol, P * x_sol .+ [0.0, 0.0, 0.0, 1_000.0]; atol = 1e-4)
return
end
function test_knapsack_min()
P = Float64[1 0 0 0; 0 1 1 0; 0 0 1 1; 0 1 0 0]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.Hierarchical())
MOI.set.(model, MOA.ObjectivePriority.(1:4), [2, 1, 1, 0])
MOI.set.(model, MOA.ObjectiveWeight.(1:4), [1, 0.5, 0.5, 1])
MOI.set(model, MOA.ObjectiveRelativeTolerance(1), 0.1)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, 4)
MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, x, MOI.LessThan(1.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
f = MOI.Utilities.operate(vcat, Float64, -P * x...)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(model, sum(1.0 * x[i] for i in 1:4), MOI.LessThan(2.0))
MOI.optimize!(model)
x_sol = MOI.get(model, MOI.VariablePrimal(), x)
@test ≈(x_sol, [0.9, 0, 0.9, 0.2]; atol = 1e-3)
return
end
function test_infeasible()
    # An infeasible model (x .>= 0 together with x[1] + x[2] <= -1) must
    # terminate with INFEASIBLE and expose no primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Hierarchical())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_unbounded()
    # Maximizing objectives unbounded above (x .>= 0, no upper bounds) must
    # terminate with DUAL_INFEASIBLE and no primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Hierarchical())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_vector_of_variables_objective()
    # Checks that a raw `VectorOfVariables` objective is bridged and solved:
    # minimize (x[1], x[2]) over binary x with x[1] + x[2] >= 1.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.Hierarchical())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: the original line was a bare `==` comparison whose result was
    # discarded, so the status was never actually checked; wrap it in @test.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
end
TestHierarchical.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 19647 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestKirlikSayin
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
    # Discover and run every `test_`-prefixed function in this module, each
    # wrapped in its own `@testset` so failures are reported per test.
    for name in names(@__MODULE__; all = true)
        if startswith("$name", "test_")
            @testset "$name" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end
function test_knapsack_min_p3()
p = 3
n = 10
W = 2137.0
C = Float64[
566 611 506 180 817 184 585 423 26 317
62 84 977 979 874 54 269 93 881 563
664 982 962 140 224 215 12 869 332 537
]
w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, n)
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
0.0,
),
MOI.LessThan(W),
)
f = MOI.VectorAffineFunction(
[
MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
for i in 1:p for j in 1:n
],
ones(p),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
X_E = Float64[
1 0 1 1 1 0 1 1 0 1
0 1 1 1 1 0 1 0 1 1
1 1 1 1 1 0 0 0 0 1
0 1 1 1 1 0 0 1 0 1
1 1 1 1 1 0 0 0 1 0
1 0 1 1 1 0 0 1 1 0
0 0 1 1 1 0 1 1 1 1
]
Y_N = Float64[
-3394 -3817 -3408
-3042 -4627 -3189
-2997 -3539 -3509
-2854 -3570 -3714
-2706 -3857 -3304
-2518 -3866 -3191
-2854 -4636 -3076
]
Y_N .+= 1
N = MOI.get(model, MOI.ResultCount())
x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
@test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
@test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
return
end
function test_knapsack_max_p3()
p = 3
n = 10
W = 2137.0
C = Float64[
566 611 506 180 817 184 585 423 26 317
62 84 977 979 874 54 269 93 881 563
664 982 962 140 224 215 12 869 332 537
]
w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, n)
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
0.0,
),
MOI.LessThan(W),
)
f = MOI.VectorAffineFunction(
[
MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
i in 1:p for j in 1:n
],
fill(0.0, p),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
X_E = Float64[
1 0 1 1 1 0 1 1 0 1
0 1 1 1 1 0 1 0 1 1
1 1 1 1 1 0 0 0 0 1
0 1 1 1 1 0 0 1 0 1
1 1 1 1 1 0 0 0 1 0
1 0 1 1 1 0 0 1 1 0
0 0 1 1 1 0 1 1 1 1
]
Y_N = Float64[
3394 3817 3408
3042 4627 3189
2997 3539 3509
2854 3570 3714
2706 3857 3304
2518 3866 3191
2854 4636 3076
]
N = MOI.get(model, MOI.ResultCount())
x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
@test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
@test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
return
end
function test_knapsack_min_p4()
p = 4
n = 10
W = 2653.0
C = Float64[
566 611 506 180 817 184 585 423 26 317
62 84 977 979 874 54 269 93 881 563
664 982 962 140 224 215 12 869 332 537
557 898 148 63 78 964 246 662 386 272
]
w = Float64[979 448 355 955 426 229 9 695 322 889]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, n)
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
0.0,
),
MOI.LessThan(W),
)
f = MOI.VectorAffineFunction(
[
MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
for i in 1:p for j in 1:n
],
fill(0.0, p),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
X_E = Float64[
1 1 1 0 1 1 1 0 0 0
0 1 1 0 1 1 1 1 1 0
0 1 1 1 1 1 1 0 0 0
0 1 1 0 1 0 1 0 1 1
0 1 1 1 1 0 1 0 1 0
0 0 1 0 1 1 1 0 1 1
0 1 1 1 0 1 1 0 1 0
0 0 1 1 0 1 1 1 1 0
0 0 1 1 1 1 1 0 1 0
0 1 0 0 0 1 1 1 1 1
]
Y_N = Float64[
-3269 -2320 -3059 -2891
-3152 -3232 -3596 -3382
-2883 -3237 -2535 -2397
-2862 -3648 -3049 -2028
-2725 -4064 -2652 -1819
-2435 -3618 -2282 -2094
-2092 -3244 -2643 -2705
-1904 -3253 -2530 -2469
-2298 -4034 -1885 -1885
-2146 -1944 -2947 -3428
]
N = MOI.get(model, MOI.ResultCount())
x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
@test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
@test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
return
end
function test_knapsack_max_p4()
p = 4
n = 10
W = 2653.0
C = Float64[
566 611 506 180 817 184 585 423 26 317
62 84 977 979 874 54 269 93 881 563
664 982 962 140 224 215 12 869 332 537
557 898 148 63 78 964 246 662 386 272
]
w = Float64[979 448 355 955 426 229 9 695 322 889]
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, n)
MOI.add_constraint.(model, x, MOI.ZeroOne())
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
0.0,
),
MOI.LessThan(W),
)
f = MOI.VectorAffineFunction(
[
MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
i in 1:p for j in 1:n
],
fill(0.0, p),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
X_E = Float64[
1 1 1 0 1 1 1 0 0 0
0 1 1 0 1 1 1 1 1 0
0 1 1 1 1 1 1 0 0 0
0 1 1 0 1 0 1 0 1 1
0 1 1 1 1 0 1 0 1 0
0 0 1 0 1 1 1 0 1 1
0 1 1 1 0 1 1 0 1 0
0 0 1 1 0 1 1 1 1 0
0 0 1 1 1 1 1 0 1 0
0 1 0 0 0 1 1 1 1 1
]
Y_N = Float64[
3269 2320 3059 2891
3152 3232 3596 3382
2883 3237 2535 2397
2862 3648 3049 2028
2725 4064 2652 1819
2435 3618 2282 2094
2092 3244 2643 2705
1904 3253 2530 2469
2298 4034 1885 1885
2146 1944 2947 3428
]
N = MOI.get(model, MOI.ResultCount())
x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)
@test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
@test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
return
end
function test_assignment_min_p3()
if Sys.WORD_SIZE == 32
return # Skip on 32-bit because HiGHS fails
end
p = 3
n = 5
C = Float64[
6 1 20 2 3
2 6 9 10 18
1 6 20 5 9
6 8 6 9 6
7 10 10 6 2
17 20 8 8 20
10 13 1 10 15
4 11 1 13 1
19 13 7 18 17
15 3 5 1 11
10 7 1 19 12
2 15 12 10 3
11 20 16 12 9
10 15 20 11 7
1 9 20 7 6
]
C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
model = MOA.Optimizer(HiGHS.Optimizer)
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
MOI.add_constraint.(model, x, MOI.ZeroOne())
for i in 1:n
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
0.0,
),
MOI.EqualTo(1.0),
)
end
for j in 1:n
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
0.0,
),
MOI.EqualTo(1.0),
)
end
f = MOI.VectorAffineFunction(
[
MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
],
fill(0.0, p),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
X_E = Float64[
0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
]
Y_N = Float64[
16 61 47
17 43 71
18 47 67
20 52 54
22 37 63
23 43 44
22 54 47
28 66 39
34 60 42
24 39 45
35 49 39
37 55 36
28 33 58
35 38 56
39 43 41
38 33 53
45 33 34
43 51 31
40 47 37
29 29 59
50 40 32
]
N = MOI.get(model, MOI.ResultCount())
x_sol =
hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)
@test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
@test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
return
end
"""
    test_assignment_max_p3()

Solve a tri-objective 5×5 assignment problem with the KirlikSayin algorithm,
maximizing the negated costs, and compare all returned solutions against the
known efficient set `X_E` and nondominated point set `Y_N`.
"""
function test_assignment_max_p3()
    if Sys.WORD_SIZE == 32
        return # Skip on 32-bit because HiGHS fails
    end
    # p objectives, n workers/tasks.
    p = 3
    n = 5
    # Raw cost data: n * p rows of n columns; reshaped below so that
    # C[k, i, j] is the cost of assigning worker i to task j in objective k.
    C = Float64[
        6 1 20 2 3
        2 6 9 10 18
        1 6 20 5 9
        6 8 6 9 6
        7 10 10 6 2
        17 20 8 8 20
        10 13 1 10 15
        4 11 1 13 1
        19 13 7 18 17
        15 3 5 1 11
        10 7 1 19 12
        2 15 12 10 3
        11 20 16 12 9
        10 15 20 11 7
        1 9 20 7 6
    ]
    C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.Silent(), true)
    # x[i, j] = 1 iff worker i is assigned to task j.
    x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Each worker does exactly one task.
    for i in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Each task is done by exactly one worker.
    for j in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Negated costs: maximizing -C is equivalent to minimizing C.
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(-C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one flattened 25-element assignment per row.
    X_E = Float64[
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
        0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
        0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
    ]
    # Known nondominated points (negated because the objective is -C).
    Y_N = Float64[
        -16 -61 -47
        -17 -43 -71
        -18 -47 -67
        -20 -52 -54
        -22 -37 -63
        -23 -43 -44
        -22 -54 -47
        -28 -66 -39
        -34 -60 -42
        -24 -39 -45
        -35 -49 -39
        -37 -55 -36
        -28 -33 -58
        -35 -38 -56
        -39 -43 -41
        -38 -33 -53
        -45 -33 -34
        -43 -51 -31
        -40 -47 -37
        -29 -29 -59
        -50 -40 -32
    ]
    N = MOI.get(model, MOI.ResultCount())
    # One column per solution; rows sorted so the comparison is
    # order-independent.
    x_sol =
        hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)
    @test isapprox(sort(x_sol; dims = 1), sort(X_E'; dims = 1); atol = 1e-6)
    y_sol = vcat([MOI.get(model, MOI.ObjectiveValue(i))' for i in 1:N]...)
    @test isapprox(sort(y_sol; dims = 1), sort(Y_N; dims = 1); atol = 1e-6)
    return
end
function test_infeasible()
    # An infeasible instance must terminate with INFEASIBLE and report no
    # primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    # x₁ + x₂ ≤ -1 contradicts the zero lower bounds, so no feasible point
    # exists.
    MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_unbounded()
    # Maximizing unbounded-above objectives must terminate with
    # DUAL_INFEASIBLE and report no solutions.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_no_bounding_box()
    # The feasible region has no finite upper bounds, so the algorithm
    # cannot construct its bounding box: it must emit a warning and
    # terminate with DUAL_INFEASIBLE.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    @test_logs (:warn,) MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_time_limit()
    # With a time limit of zero seconds, the solve must stop immediately
    # with TIME_LIMIT and return no results.
    p = 3
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.TimeLimitSec(), 0.0)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    for xj in x
        MOI.add_constraint(model, xj, MOI.ZeroOne())
    end
    # Knapsack capacity: w'x ≤ W.
    knapsack_terms = [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n]
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(knapsack_terms, 0.0),
        MOI.LessThan(W),
    )
    # Objective k minimizes -C[k, :]'x.
    objective_terms = [
        MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
        for i in 1:p for j in 1:n
    ]
    f = MOI.VectorAffineFunction(objective_terms, zeros(p))
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
    @test MOI.get(model, MOI.ResultCount()) == 0
    return
end
function test_vector_of_variables_objective()
    # A VectorOfVariables objective must be accepted (via bridges) and the
    # model solved to optimality.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: this comparison previously lacked `@test`, so its Bool result
    # was silently discarded and the assertion never ran.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestKirlikSayin.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 7172 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestLexicographic
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
    # Discover every `test_`-prefixed function defined in this module and
    # run each inside its own testset.
    defined_names = names(@__MODULE__; all = true)
    for name in filter(n -> startswith("$n", "test_"), defined_names)
        test_function = getfield(@__MODULE__, name)
        @testset "$name" begin
            test_function()
        end
    end
    return
end
function test_knapsack()
    # Single-permutation Lexicographic on a 4-variable continuous knapsack
    # with a 10% relative tolerance on the first objective.
    P = Float64[1 0 0 0; 0 1 0 0; 0 0 0 1; 0 0 1 0]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
    @test MOI.supports(model, MOA.LexicographicAllPermutations())
    MOI.set(model, MOA.LexicographicAllPermutations(), false)
    MOI.set(model, MOA.ObjectiveRelativeTolerance(1), 0.1)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 4)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        MOI.add_constraint(model, xi, MOI.LessThan(1.0))
    end
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    f = MOI.Utilities.operate(vcat, Float64, P * x...)
    # Constant offset on the fourth objective.
    f.constants[4] = 1_000.0
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.LessThan(2.0))
    MOI.optimize!(model)
    @test MOI.get(model, MOI.ResultCount()) == 1
    x_sol = MOI.get(model, MOI.VariablePrimal(), x)
    @test isapprox(x_sol, [0.9, 1, 0, 0.1]; atol = 1e-3)
    y_sol = MOI.get(model, MOI.ObjectiveValue())
    @test isapprox(y_sol, P * x_sol .+ [0.0, 0.0, 0.0, 1_000.0]; atol = 1e-4)
    return
end
function test_knapsack_default()
    # Default Lexicographic settings enumerate all objective permutations,
    # which yields three distinct solutions for this instance.
    P = Float64[1 0 0 0; 0 1 0 0; 0 0 0 1]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 4)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        MOI.add_constraint(model, xi, MOI.LessThan(1.0))
    end
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    f = MOI.Utilities.operate(vcat, Float64, P * x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.LessThan(2.0))
    MOI.optimize!(model)
    # Map each expected objective vector to its decision vector.
    expected = Dict(
        [0, 1, 1] => [0, 1, 0, 1],
        [1, 0, 1] => [1, 0, 0, 1],
        [1, 1, 0] => [1, 1, 0, 0],
    )
    result_count = MOI.get(model, MOI.ResultCount())
    @test result_count == 3
    for i in 1:result_count
        x_i = round.(Int, MOI.get(model, MOI.VariablePrimal(i), x))
        y_i = round.(Int, MOI.get(model, MOI.ObjectiveValue(i)))
        @test expected[y_i] == x_i
    end
    return
end
function test_knapsack_min()
    # Minimizing the negated objectives must give the same solution as the
    # maximization variant of this knapsack.
    P = Float64[1 0 0 0; 0 1 0 0; 0 0 0 1; 0 0 1 0]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
    MOI.set(model, MOA.LexicographicAllPermutations(), false)
    MOI.set(model, MOA.ObjectiveRelativeTolerance(1), 0.1)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 4)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        MOI.add_constraint(model, xi, MOI.LessThan(1.0))
    end
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    f = MOI.Utilities.operate(vcat, Float64, -P * x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.LessThan(2.0))
    MOI.optimize!(model)
    x_sol = MOI.get(model, MOI.VariablePrimal(), x)
    @test isapprox(x_sol, [0.9, 1, 0, 0.1]; atol = 1e-3)
    return
end
function test_knapsack_one_solution()
    # Without a relative tolerance, single-permutation Lexicographic returns
    # exactly one solution and reports it in the raw status string.
    P = Float64[1 0 0 0; 0 1 0 0; 0 0 0 1; 0 0 1 0]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
    MOI.set(model, MOA.LexicographicAllPermutations(), false)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 4)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        MOI.add_constraint(model, xi, MOI.LessThan(1.0))
    end
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    f = MOI.Utilities.operate(vcat, Float64, P * x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.LessThan(2.0))
    MOI.optimize!(model)
    x_sol = MOI.get(model, MOI.VariablePrimal(), x)
    @test isapprox(x_sol, [1, 1, 0, 0]; atol = 1e-3)
    @test MOI.get(model, MOI.RawStatusString()) ==
          "Solve complete. Found 1 solution(s)"
    return
end
function test_infeasible()
    # Both permutation modes must report infeasibility identically.
    for all_permutations in (true, false)
        model = MOA.Optimizer(HiGHS.Optimizer)
        MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
        MOI.set(model, MOA.LexicographicAllPermutations(), all_permutations)
        MOI.set(model, MOI.Silent(), true)
        x = MOI.add_variables(model, 2)
        for xi in x
            MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        end
        # x₁ + x₂ ≤ -1 contradicts the zero lower bounds.
        MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
        f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
        MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
        MOI.optimize!(model)
        @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
        @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    end
    return
end
function test_unbounded()
    # Both permutation modes must detect unboundedness identically.
    for all_permutations in (true, false)
        model = MOA.Optimizer(HiGHS.Optimizer)
        MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
        MOI.set(model, MOA.LexicographicAllPermutations(), all_permutations)
        MOI.set(model, MOI.Silent(), true)
        x = MOI.add_variables(model, 2)
        for xi in x
            MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
        end
        f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
        MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
        MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
        MOI.optimize!(model)
        @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
        @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
        @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    end
    return
end
function test_vector_of_variables_objective()
    # A VectorOfVariables objective must be accepted (via bridges) and the
    # model solved to optimality.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.Lexicographic())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: this comparison previously lacked `@test`, so its Bool result
    # was silently discarded and the assertion never ran.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
function test_warn_all_permutations()
    # Passing `all_permutations` explicitly must emit a warning for either
    # value, while the zero-argument constructor stays silent.
    for flag in (true, false)
        @test_logs (:warn,) MOA.Lexicographic(; all_permutations = flag)
    end
    @test_logs MOA.Lexicographic()
    return
end
end
TestLexicographic.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | code | 20437 | # Copyright 2019, Oscar Dowson and contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v.2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestTambyVanderpooten
using Test
import HiGHS
import MultiObjectiveAlgorithms as MOA
const MOI = MOA.MOI
function run_tests()
    # Discover every `test_`-prefixed function defined in this module and
    # run each inside its own testset.
    defined_names = names(@__MODULE__; all = true)
    for name in filter(n -> startswith("$n", "test_"), defined_names)
        test_function = getfield(@__MODULE__, name)
        @testset "$name" begin
            test_function()
        end
    end
    return
end
"""
    test_knapsack_min_p3()

Solve a tri-objective 10-item knapsack with TambyVanderpooten, minimizing the
negated profits (each objective carries a constant offset of +1), and compare
against the known efficient set `X_E` and nondominated points `Y_N`.
"""
function test_knapsack_min_p3()
    p = 3
    n = 10
    W = 2137.0
    # C[i, j] is the profit of item j under objective i.
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity: w'x ≤ W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    # Minimize -C'x + 1 in each objective; `ones(p)` is the +1 offset.
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:p for j in 1:n
        ],
        ones(p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one item-selection vector per row.
    X_E = Float64[
        1 0 1 1 1 0 1 1 0 1
        0 1 1 1 1 0 1 0 1 1
        1 1 1 1 1 0 0 0 0 1
        0 1 1 1 1 0 0 1 0 1
        1 1 1 1 1 0 0 0 1 0
        1 0 1 1 1 0 0 1 1 0
        0 0 1 1 1 0 1 1 1 1
    ]
    Y_N = Float64[
        -3394 -3817 -3408
        -3042 -4627 -3189
        -2997 -3539 -3509
        -2854 -3570 -3714
        -2706 -3857 -3304
        -2518 -3866 -3191
        -2854 -4636 -3076
    ]
    # Shift by the +1 objective offset added via `ones(p)` above.
    Y_N .+= 1
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
"""
    test_knapsack_max_p3()

Solve the same tri-objective 10-item knapsack as [`test_knapsack_min_p3`](@ref)
with TambyVanderpooten, but as a maximization of the profits, and compare
against the known efficient set `X_E` and nondominated points `Y_N`.
"""
function test_knapsack_max_p3()
    p = 3
    n = 10
    W = 2137.0
    # C[i, j] is the profit of item j under objective i.
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity: w'x ≤ W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one item-selection vector per row.
    X_E = Float64[
        1 0 1 1 1 0 1 1 0 1
        0 1 1 1 1 0 1 0 1 1
        1 1 1 1 1 0 0 0 0 1
        0 1 1 1 1 0 0 1 0 1
        1 1 1 1 1 0 0 0 1 0
        1 0 1 1 1 0 0 1 1 0
        0 0 1 1 1 0 1 1 1 1
    ]
    Y_N = Float64[
        3394 3817 3408
        3042 4627 3189
        2997 3539 3509
        2854 3570 3714
        2706 3857 3304
        2518 3866 3191
        2854 4636 3076
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
"""
    test_knapsack_min_p4()

Solve a four-objective 10-item knapsack with TambyVanderpooten, minimizing
the negated profits, and compare against the known efficient set `X_E` and
nondominated points `Y_N`.
"""
function test_knapsack_min_p4()
    p = 4
    n = 10
    W = 2653.0
    # C[i, j] is the profit of item j under objective i.
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
        557 898 148 63 78 964 246 662 386 272
    ]
    # NOTE: the space-separated literal makes `w` a 1×10 matrix, not a
    # vector; linear indexing `w[j]` still yields the j-th weight.
    w = Float64[979 448 355 955 426 229 9 695 322 889]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity: w'x ≤ W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
            for i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one item-selection vector per row.
    X_E = Float64[
        1 1 1 0 1 1 1 0 0 0
        0 1 1 0 1 1 1 1 1 0
        0 1 1 1 1 1 1 0 0 0
        0 1 1 0 1 0 1 0 1 1
        0 1 1 1 1 0 1 0 1 0
        0 0 1 0 1 1 1 0 1 1
        0 1 1 1 0 1 1 0 1 0
        0 0 1 1 0 1 1 1 1 0
        0 0 1 1 1 1 1 0 1 0
        0 1 0 0 0 1 1 1 1 1
    ]
    Y_N = Float64[
        -3269 -2320 -3059 -2891
        -3152 -3232 -3596 -3382
        -2883 -3237 -2535 -2397
        -2862 -3648 -3049 -2028
        -2725 -4064 -2652 -1819
        -2435 -3618 -2282 -2094
        -2092 -3244 -2643 -2705
        -1904 -3253 -2530 -2469
        -2298 -4034 -1885 -1885
        -2146 -1944 -2947 -3428
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
"""
    test_knapsack_max_p4()

Solve the same four-objective 10-item knapsack as
[`test_knapsack_min_p4`](@ref) with TambyVanderpooten, but as a maximization
of the profits, and compare against the known efficient set `X_E` and
nondominated points `Y_N`.
"""
function test_knapsack_max_p4()
    p = 4
    n = 10
    W = 2653.0
    # C[i, j] is the profit of item j under objective i.
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
        557 898 148 63 78 964 246 662 386 272
    ]
    # NOTE: the space-separated literal makes `w` a 1×10 matrix, not a
    # vector; linear indexing `w[j]` still yields the j-th weight.
    w = Float64[979 448 355 955 426 229 9 695 322 889]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Knapsack capacity: w'x ≤ W.
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n],
            0.0,
        ),
        MOI.LessThan(W),
    )
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(C[i, j], x[j])) for
            i in 1:p for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one item-selection vector per row.
    X_E = Float64[
        1 1 1 0 1 1 1 0 0 0
        0 1 1 0 1 1 1 1 1 0
        0 1 1 1 1 1 1 0 0 0
        0 1 1 0 1 0 1 0 1 1
        0 1 1 1 1 0 1 0 1 0
        0 0 1 0 1 1 1 0 1 1
        0 1 1 1 0 1 1 0 1 0
        0 0 1 1 0 1 1 1 1 0
        0 0 1 1 1 1 1 0 1 0
        0 1 0 0 0 1 1 1 1 1
    ]
    Y_N = Float64[
        3269 2320 3059 2891
        3152 3232 3596 3382
        2883 3237 2535 2397
        2862 3648 3049 2028
        2725 4064 2652 1819
        2435 3618 2282 2094
        2092 3244 2643 2705
        1904 3253 2530 2469
        2298 4034 1885 1885
        2146 1944 2947 3428
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol = hcat([MOI.get(model, MOI.VariablePrimal(i), x) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
"""
    test_assignment_min_p3()

Solve a tri-objective 5×5 assignment problem with TambyVanderpooten,
minimizing the costs, and compare all returned solutions against the known
efficient set `X_E` and nondominated point set `Y_N`.
"""
function test_assignment_min_p3()
    # p objectives, n workers/tasks.
    p = 3
    n = 5
    # Raw cost data: n * p rows of n columns; reshaped below so that
    # C[k, i, j] is the cost of assigning worker i to task j in objective k.
    C = Float64[
        6 1 20 2 3
        2 6 9 10 18
        1 6 20 5 9
        6 8 6 9 6
        7 10 10 6 2
        17 20 8 8 20
        10 13 1 10 15
        4 11 1 13 1
        19 13 7 18 17
        15 3 5 1 11
        10 7 1 19 12
        2 15 12 10 3
        11 20 16 12 9
        10 15 20 11 7
        1 9 20 7 6
    ]
    C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    # x[i, j] = 1 iff worker i is assigned to task j.
    x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Each worker does exactly one task.
    for i in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Each task is done by exactly one worker.
    for j in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one flattened 25-element assignment per row.
    X_E = Float64[
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
        0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
        0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
    ]
    Y_N = Float64[
        16 61 47
        17 43 71
        18 47 67
        20 52 54
        22 37 63
        23 43 44
        22 54 47
        28 66 39
        34 60 42
        24 39 45
        35 49 39
        37 55 36
        28 33 58
        35 38 56
        39 43 41
        38 33 53
        45 33 34
        43 51 31
        40 47 37
        29 29 59
        50 40 32
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol =
        hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
"""
    test_assignment_max_p3()

Solve the same tri-objective 5×5 assignment problem as
[`test_assignment_min_p3`](@ref) with TambyVanderpooten, but maximizing the
negated costs, and compare against the known efficient set `X_E` and
nondominated points `Y_N`.
"""
function test_assignment_max_p3()
    # p objectives, n workers/tasks.
    p = 3
    n = 5
    # Raw cost data: n * p rows of n columns; reshaped below so that
    # C[k, i, j] is the cost of assigning worker i to task j in objective k.
    C = Float64[
        6 1 20 2 3
        2 6 9 10 18
        1 6 20 5 9
        6 8 6 9 6
        7 10 10 6 2
        17 20 8 8 20
        10 13 1 10 15
        4 11 1 13 1
        19 13 7 18 17
        15 3 5 1 11
        10 7 1 19 12
        2 15 12 10 3
        11 20 16 12 9
        10 15 20 11 7
        1 9 20 7 6
    ]
    C = permutedims(reshape(C, (n, p, n)), [2, 1, 3])
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    # x[i, j] = 1 iff worker i is assigned to task j.
    x = [MOI.add_variable(model) for i in 1:n, j in 1:n]
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    # Each worker does exactly one task.
    for i in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for j in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Each task is done by exactly one worker.
    for j in 1:n
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, x[i, j]) for i in 1:n],
                0.0,
            ),
            MOI.EqualTo(1.0),
        )
    end
    # Negated costs: maximizing -C is equivalent to minimizing C.
    f = MOI.VectorAffineFunction(
        [
            MOI.VectorAffineTerm(k, MOI.ScalarAffineTerm(-C[k, i, j], x[i, j])) for k in 1:p for i in 1:n for j in 1:n
        ],
        fill(0.0, p),
    )
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    # Known efficient solutions, one flattened 25-element assignment per row.
    X_E = Float64[
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1
        0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1
        0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 1
        0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0
        0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 0
        0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 0
        0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0
        0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0
        0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0
    ]
    # Known nondominated points (negated because the objective is -C).
    Y_N = Float64[
        -16 -61 -47
        -17 -43 -71
        -18 -47 -67
        -20 -52 -54
        -22 -37 -63
        -23 -43 -44
        -22 -54 -47
        -28 -66 -39
        -34 -60 -42
        -24 -39 -45
        -35 -49 -39
        -37 -55 -36
        -28 -33 -58
        -35 -38 -56
        -39 -43 -41
        -38 -33 -53
        -45 -33 -34
        -43 -51 -31
        -40 -47 -37
        -29 -29 -59
        -50 -40 -32
    ]
    N = MOI.get(model, MOI.ResultCount())
    x_sol =
        hcat([MOI.get(model, MOI.VariablePrimal(i), vec(x)) for i in 1:N]...)'
    y_sol = hcat([MOI.get(model, MOI.ObjectiveValue(i)) for i in 1:N]...)'
    # Sort rows for order-independent comparison. Both right-hand sides are
    # evaluated before assignment, so both use the same permutation of the
    # original `y_sol` (and `Y_N`, respectively).
    y_sol, x_sol = y_sol[sortperm(collect(eachrow(y_sol))), :],
    x_sol[sortperm(collect(eachrow(y_sol))), :]
    Y_N, X_E = Y_N[sortperm(collect(eachrow(Y_N))), :],
    X_E[sortperm(collect(eachrow(Y_N))), :]
    @test isapprox(x_sol, X_E; atol = 1e-6)
    @test isapprox(y_sol, Y_N; atol = 1e-6)
    return
end
function test_infeasible()
    # An infeasible instance must terminate with INFEASIBLE and report no
    # primal or dual solution.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    # x₁ + x₂ ≤ -1 contradicts the zero lower bounds, so no feasible point
    # exists.
    MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_unbounded()
    # Maximizing unbounded-above objectives must terminate with
    # DUAL_INFEASIBLE and report no solutions.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_no_bounding_box()
    # The feasible region has no finite upper bounds, so the algorithm
    # cannot construct its bounding box: it must emit a warning and
    # terminate with DUAL_INFEASIBLE.
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    for xi in x
        MOI.add_constraint(model, xi, MOI.GreaterThan(0.0))
    end
    f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    @test_logs (:warn,) MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.DUAL_INFEASIBLE
    @test MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION
    @test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
    return
end
function test_time_limit()
    # With a time limit of zero seconds, the solve must stop immediately
    # with TIME_LIMIT and return no results.
    p = 3
    n = 10
    W = 2137.0
    C = Float64[
        566 611 506 180 817 184 585 423 26 317
        62 84 977 979 874 54 269 93 881 563
        664 982 962 140 224 215 12 869 332 537
    ]
    w = Float64[557, 898, 148, 63, 78, 964, 246, 662, 386, 272]
    model = MOA.Optimizer(HiGHS.Optimizer)
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.TimeLimitSec(), 0.0)
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, n)
    for xj in x
        MOI.add_constraint(model, xj, MOI.ZeroOne())
    end
    # Knapsack capacity: w'x ≤ W.
    knapsack_terms = [MOI.ScalarAffineTerm(w[j], x[j]) for j in 1:n]
    MOI.add_constraint(
        model,
        MOI.ScalarAffineFunction(knapsack_terms, 0.0),
        MOI.LessThan(W),
    )
    # Objective i minimizes -C[i, :]'x.
    objective_terms = [
        MOI.VectorAffineTerm(i, MOI.ScalarAffineTerm(-C[i, j], x[j]))
        for i in 1:p for j in 1:n
    ]
    f = MOI.VectorAffineFunction(objective_terms, zeros(p))
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.optimize!(model)
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.TIME_LIMIT
    @test MOI.get(model, MOI.ResultCount()) == 0
    return
end
function test_vector_of_variables_objective()
    # A VectorOfVariables objective must be accepted (via bridges) and the
    # model solved to optimality.
    model = MOI.instantiate(; with_bridge_type = Float64) do
        return MOA.Optimizer(HiGHS.Optimizer)
    end
    MOI.set(model, MOA.Algorithm(), MOA.TambyVanderpooten())
    MOI.set(model, MOI.Silent(), true)
    x = MOI.add_variables(model, 2)
    MOI.add_constraint.(model, x, MOI.ZeroOne())
    f = MOI.VectorOfVariables(x)
    MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
    MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
    MOI.optimize!(model)
    # Bug fix: this comparison previously lacked `@test`, so its Bool result
    # was silently discarded and the assertion never ran.
    @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
    return
end
end
TestTambyVanderpooten.run_tests()
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"MPL-2.0"
] | 1.3.3 | 008b44e2b491b0af88a9d2fa3e625cb4d77a85b7 | docs | 2791 | <img src="https://raw.githubusercontent.com/jump-dev/MultiObjectiveAlgorithms.jl/master/moa.png" alt="An image of the Moa bird. Licensed into the Public Domain by https://freesvg.org/moa" width="100px"/>
# MultiObjectiveAlgorithms.jl
[](https://github.com/jump-dev/MultiObjectiveAlgorithms.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/jump-dev/MultiObjectiveAlgorithms.jl)
[MultiObjectiveAlgorithms.jl](https://github.com/jump-dev/MultiObjectiveAlgorithms.jl)
(MOA) is a collection of algorithms for multi-objective optimization.
## License
`MultiObjectiveAlgorithms.jl` is licensed under the [MPL 2.0 License](https://github.com/jump-dev/MultiObjectiveAlgorithms.jl/blob/master/LICENSE.md).
## Getting help
If you need help, please ask a question on the [JuMP community forum](https://jump.dev/forum).
If you have a reproducible example of a bug, please [open a GitHub issue](https://github.com/jump-dev/MultiObjectiveAlgorithms.jl/issues/new).
## Installation
Install MOA using `Pkg.add`:
```julia
import Pkg
Pkg.add("MultiObjectiveAlgorithms")
```
## Use with JuMP
Use `MultiObjectiveAlgorithms` with JuMP as follows:
```julia
using JuMP
import HiGHS
import MultiObjectiveAlgorithms as MOA
model = JuMP.Model(() -> MOA.Optimizer(HiGHS.Optimizer))
set_attribute(model, MOA.Algorithm(), MOA.Dichotomy())
set_attribute(model, MOA.SolutionLimit(), 4)
```
Replace `HiGHS.Optimizer` with an optimizer capable of solving a
single-objective instance of your optimization problem.
You may set additional optimizer attributes, the supported attributes depend on
the choice of solution algorithm.
## Algorithm
Set the algorithm using the `MOA.Algorithm()` attribute.
The value must be one of the algorithms supported by MOA:
* `MOA.Chalmet()`
* `MOA.Dichotomy()`
* `MOA.DominguezRios()`
* `MOA.EpsilonConstraint()`
* `MOA.Hierarchical()`
* `MOA.KirlikSayin()`
* `MOA.Lexicographic()` [default]
* `MOA.TambyVanderpooten()`
Consult their docstrings for details.
## Other optimizer attributes
There are a number of optimizer attributes supported by the algorithms in MOA.
Each algorithm supports only a subset of the attributes. Consult the algorithm's
docstring for details on which attributes it supports, and how it uses them in
the solution process.
* `MOA.EpsilonConstraintStep()`
* `MOA.LexicographicAllPermutations()`
* `MOA.ObjectiveAbsoluteTolerance(index::Int)`
* `MOA.ObjectivePriority(index::Int)`
* `MOA.ObjectiveRelativeTolerance(index::Int)`
* `MOA.ObjectiveWeight(index::Int)`
* `MOA.SolutionLimit()`
* `MOI.TimeLimitSec()`
| MultiObjectiveAlgorithms | https://github.com/jump-dev/MultiObjectiveAlgorithms.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 605 | using AtomicData
# Build (and, on CI, deploy) the documentation for AtomicData.jl with
# Documenter.jl. Assumes AtomicData is already loaded/importable.
using Documenter

# Make `using AtomicData` implicit in every doctest of the package, so
# doctest blocks do not need to repeat the import.
DocMeta.setdocmeta!(AtomicData, :DocTestSetup, :(using AtomicData); recursive=true)

# Generate the HTML documentation.
makedocs(;
    modules=[AtomicData],
    authors="Tiago M. D. Pereira",
    repo="https://github.com/tiagopereira/AtomicData.jl/blob/{commit}{path}#{line}",
    sitename="AtomicData.jl",
    format=Documenter.HTML(;
        # Pretty (directory-style) URLs only on CI; locally, plain .html
        # files are easier to browse from disk.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://tiagopereira.github.io/AtomicData.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)

# Push the built docs to the gh-pages branch of the repository.
deploydocs(;
    repo="github.com/tiagopereira/AtomicData.jl",
)
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 542 | module AtomicData
# Public API of the package.
export AtomicStage
export get_solar_abundances
export get_atomic_stage, read_NIST
export partition_function, partition_function_interpolator

# Third-party and stdlib dependencies.
using DelimitedFiles
using Interpolations
using PeriodicTable
using Unitful
using YAML
# Physical constants used throughout the package.
import PhysicalConstants.CODATA2018: h, k_B, c_0, R_∞, m_e, m_u

# Dimension alias for inverse length quantities (Unitful.jl).
@derived_dimension PerLength Unitful.𝐋^-1

# Chemical symbols of all elements, taken from PeriodicTable.jl.
const element_symbols = [el.symbol for el in elements]

# Package source files.
include("types.jl")
include("read_utils.jl")
include("partition_function.jl")
include("abundances.jl")
end
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 1018 | """
Tools to read and handle astrophysical element abundances.
"""
"""
    get_solar_abundances(; source="AAG2021")

Return a dictionary with element names and their solar photospheric
abundances, read from one of several literature compilations.

Supported values for `source` (with accepted aliases):
* `"AAG2021"` (also `"Asplund_et_al2021"`, `"Asplund2021"`),
  from Asplund, Amarsi, & Grevesse 2021, A&A, 653, A141
* `"AGSS2009"` (also `"Asplund_et_al2009"`, `"Asplund2009"`),
  from Asplund, Grevesse, Sauval, & Scott 2009, ARA&A, 47, 481
* `"GS1998"` (also `"Grevesse_Sauval1998"`),
  from Grevesse & Sauval 1998, Space Science Reviews, 85, 161-174

Throws an error for any other value of `source`.
"""
function get_solar_abundances(;source="AAG2021")
    if source in ["AAG2021", "Asplund_et_al2021", "Asplund2021"]
        file = "Asplund_et_al2021.yaml"
    elseif source in ["AGSS2009", "Asplund_et_al2009", "Asplund2009"]
        file = "Asplund_et_al2009.yaml"
    elseif source in ["GS1998", "Grevesse_Sauval1998"]
        file = "Grevesse_Sauval1998.yaml"
    else
        error("Unknown source $source")
    end
    # Abundance tables ship with the package under data/solar_abundances/.
    filepath = joinpath(@__DIR__, "..", "data", "solar_abundances", file)
    read_abundances(filepath)
end
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.