licenses | version | tree_hash | path | type | size | text | package_name | repo
---|---|---|---|---|---|---|---|---
["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 12778 |
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
const _RULE = "-------------------------------------------------------------------"
function print_helper(f, io, args...)
f(stdout, args...)
return f(io, args...)
end
function print_banner(io)
println(io, _RULE)
println(io, " SDDP.jl (c) Oscar Dowson and contributors, 2017-24")
println(io, _RULE)
return
end
function _unique_paths(model::PolicyGraph{T}) where {T}
if is_cyclic(model)
return Inf
end
parents = Dict{T,Set{T}}(t => Set{T}() for t in keys(model.nodes))
children = Dict{T,Set{T}}(t => Set{T}() for t in keys(model.nodes))
for (t, node) in model.nodes
for child in node.children
if child.probability > 0
push!(parents[child.term], t)
push!(children[t], child.term)
end
end
end
ordered = T[]
in_order = Dict{T,Bool}(t => false for t in keys(model.nodes))
stack = Tuple{T,Bool}[]
for root_child in model.root_children
if iszero(root_child.probability) || in_order[root_child.term]
continue
end
push!(stack, (root_child.term, true))
while !isempty(stack)
node, needs_checking = pop!(stack)
if !needs_checking
push!(ordered, node)
in_order[node] = true
continue
elseif in_order[node]
continue
end
push!(stack, (node, false))
for child in children[node]
if !in_order[child]
push!(stack, (child, true))
end
end
end
end
total_scenarios = 0.0
incoming_scenarios = Dict{T,Float64}(t => 0.0 for t in keys(model.nodes))
for node in reverse!(ordered)
N = length(model[node].noise_terms)
if length(parents[node]) == 0 # Must come from the root node.
incoming_scenarios[node] = N
else
incoming_scenarios[node] =
N * sum(incoming_scenarios[p] for p in parents[node])
end
if length(children[node]) == 0 # It's a leaf!
total_scenarios += incoming_scenarios[node]
end
end
return total_scenarios
end
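# Editorial sketch, not part of the library source: for a linear policy graph
# the count above reduces to a product over stages. Assuming a model built with
# `LinearPolicyGraph(; stages = 3, ...)` in which every node calls
# `SDDP.parameterize` with 2 realizations, `_unique_paths(model)` returns
# 2 * 2 * 2 = 8.0: each node multiplies its own noise count by the scenarios of
# its parents, and only leaf nodes contribute to the total.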
function _merge_tuple(x, y)
if x == (-1, -1)
return (y, y)
elseif y < x[1]
return (y, x[2])
elseif y > x[2]
return (x[1], y)
else
return x
end
end
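# Editorial note, not in the original source: `_merge_tuple` maintains a
# running (min, max) pair, starting from the sentinel `(-1, -1)`. For example,
# `_merge_tuple((-1, -1), 5)` returns `(5, 5)` and `_merge_tuple((5, 5), 2)`
# returns `(2, 5)`.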
_constraint_key(F, S) = replace("$(F) in $(S)", "MathOptInterface" => "MOI")
function print_problem_statistics(
io::IO,
model::PolicyGraph,
existing_cuts::Bool,
parallel_scheme,
risk_measure,
sampling_scheme,
)
constraint_types = Dict{String,Tuple{Int,Int}}()
variables = (-1, -1)
for (_, node) in model.nodes
variables = _merge_tuple(variables, JuMP.num_variables(node.subproblem))
for (F, S) in JuMP.list_of_constraint_types(node.subproblem)
key = _constraint_key(F, S)
num_con = get(constraint_types, key, (-1, -1))
constraint_types[key] = _merge_tuple(
num_con,
JuMP.num_constraints(node.subproblem, F, S),
)
end
end
pad = maximum(length(k) for k in keys(constraint_types))
println(io, "problem")
println(io, " nodes : ", length(model.nodes))
println(io, " state variables : ", length(model.initial_root_state))
paths = Printf.@sprintf("%1.5e", _unique_paths(model))
println(io, " scenarios : ", paths)
println(io, " existing cuts : ", existing_cuts)
println(io, "options")
println(io, " solver : ", parallel_scheme)
println(io, " risk measure : ", risk_measure)
println(io, " sampling scheme : ", typeof(sampling_scheme))
println(io, "subproblem structure")
a, b = variables
println(io, " ", rpad("VariableRef", pad), " : [", a, ", ", b, "]")
for k in sort!(collect(keys(constraint_types)))
F, S = constraint_types[k]
println(io, " ", rpad(k, pad), " : [", F, ", ", S, "]")
end
return
end
function print_iteration_header(io)
println(io, _RULE)
println(
io,
" iteration simulation bound time (s) solves pid",
)
println(io, _RULE)
return
end
print_value(x::Real) = lpad(Printf.@sprintf("%1.6e", x), 13)
print_value(x::Int) = Printf.@sprintf("%9d", x)
print_value3(x::Int) = Printf.@sprintf("%3d", x)
function print_iteration(io, log::Log)
print(io, log.serious_numerical_issue ? "†" : " ")
print(io, print_value(log.iteration))
print(io, log.duality_key)
print(io, " ", print_value(log.simulation_value))
print(io, " ", print_value(log.bound))
print(io, " ", print_value(log.time))
print(io, " ", print_value(log.total_solves))
print(io, " ", print_value3(log.pid))
println(io)
return
end
function print_footer(io, training_results::TrainingResults)
println(io, _RULE)
println(io, "status : ", training_results.status)
println(io, "total time (s) :", print_value(training_results.log[end].time))
println(io, "total solves : ", training_results.log[end].total_solves)
println(
io,
"best bound : ",
print_value(training_results.log[end].bound),
)
μ, σ =
confidence_interval(map(l -> l.simulation_value, training_results.log))
println(io, "simulation ci : ", print_value(μ), " ±", print_value(σ))
num_issues = sum(l -> l.serious_numerical_issue, training_results.log)
println(io, "numeric issues : ", num_issues)
println(io, _RULE)
println(io)
return
end
"""
confidence_interval(x::Vector{Float64}, z_score::Float64 = 1.96)
Return a confidence interval of `x` corresponding to the `z_score`.
`z_score` defaults to `1.96` for a 95% confidence interval.
"""
function confidence_interval(x::Vector{Float64}, z_score::Float64 = 1.96)
μ = Statistics.mean(x)
σ = z_score * Statistics.std(x) / sqrt(length(x))
return μ, σ
end
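# Hedged usage sketch with assumed data, not part of the library source:
#
#   μ, σ = SDDP.confidence_interval([1.0, 2.0, 3.0, 4.0])
#
# gives μ = 2.5 and σ = 1.96 * std([1.0, 2.0, 3.0, 4.0]) / sqrt(4) ≈ 1.265,
# i.e., a 95% interval of roughly 2.5 ± 1.27.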
###
### Numerical stability checks
###
struct CoefficientRanges
matrix::Vector{Float64}
objective::Vector{Float64}
bounds::Vector{Float64}
rhs::Vector{Float64}
function CoefficientRanges()
return new([Inf, -Inf], [Inf, -Inf], [Inf, -Inf], [Inf, -Inf])
end
end
function _merge(x::Vector{Float64}, y::Vector{Float64})
x[1] = min(x[1], y[1])
x[2] = max(x[2], y[2])
return
end
function _merge(x::CoefficientRanges, y::CoefficientRanges)
_merge(x.matrix, y.matrix)
_merge(x.objective, y.objective)
_merge(x.bounds, y.bounds)
_merge(x.rhs, y.rhs)
return
end
function _stringify_bounds(bounds::Vector{Float64})
lower = bounds[1] < Inf ? _print_value(bounds[1]) : "0e+00"
upper = bounds[2] > -Inf ? _print_value(bounds[2]) : "0e+00"
return string("[", lower, ", ", upper, "]")
end
function _print_numerical_stability_report(
io::IO,
ranges::CoefficientRanges,
print::Bool,
warn::Bool,
)
warnings = Tuple{String,String}[]
_print_coefficients(io, "matrix", ranges.matrix, print, warnings)
_print_coefficients(io, "objective", ranges.objective, print, warnings)
_print_coefficients(io, "bounds", ranges.bounds, print, warnings)
_print_coefficients(io, "rhs", ranges.rhs, print, warnings)
if warn && !isempty(warnings)
println(io, "WARNING: numerical stability issues detected")
for (name, sense) in warnings
println(io, " - $(name) range contains $(sense) coefficients")
end
println(
io,
"Very large or small absolute values of coefficients\n",
"can cause numerical stability issues. Consider\n",
"reformulating the model.",
)
end
return
end
function _print_coefficients(
io::IO,
name::String,
range,
print::Bool,
warnings::Vector{Tuple{String,String}},
)
if print
println(
io,
" ",
rpad(string(name, " range"), 17),
_stringify_bounds(range),
)
end
if range[1] < 1e-4
push!(warnings, (name, "small"))
end
if range[2] > 1e7
push!(warnings, (name, "large"))
end
return
end
_print_value(x::Real) = Printf.@sprintf("%1.0e", x)
function _update_range(range::Vector{Float64}, value::Real)
if !(value ≈ 0.0)
range[1] = min(range[1], abs(value))
range[2] = max(range[2], abs(value))
end
return
end
function _update_range(range::Vector{Float64}, func::JuMP.GenericAffExpr)
for coefficient in values(func.terms)
_update_range(range, coefficient)
end
return
end
function _update_range(range::Vector{Float64}, func::MOI.LessThan)
_update_range(range, func.upper)
return
end
function _update_range(range::Vector{Float64}, func::MOI.GreaterThan)
_update_range(range, func.lower)
return
end
function _update_range(range::Vector{Float64}, func::MOI.EqualTo)
_update_range(range, func.value)
return
end
function _update_range(range::Vector{Float64}, func::MOI.Interval)
_update_range(range, func.upper)
_update_range(range, func.lower)
return
end
# Default fallback for unsupported constraints.
_update_range(range::Vector{Float64}, x) = nothing
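# Editorial sketch, not part of the library source: `_update_range` widens a
# `[smallest |coefficient|, largest |coefficient|]` pair in place, ignoring
# values that are (approximately) zero. For example:
#
#   range = [Inf, -Inf]
#   _update_range(range, 2.0)    # range is now [2.0, 2.0]
#   _update_range(range, -1e-3)  # range is now [0.001, 2.0]
#   _update_range(range, 0.0)    # unchanged; zero coefficients are skipped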
function _coefficient_ranges(model::JuMP.Model)
ranges = CoefficientRanges()
_update_range(ranges.objective, JuMP.objective_function(model))
for var in JuMP.all_variables(model)
if JuMP.has_lower_bound(var)
_update_range(ranges.bounds, JuMP.lower_bound(var))
end
if JuMP.has_upper_bound(var)
_update_range(ranges.bounds, JuMP.upper_bound(var))
end
end
for (F, S) in JuMP.list_of_constraint_types(model)
F == JuMP.VariableRef && continue
for con in JuMP.all_constraints(model, F, S)
con_obj = JuMP.constraint_object(con)
_update_range(ranges.matrix, con_obj.func)
_update_range(ranges.rhs, con_obj.set)
end
end
return ranges
end
"""
numerical_stability_report(
[io::IO = stdout,]
model::PolicyGraph;
by_node::Bool = false,
print::Bool = true,
warn::Bool = true,
)
Print a report identifying possible numeric stability issues.
## Keyword arguments
- If `by_node`, print a report for each node in the graph.
- If `print`, print to `io`.
- If `warn`, warn if the coefficients may cause numerical issues.
"""
function numerical_stability_report(
io::IO,
model::PolicyGraph;
by_node::Bool = false,
print::Bool = true,
warn::Bool = true,
)
graph_ranges = CoefficientRanges()
node_keys = sort_nodes(collect(keys(model.nodes)))
for key in node_keys
node = model[key]
node_ranges = CoefficientRanges()
for noise in node.noise_terms
parameterize(node, noise.term)
node_ranges_2 = _coefficient_ranges(node.subproblem)
_merge(node_ranges, node_ranges_2)
end
if by_node
print && println(io, "numerical stability report for node: ", key)
_print_numerical_stability_report(io, node_ranges, print, warn)
end
_merge(graph_ranges, node_ranges)
end
if !by_node
print && println(io, "numerical stability report")
_print_numerical_stability_report(io, graph_ranges, print, warn)
end
return
end
function numerical_stability_report(model::PolicyGraph; kwargs...)
return numerical_stability_report(stdout, model; kwargs...)
end
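# Hedged usage sketch, not part of the library source. Assuming `model` is an
# `SDDP.PolicyGraph`:
#
#   SDDP.numerical_stability_report(model)
#   SDDP.numerical_stability_report(model; by_node = true, warn = false)
#
# The first call prints a single graph-wide coefficient-range table; the second
# prints one table per node and suppresses the warning block.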
###
### Machine readable log
###
"""
write_log_to_csv(model::PolicyGraph, filename::String)
Write the log of the most recent training to a csv for post-analysis.
Assumes that the model has been trained via [`SDDP.train`](@ref).
"""
function write_log_to_csv(model::PolicyGraph, filename::String)
if model.most_recent_training_results === nothing
error(
"Unable to write the log to file because the model has not " *
"been trained.",
)
end
open(filename, "w") do io
println(io, "iteration, simulation, bound, time")
for log in model.most_recent_training_results.log
println(
io,
log.iteration,
", ",
log.simulation_value,
", ",
log.bound,
", ",
log.time,
)
end
end
return
end
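# Hedged usage sketch; the file name is illustrative and not part of the
# library source. After `SDDP.train(model)` has populated
# `model.most_recent_training_results`, the log can be exported with
#
#   SDDP.write_log_to_csv(model, "training_log.csv")
#
# which writes one `iteration, simulation, bound, time` row per iteration.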
| SDDP | https://github.com/odow/SDDP.jl.git |
["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 40035 |
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
struct Graph{T}
# The root node of the policy graph.
root_node::T
# nodes[x] returns a vector of the children of node x and their
# probabilities.
nodes::Dict{T,Vector{Tuple{T,Float64}}}
# A partition of the nodes into ambiguity sets.
belief_partition::Vector{Vector{T}}
belief_lipschitz::Vector{Vector{Float64}}
end
"""
Graph(root_node::T) where T
Create an empty graph structure with the root node `root_node`.
## Example
```jldoctest
julia> graph = SDDP.Graph(0)
Root
0
Nodes
{}
Arcs
{}
julia> graph = SDDP.Graph(:root)
Root
root
Nodes
{}
Arcs
{}
julia> graph = SDDP.Graph((0, 0))
Root
(0, 0)
Nodes
{}
Arcs
{}
```
"""
function Graph(root_node::T) where {T}
return Graph{T}(
root_node,
Dict{T,Vector{Tuple{T,Float64}}}(root_node => Tuple{T,Float64}[]),
Vector{T}[],
Vector{Float64}[],
)
end
# Helper utilities to sort the nodes for printing. This helps linear and
# Markovian policy graphs where the nodes might be stored in an unusual ordering
# in the dictionary.
sort_nodes(nodes::Vector{Int}) = sort!(nodes)
sort_nodes(nodes::Vector{Tuple{Int,Int}}) = sort!(nodes)
sort_nodes(nodes::Vector{Tuple{Int,Float64}}) = sort!(nodes)
sort_nodes(nodes::Vector{Symbol}) = sort!(nodes)
sort_nodes(nodes) = nodes
function Base.show(io::IO, graph::Graph)
println(io, "Root")
println(io, " ", graph.root_node)
println(io, "Nodes")
nodes = sort_nodes(collect(keys(graph.nodes)))
if first(nodes) != graph.root_node
splice!(nodes, findfirst(isequal(graph.root_node), nodes))
prepend!(nodes, [graph.root_node])
end
tree_nodes = filter(n -> n != graph.root_node, nodes)
if isempty(tree_nodes)
println(io, " {}")
else
for node in tree_nodes
println(io, " ", node)
end
end
print(io, "Arcs")
has_arc = false
for node in nodes
for (child, probability) in graph.nodes[node]
print(io, "\n ", node, " => ", child, " w.p. ", probability)
has_arc = true
end
end
if !has_arc
print(io, "\n {}")
end
if length(graph.belief_partition) > 0
print(io, "\nPartitions")
for element in graph.belief_partition
print(io, "\n {", join(string.(sort_nodes(element)), ", "), "}")
end
end
return
end
# Internal function used to validate the structure of a graph
function _validate_graph(graph::Graph)
for (node, children) in graph.nodes
if length(children) > 0
probability = sum(child[2] for child in children)
if !(-1e-8 <= probability <= 1.0 + 1e-8)
error(
"Probability on edges leaving node $(node) sum to " *
"$(probability), but this must be in [0.0, 1.0]",
)
end
end
end
if length(graph.belief_partition) > 0
# The -1 accounts for the root node, which shouldn't be in the
# partition.
if graph.root_node in union(graph.belief_partition...)
error(
"Belief partition $(graph.belief_partition) cannot contain " *
"the root node $(graph.root_node).",
)
end
if length(graph.nodes) - 1 != length(union(graph.belief_partition...))
error(
"Belief partition $(graph.belief_partition) does not form a" *
" valid partition of the nodes in the graph.",
)
end
end
return
end
"""
add_node(graph::Graph{T}, node::T) where {T}
Add a node to the graph `graph`.
## Examples
```jldoctest
julia> graph = SDDP.Graph(:root);
julia> SDDP.add_node(graph, :A)
julia> graph
Root
root
Nodes
A
Arcs
{}
```
```jldoctest
julia> graph = SDDP.Graph(0);
julia> SDDP.add_node(graph, 2)
julia> graph
Root
0
Nodes
2
Arcs
{}
```
"""
function add_node(graph::Graph{T}, node::T) where {T}
if haskey(graph.nodes, node) || node == graph.root_node
error("Node $(node) already exists!")
end
graph.nodes[node] = Tuple{T,Float64}[]
return
end
function add_node(graph::Graph{T}, node) where {T}
return error("Unable to add node $(node). Nodes must be of type $(T).")
end
function _add_node_if_missing(graph::Graph{T}, node::T) where {T}
if haskey(graph.nodes, node) || node == graph.root_node
return
end
return add_node(graph, node)
end
"""
add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}
Add an edge to the graph `graph`.
## Examples
```jldoctest
julia> graph = SDDP.Graph(0);
julia> SDDP.add_node(graph, 1)
julia> SDDP.add_edge(graph, 0 => 1, 0.9)
julia> graph
Root
0
Nodes
1
Arcs
0 => 1 w.p. 0.9
```
```jldoctest
julia> graph = SDDP.Graph(:root);
julia> SDDP.add_node(graph, :A)
julia> SDDP.add_edge(graph, :root => :A, 1.0)
julia> graph
Root
root
Nodes
A
Arcs
root => A w.p. 1.0
```
"""
function add_edge(
graph::Graph{T},
edge::Pair{T,T},
probability::Float64,
) where {T}
(parent, child) = edge
if !(parent == graph.root_node || haskey(graph.nodes, parent))
error("Node $(parent) does not exist.")
elseif !haskey(graph.nodes, child)
error("Node $(child) does not exist.")
elseif child == graph.root_node
error("Cannot have an edge entering the root node.")
else
push!(graph.nodes[parent], (child, probability))
end
return
end
function _add_to_or_create_edge(
graph::Graph{T},
edge::Pair{T,T},
probability::Float64,
) where {T}
for (i, (child, p)) in enumerate(graph.nodes[edge[1]])
if child == edge[2]
graph.nodes[edge[1]][i] = (edge[2], p + probability)
return
end
end
return add_edge(graph, edge, probability)
end
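# Editorial note, not in the original source: unlike `add_edge`,
# `_add_to_or_create_edge` accumulates probability on a repeated edge. Calling
# it twice with `0 => 1` and probabilities 0.3 and 0.2 leaves a single arc
# `0 => 1 w.p. 0.5` instead of two parallel arcs.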
"""
add_ambiguity_set(
graph::Graph{T},
set::Vector{T},
lipschitz::Vector{Float64},
) where {T}
Add `set` to the belief partition of `graph`.
`lipschitz` is a vector of Lipschitz constants, with one element for each node
in `set`. The Lipschitz constant is the maximum slope of the cost-to-go function
with respect to the belief state associated with each node at any point in the
state-space.
## Examples
```julia
julia> graph = SDDP.LinearGraph(3)
Root
0
Nodes
1
2
3
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
julia> SDDP.add_ambiguity_set(graph, [1, 2], [1e3, 1e2])
julia> SDDP.add_ambiguity_set(graph, [3], [1e5])
julia> graph
Root
0
Nodes
1
2
3
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
Partitions
{1, 2}
{3}
```
"""
function add_ambiguity_set(
graph::Graph{T},
set::Vector{T},
lipschitz::Vector{Float64},
) where {T}
if any(l -> l < 0.0, lipschitz)
error("Cannot provide negative Lipschitz constant: $(lipschitz)")
elseif length(set) != length(lipschitz)
error(
"You must provide on Lipschitz contsant for every element in " *
"the ambiguity set.",
)
end
push!(graph.belief_partition, set)
push!(graph.belief_lipschitz, lipschitz)
return
end
"""
add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)
Add `set` to the belief partition of `graph`.
`lipschitz` is a Lipschitz constant for each node in `set`. The Lipschitz
constant is the maximum slope of the cost-to-go function with respect to the
belief state associated with each node at any point in the state-space.
## Examples
```julia
julia> graph = SDDP.LinearGraph(3);
julia> SDDP.add_ambiguity_set(graph, [1, 2], 1e3)
julia> SDDP.add_ambiguity_set(graph, [3], 1e5)
julia> graph
Root
0
Nodes
1
2
3
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
Partitions
{1, 2}
{3}
```
"""
function add_ambiguity_set(
graph::Graph{T},
set::Vector{T},
lipschitz::Float64 = 1e5,
) where {T}
return add_ambiguity_set(graph, set, fill(lipschitz, length(set)))
end
function Graph(
root_node::T,
nodes::Vector{T},
edges::Vector{Tuple{Pair{T,T},Float64}};
belief_partition::Vector{Vector{T}} = Vector{T}[],
belief_lipschitz::Vector{Vector{Float64}} = Vector{Float64}[],
) where {T}
graph = Graph(root_node)
add_node.(Ref(graph), nodes)
for (edge, probability) in edges
add_edge(graph, edge, probability)
end
add_ambiguity_set.(Ref(graph), belief_partition, belief_lipschitz)
return graph
end
"""
LinearGraph(stages::Int)
Create a linear graph with `stages` number of nodes.
## Examples
```jldoctest
julia> graph = SDDP.LinearGraph(3)
Root
0
Nodes
1
2
3
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
```
"""
function LinearGraph(stages::Int)
edges = Tuple{Pair{Int,Int},Float64}[]
for t in 1:stages
push!(edges, (t - 1 => t, 1.0))
end
return Graph(0, collect(1:stages), edges)
end
"""
MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})
Construct a Markovian graph from the vector of transition matrices.
`transition_matrices[t][i, j]` gives the probability of transitioning from
Markov state `i` in stage `t - 1` to Markov state `j` in stage `t`.
The dimension of the first transition matrix should be `(1, N)`, and
`transition_matrices[1][1, i]` is the probability of transitioning from the root
node to the Markov state `i`.
## Examples
```jldoctest
julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])
Root
(0, 1)
Nodes
(1, 1)
(2, 1)
(2, 2)
(3, 1)
(3, 2)
Arcs
(0, 1) => (1, 1) w.p. 1.0
(1, 1) => (2, 1) w.p. 0.5
(1, 1) => (2, 2) w.p. 0.5
(2, 1) => (3, 1) w.p. 0.8
(2, 1) => (3, 2) w.p. 0.2
(2, 2) => (3, 1) w.p. 0.2
(2, 2) => (3, 2) w.p. 0.8
```
"""
function MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})
if size(transition_matrices[1], 1) != 1
error(
"Expected the first transition matrix to be of size (1, N). It " *
"is of size $(size(transition_matrices[1])).",
)
end
node_type = Tuple{Int,Int}
root_node = (0, 1)
nodes = node_type[]
edges = Tuple{Pair{node_type,node_type},Float64}[]
for (stage, transition) in enumerate(transition_matrices)
if !all(transition .>= 0.0)
error("Entries in the transition matrix must be non-negative.")
end
if !all(0.0 - 1e-8 .<= sum(transition; dims = 2) .<= 1.0 + 1e-8)
error(
"Rows in the transition matrix must sum to between 0.0 and 1.0.",
)
end
if stage > 1
if size(transition_matrices[stage-1], 2) != size(transition, 1)
error("Transition matrix for stage $(stage) is the wrong size.")
end
end
for markov_state in 1:size(transition, 2)
push!(nodes, (stage, markov_state))
end
for markov_state in 1:size(transition, 2)
for last_markov_state in 1:size(transition, 1)
probability = transition[last_markov_state, markov_state]
if 0.0 < probability <= 1.0
push!(
edges,
(
(stage - 1, last_markov_state) =>
(stage, markov_state),
probability,
),
)
end
end
end
end
return Graph(root_node, nodes, edges)
end
"""
MarkovianGraph(;
stages::Int,
transition_matrix::Matrix{Float64},
root_node_transition::Vector{Float64},
)
Construct a Markovian graph object with `stages` number of stages and
time-independent Markov transition probabilities.
`transition_matrix` must be a square matrix, and the probability of
transitioning from Markov state `i` in stage `t` to Markov state `j` in stage
`t + 1` is given by `transition_matrix[i, j]`.
`root_node_transition[i]` is the probability of transitioning from the root node
to Markov state `i` in the first stage.
## Examples
```jldoctest
julia> graph = SDDP.MarkovianGraph(;
stages = 3,
transition_matrix = [0.8 0.2; 0.2 0.8],
root_node_transition = [0.5, 0.5],
)
Root
(0, 1)
Nodes
(1, 1)
(1, 2)
(2, 1)
(2, 2)
(3, 1)
(3, 2)
Arcs
(0, 1) => (1, 1) w.p. 0.5
(0, 1) => (1, 2) w.p. 0.5
(1, 1) => (2, 1) w.p. 0.8
(1, 1) => (2, 2) w.p. 0.2
(1, 2) => (2, 1) w.p. 0.2
(1, 2) => (2, 2) w.p. 0.8
(2, 1) => (3, 1) w.p. 0.8
(2, 1) => (3, 2) w.p. 0.2
(2, 2) => (3, 1) w.p. 0.2
(2, 2) => (3, 2) w.p. 0.8
```
"""
function MarkovianGraph(;
stages::Int = 1,
transition_matrix::Matrix{Float64} = [1.0],
root_node_transition::Vector{Float64} = [1.0],
)
@assert size(transition_matrix, 1) == size(transition_matrix, 2)
@assert length(root_node_transition) == size(transition_matrix, 1)
return MarkovianGraph(
vcat(
[
Base.reshape(
root_node_transition,
1,
length(root_node_transition),
),
],
[transition_matrix for stage in 1:(stages-1)],
),
)
end
"""
UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)
Construct a graph composed of `num_nodes` nodes that form a single cycle, with a
probability of `discount_factor` of continuing the cycle.
## Examples
```jldoctest
julia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)
Root
0
Nodes
1
2
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 1 w.p. 0.9
```
"""
function UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)
@assert 0 < discount_factor < 1
@assert num_nodes > 0
graph = LinearGraph(num_nodes)
add_edge(graph, num_nodes => 1, discount_factor)
return graph
end
"""
Noise(support, probability)
An atom of a discrete random variable at the point of support `support` and
associated probability `probability`.
"""
struct Noise{T}
# The noise term.
term::T
# The probability of sampling the noise term.
probability::Float64
end
struct State{T}
# The incoming state variable.
in::T
# The outgoing state variable.
out::T
end
mutable struct ObjectiveState{N}
update::Function
initial_value::NTuple{N,Float64}
state::NTuple{N,Float64}
lower_bound::NTuple{N,Float64}
upper_bound::NTuple{N,Float64}
μ::NTuple{N,JuMP.VariableRef}
end
# Storage for belief-related things.
struct BeliefState{T}
partition_index::Int
belief::Dict{T,Float64}
μ::Dict{T,JuMP.VariableRef}
updater::Function
end
mutable struct Node{T}
# The index of the node in the policy graph.
index::T
# The JuMP subproblem.
subproblem::JuMP.Model
# A vector of the child nodes.
children::Vector{Noise{T}}
# A vector of the discrete stagewise-independent noise terms.
noise_terms::Vector{Noise}
# A function parameterize(model::JuMP.Model, noise) that modifies the JuMP
# model based on the observation of the noise.
parameterize::Function # TODO(odow): make this a concrete type?
# A list of the state variables in the model.
states::Dict{Symbol,State{JuMP.VariableRef}}
# Stage objective
stage_objective::Any # TODO(odow): make this a concrete type?
stage_objective_set::Bool
# Bellman function
bellman_function::Any # TODO(odow): make this a concrete type?
# For dynamic interpolation of objective states.
objective_state::Union{Nothing,ObjectiveState}
# For dynamic interpolation of belief states.
belief_state::Union{Nothing,BeliefState{T}}
# An over-loadable hook for the JuMP.optimize! function.
pre_optimize_hook::Union{Nothing,Function}
post_optimize_hook::Union{Nothing,Function}
# Approach for handling discrete variables.
has_integrality::Bool
# The user's optimizer. We use this in asynchronous mode.
optimizer::Any
# An extension dictionary. This is a useful place for packages that extend
# SDDP.jl to stash things.
ext::Dict{Symbol,Any}
# Lock for threading
lock::ReentrantLock
end
function Base.show(io::IO, node::Node)
println(io, "Node $(node.index)")
println(io, " # State variables : ", length(node.states))
println(io, " # Children : ", length(node.children))
println(io, " # Noise terms : ", length(node.noise_terms))
return
end
function pre_optimize_hook(f::Function, node::Node)
node.pre_optimize_hook = f
return
end
function post_optimize_hook(f::Function, node::Node)
node.post_optimize_hook = f
return
end
struct Log
iteration::Int
bound::Float64
simulation_value::Float64
time::Float64
pid::Int
total_solves::Int
duality_key::String
serious_numerical_issue::Bool
end
struct TrainingResults
status::Symbol
log::Vector{Log}
end
mutable struct PolicyGraph{T}
# Must be MOI.MIN_SENSE or MOI.MAX_SENSE
objective_sense::MOI.OptimizationSense
# Index of the root node.
root_node::T
# Children of the root node. child => probability.
root_children::Vector{Noise{T}}
# Starting value of the state variables.
initial_root_state::Dict{Symbol,Float64}
# All nodes in the graph.
nodes::Dict{T,Node{T}}
# Belief partition.
belief_partition::Vector{Set{T}}
# Storage for the most recent training results.
most_recent_training_results::Union{Nothing,TrainingResults}
# An extension dictionary. This is a useful place for packages that extend
# SDDP.jl to stash things.
ext::Dict{Symbol,Any}
timer_output::TimerOutputs.TimerOutput
lock::ReentrantLock
function PolicyGraph(sense::Symbol, root_node::T) where {T}
if sense != :Min && sense != :Max
error(
"The optimization sense must be `:Min` or `:Max`. It is $(sense).",
)
end
optimization_sense = sense == :Min ? MOI.MIN_SENSE : MOI.MAX_SENSE
return new{T}(
optimization_sense,
root_node,
Noise{T}[],
Dict{Symbol,Float64}(),
Dict{T,Node{T}}(),
Set{T}[],
nothing,
Dict{Symbol,Any}(),
TimerOutputs.TimerOutput(),
ReentrantLock(),
)
end
end
function Base.show(io::IO, graph::PolicyGraph)
N = length(graph.nodes)
println(io, "A policy graph with $(N) nodes.")
nodes = sort_nodes(collect(keys(graph.nodes)))
if N < 10
println(io, " Node indices: ", join(nodes, ", "))
else
println(io, " Node indices: ", nodes[1], ", ..., ", nodes[end])
end
return
end
# So we can query nodes in the graph as graph[node].
function Base.getindex(graph::PolicyGraph{T}, index::T) where {T}
return graph.nodes[index]
end
# Work around different JuMP modes (Automatic / Manual / Direct).
function construct_subproblem(optimizer_factory, direct_mode::Bool)
if direct_mode
model = JuMP.direct_model(MOI.instantiate(optimizer_factory))
set_silent(model)
return model
end
return JuMP.Model()
end
# Work around different JuMP modes (Automatic / Manual / Direct).
function construct_subproblem(::Nothing, direct_mode::Bool)
if direct_mode
error(
"You must specify an optimizer in the form:\n" *
" with_optimizer(Module.Opimizer, args...) if " *
"direct_mode=true.",
)
end
return JuMP.Model()
end
"""
LinearPolicyGraph(builder::Function; stages::Int, kwargs...)
Create a linear policy graph with `stages` number of stages.
## Keyword arguments
- `stages`: the number of stages in the graph
- `kwargs`: other keyword arguments are passed to [`SDDP.PolicyGraph`](@ref).
## Examples
```jldoctest
julia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
# ... build model ...
end
A policy graph with 2 nodes.
Node indices: 1, 2
```
is equivalent to
```jldoctest
julia> graph = SDDP.LinearGraph(2);
julia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t
# ... build model ...
end
A policy graph with 2 nodes.
Node indices: 1, 2
```
"""
function LinearPolicyGraph(builder::Function; stages::Int, kwargs...)
if stages < 1
error("You must create a LinearPolicyGraph with `stages >= 1`.")
end
return PolicyGraph(builder, LinearGraph(stages); kwargs...)
end
"""
MarkovianPolicyGraph(
builder::Function;
transition_matrices::Vector{Array{Float64,2}},
kwargs...
)
Create a Markovian policy graph based on the transition matrices given in
`transition_matrices`.
## Keyword arguments
- `transition_matrices[t][i, j]` gives the probability of transitioning from
Markov state `i` in stage `t - 1` to Markov state `j` in stage `t`.
The dimension of the first transition matrix should be `(1, N)`, and
  `transition_matrices[1][1, i]` is the probability of transitioning from the
root node to the Markov state `i`.
- `kwargs`: other keyword arguments are passed to [`SDDP.PolicyGraph`](@ref).
## See also
See [`SDDP.MarkovianGraph`](@ref) for other ways of specifying a Markovian
policy graph.
See [`SDDP.PolicyGraph`](@ref) for the other keyword arguments.
## Examples
```jldoctest
julia> SDDP.MarkovianPolicyGraph(;
transition_matrices = [ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]],
lower_bound = 0.0,
) do sp, node
# ... build model ...
end
A policy graph with 5 nodes.
Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
```
is equivalent to
```jldoctest
julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]]);
julia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t
# ... build model ...
end
A policy graph with 5 nodes.
Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
```
"""
function MarkovianPolicyGraph(
builder::Function;
transition_matrices::Vector{Array{Float64,2}},
kwargs...,
)
return PolicyGraph(builder, MarkovianGraph(transition_matrices); kwargs...)
end
"""
PolicyGraph(
builder::Function,
graph::Graph{T};
sense::Symbol = :Min,
lower_bound = -Inf,
upper_bound = Inf,
optimizer = nothing,
) where {T}
Construct a policy graph based on the graph structure of `graph`. (See
[`SDDP.Graph`](@ref) for details.)
## Keyword arguments
- `sense`: whether we are minimizing (`:Min`) or maximizing (`:Max`).
- `lower_bound`: if minimizing, a valid lower bound for the cost to go in all
subproblems.
- `upper_bound`: if maximizing, a valid upper bound for the value to go in all
subproblems.
- `optimizer`: the optimizer to use for each of the subproblems
## Examples
```julia
function builder(subproblem::JuMP.Model, index)
# ... subproblem definition ...
end
model = PolicyGraph(
builder,
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
)
```
Or, using the Julia `do ... end` syntax:
```julia
model = PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, index
# ... subproblem definitions ...
end
```
"""
function PolicyGraph(
builder::Function,
graph::Graph{T};
sense::Symbol = :Min,
lower_bound = -Inf,
upper_bound = Inf,
optimizer = nothing,
# These arguments are deprecated
bellman_function = nothing,
direct_mode::Bool = false,
) where {T}
# Spend a one-off cost validating the graph.
_validate_graph(graph)
# Construct a basic policy graph. We will add to it in the remainder of this
# function.
policy_graph = PolicyGraph(sense, graph.root_node)
# Create a Bellman function if one is not given.
if bellman_function === nothing
if sense == :Min && lower_bound === -Inf
error(
"You must specify a finite lower bound on the objective value" *
" using the `lower_bound = value` keyword argument.",
)
elseif sense == :Max && upper_bound === Inf
error(
"You must specify a finite upper bound on the objective value" *
" using the `upper_bound = value` keyword argument.",
)
else
bellman_function = BellmanFunction(;
lower_bound = lower_bound,
upper_bound = upper_bound,
)
end
end
# Initialize nodes.
for (node_index, children) in graph.nodes
if node_index == graph.root_node
continue
end
subproblem = construct_subproblem(optimizer, direct_mode)
node = Node(
node_index,
subproblem,
Noise{T}[],
Noise[],
(ω) -> nothing,
Dict{Symbol,State{JuMP.VariableRef}}(),
0.0,
false,
# Delay initializing the bellman function until later so that it can
# use information about the children and number of
# stagewise-independent noise realizations.
nothing,
# Likewise for the objective states.
nothing,
# And for belief states.
nothing,
# The optimize hook defaults to nothing.
nothing,
nothing,
false,
direct_mode ? nothing : optimizer,
# The extension dictionary.
Dict{Symbol,Any}(),
ReentrantLock(),
)
subproblem.ext[:sddp_policy_graph] = policy_graph
policy_graph.nodes[node_index] = subproblem.ext[:sddp_node] = node
JuMP.set_objective_sense(subproblem, policy_graph.objective_sense)
builder(subproblem, node_index)
# Add a dummy noise here so that all nodes have at least one noise term.
if length(node.noise_terms) == 0
push!(node.noise_terms, Noise(nothing, 1.0))
end
ctypes = JuMP.list_of_constraint_types(subproblem)
node.has_integrality =
(JuMP.VariableRef, MOI.Integer) in ctypes ||
(JuMP.VariableRef, MOI.ZeroOne) in ctypes
end
# Loop back through and add the arcs/children.
for (node_index, children) in graph.nodes
if node_index == graph.root_node
continue
end
node = policy_graph.nodes[node_index]
for (child, probability) in children
push!(node.children, Noise(child, probability))
end
        # Initialize the Bellman function. (See note in creation of Node above.)
node.bellman_function =
initialize_bellman_function(bellman_function, policy_graph, node)
end
# Add root nodes
for (child, probability) in graph.nodes[graph.root_node]
push!(policy_graph.root_children, Noise(child, probability))
# We check the feasibility of the initial point here. It is a really
# tricky feasibility bug to diagnose otherwise. See #387 for details.
for (k, v) in policy_graph.initial_root_state
x_out = policy_graph[child].states[k].out
if JuMP.has_lower_bound(x_out) && JuMP.lower_bound(x_out) > v
error("Initial point $(v) violates lower bound on state $k")
elseif JuMP.has_upper_bound(x_out) && JuMP.upper_bound(x_out) < v
error("Initial point $(v) violates upper bound on state $k")
end
end
end
# Initialize belief states.
if length(graph.belief_partition) > 0
initialize_belief_states(policy_graph, graph)
end
return policy_graph
end
# Internal function: set up ::BeliefState for each node.
function initialize_belief_states(
policy_graph::PolicyGraph{T},
graph::Graph{T},
) where {T}
# Pre-compute the function `belief_updater`. See `construct_belief_update`
# for details.
belief_updater =
construct_belief_update(policy_graph, Set.(graph.belief_partition))
# Initialize a belief dictionary (containing one element for each node in
# the graph).
belief = Dict{T,Float64}(keys(graph.nodes) .=> 0.0)
delete!(belief, graph.root_node)
# Now for each element in the partition...
for (partition_index, partition) in enumerate(graph.belief_partition)
# Store the partition in the `policy_graph` object.
push!(policy_graph.belief_partition, Set(partition))
# Then for each node in the partition.
for node_index in partition
# Get the `::Node` object.
node = policy_graph[node_index]
# Add the dual variable μ for the cut:
# <b, μ> + θ ≥ α + <β, x>
# We need one variable for each non-zero belief state.
μ = Dict{T,JuMP.VariableRef}()
for (node_name, L) in
zip(partition, graph.belief_lipschitz[partition_index])
μ[node_name] = @variable(
node.subproblem,
lower_bound = -L,
upper_bound = L
)
end
add_initial_bounds(node, μ)
# Attach the belief state as an extension.
node.belief_state =
BeliefState{T}(partition_index, copy(belief), μ, belief_updater)
node.bellman_function.global_theta.belief_states = μ
for theta in node.bellman_function.local_thetas
theta.belief_states = μ
end
end
end
return
end
# Internal function: When created, θ has bounds of [-M, M], but, since we are
# adding these μ terms, we really want to bound <b, μ> + θ ∈ [-M, M]. Keeping in
# mind that ∑b = 1, we really only need to add these constraints at the corners
# of the box where one element in b is 1, and all the rest are 0.
function add_initial_bounds(node, μ::Dict)
θ = bellman_term(node.bellman_function)
lower_bound = JuMP.has_lower_bound(θ) ? JuMP.lower_bound(θ) : -Inf
upper_bound = JuMP.has_upper_bound(θ) ? JuMP.upper_bound(θ) : Inf
for (_, variable) in μ
if lower_bound > -Inf
@constraint(node.subproblem, variable + θ >= lower_bound)
end
if upper_bound < Inf
@constraint(node.subproblem, variable + θ <= upper_bound)
end
end
return
end
# Internal function: helper to get the node given a subproblem.
function get_node(subproblem::JuMP.Model)
return subproblem.ext[:sddp_node]::Node
end
# Internal function: helper to get the policy graph given a subproblem.
function get_policy_graph(subproblem::JuMP.Model)
return subproblem.ext[:sddp_policy_graph]::PolicyGraph
end
"""
parameterize(
modify::Function,
subproblem::JuMP.Model,
realizations::Vector{T},
probability::Vector{Float64} = fill(1.0 / length(realizations))
) where {T}
Add a parameterization function `modify` to `subproblem`. The `modify` function
takes one argument and modifies `subproblem` based on the realization of the
noise sampled from `realizations` with corresponding probabilities
`probability`.
In order to conduct an out-of-sample simulation, `modify` should accept
arguments that are not in realizations (but still of type T).
## Examples
```julia
SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
JuMP.set_upper_bound(x, ω)
end
```
"""
function parameterize(
modify::Function,
subproblem::JuMP.Model,
realizations::AbstractVector{T},
probability::AbstractVector{Float64} = fill(
1.0 / length(realizations),
length(realizations),
),
) where {T}
node = get_node(subproblem)
if length(node.noise_terms) != 0
error("Duplicate calls to SDDP.parameterize detected.")
end
for (realization, prob) in zip(realizations, probability)
push!(node.noise_terms, Noise(realization, prob))
end
node.parameterize = modify
return
end
"""
set_stage_objective(
subproblem::JuMP.Model,
stage_objective::Union{Real,JuMP.AbstractJuMPScalar},
)
Set the stage-objective of `subproblem` to `stage_objective`.
## Examples
```julia
SDDP.set_stage_objective(subproblem, 2x + 1)
```
"""
function set_stage_objective(
subproblem::JuMP.Model,
stage_objective::Union{Real,JuMP.AbstractJuMPScalar},
)
node = get_node(subproblem)
node.stage_objective = stage_objective
node.stage_objective_set = false
return
end
function set_stage_objective(::JuMP.Model, f)
return error(
"Unable to set the stage-objective of type $(typeof(f)). It must be " *
"a scalar function.",
)
end
"""
@stageobjective(subproblem, expr)
Set the stage-objective of `subproblem` to `expr`.
## Examples
```julia
@stageobjective(subproblem, 2x + y)
```
"""
macro stageobjective(subproblem, expr)
code = MutableArithmetics.rewrite_and_return(expr)
return quote
SDDP.set_stage_objective($(esc(subproblem)), $code)
end
end
"""
add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)
Add an objective state variable to `subproblem`.
Required `kwargs` are:
- `initial_value`: The initial value of the objective state variable at the
root node.
- `lipschitz`: The lipschitz constant of the objective state variable.
Setting a tight value for the lipschitz constant can significantly improve the
speed of convergence.
Optional `kwargs` are:
- `lower_bound`: A valid lower bound for the objective state variable. Can be
`-Inf`.
- `upper_bound`: A valid upper bound for the objective state variable. Can be
`+Inf`.
Setting tight values for these optional variables can significantly improve the
speed of convergence.
If the objective state is `N`-dimensional, each keyword argument must be an
`NTuple{N,Float64}`. For example, `initial_value = (0.0, 1.0)`.
"""
function add_objective_state(
update::Function,
subproblem::JuMP.Model;
initial_value::Union{Real,Tuple},
lipschitz::Union{Real,Tuple},
lower_bound::Union{Real,Tuple} = -Inf,
upper_bound::Union{Real,Tuple} = Inf,
)
tup_initial_value = _to_tuple(initial_value)
N = length(tup_initial_value)
return add_objective_state(
update,
subproblem,
tup_initial_value,
_to_tuple(lower_bound, N),
_to_tuple(upper_bound, N),
_to_tuple(lipschitz, N),
)
end
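# Hedged usage sketch; the price-update rule and the numbers are assumptions,
# not part of the library source. Inside a subproblem builder one might write:
#
#   SDDP.add_objective_state(
#       subproblem;
#       initial_value = 1.5,
#       lipschitz = 10.0,
#       lower_bound = 0.0,
#       upper_bound = 3.0,
#   ) do y, ω
#       return y + ω  # new objective state = old state plus the noise term
#   end
#
# The current value is then queried with `SDDP.objective_state(subproblem)`
# from inside `SDDP.parameterize`.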
_to_tuple(x::Real, N::Int = 1) = ntuple(i -> Float64(x), N)
function _to_tuple(x::Tuple, N::Int = length(x))
if length(x) != N
error(
"Invalid dimension in the input to `add_objective_state`. Got: ",
"`$x`, but expected it to have length `$N`.",
)
end
return Float64.(x)
end
# Internal function: add_objective_state with positional NTuple arguments.
function add_objective_state(
update::Function,
subproblem::JuMP.Model,
initial_value::NTuple{N,Float64},
lower_bound::NTuple{N,Float64},
upper_bound::NTuple{N,Float64},
lipschitz::NTuple{N,Float64},
) where {N}
node = get_node(subproblem)
if node.objective_state !== nothing
error("add_objective_state can only be called once.")
end
μ = @variable(
subproblem,
[i = 1:N],
lower_bound = -lipschitz[i],
upper_bound = lipschitz[i]
)
node.objective_state = ObjectiveState(
update,
initial_value,
initial_value,
lower_bound,
upper_bound,
tuple(μ...),
)
return
end
"""
objective_state(subproblem::JuMP.Model)
Return the current objective state of the problem.
Can only be called from [`SDDP.parameterize`](@ref).
"""
function objective_state(subproblem::JuMP.Model)
objective_state = get_node(subproblem).objective_state
if objective_state === nothing
error("No objective state defined.")
elseif length(objective_state.state) == 1
return objective_state.state[1]
else
return objective_state.state
end
end
# Internal function: calculate <y, μ>.
function get_objective_state_component(node::Node)
objective_state_component = JuMP.AffExpr(0.0)
objective_state = node.objective_state
if objective_state !== nothing
for (y, μ) in zip(objective_state.state, objective_state.μ)
JuMP.add_to_expression!(objective_state_component, y, μ)
end
end
return objective_state_component
end
function build_Φ(graph::PolicyGraph{T}) where {T}
Φ = Dict{Tuple{T,T},Float64}()
for (node_index_1, node_1) in graph.nodes
for child in node_1.children
Φ[(node_index_1, child.term)] = child.probability
end
end
for child in graph.root_children
Φ[(graph.root_node, child.term)] = child.probability
end
return Φ
end
"""
construct_belief_update(graph::PolicyGraph{T}, partition::Vector{Set{T}})
Returns a function that calculates the belief update. That function has the
following signature and returns the outgoing belief:
belief_update(
incoming_belief::Dict{T, Float64},
observed_partition::Int,
observed_noise
)::Dict{T,Float64}
We use Bayes theorem: P(X′ | Y) = P(Y | X′) × P(X′) / P(Y), where P(Xᵢ′ | Y) is
the probability of being in node i given the observation of ω. In addition
- P(Xⱼ′) = ∑ᵢ P(Xᵢ) × Φᵢⱼ
- P(Y|Xᵢ′) = P(ω ∈ Ωᵢ)
- P(Y) = ∑ᵢ P(Xᵢ′) × P(ω ∈ Ωᵢ)
"""
function construct_belief_update(
graph::SDDP.PolicyGraph{T},
partition::Vector{Set{T}},
) where {T}
# TODO: check that partition is proper.
Φ = build_Φ(graph) # Dict{Tuple{T, T}, Float64}
Ω = Dict{T,Dict{Any,Float64}}()
for (index, node) in graph.nodes
Ω[index] = Dict{Any,Float64}()
for noise in node.noise_terms
Ω[index][noise.term] = noise.probability
end
end
function belief_updater(
outgoing_belief::Dict{T,Float64},
incoming_belief::Dict{T,Float64},
observed_partition::Int,
observed_noise,
)::Dict{T,Float64}
# P(Y) = ∑ᵢ Xᵢ × ∑ⱼ P(i->j) × P(ω ∈ Ωⱼ)
PY = 0.0
for (node_i, belief) in incoming_belief
probability = 0.0
for (node_j, Ωj) in Ω
p_ij = get(Φ, (node_i, node_j), 0.0)
p_ω = get(Ωj, observed_noise, 0.0)
probability += p_ij * p_ω
end
PY += belief * probability
end
if PY ≈ 0.0
error(
"Unable to update belief in partition ",
observed_partition,
" after observing ",
observed_noise,
".The incoming belief ",
"is:\n ",
incoming_belief,
)
end
# Now update each belief.
for (node_i, belief) in incoming_belief
PX = sum(
belief * get(Φ, (node_j, node_i), 0.0) for
(node_j, belief) in incoming_belief
)
PY_X = 0.0
if node_i in partition[observed_partition]
PY_X += get(Ω[node_i], observed_noise, 0.0)
end
outgoing_belief[node_i] = PY_X * PX / PY
end
if length(outgoing_belief) == 2
for (node_i, belief) in incoming_belief
if belief < 1e-6
incoming_belief[node_i] = 0.0
elseif belief > 1 - 1e-6
incoming_belief[node_i] = 1.0
end
end
end
return outgoing_belief
end
return belief_updater
end
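# Editorial worked sketch; the numbers are assumptions and not part of the
# library source. Take two nodes A and B in one ambiguity set, incoming belief
# P(A) = P(B) = 0.5, self-loop transitions Φ(A => A) = Φ(B => B) = 1.0, and an
# observed noise ω with P(ω | A) = 0.8 and P(ω | B) = 0.2. Then
# P(Y) = 0.5 * 0.8 + 0.5 * 0.2 = 0.5, and the updater returns the outgoing
# belief P(A) = 0.8 * 0.5 / 0.5 = 0.8 and P(B) = 0.2 * 0.5 / 0.5 = 0.2.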
# Internal function: calculate <b, μ>.
function get_belief_state_component(node::Node)
belief_component = JuMP.AffExpr(0.0)
if node.belief_state !== nothing
belief = node.belief_state
for (key, μ) in belief.μ
JuMP.add_to_expression!(belief_component, belief.belief[key], μ)
end
end
return belief_component
end
| SDDP | https://github.com/odow/SDDP.jl.git |
["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1026 |
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
CompleteSampler()
Backward sampler that returns all noises of the corresponding node.
"""
struct CompleteSampler <: AbstractBackwardSamplingScheme end
sample_backward_noise_terms(::CompleteSampler, node) = node.noise_terms
"""
MonteCarloSampler(number_of_samples::Int)
Backward sampler that returns `number_of_samples` noises sampled with
replacement from noises on the corresponding node.
"""
struct MonteCarloSampler <: AbstractBackwardSamplingScheme
number_of_samples::Int
end
function sample_backward_noise_terms(sampler::MonteCarloSampler, node::Node)
prob = 1 / sampler.number_of_samples
return [
Noise(sample_noise(node.noise_terms), prob) for
_ in 1:sampler.number_of_samples
]
end
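# Hedged usage sketch, not part of the library source: a backward sampling
# scheme is passed to `SDDP.train` via the `backward_sampling_scheme` keyword,
# for example
#
#   SDDP.train(model; backward_sampling_scheme = SDDP.MonteCarloSampler(10))
#
# which approximates each backward pass with 10 noise terms sampled with
# replacement, instead of the full noise set used by `CompleteSampler()`.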
| SDDP | https://github.com/odow/SDDP.jl.git |
["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 28219 |
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
mutable struct Cut
intercept::Float64
coefficients::Dict{Symbol,Float64}
obj_y::Union{Nothing,NTuple{N,Float64} where {N}}
belief_y::Union{Nothing,Dict{T,Float64} where {T}}
non_dominated_count::Int
constraint_ref::Union{Nothing,JuMP.ConstraintRef}
end
mutable struct SampledState
state::Dict{Symbol,Float64}
obj_y::Union{Nothing,NTuple{N,Float64} where {N}}
belief_y::Union{Nothing,Dict{T,Float64} where {T}}
dominating_cut::Cut
best_objective::Float64
end
mutable struct ConvexApproximation
theta::JuMP.VariableRef
states::Dict{Symbol,JuMP.VariableRef}
objective_states::Union{Nothing,NTuple{N,JuMP.VariableRef} where {N}}
belief_states::Union{Nothing,Dict{T,JuMP.VariableRef} where {T}}
# Storage for cut selection
cuts::Vector{Cut}
sampled_states::Vector{SampledState}
cuts_to_be_deleted::Vector{Cut}
deletion_minimum::Int
function ConvexApproximation(
theta::JuMP.VariableRef,
states::Dict{Symbol,JuMP.VariableRef},
objective_states,
belief_states,
deletion_minimum::Int,
)
return new(
theta,
states,
objective_states,
belief_states,
Cut[],
SampledState[],
Cut[],
deletion_minimum,
)
end
end
_magnitude(x) = abs(x) > 0 ? log10(abs(x)) : 0
function _dynamic_range_warning(intercept, coefficients)
lo = hi = _magnitude(intercept)
lo_v = hi_v = intercept
for v in values(coefficients)
i = _magnitude(v)
        # Compare magnitudes, not raw values, so large negative coefficients
        # update the upper end of the range.
        if i < lo
            lo, lo_v = i, v
        elseif i > hi
            hi, hi_v = i, v
end
end
if hi - lo > 10
@warn(
"""Found a cut with a mix of small and large coefficients.
The order of magnitude difference is $(hi - lo).
            The smallest coefficient is $(lo_v).
The largest coefficient is $(hi_v).
You can ignore this warning, but it may be an indication of numerical issues.
Consider rescaling your model by using different units, e.g, kilometers instead
of meters. You should also consider reducing the accuracy of your input data (if
you haven't already). For example, it probably doesn't make sense to measure the
inflow into a reservoir to 10 decimal places.""",
maxlog = 1,
)
end
return
end
function _add_cut(
V::ConvexApproximation,
θᵏ::Float64,
πᵏ::Dict{Symbol,Float64},
xᵏ::Dict{Symbol,Float64},
obj_y::Union{Nothing,NTuple{N,Float64}},
belief_y::Union{Nothing,Dict{T,Float64}};
cut_selection::Bool = true,
) where {N,T}
for (key, x) in xᵏ
θᵏ -= πᵏ[key] * x
end
_dynamic_range_warning(θᵏ, πᵏ)
cut = Cut(θᵏ, πᵏ, obj_y, belief_y, 1, nothing)
_add_cut_constraint_to_model(V, cut)
if cut_selection
_cut_selection_update(V, cut, xᵏ)
end
return
end
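# Editorial note, not in the original source: the loop at the top of `_add_cut`
# converts a cut supplied as θ ≥ θᵏ + πᵏᵀ(x - xᵏ) into intercept/slope form
# θ ≥ (θᵏ - πᵏᵀxᵏ) + πᵏᵀx, so `cut.intercept` already has the sampled point xᵏ
# folded in before `_add_cut_constraint_to_model` builds the constraint.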
function _add_cut_constraint_to_model(V::ConvexApproximation, cut::Cut)
model = JuMP.owner_model(V.theta)
yᵀμ = JuMP.AffExpr(0.0)
if V.objective_states !== nothing
for (y, μ) in zip(cut.obj_y, V.objective_states)
JuMP.add_to_expression!(yᵀμ, y, μ)
end
end
if V.belief_states !== nothing
for (k, μ) in V.belief_states
JuMP.add_to_expression!(yᵀμ, cut.belief_y[k], μ)
end
end
expr = @expression(
model,
V.theta + yᵀμ - sum(cut.coefficients[i] * x for (i, x) in V.states)
)
cut.constraint_ref = if JuMP.objective_sense(model) == MOI.MIN_SENSE
@constraint(model, expr >= cut.intercept)
else
@constraint(model, expr <= cut.intercept)
end
return
end
"""
Internal function: calculate the height of `cut` evaluated at `state`.
"""
function _eval_height(cut::Cut, sampled_state::SampledState)
height = cut.intercept
for (key, value) in cut.coefficients
height += value * sampled_state.state[key]
end
return height
end
"""
Internal function: check if the candidate point dominates the incumbent.
"""
function _dominates(candidate, incumbent, minimization::Bool)
return minimization ? candidate >= incumbent : candidate <= incumbent
end
function _cut_selection_update(
V::ConvexApproximation,
cut::Cut,
state::Dict{Symbol,Float64},
)
model = JuMP.owner_model(V.theta)
is_minimization = JuMP.objective_sense(model) == MOI.MIN_SENSE
sampled_state = SampledState(state, cut.obj_y, cut.belief_y, cut, NaN)
sampled_state.best_objective = _eval_height(cut, sampled_state)
# Loop through previously sampled states and compare the height of the most
# recent cut against the current best. If this new cut is an improvement,
# store this one instead.
for old_state in V.sampled_states
# Only compute cut selection at same points in concave space.
if old_state.obj_y != cut.obj_y || old_state.belief_y != cut.belief_y
continue
end
height = _eval_height(cut, old_state)
if _dominates(height, old_state.best_objective, is_minimization)
old_state.dominating_cut.non_dominated_count -= 1
cut.non_dominated_count += 1
old_state.dominating_cut = cut
old_state.best_objective = height
end
end
push!(V.sampled_states, sampled_state)
# Now loop through previously discovered cuts and compare their height at
# `sampled_state`. If a cut is an improvement, add it to a queue to be
# added.
for old_cut in V.cuts
if old_cut.constraint_ref !== nothing
# We only care about cuts not currently in the model.
continue
elseif old_cut.obj_y != sampled_state.obj_y
# Only compute cut selection at same points in objective space.
continue
elseif old_cut.belief_y != sampled_state.belief_y
# Only compute cut selection at same points in belief space.
continue
end
height = _eval_height(old_cut, sampled_state)
if _dominates(height, sampled_state.best_objective, is_minimization)
sampled_state.dominating_cut.non_dominated_count -= 1
old_cut.non_dominated_count += 1
sampled_state.dominating_cut = old_cut
sampled_state.best_objective = height
_add_cut_constraint_to_model(V, old_cut)
end
end
push!(V.cuts, cut)
# Delete cuts that need to be deleted.
for cut in V.cuts
if cut.non_dominated_count < 1
if cut.constraint_ref !== nothing
push!(V.cuts_to_be_deleted, cut)
end
end
end
if length(V.cuts_to_be_deleted) >= V.deletion_minimum
for cut in V.cuts_to_be_deleted
JuMP.delete(model, cut.constraint_ref)
cut.constraint_ref = nothing
cut.non_dominated_count = 0
end
end
empty!(V.cuts_to_be_deleted)
return
end
@enum(CutType, SINGLE_CUT, MULTI_CUT)
# Internal struct: this struct is just a cache for arguments until we can build
# an actual instance of the type T at a later point.
struct InstanceFactory{T}
args::Any
kwargs::Any
InstanceFactory{T}(args...; kwargs...) where {T} = new{T}(args, kwargs)
end
"""
BellmanFunction
A representation of the value function. SDDP.jl uses the following unique
representation of the value function that is undocumented in the literature.
It supports three types of state variables:
1) x - convex "resource" states
2) b - concave "belief" states
3) y - concave "objective" states
In addition, we have three types of cuts:
1) Single-cuts (also called "average" cuts in the literature), which involve
the risk-adjusted expectation of the cost-to-go.
2) Multi-cuts, which use a different cost-to-go term for each realization w.
3) Risk-cuts, which correspond to the facets of the dual interpretation of a
convex risk measure.
Therefore, ValueFunction returns a JuMP model of the following form:
```
V(x, b, y) =
min: μᵀb + νᵀy + θ
s.t. # "Single" / "Average" cuts
μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J
# "Multi" cuts
μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K
# "Risk-set" cuts
θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
```
"""
mutable struct BellmanFunction
cut_type::CutType
global_theta::ConvexApproximation
local_thetas::Vector{ConvexApproximation}
# Cuts defining the dual representation of the risk measure.
risk_set_cuts::Set{Vector{Float64}}
end
"""
BellmanFunction(;
lower_bound = -Inf,
upper_bound = Inf,
deletion_minimum::Int = 1,
cut_type::CutType = MULTI_CUT,
)
"""
function BellmanFunction(;
lower_bound = -Inf,
upper_bound = Inf,
deletion_minimum::Int = 1,
cut_type::CutType = MULTI_CUT,
)
return InstanceFactory{BellmanFunction}(;
lower_bound = lower_bound,
upper_bound = upper_bound,
deletion_minimum = deletion_minimum,
cut_type = cut_type,
)
end
function bellman_term(bellman_function::BellmanFunction)
return bellman_function.global_theta.theta
end
function initialize_bellman_function(
factory::InstanceFactory{BellmanFunction},
model::PolicyGraph{T},
node::Node{T},
) where {T}
lower_bound, upper_bound, deletion_minimum, cut_type =
-Inf, Inf, 0, SINGLE_CUT
if length(factory.args) > 0
error(
"Positional arguments $(factory.args) ignored in BellmanFunction.",
)
end
for (kw, value) in factory.kwargs
if kw == :lower_bound
lower_bound = value
elseif kw == :upper_bound
upper_bound = value
elseif kw == :deletion_minimum
deletion_minimum = value
elseif kw == :cut_type
cut_type = value
else
error(
"Keyword $(kw) not recognised as argument to BellmanFunction.",
)
end
end
if lower_bound == -Inf && upper_bound == Inf
error("You must specify a finite bound on the cost-to-go term.")
end
if length(node.children) == 0
lower_bound = upper_bound = 0.0
end
Θᴳ = @variable(node.subproblem)
lower_bound > -Inf && JuMP.set_lower_bound(Θᴳ, lower_bound)
upper_bound < Inf && JuMP.set_upper_bound(Θᴳ, upper_bound)
# Initialize bounds for the objective states. If objective_state==nothing,
# this check will be skipped by dispatch.
_add_initial_bounds(node.objective_state, Θᴳ)
x′ = Dict(key => var.out for (key, var) in node.states)
obj_μ = node.objective_state !== nothing ? node.objective_state.μ : nothing
belief_μ = node.belief_state !== nothing ? node.belief_state.μ : nothing
return BellmanFunction(
cut_type,
ConvexApproximation(Θᴳ, x′, obj_μ, belief_μ, deletion_minimum),
ConvexApproximation[],
Set{Vector{Float64}}(),
)
end
# Internal function: helper used in _add_initial_bounds.
function _add_objective_state_constraint(
theta::JuMP.VariableRef,
y::NTuple{N,Float64},
μ::NTuple{N,JuMP.VariableRef},
) where {N}
is_finite = [-Inf < y[i] < Inf for i in 1:N]
model = JuMP.owner_model(theta)
lower_bound = JuMP.has_lower_bound(theta) ? JuMP.lower_bound(theta) : -Inf
upper_bound = JuMP.has_upper_bound(theta) ? JuMP.upper_bound(theta) : Inf
if lower_bound ≈ upper_bound ≈ 0.0
@constraint(model, [i = 1:N], μ[i] == 0.0)
return
end
expr = @expression(
model,
sum(y[i] * μ[i] for i in 1:N if is_finite[i]) + theta
)
if lower_bound > -Inf
@constraint(model, expr >= lower_bound)
end
if upper_bound < Inf
@constraint(model, expr <= upper_bound)
end
return
end
# Internal function: When created, θ has bounds of [-M, M], but, since we are
# adding these μ terms, we really want to bound <y, μ> + θ ∈ [-M, M]. We need to
# consider all possible values for `y`. Because the domain of `y` is
# rectangular, we want to add a constraint at each extreme point. This involves
# adding 2^N constraints where N = |μ|. This is only feasible for
# low-dimensional problems, e.g., N < 5.
_add_initial_bounds(::Nothing, ::Any) = nothing
function _add_initial_bounds(obj_state::ObjectiveState, theta)
if length(obj_state.μ) < 5
for y in
Base.product(zip(obj_state.lower_bound, obj_state.upper_bound)...)
_add_objective_state_constraint(theta, y, obj_state.μ)
end
else
_add_objective_state_constraint(
theta,
obj_state.lower_bound,
obj_state.μ,
)
_add_objective_state_constraint(
theta,
obj_state.upper_bound,
obj_state.μ,
)
end
return
end
function refine_bellman_function(
model::PolicyGraph{T},
node::Node{T},
bellman_function::BellmanFunction,
risk_measure::AbstractRiskMeasure,
outgoing_state::Dict{Symbol,Float64},
dual_variables::Vector{Dict{Symbol,Float64}},
noise_supports::Vector,
nominal_probability::Vector{Float64},
objective_realizations::Vector{Float64},
) where {T}
lock(node.lock)
try
return _refine_bellman_function_no_lock(
model,
node,
bellman_function,
risk_measure,
outgoing_state,
dual_variables,
noise_supports,
nominal_probability,
objective_realizations,
)
finally
unlock(node.lock)
end
end
function _refine_bellman_function_no_lock(
model::PolicyGraph{T},
node::Node{T},
bellman_function::BellmanFunction,
risk_measure::AbstractRiskMeasure,
outgoing_state::Dict{Symbol,Float64},
dual_variables::Vector{Dict{Symbol,Float64}},
noise_supports::Vector,
nominal_probability::Vector{Float64},
objective_realizations::Vector{Float64},
) where {T}
# Sanity checks.
@assert length(dual_variables) ==
length(noise_supports) ==
length(nominal_probability) ==
length(objective_realizations)
# Preliminaries that are common to all cut types.
risk_adjusted_probability = similar(nominal_probability)
offset = adjust_probability(
risk_measure,
risk_adjusted_probability,
nominal_probability,
noise_supports,
objective_realizations,
model.objective_sense == MOI.MIN_SENSE,
)
# The meat of the function.
if bellman_function.cut_type == SINGLE_CUT
return _add_average_cut(
node,
outgoing_state,
risk_adjusted_probability,
objective_realizations,
dual_variables,
offset,
)
else # Add a multi-cut
@assert bellman_function.cut_type == MULTI_CUT
_add_locals_if_necessary(node, bellman_function, length(dual_variables))
return _add_multi_cut(
node,
outgoing_state,
risk_adjusted_probability,
objective_realizations,
dual_variables,
offset,
)
end
end
function _add_average_cut(
node::Node,
outgoing_state::Dict{Symbol,Float64},
risk_adjusted_probability::Vector{Float64},
objective_realizations::Vector{Float64},
dual_variables::Vector{Dict{Symbol,Float64}},
offset::Float64,
)
N = length(risk_adjusted_probability)
@assert N == length(objective_realizations) == length(dual_variables)
# Calculate the expected intercept and dual variables with respect to the
# risk-adjusted probability distribution.
πᵏ = Dict(key => 0.0 for key in keys(outgoing_state))
θᵏ = offset
for i in 1:length(objective_realizations)
p = risk_adjusted_probability[i]
θᵏ += p * objective_realizations[i]
for (key, dual) in dual_variables[i]
πᵏ[key] += p * dual
end
end
# Now add the average-cut to the subproblem. We include the objective-state
# component μᵀy and the belief state (if it exists).
obj_y =
node.objective_state === nothing ? nothing : node.objective_state.state
belief_y =
node.belief_state === nothing ? nothing : node.belief_state.belief
_add_cut(
node.bellman_function.global_theta,
θᵏ,
πᵏ,
outgoing_state,
obj_y,
belief_y,
)
return (
theta = θᵏ,
pi = πᵏ,
x = outgoing_state,
obj_y = obj_y,
belief_y = belief_y,
)
end
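# Note (illustrative sketch, not part of the original source): for a
# minimization problem, the single cut assembled above is added to the
# subproblem in the standard SDDP/Benders form
#
#     θ ≥ θᵏ + Σ_key πᵏ[key] * (x′[key] - xᵏ[key])
#
# where xᵏ = `outgoing_state` is the point visited on the forward pass; the
# sense is reversed for maximization. See `_add_cut` for the exact bookkeeping.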
function _add_multi_cut(
node::Node,
outgoing_state::Dict{Symbol,Float64},
risk_adjusted_probability::Vector{Float64},
objective_realizations::Vector{Float64},
dual_variables::Vector{Dict{Symbol,Float64}},
offset::Float64,
)
N = length(risk_adjusted_probability)
@assert N == length(objective_realizations) == length(dual_variables)
bellman_function = node.bellman_function
μᵀy = get_objective_state_component(node)
JuMP.add_to_expression!(μᵀy, get_belief_state_component(node))
for i in 1:length(dual_variables)
_add_cut(
bellman_function.local_thetas[i],
objective_realizations[i],
dual_variables[i],
outgoing_state,
node.objective_state === nothing ? nothing :
node.objective_state.state,
node.belief_state === nothing ? nothing : node.belief_state.belief,
)
end
model = JuMP.owner_model(bellman_function.global_theta.theta)
cut_expr = @expression(
model,
sum(
risk_adjusted_probability[i] *
bellman_function.local_thetas[i].theta for i in 1:N
) - (1 - sum(risk_adjusted_probability)) * μᵀy + offset
)
# TODO(odow): should we use `cut_expr` instead?
ξ = copy(risk_adjusted_probability)
if !(ξ in bellman_function.risk_set_cuts) || μᵀy != JuMP.AffExpr(0.0)
push!(bellman_function.risk_set_cuts, ξ)
if JuMP.objective_sense(model) == MOI.MIN_SENSE
@constraint(model, bellman_function.global_theta.theta >= cut_expr)
else
@constraint(model, bellman_function.global_theta.theta <= cut_expr)
end
end
return
end
# If we are adding a multi-cut for the first time, then the local θ variables
# won't have been added.
# TODO(odow): a way to set different bounds for each variable in the multi-cut.
function _add_locals_if_necessary(
node::Node,
bellman_function::BellmanFunction,
N::Int,
)
num_local_thetas = length(bellman_function.local_thetas)
if num_local_thetas == N
return # Do nothing. Already initialized.
elseif num_local_thetas > 0
error(
"Expected $(N) local θ variables but there were " *
"$(num_local_thetas).",
)
end
global_theta = bellman_function.global_theta
model = JuMP.owner_model(global_theta.theta)
local_thetas = @variable(model, [1:N])
if JuMP.has_lower_bound(global_theta.theta)
JuMP.set_lower_bound.(
local_thetas,
JuMP.lower_bound(global_theta.theta),
)
end
if JuMP.has_upper_bound(global_theta.theta)
JuMP.set_upper_bound.(
local_thetas,
JuMP.upper_bound(global_theta.theta),
)
end
for local_theta in local_thetas
push!(
bellman_function.local_thetas,
ConvexApproximation(
local_theta,
global_theta.states,
node.objective_state === nothing ? nothing :
node.objective_state.μ,
node.belief_state === nothing ? nothing : node.belief_state.μ,
global_theta.deletion_minimum,
),
)
end
return
end
"""
write_cuts_to_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = string,
) where {T}
Write the cuts that form the policy in `model` to `filename` in JSON format.
`node_name_parser` is a function which converts the name of each node into a
string representation. It has the signature: `node_name_parser(::T)::String`.
See also [`SDDP.read_cuts_from_file`](@ref).
"""
function write_cuts_to_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = string,
) where {T}
cuts = Dict{String,Any}[]
for (node_name, node) in model.nodes
if node.objective_state !== nothing || node.belief_state !== nothing
error(
"Unable to write cuts to file because model contains " *
"objective states or belief states.",
)
end
node_cuts = Dict(
"node" => node_name_parser(node_name),
"single_cuts" => Dict{String,Any}[],
"multi_cuts" => Dict{String,Any}[],
"risk_set_cuts" => Vector{Float64}[],
)
oracle = node.bellman_function.global_theta
for (cut, state) in zip(oracle.cuts, oracle.sampled_states)
intercept = cut.intercept
for (key, π) in cut.coefficients
intercept += π * state.state[key]
end
push!(
node_cuts["single_cuts"],
Dict(
"intercept" => intercept,
"coefficients" => copy(cut.coefficients),
"state" => copy(state.state),
),
)
end
for (i, theta) in enumerate(node.bellman_function.local_thetas)
for (cut, state) in zip(theta.cuts, theta.sampled_states)
intercept = cut.intercept
for (key, π) in cut.coefficients
intercept += π * state.state[key]
end
push!(
node_cuts["multi_cuts"],
Dict(
"realization" => i,
"intercept" => intercept,
"coefficients" => copy(cut.coefficients),
"state" => copy(state.state),
),
)
end
end
for p in node.bellman_function.risk_set_cuts
push!(node_cuts["risk_set_cuts"], p)
end
push!(cuts, node_cuts)
end
open(filename, "w") do io
return write(io, JSON.json(cuts))
end
return
end
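# A minimal usage sketch (illustrative only; the model, iteration limit, and
# file name are assumptions):
#
#     SDDP.train(model; iteration_limit = 20)
#     SDDP.write_cuts_to_file(model, "cuts.json")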
_node_name_parser(::Type{Int}, name::String) = parse(Int, name)
_node_name_parser(::Type{Symbol}, name::String) = Symbol(name)
function _node_name_parser(::Type{NTuple{N,Int}}, name::String) where {N}
keys = parse.(Int, strip.(split(name[2:end-1], ",")))
if length(keys) != N
error("Unable to parse node called $(name). Expected $N elements.")
end
return tuple(keys...)
end
function _node_name_parser(::Any, name)
return error(
"Unable to read name $(name). Provide a custom parser to " *
"`read_cuts_from_file` using the `node_name_parser` keyword.",
)
end
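# For example (illustrative only), with the default `string` writer these
# parsers invert the node names produced by `write_cuts_to_file`:
#
#     _node_name_parser(Int, "3")                 # returns 3
#     _node_name_parser(NTuple{2,Int}, "(1, 2)")  # returns (1, 2)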
"""
read_cuts_from_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = _node_name_parser,
) where {T}
Read cuts (saved using [`SDDP.write_cuts_to_file`](@ref)) from `filename` into
`model`.
Since `T` can be an arbitrary Julia type, the conversion to JSON is lossy. When
reading, `read_cuts_from_file` only supports `T=Int`, `T=NTuple{N, Int}`, and
`T=Symbol`. If you have manually created a policy graph with a different node
type `T`, provide a function `node_name_parser` with the signature
`node_name_parser(T, name::String)::T where {T}` that converts the string
`name` back into a node index of type `T`.
If `node_name_parser` returns `nothing`, those cuts are skipped.
See also [`SDDP.write_cuts_to_file`](@ref).
"""
function read_cuts_from_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = _node_name_parser,
) where {T}
cuts = JSON.parsefile(filename; use_mmap = false)
for node_cuts in cuts
node_name = node_name_parser(T, node_cuts["node"])::Union{Nothing,T}
if node_name === nothing
continue # Skip reading these cuts
end
node = model[node_name]
bf = node.bellman_function
# Loop through and add the single-cuts.
for json_cut in node_cuts["single_cuts"]
has_state = haskey(json_cut, "state")
state = if has_state
Dict(Symbol(k) => v for (k, v) in json_cut["state"])
else
Dict(Symbol(k) => 0.0 for k in keys(json_cut["coefficients"]))
end
_add_cut(
bf.global_theta,
json_cut["intercept"],
Dict(Symbol(k) => v for (k, v) in json_cut["coefficients"]),
state,
nothing,
nothing;
cut_selection = has_state,
)
end
# Loop through and add the multi-cuts. There are two parts:
# (i) the cuts w.r.t. the state variable x
# (ii) the cuts that define the risk set
# There is one additional complication: if these cuts are being read
# into a new model, the local theta variables may not exist yet.
if length(node_cuts["risk_set_cuts"]) > 0
_add_locals_if_necessary(
node,
bf,
length(first(node_cuts["risk_set_cuts"])),
)
end
for json_cut in node_cuts["multi_cuts"]
has_state = haskey(json_cut, "state")
state = if has_state
Dict(Symbol(k) => v for (k, v) in json_cut["state"])
else
Dict(Symbol(k) => 0.0 for k in keys(json_cut["coefficients"]))
end
_add_cut(
bf.local_thetas[json_cut["realization"]],
json_cut["intercept"],
Dict(Symbol(k) => v for (k, v) in json_cut["coefficients"]),
state,
nothing,
nothing;
cut_selection = has_state,
)
end
# Here is part (ii): adding the constraints that define the risk-set
# representation of the risk measure.
for json_cut in node_cuts["risk_set_cuts"]
expr = @expression(
node.subproblem,
bf.global_theta.theta - sum(
p * V.theta for (p, V) in zip(json_cut, bf.local_thetas)
)
)
if JuMP.objective_sense(node.subproblem) == MOI.MIN_SENSE
@constraint(node.subproblem, expr >= 0)
else
@constraint(node.subproblem, expr <= 0)
end
end
end
return
end
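# A minimal usage sketch (illustrative only; the file name and `new_model`,
# which must share the node type of the saved cuts, are assumptions):
#
#     SDDP.write_cuts_to_file(model, "cuts.json")
#     SDDP.read_cuts_from_file(new_model, "cuts.json")
#
# Supply `node_name_parser = (T, name) -> ...` if the node type is not `Int`,
# `Symbol`, or `NTuple{N,Int}`; returning `nothing` skips that node's cuts.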
"""
add_all_cuts(model::PolicyGraph)
Add all cuts that may have been deleted back into the model.
## Explanation
During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.
These can include cuts that define the optimal value function, particularly
around the extremes of the state-space (e.g., reservoirs empty).
This function ensures that all cuts discovered are added back into the model.
You should call this after [`train`](@ref) and before [`simulate`](@ref).
"""
function add_all_cuts(model::PolicyGraph)
for node in values(model.nodes)
global_theta = node.bellman_function.global_theta
for cut in global_theta.cuts
if cut.constraint_ref === nothing
_add_cut_constraint_to_model(global_theta, cut)
end
end
for approximation in node.bellman_function.local_thetas
for cut in approximation.cuts
if cut.constraint_ref === nothing
_add_cut_constraint_to_model(approximation, cut)
end
end
end
end
return
end
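# A minimal usage sketch (illustrative only):
#
#     SDDP.train(model; iteration_limit = 100)
#     SDDP.add_all_cuts(model)   # restore any cuts removed by cut selection
#     simulations = SDDP.simulate(model, 50)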
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
function _deprecate_integrality_handler()
return error(
"""
SDDP.jl v0.4.0 introduced a number of breaking changes in how we deal with
binary and integer variables.
## Breaking changes
* We have renamed `SDDiP` to `LagrangianDuality`.
* We have renamed `ContinuousRelaxation` to `ContinuousConicDuality`.
* Instead of passing the argument to `PolicyGraph`, you now pass it to
`train`, e.g., `SDDP.train(model; duality_handler = SDDP.LagrangianDuality())`
* We no longer turn continuous and integer states into a binary expansion. If
you want to binarize your states, do it manually.
## Why did we do this?
SDDiP (the algorithm presented in the paper) is really two parts:
1. If you have an integer program, you can compute the dual of the fishing
constraint using Lagrangian duality; and
2. If you have pure binary state variables, then cuts constructed from the
Lagrangian duals result in an optimal policy.
However, these two points are quite independent. If you have integer or
continuous state variables, you can still use Lagrangian duality!
The new system is more flexible because the duality handler is a property of the
solution process, not the model. This allows us to use Lagrangian duality to
solve any dual problem, and it leaves the decision of binarizing the state
variables up to the user. (Hint: we don't think you should do it!)
## Other additions
We also added support for strengthened Benders cuts, which we call
`SDDP.StrengthenedConicDuality()`.
## Future plans
We have a number of future plans in the works, including better Lagrangian
solution methods and better ways of integrating the different types of duality
handlers (e.g., start with ContinuousConicDuality, then shift to
StrengthenedConicDuality, then LagrangianDuality).
If these sorts of things interest you, the code is now much more hackable, so
please reach out or read https://github.com/odow/SDDP.jl/issues/246.
Alternatively, if you have interesting examples using SDDiP that you find are
too slow, please send me the examples so we can use them as benchmarks in future
improvements.
""",
)
end
SDDiP(args...; kwargs...) = _deprecate_integrality_handler()
ContinuousRelaxation(args...; kwargs...) = _deprecate_integrality_handler()
function get_dual_solution(node::Node, ::Nothing)
return JuMP.objective_value(node.subproblem), Dict{Symbol,Float64}()
end
# ========================= Continuous relaxation ============================ #
"""
ContinuousConicDuality()
Compute dual variables in the backward pass using conic duality, relaxing any
binary or integer restrictions as necessary.
## Theory
Given the problem
```
min Cᵢ(x̄, u, w) + θᵢ
st (x̄, x′, u) in Xᵢ(w) ∩ S
x̄ - x == 0 [λ]
```
where `S ⊆ ℝ×ℤ`, we relax the integrality constraints and use conic duality to solve for `λ`
in the problem:
```
min Cᵢ(x̄, u, w) + θᵢ
st (x̄, x′, u) in Xᵢ(w)
x̄ - x == 0 [λ]
```
"""
struct ContinuousConicDuality <: AbstractDualityHandler end
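# A minimal usage sketch (illustrative only). ContinuousConicDuality is the
# default duality handler, so the two calls below are equivalent:
#
#     SDDP.train(model)
#     SDDP.train(model; duality_handler = SDDP.ContinuousConicDuality())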
function _has_dual_solution(node::Node)
status = JuMP.dual_status(node.subproblem)
return status in (JuMP.FEASIBLE_POINT, JuMP.NEARLY_FEASIBLE_POINT)
end
function get_dual_solution(node::Node, ::ContinuousConicDuality)
if !_has_dual_solution(node)
# Attempt to recover by resetting the optimizer and re-solving.
if JuMP.mode(node.subproblem) != JuMP.DIRECT
MOI.Utilities.reset_optimizer(node.subproblem)
optimize!(node.subproblem)
end
end
if !_has_dual_solution(node)
write_subproblem_to_file(
node,
"subproblem.mof.json";
throw_error = true,
)
end
# Note: due to JuMP's dual convention, we need to flip the sign for
# maximization problems.
dual_sign = JuMP.objective_sense(node.subproblem) == MOI.MIN_SENSE ? 1 : -1
λ = Dict{Symbol,Float64}(
name => dual_sign * JuMP.dual(JuMP.FixRef(state.in)) for
(name, state) in node.states
)
return objective_value(node.subproblem), λ
end
function _relax_integrality(node::Node)
if !node.has_integrality
return () -> nothing
end
return JuMP.relax_integrality(node.subproblem)
end
function prepare_backward_pass(node::Node, ::ContinuousConicDuality, ::Options)
return _relax_integrality(node)
end
duality_log_key(::ContinuousConicDuality) = " "
# =========================== LagrangianDuality ============================== #
"""
LagrangianDuality(;
method::LocalImprovementSearch.AbstractSearchMethod =
LocalImprovementSearch.BFGS(100),
)
Obtain dual variables in the backward pass using Lagrangian duality.
## Arguments
* `method`: the `LocalImprovementSearch` method for maximizing the Lagrangian
dual problem.
## Theory
Given the problem
```
min Cᵢ(x̄, u, w) + θᵢ
st (x̄, x′, u) in Xᵢ(w) ∩ S
x̄ - x == 0 [λ]
```
where `S ⊆ ℝ×ℤ`, we solve the problem `max L(λ)`, where:
```
L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' h(x̄)
st (x̄, x′, u) in Xᵢ(w) ∩ S
```
and where `h(x̄) = x̄ - x`.
"""
mutable struct LagrangianDuality <: AbstractDualityHandler
method::LocalImprovementSearch.AbstractSearchMethod
function LagrangianDuality(;
method = LocalImprovementSearch.BFGS(100),
kwargs...,
)
if length(kwargs) > 0
@warn(
"Keyword arguments to LagrangianDuality have changed. " *
"See the documentation for details.",
)
end
return new(method)
end
end
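# A minimal usage sketch (illustrative only; the evaluation limit of 200 is an
# assumption):
#
#     handler = SDDP.LagrangianDuality(;
#         method = SDDP.LocalImprovementSearch.BFGS(200),
#     )
#     SDDP.train(model; duality_handler = handler)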
function get_dual_solution(node::Node, lagrange::LagrangianDuality)
undo_relax = _relax_integrality(node)
optimize!(node.subproblem)
conic_obj, conic_dual = get_dual_solution(node, ContinuousConicDuality())
undo_relax()
s = JuMP.objective_sense(node.subproblem) == MOI.MIN_SENSE ? -1 : 1
N = length(node.states)
x_in_value = zeros(N)
λ_star, h_expr, h_k = zeros(N), Vector{AffExpr}(undef, N), zeros(N)
for (i, (key, state)) in enumerate(node.states)
x_in_value[i] = JuMP.fix_value(state.in)
h_expr[i] = @expression(node.subproblem, state.in - x_in_value[i])
JuMP.unfix(state.in)
λ_star[i] = conic_dual[key]
end
# Check that the conic dual is feasible for the subproblem. Sometimes it
# isn't if the LP dual solution is slightly infeasible due to numerical
# issues.
L_k = _solve_primal_problem(node.subproblem, λ_star, h_expr, h_k)
if L_k === nothing
return conic_obj, conic_dual
end
L_star, λ_star =
LocalImprovementSearch.minimize(lagrange.method, λ_star, conic_obj) do x
L_k = _solve_primal_problem(node.subproblem, x, h_expr, h_k)
return L_k === nothing ? nothing : (s * L_k, s * h_k)
end
for (i, (_, state)) in enumerate(node.states)
JuMP.fix(state.in, x_in_value[i]; force = true)
end
λ_solution = Dict{Symbol,Float64}(
name => λ_star[i] for (i, name) in enumerate(keys(node.states))
)
return s * L_star, λ_solution
end
function _solve_primal_problem(
model::JuMP.Model,
λ::Vector{Float64},
h_expr::Vector{GenericAffExpr{Float64,VariableRef}},
h_k::Vector{Float64},
)
primal_obj = JuMP.objective_function(model)
JuMP.set_objective_function(
model,
@expression(model, primal_obj - λ' * h_expr),
)
JuMP.optimize!(model)
if JuMP.termination_status(model) != MOI.OPTIMAL
JuMP.set_objective_function(model, primal_obj)
return nothing
end
h_k .= -JuMP.value.(h_expr)
L_λ = JuMP.objective_value(model)
JuMP.set_objective_function(model, primal_obj)
return L_λ
end
duality_log_key(::LagrangianDuality) = "L"
# ==================== StrengthenedConicDuality ==================== #
"""
StrengthenedConicDuality()
Obtain dual variables in the backward pass using strengthened conic duality.
## Theory
Given the problem
```
min Cᵢ(x̄, u, w) + θᵢ
st (x̄, x′, u) in Xᵢ(w) ∩ S
x̄ - x == 0 [λ]
```
we first obtain an estimate for `λ` using [`ContinuousConicDuality`](@ref).
Then, we evaluate the Lagrangian function:
```
L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ'(x̄ - x)
st (x̄, x′, u) in Xᵢ(w) ∩ S
```
to obtain a better estimate of the intercept.
"""
mutable struct StrengthenedConicDuality <: AbstractDualityHandler end
function get_dual_solution(node::Node, ::StrengthenedConicDuality)
undo_relax = _relax_integrality(node)
optimize!(node.subproblem)
conic_obj, conic_dual = get_dual_solution(node, ContinuousConicDuality())
undo_relax()
if !node.has_integrality
return conic_obj, conic_dual # If we're linear, return this!
end
num_states = length(node.states)
λ_k, h_k, x = zeros(num_states), zeros(num_states), zeros(num_states)
h_expr = Vector{AffExpr}(undef, num_states)
for (i, (key, state)) in enumerate(node.states)
x[i] = JuMP.fix_value(state.in)
h_expr[i] = @expression(node.subproblem, state.in - x[i])
JuMP.unfix(state.in)
λ_k[i] = conic_dual[key]
end
lagrangian_obj = _solve_primal_problem(node.subproblem, λ_k, h_expr, h_k)
for (i, (_, state)) in enumerate(node.states)
JuMP.fix(state.in, x[i]; force = true)
end
# If lagrangian_obj is `nothing`, then the primal problem didn't solve
# correctly, probably because it was unbounded (i.e., the dual was
# infeasible.) But we got the dual from solving the LP relaxation so it must
# be feasible! Sometimes however, the dual from the LP solver might be
# numerically infeasible when solved in the primal. That's a shame :(
# If so, return the conic_obj instead.
return something(lagrangian_obj, conic_obj), conic_dual
end
duality_log_key(::StrengthenedConicDuality) = "S"
# ============================== BanditDuality =============================== #
mutable struct _BanditArm{T}
handler::T
rewards::Vector{Float64}
end
"""
BanditDuality()
Formulates the problem of choosing a duality handler as a multi-armed bandit
problem. The arms to choose between are:
* [`ContinuousConicDuality`](@ref)
* [`StrengthenedConicDuality`](@ref)
* [`LagrangianDuality`](@ref)
Our problem isn't a typical multi-armed bandit for two reasons:
1. The reward distribution is non-stationary (each arm converges to 0 as it
   keeps getting pulled).
2. The distribution of rewards is dependent on the history of the arms that
were chosen.
We choose a very simple heuristic: pick the arm with the best mean + 1 standard
deviation. That should ensure we consistently pick the arm with the best
likelihood of improving the value function.
In future, we should consider discounting the rewards of earlier iterations, and
focus more on the more-recent rewards.
"""
mutable struct BanditDuality <: AbstractDualityHandler
arms::Vector{_BanditArm}
last_arm_index::Int
logs_seen::Int
function BanditDuality(args::AbstractDualityHandler...)
return new(_BanditArm[_BanditArm(arg, Float64[]) for arg in args], 1, 1)
end
end
function Base.show(io::IO, handler::BanditDuality)
print(io, "BanditDuality with arms:")
for arm in handler.arms
print(io, "\n * ", arm.handler)
end
return
end
function BanditDuality()
return BanditDuality(ContinuousConicDuality(), StrengthenedConicDuality())
end
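# A minimal usage sketch (illustrative only): use the default two-armed bandit,
# or list the arms explicitly:
#
#     SDDP.train(model; duality_handler = SDDP.BanditDuality())
#     SDDP.train(
#         model;
#         duality_handler = SDDP.BanditDuality(
#             SDDP.ContinuousConicDuality(),
#             SDDP.StrengthenedConicDuality(),
#             SDDP.LagrangianDuality(),
#         ),
#     )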
function _choose_best_arm(handler::BanditDuality)
_, index = findmax(
map(handler.arms) do arm
return Statistics.mean(arm.rewards) + Statistics.std(arm.rewards)
end,
)
handler.last_arm_index = index
return handler.arms[index]
end
function _update_rewards(handler::BanditDuality, log::Vector{Log})
    # The bound is monotonic, so instead of worrying about whether we are
# maximizing or minimizing, let's just compute:
# |bound_t - bound_{t-1}|
# reward = -----------------------
# time_t - time_{t-1}
t, t′ = log[end], log[end-1]
reward = abs(t.bound - t′.bound) / (t.time - t′.time)
# This check is needed because we should probably keep using the first
# handler until we start to improve the bound. This can take quite a few
# iterations in some models. (Until we start to improve, the reward will be
    # zero, so we'd never revisit it.)
const_bound = isapprox(log[1].bound, log[end].bound; atol = 1e-6)
# To start with, we should add the reward to all arms to construct a prior
# distribution for the arms. The 10 is somewhat arbitrary.
if length(log) < 10 || const_bound
for arm in handler.arms
push!(arm.rewards, reward)
end
else
push!(handler.arms[handler.last_arm_index].rewards, reward)
end
return
end
function prepare_backward_pass(
node::Node,
handler::BanditDuality,
options::Options,
)
if length(options.log) > handler.logs_seen
_update_rewards(handler, options.log)
handler.logs_seen = length(options.log)
end
arm = _choose_best_arm(handler)
return prepare_backward_pass(node, arm.handler, options)
end
function get_dual_solution(node::Node, handler::BanditDuality)
return get_dual_solution(node, handler.arms[handler.last_arm_index].handler)
end
function duality_log_key(handler::BanditDuality)
return duality_log_key(handler.arms[handler.last_arm_index].handler)
end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
DefaultForwardPass(; include_last_node::Bool = true)
The default forward pass.
If `include_last_node = false` and the sample terminated due to a cycle, then
the last node (which forms the cycle) is omitted. This can be useful option to
set when training, but it comes at the cost of not knowing which node formed the
cycle (if there are multiple possibilities).
"""
struct DefaultForwardPass <: AbstractForwardPass
include_last_node::Bool
function DefaultForwardPass(; include_last_node::Bool = true)
return new(include_last_node)
end
end
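# A minimal usage sketch (illustrative only): drop the node that closes a cycle
# from the sampled trajectory during training:
#
#     SDDP.train(
#         model;
#         forward_pass = SDDP.DefaultForwardPass(; include_last_node = false),
#     )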
function forward_pass(
model::PolicyGraph{T},
options::Options,
pass::DefaultForwardPass,
) where {T}
# First up, sample a scenario. Note that if a cycle is detected, this will
# return the cycle node as well.
@_timeit_threadsafe model.timer_output "sample_scenario" begin
scenario_path, terminated_due_to_cycle =
sample_scenario(model, options.sampling_scheme)
end
final_node = scenario_path[end]
if terminated_due_to_cycle && !pass.include_last_node
pop!(scenario_path)
end
# Storage for the list of outgoing states that we visit on the forward pass.
sampled_states = Dict{Symbol,Float64}[]
# Storage for the belief states: partition index and the belief dictionary.
belief_states = Tuple{Int,Dict{T,Float64}}[]
current_belief = initialize_belief(model)
# Our initial incoming state.
incoming_state_value = copy(options.initial_state)
# A cumulator for the stage-objectives.
cumulative_value = 0.0
# Objective state interpolation.
objective_state_vector, N =
initialize_objective_state(model[scenario_path[1][1]])
objective_states = NTuple{N,Float64}[]
# Iterate down the scenario.
for (depth, (node_index, noise)) in enumerate(scenario_path)
node = model[node_index]
lock(node.lock)
try
# Objective state interpolation.
objective_state_vector = update_objective_state(
node.objective_state,
objective_state_vector,
noise,
)
if objective_state_vector !== nothing
push!(objective_states, objective_state_vector)
end
# Update belief state, etc.
if node.belief_state !== nothing
belief = node.belief_state::BeliefState{T}
partition_index = belief.partition_index
current_belief = belief.updater(
belief.belief,
current_belief,
partition_index,
noise,
)
push!(belief_states, (partition_index, copy(current_belief)))
end
# ===== Begin: starting state for infinite horizon =====
starting_states = options.starting_states[node_index]
if length(starting_states) > 0
# There is at least one other possible starting state. If our
# incoming state is more than δ away from the other states, add it
# as a possible starting state.
if distance(starting_states, incoming_state_value) >
options.cycle_discretization_delta
push!(starting_states, incoming_state_value)
end
# TODO(odow):
# - A better way of randomly sampling a starting state.
            # - Is it bad that we splice! here instead of just sampling? For
# convergence it is probably bad, since our list of possible
# starting states keeps changing, but from a computational
# perspective, we don't want to keep a list of discretized points
# in the state-space δ distance apart...
incoming_state_value =
splice!(starting_states, rand(1:length(starting_states)))
end
# ===== End: starting state for infinite horizon =====
# Solve the subproblem, note that `duality_handler = nothing`.
@_timeit_threadsafe model.timer_output "solve_subproblem" begin
subproblem_results = solve_subproblem(
model,
node,
incoming_state_value,
noise,
scenario_path[1:depth];
duality_handler = nothing,
)
end
# Cumulate the stage_objective.
cumulative_value += subproblem_results.stage_objective
# Set the outgoing state value as the incoming state value for the next
# node.
incoming_state_value = copy(subproblem_results.state)
# Add the outgoing state variable to the list of states we have sampled
# on this forward pass.
push!(sampled_states, incoming_state_value)
finally
unlock(node.lock)
end
end
if terminated_due_to_cycle
# We terminated due to a cycle. Here is the list of possible
# starting states for that node:
starting_states = options.starting_states[final_node[1]]
# We also need the incoming state variable to the final node, which
# is the outgoing state value of the second to last node:
incoming_state_value = if pass.include_last_node
sampled_states[end-1]
else
sampled_states[end]
end
# If this incoming state value is more than δ away from another
# state, add it to the list.
if distance(starting_states, incoming_state_value) >
options.cycle_discretization_delta
push!(starting_states, incoming_state_value)
end
end
# ===== End: drop off starting state if terminated due to cycle =====
return (
scenario_path = scenario_path,
sampled_states = sampled_states,
objective_states = objective_states,
belief_states = belief_states,
cumulative_value = cumulative_value,
)
end
mutable struct RevisitingForwardPass <: AbstractForwardPass
period::Int
sub_pass::AbstractForwardPass
archive::Vector{Any}
last_index::Int
counter::Int
end
"""
RevisitingForwardPass(
period::Int = 500;
sub_pass::AbstractForwardPass = DefaultForwardPass(),
)
A forward pass scheme that generates `period` new forward passes (using
`sub_pass`), then revisits all previously explored forward passes. This can
be useful to encourage convergence at a diversity of points in the
state-space.
Set `period = typemax(Int)` to disable.
For example, if `period = 2`, then the forward passes will be revisited as
follows: `1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ...`.
"""
function RevisitingForwardPass(
period::Int = 500;
sub_pass::AbstractForwardPass = DefaultForwardPass(),
)
@assert period > 0
return RevisitingForwardPass(period, sub_pass, Any[], 0, 0)
end
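# A minimal usage sketch (illustrative only; the period of 100 is an
# assumption):
#
#     SDDP.train(model; forward_pass = SDDP.RevisitingForwardPass(100))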
function forward_pass(
model::PolicyGraph,
options::Options,
fp::RevisitingForwardPass,
)
fp.counter += 1
if fp.counter - fp.period > fp.last_index
fp.counter = 1
fp.last_index = length(fp.archive)
end
if fp.counter <= length(fp.archive)
return fp.archive[fp.counter]
else
pass = forward_pass(model, options, fp.sub_pass)
push!(fp.archive, pass)
return pass
end
end
mutable struct RiskAdjustedForwardPass{F,T} <: AbstractForwardPass
forward_pass::F
risk_measure::T
resampling_probability::Float64
rejection_count::Int
objectives::Vector{Float64}
nominal_probability::Vector{Float64}
adjusted_probability::Vector{Float64}
archive::Vector{Any}
resample_count::Vector{Int}
end
"""
RiskAdjustedForwardPass(;
forward_pass::AbstractForwardPass,
risk_measure::AbstractRiskMeasure,
resampling_probability::Float64,
rejection_count::Int = 5,
)
A forward pass that resamples a previous forward pass with
`resampling_probability` probability, and otherwise samples a new forward pass
using `forward_pass`.
The forward pass to revisit is chosen based on the risk-adjusted (using
`risk_measure`) probability of the cumulative stage objectives.
Note that this objective corresponds to the _first_ time we visited the
trajectory. Subsequent visits may have improved things, but we don't have the
mechanisms in-place to update it. Therefore, remove the forward pass from
resampling consideration after `rejection_count` revisits.
"""
function RiskAdjustedForwardPass(;
forward_pass::AbstractForwardPass,
risk_measure::AbstractRiskMeasure,
resampling_probability::Float64,
rejection_count::Int = 5,
)
if !(0 < resampling_probability < 1)
throw(ArgumentError("Resampling probability must be in `(0, 1)`"))
end
return RiskAdjustedForwardPass{typeof(forward_pass),typeof(risk_measure)}(
forward_pass,
risk_measure,
resampling_probability,
rejection_count,
Float64[],
Float64[],
Float64[],
Any[],
Int[],
)
end
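# A minimal usage sketch (illustrative only; the resampling probability and
# risk measure are assumptions):
#
#     fp = SDDP.RiskAdjustedForwardPass(;
#         forward_pass = SDDP.DefaultForwardPass(),
#         risk_measure = SDDP.WorstCase(),
#         resampling_probability = 0.9,
#     )
#     SDDP.train(model; forward_pass = fp)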
function forward_pass(
model::PolicyGraph,
options::Options,
fp::RiskAdjustedForwardPass,
)
if length(fp.archive) > 0 && rand() < fp.resampling_probability
r = rand()
for i in 1:length(fp.adjusted_probability)
r -= fp.adjusted_probability[i]
if r > 1e-8
continue
end
pass = fp.archive[i]
if fp.resample_count[i] >= fp.rejection_count
# We've explored this pass too many times. Kick it out of the
# archive.
splice!(fp.objectives, i)
splice!(fp.nominal_probability, i)
splice!(fp.adjusted_probability, i)
splice!(fp.archive, i)
splice!(fp.resample_count, i)
else
fp.resample_count[i] += 1
end
return pass
end
end
pass = forward_pass(model, options, fp.forward_pass)
push!(fp.objectives, pass.cumulative_value)
push!(fp.nominal_probability, 0.0)
fill!(fp.nominal_probability, 1 / length(fp.nominal_probability))
push!(fp.adjusted_probability, 0.0)
push!(fp.archive, pass)
push!(fp.resample_count, 1)
adjust_probability(
fp.risk_measure,
fp.adjusted_probability,
fp.nominal_probability,
fp.objectives,
fp.objectives,
model.objective_sense == MOI.MIN_SENSE,
)
return pass
end
"""
RegularizedForwardPass(;
rho::Float64 = 0.05,
forward_pass::AbstractForwardPass = DefaultForwardPass(),
)
A forward pass that regularizes the outgoing first-stage state variables with an
L-infty trust-region constraint about the previous iteration's solution.
Specifically, the bounds of the outgoing state variable `x` are updated from
`(l, u)` to `max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l))`,
where `x^k` is the optimal solution of `x` in the previous iteration. On the
first iteration, the value of the state at the root node is used.
By default, `rho` is set to 5%, which seems to work well empirically.
Pass a different `forward_pass` to control the forward pass within the
regularized forward pass.
This forward pass is largely intended to be used for investment problems in
which the first stage makes a series of capacity decisions that then influence
the rest of the graph. An error is thrown if the first stage problem is not
deterministic, and states are silently skipped if they do not have finite
bounds.
"""
mutable struct RegularizedForwardPass{T<:AbstractForwardPass} <:
AbstractForwardPass
forward_pass::T
trial_centre::Dict{Symbol,Float64}
ρ::Float64
function RegularizedForwardPass(;
rho::Float64 = 0.05,
forward_pass::AbstractForwardPass = DefaultForwardPass(),
)
centre = Dict{Symbol,Float64}()
return new{typeof(forward_pass)}(forward_pass, centre, rho)
end
end
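# A minimal usage sketch (illustrative only; the 10% trust region is an
# assumption):
#
#     SDDP.train(
#         model;
#         forward_pass = SDDP.RegularizedForwardPass(; rho = 0.1),
#     )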
function forward_pass(
model::PolicyGraph,
options::Options,
fp::RegularizedForwardPass,
)
if length(model.root_children) != 1
error(
"RegularizedForwardPass cannot be applied because first-stage is " *
"not deterministic",
)
end
node = model[model.root_children[1].term]
if length(node.noise_terms) > 1
error(
"RegularizedForwardPass cannot be applied because first-stage is " *
"not deterministic",
)
end
old_bounds = Dict{Symbol,Tuple{Float64,Float64}}()
for (k, v) in node.states
if has_lower_bound(v.out) && has_upper_bound(v.out)
old_bounds[k] = (l, u) = (lower_bound(v.out), upper_bound(v.out))
x = get(fp.trial_centre, k, model.initial_root_state[k])
set_lower_bound(v.out, max(l, x - fp.ρ * (u - l)))
set_upper_bound(v.out, min(u, x + fp.ρ * (u - l)))
end
end
pass = forward_pass(model, options, fp.forward_pass)
for (k, (l, u)) in old_bounds
fp.trial_centre[k] = pass.sampled_states[1][k]
set_lower_bound(node.states[k].out, l)
set_upper_bound(node.states[k].out, u)
end
return pass
end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# ================================ risk_measures ============================= #
"""
AbstractRiskMeasure
The abstract type for the risk measure interface.
You need to define the following methods:
- [`SDDP.adjust_probability`](@ref)
"""
abstract type AbstractRiskMeasure end
"""
adjust_probability(
measure::Expectation
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector{Noise{T}},
objective_realizations::Vector{Float64},
is_minimization::Bool,
) where {T}
"""
function adjust_probability end
# ============================== sampling_schemes ============================ #
"""
AbstractSamplingScheme
The abstract type for the sampling-scheme interface.
You need to define the following methods:
- [`SDDP.sample_scenario`](@ref)
"""
abstract type AbstractSamplingScheme end
"""
sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}
Sample a scenario from the policy graph `graph` based on the sampling scheme.
Returns `::Tuple{Vector{Tuple{T, <:Any}}, Bool}`, where the first element is the
scenario, and the second element is a Boolean flag indicating if the scenario
was terminated due to the detection of a cycle.
The scenario is a list of tuples (type `Vector{Tuple{T, <:Any}}`) where the
first component of each tuple is the index of the node, and the second component
is the stagewise-independent noise term observed in that node.
"""
function sample_scenario end
# =============================== stopping_rules ============================= #
"""
AbstractStoppingRule
The abstract type for the stopping-rule interface.
You need to define the following methods:
- [`SDDP.stopping_rule_status`](@ref)
- [`SDDP.convergence_test`](@ref)
"""
abstract type AbstractStoppingRule end
"""
stopping_rule_status(::AbstractStoppingRule)::Symbol
Return a symbol describing the stopping rule.
"""
function stopping_rule_status end
"""
convergence_test(
model::PolicyGraph,
log::Vector{Log},
::AbstractStoppingRule,
)::Bool
Return a `Bool` indicating if the algorithm should terminate the training.
"""
function convergence_test(
graph::PolicyGraph,
log::Vector{Log},
stopping_rules::Vector{AbstractStoppingRule},
)
for stopping_rule in stopping_rules
if convergence_test(graph, log, stopping_rule)
return true, stopping_rule_status(stopping_rule)
end
end
return false, :not_solved
end
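# A minimal sketch of a user-defined stopping rule implementing this interface
# (illustrative only; the rule name and the 10-iteration limit are
# assumptions):
#
#     struct MyIterationLimit <: SDDP.AbstractStoppingRule
#         limit::Int
#     end
#     SDDP.stopping_rule_status(::MyIterationLimit) = :my_iteration_limit
#     function SDDP.convergence_test(
#         ::SDDP.PolicyGraph,
#         log::Vector{SDDP.Log},
#         rule::MyIterationLimit,
#     )
#         return length(log) >= rule.limit
#     end
#     SDDP.train(model; stopping_rules = [MyIterationLimit(10)])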
# ============================== backward_samplers =========================== #
"""
AbstractBackwardSamplingScheme
The abstract type for backward sampling scheme interface.
You need to define the following methods:
- [`SDDP.sample_backward_noise_terms`](@ref)
"""
abstract type AbstractBackwardSamplingScheme end
"""
sample_backward_noise_terms(
backward_sampling_scheme::AbstractBackwardSamplingScheme,
node::Node{T},
)::Vector{Noise}
Returns a `Vector{Noise}` of noises sampled from `node.noise_terms` using
`backward_sampling_scheme`.
"""
function sample_backward_noise_terms end
"""
sample_backward_noise_terms_with_state(
sampler::AbstractBackwardSamplingScheme,
node::Node,
state::Dict{Symbol,Float64},
)::Vector{Noise}
Returns a `Vector{Noise}` of noises sampled conditionally on the `state` using
`sampler`.
"""
function sample_backward_noise_terms_with_state(
sampler::AbstractBackwardSamplingScheme,
node::Node,
::Dict{Symbol,Float64},
)
return sample_backward_noise_terms(sampler, node)
end
# =========================== duality_handlers =========================== #
"""
AbstractDualityHandler
The abstract type for the duality handler interface.
"""
abstract type AbstractDualityHandler end
"""
get_dual_solution(
node::Node,
duality_handler::AbstractDualityHandler,
)::Tuple{Float64,Dict{Symbol,Float64}}
Returns a `Float64` for the objective of the dual solution, and a
`Dict{Symbol,Float64}` where the keys are the names of the state variables and
the values are the dual variables associated with the fishing constraint at
`node`.
"""
function get_dual_solution end
"""
prepare_backward_pass(
node::Node,
handler::AbstractDualityHandler,
options::Options,
)
Performs any setup needed by the duality handler prior to the backward pass.
Returns a function that, when called with no arguments, undoes the setup.
"""
function prepare_backward_pass(::Node, ::AbstractDualityHandler, ::Any)
return () -> nothing
end
# ============================= parallel schemes ============================= #
"""
AbstractParallelScheme
Abstract type for different parallelism schemes.
"""
abstract type AbstractParallelScheme end
"""
master_loop(
::AbstractParallelScheme,
model::PolicyGraph{T},
options::Options,
)::Symbol where {T}
The solve loop of the SDDP algorithm. Returns a symbol corresponding to the
termination status.
"""
function master_loop end
"""
_simulate(
model::PolicyGraph,
::AbstractParallelScheme,
number_replications::Int,
variables::Vector{Symbol};
kwargs...,
)
Simulate the policy using the parallel scheme.
"""
function _simulate end
# ============================= forward pass ============================= #
"""
AbstractForwardPass
Abstract type for different forward passes.
"""
abstract type AbstractForwardPass end
"""
forward_pass(model::PolicyGraph, options::Options, ::AbstractForwardPass)
Return a forward pass as a named tuple with the following fields:
(
;scenario_path,
sampled_states,
objective_states,
belief_states,
cumulative_value,
)
See [`DefaultForwardPass`](@ref) for details.
"""
function forward_pass end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module LocalImprovementSearch
import JuMP
_norm(x) = sqrt(sum(xi^2 for xi in x))
abstract type AbstractSearchMethod end
"""
minimize(
f::Function,
[method::AbstractSearchMethod = BFGS(100)],
x₀::Vector{Float64},
lower_bound::Float64 = -Inf,
)
Minimizes a convex function `f` using first-order information.
Compared to off-the-shelf implementations, it has a number of features tailored
to this purpose:
* Infeasibility is indicated by the function returning `nothing`. No other
constraint information is given.
* Sub-optimal solutions are okay, so we should focus on improving the feasible
starting point, instead of finding the global minimizer.
* `f` can be piecewise-linear convex with non-differentiable points.
## Arguments
* `f(::Vector{Float64})`: takes a vector `x` and returns one of the following:
* `nothing` if `x` is infeasible
* `(f, Δf)::Tuple{Float64,Vector{Float64}}`: a tuple of the function
evaluation and first-order gradient information.
* `x₀::Vector{Float64}`: a feasible starting point.
## Default method
The default algorithm is a modified version of BFGS, with a specialized
back-tracking inexact line-search.
"""
function minimize end
function minimize(f::Function, x₀::Vector{Float64}, lower_bound::Float64 = -Inf)
return minimize(f, BFGS(100), x₀, lower_bound)
end
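# A minimal usage sketch (illustrative only): minimize (x - 1)² from x = 0. The
# callback returns `(value, gradient)`, or `nothing` at infeasible points:
#
#     f(x) = ((x[1] - 1.0)^2, [2.0 * (x[1] - 1.0)])
#     fx, x_star = SDDP.LocalImprovementSearch.minimize(f, [0.0])
#     # x_star ≈ [1.0], fx ≈ 0.0 (up to the coarse tolerances used below)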
###
### BFGS
###
struct BFGS <: AbstractSearchMethod
evaluation_limit::Int
end
function minimize(
f::F,
bfgs::BFGS,
x₀::Vector{Float64},
lower_bound::Float64 = -Inf,
) where {F<:Function}
    # Initial estimate for the Hessian matrix in BFGS
B = zeros(length(x₀), length(x₀))
for i in 1:size(B, 1)
B[i, i] = 1.0
end
# We assume that the initial iterate is feasible
xₖ = x₀
fₖ, ∇fₖ = f(xₖ)::Tuple{Float64,Vector{Float64}}
# Initial step-length
αₖ = 1.0
# Evaluation counter
evals = Ref(bfgs.evaluation_limit)
while true
# Search direction. We could be clever here and maintain B⁻¹, but we're
# only ever going to be solving this for very small |x| << 100 problems,
# so taking the linear solve every time is okay. (The MIP solve is much
# more of a bottleneck.)
pₖ = B \ -∇fₖ
# Run line search in direction `pₖ`
αₖ, fₖ₊₁, ∇fₖ₊₁ = _line_search(f, fₖ, ∇fₖ, xₖ, pₖ, αₖ, evals)
if _norm(αₖ * pₖ) / max(1.0, _norm(xₖ)) < 1e-3
# Small steps! Probably at the edge of the feasible region.
# Return the current iterate.
#
# Note that "1e-3" isn't thaaaat small. But we hit a very annoying
# feature of solvers: their feasibility checks are only approximate.
# This tolerance is needed to pass the `test_kelleys_ip_xxx` tests.
# Decreasing the tolerance leads to a _worse_ estimate for the dual,
# because we abuse the solvers feasibility tolerance, and end up
# returning a solution that is on the edge of numerical dual
# feasibility.
return fₖ, xₖ
elseif _norm(∇fₖ₊₁) < 1e-6
            # Zero(ish) gradient. Return what must be a stationary point (a
            # minimum, since `f` is convex).
return fₖ₊₁, xₖ + αₖ * pₖ
elseif evals[] <= 0
# We have evaluated the function too many times. Return our current
# best.
return fₖ₊₁, xₖ + αₖ * pₖ
end
# BFGS update.
sₖ = αₖ * pₖ
yₖ = ∇fₖ₊₁ - ∇fₖ
# A slight tweak to normal BFGS: because we're dealing with non-smooth
        # problems, ||yₖ|| might be 0.0, i.e., we just moved along a facet
        # from an interior point to a vertex, so the gradient stays the same.
if _norm(yₖ) > 1e-12
B .=
B .+ (yₖ * yₖ') / (yₖ' * sₖ) -
(B * sₖ * sₖ' * B') / (sₖ' * B * sₖ)
end
fₖ, ∇fₖ, xₖ = fₖ₊₁, ∇fₖ₊₁, xₖ + sₖ
end
end
function _line_search(
f::F,
fₖ::Float64,
∇fₖ::Vector{Float64},
x::Vector{Float64},
p::Vector{Float64},
α::Float64,
evals::Ref{Int},
) where {F<:Function}
while _norm(α * p) > 1e-3 * max(1.0, _norm(x))
xₖ = x + α * p
ret = f(xₖ)
evals[] -= 1
if ret === nothing # Infeasible. So take a smaller step
α /= 2
continue
end
fₖ₊₁, ∇fₖ₊₁ = ret
if p' * ∇fₖ₊₁ < 1e-6
# Still a descent direction, so take a step.
return α, fₖ₊₁, ∇fₖ₊₁
elseif isapprox(fₖ + α * p' * ∇fₖ, fₖ₊₁; atol = 1e-8)
# Step is onto a kink
return α, fₖ₊₁, ∇fₖ₊₁
end
# Step is an ascent, so use Newton's method to find the intersection
α = (fₖ₊₁ - fₖ - p' * ∇fₖ₊₁ * α) / (p' * ∇fₖ - p' * ∇fₖ₊₁)
end
return 0.0, fₖ, ∇fₖ
end
###
### Cutting plane
###
struct OuterApproximation{O} <: AbstractSearchMethod
optimizer::O
end
function minimize(
f::F,
method::OuterApproximation,
x₀::Vector{Float64},
lower_bound::Float64,
) where {F<:Function}
model = JuMP.Model(method.optimizer)
JuMP.set_silent(model)
n = length(x₀)
JuMP.@variable(model, x[i in 1:n], start = x₀[i])
JuMP.@variable(model, θ >= lower_bound)
JuMP.@objective(model, Min, θ)
xₖ = x₀
fₖ, ∇fₖ = f(xₖ)::Tuple{Float64,Vector{Float64}}
upper_bound = fₖ
JuMP.@constraint(model, θ >= fₖ + ∇fₖ' * (x - xₖ))
evals = Ref(0)
d_step = Inf
while d_step > 1e-8 && evals[] < 20
JuMP.optimize!(model)
lower_bound, xₖ₊₁ = JuMP.value(θ), JuMP.value.(x)
ret = f(xₖ₊₁)
while ret === nothing
# point is infeasible
xₖ₊₁ = 0.5 * (xₖ + xₖ₊₁)
ret = f(xₖ₊₁)
end
fₖ₊₁, ∇fₖ₊₁ = ret::Tuple{Float64,Vector{Float64}}
evals[] += 1
upper_bound = fₖ₊₁
JuMP.@constraint(model, θ >= fₖ₊₁ + ∇fₖ₊₁' * (x - xₖ₊₁))
d = xₖ₊₁ - xₖ
d_step = _norm(d)
if sign(∇fₖ' * d) != sign(∇fₖ₊₁' * d)
# There is a kink between the x
xₖ₊₂ = 0.5 * (xₖ + xₖ₊₁)
fₖ₊₂, ∇fₖ₊₂ = f(xₖ₊₂)::Tuple{Float64,Vector{Float64}}
evals[] += 1
JuMP.@constraint(model, θ >= fₖ₊₂ + ∇fₖ₊₂' * (x - xₖ₊₂))
fₖ, xₖ = fₖ₊₂, xₖ₊₂
else
fₖ, xₖ = fₖ₊₁, xₖ₊₁
end
end
return fₖ, xₖ
end
end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
function _should_log(options, log_frequency::Int)
return options.print_level > 0 &&
mod(length(options.log), log_frequency) == 0
end
function _should_log(options, log_frequency::Function)
return options.print_level > 0 && log_frequency(options.log)
end
function log_iteration(options; force_if_needed::Bool = false)
options.dashboard_callback(options.log[end], false)
force_if_needed &= options.last_log_iteration[] != length(options.log)
if force_if_needed || _should_log(options, options.log_frequency)
print_helper(print_iteration, options.log_file_handle, options.log[end])
flush(options.log_file_handle)
options.last_log_iteration[] = length(options.log)
end
return
end
"""
Serial()
Run SDDP in serial mode.
"""
struct Serial <: AbstractParallelScheme end
Base.show(io::IO, ::Serial) = print(io, "serial mode")
interrupt(::Serial) = nothing
function master_loop(
::Serial,
model::PolicyGraph{T},
options::Options,
) where {T}
_initialize_solver(model; throw_error = false)
while true
result = iteration(model, options)
options.post_iteration_callback(result)
log_iteration(options)
if result.has_converged
return result.status
end
end
return
end
function _simulate(
model::PolicyGraph,
::Serial,
number_replications::Int,
variables::Vector{Symbol};
kwargs...,
)
_initialize_solver(model; throw_error = false)
return map(
i -> _simulate(model, variables; kwargs...),
1:number_replications,
)
end
struct Asynchronous <: AbstractParallelScheme
init_callback::Function
slave_ids::Vector{Int}
use_master::Bool
end
"""
Asynchronous(
[init_callback::Function,]
slave_pids::Vector{Int} = workers();
use_master::Bool = true,
)
Run SDDP in asynchronous mode on the workers with pids `slave_pids`.
After initializing the models on each worker, call `init_callback(model)`. Note
that `init_callback` is run _locally on the worker_ and _not_ on the master
thread.
If `use_master` is `true`, iterations are also conducted on the master process.
"""
function Asynchronous(
init_callback::Function,
slave_ids::Vector{Int} = Distributed.workers();
use_master::Bool = true,
)
return Asynchronous(init_callback, slave_ids, use_master)
end
function Asynchronous(
slave_ids::Vector{Int} = Distributed.workers();
use_master::Bool = true,
)
return Asynchronous(slave_ids; use_master = use_master) do model
return _initialize_solver(model; throw_error = true)
end
end
"""
Asynchronous(
solver::Any,
slave_pids::Vector{Int} = workers();
use_master::Bool = true,
)
Run SDDP in asynchronous mode on the workers with pids `slave_pids`.
Set the optimizer on each worker by calling `JuMP.set_optimizer(model, solver)`.
"""
function Asynchronous(
solver::Any,
slave_ids::Vector{Int} = Distributed.workers();
use_master::Bool = true,
)
return Asynchronous(slave_ids; use_master = use_master) do model
return JuMP.set_optimizer(model, solver)
end
end
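# A minimal usage sketch (illustrative only; HiGHS is an assumed optimizer and
# worker processes must already be running):
#
#     using Distributed
#     Distributed.addprocs(4)
#     @everywhere using SDDP, HiGHS
#     SDDP.train(model; parallel_scheme = SDDP.Asynchronous(HiGHS.Optimizer))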
interrupt(a::Asynchronous) = Distributed.interrupt(a.slave_ids)
function Base.show(io::IO, a::Asynchronous)
return print(io, "Asynchronous mode with $(length(a.slave_ids)) workers.")
end
"""
slave_update(model::PolicyGraph, result::IterationResult)
A callback called on a slave whenever a new result is available.
"""
function slave_update(model::PolicyGraph, result::IterationResult)
for (node_index, cuts) in result.cuts
for cut in cuts
if cut === nothing
error(
"This model uses features that are not suppored in async " *
"mode. Use `parallel_scheme = Serial()` instead.",
)
end
_add_cut(
model[node_index].bellman_function.global_theta,
cut.theta,
cut.pi,
cut.x,
cut.obj_y,
cut.belief_y;
cut_selection = true,
)
end
end
return
end
function slave_loop(
async::Asynchronous,
model::PolicyGraph{T},
options::Options,
updates::Distributed.RemoteChannel{Channel{IterationResult{T}}},
results::Distributed.RemoteChannel{Channel{IterationResult{T}}},
) where {T}
try
async.init_callback(model)
results_to_add = IterationResult{T}[]
while true
result = iteration(model, options)
options.post_iteration_callback(result)
# The next four lines are subject to a race condition: if the master closes
# `results` _after_ the call to `isopen` and _before_` the call to `put!` has
# executed, we get an `InvalidStateException`. This gets trapped in the outer
# try-catch.
if !isopen(results)
break
end
put!(results, result)
# Instead of pulling a result from `updates` and adding it immediately, we want
            # to pull as many as possible in a short amount of time, then add them all and
# start the loop again. Otherwise, by the time we've finished updating the
# slave, there might be a new update :(
while isready(updates)
push!(results_to_add, take!(updates))
end
for result in results_to_add
slave_update(model, result)
end
empty!(results_to_add)
end
catch ex
trap_error(ex)
end
return
end
trap_error(ex::Exception) = throw(ex)
trap_error(::InterruptException) = nothing
trap_error(::InvalidStateException) = nothing
trap_error(ex::CapturedException) = trap_error(ex.ex)
trap_error(ex::Distributed.RemoteException) = trap_error(ex.captured)
function master_loop(
async::Asynchronous,
model::PolicyGraph{T},
options::Options,
) where {T}
# Initialize the remote channels. There are two types:
# 1) updates: master -> slaves[i]: a unique channel for each slave, which
# is used to distribute results found by other slaves.
# 2) results: slaves -> master: a channel which slaves collectively push to
# to feed the master new results.
updates = Dict(
pid => Distributed.RemoteChannel(
() -> Channel{IterationResult{T}}(Inf),
) for pid in async.slave_ids
)
results = Distributed.RemoteChannel(() -> Channel{IterationResult{T}}(Inf))
futures = Distributed.Future[]
_uninitialize_solver(model; throw_error = true)
for pid in async.slave_ids
let model_pid = model, options_pid = options
f = Distributed.remotecall(
slave_loop,
pid,
async,
model_pid,
options_pid,
updates[pid],
results,
)
push!(futures, f)
end
end
_initialize_solver(model; throw_error = true)
while true
# Starting workers has a high overhead. We have to copy the models across, and then
# precompile all the methods on every process :(. While that's happening, let's
# start running iterations on master. It has the added benefit that if the master
# is ever idle waiting for a result from a slave, it will do some useful work :).
#
# It also means that Asynchronous() can be our default setting, since if there are
        # no workers, there should be no overhead, _and_ this inner loop is just the serial
# implementation anyway.
while async.use_master && !isready(results)
result = iteration(model, options)
options.post_iteration_callback(result)
for (_, ch) in updates
put!(ch, result)
end
log_iteration(options)
if result.has_converged
close(results)
wait.(futures)
return result.status
end
end
while !isready(results)
sleep(1.0)
end
        # We only reach here if isready(results) == true, so we won't hang waiting for a
# new result on take!. After we receive a new result from a slave, there are a few
# things to do:
# 1) send the result to the other slaves
# 2) update the master problem with the new cuts
# 3) compute the revised bound, update the log, and print to screen
# 4) test for convergence (e.g., bound stalling, time limit, iteration limit)
# 5) Exit, killing the running task on the workers.
result = take!(results)
for pid in async.slave_ids
if pid != result.pid
put!(updates[pid], result)
end
end
slave_update(model, result)
bound = calculate_bound(model)
push!(
options.log,
Log(
length(options.log) + 1,
bound,
result.cumulative_value,
time() - options.start_time,
result.pid,
lock(() -> model.ext[:total_solves], model.lock),
duality_log_key(options.duality_handler),
result.numerical_issue,
),
)
log_iteration(options)
has_converged, status =
convergence_test(model, options.log, options.stopping_rules)
if has_converged
close(results)
wait.(futures)
return status
end
end
return
end
function _simulate(
model::PolicyGraph,
async::Asynchronous,
number_replications::Int,
variables::Vector{Symbol};
kwargs...,
)
_uninitialize_solver(model; throw_error = true)
wp = Distributed.CachingPool(async.slave_ids)
let model = model,
init = false,
async = async,
variables = variables,
kwargs = kwargs
return Distributed.pmap(wp, 1:number_replications) do _
if !init
async.init_callback(model)
init = true
end
return _simulate(model, variables; kwargs...)
end
end
return
end
"""
Threaded()
Run SDDP in multi-threaded mode.
Use `julia --threads N` to start Julia with `N` threads. In most cases, you
should pick `N` to be the number of physical cores on your machine.
!!! danger
This plug-in is experimental, and parts of SDDP.jl may not be threadsafe. If
you encounter any problems or crashes, please open a GitHub issue.
## Example
```julia
SDDP.train(model; parallel_scheme = SDDP.Threaded())
SDDP.simulate(model; parallel_scheme = SDDP.Threaded())
```
"""
struct Threaded <: AbstractParallelScheme end
Base.show(io::IO, ::Threaded) = print(io, "Threaded()")
interrupt(::Threaded) = nothing
function master_loop(
::Threaded,
model::PolicyGraph{T},
options::Options,
) where {T}
_initialize_solver(model; throw_error = false)
keep_iterating, status = true, nothing
@sync for _ in 1:Threads.nthreads()
Threads.@spawn begin
try
# This access of `keep_iterating` is not thread-safe, but it
# doesn't matter because all it will do is another iteration
# before terminating.
while keep_iterating
result = iteration(model, options)
lock(options.lock) do
options.post_iteration_callback(result)
log_iteration(options)
if result.has_converged
keep_iterating = false
status = result.status
end
return
end
end
finally
lock(options.lock) do
keep_iterating = false
return
end
end
end
end
return status
end
function _simulate(
model::PolicyGraph,
::Threaded,
number_replications::Int,
variables::Vector{Symbol};
kwargs...,
)
_initialize_solver(model; throw_error = false)
ret = Vector{Vector{Dict{Symbol,Any}}}(undef, number_replications)
@sync for i in 1:number_replications
Threads.@spawn ret[i] = _simulate(model, variables; kwargs...)
end
return ret
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 19102 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# ========================== The Expectation Operator ======================== #
"""
Expectation()
The Expectation risk measure. Identical to taking the expectation with respect
to the nominal distribution.
"""
struct Expectation <: AbstractRiskMeasure end
function adjust_probability(
measure::Expectation,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
risk_adjusted_probability .= original_probability
return 0.0
end
# ========================== The Worst Case Operator ========================= #
"""
WorstCase()
The worst-case risk measure. Places all of the probability weight on the worst
outcome.
"""
struct WorstCase <: AbstractRiskMeasure end
function adjust_probability(
measure::WorstCase,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
risk_adjusted_probability .= 0.0
worst_index = 1
worst_observation = is_minimization ? -Inf : Inf
for (index, (probability, observation)) in
enumerate(zip(original_probability, objective_realizations))
if probability > 0.0
if (is_minimization && observation > worst_observation) ||
(!is_minimization && observation < worst_observation)
worst_index = index
worst_observation = observation
end
end
end
risk_adjusted_probability[worst_index] = 1.0
return 0.0
end
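# Example (editor's sketch, not part of the library source): with four equally
# likely outcomes, `WorstCase()` places all of the probability mass on the
# single worst realization (the largest cost when minimizing). Assumes SDDP is
# imported.
#
#   p = zeros(4)
#   SDDP.adjust_probability(
#       SDDP.WorstCase(),
#       p,                     # risk-adjusted probability (output)
#       fill(0.25, 4),         # nominal probability
#       [:a, :b, :c, :d],      # noise support (unused by WorstCase)
#       [1.0, 5.0, 2.0, 3.0],  # objective realizations
#       true,                  # is_minimization
#   )
#   # p == [0.0, 1.0, 0.0, 0.0]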
# =================================== AV@R =================================== #
"""
AVaR(β)
The average value at risk (AV@R) risk measure.
Computes the expectation of the β fraction of worst outcomes. β must be in `[0,
1]`. When `β=1`, this is equivalent to the [`Expectation`](@ref) risk measure.
When `β=0`, this is equivalent to the [`WorstCase`](@ref) risk measure.
AV@R is also known as the conditional value at risk (CV@R) or expected
shortfall.
"""
struct AVaR <: AbstractRiskMeasure
β::Float64
function AVaR(β::Float64)
if !(0 <= β <= 1)
throw(
ArgumentError(
"Risk-quantile β must be in [0, 1]. Currently it is $(β).",
),
)
end
return new(β)
end
end
"""
CVaR(γ)
The conditional value at risk (CV@R) risk measure.
Computes the expectation of the γ fraction of worst outcomes. γ must be in `[0,
1]`. When `γ=1`, this is equivalent to the [`Expectation`](@ref) risk measure.
When `γ=0`, this is equivalent to the [`WorstCase`](@ref) risk measure.
CV@R is also known as the average value at risk (AV@R) or expected shortfall.
"""
const CVaR = AVaR
function adjust_probability(
measure::AVaR,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
if measure.β ≈ 0.0
return adjust_probability(
WorstCase(),
risk_adjusted_probability,
original_probability,
noise_support,
objective_realizations,
is_minimization,
)
elseif measure.β ≈ 1.0
return adjust_probability(
Expectation(),
risk_adjusted_probability,
original_probability,
noise_support,
objective_realizations,
is_minimization,
)
end
risk_adjusted_probability .= 0.0
quantile_collected = 0.0
for i in sortperm(objective_realizations; rev = is_minimization)
quantile_collected >= measure.β && break
avar_prob =
min(original_probability[i], measure.β - quantile_collected) /
measure.β
risk_adjusted_probability[i] = avar_prob
quantile_collected += avar_prob * measure.β
end
return 0.0
end
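# Example (editor's sketch, not part of the library source): AV@R(0.5) over
# four equally likely outcomes spreads the probability over the worst 50% of
# realizations. Assumes SDDP is imported.
#
#   p = zeros(4)
#   SDDP.adjust_probability(
#       SDDP.AVaR(0.5),
#       p,
#       fill(0.25, 4),
#       [:a, :b, :c, :d],
#       [1.0, 2.0, 3.0, 4.0],  # costs; the worst half is {3.0, 4.0}
#       true,
#   )
#   # p == [0.0, 0.0, 0.5, 0.5]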
# ============================ ConvexCombination ============================= #
"""
ConvexCombination((weight::Float64, measure::AbstractRiskMeasure)...)
Create a weighted combination of risk measures.
## Examples
SDDP.ConvexCombination(
(0.5, SDDP.Expectation()),
(0.5, SDDP.AVaR(0.25))
)
Convex combinations can also be constructed by adding weighted risk measures
together as follows:
0.5 * SDDP.Expectation() + 0.5 * SDDP.AVaR(0.5)
"""
struct ConvexCombination{T<:Tuple} <: AbstractRiskMeasure
measures::T
end
ConvexCombination(args::Tuple...) = ConvexCombination(args)
function Base.show(io::IO, measure::ConvexCombination)
print(io, "A convex combination of ")
is_first = true
for m in measure.measures
!is_first && print(io, " + ")
print(io, m[1], " * ", m[2])
is_first = false
end
return
end
import Base: +, *
function Base.:+(a::ConvexCombination, b::ConvexCombination)
return ConvexCombination(a.measures..., b.measures...)
end
function Base.:*(lhs::Float64, rhs::AbstractRiskMeasure)
return ConvexCombination(((lhs, rhs),))
end
function adjust_probability(
measure::ConvexCombination,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
risk_adjusted_probability .= 0.0
partial_distribution = similar(risk_adjusted_probability)
for (weight, measure) in measure.measures
partial_distribution .= 0.0
adjust_probability(
measure,
partial_distribution,
original_probability,
noise_support,
objective_realizations,
is_minimization,
)
risk_adjusted_probability .+= weight * partial_distribution
end
return 0.0
end
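# Example (editor's sketch, not part of the library source): a 50-50 mix of
# expectation and worst-case averages the two adjusted distributions.
#
#   measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()
#   p = zeros(3)
#   SDDP.adjust_probability(
#       measure,
#       p,
#       [0.3, 0.4, 0.3],
#       [:a, :b, :c],
#       [1.0, 3.0, 2.0],
#       true,
#   )
#   # p == 0.5 .* [0.3, 0.4, 0.3] .+ 0.5 .* [0.0, 1.0, 0.0] == [0.15, 0.7, 0.15]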
# =================================== EAV@R ================================== #
"""
EAVaR(;lambda=1.0, beta=1.0)
A risk measure that is a convex combination of Expectation and Average Value @
Risk (also called Conditional Value @ Risk).
λ * E[x] + (1 - λ) * AV@R(β)[x]
### Keyword Arguments
* `lambda`: Convex weight on the expectation (`(1-lambda)` weight is put on the
  AV@R component). Increasing values of `lambda` are less risk averse (more
  weight on expectation).
* `beta`: The quantile at which to calculate the Average Value @ Risk.
Increasing values of `beta` are less risk averse. If `beta=0`, then the AV@R
component is the worst case risk measure.
"""
function EAVaR(; lambda::Float64 = 1.0, beta::Float64 = 1.0)
if !(0.0 <= lambda <= 1.0)
error(
"Lambda must be in the range [0, 1]. Increasing values of " *
"lambda are less risk averse. lambda=1 is identical to taking " *
"the expectation.",
)
end
if !(0.0 <= beta <= 1.0)
error(
"Beta must be in the range [0, 1]. Increasing values of beta " *
"are less risk averse. lambda=1 is identical to taking the " *
"expectation.",
)
end
return lambda * Expectation() + (1 - lambda) * AVaR(beta)
end
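# Example (editor's sketch, not part of the library source): EAVaR is a
# convenience constructor for a ConvexCombination. `model` is an assumed,
# previously built SDDP.PolicyGraph.
#
#   measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.25)
#   # Equivalent to 0.5 * SDDP.Expectation() + 0.5 * SDDP.AVaR(0.25).
#   SDDP.train(model; risk_measure = measure, iteration_limit = 100)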
# ================================= Modified-Χ² ============================== #
"""
ModifiedChiSquared(radius::Float64; minimum_std=1e-5)
The distributionally robust SDDP risk measure of
Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP.
Computational Management Science (2018) 165:431-454.
## Explanation
In a Distributionally Robust Optimization (DRO) approach, we modify the
probabilities we associate with all future scenarios so that the resulting
probability distribution is the "worst case" probability distribution, in some
sense.
In each backward pass we will compute a worst case probability distribution
vector p. We compute p so that:
```
p ∈ argmax p'z
s.t. [r; p - a] in SecondOrderCone()
sum(p) == 1
p >= 0
```
where
1. z is a vector of future costs. We assume that our aim is to minimize
future cost p'z. If we maximize reward, we would have p ∈ argmin{p'z}.
2. a is the uniform distribution
3. r is a user specified radius - the larger the radius, the more conservative
the policy.
## Notes
The largest radius that will work with S scenarios is sqrt((S-1)/S).
If the uncorrected standard deviation of the objective realizations is less than
`minimum_std`, then the risk-measure will default to `Expectation()`.
This code was contributed by Lea Kapelevich.
"""
struct ModifiedChiSquared <: AbstractRiskMeasure
radius::Float64
minimum_std::Float64
function ModifiedChiSquared(radius::Float64; minimum_std::Float64 = 1e-5)
if abs(radius) < 1e-9
@warn(
"Radius is very small. You should probably use " *
"`SDDP.Expectation()` instead."
)
end
return new(radius, minimum_std)
end
end
function Base.show(io::IO, measure::ModifiedChiSquared)
return print(io, "ModifiedChiSquared with radius=$(measure.radius)")
end
function adjust_probability(
measure::ModifiedChiSquared,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
if Statistics.std(objective_realizations; corrected = false) <
measure.minimum_std
return adjust_probability(
Expectation(),
risk_adjusted_probability,
original_probability,
noise_support,
objective_realizations,
is_minimization,
)
end
m = length(objective_realizations)
if all(x -> x ≈ 1 / m, original_probability)
_uniform_dro(
measure,
risk_adjusted_probability,
original_probability,
objective_realizations,
is_minimization,
)
else
_non_uniform_dro(
measure,
risk_adjusted_probability,
original_probability,
objective_realizations,
is_minimization,
)
end
return 0.0
end
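# Example (editor's sketch, not part of the library source): training a policy
# with the distributionally robust risk measure. `model` is an assumed
# SDDP.PolicyGraph. Recall from the docstring that the largest useful radius
# with S scenarios per node is sqrt((S - 1) / S).
#
#   SDDP.train(
#       model;
#       risk_measure = SDDP.ModifiedChiSquared(0.25),
#       iteration_limit = 100,
#   )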
"""
Algorithm (1) of Philpott et al. Assumes that the nominal distribution is _not_
uniform.
"""
function _non_uniform_dro(
measure::ModifiedChiSquared,
p::Vector{Float64},
q::Vector{Float64},
z::Vector{Float64},
is_minimization::Bool,
)
m = length(z)
if Statistics.std(z) < 1e-6
p .= q
return 0.0
end
if !is_minimization
z = -z
end
# step 1
K = collect(1:m)
    # check if the nominal probability is 0
for i in K
if isapprox(q[i], 0.0; atol = 1e-10)
p[i] = 0
splice!(K, i)
end
end
    # update m
m = length(K)
# use this to store the index popped out of K
not_in_K = Int[]
# step 2
while length(K) > 1
# step 2(a)
z_bar = sum(z[i] for i in K) / length(K)
s = sqrt(sum(z[i]^2 - z_bar^2 for i in K) / length(K))
if isapprox(s, 0.0; atol = 1e-10)
error("s is too small")
end
# step 2(b)
if length(K) == m
for i in K
p[i] = q[i] + (z[i] - z_bar) / (sqrt(m) * s) * measure.radius
end
else
for i in not_in_K
p[i] = 0
end
sum_qj = sum(q[i] for i in not_in_K)
sum_qj_squared = sum(q[i]^2 for i in not_in_K)
len_k = length(K)
n = sqrt(len_k * (measure.radius^2 - sum_qj_squared) - sum_qj^2)
for i in K
p[i] = q[i] + 1 / len_k * (sum_qj + n * (z[i] - z_bar) / s)
end
end
# step 2(c)
if all(pi -> pi >= 0.0, p)
return 0.0
end
# find i(K)
# find the list of indexes for which p is less than 0
negative_p = K[p[K].<0]
computed_r = zeros(0)
sum_qj = 0
sum_qj_squared = 0
if length(not_in_K) > 0
sum_qj = sum(q[i] for i in not_in_K)
sum_qj_squared = sum(q[i]^2 for i in not_in_K)
end
len_k = length(K)
computed_r = [
(
((-q[i] * len_k - sum_qj) / ((z[i] - z_bar)) / s)^2 +
sum_qj_squared^2
) / len_k + sum_qj_squared for i in negative_p
]
i_K = negative_p[argmin(computed_r)]
append!(not_in_K, i_K)
filter!(e -> e != i_K, K)
end
# step 3
for i in not_in_K
p[i] = 0
end
p[K[1]] = 1
return 0.0
end
"""
Algorithm (2) of Philpott et al. Assumes that the nominal distribution is
uniform.
"""
function _uniform_dro(
measure::ModifiedChiSquared,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
m = length(objective_realizations)
# Take a permuted view of `risk_adjusted_probability` so we can refer to
# `p[i]` instead of `risk_adjusted_probability[perm[i]]`.
perm = sortperm(objective_realizations; rev = !is_minimization)
p = view(risk_adjusted_probability, perm)
z = view(objective_realizations, perm)
# Compute the new probabilities according to Algorithm (2) of the Philpott
# et al. paper.
# Step (1):
@inbounds for k in 0:m-2
# Step (1a):
z_bar = sum(z[i] for i in (k+1):m) / (m - k)
s² = sum(z[i]^2 - z_bar^2 for i in (k+1):m) / (m - k)
# Due to numerical error, s² may sometimes be a little bit negative.
if s² < -1e-8
error("Something unexpected happened with s² term: `$(s²) < 0.0`.")
elseif s² <= 0.0
error("`s²<0`: choose a larger threshold for `minimum_std`.")
end
# Step (1b): note that we cache a couple of terms that don't depend on i
# to speed things up.
term_1 = 1 / (m - k)
term_2 = sqrt((m - k) * measure.radius^2 - k / m) / ((m - k) * sqrt(s²))
        # We really should set p[i] = 0 for i = 1, ..., k. But p[1], ..., p[k-1]
        # were already zeroed in earlier iterations, so we only need to set the
        # k'th element to 0.
if k > 0
p[k] = 0.0
end
if is_minimization
@inbounds for i in (k+1):m
p[i] = term_1 + term_2 * (z[i] - z_bar)
end
else
# Okay, here's the rub: we should have converted
# objective_realizations (rewards) into costs by negating them. This
# would have required a copy. This means that z_bar is in fact the
# -ve of what it should be. `s` is fine since it is a difference of
# squares. Thus, all we have to do is negate both z[i] and z_bar
# here.
@inbounds for i in (k+1):m
p[i] = term_1 + term_2 * (z_bar - z[i])
end
end
# Step (1c)
if p[k+1] >= 0.0
return 0.0
end
end
# Step (2):
p[end] = 1.0
return 0.0
end
# ================================= Wasserstein ============================== #
"""
Wasserstein(norm::Function, solver_factory; alpha::Float64)
A distributionally-robust risk measure based on the Wasserstein distance.
As `alpha` increases, the measure becomes more risk-averse. When `alpha=0`, the
measure is equivalent to the expectation operator. As `alpha` increases, the
measure approaches the Worst-case risk measure.
"""
struct Wasserstein{T,F} <: AbstractRiskMeasure
alpha::Float64
solver_factory::T
norm::F
function Wasserstein(norm::Function, solver_factory; alpha::Float64)
if alpha < 0.0
error("alpha cannot be $(alpha) as it must be in the range [0, ∞).")
end
return new{typeof(solver_factory),typeof(norm)}(
alpha,
solver_factory,
norm,
)
end
end
Base.show(io::IO, measure::Wasserstein) = print(io, "SDDP.Wasserstein")
function adjust_probability(
measure::Wasserstein,
risk_adjusted_probability::Vector{Float64},
original_probability::Vector{Float64},
noise_support::Vector,
objective_realizations::Vector{Float64},
is_minimization::Bool,
)
N = length(objective_realizations)
wasserstein = JuMP.Model(measure.solver_factory)
set_silent(wasserstein)
@variable(wasserstein, z[1:N, 1:N] >= 0)
@variable(wasserstein, p[1:N] >= 0)
for i in 1:N
@constraint(wasserstein, sum(z[:, i]) == original_probability[i])
@constraint(wasserstein, sum(z[i, :]) == p[i])
end
@constraint(
wasserstein,
sum(
measure.norm(noise_support[i], noise_support[j]) * z[i, j] for
i in 1:N, j in 1:N
) <= measure.alpha
)
objective_sense = is_minimization ? MOI.MAX_SENSE : MOI.MIN_SENSE
@objective(
wasserstein,
objective_sense,
sum(objective_realizations[i] * p[i] for i in 1:N)
)
JuMP.optimize!(wasserstein)
if JuMP.primal_status(wasserstein) != MOI.FEASIBLE_POINT
error(
"Unable to solver Wasserstein subproblem. Status: ",
JuMP.termination_status(wassserstein),
)
end
copyto!(risk_adjusted_probability, JuMP.value.(p))
return 0.0
end
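# Example (editor's sketch, not part of the library source): the Wasserstein
# measure needs a norm on the noise support and an optimizer factory for the
# inner LP. Assumes HiGHS is installed and `model` is an SDDP.PolicyGraph.
#
#   import HiGHS
#   measure = SDDP.Wasserstein(HiGHS.Optimizer; alpha = 0.5) do x, y
#       return abs(x - y)
#   end
#   SDDP.train(model; risk_measure = measure, iteration_limit = 100)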
# ================================= Entropic ============================== #
"""
Entropic(γ::Float64)
The entropic risk measure as described by:
Dowson, O., Morton, D.P. & Pagnoncelli, B.K. Incorporating convex risk
measures into multistage stochastic programming algorithms. Annals of
Operations Research (2022). [doi](https://doi.org/10.1007/s10479-022-04977-w).
As γ increases, the measure becomes more risk-averse.
"""
mutable struct Entropic <: AbstractRiskMeasure
γ::Float64
end
function Base.show(io::IO, ent::Entropic)
return print(io, "Entropic risk measure with γ = $(ent.γ)")
end
function adjust_probability(
measure::Entropic,
Q::Vector{Float64},
p::Vector{Float64},
::Vector,
X::Vector{Float64},
is_min::Bool,
)
if measure.γ == 0.0 # Special case of entropic: if γ = 0, F[X] = E[X].
Q .= p
return 0.0
end
# Handle maximization problems by negating γ. Usually we would negate X, but
# with convex RM's, we also have to negate the α(q) term, so it's easier to
# just negate γ.
γ = (is_min ? 1.0 : -1.0) * measure.γ
# Use `BigFloat` to avoid overflow that occurs when calculating `exp(x)`.
y = p .* exp.(big.(γ .* X))
Q .= y / sum(y)
α = sum(
# To avoid numerical issues with the log, skip elements that are `≈ 0`.
qi * log(qi / pi) for (pi, qi) in zip(p, Q) if pi > 1e-14 && qi > 1e-14
)
return -α / γ
end
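# Example (editor's sketch, not part of the library source): for a minimization
# problem the reweighting has the closed form qᵢ ∝ pᵢ * exp(γ * xᵢ).
#
#   Q = zeros(2)
#   SDDP.adjust_probability(
#       SDDP.Entropic(1.0),
#       Q,
#       [0.5, 0.5],
#       [:a, :b],
#       [1.0, 2.0],
#       true,
#   )
#   # Q ≈ exp.([1.0, 2.0]) ./ sum(exp.([1.0, 2.0])) ≈ [0.269, 0.731]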
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 17515 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# ========================= Monte Carlo Sampling Scheme ====================== #
struct InSampleMonteCarlo <: AbstractSamplingScheme
max_depth::Int
terminate_on_cycle::Bool
terminate_on_dummy_leaf::Bool
rollout_limit::Function
initial_node::Any
end
"""
InSampleMonteCarlo(;
max_depth::Int = 0,
        terminate_on_cycle::Bool = false,
        terminate_on_dummy_leaf::Bool = true,
rollout_limit::Function = (i::Int) -> typemax(Int),
initial_node::Any = nothing,
)
A Monte Carlo sampling scheme using the in-sample data from the policy graph
definition.
If `terminate_on_cycle`, terminate the forward pass once a cycle is detected.
If `max_depth > 0`, return once `max_depth` nodes have been sampled.
If `terminate_on_dummy_leaf`, terminate the forward pass with 1 - probability of
sampling a child node.
Note that if `terminate_on_cycle = false` and `terminate_on_dummy_leaf = false`
then `max_depth` must be set > 0.
Control which node the trajectories start from using `initial_node`. If it is
left as `nothing`, the root node is used as the starting node.
You can use `rollout_limit` to set iteration specific depth limits. For example:
InSampleMonteCarlo(rollout_limit = i -> 2 * i)
"""
function InSampleMonteCarlo(;
max_depth::Int = 0,
terminate_on_cycle::Bool = false,
terminate_on_dummy_leaf::Bool = true,
rollout_limit::Function = i -> typemax(Int),
initial_node::Any = nothing,
)
if !terminate_on_cycle && !terminate_on_dummy_leaf && max_depth == 0
error(
"terminate_on_cycle and terminate_on_dummy_leaf cannot both be " *
"false when max_depth=0.",
)
end
new_rollout = let i = 0
() -> (i += 1; rollout_limit(i))
end
return InSampleMonteCarlo(
max_depth,
terminate_on_cycle,
terminate_on_dummy_leaf,
new_rollout,
initial_node,
)
end
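# Example (editor's sketch, not part of the library source): bounding the
# length of the forward pass, which is useful for cyclic policy graphs.
# `model` is an assumed SDDP.PolicyGraph.
#
#   SDDP.train(
#       model;
#       sampling_scheme = SDDP.InSampleMonteCarlo(; max_depth = 20),
#       iteration_limit = 10,
#   )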
# ==================== OutOfSampleMonteCarlo Sampling Scheme ================= #
struct OutOfSampleMonteCarlo{T} <: AbstractSamplingScheme
noise_terms::Dict{T,Vector{Noise}}
root_children::Vector{Noise{T}}
children::Dict{T,Vector{Noise{T}}}
terminate_on_cycle::Bool
terminate_on_dummy_leaf::Bool
max_depth::Int
rollout_limit::Function
initial_node::Union{Nothing,T}
end
"""
OutOfSampleMonteCarlo(
f::Function,
graph::PolicyGraph;
use_insample_transition::Bool = false,
max_depth::Int = 0,
terminate_on_cycle::Bool = false,
terminate_on_dummy_leaf::Bool = true,
rollout_limit::Function = i -> typemax(Int),
initial_node = nothing,
)
Create a Monte Carlo sampler using out-of-sample probabilities and/or supports
for the stagewise-independent noise terms, and out-of-sample probabilities for
the node-transition matrix.
`f` is a function that takes the name of a node and returns a tuple containing
a vector of new [`SDDP.Noise`](@ref) terms for the children of that node, and
a vector of new [`SDDP.Noise`](@ref) terms for the stagewise-independent
noise.
If `f` is called with the name of the root node (e.g., `0` in a linear policy
graph, `(0, 1)` in a Markovian Policy Graph), then return a vector of
[`SDDP.Noise`](@ref) for the children of the root node.
If `use_insample_transition`, the in-sample transition probabilities will be
used. Therefore, `f` should only return a vector of the stagewise-independent
noise terms, and `f` will not be called for the root node.
If `terminate_on_cycle`, terminate the forward pass once a cycle is detected.
If `max_depth > 0`, return once `max_depth` nodes have been sampled.
If `terminate_on_dummy_leaf`, terminate the forward pass with 1 - probability of
sampling a child node.
Note that if `terminate_on_cycle = false` and `terminate_on_dummy_leaf = false`
then `max_depth` must be set > 0.
Control which node the trajectories start from using `initial_node`. If it is
left as `nothing`, the root node is used as the starting node.
If a node is deterministic, pass `[SDDP.Noise(nothing, 1.0)]` as the vector of
noise terms.
You can use `rollout_limit` to set iteration specific depth limits. For example:
```julia
OutOfSampleMonteCarlo(rollout_limit = i -> 2 * i)
```
## Examples
Given linear policy graph `graph` with `T` stages:
```julia
sampler = OutOfSampleMonteCarlo(graph) do node
if node == 0
return [SDDP.Noise(1, 1.0)]
else
noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]
children = node < T ? [SDDP.Noise(node + 1, 0.9)] : SDDP.Noise{Int}[]
return children, noise_terms
end
end
```
Given linear policy graph `graph` with `T` stages:
```julia
sampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node
return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]
end
```
"""
function OutOfSampleMonteCarlo(
f::Function,
graph::PolicyGraph{T};
use_insample_transition::Bool = false,
max_depth::Int = 0,
terminate_on_cycle::Bool = false,
terminate_on_dummy_leaf::Bool = true,
rollout_limit::Function = i -> typemax(Int),
initial_node::Union{Nothing,T} = nothing,
) where {T}
if !terminate_on_cycle && !terminate_on_dummy_leaf && max_depth == 0
error(
"terminate_on_cycle and terminate_on_dummy_leaf cannot both be " *
"false when max_depth=0.",
)
end
noise_terms = Dict{T,Vector{Noise}}()
children = Dict{T,Vector{Noise{T}}}()
root_children = if use_insample_transition
graph.root_children
else
f(graph.root_node)::Vector{Noise{T}}
end
for key in keys(graph.nodes)
if use_insample_transition
child = graph.nodes[key].children
noise = f(key)
else
child, noise = f(key)
end
noise_terms[key] = convert(Vector{Noise}, noise)
children[key] = child
end
new_rollout = let i = 0
() -> (i += 1; rollout_limit(i))
end
return OutOfSampleMonteCarlo{T}(
noise_terms,
root_children,
children,
terminate_on_cycle,
terminate_on_dummy_leaf,
max_depth,
new_rollout,
initial_node,
)
end
function get_noise_terms(
sampling_scheme::InSampleMonteCarlo,
node::Node{T},
node_index::T,
) where {T}
return node.noise_terms
end
function get_noise_terms(
sampling_scheme::OutOfSampleMonteCarlo{T},
node::Node{T},
node_index::T,
) where {T}
return sampling_scheme.noise_terms[node_index]
end
function get_children(
sampling_scheme::InSampleMonteCarlo,
node::Node{T},
node_index::T,
) where {T}
return node.children
end
function get_children(
sampling_scheme::OutOfSampleMonteCarlo{T},
node::Node{T},
node_index::T,
) where {T}
return sampling_scheme.children[node_index]
end
function get_root_children(
sampling_scheme::InSampleMonteCarlo,
graph::PolicyGraph{T},
) where {T}
return graph.root_children
end
function get_root_children(
sampling_scheme::OutOfSampleMonteCarlo{T},
graph::PolicyGraph{T},
) where {T}
return sampling_scheme.root_children
end
function sample_noise(noise_terms::Vector{<:Noise})
if length(noise_terms) == 0
return nothing
end
cumulative_probability = sum(noise.probability for noise in noise_terms)
if cumulative_probability > 1.0 + 1e-6
error("Cumulative probability cannot be greater than 1.0.")
end
rnd = rand() * cumulative_probability
for noise in noise_terms
rnd -= noise.probability
if rnd <= 0.0
return noise.term
end
end
return error(
"Internal SDDP error: unable to sample noise from $(noise_terms)",
)
end
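# Example (editor's sketch, not part of the library source): `sample_noise`
# draws a term with probability proportional to its weight.
#
#   terms = [SDDP.Noise(:low, 0.2), SDDP.Noise(:high, 0.8)]
#   SDDP.sample_noise(terms)  # returns :high roughly 80% of the time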
function sample_scenario(
graph::PolicyGraph{T},
sampling_scheme::Union{InSampleMonteCarlo,OutOfSampleMonteCarlo{T}},
) where {T}
max_depth = min(sampling_scheme.max_depth, sampling_scheme.rollout_limit())
# Storage for our scenario. Each tuple is (node_index, noise.term).
scenario_path = Tuple{T,Any}[]
# We only use visited_nodes if terminate_on_cycle=true. Just initialize
# anyway.
visited_nodes = Set{T}()
# Begin by sampling a node from the children of the root node.
node_index = something(
sampling_scheme.initial_node,
sample_noise(get_root_children(sampling_scheme, graph)),
)::T
while true
node = graph[node_index]
noise_terms = get_noise_terms(sampling_scheme, node, node_index)
children = get_children(sampling_scheme, node, node_index)
noise = sample_noise(noise_terms)
push!(scenario_path, (node_index, noise))
# Termination conditions:
if length(children) == 0
# 1. Our node has no children, i.e., we are at a leaf node.
return scenario_path, false
elseif sampling_scheme.terminate_on_cycle && node_index in visited_nodes
# 2. terminate_on_cycle = true and we have detected a cycle.
return scenario_path, true
elseif 0 < sampling_scheme.max_depth <= length(scenario_path)
# 3. max_depth > 0 and we have explored max_depth number of nodes.
return scenario_path, false
elseif sampling_scheme.terminate_on_dummy_leaf &&
rand() < 1 - sum(child.probability for child in children)
# 4. we sample a "dummy" leaf node in the next step due to the
# probability of the child nodes summing to less than one.
return scenario_path, false
end
# We only need to store a list of visited nodes if we want to terminate
# due to the presence of a cycle.
if sampling_scheme.terminate_on_cycle
push!(visited_nodes, node_index)
end
# Sample a new node to transition to.
node_index = sample_noise(children)::T
end
# Throw an error because we should never end up here.
return error(
"Internal SDDP error: something went wrong sampling a scenario.",
)
end
# ========================= Historical Sampling Scheme ======================= #
mutable struct Historical{T,S} <: AbstractSamplingScheme
scenarios::Vector{Noise{Vector{Tuple{T,S}}}}
sequential::Bool
counter::Int
terminate_on_cycle::Bool
end
function Base.show(io::IO, h::Historical)
print(
io,
"A Historical sampler with $(length(h.scenarios)) scenarios sampled ",
h.sequential ? "sequentially." : "probabilistically.",
)
return
end
"""
Historical(
scenarios::Vector{Vector{Tuple{T,S}}},
probability::Vector{Float64};
terminate_on_cycle::Bool = false,
) where {T,S}
A sampling scheme that samples a scenario from the vector of scenarios
`scenarios` according to `probability`.
## Examples
```julia
Historical(
[
[(1, 0.5), (2, 1.0), (3, 0.5)],
[(1, 0.5), (2, 0.0), (3, 1.0)],
[(1, 1.0), (2, 0.0), (3, 0.0)]
],
[0.2, 0.5, 0.3],
)
```
"""
function Historical(
scenarios::Vector{Vector{Tuple{T,S}}},
probability::Vector{Float64};
terminate_on_cycle::Bool = false,
) where {T,S}
if !(sum(probability) ≈ 1.0)
error(
"Probability of historical scenarios must sum to 1. Currently: " *
"$(sum(probability)).",
)
end
output = [Noise(s, p) for (s, p) in zip(scenarios, probability)]
return Historical(output, false, 0, terminate_on_cycle)
end
"""
Historical(
scenarios::Vector{Vector{Tuple{T,S}}};
terminate_on_cycle::Bool = false,
) where {T,S}
A deterministic sampling scheme that iterates through the vector of provided
`scenarios`.
## Examples
```julia
Historical([
[(1, 0.5), (2, 1.0), (3, 0.5)],
[(1, 0.5), (2, 0.0), (3, 1.0)],
[(1, 1.0), (2, 0.0), (3, 0.0)],
])
```
"""
function Historical(
scenarios::Vector{Vector{Tuple{T,S}}};
terminate_on_cycle::Bool = false,
) where {T,S}
return Historical(Noise.(scenarios, NaN), true, 0, terminate_on_cycle)
end
"""
Historical(
scenario::Vector{Tuple{T,S}};
terminate_on_cycle::Bool = false,
) where {T,S}
A deterministic sampling scheme that always samples `scenario`.
## Examples
```julia
Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
```
"""
function Historical(scenario::Vector{Tuple{T,S}}; kwargs...) where {T,S}
return Historical([scenario]; kwargs...)
end
function sample_scenario(
graph::PolicyGraph{T},
sampling_scheme::Historical{T,NoiseTerm};
# Ignore the other kwargs because the user is giving
# us the full scenario.
kwargs...,
) where {T,NoiseTerm}
ret = sampling_scheme.terminate_on_cycle
if sampling_scheme.sequential
sampling_scheme.counter += 1
if sampling_scheme.counter > length(sampling_scheme.scenarios)
sampling_scheme.counter = 1
end
return sampling_scheme.scenarios[sampling_scheme.counter].term, ret
end
return sample_noise(sampling_scheme.scenarios), ret
end
"""
PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())
A sampling scheme that lazily samples and caches `N` scenarios from
`sampling_scheme`, then cycles through them on subsequent calls, similar to the
approach used in PSR's software.
"""
mutable struct PSRSamplingScheme{A} <: AbstractSamplingScheme
N::Int
sampling_scheme::A
scenarios::Vector{Any}
counter::Int
lock::ReentrantLock
function PSRSamplingScheme(
N::Int;
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
)
lock = ReentrantLock()
return new{typeof(sampling_scheme)}(N, sampling_scheme, Any[], 0, lock)
end
end
function Base.show(io::IO, h::PSRSamplingScheme)
print(io, "A sampler with $(length(h.scenarios)) scenarios like PSR does.")
return
end
function sample_scenario(
graph::PolicyGraph{T},
s::PSRSamplingScheme{A};
kwargs...,
) where {T,A}
lock(s.lock)
try
s.counter += 1
if s.counter > s.N
s.counter = 1
end
if s.counter > length(s.scenarios)
push!(
s.scenarios,
sample_scenario(graph, s.sampling_scheme; kwargs...),
)
end
return s.scenarios[s.counter]
finally
unlock(s.lock)
end
end
"""
SimulatorSamplingScheme(simulator::Function)
Create a sampling scheme based on a univariate scenario generator `simulator`,
which returns a `Vector{Float64}` when called with no arguments like
`simulator()`.
This sampling scheme must be used with a Markovian graph constructed from the
same `simulator`.
The sample space for [`SDDP.parameterize`](@ref) must be a tuple with 1 or 2
values: the first value is the Markov state and the second value is the random variable
for the current node. If the node is deterministic, use `Ω = [(markov_state,)]`.
This sampling scheme generates a new scenario by calling `simulator()`, and then
picking the sequence of nodes in the Markovian graph that is closest to the new
trajectory.
## Example
```julia
julia> using SDDP
julia> import HiGHS
julia> simulator() = cumsum(rand(10))
simulator (generic function with 1 method)
julia> model = SDDP.PolicyGraph(
SDDP.MarkovianGraph(simulator; budget = 20, scenarios = 100);
sense = :Max,
upper_bound = 12,
optimizer = HiGHS.Optimizer,
) do sp, node
t, markov_state = node
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@variable(sp, u >= 0)
@constraint(sp, x.out == x.in - u)
# Elements of Ω MUST be a tuple in which `markov_state` is the first
# element.
Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]
SDDP.parameterize(sp, Ω) do (markov_state, ω)
set_upper_bound(u, ω.u)
@stageobjective(sp, markov_state * u)
end
end;
julia> SDDP.train(
model;
print_level = 0,
iteration_limit = 10,
sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
)
```
"""
mutable struct SimulatorSamplingScheme{F} <: AbstractSamplingScheme
simulator::F
end
function Base.show(io::IO, h::SimulatorSamplingScheme)
print(io, "SimulatorSamplingScheme")
return
end
function _closest_index(graph, t, value)
min_value, min_dist = value, Inf
for (t_, value_) in keys(graph.nodes)
if t_ == t
if abs(value - value_) < min_dist
min_value = value_
min_dist = abs(value - value_)
end
end
end
return (t, min_value)
end
function sample_scenario(
graph::PolicyGraph{Tuple{Int,Float64}},
s::SimulatorSamplingScheme{F},
) where {F}
scenario_path = Tuple{Tuple{Int,Float64},Any}[]
for (t, value) in enumerate(s.simulator())
node_index = _closest_index(graph, t, value)
node = graph[node_index]
noise_terms = get_noise_terms(InSampleMonteCarlo(), node, node_index)
noise = sample_noise(noise_terms)
@assert noise[1] == node_index[2]
ω = length(noise) == 1 ? (value,) : (value, noise[2])
push!(scenario_path, (node_index, ω))
end
return scenario_path, false
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 17011 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# ======================= Iteration Limit Stopping Rule ====================== #
"""
IterationLimit(limit::Int)
Terminate the algorithm after `limit` iterations.
"""
mutable struct IterationLimit <: AbstractStoppingRule
limit::Int
end
stopping_rule_status(::IterationLimit) = :iteration_limit
function convergence_test(::PolicyGraph, log::Vector{Log}, rule::IterationLimit)
return log[end].iteration >= rule.limit
end
# ========================= Time Limit Stopping Rule ========================= #
"""
TimeLimit(limit::Float64)
Terminate the algorithm after `limit` seconds of computation.
"""
mutable struct TimeLimit <: AbstractStoppingRule
limit::Float64
end
stopping_rule_status(::TimeLimit) = :time_limit
function convergence_test(::PolicyGraph, log::Vector{Log}, rule::TimeLimit)
return log[end].time >= rule.limit
end
# ========================= Statistical Stopping Rule ======================== #
"""
Statistical(;
num_replications::Int,
iteration_period::Int = 1,
z_score::Float64 = 1.96,
verbose::Bool = true,
disable_warning::Bool = false,
)
Perform an in-sample Monte Carlo simulation of the policy with
`num_replications` replications every `iteration_period`s and terminate if the
deterministic bound (lower if minimizing) falls into the confidence interval for
the mean of the simulated cost.
If `verbose = true`, print the confidence interval.
If `disable_warning = true`, disable the warning telling you not to use this
stopping rule (see below).
## Why this stopping rule is not good
This stopping rule is one of the most common stopping rules seen in the
literature. Don't follow the crowd. It is a poor choice for your model, and
should be rarely used. Instead, you should use the default stopping rule, or use
a fixed limit like a time or iteration limit.
To understand why this stopping rule is a bad idea, assume we have conducted
`num_replications` simulations and the objectives are in a vector
`objectives::Vector{Float64}`.
Our mean is `μ = mean(objectives)` and the half-width of the confidence interval
is `w = z_score * std(objectives) / sqrt(num_replications)`.
Many papers suggest terminating the algorithm once the deterministic bound
(lower if minimizing, upper if maximizing) is contained within the confidence
interval. That is, if `μ - w <= bound <= μ + w`. Even worse, some papers define
an optimization gap of `(μ + w) / bound` (if minimizing) or `(μ - w) / bound`
(if maximizing), and they terminate once the gap is less than a value like 1%.
Both of these approaches are misleading, and more often than not, they will
result in terminating with a sub-optimal policy that performs worse than
expected. There are two main reasons for this:
1. The half-width depends on the number of replications. To reduce the
computational cost, users are often tempted to choose a small number of
replications. This increases the half-width and makes it more likely that
the algorithm will stop early. But if we choose a large number of
replications, then the computational cost is high, and we would have been
better off to run a fixed number of iterations and use that computational
time to run extra training iterations.
2. The confidence interval assumes that the simulated values are normally
distributed. In infinite horizon models, this is almost never the case. The
distribution is usually closer to exponential or log-normal.
There is a third, more technical reason which relates to the conditional
dependence of constructing multiple confidence intervals.
The default value of `z_score = 1.96` corresponds to a 95% confidence
interval. You should interpret the interval as "if we re-run this simulation
100 times, then the true mean will lie in the confidence interval 95 times
out of 100." But if the bound is within the confidence interval, then we know
the true mean cannot be better than the bound. Therefore, there is a more than
95% chance that the mean is within the interval.
A separate problem arises if we simulate, find that the bound is outside the
confidence interval, keep training, and then re-simulate to compute a new
confidence interval. Because we will terminate when the bound enters the
confidence interval, the repeated construction of a confidence interval means
that the unconditional probability that we terminate with a false positive is
larger than 5% (there are now more chances that the sample mean is optimistic
and that the confidence interval includes the bound but not the true mean). One
fix is to simulate with a sequentially increasing number of replicates, so that
the unconditional probability stays at 95%, but this runs into the problem of
computational cost. For more information on sequential sampling, see, for
example, Güzin Bayraksan, David P. Morton, (2011) A Sequential Sampling
Procedure for Stochastic Programming. Operations Research 59(4):898-913.
"""
struct Statistical <: AbstractStoppingRule
num_replications::Int
iteration_period::Int
z_score::Float64
verbose::Bool
function Statistical(;
num_replications,
iteration_period = 1,
z_score = 1.96,
verbose = true,
disable_warning::Bool = false,
)
if !disable_warning
@warn(
"Are you really sure you want to use this stopping rule? " *
"Read why we don't recommend it by typing `? " *
"SDDP.Statistical` into the REPL to read the docstring.\n\n" *
"If you understand what you are doing, you can disable this " *
"warning with `SDDP.Statistical(; disable_warning = true)`",
)
end
return new(num_replications, iteration_period, z_score, verbose)
end
end
stopping_rule_status(::Statistical) = :statistical
function convergence_test(
graph::PolicyGraph,
log::Vector{Log},
rule::Statistical,
)
if length(log) % rule.iteration_period != 0
# Only run this convergence test every rule.iteration_period iterations.
return false
end
results = simulate(graph, rule.num_replications)
objectives =
map(simulation -> sum(s[:stage_objective] for s in simulation), results)
sample_mean = Statistics.mean(objectives)
sample_ci =
rule.z_score * Statistics.std(objectives) / sqrt(rule.num_replications)
if rule.verbose
println(
"Simulated policy value: [",
print_value(sample_mean - sample_ci),
", ",
print_value(sample_mean + sample_ci),
"]",
)
end
current_bound = log[end].bound
if graph.objective_sense == MOI.MIN_SENSE
return sample_mean - sample_ci <= current_bound
elseif graph.objective_sense == MOI.MAX_SENSE
return current_bound <= sample_mean + sample_ci
else
# If sense is none of the above for some awkward reason, return to
# previous criteria
return sample_mean - sample_ci <=
current_bound <=
sample_mean + sample_ci
end
end
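# Example (editor's sketch, not part of the library source): the quantities the
# test compares, computed directly from a vector of simulated objectives.
#
#   import Statistics
#   objectives = [10.0, 12.0, 11.0, 13.0]
#   μ = Statistics.mean(objectives)                   # 11.5
#   w = 1.96 * Statistics.std(objectives) / sqrt(4)   # ≈ 1.27
#   # For a minimization problem, the rule stops once bound >= μ - w.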
# ======================= Bound-stalling Stopping Rule ======================= #
"""
BoundStalling(num_previous_iterations::Int, tolerance::Float64)
Terminate the algorithm once the deterministic bound (lower if minimizing,
upper if maximizing) fails to improve by more than `tolerance` in absolute terms
for more than `num_previous_iterations` consecutive iterations, provided it has
improved relative to the bound after the first iteration.
Checking for an improvement relative to the first iteration avoids early
termination in a situation where the bound fails to improve for the first `N`
iterations. This frequently happens in models with a large number of stages,
where it takes time for the cuts to propagate backward enough to modify the
bound of the root node.
"""
struct BoundStalling <: AbstractStoppingRule
num_previous_iterations::Int
tolerance::Float64
end
stopping_rule_status(::BoundStalling) = :bound_stalling
function convergence_test(
::PolicyGraph{T},
log::Vector{Log},
rule::BoundStalling,
) where {T}
if length(log) < rule.num_previous_iterations + 1
return false
end
# No change in the bound. There are three possibilities:
# 1) we haven't added enough cuts
# 2) the problem was deterministic or myopic
# 3) there were existing cuts
existing_solves = log[1].total_solves > log[end].total_solves / length(log)
if !existing_solves && isapprox(log[1].bound, log[end].bound; atol = 1e-6)
return all(l -> isapprox(l.bound, l.simulation_value; atol = 1e-6), log)
end
for i in 1:rule.num_previous_iterations
if abs(log[end-i].bound - log[end-i+1].bound) > rule.tolerance
return false
end
end
return true
end
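# Example (editor's sketch, not part of the library source): stop once the
# bound has improved by less than 1e-4 for 10 consecutive iterations. `model`
# is an assumed SDDP.PolicyGraph.
#
#   SDDP.train(model; stopping_rules = [SDDP.BoundStalling(10, 1e-4)])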
"""
StoppingChain(rules::AbstractStoppingRule...)
Terminate once all of the `rules` are satisfied.
This stopping rule short-circuits, so subsequent rules are only tested if the
previous rules pass.
## Examples
A stopping rule that runs 100 iterations, then checks for the bound stalling:
```julia
StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
```
"""
struct StoppingChain <: AbstractStoppingRule
rules::Vector{AbstractStoppingRule}
function StoppingChain(rules::AbstractStoppingRule...)
return new(collect(rules))
end
end
function stopping_rule_status(rule::StoppingChain)
return Symbol(join(stopping_rule_status.(rule.rules), " ∧ "))
end
function convergence_test(
graph::PolicyGraph,
log::Vector{Log},
chain::StoppingChain,
)
for rule in chain.rules
if !convergence_test(graph, log, rule)
return false
end
end
return true
end
# ========================== SimulationStoppingRule ========================== #
mutable struct SimulationStoppingRule{F} <: AbstractStoppingRule
simulator::F
replications::Int
period::Int
data::Vector{Any}
last_iteration::Int
distance_tol::Float64
bound_tol::Float64
end
function _get_state_variable_value(key)
return sp -> JuMP.value(JuMP.variable_by_name(sp, "$(key)_out"))
end
"""
SimulationStoppingRule(;
sampling_scheme::AbstractSamplingScheme = SDDP.InSampleMonteCarlo(),
replications::Int = -1,
period::Int = -1,
distance_tol::Float64 = 1e-2,
bound_tol::Float64 = 1e-4,
)
Terminate the algorithm using a mix of heuristics. Unless you know otherwise,
this is typically a good default.
## Termination criteria
First, we check that the deterministic bound has stabilized. That is, over the
last five iterations, the deterministic bound has changed by less than an
absolute or relative tolerance of `bound_tol`.
Then, if we have not done one in the last `period` iterations, we perform a
primal simulation of the policy using `replications` out-of-sample realizations
from `sampling_scheme`. The realizations are stored and re-used in each
simulation. From each simulation, we record the value of the stage objective.
We terminate the policy if each of the trajectories in two consecutive
simulations differ by less than `distance_tol`.
By default, `replications` and `period` are `-1`, and SDDP.jl will guess good
values for these. Override the default behavior by setting an appropriate
value.
## Example
```julia
SDDP.train(model; stopping_rules = [SimulationStoppingRule()])
```
"""
function SimulationStoppingRule(;
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
replications::Int = -1,
period::Int = -1,
distance_tol::Float64 = 1e-2,
bound_tol::Float64 = 1e-4,
)
cached_sampling_scheme =
PSRSamplingScheme(replications; sampling_scheme = sampling_scheme)
function simulator(model, N)
cached_sampling_scheme.N = max(N, cached_sampling_scheme.N)
scenarios = simulate(
model,
N;
sampling_scheme = cached_sampling_scheme,
# TODO(odow): if we use Threaded() here, then we get out of order
# between the simulations and the PSRSamplingScheme: it's not the
# case that simulation `i` accesses sample `i`, because they might
# happen out-of-order.
parallel_scheme = Serial(),
)
# !!! info
# At one point, I tried adding the primal value of the state
# variables. But it didn't work for some models because of
# degeneracy, that is, the value of a state variable will oscillate
# between two equally optimal outcomes in subsequent iterations.
# So for now, I just use the stage objective and the bellman term.
keys = [:stage_objective, :bellman_term]
return map(scenarios) do scenario
return [getindex.(scenario, k) for k in keys]
end
end
return SimulationStoppingRule(
simulator,
replications,
period,
Any[],
0,
distance_tol,
bound_tol,
)
end
stopping_rule_status(::SimulationStoppingRule) = :simulation_stopping
function _compute_distance(x::Real, y::Real)
if x ≈ y
return 0.0
end
return abs(x - y) / max(1.0, abs(x), abs(y))
end
function _compute_distance(new_data::Vector, old_data::Vector)
d = sum(_compute_distance(x, y)^2 for (x, y) in zip(new_data, old_data))
return sqrt(d)
end
function _period(period, iterations)
if period != -1
return period
elseif iterations <= 100
return 20
elseif iterations <= 1_000
return 100
else
return 500
end
end
function convergence_test(
model::PolicyGraph{T},
log::Vector{Log},
rule::SimulationStoppingRule,
) where {T}
# Setup parameters based on the model.
if rule.replications == -1
rule.replications = min(100, _unique_paths(model))
end
if isempty(rule.data)
# On the first iteration, run a simulation and keep going.
rule.data = rule.simulator(model, rule.replications)
rule.last_iteration = 0
return false
end
if length(log) <= 5
return false # Always do at least 5 iterations.
end
if !isapprox(
log[end].bound,
log[end-5].bound;
atol = rule.bound_tol,
rtol = rule.bound_tol,
)
        return false # If the bound hasn't stalled, keep going.
end
if length(log) - rule.last_iteration < _period(rule.period, length(log))
return false # Do at least rule.period iterations since the last trial
end
new_data = rule.simulator(model, rule.replications)
distance = _compute_distance(new_data, rule.data)
rule.data = new_data
rule.last_iteration = length(log)
return distance < rule.distance_tol
end
# ========================== FirstStageStoppingRule ========================== #
mutable struct FirstStageStoppingRule <: AbstractStoppingRule
data::Vector{Any}
atol::Float64
iterations::Int
end
"""
FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)
Terminate the algorithm when the outgoing values of the first-stage state
variables have not changed by more than `atol` for `iterations` number of
consecutive iterations.
## Example
```julia
SDDP.train(model; stopping_rules = [FirstStageStoppingRule()])
```
"""
function FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)
return FirstStageStoppingRule(Any[], atol, iterations)
end
stopping_rule_status(::FirstStageStoppingRule) = :first_stage_stopping
function convergence_test(
model::PolicyGraph{T},
log::Vector{Log},
rule::FirstStageStoppingRule,
) where {T}
if length(model.root_children) != 1
error(
"FirstStageStoppingRule cannot be applied because first-stage is " *
"not deterministic",
)
end
node = model[model.root_children[1].term]
if length(node.noise_terms) > 1
error(
"FirstStageStoppingRule cannot be applied because first-stage is " *
"not deterministic",
)
end
lock(node.lock)
try
set_incoming_state(node, model.initial_root_state)
parameterize(node, first(node.noise_terms).term)
optimize!(node.subproblem)
state = get_outgoing_state(node)
push!(rule.data, state)
if length(rule.data) < rule.iterations
return false
end
for i in 1:(rule.iterations-1), (k, v) in state
if !isapprox(rule.data[end-i][k], v; atol = rule.atol)
return false
end
end
return true
finally
unlock(node.lock)
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1486 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
function launch_websocket(server_to_client::Channel{Log})
host, port = HTTP.ip"127.0.0.1", 8000
server = HTTP.Sockets.listen(host, port)
HTTP.WebSockets.listen!(host, port; server = server) do ws
for msg in ws
break # Wait for the first message, then exit the loop
end
while true
if !isready(server_to_client)
sleep(0.01)
continue
end
new_log = take!(server_to_client)
data = Dict(
"iteration" => new_log.iteration,
"bound" => new_log.bound,
"simulation" => new_log.simulation_value,
"time" => new_log.time,
)
HTTP.send(ws, JSON.json(data))
end
return
end
return server
end
function launch_dashboard()
server_to_client = Channel{Log}(typemax(Int))
server = launch_websocket(server_to_client)
launch_file(joinpath(@__DIR__, "dashboard.html"))
return (log::Union{Log,Nothing}, close_flag::Bool) -> begin
if close_flag
close(server)
elseif log !== nothing
put!(server_to_client, log)
end
return
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3033 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Internal function: convert dataset (from SDDP.simulate) into a matrix where
# the rows are quantiles, and the columns are stages.
function publication_data(
dataset::Vector{<:Vector{<:AbstractDict}},
quantiles::Vector{Float64},
stage_function::Function,
)
max_stages = maximum(length.(dataset))
output_array = fill(NaN, length(quantiles), max_stages)
for stage in 1:max_stages
stage_data = stage_function.([data[stage] for data in dataset])
for (i, s) in enumerate(stage_data)
if !isfinite(s)
error(
"Unable to plot `publication_plot` because stage $stage " *
"of replication $i contains data that is not finite. " *
"The data function must return a finite real-valued " *
"scalar. Got: $s",
)
end
end
output_array[:, stage] .= Statistics.quantile(stage_data, quantiles)
end
return output_array
end
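# Example (editor's sketch, not part of the library source): three replications
# of a two-stage simulation, reduced to the 0%, 50%, and 100% quantiles of the
# stage objective.
#
#   simulations = [
#       [Dict(:stage_objective => 1.0), Dict(:stage_objective => 4.0)],
#       [Dict(:stage_objective => 2.0), Dict(:stage_objective => 5.0)],
#       [Dict(:stage_objective => 3.0), Dict(:stage_objective => 6.0)],
#   ]
#   SDDP.publication_data(simulations, [0.0, 0.5, 1.0], d -> d[:stage_objective])
#   # 3×2 Matrix{Float64}:
#   #  1.0  4.0
#   #  2.0  5.0
#   #  3.0  6.0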
"""
SDDP.publication_plot(
data_function, simulations;
quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],
kwargs...)
Create a `Plots.jl` recipe plot of the simulations.
See `Plots.jl` for the list of keyword arguments.
## Examples
SDDP.publication_plot(simulations; title = "My title") do data
return data[:stage_objective]
end
"""
function publication_plot(
data_function::Function,
simulations::Vector{<:Vector{<:AbstractDict}};
kwargs...,
)
# An annoying over-load so that we can provide a consistent interface
# instead of the Plots.jl generated `publicationplot`.
return SDDP.publicationplot(simulations, data_function; kwargs...)
end
RecipesBase.@userplot PublicationPlot
RecipesBase.@recipe function f(
publication_plot::PublicationPlot;
quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],
)
dataset, stage_function = publication_plot.args
size --> (500, 300)
data_matrix = publication_data(dataset, sort(quantile), stage_function)
for i in 1:floor(Int, size(data_matrix, 1) / 2)
μ = 0.5 * (data_matrix[i, :] + data_matrix[end-i+1, :])
r = data_matrix[end-i+1, :] - μ
RecipesBase.@series begin
x := 1:size(data_matrix, 2)
ribbon := r
y := μ
fillalpha --> 0.2
seriesalpha --> 0.0
seriescolor --> "#00467F"
label := ""
()
end
end
if mod(size(data_matrix, 1), 2) == 1
qi = ceil(Int, size(data_matrix, 1) / 2)
RecipesBase.@series begin
x := 1:size(data_matrix, 2)
y := data_matrix[qi, :]
seriescolor --> "#00467F"
label := ""
()
end
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4606 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
SDDP.SpaghettiPlot(; stages, scenarios)
Initialize a new `SpaghettiPlot` with `stages` stages and `scenarios` number of
replications.
"""
struct SpaghettiPlot{D}
simulations::Vector{Vector{D}}
data::Vector{Dict{String,Any}}
function SpaghettiPlot(
simulations::Vector{Vector{D}},
) where {D<:AbstractDict}
return new{D}(simulations, Dict{String,Any}[])
end
end
function Base.show(io::IO, plt::SpaghettiPlot)
return print(
io,
"A spaghetti plot with ",
length(plt.simulations),
" scenarios ",
"and ",
length(plt.simulations[1]),
" stages.",
)
end
"""
SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)
# Description
Add a new figure to the SpaghettiPlot `plt`, where the y-value of the
`scenario`th line when x = `stage` is given by
`data_function(plt.simulations[scenario][stage])`.
# Keyword arguments
* `xlabel`: set the xaxis label
* `ylabel`: set the yaxis label
* `title`: set the title of the plot
* `ymin`: set the minimum y value
* `ymax`: set the maximum y value
* `cumulative`: plot the additive accumulation of the value across the stages
* `interpolate`: interpolation method for lines between stages.
Defaults to `"linear"` see [the d3 docs](https://github.com/d3/d3-3.x-api-reference/blob/master/SVG-Shapes.md#line_interpolate)
for all options.
## Examples
```julia
simulations = simulate(model, 10)
plt = SDDP.spaghetti_plot(simulations)
SDDP.add_spaghetti(plt; title = "Stage objective") do data
return data[:stage_objective]
end
```
"""
function add_spaghetti(
data_function::Function,
plt::SpaghettiPlot;
xlabel = "Stages",
ylabel = "",
cumulative = false,
title = "",
interpolate = "linear",
ymin = "",
ymax = "",
)
plot_dict = Dict{String,Any}(
"xlabel" => xlabel,
"ylabel" => ylabel,
"title" => title,
"cumulative" => cumulative,
"interpolate" => interpolate,
"ymin" => ymin,
"ymax" => ymax,
)
plot_dict["data"] = Vector{Float64}[]
for (i, scenario) in enumerate(plt.simulations)
push!(plot_dict["data"], Float64[])
series_value = 0.0
for stage in scenario
y_value = float(data_function(stage))
if cumulative
series_value += y_value
else
series_value = y_value
end
push!(plot_dict["data"][i], series_value)
end
end
push!(plt.data, plot_dict)
return
end
function fill_template(
dest::String,
args...;
template::String,
launch::Bool = false,
)
s = read(template, String)
for arg in args
s = replace(s, arg)
end
write(dest, s)
if launch
launch_file(dest)
end
return
end
"""
plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)
The SpaghettiPlot plot `plt` to `filename`. If `filename` is not given, it will
be saved to a temporary directory. If `open = true`, then a browser window will
be opened to display the resulting HTML file.
"""
function plot(
plt::SpaghettiPlot,
filename::String = joinpath(
tempdir(),
string(Random.randstring(), ".html"),
);
open::Bool = true,
)
ASSET_DIR = dirname(@__FILE__)
D3_JS_FILE = joinpath(ASSET_DIR, "d3.v3.min.js")
SPAGHETTI_JS_FILE = joinpath(ASSET_DIR, "spaghetti_plot.js")
SPAGHETTI_HTML_FILE = joinpath(ASSET_DIR, "spaghetti_plot.html")
fill_template(
filename,
"<!--DATA-->" => JSON.json(plt.data),
"<!--D3.JS-->" => read(D3_JS_FILE, String),
"<!--SPAGHETTI_PLOT.JS-->" => read(SPAGHETTI_JS_FILE, String);
template = SPAGHETTI_HTML_FILE,
launch = open,
)
return
end
function save(p::SpaghettiPlot, args...; kwargs...)
Base.depwarn("`SDDP.save` is deprecated. Use `SDDP.plot` instead.", :save)
return plot(p, args...; kwargs...)
end
function launch_file(filename)
if Sys.iswindows()
run(`$(ENV["COMSPEC"]) /c start $(filename)`)
elseif Sys.isapple()
run(`open $(filename)`)
elseif Sys.islinux() || Sys.isbsd()
run(`xdg-open $(filename)`)
else
error("Unable to show plot. Try opening the file $(filename) manually.")
end
return
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 11703 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
ValueFunction
A representation of the value function. SDDP.jl uses the following unique representation of
the value function that is undocumented in the literature.
It supports three types of state variables:
1) x - convex "resource" states
2) b - concave "belief" states
3) y - concave "objective" states
In addition, we have three types of cuts:
1) Single-cuts (also called "average" cuts in the literature), which involve the
risk-adjusted expectation of the cost-to-go.
2) Multi-cuts, which use a different cost-to-go term for each realization w.
3) Risk-cuts, which correspond to the facets of the dual interpretation of a coherent risk
measure.
Therefore, ValueFunction returns a JuMP model of the following form:
V(x, b, y) = min: μᵀb + νᵀy + θ
s.t. # "Single" / "Average" cuts
μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J
# "Multi" cuts
μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K
# "Risk-set" cuts
θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
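A sketch of typical usage, assuming a trained policy graph `model` with a state
variable named `volume`:
```julia
V = SDDP.ValueFunction(model; node = 1)
SDDP.evaluate(V, Dict(:volume => 1.0))
```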
"""
struct ValueFunction{
O<:Union{Nothing,NTuple{N,JuMP.VariableRef} where {N}},
B<:Union{Nothing,Dict{T,JuMP.VariableRef} where {T}},
}
index::Any
model::JuMP.Model
theta::JuMP.VariableRef
states::Dict{Symbol,JuMP.VariableRef}
objective_state::O
belief_state::B
end
function Base.show(io::IO, v::ValueFunction)
print(io, "A value function for node $(v.index)")
return
end
function JuMP.set_optimizer(v::ValueFunction, optimizer)
set_optimizer(v.model, optimizer)
set_silent(v.model)
return
end
function _add_to_value_function(
model::JuMP.Model,
states::Dict{Symbol,JuMP.VariableRef},
objective_state,
belief_state,
convex_approximation::ConvexApproximation,
theta_name::String,
)
theta = @variable(model, base_name = theta_name)
if objective_sense(model) == MOI.MIN_SENSE
set_lower_bound(theta, lower_bound(convex_approximation.theta))
else
set_upper_bound(theta, upper_bound(convex_approximation.theta))
end
for cut in convex_approximation.cuts
cut_expr = @expression(
model,
cut.intercept +
sum(coef * states[key] for (key, coef) in cut.coefficients)
)
if objective_state !== nothing
@assert cut.obj_y !== nothing
cut_expr = @expression(
model,
cut_expr -
sum(y * μ for (y, μ) in zip(cut.obj_y, objective_state))
)
end
if belief_state !== nothing
@assert cut.belief_y !== nothing
cut_expr = @expression(
model,
cut_expr -
sum(cut.belief_y[key] * μ for (key, μ) in belief_state)
)
end
if objective_sense(model) == MOI.MIN_SENSE
@constraint(model, theta >= cut_expr)
else
@constraint(model, theta <= cut_expr)
end
end
return theta
end
function ValueFunction(model::PolicyGraph{T}; node::T) where {T}
return ValueFunction(model[node])
end
function ValueFunction(node::Node{T}) where {T}
b = node.bellman_function
sense = objective_sense(node.subproblem)
model = Model()
if node.optimizer !== nothing
set_optimizer(model, node.optimizer)
set_silent(model)
end
set_objective_sense(model, sense)
states = Dict{Symbol,VariableRef}(
key => @variable(model, base_name = "$(key)") for
(key, x) in node.states
)
objective_state = if node.objective_state === nothing
nothing
else
tuple(
VariableRef[
@variable(
model,
lower_bound = lower_bound(μ),
upper_bound = upper_bound(μ),
base_name = "_objective_state_$(i)"
) for (i, μ) in enumerate(node.objective_state.μ)
]...,
)
end
belief_state = if node.belief_state === nothing
nothing
else
Dict{T,VariableRef}(
key => @variable(
model,
lower_bound = lower_bound(μ),
upper_bound = upper_bound(μ),
base_name = "_belief_$(key)"
) for (key, μ) in node.belief_state.μ
)
end
global_theta = _add_to_value_function(
model,
states,
objective_state,
belief_state,
b.global_theta,
"V",
)
local_thetas = VariableRef[
_add_to_value_function(
model,
states,
            objective_state,
            belief_state,
l,
"v$(i)",
) for (i, l) in enumerate(b.local_thetas)
]
for risk_set in b.risk_set_cuts
expr = @expression(
model,
sum(p * v for (p, v) in zip(risk_set, local_thetas))
)
if sense == MOI.MIN_SENSE
@constraint(model, global_theta >= expr)
else
@constraint(model, global_theta <= expr)
end
end
return ValueFunction(
node.index,
model,
global_theta,
states,
objective_state,
belief_state,
)
end
"""
evaluate(
V::ValueFunction,
        point::Dict{Union{Symbol,String},<:Real};
objective_state = nothing,
belief_state = nothing
)
Evaluate the value function `V` at `point` in the state-space.
Returns a tuple containing the height of the function, and the subgradient
w.r.t. the convex state-variables.
## Examples
```julia
evaluate(V, Dict(:volume => 1.0))
```
If the state variable is constructed like
`@variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0)`, use `[i]` to
index the state variable:
```julia
evaluate(V, Dict(Symbol("volume[1]") => 1.0))
```
You can also use strings or symbols for the keys.
```julia
evaluate(V, Dict("volume[1]" => 1))
```
"""
function evaluate(
V::ValueFunction,
point::Dict{Symbol,Float64};
objective_state = nothing,
belief_state = nothing,
)
for (state, val) in point
fix(V.states[state], val; force = true)
end
saddle = AffExpr(0.0)
if V.objective_state !== nothing
@assert objective_state !== nothing
for (y, x) in zip(objective_state, V.objective_state)
add_to_expression!(saddle, y, x)
end
end
if V.belief_state !== nothing
@assert belief_state !== nothing
for (key, x) in V.belief_state
add_to_expression!(saddle, belief_state[key], x)
end
end
@objective(V.model, objective_sense(V.model), V.theta + saddle)
optimize!(V.model)
obj = objective_value(V.model)
duals = Dict{Symbol,Float64}()
sign = objective_sense(V.model) == MOI.MIN_SENSE ? 1.0 : -1.0
for (key, var) in V.states
duals[key] = sign * dual(FixRef(var))
end
return obj, duals
end
# Define a fallback method to allow users to write things like `Dict("x" => 1)`.
function evaluate(V::ValueFunction, point; kwargs...)
return evaluate(
V,
Dict(Symbol(k) => convert(Float64, v)::Float64 for (k, v) in point);
kwargs...,
)
end
"""
    evaluate(V::ValueFunction{Nothing,Nothing}; kwargs...)
Evaluate the value function `V` at the point in the state-space specified by
`kwargs`.
## Examples
evaluate(V; volume = 1)
"""
function evaluate(V::ValueFunction{Nothing,Nothing}; kwargs...)
return evaluate(V, Dict(k => float(v) for (k, v) in kwargs))
end
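# A point in the state-space at which to evaluate a `ValueFunction`: `x` holds
# the convex "resource" states, `y` the objective states (or `nothing`), and
# `b` the belief states (or `nothing`).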
struct Point{Y,B}
x::Dict{Symbol,Float64}
y::Y
b::B
end
Point(x::Dict{Symbol,Float64}) = Point(x, nothing, nothing)
function height(V::ValueFunction{Y,B}, x::Point{Y,B}) where {Y,B}
return evaluate(V, x.x; objective_state = x.y, belief_state = x.b)[1]
end
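# Return the values of the single element that varies across `x`, or `nothing`
# if every element is constant. Errors if more than one element changes,
# because a plot axis can vary in only one dimension.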
function get_axis(x::Vector{Dict{K,V}}) where {K,V}
@assert length(x) >= 2
changing_key = nothing
for (key, val) in x[1]
if val == x[2][key]
continue
elseif changing_key !== nothing
error("Too many elements are changing")
end
changing_key = key
end
return changing_key === nothing ? nothing : [xi[changing_key] for xi in x]
end
function get_axis(x::Vector{NTuple{N,T}}) where {N,T}
@assert length(x) >= 2
changing_index = nothing
for i in 1:N
if x[1][i] == x[2][i]
continue
elseif changing_index !== nothing
error("Too many elements are changing")
end
changing_index = i
end
return changing_index === nothing ? nothing :
[xi[changing_index] for xi in x]
end
get_axis(x::Vector{Nothing}) = nothing
function get_axis(X::Vector{Point{Y,B}}) where {Y,B}
for f in [x -> x.x, x -> x.y, x -> x.b]
x = get_axis(f.(X))
x !== nothing && return x
end
return nothing
end
function get_data(V::ValueFunction{Y,B}, X::Vector{Point{Y,B}}) where {Y,B}
x = get_axis(X)
if x === nothing
error("Unable to detect changing dimension")
end
y = height.(Ref(V), X)
return x, y, Float64[]
end
function get_data(V::ValueFunction{Y,B}, X::Matrix{Point{Y,B}}) where {Y,B}
x = get_axis(collect(X[:, 1]))
if x === nothing
error("Unable to detect changing row")
end
y = get_axis(collect(X[1, :]))
if y === nothing
error("Unable to detect changing column")
end
z = height.(Ref(V), X)
return [i for _ in y for i in x], [i for i in y for _ in x], vec(z)
end
function plot(
V::ValueFunction{Y,B},
X::Array{Point{Y,B}};
filename::String = joinpath(
tempdir(),
string(Random.randstring(), ".html"),
),
open::Bool = true,
) where {Y,B}
x, y, z = get_data(V, X)
fill_template(
filename,
"<!--X-->" => JSON.json(x),
"<!--Y-->" => JSON.json(y),
"<!--Z-->" => JSON.json(z);
template = joinpath(@__DIR__, "value_functions.html"),
launch = open,
)
return
end
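# Plot a value function by passing a vector for one (line plot) or two (surface
# plot) state dimensions as keyword arguments; scalar keyword arguments fix the
# remaining states. A sketch, assuming a state variable named `volume`:
#   V = SDDP.ValueFunction(model; node = 1)
#   SDDP.plot(V; volume = 0.0:10.0, open = false)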
function plot(
V::ValueFunction{Nothing,Nothing};
filename::String = joinpath(
tempdir(),
string(Random.randstring(), ".html"),
),
open::Bool = true,
kwargs...,
)
d = Dict{Symbol,Float64}()
variables = Symbol[]
for (key, val) in kwargs
if isa(val, AbstractVector)
push!(variables, key)
else
d[key] = float(val)
end
end
if length(variables) == 1
points = Point{Nothing,Nothing}[]
key = variables[1]
for val in kwargs[key]
d2 = copy(d)
d2[key] = val
push!(points, Point(d2))
end
return plot(V, points; filename = filename, open = open)
elseif length(variables) == 2
k1, k2 = variables
N1, N2 = length(kwargs[k1]), length(kwargs[k2])
points = Array{Point{Nothing,Nothing},2}(undef, N1, N2)
for i in 1:N1
for j in 1:N2
d2 = copy(d)
d2[k1] = kwargs[k1][i]
d2[k2] = kwargs[k2][j]
points[i, j] = Point(d2)
end
end
return plot(V, points; filename = filename, open = open)
end
return error(
"Can only plot 1- or 2-dimensional value functions. You provided " *
"$(length(variables)).",
)
end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestExperimental
using SDDP
using Test
import Downloads
import HiGHS
import JSON
import JSONSchema
Downloads.download(
"https://odow.github.io/StochOptFormat/schemas/sof-1.schema.json",
"sof.schema.json",
)
const SCHEMA =
JSONSchema.Schema(JSON.parsefile("sof.schema.json"; use_mmap = false))
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
if isfile("experimental.sof.json")
rm("experimental.sof.json")
end
if isfile("sof.schema.json")
rm("sof.schema.json")
end
return
end
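# Build a small three-stage test model with two state variables. The `belief`
# and `objective_state` flags add an ambiguity set or an objective state so
# that the corresponding "not supported" errors can be exercised.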
function _create_model(
minimization::Bool;
belief::Bool = false,
objective_state::Bool = false,
)
graph = SDDP.LinearGraph(3)
if belief
SDDP.add_ambiguity_set(graph, [1])
SDDP.add_ambiguity_set(graph, [2, 3])
end
model = SDDP.PolicyGraph(
graph;
sense = minimization ? :Min : :Max,
lower_bound = -50.0,
upper_bound = 50.0,
) do sp, t
N = 2
C = [0.2, 0.7]
S = 2 .+ [0.33, 0.54]
DEMAND = [2, 10]
if objective_state
SDDP.add_objective_state(
                (y, ω) -> y + ω,
sp;
initial_value = 0.0,
lower_bound = 0.0,
upper_bound = 1.0,
lipschitz = 1.0,
)
end
@variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0)
@variables(sp, begin
s[i = 1:N] >= 0
d
end)
@constraints(sp, begin
[i = 1:N], s[i] <= x[i].in
c, sum(s) <= d + 1
end)
SDDP.parameterize(sp, t == 1 ? [1] : 1:length(DEMAND)) do ω
JuMP.fix(d, DEMAND[ω])
set_upper_bound(s[1], 0.1 * ω)
set_lower_bound(x[1].out, ω)
set_normalized_rhs(c, ω)
sgn = minimization ? 1.0 : -1.0
@stageobjective(
sp,
sgn * (
sum(C[i] * x[i].out for i in 1:N) - S[ω] * s[ω] -
s[ω] * S[ω] + ω
)
)
end
end
return model
end
function test_write_to_file_Min()
base_model = _create_model(true)
set_optimizer(base_model, HiGHS.Optimizer)
SDDP.train(base_model; iteration_limit = 50, print_level = 0)
model = _create_model(true)
SDDP.write_to_file(
model,
"experimental.sof.json";
validation_scenarios = 10,
sampling_scheme = SDDP.PSRSamplingScheme(2),
)
set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 50, print_level = 0)
new_model, validation_scenarios =
SDDP.read_from_file("experimental.sof.json")
set_optimizer(new_model, HiGHS.Optimizer)
SDDP.train(new_model; iteration_limit = 50, print_level = 0)
@test isapprox(
SDDP.calculate_bound(base_model),
SDDP.calculate_bound(model);
atol = 1e-6,
)
@test isapprox(
SDDP.calculate_bound(base_model),
SDDP.calculate_bound(new_model);
atol = 1e-6,
)
scenarios = SDDP.evaluate(new_model, validation_scenarios)
@test length(scenarios["problem_sha256_checksum"]) == 64
@test length(scenarios["scenarios"]) == 10
@test length(scenarios["scenarios"][1]) == 3
node_1_1 = scenarios["scenarios"][1][1]
@test isapprox(node_1_1["objective"], 9.6; atol = 1e-8)
@test node_1_1["primal"]["d"] == 2
@test isapprox(node_1_1["primal"]["x[1]_out"], 1; atol = 1e-8)
@test isapprox(node_1_1["primal"]["x[2]_out"], 12; atol = 1e-8)
demands = map(scenarios["scenarios"]) do s
return [si["primal"]["d"] for si in s]
end
for i in 3:2:10
@test demands[1] == demands[i]
@test demands[2] == demands[i+1]
end
return
end
function test_write_to_file_Max()
base_model = _create_model(false)
set_optimizer(base_model, HiGHS.Optimizer)
SDDP.train(base_model; iteration_limit = 50, print_level = 0)
model = _create_model(false)
SDDP.write_to_file(
model,
"experimental.sof.json";
validation_scenarios = 10,
)
set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 50, print_level = 0)
new_model, validation_scenarios =
SDDP.read_from_file("experimental.sof.json")
set_optimizer(new_model, HiGHS.Optimizer)
SDDP.train(new_model; iteration_limit = 50, print_level = 0)
@test isapprox(
SDDP.calculate_bound(base_model),
SDDP.calculate_bound(model);
atol = 1e-6,
)
@test isapprox(
SDDP.calculate_bound(base_model),
SDDP.calculate_bound(new_model);
atol = 1e-6,
)
scenarios = SDDP.evaluate(new_model, validation_scenarios)
@test length(scenarios["problem_sha256_checksum"]) == 64
@test length(scenarios["scenarios"]) == 10
@test length(scenarios["scenarios"][1]) == 3
node_1_1 = scenarios["scenarios"][1][1]
@test isapprox(node_1_1["objective"], -9.6; atol = 1e-8)
@test node_1_1["primal"]["d"] == 2
@test isapprox(node_1_1["primal"]["x[1]_out"], 1; atol = 1e-8)
@test isapprox(node_1_1["primal"]["x[2]_out"], 12; atol = 1e-8)
end
function test_write_kwarg()
model = _create_model(true)
SDDP.write_to_file(
model,
"experimental.sof.json";
validation_scenarios = 0,
name = "Experimental",
description = "Experimental model",
author = "Oscar Dowson",
date = "1234-56-78",
)
data = JSON.parsefile("experimental.sof.json"; use_mmap = false)
@test data["description"] == "Experimental model"
@test data["author"] == "Oscar Dowson"
@test data["date"] == "1234-56-78"
return
end
function test_error_existing_cuts()
model = _create_model(true)
set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 1, print_level = 0)
err = ErrorException(
"StochOptFormat does not support writing after a call to " *
"`SDDP.train`.",
)
@test_throws err Base.write(IOBuffer(), model)
return
end
function test_error_belief_states()
model = _create_model(true; belief = true)
err = ErrorException("StochOptFormat does not support belief states.")
@test_throws err Base.write(IOBuffer(), model)
return
end
function test_error_objective_states()
model = _create_model(true; objective_state = true)
err = ErrorException("StochOptFormat does not support objective states.")
@test_throws err Base.write(IOBuffer(), model)
return
end
function test_slptestset()
model, validation_scenarios =
SDDP.read_from_file(joinpath(@__DIR__, "electric.sof.json"))
set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 30, print_level = 0)
@test isapprox(SDDP.calculate_bound(model), 381.8533; atol = 1e-3)
scenarios = SDDP.evaluate(model, validation_scenarios)
@test length(scenarios["problem_sha256_checksum"]) == 64
@test length(scenarios["scenarios"]) == 3
return
end
end # module
TestExperimental.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestMSPFormat
using SDDP
using Test
import HiGHS
import SDDP: MSPFormat
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
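# `MSPFormat._get_constant` evaluates MSPFormat coefficient expressions: plain
# numbers and "inf"/"-inf" are returned directly, while names and ADD/MUL
# dictionaries are resolved against the random variable `state` when it is
# provided (and returned unevaluated otherwise).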
function test_get_constant()
@test MSPFormat._get_constant(Any[1.0]) == 1.0
@test MSPFormat._get_constant(Any[2.4]) == 2.4
@test MSPFormat._get_constant(Any["inf"]) == Inf
@test MSPFormat._get_constant(Any["-inf"]) == -Inf
state = Dict{String,Any}("inflow" => 12.0)
@test MSPFormat._get_constant(Any[1.0], state) == 1.0
@test MSPFormat._get_constant(Any[2.4], state) == 2.4
@test MSPFormat._get_constant(Any["inf"], state) == Inf
@test MSPFormat._get_constant(Any["-inf"], state) == -Inf
terms = Any["inflow"]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) == 12.0
terms = [Dict("ADD" => "inflow"), Dict("ADD" => 0.0)]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) === 12.0
terms = Any[
Dict("ADD" => "inflow"),
Dict("ADD" => 200.0),
Dict("ADD" => Any[0.0]),
]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) === 212.0
terms = Any[
Dict("ADD" => "inflow"),
Dict("ADD" => 200.0),
Dict("ADD" => Any[1.0]),
]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) === 213.0
terms = Any[Dict("ADD" => "inflow"), Dict("MUL" => -1.0)]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) === -12.0
terms =
Any[Dict("ADD" => "inflow"), Dict("MUL" => -1.0), Dict("MUL" => 0.5)]
@test MSPFormat._get_constant(terms) === terms
@test MSPFormat._get_constant(terms, state) === -6.0
@test MSPFormat._get_constant("bad_inflow", state) === 0.0
return
end
function test_set_type()
@test MSPFormat._set_type(1.0, "EQ") == JuMP.MOI.EqualTo(1.0)
@test MSPFormat._set_type(1.2, "EQ") == JuMP.MOI.EqualTo(1.2)
@test MSPFormat._set_type(Any[], "EQ") == JuMP.MOI.EqualTo(0.0)
@test MSPFormat._set_type(1.0, "LEQ") == JuMP.MOI.LessThan(1.0)
@test MSPFormat._set_type(1.2, "LEQ") == JuMP.MOI.LessThan(1.2)
@test MSPFormat._set_type(Any[], "LEQ") == JuMP.MOI.LessThan(0.0)
@test MSPFormat._set_type(1.0, "GEQ") == JuMP.MOI.GreaterThan(1.0)
@test MSPFormat._set_type(1.2, "GEQ") == JuMP.MOI.GreaterThan(1.2)
@test MSPFormat._set_type(Any[], "GEQ") == JuMP.MOI.GreaterThan(0.0)
return
end
function test_SimpleHydroThermal()
problem = joinpath(@__DIR__, "hydro_thermal")
model = MSPFormat.read_from_file(problem)
JuMP.set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 10, print_level = 0)
@test ≈(SDDP.calculate_bound(model), 8333.3333, atol = 1e-4)
return
end
function test_SimpleHydroThermal_round_trip()
problem = joinpath(@__DIR__, "hydro_thermal")
src = MSPFormat.read_from_file(problem)
SDDP.write_to_file(src, "$problem.sof.json")
model, validation = SDDP.read_from_file("$problem.sof.json")
@test validation === nothing
JuMP.set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 10, print_level = 0)
@test ≈(SDDP.calculate_bound(model), 8333.3333, atol = 1e-4)
rm("$problem.sof.json")
return
end
function test_electric_non_stagewise()
model_stagewise = MSPFormat.read_from_file(
joinpath(@__DIR__, "electric.problem.json"),
joinpath(@__DIR__, "electric.lattice.json"),
)
@test length(model_stagewise.nodes) == 2
@test model_stagewise["0"].children == [SDDP.Noise("1", 1.0)]
@test length(model_stagewise["0"].noise_terms) == 1
@test model_stagewise["1"].children == SDDP.Noise{String}[]
@test length(model_stagewise["1"].noise_terms) == 3
model_tree = MSPFormat.read_from_file(
joinpath(@__DIR__, "electric.problem.json"),
joinpath(@__DIR__, "electric-tree.lattice.json"),
)
@test length(model_tree.nodes) == 5
@test model_tree["0"].children == SDDP.Noise.(["2", "3"], [0.3, 0.7])
@test model_tree["1"].children == SDDP.Noise.(["3", "4"], [0.4, 0.6])
@test model_tree["2"].children == SDDP.Noise{String}[]
@test model_tree["3"].children == SDDP.Noise{String}[]
@test model_tree["4"].children == SDDP.Noise{String}[]
for i in 0:4
@test length(model_tree["$i"].noise_terms) == 1
end
return
end
function test_electric()
problem = joinpath(@__DIR__, "electric")
model = MSPFormat.read_from_file(problem)
JuMP.set_optimizer(model, HiGHS.Optimizer)
SDDP.train(model; iteration_limit = 40, print_level = 0)
@test ≈(SDDP.calculate_bound(model), 381.8533, atol = 1e-4)
return
end
end # module
TestMSPFormat.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestAlgorithm
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_to_nodal_forms()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
bellman_function = SDDP.BellmanFunction(; lower_bound = 0.0),
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_lower_bound(x.out, ω)
end
end
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
risk_measure = SDDP.Expectation(),
)
@test SDDP.termination_status(model) == :iteration_limit
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
risk_measure = Dict(1 => SDDP.Expectation(), 2 => SDDP.WorstCase()),
)
@test SDDP.termination_status(model) == :iteration_limit
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
risk_measure = (idx) ->
idx == 1 ? SDDP.Expectation() : SDDP.WorstCase(),
)
@test SDDP.termination_status(model) == :iteration_limit
end
function test_solve()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
bellman_function = SDDP.BellmanFunction(; lower_bound = 0.0),
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_lower_bound(x.out, ω)
end
end
SDDP.train(model; iteration_limit = 4, print_level = 0)
@test SDDP.termination_status(model) == :iteration_limit
simulations = SDDP.simulate(model, 11, [:x])
@test length(simulations) == 11
@test all(length.(simulations) .== 2)
simulation = simulations[1][1]
@test length(keys(simulation)) == 7
@test sort(collect(keys(simulation))) == [
:belief,
:bellman_term,
:node_index,
:noise_term,
:objective_state,
:stage_objective,
:x,
]
@test typeof(simulation[:x]) == SDDP.State{Float64}
return
end
function test_simulate()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x[i = 1:2] >= i, SDDP.State, initial_value = 2i)
@stageobjective(sp, x[1].out + x[2].out)
end
simulations = SDDP.simulate(model, 1, [:x])
@test simulations[1][1][:x] == [SDDP.State(2.0, 1.0), SDDP.State(4.0, 2.0)]
return
end
function test_simulate_incoming_state()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x[i = 1:2] >= i, SDDP.State, initial_value = 2i)
@constraint(sp, [i = 1:2], x[i].out == x[i].in)
@stageobjective(sp, x[1].out + x[2].out)
end
simulations = SDDP.simulate(
model,
1,
[:x];
incoming_state = Dict("x[1]" => 3.0, "x[2]" => 3.0),
)
@test simulations[1][1][:x] == [SDDP.State(3.0, 3.0), SDDP.State(3.0, 3.0)]
simulations = SDDP.simulate(model, 1, [:x])
@test simulations[1][1][:x] == [SDDP.State(2.0, 2.0), SDDP.State(4.0, 4.0)]
return
end
function test_simulate_missing()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x[i = 1:2] >= i, SDDP.State, initial_value = 2i)
if t == 1
@variable(sp, y >= 0)
end
@stageobjective(sp, x[1].out + x[2].out)
end
@test_throws(
ErrorException,
SDDP.simulate(model, 1, [:y]; parallel_scheme = SDDP.Serial()),
)
sims = SDDP.simulate(model, 1, [:y]; skip_undefined_variables = true)
@test sims[1][1][:y] == 0.0
@test isnan(sims[1][2][:y])
return
end
function test_infeasible_model()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(node, x.out <= -1)
@stageobjective(node, x.out)
end
ex = ErrorException(
"""
Unable to retrieve solution from node 1.
Termination status : INFEASIBLE
Primal status : NO_SOLUTION
Dual status : NO_SOLUTION.
The current subproblem was written to `subproblem_1.mof.json`.
There are two common causes of this error:
1) you have a mistake in your formulation, or you violated
the assumption of relatively complete recourse
2) the solver encountered numerical issues
See https://odow.github.io/SDDP.jl/stable/tutorial/warnings/ for more information.""",
)
@test_throws(
ex,
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
parallel_scheme = SDDP.Serial(),
),
)
@test isfile("subproblem_1.mof.json")
rm("subproblem_1.mof.json")
return
end
function test_infeasible_direct_model()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
direct_mode = true,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(node, x.out <= -1)
@stageobjective(node, x.out)
end
ex = ErrorException(
"""
Unable to retrieve solution from node 1.
Termination status : INFEASIBLE
Primal status : NO_SOLUTION
Dual status : NO_SOLUTION.
The current subproblem was written to `subproblem_1.mof.json`.
There are two common causes of this error:
1) you have a mistake in your formulation, or you violated
the assumption of relatively complete recourse
2) the solver encountered numerical issues
See https://odow.github.io/SDDP.jl/stable/tutorial/warnings/ for more information.""",
)
@test_throws(
ex,
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
parallel_scheme = SDDP.Serial(),
),
)
@test isfile("subproblem_1.mof.json")
rm("subproblem_1.mof.json")
return
end
function test_refine_at_similar_nodes()
model = SDDP.MarkovianPolicyGraph(;
transition_matrices = [[0.5 0.5], [0.2 0.8; 0.8 0.2]],
optimizer = HiGHS.Optimizer,
lower_bound = 0.0,
) do sp, index
stage, markov_state = index
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, x.out >= stage)
@stageobjective(sp, (stage + markov_state) * x.out)
end
SDDP.train(
model;
iteration_limit = 1,
refine_at_similar_nodes = false,
print_level = 0,
)
mi1 = length(model[(1, 1)].bellman_function.global_theta.cuts)
mi2 = length(model[(1, 2)].bellman_function.global_theta.cuts)
@test mi1 + mi2 == length(model.most_recent_training_results.log)
model = SDDP.MarkovianPolicyGraph(;
transition_matrices = [[0.5 0.5], [0.2 0.8; 0.8 0.2]],
optimizer = HiGHS.Optimizer,
lower_bound = 0.0,
) do sp, index
stage, markov_state = index
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, x.out >= stage)
@stageobjective(sp, (stage + markov_state) * x.out)
end
SDDP.train(
model;
iteration_limit = 1,
refine_at_similar_nodes = true,
print_level = 0,
)
@test length(model[(1, 1)].bellman_function.global_theta.cuts) ==
length(model[(1, 2)].bellman_function.global_theta.cuts) ==
length(model.most_recent_training_results.log)
return
end
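# Check that user-supplied pre- and post-optimize hooks are called around
# `solve_subproblem`, and that the post-optimize hook receives the value
# returned by the pre-optimize hook.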
function test_optimize_hook()
model = SDDP.LinearPolicyGraph(;
stages = 2,
optimizer = HiGHS.Optimizer,
lower_bound = 0.0,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0)
@stageobjective(sp, x.out)
end
pre_optimize_called = 0
post_optimize_called = 0
node = model[1]
SDDP.pre_optimize_hook(
node,
) do model, node, state, noise, scenario_path, duality_handler
pre_optimize_called = 1
return pre_optimize_called
end
SDDP.post_optimize_hook(node) do ret
post_optimize_called = ret + 2
return
end
SDDP.solve_subproblem(
model,
node,
Dict(:x => 0.0),
nothing,
Tuple{Int,Any}[(1, nothing)];
duality_handler = nothing,
)
@test pre_optimize_called == 1
@test post_optimize_called == 3
return
end
function test_write_log_to_csv()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, [stage], [1.0]) do ω
return JuMP.set_lower_bound(x.out, ω)
end
end
@test_throws ErrorException SDDP.write_log_to_csv(model, "sddp.csv")
SDDP.train(model; iteration_limit = 2, print_level = 0)
SDDP.write_log_to_csv(model, "sddp.csv")
log = read("sddp.csv", String)
saved_log = """
iteration, simulation, bound, time
"""
for i in 1:length(model.most_recent_training_results.log)
saved_log *= "$i, 3.0, 3.0, 3.0\n"
end
@test replace(log, r"[0-9\.]+\n" => "") ==
replace(saved_log, r"[0-9\.]+\n" => "")
rm("sddp.csv")
return
end
function test_print_log()
log = SDDP.Log(12, 1.1, -0.5, 123.4, 123, 1, "L", false)
@test sprint(SDDP.print_iteration, log) ==
" 12L -5.000000e-01 1.100000e+00 1.234000e+02 1 123\n"
log = SDDP.Log(1, 1.1, -0.5, 1.0, 1, 1, "L", true)
@test sprint(SDDP.print_iteration, log) ==
"† 1L -5.000000e-01 1.100000e+00 1.000000e+00 1 1\n"
return
end
function test_log_frequency_argument_error()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
end
@test_throws ArgumentError SDDP.train(model; log_frequency = 0)
return
end
end # module
TestAlgorithm.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestBinaryExpansion
using SDDP: binexpand, bincontract
using Test
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_Binary_Expansion()
int_len = round(Int, log(typemax(Int)) / log(2))
@test_throws Exception binexpand(0)
@test binexpand(1, 1) == [1]
@test binexpand(2, 2) == [0, 1]
@test binexpand(3, 3) == [1, 1]
@test binexpand(4, 4) == [0, 0, 1]
@test binexpand(5, 5) == [1, 0, 1]
@test binexpand(6, 6) == [0, 1, 1]
@test binexpand(7, 7) == [1, 1, 1]
@test_throws Exception binexpand(8, 7)
@test binexpand(typemax(Int), typemax(Int)) == ones(Int, int_len)
@test binexpand(0.5, 0.5) == binexpand(5, 5)
@test binexpand(0.54, 0.54) == binexpand(5, 5)
@test binexpand(0.56, 0.56, 0.1) == binexpand(6, 6)
@test binexpand(0.5, 0.5, 0.01) == binexpand(50, 50)
@test binexpand(0.54, 0.54, 0.01) == binexpand(54, 54)
@test binexpand(0.56, 0.56, 0.01) == binexpand(56, 56)
@test 0 == bincontract([0])
@test 1 == bincontract([1])
@test 0 == bincontract([0, 0])
@test 1 == bincontract([1, 0])
@test 2 == bincontract([0, 1])
@test 3 == bincontract([1, 1])
@test 2 == bincontract([0, 1, 0])
@test 3 == bincontract([1, 1, 0])
@test 4 == bincontract([0, 0, 1])
@test 5 == bincontract([1, 0, 1])
@test 6 == bincontract([0, 1, 1])
@test 7 == bincontract([1, 1, 1])
@test typemax(Int) == bincontract(ones(Int, int_len))
@test bincontract([0], 0.1) ≈ 0.0
@test bincontract([1], 0.1) ≈ 0.1
@test bincontract([0, 1], 0.1) ≈ 0.2
@test bincontract([1, 1], 0.1) ≈ 0.3
@test bincontract([0, 1, 0], 0.1) ≈ 0.2
@test bincontract([1, 1, 0], 0.1) ≈ 0.3
@test bincontract([1, 0, 1], 0.1) ≈ 0.5
end
end # module
TestBinaryExpansion.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestDeterministicEquivalent
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_acyclic_linear()
graph = SDDP.LinearGraph(2)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test SDDP.is_cyclic(model) == false
det = SDDP.deterministic_equivalent(model)
@test typeof(det) == JuMP.Model
@test objective_sense(det) == MOI.MIN_SENSE
return
end
function test_cyclic_linear()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test SDDP.is_cyclic(model) == true
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: " *
"Cyclic policy graph detected!",
),
SDDP.deterministic_equivalent(model)
)
return
end
function test_cyclic_single_node()
graph = SDDP.Graph(
:root,
[:node],
[(:root => :node, 1.0), (:node => :node, 0.9)],
)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test SDDP.is_cyclic(model) == true
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: " *
"Cyclic policy graph detected!",
),
SDDP.deterministic_equivalent(model)
)
return
end
function test_acyclic_Markovian()
model = SDDP.MarkovianPolicyGraph(;
transition_matrices = [[0.5 0.5], [0.2 0.8; 0.8 0.2]],
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test SDDP.is_cyclic(model) == false
@test typeof(SDDP.deterministic_equivalent(model)) == JuMP.Model
return
end
function test_cyclic_Markovian()
graph = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])
SDDP.add_edge(graph, (2, 1) => (1, 1), 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test SDDP.is_cyclic(model) == true
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: " *
"Cyclic policy graph detected!",
),
SDDP.deterministic_equivalent(model)
)
return
end
function test_time_limit()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: Time limit exceeded!",
),
# We use a negative time limit to force error.
SDDP.deterministic_equivalent(model; time_limit = -10.0)
)
return
end
function test_objective_states()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
SDDP.add_objective_state(
sp;
initial_value = 0.0,
lower_bound = 0.0,
upper_bound = 10.0,
lipschitz = 10.0,
) do p, ω
return p + ω
end
SDDP.parameterize(sp, [1, 2]) do ω
p = SDDP.objective_state(sp)
@stageobjective(sp, p * x.out)
end
end
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: Objective states detected!",
),
SDDP.deterministic_equivalent(model)
)
return
end
function test_belief_states()
graph = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])
SDDP.add_ambiguity_set(graph, [(1, 1), (1, 2)])
SDDP.add_ambiguity_set(graph, [(2, 1), (2, 2)])
model = SDDP.PolicyGraph(
graph;
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: Belief states detected!",
),
SDDP.deterministic_equivalent(model)
)
end
function test_existing_policy()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, x.out)
end
SDDP.train(model; iteration_limit = 2, print_level = 0)
@test_throws(
ErrorException(
"Unable to formulate deterministic equivalent: Model has been used " *
"for training. Can only form deterministic equivalent on a fresh model.",
),
SDDP.deterministic_equivalent(model)
)
return
end
function test_constant_objective()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@stageobjective(sp, 1.0)
end
d = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
set_silent(d)
optimize!(d)
@test objective_value(d) == 2.0
return
end
function test_constraint_with_no_terms()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, x.out <= x.out)
@stageobjective(sp, 1.0)
end
d = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
set_silent(d)
optimize!(d)
@test objective_value(d) == 2.0
return
end
function test_quadratic_expr()
model = SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, x.in^2 <= x.out)
@stageobjective(sp, x.out)
end
d = SDDP.deterministic_equivalent(model)
@test in(
(GenericQuadExpr{Float64,VariableRef}, MOI.LessThan{Float64}),
list_of_constraint_types(d),
)
return
end
function test_quadratic_expr_no_quad_terms()
model = SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, x.in^2 <= x.out + x.in^2)
@stageobjective(sp, x.out)
end
d = SDDP.deterministic_equivalent(model)
@test in(
(GenericQuadExpr{Float64,VariableRef}, MOI.LessThan{Float64}),
list_of_constraint_types(d),
)
return
end
function test_vector_valued_functions()
model = SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
@constraint(sp, [x.in, x.out] in MOI.SOS1([1.0, 2.0]))
@stageobjective(sp, x.out)
end
d = SDDP.deterministic_equivalent(model)
@test (Vector{VariableRef}, MOI.SOS1{Float64}) in
list_of_constraint_types(d)
return
end
end # module
TestDeterministicEquivalent.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestModelingAids
using SDDP
using Test
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_find_min()
@test SDDP.find_min([1.0, 2.0, 3.0], 2.1) == (abs(2.0 - 2.1), 2)
@test SDDP.find_min([1.0, 2.0, 3.0], 0.0) == (1.0, 1)
@test SDDP.find_min([1.0, 2.0, 3.0], 5.0) == (2.0, 3)
return
end
function test__allocate_support_budget()
@inferred SDDP._allocate_support_budget(() -> rand(10), 20, 100)
states = SDDP._allocate_support_budget(() -> rand(10), 20, 100)
@test isa(states, Vector{Int})
@test sum(states) == 20
@test all(states .> 0)
@inferred SDDP._allocate_support_budget(() -> [1, 2, 3 + rand()], 17, 31)
states = SDDP._allocate_support_budget(() -> [1, 2, 3 + rand()], 17, 31)
@test sum(states) == 17
@test all(states .> 0)
@inferred SDDP._allocate_support_budget(() -> [1.0, 2.0, 3.0], 5, 10)
states = SDDP._allocate_support_budget(() -> [1.0, 2.0, 3.0], 5, 10)
@test states == [1, 1, 1]
@test all(states .> 0)
states = [1, 3, 5]
new_states =
SDDP._allocate_support_budget(() -> [1.0, 2.0, 3.0], states, 19)
@test states == new_states
@test SDDP._allocate_support_budget(() -> rand(3), 2, 10) == [1, 1, 1]
return
end
function test__lattice_approximation()
support, probability =
SDDP._lattice_approximation(() -> rand(5), [1, 2, 3, 4, 5], 100)
for (t, s) in enumerate(support)
@test length(s) == t
@test all(x -> 0 <= x <= 1, s)
@test !any(isnan, probability[t])
@test all(isapprox.(sum(probability[t]; dims = 2), 1.0))
end
return
end
function test_MarkovianGraph()
g = SDDP.MarkovianGraph(() -> rand(5); budget = 10, scenarios = 100)
@test g.root_node == (0, 0.0)
@test length(g.nodes) == 11
for (k, node) in g.nodes
if length(node) > 0
@test sum(arc[2] for arc in node) ≈ 1.0
else
@test length(node) == 0
end
end
return
end
function test_duplicate_nodes()
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
num_nodes = Int[]
for _ in 1:100
g = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
push!(num_nodes, length(g.nodes))
end
@test minimum(num_nodes) < 9
@test maximum(num_nodes) == 9
return
end
end # module
TestModelingAids.runtests()
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
using Random
using Test
function util_test_directory(dir, exclude = String[])
for (root, _, files) in walkdir(dir)
for file in files
if endswith(file, ".jl") && !(file in exclude)
@testset "$(file)" begin
@info file
Random.seed!(12345)
include(joinpath(root, file))
end
end
end
end
return
end
@testset "SDDP.jl" begin
util_test_directory(".", ["runtests.jl"])
util_test_directory(joinpath(dirname(@__DIR__), "docs", "src", "examples"))
end
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestUserInterface
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_LinearGraph()
graph = SDDP.LinearGraph(5)
@test graph.root_node == 0
for stage in 0:4
@test haskey(graph.nodes, stage)
@test graph.nodes[stage] == [(stage + 1, 1.0)]
end
@test haskey(graph.nodes, 5)
@test graph.nodes[5] == Tuple{Int,Float64}[]
graph = SDDP.LinearGraph(3)
@test sprint(show, graph) ==
"Root\n" *
" 0\n" *
"Nodes\n" *
" 1\n" *
" 2\n" *
" 3\n" *
"Arcs\n" *
" 0 => 1 w.p. 1.0\n" *
" 1 => 2 w.p. 1.0\n" *
" 2 => 3 w.p. 1.0"
@test length(graph.belief_partition) == 0
return
end
function test_Markovian_Error()
    # Not a root transition matrix.
@test_throws Exception SDDP.MarkovianGraph([[0.5 0.5; 0.5 0.5]])
# Negative probability.
@test_throws Exception SDDP.MarkovianGraph([[-0.5 0.75]])
    # Probability sums to greater than 1.
@test_throws Exception SDDP.MarkovianGraph([[0.8 0.8]])
# Mis-matched dimensions.
@test_throws Exception SDDP.MarkovianGraph([
[0.1 0.2 0.7],
[0.5 0.5; 0.5 0.5],
])
return
end
function test_Markovian_keyword_vs_list()
graph_1 = SDDP.MarkovianGraph(;
stages = 2,
transition_matrix = [0.4 0.6; 0.25 0.75],
root_node_transition = [0.7, 0.3],
)
graph_2 = SDDP.MarkovianGraph([[0.7 0.3], [0.4 0.6; 0.25 0.75]])
@test graph_1.root_node == graph_2.root_node
@test graph_1.nodes == graph_2.nodes
@test length(graph_1.belief_partition) == 0
@test length(graph_2.belief_partition) == 0
return
end
function test_construct_Graph()
graph = SDDP.Graph(:root)
@test graph.root_node == :root
@test collect(keys(graph.nodes)) == [:root]
return
end
function test_add_node()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
@test collect(keys(graph.nodes)) == [:root, :x]
return
end
function test_add_duplicate_node()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
@test_throws Exception SDDP.add_node(graph, :x)
return
end
function test_add_edge()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
SDDP.add_edge(graph, :root => :x, 1.0)
@test haskey(graph.nodes, :root)
@test graph.nodes[:root] == [(:x, 1.0)]
return
end
function test_add_node_wrong_type()
graph = SDDP.Graph(:root)
@test_throws Exception SDDP.add_node(graph, 1)
end
function test_add_edge_missing_node()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
@test_throws Exception SDDP.add_edge(graph, :x => :y, 1.0)
@test_throws Exception SDDP.add_edge(graph, :y => :x, 1.0)
return
end
function test_add_edge_to_root()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
@test_throws Exception SDDP.add_edge(graph, :x => :root, 1.0)
return
end
function test_belief_partition()
graph = SDDP.Graph(:root)
SDDP.add_node(graph, :x)
SDDP.add_node(graph, :y)
@test_throws ErrorException(
"You must provide on Lipschitz contsant for every element in " *
"the ambiguity set.",
) SDDP.add_ambiguity_set(graph, [:x], Float64[])
@test_throws ErrorException(
"Cannot provide negative Lipschitz constant: [-1.0]",
) SDDP.add_ambiguity_set(graph, [:x], -1.0)
SDDP.add_ambiguity_set(graph, [:x])
SDDP.add_ambiguity_set(graph, [:y])
@test graph.belief_partition == [[:x], [:y]]
graph = SDDP.Graph(
:root,
[:x, :y],
[(:root => :x, 0.5), (:root => :y, 0.5)];
belief_partition = [[:x, :y]],
belief_lipschitz = [[1.0, 1.0]],
)
@test graph.belief_partition == [[:x, :y]]
@test sprint(show, graph) == join(
[
"Root",
" root",
"Nodes",
" x",
" y",
"Arcs",
" root => x w.p. 0.5",
" root => y w.p. 0.5",
"Partitions",
" {x, y}",
],
"\n",
)
graph =
SDDP.Graph(:root, [:x, :y], [(:root => :x, 0.5), (:root => :y, 0.5)])
@test length(graph.belief_partition) == 0
return
end
function test_PolicyGraph_LinearGraph()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
return
end
@test_throws Exception SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = true,
) do node, stage
return
end
nodes = Set{Int}()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
return push!(nodes, stage)
end
@test nodes == Set([1, 2])
@test sprint(show, model) ==
"A policy graph with 2 nodes.\n Node indices: 1, 2\n"
return
end
function test_PolicyGraph_MarkovianGraph()
graph = SDDP.MarkovianGraph([
ones(Float64, (1, 1)),
[0.5 0.5],
[0.5 0.5; 0.3 0.4],
[0.5 0.5; 0.3 0.4],
[0.5 0.5; 0.3 0.4],
])
nodes = Set{Tuple{Int,Int}}()
SDDP.PolicyGraph(graph; lower_bound = 0.0, direct_mode = false) do _, stage
return push!(nodes, stage)
end
@test nodes == Set([
(1, 1),
(2, 1),
(2, 2),
(3, 1),
(3, 2),
(4, 1),
(4, 2),
(5, 1),
(5, 2),
])
return
end
function test_MarkovianPolicyGraph()
nodes = Set{Tuple{Int,Int}}()
SDDP.MarkovianPolicyGraph(;
transition_matrices = [
ones(Float64, (1, 1)),
[0.5 0.5],
[0.5 0.5; 0.3 0.4],
[0.5 0.5; 0.3 0.4],
[0.5 0.5; 0.3 0.4],
],
lower_bound = 0.0,
direct_mode = false,
) do _, stage
return push!(nodes, stage)
end
@test nodes == Set([
(1, 1),
(2, 1),
(2, 2),
(3, 1),
(3, 2),
(4, 1),
(4, 2),
(5, 1),
(5, 2),
])
return
end
function test_PolicyGraph_Graph()
graph = SDDP.Graph(
:root,
[:stage_1, :stage_2, :stage_3],
[
(:root => :stage_1, 1.0),
(:stage_1 => :stage_2, 1.0),
(:stage_2 => :stage_3, 1.0),
(:stage_3 => :stage_1, 0.9),
],
)
nodes = Set{Symbol}()
SDDP.PolicyGraph(graph; lower_bound = 0.0, direct_mode = false) do _, node
return push!(nodes, node)
end
@test nodes == Set([:stage_1, :stage_2, :stage_3])
return
end
function test_State()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0)
end
for stage in 1:2
node = model[stage]
@test haskey(node.states, :x)
@test length(keys(node.states)) == 1
@test node.states[:x] == node.subproblem[:x]
end
return
end
function test_parameterize()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, [1, 2, 3], [0.4, 0.5, 0.1]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
node = model[2]
@test length(node.noise_terms) == 3
@test JuMP.upper_bound(node.subproblem[:x]) == 1
node.parameterize(node.noise_terms[2].term)
@test JuMP.upper_bound(node.subproblem[:x]) == 2
node.parameterize(3)
@test JuMP.upper_bound(node.subproblem[:x]) == 3
end
function test_set_stage_objective_Min()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, 0 <= x <= 1)
@stageobjective(node, 2x)
end
node = model[2]
@test node.stage_objective == 2 * node.subproblem[:x]
@test model.objective_sense == SDDP.MOI.MIN_SENSE
@test_throws Exception SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
upper_bound = 0.0,
direct_mode = false,
) do node, stage
return
end
return
end
function test_set_stage_objective_Max()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
upper_bound = 0.0,
sense = :Max,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, 0 <= x <= 1)
@stageobjective(node, 2x)
end
node = model[2]
@test node.stage_objective == 2 * node.subproblem[:x]
@test model.objective_sense == SDDP.MOI.MAX_SENSE
@test_throws Exception SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
return
end
return
end
function test_0_stages()
@test_throws(
ErrorException(
"You must create a LinearPolicyGraph with `stages >= 1`.",
),
SDDP.LinearPolicyGraph(; stages = 0) do sp, t
@variable(sp, x, SDDP.State, initial_value = 0)
end,
)
return
end
function test_missing_bounds()
@test_throws(
ErrorException(
"You must specify a finite lower bound on the objective value" *
" using the `lower_bound = value` keyword argument.",
),
SDDP.LinearPolicyGraph(; stages = 1, sense = :Min) do sp, t
@variable(sp, x, SDDP.State, initial_value = 0)
end,
)
@test_throws(
ErrorException(
"You must specify a finite upper bound on the objective value" *
" using the `upper_bound = value` keyword argument.",
),
SDDP.LinearPolicyGraph(; stages = 1, sense = :Max) do sp, t
@variable(sp, x, SDDP.State, initial_value = 0)
end,
)
return
end
function test_parameterize_duplicate()
exception = ErrorException("Duplicate calls to SDDP.parameterize detected.")
@test_throws exception SDDP.LinearPolicyGraph(;
stages = 2,
upper_bound = 0.0,
sense = :Max,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, [1, 2]) do ω
@stageobjective(node, ω * x)
end
SDDP.parameterize(node, [3, 4]) do ω
@stageobjective(node, ω * x)
end
end
return
end
function test_no_initial_value()
try
SDDP.LinearPolicyGraph(;
stages = 2,
upper_bound = 0.0,
sense = :Max,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State)
@stageobjective(node, x.out)
end
error("This error should not be reached!")
catch err
@test occursin("When creating a state variable", err.msg)
end
return
end
function test_termination_status()
model = SDDP.LinearPolicyGraph(;
stages = 2,
upper_bound = 0.0,
sense = :Max,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
end
@test SDDP.termination_status(model) == :model_not_solved
return
end
function test_numerical_stability_report()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = -1e10,
direct_mode = false,
) do subproblem, t
@variable(subproblem, x >= -1e7, SDDP.State, initial_value = 1e-5)
@variable(subproblem, 1 <= y <= 5, Int) # Note: this is just to test range fallback
@constraint(subproblem, 1e9 * x.out >= 1e-6 * x.in + 1e-8)
@stageobjective(subproblem, 1e9 * x.out)
end
report = sprint(SDDP.numerical_stability_report, model)
@test occursin("WARNING", report)
report_2 =
sprint(io -> SDDP.numerical_stability_report(io, model; by_node = true))
@test occursin("numerical stability report for node: 1", report_2)
@test occursin("numerical stability report for node: 2", report_2)
return
end
function test_objective_state()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0)
SDDP.parameterize(subproblem, [1, 2]) do ω
price = SDDP.objective_state(subproblem)
@stageobjective(subproblem, price * x.out)
end
end
@test_throws(
ErrorException("No objective state defined."),
SDDP.simulate(model, 1; parallel_scheme = SDDP.Serial()),
)
@test_throws(
ErrorException("add_objective_state can only be called once."),
SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0)
SDDP.add_objective_state(
subproblem;
initial_value = 1.5,
lower_bound = 0.75,
upper_bound = 2.25,
lipschitz = 100.0,
) do y, ω
return y + ω
end
SDDP.add_objective_state(
subproblem;
initial_value = 1.5,
lower_bound = 0.75,
upper_bound = 2.25,
lipschitz = 100.0,
) do y, ω
return y + ω
end
SDDP.parameterize(subproblem, [1, 2]) do ω
price = SDDP.objective_state(subproblem)
@stageobjective(subproblem, price * x.out)
end
end,
)
return
end
function test_belief_updater()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do subproblem, node
beliefs = [[0.2, 0.8], [0.7, 0.3]]
SDDP.parameterize(subproblem, [:A, :B], beliefs[node]) do ω
return nothing
end
end
belief_updater = SDDP.construct_belief_update(model, [Set([1]), Set([2])])
belief = Dict(1 => 1.0, 2 => 0.0)
belief′ = copy(belief)
@test belief_updater(belief′, belief, 2, :A) == Dict(1 => 0.0, 2 => 1.0)
@test belief′ == Dict(1 => 0.0, 2 => 1.0)
belief = Dict(1 => 0.0, 2 => 1.0)
@test belief_updater(belief′, belief, 1, :B) == Dict(1 => 1.0, 2 => 0.0)
@test belief′ == Dict(1 => 1.0, 2 => 0.0)
belief_updater = SDDP.construct_belief_update(model, [Set([1, 2])])
belief = Dict(1 => 1.0, 2 => 0.0)
@test belief_updater(belief′, belief, 1, :A) == Dict(1 => 0.0, 2 => 1.0)
belief = Dict(1 => 0.0, 2 => 1.0)
@test belief_updater(belief′, belief, 1, :B) == Dict(1 => 1.0, 2 => 0.0)
function is_approx(x::Dict{T,Float64}, y::Dict{T,Float64}) where {T}
if length(x) != length(y)
return false
end
for (key, value) in x
if !(value ≈ y[key])
return false
end
end
return true
end
belief = Dict(1 => 0.6, 2 => 0.4)
@test is_approx(
belief_updater(belief′, belief, 1, :A),
Dict(1 => 6 / 41, 2 => 35 / 41),
)
return
end
function test_Ensure_root_printed_first()
g = SDDP.Graph(:root, [:a], [(:root => :a, 1.0)])
@test sprint(show, g) == """
Root
root
Nodes
a
Arcs
root => a w.p. 1.0"""
return
end
function test_Tuple_Int_Float64_nodes_sorted()
@test SDDP.sort_nodes([(1, 1.0), (2, 0.1), (1, 0.5)]) ==
[(1, 0.5), (1, 1.0), (2, 0.1)]
g = SDDP.Graph(
(0, 0.0),
[(1, 1.0), (2, 0.1), (1, 0.5)],
[((0, 0.0) => (2, 0.1), 1.0)],
)
@test sprint(show, g) == """
Root
(0, 0.0)
Nodes
(1, 0.5)
(1, 1.0)
(2, 0.1)
Arcs
(0, 0.0) => (2, 0.1) w.p. 1.0"""
return
end
function test_String_nodes_unsorted()
@test SDDP.sort_nodes(["c", "b"]) == ["c", "b"]
g = SDDP.Graph("a", ["c", "b"], [("a" => "b", 1.0), ("b" => "c", 1.0)])
@test sprint(show, g) == """
Root
a
Nodes
c
b
Arcs
a => b w.p. 1.0
b => c w.p. 1.0"""
return
end
function test_stageobjective_sanitization()
@test_throws(
ErrorException(
"Unable to set the stage-objective of type $(Vector{SDDP.State{JuMP.VariableRef}}). " *
"It must be a scalar function.",
),
SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
@variable(sp, x, SDDP.State, initial_value = 0)
@stageobjective(sp, [x, x])
end,
)
@test_throws(
ErrorException(
"Unable to set the stage-objective of type $(SDDP.State{JuMP.VariableRef}). " *
"It must be a scalar function.",
),
SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
@variable(sp, x, SDDP.State, initial_value = 0)
@stageobjective(sp, x)
end,
)
return
end
function test_initial_feasibility()
@test_throws(
ErrorException(
"Initial point $(prevfloat(0.0)) violates lower bound on state x",
),
SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = prevfloat(0.0))
end,
)
@test_throws(
ErrorException(
"Initial point $(nextfloat(0.0)) violates upper bound on state x",
),
SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x <= 0, SDDP.State, initial_value = nextfloat(0.0))
end,
)
return
end
function test_objective_state_error_missing_upper_bound()
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Max,
upper_bound = 50.0,
optimizer = HiGHS.Optimizer,
) do sp, _
@variable(sp, x >= 0, SDDP.State, initial_value = 2)
@constraint(sp, x.out <= x.in)
SDDP.add_objective_state(
sp;
initial_value = (1.5,),
lower_bound = (0.75,),
lipschitz = (100.0,),
) do y, ω
return y + ω
end
SDDP.parameterize(sp, [-0.25, -0.125, 0.125, 0.25]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * (x.in - x.out))
end
end
state = model[1].objective_state
@test state.lower_bound == (0.75,)
@test state.upper_bound == (Inf,)
@test state.initial_value == (1.5,)
@test JuMP.lower_bound(state.μ[1]) == -100.0
@test JuMP.upper_bound(state.μ[1]) == 100.0
return
end
function test_objective_state_error_missing_lower_bound()
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Max,
upper_bound = 50.0,
optimizer = HiGHS.Optimizer,
) do sp, _
@variable(sp, x >= 0, SDDP.State, initial_value = 2)
@constraint(sp, x.out <= x.in)
SDDP.add_objective_state(
sp;
initial_value = (1,),
lipschitz = (100,),
) do y, ω
return y + ω
end
SDDP.parameterize(sp, [-0.25, -0.125, 0.125, 0.25]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * (x.in - x.out))
end
end
state = model[1].objective_state
@test state.lower_bound == (-Inf,)
@test state.upper_bound == (Inf,)
@test state.initial_value == (1.0,)
@test JuMP.lower_bound.(state.μ) == (-100.0,)
@test JuMP.upper_bound.(state.μ) == (100.0,)
return
end
function test_objective_state_error_dimension_two_missing_lower_bound()
err = ErrorException(
"Invalid dimension in the input to `add_objective_state`. Got: " *
"`(100,)`, but expected it to have length `2`.",
)
@test_throws err SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Max,
upper_bound = 50.0,
optimizer = HiGHS.Optimizer,
) do sp, _
@variable(sp, x >= 0, SDDP.State, initial_value = 2)
@constraint(sp, x.out <= x.in)
SDDP.add_objective_state(
sp;
initial_value = (1, 0),
lipschitz = (100,),
upper_bound = (1, Inf),
) do y, ω
return y + ω
end
SDDP.parameterize(sp, [-0.25, -0.125, 0.125, 0.25]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * (x.in - x.out))
end
end
return
end
function test_no_stage_objective()
model = SDDP.LinearPolicyGraph(;
stages = 2,
optimizer = HiGHS.Optimizer,
lower_bound = 0.0,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.0)
@constraint(sp, x.in == x.out)
end
@test model[1].stage_objective == 0.0
@test model[2].stage_objective == 0.0
SDDP.train(model; iteration_limit = 3, print_level = 0)
@test SDDP.calculate_bound(model) ≈ 0.0 atol = 1e-8
return
end
end # module
TestUserInterface.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4372 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestBackwardPassSamplingSchemes
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_CompleteSampler()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
terms = SDDP.sample_backward_noise_terms(SDDP.CompleteSampler(), model[1])
@test terms == model[1].noise_terms
return
end
function test_MonteCarloSampler_1()
model = SDDP.LinearPolicyGraph(;
stages = 1,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, [1, 3], [0.9, 0.1]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
term_count = 0
for i in 1:100
terms = SDDP.sample_backward_noise_terms(
SDDP.MonteCarloSampler(1),
model[1],
)
@test terms[1].probability == 1.0
if terms[1].term == model[1].noise_terms[1].term
term_count += 1
else
term_count -= 1
end
end
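# Each draw selects the first noise term with probability 0.9 and the second
# with probability 0.1, so term_count has expectation 100 * (0.9 - 0.1) = 80;
# the loose > 20 threshold keeps the test robust to sampling noise.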
@test term_count > 20
return
end
function test_MonteCarloSampler_100()
model = SDDP.LinearPolicyGraph(;
stages = 1,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, [1, 3], [0.9, 0.1]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
terms =
SDDP.sample_backward_noise_terms(SDDP.MonteCarloSampler(100), model[1])
term_count = 0
for term in terms
@test term.probability == 0.01
if term.term == model[1].noise_terms[1].term
term_count += 1
else
term_count -= 1
end
end
@test term_count > 20
return
end
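# A minimal custom scheme used below to check that backward-pass noise terms
# can be chosen as a function of the sampled incoming state.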
mutable struct WithStateSampler <: SDDP.AbstractBackwardSamplingScheme
number_of_samples::Int
end
function test_WithStateSampler()
function sample_backward_noise_terms_with_state(
sampler::WithStateSampler,
node::SDDP.Node,
state::Dict{Symbol,Float64},
)
if state[:x] / node.index == 1.0
return [
SDDP.Noise((ϵ = 3.0,), 1 / sampler.number_of_samples) for
i in 1:sampler.number_of_samples
]
elseif state[:x] / node.index == 3.0
return [
SDDP.Noise((ϵ = 1.0,), 1 / sampler.number_of_samples) for
i in 1:sampler.number_of_samples
]
end
end
model = SDDP.LinearPolicyGraph(;
stages = 5,
lower_bound = 0.0,
direct_mode = false,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@variable(node, ϵ)
SDDP.parameterize(node, stage * [1, 3], [0.9, 0.1]) do ω
return JuMP.fix(ϵ, ω)
end
@constraint(node, x.out == ϵ)
end
forward_trajectory = SDDP.forward_pass(
model,
SDDP.Options(model, Dict(:x => 1.0)),
SDDP.DefaultForwardPass(),
)
for node_index in 1:length(forward_trajectory.scenario_path)
state = forward_trajectory.sampled_states[node_index]
terms = sample_backward_noise_terms_with_state(
WithStateSampler(100),
model[node_index],
state,
)
for term in terms
@test term.probability == 0.01
if state[:x] / node_index == 1.0
@test term.term.ϵ == 3.0
elseif state[:x] / node_index == 3.0
@test term.term.ϵ == 1.0
end
end
end
return
end
end # module
TestBackwardPassSamplingSchemes.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 11844 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestBellmanFunctions
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function _create_model(graph)
return SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, _
@variable(
subproblem,
5.0 <= reservoir <= 15.0,
SDDP.State,
initial_value = 10.0
)
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
spill >= 0
inflow
demand
end)
@constraints(
subproblem,
begin
reservoir.out ==
reservoir.in - hydro_generation - spill + inflow
hydro_generation + thermal_generation == demand
end
)
@stageobjective(subproblem, 10 * spill + thermal_generation)
SDDP.parameterize(
subproblem,
[
(inflow = 0.0, demand = 7.5),
(inflow = 5.0, demand = 5),
(inflow = 10.0, demand = 2.5),
],
) do ω
JuMP.fix(inflow, ω.inflow)
return JuMP.fix(demand, ω.demand)
end
end
end
function test_Read_write_cuts_to_file()
graphs = [
(
"Symbol",
SDDP.Graph(
:root_node,
[:week],
[(:root_node => :week, 1.0), (:week => :week, 0.9)],
),
),
("Int", SDDP.Graph(0, [1], [(0 => 1, 1.0), (1 => 1, 0.9)])),
(
"NTuple",
SDDP.Graph(
(0, 1),
[(1, 1)],
[((0, 1) => (1, 1), 1.0), ((1, 1) => (1, 1), 0.9)],
),
),
]
for (T, graph) in graphs
model = _create_model(graph)
@test SDDP.calculate_bound(model) ≈ 9.17 atol = 0.1
SDDP.train(model; iteration_limit = 50, print_level = 0)
@test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
SDDP.write_cuts_to_file(model, "$(T).cuts.json")
model_2 = _create_model(graph)
@test SDDP.calculate_bound(model_2) ≈ 9.17 atol = 0.1
SDDP.read_cuts_from_file(model_2, "$(T).cuts.json")
@test SDDP.calculate_bound(model_2) ≈ 119.167 atol = 0.1
rm("$(T).cuts.json")
end
return
end
function test_read_write_cuts_to_file_String()
graph = SDDP.Graph(
"root_node",
["week"],
[("root_node" => "week", 1.0), ("week" => "week", 0.9)],
)
model = _create_model(graph)
@test SDDP.calculate_bound(model) ≈ 9.17 atol = 0.1
SDDP.train(model; iteration_limit = 50, cut_type = SDDP.MULTI_CUT)
@test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
SDDP.write_cuts_to_file(model, "model.cuts.json")
model_2 = _create_model(graph)
@test SDDP.calculate_bound(model_2) ≈ 9.17 atol = 0.1
@test_throws Exception SDDP.read_cuts_from_file(model_2, "model.cuts.json")
SDDP.read_cuts_from_file(
model_2,
"model.cuts.json";
node_name_parser = (::Type{String}, x::String) -> x,
)
@test SDDP.calculate_bound(model_2) ≈ 119.167 atol = 0.1
rm("model.cuts.json")
return
end
function test_read_write_cuts_to_file_ValueFunction()
graph = SDDP.Graph(
"root_node",
["week"],
[("root_node" => "week", 1.0), ("week" => "week", 0.9)],
)
model = _create_model(graph)
SDDP.train(model; iteration_limit = 50, print_level = 0)
@test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
V = SDDP.ValueFunction(model; node = "week")
value_f = SDDP.evaluate(V; reservoir = 10)
SDDP.write_cuts_to_file(model, "model.cuts.json")
model_2 = _create_model(graph)
@test SDDP.calculate_bound(model_2) ≈ 9.17 atol = 0.1
SDDP.read_cuts_from_file(
model_2,
"model.cuts.json";
node_name_parser = (::Type{String}, x::String) -> x,
)
V2 = SDDP.ValueFunction(model_2; node = "week")
@test value_f == SDDP.evaluate(V2; reservoir = 10)
rm("model.cuts.json")
return
end
function test_read_read_cuts_from_file_nothing()
graph = SDDP.Graph(
"root_node",
["week"],
[("root_node" => "week", 1.0), ("week" => "week", 0.9)],
)
model = _create_model(graph)
SDDP.train(model; iteration_limit = 50, print_level = 0)
@test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
V = SDDP.ValueFunction(model; node = "week")
value_f = SDDP.evaluate(V; reservoir = 10)
SDDP.write_cuts_to_file(
model,
"model.cuts.json";
node_name_parser = s -> "myname_$s",
)
model_2 = _create_model(graph)
@test SDDP.calculate_bound(model_2) ≈ 9.17 atol = 0.1
function parser(::Type{String}, x::String)
@test startswith(x, "myname_")
return replace(x, "myname_" => "")
end
SDDP.read_cuts_from_file(
model_2,
"model.cuts.json";
node_name_parser = parser,
)
N = num_constraints(
model_2["week"].subproblem;
count_variable_in_set_constraints = true,
)
SDDP.read_cuts_from_file(
model_2,
"model.cuts.json";
node_name_parser = (::Any, s) -> nothing,
)
N2 = num_constraints(
model_2["week"].subproblem;
count_variable_in_set_constraints = true,
)
@test N == N2
rm("model.cuts.json")
return
end
function test_add_all_cuts_SINGLE_CUT()
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)
@variable(sp, g >= 0)
@variable(sp, h >= 0)
@variable(sp, u >= 0)
@constraint(sp, inflow, x.out == x.in - h - u)
@constraint(sp, demand, h + g == 0)
@stageobjective(sp, 10 * u + g)
SDDP.parameterize(sp, [(0, 7.5), (5, 5.0), (10, 2.5)]) do ω
set_normalized_rhs(inflow, ω[1])
set_normalized_rhs(demand, ω[2])
return
end
end
SDDP.train(model; iteration_limit = 10)
for (t, node) in model.nodes
@test num_constraints(
node.subproblem,
AffExpr,
MOI.GreaterThan{Float64},
) < 10
end
SDDP.add_all_cuts(model)
n_iter = length(model.most_recent_training_results.log)
for (t, node) in model.nodes
n = num_constraints(node.subproblem, AffExpr, MOI.GreaterThan{Float64})
@test t == 3 || n == n_iter
end
return
end
function test_add_all_cuts_MULTI_CUT()
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)
@variable(sp, g >= 0)
@variable(sp, h >= 0)
@variable(sp, u >= 0)
@constraint(sp, inflow, x.out == x.in - h - u)
@constraint(sp, demand, h + g == 0)
@stageobjective(sp, 10 * u + g)
SDDP.parameterize(sp, [(0, 7.5), (5, 5.0), (10, 2.5)]) do ω
set_normalized_rhs(inflow, ω[1])
set_normalized_rhs(demand, ω[2])
return
end
end
SDDP.train(model; iteration_limit = 10, cut_type = SDDP.MULTI_CUT)
for (t, node) in model.nodes
@test num_constraints(
node.subproblem,
AffExpr,
MOI.GreaterThan{Float64},
) < 31
end
SDDP.add_all_cuts(model)
n_iter = length(model.most_recent_training_results.log)
for (t, node) in model.nodes
n = num_constraints(node.subproblem, AffExpr, MOI.GreaterThan{Float64})
@test t == 3 || n == (3 * n_iter + 1)
end
return
end
function test_belief_state_cut_selection()
demand_values = [1.0, 2.0]
demand_prob = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2])
graph = SDDP.Graph(
:root_node,
[:Ad, :Ah, :Bd, :Bh],
[
(:root_node => :Ad, 0.5),
(:root_node => :Bd, 0.5),
(:Ad => :Ah, 1.0),
(:Bd => :Bh, 1.0),
],
)
SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)
SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
@variables(
subproblem,
begin
0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)
buy >= 0
demand
end
)
@constraint(subproblem, demand == inventory.in - inventory.out + buy)
if node == :Ad || node == :Bd || node == :D
JuMP.fix(demand, 0)
@stageobjective(subproblem, buy)
else
SDDP.parameterize(subproblem, demand_values, demand_prob[node]) do ω
return JuMP.fix(demand, ω)
end
@stageobjective(subproblem, 2 * buy + inventory.out)
end
end
SDDP.train(
model;
iteration_limit = 20,
cut_deletion_minimum = 30,
print_level = 0,
)
n_cuts = count(model[:Ad].bellman_function.global_theta.cuts) do cut
return cut.constraint_ref !== nothing
end
@test n_cuts == length(model.most_recent_training_results.log)
SDDP.train(
model;
iteration_limit = 1,
add_to_existing_cuts = true,
print_level = 0,
)
n_cuts_2 = count(model[:Ad].bellman_function.global_theta.cuts) do cut
return cut.constraint_ref !== nothing
end
@test n_cuts_2 < n_cuts
return
end
function test_biobjective_cut_selection()
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, _
@variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)
@variables(subproblem, begin
0 <= g[i = 1:2] <= 100
0 <= u <= 150
s >= 0
shortage_cost >= 0
end)
@expressions(subproblem, begin
objective_1, g[1] + 10 * g[2]
objective_2, shortage_cost
end)
@constraints(subproblem, begin
inflow_constraint, v.out == v.in - u - s
g[1] + g[2] + u == 150
shortage_cost >= 40 - v.out
shortage_cost >= 60 - 2 * v.out
shortage_cost >= 80 - 4 * v.out
end)
## You must call this for a biobjective problem!
SDDP.initialize_biobjective_subproblem(subproblem)
SDDP.parameterize(subproblem, 0.0:5:50.0) do ω
JuMP.set_normalized_rhs(inflow_constraint, ω)
## You must call `set_biobjective_functions` from within
## `SDDP.parameterize`.
return SDDP.set_biobjective_functions(
subproblem,
objective_1,
objective_2,
)
end
end
SDDP.train_biobjective(
model;
solution_limit = 10,
iteration_limit = 10,
print_level = 0,
)
n_cuts = count(model[1].bellman_function.global_theta.cuts) do cut
return cut.constraint_ref !== nothing
end
@test n_cuts < 100
return
end
end # module
TestBellmanFunctions.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 15699 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestDualityHandlers
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function SDDP.prepare_backward_pass(
model::SDDP.PolicyGraph,
duality_handler::SDDP.AbstractDualityHandler,
options::SDDP.Options,
)
undo = Function[]
for (_, node) in model.nodes
push!(undo, SDDP.prepare_backward_pass(node, duality_handler, options))
end
function undo_relax()
for f in undo
f()
end
return
end
return undo_relax
end
# A single-stage model used to set up a node and subproblem for testing dual
# calculations.
function easy_single_stage(duality_handler)
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(sp, x[1:2], Bin, SDDP.State, initial_value = 0)
@variable(sp, y)
if stage == 1
@stageobjective(sp, 0)
else
@constraint(sp, y >= x[1].in + x[2].in)
fix(x[1].in, 0)
fix(x[2].in, 0)
@stageobjective(sp, y)
end
end
node = model.nodes[2]
options =
SDDP.Options(model, Dict(:x => 1.0); duality_handler = duality_handler)
_ = SDDP.prepare_backward_pass(model, duality_handler, options)
SDDP._initialize_solver(node; throw_error = false)
optimize!(node.subproblem)
obj, dual_vars = SDDP.get_dual_solution(node, duality_handler)
if duality_handler == SDDP.ContinuousConicDuality()
@test all(values(dual_vars) .<= ones(2))
else
@test all(values(dual_vars) .>= -ones(2))
end
return
end
# An 'exclusive or' model, for which there is no obvious choice of "tightest" dual.
function xor_single_stage(duality_handler)
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(sp, x[1:2], Bin, SDDP.State, initial_value = 1)
@variable(sp, y)
if stage == 1
@stageobjective(sp, 0)
else
@constraints(sp, begin
y >= x[1].in - x[2].in
y >= x[2].in - x[1].in
y <= x[1].in + x[2].in
y <= 2 - x[1].in - x[2].in
end)
fix(x[1].in, 0)
fix(x[2].in, 0)
@stageobjective(sp, y)
end
end
node = model.nodes[2]
options =
SDDP.Options(model, Dict(:x => 1.0); duality_handler = duality_handler)
_ = SDDP.prepare_backward_pass(model, duality_handler, options)
SDDP._initialize_solver(node; throw_error = false)
optimize!(node.subproblem)
obj, dual_vars = SDDP.get_dual_solution(node, duality_handler)
if duality_handler == SDDP.ContinuousConicDuality()
@test sum(values(dual_vars)) >= -1
else
@test sum(values(dual_vars)) <= 1
end
return
end
function test_easy_continuous()
easy_single_stage(SDDP.ContinuousConicDuality())
return
end
function test_easy_LagrangianDuality()
easy_single_stage(SDDP.LagrangianDuality())
return
end
function test_xor_continuous()
xor_single_stage(SDDP.ContinuousConicDuality())
return
end
function test_xor_LagrangianDuality()
xor_single_stage(SDDP.LagrangianDuality())
return
end
function test_prepare_backward_pass()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 2.0)
@variable(sp, b1, Bin)
@variable(sp, 0.2 <= b2, Bin)
@variable(sp, 0.5 <= b3 <= 1.2, Bin)
@variable(sp, i1, Int)
@variable(sp, 6.2 >= i2, Int)
@variable(sp, -8 <= i3 <= 2, Int)
@stageobjective(sp, b1 + b2 + b2 + i3 + i1)
end
options = SDDP.Options(
model,
Dict(:x => 1.0);
duality_handler = SDDP.ContinuousConicDuality(),
)
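# Before the relaxation: integrality and the user-supplied bounds are intact.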
for node in [model[1], model[2]]
@test JuMP.is_binary(node.subproblem[:b1])
@test !JuMP.has_lower_bound(node.subproblem[:b1])
@test !JuMP.has_upper_bound(node.subproblem[:b1])
@test JuMP.is_binary(node.subproblem[:b2])
@test JuMP.lower_bound(node.subproblem[:b2]) == 0.2
@test !JuMP.has_upper_bound(node.subproblem[:b2])
@test JuMP.is_binary(node.subproblem[:b3])
@test JuMP.lower_bound(node.subproblem[:b3]) == 0.5
@test JuMP.upper_bound(node.subproblem[:b3]) == 1.2
@test JuMP.is_integer(node.subproblem[:i1])
@test !JuMP.has_lower_bound(node.subproblem[:i1])
@test !JuMP.has_upper_bound(node.subproblem[:i1])
@test JuMP.is_integer(node.subproblem[:i2])
@test JuMP.upper_bound(node.subproblem[:i2]) == 6.2
@test !JuMP.has_lower_bound(node.subproblem[:i2])
@test JuMP.is_integer(node.subproblem[:i3])
@test JuMP.lower_bound(node.subproblem[:i3]) == -8
@test JuMP.upper_bound(node.subproblem[:i3]) == 2
end
undo_relax = SDDP.prepare_backward_pass(
model,
SDDP.ContinuousConicDuality(),
options,
)
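# After the relaxation: binaries and integers lose integrality. Binaries are
# given bounds inside [0, 1] (an existing tighter lower bound is kept), while
# integer bounds are left unchanged.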
for node in [model[1], model[2]]
@test !JuMP.is_binary(node.subproblem[:b1])
@test JuMP.lower_bound(node.subproblem[:b1]) == 0.0
@test JuMP.upper_bound(node.subproblem[:b1]) == 1.0
@test !JuMP.is_binary(node.subproblem[:b2])
@test JuMP.lower_bound(node.subproblem[:b2]) == 0.2
@test JuMP.upper_bound(node.subproblem[:b2]) == 1.0
@test !JuMP.is_binary(node.subproblem[:b3])
@test JuMP.lower_bound(node.subproblem[:b3]) == 0.5
@test JuMP.upper_bound(node.subproblem[:b3]) == 1.0
@test !JuMP.is_integer(node.subproblem[:i1])
@test !JuMP.has_lower_bound(node.subproblem[:i1])
@test !JuMP.has_upper_bound(node.subproblem[:i1])
@test !JuMP.is_integer(node.subproblem[:i2])
@test JuMP.upper_bound(node.subproblem[:i2]) == 6.2
@test !JuMP.has_lower_bound(node.subproblem[:i2])
@test !JuMP.is_integer(node.subproblem[:i3])
@test JuMP.lower_bound(node.subproblem[:i3]) == -8
@test JuMP.upper_bound(node.subproblem[:i3]) == 2
end
undo_relax()
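# Calling undo_relax() restores the original integrality and bounds.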
for node in [model[1], model[2]]
@test JuMP.is_binary(node.subproblem[:b1])
@test !JuMP.has_lower_bound(node.subproblem[:b1])
@test !JuMP.has_upper_bound(node.subproblem[:b1])
@test JuMP.is_binary(node.subproblem[:b2])
@test JuMP.lower_bound(node.subproblem[:b2]) == 0.2
@test !JuMP.has_upper_bound(node.subproblem[:b2])
@test JuMP.is_binary(node.subproblem[:b3])
@test JuMP.lower_bound(node.subproblem[:b3]) == 0.5
@test JuMP.upper_bound(node.subproblem[:b3]) == 1.2
@test JuMP.is_integer(node.subproblem[:i1])
@test !JuMP.has_lower_bound(node.subproblem[:i1])
@test !JuMP.has_upper_bound(node.subproblem[:i1])
@test JuMP.is_integer(node.subproblem[:i2])
@test JuMP.upper_bound(node.subproblem[:i2]) == 6.2
@test !JuMP.has_lower_bound(node.subproblem[:i2])
@test JuMP.is_integer(node.subproblem[:i3])
@test JuMP.lower_bound(node.subproblem[:i3]) == -8
@test JuMP.upper_bound(node.subproblem[:i3]) == 2
end
return
end
function test_kelleys_min()
model = SDDP.LinearPolicyGraph(;
stages = 10,
sense = :Min,
lower_bound = -1000,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.1)
@stageobjective(sp, (-5 + t) * x.out)
@constraint(sp, x.out == x.in)
end
set_optimizer(model, HiGHS.Optimizer)
for t in 1:10
SDDP.parameterize(model[t], nothing)
SDDP.set_incoming_state(model[t], Dict(:x => 1.1))
JuMP.optimize!(model[t].subproblem)
lobj, lagrange =
SDDP.get_dual_solution(model[t], SDDP.LagrangianDuality())
JuMP.optimize!(model[t].subproblem)
cobj, conic =
SDDP.get_dual_solution(model[t], SDDP.ContinuousConicDuality())
@test isapprox(lobj, cobj, atol = 1e-5)
csc, scd =
SDDP.get_dual_solution(model[t], SDDP.StrengthenedConicDuality())
@test csc == cobj
for (k, v) in lagrange
@test isapprox(v, conic[k], atol = 1e-5)
@test conic[k] == scd[k]
end
end
return
end
function test_kelleys_max()
model = SDDP.LinearPolicyGraph(;
stages = 10,
sense = :Max,
upper_bound = 1000,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.1)
@stageobjective(sp, (-5 + t) * x.out)
@constraint(sp, x.out == x.in)
end
set_optimizer(model, HiGHS.Optimizer)
for t in 1:10
SDDP.parameterize(model[t], nothing)
SDDP.set_incoming_state(model[t], Dict(:x => 1.1))
JuMP.optimize!(model[t].subproblem)
lobj, lagrange =
SDDP.get_dual_solution(model[t], SDDP.LagrangianDuality())
JuMP.optimize!(model[t].subproblem)
cobj, conic =
SDDP.get_dual_solution(model[t], SDDP.ContinuousConicDuality())
@test isapprox(lobj, cobj, atol = 1e-5)
csc, scd =
SDDP.get_dual_solution(model[t], SDDP.StrengthenedConicDuality())
@test csc == cobj
for (k, v) in lagrange
@test isapprox(v, conic[k], atol = 1e-5)
@test conic[k] == scd[k]
end
end
return
end
function test_kelleys_abs_function()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = -10.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.0)
@constraint(sp, x.out >= 1.2(x.in - 1))
@constraint(sp, x.out >= 0.1(x.in - 1))
@constraint(sp, x.out >= -x.in)
@stageobjective(sp, x.out)
end
set_optimizer(model, HiGHS.Optimizer)
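# With the cost-to-go bounded below by -10, the stage-1 value function is
# V(x) = max(1.2 * (x - 1), 0.1 * (x - 1), -x) - 10: at x = 0.5 the middle
# piece is active (V = -10.05, subgradient 0.1), and at x = 1.5 the first
# piece is active (V = -9.4, subgradient 1.2), matching the duals below.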
SDDP.parameterize(model[1], nothing)
SDDP.set_incoming_state(model[1], Dict(:x => 0.5))
JuMP.optimize!(model[1].subproblem)
lobj, lagrange = SDDP.get_dual_solution(model[1], SDDP.LagrangianDuality())
@test isapprox(lobj, -10.05, atol = 1e-5)
@test isapprox(lagrange[:x], 0.1, atol = 1e-5)
SDDP.set_incoming_state(model[1], Dict(:x => 1.5))
JuMP.optimize!(model[1].subproblem)
lobj, lagrange = SDDP.get_dual_solution(model[1], SDDP.LagrangianDuality())
@test isapprox(lobj, -9.4, atol = 1e-5)
@test isapprox(lagrange[:x], 1.2, atol = 1e-5)
return
end
function test_kelleys_abs_function_max()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 10.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.0)
@constraint(sp, x.out <= 1.2(x.in - 1))
@constraint(sp, x.out <= 0.1(x.in - 1))
@stageobjective(sp, x.out)
end
set_optimizer(model, HiGHS.Optimizer)
SDDP.parameterize(model[1], nothing)
SDDP.set_incoming_state(model[1], Dict(:x => 0.5))
JuMP.optimize!(model[1].subproblem)
lobj, lagrange = SDDP.get_dual_solution(model[1], SDDP.LagrangianDuality())
@test isapprox(lobj, 9.4, atol = 1e-5)
@test isapprox(lagrange[:x], 1.2, atol = 1e-5)
SDDP.set_incoming_state(model[1], Dict(:x => 1.5))
JuMP.optimize!(model[1].subproblem)
lobj, lagrange = SDDP.get_dual_solution(model[1], SDDP.LagrangianDuality())
@test isapprox(lobj, 10.05, atol = 1e-5)
@test isapprox(lagrange[:x], 0.1, atol = 1e-5)
return
end
"""
Test duality in a naturally integer problem
"""
function test_kelleys_ip_min()
model = SDDP.LinearPolicyGraph(;
stages = 10,
sense = :Min,
lower_bound = -1000,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, Int, SDDP.State, initial_value = 1.0)
@stageobjective(sp, (-5 + t) * x.out)
@constraint(sp, x.out == x.in)
end
set_optimizer(model, HiGHS.Optimizer)
for t in 1:10
SDDP.parameterize(model[t], nothing)
SDDP.set_incoming_state(model[t], Dict(:x => 1.0))
JuMP.optimize!(model[t].subproblem)
lobj, lagrange =
SDDP.get_dual_solution(model[t], SDDP.LagrangianDuality())
csc, scd =
SDDP.get_dual_solution(model[t], SDDP.StrengthenedConicDuality())
@test isapprox(lobj, csc, atol = 1e-5)
for (k, v) in lagrange
@test isapprox(v, scd[k], atol = 1e-5)
end
end
return
end
function test_kelleys_ip_max()
model = SDDP.LinearPolicyGraph(;
stages = 10,
sense = :Max,
upper_bound = 1000,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, Int, SDDP.State, initial_value = 2.0)
@stageobjective(sp, (-5 + t) * x.out)
@constraint(sp, x.out == x.in)
end
set_optimizer(model, HiGHS.Optimizer)
l = SDDP.LagrangianDuality()
for t in 1:10
SDDP.parameterize(model[t], nothing)
SDDP.set_incoming_state(model[t], Dict(:x => 2.0))
JuMP.optimize!(model[t].subproblem)
lobj, lagrange = SDDP.get_dual_solution(model[t], l)
csc, scd =
SDDP.get_dual_solution(model[t], SDDP.StrengthenedConicDuality())
@test isapprox(lobj, csc, atol = 1e-5)
for (k, v) in lagrange
@test isapprox(v, scd[k], atol = 1e-5)
end
end
return
end
function test_LagrangianDuality_warn()
@test_logs (:warn,) SDDP.LagrangianDuality(atol = 1e-6)
return
end
function test_BanditDuality_show()
@test sprint(show, SDDP.BanditDuality()) ==
"BanditDuality with arms:\n * SDDP.ContinuousConicDuality()\n * SDDP.StrengthenedConicDuality()"
return
end
function test_BanditDuality_eval()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = -100.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x[1:2] <= 5, SDDP.State, initial_value = 0.0)
if t == 1
@stageobjective(sp, -1.5 * x[1].out - 4 * x[2].out)
else
@variable(sp, 0 <= y[1:4] <= 1, Bin)
@variable(sp, ω[1:2])
@stageobjective(sp, -16 * y[1] - 19 * y[2] - 23 * y[3] - 28 * y[4])
@constraint(
sp,
2 * y[1] + 3 * y[2] + 4 * y[3] + 5 * y[4] <= ω[1] - x[1].in
)
@constraint(
sp,
6 * y[1] + 1 * y[2] + 3 * y[3] + 2 * y[4] <= ω[2] - x[2].in
)
steps = range(5; stop = 15, length = 10)
SDDP.parameterize(sp, [[i, j] for i in steps for j in steps]) do φ
return JuMP.fix.(ω, φ)
end
end
end
handler = SDDP.BanditDuality()
SDDP.train(model; duality_handler = handler, iteration_limit = 100)
@test sum(
l.duality_key == " " for l in model.most_recent_training_results.log
) > 10
@test sum(
l.duality_key == "S" for l in model.most_recent_training_results.log
) > 0
return
end
end # module
TestDualityHandlers.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 9081 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestForwardPasses
using SDDP
using Test
import HiGHS
import Random
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_DefaultForwardPass()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
forward_trajectory = SDDP.forward_pass(
model,
SDDP.Options(model, Dict(:x => 1.0)),
SDDP.DefaultForwardPass(),
)
simulated_value = 0.0
for ((node_index, noise), state) in
zip(forward_trajectory.scenario_path, forward_trajectory.sampled_states)
@test state[:x] == noise
simulated_value += noise
end
@test simulated_value == forward_trajectory.cumulative_value
return
end
function test_RevisitingForwardPass()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
fp = SDDP.RevisitingForwardPass(2; sub_pass = SDDP.DefaultForwardPass())
@test length(fp.archive) == 0
for i in 1:5
pass = SDDP.forward_pass(
model,
SDDP.Options(model, Dict(:x => 1.0); forward_pass = fp),
fp,
)
if i <= 2
@test length(fp.archive) == i
elseif i == 3
@test length(fp.archive) == 2
@test pass.cumulative_value == fp.archive[1].cumulative_value
elseif i == 4
@test length(fp.archive) == 2
@test pass.cumulative_value == fp.archive[2].cumulative_value
elseif i == 5
@test length(fp.archive) == 3
end
end
return
end
function test_RiskAdjustedForwardPass()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
@test_throws ArgumentError SDDP.train(
model;
iteration_limit = 5,
forward_pass_resampling_probability = 0.0,
)
@test_throws ArgumentError SDDP.train(
model;
iteration_limit = 5,
forward_pass_resampling_probability = 1.0,
)
forward_pass = SDDP.RiskAdjustedForwardPass(;
forward_pass = SDDP.DefaultForwardPass(),
risk_measure = SDDP.WorstCase(),
resampling_probability = 0.9,
)
SDDP.train(
model;
print_level = 0,
iteration_limit = 20,
forward_pass = forward_pass,
)
@test length(forward_pass.archive) < 10
SDDP.train(
model;
iteration_limit = 10,
print_level = 0,
forward_pass_resampling_probability = 0.9,
)
@test SDDP.termination_status(model) == :iteration_limit
return
end
function test_DefaultForwardPass_cyclic()
graph = SDDP.LinearGraph(3)
SDDP.add_edge(graph, 3 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
pass = SDDP.DefaultForwardPass()
options = SDDP.Options(
model,
Dict(:x => 1.0);
sampling_scheme = SDDP.InSampleMonteCarlo(; terminate_on_cycle = true),
forward_pass = pass,
)
forward_trajectory = SDDP.forward_pass(model, options, pass)
@test length(forward_trajectory.scenario_path) == 4
@test length(forward_trajectory.sampled_states) == 4
@test options.starting_states[1] == [Dict(:x => 4.5)]
@test isempty(options.starting_states[2])
@test isempty(options.starting_states[3])
return
end
function test_DefaultForwardPass_cyclic_include_last_node()
graph = SDDP.LinearGraph(3)
SDDP.add_edge(graph, 3 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
pass = SDDP.DefaultForwardPass(; include_last_node = false)
options = SDDP.Options(
model,
Dict(:x => 1.0);
sampling_scheme = SDDP.InSampleMonteCarlo(; terminate_on_cycle = true),
forward_pass = pass,
)
forward_trajectory = SDDP.forward_pass(model, options, pass)
@test length(forward_trajectory.scenario_path) == 3
@test length(forward_trajectory.sampled_states) == 3
@test options.starting_states[1] == [Dict(:x => 4.5)]
@test isempty(options.starting_states[2])
@test isempty(options.starting_states[3])
return
end
function test_DefaultForwardPass_acyclic_include_last_node()
graph = SDDP.LinearGraph(3)
model = SDDP.PolicyGraph(
graph;
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
pass = SDDP.DefaultForwardPass(; include_last_node = false)
options = SDDP.Options(
model,
Dict(:x => 1.0);
sampling_scheme = SDDP.InSampleMonteCarlo(; terminate_on_cycle = true),
forward_pass = pass,
)
forward_trajectory = SDDP.forward_pass(model, options, pass)
@test length(forward_trajectory.scenario_path) == 3
@test length(forward_trajectory.sampled_states) == 3
@test isempty(options.starting_states[1])
@test isempty(options.starting_states[2])
@test isempty(options.starting_states[3])
return
end
function test_RegularizedForwardPass()
function main(capacity_cost, forward_pass, hint)
Random.seed!(1245)
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 2, 0.95)
model = SDDP.PolicyGraph(
graph;
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
@variable(sp, 0 <= x <= 400, SDDP.State, initial_value = hint)
@variable(sp, 0 <= y, SDDP.State, initial_value = 0)
if node == 1
@stageobjective(sp, capacity_cost * x.out)
@constraint(sp, y.out == y.in)
else
@variable(sp, 0 <= u_prod <= 200)
@variable(sp, u_overtime >= 0)
@stageobjective(sp, 100u_prod + 300u_overtime + 50y.out)
@constraint(sp, x.out == x.in)
@constraint(sp, y.out <= x.in)
@constraint(sp, c_bal, y.out == y.in + u_prod + u_overtime)
SDDP.parameterize(sp, [100, 300]) do ω
set_normalized_rhs(c_bal, -ω)
return
end
end
return
end
SDDP.train(
model;
print_level = 0,
forward_pass = forward_pass,
iteration_limit = 10,
parallel_scheme = SDDP.Serial(),
)
return SDDP.calculate_bound(model)
end
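# For each (capacity cost, hint) pair, the regularized forward pass should
# produce a bound at least as tight as the default pass (up to a tolerance of 1.0).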
for (cost, hint) in [(0, 400), (200, 100), (400, 0)]
fp = SDDP.RegularizedForwardPass()
reg_bound = main(cost, fp, hint)
bound = main(cost, SDDP.DefaultForwardPass(), hint)
@test reg_bound >= bound - 1.0
end
# Test that initializing with a bad guess performs poorly
fp = SDDP.RegularizedForwardPass()
reg_bound = main(400, fp, 400)
bound = main(400, SDDP.DefaultForwardPass(), 0)
@test reg_bound < bound
return
end
end # module
TestForwardPasses.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3661 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestLocalImprovementSearch
using Test
import SDDP: LocalImprovementSearch
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
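# In these tests, the objective callback passed to `minimize` returns a tuple
# `(f(x), [∇f(x)])`, or `nothing` when `x` lies outside the domain of f (see
# the exp and piecewise cases below).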
function test_x_squared()
calls = 0
f, x = LocalImprovementSearch.minimize([0.0]) do x
f = 2.1 + (x[1] - 1.1)^2
f′ = [2 * (x[1] - 1.1)]
calls += 1
return f, f′
end
@info "squared = $(calls)"
@test isapprox(f, 2.1, atol = 1e-6)
@test isapprox(x, [1.1], atol = 1e-4)
return
end
function test_exp()
calls = 0
f, x = LocalImprovementSearch.minimize([1.0]) do x
calls += 1
if x[1] < 0.1 || x[1] > 20
return nothing
end
return exp(x[1]), [exp(x[1])]
end
@info "exp = $(calls)"
@test isapprox(f, exp(0.1), atol = 1e-2)
@test isapprox(x, [0.1], atol = 1e-2)
return
end
function test_piecewise()
calls = 0
f, x = LocalImprovementSearch.minimize([0.05]) do x
calls += 1
if x[1] < 0.0
return nothing
elseif 0.0 <= x[1] < 0.1
return -0.1 - 1 * (x[1] - 0.0), [-1.0]
elseif 0.1 <= x[1] < 0.4
return -0.2 - 0.8 * (x[1] - 0.1), [-0.8]
elseif 0.4 <= x[1] <= 1.0
return -0.44 + 0.1 * (x[1] - 0.4), [0.1]
else
@assert 1.0 <= x[1]
return nothing
end
end
@info "piecewise = $(calls)"
@test isapprox(f, -0.44, atol = 1e-3)
@test isapprox(x, [0.4], atol = 1e-3)
return
end
function test_x_squared_outer_approximation()
calls = 0
solver = LocalImprovementSearch.OuterApproximation(HiGHS.Optimizer)
f, x = LocalImprovementSearch.minimize(solver, [0.0], 0.0) do x
f = 2.1 + (x[1] - 1.1)^2
f′ = [2 * (x[1] - 1.1)]
calls += 1
return f, f′
end
@info "OA(squared) = $(calls)"
@test isapprox(f, 2.1, atol = 1e-6)
@test isapprox(x, [1.1], atol = 1e-4)
return
end
function test_exp_outer_approximation()
calls = 0
solver = LocalImprovementSearch.OuterApproximation(HiGHS.Optimizer)
f, x = LocalImprovementSearch.minimize(solver, [1.0], 0.0) do x
calls += 1
if x[1] < 0.1 || x[1] > 20
return nothing
end
return exp(x[1]), [exp(x[1])]
end
@info "OA(exp) = $(calls)"
@test isapprox(f, exp(0.1), atol = 1e-2)
@test isapprox(x, [0.1], atol = 1e-2)
return
end
function test_piecewise_outer_approximation()
calls = 0
solver = LocalImprovementSearch.OuterApproximation(HiGHS.Optimizer)
f, x = LocalImprovementSearch.minimize(solver, [0.05], -1.0) do x
calls += 1
if x[1] < 0.0
return nothing
elseif 0.0 <= x[1] < 0.1
return -0.1 - 1 * (x[1] - 0.0), [-1.0]
elseif 0.1 <= x[1] < 0.4
return -0.2 - 0.8 * (x[1] - 0.1), [-0.8]
elseif 0.4 <= x[1] <= 1.0
return -0.44 + 0.1 * (x[1] - 0.4), [0.1]
else
@assert 1.0 <= x[1]
return nothing
end
end
@info "OA(piecewise) = $(calls)"
@test isapprox(f, -0.44, atol = 1e-3)
@test isapprox(x, [0.4], atol = 1e-3)
return
end
end # module
TestLocalImprovementSearch.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5737 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
using Distributed
procs = Distributed.addprocs(4)
# !!! IMPORTANT !!!
#
# Workers **DON'T** inherit their parent's Pkg environment!
# Here's the relevant Julia issue: https://github.com/JuliaLang/julia/issues/28781
#
# This can cause really hard-to-track-down bugs because
# a) workers may have different versions of packages on them
# b) you will run into a lot of compilation errors depending on the order of
# code loading.
#
# As a hack, run the following script:
@everywhere begin
import Pkg
Pkg.activate(".")
end
@everywhere begin
using Test
using HiGHS
using SDDP
end
function test_Asynchronous()
a = SDDP.Asynchronous()
@test a.slave_ids == Distributed.workers()
@test length(a.slave_ids) > 1
b = SDDP.Asynchronous([1, 2])
@test b.slave_ids == [1, 2]
return
end
function test_Asynchronous_optimizer()
model = SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, _
@variable(sp, x, SDDP.State, initial_value = 0.0)
end
a = SDDP.Asynchronous(HiGHS.Optimizer)
a.init_callback(model)
@test solver_name(model[2].subproblem) == "HiGHS"
return
end
function test_slave_update()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = 0.0,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
end
result = SDDP.IterationResult(
1,
0.0,
0.0,
false,
:not_converged,
Dict(
1 => Any[(
theta = 1.0,
pi = Dict(:x => 2.0),
x = Dict(:x => 3.0),
obj_y = nothing,
belief_y = nothing,
)],
2 => Any[],
),
false,
)
SDDP.slave_update(model, result)
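# The cut recorded above is θ >= 1.0 + 2.0 * (x - 3.0), i.e. θ - 2x >= -5,
# hence the single two-term constraint with a GreaterThan(-5.0) set expected
# below (assuming the usual SDDP cut form θ >= theta + pi' * (x - x̄)).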
cons = JuMP.all_constraints(
model[1].subproblem,
GenericAffExpr{Float64,VariableRef},
MOI.GreaterThan{Float64},
)
@test length(cons) == 1
obj = JuMP.constraint_object(cons[1])
@test obj.set == MOI.GreaterThan(-5.0)
@test length(obj.func.terms) == 2
result = SDDP.IterationResult(
1,
0.0,
0.0,
false,
:not_converged,
Dict(
1 => Any[
(
theta = 1.0,
pi = Dict(:x => 2.0),
x = Dict(:x => 3.0),
obj_y = nothing,
belief_y = nothing,
),
nothing,
],
2 => Any[],
),
false,
)
@test_throws ErrorException SDDP.slave_update(model, result)
return
end
function test_async_solve()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0.0)
@stageobjective(node, x.out)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_lower_bound(x.out, ω)
end
end
solver =
JuMP.optimizer_with_attributes(HiGHS.Optimizer, MOI.Silent() => true)
SDDP.train(
model;
stopping_rules = [SDDP.IterationLimit(20)],
parallel_scheme = SDDP.Asynchronous(solver; use_master = false),
)
@test SDDP.termination_status(model) == :iteration_limit
@test all(l -> l.pid != 1, model.most_recent_training_results.log)
@test SDDP.calculate_bound(model) == 6.0
SDDP.train(
model;
stopping_rules = [SDDP.IterationLimit(20)],
parallel_scheme = SDDP.Asynchronous(; use_master = true) do m
for (key, node) in m.nodes
JuMP.set_optimizer(node.subproblem, HiGHS.Optimizer)
JuMP.set_silent(node.subproblem)
end
end,
)
@test SDDP.termination_status(model) == :iteration_limit
@test any(l -> l.pid == 1, model.most_recent_training_results.log)
@test SDDP.calculate_bound(model) == 6.0
return
end
function test_simulate_parallel()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
sense = :Min,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x[i = 1:2] >= i, SDDP.State, initial_value = 2i)
@stageobjective(sp, x[1].out + x[2].out)
end
simulations = SDDP.simulate(
model,
20;
custom_recorders = Dict{Symbol,Function}(
:myid => (args...) -> Distributed.myid(),
),
parallel_scheme = SDDP.Asynchronous(; use_master = false),
)
@test all([s[1][:myid] != 1 for s in simulations])
return
end
function test_trap_error()
@test SDDP.trap_error(InvalidStateException("a", :a)) === nothing
@test SDDP.trap_error(InterruptException()) === nothing
ex = DomainError(-1.0)
@test_throws ex SDDP.trap_error(ex)
flag = true
ex = try
throw(InterruptException())
flag = false
catch ex
Distributed.RemoteException(CapturedException(ex, catch_backtrace()))
end
@test SDDP.trap_error(ex) === nothing
@test flag == true
return
end
test_Asynchronous()
test_slave_update()
test_async_solve()
test_simulate_parallel()
test_trap_error()
Distributed.rmprocs(procs)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 15118 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestRiskMeasures
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_Expectation()
@test sprint(show, SDDP.Expectation()) == "SDDP.Expectation()"
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.Expectation(),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4, 0.5],
[:a, :b, :c, :d, :e],
[5.0, 4.0, 6.0, 2.0, 1.0],
true,
)
@test risk_adjusted_probability == [0.1, 0.2, 0.3, 0.4, 0.5]
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.Expectation(),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.3, 0.1],
[:a, :b, :c, :d, :e],
[5.0, 4.0, 6.0, 2.0, 1.0],
false,
)
@test risk_adjusted_probability == [0.1, 0.2, 0.3, 0.3, 0.1]
return
end
function test_WorstCase()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.WorstCase(),
risk_adjusted_probability,
[0.1, 0.2, 0.0, 0.4, 0.5],
[:a, :b, :c, :d, :e],
[5.0, 4.0, 6.0, 2.0, 1.0],
true,
)
@test risk_adjusted_probability == [1.0, 0.0, 0.0, 0.0, 0.0]
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.WorstCase(),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4, 0.5],
[:a, :b, :c, :d, :e],
[5.0, 4.0, 6.0, 2.0, 1.0],
false,
)
@test risk_adjusted_probability == [0.0, 0.0, 0.0, 0.0, 1.0]
return
end
function test_Constructors()
a = SDDP.Expectation()
b = SDDP.AVaR(0.5)
c = SDDP.WorstCase()
d = 0.5a + 0.3b + 0.2c
@test d.measures[1] == (0.5, a)
@test d.measures[2] == (0.3, b)
@test d.measures[3] == (0.2, c)
aa = SDDP.EAVaR(; lambda = 0.5, beta = 0.25)
@test aa.measures[1] == (0.5, SDDP.Expectation())
@test aa.measures[2] == (0.5, SDDP.AVaR(0.25))
return
end
function test_AVaR()
@test_throws Exception SDDP.AVaR(-0.1)
@test_throws Exception SDDP.AVaR(1.1)
end
function test_AVaR_02()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.AVaR(0.2),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
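# The trailing `false` indicates a maximization, so AVaR(0.2) conditions on
# the worst 20% of outcomes (the smallest realizations): 0.1 of mass from :a
# and 0.1 from :b, each rescaled by 1 / β to 0.5.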
@test risk_adjusted_probability == [0.5, 0.5, 0.0, 0.0]
return
end
function test_AVaR_0()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.AVaR(0.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
@test risk_adjusted_probability == [1.0, 0.0, 0.0, 0.0]
return
end
function test_AVaR_1()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.AVaR(1.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
@test risk_adjusted_probability == [0.1, 0.2, 0.3, 0.4]
return
end
function test_EAVaR()
@test sprint(show, SDDP.EAVaR(; lambda = 0.2, beta = 0.3)) ==
"A convex combination of 0.2 * SDDP.Expectation() + 0.8 * SDDP.AVaR(0.3)"
@test_throws Exception SDDP.EAVaR(lambda = 1.1)
@test_throws Exception SDDP.EAVaR(lambda = -0.1)
@test_throws Exception SDDP.EAVaR(beta = 1.1)
@test_throws Exception SDDP.EAVaR(beta = -0.1)
return
end
function test_EAVaR_max_25_2()
nominal_probability = [0.1, 0.2, 0.3, 0.4]
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.EAVaR(; lambda = 0.25, beta = 0.2),
risk_adjusted_probability,
nominal_probability,
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
@test risk_adjusted_probability ≈
0.25 * nominal_probability + 0.75 * [1 / 2, 1 / 2, 0, 0]
return
end
function test_EAVaR_min_25_2()
nominal_probability = [0.1, 0.2, 0.3, 0.4]
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.EAVaR(; lambda = 0.25, beta = 0.2),
risk_adjusted_probability,
nominal_probability,
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
true,
)
@test risk_adjusted_probability ≈
0.25 * nominal_probability + 0.75 * [0, 0, 0, 1.0]
return
end
function test_EAVaR_max_50_0()
nominal_probability = [0.1, 0.2, 0.3, 0.4]
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.EAVaR(; lambda = 0.5, beta = 0.0),
risk_adjusted_probability,
nominal_probability,
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
@test risk_adjusted_probability ≈
0.5 * nominal_probability + 0.5 * [1.0, 0, 0, 0]
return
end
function test_EAVaR_max_50_0_ii()
nominal_probability = [0.0, 0.2, 0.4, 0.4]
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
SDDP.EAVaR(; lambda = 0.5, beta = 0.0),
risk_adjusted_probability,
nominal_probability,
[:a, :b, :c, :d],
[1.0, 2.0, 3.0, 4.0],
false,
)
@test risk_adjusted_probability ≈
0.5 * nominal_probability + 0.5 * [0.0, 1.0, 0, 0]
return
end
function test_ModifiedChiSquared()
@test sprint(show, SDDP.ModifiedChiSquared(0.1)) ==
"ModifiedChiSquared with radius=0.1"
return
end
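# As the tests below check, a radius of 0.0 should recover the nominal
# (expectation) probabilities, while a sufficiently large radius should
# collapse onto the worst outcome, for both uniform and nonuniform nominal
# distributions.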
function test_ModifiedChiSquared_min_0()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.0),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈ [0.2, 0.2, 0.2, 0.2, 0.2] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform_Expectation()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.2, 0.2],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈ [0.1, 0.2, 0.3, 0.2, 0.2] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform_Expectation_Max()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.2, 0.2],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
false,
)
@test risk_adjusted_probability ≈ [0.1, 0.2, 0.3, 0.2, 0.2] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform_WorstCase()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(6.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.3, 0.1],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈ [0.0, 1.0, 0.0, 0.0, 0.0] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform_WorstCase_Max()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(6.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.3, 0.1],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
false,
)
@test risk_adjusted_probability ≈ [0.0, 0.0, 0.0, 0.0, 1.0] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.45),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.3, 0.1],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -0.5],
true,
)
@test risk_adjusted_probability ≈
[0.115714, 0.372861, 0.158568, 0.001421, 0.351435] atol = 1e-6
return
end
function test_ModifiedChiSquared_Nonuniform_max()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.45),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.3, 0.1],
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -0.5],
false,
)
@test risk_adjusted_probability ≈ [0.0, 0.0, 0.323223, 0.676777, 0.0] atol =
1e-6
return
end
function test_ModifiedChiSquared_Min_025()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.25),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈
[0.279057, 0.358114, 0.2, 0.120943, 0.0418861] atol = 1e-6
return
end
function test_ModifiedChiSquared_Max_025()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.25),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[2.0, 1.0, 3.0, 4.0, 5.0],
false,
)
@test risk_adjusted_probability ≈
[0.279057, 0.358114, 0.2, 0.120943, 0.0418861] atol = 1e-6
return
end
function test_ModifiedChiSquared_Min_04()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.4),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈
[0.324162, 0.472486, 0.175838, 0.027514, 0.0] atol = 1e-6
return
end
function test_ModifiedChiSquared_Max_04()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.4),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[2.0, 1.0, 3.0, 4.0, 5.0],
false,
)
@test risk_adjusted_probability ≈
[0.324162, 0.472486, 0.175838, 0.027514, 0.0] atol = 1e-6
return
end
function test_ModifiedChiSquared_Min_sqrt08()
risk_adjusted_probability = Vector{Float64}(undef, 5)
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(sqrt(0.8)),
risk_adjusted_probability,
fill(0.2, 5),
[:a, :b, :c, :d, :e],
[-2.0, -1.0, -3.0, -4.0, -5.0],
true,
)
@test risk_adjusted_probability ≈ [0, 1.0, 0, 0, 0]
return
end
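# Helper for the Wasserstein tests: `alpha` is interpreted as the radius of
# the ambiguity set, the do-block supplies the distance |x - y| between noise
# outcomes, and HiGHS solves the reweighting problem. The tests below suggest
# alpha = 0.0 recovers the nominal probabilities and a large alpha recovers
# the worst case.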
function _default_wasserstein(alpha)
return SDDP.Wasserstein(HiGHS.Optimizer; alpha = alpha) do x, y
return abs(x - y)
end
end
function test_Wasserstein()
@test sprint(show, _default_wasserstein(0.1)) == "SDDP.Wasserstein"
@test_throws Exception _default_wasserstein(-1.0)
return
end
function test_Wasserstein_Max_WorstCase()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(10.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
[1.1, 1.2, 0.6, 1.3],
false,
)
@test risk_adjusted_probability ≈ [0, 0, 1.0, 0]
return
end
function test_Wasserstein_Min_WorstCase()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(10.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
[1.1, 1.2, 0.6, 1.3],
true,
)
@test risk_adjusted_probability ≈ [0, 0, 0, 1.0]
return
end
function test_Wasserstein_Max_Expectation()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(0.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
[1.1, 1.2, 0.6, 1.3],
false,
)
@test risk_adjusted_probability ≈ [0.1, 0.2, 0.3, 0.4]
return
end
function test_Wasserstein_Min_Expectation()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(0.0),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
[1.1, 1.2, 0.6, 1.3],
true,
)
@test risk_adjusted_probability ≈ [0.1, 0.2, 0.3, 0.4]
return
end
function test_Wasserstein_Max_Intermediate()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(0.1),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
[1.1, 1.2, 0.6, 1.3],
false,
)
@test risk_adjusted_probability ≈ [0.0, 1 / 6, 5 / 6, 0.0]
return
end
function test_Wasserstein_Min_Intermediate()
risk_adjusted_probability = Vector{Float64}(undef, 4)
SDDP.adjust_probability(
_default_wasserstein(0.1),
risk_adjusted_probability,
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.6, 0.4],
-[1.1, 1.2, 0.6, 1.3],
true,
)
@test risk_adjusted_probability ≈ [0.0, 1 / 6, 5 / 6, 0.0]
return
end
function test_Entropic()
@test sprint(show, SDDP.Entropic(0.1)) ==
"Entropic risk measure with γ = 0.1"
return
end
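# Assuming the textbook entropic risk measure F_γ[X] = (1 / γ) * log(E[exp(γ X)]),
# F_γ[X] is nondecreasing in γ, moving from E[X] as γ → 0 towards the worst
# outcome as γ → ∞. The tests below only check this monotonicity through the
# risk-adjusted representation Q' * X + α returned by `adjust_probability`.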
function test_Entropic_Min()
# Test that increasing values of θ lead to larger values for F[X].
X = [1.0, 2.0, 3.0]
P = [0.5, 0.5, 0.0]
Q = [NaN, NaN, NaN]
last, last_q2 = -Inf, 0.0
for i in -4:4
θ = 10.0^i
α = SDDP.adjust_probability(SDDP.Entropic(θ), Q, P, [], X, true)
current = Q' * X + α
@test current >= last
@test Q[2] >= last_q2
last, last_q2 = current, Q[2]
end
return
end
function test_Entropic_Max()
# Test that increasing values of θ lead to smaller values for F[X].
X = [1.0, 2.0, 3.0]
P = [0.5, 0.5, 0.0]
Q = [NaN, NaN, NaN]
last, last_q1 = Inf, 0.0
for i in -4:4
θ = 10.0^i
α = SDDP.adjust_probability(SDDP.Entropic(θ), Q, P, [], X, false)
current = Q' * X + α
if Q[1] < 1 - 1e-6
# If Q[1] ≈ 1, this test can fail due to numerical error.
@test current <= last
end
@test Q[1] >= last_q1
last, last_q1 = current, Q[1]
end
return
end
end # module
TestRiskMeasures.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 11787 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestSamplingSchemes
using SDDP
using Test
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_InSampleMonteCarlo_Acyclic()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
@test_throws ErrorException SDDP.InSampleMonteCarlo(
max_depth = 0,
terminate_on_dummy_leaf = false,
terminate_on_cycle = false,
)
scenario, terminated_due_to_cycle =
SDDP.sample_scenario(model, SDDP.InSampleMonteCarlo())
@test length(scenario) == 2
@test !terminated_due_to_cycle
for (stage, (node, noise)) in enumerate(scenario)
@test stage == node
@test noise in stage * [1, 3]
end
return
end
function test_InSampleMonteCarlo_Cyclic()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(
model,
SDDP.InSampleMonteCarlo(;
terminate_on_dummy_leaf = false,
max_depth = 4,
),
)
@test length(scenario) == 4
@test !terminated_due_to_cycle # Terminated due to max depth.
for (index, (node, noise)) in enumerate(scenario)
stage = (index - 1) % 2 + 1
@test stage == node
@test noise in stage * [1, 3]
end
return
end
function test_OutOfSampleMonteCarlo_Acyclic()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
@test_throws ErrorException SDDP.OutOfSampleMonteCarlo(
(node) -> nothing,
model,
max_depth = 0,
terminate_on_dummy_leaf = false,
terminate_on_cycle = false,
)
sampler = SDDP.OutOfSampleMonteCarlo(
model;
use_insample_transition = true,
) do stage
return [SDDP.Noise(2 * stage, 0.4), SDDP.Noise(4 * stage, 0.6)]
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 2
@test !terminated_due_to_cycle
for (stage, (node, noise)) in enumerate(scenario)
@test stage == node
@test noise in stage * [2, 4]
end
sampler = SDDP.OutOfSampleMonteCarlo(
model;
use_insample_transition = false,
) do stage
if stage == 0
return [SDDP.Noise(2, 1.0)]
else
return SDDP.Noise{Int}[],
[SDDP.Noise(2 * stage, 0.4), SDDP.Noise(4 * stage, 0.6)]
end
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 1
@test !terminated_due_to_cycle
node, noise = scenario[1]
@test node == 2
@test noise in [4, 8]
return
end
function test_OutOfSampleMonteCarlo_Cyclic()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
sampler = SDDP.OutOfSampleMonteCarlo(
model;
use_insample_transition = true,
terminate_on_dummy_leaf = false,
max_depth = 4,
) do stage
return [SDDP.Noise(2 * stage, 0.4), SDDP.Noise(4 * stage, 0.6)]
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 4
@test !terminated_due_to_cycle # Terminated due to max depth.
for (index, (node, noise)) in enumerate(scenario)
stage = (index - 1) % 2 + 1
@test stage == node
@test noise in stage * [2, 4]
end
end
function test_Historical()
@test_throws Exception SDDP.Historical([[1, 2], [3, 4]], [0.6, 0.6])
return
end
function test_Historical_SingleTrajectory()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(
model,
SDDP.Historical([(1, 0.1), (2, 0.2), (1, 0.3)]),
)
@test length(scenario) == 3
@test !terminated_due_to_cycle
@test scenario == [(1, 0.1), (2, 0.2), (1, 0.3)]
return
end
function test_Historical_SingleTrajectory_terminate_on_cycle()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
scenario, terminated_due_to_cycle = SDDP.sample_scenario(
model,
SDDP.Historical(
[(1, 0.1), (2, 0.2), (1, 0.3)];
terminate_on_cycle = true,
),
)
@test length(scenario) == 3
@test terminated_due_to_cycle
@test scenario == [(1, 0.1), (2, 0.2), (1, 0.3)]
return
end
function test_Historical_multiple()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
scenario_A = [(1, 0.1), (2, 0.2), (1, 0.3)]
scenario_B = [(1, 0.4), (2, 0.5)]
for i in 1:10
scenario, terminated_due_to_cycle = SDDP.sample_scenario(
model,
SDDP.Historical([scenario_A, scenario_B], [0.2, 0.8]),
)
if length(scenario) == 3
@test scenario == scenario_A
else
@test length(scenario) == 2
@test scenario == scenario_B
end
@test !terminated_due_to_cycle
end
return
end
function test_PSR()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
scheme = SDDP.PSRSamplingScheme(2)
scenario_1, term_1 = SDDP.sample_scenario(model, scheme)
@test length(scenario_1) == 2
@test !term_1
@test length(scheme.scenarios) == 1
scenario_2, term_2 = SDDP.sample_scenario(model, scheme)
@test length(scenario_2) == 2
@test !term_2
@test length(scheme.scenarios) == 2
scenario_3, _ = SDDP.sample_scenario(model, scheme)
@test scenario_1 == scenario_3
@test length(scheme.scenarios) == 2
scenario_4, _ = SDDP.sample_scenario(model, scheme)
@test scenario_2 == scenario_4
@test length(scheme.scenarios) == 2
return
end
function test_InSampleMonteCarlo_initial_node()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
for (start, node) in (nothing => 1, 1 => 1, 2 => 2)
for _ in 1:10
scenario, _ = SDDP.sample_scenario(
model,
SDDP.InSampleMonteCarlo(; initial_node = start),
)
@test scenario[1][1] == node
end
end
return
end
function test_OutOfSampleMonteCarlo_initial_node()
graph = SDDP.LinearGraph(2)
SDDP.add_edge(graph, 2 => 1, 0.9)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, 0 <= x <= 1)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x, ω)
end
end
for (start, node) in (nothing => 1, 1 => 1, 2 => 2)
for _ in 1:10
sampler = SDDP.OutOfSampleMonteCarlo(
model;
use_insample_transition = true,
terminate_on_dummy_leaf = false,
max_depth = 4,
initial_node = start,
) do stage
return [SDDP.Noise(2 * stage, 0.4), SDDP.Noise(4 * stage, 0.6)]
end
scenario, _ = SDDP.sample_scenario(model, sampler)
@test scenario[1][1] == node
end
end
end
function test_SimulatorSamplingScheme()
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price,)]) do ω
return SDDP.@stageobjective(sp, price * x.out)
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] in ((40.0,), (50.1,), (59.6,))
return
end
function test_SimulatorSamplingScheme_with_noise()
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price, i) for i in 1:2]) do ω
            return SDDP.@stageobjective(sp, price * x.out + ω[2])
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] isa Tuple{Float64,Int}
@test scenario[1][2][1] in (40.0, 50.1, 59.6)
@test scenario[1][2][2] in 1:3
return
end
end # module
TestSamplingSchemes.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 11363 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
module TestStoppingRules
using Random
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_TimeLimit()
graph = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0)
end
rule = SDDP.TimeLimit(0.5)
@test SDDP.stopping_rule_status(rule) == :time_limit
@test SDDP.convergence_test(
graph,
[SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false)],
rule,
)
@test !SDDP.convergence_test(
graph,
[SDDP.Log(1, 0.0, 0.0, 0.1, 1, 1, " ", false)],
rule,
)
return
end
function test_IterationLimit()
graph = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0)
end
rule = SDDP.IterationLimit(2)
@test SDDP.stopping_rule_status(rule) == :iteration_limit
@test SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 0.0, 0.0, 1.0, 1, 1, " ", false),
],
rule,
)
@test !SDDP.convergence_test(
graph,
[SDDP.Log(1, 0.0, 0.0, 0.1, 1, 1, " ", false)],
rule,
)
return
end
function test_Statistical()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
bellman_function = SDDP.BellmanFunction(; lower_bound = 0.0),
optimizer = HiGHS.Optimizer,
sense = :Min,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_lower_bound(x.out, ω)
end
@stageobjective(node, x.out)
end
SDDP.train(model; iteration_limit = 1, print_level = 0)
rule = SDDP.Statistical(; num_replications = 20)
@test SDDP.stopping_rule_status(rule) == :statistical
Random.seed!(123)
@test SDDP.convergence_test(
model,
[SDDP.Log(1, 6.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
@test !SDDP.convergence_test(
model,
[SDDP.Log(1, 0.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
@test SDDP.convergence_test(
model,
[SDDP.Log(1, 12.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
bellman_function = SDDP.BellmanFunction(; upper_bound = 6.0),
optimizer = HiGHS.Optimizer,
sense = :Max,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0.0)
SDDP.parameterize(node, stage * [1, 3], [0.5, 0.5]) do ω
return JuMP.set_upper_bound(x.out, ω)
end
@stageobjective(node, x.out)
end
SDDP.train(model; iteration_limit = 1, print_level = 0)
rule = SDDP.Statistical(; num_replications = 20)
@test SDDP.stopping_rule_status(rule) == :statistical
Random.seed!(123)
@test SDDP.convergence_test(
model,
[SDDP.Log(1, 6.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
@test SDDP.convergence_test(
model,
[SDDP.Log(1, 0.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
@test !SDDP.convergence_test(
model,
[SDDP.Log(1, 12.0, 9.0, 1.0, 1, 1, " ", false)],
rule,
)
return
end
function test_BoundStalling()
graph = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0)
end
rule = SDDP.BoundStalling(3, 1.0)
@test SDDP.stopping_rule_status(rule) == :bound_stalling
# Not enough iterations to terminate.
@test !SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 1.9, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(3, 2.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(4, 2.0, 0.0, 1.0, 1, 1, " ", false),
],
rule,
)
# Now there is. But only just...
@test SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 1.9, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(3, 2.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(4, 2.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(5, 2.9, 0.0, 1.0, 1, 1, " ", false),
],
rule,
)
# This also meets the test, but we don't terminate because it hasn't
# differed from the initial bound.
@test !SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.1, 1.0, 1, 1, " ", false),
SDDP.Log(2, 0.0, 0.2, 1.1, 1, 2, " ", false),
SDDP.Log(3, 0.0, 0.1, 1.2, 1, 3, " ", false),
SDDP.Log(4, 0.0, 0.0, 1.3, 1, 4, " ", false),
],
rule,
)
# This also meets the test, because it looks like a deterministic
# policy
@test SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(3, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(4, 0.0, 0.0, 1.0, 1, 1, " ", false),
],
rule,
)
return
end
function test_StoppingChain()
graph = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = 0.0,
direct_mode = false,
) do node, stage
@variable(node, x, SDDP.State, initial_value = 0)
end
rule = SDDP.StoppingChain(SDDP.IterationLimit(2), SDDP.TimeLimit(60.0))
@test SDDP.stopping_rule_status(rule) ==
Symbol("iteration_limit ∧ time_limit")
# Not enough iterations to terminate.
@test !SDDP.convergence_test(
graph,
[SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false)],
rule,
)
    # Now there is. But not enough time.
@test !SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 0.0, 0.0, 59.0, 1, 1, " ", false),
],
rule,
)
# Both satisfied.
@test SDDP.convergence_test(
graph,
[
SDDP.Log(1, 0.0, 0.0, 1.0, 1, 1, " ", false),
SDDP.Log(2, 0.0, 0.0, 59.0, 1, 1, " ", false),
SDDP.Log(3, 0.0, 0.0, 60.1, 1, 1, " ", false),
],
rule,
)
return
end
function test_SimulationStoppingRule()
graph = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0)
@stageobjective(node, x.out)
end
rule = SDDP.SimulationStoppingRule()
@test rule.replications == -1
@test SDDP.stopping_rule_status(rule) == :simulation_stopping
log = [
SDDP.Log(1, 0.000000e+00, 8.316000e+03, 1.559195, 1, 14, "", false),
SDDP.Log(2, 3.171195e+03, 8.767171e+03, 1.801409, 1, 136, "", false),
SDDP.Log(3, 4.057980e+03, 4.500000e+03, 1.807249, 1, 150, "", false),
SDDP.Log(4, 4.074139e+03, 2.314272e+03, 1.813528, 1, 164, "", false),
SDDP.Log(5, 4.074139e+03, 4.716000e+03, 1.819679, 1, 178, "", false),
SDDP.Log(6, 4.074139e+03, 2.308500e+03, 1.824431, 1, 192, "", false),
SDDP.Log(7, 4.074139e+03, 2.308500e+03, 1.830817, 1, 206, "", false),
SDDP.Log(8, 4.074139e+03, 2.308500e+03, 1.837420, 1, 220, "", false),
SDDP.Log(9, 4.074139e+03, 5.132230e+03, 1.843861, 1, 234, "", false),
SDDP.Log(10, 4.074139e+03, 5.197500e+03, 1.850351, 1, 248, "", false),
SDDP.Log(11, 4.074139e+03, 4.716000e+03, 1.856620, 1, 262, "", false),
SDDP.Log(12, 4.074139e+03, 2.308500e+03, 1.862838, 1, 276, "", false),
SDDP.Log(13, 4.074139e+03, 2.308500e+03, 1.869224, 1, 290, "", false),
SDDP.Log(14, 4.074139e+03, 2.308500e+03, 1.875853, 1, 304, "", false),
SDDP.Log(15, 4.074139e+03, 2.308500e+03, 1.882504, 1, 318, "", false),
SDDP.Log(16, 4.074139e+03, 5.197500e+03, 1.889759, 1, 332, "", false),
SDDP.Log(17, 4.074139e+03, 5.132230e+03, 1.896462, 1, 346, "", false),
SDDP.Log(18, 4.074139e+03, 8.086500e+03, 1.903102, 1, 360, "", false),
SDDP.Log(19, 4.074139e+03, 2.308500e+03, 1.910075, 1, 374, "", false),
SDDP.Log(20, 4.074139e+03, 5.132230e+03, 1.917460, 1, 388, "", false),
]
@test !SDDP.convergence_test(graph, log[1:1], rule)
@test rule.replications == 1
@test !SDDP.convergence_test(graph, log[1:4], rule)
@test !SDDP.convergence_test(graph, log[1:10], rule)
@test !SDDP.convergence_test(graph, log[1:19], rule)
@test SDDP.convergence_test(graph, log[1:20], rule)
return
end
function test_FirstStageStoppingRule()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0)
@stageobjective(node, x.out)
end
rule = SDDP.FirstStageStoppingRule()
SDDP.train(model; stopping_rules = [rule])
n_iterations = length(rule.data)
@test 50 <= n_iterations <= 55 # Depends on nthreads
log = model.most_recent_training_results.log
set_lower_bound(model[1].subproblem[:x].out, 1.0)
@test !SDDP.convergence_test(model, log, rule)
@test length(rule.data) == n_iterations + 1
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0)
SDDP.parameterize(node, 1:2) do w
return @stageobjective(node, w * x.out)
end
return
end
@test_throws(
ErrorException(
"FirstStageStoppingRule cannot be applied because first-stage is " *
"not deterministic",
),
SDDP.train(
model;
print_level = 0,
stopping_rules = [SDDP.FirstStageStoppingRule()],
),
)
graph = SDDP.Graph(0, [1, 2], [(0 => 1, 0.5), (0 => 2, 0.5)])
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do node, stage
@variable(node, x >= 0, SDDP.State, initial_value = 0)
@stageobjective(node, x.out)
return
end
@test_throws(
ErrorException(
"FirstStageStoppingRule cannot be applied because first-stage is " *
"not deterministic",
),
SDDP.train(
model;
print_level = 0,
stopping_rules = [SDDP.FirstStageStoppingRule()],
),
)
return
end
end # module
TestStoppingRules.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2459 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
function test_threaded()
# We should test that JULIA_NUM_THREADS is set in CI jobs
if get(ENV, "CI", "false") == "true"
num_threads = get(ENV, "JULIA_NUM_THREADS", "0")
@test parse(Int, num_threads) == Threads.nthreads()
@test Threads.nthreads() > 1
end
c_eta, c_pt = [0.8, 0.5], [2, 5, 8, 11, 14]
df_demand = rand(10:10:60, 24)
model = SDDP.LinearPolicyGraph(;
stages = 24,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(
subproblem,
0 <= x_volume[1:2] <= 8,
SDDP.State,
initial_value = 1,
)
@variable(subproblem, u_u[1:2] >= 0)
@variable(subproblem, u_v[1:2] >= 0)
@variable(subproblem, 0 <= u_x[1:5] <= 5, Int)
@variable(subproblem, u_slack >= 0)
@variable(subproblem, w_noise)
@constraint(
subproblem,
[j in 1:2],
x_volume[j].out == x_volume[j].in + c_eta[j] * u_v[j] - u_u[j],
)
@constraint(
subproblem,
sum(u_x) + sum(u_u) - sum(u_v) + u_slack == df_demand[t] + w_noise,
)
SDDP.parameterize(subproblem, [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]) do w
return JuMP.fix(w_noise, w)
end
@stageobjective(subproblem, c_pt' * u_x + 35 * u_slack)
return
end
SDDP.train(model; iteration_limit = 100, parallel_scheme = SDDP.Threaded())
thread_ids_seen =
Set{Int}(log.pid for log in model.most_recent_training_results.log)
min_threads = Threads.nthreads() == 1 ? 1 : 2
@test min_threads <= length(thread_ids_seen) <= Threads.nthreads()
recorder = Dict{Symbol,Function}(:thread_id => sp -> Threads.threadid())
simulations = SDDP.simulate(
model,
100;
parallel_scheme = SDDP.Threaded(),
custom_recorders = recorder,
)
thread_ids_seen =
Set{Int}(data[:thread_id] for sim in simulations for data in sim)
    min_threads = Threads.nthreads() == 1 ? 1 : 2
@test min_threads <= length(thread_ids_seen) <= Threads.nthreads()
return
end
test_threaded()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5487 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestValueFunctions
using SDDP
using Test
import HiGHS
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_ValueFunction_Min()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
V1 = SDDP.ValueFunction(model[1])
@test SDDP.evaluate(V1, Dict(:x => 1.0)) == (0.0, Dict(:x => 0.0))
SDDP.train(model; iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in
[(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
@test SDDP.evaluate(V1, Dict(:x => xhat)) == (yhat, Dict(:x => pihat))
end
return
end
function test_ValueFunction_Max()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, -2 * x.out)
end
SDDP.train(model; iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in
[(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
(y, duals) = SDDP.evaluate(V1, Dict(:x => xhat))
@test y == -yhat
@test duals == Dict(:x => -pihat)
end
return
end
function test_ValueFunction_optimizer()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
direct_mode = true,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
SDDP.train(model; iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws JuMP.NoOptimizer() SDDP.evaluate(V1, Dict(:x => 1.0))
JuMP.set_optimizer(V1, HiGHS.Optimizer)
(y, _) = SDDP.evaluate(V1, Dict(:x => 1.0))
@test y == 2.0
return
end
function test_ValueFunction_objective_state()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
SDDP.add_objective_state(
sp;
initial_value = 0.0,
lipschitz = 10.0,
) do p, ω
return p + ω
end
@constraint(sp, x.out == x.in)
SDDP.parameterize(sp, [1, 2]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * x.out)
end
end
SDDP.train(model; iteration_limit = 10, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws AssertionError SDDP.evaluate(V1, Dict(:x => 1.0))
@test SDDP.evaluate(V1, Dict(:x => 1.0); objective_state = 1) ==
(2.5, Dict(:x => 2.5))
@test SDDP.evaluate(V1, Dict(:x => 0.0); objective_state = 2) ==
(0.0, Dict(:x => 3.5))
return
end
function test_ValueFunction_belief_state()
graph = SDDP.MarkovianGraph(Matrix{Float64}[[0.5 0.5], [1.0 0.0; 0.0 1.0]])
SDDP.add_ambiguity_set(graph, [(1, 1), (1, 2)])
SDDP.add_ambiguity_set(graph, [(2, 1), (2, 2)])
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
(t, i) = node
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
P = [[0.2, 0.8], [0.8, 0.2]]
SDDP.parameterize(sp, [1, 2], P[i]) do ω
@stageobjective(sp, ω * x.out)
end
end
SDDP.train(model; iteration_limit = 10, print_level = 0)
V11 = SDDP.ValueFunction(model[(1, 1)])
@test_throws AssertionError SDDP.evaluate(V11, Dict(:x => 1.0))
b = Dict((1, 1) => 0.8, (1, 2) => 0.2)
(y, duals) = SDDP.evaluate(V11, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68
V12 = SDDP.ValueFunction(model[(1, 2)])
(y, duals) = SDDP.evaluate(V12, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68
return
end
function test_ValueFunction_plot()
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@variable(sp, y >= 0, SDDP.State, initial_value = 0)
@constraint(sp, x.out >= x.in)
@constraint(sp, x.out >= 2 * x.in - 1)
@constraint(sp, y.out == y.in)
@stageobjective(sp, x.out + y.out)
end
SDDP.train(model; iteration_limit = 3, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
SDDP.plot(V1; x = 0:0.1:2, y = 0, open = false)
SDDP.plot(V1; x = 0:0.1:2, y = 0:0.1:2, open = false)
return
end
end # module
TestValueFunctions.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2637 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestVisualization
using SDDP
using Test
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_SpaghettiPlot()
simulations = [
[
Dict(:x => 1.0, :y => 4.0),
Dict(:x => 2.0, :y => 5.0),
Dict(:x => 3.0, :y => 6.0),
],
[
Dict(:x => 1.5, :y => 4.5),
Dict(:x => 2.5, :y => 5.5),
Dict(:x => 3.5, :y => 6.5),
],
]
plt = SDDP.SpaghettiPlot(simulations)
SDDP.add_spaghetti(plt; cumulative = true) do data
return data[:x]
end
SDDP.add_spaghetti(plt; title = "y") do data
return 2 * data[:y]
end
SDDP.plot(plt, "test.html"; open = false)
@test sprint(show, plt) == "A spaghetti plot with 2 scenarios and 3 stages."
control = joinpath(@__DIR__, "control.html")
if Sys.WORD_SIZE == 64
# This fails on 32-bit machines.
@test read("test.html", String) == read(control, String)
end
SDDP.save(plt, "test.html"; open = false)
@test sprint(show, plt) == "A spaghetti plot with 2 scenarios and 3 stages."
if Sys.WORD_SIZE == 64
# This fails on 32-bit machines.
@test read("test.html", String) == read(control, String)
end
rm("test.html")
return
end
function test_PublicationPlot()
simulations = [
[Dict{Symbol,Any}(:x => 1), Dict{Symbol,Any}(:x => 5)],
[Dict{Symbol,Any}(:x => 2), Dict{Symbol,Any}(:x => 6)],
[Dict{Symbol,Any}(:x => 3), Dict{Symbol,Any}(:x => 4)],
]
data = SDDP.publication_data(simulations, [0.0, 0.25, 0.5, 1.0], d -> d[:x])
@test data == [1 4; 1.5 4.5; 2 5; 3 6]
for val in (-Inf, Inf, NaN)
simulations[2][2] = Dict{Symbol,Any}(:x => val)
@test_throws(
ErrorException(
"Unable to plot `publication_plot` because stage 2 of " *
"replication 2 contains data that is not finite. The data " *
"function must return a finite real-valued scalar. Got: $val",
),
SDDP.publication_data(simulations, [0.5], d -> d[:x]),
)
end
return
end
end # module
TestVisualization.runtests()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 907 | <img src="https://raw.githubusercontent.com/odow/SDDP.jl/e9de84e0a4b57374bd9e0c95148da1501816e4c5/docs/src/assets/logo.png" alt="logo" width="100px"/>
# SDDP.jl
[](https://github.com/odow/SDDP.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/odow/SDDP.jl)
[SDDP.jl](https://github.com/odow/SDDP.jl) is a JuMP extension for solving large
convex multistage stochastic programming problems using stochastic dual dynamic
programming.
## License
`SDDP.jl` is licensed under the [MPL 2.0 license](https://github.com/odow/SDDP.jl/blob/master/LICENSE.md).
## Documentation
You can find the documentation at [sddp.dev](https://sddp.dev).
## Help
If you need help, please [open a GitHub issue](https://github.com/odow/SDDP.jl/issues/new).
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 2360 | # [API Reference](@id api_reference_list)
## Policy graphs
```@docs
SDDP.Graph
SDDP.add_node
SDDP.add_edge
SDDP.add_ambiguity_set
SDDP.LinearGraph
SDDP.MarkovianGraph
SDDP.UnicyclicGraph
SDDP.LinearPolicyGraph
SDDP.MarkovianPolicyGraph
SDDP.PolicyGraph
```
## Subproblem definition
```@docs
@stageobjective
SDDP.parameterize
SDDP.add_objective_state
SDDP.objective_state
SDDP.Noise
```
## Training the policy
```@docs
SDDP.numerical_stability_report
SDDP.train
SDDP.termination_status
SDDP.write_cuts_to_file
SDDP.read_cuts_from_file
SDDP.write_log_to_csv
```
### [Stopping rules](@id api_stopping_rules)
```@docs
SDDP.AbstractStoppingRule
SDDP.stopping_rule_status
SDDP.convergence_test
SDDP.IterationLimit
SDDP.TimeLimit
SDDP.Statistical
SDDP.BoundStalling
SDDP.StoppingChain
SDDP.SimulationStoppingRule
SDDP.FirstStageStoppingRule
```
### Sampling schemes
```@docs
SDDP.AbstractSamplingScheme
SDDP.sample_scenario
SDDP.InSampleMonteCarlo
SDDP.OutOfSampleMonteCarlo
SDDP.Historical
SDDP.PSRSamplingScheme
SDDP.SimulatorSamplingScheme
```
### Parallel schemes
```@docs
SDDP.AbstractParallelScheme
SDDP.Serial
SDDP.Threaded
SDDP.Asynchronous
```
### Forward passes
```@docs
SDDP.AbstractForwardPass
SDDP.DefaultForwardPass
SDDP.RevisitingForwardPass
SDDP.RiskAdjustedForwardPass
SDDP.AlternativeForwardPass
SDDP.AlternativePostIterationCallback
SDDP.RegularizedForwardPass
```
### Risk Measures
```@docs
SDDP.AbstractRiskMeasure
SDDP.adjust_probability
```
### Duality handlers
```@docs
SDDP.AbstractDualityHandler
SDDP.ContinuousConicDuality
SDDP.LagrangianDuality
SDDP.StrengthenedConicDuality
SDDP.BanditDuality
```
## Simulating the policy
```@docs
SDDP.simulate
SDDP.calculate_bound
SDDP.add_all_cuts
```
## Decision rules
```@docs
SDDP.DecisionRule
SDDP.evaluate
```
## Visualizing the policy
```@docs
SDDP.SpaghettiPlot
SDDP.add_spaghetti
SDDP.publication_plot
SDDP.ValueFunction
SDDP.evaluate(::SDDP.ValueFunction, ::Dict{Symbol,Float64})
SDDP.plot
```
## Debugging the model
```@docs
SDDP.write_subproblem_to_file
SDDP.deterministic_equivalent
```
## StochOptFormat
```@docs
SDDP.write_to_file
SDDP.read_from_file
Base.write(::IO, ::SDDP.PolicyGraph)
Base.read(::IO, ::Type{SDDP.PolicyGraph})
SDDP.evaluate(::SDDP.PolicyGraph{T}, ::SDDP.ValidationScenarios{T}) where {T}
SDDP.ValidationScenarios
SDDP.ValidationScenario
```
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 16599 | ```@meta
CurrentModule = SDDP
```
# Release notes
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v1.8.1 (August 5, 2024)
### Fixed
- Fixed various issues with `SDDP.Threaded()` (#761)
- Fixed a deprecation warning for sorting a dictionary (#763)
### Other
- Updated copyright notices (#762)
- Updated `.JuliaFormatter.toml` (#764)
## v1.8.0 (July 24, 2024)
### Added
- Added `SDDP.Threaded()`, which is an experimental parallel scheme that
supports solving problems using multiple threads. Some parts of SDDP.jl may
not be thread-safe, and this can cause incorrect results, segfaults, or other
errors. Please use with care and report any issues by opening a GitHub issue.
(#758)
### Other
- Documentation improvements and fixes (#747) (#759)
## v1.7.0 (June 4, 2024)
### Added
- Added `sample_backward_noise_terms_with_state` for creating backward pass
sampling schemes that depend on the current primal state. (#742) (Thanks
@arthur-brigatto)
### Fixed
- Fixed error message when `publication_plot` has non-finite data (#738)
### Other
- Updated the logo constructor (#730)
## v1.6.7 (February 1, 2024)
### Fixed
- Fixed non-constant state dimension in the `MSPFormat` reader (#695)
- Fixed SimulatorSamplingScheme for deterministic nodes (#710)
- Fixed line search in BFGS (#711)
- Fixed handling of `NEARLY_FEASIBLE_POINT` status (#726)
### Other
- Documentation improvements (#692) (#694) (#706) (#716) (#727)
- Updated to StochOptFormat v1.0 (#705)
- Added an experimental `OuterApproximation` algorithm (#709)
- Updated `.gitignore` (#717)
- Added code for MDP paper (#720) (#721)
- Added Google analytics (#723)
## v1.6.6 (September 29, 2023)
### Other
- Updated [Example: two-stage newsvendor](@ref) tutorial (#689)
- Added a warning for people using [`SDDP.Statistical`](@ref) (#687)
## v1.6.5 (September 25, 2023)
### Fixed
- Fixed duplicate nodes in [`MarkovianGraph`](@ref) (#681)
### Other
- Updated tutorials (#677) (#678) (#682) (#683)
- Fixed documentation preview (#679)
## v1.6.4 (September 23, 2023)
### Fixed
- Fixed error for invalid `log_frequency` values (#665)
- Fixed objective sense in [`deterministic_equivalent`](@ref) (#673)
### Other
- Documentation updates (#658) (#666) (#671)
- Switch to GitHub action for deploying docs (#668) (#670)
- Update to Documenter@1 (#669)
## v1.6.3 (September 8, 2023)
### Fixed
- Fixed default stopping rule with `iteration_limit` or `time_limit` set (#662)
### Other
- Various documentation improvements (#651) (#657) (#659) (#660)
## v1.6.2 (August 24, 2023)
### Fixed
- `MSPFormat` now detect and exploit stagewise independent lattices (#653)
- Fixed `set_optimizer` for models read from file (#654)
### Other
- Fixed typo in `pglib_opf.jl` (#647)
- Fixed documentation build and added color (#652)
## v1.6.1 (July 20, 2023)
### Fixed
- Fixed bugs in `MSPFormat` reader (#638) (#639)
### Other
- Clarified `OutOfSampleMonteCarlo` docstring (#643)
## v1.6.0 (July 3, 2023)
### Added
- Added [`RegularizedForwardPass`](@ref) (#624)
- Added [`FirstStageStoppingRule`](@ref) (#634)
### Other
- Removed an unbound type parameter (#632)
- Fixed typo in docstring (#633)
- Added [Here-and-now and hazard-decision](@ref) tutorial (#635)
## v1.5.1 (June 30, 2023)
This release contains a number of minor code changes, but it has a large impact
on the content that is printed to screen. In particular, we now log
periodically, instead of each iteration, and a "good" stopping rule is used as
the default if none are specified. Try using `SDDP.train(model)` to see the
difference.
### Other
- Fixed various typos in the documentation (#617)
- Fixed printing test after changes in JuMP (#618)
- Set [`SimulationStoppingRule`](@ref) as the default stopping rule (#619)
- Changed the default logging frequency. Pass `log_every_seconds = 0.0` to
[`train`](@ref) to revert to the old behavior. (#620)
- Added example usage with Distributions.jl (@slwu89) (#622)
- Removed the numerical issue `@warn` (#627)
- Improved the quality of docstrings (#630)
## v1.5.0 (May 14, 2023)
### Added
- Added the ability to use a different model for the forward pass. This is a
novel feature that lets you train better policies when the model is
non-convex or does not have a well-defined dual. See the [Alternative forward models](@ref)
tutorial in which we train convex and non-convex formulations of the optimal
power flow problem. (#611)
### Other
- Updated missing `changelog` entries (#608)
- Removed global variables (#610)
- Converted the `Options` struct to keyword arguments. This struct was a
private implementation detail, but the change is breaking if you developed an
extension to SDDP that touched these internals. (#612)
- Fixed some typos (#613)
## v1.4.0 (May 8, 2023)
### Added
- Added [`SDDP.SimulationStoppingRule`](@ref) (#598)
- Added `sampling_scheme` argument to [`SDDP.write_to_file`](@ref) (#607)
### Fixed
- Fixed parsing of some `MSPFormat` files (#602) (#604)
- Fixed printing in header (#605)
## v1.3.0 (May 3, 2023)
### Added
- Added experimental support for `SDDP.MSPFormat.read_from_file` (#593)
### Other
- Updated to StochOptFormat v0.3 (#600)
## v1.2.1 (May 1, 2023)
### Fixed
- Fixed `log_every_seconds` (#597)
## v1.2.0 (May 1, 2023)
### Added
- Added [`SDDP.SimulatorSamplingScheme`](@ref) (#594)
- Added `log_every_seconds` argument to [`SDDP.train`](@ref) (#595)
### Other
- Tweaked how the log is printed (#588)
- Updated to StochOptFormat v0.2 (#592)
## v1.1.4 (April 10, 2023)
### Fixed
- Logs are now flushed every iteration (#584)
### Other
- Added docstrings to various functions (#581)
- Minor documentation updates (#580)
- Clarified integrality documentation (#582)
- Updated the README (#585)
- Number of numerical issues is now printed to the log (#586)
## v1.1.3 (April 2, 2023)
### Other
- Fixed typo in [Example: deterministic to stochastic](@ref) tutorial (#578)
- Fixed typo in documentation of [`SDDP.simulate`](@ref) (#577)
## v1.1.2 (March 18, 2023)
### Other
- Added [Example: deterministic to stochastic](@ref) tutorial (#572)
## v1.1.1 (March 16, 2023)
### Other
- Fixed email in `Project.toml`
- Added notebook to documentation tutorials (#571)
## v1.1.0 (January 12, 2023)
### Added
- Added the `node_name_parser` argument to [`SDDP.write_cuts_to_file`](@ref)
and added the option to skip nodes in [`SDDP.read_cuts_from_file`](@ref)
(#565)
## v1.0.0 (January 3, 2023)
Although we're bumping MAJOR version, this is a non-breaking release. Going
forward:
- New features will bump the MINOR version
- Bug fixes, maintenance, and documentation updates will bump the PATCH
version
- We will support only the Long Term Support (currently v1.6.7) and the latest
patch (currently v1.8.4) releases of Julia. Updates to the LTS version will
bump the MINOR version
- Updates to the compat bounds of package dependencies will bump the PATCH
version.
We do not intend any breaking changes to the public API, which would require a
new MAJOR release. The public API is everything defined in the documentation.
Anything not in the documentation is considered private and may change in any
PATCH release.
### Added
- Added `num_nodes` argument to [`SDDP.UnicyclicGraph`](@ref) (#562)
- Added support for passing an optimizer to [`SDDP.Asynchronous`](@ref) (#545)
### Other
- Updated [Plotting tools](@ref) to use live plots (#563)
- Added [vale](https://vale.sh) as a linter (#565)
- Improved documentation for initializing a parallel scheme (#566)
## v0.4.9 (January 3, 2023)
### Added
- Added [`SDDP.UnicyclicGraph`](@ref) (#556)
### Other
- Added tutorial on Markov Decision Processes (#556)
- Added two-stage newsvendor tutorial (#557)
- Refactored the layout of the documentation (#554) (#555)
- Updated copyright to 2023 (#558)
- Fixed errors in the documentation (#561)
## v0.4.8 (December 19, 2022)
### Added
- Added `terminate_on_cycle` option to [`SDDP.Historical`](@ref) (#549)
- Added `include_last_node` option to [`SDDP.DefaultForwardPass`](@ref) (#547)
### Fixed
- Reverted then fixed (#531) because it failed to account for problems with
integer variables (#546) (#551)
## v0.4.7 (December 17, 2022)
### Added
- Added `initial_node` support to `InSampleMonteCarlo` and
`OutOfSampleMonteCarlo` (#535)
### Fixed
- Rethrow `InterruptException` when solver is interrupted (#534)
- Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
- Fixed re-using the `dashboard = true` option between solves (#538)
- Fixed bug when no `@stageobjective` is set (now defaults to `0.0`) (#539)
- Fixed errors thrown when invalid inputs are provided to `add_objective_state`
(#540)
### Other
- Drop support for Julia versions prior to 1.6 (#533)
- Updated versions of dependencies (#522) (#533)
- Switched to HiGHS in the documentation and tests (#533)
- Added license headers (#519)
- Fixed link in air conditioning example (#521) (Thanks @conema)
- Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
- Added this change log (#536)
- Cuts are now written to `model.cuts.json` when numerical instability is
discovered. This can aid debugging because it allows to you reload the cuts
as of the iteration that caused the numerical issue (#537)
## v0.4.6 (March 25, 2022)
### Other
- Updated to JuMP v1.0 (#517)
## v0.4.5 (March 9, 2022)
### Fixed
- Fixed issue with `set_silent` in a subproblem (#510)
### Other
- Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)
- Update to JuMP v0.23 (#514)
- Added auto-regressive tutorial (#507)
## v0.4.4 (December 11, 2021)
### Added
- Added `BanditDuality` (#471)
- Added benchmark scripts (#475) (#476) (#490)
- `write_cuts_to_file` now saves visited states (#468)
### Fixed
- Fixed `BoundStalling` in a deterministic policy (#470) (#474)
- Fixed magnitude warning with `zero` coefficients (#483)
### Other
- Improvements to LagrangianDuality (#481) (#482) (#487)
- Improvements to `StrengthenedConicDuality` (#486)
- Switch to functional form for the tests (#478)
- Fixed typos (#472) (Thanks @vfdev-5)
- Update to JuMP v0.22 (#498)
## v0.4.3 (August 31, 2021)
### Added
- Added biobjective solver (#462)
- Added `forward_pass_callback` (#466)
### Other
- Update tutorials and documentation (#459) (#465)
- Organize how paper materials are stored (#464)
## v0.4.2 (August 24, 2021)
### Fixed
- Fixed a bug in Lagrangian duality (#457)
## v0.4.1 (August 23, 2021)
### Other
- Minor changes to our implementation of `LagrangianDuality` (#454) (#455)
## v0.4.0 (August 17, 2021)
### Breaking
- A large refactoring for how we handle stochastic integer programs. This added
support for things like [`SDDP.ContinuousConicDuality`](@ref) and
[`SDDP.LagrangianDuality`](@ref). It was breaking because we removed the
`integrality_handler` argument to `PolicyGraph`. (#449) (#453)
### Other
- Documentation improvements (#447) (#448) (#450)
## v0.3.17 (July 6, 2021)
### Added
- Added [`SDDP.PSRSamplingScheme`](@ref) (#426)
### Other
- Display more model attributes (#438)
- Documentation improvements (#433) (#437) (#439)
## v0.3.16 (June 17, 2021)
### Added
- Added [`SDDP.RiskAdjustedForwardPass`](@ref) (#413)
- Allow [`SDDP.Historical`](@ref) to sample sequentially (#420)
### Other
- Update risk measure docstrings (#418)
## v0.3.15 (June 1, 2021)
### Added
- Added [`SDDP.StoppingChain`](@ref)
### Fixed
- Fixed scoping bug in [`SDDP.@stageobjective`](@ref) (#407)
- Fixed a bug when the initial point is infeasible (#411)
- Set subproblems to silent by default (#409)
### Other
- Add JuliaFormatter (#412)
- Documentation improvements (#406) (#408)
## v0.3.14 (March 30, 2021)
### Fixed
- Fixed `O(N^2)` behavior in `get_same_children` (#393)
## v0.3.13 (March 27, 2021)
### Fixed
- Fixed bug in `print.jl`
- Fixed compat of `Reexport` (#388)
## v0.3.12 (March 22, 2021)
### Added
- Added problem statistics to header (#385) (#386)
### Fixed
- Fixed subtypes in `visualization` (#384)
## v0.3.11 (March 22, 2021)
### Fixed
- Fixed constructor in direct mode (#383)
### Other
- Fix documentation (#379)
## v0.3.10 (February 23, 2021)
### Fixed
- Fixed `seriescolor` in publication plot (#376)
## v0.3.9 (February 20, 2021)
### Added
- Add option to simulate with different incoming state (#372)
- Added warning for cuts with high dynamic range (#373)
### Fixed
- Fixed `seriesalpha` in publication plot (#375)
## v0.3.8 (January 19, 2021)
### Other
- Documentation improvements (#367) (#369) (#370)
## v0.3.7 (January 8, 2021)
### Other
- Documentation improvements (#362) (#363) (#365) (#366)
- Bump copyright (#364)
## v0.3.6 (December 17, 2020)
### Other
- Fix typos (#358)
- Collapse navigation bar in docs (#359)
- Update `TagBot.yml` (#361)
## v0.3.5 (November 18, 2020)
### Other
- Update citations (#348)
- Switch to GitHub actions (#355)
## v0.3.4 (August 25, 2020)
### Added
- Added non-uniform distributionally robust risk measure (#328)
- Added numerical recovery functions (#330)
- Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
- Added entropic risk measure (#347)
### Other
- Documentation improvements (#327) (#333) (#339) (#340)
## v0.3.3 (June 19, 2020)
### Added
- Added asynchronous support for price and belief states (#325)
- Added `ForwardPass` plug-in system (#320)
### Fixed
- Fix check for probabilities in Markovian graph (#322)
## v0.3.2 (April 6, 2020)
### Added
- Added `log_frequency` argument to [`SDDP.train`](@ref) (#307)
### Other
- Improve error message in deterministic equivalent (#312)
- Update to `RecipesBase` 1.0 (#313)
## v0.3.1 (February 26, 2020)
### Fixed
- Fixed filename in `integrality_handlers.jl` (#304)
## v0.3.0 (February 20, 2020)
### Breaking
- Breaking changes to update to JuMP v0.21 (#300).
## v0.2.4 (February 7, 2020)
### Added
- Added a counter for the number of total subproblem solves (#301)
### Other
- Update formatter (#298)
- Added tests (#299)
## v0.2.3 (January 24, 2020)
### Added
- Added support for convex risk measures (#294)
### Fixed
- Fixed bug when subproblem is infeasible (#296)
- Fixed bug in deterministic equivalent (#297)
### Other
- Added example from IJOC paper (#293)
## v0.2.2 (January 10, 2020)
### Fixed
- Fixed flakey time limit in tests (#291)
### Other
- Removed MathOptFormat.jl (#289)
- Update copyright (#290)
## v0.2.1 (December 19, 2019)
### Added
- Added support for approximating a Markov lattice (#282) (#285)
- Add tools for visualizing the value function (#272) (#286)
- Write `.mof.json` files on error (#284)
### Other
- Improve documentation (#281) (#283)
- Update tests for Julia 1.3 (#287)
## v0.2.0 (December 16, 2019)
This version added the asynchronous parallel implementation with a few minor
breaking changes in how we iterated internally. It didn't break basic
user-facing models, only code that implemented some of the extension
features. It probably could have been a v1.1 release.
### Added
- Added asynchronous parallel implementation (#277)
- Added roll-out algorithm for cyclic graphs (#279)
### Other
- Improved error messages in `PolicyGraph` (#271)
- Added JuliaFormatter (#273) (#276)
- Fixed compat bounds (#274) (#278)
- Added documentation for simulating non-standard graphs (#280)
## v0.1.0 (October 17, 2019)
A complete rewrite of SDDP.jl based on the policy graph framework. This was
essentially a new package. It has minimal code in common with the previous
implementation.
Development started on September 28, 2018 in [Kokako.jl](https://github.com/odow/Kokako.jl),
and the code was merged into `SDDP.jl` on March 14, 2019.
The pull request [SDDP.jl#180](https://github.com/odow/SDDP.jl/pull/180) lists
the 29 issues that the rewrite closed.
## v0.0.1 (April 18, 2018)
Initial release. Development had been underway since January 22, 2016 in the
[StochDualDynamicProgram.jl](https://github.com/odow/StochDualDynamicProgram.jl)
repository. The last development commit there was April 5, 2017. Work then
continued in this repository for a year before the first tagged release.
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 5378 | ```@meta
CurrentModule = SDDP
```
```@raw html
<img src="assets/logo_without_text.svg" alt="logo" width="150px"/>
```
# Introduction
[](https://github.com/odow/SDDP.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/odow/SDDP.jl)
Welcome to [SDDP.jl](https://github.com/odow/SDDP.jl), a package for solving large
convex multistage stochastic programming problems using stochastic dual dynamic
programming.
SDDP.jl is built on [JuMP](https://jump.dev), so it supports a number of
open-source and commercial solvers, making it a powerful and flexible tool for
stochastic optimization.
The implementation of the stochastic dual dynamic programming algorithm in
SDDP.jl is state of the art, and it supports a number of advanced features that
are not commonly found in other implementations, including support for:
* infinite horizon problems
* convex risk measures
* mixed-integer state and control variables
* partially observable stochastic processes.
## Installation
Install `SDDP.jl` as follows:
```julia
julia> import Pkg
julia> Pkg.add("SDDP")
```
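As a quick check that the installation works, the following sketch builds,
trains, and simulates a small hydro-thermal scheduling policy. (This is an
illustrative sketch only; the data are made up, and it assumes that the HiGHS
optimizer is installed. Any JuMP-compatible linear solver works.)
```julia
using SDDP, HiGHS

model = SDDP.LinearPolicyGraph(
    stages = 3,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # Volume of water in the reservoir at the start and end of the stage.
    @variable(sp, 0 <= volume <= 200, SDDP.State, initial_value = 200)
    # Hydro and thermal generation, and spilled water.
    @variable(sp, hydro >= 0)
    @variable(sp, thermal >= 0)
    @variable(sp, spill >= 0)
    # Random inflow into the reservoir.
    @variable(sp, inflow)
    @constraint(sp, volume.out == volume.in + inflow - hydro - spill)
    # Demand of 150 units must be met in every stage.
    @constraint(sp, hydro + thermal == 150)
    SDDP.parameterize(sp, [0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω
        JuMP.fix(inflow, ω)
    end
    # Thermal generation costs 50 per unit; hydro generation is free.
    @stageobjective(sp, 50 * thermal)
end

SDDP.train(model; iteration_limit = 10)
simulations = SDDP.simulate(model, 100, [:volume, :thermal])
```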
## License
`SDDP.jl` is licensed under the [MPL 2.0 license](https://github.com/odow/SDDP.jl/blob/master/LICENSE.md).
## Resources for getting started
There are a few ways to get started with SDDP.jl:
* Become familiar with JuMP by reading the [JuMP documentation](http://jump.dev/JuMP.jl/stable/)
* Read the introductory tutorial [An introduction to SDDP.jl](@ref)
* Browse some of the examples, such as [Example: deterministic to stochastic](@ref)
## Getting help
If you need help, please [open a GitHub issue](https://github.com/odow/SDDP.jl/issues/new).
## How the documentation is structured
Having a high-level overview of how this documentation is structured will help
you know where to look for certain things.
* **Tutorials** contains step-by-step explanations of how to use SDDP.jl. Once
you've got `SDDP.jl` installed, start by reading [An introduction to SDDP.jl](@ref).
* **Guides** contains "how-to" snippets that demonstrate specific topics within
SDDP.jl. A good one to get started on is [Debug a model](@ref).
* **Explanation** contains step-by-step explanations of the theory and
algorithms that underpin SDDP.jl. If you want a basic understanding of the
algorithm behind SDDP.jl, start with [Introductory theory](@ref).
* **Examples** contain worked examples of various problems solved using SDDP.jl.
A good one to get started on is the [Hydro-thermal scheduling](@ref) problem.
In particular, it shows how to solve an infinite horizon problem.
* The **API Reference** contains a complete list of the functions you can use in
SDDP.jl. Look here if you want to know how to use a particular function.
## Citing `SDDP.jl`
If you use `SDDP.jl`, we ask that you please cite the following:
```
@article{dowson_sddp.jl,
title = {{SDDP}.jl: a {Julia} package for stochastic dual dynamic programming},
journal = {INFORMS Journal on Computing},
author = {Dowson, O. and Kapelevich, L.},
doi = {https://doi.org/10.1287/ijoc.2020.0987},
year = {2021},
volume = {33},
issue = {1},
pages = {27-33},
}
```
Here is an earlier [preprint](http://www.optimization-online.org/DB_FILE/2017/12/6388.pdf).
If you use the infinite horizon functionality, we ask that you please cite the
following:
```
@article{dowson_policy_graph,
title = {The policy graph decomposition of multistage stochastic optimization problems},
doi = {https://doi.org/10.1002/net.21932},
journal = {Networks},
author = {Dowson, O.},
volume = {76},
issue = {1},
pages = {3-23},
year = {2020}
}
```
Here is an earlier [preprint](http://www.optimization-online.org/DB_HTML/2018/11/6914.html).
If you use the partially observable functionality, we ask that you please cite
the following:
```
@article{dowson_pomsp,
title = {Partially observable multistage stochastic programming},
doi = {https://doi.org/10.1016/j.orl.2020.06.005},
journal = {Operations Research Letters},
author = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.},
volume = {48},
issue = {4},
pages = {505-512},
year = {2020}
}
```
Here is an earlier [preprint](http://www.optimization-online.org/DB_HTML/2019/03/7141.html).
If you use the objective state functionality, we ask that you please cite the
following:
```
@article{downward_objective,
title = {Stochastic dual dynamic programming with stagewise-dependent objective uncertainty},
doi = {https://doi.org/10.1016/j.orl.2019.11.002},
journal = {Operations Research Letters},
author = {Downward, A. and Dowson, O. and Baucke, R.},
volume = {48},
issue = {1},
pages = {33-39},
year = {2020}
}
```
Here is an earlier [preprint](http://www.optimization-online.org/DB_FILE/2018/02/6454.pdf).
If you use the entropic risk measure, we ask that you please cite the following:
```
@article{dowson_entropic,
title = {Incorporating convex risk measures into multistage stochastic programming algorithms},
doi = {https://doi.org/10.1007/s10479-022-04977-w},
journal = {Annals of Operations Research},
author = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.},
year = {2022},
}
```
Here is an earlier [preprint](http://www.optimization-online.org/DB_HTML/2020/08/7984.html).
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 3216 | # Access variables from a previous stage
A common question is "how do I use a variable from a previous stage in a
constraint?"
!!! info
If you want to use a variable from a previous stage, it must be a state
variable.
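As a minimal sketch (the bound, constraint, and objective below are purely
illustrative), declaring `x` with `SDDP.State` gives each subproblem an
incoming value `x.in` (the value of `x` chosen in the previous stage, or
`initial_value` in the first stage) and an outgoing value `x.out` that is
passed to the next stage:
```julia
using SDDP, HiGHS

SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # `x.in` is the decision from the previous stage; `x.out` is passed on.
    @variable(sp, x >= 0, SDDP.State, initial_value = 1)
    @constraint(sp, x.out <= 2 * x.in)
    @stageobjective(sp, x.out)
end
```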
Here are some examples:
## Access a first-stage decision in a future stage
This is often useful if your first-stage decisions are capacity-expansion type
decisions (e.g., you choose first how much capacity to add, but because it takes
time to build, it only shows up in some future stage).
```@repl
using SDDP, HiGHS
SDDP.LinearPolicyGraph(
stages = 10,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do sp, t
# Capacity of the generator. Decided in the first stage.
@variable(sp, capacity >= 0, SDDP.State, initial_value = 0)
# Quantity of water stored.
@variable(sp, reservoir >= 0, SDDP.State, initial_value = 0)
# Quantity of water to use for electricity generation in current stage.
@variable(sp, generation >= 0)
if t == 1
# There are no constraints in the first stage, but we need to push the
# initial value of the reservoir to the next stage.
@constraint(sp, reservoir.out == reservoir.in)
# Since we're maximizing profit, subtract cost of capacity.
@stageobjective(sp, -capacity.out)
else
# Water balance constraint.
@constraint(sp, balance, reservoir.out - reservoir.in + generation == 0)
# Generation limit.
@constraint(sp, generation <= capacity.in)
# Push capacity to the next stage.
@constraint(sp, capacity.out == capacity.in)
# Maximize generation.
@stageobjective(sp, generation)
# Random inflow in balance constraint.
SDDP.parameterize(sp, rand(4)) do w
set_normalized_rhs(balance, w)
end
end
end
```
## Access a decision from N stages ago
This is often useful if you have an inventory problem with a lead-time on orders.
```@repl
using SDDP, HiGHS
SDDP.LinearPolicyGraph(
stages = 10,
sense = :Max,
upper_bound = 100,
optimizer = HiGHS.Optimizer,
) do sp, t
# Current inventory on hand.
@variable(sp, inventory >= 0, SDDP.State, initial_value = 0)
# Inventory pipeline.
# pipeline[1].out are orders placed today.
# pipeline[5].in are orders that arrive today and can be added to the
# current inventory.
# Stock moves up one slot in the pipeline each stage.
@variable(sp, pipeline[1:5], SDDP.State, initial_value = 0)
# The number of units to order today.
@variable(sp, 0 <= buy <= 10)
# The number of units to sell today.
@variable(sp, sell >= 0)
# Buy orders get placed in the pipeline.
@constraint(sp, pipeline[1].out == buy)
# Stock moves up one slot in the pipeline each stage.
@constraint(sp, [i=2:5], pipeline[i].out == pipeline[i-1].in)
# Stock balance constraint.
@constraint(sp, inventory.out == inventory.in - sell + pipeline[5].in)
# Maximize quantity of sold items.
@stageobjective(sp, sell)
end
```
!!! warning
You must initialize the same number of state variables in every stage, even
if they are not used in that stage.
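For example, in the sketch below (with illustrative data only), the state
variable `y` is only used in the second stage, but it must still be declared,
and carried through, in the first stage:
```julia
using SDDP, HiGHS

SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 0)
    # `y` is declared in every stage, even though only stage 2 uses it.
    @variable(sp, y >= 0, SDDP.State, initial_value = 1)
    if t == 1
        # Carry `y` through to the next stage unchanged.
        @constraint(sp, y.out == y.in)
        @stageobjective(sp, x.out)
    else
        @constraint(sp, x.out == x.in + y.in)
        @stageobjective(sp, x.out + y.out)
    end
end
```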
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 1157 | # Add a multi-dimensional state variable
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
Just like normal JuMP variables, it is possible to create containers of state
variables.
```jldoctest; filter=r"A policy graph.+"s
julia> model = SDDP.LinearPolicyGraph(
stages=1, lower_bound = 0, optimizer = HiGHS.Optimizer
) do subproblem, t
# A scalar state variable.
@variable(subproblem, x >= 0, SDDP.State, initial_value = 0)
println("Lower bound of outgoing x is: ", JuMP.lower_bound(x.out))
# A vector of state variables.
@variable(subproblem, y[i = 1:2] >= i, SDDP.State, initial_value = i)
println("Lower bound of outgoing y[1] is: ", JuMP.lower_bound(y[1].out))
# A JuMP.Containers.DenseAxisArray of state variables.
@variable(subproblem,
z[i = 3:4, j = [:A, :B]] >= i, SDDP.State, initial_value = i)
println("Lower bound of outgoing z[3, :B] is: ", JuMP.lower_bound(z[3, :B].out))
end;
Lower bound of outgoing x is: 0.0
Lower bound of outgoing y[1] is: 1.0
Lower bound of outgoing z[3, :B] is: 3.0
```
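Once created, the elements of a container of state variables are used like any
other state variable, via their `.in` and `.out` fields. The following sketch
(the constraints are illustrative only) shows them appearing in constraints:
```julia
using SDDP, HiGHS

model = SDDP.LinearPolicyGraph(
    stages = 2, lower_bound = 0, optimizer = HiGHS.Optimizer
) do subproblem, t
    @variable(subproblem, x >= 0, SDDP.State, initial_value = 0)
    @variable(subproblem, y[i = 1:2] >= i, SDDP.State, initial_value = i)
    # Each element of `y` has its own `.in` and `.out` fields.
    @constraint(subproblem, x.out >= sum(y[i].in for i in 1:2))
    @constraint(subproblem, [i = 1:2], y[i].out == y[i].in)
    @stageobjective(subproblem, x.out)
end
```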
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 5109 | # Add a risk measure
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
## Training a risk-averse model
`SDDP.jl` supports a variety of risk measures. Two common ones are
[`SDDP.Expectation`](@ref) and [`SDDP.WorstCase`](@ref). Let's see how to
train a policy using them. There are three possible ways.
If the same risk measure is used at every node in the policy graph, we can just
pass an instance of one of the risk measures to the `risk_measure` keyword
argument of the [`SDDP.train`](@ref) function.
```julia
SDDP.train(
model,
risk_measure = SDDP.WorstCase(),
iteration_limit = 10
)
```
However, if you want different risk measures at different nodes, there are two
options. First, you can pass `risk_measure` a dictionary of risk measures,
with one entry for each node. The keys of the dictionary are the indices of the
nodes.
```julia
SDDP.train(
model,
risk_measure = Dict(
1 => SDDP.Expectation(),
2 => SDDP.WorstCase()
),
iteration_limit = 10
)
```
An alternative method is to pass `risk_measure` a function that takes one
argument, the index of a node, and returns an instance of a risk measure:
```julia
SDDP.train(
model,
risk_measure = (node_index) -> begin
if node_index == 1
return SDDP.Expectation()
else
return SDDP.WorstCase()
end
end,
iteration_limit = 10
)
```
!!! note
If you simulate the policy, the simulated value is the risk-neutral value of
the policy.
## Risk measures
To illustrate the risk-measures included in `SDDP.jl`, we consider a discrete
random variable with four outcomes.
The random variable is supported on the values 1, 2, 3, and 4:
```@repl intermediate_risk
noise_supports = [1, 2, 3, 4]
```
The associated probability of each outcome is as follows:
```@repl intermediate_risk
nominal_probability = [0.1, 0.2, 0.3, 0.4]
```
With each outcome ω, the agent observes a cost `Z(ω)`:
```@repl intermediate_risk
cost_realizations = [5.0, 4.0, 6.0, 2.0]
```
We assume that we are minimizing:
```@repl intermediate_risk
is_minimization = true
```
Finally, we create a vector that will be used to store the risk-adjusted
probabilities:
```@repl intermediate_risk
risk_adjusted_probability = zeros(4)
```
### Expectation
```@docs
SDDP.Expectation
```
```@repl intermediate_risk
using SDDP
SDDP.adjust_probability(
SDDP.Expectation(),
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
[`SDDP.Expectation`](@ref) is the default risk measure in `SDDP.jl`.
### Worst-case
```@docs
SDDP.WorstCase
```
```@repl intermediate_risk
SDDP.adjust_probability(
SDDP.WorstCase(),
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
### Average value at risk (AV@R)
```@docs
SDDP.AVaR
```
```@repl intermediate_risk
SDDP.adjust_probability(
SDDP.AVaR(0.5),
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
### Convex combination of risk measures
Using the axioms of coherent risk measures, it is easy to show that any convex
combination of coherent risk measures is also a coherent risk measure. Convex
combinations of risk measures can be created directly:
```@repl intermediate_risk
cvx_comb_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()
SDDP.adjust_probability(
cvx_comb_measure,
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
As a special case, the [`SDDP.EAVaR`](@ref) risk-measure is a convex
combination of [`SDDP.Expectation`](@ref) and [`SDDP.AVaR`](@ref):
```@repl intermediate_risk
SDDP.EAVaR(beta=0.25, lambda=0.4)
```
```@docs
SDDP.EAVaR
```
### Distributionally robust
`SDDP.jl` supports two types of distributionally robust risk measures: the
modified Χ² method of Philpott et al. (2018), and a method based on the
Wasserstein distance metric.
#### Modified Chi-squared
```@docs
SDDP.ModifiedChiSquared
```
```@repl intermediate_risk
SDDP.adjust_probability(
SDDP.ModifiedChiSquared(0.5),
risk_adjusted_probability,
[0.25, 0.25, 0.25, 0.25],
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
#### Wasserstein
```@docs
SDDP.Wasserstein
```
```@repl intermediate_risk
import HiGHS
SDDP.adjust_probability(
SDDP.Wasserstein(HiGHS.Optimizer; alpha=0.5) do x, y
return abs(x - y)
end,
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
### Entropic
```@docs
SDDP.Entropic
```
```@repl intermediate_risk
SDDP.adjust_probability(
SDDP.Entropic(0.1),
risk_adjusted_probability,
nominal_probability,
noise_supports,
cost_realizations,
is_minimization
)
risk_adjusted_probability
```
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 4166 | ```@meta
CurrentModule = SDDP
```
# Integrality
There's nothing special about binary and integer variables in SDDP.jl. Your
models may contain a mix of binary, integer, or continuous state and control
variables. Use the standard JuMP syntax to add binary or integer variables.
For example:
```@example
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x <= 100, Int, SDDP.State, initial_value = 0)
@variable(sp, 0 <= u <= 200, integer = true)
@variable(sp, v >= 0)
@constraint(sp, x.out == x.in + u + v - 150)
@stageobjective(sp, 2u + 6v + x.out)
end
```
If you want finer control over how SDDP.jl computes subgradients in the backward
pass, you can pass an [`SDDP.AbstractDualityHandler`](@ref) to the
`duality_handler` argument of [`SDDP.train`](@ref).
See [Duality handlers](@ref) for the list of handlers you can pass.
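As an illustrative sketch (it assumes that `SDDP.ContinuousConicDuality` is one of the
available handlers), the handler is passed like any other keyword argument of
[`SDDP.train`](@ref):
```julia
SDDP.train(
    model;
    duality_handler = SDDP.ContinuousConicDuality(),
    iteration_limit = 5,
)
```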
## Convergence
SDDP.jl cannot guarantee that it will find a globally optimal policy when some
of the variables are discrete. However, in most cases we find that it can still
find an integer feasible policy that performs well in simulation.
Moreover, when the number of nodes in the graph is large, or there is
uncertainty, we are not aware of another algorithm that _can_ claim to find a
globally optimal policy.
## Does SDDP.jl implement the SDDiP algorithm?
Most discussions of SDDiP in the literature confuse two unrelated things.
* First, how to compute dual variables
* Second, when the algorithm will converge to a globally optimal policy.
### Computing dual variables
The stochastic dual dynamic programming algorithm requires a subgradient of the
objective with respect to the incoming state variable.
One way to obtain a valid subgradient is to compute an optimal value of the
dual variable ``\lambda`` in the following subproblem:
```math
\begin{aligned}
V_i(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]\\
& x^\prime = T_i(\bar{x}, u, \omega) \\
& u \in U_i(\bar{x}, \omega) \\
& \bar{x} = x \quad [\lambda]
\end{aligned}
```
The easiest option is to relax integrality of the discrete variables to form a
linear program and then use linear programming duality to obtain the dual. But
we could also use Lagrangian duality without needing to relax the integrality
constraints.
To compute the Lagrangian dual ``\lambda``, we penalize ``\lambda^\top(\bar{x} - x)``
in the objective instead of enforcing the constraint:
```math
\begin{aligned}
\max\limits_{\lambda}\min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)] - \lambda^\top(\bar{x} - x)\\
& x^\prime = T_i(\bar{x}, u, \omega) \\
& u \in U_i(\bar{x}, \omega)
\end{aligned}
```
You can use Lagrangian duality in SDDP.jl by passing [`SDDP.LagrangianDuality`](@ref)
to the `duality_handler` argument of [`SDDP.train`](@ref).
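A minimal sketch of that call, assuming `model` is a policy graph that contains discrete
variables:
```julia
SDDP.train(model; duality_handler = SDDP.LagrangianDuality())
```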
Compared with linear programming duality, the Lagrangian problem is difficult
to solve because it requires the solution of many mixed-integer programs
instead of a single linear program. This is one reason why "SDDiP" has poor
performance.
### Convergence
The second part to SDDiP is a very tightly scoped claim: _if_ all of the state
variables are binary _and_ the algorithm uses Lagrangian duality to compute a
subgradient, _then_ it will converge to an optimal policy.
In many cases, papers claim to "do SDDiP," but they have state variables which
are not binary. In these cases, the algorithm is not guaranteed to converge to a
globally optimal policy.
One work-around that has been suggested is to discretize the state variables
into a set of binary state variables. However, this leads to a large number of
binary state variables, which is another reason why "SDDiP" has poor
performance.
In general, we recommend that you introduce integer variables into your model
without fear of the consequences, and that you treat the resulting policy as a
good heuristic, rather than an attempt to find a globally optimal policy.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 4074 | # Add multi-dimensional noise terms
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
Multi-dimensional stagewise-independent random variables can be created by
forming the Cartesian product of the random variables.
## A simple example
If the sample space and probabilities are given as vectors for each marginal
distribution, do:
```jldoctest; filter=[r"\(value = \d, coefficient = \d\)", r"1\-element.+"s]
julia> model = SDDP.LinearPolicyGraph(
stages = 3,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0.0)
Ω = [(value = v, coefficient = c) for v in [1, 2] for c in [3, 4, 5]]
P = [v * c for v in [0.5, 0.5] for c in [0.3, 0.5, 0.2]]
SDDP.parameterize(subproblem, Ω, P) do ω
JuMP.fix(x.out, ω.value)
@stageobjective(subproblem, ω.coefficient * x.out)
println("ω is: ", ω)
end
end;
julia> SDDP.simulate(model, 1);
ω is: (value = 1, coefficient = 4)
ω is: (value = 1, coefficient = 3)
ω is: (value = 2, coefficient = 4)
```
## Using Distributions.jl
For sampling multidimensional random variates, it can be useful to use the
`Product` type from [Distributions.jl](https://github.com/JuliaStats/Distributions.jl).
### Finite discrete distributions
There are several ways to go about this. If the sample space is finite, and
small enough that it makes sense to enumerate each element, we can use
`Base.product` and `Distributions.support` to generate the entire sample space
`Ω` from each of the marginal distributions.
We can then evaluate the density function of the product distribution on each
element of this space to get the vector of corresponding probabilities, `P`.
```jldoctest; filter=[r"\[\d+, \d+, \d+\]", r"1\-element.+"s]
julia> import Distributions
julia> distributions = [
Distributions.Binomial(10, 0.5),
Distributions.Bernoulli(0.5),
Distributions.truncated(Distributions.Poisson(5), 2, 8)
];
julia> supports = Distributions.support.(distributions);
julia> Ω = vec([collect(ω) for ω in Base.product(supports...)]);
julia> P = [Distributions.pdf(Distributions.Product(distributions), ω) for ω in Ω];
julia> model = SDDP.LinearPolicyGraph(
stages = 3,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0.0)
SDDP.parameterize(subproblem, Ω, P) do ω
JuMP.fix(x.out, ω[1])
@stageobjective(subproblem, ω[2] * x.out + ω[3])
println("ω is: ", ω)
end
end;
julia> SDDP.simulate(model, 1);
ω is: [10, 0, 3]
ω is: [0, 1, 6]
ω is: [6, 0, 5]
```
### Sampling
For sample spaces that are too large to explicitly represent, we can instead
approximate the distribution by a sample of `N` points. Now `Ω` is a sample from
the full sample space, and `P` is the uniform distribution over those points.
Points with higher density in the full sample space will appear more frequently
in `Ω`.
```jldoctest; filter=[r"\[\d+, \d+, \d+\]", r"1\-element.+"s]
julia> import Distributions
julia> distributions = Distributions.Product([
Distributions.Binomial(100, 0.5),
Distributions.Geometric(1 / 20),
Distributions.Poisson(20),
]);
julia> N = 100;
julia> Ω = [rand(distributions) for _ in 1:N];
julia> P = fill(1 / N, N);
julia> model = SDDP.LinearPolicyGraph(
stages = 3,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0.0)
SDDP.parameterize(subproblem, Ω, P) do ω
JuMP.fix(x.out, ω[1])
@stageobjective(subproblem, ω[2] * x.out + ω[3])
println("ω is: ", ω)
end
end;
julia> SDDP.simulate(model, 1);
ω is: [54, 38, 19]
ω is: [43, 3, 13]
ω is: [43, 4, 17]
```
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 1263 | # Add noise in the constraint matrix
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
`SDDP.jl` supports random coefficients in the constraint matrix through the
[`JuMP.set_normalized_coefficient`](https://jump.dev/JuMP.jl/stable/manual/constraints/#Modify-a-variable-coefficient)
function.
```jldoctest; filter=r" \: .+?1"
julia> model = SDDP.LinearPolicyGraph(
stages=3, lower_bound = 0, optimizer = HiGHS.Optimizer
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 0.0)
@constraint(subproblem, emissions, 1x.out <= 1)
SDDP.parameterize(subproblem, [0.2, 0.5, 1.0]) do ω
JuMP.set_normalized_coefficient(emissions, x.out, ω)
println(emissions)
end
@stageobjective(subproblem, -x.out)
end
A policy graph with 3 nodes.
Node indices: 1, 2, 3
julia> SDDP.simulate(model, 1);
emissions : x_out <= 1
emissions : 0.2 x_out <= 1
emissions : 0.5 x_out <= 1
```
!!! note
JuMP will normalize constraints by moving all variables to the left-hand
side. Thus, `@constraint(model, 0 <= 1 - x.out)` becomes `x.out <= 1`.
`JuMP.set_normalized_coefficient` sets the coefficient on the _normalized_
constraint.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 1662 | # Choose a stopping rule
The theory of SDDP tells us that the algorithm converges to an optimal policy
almost surely in a finite number of iterations. In practice, this number is very
large. Therefore, we need some way of pre-emptively terminating SDDP when the
solution is “good enough.” We call heuristics for pre-emptively terminating SDDP
_stopping rules_.
## Basic limits
The training of an SDDP policy can be terminated after a fixed number of
iterations using the `iteration_limit` keyword.
```julia
SDDP.train(model; iteration_limit = 10)
```
The training of an SDDP policy can be terminated after a fixed number of
seconds using the `time_limit` keyword.
```julia
SDDP.train(model; time_limit = 2.0)
```
## Stopping rules
In addition to the limits provided as keyword arguments, a variety of other
stopping rules are available. These can be passed to [`SDDP.train`](@ref)
as a vector to the `stopping_rules` keyword. Training stops if any of the rules
becomes active. To stop when all of the rules become active, use
[`SDDP.StoppingChain`](@ref). For example:
```julia
# Terminate if BoundStalling becomes true
SDDP.train(
model;
stopping_rules = [SDDP.BoundStalling(10, 1e-4)],
)
# Terminate if BoundStalling OR TimeLimit becomes true
SDDP.train(
model;
stopping_rules = [SDDP.BoundStalling(10, 1e-4), SDDP.TimeLimit(100.0)],
)
# Terminate if BoundStalling AND TimeLimit becomes true
SDDP.train(
model;
stopping_rules = [
SDDP.StoppingChain(SDDP.BoundStalling(10, 1e-4), SDDP.TimeLimit(100.0)),
],
)
```
See [Stopping rules](@ref api_stopping_rules) for a list of stopping rules
supported by SDDP.jl.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 837 | ```@meta
DocTestSetup = quote
using SDDP
end
```
# Create a belief state
`SDDP.jl` includes an implementation of the algorithm described in Dowson, O.,
Morton, D.P., & Pagnoncelli, B.K. (2020). Partially observable multistage
stochastic optimization. _Operations Research Letters_, 48(4), 505--512.
Given a [`SDDP.Graph`](@ref) object (see [Create a general policy graph](@ref)
for details), we can define the ambiguity partition using
[`SDDP.add_ambiguity_set`](@ref).
For example, first we create a Markovian graph:
```@repl belief_states
using SDDP
G = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])
```
Then we add an ambiguity set over the nodes in the each stage:
```@repl belief_states
for t in 1:2
SDDP.add_ambiguity_set(G, [(t, 1), (t, 2)])
end
```
This results in the graph:
```@repl belief_states
G
```
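As a sketch (not from the original docs), the modified graph can then be passed to
[`SDDP.PolicyGraph`](@ref) in the usual way; the subproblem body below is a trivial
placeholder:
```julia
using SDDP, HiGHS
model = SDDP.PolicyGraph(
    G,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, node
    @variable(sp, x >= 0, SDDP.State, initial_value = 0)
    @stageobjective(sp, x.out)
end
```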
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 5573 | # Create a general policy graph
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
SDDP.jl uses the concept of a _policy graph_ to formulate multistage stochastic
programming problems. For more details, read [An introduction to SDDP.jl](@ref)
or the paper Dowson, O., (2020). The policy graph decomposition of multistage
stochastic optimization problems. Networks, 76(1), 3-23. [doi](https://doi.org/10.1002/net.21932).
## Creating a [`SDDP.Graph`](@ref)
### Linear graphs
Linear policy graphs can be created using the [`SDDP.LinearGraph`](@ref)
function.
```jldoctest linear_graph
julia> graph = SDDP.LinearGraph(3)
Root
0
Nodes
1
2
3
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
```
We can add nodes to a graph using [`SDDP.add_node`](@ref) and edges using
[`SDDP.add_edge`](@ref).
```jldoctest linear_graph
julia> SDDP.add_node(graph, 4)
julia> SDDP.add_edge(graph, 3 => 4, 1.0)
julia> SDDP.add_edge(graph, 4 => 1, 0.9)
julia> graph
Root
0
Nodes
1
2
3
4
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 3 w.p. 1.0
3 => 4 w.p. 1.0
4 => 1 w.p. 0.9
```
Look! We just made a cyclic graph! SDDP.jl can solve infinite horizon problems.
The probability on the arc that completes a cycle should be interpreted as a
discount factor.
### [Unicyclic policy graphs](@id guide_unicyclic_policy_graph)
Linear policy graphs with a single infinite-horizon cycle can be created using
the [`SDDP.UnicyclicGraph`](@ref) function.
```jldoctest
julia> SDDP.UnicyclicGraph(0.95; num_nodes = 2)
Root
0
Nodes
1
2
Arcs
0 => 1 w.p. 1.0
1 => 2 w.p. 1.0
2 => 1 w.p. 0.95
```
### [Markovian policy graphs](@id guide_markovian_policy_graph)
Markovian policy graphs can be created using the [`SDDP.MarkovianGraph`](@ref)
function.
```jldoctest
julia> SDDP.MarkovianGraph(Matrix{Float64}[[1.0]', [0.4 0.6]])
Root
(0, 1)
Nodes
(1, 1)
(2, 1)
(2, 2)
Arcs
(0, 1) => (1, 1) w.p. 1.0
(1, 1) => (2, 1) w.p. 0.4
(1, 1) => (2, 2) w.p. 0.6
```
### General graphs
Arbitrarily complicated graphs can be constructed using [`SDDP.Graph`](@ref),
[`SDDP.add_node`](@ref) and [`SDDP.add_edge`](@ref). For example
```jldoctest
julia> graph = SDDP.Graph(:root_node)
Root
root_node
Nodes
{}
Arcs
{}
julia> SDDP.add_node(graph, :decision_node)
julia> SDDP.add_edge(graph, :root_node => :decision_node, 1.0)
julia> SDDP.add_edge(graph, :decision_node => :decision_node, 0.9)
julia> graph
Root
root_node
Nodes
decision_node
Arcs
root_node => decision_node w.p. 1.0
decision_node => decision_node w.p. 0.9
```
## Creating a policy graph
Once you have constructed an instance of [`SDDP.Graph`](@ref), you can create a
policy graph by passing the graph as the first argument.
```jldoctest
julia> graph = SDDP.Graph(
:root_node,
[:decision_node],
[
(:root_node => :decision_node, 1.0),
(:decision_node => :decision_node, 0.9)
]);
julia> model = SDDP.PolicyGraph(
graph,
lower_bound = 0,
optimizer = HiGHS.Optimizer) do subproblem, node
println("Called from node: ", node)
end;
Called from node: decision_node
```
### Special cases
There are two special cases which cover the majority of models in the
literature.
- [`SDDP.LinearPolicyGraph`](@ref) is a special case where a
[`SDDP.LinearGraph`](@ref) is passed as the first argument.
- [`SDDP.MarkovianPolicyGraph`](@ref) is a special case where a
[`SDDP.MarkovianGraph`](@ref) is passed as the first argument.
Note that the type of the names of all nodes (including the root node) must be
the same. In this case, they are `Symbol`s.
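As an illustrative sketch (not taken verbatim from the docs), the following two
constructions are assumed to build equivalent three-stage models:
```julia
using SDDP, HiGHS
# Special-case constructor.
model_a = SDDP.LinearPolicyGraph(
    stages = 3, lower_bound = 0.0, optimizer = HiGHS.Optimizer,
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 0)
    @stageobjective(sp, x.out)
end
# Equivalent construction with an explicit graph.
model_b = SDDP.PolicyGraph(
    SDDP.LinearGraph(3), lower_bound = 0.0, optimizer = HiGHS.Optimizer,
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 0)
    @stageobjective(sp, x.out)
end
```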
## Simulating non-standard policy graphs
If you simulate a policy graph with a node that has outgoing arcs that sum to less than one,
you will end up with simulations of different lengths. (The most common case is an infinite
horizon stochastic program, aka a linear policy graph with a single cycle.)
To simulate a fixed number of stages, use:
```julia
simulations = SDDP.simulate(
model,
1,
sampling_scheme = SDDP.InSampleMonteCarlo(
max_depth = 10,
terminate_on_dummy_leaf = false
)
)
```
Here, `max_depth` controls the number of stages, and `terminate_on_dummy_leaf = false` stops
us from terminating early.
See also [Simulate using a different sampling scheme](@ref).
## Creating a Markovian graph automatically
SDDP.jl can create a Markovian graph by automatically discretizing a one-dimensional
stochastic process and fitting a Markov chain.
To access this functionality, pass a function that takes no arguments and returns a
`Vector{Float64}` to [`SDDP.MarkovianGraph`](@ref). Two keyword arguments also need to be
provided: `budget` is the total number of nodes in the Markovian graph, and `scenarios` is
the number of realizations of the simulator function used to approximate the graph.
In some cases, `scenarios` may be too small to provide a reasonable fit of the stochastic
process. If so, SDDP.jl will automatically try to re-fit the Markov chain using more
scenarios.
```julia
function simulator()
scenario = zeros(5)
for i = 2:5
scenario[i] = scenario[i - 1] + rand() - 0.5
end
return scenario
end
model = SDDP.PolicyGraph(
SDDP.MarkovianGraph(simulator; budget = 10, scenarios = 100),
sense = :Max,
upper_bound = 1e3
) do subproblem, node
(stage, price) = node
@variable(subproblem, x >= 0, SDDP.State, initial_value = 1)
@constraint(subproblem, x.out <= x.in)
@stageobjective(subproblem, price * x.out)
end
```
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 3915 | # Debug a model
Building multistage stochastic programming models is hard. There are a lot of
different pieces that need to be put together, and we typically have no idea of
the optimal policy, so it can be hard (impossible?) to validate the solution.
That said, here are a few tips to verify and validate models built using
`SDDP.jl`.
## Writing subproblems to file
The first step to debug a model is to write out the subproblems to a file in
order to check that you are actually building what you think you are building.
This can be achieved with the help of two functions: [`SDDP.parameterize`](@ref)
and [`SDDP.write_subproblem_to_file`](@ref). The first lets you parameterize a
node given a noise, and the second writes out the subproblem to a file.
Here is an example model:
```jldoctest tutorial_eight
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x, SDDP.State, initial_value = 1)
@variable(subproblem, y)
@constraint(subproblem, balance, x.in == x.out + y)
SDDP.parameterize(subproblem, [1.1, 2.2]) do ω
@stageobjective(subproblem, ω * x.out)
JuMP.fix(y, ω)
end
end
# output
A policy graph with 2 nodes.
Node indices: 1, 2
```
Initially, `model` hasn't been parameterized with a concrete realizations of
`ω`. Let's do so now by parameterizing the first subproblem with `ω=1.1`.
```jldoctest tutorial_eight
julia> SDDP.parameterize(model[1], 1.1)
```
Easy! To parameterize the second stage problem, we would have used `model[2]`.
Now to write out the problem to a file. We'll get a few warnings because some
variables and constraints don't have names. They don't matter, so ignore them.
```jldoctest tutorial_eight; filter=r"MathOptFormat\ .+?MathOptFormat\.jl"
julia> SDDP.write_subproblem_to_file(model[1], "subproblem.lp")
julia> read("subproblem.lp") |> String |> print
minimize
obj: 1.1 x_out + 1 x4
subject to
balance: 1 x_in - 1 x_out - 1 y = 0
Bounds
x_in free
x_out free
y = 1.1
x4 >= 0
End
```
It is easy to see that `ω` has been set in the objective, and as the fixed value
for `y`.
It is also possible to parameterize the subproblems using values for `ω` that
are not in the original problem formulation.
```jldoctest tutorial_eight; filter=r"MathOptFormat\ .+?MathOptFormat\.jl"
julia> SDDP.parameterize(model[1], 3.3)
julia> SDDP.write_subproblem_to_file(model[1], "subproblem.lp")
julia> read("subproblem.lp") |> String |> print
minimize
obj: 3.3 x_out + 1 x4
subject to
balance: 1 x_in - 1 x_out - 1 y = 0
Bounds
x_in free
x_out free
y = 3.3
x4 >= 0
End
julia> rm("subproblem.lp") # Clean up.
```
## Solve the deterministic equivalent
Sometimes, it can be helpful to solve the deterministic equivalent of a
problem in order to obtain an exact solution to the problem. To obtain a JuMP
model that represents the deterministic equivalent, use [`SDDP.deterministic_equivalent`](@ref).
The returned model is just a normal JuMP model. Use JuMP to optimize it and
query the solution.
```jldoctest tutorial_eight; filter=r"5.4725[0]+[0-9]"
julia> det_equiv = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
A JuMP Model
Minimization problem with:
Variables: 24
Objective function type: AffExpr
`AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 10 constraints
`VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 8 constraints
`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 6 constraints
`VariableRef`-in-`MathOptInterface.LessThan{Float64}`: 4 constraints
Model mode: AUTOMATIC
CachingOptimizer state: EMPTY_OPTIMIZER
Solver name: HiGHS
julia> set_silent(det_equiv)
julia> optimize!(det_equiv)
julia> objective_value(det_equiv)
-5.472500000000001
```
!!! warning
The deterministic equivalent scales poorly with problem size. Only use this
on small problems!
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 6684 | # Improve computational performance
SDDP is a computationally intensive algorithm. Here are some suggestions for
how the computational performance can be improved.
## Numerical stability (again)
We've already discussed this in the [Numerical stability](@ref) section of
[Words of warning](@ref). But, it's so important that we're going to
say it again: improving the problem scaling is one of the best ways to improve
the numerical performance of your models.
## Solver selection
The majority of the solution time is spent inside the low-level solvers.
Choosing which solver (and the associated settings) correctly can lead to big
speed-ups.
- Choose a commercial solver.
Options include [CPLEX](https://github.com/jump-dev/CPLEX.jl),
[Gurobi](https://github.com/jump-dev/Gurobi.jl), and
[Xpress](https://github.com/jump-dev/Xpress.jl). Using free solvers such as
[CLP](https://github.com/jump-dev/Clp.jl) and
[HiGHS](https://github.com/jump-dev/HiGHS.jl) isn't a viable approach for large
problems.
- Try different solvers.
Even commercial solvers can have wildly different solution times. We've seen
models on which CPLEX was 50% faster than Gurobi, and vice versa.
- Experiment with different solver options.
Using the default settings is usually a good option. However, sometimes it can
pay to change these. In particular, forcing solvers to use the dual simplex
algorithm (e.g., [`Method=1` in Gurobi](https://www.gurobi.com/documentation/8.1/refman/method.html)
) is usually a performance win.
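As a sketch of the last point (it assumes Gurobi.jl and a Gurobi licence; the `"Method"`
option is Gurobi-specific), solver options can be attached to an optimizer with JuMP's
`optimizer_with_attributes` before passing it to the `optimizer` keyword of the policy
graph constructors:
```julia
using JuMP, Gurobi
# "Method" => 1 selects the dual simplex algorithm in Gurobi.
dual_simplex = JuMP.optimizer_with_attributes(Gurobi.Optimizer, "Method" => 1)
```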
## Single-cut vs. multi-cut
There are two competing ways that cuts can be created in SDDP: _single_-cut and
_multi_-cut. By default, `SDDP.jl` uses the _single-cut_ version of SDDP.
The performance of each method is problem-dependent. We recommend that you try
both in order to see which one performs better. In general, the _single_-cut
method works better when the number of realizations of the stagewise-independent
random variable is large, whereas the multi-cut method works better on small
problems. However, the multi-cut method can cause numerical stability problems,
particularly if used in conjunction with objective or belief state variables.
You can switch between the methods by passing the relevant flag to `cut_type` in
[`SDDP.train`](@ref).
```julia
SDDP.train(model; cut_type = SDDP.SINGLE_CUT)
SDDP.train(model; cut_type = SDDP.MULTI_CUT)
```
## Parallelism
SDDP.jl can take advantage of the parallel nature of modern computers to solve problems
across multiple cores.
!!! info
We highly recommend that you read the Julia manual's section on [parallel computing](https://docs.julialang.org/en/v1/manual/parallel-computing/).
You can start Julia from a command line with `N` processors using the `-p` flag:
```julia
julia -p N
```
Alternatively, you can use the `Distributed.jl` package:
```julia
using Distributed
Distributed.addprocs(N)
```
!!! warning
Workers **DON'T** inherit their parent's Pkg environment. Therefore, if you started
Julia with `--project=/path/to/environment` (or if you activated an environment from the
REPL), you will need to put the following at the top of your script:
```julia
using Distributed
@everywhere begin
import Pkg
Pkg.activate("/path/to/environment")
end
```
Currently SDDP.jl supports two parallel schemes, [`SDDP.Serial`](@ref) and
[`SDDP.Asynchronous`](@ref). Instances of these parallel schemes should be passed to the
`parallel_scheme` argument of [`SDDP.train`](@ref) and [`SDDP.simulate`](@ref).
```julia
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(
stages = 2, lower_bound = 0, optimizer = HiGHS.Optimizer
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@stageobjective(sp, x.out)
end
SDDP.train(model; iteration_limit = 10, parallel_scheme = SDDP.Asynchronous())
SDDP.simulate(model, 10; parallel_scheme = SDDP.Asynchronous())
```
There is a large overhead for using the asynchronous solver. Even if you choose asynchronous
mode, SDDP.jl will start in serial mode while the initialization takes place. Therefore, in
the log you will see that the initial iterations take place on the master thread (`Proc. ID
= 1`), and it is only after a while that the solve switches to full parallelism.
!!! info
Because of the large data communication requirements (all cuts have to be shared with
all other cores), the solution time will not scale linearly with the number of cores.
!!! info
Given the same number of iterations, the policy obtained from asynchronous mode will be
_worse_ than the policy obtained from serial mode. However, the asynchronous solver can
take significantly less time to compute the same number of iterations.
### Data movement
By default, data defined on the master process is not made available to the workers.
Therefore, a model like the following:
```julia
data = 1
model = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = data)
@stageobjective(sp, x.out)
end
```
will result in an error like `UndefVarError: data not defined`.
There are three solutions for this problem.
#### Option 1: declare data inside the build function
```julia
model = SDDP.LinearPolicyGraph(stages = 2) do sp, t
data = 1
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@stageobjective(sp, x)
end
```
#### Option 2: use `@everywhere`
```julia
@everywhere begin
data = 1
end
model = SDDP.LinearPolicyGraph(stages = 2) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@stageobjective(sp, x)
end
```
#### Option 3: build the model in a function
```julia
function build_model()
data = 1
return SDDP.LinearPolicyGraph(stages = 2) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@stageobjective(sp, x)
end
end
model = build_model()
```
### Initialization hooks
!!! warning
This is important if you use Gurobi!
[`SDDP.Asynchronous`](@ref) accepts a pre-processing hook that is run on each
worker process _before_ the model is solved. The most useful situation is for
solvers than need an initialization step. A good example is Gurobi, which can
share an environment amongst all models on a worker. Notably, this environment
**cannot** be shared amongst workers, so defining one environment at the top of
a script will fail!
To initialize a new environment on each worker, use the following:
```julia
SDDP.train(
model;
parallel_scheme = SDDP.Asynchronous() do m::SDDP.PolicyGraph
env = Gurobi.Env()
set_optimizer(m, () -> Gurobi.Optimizer(env))
end,
)
```
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 9483 | # Simulate using a different sampling scheme
```@meta
DocTestSetup = quote
using SDDP, HiGHS
end
```
By default, [`SDDP.simulate`](@ref) will simulate the policy using the
distributions of noise terms that were defined when the model was created. We
call these _in-sample_ simulations. However, in general the _in-sample_
distributions are an approximation of some underlying probability model which
we term the _true process_. Therefore, `SDDP.jl` makes it easy to simulate the
policy using different probability distributions.
To demonstrate the different ways of simulating the policy, we're going to use
the model from the tutorial [Markovian policy graphs](@ref).
```jldoctest sampling_schemes
julia> using SDDP, HiGHS
julia> Ω = [
(inflow = 0.0, fuel_multiplier = 1.5),
(inflow = 50.0, fuel_multiplier = 1.0),
(inflow = 100.0, fuel_multiplier = 0.75),
]
3-element Vector{@NamedTuple{inflow::Float64, fuel_multiplier::Float64}}:
(inflow = 0.0, fuel_multiplier = 1.5)
(inflow = 50.0, fuel_multiplier = 1.0)
(inflow = 100.0, fuel_multiplier = 0.75)
julia> model = SDDP.MarkovianPolicyGraph(
transition_matrices = Array{Float64, 2}[
[1.0]',
[0.75 0.25],
[0.75 0.25; 0.25 0.75],
],
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
# Unpack the stage and Markov index.
t, markov_state = node
# Define the state variable.
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
# Define the control variables.
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
# Define the constraints
@constraints(subproblem, begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
thermal_generation + hydro_generation == 150.0
end)
# Note how we can use `markov_state` to dispatch an `if` statement.
probability = if markov_state == 1 # wet climate state
[1 / 6, 1 / 3, 1 / 2]
else # dry climate state
[1 / 2, 1 / 3, 1 / 6]
end
fuel_cost = [50.0, 100.0, 150.0]
SDDP.parameterize(subproblem, Ω, probability) do ω
JuMP.fix(inflow, ω.inflow)
@stageobjective(
subproblem,
ω.fuel_multiplier * fuel_cost[t] * thermal_generation,
)
return
end
return
end
A policy graph with 5 nodes.
Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
julia> SDDP.train(model; iteration_limit = 10, print_level = 0);
```
## In-sample Monte Carlo simulation
To simulate the policy using the data defined when `model` was created, use
[`SDDP.InSampleMonteCarlo`](@ref).
```julia sampling_schemes
julia> simulations = SDDP.simulate(
model,
20;
sampling_scheme = SDDP.InSampleMonteCarlo(),
);
julia> sort(unique([data[:noise_term] for sim in simulations for data in sim]))
3-element Vector{@NamedTuple{inflow::Float64, fuel_multiplier::Float64}}:
(inflow = 0.0, fuel_multiplier = 1.5)
(inflow = 50.0, fuel_multiplier = 1.0)
(inflow = 100.0, fuel_multiplier = 0.75)
```
## Out-of-sample Monte Carlo simulation
Instead of using the _in-sample_ data, we can perform an _out-of-sample_
simulation of the policy using the [`SDDP.OutOfSampleMonteCarlo`](@ref)
sampling scheme.
For each node, the [`SDDP.OutOfSampleMonteCarlo`](@ref) needs to define a new
distribution for the transition probabilities between nodes in the policy graph,
and a new distribution for the stagewise independent noise terms.
!!! note
The support of the distribution for the stagewise independent noise terms
does not have to be the same as the in-sample distributions.
```jldoctest sampling_schemes
julia> sampling_scheme = SDDP.OutOfSampleMonteCarlo(model) do node
stage, markov_state = node
if stage == 0
# Called from the root node. Transition to (1, 1) with probability 1.0.
# Only return the list of children, _not_ a list of noise terms.
return [SDDP.Noise((1, 1), 1.0)]
elseif stage == 3
# Called from the final node. Return an empty list for the children,
# and a single, deterministic realization for the noise terms.
children = SDDP.Noise[]
noise_terms = [SDDP.Noise((inflow = 75.0, fuel_multiplier = 1.2), 1.0)]
return children, noise_terms
else
# Called from a normal node. Return the in-sample distribution for the
# noise terms, but modify the transition probabilities so that the
# Markov switching probability is now 50%.
probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]
# Note: `Ω` is defined at the top of this page of documentation
noise_terms = [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]
children = [
SDDP.Noise((stage + 1, 1), 0.5), SDDP.Noise((stage + 1, 2), 0.5)
]
return children, noise_terms
end
end;
julia> simulations = SDDP.simulate(model, 1; sampling_scheme = sampling_scheme);
julia> simulations[1][3][:noise_term]
(inflow = 75.0, fuel_multiplier = 1.2)
```
Alternatively, if you only want to modify the stagewise independent noise terms,
pass `use_insample_transition = true`.
```jldoctest sampling_schemes
julia> sampling_scheme = SDDP.OutOfSampleMonteCarlo(
model;
use_insample_transition = true
) do node
stage, markov_state = node
if stage == 3
# Called from the final node. Return a single, deterministic
# realization for the noise terms. Don't return the children because we
# use the in-sample data.
return [SDDP.Noise((inflow = 65.0, fuel_multiplier = 1.1), 1.0)]
else
# Called from a normal node. Return the in-sample distribution for the
# noise terms. Don't return the children because we use the in-sample
# data.
probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]
# Note: `Ω` is defined at the top of this page of documentation
return [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]
end
end;
julia> simulations = SDDP.simulate(model, 1; sampling_scheme = sampling_scheme);
julia> simulations[1][3][:noise_term]
(inflow = 65.0, fuel_multiplier = 1.1)
```
## Historical simulation
Instead of performing a Monte Carlo simulation like the previous tutorials, we
may want to simulate one particular sequence of noise realizations. This
_historical_ simulation can also be conducted by passing a
[`SDDP.Historical`](@ref) sampling scheme to the `sampling_scheme` keyword of
the [`SDDP.simulate`](@ref) function.
We can confirm that the historical sequence of nodes was visited by querying
the `:node_index` key of the simulation results.
```jldoctest sampling_schemes
julia> simulations = SDDP.simulate(
model;
sampling_scheme = SDDP.Historical(
# Note: `Ω` is defined at the top of this page of documentation
[((1, 1), Ω[1]), ((2, 2), Ω[3]), ((3, 1), Ω[2])],
),
);
julia> [stage[:node_index] for stage in simulations[1]]
3-element Vector{Tuple{Int64, Int64}}:
(1, 1)
(2, 2)
(3, 1)
```
You can also pass a vector of scenarios, which are sampled sequentially:
```jldoctest
julia> sampling_scheme = SDDP.Historical(
[
[
(1, (inflow = 65.0, fuel_multiplier = 1.1)),
(2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample
(3, (inflow = 65.0, fuel_multiplier = 1.1)),
],
[
(1, (inflow = 65.0, fuel_multiplier = 1.1)),
(2, (inflow = 100.0, fuel_multiplier = 0.75)),
(3, (inflow = 0.0, fuel_multiplier = 1.5)),
],
],
)
A Historical sampler with 2 scenarios sampled sequentially.
```
Or a vector of scenarios and a corresponding vector of probabilities so that the
historical scenarios are sampled probabilistically:
```jldoctest
julia> sampling_scheme = SDDP.Historical(
[
[
(1, (inflow = 65.0, fuel_multiplier = 1.1)),
(2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample
(3, (inflow = 65.0, fuel_multiplier = 1.1)),
],
[
(1, (inflow = 65.0, fuel_multiplier = 1.1)),
(2, (inflow = 100.0, fuel_multiplier = 0.75)),
(3, (inflow = 0.0, fuel_multiplier = 1.5)),
],
],
[0.3, 0.7],
)
A Historical sampler with 2 scenarios sampled probabilistically.
```
!!! tip
    Your sample space doesn't have to be a `NamedTuple`. It can be any Julia type!
Use a `Vector` if that is easier, or define your own `struct`.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 286 | # Bi-objective multistage stochastic linear programming
The code in this file runs the examples from the working paper
Dowson, O., Morton, D.P. and Downward, A. Bi-objective multistage stochastic
linear programming.
## "A simple example"
```
julia --project=. simple_example.jl
```
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 367 | # Multistage stochastic programs with the entropic risk measure
The code in this file runs the examples from the working paper
Dowson, O., Morton, D.P., and Pagnoncelli, B.K., Multistage stochastic programs
with the entropic risk measure.
[http://www.optimization-online.org/DB_HTML/2020/08/7984.html](http://www.optimization-online.org/DB_HTML/2020/08/7984.html)
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 210 | # MDP modeling for multi-stage stochastic programs
The code in this file runs the examples from the paper
Morton, D.P., Dowson, O., Pagnoncelli, B.K. (2023). MDP modeling for multi-stage
stochastic programs.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 608 | # MSPPy
**This example is not related to a paper written by the authors of SDDP.jl**
This example is taken from MSPPy:
https://github.com/lingquant/msppy/blob/dc85a2e8fa5243b3d5096d59085d9caad3ff2ede/examples/hydro_thermal/julia/test.jl
The original author was Lingquan Ding (@lingquant), but it was modified by
Oscar Dowson (@odow) to meet the latest SDDP.jl syntax.
The original model and data is from:
Shapiro, A., Tekaya, W., da Costa, J. P., & Soares, M. P. (2013). Risk neutral
and risk averse stochastic dual dynamic programming method. European journal
of operational research, 224(2), 375–391.
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 407 | # Stochastic dual dynamic programming with stagewise-dependent objective uncertainty
The code in this file runs the examples from the paper
Downward, A., Dowson, O., and Baucke, R. (2020). Stochastic dual dynamic
programming with stagewise-dependent objective uncertainty. Operations
Research Letters, 48(1), 33--39.
[https://doi.org/10.1016/j.orl.2019.11.002](https://doi.org/10.1016/j.orl.2019.11.002)
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 358 | # Partially observable multistage stochastic optimization
The code in this file runs the examples from the paper
Dowson, O., Morton, D.P., Pagnoncelli, B.K. (2020). Partially observable
multistage stochastic optimization. Operations Research Letters. 48(4),
505--512.
[https://doi.org/10.1016/j.orl.2020.06.005](https://doi.org/10.1016/j.orl.2020.06.005)
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MPL-2.0"] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | docs | 331 | # The policy graph decomposition of multistage stochastic optimization problems
The code in this file runs the examples from the paper
Dowson, O. (2020). The policy graph decomposition of multistage stochastic
optimization problems. Networks, 76(1), 3--23.
[https://doi.org/10.1002/net.21932](https://doi.org/10.1002/net.21932)
| SDDP | https://github.com/odow/SDDP.jl.git |
| ["MIT"] | 0.3.0 | b3cef6a82f35ae8d1ef59702fc5568ddb6da81c4 | code | 6963 | # -*- coding: utf-8 -*-
"""A sampler using adaptive differential evolution proposals.
This is a standalone Julia version of the `Adaptive Differential Ensemble MCMC sampler` as proposed in [Ensemble MCMC Sampling for Robust Bayesian Inference](https://gregorboehl.com/live/ademc_boehl.pdf).
"""
module DIMESampler
using Distributions, ProgressBars, Printf, LinearAlgebra, StatsFuns
export RunDIME, CreateDIMETestFunc, DIMETestFuncMarginalPDF
@doc raw"""
    RunDIME(lprobFunc::Function, init::Array, niter::Int; sigma::Float64=1e-5, gamma=nothing, aimh_prob::Float64=0.1, df_proposal_dist::Int=10, rho::Float64=.999, progress::Bool=true)
# Arguments
- `lprobFunc::Function`: the likelihood function to be sampled. Expected to be vectorized.
- `init::Array`: the initial ensemble. Used to infer the number of chains and the dimensionality of `lprobFunc`. A rule of thumb for the number of chains is ``nchain = 5\,ndim``.
- `niter::Int`: the number of iterations to be run.
- `sigma::Float=1e-5`: the standard deviation of the Gaussian used to stretch the proposal vector.
- `gamma::Float=nothing`: the mean stretch factor for the proposal vector. By default, it is ``2.38 / \sqrt{2\,\mathrm{ndim}}`` as recommended by [ter Braak (2006)](http://www.stat.columbia.edu/~gelman/stuff_for_blog/cajo.pdf).
- `aimh_prob::Float=0.1`: the probability to draw an AIMH proposal.
- `rho::Float=0.999`: the decay parameter for mean and covariance of the AIMH proposals.
- `df_proposal_dist::Float=10`: the degrees of freedom of the multivariate t distribution used for AIMH proposals.
"""
function RunDIME(lprobFunc::Function, init::Array, niter::Int; sigma::Float64=1e-5, gamma=nothing, aimh_prob::Float64=0.1, df_proposal_dist::Int=10, rho::Float64=.999, progress::Bool=true)
ndim, nchain = size(init)
isplit = nchain ÷ 2
# get some default values
dft = df_proposal_dist
if gamma == nothing
g0 = 2.38 / sqrt(2 * ndim)
else
g0 = gamma
end
    # work around MvTDist not accepting positive semi-definite covariance matrices
fixPSD = Matrix(1e-16I, ndim, ndim)
# initialize
ccov = Matrix(1.0I, ndim, ndim)
cmean = zeros(ndim)
dist = MvTDist(dft, cmean, ccov + fixPSD)
accepted = ones(nchain)
cumlweight = -Inf
    # calculate initial values
x = copy(init)
lprob = lprobFunc(x)
if any(lprob .< -1e6)
error("Density of at least one member of the initial ensemble is below -1e6")
end
# preallocate
lprobs = Array{Float64,2}(undef, niter, nchain)
lprobs = fill!(lprobs, 0.0)
chains = Array{Float64,3}(undef, niter, nchain, ndim)
chains = fill!(chains, 0.0)
# optional progress bar
if progress
iter = ProgressBar(1:niter)
else
iter = 1:niter
end
@inbounds for i in iter
# calculate stats for current ensemble
# log weight of current ensemble
lweight = logsumexp(lprobs) + log(sum(accepted)) - log(nchain)
ncov = cov(transpose(x))
nmean = mean(x, dims=2)
# update AIMH proposal distribution
newcumlweight = logaddexp(cumlweight, lweight)
statelweight = cumlweight - newcumlweight
ccov = exp(statelweight) * ccov + exp(lweight - newcumlweight) * ncov
cmean = exp(statelweight) * cmean + exp(lweight - newcumlweight) * nmean
cumlweight = newcumlweight + log(rho)
naccepted = 0
# must iterate over current and reference ensemble
@inbounds for complementary_ensemble in (false,true)
# define current ensemble
if complementary_ensemble
xcur, xref = (@view x[:, 1:isplit+1]), (@view x[:, isplit+1:end])
lprobcur = @view lprob[1:isplit+1]
else
xref, xcur = (@view x[:, 1:isplit+1]), (@view x[:, isplit+1:end])
lprobcur = @view lprob[isplit+1:end]
end
cursize = size(xcur)[2]
refsize = nchain - cursize + 1
# get differential evolution proposal
# draw the indices of the complementary chains
i1 = collect(0:cursize-1) .+ rand(1:cursize-1, cursize)
i2 = collect(0:cursize-1) .+ rand(1:cursize-2, cursize)
i2[i2 .>= i1] .+= 1
# add small noise and calculate proposal
f = sigma * rand(Normal(0,1), (1,cursize))
q = xcur + g0 * (xref[:,(i1 .% refsize) .+ 1] - xref[:,(i2 .% refsize) .+ 1]) .+ f
factors = zeros(cursize)
# get AIMH proposals if any chain is drawn
xchnge = rand(Uniform(0,1), cursize) .<= aimh_prob
if sum(xchnge) > 0
# draw alternative candidates and calculate their proposal density
dist = MvTDist(dft, cmean[:], ccov*(dft - 2)/dft + fixPSD)
xcand = rand(dist, sum(xchnge))
lprop_old = logpdf(dist, xcur[:, xchnge])
lprop_new = logpdf(dist, xcand)
# update proposals and factors
q[:,xchnge] = xcand
factors[xchnge] = lprop_old - lprop_new
end
            # Metropolis-Hastings
newlprob = lprobFunc(q)
lnpdiff = factors + newlprob - lprobcur
accepted = lnpdiff .> log.(rand(Uniform(0,1), cursize))
naccepted += sum(accepted)
# update chains
xcur[:,accepted] = q[:,accepted]
lprobcur[accepted] = newlprob[accepted]
end
# store
chains[i,:,:] = transpose(x)
lprobs[i,:] = lprob
if progress
set_description(iter, string(@sprintf("[ll/MAF: %7.3f(%1.0e)/%2.0d%% | %1.0e]", maximum(lprob), std(lprob), 100*naccepted/nchain, statelweight)))
end
end
return chains, lprobs, dist
end
@doc raw"""
    CreateDIMETestFunc(ndim::Int, weight::Tuple, distance::Real, scale::Real)
Create a trimodal Gaussian mixture for testing.
"""
function CreateDIMETestFunc(ndim, weight, distance, scale)
covm = I(ndim)*scale
meanm = zeros(ndim)
meanm[1] = distance
lw1 = log(weight[1])
lw2 = log(weight[2])
lw3 = log(1-weight[1]-weight[2])
dist = MvNormal(zeros(ndim), covm)
function TestLogProb(p)
stack = cat(lw1 .+ logpdf(dist, p .+ meanm),
lw2 .+ logpdf(dist, p),
lw3 .+ logpdf(dist, p .- meanm),
dims=2)
return logsumexp(stack, dims=2)[:]
end
end
@doc raw"""
    DIMETestFuncMarginalPDF(x::Array, cov_scale::Real, distance::Real, weight::Tuple)
Get the marginal PDF over the first dimension of the test distribution.
"""
function DIMETestFuncMarginalPDF(x, cov_scale, distance, weight)
normd = Normal(0, sqrt(cov_scale))
return weight[1]*pdf.(normd, x .+ distance) + weight[2]*pdf.(normd, x) + (1-weight[1]-weight[2])*pdf.(normd, x .- distance)
end
end
| DIMESampler | https://github.com/gboehl/DIMESampler.jl.git |
| ["MIT"] | 0.3.0 | b3cef6a82f35ae8d1ef59702fc5568ddb6da81c4 | code | 888 | using DIMESampler
using Test
using Distributions
using Random
using LinearAlgebra
using Distributed: pmap  # required by the LogProbParallel helper defined below
@testset "DIMESampler.jl" begin
Random.seed!(1)
# define distribution
m = 2
cov_scale = 0.05
weight = (0.33, .1)
ndim = 35
LogProb = CreateDIMETestFunc(ndim, weight, m, cov_scale)
LogProbParallel(x) = pmap(LogProb, eachslice(x, dims=2))
# for chain
niter = 3000
nchain = ndim*5
initmean = zeros(ndim)
initcov = I(ndim)*2
initchain = rand(MvNormal(initmean, initcov), nchain)
# check 4 real
chains, lprobs, pdist = RunDIME(LogProb, initchain, niter, progress=false)
sample = chains[end-Int(niter/4):end,:,1][:]
tval = 1.7107162256490667
@test isapprox(median(sample), tval)
# check if also runs with progress and DE-MCMC only
chains, lprobs, pdist = RunDIME(LogProb, initchain, 10, progress=true, aimh_prob=0.)
end
| DIMESampler | https://github.com/gboehl/DIMESampler.jl.git |
| ["MIT"] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 615 | using Pkg
repo = ARGS[1]
if contains(repo, "#")
repo, group = split(repo, "#")
else
group = ARGS[2]
end
println("--- :julia: Instantiating project")
withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0, "GROUP" => group, "BACKEND_GROUP" => group) do
Pkg.instantiate()
try
Pkg.develop(repo)
println("+++ :julia: Running tests")
Pkg.test("$(repo)"; coverage=true)
catch err
err isa Pkg.Resolve.ResolverError || rethrow()
@info "Not compatible with this release. No problem." exception=err
exit(0)
end
end
println("+++ :julia: Finished Downstream Test")
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
| ["MIT"] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 3078 | module LuxDeviceUtilsAMDGPUExt
using Adapt: Adapt
using AMDGPU: AMDGPU
using LuxDeviceUtils: LuxDeviceUtils, LuxAMDGPUDevice, LuxCPUDevice, reset_gpu_device!
using Random: Random
__init__() = reset_gpu_device!()
# This code used to be in `LuxAMDGPU.jl`, but we no longer need that package.
const USE_AMD_GPU = Ref{Union{Nothing, Bool}}(nothing)
function _check_use_amdgpu!()
USE_AMD_GPU[] === nothing || return
USE_AMD_GPU[] = AMDGPU.functional()
if USE_AMD_GPU[] && !AMDGPU.functional(:MIOpen)
@warn "MIOpen is not functional in AMDGPU.jl, some functionality will not be \
available." maxlog=1
end
return
end
LuxDeviceUtils.loaded(::Union{LuxAMDGPUDevice, <:Type{LuxAMDGPUDevice}}) = true
function LuxDeviceUtils.functional(::Union{LuxAMDGPUDevice, <:Type{LuxAMDGPUDevice}})::Bool
_check_use_amdgpu!()
return USE_AMD_GPU[]
end
function LuxDeviceUtils._with_device(::Type{LuxAMDGPUDevice}, ::Nothing)
return LuxAMDGPUDevice(nothing)
end
function LuxDeviceUtils._with_device(::Type{LuxAMDGPUDevice}, id::Integer)
id > length(AMDGPU.devices()) &&
throw(ArgumentError("id = $id > length(AMDGPU.devices()) = $(length(AMDGPU.devices()))"))
old_dev = AMDGPU.device()
AMDGPU.device!(AMDGPU.devices()[id])
device = LuxAMDGPUDevice(AMDGPU.device())
AMDGPU.device!(old_dev)
return device
end
LuxDeviceUtils._get_device_id(dev::LuxAMDGPUDevice) = AMDGPU.device_id(dev.device)
# Default RNG
LuxDeviceUtils.default_device_rng(::LuxAMDGPUDevice) = AMDGPU.rocrand_rng()
# Query Device from Array
function LuxDeviceUtils._get_device(x::AMDGPU.AnyROCArray)
parent_x = parent(x)
parent_x === x && return LuxAMDGPUDevice(AMDGPU.device(x))
return LuxDeviceUtils._get_device(parent_x)
end
LuxDeviceUtils._get_device_type(::AMDGPU.AnyROCArray) = LuxAMDGPUDevice
# Set Device
function LuxDeviceUtils.set_device!(::Type{LuxAMDGPUDevice}, dev::AMDGPU.HIPDevice)
return AMDGPU.device!(dev)
end
function LuxDeviceUtils.set_device!(::Type{LuxAMDGPUDevice}, id::Integer)
return LuxDeviceUtils.set_device!(LuxAMDGPUDevice, AMDGPU.devices()[id])
end
function LuxDeviceUtils.set_device!(::Type{LuxAMDGPUDevice}, ::Nothing, rank::Integer)
id = mod1(rank + 1, length(AMDGPU.devices()))
return LuxDeviceUtils.set_device!(LuxAMDGPUDevice, id)
end
# Device Transfer
## To GPU
Adapt.adapt_storage(::LuxAMDGPUDevice{Nothing}, x::AbstractArray) = AMDGPU.roc(x)
function Adapt.adapt_storage(to::LuxAMDGPUDevice, x::AbstractArray)
old_dev = AMDGPU.device() # remember the current device
dev = LuxDeviceUtils.get_device(x)
if !(dev isa LuxAMDGPUDevice)
AMDGPU.device!(to.device)
x_new = AMDGPU.roc(x)
AMDGPU.device!(old_dev)
return x_new
elseif AMDGPU.device_id(dev.device) == AMDGPU.device_id(to.device)
return x
else
AMDGPU.device!(to.device)
x_new = copy(x)
AMDGPU.device!(old_dev)
return x_new
end
end
Adapt.adapt_storage(::LuxCPUDevice, rng::AMDGPU.rocRAND.RNG) = Random.default_rng()
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
| ["MIT"] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 3033 | module LuxDeviceUtilsCUDAExt
using Adapt: Adapt
using CUDA: CUDA
using CUDA.CUSPARSE: AbstractCuSparseMatrix, AbstractCuSparseVector
using LuxDeviceUtils: LuxDeviceUtils, LuxCUDADevice, LuxCPUDevice
using Random: Random
function LuxDeviceUtils._with_device(::Type{LuxCUDADevice}, id::Integer)
id > length(CUDA.devices()) &&
throw(ArgumentError("id = $id > length(CUDA.devices()) = $(length(CUDA.devices()))"))
old_dev = CUDA.device()
CUDA.device!(id - 1)
device = LuxCUDADevice(CUDA.device())
CUDA.device!(old_dev)
return device
end
function LuxDeviceUtils._with_device(::Type{LuxCUDADevice}, ::Nothing)
return LuxCUDADevice(nothing)
end
LuxDeviceUtils._get_device_id(dev::LuxCUDADevice) = CUDA.deviceid(dev.device) + 1
# Default RNG
LuxDeviceUtils.default_device_rng(::LuxCUDADevice) = CUDA.default_rng()
# Query Device from Array
function LuxDeviceUtils._get_device(x::CUDA.AnyCuArray)
parent_x = parent(x)
parent_x === x && return LuxCUDADevice(CUDA.device(x))
return LuxDeviceUtils.get_device(parent_x)
end
function LuxDeviceUtils._get_device(x::CUDA.CUSPARSE.AbstractCuSparseArray)
return LuxCUDADevice(CUDA.device(x.nzVal))
end
function LuxDeviceUtils._get_device_type(::Union{
<:CUDA.AnyCuArray, <:CUDA.CUSPARSE.AbstractCuSparseArray})
return LuxCUDADevice
end
# Set Device
function LuxDeviceUtils.set_device!(::Type{LuxCUDADevice}, dev::CUDA.CuDevice)
return CUDA.device!(dev)
end
function LuxDeviceUtils.set_device!(::Type{LuxCUDADevice}, id::Integer)
return LuxDeviceUtils.set_device!(LuxCUDADevice, collect(CUDA.devices())[id])
end
function LuxDeviceUtils.set_device!(::Type{LuxCUDADevice}, ::Nothing, rank::Integer)
id = mod1(rank + 1, length(CUDA.devices()))
return LuxDeviceUtils.set_device!(LuxCUDADevice, id)
end
# Device Transfer
Adapt.adapt_storage(::LuxCUDADevice{Nothing}, x::AbstractArray) = CUDA.cu(x)
function Adapt.adapt_storage(to::LuxCUDADevice, x::AbstractArray)
old_dev = CUDA.device() # remember the current device
dev = LuxDeviceUtils.get_device(x)
if !(dev isa LuxCUDADevice)
CUDA.device!(to.device)
x_new = CUDA.cu(x)
CUDA.device!(old_dev)
return x_new
elseif dev.device == to.device
return x
else
CUDA.device!(to.device)
x_new = copy(x)
CUDA.device!(old_dev)
return x_new
end
end
Adapt.adapt_storage(::LuxCPUDevice, rng::CUDA.RNG) = Random.default_rng()
# Defining as extensions seems to case precompilation errors
@static if isdefined(CUDA.CUSPARSE, :SparseArrays)
function Adapt.adapt_storage(::LuxCPUDevice, x::AbstractCuSparseMatrix)
return CUDA.CUSPARSE.SparseArrays.SparseMatrixCSC(x)
end
function Adapt.adapt_storage(::LuxCPUDevice, x::AbstractCuSparseVector)
return CUDA.CUSPARSE.SparseArrays.SparseVector(x)
end
else
@warn "CUDA.CUSPARSE seems to have removed SparseArrays as a dependency. Please open \
an issue in LuxDeviceUtils.jl repository."
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 325 | module LuxDeviceUtilsFillArraysExt
using Adapt: Adapt
using FillArrays: FillArrays, AbstractFill
using LuxDeviceUtils: LuxDeviceUtils, LuxCPUDevice, AbstractLuxDevice
Adapt.adapt_structure(::LuxCPUDevice, x::AbstractFill) = x
Adapt.adapt_structure(to::AbstractLuxDevice, x::AbstractFill) = Adapt.adapt(to, collect(x))
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 222 | module LuxDeviceUtilsGPUArraysExt
using Adapt: Adapt
using GPUArrays: GPUArrays
using LuxDeviceUtils: LuxCPUDevice
using Random: Random
Adapt.adapt_storage(::LuxCPUDevice, rng::GPUArrays.RNG) = Random.default_rng()
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 361 | module LuxDeviceUtilsLuxCUDAExt
using LuxCUDA: LuxCUDA
using LuxDeviceUtils: LuxDeviceUtils, LuxCUDADevice, reset_gpu_device!
__init__() = reset_gpu_device!()
LuxDeviceUtils.loaded(::Union{LuxCUDADevice, Type{<:LuxCUDADevice}}) = true
function LuxDeviceUtils.functional(::Union{LuxCUDADevice, Type{<:LuxCUDADevice}})
return LuxCUDA.functional()
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 762 | module LuxDeviceUtilsMetalExt
using Adapt: Adapt
using GPUArrays: GPUArrays
using LuxDeviceUtils: LuxDeviceUtils, LuxMetalDevice, reset_gpu_device!
using Metal: Metal, MtlArray
__init__() = reset_gpu_device!()
LuxDeviceUtils.loaded(::Union{LuxMetalDevice, Type{<:LuxMetalDevice}}) = true
function LuxDeviceUtils.functional(::Union{LuxMetalDevice, Type{<:LuxMetalDevice}})
return Metal.functional()
end
# Default RNG
LuxDeviceUtils.default_device_rng(::LuxMetalDevice) = GPUArrays.default_rng(MtlArray)
# Query Device from Array
LuxDeviceUtils._get_device(::MtlArray) = LuxMetalDevice()
LuxDeviceUtils._get_device_type(::MtlArray) = LuxMetalDevice
# Device Transfer
## To GPU
Adapt.adapt_storage(::LuxMetalDevice, x::AbstractArray) = Metal.mtl(x)
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 824 | module LuxDeviceUtilsRecursiveArrayToolsExt
using Adapt: Adapt, adapt
using LuxDeviceUtils: LuxDeviceUtils, AbstractLuxDevice
using RecursiveArrayTools: VectorOfArray, DiffEqArray
# We want to preserve the structure
function Adapt.adapt_structure(to::AbstractLuxDevice, x::VectorOfArray)
return VectorOfArray(map(Base.Fix1(adapt, to), x.u))
end
function Adapt.adapt_structure(to::AbstractLuxDevice, x::DiffEqArray)
# Don't move the `time` to the GPU
return DiffEqArray(map(Base.Fix1(adapt, to), x.u), x.t)
end
for op in (:_get_device, :_get_device_type)
@eval function LuxDeviceUtils.$op(x::Union{VectorOfArray, DiffEqArray})
length(x.u) == 0 && return $(op == :_get_device ? nothing : Nothing)
return mapreduce(LuxDeviceUtils.$op, LuxDeviceUtils.__combine_devices, x.u)
end
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 474 | module LuxDeviceUtilsReverseDiffExt
using LuxDeviceUtils: LuxDeviceUtils
using ReverseDiff: ReverseDiff
for op in (:_get_device, :_get_device_type)
@eval begin
function LuxDeviceUtils.$op(x::ReverseDiff.TrackedArray)
return LuxDeviceUtils.$op(ReverseDiff.value(x))
end
function LuxDeviceUtils.$op(x::AbstractArray{<:ReverseDiff.TrackedReal})
return LuxDeviceUtils.$op(ReverseDiff.value.(x))
end
end
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 202 | module LuxDeviceUtilsSparseArraysExt
using Adapt: Adapt
using LuxDeviceUtils: LuxCPUDevice
using SparseArrays: AbstractSparseArray
Adapt.adapt_storage(::LuxCPUDevice, x::AbstractSparseArray) = x
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 1002 | module LuxDeviceUtilsTrackerExt
using Adapt: Adapt
using LuxDeviceUtils: LuxDeviceUtils, LuxAMDGPUDevice, LuxCUDADevice, LuxMetalDevice,
LuxoneAPIDevice
using Tracker: Tracker
for op in (:_get_device, :_get_device_type)
@eval begin
LuxDeviceUtils.$op(x::Tracker.TrackedArray) = LuxDeviceUtils.$op(Tracker.data(x))
function LuxDeviceUtils.$op(x::AbstractArray{<:Tracker.TrackedReal})
return LuxDeviceUtils.$op(Tracker.data.(x))
end
end
end
LuxDeviceUtils.__special_aos(::AbstractArray{<:Tracker.TrackedReal}) = true
for T in (LuxAMDGPUDevice, LuxAMDGPUDevice{Nothing}, LuxCUDADevice,
LuxCUDADevice{Nothing}, LuxMetalDevice, LuxoneAPIDevice)
@eval function Adapt.adapt_storage(to::$(T), x::AbstractArray{<:Tracker.TrackedReal})
@warn "AbstractArray{<:Tracker.TrackedReal} is not supported for $(to). Converting \
to Tracker.TrackedArray." maxlog=1
return to(Tracker.collect(x))
end
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 283 | module LuxDeviceUtilsZygoteExt
using Adapt: Adapt
using LuxDeviceUtils: AbstractLuxDevice, LuxCPUDevice
using Zygote: OneElement
Adapt.adapt_structure(::LuxCPUDevice, x::OneElement) = x
Adapt.adapt_structure(to::AbstractLuxDevice, x::OneElement) = Adapt.adapt(to, collect(x))
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 1486 | module LuxDeviceUtilsoneAPIExt
using Adapt: Adapt
using GPUArrays: GPUArrays
using LuxDeviceUtils: LuxDeviceUtils, LuxoneAPIDevice, reset_gpu_device!
using oneAPI: oneAPI, oneArray, oneL0
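# Per-device cache of native FP64 support, populated in `__init__`. On devices without
# FP64 support, `Float64`/`ComplexF64` arrays are converted to their 32-bit counterparts
# on transfer (see the `adapt_storage` methods below).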
const SUPPORTS_FP64 = Dict{oneL0.ZeDevice, Bool}()
function __init__()
reset_gpu_device!()
for dev in oneAPI.devices()
SUPPORTS_FP64[dev] = oneL0.module_properties(dev).fp64flags &
oneL0.ZE_DEVICE_MODULE_FLAG_FP64 ==
oneL0.ZE_DEVICE_MODULE_FLAG_FP64
end
end
LuxDeviceUtils.loaded(::Union{LuxoneAPIDevice, Type{<:LuxoneAPIDevice}}) = true
function LuxDeviceUtils.functional(::Union{LuxoneAPIDevice, Type{<:LuxoneAPIDevice}})
return oneAPI.functional()
end
# Default RNG
LuxDeviceUtils.default_device_rng(::LuxoneAPIDevice) = GPUArrays.default_rng(oneArray)
# Query Device from Array
LuxDeviceUtils._get_device(::oneArray) = LuxoneAPIDevice()
LuxDeviceUtils._get_device_type(::oneArray) = LuxoneAPIDevice
# Device Transfer
## To GPU
for (T1, T2) in ((Float64, Float32), (ComplexF64, ComplexF32))
@eval function Adapt.adapt_storage(::LuxoneAPIDevice, x::AbstractArray{$(T1)})
if !SUPPORTS_FP64[oneAPI.device()]
@warn LazyString(
"Double type is not supported on this device. Using `", $(T2), "` instead.")
return oneArray{$(T2)}(x)
end
return oneArray(x)
end
end
Adapt.adapt_storage(::LuxoneAPIDevice, x::AbstractArray) = oneArray(x)
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 19057 | module LuxDeviceUtils
using Adapt: Adapt
using ChainRulesCore: ChainRulesCore, NoTangent
using Functors: Functors, fmap, fleaves
using LuxCore: LuxCore
using Preferences: @delete_preferences!, @load_preference, @set_preferences!
using Random: AbstractRNG, Random
using UnrolledUtilities: unrolled_mapreduce
const CRC = ChainRulesCore
export gpu_backend!, supported_gpu_backends, reset_gpu_device!
export default_device_rng
export gpu_device, cpu_device
export LuxCPUDevice, LuxCUDADevice, LuxAMDGPUDevice, LuxMetalDevice, LuxoneAPIDevice
export get_device, get_device_type
abstract type AbstractLuxDevice <: Function end
abstract type AbstractLuxGPUDevice <: AbstractLuxDevice end
"""
functional(x::AbstractLuxDevice) -> Bool
functional(::Type{<:AbstractLuxDevice}) -> Bool
Checks if the device is functional. This is used to determine if the device can be used for
computation. Note that even if the backend is loaded (as checked via
[`LuxDeviceUtils.loaded`](@ref)), the device may not be functional.
Note that while this function is not exported, it is considered part of the public API.
"""
@inline functional(x) = false
Base.@deprecate __is_functional(x) functional(x)
"""
loaded(x::AbstractLuxDevice) -> Bool
loaded(::Type{<:AbstractLuxDevice}) -> Bool
Checks if the trigger package for the device is loaded. Trigger packages are as follows:
- `LuxCUDA.jl` for NVIDIA CUDA Support.
- `AMDGPU.jl` for AMD GPU ROCM Support.
- `Metal.jl` for Apple Metal GPU Support.
- `oneAPI.jl` for Intel oneAPI GPU Support.
"""
@inline loaded(x) = false
Base.@deprecate __is_loaded(x) loaded(x)
struct LuxCPUDevice <: AbstractLuxDevice end
@kwdef struct LuxCUDADevice{D} <: AbstractLuxGPUDevice
device::D = nothing
end
@kwdef struct LuxAMDGPUDevice{D} <: AbstractLuxGPUDevice
device::D = nothing
end
struct LuxMetalDevice <: AbstractLuxGPUDevice end
struct LuxoneAPIDevice <: AbstractLuxGPUDevice end
for dev in (LuxCPUDevice, LuxMetalDevice, LuxoneAPIDevice)
msg = "`device_id` is not applicable for `$dev`."
@eval begin
_with_device(::Type{$dev}, ::Nothing) = $dev()
function _with_device(::Type{$dev}, device_id)
@warn $(msg) maxlog=1
return $dev()
end
end
end
@inline functional(::Union{LuxCPUDevice, Type{<:LuxCPUDevice}}) = true
@inline loaded(::Union{LuxCPUDevice, Type{<:LuxCPUDevice}}) = true
for name in (:CPU, :CUDA, :AMDGPU, :Metal, :oneAPI)
tpkg = name === :CPU ? "" : (name == :CUDA ? "Lux$(name)" : string(name))
ldev = eval(Symbol(:Lux, name, :Device))
@eval begin
@inline _get_device_name(::Union{$ldev, Type{<:$ldev}}) = $(string(name))
@inline _get_triggerpkg_name(::Union{$ldev, Type{<:$ldev}}) = $(tpkg)
end
end
for T in (LuxCPUDevice, LuxCUDADevice{Nothing},
LuxAMDGPUDevice{Nothing}, LuxMetalDevice, LuxoneAPIDevice)
@eval @inline _get_device_id(::$(T)) = nothing
end
struct LuxDeviceSelectionException <: Exception end
function Base.showerror(io::IO, ::LuxDeviceSelectionException)
return print(io, "LuxDeviceSelectionException(No functional GPU device found!!)")
end
# Order is important here
const GPU_DEVICES = (LuxCUDADevice, LuxAMDGPUDevice, LuxMetalDevice, LuxoneAPIDevice)
const GPU_DEVICE = Ref{Union{Nothing, AbstractLuxDevice}}(nothing)
"""
reset_gpu_device!()
Resets the selected GPU device. This is useful when automatic GPU selection needs to be
run again.
"""
@inline reset_gpu_device!() = (GPU_DEVICE[] = nothing)
"""
supported_gpu_backends() -> Tuple{String, ...}
Return a tuple of supported GPU backends.
!!! warning
This is not the list of functional backends on the system, but rather backends which
`Lux.jl` supports.
!!! danger
`Metal.jl` and `oneAPI.jl` support is **extremely** experimental and most things are not
expected to work.
"""
@inline supported_gpu_backends() = map(_get_device_name, GPU_DEVICES)
"""
gpu_device(device_id::Union{Nothing, Integer}=nothing;
force_gpu_usage::Bool=false) -> AbstractLuxDevice()
Selects GPU device based on the following criteria:
1. If `gpu_backend` preference is set and the backend is functional on the system, then
that device is selected.
2. Otherwise, an automatic selection algorithm is used. We go over possible device
backends in the order specified by `supported_gpu_backends()` and select the first
functional backend.
3. If no GPU device is functional and `force_gpu_usage` is `false`, then `cpu_device()` is
invoked.
4. If nothing works, an error is thrown.
## Arguments
- `device_id::Union{Nothing, Integer}`: The device id to select. If `nothing`, then we return
the last selected device or if none was selected then we run the autoselection and
choose the current device using `CUDA.device()` or `AMDGPU.device()` or similar. If
`Integer`, then we select the device with the given id. Note that this is `1`-indexed, in
contrast to the `0`-indexed `CUDA.jl`. For example, `id = 4` corresponds to
`CUDA.device!(3)`.
!!! warning
`device_id` is only applicable for `CUDA` and `AMDGPU` backends. For `Metal`, `oneAPI`
and `CPU` backends, `device_id` is ignored and a warning is printed.
## Keyword Arguments
- `force_gpu_usage::Bool`: If `true`, then an error is thrown if no functional GPU
device is found.
"""
function gpu_device(device_id::Union{Nothing, <:Integer}=nothing;
force_gpu_usage::Bool=false)::AbstractLuxDevice
device_id == 0 && throw(ArgumentError("`device_id` is 1-indexed."))
if GPU_DEVICE[] !== nothing
dev = GPU_DEVICE[]
if device_id === nothing
force_gpu_usage &&
!(dev isa AbstractLuxGPUDevice) &&
throw(LuxDeviceSelectionException())
return dev
else
selected_device_id = _get_device_id(dev)
selected_device_id !== nothing && selected_device_id == device_id && return dev
end
end
device_type = _get_gpu_device(; force_gpu_usage)
device = _with_device(device_type, device_id)
GPU_DEVICE[] = device
return device
end
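# Illustrative usage (assumes the relevant trigger package, e.g. `LuxCUDA.jl`, is loaded):
#
#   gpu_device()                        # last selected / auto-selected device
#   gpu_device(2)                       # second GPU (1-indexed; CUDA/AMDGPU only)
#   gpu_device(; force_gpu_usage=true)  # throws if no functional GPU is found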
function _get_gpu_device(; force_gpu_usage::Bool)
backend = @load_preference("gpu_backend", nothing)
# If backend set with preferences, use it
if backend !== nothing
allowed_backends = supported_gpu_backends()
if backend ∉ allowed_backends
@warn "`gpu_backend` preference is set to $backend, which is not a valid \
backend. Valid backends are $allowed_backends. Defaulting to automatic \
GPU Backend selection." maxlog=1
else
@debug "Using GPU backend set in preferences: $backend."
idx = findfirst(isequal(backend), allowed_backends)
device = GPU_DEVICES[idx]
if !loaded(device)
@warn "Trying to use backend: $(_get_device_name(device)) but the trigger \
package $(_get_triggerpkg_name(device)) is not loaded. Ignoring the \
Preferences backend!!! Please load the package and call this \
function again to respect the Preferences backend." maxlog=1
else
if functional(device)
@debug "Using GPU backend: $(_get_device_name(device))."
return device
else
@warn "GPU backend: $(_get_device_name(device)) set via Preferences.jl \
is not functional. Defaulting to automatic GPU Backend \
selection." maxlog=1
end
end
end
end
@debug "Running automatic GPU backend selection..."
for device in GPU_DEVICES
if loaded(device)
@debug "Trying backend: $(_get_device_name(device))."
if functional(device)
@debug "Using GPU backend: $(_get_device_name(device))."
return device
end
@debug "GPU backend: $(_get_device_name(device)) is not functional."
else
@debug "Trigger package for backend ($(_get_device_name(device))): \
$(_get_triggerpkg_name(device)) not loaded."
end
end
if force_gpu_usage
throw(LuxDeviceSelectionException())
else
@warn """No functional GPU backend found! Defaulting to CPU.
1. If no GPU is available, nothing needs to be done.
2. If GPU is available, load the corresponding trigger package.
a. `LuxCUDA.jl` for NVIDIA CUDA Support.
b. `AMDGPU.jl` for AMD GPU ROCM Support.
c. `Metal.jl` for Apple Metal GPU Support. (Experimental)
d. `oneAPI.jl` for Intel oneAPI GPU Support. (Experimental)""" maxlog=1
return LuxCPUDevice
end
end
"""
gpu_backend!() = gpu_backend!("")
gpu_backend!(backend) = gpu_backend!(string(backend))
gpu_backend!(backend::AbstractLuxGPUDevice)
gpu_backend!(backend::String)
Creates a `LocalPreferences.toml` file with the desired GPU backend.
If `backend == ""`, then the `gpu_backend` preference is deleted. Otherwise, `backend` is
validated to be one of the possible backends and the preference is set to `backend`.
If a new backend is successfully set, then the Julia session must be restarted for the
change to take effect.
"""
gpu_backend!(backend) = gpu_backend!(string(backend))
gpu_backend!(backend::AbstractLuxGPUDevice) = gpu_backend!(_get_device_name(backend))
gpu_backend!() = gpu_backend!("")
function gpu_backend!(backend::String)
if backend == ""
@delete_preferences!("gpu_backend")
@info "Deleted the local preference for `gpu_backend`. Restart Julia to use the \
new backend."
return
end
allowed_backends = supported_gpu_backends()
set_backend = @load_preference("gpu_backend", nothing)
if set_backend == backend
@info "GPU backend is already set to $backend. No action is required."
return
end
if backend ∉ allowed_backends
throw(ArgumentError("Invalid backend: $backend. Valid backends are $allowed_backends."))
end
@set_preferences!("gpu_backend"=>backend)
@info "GPU backend has been set to $backend. Restart Julia to use the new backend."
return
end
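# Illustrative usage:
#
#   gpu_backend!("CUDA")  # persists the preference; restart Julia for it to take effect
#   gpu_backend!()        # deletes the `gpu_backend` preference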
"""
cpu_device() -> LuxCPUDevice()
Return a `LuxCPUDevice` object which can be used to transfer data to CPU.
"""
@inline cpu_device() = LuxCPUDevice()
"""
default_device_rng(::AbstractLuxDevice)
Returns the default RNG for the device. This can be used to directly generate parameters
and states on the device using
[WeightInitializers.jl](https://github.com/LuxDL/WeightInitializers.jl).
"""
function default_device_rng(D::AbstractLuxDevice)
return error("""`default_device_rng` not implemented for `$(typeof(D))`. This is \
either because:
1. The default RNG for this device is not known / officially provided.
2. The trigger package for the device ($(_get_device_name(D)).jl) is not loaded.
""")
end
default_device_rng(::LuxCPUDevice) = Random.default_rng()
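# For GPU devices the RNG comes from the corresponding extension, e.g. the CUDA extension
# returns `CUDA.default_rng()` and the Metal extension returns `GPUArrays.default_rng(MtlArray)`.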
# Dispatches for Different Data Structures
# Abstract Array / Tuples / NamedTuples have special fast paths to facilitate type stability
# For all other types we rely on fmap which means we lose type stability.
# For Lux, models typically only have these 3 data structures, so we should be mostly fine.
for (dev) in (:CPU, :CUDA, :AMDGPU, :Metal, :oneAPI)
ldev = Symbol("Lux$(dev)Device")
@eval begin
function (D::$(ldev))(x::AbstractArray{T}) where {T}
fn = Base.Fix1(Adapt.adapt, D)
return isbitstype(T) || __special_aos(x) ? fn(x) : map(D, x)
end
(D::$(ldev))(x::Tuple) = map(D, x)
(D::$(ldev))(x::NamedTuple{F}) where {F} = NamedTuple{F}(D(values(x)))
function (D::$(ldev))(x)
Functors.isleaf(x) && return Adapt.adapt(D, x)
return fmap(D, x)
end
function (::$(ldev))(NN::LuxCore.AbstractExplicitLayer)
@warn "Lux layers are stateless and hence don't participate in device \
transfers. Apply this function on the parameters and states generated \
using `Lux.setup`."
return NN
end
end
end
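# Illustrative usage (concrete array types depend on the selected backend):
#
#   gdev = gpu_device()
#   ps   = (weight = rand(Float32, 4, 4), bias = zeros(Float32, 4))
#   ps_g = gdev(ps)            # NamedTuple with its arrays moved to the device
#   ps_c = cpu_device()(ps_g)  # plain `Array`s again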
@inline __special_aos(x::AbstractArray) = false
const GET_DEVICE_ADMONITIONS = """
!!! note
Trigger Packages must be loaded for this to return the correct device.
!!! warning
RNG types currently don't participate in device determination. We will remove this
restriction in the future.
"""
# Query Device from Array
"""
get_device(x) -> dev::AbstractLuxDevice | Exception | nothing
If all arrays (on the leaves of the structure) are on the same device, we return that
device. Otherwise, we throw an error. If the object is device agnostic, we return `nothing`.
$(GET_DEVICE_ADMONITIONS)
See also [`get_device_type`](@ref) for a faster alternative that can be used for dispatch
based on device type.
"""
function get_device end
"""
get_device_type(x) -> Type{<:AbstractLuxDevice} | Exception | Type{Nothing}
Similar to [`get_device`](@ref) but returns the type of the device instead of the device
itself. This value is often a compile-time constant and should be preferred over
[`get_device`](@ref) wherever you define dispatches based on the device type.
$(GET_DEVICE_ADMONITIONS)
"""
function get_device_type end
for op in (:get_device, :get_device_type)
_op = Symbol("_", op)
cpu_ret_val = op == :get_device ? LuxCPUDevice() : LuxCPUDevice
@eval begin
function $(op)(x)
hasmethod($(_op), Tuple{typeof(x)}) && return $(_op)(x)
return mapreduce($(_op), __combine_devices, fleaves(x))
end
CRC.@non_differentiable $op(::Any)
function $(_op)(x::AbstractArray{T}) where {T}
__recursible_array_eltype(T) && return mapreduce($(op), __combine_devices, x)
if hasmethod(parent, Tuple{typeof(x)})
parent_x = parent(x)
parent_x === x && return $(cpu_ret_val)
return $(_op)(parent_x)
end
return $(cpu_ret_val)
end
function $(_op)(x::Union{Tuple, NamedTuple})
length(x) == 0 && return $(op == :get_device ? nothing : Nothing)
return unrolled_mapreduce($(op), __combine_devices, values(x))
end
end
for T in (Number, AbstractRNG, Val, Symbol, String, Nothing)
@eval $(_op)(::$(T)) = $(op == :get_device ? nothing : Nothing)
end
end
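# Illustrative behaviour:
#
#   get_device(rand(3))                      # LuxCPUDevice()
#   get_device_type((a = rand(2), b = 3.0))  # LuxCPUDevice (numbers are device agnostic)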
__recursible_array_eltype(::Type{T}) where {T} = !isbitstype(T) && !(T <: Number)
__combine_devices(::Nothing, ::Nothing) = nothing
__combine_devices(::Type{Nothing}, ::Type{Nothing}) = Nothing
__combine_devices(::Nothing, dev::AbstractLuxDevice) = dev
__combine_devices(::Type{Nothing}, ::Type{T}) where {T <: AbstractLuxDevice} = T
__combine_devices(dev::AbstractLuxDevice, ::Nothing) = dev
__combine_devices(::Type{T}, ::Type{Nothing}) where {T <: AbstractLuxDevice} = T
function __combine_devices(dev1::AbstractLuxDevice, dev2::AbstractLuxDevice)
dev1 == dev2 && return dev1
throw(ArgumentError("Objects are on different devices: $(dev1) and $(dev2)."))
end
__combine_devices(::Type{T}, ::Type{T}) where {T <: AbstractLuxDevice} = T
function __combine_devices(
::Type{T1}, ::Type{T2}) where {T1 <: AbstractLuxDevice, T2 <: AbstractLuxDevice}
throw(ArgumentError("Objects are on devices with different types: $(T1) and $(T2)."))
end
# Set the device
const SET_DEVICE_DOCS = """
Set the device for the given type. This is a no-op for `LuxCPUDevice`. For `LuxCUDADevice`
and `LuxAMDGPUDevice`, it prints a warning if the corresponding trigger package is not
loaded.
Currently, `LuxMetalDevice` and `LuxoneAPIDevice` don't support setting the device.
"""
const SET_DEVICE_DANGER = """
!!! danger
This specific function should be considered experimental at this point and is currently
provided to support distributed training in Lux. As such please use
`Lux.DistributedUtils` instead of using this function.
"""
"""
set_device!(T::Type{<:AbstractLuxDevice}, dev_or_id)
$SET_DEVICE_DOCS
## Arguments
- `T::Type{<:AbstractLuxDevice}`: The device type to set.
- `dev_or_id`: Can be the device from the corresponding package. For example for CUDA it
can be a `CuDevice`. If it is an integer, it is the device id to set. This is
`1`-indexed.
$SET_DEVICE_DANGER
"""
function set_device!(::Type{T}, dev_or_id) where {T <: AbstractLuxDevice}
T === LuxCUDADevice &&
@warn "`CUDA.jl` hasn't been loaded. Ignoring the device setting."
T === LuxAMDGPUDevice &&
@warn "`AMDGPU.jl` hasn't been loaded. Ignoring the device setting."
T === LuxMetalDevice &&
@warn "Support for Multi Device Metal hasn't been implemented yet. Ignoring the device setting."
T === LuxoneAPIDevice &&
@warn "Support for Multi Device oneAPI hasn't been implemented yet. Ignoring the device setting."
T === LuxCPUDevice &&
@warn "Setting device for `LuxCPUDevice` doesn't make sense. Ignoring the device setting."
return
end
"""
set_device!(T::Type{<:AbstractLuxDevice}, ::Nothing, rank::Integer)
$SET_DEVICE_DOCS
## Arguments
- `T::Type{<:AbstractLuxDevice}`: The device type to set.
- `rank::Integer`: Local Rank of the process. This is applicable for distributed training and
must be `0`-indexed.
$SET_DEVICE_DANGER
"""
function set_device!(::Type{T}, ::Nothing, rank::Integer) where {T <: AbstractLuxDevice}
return set_device!(T, rank)
end
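# Illustrative distributed usage (`local_rank` is a hypothetical 0-indexed rank obtained
# from the communication backend):
#
#   set_device!(LuxCUDADevice, nothing, local_rank)  # selects GPU `mod1(local_rank + 1, ndevices)`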
# Adapt Interface
# In older versions we had corresponding Adapt functions; now we dispatch directly on the
# device type.
for name in (:CPU, :CUDA, :AMDGPU, :Metal, :oneAPI)
dev = Symbol(:Lux, name, :Device)
adaptor = Symbol(:Lux, name, :Adaptor)
@eval Base.@deprecate $(adaptor) $(dev) true
end
Adapt.adapt_storage(::LuxCPUDevice, x::AbstractArray) = Adapt.adapt(Array, x)
Adapt.adapt_storage(::LuxCPUDevice, rng::AbstractRNG) = rng
for T in (LuxAMDGPUDevice, LuxCUDADevice, LuxMetalDevice, LuxoneAPIDevice)
@eval begin
function Adapt.adapt_storage(to::$(T), ::Random.TaskLocalRNG)
return default_device_rng(to)
end
Adapt.adapt_storage(::$(T), rng::AbstractRNG) = rng
end
end
Adapt.adapt_storage(::LuxCPUDevice, x::AbstractRange) = x
# Prevent Ambiguity
for T in (LuxAMDGPUDevice, LuxAMDGPUDevice{Nothing}, LuxCUDADevice,
LuxCUDADevice{Nothing}, LuxMetalDevice, LuxoneAPIDevice)
@eval Adapt.adapt_storage(to::$(T), x::AbstractRange) = Adapt.adapt(to, collect(x))
end
# Chain Rules Core
function CRC.rrule(::typeof(Adapt.adapt_storage), to::AbstractLuxDevice, x::AbstractArray)
∇adapt_storage = let x = x
Δ -> (NoTangent(), NoTangent(), (get_device(x))(Δ))
end
return Adapt.adapt_storage(to, x), ∇adapt_storage
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 5765 | using LuxDeviceUtils, Random, Test
using ArrayInterface: parameterless_type
@testset "CPU Fallback" begin
@test !LuxDeviceUtils.functional(LuxAMDGPUDevice)
@test cpu_device() isa LuxCPUDevice
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
@test_throws Exception default_device_rng(LuxAMDGPUDevice(nothing))
@test_logs (:warn, "`AMDGPU.jl` hasn't been loaded. Ignoring the device setting.") LuxDeviceUtils.set_device!(
LuxAMDGPUDevice, nothing, 1)
end
using AMDGPU
@testset "Loaded Trigger Package" begin
@test LuxDeviceUtils.GPU_DEVICE[] === nothing
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
@info "AMDGPU is functional"
@test gpu_device() isa LuxAMDGPUDevice
@test gpu_device(; force_gpu_usage=true) isa LuxAMDGPUDevice
else
@info "AMDGPU is NOT functional"
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
end
@test LuxDeviceUtils.GPU_DEVICE[] !== nothing
end
using FillArrays, Zygote # Extensions
@testset "Data Transfer" begin
ps = (a=(c=zeros(10, 1), d=1), b=ones(10, 1), e=:c,
d="string", mixed=[2.0f0, 3.0, ones(2, 3)], # mixed array types
range=1:10,
rng_default=Random.default_rng(), rng=MersenneTwister(),
one_elem=Zygote.OneElement(2.0f0, (2, 3), (1:3, 1:4)), farray=Fill(1.0f0, (2, 3)))
device = gpu_device()
aType = LuxDeviceUtils.functional(LuxAMDGPUDevice) ? ROCArray : Array
rngType = LuxDeviceUtils.functional(LuxAMDGPUDevice) ? AMDGPU.rocRAND.RNG :
Random.AbstractRNG
ps_xpu = ps |> device
@test get_device(ps_xpu) isa LuxAMDGPUDevice
@test get_device_type(ps_xpu) <: LuxAMDGPUDevice
@test ps_xpu.a.c isa aType
@test ps_xpu.b isa aType
@test ps_xpu.a.d == ps.a.d
@test ps_xpu.mixed isa Vector
@test ps_xpu.mixed[1] isa Float32
@test ps_xpu.mixed[2] isa Float64
@test ps_xpu.mixed[3] isa aType
@test ps_xpu.range isa aType
@test ps_xpu.e == ps.e
@test ps_xpu.d == ps.d
@test ps_xpu.rng_default isa rngType
@test ps_xpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
@test ps_xpu.one_elem isa ROCArray
@test ps_xpu.farray isa ROCArray
else
@test ps_xpu.one_elem isa Zygote.OneElement
@test ps_xpu.farray isa Fill
end
ps_cpu = ps_xpu |> cpu_device()
@test get_device(ps_cpu) isa LuxCPUDevice
@test get_device_type(ps_cpu) <: LuxCPUDevice
@test ps_cpu.a.c isa Array
@test ps_cpu.b isa Array
@test ps_cpu.a.c == ps.a.c
@test ps_cpu.b == ps.b
@test ps_cpu.a.d == ps.a.d
@test ps_cpu.mixed isa Vector
@test ps_cpu.mixed[1] isa Float32
@test ps_cpu.mixed[2] isa Float64
@test ps_cpu.mixed[3] isa Array
@test ps_cpu.range isa Array
@test ps_cpu.e == ps.e
@test ps_cpu.d == ps.d
@test ps_cpu.rng_default isa Random.TaskLocalRNG
@test ps_cpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
@test ps_cpu.one_elem isa Array
@test ps_cpu.farray isa Array
else
@test ps_cpu.one_elem isa Zygote.OneElement
@test ps_cpu.farray isa Fill
end
ps_mixed = (; a=rand(2), b=device(rand(2)))
@test_throws ArgumentError get_device(ps_mixed)
dev = gpu_device()
x = rand(Float32, 10, 2)
x_dev = x |> dev
@test get_device(x_dev) isa parameterless_type(typeof(dev))
@test get_device_type(x_dev) <: parameterless_type(typeof(dev))
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
dev2 = gpu_device(length(AMDGPU.devices()))
x_dev2 = x_dev |> dev2
@test get_device(x_dev2) isa typeof(dev2)
@test get_device_type(x_dev2) <: parameterless_type(typeof(dev2))
end
@testset "get_device_type compile constant" begin
x = rand(10, 10) |> device
ps = (; weight=x, bias=x, d=(x, x))
return_val(x) = Val(get_device_type(x)) # If it is a compile time constant then type inference will work
@test @inferred(return_val(ps)) isa Val{parameterless_type(typeof(device))}
end
end
@testset "Wrapped Arrays" begin
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
x = rand(10, 10) |> LuxAMDGPUDevice()
@test get_device(x) isa LuxAMDGPUDevice
@test get_device_type(x) <: LuxAMDGPUDevice
x_view = view(x, 1:5, 1:5)
@test get_device(x_view) isa LuxAMDGPUDevice
@test get_device_type(x_view) <: LuxAMDGPUDevice
end
end
@testset "Multiple Devices AMDGPU" begin
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
ps = (; weight=rand(Float32, 10), bias=rand(Float32, 10))
ps_cpu = deepcopy(ps)
cdev = cpu_device()
for idx in 1:length(AMDGPU.devices())
amdgpu_device = gpu_device(idx)
@test typeof(amdgpu_device.device) <: AMDGPU.HIPDevice
@test AMDGPU.device_id(amdgpu_device.device) == idx
ps = ps |> amdgpu_device
@test ps.weight isa ROCArray
@test ps.bias isa ROCArray
@test AMDGPU.device_id(AMDGPU.device(ps.weight)) == idx
@test AMDGPU.device_id(AMDGPU.device(ps.bias)) == idx
@test isequal(cdev(ps.weight), ps_cpu.weight)
@test isequal(cdev(ps.bias), ps_cpu.bias)
end
ps = ps |> cdev
@test ps.weight isa Array
@test ps.bias isa Array
end
end
@testset "setdevice!" begin
if LuxDeviceUtils.functional(LuxAMDGPUDevice)
for i in 1:10
@test_nowarn LuxDeviceUtils.set_device!(LuxAMDGPUDevice, nothing, i)
end
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 7561 | using LuxDeviceUtils, Random, Functors, Test
using ArrayInterface: parameterless_type
@testset "CPU Fallback" begin
@test !LuxDeviceUtils.functional(LuxCUDADevice)
@test cpu_device() isa LuxCPUDevice
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
@test_throws Exception default_device_rng(LuxCUDADevice(nothing))
@test_logs (:warn, "`CUDA.jl` hasn't been loaded. Ignoring the device setting.") LuxDeviceUtils.set_device!(
LuxCUDADevice, nothing, 1)
end
using LuxCUDA
@testset "Loaded Trigger Package" begin
@test LuxDeviceUtils.GPU_DEVICE[] === nothing
if LuxDeviceUtils.functional(LuxCUDADevice)
@info "LuxCUDA is functional"
@test gpu_device() isa LuxCUDADevice
@test gpu_device(; force_gpu_usage=true) isa LuxCUDADevice
else
@info "LuxCUDA is NOT functional"
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
end
@test LuxDeviceUtils.GPU_DEVICE[] !== nothing
end
using FillArrays, Zygote # Extensions
@testset "Data Transfer" begin
ps = (a=(c=zeros(10, 1), d=1), b=ones(10, 1), e=:c,
d="string", mixed=[2.0f0, 3.0, ones(2, 3)], # mixed array types
range=1:10,
rng_default=Random.default_rng(), rng=MersenneTwister(),
one_elem=Zygote.OneElement(2.0f0, (2, 3), (1:3, 1:4)), farray=Fill(1.0f0, (2, 3)))
device = gpu_device()
aType = LuxDeviceUtils.functional(LuxCUDADevice) ? CuArray : Array
rngType = LuxDeviceUtils.functional(LuxCUDADevice) ? CUDA.RNG : Random.AbstractRNG
ps_xpu = ps |> device
@test get_device(ps_xpu) isa LuxCUDADevice
@test get_device_type(ps_xpu) <: LuxCUDADevice
@test ps_xpu.a.c isa aType
@test ps_xpu.b isa aType
@test ps_xpu.a.d == ps.a.d
@test ps_xpu.mixed isa Vector
@test ps_xpu.mixed[1] isa Float32
@test ps_xpu.mixed[2] isa Float64
@test ps_xpu.mixed[3] isa aType
@test ps_xpu.range isa aType
@test ps_xpu.e == ps.e
@test ps_xpu.d == ps.d
@test ps_xpu.rng_default isa rngType
@test ps_xpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxCUDADevice)
@test ps_xpu.one_elem isa CuArray
@test ps_xpu.farray isa CuArray
else
@test ps_xpu.one_elem isa Zygote.OneElement
@test ps_xpu.farray isa Fill
end
ps_cpu = ps_xpu |> cpu_device()
@test get_device(ps_cpu) isa LuxCPUDevice
@test get_device_type(ps_cpu) <: LuxCPUDevice
@test ps_cpu.a.c isa Array
@test ps_cpu.b isa Array
@test ps_cpu.a.c == ps.a.c
@test ps_cpu.b == ps.b
@test ps_cpu.a.d == ps.a.d
@test ps_cpu.mixed isa Vector
@test ps_cpu.mixed[1] isa Float32
@test ps_cpu.mixed[2] isa Float64
@test ps_cpu.mixed[3] isa Array
@test ps_cpu.range isa Array
@test ps_cpu.e == ps.e
@test ps_cpu.d == ps.d
@test ps_cpu.rng_default isa Random.TaskLocalRNG
@test ps_cpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxCUDADevice)
@test ps_cpu.one_elem isa Array
@test ps_cpu.farray isa Array
else
@test ps_cpu.one_elem isa Zygote.OneElement
@test ps_cpu.farray isa Fill
end
struct MyStruct
x::Any
end
Functors.@functor MyStruct
data = MyStruct(rand(10))
@test get_device(data) isa LuxCPUDevice
@test get_device_type(data) <: LuxCPUDevice
data_dev = data |> device
if LuxDeviceUtils.functional(LuxCUDADevice)
@test get_device(data_dev) isa LuxCUDADevice
@test get_device_type(data_dev) <: LuxCUDADevice
else
@test get_device(data_dev) isa LuxCPUDevice
@test get_device_type(data_dev) <: LuxCPUDevice
end
ps_mixed = (; a=rand(2), c=(rand(2), 1), st=MyStruct(rand(2)), b=device(rand(2)))
@test get_device(ps_mixed.st) isa LuxCPUDevice
@test get_device_type(ps_mixed.st) <: LuxCPUDevice
@test get_device(ps_mixed.c) isa LuxCPUDevice
@test get_device_type(ps_mixed.c) <: LuxCPUDevice
@test_throws ArgumentError get_device(ps_mixed)
@test_throws ArgumentError get_device_type(ps_mixed)
dev = gpu_device()
x = rand(Float32, 10, 2)
x_dev = x |> dev
@test get_device(x_dev) isa parameterless_type(typeof(dev))
@test get_device_type(x_dev) <: parameterless_type(typeof(dev))
if LuxDeviceUtils.functional(LuxCUDADevice)
dev2 = gpu_device(length(CUDA.devices()))
x_dev2 = x_dev |> dev2
@test get_device(x_dev2) isa typeof(dev2)
@test get_device_type(x_dev2) <: parameterless_type(typeof(dev2))
end
@testset "get_device_type compile constant" begin
x = rand(10, 10) |> device
ps = (; weight=x, bias=x, d=(x, x))
return_val(x) = Val(get_device_type(x)) # If it is a compile time constant then type inference will work
@test @inferred(return_val(ps)) isa Val{parameterless_type(typeof(device))}
return_val2(x) = Val(get_device(x))
@test_throws ErrorException @inferred(return_val2(ps))
end
end
@testset "Wrapped Arrays" begin
if LuxDeviceUtils.functional(LuxCUDADevice)
x = rand(10, 10) |> LuxCUDADevice()
@test get_device(x) isa LuxCUDADevice
@test get_device_type(x) <: LuxCUDADevice
x_view = view(x, 1:5, 1:5)
@test get_device(x_view) isa LuxCUDADevice
@test get_device_type(x_view) <: LuxCUDADevice
end
end
@testset "Multiple Devices CUDA" begin
if LuxDeviceUtils.functional(LuxCUDADevice)
ps = (; weight=rand(Float32, 10), bias=rand(Float32, 10))
ps_cpu = deepcopy(ps)
cdev = cpu_device()
for idx in 1:length(CUDA.devices())
cuda_device = gpu_device(idx)
@test typeof(cuda_device.device) <: CUDA.CuDevice
@test cuda_device.device.handle == (idx - 1)
ps = ps |> cuda_device
@test ps.weight isa CuArray
@test ps.bias isa CuArray
@test CUDA.device(ps.weight).handle == idx - 1
@test CUDA.device(ps.bias).handle == idx - 1
@test isequal(cdev(ps.weight), ps_cpu.weight)
@test isequal(cdev(ps.bias), ps_cpu.bias)
end
ps = ps |> cdev
@test ps.weight isa Array
@test ps.bias isa Array
end
end
using SparseArrays
@testset "CUDA Sparse Arrays" begin
if LuxDeviceUtils.functional(LuxCUDADevice)
ps = (; weight=sprand(Float32, 10, 10, 0.1), bias=sprand(Float32, 10, 0.1))
ps_cpu = deepcopy(ps)
cdev = cpu_device()
for idx in 1:length(CUDA.devices())
cuda_device = gpu_device(idx)
@test typeof(cuda_device.device) <: CUDA.CuDevice
@test cuda_device.device.handle == (idx - 1)
ps = ps |> cuda_device
@test ps.weight isa CUSPARSE.CuSparseMatrixCSC
@test ps.bias isa CUSPARSE.CuSparseVector
@test get_device(ps.weight).device.handle == idx - 1
@test get_device(ps.bias).device.handle == idx - 1
@test isequal(cdev(ps.weight), ps_cpu.weight)
@test isequal(cdev(ps.bias), ps_cpu.bias)
end
ps = ps |> cdev
@test ps.weight isa SparseMatrixCSC
@test ps.bias isa SparseVector
end
end
@testset "setdevice!" begin
if LuxDeviceUtils.functional(LuxCUDADevice)
for i in 1:10
@test_nowarn LuxDeviceUtils.set_device!(LuxCUDADevice, nothing, i)
end
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 4459 | using LuxDeviceUtils, Random, Test
using ArrayInterface: parameterless_type
@testset "CPU Fallback" begin
@test !LuxDeviceUtils.functional(LuxMetalDevice)
@test cpu_device() isa LuxCPUDevice
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
@test_throws Exception default_device_rng(LuxMetalDevice())
end
using Metal
@testset "Loaded Trigger Package" begin
@test LuxDeviceUtils.GPU_DEVICE[] === nothing
if LuxDeviceUtils.functional(LuxMetalDevice)
@info "Metal is functional"
@test gpu_device() isa LuxMetalDevice
@test gpu_device(; force_gpu_usage=true) isa LuxMetalDevice
else
@info "Metal is NOT functional"
@test gpu_device() isa LuxMetalDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
end
@test LuxDeviceUtils.GPU_DEVICE[] !== nothing
end
using FillArrays, Zygote # Extensions
@testset "Data Transfer" begin
ps = (a=(c=zeros(10, 1), d=1), b=ones(10, 1), e=:c,
d="string", mixed=[2.0f0, 3.0, ones(2, 3)], # mixed array types
range=1:10,
rng_default=Random.default_rng(), rng=MersenneTwister(),
one_elem=Zygote.OneElement(2.0f0, (2, 3), (1:3, 1:4)), farray=Fill(1.0f0, (2, 3)))
device = gpu_device()
aType = LuxDeviceUtils.functional(LuxMetalDevice) ? MtlArray : Array
rngType = LuxDeviceUtils.functional(LuxMetalDevice) ? Metal.GPUArrays.RNG :
Random.AbstractRNG
ps_xpu = ps |> device
@test get_device(ps_xpu) isa LuxMetalDevice
@test get_device_type(ps_xpu) <: LuxMetalDevice
@test ps_xpu.a.c isa aType
@test ps_xpu.b isa aType
@test ps_xpu.a.d == ps.a.d
@test ps_xpu.mixed isa Vector
@test ps_xpu.mixed[1] isa Float32
@test ps_xpu.mixed[2] isa Float64
@test ps_xpu.mixed[3] isa aType
@test ps_xpu.range isa aType
@test ps_xpu.e == ps.e
@test ps_xpu.d == ps.d
@test ps_xpu.rng_default isa rngType
@test ps_xpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxMetalDevice)
@test ps_xpu.one_elem isa MtlArray
@test ps_xpu.farray isa MtlArray
else
@test ps_xpu.one_elem isa Zygote.OneElement
@test ps_xpu.farray isa Fill
end
ps_cpu = ps_xpu |> cpu_device()
@test get_device(ps_cpu) isa LuxCPUDevice
@test get_device_type(ps_cpu) <: LuxCPUDevice
@test ps_cpu.a.c isa Array
@test ps_cpu.b isa Array
@test ps_cpu.a.c == ps.a.c
@test ps_cpu.b == ps.b
@test ps_cpu.a.d == ps.a.d
@test ps_cpu.mixed isa Vector
@test ps_cpu.mixed[1] isa Float32
@test ps_cpu.mixed[2] isa Float64
@test ps_cpu.mixed[3] isa Array
@test ps_cpu.range isa Array
@test ps_cpu.e == ps.e
@test ps_cpu.d == ps.d
@test ps_cpu.rng_default isa Random.TaskLocalRNG
@test ps_cpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxMetalDevice)
@test ps_cpu.one_elem isa Array
@test ps_cpu.farray isa Array
else
@test ps_cpu.one_elem isa Zygote.OneElement
@test ps_cpu.farray isa Fill
end
ps_mixed = (; a=rand(2), b=device(rand(2)))
@test_throws ArgumentError get_device(ps_mixed)
@test_throws ArgumentError get_device_type(ps_mixed)
@testset "get_device_type compile constant" begin
x = rand(10, 10) |> device
ps = (; weight=x, bias=x, d=(x, x))
return_val(x) = Val(get_device_type(x)) # If it is a compile time constant then type inference will work
@test @inferred(return_val(ps)) isa Val{parameterless_type(typeof(device))}
return_val2(x) = Val(get_device(x))
@test @inferred(return_val2(ps)) isa Val{get_device(x)}
end
end
@testset "Wrapper Arrays" begin
if LuxDeviceUtils.functional(LuxMetalDevice)
x = rand(Float32, 10, 10) |> LuxMetalDevice()
@test get_device(x) isa LuxMetalDevice
@test get_device_type(x) <: LuxMetalDevice
x_view = view(x, 1:5, 1:5)
@test get_device(x_view) isa LuxMetalDevice
@test get_device_type(x_view) <: LuxMetalDevice
end
end
@testset "setdevice!" begin
if LuxDeviceUtils.functional(LuxMetalDevice)
@test_logs (:warn,
"Support for Multi Device Metal hasn't been implemented yet. Ignoring the device setting.") LuxDeviceUtils.set_device!(
LuxMetalDevice, nothing, 1)
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 5417 | using Adapt, LuxDeviceUtils, ComponentArrays, Random
using ArrayInterface: parameterless_type
using ChainRulesTestUtils: test_rrule
using ReverseDiff, Tracker, ForwardDiff
using SparseArrays, FillArrays, Zygote, RecursiveArrayTools
using LuxCore
@testset "https://github.com/LuxDL/LuxDeviceUtils.jl/issues/10 patch" begin
dev = LuxCPUDevice()
ps = (; weight=randn(10, 1), bias=randn(1))
ps_ca = ps |> ComponentArray
ps_ca_dev = ps_ca |> dev
@test ps_ca_dev isa ComponentArray
@test ps_ca_dev.weight == ps.weight
@test ps_ca_dev.bias == ps.bias
@test ps_ca_dev == (ps |> dev |> ComponentArray)
end
@testset "AD Types" begin
x = randn(Float32, 10)
x_rdiff = ReverseDiff.track(x)
@test get_device(x_rdiff) isa LuxCPUDevice
x_rdiff = ReverseDiff.track.(x)
@test get_device(x_rdiff) isa LuxCPUDevice
gdev = gpu_device()
x_tracker = Tracker.param(x)
@test get_device(x_tracker) isa LuxCPUDevice
x_tracker = Tracker.param.(x)
@test get_device(x_tracker) isa LuxCPUDevice
x_tracker_dev = Tracker.param(x) |> gdev
@test get_device(x_tracker_dev) isa parameterless_type(typeof(gdev))
x_tracker_dev = Tracker.param.(x) |> gdev
@test get_device(x_tracker_dev) isa parameterless_type(typeof(gdev))
x_fdiff = ForwardDiff.Dual.(x)
@test get_device(x_fdiff) isa LuxCPUDevice
x_fdiff_dev = ForwardDiff.Dual.(x) |> gdev
@test get_device(x_fdiff_dev) isa parameterless_type(typeof(gdev))
end
@testset "CRC Tests" begin
dev = cpu_device() # Other devices don't work with FiniteDifferences.jl
test_rrule(Adapt.adapt_storage, dev, randn(Float64, 10); check_inferred=true)
gdev = gpu_device()
if !(gdev isa LuxMetalDevice) # On intel devices causes problems
x = randn(10)
∂dev, ∂x = Zygote.gradient(sum ∘ Adapt.adapt_storage, gdev, x)
@test ∂dev === nothing
@test ∂x ≈ ones(10)
x = randn(10) |> gdev
∂dev, ∂x = Zygote.gradient(sum ∘ Adapt.adapt_storage, cpu_device(), x)
@test ∂dev === nothing
@test ∂x ≈ gdev(ones(10))
@test get_device(∂x) isa parameterless_type(typeof(gdev))
end
end
# The following just test for noops
@testset "NoOps CPU" begin
cdev = cpu_device()
@test cdev(sprand(10, 10, 0.9)) isa SparseMatrixCSC
@test cdev(1:10) isa AbstractRange
@test cdev(Zygote.OneElement(2.0f0, (2, 3), (1:3, 1:4))) isa Zygote.OneElement
end
@testset "RecursiveArrayTools" begin
gdev = gpu_device()
diffeqarray = DiffEqArray([rand(10) for _ in 1:10], rand(10))
@test get_device(diffeqarray) isa LuxCPUDevice
diffeqarray_dev = diffeqarray |> gdev
@test get_device(diffeqarray_dev) isa parameterless_type(typeof(gdev))
vecarray = VectorOfArray([rand(10) for _ in 1:10])
@test get_device(vecarray) isa LuxCPUDevice
vecarray_dev = vecarray |> gdev
@test get_device(vecarray_dev) isa parameterless_type(typeof(gdev))
end
@testset "CPU default rng" begin
@test default_device_rng(LuxCPUDevice()) isa Random.TaskLocalRNG
end
@testset "CPU setdevice!" begin
@test_logs (:warn,
"Setting device for `LuxCPUDevice` doesn't make sense. Ignoring the device setting.") LuxDeviceUtils.set_device!(
LuxCPUDevice, nothing, 1)
end
@testset "get_device on Arrays" begin
x = rand(10, 10)
x_view = view(x, 1:5, 1:5)
@test get_device(x) isa LuxCPUDevice
@test get_device(x_view) isa LuxCPUDevice
struct MyArrayType <: AbstractArray{Float32, 2}
data::Array{Float32, 2}
end
x_custom = MyArrayType(rand(10, 10))
@test get_device(x_custom) isa LuxCPUDevice
end
@testset "loaded and functional" begin
@test LuxDeviceUtils.loaded(LuxCPUDevice)
@test LuxDeviceUtils.functional(LuxCPUDevice)
end
@testset "writing to preferences" begin
@test_logs (:info,
"Deleted the local preference for `gpu_backend`. Restart Julia to use the new backend.") gpu_backend!()
for backend in (:CUDA, :AMDGPU, :oneAPI, :Metal, LuxAMDGPUDevice(),
LuxCUDADevice(), LuxMetalDevice(), LuxoneAPIDevice())
backend_name = backend isa Symbol ? string(backend) :
LuxDeviceUtils._get_device_name(backend)
@test_logs (:info,
"GPU backend has been set to $(backend_name). Restart Julia to use the new backend.") gpu_backend!(backend)
end
gpu_backend!(:CUDA)
@test_logs (:info, "GPU backend is already set to CUDA. No action is required.") gpu_backend!(:CUDA)
@test_throws ArgumentError gpu_backend!("my_backend")
end
@testset "LuxCore warnings" begin
struct MyCustomLayer <: LuxCore.AbstractExplicitContainerLayer{(:layer,)}
layer::Any
end
my_layer = MyCustomLayer(rand(10, 10))
dev = cpu_device()
@test_logs (
:warn, "Lux layers are stateless and hence don't participate in device \
transfers. Apply this function on the parameters and states generated \
using `Lux.setup`.") dev(my_layer)
end
@testset "get_device_type compile constant" begin
x = rand(10, 10)
ps = (; weight=x, bias=x, d=(x, x))
return_val(x) = Val(get_device_type(x)) # If it is a compile time constant then type inference will work
@test @inferred(return_val(ps)) isa Val{typeof(cpu_device())}
return_val2(x) = Val(get_device(x))
@test @inferred(return_val2(ps)) isa Val{cpu_device()}
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 4475 | using LuxDeviceUtils, Random, Test
using ArrayInterface: parameterless_type
@testset "CPU Fallback" begin
@test !LuxDeviceUtils.functional(LuxoneAPIDevice)
@test cpu_device() isa LuxCPUDevice
@test gpu_device() isa LuxCPUDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
@test_throws Exception default_device_rng(LuxoneAPIDevice())
end
using oneAPI
@testset "Loaded Trigger Package" begin
@test LuxDeviceUtils.GPU_DEVICE[] === nothing
if LuxDeviceUtils.functional(LuxoneAPIDevice)
@info "oneAPI is functional"
@test gpu_device() isa LuxoneAPIDevice
@test gpu_device(; force_gpu_usage=true) isa LuxoneAPIDevice
else
@info "oneAPI is NOT functional"
@test gpu_device() isa LuxoneAPIDevice
@test_throws LuxDeviceUtils.LuxDeviceSelectionException gpu_device(;
force_gpu_usage=true)
end
@test LuxDeviceUtils.GPU_DEVICE[] !== nothing
end
using FillArrays, Zygote # Extensions
@testset "Data Transfer" begin
ps = (a=(c=zeros(10, 1), d=1), b=ones(10, 1), e=:c,
d="string", mixed=[2.0f0, 3.0, ones(2, 3)], # mixed array types
range=1:10,
rng_default=Random.default_rng(), rng=MersenneTwister(),
one_elem=Zygote.OneElement(2.0f0, (2, 3), (1:3, 1:4)), farray=Fill(1.0f0, (2, 3)))
device = gpu_device()
aType = LuxDeviceUtils.functional(LuxoneAPIDevice) ? oneArray : Array
rngType = LuxDeviceUtils.functional(LuxoneAPIDevice) ? oneAPI.GPUArrays.RNG :
Random.AbstractRNG
ps_xpu = ps |> device
@test get_device(ps_xpu) isa LuxoneAPIDevice
@test get_device_type(ps_xpu) <: LuxoneAPIDevice
@test ps_xpu.a.c isa aType
@test ps_xpu.b isa aType
@test ps_xpu.a.d == ps.a.d
@test ps_xpu.mixed isa Vector
@test ps_xpu.mixed[1] isa Float32
@test ps_xpu.mixed[2] isa Float64
@test ps_xpu.mixed[3] isa aType
@test ps_xpu.range isa aType
@test ps_xpu.e == ps.e
@test ps_xpu.d == ps.d
@test ps_xpu.rng_default isa rngType
@test ps_xpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxoneAPIDevice)
@test ps_xpu.one_elem isa oneArray
@test ps_xpu.farray isa oneArray
else
@test ps_xpu.one_elem isa Zygote.OneElement
@test ps_xpu.farray isa Fill
end
ps_cpu = ps_xpu |> cpu_device()
@test get_device(ps_cpu) isa LuxCPUDevice
@test get_device_type(ps_cpu) <: LuxCPUDevice
@test ps_cpu.a.c isa Array
@test ps_cpu.b isa Array
@test ps_cpu.a.c == ps.a.c
@test ps_cpu.b == ps.b
@test ps_cpu.a.d == ps.a.d
@test ps_cpu.mixed isa Vector
@test ps_cpu.mixed[1] isa Float32
@test ps_cpu.mixed[2] isa Float64
@test ps_cpu.mixed[3] isa Array
@test ps_cpu.range isa Array
@test ps_cpu.e == ps.e
@test ps_cpu.d == ps.d
@test ps_cpu.rng_default isa Random.TaskLocalRNG
@test ps_cpu.rng == ps.rng
if LuxDeviceUtils.functional(LuxoneAPIDevice)
@test ps_cpu.one_elem isa Array
@test ps_cpu.farray isa Array
else
@test ps_cpu.one_elem isa Zygote.OneElement
@test ps_cpu.farray isa Fill
end
ps_mixed = (; a=rand(2), b=device(rand(2)))
@test_throws ArgumentError get_device(ps_mixed)
@test_throws ArgumentError get_device_type(ps_mixed)
@testset "get_device_type compile constant" begin
x = rand(10, 10) |> device
ps = (; weight=x, bias=x, d=(x, x))
return_val(x) = Val(get_device_type(x)) # If it is a compile time constant then type inference will work
@test @inferred(return_val(ps)) isa Val{parameterless_type(typeof(device))}
return_val2(x) = Val(get_device(x))
@test @inferred(return_val2(ps)) isa Val{get_device(x)}
end
end
@testset "Wrapper Arrays" begin
if LuxDeviceUtils.functional(LuxoneAPIDevice)
x = rand(10, 10) |> LuxoneAPIDevice()
@test get_device(x) isa LuxoneAPIDevice
@test get_device_type(x) <: LuxoneAPIDevice
x_view = view(x, 1:5, 1:5)
@test get_device(x_view) isa LuxoneAPIDevice
@test get_device_type(x_view) <: LuxoneAPIDevice
end
end
@testset "setdevice!" begin
if LuxDeviceUtils.functional(LuxoneAPIDevice)
@test_logs (:warn,
"Support for Multi Device oneAPI hasn't been implemented yet. Ignoring the device setting.") LuxDeviceUtils.set_device!(
LuxoneAPIDevice, nothing, 1)
end
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 800 | using Aqua, ExplicitImports, LuxDeviceUtils, Test
@testset "Aqua Tests" begin
Aqua.test_all(LuxDeviceUtils)
end
import FillArrays, RecursiveArrayTools, SparseArrays, Zygote
@testset "Explicit Imports" begin
@test check_no_implicit_imports(LuxDeviceUtils) === nothing
@test check_no_stale_explicit_imports(LuxDeviceUtils) === nothing
@test check_no_self_qualified_accesses(LuxDeviceUtils) === nothing
@test check_all_explicit_imports_via_owners(LuxDeviceUtils) === nothing
@test check_all_qualified_accesses_via_owners(LuxDeviceUtils) === nothing
@test_broken check_all_explicit_imports_are_public(LuxDeviceUtils) === nothing # mostly upstream problems
@test_broken check_all_qualified_accesses_are_public(LuxDeviceUtils) === nothing # mostly upstream problem
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | code | 1284 | import Pkg
using SafeTestsets, Test
const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "NONE"))
const EXTRA_PKGS = String[]
(BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") && push!(EXTRA_PKGS, "LuxCUDA")
(BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") && push!(EXTRA_PKGS, "AMDGPU")
(BACKEND_GROUP == "all" || BACKEND_GROUP == "oneapi") && push!(EXTRA_PKGS, "oneAPI")
(BACKEND_GROUP == "all" || BACKEND_GROUP == "metal") && push!(EXTRA_PKGS, "Metal")
if !isempty(EXTRA_PKGS)
@info "Installing Extra Packages for testing" EXTRA_PKGS=EXTRA_PKGS
Pkg.add(EXTRA_PKGS)
Pkg.update()
Base.retry_load_extensions()
Pkg.instantiate()
end
@testset "LuxDeviceUtils Tests" begin
file_names = BACKEND_GROUP == "all" ?
["cuda_tests.jl", "amdgpu_tests.jl", "metal_tests.jl", "oneapi_tests.jl"] :
(BACKEND_GROUP == "cpu" ? [] : [BACKEND_GROUP * "_tests.jl"])
@testset "$(file_name)" for file_name in file_names
run(`$(Base.julia_cmd()) --color=yes --project=$(dirname(Pkg.project().path))
--startup-file=no --code-coverage=user $(@__DIR__)/$file_name`)
Test.@test true
end
@safetestset "Misc Tests" include("misc_tests.jl")
@safetestset "QA Tests" include("qa_tests.jl")
end
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.27 | 494db99a113c3a5a0b637d788a65494732f4558e | docs | 1877 | # LuxDeviceUtils
[](https://julialang.zulipchat.com/#narrow/stream/machine-learning)
[](https://lux.csail.mit.edu/dev/api/Accelerator_Support/LuxDeviceUtils)
[](https://lux.csail.mit.edu/stable/api/Accelerator_Support/LuxDeviceUtils)
[](https://github.com/LuxDL/LuxDeviceUtils.jl/actions/workflows/CI.yml)
[](https://buildkite.com/julialang/luxdeviceutils-dot-jl)
[](https://codecov.io/gh/LuxDL/LuxDeviceUtils.jl)
[](https://github.com/JuliaTesting/Aqua.jl)
[](https://github.com/SciML/ColPrac)
[](https://github.com/SciML/SciMLStyle)
`LuxDeviceUtils.jl` is a lightweight package defining rules for transferring data across
devices. Most users should directly use [Lux.jl](https://lux.csail.mit.edu/) instead.
Currently we provide support for the following backends:
1. `CUDA.jl` for NVIDIA GPUs.
2. `AMDGPU.jl` for AMD ROCM GPUs.
3. `Metal.jl` for Apple Metal GPUs. **(Experimental)**
4. `oneAPI.jl` for Intel GPUs. **(Experimental)**
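
A minimal usage sketch (assuming `LuxCUDA.jl` is installed and a CUDA GPU is functional;
the same pattern applies to the other backends):

```julia
using LuxDeviceUtils, LuxCUDA

gdev = gpu_device()                # selects the functional backend (CUDA here)
ps = (weight = rand(Float32, 4, 4), bias = zeros(Float32, 4))
ps_gpu = gdev(ps)                  # arrays are now `CuArray`s
ps_cpu = cpu_device()(ps_gpu)      # back to plain `Array`s on the CPU
```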
| LuxDeviceUtils | https://github.com/LuxDL/MLDataDevices.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 527 | using Documenter
using AnimalBehavior
push!(LOAD_PATH,"../src/")
makedocs(sitename="AnimalBehavior.jl Documentation",
pages = [
"Index" => "index.md",
],
format = Documenter.HTML(prettyurls = false)
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
repo = "github.com/sqwayer/AnimalBehavior.jl.git",
devbranch = "master",
branch = "gh-pages"
)
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1123 | """
AnimalBehavior
"""
module AnimalBehavior
using Turing, StructArrays, Distributions, Random, PrettyTables
using StatsFuns: softmax!, logsumexp
using ForwardDiff: ForwardDiff
using DataFrames: DataFrames
import Base: rand, convert, show
import Turing: sample, loglikelihood, dic
# Models
include("models/evolutions.jl")
include("models/observations.jl")
include("models/macros.jl")
# Inference
include("inference/check_types.jl")
include("inference/sampling.jl")
include("inference/Posterior.jl")
include("inference/criteria.jl")
include("inference/posterior_sampling.jl")
include("inference/summarystats.jl")
# Simulation
include("simulation/simulate.jl")
include("simulation/interface.jl")
include("simulation/Simulation.jl")
export @evolution,
@observation,
@model, # from Turing
sample, # from Turing
posterior,
sample_hyperparams,
sample_latent,
expectation,
dic,
waic,
bic,
simulate,
delta_rule!,
epsilon_argmax,
epsilon_greedy!,
softmax!, # from StatsFuns
ucb!
end
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 2408 | struct Posterior{Tp, Tl}
name::Symbol # Model name
params_names::Vector{Symbol} # Names of hyperparameters
latent_names::Vector{Symbol} # Names of latent variables
hyperparameters::Tp # Posterior distribution of hyperparameters
latent::Tl # Posterior distribution of latent variables
loglikelihood_func::Function # Loglikelihood function
nsamples::Int # Number of posterior samples
nparams::Int # Number of hyperparameters
ndata::Int # Number of data points
Posterior(name, pnames, lnames, hp::Tp, latent::Tl, l_fun, ns, np, nd) where {Tp, Tl} = new{Tp, Tl}(
name,
pnames,
lnames,
hp,
latent,
l_fun,
ns,
np,
nd
)
end
# Constructor from chains
function posterior(mdl, chn::Chains, data)
npc, _, nc = size(chn)
nsmp = npc * nc
ndata = data_size(data)
# Extract parameters
params_posterior, latent_posterior = extract_posterior_samples(mdl, chn)
params_names = collect(keys(params_posterior[1]))
latent_names = collect(keys(latent_posterior[1]))
# Compute loglikelihoods for each sample at each data point
l_fun(x) = loglikelihood(mdl, x, data)
return Posterior(mdl.name,
params_names,
latent_names,
params_posterior,
latent_posterior,
l_fun,
nsmp,
length(params_names),
ndata
)
end
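# Illustrative workflow (assuming `mdl` was built with `@model` and the `@evolution` /
# `@observation` macros, and `data` is a `StructVector` of observations with fields
# `s`, `a`, `r`; the sampler choice is arbitrary):
#
#   chn = sample(mdl, data, NUTS(), 1_000)
#   post = posterior(mdl, chn, data)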
# Show
function Base.show(io::IO, ::MIME"text/plain", P::AnimalBehavior.Posterior)
table_conf = set_pt_conf(tf = tf_markdown, alignment = :c)
println(io, "Posterior probability for ", P.name, " with $(P.nsamples) samples, from $(P.ndata) data points.")
println(io)
pretty_table_with_conf(table_conf,
collect(values(expectation(P).hyperparameters))';
header = P.params_names,
title = "Expected hyperparameters values")
println(io)
DIC, pD = dic(P)
WAIC, pW = waic(P)
fit_header = ["", "Goodness of fit", "Complexity"]
fit_vals = ["DIC" DIC pD;
"WAIC" WAIC pW;
"BIC" bic(P) P.nparams * log(P.ndata)]
pretty_table_with_conf(table_conf, fit_vals; header=fit_header)
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 190 | check_singleton_type(x) = x
check_singleton_type(x::T) where T <:ForwardDiff.Dual = x.value
function check_tuple_types(X)
return NamedTuple{keys(X)}(check_singleton_type.(values(X)))
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1619 | ## Log-likelihood
# Unique session
function loglikelihood(mdl::Tm, latent::NT, data::StructVector) where {Tm <: AbstractMCMC.AbstractModel, NT <: NamedTuple}
θ = deepcopy(latent)
P = [cycle!(θ, mdl, obs) for obs in data]
L = logpdf.(P, data.a)
return L
end
# Multiple sessions
function loglikelihood(mdl::Tm, latent::NT, data::Array{Ts}) where {Tm <: AbstractMCMC.AbstractModel, NT <: NamedTuple, Ts <: StructVector}
nsess = length(data)
L = Float64[]
for sess = 1:nsess
θ = deepcopy(latent) # Re-initialize the latent variables
P = [cycle!(θ, mdl, obs) for obs in data[sess]]
L = append!(L, logpdf.(P, data[sess].a))
end
return L
end
## DIC
# Number of effective parameters
neff_params(::Val{:pD}, D_samples, D_avg) = mean(D_samples) - D_avg
neff_params(::Val{:pV}, D_samples, _) = 0.5*var(D_samples)
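# Deviance D(θ) = -2 log p(data | θ): `:pD` (Spiegelhalter et al.) is the mean posterior deviance
# minus the deviance at the posterior average, `:pV` (Gelman) is half the posterior variance of
# the deviance; dic(P) below returns D(θ̄) + 2p together with the effective number of parameters p.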
function dic(P::Posterior, pDIC = :pD)
latent_avg = average(P.latent)
lp_avg = sum(P.loglikelihood_func(latent_avg))
lp_samples = sum.(P.loglikelihood_func.(P.latent))
D_samples = -2 .* lp_samples
D_avg = -2 * lp_avg
pD = neff_params(Val(pDIC), D_samples, D_avg)
return D_avg + 2 * pD, pD
end
## WAIC
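# WAIC = -2 * (lppd - pWAIC): lppd sums, over data points, the log of the posterior-averaged
# pointwise likelihood, and pWAIC sums the posterior variances of the pointwise log-likelihoods.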
function waic(P::Posterior)
lp_y = hcat([P.loglikelihood_func(P.latent[i]) for i = 1:P.nsamples]...)
N, K = size(lp_y)
lppd = sum(logsumexp(lp_y, dims=2)) - N*log(K)
pW = sum(var(lp_y, dims=2))
return -2 * lppd + 2 * pW, pW
end
## BIC
function bic(P::Posterior)
latent_avg = average(P.latent)
lp_avg = sum(P.loglikelihood_func(latent_avg))
return - 2 * lp_avg + P.nparams * log(P.ndata)
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1353 | # Extract hyperparameters and latent variables from chains
function extract_posterior_samples(mdl, chn::Chains)
# Extract parameters
chains_params = Turing.MCMCChains.get_sections(chn, :parameters)
vals = [vec(chains_params[n].data) for n in names(chains_params)]
nt = NamedTuple{Tuple(names(chains_params))}(Tuple(vals))
params_posterior = StructVector(nt)
# Extract latent variables
latent_posterior = StructVector(vec(generated_quantities(mdl, chains_params)))
return params_posterior, latent_posterior
end
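# Note: generated_quantities re-evaluates the model's return value for every draw in the chain,
# which is how the per-sample NamedTuples of latent variables are recovered here.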
# Sampling from the posterior
sample_hyperparams(rng::AbstractRNG, post::Posterior, n::Int) = rand(rng, post.hyperparameters, n)
sample_hyperparams(post::Posterior, n::Int) = rand(post.hyperparameters, n)
sample_latent(rng::AbstractRNG, post::Posterior, n::Int) = rand(rng, post.latent, n)
sample_latent(post::Posterior, n::Int) = rand(post.latent, n)
function sample(rng::AbstractRNG, post::Posterior, n::Int)
idx = rand(rng, 1:post.nsamples, n)
hp = Vector{eltype(post.hyperparameters)}(undef, n)
l = Vector{eltype(post.latent)}(undef, n)
for i in eachindex(idx)
hp[i] = post.hyperparameters[idx[i]]
l[i] = post.latent[idx[i]]
end
return (hyperparameters = hp,
latent = l)
end
sample(post::Posterior, n::Int) = sample(Random.default_rng(), post, n) | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1564 | function cycle!(θ, mdl, obs)
# action
P = AnimalBehavior.observ(mdl, obs.s; θ...)
# update
AnimalBehavior.evol!(mdl, obs.s, obs.a, obs.r; θ...)
return P
end
# Sample for a single session
function sample(mdl::Tm, data::StructVector, args...; kwargs...) where Tm <: AbstractMCMC.AbstractModel
sample(Random.default_rng(), mdl, data, args...; kwargs...)
end
function sample(rng::AbstractRNG, mdl::Tm, data::StructVector, args...; kwargs...) where Tm <: AbstractMCMC.AbstractModel
@model model(A) = begin
θ = @submodel mdl
θ = check_tuple_types(θ)
P = [cycle!(θ, mdl, obs) for obs in data]
A ~ arraydist(P)
return
end
return sample(rng, model(data.a), args...; kwargs...)
end
# Sample for multiple sessions
function sample(mdl::Tm, data::Array{Ts}, args...; kwargs...) where {Tm <: AbstractMCMC.AbstractModel, Ts <: StructVector}
sample(Random.default_rng(), mdl, data, args...; kwargs...)
end
function sample(rng::AbstractRNG, mdl::Tm, data::Array{Ts}, args...; kwargs...) where {Tm <: AbstractMCMC.AbstractModel, Ts <: StructVector}
nsess = length(data)
@model model(A) = begin
θ = @submodel mdl
θ = check_tuple_types(θ)
for sess in 1:nsess
sessdat = data[sess]
θ_init = deepcopy(θ)
P = [cycle!(θ_init, mdl, obs) for obs in sessdat]
A[sess] ~ arraydist(P)
end
return
end
actions = [sessdat.a for sessdat in data]
return sample(rng, model(actions), args...; kwargs...)
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 842 | # Fast mode
fast_mode(X::Vector{T}) where T <: Integer = fast_mode(X, minimum(X):maximum(X))
function fast_mode(X, range)
m, cm = (0, 0)
for i in range
c = count(isequal(i), X)
if c > cm
cm = c
m = i
end
end
return m
end
# Summary stats
function average(V::Vector{A}) where A <: AbstractArray
N = length(V)
M = copy(V[1]) # copy so the first posterior sample is not mutated in place
for i in 2:N
M .+= V[i]
end
M ./= N
return M
end
average(V::Vector{T}) where T <: AbstractFloat = mean(V)
average(V::Vector{T}) where T <: Integer = fast_mode(V)
function average(SV::StructVector{TV}) where TV
return TV(average.(values(StructArrays.components(SV))))
end
# Posterior expectation
function expectation(P::Posterior)
return (hyperparameters = average(P.hyperparameters),
latent = average(P.latent))
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 297 | ## Delta rule
function delta_rule!(Q, s::Int, a::Int, r, α)
Q[a,s] += α*(r - Q[a,s])
end
function delta_rule!(Q, a::Int, r::T, α) where T <: Real
Q[a] += α*(r - Q[a])
end
function delta_rule!(Q, s::Int, r::T, α) where T <: AbstractArray
q = @view(Q[:,s])
@. q += α * (r - q)
end
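# Illustrative check of the update: with Q = zeros(2, 2), delta_rule!(Q, 1, 2, 1.0, 0.1)
# moves Q[2, 1] from 0.0 to 0.1, i.e. a fraction α of the prediction error r - Q[a, s].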
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1004 | function _evolution(mdl, body)
nt = Base.return_types(mdl, ())[1]
mdl_kwargs = fieldnames(nt)
mdl_type = typeof(mdl)
callex = Expr(:call, :(AnimalBehavior.evol!), Expr(:parameters, mdl_kwargs...), :(M::T), :s, :a, :r)
whereex = Expr(:where, callex, :(T<:$mdl_type))
ex = Expr(:(=), whereex, body)
return ex
end
macro evolution(mdl, expr)
body = QuoteNode(expr)
return esc(quote
eval(AnimalBehavior._evolution($mdl, $body))
end)
end
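# The macro above defines a method AnimalBehavior.evol!(M, s, a, r; <latent fields>...) for the
# concrete model type: the keyword arguments are the fields of the NamedTuple returned by the
# model and the quoted block becomes the method body (see models_test.jl for a usage example).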
function evol! end
function _observation(mdl, body)
nt = Base.return_types(mdl, ())[1]
mdl_kwargs = fieldnames(nt)
mdl_type = typeof(mdl)
callex = Expr(:call, :(AnimalBehavior.observ), Expr(:parameters, mdl_kwargs...), :(M::T), :s)
whereex = Expr(:where, callex, :(T<:$mdl_type))
ex = Expr(:(=), whereex, body)
return ex
end
macro observation(mdl, expr)
body = QuoteNode(expr)
return esc(quote
eval(AnimalBehavior._observation($mdl, $body))
end)
end
function observ end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 474 | """ Example usage :
@observation MyModel begin
Categorical(epsilon_argmax(ucb!(Q, U, c), ϵ))
end
or
@observation MyModel begin
Categorical(epsilon_greedy!(softmax!(ucb!(Q, U, c)), ϵ))
end
"""
function epsilon_greedy!(P, ϵ)
N = length(P)
P .*= 1 - ϵ
P .+= ϵ/(N)
return P
end
function epsilon_argmax(Q::Vector{T}, ϵ) where T
P = zero(Q)
P[argmax(Q)] = one(T)
return epsilon_greedy!(P, ϵ)
end
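# Illustrative values (assuming Float64 inputs): epsilon_argmax([0.2, 0.8], 0.1) ≈ [0.05, 0.95],
# i.e. the greedy choice smoothed by an ϵ chance of acting uniformly at random.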
function ucb!(Q, U, c)
Q .+= c * sqrt.(U)
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 969 | struct Simulation{Td, Tl}
name::Symbol
data::Td
latent::Tl
end
# Base functions
function Base.convert(::Type{DataFrames.DataFrame}, S::AnimalBehavior.Simulation)
df = hcat(unpack(S.data), unpack(S.latent))
return df
end
function Base.show(io::IO, mime::MIME"text/plain", S::AnimalBehavior.Simulation)
table_conf = set_pt_conf(tf = tf_markdown, alignment = :c)
println(io, "Simulation of one $(S.name) agent")
println(io)
pretty_table_with_conf(table_conf,
collect(values(S.latent[1]))';
header=collect(keys(S.latent[1])),
title="Initial latent variables")
println(io)
vals = hcat(collect(StructArrays.components(S.data))..., collect(StructArrays.components(S.latent))...)
header = vcat(["State", "Action", "Feedback", "Hidden"], keys(S.latent[1])...)
pretty_table_with_conf(table_conf,
vals;
header=header,
title="Simulation of $(length(S.data)) trials")
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 1759 |
data_size(data::Array{Ts}) where Ts <: StructVector = sum(length.(data))
data_size(data::Ts) where Ts <: StructVector = length(data)
# Unpacking vectors of arrays into dataframes
function all_indices_comb(A::AbstractMatrix)
m, n = size(A)
# enumerate indices in column-major order so that the labels match the layout produced by vec
M = repeat(1:m, outer=n)
N = repeat(1:n, inner=m)
v = [(i,j) for (i,j) in zip(M,N)]
return v
end
function unpack(V::Vector{T}, name) where T <: Number
DataFrames.DataFrame(reshape(V, length(V), 1), [name])
end
function unpack(V::Vector{T}, name) where T <: AbstractVector
df = DataFrames.DataFrame([V], [name])
return DataFrames.select(df, name => DataFrames.ByRow(vec) => [Symbol(name,"[$i]") for i in eachindex(V[1])])
end
function unpack(V::Vector{T}, name) where T <: AbstractMatrix
all_indices = all_indices_comb(V[1])
df = DataFrames.DataFrame([V], [name])
return DataFrames.select(df, name => DataFrames.ByRow(vec) => [Symbol(name,"[$i, $j]") for (i,j) in all_indices])
end
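# Example of the resulting layout: a 2×2 field :Values is flattened column-major (matching vec)
# into columns Values[1, 1], Values[2, 1], Values[1, 2], Values[2, 2].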
function unpack(S::StructVector{T}) where T <: NamedTuple
tmp = DataFrames.DataFrame(S)
df = DataFrames.DataFrame()
for n in keys(S[1])
df = hcat(df, unpack(tmp[!,n], n))
end
return df
end
# Convert a DataFrame into a StructVector with named fields s, a, r and h
repack(df::DataFrames.DataFrame, val) = fill(val, DataFrames.nrow(df))
repack(df::DataFrames.DataFrame, name::Symbol) = Vector(df[!,name])
repack(df::DataFrames.DataFrame, names::Vector{Symbol}) = [(;df[!,names][i,:]...) for i in 1:DataFrames.nrow(df)]
function build_history(df::DataFrames.DataFrame; states=missing, actions, feedbacks=missing, hidden=missing)
return StructVector(s = repack(df, states),
a = repack(df, actions),
r = repack(df, feedbacks),
h = repack(df, hidden))
end | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 2246 | """
    simulate(mdl; kwargs...)
    simulate(mdl, data::StructVector; feedback, init_θ)
Simulate behavior from a model defined with `@evolution` and `@observation`, and return a `Simulation`.
# Arguments
- `data`: optional observed history whose states and hidden variables are replayed while actions are re-simulated.
- `state_transition`, `hidden_transition`, `feedback`: functions of the history returning the next state, hidden variable and feedback.
- `initial_state`, `initial_hidden`, `init_θ`: initial state, hidden variable and latent variables (`init_θ` defaults to a prior draw `mdl()`).
- `ending_condition`: function of the history; the simulation stops when it returns `true`.
"""
function simulate(mdl, data::StructVector; feedback = x -> missing, init_θ=mdl())
initial_state = data.s[1]
initial_hidden = data.h[1]
state_transition(history) = data.s[min(length(history)+1, length(data))]
hidden_transition(history) = data.h[min(length(history)+1, length(data))]
ending_condition(history) = length(history) > length(data)
return simulate(mdl;
state_transition = state_transition,
hidden_transition = hidden_transition,
feedback = feedback,
initial_state = initial_state,
initial_hidden = initial_hidden,
ending_condition = ending_condition,
init_θ = init_θ)
end
function simulate(mdl;
state_transition = x -> 1,
feedback = x -> missing,
hidden_transition = x -> missing,
initial_state = 1,
initial_hidden = missing,
ending_condition = x -> length(x) > 100,
init_θ = mdl())
# Initialize
θ = init_θ
P_ = AnimalBehavior.observ(mdl, initial_state; θ...)
a_type = Distributions.eltype(typeof(P_))
r_type = Base.return_types(feedback, (StructVector,))[1]
history = StructVector(s=[initial_state], a=Vector{Any}([missing]), r=Vector{Any}([missing]), h=Vector{Any}([initial_hidden]))
latent = StructVector([deepcopy(θ)])
while !ending_condition(history)
# current state
s = history[end].s
# action
P = AnimalBehavior.observ(mdl, s; θ...)
a = rand(P)
history.a[end] = a
# feedback
r = feedback(history)
history.r[end] = r
# update
AnimalBehavior.evol!(mdl, s, a, r; θ...)
# next state
ns = state_transition(history)
nh = hidden_transition(history)
push!(history, (s=ns, a=missing, r=missing, h=nh))
push!(latent, deepcopy(θ))
end
data = StructVector(s = history[1:end-1].s,
a = convert.(a_type,history[1:end-1].a),
r = convert.(r_type, history[1:end-1].r),
h = history[1:end-1].h)
return Simulation(mdl.name, data, latent[1:end-1])
end
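# Minimal usage sketch (assumes a model like the Qlearning example in models_test.jl, with its
# @evolution/@observation already declared; the reward rule is an arbitrary illustration):
#   sim = simulate(mdl1; feedback = h -> Float64(h[end].a == 1),
#                  ending_condition = h -> length(h) > 200)
#   df = convert(DataFrames.DataFrame, sim)   # flatten data and latent variables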
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 559 | @model Qlearning(na, ns) = begin
α ~ Beta()
β ~ Gamma(2,1)
return (α=α, β=β, Values = fill(1/na,na,ns))
end
mdl1 = Qlearning(2,1)
@evolution mdl1 begin
delta_rule!(Values, s, a, r, α)
end
@observation mdl1 begin
V = β .* @views(Values[:,s])
Categorical(softmax!(V))
end
θ = generated_quantities(mdl1, (α=0.2, β = 2.0))
@test θ == (α = 0.2, β = 2.0, Values = fill(0.5, 2, 1))
AnimalBehavior.evol!(mdl1, 1, 1, 1.0; θ... )
@test θ.Values[1] == 0.6
@test AnimalBehavior.observ(mdl1, 1; θ... ) == Categorical(softmax!([1.2, 1.0]))
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 246 | using AnimalBehavior
using Turing
using Test
println("Macros")
println("======")
include("models_test.jl")
println("Simulation")
println("==========")
include("simul_test.jl")
println("Inference")
println("=========")
include("sample_test.jl") | AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |
|
[
"MIT"
] | 0.1.1 | dbc8620e56f681939c75ccae58bf32d5f4d983cb | code | 155 | chn = sample(mdl1, sim.data, HMC(0.05, 10), MCMCThreads(), 1000, 2)
post = posterior(mdl1, chn, sim.data)
@test expectation(post).latent.α ≈ 0.5 atol=0.1
| AnimalBehavior | https://github.com/sqwayer/AnimalBehavior.jl.git |