licenses | version | tree_hash | path | type | size | text | package_name | repo |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 3687 | # TODO: maybe add some caching based on input
function _solve_pullback(solver, res, problem, x0, params)
(; lag_hess_rows, lag_hess_cols, parametric_lag_hess_vals) = problem.lag_hess_primals
(; lag_jac_rows, lag_jac_cols, parametric_lag_jac_vals) = problem.lag_jac_params
(; jac_rows, jac_cols, parametric_jac_vals) = problem.jac_primals
(; jac_p_rows, jac_p_cols, parametric_jac_p_vals) = problem.jac_params
(; n, num_equality) = problem
m = problem.num_equality + problem.num_inequality
l = size(params, 1)
(; primals, equality_duals, inequality_duals) = res
duals = [equality_duals; inequality_duals]
Qvals = zeros(size(lag_hess_rows, 1))
parametric_lag_hess_vals(Qvals, x0, params, primals, duals, 1.0, 1.0)
Q = sparse(lag_hess_rows, lag_hess_cols, Qvals, n, n)
Rvals = zeros(size(lag_jac_rows, 1))
parametric_lag_jac_vals(Rvals, x0, params, primals, duals, 1.0, 1.0)
R = sparse(lag_jac_rows, lag_jac_cols, Rvals, n, l)
Avals = zeros(size(jac_rows, 1))
parametric_jac_vals(Avals, x0, params, primals)
A = sparse(jac_rows, jac_cols, Avals, m, n)
Bvals = zeros(size(jac_p_rows, 1))
parametric_jac_p_vals(Bvals, x0, params, primals)
B = sparse(jac_p_rows, jac_p_cols, Bvals, m, l)
lower_active = duals .> 1e-3
lower_active[1:num_equality] .= 0
equality = zero(lower_active)
equality[1:num_equality] .= 1
active = lower_active .| equality
num_lower_active = sum(lower_active)
A_l_active = A[lower_active, :]
A_equality = A[equality, :]
B_l_active = B[lower_active, :]
B_equality = B[equality, :]
A_active = [A_equality; A_l_active]
B_active = [B_equality; B_l_active]
dual_inds = eachindex(duals)
lower_active_map = dual_inds[lower_active] .- num_equality
M = [
Q -A_active'
A_active 0I
]
N = [R; B_active]
MinvN = qr(-M) \ Matrix(N)
∂x∂y = MinvN[1:n, :]
∂duals∂y = spzeros(length(inequality_duals), length(params))
∂duals∂y[lower_active_map, :] .= let
lower_dual_range = (1:num_lower_active) .+ (n + num_equality)
MinvN[lower_dual_range, :]
end
(; ∂x∂y, ∂duals∂y)
end
function ChainRulesCore.rrule(::typeof(solve), solver, problem, x0, params; kwargs...)
res = solve(solver, problem, x0, params; kwargs...)
project_y = ProjectTo(params)
_back = _solve_pullback(solver, res, problem, x0, params)
function solve_pullback(∂res)
no_grad_args = (;
∂self = NoTangent(),
∂solver = NoTangent(),
∂problem = NoTangent(),
∂x0 = NoTangent(),
)
∂y = @thunk let
_back.∂x∂y' * ∂res.primals + _back.∂duals∂y' * ∂res.inequality_duals
end
no_grad_args..., project_y(∂y)
end
res, solve_pullback
end
function solve(
solver,
problem,
x0,
params::AbstractVector{<:ForwardDiff.Dual{T}};
kwargs...,
) where {T}
# strip off the duals:
params_v = ForwardDiff.value.(params)
params_d = ForwardDiff.partials.(params)
# forward pass
res = solve(solver, problem, x0, params_v; kwargs...)
# backward pass
_back = _solve_pullback(solver, res, problem, x0, params_v)
∂primals = _back.∂x∂y * params_d
∂inequality_duals = _back.∂duals∂y * params_d
# glue forward and backward pass together into dual number types
(;
primals = ForwardDiff.Dual{T}.(res.primals, ∂primals),
# we don't need these so I'm just creating a non-dual result size here
res.equality_duals,
inequality_duals = ForwardDiff.Dual{T}.(res.inequality_duals, ∂inequality_duals),
res.info,
)
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 4322 | """
A solver backend that casts the (potentially nonlinear and non-convex) trajectory optimization
problem as a mixed complementarity problem (MCP) and solves it via PATH.
The MCP is derived from the KKT conditions of the problem and takes the form
    find z
    s.t. lᵢ == zᵢ, Fᵢ(z) >= 0
         lᵢ < zᵢ < uᵢ, Fᵢ(z) == 0
         zᵢ == uᵢ, Fᵢ(z) <= 0
# Note
The PATH solver is not open source but provides a free license. Without setting a license key,
this backend only works for small problems. Please consult the documentation of
[PATHSolver.jl](https://github.com/chkwon/PATHSolver.jl) to learn about loading the license key.
"""
struct MCPSolver end
is_thread_safe(::MCPSolver) = false
function solve(
solver::MCPSolver,
problem,
x0,
params::AbstractVector{<:AbstractFloat};
initial_guess = nothing,
)
(; n, parametric_cost, parametric_cost_grad, parametric_cons, jac_primals, lag_hess_primals) =
problem
(; jac_rows, jac_cols, parametric_jac_vals) = jac_primals
(; lag_hess_rows, lag_hess_cols, parametric_lag_hess_vals) = lag_hess_primals
function F(n, z, f)
primals = z[1:(problem.n)]
duals = z[(problem.n + 1):end]
∇l = zeros(problem.n)
parametric_cost_grad(∇l, params, primals)
∇g = let
jac_vals = zeros(length(jac_rows))
parametric_jac_vals(jac_vals, x0, params, primals)
sparse(jac_rows, jac_cols, jac_vals)
end
f[1:(problem.n)] .= ∇l - ∇g' * duals
f[(problem.n + 1):end] .= let
g = zeros(problem.num_equality + problem.num_inequality)
parametric_cons(g, x0, params, primals)
g
end
Cint(0)
end
"""
J = [
Q -A'
A 0
]
nnz: number of non-zeros of the sparse J = nnz(Q) + 2nnz(A)
z: [primals; duals]
(col, len, row, data): coo format sparse array representation
"""
function J(n, nnz, z, col, len, row, data)
primals = z[1:(problem.n)]
duals = z[(problem.n + 1):end]
# Hessian of the Lagrangian
Q = let
lag_hess_vals = zeros(length(lag_hess_rows))
parametric_lag_hess_vals(lag_hess_vals, x0, params, primals, duals, 1, 1)
sparse(lag_hess_rows, lag_hess_cols, lag_hess_vals)
end
# Jacobian of the constraints
A = let
jac_vals = zeros(length(jac_rows))
parametric_jac_vals(jac_vals, x0, params, primals)
sparse(jac_rows, jac_cols, jac_vals)
end
J = [
Q -A'
A 0I
]
_coo_from_sparse!(col, len, row, data, J)
Cint(0)
end
lb = [
fill(-Inf, problem.n + problem.num_equality)
zeros(problem.num_inequality)
]
ub = fill(Inf, length(lb))
z = !isnothing(initial_guess) ? initial_guess : zero(lb)
# structural zeros: nnz(J) = nnz(Q) + 2*nnz(A)
nnz = length(lag_hess_rows) + 2 * length(jac_rows)
status, variables, info = PATHSolver.solve_mcp(F, J, lb, ub, z; silent = true, nnz)
if status === PATHSolver.MCP_UserInterrupt
throw(InterruptException())
elseif status != PATHSolver.MCP_Solved
@warn "MCP not cleanly solved. PATH solver status is $(status)"
end
(;
primals = variables[1:(problem.n)],
equality_duals = variables[((problem.n + 1):(problem.n + problem.num_equality))],
inequality_duals = variables[(problem.n + problem.num_equality + 1):end],
info = (; raw_solution = variables, status),
)
end
"""
Convert a Julia sparse array `M` into the \
[COO](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)) format required by PATH.
This implementation has been extracted from \
[here](https://github.com/chkwon/PATHSolver.jl/blob/8e63723e51833cdbab58c39b6646f8cdf79d74a2/src/C_API.jl#L646)
"""
function _coo_from_sparse!(col, len, row, data, M)
@assert length(col) == length(len) == size(M, 1)
@assert length(row) == length(data)
n = length(col)
for i in 1:n
col[i] = M.colptr[i]
len[i] = M.colptr[i + 1] - M.colptr[i]
end
for (i, v) in enumerate(SparseArrays.rowvals(M))
row[i] = v
end
for (i, v) in enumerate(SparseArrays.nonzeros(M))
data[i] = v
end
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 2886 | """
Solves the trajectory optimization problem as NLP using Ipopt.
# Note
This solver is mostly here for historic reasons to provide a fully open-source backend for NLPs.
For many problems the [`MCPSolver`](@ref) backend using PATH is *much* faster.
"""
struct NLPSolver end
is_thread_safe(::NLPSolver) = true
function solve(
solver::NLPSolver,
problem,
x0,
params::AbstractVector{<:AbstractFloat};
initial_guess = nothing,
)
(;
horizon,
n,
parametric_cost,
parametric_cost_grad,
parametric_cons,
jac_primals,
lag_hess_primals,
) = problem
(; jac_rows, jac_cols, parametric_jac_vals) = jac_primals
(; lag_hess_rows, lag_hess_cols, parametric_lag_hess_vals) = lag_hess_primals
wrapper_cost = function (primals)
parametric_cost(params, primals)
end
wrapper_cons = function (primals, cons)
parametric_cons(cons, x0, params, primals)
end
wrapper_cost_grad = function (primals, grad)
parametric_cost_grad(grad, params, primals)
nothing
end
wrapper_con_jac = function (primals, rows, cols, values)
if isnothing(values)
rows .= jac_rows
cols .= jac_cols
else
parametric_jac_vals(values, x0, params, primals)
end
nothing
end
wrapper_lag_hess = function (primals, rows, cols, α, λ, values)
if isnothing(values)
rows .= lag_hess_rows
cols .= lag_hess_cols
else
parametric_lag_hess_vals(
values,
x0,
params,
primals,
λ,
α,
# IPOPT has a flipped internal sign convention
-1.0,
)
end
nothing
end
lb = zeros(problem.num_equality + problem.num_inequality)
ub = fill(Inf, length(lb))
ub[1:(problem.num_equality)] .= lb[1:(problem.num_equality)]
prob = Ipopt.CreateIpoptProblem(
n,
fill(-Inf, n),
fill(Inf, n),
size(lb, 1),
lb,
ub,
size(jac_rows, 1),
size(lag_hess_rows, 1),
wrapper_cost,
wrapper_cons,
wrapper_cost_grad,
wrapper_con_jac,
wrapper_lag_hess,
)
xinit = zeros(n)
let xinit = reshape(xinit, length(x0), :)
for t in 1:horizon
xinit[:, t] = x0
end
end
prob.x = xinit
Ipopt.AddIpoptIntOption(prob, "print_level", 0)
status = Ipopt.IpoptSolve(prob)
if status != 0
@warn "MCP not cleanly solved. IPOPT status is $(status)."
end
(;
primals = prob.x,
equality_duals = -prob.mult_g[1:(problem.num_equality)],
inequality_duals = -prob.mult_g[(problem.num_equality + 1):end],
info = (; raw_solution = prob),
)
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 1918 | """
Optimizer(problem, solver)
Constructs an `Optimizer` for the given `problem` using the specified `solver` backend.
Supported backends are:
- [`QPSolver`](@ref)
- [`NLPSolver`](@ref)
- [`MCPSolver`](@ref)
Please consult their documentation for further information.
# Example
```@example running_example
solver = QPSolver()
optimizer = Optimizer(problem, solver)
```
"""
struct Optimizer{TP<:ParametricTrajectoryOptimizationProblem,TS}
problem::TP
solver::TS
end
is_thread_safe(optimizer::Optimizer) = is_thread_safe(optimizer.solver)
parameter_dimension(optimizer::Optimizer) = parameter_dimension(optimizer.problem)
"""
optimizer(x0, params)
Generates an optimal trajectory starting from `x0` according to the optimization problem
parameterized by `params`. This call is differentiable in `params`.
The output of this function is laid out as `(; xs, us, λs, info)` with
- `xs::Vector{<:Vector}`: Vector over time of vector-valued states.
- `us::Vector{<:Vector}`: Vector over time of vector-valued inputs.
- `λs::Vector`: Vector of scalar inequality-constraint multipliers. \
By our sign convention, all inequality duals are non-negative.
- `info::NamedTuple`: Additional "low-level" information. \
Note that this `info` output field is not differentiable!
# Example
```@example running_example
x0 = zeros(4)
params = zeros(20)
solution = optimizer(x0, params)
```
"""
function (optimizer::Optimizer)(x0, params; initial_guess = nothing)
@assert length(x0) == optimizer.problem.state_dim
sol = solve(optimizer.solver, optimizer.problem, x0, params; initial_guess)
(; horizon, state_dim, control_dim) = optimizer.problem
xs = [[x0]; collect.(eachcol(reshape(sol.primals[1:(horizon * state_dim)], state_dim, :)))]
us = collect.(eachcol(reshape(sol.primals[((horizon * state_dim) + 1):end], control_dim, :)))
(; xs, us, λs = sol.inequality_duals, sol.info)
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 9773 | """
ParametricTrajectoryOptimizationProblem(
cost,
dynamics,
inequality_constraints,
state_dim,
control_dim,
parameter_dim,
horizon,
)
Constructs a `ParametricTrajectoryOptimizationProblem` from the given problem data:
- `cost` is callable as `cost(xs, us, params) -> c` to compute objective value for a given \
sequence of states `xs` and control inputs `us` for a parameter vector `params`.
- `dynamics` is callable as `dynamics(x, u, t [, params]) -> xp` to generate the next state `xp` \
from the previous state `x`, control `u`, time `t` and optional parameters `params`. See \
`parameterize_dynamics` for toggling the optional parameter vector.
- `inequality_constraints` is callable as `inequality_constraints(xs, us, params) -> gs` to \
generate a vector of constraints `gs` from states `xs` and `us` where the layout and types of `xs` \
and `us` are the same as for the `cost`. Constraints specified in this form will be enforced as \
`0 <= gs`; i.e., feasible trajectories evaluate to non-negative constraints. If your problem has \
no inequality constraints, set `inequality_constraints = (xs, us, params) -> Symbolics.Num[]`.
- `state_dim::Integer` is the stagewise dimension of the state.
- `control_dim::Integer` is the stagewise dimension of the control input.
- `parameter_dim::Integer` is the dimension of the parameter vector `params`.
- `horizon::Integer` is the horizon of the problem.
- `parameterize_dynamics` controls the optional `params` argument handed to dynamics. This flag is \
disabled by default. When set to `true`, `dynamics` are called as `dynamics(x, u, t, params)`
instead of `dynamics(x, u, t)`. Note that *all* parameters are handed to the dynamics call
# Note
This function uses `Symbolics.jl` to compile all of the functions, gradients, Jacobians, and
Hessians needed to solve a parametric trajectory optimization problem. Therefore, all callables above
must be sufficiently generic to accept `Symbolics.Num`-valued arguments.
Since the setup procedure involves code generation, calls to this constructor are rather expensive
and should be avoided in tight inner loops. By contrast, repeated solver invocations on the same
`ParametricTrajectoryOptimizationProblem` for varying parameter values are very fast. Therefore, it
is a good idea to choose a parameterization that avoids re-construction.
Furthermore, note that the *entire* parameter vector is handed to `cost`, `dynamics`, and
`inequality_constraints`. This allows parameters to be shared between multiple calls. For example,
a parameter that controls the collision avoidance radius may appear both in the cost and
constraints. It is the user's responsibility to correctly index into the `params` vector to extract
the desired parameters for each call.
# Example
Below we construct a parametric optimization problem for a 2D integrator with 2 states, 2 inputs
over a horizon of 10 stages.
Additionally, this problem features ±0.1 box constraints on states and inputs.
```@example running_example
horizon = 10
state_dim = 2
control_dim = 2
cost = (xs, us, params) -> sum(sum((x - params).^2) + sum(u.^2) for (x, u) in zip(xs, us))
dynamics = (x, u, t) -> x + u
inequality_constraints = let
state_constraints = state -> [state .+ 0.1; -state .+ 0.1]
control_constraints = control -> [control .+ 0.1; -control .+ 0.1]
(xs, us, params) -> [
mapreduce(state_constraints, vcat, xs)
mapreduce(control_constraints, vcat, us)
]
end
problem = ParametricTrajectoryOptimizationProblem(
cost,
dynamics,
inequality_constraints,
state_dim,
control_dim,
horizon
)
```
"""
Base.@kwdef struct ParametricTrajectoryOptimizationProblem{T1,T2,T3,T4,T5,T6,T7,T8,T9}
# https://github.com/JuliaLang/julia/issues/31231
horizon::Int
n::Int
state_dim::Int
control_dim::Int
parameter_dim::Int
num_equality::Int
num_inequality::Int
parametric_cost::T1
parametric_cost_grad::T2
parametric_cost_jac::T3
parametric_cons::T4
jac_primals::T5
jac_params::T6
cost_hess::T7
lag_hess_primals::T8
lag_jac_params::T9
end
function ParametricTrajectoryOptimizationProblem(
cost,
dynamics,
inequality_constraints,
state_dim,
control_dim,
parameter_dim,
horizon;
parameterize_dynamics = false,
)
n = horizon * (state_dim + control_dim)
num_equality = nx = horizon * state_dim
x0, z, p = let
@variables(x0[1:state_dim], z[1:n], p[1:parameter_dim]) .|> scalarize
end
xs = hcat(x0, reshape(z[1:nx], state_dim, horizon)) |> eachcol |> collect
us = reshape(z[(nx + 1):n], control_dim, horizon) |> eachcol |> collect
cost_val = cost(xs[2:end], us, p)
cost_grad = Symbolics.gradient(cost_val, z)
cost_jac_param = Symbolics.sparsejacobian(cost_grad, p)
(cost_jac_rows, cost_jac_cols, cost_jac_vals) = findnz(cost_jac_param)
constraints_val = Symbolics.Num[]
# NOTE: The dynamics constraints **must** always be first since the backward pass exploits this
# structure to more easily identify active constraints.
dynamics_parameterized = parameterize_dynamics ? dynamics : (x, u, t, _) -> dynamics(x, u, t)
for t in eachindex(us)
append!(constraints_val, dynamics_parameterized(xs[t], us[t], t, p) .- xs[t + 1])
end
append!(constraints_val, inequality_constraints(xs[2:end], us, p))
num_inequality = length(constraints_val) - num_equality
con_jac = Symbolics.sparsejacobian(constraints_val, z)
(jac_rows, jac_cols, jac_vals) = findnz(con_jac)
con_jac_p = Symbolics.sparsejacobian(constraints_val, p)
(jac_p_rows, jac_p_cols, jac_p_vals) = findnz(con_jac_p)
num_constraints = length(constraints_val)
λ, cost_scaling, constraint_penalty_scaling = let
@variables(λ[1:num_constraints], cost_scaling, constraint_penalty_scaling) .|> scalarize
end
lag = cost_scaling * cost_val - constraint_penalty_scaling * λ' * constraints_val
lag_grad = Symbolics.gradient(lag, z)
lag_hess = Symbolics.sparsejacobian(lag_grad, z)
lag_jac = Symbolics.sparsejacobian(lag_grad, p)
expression = Val{false}
(lag_hess_rows, lag_hess_cols, hess_vals) = findnz(lag_hess)
(lag_jac_rows, lag_jac_cols, lag_jac_vals) = findnz(lag_jac)
parametric_cost = let
cost_fn = Symbolics.build_function(cost_val, [p; z]; expression)
(params, primals) -> cost_fn(vcat(params, primals))
end
parametric_cost_grad = let
cost_grad_fn! = Symbolics.build_function(cost_grad, [p; z]; expression)[2]
(grad, params, primals) -> cost_grad_fn!(grad, vcat(params, primals))
end
cost_hess = let
cost_hess_sym = Symbolics.sparsejacobian(cost_grad, z)
(cost_hess_rows, cost_hess_cols, cost_hess_vals) = findnz(cost_hess_sym)
cost_hess_fn! = Symbolics.build_function(cost_hess_vals, [p; z]; expression)[2]
parametric_cost_hess_vals =
(hess, params, primals) -> cost_hess_fn!(hess, vcat(params, primals))
(; cost_hess_rows, cost_hess_cols, parametric_cost_hess_vals)
end
parametric_cost_jac_vals = let
cost_jac_param_fn! = Symbolics.build_function(cost_jac_vals, [p; z]; expression)[2]
(vals, params, primals) -> cost_jac_param_fn!(vals, vcat(params, primals))
end
parametric_cons = let
con_fn! = Symbolics.build_function(constraints_val, [x0; p; z]; expression)[2]
(cons, x0, params, primals) -> con_fn!(cons, vcat(x0, params, primals))
end
parametric_jac_vals = let
jac_vals_fn! = Symbolics.build_function(jac_vals, [x0; p; z]; expression)[2]
(vals, x0, params, primals) -> jac_vals_fn!(vals, vcat(x0, params, primals))
end
parametric_jac_p_vals = let
jac_p_vals_fn! = Symbolics.build_function(jac_p_vals, [x0; p; z]; expression)[2]
(vals, x0, params, primals) -> jac_p_vals_fn!(vals, vcat(x0, params, primals))
end
parametric_lag_hess_vals = let
hess_vals_fn! = Symbolics.build_function(
hess_vals,
[x0; p; z; λ; cost_scaling; constraint_penalty_scaling];
expression,
)[2]
(vals, x0, params, primals, duals, cost_scaling, constraint_penalty_scaling) ->
hess_vals_fn!(
vals,
vcat(x0, params, primals, duals, cost_scaling, constraint_penalty_scaling),
)
end
parametric_lag_jac_vals = let
∇lac_jac_vals_fn! = Symbolics.build_function(
lag_jac_vals,
vcat(x0, p, z, λ, cost_scaling, constraint_penalty_scaling);
expression,
)[2]
(vals, x0, params, primals, duals, cost_scaling, constraint_penalty_scaling) ->
∇lac_jac_vals_fn!(
vals,
vcat(x0, params, primals, duals, cost_scaling, constraint_penalty_scaling),
)
end
parametric_cost_jac = (; cost_jac_rows, cost_jac_cols, parametric_cost_jac_vals)
jac_primals = (; jac_rows, jac_cols, parametric_jac_vals)
jac_params = (; jac_p_rows, jac_p_cols, parametric_jac_p_vals)
lag_hess_primals = (; lag_hess_rows, lag_hess_cols, parametric_lag_hess_vals)
lag_jac_params = (; lag_jac_rows, lag_jac_cols, parametric_lag_jac_vals)
ParametricTrajectoryOptimizationProblem(;
horizon,
n,
state_dim,
control_dim,
parameter_dim,
num_equality,
num_inequality,
parametric_cost,
parametric_cost_grad,
parametric_cost_jac,
parametric_cons,
jac_primals,
jac_params,
cost_hess,
lag_hess_primals,
lag_jac_params,
)
end
function parameter_dimension(problem::ParametricTrajectoryOptimizationProblem)
problem.parameter_dim
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 2324 | """
A solver backend that treats the problem as a quadratic program (QP)
QP(y) := argmin_x 0.5 x'Qx + x'(Ry+q)
s.t. lb <= Ax + By <= ub
# Note
Here, the problem data tuple `(Q, R, q, A, B, lb, ub)` is derived from the provided
`ParametricTrajectoryOptimizationProblem` via linearization of constraints and quadraticization of
the objective. Therefore, if the problem is not a QP then this solution is not exact!
"""
struct QPSolver end
is_thread_safe(::QPSolver) = true
"""
Solves quadratic program:
QP(y) := argmin_x 0.5 x'Qx + x'(Ry+q)
s.t. lb <= Ax + By <= ub
Additionally provides gradients ∇_y QP(y)
Q, R, A, and B should be sparse matrices of type SparseMatrixCSC.
q, lb, ub, and y should be of type Vector{Float64}.
"""
function solve(
::QPSolver,
problem,
x0,
params::AbstractVector{<:AbstractFloat};
initial_guess = nothing,
)
(; cost_hess_rows, cost_hess_cols, parametric_cost_hess_vals) = problem.cost_hess
(; jac_rows, jac_cols, parametric_jac_vals) = problem.jac_primals
n = problem.n
m = problem.num_equality + problem.num_inequality
primals = zeros(n)
duals = zeros(m)
Qvals = zeros(size(cost_hess_rows, 1))
parametric_cost_hess_vals(Qvals, params, primals)
Q = sparse(cost_hess_rows, cost_hess_cols, Qvals, n, n)
q = zeros(n)
problem.parametric_cost_grad(q, params, primals)
Avals = zeros(size(jac_rows, 1))
parametric_jac_vals(Avals, x0, params, primals)
A = sparse(jac_rows, jac_cols, Avals, m, n)
cons = zeros(m)
problem.parametric_cons(cons, x0, params, primals)
lb = -cons
ub = fill(Inf, length(lb))
ub[1:(problem.num_equality)] .= lb[1:(problem.num_equality)]
m = OSQP.Model()
OSQP.setup!(m; P = sparse(Q), q = q, A = A, l = lb, u = ub, verbose = false, polish = true)
if !isnothing(initial_guess)
OSQP.warm_start!(m; x = initial_guess.x, y = initial_guess.y)
end
results = OSQP.solve!(m)
if (results.info.status_val != 1)
@warn "QP not cleanly solved. OSQP status is $(results.info.status_val)"
end
(;
primals = results.x,
equality_duals = -results.y[1:(problem.num_equality)],
inequality_duals = -results.y[(problem.num_equality + 1):end],
info = (; raw_solution = results),
)
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 300 | function get_constraints_from_box_bounds(bounds)
function (y)
mapreduce(vcat, [(bounds.lb, 1), (bounds.ub, -1)]) do (bound, sign)
# drop constraints for unbounded variables
mask = (!isinf).(bound)
sign * (y[mask] - bound[mask])
end
end
end
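# Worked example (illustrative values, not from the package): with
# bounds = (; lb = [-1.0, -Inf], ub = [1.0, Inf]), the returned closure maps y = [0.0, 5.0]
# to [1.0, 1.0]; the infinite bounds are dropped by the mask, and the remaining entries are
# non-negative exactly when y lies inside the box.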
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 5672 | using DifferentiableTrajectoryOptimization:
Optimizer,
ParametricTrajectoryOptimizationProblem,
NLPSolver,
QPSolver,
MCPSolver,
parameter_dimension
using Test: @testset, @test, @test_logs
using Zygote: Zygote
using Random: MersenneTwister
using FiniteDiff: FiniteDiff
@testset "DifferentiableTrajectoryOptimization.jl" begin
δt = 0.01
x0 = zeros(2)
horizon = 10
state_dim = 2
control_dim = 2
dynamics = function (x, u, t, params=0.01)
local δt = last(params)
x + δt * u
end
inequality_constraints = let
state_constraints = state -> [state .+ 0.1; -state .+ 0.1]
control_constraints = control -> [control .+ 0.1; -control .+ 0.1]
(xs, us, params) -> [
mapreduce(state_constraints, vcat, xs)
mapreduce(control_constraints, vcat, us)
]
end
function goal_reference_cost(xs, us, params)
goal = params[1:2]
regularization = 10
sum(zip(xs, us)) do (x, u)
sum((x[1:2] - goal) .^ 2) + regularization * sum(u .^ 2)
end
end
function input_reference_cost(xs, us, params)
regularization = 10
input_reference = reshape(params[1:(2*length(us))], 2, :) |> eachcol
sum(zip(us, input_reference)) do (u, r)
sum(0.5 .* regularization .* u .^ 2 .- u .* r)
end
end
for solver in [NLPSolver(), QPSolver(), MCPSolver()]
@testset "$solver" begin
for (cost, parameter_dim) in
[(goal_reference_cost, 3), (input_reference_cost, (2 * horizon + 1))]
trivial_params = [zeros(parameter_dim - 1); δt]
@testset "$cost" begin
optimizer = let
problem = ParametricTrajectoryOptimizationProblem(
cost,
dynamics,
inequality_constraints,
state_dim,
control_dim,
parameter_dim,
horizon;
parameterize_dynamics=true
)
Optimizer(problem, solver)
end
@testset "forward" begin
@testset "trivial trajectory qp" begin
# In this trivial example, the goal equals the initial position (at the origin).
# Thus, we expect the trajectory to be all zeros
xs, us, λs, info = optimizer(x0, trivial_params)
@test all(all(isapprox.(x, 0, atol=1e-9)) for x in xs)
@test all(all(isapprox.(u, 0, atol=1e-9)) for u in us)
@test all(>=(-1e-9), λs)
# test warm-start
xs, us, λs, info =
optimizer(x0, trivial_params; initial_guess=info.raw_solution)
@test all(all(isapprox.(x, 0, atol=1e-9)) for x in xs)
@test all(all(isapprox.(u, 0, atol=1e-9)) for u in us)
@test all(>=(-1e-9), λs)
end
@testset "infeasible trajectory qp" begin
x0_infeasible = [10.0, 10.0]
@test_logs (:warn,) begin
xs, us, λs = optimizer(x0_infeasible, trivial_params)
end
end
end
@testset "ad" begin
function objective(params)
xs, us, λs = optimizer(x0, params)
sum(sum(x .^ 2) for x in xs) + sum(sum(λ .^ 2) for λ in λs)
end
for (mode, f) in [
("reverse mode", objective),
("forward mode", params -> Zygote.forwarddiff(objective, params)),
]
@testset "$mode" begin
@testset "trivial" begin
# The start and goal are the same. Thus, we expect the gradient of the objective
# that penalizes deviation from the origin to be zero.
@test all(
isapprox.(
only(Zygote.gradient(f, trivial_params)),
0,
atol=1e-9,
),
)
end
@testset "random" begin
rng = MersenneTwister(0)
for _ in 1:10
@test let
params = [10 * randn(rng, parameter_dim - 1); δt]
∇ = Zygote.gradient(f, params) |> only
∇_fd = FiniteDiff.finite_difference_gradient(f, params)
isapprox(∇, ∇_fd; atol=1e-3)
end
end
end
end
end
end
end
end
end
end
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | docs | 7729 | # DifferentiableTrajectoryOptimization.jl (Dito)
[CI](https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl/actions/workflows/ci.yml)
[codecov](https://codecov.io/gh/lassepe/DifferentiableTrajectoryOptimization.jl)
[License: MIT](https://opensource.org/licenses/MIT)
DifferentiableTrajectoryOptimization.jl (Dito for short) is a package for **Di**fferentiable **T**rajectory **O**ptimization in Julia. It supports both forward and reverse mode differentiation via [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) and [ChainRulesCore.jl](https://github.com/JuliaDiff/ChainRulesCore.jl) and therefore integrates seamlessly with machine learning frameworks such as [Flux.jl](https://github.com/FluxML/Flux.jl).
---
A substantial part of machine learning (ML) algorithms relies upon the ability to propagate gradient signals through the entire learning pipeline.
Traditionally, such models have been mostly limited to artificial neural networks and "simple" analytic functions.
Recent work has focused on extending the class of admissible models for gradient-based learning by making all sorts of procedures differentiable.
These efforts range from [differentiable physics engines](https://arxiv.org/pdf/2103.16021.pdf) over [differentiable rendering](https://arxiv.org/pdf/2006.12057.pdf?ref=https://githubhelp.com) to [differentiable optimization](https://arxiv.org/pdf/1703.00443.pdf).
Dito focuses on a special case of the latter category, differentiable trajectory optimization.
As such, Dito algorithmically provides a (local) answer to the question:
> *"How does the optimal solution of an inequality constrained trajectory optimization problem change if the problem changes?"*.
This implementation was originally developed as part of our research on [Learning Mixed Strategies in Trajectory Games](https://arxiv.org/pdf/2205.00291.pdf):
```bibtex
@inproceedings{peters2022rss,
title = {Learning Mixed Strategies in Trajectory Games},
author = {Peters, Lasse and Fridovich-Keil, David and Ferranti, Laura and Stachniss, Cyrill and Alonso-Mora, Javier and Laine, Forrest},
booktitle = {Proc.~of Robotics: Science and Systems (RSS)},
year = {2022},
url = {https://arxiv.org/abs/2205.00291}
}
```
There, Dito allowed us to efficiently train a neural network pipeline that rapidly generates feasible equilibrium trajectories in multi-player non-cooperative dynamic games.
Since this component has proven to be very useful in that context, we have since decided to factor it out into a stand-alone package.
## Installation
To install Dito, simply add it via Julia's package manager from the REPL:
```julia
# hit `]` to enter "pkg"-mode of the REPL
pkg> add DifferentiableTrajectoryOptimization
```
## Usage
Below we construct a parametric optimization problem for a 2D integrator with 2 states, 2 inputs
over a horizon of 10 stages with box constraints on states and inputs.
Please consult the documentation for each of the types below for further information. For example, just type `?ParametricTrajectoryOptimizationProblem` to learn more about the problem setup.
You can also consult the [tests](test/runtests.jl) as an additional source of implicit documentation.
### 1. Problem Setup
The entry-point for getting started with this package is to set up your problem of choice as a `ParametricTrajectoryOptimizationProblem`.
```julia
using DifferentiableTrajectoryOptimization
horizon = 10
state_dim = control_dim = parameter_dim = 2
cost = (xs, us, params) -> sum(sum((x - params).^2) + sum(u.^2) for (x, u) in zip(xs, us))
dynamics = (x, u, t) -> x + u
inequality_constraints = let
state_constraints = state -> [state .+ 0.1; -state .+ 0.1]
control_constraints = control -> [control .+ 0.1; -control .+ 0.1]
(xs, us, params) -> [
mapreduce(state_constraints, vcat, xs)
mapreduce(control_constraints, vcat, us)
]
end
problem = ParametricTrajectoryOptimizationProblem(
cost,
dynamics,
inequality_constraints,
state_dim,
control_dim,
parameter_dim,
horizon
)
```
### 2. Optimizer Setup
Given an instance of the `ParametricTrajectoryOptimizationProblem`, you can construct an `Optimizer` for the problem.
```julia
backend = QPSolver()
optimizer = Optimizer(problem, backend)
```
Currently, Dito supports the following optimization backends:
- `MCPSolver`: Casts trajectory optimization problem as a mixed complementarity problem (MCP) and solves it via PATH.
- This is the best option for nonlinear, non-convex problems. Even for QPs this solver is often as fast as the specialized QP solver.
- The PATH solver is not open source but provides a free license. Without setting a license key, this backend only works for small problems. Please consult the documentation of [PATHSolver.jl](https://github.com/chkwon/PATHSolver.jl) to learn about loading the license key.
- `QPSolver`: Treats the problem as convex QP by linearizing the constraints and quadraticizing the cost a priori.
- If the true problem is not a QP, this solution will not be exact.
- `NLPSolver`: Solves the trajectory optimization problem as NLP using Ipopt.
- This solver is mostly here for historic reasons to provide a fully open-source backend for NLPs. However, for many problems the `MCPSolver` backend using PATH is *much* faster.
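Any of these backends can be passed to `Optimizer`. For example, to switch the setup above from the QP backend to the PATH-based MCP backend (a sketch reusing the `problem` from step 1):
```julia
optimizer = Optimizer(problem, MCPSolver())
```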
### 3. Solving the Problem
Given an optimizer, we can solve a problem instance for a given initial state `x0` and parameter values `params`.
```julia
x0 = zeros(state_dim)
params = randn(2)
# the solution has fields for
# `xs`: the state sequence
# `us`: the control sequence
# `λs`: the constraint multipliers
# `info`: additional "low-level" solver info
(; xs, us, λs, info) = solution = optimizer(x0, params)
```
### 4. Computing Gradients
Since we provide gradient rules for the `optimizer(x0, params)` call, you can directly differentiate through it using your favorite autodiff framework. Here is a toy example of what this could look like:
```julia
using Zygote: Zygote
# an objective function that maps from parameters to a scalar performance index
function objective(params)
(; xs, λs) = optimizer(x0, params)
sum(sum(x .^ 2) for x in xs) + sum(sum(λ .^ 2) for λ in λs)
end
# the gradient of `objective` evaluated at the randomly sampled params from step 3 above
Zygote.gradient(objective, params)
```
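Forward-mode differentiation is supported as well. As a sketch based on the package tests, the same objective can be differentiated through `Zygote.forwarddiff`, which routes the solve through its `ForwardDiff.Dual` overload:
```julia
# forward-mode AD through the solver (wrapping pattern taken from the test suite)
Zygote.gradient(p -> Zygote.forwarddiff(objective, p), params)
```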
## Background
Dito achieves differentiable trajectory optimization by augmenting existing optimization routines with custom derivative rules that apply the [implicit function theorem (IFT)](https://en.wikipedia.org/wiki/Implicit_function_theorem) to the resulting KKT-system.
Through this formulation, Dito avoids differentiation of the entire (potentially iterative) algorithm, leading to substantially accelerated derivative computation and facilitating differentiation of optimization backends that are not written in pure Julia.
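Schematically (with notation chosen here for illustration rather than taken from the package): collect the stationarity condition and the active constraints of the KKT system into a residual G(z, y) = 0, where z stacks the primal variables and active multipliers and y denotes the problem parameters. Whenever the Jacobian ∂G/∂z is invertible, the IFT yields

```
∂z/∂y = -(∂G/∂z)⁻¹ ∂G/∂y
```

which is precisely the linear system that the backward pass assembles from the Lagrangian Hessian and the constraint Jacobians, so no iterations of the underlying solver need to be differentiated.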
The following body of work provides more information about this IFT-based differentiation approach:
- [Ralph, Daniel, and Stephan Dempe. "Directional derivatives of the solution of a parametric nonlinear program." Mathematical programming 70.1 (1995): 159-172.](https://link.springer.com/content/pdf/10.1007/BF01585934.pdf)
- [Amos, Brandon, and J. Zico Kolter. "Optnet: Differentiable optimization as a layer in neural networks." International Conference on Machine Learning. PMLR, 2017.](https://arxiv.org/pdf/1703.00443.pdf)
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 360 | module DictionariesExt
using Dictionaries: AbstractDictionary, Dictionary, ArrayDictionary
import DataManipulation: materialize_views, collectview
materialize_views(A::AbstractDictionary) = map(materialize_views, A) # effectively the same implementation as for AbstractArray and AbstractDict
collectview(A::Union{Dictionary,ArrayDictionary}) = A.values
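# Illustrative usage (hypothetical values): collectview(Dictionary(["a", "b"], [1, 2])) returns the
# dictionary's underlying `values` storage without copying, whereas `collect` would allocate a new array.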
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 509 | module IntervalSetsExt
using IntervalSets
import DataManipulation: shift_range
import DataManipulation.InverseFunctions: inverse
function shift_range(x, (from, to)::Pair{<:AbstractInterval, <:AbstractInterval}; clamp::Bool=false)
y = (x - leftendpoint(from)) / _width(from) * _width(to) + leftendpoint(to)
clamp ? Base.clamp(y, to) : y
end
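# Illustrative usage (hypothetical values): shift_range(0.5, 0..1 => 10..20) affinely maps 0.5 from
# the interval 0..1 to 15.0 in 10..20; passing clamp=true additionally clamps the result to the target interval.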
inverse(f::Base.Fix2{typeof(shift_range), <:Pair}) = Base.Fix2(shift_range, reverse(f.x))
_width(x::AbstractInterval) = rightendpoint(x) - leftendpoint(x)
end | DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1219 | module StructArraysExt
using StructArrays
using DataManipulation.Accessors
import DataManipulation: nest, materialize_views, StaticRegex
Base.getindex(A::StructArray, p::Union{StaticRegex, Pair{<:StaticRegex}}, args...) =
@modify(StructArrays.components(A)) do nt
nt[p, args...]
end
Base.setindex(A::StructArray, val::StructArray, p::Union{StaticRegex, Pair{<:StaticRegex}}, args...) =
@modify(StructArrays.components(A)) do nt
Base.setindex(nt, StructArrays.components(val), p, args...)
end
Accessors.setindex(A::StructArray, val, p::Union{StaticRegex, Pair{<:StaticRegex}}, args...) = Base.setindex(A, val, p, args...)
Accessors.delete(A::StructArray, o::IndexLens{<:Tuple{StaticRegex, Vararg{Any}}}) =
@modify(StructArrays.components(A)) do nt
delete(nt, o)
end
function nest(x::StructArray, args...)
comps = StructArrays.components(x)
comps_n = nest(comps, args...)
_sa_from_comps_nested(comps_n)
end
_sa_from_comps_nested(X::AbstractArray) = X
_sa_from_comps_nested(X::Union{Tuple,NamedTuple}) = StructArray(map(_sa_from_comps_nested, X))
materialize_views(A::StructArray) = StructArray(map(materialize_views, StructArrays.components(A)))
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 14800 | ### A Pluto.jl notebook ###
# v0.19.11
using Markdown
using InteractiveUtils
# ╔═╡ 7e7707a6-6bb5-4e20-a867-98dcd359dea1
using BenchmarkTools
# ╔═╡ 66ee434c-b6d0-46c6-9ebf-b548978ad6d5
using Dictionaries
# ╔═╡ 2ab6da2c-2c7d-477b-a336-89cfb05c57f8
using ProfileCanvas
# ╔═╡ 11f58b35-a882-4e18-a64c-e57050303601
using DataPipes
# ╔═╡ b30128ce-f146-45b1-ae62-d5cf2ccbaa11
using ConstructionBase
# ╔═╡ 74c2c181-00e8-4d65-b3c8-c44ed31a926b
using Accessors
# ╔═╡ 8e799461-2ffe-46e9-9008-35effd0e19ab
dct = Dict(zip(1:10^3, 1:10^3))
# ╔═╡ c4b4712a-bf6c-47fc-9a07-ea5f4a78c8f9
mapvalues_naive(f, dct) = Dict(zip(keys(dct), map(f, values(dct))))
# ╔═╡ 3c57a85f-ea6b-411b-9b00-9a747c805fb2
function mapvalues_copy(f, dct)
dct_ = copy(dct)
map!(f, values(dct_))
dct_
end
# ╔═╡ cb0c69a6-4509-42f7-9a8f-bc925ca8dee6
function mapvalues_vals1(f, dct)
@modify(dct.vals) do vals
map(f, vals)
end
end
# ╔═╡ 26f220aa-69bf-4c3c-80ac-1347016ba06a
mappairs_naive(f, obj::Dict) = Dict(f(p) for p in pairs(obj))
# ╔═╡ ff966cf6-4a8c-4c78-8ffb-dfba181be612
@btime mappairs_naive(((k, v),) -> (k, v + 1), $dct)
# ╔═╡ e9156a19-3bce-4f3d-a792-40fe501c2078
@btime mappairs_naive(((k, v),) -> k + 1 => v + 1, $dct)
# ╔═╡ 90d875d3-2820-49ba-a1fb-4cabcf62e64a
# ╔═╡ ec17707a-a2d7-4f5c-a269-ce8c9c827e3c
# ╔═╡ 80882a46-3c0e-4ae8-a594-d43a6452552b
@btime mapvalues_naive(x -> x + 1, $dct)
# ╔═╡ 18386539-6691-453e-82b4-eb56493a4fa9
@btime mapvalues_copy(x -> x + 1, $dct)
# ╔═╡ 38c8dac8-8ae2-4210-b3eb-9f2e9db26e34
@btime mapvalues_vals1(x -> x + 1, $dct)
# ╔═╡ 1765fd22-4e77-442c-a6ab-7ffb913a8d23
# ╔═╡ 71cbe102-852d-44b6-b9ba-c6e7e90cea71
# ╔═╡ b5aeb319-b190-44cc-b657-82bfd8c9d414
ddct = dictionary(dct)
# ╔═╡ ba40d7f6-4a3d-4146-b714-0861d842b356
@btime map(x -> x + 1, $ddct)
# ╔═╡ 97b7328b-41b3-4c6b-bdef-344c300c8b5e
# ╔═╡ 23775197-1076-4431-9159-78e3465e12f9
@btime collect($ddct)
# ╔═╡ 0a3efe2e-71ec-49d5-8d14-a17255025e31
ddct.values == collect(ddct)
# ╔═╡ 6bc04097-07af-4ccc-9c4c-35e6af5e350c
@btime map(keys($ddct), values($ddct)) do k, v
k + v
end
# ╔═╡ eaa68c38-902a-491a-b7bd-0fa8f8d2ae25
@btime map(pairs($ddct)) do (k, v)
(;k, v)
end
# ╔═╡ 388e58ab-50ae-4f50-b684-fdef00c7eccf
@btime map(pairs($ddct)) do (k, v)
(;k, v)
end.values
# ╔═╡ 11108388-347a-4719-9a0d-5fa3f64f4c1a
@btime map(pairs($ddct)) do (k, v)
(;k, v)
end |> collect
# ╔═╡ ad5454bb-9f43-4494-abc1-0ad1af7c1eae
@btime map($(collect(pairs(ddct)))) do (k, v)
(;k, v)
end
# ╔═╡ 58474708-3bd0-4400-baef-419ade66963d
@btime map(collect(pairs($ddct))) do (k, v)
(;k, v)
end
# ╔═╡ 0dc14e42-acf3-48fa-954b-354c28a1c775
# ╔═╡ 8fe2e214-1305-450a-9d0c-d6bb7069b8bf
# ╔═╡ 46999c78-04c4-45e0-8820-db133c1d2d49
# ╔═╡ a2f17316-2ec9-11ed-0400-514300b27b1a
function autotimed(f; mintime=0.5)
f()
T = @timed f()
T.time > mintime && return (; T.time, T.gctime, T.bytes, nallocs=Base.gc_alloc_count(T.gcstats) / 1, T.value)
n = 1
while true
n *= clamp(mintime / T.time, 1.2, 100)
T = @timed begin
for _ in 1:(n-1)
f()
end
f()
end
T.time > mintime && return (; time=T.time / n, gctime=T.gctime / n, bytes=T.bytes ÷ n, nallocs=Base.gc_alloc_count(T.gcstats) / n, T.value)
end
end
# ╔═╡ f4cea5a0-1c61-4760-be24-e1282a7f751a
function ConstructionBase.setproperties(d::Dict{K}, patch::NamedTuple{(:vals,), Tuple{Vector{V}}}) where {K, V}
@assert length(d.keys) == length(patch.vals)
Dict{K,V}(d.slots, d.keys, patch.vals, d.ndel, d.count, d.age, d.idxfloor, d.maxprobe)
end
# ╔═╡ c1f9fa7d-7c1c-4d08-8465-4930241e8223
function mapvalues_vals2(f, dct::Dict{K, V}) where {K, V}
# V = Core.Compiler.return_type(f, Tuple{valtype(dct)})
vals = dct.vals
newvals = similar(vals, V)
@inbounds for i in dct.idxfloor:lastindex(vals)
if Base.isslotfilled(dct, i)
newvals[i] = f(vals[i])
end
end
setproperties(dct, vals=newvals)
end
# ╔═╡ c743823f-0a36-4d82-af18-3edd01e7ea5c
@btime mapvalues_vals2(x -> x + 1, $dct)
# ╔═╡ 2a78308e-24f4-489d-b12f-140cc18cf19f
begin
function mappairs_vals(f, dct)
KV = Core.Compiler.return_type(f, Tuple{eltype(dct)})
_mappairs(f, dct, KV)
end
function _mappairs(f, dct::Dict{K}, ::Type{Pair{K, V}}) where {K, V}
vals = dct.vals
newvals = similar(vals, V)
@inbounds for i in dct.idxfloor:lastindex(vals)
if Base.isslotfilled(dct, i)
p = dct.keys[i] => vals[i]
newp = f(p)
if newp.first == p.first
newvals[i] = newp.second
else
return _mappairs_different(f, dct::Dict, Pair{K, V})
end
end
end
setproperties(dct, vals=newvals)
end
function _mappairs_different(f, dct::Dict, ::Type{Pair{K, V}}) where {K, V}
Dict{K, V}(f(p) for p in dct)
end
end
# ╔═╡ 479917d8-d00a-47a3-8d3d-ae8be32f73ca
@btime mappairs_vals(((k, v),) -> k => v + 1, $dct)
# ╔═╡ d6518d70-0188-4a85-a149-93accb36757e
@btime mappairs_vals(((k, v),) -> k + 1 => v + 1, $dct)
# ╔═╡ 18a98238-413e-445a-8ce0-d62e8f3f5d83
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
DataPipes = "02685ad9-2d12-40c3-9f73-c6aeda6a7ff5"
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
ProfileCanvas = "efd6af41-a80b-495e-886c-e51b0c7d77a3"
[compat]
Accessors = "~0.1.20"
BenchmarkTools = "~1.3.1"
ConstructionBase = "~1.4.1"
DataPipes = "~0.3.0"
Dictionaries = "~0.3.24"
ProfileCanvas = "~0.1.4"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.8.0"
manifest_format = "2.0"
project_hash = "ddcda892c7d834369e25ed6e522f13fd69369ad9"
[[deps.Accessors]]
deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "Test"]
git-tree-sha1 = "ce67f55da3a937bb001a8d00559bdfa4dba6e4f5"
uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
version = "0.1.20"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "4c10eee4af024676200bc7752e536f858c6b8f93"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.3.1"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "78bee250c6826e1cf805a88b7f1e86025275d208"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.46.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "455419f7e328a1a2493cabc6428d79e951349769"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.1"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "fb21ddd70a051d882a1686a5a550990bbe371a95"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.4.1"
[[deps.DataPipes]]
git-tree-sha1 = "b97559f7b941226df5bfef2893bf71f83cac5c41"
uuid = "02685ad9-2d12-40c3-9f73-c6aeda6a7ff5"
version = "0.3.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.Dictionaries]]
deps = ["Indexing", "Random", "Serialization"]
git-tree-sha1 = "96dc5c5c8994be519ee3420953c931c55657a3f2"
uuid = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
version = "0.3.24"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.Indexing]]
git-tree-sha1 = "ce1566720fd6b19ff3411404d4b977acd4814f9f"
uuid = "313cdc1a-70c2-5d6a-ae34-0150d3930a38"
version = "1.1.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "b3364212fb5d870f724876ffcd34dd8ec6d98918"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.7"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.4.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[deps.ProfileCanvas]]
deps = ["Base64", "JSON", "Pkg", "Profile", "REPL"]
git-tree-sha1 = "8fc50fe9b7a9a7425986c5709b2064775196bca7"
uuid = "efd6af41-a80b-495e-886c-e51b0c7d77a3"
version = "0.1.4"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"
"""
# ╔═╡ Cell order:
# ╠═8e799461-2ffe-46e9-9008-35effd0e19ab
# ╠═c4b4712a-bf6c-47fc-9a07-ea5f4a78c8f9
# ╠═3c57a85f-ea6b-411b-9b00-9a747c805fb2
# ╠═cb0c69a6-4509-42f7-9a8f-bc925ca8dee6
# ╠═c1f9fa7d-7c1c-4d08-8465-4930241e8223
# ╠═26f220aa-69bf-4c3c-80ac-1347016ba06a
# ╠═2a78308e-24f4-489d-b12f-140cc18cf19f
# ╠═7e7707a6-6bb5-4e20-a867-98dcd359dea1
# ╠═ff966cf6-4a8c-4c78-8ffb-dfba181be612
# ╠═e9156a19-3bce-4f3d-a792-40fe501c2078
# ╠═479917d8-d00a-47a3-8d3d-ae8be32f73ca
# ╠═d6518d70-0188-4a85-a149-93accb36757e
# ╠═90d875d3-2820-49ba-a1fb-4cabcf62e64a
# ╠═ec17707a-a2d7-4f5c-a269-ce8c9c827e3c
# ╠═80882a46-3c0e-4ae8-a594-d43a6452552b
# ╠═18386539-6691-453e-82b4-eb56493a4fa9
# ╠═38c8dac8-8ae2-4210-b3eb-9f2e9db26e34
# ╠═c743823f-0a36-4d82-af18-3edd01e7ea5c
# ╠═1765fd22-4e77-442c-a6ab-7ffb913a8d23
# ╠═71cbe102-852d-44b6-b9ba-c6e7e90cea71
# ╠═b5aeb319-b190-44cc-b657-82bfd8c9d414
# ╠═ba40d7f6-4a3d-4146-b714-0861d842b356
# ╠═97b7328b-41b3-4c6b-bdef-344c300c8b5e
# ╠═23775197-1076-4431-9159-78e3465e12f9
# ╠═0a3efe2e-71ec-49d5-8d14-a17255025e31
# ╠═6bc04097-07af-4ccc-9c4c-35e6af5e350c
# ╠═eaa68c38-902a-491a-b7bd-0fa8f8d2ae25
# ╠═388e58ab-50ae-4f50-b684-fdef00c7eccf
# ╠═11108388-347a-4719-9a0d-5fa3f64f4c1a
# ╠═ad5454bb-9f43-4494-abc1-0ad1af7c1eae
# ╠═58474708-3bd0-4400-baef-419ade66963d
# ╠═0dc14e42-acf3-48fa-954b-354c28a1c775
# ╠═8fe2e214-1305-450a-9d0c-d6bb7069b8bf
# ╠═46999c78-04c4-45e0-8820-db133c1d2d49
# ╠═66ee434c-b6d0-46c6-9ebf-b548978ad6d5
# ╠═2ab6da2c-2c7d-477b-a336-89cfb05c57f8
# ╠═11f58b35-a882-4e18-a64c-e57050303601
# ╠═b30128ce-f146-45b1-ae62-d5cf2ccbaa11
# ╠═74c2c181-00e8-4d65-b3c8-c44ed31a926b
# ╠═a2f17316-2ec9-11ed-0400-514300b27b1a
# ╠═f4cea5a0-1c61-4760-be24-e1282a7f751a
# ╠═18a98238-413e-445a-8ce0-d62e8f3f5d83
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 10256 | ### A Pluto.jl notebook ###
# v0.19.11
using Markdown
using InteractiveUtils
# ╔═╡ 6a38ae1a-4097-11ed-19f7-5b18b3fa3617
using DataManipulation
# Several functions are defined in `DataManipulation` directly. They can be split into other packages at some point, if considered useful not only for interactive work. These include:
# - `findonly`: like `findfirst`, but ensures that exactly a single match is present;
# - `filterfirst`, `filteronly`: more efficient `first(filter(f, X))` and `only(filter(f, X))`;
# - `uniqueonly`: more efficient `only(unique([f], X))`;
# - `mapset`, `mapinsert`, `mapsetview`, `mapinsertview`: generalized set/insert a table column, eg `mapset(a=x -> x.b^2, xs)` is equivalent to `map(x -> @set(x.a=x.b^2), xs)` and supports multiple properties as kwargs;
# - `filterview`, `sortview`, `uniqueview`: like `filter`/`sort`/`unique`, but return a view;
# - `collectview`: turn the input into an `AbstractArray`, like `collect` but doesn't copy; useful for general handling of arrays and dictionaries;
# - `materialize_views`: materialize views arbitrarily nested in dictionaries and `StructArray`s;
# - `discreterange`: similar to `maprange(...)`, but returns `length` unique integers.
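# A couple of hypothetical usage sketches for the list above (argument values are made up, semantics as described):
# findonly(iseven, [1, 3, 4, 5]) # index of the single even element; errors if there is none or more than one
# filterview(>(1), xs) # like filter(>(1), xs), but returns a view instead of a copy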
# ╔═╡ ae30f3d7-00e6-40ec-8fd1-dc3e8b736301
xs = [1., 2, 3]
# ╔═╡ cd8d0b6b-46ca-4879-98b0-0ed8aa109ecd
xsm = mapview(exp, xs)
# ╔═╡ 33a9ae56-b6f1-41e7-9d84-5db7410ffc3f
xsm[2] = 1000
# ╔═╡ f5ebb4d0-626a-40ce-89e3-4844005ba340
xs
# ╔═╡ 3ddaf5ee-2694-435e-bc56-2f33d6a354ea
# ╔═╡ 9c2d8a72-a088-4c53-9585-b84a93248579
flatten([[1, 2], [3]])
# ╔═╡ 71586607-b919-4eea-a828-9018f84bfb31
flatmap(i -> 1:i, 1:3)
# ╔═╡ e7eed221-6185-454b-a8ef-648c4534a2d6
# ╔═╡ 9159df12-748d-4da8-95c1-c0487b934858
filtermap(1:10) do x
y = x^2
sum(1:y) < 10 && return nothing
y + 1
end
# ╔═╡ 3ab3eb53-88df-4800-82e6-5a8970cc4a2c
# ╔═╡ 7e204430-4f88-4018-8fb2-a34474ad371c
vals = [0, missing, NaN, 1, 2, -1, 5]
# ╔═╡ 08cc856d-fc9e-4ced-90fc-7f4280b28ad9
vals_sm = skip(ismissing, vals)
# ╔═╡ 94493a2b-2187-4c5c-8b5e-f9050ff87a97
eltype(vals_sm)
# ╔═╡ ae2eceb4-1249-435d-b48a-d2b979a84ae9
collect(vals_sm)
# ╔═╡ 0a771457-ccc8-41fe-aeef-93cf7516d565
vals_s = skip(x -> ismissing(x) || isnan(x), vals)
# ╔═╡ a0a5585f-5634-491a-9661-017b9b8df064
eltype(vals_s)
# ╔═╡ a7f1759b-8c79-4421-b179-7a3a327c7f82
collect(vals_s)
# ╔═╡ b8e0949d-4a34-4b15-83fb-8201de7f00b1
vals_s ./= sum(vals_s)
# ╔═╡ 471d2a55-c2e3-4e9c-8c45-0c7a58f5065a
vals
# ╔═╡ 1ff15207-19b3-4f85-b2b9-6fb456b84837
# ╔═╡ 6a4b78d8-c7ed-4ea7-8903-a50d56e55ee9
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
DataManipulation = "38052440-ad76-4236-8414-61389b2c5143"
[compat]
DataManipulation = "~0.1.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.8.0"
manifest_format = "2.0"
project_hash = "cf4e738a4846e1a11c799166cf825c8ac9f7e190"
[[deps.Accessors]]
deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "Test"]
git-tree-sha1 = "ce67f55da3a937bb001a8d00559bdfa4dba6e4f5"
uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
version = "0.1.20"
[[deps.Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "195c5505521008abea5aee4f96930717958eac6f"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.4.0"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Combinatorics]]
git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860"
uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
version = "1.0.2"
[[deps.Compat]]
deps = ["Dates", "LinearAlgebra", "UUIDs"]
git-tree-sha1 = "5856d3031cdb1f3b2b6340dfdc66b6d9a149a374"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.2.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "455419f7e328a1a2493cabc6428d79e951349769"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.1"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "fb21ddd70a051d882a1686a5a550990bbe371a95"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.4.1"
[[deps.DataAPI]]
git-tree-sha1 = "1106fa7e1256b402a86a8e7b15c00c85036fef49"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.11.0"
[[deps.DataManipulation]]
deps = ["DataPipes", "Dictionaries", "FlexiGroups", "FlexiMaps", "InverseFunctions", "Reexport", "SentinelViews", "Skipper", "StructArrays"]
git-tree-sha1 = "8eacb3eb8b01841ce59ee1a9d8f4d605ac7a09b1"
uuid = "38052440-ad76-4236-8414-61389b2c5143"
version = "0.1.0"
[[deps.DataPipes]]
git-tree-sha1 = "b97559f7b941226df5bfef2893bf71f83cac5c41"
uuid = "02685ad9-2d12-40c3-9f73-c6aeda6a7ff5"
version = "0.3.0"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.Dictionaries]]
deps = ["Indexing", "Random", "Serialization"]
git-tree-sha1 = "96dc5c5c8994be519ee3420953c931c55657a3f2"
uuid = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
version = "0.3.24"
[[deps.FlexiGroups]]
deps = ["Combinatorics", "DataPipes", "Dictionaries", "FlexiMaps"]
git-tree-sha1 = "e11c9c10c95faa79e2aa58b8bafac9adb6de6364"
uuid = "1e56b746-2900-429a-8028-5ec1f00612ec"
version = "0.1.5"
[[deps.FlexiMaps]]
deps = ["Accessors", "InverseFunctions"]
git-tree-sha1 = "006f73dc1cf09257f5dc443a047f7f0942803e38"
uuid = "6394faf6-06db-4fa8-b750-35ccc60383f7"
version = "0.1.3"
[[deps.Indexing]]
git-tree-sha1 = "ce1566720fd6b19ff3411404d4b977acd4814f9f"
uuid = "313cdc1a-70c2-5d6a-ae34-0150d3930a38"
version = "1.1.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "b3364212fb5d870f724876ffcd34dd8ec6d98918"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.7"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.SentinelViews]]
git-tree-sha1 = "c7bff02ae89fd4cd0445bc7973470e830e656334"
uuid = "1c95a9c1-8e3f-460f-8963-106dcc440218"
version = "0.1.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Skipper]]
git-tree-sha1 = "ea4d60da1b785c2cf4cb34e574f4b1d6e2fadeb6"
uuid = "fc65d762-6112-4b1c-b428-ad0792653d81"
version = "0.1.0"
[[deps.StaticArraysCore]]
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.0"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "StaticArraysCore", "Tables"]
git-tree-sha1 = "8c6ac65ec9ab781af05b08ff305ddc727c25f680"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.12"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "2d7164f7b8a066bcfa6224e67736ce0eb54aef5b"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.9.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0"
"""
# ╔═╡ Cell order:
# ╠═6a38ae1a-4097-11ed-19f7-5b18b3fa3617
# ╠═ae30f3d7-00e6-40ec-8fd1-dc3e8b736301
# ╠═cd8d0b6b-46ca-4879-98b0-0ed8aa109ecd
# ╠═33a9ae56-b6f1-41e7-9d84-5db7410ffc3f
# ╠═f5ebb4d0-626a-40ce-89e3-4844005ba340
# ╠═3ddaf5ee-2694-435e-bc56-2f33d6a354ea
# ╠═9c2d8a72-a088-4c53-9585-b84a93248579
# ╠═71586607-b919-4eea-a828-9018f84bfb31
# ╠═e7eed221-6185-454b-a8ef-648c4534a2d6
# ╠═9159df12-748d-4da8-95c1-c0487b934858
# ╠═3ab3eb53-88df-4800-82e6-5a8970cc4a2c
# ╠═7e204430-4f88-4018-8fb2-a34474ad371c
# ╠═08cc856d-fc9e-4ced-90fc-7f4280b28ad9
# ╠═94493a2b-2187-4c5c-8b5e-f9050ff87a97
# ╠═ae2eceb4-1249-435d-b48a-d2b979a84ae9
# ╠═0a771457-ccc8-41fe-aeef-93cf7516d565
# ╠═a0a5585f-5634-491a-9661-017b9b8df064
# ╠═a7f1759b-8c79-4421-b179-7a3a327c7f82
# ╠═b8e0949d-4a34-4b15-83fb-8201de7f00b1
# ╠═471d2a55-c2e3-4e9c-8c45-0c7a58f5065a
# ╠═1ff15207-19b3-4f85-b2b9-6fb456b84837
# ╠═6a4b78d8-c7ed-4ea7-8903-a50d56e55ee9
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 16997 | ### A Pluto.jl notebook ###
# v0.19.11
using Markdown
using InteractiveUtils
# ╔═╡ 2e7eec24-1b69-49d3-80ae-233873bfbc3f
using DataPipes
# ╔═╡ 2023726f-c64f-4ca5-be95-e3c4ba2ccbfb
using Dictionaries
# ╔═╡ f6e2233d-85fa-47e5-ba32-25af03b89abe
using StatsBase
# ╔═╡ 1af60db5-493d-4112-8142-09b6da29fbd7
using BenchmarkTools
# ╔═╡ ffc5026d-c3bd-469d-a13e-6be0093af522
using ProfileCanvas
# ╔═╡ f8e11e54-29e8-451f-a864-187161fb250d
# ╔═╡ 089e4520-67c7-41e9-b433-28e72f311049
tbl = [(a=rand(1:10), b=rand(1:1000), c=rand(1:10^15)) for _ in 1:10^5]
# ╔═╡ e78525dd-acfe-48c2-8445-71c754b8614a
fgr = x -> x.c
# ╔═╡ 27dacf95-27c1-4d25-9ccd-6330a5e3fc65
map([x -> x.a, x -> x.b, x -> x.c]) do f
println("SAC")
@btime @p $tbl |> SAC.group($f) |> pairs |> map(((k, gr),) -> (;k, l=length(gr)))
println("My")
@btime @p $tbl |> group($f) |> pairs |> map(((k, gr),) -> (;k, l=length(gr)))
println("My Dict")
@btime @p $tbl |> group($f; dicttype=Dict) |> pairs |> Iterators.map(identity) |> map(((k, gr),) -> (;k, l=length(gr)))
end
# ╔═╡ 1c545a87-129e-4976-8e57-26bdfb52507d
map([x -> x.a, x -> x.b, x -> x.c]) do f
println("SAC")
@btime @p $tbl |> SAC.groupview($f) |> pairs |> map(((k, gr),) -> (;k, l=length(gr)))
println("My")
@btime @p $tbl |> groupview($f) |> pairs |> map(((k, gr),) -> (;k, l=length(gr)))
println("My Dict")
@btime @p $tbl |> groupview($f; dicttype=Dict) |> pairs |> Iterators.map(identity) |> map(((k, gr),) -> (;k, l=length(gr)))
end
# ╔═╡ 99a20136-244d-4461-a469-1e26a60321bd
# ╔═╡ d1d584d4-b7ee-4ece-910e-4c6b1ffa3b91
_group_core(f, X, vals; dicttype=Dictionary) = _group_core(f, X, vals, dicttype)
# ╔═╡ 292a317c-ecd2-4b03-b69e-91010a885f8e
# ╔═╡ 1325e1e5-079f-477e-895e-af0e47f8a89e
_eltype(::T) where {T} = _eltype(T)
# ╔═╡ ca6f325e-3322-4c3b-8d28-4b467cb903fb
function _eltype(::Type{T}) where {T}
ETb = eltype(T)
ETb != Any && return ETb
# Base.eltype returns Any for mapped/flattened/... iterators
# here we attempt to infer a tighter type
ET = Core.Compiler.return_type(first, Tuple{T})
ET === Union{} ? Any : ET
end
# ╔═╡ 134b6a3b-7d44-40f5-b9c0-b5fdf052d1b9
_valtype(X) = _eltype(values(X))
# ╔═╡ e021dd30-9f64-45cf-9b33-b61b07dfa0c9
function _group_core(f, X::AbstractArray, vals::AbstractArray, ::Type{dicttype}) where {dicttype}
ngroups = 0
groups = similar(X, Int)
dct = dicttype{Core.Compiler.return_type(f, Tuple{_valtype(X)}), Int}()
@inbounds for (i, x) in pairs(X)
groups[i] = gid = get!(dct, f(x), ngroups + 1)
if gid == ngroups + 1
ngroups += 1
end
end
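# counting-sort-style pass (descriptive note): `starts` first accumulates group sizes, and after
# cumsum! holds, for each group id, the end offset of that group's slice within `rperm`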
starts = zeros(Int, ngroups)
@inbounds for gid in groups
starts[gid] += 1
end
cumsum!(starts, starts)
push!(starts, length(groups))
rperm = similar(vals, Base.OneTo(length(vals)))
# rperm = Vector{_eltype(vals)}(undef, length(X))
@inbounds for (v, gid) in zip(vals, groups)
rperm[starts[gid]] = v
starts[gid] -= 1
end
# dct: key -> group_id
# rperm[starts[group_id + 1]:-1:1 + starts[group_id]] = group_values
return (; dct, starts, rperm)
end
# ╔═╡ ba77ae70-85c9-4001-83c2-dc069f93d68d
# ╔═╡ d9d55cfd-6c0a-4276-9562-85b4588e2edf
mapvalues(f, dict::AbstractDictionary) = map(f, dict)
# ╔═╡ 075b8a65-b78c-4577-9169-03aa76c778c3
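# NOTE: the two definitions below reach into Dict internals (slots, vals, idxfloor, ...) to swap the
# values array without rehashing; this is a sketch tied to the Dict layout of this Julia version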
function _setproperties(d::Dict, patch::NamedTuple{(:vals,)})
K = keytype(d)
V = eltype(patch.vals)
@assert length(d.keys) == length(patch.vals)
Dict{K,V}(d.slots, d.keys, patch.vals, d.ndel, d.count, d.age, d.idxfloor, d.maxprobe)
end
# ╔═╡ 800f94bb-23c1-4edb-9305-8f121e184e62
function mapvalues(f, dict::Dict)
V = Core.Compiler.return_type(f, Tuple{valtype(dict)})
vals = dict.vals
newvals = similar(vals, V)
@inbounds for i in dict.idxfloor:lastindex(vals)
if Base.isslotfilled(dict, i)
newvals[i] = f(vals[i])
end
end
_setproperties(dict, (;vals=newvals))
end
# ╔═╡ de2d1dde-5c00-4f49-b1bc-f14156598a8d
function groupfind(f, X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, keys(X); kwargs...)
mapvalues(dct) do gid
@view rperm[starts[gid + 1]:-1:1 + starts[gid]]
end
end
# ╔═╡ c9f42f7d-620e-4529-950c-15738c9da711
function groupview(f, X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, keys(X); kwargs...)
mapvalues(dct) do gid
ix = @view rperm[starts[gid + 1]:-1:1 + starts[gid]]
@view X[ix]
end
end
# ╔═╡ 43a719b6-4f0f-4b29-946b-316ac716b3d4
function group(f, X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, values(X); kwargs...)
mapvalues(dct) do gid
@view rperm[starts[gid + 1]:-1:1 + starts[gid]]
end
end
# ╔═╡ 2c098c8e-cfe0-4fff-b3b5-7c56231ffc65
function groupmap(f, ::typeof(length), X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, similar(X, Nothing); kwargs...)
mapvalues(dct) do gid
starts[gid + 1] - starts[gid]
end
end
# ╔═╡ cf16545e-a409-48d4-98ab-93aa3951b75d
function groupmap(f, ::typeof(first), X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, keys(X); kwargs...)
mapvalues(dct) do gid
ix = rperm[starts[gid + 1]]
X[ix]
end
end
# ╔═╡ c7e33758-8983-493e-8184-08545dc26835
function groupmap(f, ::typeof(last), X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, keys(X); kwargs...)
mapvalues(dct) do gid
ix = rperm[1 + starts[gid]]
X[ix]
end
end
# ╔═╡ fcf1f47c-7f30-46d0-aa87-8302cd86362b
function groupmap(f, ::typeof(only), X; kwargs...)
(; dct, starts, rperm) = _group_core(f, X, keys(X); kwargs...)
mapvalues(dct) do gid
starts[gid + 1] == starts[gid] + 1 || throw(ArgumentError("groupmap(only, X) requires that each group has exactly one element"))
ix = rperm[starts[gid + 1]]
X[ix]
end
end
# ╔═╡ c977bd4c-a861-4973-a3a7-d2a26412f7c4
# ╔═╡ f37c6114-30ee-11ed-00cc-1d35875cc46b
import SplitApplyCombine as SAC
# ╔═╡ 8c8ac28b-9096-47bf-a54e-2c926371622d
map([x -> x.a, x -> x.b, x -> x.c]) do f
println("SAC")
@btime SAC.group($f, $tbl)
println("My")
@btime group($f, $tbl)
println("My Dict")
@btime group($f, $tbl; dicttype=Dict)
end
# ╔═╡ 2a613f12-89ed-46aa-8132-125455ffe81a
map([x -> x.a, x -> x.b, x -> x.c]) do f
println("StatsBase")
@btime countmap($(map(f, tbl)))
println("SAC")
@btime SAC.groupcount($f, $tbl)
println("My")
@btime groupmap($f, length, $tbl)
println("My Dict")
@btime groupmap($f, length, $tbl; dicttype=Dict)
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DataPipes = "02685ad9-2d12-40c3-9f73-c6aeda6a7ff5"
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
ProfileCanvas = "efd6af41-a80b-495e-886c-e51b0c7d77a3"
SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
[compat]
BenchmarkTools = "~1.3.1"
DataPipes = "~0.3.0"
Dictionaries = "~0.3.24"
ProfileCanvas = "~0.1.4"
SplitApplyCombine = "~1.2.2"
StatsBase = "~0.33.21"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.8.0"
manifest_format = "2.0"
project_hash = "a001eb21bddb77b9b503e134f95746d2c3619b73"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "4c10eee4af024676200bc7752e536f858c6b8f93"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.3.1"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "8a494fe0c4ae21047f28eb48ac968f0b8a6fcaa7"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.15.4"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "38f7a08f19d8810338d4f5085211c7dfa5d5bdd8"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.4"
[[deps.Compat]]
deps = ["Dates", "LinearAlgebra", "UUIDs"]
git-tree-sha1 = "5856d3031cdb1f3b2b6340dfdc66b6d9a149a374"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.2.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
[[deps.DataAPI]]
git-tree-sha1 = "fb5f5316dd3fd4c5e7c30a24d50643b73e37cd40"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.10.0"
[[deps.DataPipes]]
git-tree-sha1 = "b97559f7b941226df5bfef2893bf71f83cac5c41"
uuid = "02685ad9-2d12-40c3-9f73-c6aeda6a7ff5"
version = "0.3.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.13"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.Dictionaries]]
deps = ["Indexing", "Random", "Serialization"]
git-tree-sha1 = "96dc5c5c8994be519ee3420953c931c55657a3f2"
uuid = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
version = "0.3.24"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "5158c2b41018c5f7eb1470d558127ac274eca0c9"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.1"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.Indexing]]
git-tree-sha1 = "ce1566720fd6b19ff3411404d4b977acd4814f9f"
uuid = "313cdc1a-70c2-5d6a-ae34-0150d3930a38"
version = "1.1.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "b3364212fb5d870f724876ffcd34dd8ec6d98918"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.7"
[[deps.IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "94d9c52ca447e23eac0c0f074effbcd38830deb5"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.18"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.4.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[deps.ProfileCanvas]]
deps = ["Base64", "JSON", "Pkg", "Profile", "REPL"]
git-tree-sha1 = "8fc50fe9b7a9a7425986c5709b2064775196bca7"
uuid = "efd6af41-a80b-495e-886c-e51b0c7d77a3"
version = "0.1.4"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SplitApplyCombine]]
deps = ["Dictionaries", "Indexing"]
git-tree-sha1 = "48f393b0231516850e39f6c756970e7ca8b77045"
uuid = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
version = "1.2.2"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.5.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "d1bf48bfcc554a3761a133fe3a9bb01488e06916"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.21"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"
"""
# ╔═╡ Cell order:
# ╠═f8e11e54-29e8-451f-a864-187161fb250d
# ╠═089e4520-67c7-41e9-b433-28e72f311049
# ╠═e78525dd-acfe-48c2-8445-71c754b8614a
# ╠═8c8ac28b-9096-47bf-a54e-2c926371622d
# ╠═27dacf95-27c1-4d25-9ccd-6330a5e3fc65
# ╠═1c545a87-129e-4976-8e57-26bdfb52507d
# ╠═2a613f12-89ed-46aa-8132-125455ffe81a
# ╠═99a20136-244d-4461-a469-1e26a60321bd
# ╠═de2d1dde-5c00-4f49-b1bc-f14156598a8d
# ╠═c9f42f7d-620e-4529-950c-15738c9da711
# ╠═43a719b6-4f0f-4b29-946b-316ac716b3d4
# ╠═2c098c8e-cfe0-4fff-b3b5-7c56231ffc65
# ╠═cf16545e-a409-48d4-98ab-93aa3951b75d
# ╠═c7e33758-8983-493e-8184-08545dc26835
# ╠═fcf1f47c-7f30-46d0-aa87-8302cd86362b
# ╠═d1d584d4-b7ee-4ece-910e-4c6b1ffa3b91
# ╠═e021dd30-9f64-45cf-9b33-b61b07dfa0c9
# ╠═292a317c-ecd2-4b03-b69e-91010a885f8e
# ╠═1325e1e5-079f-477e-895e-af0e47f8a89e
# ╠═ca6f325e-3322-4c3b-8d28-4b467cb903fb
# ╠═134b6a3b-7d44-40f5-b9c0-b5fdf052d1b9
# ╠═ba77ae70-85c9-4001-83c2-dc069f93d68d
# ╠═d9d55cfd-6c0a-4276-9562-85b4588e2edf
# ╠═800f94bb-23c1-4edb-9305-8f121e184e62
# ╠═075b8a65-b78c-4577-9169-03aa76c778c3
# ╠═c977bd4c-a861-4973-a3a7-d2a26412f7c4
# ╠═2e7eec24-1b69-49d3-80ae-233873bfbc3f
# ╠═f37c6114-30ee-11ed-00cc-1d35875cc46b
# ╠═2023726f-c64f-4ca5-be95-e3c4ba2ccbfb
# ╠═f6e2233d-85fa-47e5-ba32-25af03b89abe
# ╠═1af60db5-493d-4112-8142-09b6da29fbd7
# ╠═ffc5026d-c3bd-469d-a13e-6be0093af522
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1538 | module DataManipulation
using Reexport
using InverseFunctions
@reexport using Accessors
using AccessorsExtra # for values()
@reexport using DataPipes
@reexport using Skipper
@reexport using FlexiGroups
@reexport using FlexiMaps
using StructArrays
export
@S_str,
findonly, filterfirst, filterlast, filteronly, uniqueonly,
sortview, uniqueview,
materialize_views, collectview,
nest, @sr_str, @ss_str,
shift_range,
discreterange,
rev,
vcat_concrete
include("symbols.jl")
include("simplefuncs.jl")
include("views.jl")
include("uniqueview.jl")
include("discreterange.jl")
include("typeval_strings.jl")
include("comptime_indexing.jl")
include("nest.jl")
include("vcat.jl")
include("../ext/DictionariesExt.jl")
include("../ext/StructArraysExt.jl")
""" shift_range(x, a..b => A..B; clamp=false)
Linearly transform `x` from range `a..b` to `A..B`.
"""
function shift_range end
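# Rough usage sketch, with values taken from this repo's tests (`1..2` is an IntervalSets.jl interval):
#   shift_range(1.6, 1..2 => 20..30) == 26
#   shift_range(-2, 1..2 => 20..30; clamp=true) == 20   # clamped to the target interval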
""" rev(val)
A wrapper that reverses the order of `isless` comparisons. Useful when sorting by several keys, some forward, some reverse.
# Examples
```julia
sort(..., by=x -> (x.a, rev(x.b), rev(x.c)))
```
"""
struct rev{T}
val::T
end
Base.isless(a::rev, b::rev) = isless(b.val, a.val)
# some interactions between the companion packages; these involve type piracy and thus cannot be put into the upstream packages themselves
materialize_views(s::Skipper.Skip) = collect(s)
Base.getproperty(A::Skipper.Skip, p::Symbol) = mapview(FlexiMaps.Accessors.PropertyLens(p), A)
Base.getproperty(A::Skipper.Skip, p) = mapview(FlexiMaps.Accessors.PropertyLens(p), A)
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1737 | Base.getindex(nt::NamedTuple, p::Union{StaticRegex, Pair{<:StaticRegex}}, args...) = merge(nt[p], nt[args...])
@generated function Base.getindex(nt::NamedTuple{NS}, SR::StaticRegex) where {NS}
regex = unstatic(SR)
ns = filter(n -> occursin(regex, String(n)), NS)
return :( nt[$ns] )
end
@generated function Base.getindex(nt::NamedTuple{NS}, ::Pair{SR, SS}) where {NS, SR<:StaticRegex, SS<:StaticSubstitution}
regex = unstatic(SR)
subs = unstatic(SS)
ns = filter(n -> occursin(regex, String(n)), NS)
nss = map(n -> replace(String(n), regex => subs) |> Symbol, ns)
return :( NamedTuple{$nss}(($([:(nt.$ns) for ns in ns]...),)) )
end
# cannot avoid "method too new" error:
# @generated function Base.getindex(nt::NamedTuple{NS}, ::Pair{StaticRegex{rs}, F}) where {NS, rs, F <: Function}
# regex = Regex(String(rs))
# ns = filter(n -> occursin(regex, String(n)), NS)
# nss = map(n -> replace(String(n), regex => s -> Base.invokelatest(F.instance, s)) |> Symbol, ns)
# return :( NamedTuple{$nss}(($([:(nt.$ns) for ns in ns]...),)) )
# end
@generated function Base.setindex(nt::NamedTuple{NS}, val::NamedTuple{VNS}, SR::StaticRegex) where {NS, VNS}
regex = unstatic(SR)
ns = filter(n -> occursin(regex, String(n)), NS)
@assert VNS == ns
return :(merge(nt, val))
end
Accessors.delete(nt::NamedTuple, o::IndexLens{<:Tuple{StaticRegex, Vararg{Any}}}) = _delete(nt, o.indices...)
_delete(nt::NamedTuple, p::Union{StaticRegex, Pair{<:StaticRegex}}, args...) = _delete(_delete(nt, p), args...)
@generated function _delete(nt::NamedTuple{NS}, SR::StaticRegex) where {NS}
regex = unstatic(SR)
ns = filter(n -> !occursin(regex, String(n)), NS)
return :( nt[$ns] )
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 2044 | """ discreterange(f, start, stop; length)
Similar to `maprange(...)`, but returns `length` unique integers.
# Example
10 log-spaced values from 1 to 100:
```julia
# regular floating-point maprange
julia> maprange(log, 1, 100, length=10)
10-element FlexiMaps.MappedArray{Float64, 1, FlexiMaps.var"#12#13"{typeof(log), Int64, Int64, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, Int64, Int64}, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}}:
1.0
1.6681005372000588
2.7825594022071245
4.641588833612779
7.742636826811271
12.915496650148844
21.544346900318843
35.93813663804628
59.948425031894104
100.0
# discreterange of integers
julia> discreterange(log, 1, 100, length=10)
10-element Vector{Int64}:
1
2
3
5
9
14
23
38
61
100
```
"""
function discreterange(f, start, stop; length::Int, mul=_sameoneunit(start, stop))
start, stop, mul = promote(start, stop, mul)
_discreterange(f, start, stop; length, mul)
end
_sameoneunit(a, b) = uniqueonly((oneunit(a), oneunit(b)))
function _discreterange(f, start::T, stop::T; length::Int, mul::T) where {T}
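# Sketch of the idea: walk from `start` to `stop` in equal steps of the f-transformed coordinate,
# rounding each point to a multiple of `mul`; whenever such a step would not advance past the
# previous point, force an advance of `mul` and re-spread the remaining f-space distance over the
# points that are left.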
if inverse(f) isa NoInverse
@assert set(start, f, f(start)) == set(stop, f, f(start))
@assert set(start, f, f(stop)) == set(stop, f, f(stop))
end
start < stop || throw(ArgumentError("start must be less than stop"))
length - 1 > abs(start - stop) / mul && throw(ArgumentError("length must be greater than the distance between start and stop"))
res = Vector{T}(undef, length)
res[1] = start
step = (f(stop) - f(start)) / (length - 1)
prev = start
for i in 2:length
next = @set f(prev) += step
if next >= prev + mul
res[i] = round(Integer, next / mul) * mul
prev = next
else
prev = prev + mul
res[i] = round(Integer, prev / mul) * mul
step = (f(stop) - f(prev)) / (length - i)
end
end
return res
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 3904 | """
nest(::NamedTuple, [sr"regex" [=> (ss"sub", ...)], ...])
nest(::StructArray, [sr"regex" [=> (ss"sub", ...)], ...])
Put a subset of properties into a nested object of the same kind (e.g. a nested `NamedTuple`).
Properties to nest are selected in compile time by a regular expression. Their resulting names are extracted from the regex groups, or specified explicitly by substitution strings.
# Examples
```julia
julia> nest((a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\\w+)" ))
(a=(x=1, y="2", z_z=3), b=:z)
julia> nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\\w+)" => (ss"x\\1", ss"val", ss"\\2") ))
(xa=(val=(x=1, y="2", z_z=3),), b=:z)
```
"""
function nest end
@generated function nest(x::NamedTuple{KS}, ::StaticRegex{SR}) where {KS, SR}
regex = _anchored_regex(SR)
_nest_code(KS, regex) do m
@p m |> pairs |> collect |> filter(!isnothing(last(_))) |> sort |> map(last) |> map(Symbol)
end
end
@generated function nest(x::NamedTuple{KS}, ::Pair{StaticRegex{SR}, SS}) where {KS, SR, SS <: Tuple}
regex = _anchored_regex(SR)
subs = map(unstatic, SS.types)
_nest_code(KS, regex) do m
@p subs |> map(sub -> replace(m.match, regex => sub)) |> map(Symbol)
end
end
nest(x, rs...) =
foldl(rs; init=x) do x, r
nest(x, r)
end
_anchored_regex(SR::Symbol) = Regex(string(SR), Base.DEFAULT_COMPILER_OPTS | Base.PCRE.ANCHORED | Base.PCRE.ENDANCHORED, Base.DEFAULT_MATCH_OPTS)
function _nest_code(func, KS, regex)
paths = map(KS) do k
ks = string(k)
m = match(regex, ks)
isnothing(m) && return [k]
@assert m.match == ks
return func(m)
end
allunique(paths) || error("Target paths not unique: $paths")
npairs = paths_to_nested_pairs(paths, KS)
nested_pairs_to_ntexpr(npairs)
end
nested_pairs_to_ntexpr(npairs::Symbol) = :(x.$npairs)
function nested_pairs_to_ntexpr(npairs)
:(
NamedTuple{$(npairs .|> first |> Tuple)}((
$((npairs .|> last .|> nested_pairs_to_ntexpr)...),
))
)
end
function paths_to_nested_pairs(paths, values)
if length(paths) == 1 && only(paths) == []
return only(values)
end
@assert !any(isempty, paths)
@p let
zip(paths, values)
group(_[1][1])
pairs
collect
map() do (k, gr)
k => paths_to_nested_pairs(map(p -> p[2:end], first.(gr)), last.(gr))
end
end
end
# struct KeepSame end
# @generated function _unnest(nt::NamedTuple{KS, TS}, ::Val{KEYS}=Val(nothing), ::Val{TARGET}=Val(KeepSame())) where {KS, TS, KEYS, TARGET}
# types = fieldtypes(TS)
# assigns = mapreduce(vcat, KS, types) do k, T
# if !isnothing(KEYS) && k ∈ KEYS && !(T <: NamedTuple)
# error("Cannot unnest field $k::$T")
# end
# if (isnothing(KEYS) || k ∈ KEYS) && T <: NamedTuple
# ks = fieldnames(T)
# tgt_k = TARGET isa KeepSame ? k : TARGET
# ks_new = map(ks) do k_
# isnothing(tgt_k) ? k_ : Symbol(tgt_k, :_, k_)
# end
# map(ks, ks_new) do k_, k_n
# :( $k_n = nt.$k.$k_ )
# end |> collect
# else
# :( $k = nt.$k )
# end
# end
# :( ($(assigns...),) )
# end
# @inline unnest(nt::NamedTuple) = _unnest(nt)
# @inline unnest(nt::NamedTuple, k::Symbol) = _unnest(nt, Val((k,)))
# @inline unnest(nt::NamedTuple, kv::Pair{Symbol, <:Union{Symbol, Nothing}}) = _unnest(nt, Val((first(kv),)), Val(last(kv)))
# @inline unnest(nt::NamedTuple, ks::Tuple{Vararg{Symbol}}) = _unnest(nt, Val(ks))
# vcat_data(ds...; kwargs...) = reduce(vcat_data, ds; kwargs...)
# function Base.reduce(::typeof(vcat_data), ds; source=nothing)
# isnothing(source) ?
# reduce(vcat, ds) :
# mapmany(((k, d),) -> d, ((k, d), x) -> insert(x, source, k), zip(keys(ds), values(ds)))
# end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 2281 | """ findonly(pred, X)
Like `findfirst(pred, X)`, but ensures that exactly a single match is present. """
function findonly(pred::F, A) where {F}
ix = findfirst(pred, A)
isnothing(ix) && throw(ArgumentError("no element satisfies the predicate"))
isnothing(findnext(pred, A, nextind(A, ix))) || throw(ArgumentError("multiple elements satisfy the predicate"))
return ix
end
findonly(pred::F, A::NamedTuple{KS}) where {F,KS} = KS[findonly(pred, Tuple(A))]
""" filterfirst(pred, X)
More efficient `first(filter(pred, X))`. """
filterfirst(pred::F, A) where {F} = @p A |> Iterators.filter(pred) |> first
""" filterlast(pred, X)
More efficient `last(filter(pred, X))`. """
filterlast(pred::F, A) where {F} = @p A |> Iterators.filter(pred) |> last
""" filteronly(pred, X)
More efficient `only(filter(pred, X))`. """
filteronly(pred::F, A) where {F} = @p A |> Iterators.filter(pred) |> only
""" uniqueonly([pred], X)
More efficient `only(unique([pred], X))`. """
function uniqueonly end
uniqueonly(A) = uniqueonly(identity, A)
function uniqueonly(f::F, A) where {F}
if !allequal(mapview(f, A))
ufA = @p unique(map(f, A))
maxn = 5
throw(ArgumentError("got $(length(ufA)) distinct values: $(join(first(ufA, maxn), ", "))$(length(ufA) > maxn ? ", ..." : "")"))
end
return first(A)
end
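# Rough usage, mirroring test/runtests.jl:
#   findonly(iseven, [11, 12]) == 2       # errors if there is no match or several matches
#   filterfirst(iseven, [11, 12, 14]) == 12
#   filterlast(iseven, [11, 12, 14]) == 14
#   filteronly(iseven, [11, 12]) == 12
#   uniqueonly([1, 1]) == 1               # errors if the values are not all equal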
Accessors.set(obj, ::typeof(uniqueonly), v) = set(obj, Elements(), v)
Accessors.set(obj, o::Base.Fix1{typeof(filterfirst)}, v) = @set obj[findfirst(o.x, obj)] = v
Accessors.set(obj, o::Base.Fix1{typeof(filterlast)}, v) = @set obj[findlast(o.x, obj)] = v
Accessors.set(obj, o::Base.Fix1{typeof(filteronly)}, v) = @set obj[findonly(o.x, obj)] = v
Accessors.delete(obj, o::Base.Fix1{typeof(filterfirst)}) = @delete obj[findfirst(o.x, obj)]
Accessors.delete(obj, o::Base.Fix1{typeof(filterlast)}) = @delete obj[findlast(o.x, obj)]
Accessors.delete(obj, o::Base.Fix1{typeof(filteronly)}) = @delete obj[findonly(o.x, obj)]
AccessorsExtra.hasoptic(obj, o::Base.Fix1{typeof(filterfirst)}) = any(o.x, obj)
AccessorsExtra.hasoptic(obj, o::Base.Fix1{typeof(filterlast)}) = any(o.x, obj)
AccessorsExtra.hasoptic(obj, o::Base.Fix1{typeof(filteronly)}) = count(o.x, obj) == 1
AccessorsExtra.hasoptic(obj, o::Base.Fix1{typeof(findonly)}) = count(o.x, obj) == 1
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 299 | macro S_str(str)
# :(Symbol($(esc(str)))) - simple version without interpolation
str_interpolated = esc(Meta.parse("\"$(escape_string(str))\""))
:(Symbol($str_interpolated))
end
# XXX: piracy
(name::Symbol)(x) = getproperty(x, name)
(::Val{name})(x) where {name} = getproperty(x, name)
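# Rough usage (see the "symbols" testset):
#   x = (a=123, def="c")
#   :a(x) == 123         # a Symbol called as a function extracts that property
#   S"def"(x) == "c"     # S"..." produces a Symbol, with string interpolation supported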
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1295 | # struct Str{S} end
struct StaticRegex{S} end
struct StaticSubstitution{S} end
# macro c_str(x)
# :($Str{Symbol($x)}())
# end
""" sr"regex"
"Static" regular expression in the type domain. The most direct use is indexing `NamedTuple`s and selecting columns from type-stable tables such as `StructArray`s.
The underlying value can be extracted with the `unstatic()` function.
See also: `ss"substitution"`, `nest`.
# Examples
```julia
nt = (a_1=1, a_2=10, b_1=100)
# select a subset of nt by regex:
nt[sr"a_\\d"] === (a_1 = 1, a_2 = 10)
# select and rename by regex and substitution string:
nt[sr"a_(\\d)" => ss"xxx_\\1_xxx"] === (xxx_1_xxx = 1, xxx_2_xxx = 10)
```
"""
macro sr_str(x)
:($StaticRegex{Symbol($x)}())
end
""" ss"substitution"
"Static" substitution string in the type domain.
The underlying value can be extracted with the `unstatic()` function.
See also: `sr"regex"`, `nest`.
# Examples
```julia
nt = (a_1=1, a_2=10, b_1=100)
# select and rename by regex and substitution string:
nt[sr"a_(\\d)" => ss"xxx_\\1_xxx"] === (xxx_1_xxx = 1, xxx_2_xxx = 10)
```
"""
macro ss_str(x)
:($StaticSubstitution{Symbol($x)}())
end
unstatic(::Type{StaticRegex{S}}) where {S} = Regex(String(S))
unstatic(::Type{StaticSubstitution{S}}) where {S} = SubstitutionString(String(S))
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1944 | """ sortview(X; kws...)
Like `sort(X; kws...)`, but returns a view instead of a copy. """
sortview(A; kwargs...) = view(A, sortperm(A; kwargs...))
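# Sketch from the tests: sortview([1:5; 5:-1:1]) == [1, 1, 2, 2, 3, 3, 4, 4, 5, 5];
# assigning into the view writes through to the corresponding positions of the parent array.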
""" uniqueview([f], X)
Like `unique([f], X)`, but returns a view instead of a copy. """
uniqueview(A) = uniqueview(identity, A)
uniqueview(f::F, A) where {F} = UniqueView(A, groupfind(f, A) |> values |> collect)
struct UniqueView{T, TX <: AbstractArray{T}, TI} <: AbstractVector{T}
parent::TX
groupedindices::TI
end
Base.size(A::UniqueView) = size(A.groupedindices)
# Base.keys(A::UniqueView) = keys(parent(A))
# Base.values(A::UniqueView) = mapview(_f(A), values(parent(A)))
# Base.keytype(A::UniqueView) = keytype(parent(A))
# Base.valtype(A::UniqueView) = eltype(A)
Base.parent(A::UniqueView) = getfield(A, :parent)
Base.parentindices(A::UniqueView) = (mapview(first, getfield(A, :groupedindices)),)
function inverseindices(A::UniqueView)
out = similar(keys(parent(A)))
for (j, grixs) in pairs(A.groupedindices)
for i in grixs
@inbounds out[i] = j
end
end
return out
end
Base.@propagate_inbounds Base.getindex(A::UniqueView, I::Int) = parent(A)[first(A.groupedindices[I])]
Base.@propagate_inbounds Base.setindex!(A::UniqueView, v, I::Int) = (parent(A)[A.groupedindices[I]] .= v; A)
Accessors.set(obj, ::typeof(sortview), val) = @set obj[sortperm(obj)] = val
function Accessors.modify(f, obj, o::typeof(sortview))
sv = o(obj)
@set obj[parentindices(sv)...] = f(sv)
end
function Accessors.set(obj, ::typeof(uniqueview), val)
IXs = inverseindices(uniqueview(obj))
setall(obj, Elements(), @views val[IXs])
end
function Accessors.modify(f, obj, ::typeof(uniqueview))
uv = uniqueview(obj)
val = f(uv)
setall(obj, Elements(), @views val[inverseindices(uv)])
end
function Accessors.set(obj, ::typeof(unique), val)
IXs = inverseindices(uniqueview(obj))
setall(obj, Elements(), @views val[IXs])
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 1177 | function vcat_concrete(a::StructVector, b::StructVector)
constr = result_constructor(eltype(a), eltype(b))
comps = if eltype(a) <: NamedTuple && eltype(b) <: NamedTuple
ks = intersect(propertynames(a), propertynames(b))
map(vcat_concrete, StructArrays.components(a)[ks], StructArrays.components(b)[ks])
else
map(vcat_concrete, StructArrays.components(a), StructArrays.components(b))
end
ET = Base.promote_op(constr, map(eltype, comps)...)
StructArray{ET}(comps)
end
function vcat_concrete(a::AbstractVector, b::AbstractVector)
if fieldcount(eltype(a)) == 0 || fieldcount(eltype(b)) == 0
vcat(a, b)
else
vcat_concrete(StructArray(a), StructArray(b))
end
end
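# Rough illustration (from the "vcat" testset): when the element types share only some fields,
# vcat_concrete keeps the common fields and returns a concretely-typed StructArray:
#   X = StructArray(x=[(a=1, b=2), (a=2, b=3)]); Y = StructArray(x=[(a=3,), (a=4,)])
#   vcat_concrete(X, Y).x.a == [1, 2, 3, 4]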
result_constructor(A, B) = constructorof(A) == constructorof(B) ? constructorof(A) : error("Incompatible eltypes for vcat: $A and $B")
result_constructor(A::Type{<:Tuple}, B::Type{<:AbstractVector}) = constructorof(A)
result_constructor(A::Type{<:AbstractVector}, B::Type{<:Tuple}) = constructorof(A)
result_constructor(A::Type{<:NamedTuple{KA}}, B::Type{<:NamedTuple{KB}}) where {KA,KB} = constructorof(NamedTuple{Tuple(intersect(KA, KB))})
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 429 | """ materialize_views(X)
Materialize views arbitrarily nested in dictionaries and `StructArray`s. """
materialize_views(A::Union{AbstractArray,AbstractDict}) = @modify(materialize_views, values(A)[∗])
materialize_views(A) = A
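# Rough usage (from the tests): materialize_views(mapview(x -> 10x, [1, 2, 3])) == [10, 20, 30]
# returns a plain Vector; grouped views, e.g. from group(), are materialized recursively as well.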
""" collectview(X)
Turn the input into an `AbstractArray`, like `collect` but doesn't copy.
Mostly useful for general handling of arrays and dictionaries. """
collectview(A::AbstractArray) = A
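# Rough usage (from the tests): collectview([10, 20, 30]) returns the input array as-is,
# while collectview(group(isodd, xs)) returns a vector of the group views without copying.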
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | code | 15600 | using TestItems
using TestItemRunner
@run_package_tests
@testitem "findonly" begin
using AccessorsExtra
@test @inferred(findonly(iseven, [11, 12])) == 2
@test_throws "multiple elements" findonly(isodd, [1, 2, 3])
@test_throws "no element" findonly(isodd, [2, 4])
@test @inferred(findonly(iseven, (11, 12))) == 2
@test @inferred(findonly(iseven, (a=1, b=2))) == :b
@test @inferred(findonly(iseven, (a=1, b=2, c=3))) == :b
@test_throws "multiple elements" findonly(iseven, (a=1, b=2, c=4))
@test (@maybe findonly(iseven, _))([11, 12]) == 2
@test (@maybe findonly(isodd, _))([10, 12]) == nothing
end
@testitem "filteronly" begin
using AccessorsExtra
@test @inferred(filteronly(iseven, [11, 12])) == 12
@test set([11, 12], @o(filteronly(iseven, _)), 2) == [11, 2]
@test delete([11, 12], @o(filteronly(iseven, _))) == [11]
@test @inferred(filteronly(iseven, (11, 12))) == 12
@test set((11, 12), @o(filteronly(iseven, _)), 2) == (11, 2)
@test delete((11, 12), @o(filteronly(iseven, _))) == (11,)
@test_throws "multiple elements" filteronly(isodd, [1, 2, 3])
@test_throws "is empty" filteronly(isodd, [2, 4])
@test (@maybe filteronly(iseven, _))([7,8,9]) == 8
@test (@maybe filteronly(isodd, _))([7,8,9]) == nothing
@test (@maybe filteronly(isodd, _))([]) == nothing
end
@testitem "filterfirst/last" begin
using AccessorsExtra
@test @inferred(filterfirst(iseven, [11, 12, 14])) == 12
@test set([11, 12, 14], @o(filterfirst(iseven, _)), 2) == [11, 2, 14]
@test delete([11, 12, 14], @o(filterfirst(iseven, _))) == [11, 14]
@test_throws "must be non-empty" filterfirst(isodd, [2, 4])
@test @inferred(filterlast(iseven, [11, 12, 14])) == 14
@test set([11, 12, 14], @o(filterlast(iseven, _)), 2) == [11, 12, 2]
@test delete([11, 12, 14], @o(filterlast(iseven, _))) == [11, 12]
@test_throws "must be non-empty" filterlast(isodd, [2, 4])
@test @inferred(filterfirst(iseven, (11, 12, 14))) == 12
@test set((11, 12, 14), @o(filterfirst(iseven, _)), 2) == (11, 2, 14)
@test delete((11, 12, 14), @o(filterfirst(iseven, _))) == (11, 14)
@test (@maybe filterfirst(isodd, _))([7,8,9]) == 7
@test (@maybe filterlast(isodd, _))([7,8,9]) == 9
@test (@maybe filterfirst(isodd, _))([8,10]) == nothing
@test (@maybe filterfirst(isodd, _))([]) == nothing
@test (@maybe filterlast(isodd, _))([]) == nothing
end
@testitem "uniqueonly" begin
@test uniqueonly([1, 1]) == 1
@test set([1, 1], uniqueonly, 2) == [2, 2]
@test_throws "2 distinct values" uniqueonly([1, 1, 2])
@test uniqueonly(isodd, [1, 3]) == 1
@test_throws "2 distinct values" uniqueonly(isodd, [1, 1, 2])
@test uniqueonly((1, 1)) == 1
@test set((1, 1), uniqueonly, 2) == (2, 2)
@test uniqueonly(isodd, (1, 3)) == 1
end
@testitem "symbols" begin
x = (a=123, def="c")
@test :a(x) == 123
@test S"def"(x) == "c"
@test Val(:a)(x) == 123
end
@testitem "discreterange" begin
using DataManipulation: discreterange
using AccessorsExtra
using Dates
using DateFormats
using Unitful
@test discreterange(log, 10, 10^5, length=5)::Vector{Int} == [10, 100, 1000, 10000, 100000]
@test discreterange(log, 2, 10, length=5)::Vector{Int} == [2, 3, 4, 7, 10]
@test discreterange(log, 2, 10, length=5, mul=1.)::Vector{Float64} == [2, 3, 4, 7, 10]
@test discreterange(log, 2, 10, length=5, mul=0.1)::Vector{Float64} == [2, 3, 4.5, 6.7, 10]
@test discreterange(@o(log(ustrip(u"m", _))), 2u"m", 10u"m", length=5) == [2, 3, 4, 7, 10]u"m"
@test_throws Exception discreterange(@o(log(ustrip(u"m", _))), 200u"cm", 10u"m", length=5) == [2, 3, 4, 7, 10]u"m"
@test discreterange(@o(log(ustrip(u"m", _))), 200u"cm", 10u"m", length=5, mul=1u"m") == [2, 3, 4, 7, 10]u"m"
@test_broken (discreterange(@o(log(_ / Second(1))), Second(2), Second(10), length=5); true)
@test discreterange(@o(log(_ /ₜ Second(1))), Second(2), Second(10), length=5) == [Second(2), Second(3), Second(4), Second(7), Second(10)]
@testset for a in [1, 10, 100, 1000, 10^10], b in [1, 10, 100, 1000, 10^10], len in [2:100; 12345]
a >= b && continue
if len > abs(a - b) + 1
@test_throws "length must be greater" discreterange(log, a, b, length=len)
continue
end
rng = discreterange(log, a, b, length=len)::Vector{Int}
@test length(rng) == len
@test allunique(rng)
@test issorted(rng, rev=a > b)
@test minimum(rng) == min(a, b)
@test maximum(rng) == max(a, b)
@test discreterange(log, a, b, length=len, mul=1)::Vector{Int} == rng
@test discreterange(log, a, b, length=len, mul=1.0)::Vector{Float64} == rng
@test 10 .* discreterange(log, 0.1*a, 0.1*b, length=len, mul=0.1)::Vector{Float64} ≈ rng
end
end
@testitem "shift_range" begin
using IntervalSets
using InverseFunctions
f = Base.Fix2(shift_range, 1..2 => 20..30)
@test f(1) == 20
@test f(1.6) == 26
@test f(-2) == -10
InverseFunctions.test_inverse(f, 1.2)
f = Base.Fix2(shift_range, 1..2 => 30..20)
@test f(1) == 30
@test f(1.6) == 24
@test f(-2) == 60
InverseFunctions.test_inverse(f, 1.2)
@test shift_range(1, 1..2 => 20..30; clamp=true) == 20
@test shift_range(1.6, 1..2 => 20..30; clamp=true) == 26
@test shift_range(-2, 1..2 => 20..30; clamp=true) == 20
end
@testitem "rev" begin
@testset for A in (
rand(Int, 5),
string.(rand(Int, 5)),
[1., NaN, 0.],
Any[10, 1.0]
)
@test isequal(sort(A; rev=true), sort(A; by=rev))
@test isequal(sort(A; rev=true), sort(A; by=x -> (rev(x^1), x)))
end
end
@testitem "interactions" begin
using Dictionaries
using StructArrays
a = mapview(x -> x + 1, skip(isnan, [1, 2, NaN, 3]))
@test eltype(a) == Float64
@test @inferred(a[1]) == 2
@test_throws "is skipped" a[3]
@test @inferred(sum(a)) == 9
a = skip(isnan, mapview(x -> x + 1, [1, 2, NaN, 3]))
@test eltype(a) == Float64
@test @inferred(a[1]) == 2
@test_throws "is skipped" a[3]
@test @inferred(sum(a)) == 9
a = StructArray(a=[missing, -1, 2, 3])
sa = @inferred skip(x -> ismissing(x.a) || x.a < 0, a)
@test collect(sa.a) == [2, 3]
end
@testitem "sortview" begin
using Accessors
a = [1:5; 5:-1:1]
as = @inferred sortview(a)
@test as == [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
as[4] = 0
@test a == [1, 2, 3, 4, 5, 5, 4, 3, 0, 1]
@test set([5, 1, 4, 2, 3], sortview, 10 .* (1:5)) == [50, 10, 40, 20, 30]
@test modify(cumsum, [4, 1, 4, 2, 3], sortview) == [10, 1, 14, 3, 6]
end
@testitem "uniqueview" begin
a = [1:5; 5:-1:1]
a_orig = copy(a)
au = unique(a)
auv = @inferred(uniqueview(a))::AbstractVector{Int}
@test auv == au == 1:5
@test a[parentindices(auv)...] == auv
@test auv[DataManipulation.inverseindices(auv)] == a
auv[1] = 0
@test a == [0; 2:5; 5:-1:2; 0]
a .= a_orig
cnt = Ref(0)
f(x) = (cnt[] += 1; x * 10)
auv .= f.(auv)
@test a == 10 .* a_orig
@test auv == unique(a)
@test cnt[] == 5
a = [1:5; 5:-1:1]
au = unique(isodd, a)
auv = @inferred(uniqueview(isodd, a))::AbstractVector{Int}
@test au == auv == [1, 2]
@test a[parentindices(auv)...] == auv
auv .= [0, 10]
@test a == [0, 10, 0, 10, 0, 0, 10, 0, 10, 0]
for uf in (unique, uniqueview)
Accessors.test_getset_laws(uf, [5, 1, 5, 2, 3], rand(4), rand(4))
@test @inferred(modify(x -> 1:length(x), [:a, :b, :a, :a, :b], uf)) == [1, 2, 1, 1, 2]
cnt = Ref(0)
f(x) = (cnt[] += 1; 2x)
@test modify(f, [1:5; 1:10], @o(uf(_) |> Elements())) == [2:2:10; 2:2:20]
@test cnt[] == 10
end
end
@testitem "materialize_views" begin
using Dictionaries: dictionary, Dictionary, AbstractDictionary
using SentinelViews
@test materialize_views([10, 20, 30])::Vector{Int} == [10, 20, 30]
@test materialize_views(view([10, 20, 30], [1, 2]))::Vector{Int} == [10, 20]
@test materialize_views(filterview(x -> true, [10, 20, 30]))::Vector{Int} == [10, 20, 30]
@test materialize_views(mapview(x -> 10x, [1, 2, 3]))::Vector{Int} == [10, 20, 30]
@test materialize_views(skip(isnan, [10, 20, NaN]))::Vector{Float64} == [10, 20]
@test materialize_views(sentinelview([10, 20, 30], [1, nothing, 3], nothing))::Vector{Union{Int, Nothing}} == [10, nothing, 30]
@test materialize_views(group(isodd, 3 .* [1, 2, 3, 4, 5]))::AbstractDictionary{Bool, Vector{Int}} == dictionary([true => [3, 9, 15], false => [6, 12]])
@test materialize_views(group(isodd, 3 .* [1, 2, 3, 4, 5]; restype=Dict))::Dict{Bool, Vector{Int}} == Dict([true => [3, 9, 15], false => [6, 12]])
end
@testitem "collectview" begin
@test collectview([10, 20, 30])::Vector{Int} == [10, 20, 30]
@test collectview(view([10, 20, 30], [1, 2]))::SubArray{Int} == [10, 20]
@test collectview(group(isodd, 3 .* [1, 2, 3, 4, 5]))::Vector{<:SubArray{Int}} == [[3, 9, 15], [6, 12]]
end
@testitem "comptime indexing" begin
using StructArrays
nt = (a_1=1, a_2=10., b_1=100)
@test nt[sr"a_\d"] === (a_1 = 1, a_2 = 10.)
@test nt[sr"a_(\d)" => ss"xxx_\1_xxx"] === (xxx_1_xxx = 1, xxx_2_xxx = 10.)
@test nt[sr"a_(\d)" => ss"x_\1", sr"b.*"] === (x_1 = 1, x_2 = 10., b_1 = 100)
@test_broken (nt[sr"a_(\d)" => (x -> x), sr"b.*"]; true) # cannot avoid "method too new" error
A = StructArray(a_1=[1], a_2=[10.], b_1=[100])
B = A[sr"a_\d"]
@test B == StructArray(a_1=[1], a_2=[10.])
@test B.a_1 === A.a_1
@test @delete(nt[sr"a_\d"]) === (b_1 = 100,)
B = @delete A[sr"a_\d"]
@test B == StructArray(b_1=[100])
@test B.b_1 === A.b_1
@test @modify(x -> x + 1, nt[sr"a_\d"] |> Elements()) === (a_1 = 2, a_2 = 11., b_1 = 100)
@test (@inferred modify(x -> x + 1, nt, @optic _[sr"a_\d"] |> Elements())) === (a_1 = 2, a_2 = 11., b_1 = 100)
@test (@inferred modify(x -> x .+ ndims(x), A, @optic _[sr"a_\d"] |> Properties())) == StructArray(a_1=[2], a_2=[11.], b_1=[100])
end
@testitem "nest" begin
using StructArrays
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\w+)" )) ===
(a=(x=1, y="2", z_z=3), b=:z)
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)(?:_(\w+))" )) ===
(a=(x=1, y="2", z_z=3), b=:z)
@test @inferred(nest( (b=:z,), sr"(a)_(\w+)" )) ===
(b=:z,)
@test @inferred(nest( (x_a=1, y_a="2", z_z_a=3, b=:z), sr"(?<y>\w+)_(?<x>a)" )) ===
(a=(x=1, y="2", z_z=3), b=:z)
@test @inferred(nest( (x_a=1, y_a="2", z_z_a=3, b_aa=1, b_a="xxx"), sr"(?<y>\w+)_(?<x>a)|(b)_(\w+)" )) ===
(a=(x=1, y="2", z_z=3, b="xxx"), b=(aa=1,))
@test @inferred(nest( (x_a=1, y_a="2", z_z_a=3, b_aa=1, b_a="xxx"), sr"(b)_(\w+)|(?<y>\w+)_(?<x>a)" )) ===
(a=(x=1, y="2", z_z=3), b=(aa=1, a="xxx"))
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\w+)" => (ss"xabc", ss"val_\2") )) ===
(xabc=(val_x=1, val_y="2", val_z_z=3), b=:z)
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)(?:_(\w+))" => (ss"xabc", ss"val_\2") )) ===
(xabc=(val_x=1, val_y="2", val_z_z=3), b=:z)
@test @inferred(nest( (a=0, a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)(?:_(\w+))?" => (ss"xabc", ss"val_\2") )) ===
(xabc=(val_=0, val_x=1, val_y="2", val_z_z=3), b=:z)
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\w+)" => (ss"x\1", ss"val", ss"\2") )) ===
(xa=(val=(x=1, y="2", z_z=3),), b=:z)
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\w+)" => (ss"\2_\1",) )) ===
(x_a=1, y_a="2", z_z_a=3, b=:z)
@test @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b_1=:z, b_2=5), sr"(a)_(\w+)", sr"(b)_(\d)" => (ss"\1", ss"i\2") )) ===
(a=(x=1, y="2", z_z=3), b=(i1=:z, i2=5))
@test_broken @inferred(nest( (a_a=1, a_b=2, b=3), sr"(a)_(\w)", sr"(\w)" => (ss"xx", ss"\1") )) ===
(a=(a=1, b=2), xx=(b=3,))
@test_throws "not unique" @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a).+" )) ===
(a=(x=1, y="2", z_z=3), b=:z)
@test_throws "not unique" @inferred(nest( (a_x=1, a_y="2", a_z_z=3, b=:z), sr"(a)_(\w+)" => (ss"xabc",) )) ===
(xabc=(val_x=1, val_y="2", val_z_z=3), b=:z)
sa = StructArray(a_x=[1], a_y=["2"], a_z_z=[3], b=[:z])
san = @inferred nest(sa, sr"(a)_(\w+)")
@test only(san) === (a=(x=1, y="2", z_z=3), b=:z)
@test san.a.x === sa.a_x
@test san.b === sa.b
end
# @testitem "(un)nest" begin
# @test @inferred(unnest((a=(x=1, y="2"), b=:z))) === (a_x=1, a_y="2", b=:z)
# @test_throws ErrorException unnest((a=(x=1, y="2"), a_x=3, b=:z))
# @test @inferred(unnest((a=(x=1, y=(u="2", w=3)), b=:z))) === (a_x=1, a_y=(u="2", w=3), b=:z)
# f = nt -> unnest(nt, ())
# @test @inferred(f((a=(x=1, y="2"), b=:z))) === (a=(x=1, y="2"), b=:z)
# f = nt -> unnest(nt, (:a,))
# @test @inferred(f((a=(x=1, y="2"), b=:z))) === (a_x=1, a_y="2", b=:z)
# f = nt -> unnest(nt, :a)
# @test @inferred(f((a=(x=1, y="2"), b=:z))) === (a_x=1, a_y="2", b=:z)
# @test_throws ErrorException unnest((a=(x=1, y="2"), b=:z), (:a, :b))
# f = nt -> unnest(nt, :a => nothing)
# @test @inferred(f((a=(x=1, y="2"), b=:z))) === (x=1, y="2", b=:z)
# # @test nest( (a_x=1, a_y="2", a_z_z=3, b=:z), startswith(:a_) ) == (a=(x=1, y="2", z_z=3), b=:z)
# # @test nest( (x_a=1, y_a="2", z_z_a=3, b=:z), endswith(:_a) ) == (a=(x=1, y="2", z_z=3), b=:z)
# # @test nest( (x_a=1, y_a="2", z_z_a=3, b_aa=1), endswith(:_a), startswith(:b) ) == (a=(x=1, y="2", z_z=3), b=(aa=1,))
# # @test f( (a_x=1, a_y="2", a_z_z=3, b=:z), x -> (a=(x=x.a_x, y=x.a_y, z_z=x.a_z_z),) )
# # @test @replace( (name="abc", ra=1, dec=2), (coords=(_.ra, _.dec),) ) == (name="abc", coords=(1, 2))
# # @test @replace( (name="abc", ra=1, dec=2), (coords=(@o(_.ra), @o(_.dec)),) ) == (name="abc", coords=(1, 2))
# # @test replace( (name="abc", ra=1, dec=2), @o(_[(:ra, :dec)]) => tuple => @o(_.coords) ) == (name="abc", coords=(1, 2))
# end
@testitem "vcat" begin
using StructArrays
X = StructArray(x=[(a=1, b=2), (a=2, b=3)])
Y = StructArray(x=[(a=3,), (a=4,)])
@test vcat(X, Y).x::Vector{NamedTuple} == [(a=1, b=2), (a=2, b=3), (a=3,), (a=4,)]
@test vcat_concrete(X, Y).x::AbstractVector{@NamedTuple{a::Int}} == [(a=1,), (a=2,), (a=3,), (a=4,)]
@test vcat_concrete(X, Y).x.a == [1, 2, 3, 4]
# X = [(a=1, b=2), (a=2, b=3)]
# Y = [(a=2, b=1)]
# @test vcat_data(X, Y, fields=:setequal)
# @test vcat_data(X, Y, fields=:equal)
# @test vcat_data(X, Y, fields=intersect)
# @test vcat_data(X, Y, fields=union)
# @test vcat_data(X, Y) == [(a=1, b=2), (a=2, b=3), (a=2, b=1)]
# @test vcat_data(X, Y; source=@o(_.src)) == [(a=1, b=2, src=1), (a=2, b=3, src=1), (a=2, b=1, src=2)]
# @test reduce(vcat_data, (X, Y); source=@o(_.src)) == [(a=1, b=2, src=1), (a=2, b=3, src=1), (a=2, b=1, src=2)]
# @test reduce(vcat_data, (; X, Y); source=@o(_.src)) == [(a=1, b=2, src=:X), (a=2, b=3, src=:X), (a=2, b=1, src=:Y)]
# @test reduce(vcat_data, Dict("X" => X, "Y" => Y); source=@o(_.src)) |> sort == [(a=1, b=2, src="X"), (a=2, b=3, src="X"), (a=2, b=1, src="Y")] |> sort
end
@testitem "_" begin
import Aqua
Aqua.test_all(DataManipulation; ambiguities=false, piracies=false) # piracy - only set(unique)?
Aqua.test_ambiguities(DataManipulation)
import CompatHelperLocal as CHL
CHL.@check()
end
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MIT"
] | 0.1.17 | 9c5e6d93afa406596c1132cb452d00feb7d4dbe7 | docs | 5791 | # DataManipulation.jl
General and composable utilities for manipulating tabular, quasi-tabular, and non-tabular datasets.
Base Julia provides the most foundational building blocks for data processing, such as the `map` and `filter` functions. The goal of `DataManipulation` is to extend common data processing functionality on top of that: more general mapping over datasets, grouping, selecting, reshaping, and so on.
`DataManipulation` handles basic data structures already familiar to Julia users and doesn't require switching to custom specialized dataset types. All functions aim to stay composable and support datasets represented as plain Julia collections: arrays, dictionaries, tuples, many `Tables` types, and so on.
*Note:* This package is intended primarily for interactive usage: load `DataManipulation` and get access to a wide range of data processing tools. Other packages should preferably depend on the relevant individual packages listed below, when possible. Nevertheless, `DataManipulation` follows semver and is well-tested.
`DataManipulation` functionality consists of two major parts.
- Reexports from companion packages, each focused on a single area. These packages are designed so that they work together nicely and provide consistent and uniform interfaces:
- [DataPipes.jl](https://gitlab.com/aplavin/DataPipes.jl): boilerplate-free piping for data manipulation
- [FlexiMaps.jl](https://gitlab.com/aplavin/FlexiMaps.jl): functions such as `flatmap`, `filtermap`, and `mapview`, extending the all-familiar `map`
- [FlexiGroups.jl](https://gitlab.com/aplavin/FlexiGroups.jl): group data by arbitrary keys & more
- [Skipper.jl](https://gitlab.com/aplavin/Skipper.jl): `skip(predicate, collection)`, a generalization of `skipmissing`
- [Accessors.jl](https://github.com/JuliaObjects/Accessors.jl): modify nested data structures conveniently with optics
See the docs of these packages for more details.
Additionally, [FlexiJoins.jl](https://gitlab.com/aplavin/FlexiJoins.jl) is considered a companion package with relevant goals and compatible API. For now it's somewhat heavy in terms of dependencies and isn't included in `DataManipulation`, but can be added in the future.
- Functions defined in `DataManipulation` itself: they do not clearly belong to a narrower-scope package, or just have not been split out yet. See the docstrings for details, and the short example below this list.
- `findonly`, `filteronly`, `filterfirst`, `uniqueonly`: functions with the semantics of the corresponding Base function combinations; they are more efficient and naturally support `Accessors.set`.
- `sortview`, `uniqueview`, `collectview`: non-copying versions of corresponding Base functions.
- zero-cost property selection, indexing, and nesting by regex: `sr"regex"` literal, `nest` function
- And more: `discreterange`, `shift_range`, `rev`
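A quick taste of a few of these helpers — a sketch assembled from the docstrings and tests in this repo; see the docstrings for exact behavior:

```julia
using DataManipulation

findonly(iseven, [11, 12])         # == 2; errors unless exactly one element matches
filterfirst(iseven, [11, 12, 14])  # == 12, a more efficient first(filter(...))
uniqueonly([1, 1])                 # == 1; errors if the values differ
sortview([5, 1, 4])                # sorted view into the original array, no copy

nt = (a_1=1, a_2=10, b_1=100)
nt[sr"a_\d"]                                  # == (a_1 = 1, a_2 = 10)
nest((a_x=1, a_y="2", b=:z), sr"(a)_(\w+)")   # == (a = (x = 1, y = "2"), b = :z)
```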
# Featured example
```julia
using DataManipulation
# let's say you have this raw table, probably read from a file:
julia> data_raw = [(; t_a=rand(), t_b=rand(), t_c=rand(), id=rand(1:5), i) for i in 1:10]
10-element Vector{...}:
(t_a = 0.18651300247498126, t_b = 0.17891408921013918, t_c = 0.25088919057346093, id = 4, i = 1)
(t_a = 0.008638783104697567, t_b = 0.2725301420722497, t_c = 0.3731421925708567, id = 1, i = 2)
(t_a = 0.9263839548209668, t_b = 0.043017734093856785, t_c = 0.35927442939296217, id = 2, i = 3)
...
# we want to work with `t_a,b,c` values from the table, but it's not very convenient as-is:
# they are mixed with other unrelated columns, `id` and `i`
# let's gather all `t`s into one place by nesting another namedtuple:
julia> data_1 = @p data_raw |> map(nest(_, sr"(t)_(\w+)"))
10-element Vector{...}:
(t = (a = 0.18651300247498126, b = 0.17891408921013918, c = 0.25088919057346093), id = 4, i = 1)
(t = (a = 0.008638783104697567, b = 0.2725301420722497, c = 0.3731421925708567), id = 1, i = 2)
(t = (a = 0.9263839548209668, b = 0.043017734093856785, c = 0.35927442939296217), id = 2, i = 3)
...
# much better!
# in practice, all related steps can be written into a single pipeline @p ...,
# here, we split them to demonstrate individual functions
# for the sake of example, let's normalize all `t`s to sum to 1 for each row:
julia> data_2 = @p data_1 |> mapset(t=Tuple(_.t) ./ sum(_.t))
10-element Vector{...}:
(t = (0.3026254665729048, 0.29029589897330355, 0.40707863445379167), id = 4, i = 1)
(t = (0.013202867673153734, 0.4165146131252091, 0.5702825192016372), id = 1, i = 2)
(t = (0.6972233052557745, 0.0323763884223678, 0.27040030632185774), id = 2, i = 3)
...
# finally, let's find the maximum over all `t`s for each `id`
# we'll demonstrate two approaches leading to the same result here
# group immediately, then aggregate individual `t`s for each group at two levels - within row, and among rows:
julia> @p data_2 |> group(_.id) |> map(gr -> maximum(r -> maximum(r.t), gr))
5-element Dictionaries.Dictionary{Int64, Float64}
1 │ 0.5702825192016372
2 │ 0.6972233052557745
3 │ 0.8107403478840245
4 │ 0.4865089865249148
5 │ 0.44064846734993746
# alternatively, flatten `t` first into a flat column, then group and aggregate at a single level:
julia> data_2_flat = @p data_2 |> flatmap(_.t, (;_..., t=_2))
30-element Vector{...}:
(t = 0.3026254665729048, id = 4, i = 1)
(t = 0.29029589897330355, id = 4, i = 1)
(t = 0.40707863445379167, id = 4, i = 1)
(t = 0.013202867673153734, id = 1, i = 2)
...
julia> @p data_2_flat |> group(_.id) |> map(gr -> maximum(r -> r.t, gr))
5-element Dictionaries.Dictionary{Int64, Float64}
1 │ 0.5702825192016372
2 │ 0.6972233052557745
3 │ 0.8107403478840245
4 │ 0.4865089865249148
5 │ 0.44064846734993746
```
| DataManipulation | https://github.com/JuliaAPlavin/DataManipulation.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3696 | using SDDP
import Dates
import GLPK
import HypothesisTests
import JSON
import PrettyTables
function benchmark_file(filename::String; kwargs...)
model, _ = SDDP.read_from_file(filename)
JuMP.set_optimizer(model, GLPK.Optimizer)
SDDP.train(model; kwargs...)
log = model.most_recent_training_results.log
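    # Time-weighted average of the bound: accumulate bound * (time spent at that
    # bound), treating the bound as piecewise-constant between log entries; the
    # sum is divided by the total time in the returned tuple below.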
time_weighted_bound = log[1].bound * log[1].time
for i in 2:length(log)
time_weighted_bound += log[i].bound * (log[i].time - log[i-1].time)
end
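    # Normalize the sign so that smaller is always better in the reports:
    # bounds from maximization problems are negated.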
sign = model.objective_sense == MOI.MAX_SENSE ? -1 : 1
return (
best_bound = sign * log[end].bound,
        total_time = log[end].time,
total_solves = log[end].total_solves,
        time_weighted_bound = sign * time_weighted_bound / log[end].time,
bound = map(l -> sign * l.bound, log),
)
end
function benchmark(; kwargs...)
model_dir = joinpath(@__DIR__, "models")
models = readdir(model_dir)
# Precompile to avoid polluting the results!
benchmark_file(joinpath(model_dir, models[1]); kwargs...)
solutions = Dict{String,Any}(
file => benchmark_file(joinpath(model_dir, file); kwargs...) for
file in models
)
time = Dates.format(Dates.now(), "Y_mm_dd_HHMM_SS")
data = Dict("date" => time, "solutions" => solutions)
open("benchmark_$(time).json", "w") do io
return write(io, JSON.json(data))
end
return "benchmark_$(time).json"
end
function _report_columns(filename)
data = JSON.parsefile(filename)
models = collect(keys(data["solutions"]))
d = data["solutions"]
return (
models = models,
best_bound = map(m -> d[m]["best_bound"], models),
avg_bound = map(m -> d[m]["time_weighted_bound"], models),
total_time = map(m -> d[m]["total_time"], models),
total_solves = map(m -> d[m]["total_solves"], models),
)
end
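# Compare two configurations via the element-wise ratios B ./ A, together with a
# one-sample t-test of whether the mean ratio differs from 1.0.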
function _ttest(A, B)
x = B ./ A
t = HypothesisTests.OneSampleTTest(convert(Vector{Float64}, x), 1.0)
return vcat(x, HypothesisTests.pvalue(t), HypothesisTests.confint(t))
end
function _summarize(io, filename_A)
println(io, "```")
println(io, "filename: $(filename_A)")
A = _report_columns(filename_A)
println(io, "```\n")
data =
hcat(A.models, A.best_bound, A.avg_bound, A.total_time, A.total_solves)
PrettyTables.pretty_table(
io,
data;
header = ["Model", "Best Bound", "Avg. Bound", "Time", "Solves"],
tf = PrettyTables.tf_markdown,
)
return A
end
function report(A::String, B::String)
open("report.md", "w") do io
return report(io, A, B)
end
return
end
function report(io::IO, filename_A::String, filename_B::String)
println(io, "# Benchmark report")
println(io, "\n## Configuration A\n")
A = _summarize(io, filename_A)
println(io, "\n## Configuration B\n")
B = _summarize(io, filename_B)
println(io, "\n## Comparison B / A\n")
data = hcat(
vcat(A.models, "pvalue", "confint"),
_ttest(A.best_bound, B.best_bound),
_ttest(A.avg_bound, B.avg_bound),
_ttest(A.total_time, B.total_time),
_ttest(A.total_solves, B.total_solves),
)
PrettyTables.pretty_table(
io,
data;
header = ["Model", "Best Bound", "Avg. Bound", "Time", "Solves"],
tf = PrettyTables.tf_markdown,
)
return
end
filename_A = benchmark(;
time_limit = 60,
stopping_rules = [SDDP.BoundStalling(10, 1e-6)],
duality_handler = SDDP.ContinuousConicDuality(),
)
filename_B = benchmark(;
time_limit = 60,
stopping_rules = [SDDP.BoundStalling(10, 1e-6)],
duality_handler = SDDP.LagrangianDuality(),
)
report(filename_A, filename_B)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 6768 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import Documenter
import Literate
import Random
import Test
"Call julia docs/make.jl --fix to rebuild the doctests."
const FIX_DOCTESTS = any(isequal("--fix"), ARGS)
const EXAMPLES_DIR = joinpath(@__DIR__, "src", "examples")
_sorted_files(dir, ext) = sort(filter(f -> endswith(f, ext), readdir(dir)))
function list_of_sorted_files(prefix, dir, ext = ".md")
return Any["$(prefix)/$(file)" for file in _sorted_files(dir, ext)]
end
function _include_sandbox(filename)
mod = @eval module $(gensym()) end
return Base.include(mod, filename)
end
# Run the farmer's problem first to precompile a bunch of SDDP.jl functions.
# This is a little sneaky, but it avoids leaking long (6 sec) compilation times
# into the examples.
include(joinpath(EXAMPLES_DIR, "the_farmers_problem.jl"))
if FIX_DOCTESTS
# doctest=:fix only works with `\n` line endings. Replace any `\r\n` ones.
for dir in joinpath.(@__DIR__, "src", ("tutorial", "explanation"))
for filename in list_of_sorted_files(dir, dir)
code = read(filename, String)
write(filename, replace(code, "\r\n" => "\n"))
end
end
end
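# Insert Binder and nbviewer badge links immediately after the first `# # Title`
# line of a Literate.jl source file.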
function add_binder_links(filename, content)
filename = replace(filename, ".jl" => ".ipynb")
links = """
#md # [](@__BINDER_ROOT_URL__/$filename)
#md # [](@__NBVIEWER_ROOT_URL__/$filename)
"""
m = match(r"(\# \# .+)", content)
return replace(content, m[1] => m[1] * links)
end
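# Add download links for the `.jl` and `.ipynb` versions of each example beneath
# its title, and strip the `nothing #hide` statements used to suppress output.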
function _link_example(content, filename)
title_line = findfirst(r"\n# .+?\n", content)
line = content[title_line]
ipynb = filename[1:end-3] * ".ipynb"
new_title = string(
line,
"\n",
"_This tutorial was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl)._\n",
"[_Download the source as a `.jl` file_]($filename).\n",
"[_Download the source as a `.ipynb` file_]($ipynb).\n",
)
    content = replace(content, "nothing #hide" => "")
return replace(content, line => new_title)
end
for dir in joinpath.(@__DIR__, "src", ("examples", "tutorial", "explanation"))
for jl_filename in list_of_sorted_files(dir, dir, ".jl")
Random.seed!(12345)
# `include` the file to test it before `#src` lines are removed. It is
# in a testset to isolate local variables between files.
Test.@testset "$jl_filename" begin
_include_sandbox(jl_filename)
end
Random.seed!(12345)
filename = replace(jl_filename, dirname(jl_filename) * "/" => "")
Literate.markdown(
jl_filename,
dir;
documenter = true,
postprocess = content -> _link_example(content, filename),
# Turn off the footer. We manually add a modified one.
credit = false,
)
Literate.notebook(jl_filename, dir; execute = false, credit = false)
end
end
# ==============================================================================
# Modify the release notes
# ==============================================================================
function fix_release_line(
line::String,
url::String = "https://github.com/odow/SDDP.jl",
)
# (#XXXX) -> ([#XXXX](url/issue/XXXX))
while (m = match(r"\(\#([0-9]+)\)", line)) !== nothing
id = m.captures[1]
line = replace(line, m.match => "([#$id]($url/issues/$id))")
end
# ## vX.Y.Z -> [vX.Y.Z](url/releases/tag/vX.Y.Z)
while (m = match(r"\#\# (v[0-9]+.[0-9]+.[0-9]+)", line)) !== nothing
tag = m.captures[1]
line = replace(line, m.match => "## [$tag]($url/releases/tag/$tag)")
end
# (Thanks @XXX) -> (Thanks [@XXX](https://github.com/XXX))
while (m = match(r"\(Thanks \@(.+)\)", line)) !== nothing
tag = m.captures[1]
line = replace(
line,
m.match => "(Thanks [@$tag](https://github.com/$tag))",
)
end
return line
end
open(joinpath(@__DIR__, "src", "changelog.md"), "r") do in_io
open(joinpath(@__DIR__, "src", "release_notes.md"), "w") do out_io
for line in readlines(in_io; keep = true)
write(out_io, fix_release_line(line))
end
end
end
Documenter.makedocs(;
sitename = "SDDP.jl",
authors = "Oscar Dowson",
format = Documenter.HTML(;
analytics = "G-HZQQDVMPZW",
# See https://github.com/JuliaDocs/Documenter.jl/issues/868
prettyurls = get(ENV, "CI", nothing) == "true",
collapselevel = 1,
sidebar_sitename = false,
size_threshold_ignore = [
"apireference.md",
"examples/objective_state_newsvendor.md",
],
),
clean = true,
doctest = FIX_DOCTESTS ? :fix : true,
pages = [
"Home" => "index.md",
"Tutorials" => [
"tutorial/first_steps.md",
"tutorial/objective_uncertainty.md",
"tutorial/markov_uncertainty.md",
"tutorial/plotting.md",
"tutorial/warnings.md",
"tutorial/arma.md",
"tutorial/decision_hazard.md",
"tutorial/objective_states.md",
"tutorial/pglib_opf.md",
"tutorial/mdps.md",
"tutorial/example_newsvendor.md",
"tutorial/example_reservoir.md",
"tutorial/example_milk_producer.md",
],
"How-to guides" => [
"guides/access_previous_variables.md",
"guides/add_a_multidimensional_state_variable.md",
"guides/add_a_risk_measure.md",
"guides/add_integrality.md",
"guides/add_multidimensional_noise.md",
"guides/add_noise_in_the_constraint_matrix.md",
"guides/choose_a_stopping_rule.md",
"guides/create_a_general_policy_graph.md",
"guides/debug_a_model.md",
"guides/improve_computational_performance.md",
"guides/simulate_using_a_different_sampling_scheme.md",
"guides/create_a_belief_state.md",
],
"Explanation" => ["explanation/theory_intro.md", "explanation/risk.md"],
"Examples" => list_of_sorted_files("examples", EXAMPLES_DIR),
"API Reference" => "apireference.md",
"Release notes" => "release_notes.md",
],
doctestfilters = [r"[\s\-]?\d\.\d{6}e[\+\-]\d{2}"],
)
Documenter.deploydocs(;
repo = "github.com/odow/SDDP.jl.git",
push_preview = true,
)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1399 | using Luxor
function logo_with_text()
P = 20
R = 100
H = 10
Drawing(2R + 2P, 2R + H + 2P, joinpath(@__DIR__, "logo.svg"))
function draw_box(color, angle, w)
sethue(color)
rotate(angle)
box(Point(w * R, 0), R, 3R, :fill)
rotate(-angle)
return
end
origin()
setopacity(3 / 4)
circle(Point(0, -H / 2 - 50), R, :clip)
draw_box(Luxor.julia_green, 2π / 3, 1 / 3)
draw_box(Luxor.julia_purple, π / 3, 2 / 5)
draw_box(Luxor.julia_red, π / 6, 3 / 4)
clipreset()
setopacity(1)
setcolor("black")
setfont("Arial", 60)
settext(
"<b>SDDP.jl</b>",
Point(0, R + H / 2 + P);
halign = "center",
markup = true,
)
finish()
return
end
function logo_without_text()
P = 10
R = 100
H = 10
Drawing(2R + 2P, 2R + 2P - 50, joinpath(@__DIR__, "logo_without_text.svg"))
p = origin(Point(110, R + P - 25 + 55 / 2))
function draw_box(color, angle, w)
sethue(color)
rotate(angle)
box(Point(w * R, 0), R, 3R, :fill)
rotate(-angle)
return
end
setopacity(3 / 4)
circle(Point(0, -H / 2 - 50), R, :clip)
draw_box(Luxor.julia_green, 2π / 3, 1 / 3)
draw_box(Luxor.julia_purple, π / 3, 2 / 5)
draw_box(Luxor.julia_red, π / 6, 3 / 4)
finish()
return
end
logo_with_text()
logo_without_text()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1483 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # FAST: the hydro-thermal problem
# An implementation of the Hydro-thermal example from [FAST](https://github.com/leopoldcambier/FAST/tree/daea3d80a5ebb2c52f78670e34db56d53ca2e778/examples/hydro%20thermal)
using SDDP, HiGHS, Test
function fast_hydro_thermal()
model = SDDP.LinearPolicyGraph(;
stages = 2,
upper_bound = 0.0,
sense = :Max,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x <= 8, SDDP.State, initial_value = 0.0)
@variables(sp, begin
y >= 0
p >= 0
ξ
end)
@constraints(sp, begin
p + y >= 6
x.out <= x.in - y + ξ
end)
RAINFALL = (t == 1 ? [6] : [2, 10])
SDDP.parameterize(sp, RAINFALL) do ω
return JuMP.fix(ξ, ω)
end
@stageobjective(sp, -5 * p)
end
det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
set_silent(det)
JuMP.optimize!(det)
@test JuMP.objective_sense(det) == MOI.MAX_SENSE
@test JuMP.objective_value(det) == -10
SDDP.train(model)
@test SDDP.calculate_bound(model) == -10
return
end
fast_hydro_thermal()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1551 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # FAST: the production management problem
# An implementation of the Production Management example from [FAST](https://github.com/leopoldcambier/FAST/blob/daea3d80a5ebb2c52f78670e34db56d53ca2e778/examples/production management multiple stages/)
using SDDP, HiGHS, Test
function fast_production_management(; cut_type)
DEMAND = [2, 10]
H = 3
N = 2
C = [0.2, 0.7]
S = 2 .+ [0.33, 0.54]
model = SDDP.LinearPolicyGraph(;
stages = H,
lower_bound = -50.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0)
@variables(sp, begin
s[i = 1:N] >= 0
d
end)
@constraints(sp, begin
[i = 1:N], s[i] <= x[i].in
sum(s) <= d
end)
SDDP.parameterize(sp, t == 1 ? [0] : DEMAND) do ω
return JuMP.fix(d, ω)
end
@stageobjective(sp, sum(C[i] * x[i].out for i in 1:N) - S's)
end
SDDP.train(model; cut_type = cut_type, print_level = 2, log_frequency = 5)
@test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2
end
fast_production_management(; cut_type = SDDP.SINGLE_CUT)
fast_production_management(; cut_type = SDDP.MULTI_CUT)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1327 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # FAST: the quickstart problem
# An implementation of the QuickStart example from [FAST](https://github.com/leopoldcambier/FAST/tree/daea3d80a5ebb2c52f78670e34db56d53ca2e778/demo)
using SDDP, HiGHS, Test
function fast_quickstart()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(2);
lower_bound = -5,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
if t == 1
@stageobjective(sp, x.out)
else
@variable(sp, s >= 0)
@constraint(sp, s <= x.in)
SDDP.parameterize(sp, [2, 3]) do ω
return JuMP.set_upper_bound(s, ω)
end
@stageobjective(sp, -2s)
end
end
det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
set_silent(det)
JuMP.optimize!(det)
@test JuMP.objective_value(det) == -2
SDDP.train(model; log_every_iteration = true)
@test SDDP.calculate_bound(model) == -2
end
fast_quickstart()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5080 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Hydro-thermal scheduling
# ## Problem Description
# In a hydro-thermal problem, the agent controls a hydro-electric generator and reservoir.
# Each time period, they need to choose a generation quantity from thermal `g_t`, and hydro
# `g_h`, in order to meet demand `w_d`, which is a stagewise-independent random variable.
# The state variable, `x`, is the quantity of water in the reservoir at the start of each
# time period, and it has a minimum level of 5 units and a maximum level of 15 units. We
# assume that there are 10 units of water in the reservoir at the start of time, so that
# `x_0 = 10`. The state-variable is connected through time by the water balance constraint:
# `x.out = x.in - g_h - s + w_i,` where `x.out` is the quantity of water at the end of the
# time period, `x.in` is the quantity of water at the start of the time period, `s` is the
# quantity of water spilled from the reservoir, and `w_i` is a stagewise-independent random
# variable that represents the inflow into the reservoir during the time period.
# We assume that there are three stages, `t=1, 2, 3`, representing summer-fall, winter, and
# spring, and that we are solving this problem in an infinite-horizon setting with a
# discount factor of `0.95`.
# In each stage, the agent incurs the cost of spillage, plus the cost of thermal generation.
# We assume that the cost of thermal generation is dependent on the stage `t = 1, 2, 3`, and
# that in each stage, `w` is drawn from the set `(w_i, w_d) = {(0, 7.5), (3, 5), (10, 2.5)}`
# with equal probability.
# ## Importing packages
# For this example, in addition to `SDDP`, we need `HiGHS` as a solver and `Statistics` to
# compute the mean of our simulations.
using HiGHS
using SDDP
using Statistics
# ## Constructing the policy graph
# There are three stages in our infinite-horizon problem, so we construct a
# unicyclic policy graph using [`SDDP.UnicyclicGraph`](@ref):
graph = SDDP.UnicyclicGraph(0.95; num_nodes = 3)
# ## Constructing the model
# Much of the macro code (i.e., lines starting with `@`) in the first part of the following
# should be familiar to users of JuMP.
# Inside the `do-end` block, `sp` is a standard JuMP model, and `t` is the index
# of the node; the block will be called with `t = 1, 2, 3`.
# The state variable `x`, constructed by passing the `SDDP.State` tag to `@variable`, is
# actually a Julia struct with two fields: `x.in` and `x.out` corresponding to the incoming
# and outgoing state variables respectively. Both `x.in` and `x.out` are standard JuMP
# variables. The `initial_value` keyword provides the value of the state variable in the
# root node (i.e., `x_0`).
# Compared to a JuMP model, one key difference is that we use [`@stageobjective`](@ref)
# instead of `@objective`. The [`SDDP.parameterize`](@ref) function takes a list of supports
# for `w` and parameterizes the JuMP model `sp` by setting the right-hand sides of the
# appropriate constraints (note how the constraints initially have a right-hand side of
# `0`). By default, it is assumed that the realizations have uniform probability, but a
# probability mass vector can also be provided.
model = SDDP.PolicyGraph(
graph;
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)
@variable(sp, g_t >= 0)
@variable(sp, g_h >= 0)
@variable(sp, s >= 0)
@constraint(sp, balance, x.out - x.in + g_h + s == 0)
@constraint(sp, demand, g_h + g_t == 0)
@stageobjective(sp, s + t * g_t)
SDDP.parameterize(sp, [[0, 7.5], [3, 5], [10, 2.5]]) do w
set_normalized_rhs(balance, w[1])
return set_normalized_rhs(demand, w[2])
end
end
# ## Training the policy
# Once a model has been constructed, the next step is to train the policy. This can be
# achieved using [`SDDP.train`](@ref). There are many options that can be passed, but
# `iteration_limit` terminates the training after the prescribed number of SDDP iterations.
SDDP.train(model; iteration_limit = 100)
# ## Simulating the policy
# After training, we can simulate the policy using [`SDDP.simulate`](@ref).
sims = SDDP.simulate(model, 100, [:g_t])
mu = round(mean([s[1][:g_t] for s in sims]); digits = 2)
println("On average, $(mu) units of thermal are used in the first stage.")
# ## Extracting the water values
# Finally, we can use [`SDDP.ValueFunction`](@ref) and [`SDDP.evaluate`](@ref) to obtain and
# evaluate the value function at different points in the state-space. Note that since we
# are minimizing, the price has a negative sign: each additional unit of water leads to a
# decrease in the expected long-run cost.
V = SDDP.ValueFunction(model[1])
cost, price = SDDP.evaluate(V; x = 10)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2046 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # StochDynamicProgramming: the multistock problem
# This example comes from [StochDynamicProgramming.jl](https://github.com/JuliaOpt/StochDynamicProgramming.jl/tree/f68b9da541c2f811ce24fc76f6065803a0715c2f/examples/multistock-example.jl).
using SDDP, HiGHS, Test
function test_multistock_example()
model = SDDP.LinearPolicyGraph(;
stages = 5,
lower_bound = -5.0,
optimizer = HiGHS.Optimizer,
) do subproblem, stage
@variable(
subproblem,
0 <= stock[i = 1:3] <= 1,
SDDP.State,
initial_value = 0.5
)
@variables(subproblem, begin
0 <= control[i = 1:3] <= 0.5
ξ[i = 1:3] # Dummy for RHS noise.
end)
@constraints(
subproblem,
begin
sum(control) - 0.5 * 3 <= 0
[i = 1:3], stock[i].out == stock[i].in + control[i] - ξ[i]
end
)
Ξ = collect(
Base.product((0.0, 0.15, 0.3), (0.0, 0.15, 0.3), (0.0, 0.15, 0.3)),
)[:]
SDDP.parameterize(subproblem, Ξ) do ω
return JuMP.fix.(ξ, ω)
end
@stageobjective(subproblem, (sin(3 * stage) - 1) * sum(control))
end
SDDP.train(
model;
iteration_limit = 100,
cut_type = SDDP.SINGLE_CUT,
log_frequency = 10,
)
@test SDDP.calculate_bound(model) ≈ -4.349 atol = 0.01
simulation_results = SDDP.simulate(model, 5000)
@test length(simulation_results) == 5000
μ = SDDP.Statistics.mean(
sum(data[:stage_objective] for data in simulation) for
simulation in simulation_results
)
@test μ ≈ -4.349 atol = 0.1
return
end
test_multistock_example()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1503 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # StochDynamicProgramming: the stock problem
# This example comes from [StochDynamicProgramming.jl](https://github.com/JuliaOpt/StochDynamicProgramming.jl/tree/f68b9da541c2f811ce24fc76f6065803a0715c2f/examples/stock-example.jl).
using SDDP, HiGHS, Test
function stock_example()
model = SDDP.PolicyGraph(
SDDP.LinearGraph(5);
lower_bound = -2,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(sp, 0 <= state <= 1, SDDP.State, initial_value = 0.5)
@variable(sp, 0 <= control <= 0.5)
@variable(sp, ξ)
@constraint(sp, state.out == state.in - control + ξ)
SDDP.parameterize(sp, 0.0:1/30:0.3) do ω
return JuMP.fix(ξ, ω)
end
@stageobjective(sp, (sin(3 * stage) - 1) * control)
end
SDDP.train(model; log_frequency = 10)
@test SDDP.calculate_bound(model) ≈ -1.471 atol = 0.001
simulation_results = SDDP.simulate(model, 1_000)
@test length(simulation_results) == 1_000
μ = SDDP.Statistics.mean(
sum(data[:stage_objective] for data in simulation) for
simulation in simulation_results
)
@test μ ≈ -1.471 atol = 0.05
return
end
stock_example()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2407 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # StructDualDynProg: Problem 5.2, 2 stages
# This example comes from [StochasticDualDynamicProgramming.jl](https://github.com/blegat/StochasticDualDynamicProgramming.jl/blob/fe5ef82db6befd7c8f11c023a639098ecb85737d/test/prob5.2_2stages.jl)
using SDDP, HiGHS, Test
function test_prob52_2stages()
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, stage
## ========== Problem data ==========
n = 4
m = 3
i_c = [16, 5, 32, 2]
C = [25, 80, 6.5, 160]
T = [8760, 7000, 1500] / 8760
D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]
p2 = [0.9, 0.1]
## ========== State Variables ==========
@variable(subproblem, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)
## ========== Variables ==========
@variables(subproblem, begin
y[1:n, 1:m] >= 0
v[1:n] >= 0
penalty >= 0
rhs_noise[1:m] # Dummy variable for RHS noise term.
end)
## ========== Constraints ==========
@constraints(
subproblem,
begin
[i = 1:n], x[i].out == x[i].in + v[i]
[i = 1:n], sum(y[i, :]) <= x[i].in
[j = 1:m], sum(y[:, j]) + penalty >= rhs_noise[j]
end
)
if stage == 2
## No investment in last stage.
@constraint(subproblem, sum(v) == 0)
end
## ========== Uncertainty ==========
if stage != 1 # no uncertainty in first stage
SDDP.parameterize(subproblem, 1:size(D2, 2), p2) do ω
for j in 1:m
JuMP.fix(rhs_noise[j], D2[j, ω])
end
end
end
## ========== Stage objective ==========
@stageobjective(subproblem, i_c' * v + C' * y * T + 1e6 * penalty)
return
end
SDDP.train(model; log_frequency = 10)
@test SDDP.calculate_bound(model) ≈ 340315.52 atol = 0.1
return
end
test_prob52_2stages()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2062 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # StructDualDynProg: Problem 5.2, 3 stages
# This example comes from [StochasticDualDynamicProgramming.jl](https://github.com/blegat/StochasticDualDynamicProgramming.jl/blob/fe5ef82db6befd7c8f11c023a639098ecb85737d/test/prob5.2_3stages.jl).
using SDDP, HiGHS, Test
function test_prob52_3stages()
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
n = 4
m = 3
i_c = [16, 5, 32, 2]
C = [25, 80, 6.5, 160]
T = [8760, 7000, 1500] / 8760
D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]
p2 = [0.9, 0.1]
@variable(sp, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)
@variables(sp, begin
y[1:n, 1:m] >= 0
v[1:n] >= 0
penalty >= 0
ξ[j = 1:m]
end)
@constraints(sp, begin
[i = 1:n], x[i].out == x[i].in + v[i]
[i = 1:n], sum(y[i, :]) <= x[i].in
[j = 1:m], sum(y[:, j]) + penalty >= ξ[j]
end)
@stageobjective(sp, i_c'v + C' * y * T + 1e5 * penalty)
if t != 1 # no uncertainty in first stage
SDDP.parameterize(sp, 1:size(D2, 2), p2) do ω
for j in 1:m
JuMP.fix(ξ[j], D2[j, ω])
end
end
end
if t == 3
@constraint(sp, sum(v) == 0)
end
end
det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
set_silent(det)
JuMP.optimize!(det)
@test JuMP.objective_value(det) ≈ 406712.49 atol = 0.1
SDDP.train(model; log_frequency = 10)
@test SDDP.calculate_bound(model) ≈ 406712.49 atol = 0.1
return
end
test_prob52_3stages()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5278 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # The farm planning problem
# There are four stages. The first stage is a deterministic planning stage. The
# next three are wait-and-see operational stages. The uncertainty in the three
# operational stages is a Markov chain for weather. There are three Markov
# states: dry, normal, and wet.
# Inspired by R. McCardle, Farm management optimization. Masters thesis,
# University of Louisville, Louisville, Kentucky, United States of America
# (2009).
# All data, including short variable names, is taken from that thesis.
using SDDP, HiGHS, Test
function test_mccardle_farm_model()
S = [ # cutting, stage
0 1 2
0 0 1
0 0 0
]
t = [60, 60, 245] # days in period
D = [210, 210, 858] # demand
q = [ # selling price per bale
[4.5 4.5 4.5; 4.5 4.5 4.5; 4.5 4.5 4.5],
[5.5 5.5 5.5; 5.5 5.5 5.5; 5.5 5.5 5.5],
[6.5 6.5 6.5; 6.5 6.5 6.5; 6.5 6.5 6.5],
]
b = [ # predicted yield (bales/acres) from cutting i in weather j.
30 75 37.5
15 37.5 18.25
7.5 18.75 9.325
]
w = 3000 # max storage
C = [50 50 50; 50 50 50; 50 50 50] # cost to grow hay
r = [ # Cost per bale of hay from cutting i during weather condition j.
[5 5 5; 5 5 5; 5 5 5],
[6 6 6; 6 6 6; 6 6 6],
[7 7 7; 7 7 7; 7 7 7],
]
M = 60.0 # max acreage for planting
H = 0.0 # initial inventory
V = [0.05, 0.05, 0.05] # inventory cost
L = 3000.0 # max demand for hay
graph = SDDP.MarkovianGraph([
ones(Float64, 1, 1),
[0.14 0.69 0.17],
[0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],
[0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],
])
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, index
stage, weather = index
## ===================== State Variables =====================
## Area planted.
@variable(subproblem, 0 <= acres <= M, SDDP.State, initial_value = M)
@variable(
subproblem,
bales[i = 1:3] >= 0,
SDDP.State,
initial_value = (i == 1 ? H : 0)
)
## ===================== Variables =====================
@variables(subproblem, begin
buy[1:3] >= 0 # Quantity of bales to buy from each cutting.
sell[1:3] >= 0 # Quantity of bales to sell from each cutting.
eat[1:3] >= 0 # Quantity of bales to eat from each cutting.
pen_p[1:3] >= 0 # Penalties
pen_n[1:3] >= 0 # Penalties
end)
## ===================== Constraints =====================
if stage == 1
@constraint(subproblem, acres.out <= acres.in)
@constraint(subproblem, [i = 1:3], bales[i].in == bales[i].out)
else
@expression(
subproblem,
cut_ex[c = 1:3],
bales[c].in + buy[c] - eat[c] - sell[c] + pen_p[c] - pen_n[c]
)
@constraints(
subproblem,
begin
## Cannot plant more land than previously cropped.
acres.out <= acres.in
## In each stage we need to meet demand.
sum(eat) >= D[stage-1]
## We can buy and sell other cuttings.
bales[stage-1].out ==
cut_ex[stage-1] + acres.in * b[stage-1, weather]
[c = 1:3; c != stage - 1], bales[c].out == cut_ex[c]
## There is some maximum storage.
sum(bales[i].out for i in 1:3) <= w
## We can only sell what is in storage.
[c = 1:3], sell[c] <= bales[c].in
## Maximum sales quantity.
sum(sell) <= L
end
)
end
## ===================== Stage objective =====================
if stage == 1
@stageobjective(subproblem, 0.0)
else
@stageobjective(
subproblem,
1000 * (sum(pen_p) + sum(pen_n)) +
## cost of growing
C[stage-1, weather] * acres.in +
sum(
## inventory cost
V[stage-1] * bales[cutting].in * t[stage-1] +
## purchase cost
r[cutting][stage-1, weather] * buy[cutting] +
## feed cost
S[cutting, stage-1] * eat[cutting] -
## sell reward
q[cutting][stage-1, weather] * sell[cutting] for
cutting in 1:3
)
)
end
return
end
SDDP.train(model)
@test SDDP.termination_status(model) == :simulation_stopping
@test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5
end
test_mccardle_farm_model()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2026 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Air conditioning
# Taken from [Anthony Papavasiliou's notes on SDDP](https://web.archive.org/web/20200504214809/https://perso.uclouvain.be/anthony.papavasiliou/public_html/SDDP.pdf)
# Consider the following problem
# * Produce air conditioners for 3 months
# * 200 units/month at 100 \$/unit
# * Overtime costs 300 \$/unit
# * Known demand of 100 units for period 1
# * Equally likely demand, 100 or 300 units, for periods 2, 3
# * Storage cost is 50 \$/unit
# * All demand must be met
# The known optimal solution is \$62,500
using SDDP, HiGHS, Test
function air_conditioning_model(duality_handler)
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(
sp,
0 <= stored_production <= 100,
Int,
SDDP.State,
initial_value = 0
)
@variable(sp, 0 <= production <= 200, Int)
@variable(sp, overtime >= 0, Int)
@variable(sp, demand)
DEMAND = [[100.0], [100.0, 300.0], [100.0, 300.0]]
SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, DEMAND[stage])
@constraint(
sp,
stored_production.out ==
stored_production.in + production + overtime - demand
)
@stageobjective(
sp,
100 * production + 300 * overtime + 50 * stored_production.out
)
end
SDDP.train(model; duality_handler = duality_handler)
@test isapprox(SDDP.calculate_bound(model), 62_500.0, atol = 0.1)
return
end
for duality_handler in [SDDP.LagrangianDuality(), SDDP.ContinuousConicDuality()]
air_conditioning_model(duality_handler)
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1633 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Training with a different forward model
using SDDP
import HiGHS
import Test
function create_air_conditioning_model(; convex::Bool)
return SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x <= 100, SDDP.State, initial_value = 0)
@variable(sp, 0 <= u_production <= 200)
@variable(sp, u_overtime >= 0)
if !convex
set_integer(x.out)
set_integer(u_production)
set_integer(u_overtime)
end
@constraint(sp, demand, x.in - x.out + u_production + u_overtime == 0)
Ω = [[100.0], [100.0, 300.0], [100.0, 300.0]]
SDDP.parameterize(ω -> JuMP.set_normalized_rhs(demand, ω), sp, Ω[t])
@stageobjective(sp, 100 * u_production + 300 * u_overtime + 50 * x.out)
end
end
convex = create_air_conditioning_model(; convex = true)
non_convex = create_air_conditioning_model(; convex = false)
SDDP.train(
convex;
forward_pass = SDDP.AlternativeForwardPass(non_convex),
post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),
iteration_limit = 10,
)
Test.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1)
Test.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1408 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Deterministic All Blacks
using SDDP, HiGHS, Test
function all_blacks()
## Number of time periods, number of seats, R_ij = revenue from selling seat
## i at time j, offer_ij = whether an offer for seat i will come at time j
(T, N, R, offer) = (3, 2, [3 3 6; 3 3 6], [1 1 0; 1 0 1])
model = SDDP.LinearPolicyGraph(;
stages = T,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do sp, stage
## Seat remaining?
@variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)
## Action: accept offer, or don't accept offer
@variable(sp, accept_offer, Bin)
## Balance on seats
@constraint(
sp,
[i in 1:N],
x[i].out == x[i].in - offer[i, stage] * accept_offer
)
@stageobjective(
sp,
sum(R[i, stage] * offer[i, stage] * accept_offer for i in 1:N)
)
end
SDDP.train(model; duality_handler = SDDP.LagrangianDuality())
@test SDDP.calculate_bound(model) ≈ 9.0
return
end
all_blacks()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2084 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Asset management
# Taken from the book
# J.R. Birge, F. Louveaux, Introduction to Stochastic Programming,
# Springer Series in Operations Research and Financial Engineering,
# Springer New York, New York, NY, 2011
using SDDP, HiGHS, Test
function asset_management_simple()
model = SDDP.PolicyGraph(
SDDP.MarkovianGraph(
Array{Float64,2}[
[1.0]',
[0.5 0.5],
[0.5 0.5; 0.5 0.5],
[0.5 0.5; 0.5 0.5],
],
);
lower_bound = -1_000.0,
optimizer = HiGHS.Optimizer,
) do subproblem, index
(stage, markov_state) = index
r_stock = [1.25, 1.06]
r_bonds = [1.14, 1.12]
@variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)
@variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)
if stage == 1
@constraint(subproblem, stocks.out + bonds.out == 55)
@stageobjective(subproblem, 0)
elseif 1 < stage < 4
@constraint(
subproblem,
r_stock[markov_state] * stocks.in +
r_bonds[markov_state] * bonds.in == stocks.out + bonds.out
)
@stageobjective(subproblem, 0)
else
@variable(subproblem, over >= 0)
@variable(subproblem, short >= 0)
@constraint(
subproblem,
r_stock[markov_state] * stocks.in +
r_bonds[markov_state] * bonds.in - over + short == 80
)
@stageobjective(subproblem, -over + 4 * short)
end
end
SDDP.train(model; log_frequency = 5)
@test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4
return
end
asset_management_simple()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2550 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Asset management with modifications
# A modified version of the Asset Management Problem Taken from the book
# J.R. Birge, F. Louveaux, Introduction to Stochastic Programming,
# Springer Series in Operations Research and Financial Engineering,
# Springer New York, New York, NY, 2011
using SDDP, HiGHS, Test
function asset_management_stagewise(; cut_type)
w_s = [1.25, 1.06]
w_b = [1.14, 1.12]
Phi = [-1, 5]
Psi = [0.02, 0.0]
model = SDDP.MarkovianPolicyGraph(;
sense = :Max,
transition_matrices = Array{Float64,2}[
[1.0]',
[0.5 0.5],
[0.5 0.5; 0.5 0.5],
[0.5 0.5; 0.5 0.5],
],
upper_bound = 1000.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
t, i = node
@variable(subproblem, xs >= 0, SDDP.State, initial_value = 0)
@variable(subproblem, xb >= 0, SDDP.State, initial_value = 0)
if t == 1
@constraint(subproblem, xs.out + xb.out == 55 + xs.in + xb.in)
@stageobjective(subproblem, 0)
elseif t == 2 || t == 3
@variable(subproblem, phi)
@constraint(
subproblem,
w_s[i] * xs.in + w_b[i] * xb.in + phi == xs.out + xb.out
)
SDDP.parameterize(subproblem, [1, 2], [0.6, 0.4]) do ω
JuMP.fix(phi, Phi[ω])
@stageobjective(subproblem, Psi[ω] * xs.out)
end
else
@variable(subproblem, u >= 0)
@variable(subproblem, v >= 0)
@constraint(
subproblem,
w_s[i] * xs.in + w_b[i] * xb.in + u - v == 80,
)
@stageobjective(subproblem, -4u + v)
end
end
SDDP.train(
model;
cut_type = cut_type,
log_frequency = 10,
risk_measure = (node) -> begin
if node[1] != 3
SDDP.Expectation()
else
SDDP.EAVaR(; lambda = 0.5, beta = 0.5)
end
end,
)
@test SDDP.calculate_bound(model) ≈ 1.278 atol = 1e-3
return
end
asset_management_stagewise(; cut_type = SDDP.SINGLE_CUT)
asset_management_stagewise(; cut_type = SDDP.MULTI_CUT)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2439 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Partially observable inventory management
using SDDP, HiGHS, Random, Statistics, Test
function inventory_management_problem()
demand_values = [1.0, 2.0]
demand_prob = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2])
graph = SDDP.Graph(
:root_node,
[:Ad, :Ah, :Bd, :Bh],
[
(:root_node => :Ad, 0.5),
(:root_node => :Bd, 0.5),
(:Ad => :Ah, 1.0),
(:Ah => :Ad, 0.8),
(:Ah => :Bd, 0.1),
(:Bd => :Bh, 1.0),
(:Bh => :Bd, 0.8),
(:Bh => :Ad, 0.1),
],
)
SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)
SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
@variables(
subproblem,
begin
0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)
buy >= 0
demand
end
)
@constraint(subproblem, demand == inventory.in - inventory.out + buy)
if node == :Ad || node == :Bd || node == :D
JuMP.fix(demand, 0)
@stageobjective(subproblem, buy)
else
SDDP.parameterize(subproblem, demand_values, demand_prob[node]) do ω
return JuMP.fix(demand, ω)
end
@stageobjective(subproblem, 2 * buy + inventory.out)
end
end
## Train the policy.
Random.seed!(123)
SDDP.train(
model;
iteration_limit = 100,
cut_type = SDDP.SINGLE_CUT,
log_frequency = 10,
parallel_scheme = SDDP.Serial(),
)
results = SDDP.simulate(model, 500; parallel_scheme = SDDP.Serial())
objectives =
[sum(s[:stage_objective] for s in simulation) for simulation in results]
sample_mean = round(Statistics.mean(objectives); digits = 2)
sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)
@test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci
return
end
inventory_management_problem()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2285 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Biobjective hydro-thermal
using SDDP, HiGHS, Statistics, Test
function biobjective_example()
model = SDDP.LinearPolicyGraph(;
stages = 3,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, _
@variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)
@variables(subproblem, begin
0 <= g[i = 1:2] <= 100
0 <= u <= 150
s >= 0
shortage_cost >= 0
end)
@expressions(subproblem, begin
objective_1, g[1] + 10 * g[2]
objective_2, shortage_cost
end)
@constraints(subproblem, begin
inflow_constraint, v.out == v.in - u - s
g[1] + g[2] + u == 150
shortage_cost >= 40 - v.out
shortage_cost >= 60 - 2 * v.out
shortage_cost >= 80 - 4 * v.out
end)
## You must call this for a biobjective problem!
SDDP.initialize_biobjective_subproblem(subproblem)
SDDP.parameterize(subproblem, 0.0:5:50.0) do ω
JuMP.set_normalized_rhs(inflow_constraint, ω)
## You must call `set_biobjective_functions` from within
## `SDDP.parameterize`.
return SDDP.set_biobjective_functions(
subproblem,
objective_1,
objective_2,
)
end
end
pareto_weights =
SDDP.train_biobjective(model; solution_limit = 10, iteration_limit = 10)
solutions = [(k, v) for (k, v) in pareto_weights]
sort!(solutions; by = x -> x[1])
@test length(solutions) == 10
## Test for convexity! The gradient must be decreasing as we move from left
## to right.
gradient(a, b) = (b[2] - a[2]) / (b[1] - a[1])
grad = Inf
for i in 1:9
new_grad = gradient(solutions[i], solutions[i+1])
@test new_grad < grad
grad = new_grad
end
return
end
biobjective_example()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4317 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Booking management
# This example concerns the acceptance of booking requests for rooms in a
# hotel in the lead up to a large event.
# Each stage, we receive a booking request and can choose to accept or decline
# it. Once accepted, bookings cannot be terminated.
using SDDP, HiGHS, Test
function booking_management_model(num_days, num_rooms, num_requests)
## maximum revenue that could be accrued.
max_revenue = (num_rooms + num_requests) * num_days * num_rooms
## booking_requests is a vector of {0,1} arrays of size
## (num_days x num_rooms) if the room is requested.
booking_requests = Array{Int,2}[]
for room in 1:num_rooms
for day in 1:num_days
## note: length_of_stay is 0 indexed to avoid unnecessary +/- 1
## on the indexing
for length_of_stay in 0:(num_days-day)
req = zeros(Int, (num_rooms, num_days))
req[room:room, day.+(0:length_of_stay)] .= 1
push!(booking_requests, req)
end
end
end
return model = SDDP.LinearPolicyGraph(;
stages = num_requests,
upper_bound = max_revenue,
sense = :Max,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(
sp,
0 <= vacancy[room = 1:num_rooms, day = 1:num_days] <= 1,
SDDP.State,
Bin,
initial_value = 1
)
@variables(
sp,
begin
## Accept request for booking of room for length of time.
0 <= accept_request <= 1, Bin
## Accept a booking for an individual room on an individual day.
0 <= room_request_accepted[1:num_rooms, 1:num_days] <= 1, Bin
## Helper for JuMP.fix
req[1:num_rooms, 1:num_days]
end
)
for room in 1:num_rooms, day in 1:num_days
@constraints(
sp,
begin
## Update vacancy if we accept a room request
vacancy[room, day].out ==
vacancy[room, day].in - room_request_accepted[room, day]
## Can't accept a request of a filled room
room_request_accepted[room, day] <= vacancy[room, day].in
                    ## Can't accept individual room request if entire request is declined
room_request_accepted[room, day] <= accept_request
## Can't accept request if room not requested
room_request_accepted[room, day] <= req[room, day]
                    ## Accept all individual rooms if the entire request is accepted
room_request_accepted[room, day] + (1 - accept_request) >= req[room, day]
end
)
end
SDDP.parameterize(sp, booking_requests) do request
return JuMP.fix.(req, request)
end
@stageobjective(
sp,
sum(
(room + stage - 1) * room_request_accepted[room, day] for
room in 1:num_rooms for day in 1:num_days
)
)
end
end
function booking_management(duality_handler)
m_1_2_5 = booking_management_model(1, 2, 5)
SDDP.train(m_1_2_5; log_frequency = 5, duality_handler = duality_handler)
if duality_handler == SDDP.ContinuousConicDuality()
@test SDDP.calculate_bound(m_1_2_5) >= 7.25 - 1e-4
else
@test isapprox(SDDP.calculate_bound(m_1_2_5), 7.25, atol = 0.02)
end
m_2_2_3 = booking_management_model(2, 2, 3)
SDDP.train(m_2_2_3; log_frequency = 10, duality_handler = duality_handler)
if duality_handler == SDDP.ContinuousConicDuality()
        @test SDDP.calculate_bound(m_2_2_3) > 6.13
else
@test isapprox(SDDP.calculate_bound(m_2_2_3), 6.13, atol = 0.02)
end
end
booking_management(SDDP.ContinuousConicDuality())
# New version of HiGHS stalls
# booking_management(SDDP.LagrangianDuality())
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2947 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Generation expansion
using SDDP
import HiGHS
import Test
function generation_expansion(duality_handler)
build_cost = 1e4
use_cost = 4
num_units = 5
capacities = ones(num_units)
demand_vals =
0.5 * [
5 5 5 5 5 5 5 5
4 3 1 3 0 9 8 17
0 9 4 2 19 19 13 7
25 11 4 14 4 6 15 12
6 7 5 3 8 4 17 13
]
## Cost of unmet demand
penalty = 5e5
## Discounting rate
rho = 0.99
model = SDDP.LinearPolicyGraph(;
stages = 5,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, stage
@variable(
sp,
0 <= invested[1:num_units] <= 1,
SDDP.State,
Int,
initial_value = 0
)
@variables(sp, begin
generation >= 0
unmet >= 0
demand
end)
@constraints(
sp,
begin
## Can't un-invest
investment[i in 1:num_units], invested[i].out >= invested[i].in
## Generation capacity
sum(capacities[i] * invested[i].out for i in 1:num_units) >=
generation
## Meet demand or pay a penalty
unmet >= demand - sum(generation)
            ## Order the (identical) units to break symmetry; this needs fewer
            ## iterations and is numerically easier than the symmetric model
[j in 1:(num_units-1)], invested[j].out <= invested[j+1].out
end
)
## Demand is uncertain
SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, demand_vals[stage, :])
@expression(
sp,
investment_cost,
build_cost *
sum(invested[i].out - invested[i].in for i in 1:num_units)
)
@stageobjective(
sp,
(investment_cost + generation * use_cost) * rho^(stage - 1) +
penalty * unmet
)
end
if get(ARGS, 1, "") == "--write"
## Run `$ julia generation_expansion.jl --write` to update the benchmark
## model directory
model_dir = joinpath(@__DIR__, "..", "..", "..", "benchmarks", "models")
SDDP.write_to_file(
model,
joinpath(model_dir, "generation_expansion.sof.json.gz");
test_scenarios = 100,
)
exit(0)
end
SDDP.train(model; log_frequency = 10, duality_handler = duality_handler)
Test.@test SDDP.calculate_bound(model) ≈ 2.078860e6 atol = 1e3
return
end
generation_expansion(SDDP.ContinuousConicDuality())
generation_expansion(SDDP.LagrangianDuality())
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 10078 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Hydro valleys
# This problem is a version of the hydro-thermal scheduling problem. The goal is
# to operate two hydro-dams in a valley chain over time in the face of inflow
# and price uncertainty.
# Turbine response curves are modelled by piecewise linear functions which map
# the flow rate into a power. These can be controlled by specifying the
# breakpoints in the piecewise linear function as the knots in the Turbine
# struct.
# The model can be created using the `hydro_valley_model` function. It has a few
# keyword arguments to allow automated testing of the library.
# `hasstagewiseinflows` determines if the RHS noise constraint should be added.
# `hasmarkovprice` determines if the price uncertainty (modelled by a Markov
# chain) should be added.
# In the third stage, the Markov chain has some unreachable states to test
# some code-paths in the library.
# We can also set the sense to :Min or :Max (the objective and bound are
# flipped appropriately).
using SDDP, HiGHS, Test, Random
struct Turbine
flowknots::Vector{Float64}
powerknots::Vector{Float64}
end
struct Reservoir
min::Float64
max::Float64
initial::Float64
turbine::Turbine
spill_cost::Float64
inflows::Vector{Float64}
end
function hydro_valley_model(;
hasstagewiseinflows::Bool = true,
hasmarkovprice::Bool = true,
sense::Symbol = :Max,
)
valley_chain = [
Reservoir(
0,
200,
200,
Turbine([50, 60, 70], [55, 65, 70]),
1000,
[0, 20, 50],
),
Reservoir(
0,
200,
200,
Turbine([50, 60, 70], [55, 65, 70]),
1000,
[0, 0, 20],
),
]
turbine(i) = valley_chain[i].turbine
## Prices[t, Markov state]
prices = [
1 2 0
2 1 0
3 4 0
]
## Transition matrix
if hasmarkovprice
transition =
Array{Float64,2}[[1.0]', [0.6 0.4], [0.6 0.4 0.0; 0.3 0.7 0.0]]
else
transition = [ones(Float64, (1, 1)) for t in 1:3]
end
flipobj = (sense == :Max) ? 1.0 : -1.0
lower = (sense == :Max) ? -Inf : -1e6
upper = (sense == :Max) ? 1e6 : Inf
N = length(valley_chain)
## Initialise SDDP Model
return m = SDDP.MarkovianPolicyGraph(;
sense = sense,
lower_bound = lower,
upper_bound = upper,
transition_matrices = transition,
optimizer = HiGHS.Optimizer,
) do subproblem, node
t, markov_state = node
## ------------------------------------------------------------------
## SDDP State Variables
## Level of upper reservoir
@variable(
subproblem,
valley_chain[r].min <= reservoir[r = 1:N] <= valley_chain[r].max,
SDDP.State,
initial_value = valley_chain[r].initial
)
## ------------------------------------------------------------------
## Additional variables
@variables(
subproblem,
begin
outflow[r = 1:N] >= 0
spill[r = 1:N] >= 0
inflow[r = 1:N] >= 0
                generation_quantity >= 0 # Total power generated
## Proportion of levels to dispatch on
0 <=
dispatch[r = 1:N, level = 1:length(turbine(r).flowknots)] <=
1
rainfall[i = 1:N]
end
)
## ------------------------------------------------------------------
## Constraints
@constraints(
subproblem,
begin
## flow from upper reservoir
reservoir[1].out ==
reservoir[1].in + inflow[1] - outflow[1] - spill[1]
## other flows
flow[i = 2:N],
reservoir[i].out ==
reservoir[i].in + inflow[i] - outflow[i] - spill[i] +
outflow[i-1] +
spill[i-1]
## Total quantity generated
generation_quantity == sum(
turbine(r).powerknots[level] * dispatch[r, level] for
r in 1:N for level in 1:length(turbine(r).powerknots)
)
## ------------------------------------------------------------------
## Flow out
turbineflow[r = 1:N],
outflow[r] == sum(
turbine(r).flowknots[level] * dispatch[r, level] for
level in 1:length(turbine(r).flowknots)
)
## Dispatch combination of levels
dispatched[r = 1:N],
sum(
dispatch[r, level] for
level in 1:length(turbine(r).flowknots)
) <= 1
end
)
## rainfall noises
if hasstagewiseinflows && t > 1 # in future stages random inflows
@constraint(subproblem, inflow_noise[i = 1:N], inflow[i] <= rainfall[i])
SDDP.parameterize(
subproblem,
[
(valley_chain[1].inflows[i], valley_chain[2].inflows[i]) for i in 1:length(transition)
],
) do ω
for i in 1:N
JuMP.fix(rainfall[i], ω[i])
end
end
else # in the first stage deterministic inflow
@constraint(
subproblem,
initial_inflow_noise[i = 1:N],
inflow[i] <= valley_chain[i].inflows[1]
)
end
## ------------------------------------------------------------------
## Objective Function
if hasmarkovprice
@stageobjective(
subproblem,
flipobj * (
prices[t, markov_state] * generation_quantity -
sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)
)
)
else
@stageobjective(
subproblem,
flipobj * (
prices[t, 1] * generation_quantity -
sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)
)
)
end
end
end
function test_hydro_valley_model()
## For repeatability
Random.seed!(11111)
## deterministic
deterministic_model = hydro_valley_model(;
hasmarkovprice = false,
hasstagewiseinflows = false,
)
SDDP.train(
deterministic_model;
iteration_limit = 10,
cut_deletion_minimum = 1,
print_level = 0,
)
@test SDDP.calculate_bound(deterministic_model) ≈ 835.0 atol = 1e-3
## stagewise inflows
stagewise_model = hydro_valley_model(; hasmarkovprice = false)
SDDP.train(stagewise_model; iteration_limit = 20, print_level = 0)
@test SDDP.calculate_bound(stagewise_model) ≈ 838.33 atol = 1e-2
## Markov prices
markov_model = hydro_valley_model(; hasstagewiseinflows = false)
SDDP.train(markov_model; iteration_limit = 10, print_level = 0)
@test SDDP.calculate_bound(markov_model) ≈ 851.8 atol = 1e-2
## stagewise inflows and Markov prices
markov_stagewise_model =
hydro_valley_model(; hasstagewiseinflows = true, hasmarkovprice = true)
SDDP.train(markov_stagewise_model; iteration_limit = 10, print_level = 0)
@test SDDP.calculate_bound(markov_stagewise_model) ≈ 855.0 atol = 1.0
## risk averse stagewise inflows and Markov prices
riskaverse_model = hydro_valley_model()
SDDP.train(
riskaverse_model;
risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.66),
iteration_limit = 10,
print_level = 0,
)
@test SDDP.calculate_bound(riskaverse_model) ≈ 828.157 atol = 1.0
    ## worst case: stagewise inflows and Markov prices, with a :Min objective sense
worst_case_model = hydro_valley_model(; sense = :Min)
SDDP.train(
worst_case_model;
risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.0),
iteration_limit = 10,
print_level = 0,
)
@test SDDP.calculate_bound(worst_case_model) ≈ -780.867 atol = 1.0
    ## cut selection: stagewise inflows and Markov prices
cutselection_model = hydro_valley_model()
SDDP.train(
cutselection_model;
iteration_limit = 10,
print_level = 0,
cut_deletion_minimum = 2,
)
@test SDDP.calculate_bound(cutselection_model) ≈ 855.0 atol = 1.0
    ## Distributionally robust optimization
dro_model = hydro_valley_model(; hasmarkovprice = false)
SDDP.train(
dro_model;
risk_measure = SDDP.ModifiedChiSquared(sqrt(2 / 3) - 1e-6),
iteration_limit = 10,
print_level = 0,
)
@test SDDP.calculate_bound(dro_model) ≈ 835.0 atol = 1.0
dro_model = hydro_valley_model(; hasmarkovprice = false)
SDDP.train(
dro_model;
risk_measure = SDDP.ModifiedChiSquared(1 / 6),
iteration_limit = 20,
print_level = 0,
)
@test SDDP.calculate_bound(dro_model) ≈ 836.695 atol = 1.0
## (Note) radius ≈ sqrt(2/3), will set all noise probabilities to zero except the worst case noise
## (Why?):
## The distance from the uniform distribution (the assumed "true" distribution)
## to a corner of a unit simplex is sqrt(S-1)/sqrt(S) if we have S scenarios. The corner
## of a unit simplex is just a unit vector, i.e.: [0 ... 0 1 0 ... 0]. With this probability
    ## vector, only one noise has a non-zero probability.
    ## In the worst-case noise realization (0 inflows), the profit is:
## Reservoir1: 70 * $3 + 70 * $2 + 65 * $1 +
## Reservoir2: 70 * $3 + 70 * $2 + 70 * $1
### = $835
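    ## A quick numeric check of the two claims above (added for illustration;
    ## it is not part of the original test). First, the distance from the
    ## uniform distribution to a simplex corner with, say, S = 3 scenarios:
    @assert sqrt(sum(([1.0, 0.0, 0.0] .- 1 / 3) .^ 2)) ≈ sqrt(3 - 1) / sqrt(3)
    ## Second, the worst-case profit arithmetic:
    @assert 70 * 3 + 70 * 2 + 65 * 1 + 70 * 3 + 70 * 2 + 70 * 1 == 835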
end
test_hydro_valley_model()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2463 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Infinite horizon hydro-thermal
using SDDP, HiGHS, Test, Statistics
function infinite_hydro_thermal(; cut_type)
Ω = [
(inflow = 0.0, demand = 7.5),
(inflow = 5.0, demand = 5),
(inflow = 10.0, demand = 2.5),
]
graph = SDDP.Graph(
:root_node,
[:week],
[(:root_node => :week, 1.0), (:week => :week, 0.9)],
)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
@variable(
subproblem,
5.0 <= reservoir <= 15.0,
SDDP.State,
initial_value = 10.0
)
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
spill >= 0
inflow
demand
end)
@constraints(
subproblem,
begin
reservoir.out == reservoir.in - hydro_generation - spill + inflow
hydro_generation + thermal_generation == demand
end
)
@stageobjective(subproblem, 10 * spill + thermal_generation)
SDDP.parameterize(subproblem, Ω) do ω
JuMP.fix(inflow, ω.inflow)
return JuMP.fix(demand, ω.demand)
end
end
SDDP.train(
model;
cut_type = cut_type,
log_frequency = 100,
sampling_scheme = SDDP.InSampleMonteCarlo(; terminate_on_cycle = true),
parallel_scheme = SDDP.Serial(),
cycle_discretization_delta = 0.1,
)
@test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
results = SDDP.simulate(model, 500)
objectives =
[sum(s[:stage_objective] for s in simulation) for simulation in results]
sample_mean = round(Statistics.mean(objectives); digits = 2)
sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)
println("Confidence_interval = $(sample_mean) ± $(sample_ci)")
@test sample_mean - sample_ci <= 119.167 <= sample_mean + sample_ci
return
end
infinite_hydro_thermal(; cut_type = SDDP.SINGLE_CUT)
infinite_hydro_thermal(; cut_type = SDDP.MULTI_CUT)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 972 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Infinite horizon trivial
using SDDP, HiGHS, Test
function infinite_trivial()
graph = SDDP.Graph(
:root_node,
[:week],
[(:root_node => :week, 1.0), (:week => :week, 0.9)],
)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
@variable(subproblem, state, SDDP.State, initial_value = 0)
@constraint(subproblem, state.in == state.out)
@stageobjective(subproblem, 2.0)
end
SDDP.train(model; log_frequency = 10)
@test SDDP.calculate_bound(model) ≈ 2.0 / (1 - 0.9) atol = 1e-3
return
end
infinite_trivial()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1024 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # No strong duality
# This example is interesting, because strong duality doesn't hold for the
# extensive form (see if you can show why!), but we still converge.
using SDDP, HiGHS, Test
function no_strong_duality()
model = SDDP.PolicyGraph(
SDDP.Graph(
:root,
[:node],
[(:root => :node, 1.0), (:node => :node, 0.5)],
);
optimizer = HiGHS.Optimizer,
lower_bound = 0.0,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 1.0)
@stageobjective(sp, x.out)
@constraint(sp, x.in == x.out)
end
SDDP.train(model)
@test SDDP.calculate_bound(model) ≈ 2.0 atol = 1e-5
return
end
no_strong_duality()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2747 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Newsvendor
# This example is based on the classical newsvendor problem, but features an
# AR(1) spot-price.
# ```
# V(x[t-1], ω[t]) = max p[t] × u[t]
# subject to x[t] = x[t-1] - u[t] + ω[t]
# u[t] ∈ [0, 1]
# x[t] ≥ 0
# p[t] = p[t-1] + ϕ[t]
# ```
# The initial conditions are
# ```
# x[0] = 2.0
# p[0] = 1.5
# ω[t] ~ {0, 0.05, 0.10, ..., 0.45, 0.5} with uniform probability.
# ϕ[t] ~ {-0.25, -0.125, 0.125, 0.25} with uniform probability.
# ```
using SDDP, HiGHS, Statistics, Test
function joint_distribution(; kwargs...)
names = tuple([first(kw) for kw in kwargs]...)
values = tuple([last(kw) for kw in kwargs]...)
output_type = NamedTuple{names,Tuple{eltype.(values)...}}
distribution = map(output_type, Base.product(values...))
return distribution[:]
end
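# As a quick illustration (this call is not part of the original example),
# `joint_distribution` enumerates the Cartesian product of the named supports
# as a flat vector of `NamedTuple`s:
joint_distribution(; demand = [0.0, 1.0], price_noise = [-0.25, 0.25])
# which returns the four outcomes `(demand = 0.0, price_noise = -0.25)`,
# `(demand = 1.0, price_noise = -0.25)`, `(demand = 0.0, price_noise = 0.25)`,
# and `(demand = 1.0, price_noise = 0.25)`.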
function newsvendor_example(; cut_type)
model = SDDP.PolicyGraph(
SDDP.LinearGraph(3);
sense = :Max,
upper_bound = 50.0,
optimizer = HiGHS.Optimizer,
) do subproblem, stage
@variables(subproblem, begin
x >= 0, (SDDP.State, initial_value = 2)
0 <= u <= 1
w
end)
@constraint(subproblem, x.out == x.in - u + w)
SDDP.add_objective_state(
subproblem;
initial_value = 1.5,
lower_bound = 0.75,
upper_bound = 2.25,
lipschitz = 100.0,
) do y, ω
return y + ω.price_noise
end
noise_terms = joint_distribution(;
demand = 0:0.05:0.5,
price_noise = [-0.25, -0.125, 0.125, 0.25],
)
SDDP.parameterize(subproblem, noise_terms) do ω
JuMP.fix(w, ω.demand)
price = SDDP.objective_state(subproblem)
@stageobjective(subproblem, price * u)
end
end
SDDP.train(
model;
log_frequency = 10,
time_limit = 20.0,
cut_type = cut_type,
)
@test SDDP.calculate_bound(model) ≈ 4.04 atol = 0.05
results = SDDP.simulate(model, 500)
objectives =
[sum(s[:stage_objective] for s in simulation) for simulation in results]
@test round(Statistics.mean(objectives); digits = 2) ≈ 4.04 atol = 0.1
return
end
newsvendor_example(; cut_type = SDDP.SINGLE_CUT)
newsvendor_example(; cut_type = SDDP.MULTI_CUT)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1541 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # SLDP: example 1
# This example is derived from Section 4.2 of the paper:
# Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz
# Dynamic Programming. Optimization Online. [PDF](http://www.optimization-online.org/DB_FILE/2019/05/7193.pdf)
using SDDP, HiGHS, Test
function sldp_example_one()
model = SDDP.LinearPolicyGraph(;
stages = 8,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x, SDDP.State, initial_value = 2.0)
@variables(sp, begin
x⁺ >= 0
x⁻ >= 0
0 <= u <= 1, Bin
ω
end)
@stageobjective(sp, 0.9^(t - 1) * (x⁺ + x⁻))
@constraints(sp, begin
x.out == x.in + 2 * u - 1 + ω
x⁺ >= x.out
x⁻ >= -x.out
end)
points = [
-0.3089653673606697,
-0.2718277412744214,
-0.09611178608243474,
0.24645863921577763,
0.5204224537256875,
]
return SDDP.parameterize(φ -> JuMP.fix(ω, φ), sp, [points; -points])
end
SDDP.train(model; log_frequency = 10)
@test SDDP.calculate_bound(model) <= 1.1675
return
end
sldp_example_one()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2538 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # SLDP: example 2
# This example is derived from Section 4.3 of the paper:
# Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz
# Dynamic Programming. Optimization Online. [PDF](http://www.optimization-online.org/DB_FILE/2019/05/7193.pdf)
using SDDP
import HiGHS
import Test
function sldp_example_two(; first_stage_integer::Bool = true, N = 2)
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = -100.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x[1:2] <= 5, SDDP.State, initial_value = 0.0)
if t == 1
if first_stage_integer
@variable(sp, 0 <= u[1:2] <= 5, Int)
@constraint(sp, [i = 1:2], u[i] == x[i].out)
end
@stageobjective(sp, -1.5 * x[1].out - 4 * x[2].out)
else
@variable(sp, 0 <= y[1:4] <= 1, Bin)
@variable(sp, ω[1:2])
@stageobjective(sp, -16 * y[1] - 19 * y[2] - 23 * y[3] - 28 * y[4])
@constraint(
sp,
2 * y[1] + 3 * y[2] + 4 * y[3] + 5 * y[4] <= ω[1] - x[1].in
)
@constraint(
sp,
6 * y[1] + 1 * y[2] + 3 * y[3] + 2 * y[4] <= ω[2] - x[2].in
)
steps = range(5; stop = 15, length = N)
SDDP.parameterize(sp, [[i, j] for i in steps for j in steps]) do φ
return JuMP.fix.(ω, φ)
end
end
end
if get(ARGS, 1, "") == "--write"
## Run `$ julia sldp_example_two.jl --write` to update the benchmark
## model directory
model_dir = joinpath(@__DIR__, "..", "..", "..", "benchmarks", "models")
SDDP.write_to_file(
model,
joinpath(model_dir, "sldp_example_two_$(N).sof.json.gz");
test_scenarios = 30,
)
return
end
SDDP.train(model; log_frequency = 10)
bound = SDDP.calculate_bound(model)
if N == 2
Test.@test bound <= -57.0
elseif N == 3
Test.@test bound <= -59.33
elseif N == 6
Test.@test bound <= -61.22
end
return
end
sldp_example_two(; N = 2)
sldp_example_two(; N = 3)
sldp_example_two(; N = 6)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1726 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Stochastic All Blacks
using SDDP, HiGHS, Test
function stochastic_all_blacks()
## Number of time periods
T = 3
## Number of seats
N = 2
## R_ij = price of seat i at time j
R = [3 3 6; 3 3 6]
## Number of noises
s = 3
offers = [
[[1, 1], [0, 0], [1, 1]],
[[1, 0], [0, 0], [0, 0]],
[[0, 1], [1, 0], [1, 1]],
]
model = SDDP.LinearPolicyGraph(;
stages = T,
sense = :Max,
upper_bound = 100.0,
optimizer = HiGHS.Optimizer,
) do sp, stage
## Seat remaining?
@variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)
## Action: accept offer, or don't accept offer
## We are allowed to accept some of the seats offered but not others
@variable(sp, accept_offer[1:N], Bin)
@variable(sp, offers_made[1:N])
## Balance on seats
@constraint(
sp,
balance[i in 1:N],
x[i].in - x[i].out == accept_offer[i]
)
@stageobjective(sp, sum(R[i, stage] * accept_offer[i] for i in 1:N))
SDDP.parameterize(sp, offers[stage]) do o
return JuMP.fix.(offers_made, o)
end
@constraint(sp, accept_offer .<= offers_made)
end
SDDP.train(model; duality_handler = SDDP.LagrangianDuality())
@test SDDP.calculate_bound(model) ≈ 8.0
return
end
stochastic_all_blacks()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 8884 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # The farmer's problem
#
# _This problem is taken from Section 1.1 of the book Birge, J. R., & Louveaux,
# F. (2011). Introduction to Stochastic Programming. New York, NY: Springer New
# York. Paragraphs in quotes are taken verbatim._
# ## Problem description
# > Consider a European farmer who specializes in raising wheat, corn, and sugar
# > beets on his 500 acres of land. During the winter, [they want] to decide how
# > much land to devote to each crop.
# >
# > The farmer knows that at least 200 tons (T) of wheat and 240 T of corn are
# > needed for cattle feed. These amounts can be raised on the farm or bought
# > from a wholesaler. Any production in excess of the feeding requirement would
# > be sold.
# >
# > Over the last decade, mean selling prices have been \$170 and \$150 per
# > ton of wheat and corn, respectively. The purchase prices are 40% more than
# > this due to the wholesaler’s margin and transportation costs.
# >
# > Another profitable crop is sugar beet, which [they expect] to sell at
# > \$36/T; however, the European Commission imposes a quota on sugar beet
# > production. Any amount in excess of the quota can be sold only at \$10/T.
# > The farmer’s quota for next year is 6000 T."
# >
# > Based on past experience, the farmer knows that the mean yield on [their]
# > land is roughly 2.5 T, 3 T, and 20 T per acre for wheat, corn, and sugar
# > beets, respectively.
# >
# > [To introduce uncertainty,] assume some correlation among the yields of the
# > different crops. A very simplified representation of this would be to assume
# > that years are good, fair, or bad for all crops, resulting in above average,
# > average, or below average yields for all crops. To fix these ideas, _above_
# > and _below_ average indicate a yield 20% above or below the mean yield.
# ## Problem data
# The area of the farm.
MAX_AREA = 500.0
# There are three crops:
CROPS = [:wheat, :corn, :sugar_beet]
# Each of the crops has a different planting cost (\$/acre).
PLANTING_COST = Dict(:wheat => 150.0, :corn => 230.0, :sugar_beet => 260.0)
# The farmer requires a minimum quantity of wheat and corn, but not of sugar
# beet (tonnes).
MIN_QUANTITIES = Dict(:wheat => 200.0, :corn => 240.0, :sugar_beet => 0.0)
# In Europe, there is a quota system for producing crops. The farmer owns the
# following quota for each crop (tonnes):
QUOTA_MAX = Dict(:wheat => Inf, :corn => Inf, :sugar_beet => 6_000.0)
# The farmer can sell crops produced under the quota for the following amounts
# (\$/tonne):
SELL_IN_QUOTA = Dict(:wheat => 170.0, :corn => 150.0, :sugar_beet => 36.0)
# If they sell more than their allotted quota, the farmer earns the following on
# each tonne of crop above the quota (\$/tonne):
SELL_NO_QUOTA = Dict(:wheat => 0.0, :corn => 0.0, :sugar_beet => 10.0)
# The purchase prices for wheat and corn are 40% more than their sales price.
# However, the description does not address the purchase price of sugar beet.
# Therefore, we use a large value of \$1,000/tonne.
BUY_PRICE = Dict(:wheat => 238.0, :corn => 210.0, :sugar_beet => 1_000.0)
# On average, each crop has the following yield in tonnes/acre:
MEAN_YIELD = Dict(:wheat => 2.5, :corn => 3.0, :sugar_beet => 20.0)
# However, the yield is random. In good years, the yield is +20% above average,
# and in bad years, the yield is -20% below average.
YIELD_MULTIPLIER = Dict(:good => 1.2, :fair => 1.0, :bad => 0.8)
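# As a quick check of the data above (added for illustration; not part of the
# original tutorial), the purchase prices are 40% above the in-quota selling
# prices, and a good year scales each mean yield by 1.2:
@assert BUY_PRICE[:wheat] ≈ 1.4 * SELL_IN_QUOTA[:wheat]
@assert BUY_PRICE[:corn] ≈ 1.4 * SELL_IN_QUOTA[:corn]
Dict(c => MEAN_YIELD[c] * YIELD_MULTIPLIER[:good] for c in CROPS)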
# ## Mathematical formulation
# ## SDDP.jl code
# !!! note
# In what follows, we make heavy use of the fact that you can look up
# variables by their symbol name in a JuMP model as follows:
# ```julia
# @variable(model, x)
# model[:x]
# ```
# Read the [JuMP documentation](http://jump.dev/JuMP.jl/stable)
# if this isn't familiar to you.
# First up, load `SDDP.jl` and a solver. For this example, we use
# [`HiGHS.jl`](https://github.com/jump-dev/HiGHS.jl).
using SDDP, HiGHS
# ### State variables
# State variables are the information that flows between stages. In our example,
# the state variables are the areas of land devoted to growing each crop.
function add_state_variables(subproblem)
@variable(subproblem, area[c = CROPS] >= 0, SDDP.State, initial_value = 0)
end
# ### First stage problem
# We can only plant a maximum of 500 acres, and we want to minimize the planting
# cost
function create_first_stage_problem(subproblem)
@constraint(
subproblem,
sum(subproblem[:area][c].out for c in CROPS) <= MAX_AREA
)
@stageobjective(
subproblem,
-sum(PLANTING_COST[c] * subproblem[:area][c].out for c in CROPS)
)
end
# ### Second stage problem
# Now let's consider the second stage problem. This is more complicated than
# the first stage, so we've broken it down into four sections:
# 1) control variables
# 2) constraints
# 3) the objective
# 4) the uncertainty
# First, let's add the second stage control variables.
# #### Variables
# We add four types of control variables. Technically, the `yield` isn't a
# control variable. However, we add it as a dummy "helper" variable because it
# will be used when we add uncertainty.
function second_stage_variables(subproblem)
@variables(subproblem, begin
        0 <= yield[c = CROPS] # tonnes
0 <= buy[c = CROPS] # tonnes
0 <= sell_in_quota[c = CROPS] <= QUOTA_MAX[c] # tonnes
0 <= sell_no_quota[c = CROPS] # tonnes
end)
end
# #### Constraints
# The constraint we need to define is the minimum quantity constraint. This ensures that
# `MIN_QUANTITIES[c]` of each crop is produced.
function second_stage_constraint_min_quantity(subproblem)
@constraint(
subproblem,
[c = CROPS],
subproblem[:yield][c] + subproblem[:buy][c] -
subproblem[:sell_in_quota][c] - subproblem[:sell_no_quota][c] >=
MIN_QUANTITIES[c]
)
end
# #### Objective
# The objective of the second stage is to maximise revenue from selling crops,
# less the cost of buying corn and wheat if necessary to meet the minimum
# quantity constraint.
function second_stage_objective(subproblem)
@stageobjective(
subproblem,
sum(
SELL_IN_QUOTA[c] * subproblem[:sell_in_quota][c] +
SELL_NO_QUOTA[c] * subproblem[:sell_no_quota][c] -
BUY_PRICE[c] * subproblem[:buy][c] for c in CROPS
)
)
end
# #### Random variables
# Then, in the [`SDDP.parameterize`](@ref) function, we set the coefficient
# using `JuMP.set_normalized_coefficient`.
function second_stage_uncertainty(subproblem)
@constraint(
subproblem,
uncertainty[c = CROPS],
1.0 * subproblem[:area][c].in == subproblem[:yield][c]
)
SDDP.parameterize(subproblem, [:good, :fair, :bad]) do ω
for c in CROPS
JuMP.set_normalized_coefficient(
uncertainty[c],
subproblem[:area][c].in,
MEAN_YIELD[c] * YIELD_MULTIPLIER[ω],
)
end
end
end
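# If `JuMP.set_normalized_coefficient` is unfamiliar, here is a small
# standalone illustration (the `demo_*` names are ours; this snippet is not
# part of the farmer model). Starting from the constraint `2x == y`, it changes
# the coefficient of `x` to `3`, giving `3x == y`:
demo_model = JuMP.Model()
@variable(demo_model, demo_x)
@variable(demo_model, demo_y)
@constraint(demo_model, demo_c, 2.0 * demo_x == demo_y)
JuMP.set_normalized_coefficient(demo_c, demo_x, 3.0)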
# ### Putting it all together
# Now we're ready to build the multistage stochastic programming model. In
# addition to the things already discussed, we need a few extra pieces of
# information.
#
# First, we are maximizing, so we set `sense = :Max`. Second, we need to provide
# a valid upper bound. (See [Choosing an initial bound](@ref) for more on this.)
# We know from Birge and Louveaux that the optimal solution is \$108,390. So,
# let's choose \$500,000 just to be safe.
# Here is the full model.
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 500_000.0,
optimizer = HiGHS.Optimizer,
) do subproblem, stage
add_state_variables(subproblem)
if stage == 1
create_first_stage_problem(subproblem)
else
second_stage_variables(subproblem)
second_stage_constraint_min_quantity(subproblem)
second_stage_uncertainty(subproblem)
second_stage_objective(subproblem)
end
end
# ## Training a policy
# Now that we've built a model, we need to train it using [`SDDP.train`](@ref).
# The keyword `iteration_limit` stops the training after 40 iterations. See
# [Choose a stopping rule](@ref) for other ways to stop the training.
SDDP.train(model; iteration_limit = 40)
# ## Checking the policy
# Birge and Louveaux report that the optimal objective value is \$108,390.
# Check that we got the correct solution using [`SDDP.calculate_bound`](@ref):
@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5107 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Vehicle location
# This problem is a version of the ambulance dispatch problem. A hospital is
# located at 0 on the number line that stretches from 0 to 100. Ambulance bases
# are located at points 20, 40, 60, 80, and 100. When not responding to a call,
# ambulances must be located at a base, or the hospital. In this example there
# are three ambulances.
# Example location:
#
# H B B B B B
# 0 ---- 20 ---- 40 ---- 60 ---- 80 ---- 100
# Each stage, a call comes in from somewhere on the number line. The agent must
# decide which ambulance to dispatch. They pay the cost of twice the driving
# distance. If an ambulance is not dispatched in a stage, the ambulance can be
# relocated to a different base in preparation for future calls. This incurs a
# cost of the driving distance.
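# As a quick worked example (illustrative numbers only; not part of the model
# below): an ambulance dispatched from the base at 20 to a call at point 30
# drives 10 units to the call and then 30 units to the hospital at 0, so it
# pays twice the total driving distance:
2 * (abs(30 - 0) + abs(30 - 20))  # = 80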
using SDDP
import HiGHS
import Test
function vehicle_location_model(duality_handler)
hospital_location = 0
bases = vcat(hospital_location, [20, 40, 60, 80, 100])
vehicles = [1, 2, 3]
requests = 0:10:100
shift_cost(src, dest) = abs(src - dest)
function dispatch_cost(base, request)
return 2 * (abs(request - hospital_location) + abs(request - base))
end
## Initial state of emergency vehicles at bases. All ambulances start at the
## hospital.
initial_state(b, v) = b == hospital_location ? 1.0 : 0.0
model = SDDP.LinearPolicyGraph(;
stages = 10,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
## Current location of each vehicle at each base.
@variable(
sp,
0 <= location[b = bases, v = vehicles] <= 1,
SDDP.State,
initial_value = initial_state(b, v)
)
@variables(sp, begin
## Which vehicle is dispatched?
0 <= dispatch[bases, vehicles] <= 1, Bin
## Shifting vehicles between bases: [src, dest, vehicle]
0 <= shift[bases, bases, vehicles] <= 1, Bin
end)
## Flow of vehicles in and out of bases:
@expression(
sp,
base_balance[b in bases, v in vehicles],
location[b, v].in - dispatch[b, v] - sum(shift[b, :, v]) +
sum(shift[:, b, v])
)
@constraints(
sp,
begin
## Only one vehicle dispatched to call.
sum(dispatch) == 1
## Can only dispatch vehicle from base if vehicle is at that base.
[b in bases, v in vehicles],
dispatch[b, v] <= location[b, v].in
## Can only shift vehicle if vehicle is at that src base.
[b in bases, v in vehicles],
sum(shift[b, :, v]) <= location[b, v].in
## Can only shift vehicle if vehicle is not being dispatched.
[b in bases, v in vehicles],
sum(shift[b, :, v]) + dispatch[b, v] <= 1
## Can't shift to same base.
[b in bases, v in vehicles], shift[b, b, v] == 0
## Update states for non-home/non-hospital bases.
[b in bases[2:end], v in vehicles],
location[b, v].out == base_balance[b, v]
## Update states for home/hospital bases.
[v in vehicles],
location[hospital_location, v].out ==
base_balance[hospital_location, v] + sum(dispatch[:, v])
end
)
SDDP.parameterize(sp, requests) do request
@stageobjective(
sp,
sum(
## Distance to travel from base to emergency and then to hospital.
dispatch[b, v] * dispatch_cost(b, request) +
## Distance travelled by vehicles relocating bases.
sum(
shift_cost(b, dest) * shift[b, dest, v] for
dest in bases
) for b in bases, v in vehicles
)
)
end
end
if get(ARGS, 1, "") == "--write"
## Run `$ julia vehicle_location.jl --write` to update the benchmark
## model directory
model_dir = joinpath(@__DIR__, "..", "..", "..", "benchmarks", "models")
SDDP.write_to_file(
model,
joinpath(model_dir, "vehicle_location.sof.json.gz");
test_scenarios = 100,
)
exit(0)
end
SDDP.train(
model;
iteration_limit = 20,
log_frequency = 10,
cut_deletion_minimum = 100,
duality_handler = duality_handler,
)
Test.@test SDDP.calculate_bound(model) >= 1000
return
end
## TODO(odow): find out why this fails
## vehicle_location_model(SDDP.ContinuousConicDuality())
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 33446 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Risk aversion
# In [Introductory theory](@ref), we implemented a basic version of the
# SDDP algorithm. This tutorial extends that implementation to add
# **risk-aversion**.
# **Packages**
#
# This tutorial uses the following packages. For clarity, we call
# `import PackageName` so that we must prefix `PackageName.` to all functions
# and structs provided by that package. Everything not prefixed is either part
# of base Julia, or we wrote it.
import ForwardDiff
import HiGHS
import Ipopt
import JuMP
import Statistics
# ## Risk aversion: what and why?
# Often, the agents making decisions in complex systems are **risk-averse**,
# that is, they care more about avoiding very bad outcomes than they do about
# having a good average outcome.
# As an example, consumers in a hydro-thermal problem may be willing to pay a
# slightly higher electricity price on average, if it means that there is a
# lower probability of blackouts.
# Risk aversion in multistage stochastic programming has been well studied in
# the academic literature, and is widely used in production implementations
# around the world.
# ## Risk measures
# One way to add risk aversion to models is to use a **risk measure**. A risk
# measure is a function that maps a random variable to a real number.
#
# You are probably already familiar with lots of different risk measures. For
# example, the mean, median, mode, and maximum are all risk measures.
#
# We call the act of applying a risk measure to a random variable "computing the
# risk" of a random variable.
#
# To keep things simple, and because we need it for SDDP, we restrict our
# attention to random variables $Z$ with a finite sample space $\Omega$
# and positive probabilities $p_{\omega}$ for all $\omega \in \Omega$. We denote
# the realizations of $Z$ by $Z(\omega) = z_{\omega}$.
# A risk measure, $\mathbb{F}[Z]$, is a **convex risk measure** if it satisfies
# the following axioms:
#
# **Axiom 1: monotonicity**
#
# Given two random variables $Z_1$ and $Z_2$, with $Z_1 \le Z_2$ almost surely,
# then $\mathbb{F}[Z_1] \le \mathbb{F}[Z_2]$.
#
# **Axiom 2: translation equivariance**
#
# Given a random variable $Z$, then for all $a \in \mathbb{R}$,
# $\mathbb{F}[Z + a] = \mathbb{F}[Z] + a$.
#
# **Axiom 3: convexity**
#
# Given two random variables $Z_1$ and $Z_2$, then for all $a \in [0, 1]$,
# ```math
# \mathbb{F}[a Z_1 + (1 - a) Z_2] \le a \mathbb{F}[Z_1] + (1-a)\mathbb{F}[Z_2].
# ```
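# For example, the expectation $\mathbb{E}_p$ satisfies all three axioms; in
# particular, because it is linear, it satisfies the convexity axiom with
# equality:
# ```math
# \mathbb{E}_p[a Z_1 + (1 - a) Z_2] = a \mathbb{E}_p[Z_1] + (1-a)\mathbb{E}_p[Z_2].
# ```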
# Now we know what a risk measure is, let's see how we can use them to form
# risk-averse decision rules.
# ## Risk-averse decision rules: Part I
# We started this tutorial by explaining that we are interested in risk aversion
# because some agents are risk-averse. What that really means is that they
# want a policy that is also risk-averse. The question then becomes, how do we
# create risk-averse decision rules and policies?
# Recall from [Introductory theory](@ref) that we can form an optimal
# decision rule using the recursive formulation:
# ```math
# \begin{aligned}
# V_i(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x,
# \end{aligned}
# ```
# where our decision rule, $\pi_i(x, \omega)$, solves this optimization problem
# and returns a $u^*$ corresponding to an optimal solution.
# If we can replace the expectation operator $\mathbb{E}$ with another (more
# risk-averse) risk measure $\mathbb{F}$, then our decision rule will attempt to
# choose a control decision now that minimizes the risk of the future costs, as
# opposed to the expectation of the future costs. This makes our decisions more
# risk-averse, because we care more about the worst outcomes than we do about
# the average.
# Therefore, we can form a risk-averse decision rule using the formulation:
# ```math
# \begin{aligned}
# V_i(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{F}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x.
# \end{aligned}
# ```
# To convert this problem into a tractable equivalent, we apply Kelley's
# algorithm to the risk-averse cost-to-go term
# $\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$, to
# obtain the approximated problem:
# ```math
# \begin{aligned}
# V_i^K(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \theta\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x \\
# & \theta \ge \mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right] + \frac{d}{dx^\prime}\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right]^\top (x^\prime - x^\prime_k)\quad k=1,\ldots,K.
# \end{aligned}
# ```
# !!! warning
# Note how we need to explicitly compute a risk-averse subgradient! (We
# need a subgradient because the function might not be differentiable.) When
# constructing cuts with the expectation operator in [Introductory theory](@ref),
# we implicitly used the law of total expectation to combine the two
# expectations; we can't do that for a general risk measure.
# !!! tip "Homework challenge"
# If it's not obvious why we can use Kelley's here, try to use the axioms of
# a convex risk measure to show that
# $\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$
# is a convex function w.r.t. $x^\prime$ if $V_j$ is also a convex function.
# Our challenge is now to find a way to compute the risk-averse cost-to-go
# function $\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right]$,
# and a way to compute a subgradient of the risk-averse cost-to-go function
# with respect to $x^\prime$.
# ## Primal risk measures
# Now we know what a risk measure is, and how we will use it, let's implement
# some code to see how we can compute the risk of some random variables.
# !!! note
# We're going to start by implementing the **primal** version of each risk
# measure. We implement the **dual** version in the next section.
# First, we need some data:
Z = [1.0, 2.0, 3.0, 4.0]
# with probabilities:
p = [0.1, 0.2, 0.4, 0.3]
# We're going to implement a number of different risk measures, so to leverage
# Julia's multiple dispatch, we create an abstract type:
abstract type AbstractRiskMeasure end
# and a function to overload:
"""
primal_risk(F::AbstractRiskMeasure, Z::Vector{<:Real}, p::Vector{Float64})
Use `F` to compute the risk of the random variable defined by a vector of costs
`Z` and non-zero probabilities `p`.
"""
function primal_risk end
# !!! note
# We want `Vector{<:Real}` instead of `Vector{Float64}` because we're going
# to automatically differentiate this function in the next section.
# ### Expectation
# The expectation, $\mathbb{E}$, also called the mean or the average, is the
# most widely used convex risk measure. The expectation of a random variable is
# just the sum of $Z$ weighted by the probability:
# ```math
# \mathbb{F}[Z] = \mathbb{E}_p[Z] = \sum\limits_{\omega\in\Omega} p_{\omega} z_{\omega}.
# ```
struct Expectation <: AbstractRiskMeasure end
function primal_risk(::Expectation, Z::Vector{<:Real}, p::Vector{Float64})
return sum(p[i] * Z[i] for i in 1:length(p))
end
# Let's try it out:
primal_risk(Expectation(), Z, p)
# ### WorstCase
# The worst-case risk measure, also called the maximum, is another widely used
# convex risk measure. This risk measure doesn't care about the probability
# vector `p`, only the cost vector `Z`:
# ```math
# \mathbb{F}[Z] = \max[Z] = \max\limits_{\omega\in\Omega} z_{\omega}.
# ```
struct WorstCase <: AbstractRiskMeasure end
function primal_risk(::WorstCase, Z::Vector{<:Real}, ::Vector{Float64})
return maximum(Z)
end
# Let's try it out:
primal_risk(WorstCase(), Z, p)
# ### Entropic
# A more interesting, and less widely used risk measure is the entropic risk
# measure. The entropic risk measure is parameterized by a value $\gamma > 0$,
# and computes the risk of a random variable as:
# ```math
# \mathbb{F}_\gamma[Z] = \frac{1}{\gamma}\log\left(\mathbb{E}_p[e^{\gamma Z}]\right) = \frac{1}{\gamma}\log\left(\sum\limits_{\omega\in\Omega}p_{\omega} e^{\gamma z_{\omega}}\right).
# ```
# !!! tip "Homework challenge"
# Prove that the entropic risk measure satisfies the three axioms of a
# convex risk measure.
struct Entropic <: AbstractRiskMeasure
γ::Float64
function Entropic(γ)
if !(γ > 0)
throw(DomainError(γ, "Entropic risk measure must have γ > 0."))
end
return new(γ)
end
end
function primal_risk(F::Entropic, Z::Vector{<:Real}, p::Vector{Float64})
return 1 / F.γ * log(sum(p[i] * exp(F.γ * Z[i]) for i in 1:length(p)))
end
# !!! warning
# `exp(x)` overflows when $x > 709$. Therefore, if we are passed a vector of
# `Float64`, use arbitrary precision arithmetic with `big.(Z)`.
function primal_risk(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})
return Float64(primal_risk(F, big.(Z), p))
end
# Let's try it out for different values of $\gamma$:
for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1_000.0]
println("γ = $(γ), F[Z] = ", primal_risk(Entropic(γ), Z, p))
end
# !!! info
# The entropic has two extremes. As $\gamma \rightarrow 0$, the entropic
# acts like the expectation risk measure, and as $\gamma \rightarrow \infty$,
# the entropic acts like the worst-case risk measure.
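# We can check these two limits numerically (a quick sanity check; not part of
# the original tutorial) using the risk measures implemented above:
println("Expectation()    : ", primal_risk(Expectation(), Z, p))
println("Entropic(1e-6)   : ", primal_risk(Entropic(1e-6), Z, p))
println("Entropic(10_000) : ", primal_risk(Entropic(10_000.0), Z, p))
println("WorstCase()      : ", primal_risk(WorstCase(), Z, p))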
# Computing risk measures this way works well for computing the primal value.
# However, there isn't an obvious way to compute a subgradient of the
# risk-averse cost-to-go function, which we need for our cut calculation.
# There is a nice solution to this problem, and that is to use the dual
# representation of a risk measure, instead of the primal.
# ## Dual risk measures
# Convex risk measures have a dual representation as follows:
# ```math
# \mathbb{F}[Z] = \sup\limits_{q \in\mathcal{M}(p)} \mathbb{E}_q[Z] - \alpha(p, q),
# ```
# where $\alpha$ is a concave function that maps the probability vectors $p$ and
# $q$ to a real number, and $\mathcal{M}(p) \subseteq \mathcal{P}$ is a convex
# subset of the probability simplex:
# ```math
# \mathcal{P} = \{p \ge 0\;|\;\sum\limits_{\omega\in\Omega}p_{\omega} = 1\}.
# ```
# The dual of a convex risk measure can be interpreted as taking the expectation
# of the random variable $Z$ with respect to the worst probability vector $q$
# that lies within the set $\mathcal{M}$, less some concave penalty term
# $\alpha(p, q)$.
# If we define a function `dual_risk_inner` that computes `q` and `α`:
"""
dual_risk_inner(
F::AbstractRiskMeasure, Z::Vector{Float64}, p::Vector{Float64}
)::Tuple{Vector{Float64},Float64}
Return a tuple formed by the worst-case probability vector `q` and the
corresponding evaluation `α(p, q)`.
"""
function dual_risk_inner end
# then we can write a generic `dual_risk` function as:
function dual_risk(
F::AbstractRiskMeasure,
Z::Vector{Float64},
p::Vector{Float64},
)
q, α = dual_risk_inner(F, Z, p)
return sum(q[i] * Z[i] for i in 1:length(q)) - α
end
# ### Expectation
# For the expectation risk measure, $\mathcal{M}(p) = \{p\}$, and
# $\alpha(\cdot, \cdot) = 0$. Therefore:
function dual_risk_inner(::Expectation, ::Vector{Float64}, p::Vector{Float64})
return p, 0.0
end
# We can check we get the same result as the primal version:
dual_risk(Expectation(), Z, p) == primal_risk(Expectation(), Z, p)
# ### Worst-case
# For the worst-case risk measure, $\mathcal{M}(p) = \mathcal{P}$, and
# $\alpha(\cdot, \cdot) = 0$. Therefore, the dual representation just puts
# all of the probability weight on the maximum outcome:
function dual_risk_inner(::WorstCase, Z::Vector{Float64}, ::Vector{Float64})
q = zeros(length(Z))
_, index = findmax(Z)
q[index] = 1.0
return q, 0.0
end
# We can check we get the same result as the primal version:
dual_risk(WorstCase(), Z, p) == primal_risk(WorstCase(), Z, p)
# ### Entropic
# For the entropic risk measure, $\mathcal{M}(p) = \mathcal{P}$, and:
# ```math
# \alpha(p, q) = \frac{1}{\gamma}\sum\limits_{\omega\in\Omega} q_\omega \log\left(\frac{q_\omega}{p_{\omega}}\right).
# ```
# One way to solve the dual problem is to explicitly solve a nonlinear
# optimization problem:
function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})
N = length(p)
model = JuMP.Model(Ipopt.Optimizer)
JuMP.set_silent(model)
## For this problem, the solve is more accurate if we turn off problem
## scaling.
JuMP.set_optimizer_attribute(model, "nlp_scaling_method", "none")
JuMP.@variable(model, 0 <= q[1:N] <= 1)
JuMP.@constraint(model, sum(q) == 1)
JuMP.@NLexpression(
model,
α,
1 / F.γ * sum(q[i] * log(q[i] / p[i]) for i in 1:N),
)
JuMP.@NLobjective(model, Max, sum(q[i] * Z[i] for i in 1:N) - α)
JuMP.optimize!(model)
return JuMP.value.(q), JuMP.value(α)
end
# We can check we get the same result as the primal version:
for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
primal = primal_risk(Entropic(γ), Z, p)
dual = dual_risk(Entropic(γ), Z, p)
success = primal ≈ dual ? "✓" : "×"
println("$(success) γ = $(γ), primal = $(primal), dual = $(dual)")
end
# !!! info
# This method of solving the dual problem "on-the-side" is used by SDDP.jl
# for a number of risk measures, including a distributionally robust risk
# measure with the Wasserstein distance. Check out all the risk measures
# that SDDP.jl supports in [Add a risk measure](@ref).
# The "on-the-side" method is very general, and it lets us incorporate any
# convex risk measure into SDDP. However, this comes at an increased
# computational cost and potential numerical issues (e.g., not converging to the
# exact solution).
# However, for the entropic risk measure, [Dowson, Morton, and Pagnoncelli (2020)](http://www.optimization-online.org/DB_HTML/2020/08/7984.html)
# derive the following closed form solution for $q^*$:
# ```math
# q_\omega^* = \frac{p_{\omega} e^{\gamma z_{\omega}}}{\sum\limits_{\varphi \in \Omega} p_{\varphi} e^{\gamma z_{\varphi}}}.
# ```
# This is faster because we don't need to use Ipopt, and it avoids some of the
# numerical issues associated with solving a nonlinear program.
function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})
q, α = zeros(length(p)), big(0.0)
peγz = p .* exp.(F.γ .* big.(Z))
sum_peγz = sum(peγz)
for i in 1:length(q)
big_q = peγz[i] / sum_peγz
α += big_q * log(big_q / p[i])
q[i] = Float64(big_q)
end
return q, Float64(α / F.γ)
end
# !!! warning
# Again, note that we use `big` to avoid introducing overflow errors, before
# explicitly casting back to `Float64` for the values we return.
# We can check we get the same result as the primal version:
for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
primal = primal_risk(Entropic(γ), Z, p)
dual = dual_risk(Entropic(γ), Z, p)
success = primal ≈ dual ? "✓" : "×"
println("$(success) γ = $(γ), primal = $(primal), dual = $(dual)")
end
# ## Risk-averse subgradients
# We ended the section on primal risk measures by explaining how we couldn't
# use the primal risk measure in the cut calculation because we needed some way
# of computing a risk-averse subgradient:
# ```math
# \theta \ge \mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right] + \frac{d}{dx^\prime}\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right]^\top (x^\prime - x^\prime_k).
# ```
# The reason we use the dual representation is because of the following theorem,
# which explains how to compute a risk-averse gradient.
# !!! info "The risk-averse subgradient theorem"
# Let $\omega \in \Omega$ index a random vector with finite support and with
# nominal probability mass function, $p \in \mathcal{P}$, which satisfies
# $p > 0$.
#
# Consider a convex risk measure, $\mathbb{F}$, with a convex risk set,
# $\mathcal{M}(p)$, so that $\mathbb{F}$ can be expressed as the dual form.
#
# Let $V(x,\omega)$ be convex with respect to $x$ for all fixed
# $\omega\in\Omega$, and let $\lambda(\tilde{x}, \omega)$ be a subgradient
# of $V(x,\omega)$ with respect to $x$ at $x = \tilde{x}$ for each
# $\omega \in \Omega$.
#
# Then, $\sum_{\omega\in\Omega}q^*_{\omega} \lambda(\tilde{x},\omega)$ is a
# subgradient of $\mathbb{F}[V(x,\omega)]$ at $\tilde{x}$, where
# ```math
# q^* \in \argmax_{q \in \mathcal{M}(p)}\left\{{\mathbb{E}}_q[V(\tilde{x},\omega)] - \alpha(p, q)\right\}.
# ```
# This theorem can be a little hard to unpack, so let's see an example:
function dual_risk_averse_subgradient(
V::Function,
## Use automatic differentiation to compute the gradient of V w.r.t. x,
## given a fixed ω.
λ::Function = (x, ω) -> ForwardDiff.gradient(x -> V(x, ω), x);
F::AbstractRiskMeasure,
Ω::Vector,
p::Vector{Float64},
x̃::Vector{Float64},
)
## Evaluate the function at x=x̃ for all ω ∈ Ω.
V_ω = [V(x̃, ω) for ω in Ω]
## Solve the dual problem to obtain an optimal q^*.
q, α = dual_risk_inner(F, V_ω, p)
## Compute the risk-averse subgradient by taking the expectation of the
## subgradients w.r.t. q^*.
dVdx = sum(q[i] * λ(x̃, ω) for (i, ω) in enumerate(Ω))
return dVdx
end
# We can compare the subgradient obtained with the dual form against the
# automatic differentiation of the `primal_risk` function.
function primal_risk_averse_subgradient(
V::Function;
F::AbstractRiskMeasure,
Ω::Vector,
p::Vector{Float64},
x̃::Vector{Float64},
)
inner(x) = primal_risk(F, [V(x, ω) for ω in Ω], p)
return ForwardDiff.gradient(inner, x̃)
end
# As our example function, we use:
V(x, ω) = ω * x[1]^2
# with:
Ω = [1.0, 2.0, 3.0]
# and:
p = [0.3, 0.4, 0.3]
# at the point:
x̃ = [3.0]
# If $\mathbb{F}$ is the expectation risk-measure, then:
# ```math
# \mathbb{F}[V(x, \omega)] = 2 x^2.
# ```
# The function evaluation at $x=3$ is $18$ and the subgradient is $12$. Let's check
# we get it right with the dual form:
dual_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)
# and the primal form:
primal_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)
# If $\mathbb{F}$ is the worst-case risk measure, then:
# ```math
# \mathbb{F}[V(x, \omega)] = 3 x^2.
# ```
# The function evaluation at $x=3$ is $27$, and the subgradient is $18$. Let's
# check we get it right with the dual form:
dual_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)
# and the primal form:
primal_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)
# If $\mathbb{F}$ is the entropic risk measure, the math is a little more
# difficult to derive analytically. However, we can check against our primal
# version:
for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
dual =
dual_risk_averse_subgradient(V; F = Entropic(γ), Ω = Ω, p = p, x̃ = x̃)
primal = primal_risk_averse_subgradient(
V;
F = Entropic(γ),
Ω = Ω,
p = p,
x̃ = x̃,
)
success = primal ≈ dual ? "✓" : "×"
println("$(success) γ = $(γ), primal = $(primal), dual = $(dual)")
end
# Uh oh! What happened with the last line? It looks like our `primal_risk_averse_subgradient`
# encountered an error and returned a subgradient of `NaN`. This is because of
# the overflow issue with `exp(x)`. However, we can be confident that our dual
# method of computing the risk-averse subgradient is both correct and more
# numerically robust than the primal version.
# !!! info
# As another sanity check, notice how as $\gamma \rightarrow 0$, we tend
# toward the solution of the expectation risk-measure `[12]`, and as
#     $\gamma \rightarrow \infty$, we tend toward the solution of the worst-case
# risk measure `[18]`.
# ## Risk-averse decision rules: Part II
# Why is the risk-averse subgradient theorem helpful? Using the dual
# representation of a convex risk measure, we can re-write the cut:
# ```math
# \theta \ge \mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right] + \frac{d}{dx^\prime}\mathbb{F}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi)\right]^\top (x^\prime - x^\prime_k),\quad k=1,\ldots,K
# ```
# as:
# ```math
# \theta \ge \mathbb{E}_{q_k}\left[V_j^k(x^\prime_k, \varphi) + \frac{d}{dx^\prime}V_j^k(x^\prime_k, \varphi)^\top (x^\prime - x^\prime_k)\right] - \alpha(p, q_k),\quad k=1,\ldots,K,
# ```
# where $q_k = \mathrm{arg}\sup\limits_{q \in\mathcal{M}(p)} \mathbb{E}_q[V_j^k(x_k^\prime, \varphi)] - \alpha(p, q)$.
# Therefore, we can formulate a risk-averse decision rule as:
# ```math
# \begin{aligned}
# V_i^K(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \theta\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x \\
# & \theta \ge \mathbb{E}_{q_k}\left[V_j^k(x^\prime_k, \varphi) + \frac{d}{dx^\prime}V_j^k(x^\prime_k, \varphi)^\top (x^\prime - x^\prime_k)\right] - \alpha(p, q_k),\quad k=1,\ldots,K \\
# & \theta \ge M.
# \end{aligned}
# ```
# where $q_k = \mathrm{arg}\sup\limits_{q \in\mathcal{M}(p)} \mathbb{E}_q[V_j^k(x_k^\prime, \varphi)] - \alpha(p, q)$.
# Thus, to implement risk-averse SDDP, all we need to do is modify the backward
# pass to include this calculation of $q_k$, form the cut using $q_k$ instead of
# $p$, and subtract the penalty term $\alpha(p, q_k)$.
# ## Implementation
# Now we're ready to implement our risk-averse version of SDDP.
# As a prerequisite, we need most of the code from [Introductory theory](@ref).
# ```@raw html
# <p><details>
# <summary>Click to view code from the tutorial "Introductory theory".</summary>
# ```
struct State
in::JuMP.VariableRef
out::JuMP.VariableRef
end
struct Uncertainty
parameterize::Function
Ω::Vector{Any}
P::Vector{Float64}
end
struct Node
subproblem::JuMP.Model
states::Dict{Symbol,State}
uncertainty::Uncertainty
cost_to_go::JuMP.VariableRef
end
struct PolicyGraph
nodes::Vector{Node}
arcs::Vector{Dict{Int,Float64}}
end
function Base.show(io::IO, model::PolicyGraph)
println(io, "A policy graph with $(length(model.nodes)) nodes")
println(io, "Arcs:")
for (from, arcs) in enumerate(model.arcs)
for (to, probability) in arcs
println(io, " $(from) => $(to) w.p. $(probability)")
end
end
return
end
function PolicyGraph(
subproblem_builder::Function;
graph::Vector{Dict{Int,Float64}},
lower_bound::Float64,
optimizer,
)
nodes = Node[]
for t in 1:length(graph)
model = JuMP.Model(optimizer)
states, uncertainty = subproblem_builder(model, t)
JuMP.@variable(model, cost_to_go >= lower_bound)
obj = JuMP.objective_function(model)
JuMP.@objective(model, Min, obj + cost_to_go)
if length(graph[t]) == 0
JuMP.fix(cost_to_go, 0.0; force = true)
end
push!(nodes, Node(model, states, uncertainty, cost_to_go))
end
return PolicyGraph(nodes, graph)
end
function sample_uncertainty(uncertainty::Uncertainty)
r = rand()
for (p, ω) in zip(uncertainty.P, uncertainty.Ω)
r -= p
if r < 0.0
return ω
end
end
return error("We should never get here because P should sum to 1.0.")
end
function sample_next_node(model::PolicyGraph, current::Int)
if length(model.arcs[current]) == 0
return nothing
else
r = rand()
for (to, probability) in model.arcs[current]
r -= probability
if r < 0.0
return to
end
end
return nothing
end
end
function forward_pass(model::PolicyGraph, io::IO = stdout)
incoming_state =
Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)
simulation_cost = 0.0
trajectory = Tuple{Int,Dict{Symbol,Float64}}[]
t = 1
while t !== nothing
node = model.nodes[t]
ω = sample_uncertainty(node.uncertainty)
node.uncertainty.parameterize(ω)
for (k, v) in incoming_state
JuMP.fix(node.states[k].in, v; force = true)
end
JuMP.optimize!(node.subproblem)
if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL
error("Something went terribly wrong!")
end
outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)
stage_cost =
JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)
simulation_cost += stage_cost
incoming_state = outgoing_state
push!(trajectory, (t, outgoing_state))
t = sample_next_node(model, t)
end
return trajectory, simulation_cost
end
function upper_bound(model::PolicyGraph; replications::Int)
simulations = [forward_pass(model, devnull) for i in 1:replications]
z = [s[2] for s in simulations]
μ = Statistics.mean(z)
tσ = 1.96 * Statistics.std(z) / sqrt(replications)
return μ, tσ
end
function lower_bound(model::PolicyGraph)
node = model.nodes[1]
bound = 0.0
for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)
node.uncertainty.parameterize(ω)
JuMP.optimize!(node.subproblem)
bound += p * JuMP.objective_value(node.subproblem)
end
return bound
end
function evaluate_policy(
model::PolicyGraph;
node::Int,
incoming_state::Dict{Symbol,Float64},
random_variable,
)
the_node = model.nodes[node]
the_node.uncertainty.parameterize(random_variable)
for (k, v) in incoming_state
JuMP.fix(the_node.states[k].in, v; force = true)
end
JuMP.optimize!(the_node.subproblem)
return Dict(
k => JuMP.value.(v) for
(k, v) in JuMP.object_dictionary(the_node.subproblem)
)
end
# ```@raw html
# </details></p>
# ```
# First, we need to modify the backward pass to compute the cuts using the
# risk-averse subgradient theorem:
function backward_pass(
model::PolicyGraph,
trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},
io::IO = stdout;
risk_measure::AbstractRiskMeasure,
)
println(io, "| Backward pass")
for i in reverse(1:length(trajectory))
index, outgoing_states = trajectory[i]
node = model.nodes[index]
println(io, "| | Visiting node $(index)")
if length(model.arcs[index]) == 0
continue
end
## =====================================================================
## New! Create vectors to store the cut expressions, V(x,ω) and p:
cut_expressions, V_ω, p = JuMP.AffExpr[], Float64[], Float64[]
## =====================================================================
for (j, P_ij) in model.arcs[index]
next_node = model.nodes[j]
for (k, v) in outgoing_states
JuMP.fix(next_node.states[k].in, v; force = true)
end
for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)
next_node.uncertainty.parameterize(φ)
JuMP.optimize!(next_node.subproblem)
V = JuMP.objective_value(next_node.subproblem)
dVdx = Dict(
k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states
)
## =============================================================
## New! Construct and append the expression
## `V_j^K(x_k, φ) + dVdx_j^K(x'_k, φ)ᵀ(x - x_k)` to the list of
## cut expressions.
push!(
cut_expressions,
JuMP.@expression(
node.subproblem,
V + sum(
dVdx[k] * (x.out - outgoing_states[k]) for
(k, x) in node.states
),
)
)
                ## Add the objective value to V_ω:
push!(V_ω, V)
## Add the probability to p:
push!(p, P_ij * pφ)
## =============================================================
end
end
## =====================================================================
## New! Using the solutions in V_ω, compute q and α:
q, α = dual_risk_inner(risk_measure, V_ω, p)
println(io, "| | | Z = ", Z)
println(io, "| | | p = ", p)
println(io, "| | | q = ", q)
println(io, "| | | α = ", α)
## Then add the cut:
c = JuMP.@constraint(
node.subproblem,
node.cost_to_go >=
sum(q[i] * cut_expressions[i] for i in 1:length(q)) - α
)
## =====================================================================
println(io, "| | | Adding cut : ", c)
end
return nothing
end
# We also need to update the `train` loop of SDDP to pass a risk measure to the
# backward pass:
function train(
model::PolicyGraph;
iteration_limit::Int,
replications::Int,
## =========================================================================
## New! Add a risk_measure argument
risk_measure::AbstractRiskMeasure,
## =========================================================================
io::IO = stdout,
)
for i in 1:iteration_limit
println(io, "Starting iteration $(i)")
outgoing_states, _ = forward_pass(model, io)
## =====================================================================
## New! Pass the risk measure to the backward pass.
backward_pass(model, outgoing_states, io; risk_measure = risk_measure)
## =====================================================================
println(io, "| Finished iteration")
println(io, "| | lower_bound = ", lower_bound(model))
end
μ, tσ = upper_bound(model; replications = replications)
println(io, "Upper bound = $(μ) ± $(tσ)")
return
end
# ### Risk-averse bounds
# !!! warning
# This section is important.
# When we had a risk-neutral policy (i.e., we only used the expectation risk
# measure), we discussed how we could form valid lower and upper bounds.
# The upper bound is still valid as a Monte Carlo simulation of the expected
# cost of the policy. (Although this upper bound doesn't capture the change in
# the policy we wanted to achieve, namely that the impact of the worst outcomes
# is reduced.)
# However, if we use a different risk measure, the lower bound is no longer
# valid!
# We can still calculate a "lower bound" as the objective of the first-stage
# approximated subproblem, and this will converge to a finite value. However,
# we can't meaningfully interpret it as a bound with respect to the optimal
# policy. Therefore, it's best to just ignore the lower bound when training a
# risk-averse policy.
# ## Example: risk-averse hydro-thermal scheduling
# Now it's time for an example. We create the same problem as
# [Introductory theory](@ref):
model = PolicyGraph(;
graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
JuMP.set_silent(subproblem)
JuMP.@variable(subproblem, volume_in == 200)
JuMP.@variable(subproblem, 0 <= volume_out <= 200)
states = Dict(:volume => State(volume_in, volume_out))
JuMP.@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
JuMP.@constraints(
subproblem,
begin
volume_out == volume_in + inflow - hydro_generation - hydro_spill
demand_constraint, thermal_generation + hydro_generation == 150.0
end
)
fuel_cost = [50.0, 100.0, 150.0]
JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)
uncertainty =
Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω
return JuMP.fix(inflow, ω)
end
return states, uncertainty
end
# Then we train a risk-averse policy, passing a risk measure to `train`:
train(
model;
iteration_limit = 3,
replications = 100,
risk_measure = Entropic(1.0),
)
# Finally, evaluate the decision rule:
evaluate_policy(
model;
node = 1,
incoming_state = Dict(:volume => 150.0),
random_variable = 75,
)
# !!! info
# For this trivial example, the risk-averse policy isn't very different from
# the policy obtained using the expectation risk-measure. If you try it on
# some bigger/more interesting problems, you should see the expected cost
# increase, and the upper tail of the policy decrease.
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 32345 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Introductory theory
# !!! note
# This tutorial is aimed at advanced undergraduates or early-stage graduate
# students. You don't need prior exposure to stochastic programming!
# (Indeed, it may be better if you don't, because our approach is
# non-standard in the literature.)
#
# This tutorial is also a living document. If parts are unclear, please
# [open an issue](https://github.com/odow/SDDP.jl/issues/new) so it can be
# improved!
# This tutorial will teach you how the stochastic dual dynamic programming
# algorithm works by implementing a simplified version of the algorithm.
# Our implementation is very much a "vanilla" version of SDDP; it doesn't have
# (m)any fancy computational tricks (e.g., the ones included in SDDP.jl) that
# you need to code a performant or stable version that will work on realistic
# instances. However, our simplified implementation will work on arbitrary
# policy graphs, including those with cycles such as infinite horizon problems!
# **Packages**
#
# This tutorial uses the following packages. For clarity, we call
# `import PackageName` so that we must prefix `PackageName.` to all functions
# and structs provided by that package. Everything not prefixed is either part
# of base Julia, or we wrote it.
import ForwardDiff
import HiGHS
import JuMP
import Statistics
# !!! tip
# You can follow along by installing the above packages, and copy-pasting
# the code we will write into a Julia REPL. Alternatively, you can download
# the Julia `.jl` file which created this tutorial [from GitHub](https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/21_theory_intro.jl).
# ## Preliminaries: background theory
# Start this tutorial by reading [An introduction to SDDP.jl](@ref), which
# introduces the necessary notation and vocabulary that we need for this
# tutorial.
# ## Preliminaries: Kelley's cutting plane algorithm
# Kelley's cutting plane algorithm is an iterative method for minimizing convex
# functions. Given a convex function $f(x)$, Kelley's constructs an
# under-approximation of the function at the minimum by a set of first-order
# Taylor series approximations (called **cuts**) constructed at a set of points
# $k = 1,\ldots,K$:
# ```math
# \begin{aligned}
# f^K = \min\limits_{\theta \in \mathbb{R}, x \in \mathbb{R}^N} \;\; & \theta\\
# & \theta \ge f(x_k) + \frac{d}{dx}f(x_k)^\top (x - x_k),\quad k=1,\ldots,K\\
# & \theta \ge M,
# \end{aligned}
# ```
# where $M$ is a sufficiently large negative number that is a lower bound for
# $f$ over the domain of $x$.
# Kelley's cutting plane algorithm is a structured way of choosing points $x_k$
# to visit, so that as more cuts are added:
# ```math
# \lim_{K \rightarrow \infty} f^K = \min\limits_{x \in \mathbb{R}^N} f(x)
# ```
# However, before we introduce the algorithm, we need to introduce some bounds.
# ### Bounds
# By convexity, $f^K \le f(x)$ for all $x$. Thus, if $x^*$ is a minimizer of
# $f$, then at any point in time we can construct a lower bound for $f(x^*)$ by
# solving $f^K$.
# Moreover, we can use the primal solutions $x_k^*$ returned by solving $f^k$ to
# evaluate $f(x_k^*)$ to generate an upper bound.
# Therefore, $f^K \le f(x^*) \le \min\limits_{k=1,\ldots,K} f(x_k^*)$.
# When the lower bound is sufficiently close to the upper bound, we can
# terminate the algorithm and declare that we have found a solution that is
# close to optimal.
# ### Implementation
# Here is pseudo-code for the Kelley algorithm:
# 1. Take as input a convex function $f(x)$ and an iteration limit $K_{max}$.
# Set $K = 0$, and initialize $f^K$. Set $lb = -\infty$ and $ub = \infty$.
# 2. Solve $f^K$ to obtain a candidate solution $x_{K+1}$.
# 3. Update $lb = f^K$ and $ub = \min\{ub, f(x_{K+1})\}$.
# 4. Add a cut $\theta \ge f(x_{K+1}) + \frac{d}{dx}f\left(x_{K+1}\right)^\top (x - x_{K+1})$ to form $f^{K+1}$.
# 5. Increment $K$.
# 6. If $K = K_{max}$ or $|ub - lb| < \epsilon$, STOP, otherwise, go to step 2.
# And here's a complete implementation:
function kelleys_cutting_plane(
## The function to be minimized.
f::Function,
## The gradient of `f`. By default, we use automatic differentiation to
## compute the gradient of f so the user doesn't have to!
dfdx::Function = x -> ForwardDiff.gradient(f, x);
## The number of arguments to `f`.
input_dimension::Int,
## A lower bound for the function `f` over its domain.
lower_bound::Float64,
## The number of iterations to run Kelley's algorithm for before stopping.
iteration_limit::Int,
## The absolute tolerance ϵ to use for convergence.
tolerance::Float64 = 1e-6,
)
## Step (1):
K = 0
model = JuMP.Model(HiGHS.Optimizer)
JuMP.set_silent(model)
JuMP.@variable(model, θ >= lower_bound)
JuMP.@variable(model, x[1:input_dimension])
JuMP.@objective(model, Min, θ)
x_k = fill(NaN, input_dimension)
lower_bound, upper_bound = -Inf, Inf
while true
## Step (2):
JuMP.optimize!(model)
x_k .= JuMP.value.(x)
## Step (3):
lower_bound = JuMP.objective_value(model)
upper_bound = min(upper_bound, f(x_k))
println("K = $K : $(lower_bound) <= f(x*) <= $(upper_bound)")
## Step (4):
JuMP.@constraint(model, θ >= f(x_k) + dfdx(x_k)' * (x .- x_k))
## Step (5):
K = K + 1
## Step (6):
if K == iteration_limit
println("-- Termination status: iteration limit --")
break
elseif abs(upper_bound - lower_bound) < tolerance
println("-- Termination status: converged --")
break
end
end
println("Found solution: x_K = ", x_k)
return
end
# Let's run our algorithm to see what happens:
kelleys_cutting_plane(;
input_dimension = 2,
lower_bound = 0.0,
iteration_limit = 20,
) do x
return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0
end
# !!! warning
# It's hard to choose a valid lower bound! If you choose one too loose, the
# algorithm can take a long time to converge. However, if you choose one so
# tight that $M > f(x^*)$, then you can obtain a suboptimal solution. For a
# deeper discussion of the implications for SDDP.jl, see
# [Choosing an initial bound](@ref).
# ## Preliminaries: approximating the cost-to-go term
# In the background theory section, we discussed how you could formulate an
# optimal policy to a multistage stochastic program using the dynamic
# programming recursion:
# ```math
# \begin{aligned}
# V_i(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x,
# \end{aligned}
# ```
# where our decision rule, $\pi_i(x, \omega)$, solves this optimization problem
# and returns a $u^*$ corresponding to an optimal solution. Moreover, we alluded
# to the fact that the cost-to-go term (the nasty recursive expectation) makes
# this problem intractable to solve.
# However, if, excluding the cost-to-go term (i.e., the `SP` formulation),
# $V_i(x, \omega)$ can be formulated as a linear program (this also works for
# convex programs, but the math is more involved), then we can make some
# progress by noticing that $x$ only appears as a right-hand side term of the
# fishing constraint $\bar{x} = x$. Therefore, $V_i(x, \cdot)$ is convex with
# respect to $x$ for fixed $\omega$. (If you have not seen this result before,
# try to prove it.)
# The fishing constraint $\bar{x} = x$ has an associated dual variable. The
# economic interpretation of this dual variable is that it represents the change
# in the objective function if the right-hand side $x$ is increased on the scale
# of one unit. In other words, and with a slight abuse of notation, it is the
# value $\frac{d}{dx} V_i(x, \omega)$. (Because $V_i$ is not differentiable, it
# is a [subgradient](https://en.wikipedia.org/wiki/Subderivative) instead of a
# derivative.)
# If we implement the constraint $\bar{x} = x$ by setting the lower- and upper
# bounds of $\bar{x}$ to $x$, then the [reduced cost](https://en.wikipedia.org/wiki/Reduced_cost)
# of the decision variable $\bar{x}$ is the subgradient, and we do not need to
# explicitly add the fishing constraint as a row to the constraint matrix.
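# For example, here is a minimal sketch of this trick on a toy value function
# $V(x) = \min\{2u : u \ge 1 - x,\; u \ge 0\}$, evaluated at $x = 0.5$. (The
# model and the names in it are purely illustrative.)
let
    toy = JuMP.Model(HiGHS.Optimizer)
    JuMP.set_silent(toy)
    JuMP.@variable(toy, x_bar)
    JuMP.@variable(toy, u >= 0)
    JuMP.@constraint(toy, u >= 1 - x_bar)
    JuMP.@objective(toy, Min, 2 * u)
    ## Implement the fishing constraint x̄ = x by fixing the variable:
    JuMP.fix(x_bar, 0.5)
    JuMP.optimize!(toy)
    ## The reduced cost of the fixed variable is the subgradient dV/dx (here
    ## -2, because one extra unit of x saves two units of cost):
    println("V(0.5) = ", JuMP.objective_value(toy))
    println("dV/dx  = ", JuMP.reduced_cost(x_bar))
end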
# !!! tip
# The subproblem can have binary and integer variables, but you'll need to
# use Lagrangian duality to compute a subgradient!
# Stochastic dual dynamic programming converts this problem into a tractable
# form by applying Kelley's cutting plane algorithm to the $V_j$ functions in
# the cost-to-go term:
# ```math
# \begin{aligned}
# V_i^K(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \theta\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x \\
# & \theta \ge \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi) + \frac{d}{dx^\prime}V_j^k(x^\prime_k, \varphi)^\top (x^\prime - x^\prime_k)\right],\quad k=1,\ldots,K \\
# & \theta \ge M.
# \end{aligned}
# ```
# All we need now is a way of generating these cutting planes in an iterative
# manner. Before we get to that though, let's start writing some code.
# ## Implementation: modeling
# Let's make a start by defining the problem structure. Like SDDP.jl, we need a
# few things:
#
# 1. A description of the structure of the policy graph: how many nodes there
# are, and the arcs linking the nodes together with their corresponding
# probabilities.
# 2. A JuMP model for each node in the policy graph.
# 3. A way to identify the incoming and outgoing state variables of each node.
# 4. A description of the random variable, as well as a function that we can
# call that will modify the JuMP model to reflect the realization of the
# random variable.
# 5. A decision variable to act as the approximated cost-to-go term.
# !!! warning
# In the interests of brevity, there is minimal error checking. Think about
# all the different ways you could break the code!
# ### Structs
# The first struct we are going to use is a `State` struct that will wrap an
# incoming and outgoing state variable:
struct State
in::JuMP.VariableRef
out::JuMP.VariableRef
end
# Next, we need a struct to wrap all of the uncertainty within a node:
struct Uncertainty
parameterize::Function
Ω::Vector{Any}
P::Vector{Float64}
end
# `parameterize` is a function which takes a realization of the random variable
# $\omega\in\Omega$ and updates the subproblem accordingly. The finite discrete
# random variable is defined by the vectors `Ω` and `P`, so that the random
# variable takes the value `Ω[i]` with probability `P[i]`. As such, `P` should
# sum to 1. (We don't check this here, but we should; we do in SDDP.jl.)
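# For example, here is a toy `Uncertainty` object with two equally likely
# outcomes. (This toy object is purely illustrative; its `parameterize`
# function just prints the realization, whereas the one we build for the real
# model below fixes a JuMP variable.)
toy_uncertainty = Uncertainty([-1.0, 1.0], [0.5, 0.5]) do ω
    return println("Parameterizing subproblem with ω = ", ω)
end
toy_uncertainty.parameterize(1.0)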
# Now we have two building blocks, we can declare the structure of each node:
struct Node
subproblem::JuMP.Model
states::Dict{Symbol,State}
uncertainty::Uncertainty
cost_to_go::JuMP.VariableRef
end
# * `subproblem` is going to be the JuMP model that we build at each node.
# * `states` is a dictionary that maps a symbolic name of a state variable to a
# `State` object wrapping the incoming and outgoing state variables in
# `subproblem`.
# * `uncertainty` is an `Uncertainty` object described above.
# * `cost_to_go` is a JuMP variable that approximates the cost-to-go term.
# Finally, we define a simplified policy graph as follows:
struct PolicyGraph
nodes::Vector{Node}
arcs::Vector{Dict{Int,Float64}}
end
# There is a vector of nodes, as well as a data structure for the arcs. `arcs`
# is a vector of dictionaries, where `arcs[i][j]` gives the probability of
# transitioning from node `i` to node `j`, if an arc exists.
# To simplify things, we will assume that the root node transitions to node `1`
# with probability 1, and there are no other incoming arcs to node 1. Notably,
# we can still define cyclic graphs though!
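# For example, the `arcs` vector for a three-node graph in which node 3 loops
# back to node 2 with probability 0.5 is
# `[Dict(2 => 1.0), Dict(3 => 1.0), Dict(2 => 0.5)]`, so that
# `arcs[3][2] = 0.5`. (We build exactly this graph in the infinite horizon
# example at the end of this tutorial.)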
# We also define a nice `show` method so that we don't accidentally print a
# large amount of information to the screen when creating a model:
function Base.show(io::IO, model::PolicyGraph)
println(io, "A policy graph with $(length(model.nodes)) nodes")
println(io, "Arcs:")
for (from, arcs) in enumerate(model.arcs)
for (to, probability) in arcs
println(io, " $(from) => $(to) w.p. $(probability)")
end
end
return
end
# ### Functions
# Now we have some basic types, let's implement some functions so that the user
# can create a model.
# First, we need an example of a function that the user will provide. Like
# SDDP.jl, this takes an empty `subproblem`, and a node index, in this case
# `t::Int`. You could change this function to change the model, or define a new
# one later in the code.
# We're going to copy the example from [An introduction to SDDP.jl](@ref),
# with some minor adjustments for the fact we don't have many of the bells and
# whistles of SDDP.jl. You can probably see how some of the SDDP.jl
# functionality like [`@stageobjective`](@ref) and [`SDDP.parameterize`](@ref)
# help smooth some of the usability issues like needing to construct both the
# incoming and outgoing state variables, or needing to explicitly declare
# `return states, uncertainty`.
function subproblem_builder(subproblem::JuMP.Model, t::Int)
## Define the state variables. Note how we fix the incoming state to the
## initial state variable regardless of `t`! This isn't strictly necessary;
## it only matters that we do it for the first node.
JuMP.@variable(subproblem, volume_in == 200)
JuMP.@variable(subproblem, 0 <= volume_out <= 200)
states = Dict(:volume => State(volume_in, volume_out))
## Define the control variables.
JuMP.@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
## Define the constraints
JuMP.@constraints(
subproblem,
begin
volume_out == volume_in + inflow - hydro_generation - hydro_spill
demand_constraint, thermal_generation + hydro_generation == 150.0
end
)
## Define the objective for each stage `t`. Note that we can use `t` as an
## index for t = 1, 2, 3.
fuel_cost = [50.0, 100.0, 150.0]
JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)
## Finally, we define the uncertainty object. Because this is a simplified
## implementation of SDDP, we shall politely ask the user to only modify the
## constraints, and not the objective function! (Not that it changes the
## algorithm, we just have to add more information to keep track of things.)
uncertainty = Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω
return JuMP.fix(inflow, ω)
end
return states, uncertainty
end
# The next function we need to define is the analog of
# [`SDDP.PolicyGraph`](@ref). It should be pretty readable.
function PolicyGraph(
subproblem_builder::Function;
graph::Vector{Dict{Int,Float64}},
lower_bound::Float64,
optimizer,
)
nodes = Node[]
for t in 1:length(graph)
## Create a model.
model = JuMP.Model(optimizer)
JuMP.set_silent(model)
## Use the provided function to build out each subproblem. The user's
## function returns a dictionary mapping `Symbol`s to `State` objects,
## and an `Uncertainty` object.
states, uncertainty = subproblem_builder(model, t)
## Now add the cost-to-go terms:
JuMP.@variable(model, cost_to_go >= lower_bound)
obj = JuMP.objective_function(model)
JuMP.@objective(model, Min, obj + cost_to_go)
## If there are no outgoing arcs, the cost-to-go is 0.0.
if length(graph[t]) == 0
JuMP.fix(cost_to_go, 0.0; force = true)
end
push!(nodes, Node(model, states, uncertainty, cost_to_go))
end
return PolicyGraph(nodes, graph)
end
# Then, we can create a model using the `subproblem_builder` function we defined
# earlier:
model = PolicyGraph(
subproblem_builder;
graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
)
# ## Implementation: helpful samplers
# Before we get properly coding the solution algorithm, it's also going to be
# useful to have a function that samples a realization of the random variable
# defined by `Ω` and `P`.
function sample_uncertainty(uncertainty::Uncertainty)
r = rand()
for (p, ω) in zip(uncertainty.P, uncertainty.Ω)
r -= p
if r < 0.0
return ω
end
end
return error("We should never get here because P should sum to 1.0.")
end
# !!! note
# `rand()` samples a uniform random variable in `[0, 1)`.
# For example:
for i in 1:3
println("ω = ", sample_uncertainty(model.nodes[1].uncertainty))
end
# It's also going to be useful to define a function that generates a random walk
# through the nodes of the graph:
function sample_next_node(model::PolicyGraph, current::Int)
if length(model.arcs[current]) == 0
## No outgoing arcs!
return nothing
else
r = rand()
for (to, probability) in model.arcs[current]
r -= probability
if r < 0.0
return to
end
end
## We looped through the outgoing arcs and still have probability left
## over! This means we've hit an implicit "zero" node.
return nothing
end
end
# For example:
for i in 1:3
## We use `repr` to print the next node, because `sample_next_node` can
## return `nothing`.
println("Next node from $(i) = ", repr(sample_next_node(model, i)))
end
# This is a little boring, because our graph is simple. However, more
# complicated graphs will generate more interesting trajectories!
# ## Implementation: the forward pass
# Recall that, after approximating the cost-to-go term, we need a way of
# generating the cuts. As the first step, we need a way of generating candidate
# solutions $x_k^\prime$. However, unlike the Kelley's example, our functions
# $V_j^k(x^\prime, \varphi)$ need two inputs: an outgoing state variable and a
# realization of the random variable.
# One way of getting these inputs is just to pick a random (feasible) value.
# However, in doing so, we might pick outgoing state variables that we will
# never see in practice, or we might infrequently pick outgoing state variables
# that we will often see in practice. Therefore, a better way of generating the
# inputs is to use a simulation of the policy, which we call the **forward**
# **pass**.
# The forward pass walks the policy graph from start to end, transitioning
# randomly along the arcs. At each node, it observes a realization of the random
# variable and solves the approximated subproblem to generate a candidate
# outgoing state variable $x_k^\prime$. The outgoing state variable is passed as
# the incoming state variable to the next node in the trajectory.
function forward_pass(model::PolicyGraph, io::IO = stdout)
println(io, "| Forward Pass")
## First, get the value of the state at the root node (e.g., x_R).
incoming_state =
Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)
    ## `simulation_cost` is an accumulator that is going to sum the stage-costs
## incurred over the forward pass.
simulation_cost = 0.0
## We also need to record the nodes visited and resultant outgoing state
## variables so we can pass them to the backward pass.
trajectory = Tuple{Int,Dict{Symbol,Float64}}[]
## Now's the meat of the forward pass: beginning at the first node:
t = 1
while t !== nothing
node = model.nodes[t]
println(io, "| | Visiting node $(t)")
## Sample the uncertainty:
ω = sample_uncertainty(node.uncertainty)
println(io, "| | | ω = ", ω)
## Parameterizing the subproblem using the user-provided function:
node.uncertainty.parameterize(ω)
println(io, "| | | x = ", incoming_state)
## Update the incoming state variable:
for (k, v) in incoming_state
JuMP.fix(node.states[k].in, v; force = true)
end
## Now solve the subproblem and check we found an optimal solution:
JuMP.optimize!(node.subproblem)
if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL
error("Something went terribly wrong!")
end
## Compute the outgoing state variables:
outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)
println(io, "| | | x′ = ", outgoing_state)
## We also need to compute the stage cost to add to our
## `simulation_cost` accumulator:
stage_cost =
JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)
simulation_cost += stage_cost
println(io, "| | | C(x, u, ω) = ", stage_cost)
## As a penultimate step, set the outgoing state of stage t and the
## incoming state of stage t + 1, and add the node to the trajectory.
incoming_state = outgoing_state
push!(trajectory, (t, outgoing_state))
## Finally, sample a new node to step to. If `t === nothing`, the
## `while` loop will break.
t = sample_next_node(model, t)
end
return trajectory, simulation_cost
end
# Let's take a look at one forward pass:
trajectory, simulation_cost = forward_pass(model);
# ## Implementation: the backward pass
# From the forward pass, we obtained a vector of nodes visited and their
# corresponding outgoing state variables. Now we need to refine the
# approximation for each node at the candidate solution for the outgoing state
# variable. That is, we need to add a new cut:
# ```math
# \theta \ge \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}\left[V_j^k(x^\prime_k, \varphi) + \frac{d}{dx^\prime}V_j^k(x^\prime_k, \varphi)^\top (x^\prime - x^\prime_k)\right]
# ```
# or alternatively:
# ```math
# \theta \ge \sum\limits_{j \in i^+} \sum\limits_{\varphi \in \Omega_j} p_{ij} p_{\varphi}\left[V_j^k(x^\prime_k, \varphi) + \frac{d}{dx^\prime}V_j^k(x^\prime_k, \varphi)^\top (x^\prime - x^\prime_k)\right]
# ```
# It doesn't matter what order we visit the nodes to generate these cuts for.
# For example, we could compute them all in parallel, using the current
# approximations of $V^K_i$.
# However, we can be smarter than that.
# If we traverse the list of nodes visited in the forward pass in reverse, then
# when we come to refine the $i^{th}$ node in the trajectory, we will already
# have improved the approximation of the $(i+1)^{th}$ node in the trajectory as
# well!
# Therefore, our refinement of the $i^{th}$ node will be better than if we
# improved node $i$ first, and then refined node $(i+1)$.
# Because we walk the nodes in reverse, we call this the **backward pass**.
# !!! info
# If you're into deep learning, you could view this as the equivalent of
# back-propagation: the forward pass pushes primal information through the
# graph (outgoing state variables), and the backward pass pulls dual
# information (cuts) back through the graph to improve our decisions on the
# next forward pass.
function backward_pass(
model::PolicyGraph,
trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},
io::IO = stdout,
)
println(io, "| Backward pass")
## For the backward pass, we walk back up the nodes.
for i in reverse(1:length(trajectory))
index, outgoing_states = trajectory[i]
node = model.nodes[index]
println(io, "| | Visiting node $(index)")
if length(model.arcs[index]) == 0
## If there are no children, the cost-to-go is 0.
println(io, "| | | Skipping node because the cost-to-go is 0")
continue
end
## Create an empty affine expression that we will use to build up the
## right-hand side of the cut expression.
cut_expression = JuMP.AffExpr(0.0)
## For each node j ∈ i⁺
for (j, P_ij) in model.arcs[index]
next_node = model.nodes[j]
## Set the incoming state variables of node j to the outgoing state
## variables of node i
for (k, v) in outgoing_states
JuMP.fix(next_node.states[k].in, v; force = true)
end
## Then for each realization of φ ∈ Ωⱼ
for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)
## Setup and solve for the realization of φ
println(io, "| | | Solving φ = ", φ)
next_node.uncertainty.parameterize(φ)
JuMP.optimize!(next_node.subproblem)
## Then prepare the cut `P_ij * pφ * [V + dVdxᵀ(x - x_k)]``
V = JuMP.objective_value(next_node.subproblem)
println(io, "| | | | V = ", V)
dVdx = Dict(
k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states
)
println(io, "| | | | dVdx′ = ", dVdx)
cut_expression += JuMP.@expression(
node.subproblem,
P_ij *
pφ *
(
V + sum(
dVdx[k] * (x.out - outgoing_states[k]) for
(k, x) in node.states
)
),
)
end
end
## And then refine the cost-to-go variable by adding the cut:
c = JuMP.@constraint(node.subproblem, node.cost_to_go >= cut_expression)
println(io, "| | | Adding cut : ", c)
end
return nothing
end
# ## Implementation: bounds
# ### Lower bounds
# Recall from Kelley's that we can obtain a lower bound for $f(x^*)$ by
# evaluating $f^K$. The analogous lower bound for a multistage stochastic
# program is:
# ```math
# \mathbb{E}_{i \in R^+, \omega \in \Omega_i}[V_i^K(x_R, \omega)] \le \min_{\pi} \mathbb{E}_{i \in R^+, \omega \in \Omega_i}[V_i^\pi(x_R, \omega)]
# ```
# Here's how we compute the lower bound:
function lower_bound(model::PolicyGraph)
node = model.nodes[1]
bound = 0.0
for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)
node.uncertainty.parameterize(ω)
JuMP.optimize!(node.subproblem)
bound += p * JuMP.objective_value(node.subproblem)
end
return bound
end
# !!! note
# The implementation is simplified because we assumed that there is only one
# arc from the root node, and that it pointed to the first node in the
# vector.
# Because we haven't trained a policy yet, the lower bound is going to be very
# bad:
lower_bound(model)
# ### Upper bounds
# With Kelley's algorithm, we could easily construct an upper bound by
# evaluating $f(x_K)$. However, it is almost always intractable to evaluate an
# upper bound for multistage stochastic programs due to the large number of
# nodes and the nested expectations. Instead, we can perform a Monte Carlo
# simulation of the policy to build a statistical estimate for the value of
# $\mathbb{E}_{i \in R^+, \omega \in \Omega_i}[V_i^\pi(x_R, \omega)]$, where
# $\pi$ is the policy defined by the current approximations $V^K_i$.
function upper_bound(model::PolicyGraph; replications::Int)
## Pipe the output to `devnull` so we don't print too much!
simulations = [forward_pass(model, devnull) for i in 1:replications]
z = [s[2] for s in simulations]
μ = Statistics.mean(z)
tσ = 1.96 * Statistics.std(z) / sqrt(replications)
return μ, tσ
end
# !!! note
# The width of the confidence interval is incorrect if there are cycles in
# the graph, because the distribution of simulation costs `z` is not
# symmetric. The mean is correct, however.
# ### Termination criteria
# In Kelley's algorithm, the upper bound was deterministic. Therefore, we could
# terminate the algorithm when the lower bound was sufficiently close to the
# upper bound. However, our upper bound for SDDP is not deterministic; it is a
# confidence interval!
# Some people suggest terminating SDDP when the lower bound is contained within
# the confidence interval. However, this is a poor choice because it is too easy
# to generate a false positive. For example, if we use a small number of
# replications then the width of the confidence interval will be large, and we are more
# likely to terminate!
# In a future tutorial (not yet written...) we will discuss termination criteria
# in more depth. For now, pick a large number of iterations and train for as
# long as possible.
# !!! tip
# For a rule of thumb, pick a large number of iterations to train the
# policy for (e.g.,
# $10 \times |\mathcal{N}| \times \max\limits_{i\in\mathcal{N}} |\Omega_i|$)
# ## Implementation: the training loop
# The `train` loop of SDDP just applies the forward and backward passes
# iteratively, followed by a final simulation to compute the upper bound
# confidence interval:
function train(
model::PolicyGraph;
iteration_limit::Int,
replications::Int,
io::IO = stdout,
)
for i in 1:iteration_limit
println(io, "Starting iteration $(i)")
outgoing_states, _ = forward_pass(model, io)
backward_pass(model, outgoing_states, io)
println(io, "| Finished iteration")
println(io, "| | lower_bound = ", lower_bound(model))
end
println(io, "Termination status: iteration limit")
μ, tσ = upper_bound(model; replications = replications)
println(io, "Upper bound = $(μ) ± $(tσ)")
return
end
# Using our `model` we defined earlier, we can go:
train(model; iteration_limit = 3, replications = 100)
# Success! We trained a policy for a finite horizon multistage stochastic
# program using stochastic dual dynamic programming.
# ## Implementation: evaluating the policy
# A final step is the ability to evaluate the policy at a given point.
function evaluate_policy(
model::PolicyGraph;
node::Int,
incoming_state::Dict{Symbol,Float64},
random_variable,
)
the_node = model.nodes[node]
the_node.uncertainty.parameterize(random_variable)
for (k, v) in incoming_state
JuMP.fix(the_node.states[k].in, v; force = true)
end
JuMP.optimize!(the_node.subproblem)
return Dict(
k => JuMP.value.(v) for
(k, v) in JuMP.object_dictionary(the_node.subproblem)
)
end
evaluate_policy(
model;
node = 1,
incoming_state = Dict(:volume => 150.0),
random_variable = 75,
)
# !!! note
# The random variable can be **out-of-sample**, i.e., it doesn't have to be
# in the vector $\Omega$ we created when defining the model! This is a
# notable difference to other multistage stochastic solution methods like
# progressive hedging or using the deterministic equivalent.
# ## Example: infinite horizon
# As promised earlier, our implementation is actually pretty general. It can
# solve any multistage stochastic (linear) program defined by a policy graph,
# including infinite horizon problems!
# Here's an example, where we have extended our earlier problem with an arc from
# node 3 to node 2 with probability 0.5. You can interpret the 0.5 as a discount
# factor.
model = PolicyGraph(
subproblem_builder;
graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict(2 => 0.5)],
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
)
# Then, train a policy:
train(model; iteration_limit = 3, replications = 100)
# Success! We trained a policy for an infinite horizon multistage stochastic
# program using stochastic dual dynamic programming. Note how some of the
# forward passes are different lengths!
evaluate_policy(
model;
node = 3,
incoming_state = Dict(:volume => 100.0),
random_variable = 10.0,
)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 8125 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Auto-regressive stochastic processes
# SDDP.jl assumes that the random variable in each node is independent of the
# random variables in all other nodes. However, a common request is to model
# the random variables by some auto-regressive process.
# There are two ways to do this:
# 1. model the random variable as a Markov chain
# 2. use the "state-space expansion" trick
# !!! info
# This tutorial is in the context of a hydro-thermal scheduling example, but
# it should be apparent how the ideas transfer to other applications.
using SDDP
import HiGHS
# ## [The state-space expansion trick](@id state-space-expansion)
# In [An introduction to SDDP.jl](@ref), we assumed that the inflows were
# stagewise-independent. However, in many cases this is not correct, and inflow
# models are more accurately described by an auto-regressive process such as:
# ```math
# inflow_{t} = inflow_{t-1} + \varepsilon
# ```
# Here ``\varepsilon`` is a random variable, and the inflow in stage ``t`` is
# the inflow in stage ``t-1`` plus ``\varepsilon`` (which might be negative).
# For simplicity, we omit any coefficients and other terms, but this could
# easily be extended to a model like
# ```math
# inflow_{t} = a \times inflow_{t-1} + b + \varepsilon
# ```
# In practice, you can estimate a distribution for ``\varepsilon`` by fitting
# the chosen statistical model to historical data, and then using the empirical
# residuals.
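# For example, one rough sketch of that fitting step (the `fit_ar1_residuals`
# helper and the toy data below are illustrative assumptions, not part of
# SDDP.jl) is to estimate `a` and `b` by least squares and keep the residuals:
function fit_ar1_residuals(history::Vector{Float64})
    y, x_lag = history[2:end], history[1:(end-1)]
    X = hcat(x_lag, ones(length(x_lag)))
    a, b = X \ y                        ## least-squares estimates of a and b
    ε = y .- (a .* x_lag .+ b)          ## empirical residuals to use as Ω
    return a, b, ε
end
fit_ar1_residuals([50.0, 40.0, 55.0, 60.0, 48.0])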
# To implement the auto-regressive model in SDDP.jl, we introduce `inflow` as a
# state variable.
# !!! tip
# Our rule of thumb for "when is something a state variable?" is: if you
# need the value of a variable from a previous stage to compute something in
# stage ``t``, then that variable is a state variable.
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)
@variable(sp, g_t >= 0)
@variable(sp, g_h >= 0)
@variable(sp, s >= 0)
@constraint(sp, g_h + g_t == 150)
c = [50, 100, 150]
@stageobjective(sp, c[t] * g_t)
## =========================================================================
## New stuff below Here
## Add inflow as a state
@variable(sp, inflow, SDDP.State, initial_value = 50.0)
## Add the random variable as a control variable
@variable(sp, ε)
## The equation describing our statistical model
@constraint(sp, inflow.out == inflow.in + ε)
## The new water balance constraint using the state variable
@constraint(sp, x.out == x.in - g_h - s + inflow.out)
## Assume we have some empirical residuals:
Ω = [-10.0, 0.1, 9.6]
SDDP.parameterize(sp, Ω) do ω
return JuMP.fix(ε, ω)
end
end
# ### When can this trick be used?
# The state-space expansion trick should be used when:
#
# * The random variable appears additively in the objective or in the
# constraints. Something like `inflow * decision_variable` will _not_ work.
# * The statistical model is linear, or can be written using the JuMP
# `@constraint` macro.
# * The dimension of the random variable is small (see
# [Vector auto-regressive models](@ref) for the multi-variate case).
# ## The Markov chain approach
# In the Markov chain approach, we model the stochastic process for inflow by a
# discrete Markov chain. Markov chains are nodes with transition probabilities
# between the nodes. SDDP.jl has good support for solving problems in which the
# uncertainty is formulated as a Markov chain.
# The first step of the Markov chain approach is to write a function which
# simulates the stochastic process. Here is a simulator for our inflow model:
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
# When called with no arguments, it produces a vector of inflows:
simulator()
# !!! warning
# The `simulator` must return a `Vector{Float64}`, so it is limited to a
# uni-variate random variable. It is possible to do something similar for
#     multi-variate random variables, but you'll have to manually construct the
# Markov transition matrix, and solution times scale poorly, even in the
# two-dimensional case.
# The next step is to call [`SDDP.MarkovianGraph`](@ref) with our simulator.
# This function will attempt to fit a Markov chain to the stochastic process
# produced by your `simulator`. There are two key arguments:
# * `budget` is the total number of nodes we want in the Markov chain
# * `scenarios` is a limit on the number of times we can call `simulator`
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
# Here we can see we have created a MarkovianGraph with nodes like `(2, 59.7)`.
# The first element of each node is the stage, and the second element is the
# inflow.
# Create a [`SDDP.PolicyGraph`](@ref) using `graph` as follows:
model = SDDP.PolicyGraph(
graph; # <--- New stuff
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
t, inflow = node # <--- New stuff
@variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)
@variable(sp, g_t >= 0)
@variable(sp, g_h >= 0)
@variable(sp, s >= 0)
@constraint(sp, g_h + g_t == 150)
c = [50, 100, 150]
@stageobjective(sp, c[t] * g_t)
## The new water balance constraint using the node:
@constraint(sp, x.out == x.in - g_h - s + inflow)
end
# ### When can this trick be used?
# The Markov chain approach should be used when:
#
# * The random variable is uni-variate
# * The random variable appears in the objective function or as a variable
# coefficient in the constraint matrix
# * It's non-trivial to write the stochastic process as a series of constraints
# (for example, it uses nonlinear terms)
# * The number of nodes is modest (for example, a budget of hundreds, up to
# perhaps 1000)
# ## Vector auto-regressive models
# The [state-space expansion](@ref state-space-expansion) section assumed that
# the random variable was uni-variate. However, the approach naturally extends
# to vector auto-regressive models. For example, if `inflow` is a 2-dimensional
# vector, then we can model a vector auto-regressive model to it as follows:
# ```math
# inflow_{t} = A \times inflow_{t-1} + b + \varepsilon
# ```
# Here `A` is a 2-by-2 matrix, and `b` and ``\varepsilon`` are 2-by-1 vectors.
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)
@variable(sp, g_t >= 0)
@variable(sp, g_h >= 0)
@variable(sp, s >= 0)
@constraint(sp, g_h + g_t == 150)
c = [50, 100, 150]
@stageobjective(sp, c[t] * g_t)
## =========================================================================
## New stuff below Here
## Add inflow as a state
@variable(sp, inflow[1:2], SDDP.State, initial_value = 50.0)
## Add the random variable as a control variable
@variable(sp, ε[1:2])
## The equation describing our statistical model
A = [0.8 0.2; 0.2 0.8]
@constraint(
sp,
[i = 1:2],
inflow[i].out == sum(A[i, j] * inflow[j].in for j in 1:2) + ε[i],
)
## The new water balance constraint using the state variable
@constraint(sp, x.out == x.in - g_h - s + inflow[1].out + inflow[2].out)
## Assume we have some empirical residuals:
Ω₁ = [-10.0, 0.1, 9.6]
Ω₂ = [-10.0, 0.1, 9.6]
Ω = [(ω₁, ω₂) for ω₁ in Ω₁ for ω₂ in Ω₂]
SDDP.parameterize(sp, Ω) do ω
JuMP.fix(ε[1], ω[1])
JuMP.fix(ε[2], ω[2])
return
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 6969 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Here-and-now and hazard-decision
# SDDP.jl assumes that the agent gets to make a decision _after_ observing the
# realization of the random variable. This is called a _wait-and-see_ or
# _hazard-decision_ model. In contrast, you might want your agent to make
# decisions _before_ observing the random variable. This is called a
# _here-and-now_ or _decision-hazard_ model.
# !!! info
#     The terms decision-hazard and hazard-decision come from the French _hasard_,
# meaning chance. It could also have been translated as uncertainty-decision
# and decision-uncertainty, but the community seems to have settled on the
# transliteration _hazard_ instead. We like the hazard-decision and
# decision-hazard terms because they clearly communicate the order of the
# decision and the uncertainty.
# The purpose of this tutorial is to demonstrate how to model here-and-now
# decisions in SDDP.jl.
# This tutorial uses the following packages:
using SDDP
import HiGHS
# ## Hazard-decision formulation
# As an example, we're going to build a standard hydro-thermal scheduling
# model, with a single hydro-reservoir and a single thermal generation plant.
# In each of the four stages, we need to choose some mix of `u_thermal` and
# `u_hydro` to meet a demand of `9` units, where unmet demand is penalized at a
# rate of \$500/unit.
hazard_decision = SDDP.LinearPolicyGraph(;
stages = 4,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
@variables(sp, begin
0 <= x_storage <= 8, (SDDP.State, initial_value = 6)
u_thermal >= 0
u_hydro >= 0
u_unmet_demand >= 0
end)
@constraint(sp, u_thermal + u_hydro == 9 - u_unmet_demand)
@constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)
SDDP.parameterize(sp, [2, 3]) do ω_inflow
return set_normalized_rhs(c_balance, ω_inflow)
end
@stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal)
end
# ## Decision-hazard formulation
# In the wait-and-see formulation, we get to decide the generation variables
# _after_ observing the realization of `ω_inflow`. However, a common modeling
# situation is that we need to decide the level of thermal generation
# `u_thermal` _before_ observing the inflow.
# SDDP.jl can model here-and-now decisions with a modeling trick: a wait-and-see
# decision in stage _t-1_ is equivalent to a here-and-now decision in stage _t_.
# In other words, we need to convert the `u_thermal` decision from a control
# variable that is decided in stage `t`, to a state variable that is decided in
# stage `t-1`. Here's our new model, with the three lines that have changed:
decision_hazard = SDDP.LinearPolicyGraph(;
stages = 4,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
@variables(sp, begin
0 <= x_storage <= 8, (SDDP.State, initial_value = 6)
u_thermal >= 0, (SDDP.State, initial_value = 0) # <-- changed
u_hydro >= 0
u_unmet_demand >= 0
end)
@constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand) # <-- changed
@constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)
SDDP.parameterize(sp, [2, 3]) do ω
return set_normalized_rhs(c_balance, ω)
end
@stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in) # <-- changed
end
# Can you understand the reformulation? In each stage, we now use the value of
# `u_thermal.in` instead of `u_thermal`, and the value of the outgoing
# `u_thermal.out` is the here-and-now decision for stage `t+1`.
# (If you can spot a "mistake" with this model, don't worry, we'll fix it below.
# Presenting it like this simplifies the exposition.)
# ## Comparison
# Let's compare the cost of operating the two models:
function train_and_compute_cost(model)
SDDP.train(model; print_level = 0)
return println("Cost = \$", SDDP.calculate_bound(model))
end
train_and_compute_cost(hazard_decision)
#-
train_and_compute_cost(decision_hazard)
# This suggests that choosing the thermal generation before observing the inflow
# adds a cost of \$250. But does this make sense?
# If we look carefully at our `decision_hazard` model, the incoming value of
# `u_thermal.in` in the first stage is fixed to the `initial_value` of `0`.
# Therefore, we must always meet the full demand with `u_hydro`, which we cannot
# do without incurring unmet demand.
# To allow the model to choose an optimal level of `u_thermal` in the
# first-stage, we need to add an extra stage that is deterministic with no
# stage objective.
# ## Fixing the decision-hazard
# In the following model, we now have five stages, so that stage `t+1` in
# `decision_hazard_2` corresponds to stage `t` in `decision_hazard`. We've also
# added an `if`-statement, which adds different constraints depending on the
# node. Note that we need to add an `x_storage.out == x_storage.in` constraint
# because the storage can't change in this new first-stage.
decision_hazard_2 = SDDP.LinearPolicyGraph(;
stages = 5, # <-- changed
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, node
@variables(sp, begin
0 <= x_storage <= 8, (SDDP.State, initial_value = 6)
u_thermal >= 0, (SDDP.State, initial_value = 0)
u_hydro >= 0
u_unmet_demand >= 0
end)
if node == 1 # <-- new
@constraint(sp, x_storage.out == x_storage.in) # <-- new
@stageobjective(sp, 0) # <-- new
else
@constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand)
@constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)
SDDP.parameterize(sp, [2, 3]) do ω
return set_normalized_rhs(c_balance, ω)
end
@stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in)
end
end
train_and_compute_cost(decision_hazard_2)
# Now we find that the cost of choosing the thermal generation before observing
# the inflow adds a much more reasonable cost of \$10.
# ## Summary
# To summarize, the difference between here-and-now and wait-and-see variables
# is a modeling choice.
# To create a here-and-now decision, add it as a state variable to the
# previous stage.
# In some cases, you'll need to add an additional "first-stage" problem to
# enable the model to choose an optimal value for the here-and-now decision
# variable. You do not need to do this if the first stage is deterministic. You
# must make sure that the subproblem is feasible for all possible incoming
# values of the here-and-now decision variable.
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 9325 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Example: the milk producer
# The purpose of this tutorial is to demonstrate how to fit a Markovian policy
# graph to a univariate stochastic process.
# This tutorial uses the following packages:
using SDDP
import HiGHS
import Plots
# ## Background
# A company produces milk for sale on a spot market each month. The quantity of
# milk they produce is uncertain, and so too is the price on the spot market.
# The company can store unsold milk in a stockpile of dried milk powder.
# The spot price is determined by an auction system, and so varies from month to
# month, but demonstrates serial correlation. In each auction, there is
# sufficient demand that the milk producer finds a buyer for all their
# milk, regardless of the quantity they supply. Furthermore, the spot price
# is independent of the milk producer (they are a small player in the market).
# The spot price is highly volatile, and is the result of a process that is out
# of the control of the company. To counteract their price risk, the company
# engages in a forward contracting programme.
# The forward contracting programme is a deal for physical milk four months in
# the future.
# The futures price is the current spot price, plus some forward contango (the
# buyers gain certainty that they will receive the milk in the future).
# In general, the milk company should forward contract (since they reduce
# their price risk), however they also have production risk. Therefore, it may
# be the case that they forward contract a fixed amount, but find that they do
# not produce enough milk to meet the fixed demand. They are then forced to
# buy additional milk on the spot market.
# The goal of the milk company is to choose the extent to which they forward
# contract in order to maximise (risk-adjusted) revenues, whilst managing their
# production risk.
# ## A stochastic process for price
# It is outside the scope of this tutorial, but assume that we have gone away
# and analysed historical data to fit a stochastic process to the sequence of
# monthly auction spot prices.
# One plausible model is a multiplicative auto-regressive model of order one,
# where the white noise term is modeled by a finite distribution of empirical
# residuals. We can simulate this stochastic process as follows:
function simulator()
residuals = [0.0987, 0.199, 0.303, 0.412, 0.530, 0.661, 0.814, 1.010, 1.290]
residuals = 0.1 * vcat(-residuals, 0.0, residuals)
scenario = zeros(12)
y, μ, α = 4.5, 6.0, 0.05
for t in 1:12
y = exp((1 - α) * log(y) + α * log(μ) + rand(residuals))
scenario[t] = clamp(y, 3.0, 9.0)
end
return scenario
end
simulator()
# It may be helpful to visualize a number of simulations of the price process:
plot = Plots.plot(
[simulator() for _ in 1:500];
color = "gray",
opacity = 0.2,
legend = false,
xlabel = "Month",
ylabel = "Price [\$/kg]",
xlims = (1, 12),
ylims = (3, 9),
)
# The prices gradually revert to the mean of \$6/kg, and there is high
# volatility.
# We can't incorporate this price process directly into SDDP.jl, but we can fit
# a [`SDDP.MarkovianGraph`](@ref) directly from the simulator:
graph = SDDP.MarkovianGraph(simulator; budget = 30, scenarios = 10_000);
nothing # hide
# Here `budget` is the number of nodes in the policy graph, and `scenarios` is
# the number of simulations to use when estimating the transition probabilities.
# The graph contains too many nodes to be show, but we can plot it:
for ((t, price), edges) in graph.nodes
for ((t′, price′), probability) in edges
Plots.plot!(
plot,
[t, t′],
[price, price′];
color = "red",
width = 3 * probability,
)
end
end
plot
# That looks okay. Try changing `budget` and `scenarios` to see how different
# Markovian policy graphs can be created.
# ## Model
# Now that we have a Markovian graph, we can build the model. See if you can
# work out how we arrived at this formulation by reading the background
# description. Do all the variables and constraints make sense?
model = SDDP.PolicyGraph(
graph;
sense = :Max,
upper_bound = 1e2,
optimizer = HiGHS.Optimizer,
) do sp, node
## Decompose the node into the month (::Int) and spot price (::Float64)
t, price = node::Tuple{Int,Float64}
## Transactions on the futures market cost 0.01
c_transaction = 0.01
## It costs the company +50% to buy milk on the spot market and deliver to
## their customers
c_buy_premium = 1.5
## Buyer is willing to pay +5% for certainty
c_contango = 1.05
## Distribution of production
Ω_production = range(0.1, 0.2; length = 5)
c_max_production = 12 * maximum(Ω_production)
## x_stock: quantity of milk in stock pile
@variable(sp, 0 <= x_stock, SDDP.State, initial_value = 0)
## x_forward[i]: quantity of milk for delivery in i months
@variable(sp, 0 <= x_forward[1:4], SDDP.State, initial_value = 0)
## u_spot_sell: quantity of milk to sell on spot market
@variable(sp, 0 <= u_spot_sell <= c_max_production)
## u_spot_buy: quantity of milk to buy on spot market
@variable(sp, 0 <= u_spot_buy <= c_max_production)
    ## u_forward_sell: quantity of milk to sell on futures market
c_max_futures = t <= 8 ? c_max_production : 0.0
@variable(sp, 0 <= u_forward_sell <= c_max_futures)
## ω_production: production random variable
@variable(sp, ω_production)
## Forward contracting constraints:
@constraint(sp, [i in 1:3], x_forward[i].out == x_forward[i+1].in)
@constraint(sp, x_forward[4].out == u_forward_sell)
## Stockpile balance constraint
@constraint(
sp,
x_stock.out ==
x_stock.in + ω_production + u_spot_buy - x_forward[1].in - u_spot_sell
)
## The random variables. `price` comes from the Markov node
##
## !!! warning
## The elements in Ω MUST be a tuple with 1 or 2 values, where the first
## value is `price` and the second value is the random variable for the
## current node. If the node is deterministic, use Ω = [(price,)].
Ω = [(price, p) for p in Ω_production]
SDDP.parameterize(sp, Ω) do ω
## Fix the ω_production variable
fix(ω_production, ω[2])
@stageobjective(
sp,
## Sales on spot market
ω[1] * (u_spot_sell - c_buy_premium * u_spot_buy) +
            ## Sales on futures market
(ω[1] * c_contango - c_transaction) * u_forward_sell
)
return
end
return
end
# ## Training a policy
# Now we have a model, we train a policy. The [`SDDP.SimulatorSamplingScheme`](@ref)
# is used in the forward pass. It generates an out-of-sample sequence of prices
# using `simulator` and traverses the closest sequence of nodes in the policy
# graph. When calling [`SDDP.parameterize`](@ref) for each subproblem, it uses
# the new out-of-sample price instead of the price associated with the Markov
# node.
SDDP.train(
model;
time_limit = 20,
risk_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.AVaR(0.25),
sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
)
# !!! warning
# We're intentionally terminating the training early so that the
# documentation doesn't take too long to build. If you run this example
# locally, increase the time limit.
# ## Simulating the policy
# When simulating the policy, we can also use the
# [`SDDP.SimulatorSamplingScheme`](@ref).
simulations = SDDP.simulate(
model,
200,
Symbol[:x_stock, :u_forward_sell, :u_spot_sell, :u_spot_buy];
sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
);
nothing # hide
# To show how the sampling scheme uses the new out-of-sample price instead of
# the price associated with the Markov node, compare the index of the Markov
# state visited in stage 12 of the first simulation:
simulations[1][12][:node_index]
# to the realization of the noise `(price, ω)` passed to [`SDDP.parameterize`](@ref):
simulations[1][12][:noise_term]
# ## Visualizing the policy
# Finally, we can plot the policy to gain insight (although note that we
# terminated the training early, so we should run the re-train the policy for
# more iterations before making too many judgements).
plot = Plots.plot(
SDDP.publication_plot(simulations; title = "x_stock.out") do data
return data[:x_stock].out
end,
SDDP.publication_plot(simulations; title = "u_forward_sell") do data
return data[:u_forward_sell]
end,
SDDP.publication_plot(simulations; title = "u_spot_buy") do data
return data[:u_spot_buy]
end,
SDDP.publication_plot(simulations; title = "u_spot_sell") do data
return data[:u_spot_sell]
end;
layout = (2, 2),
)
# ## Next steps
# * Train the policy for longer. What do you observe?
# * Try creating different Markovian graphs. What happens if you add more nodes?
# * Try different risk measures
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 14702 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Example: two-stage newsvendor
# The purpose of this tutorial is to demonstrate how to model and solve a
# two-stage stochastic program.
# It is based on the [Two stage stochastic programs](https://jump.dev/JuMP.jl/dev/tutorials/applications/two_stage_stochastic/)
# tutorial in JuMP.
# This tutorial uses the following packages
using JuMP
using SDDP
import Distributions
import ForwardDiff
import HiGHS
import Plots
import StatsPlots
import Statistics
# ## Background
# The data for this problem is:
D = Distributions.TriangularDist(150.0, 250.0, 200.0)
N = 100
d = sort!(rand(D, N));
Ω = 1:N
P = fill(1 / N, N);
StatsPlots.histogram(d; bins = 20, label = "", xlabel = "Demand")
# ## Kelley's cutting plane algorithm
# Kelley's cutting plane algorithm is an iterative method for maximizing concave
# functions. Given a concave function $f(x)$, Kelley's constructs an
# outer-approximation of the function at the maximum by a set of first-order
# Taylor series approximations (called **cuts**) constructed at a set of points
# $k = 1,\ldots,K$:
# ```math
# \begin{aligned}
# f^K = \max\limits_{\theta \in \mathbb{R}, x \in \mathbb{R}^N} \;\; & \theta\\
# & \theta \le f(x_k) + \nabla f(x_k)^\top (x - x_k),\quad k=1,\ldots,K\\
# & \theta \le M,
# \end{aligned}
# ```
# where $M$ is a sufficiently large number that is an upper bound for $f$ over
# the domain of $x$.
# Kelley's cutting plane algorithm is a structured way of choosing points $x_k$
# to visit, so that as more cuts are added:
# ```math
# \lim_{K \rightarrow \infty} f^K = \max\limits_{x \in \mathbb{R}^N} f(x)
# ```
# However, before we introduce the algorithm, we need to introduce some bounds.
# ### Bounds
# By concavity, $f(x) \le f^K$ for all $x$. Thus, if $x^*$ is a maximizer of
# $f$, then at any point in time we can construct an upper bound for $f(x^*)$ by
# solving $f^K$.
# Moreover, we can use the primal solutions $x_k^*$ returned by solving $f^k$ to
# evaluate $f(x_k^*)$ to generate a lower bound.
# Therefore, $\max\limits_{k=1,\ldots,K} f(x_k^*) \le f(x^*) \le f^K$.
# When the lower bound is sufficiently close to the upper bound, we can
# terminate the algorithm and declare that we have found a solution that is
# close to optimal.
# ### Implementation
# Here is pseudo-code for the Kelley algorithm:
# 1. Take as input a concave function $f(x)$ and an iteration limit $K_{max}$.
# Set $K = 1$, and initialize $f^{K-1}$. Set $lb = -\infty$ and $ub = \infty$.
# 2. Solve $f^{K-1}$ to obtain a candidate solution $x_{K}$.
# 3. Update $ub = f^{K-1}$ and $lb = \max\{lb, f(x_{K})\}$.
# 4. Add a cut $\theta \le f(x_{K}) + \nabla f\left(x_{K}\right)^\top (x - x_{K})$ to form $f^{K}$.
# 5. Increment $K$.
# 6. If $K > K_{max}$ or $|ub - lb| < \epsilon$, STOP, otherwise, go to step 2.
# And here's a complete implementation:
function kelleys_cutting_plane(
    ## The function to be maximized.
f::Function,
## The gradient of `f`. By default, we use automatic differentiation to
## compute the gradient of f so the user doesn't have to!
∇f::Function = x -> ForwardDiff.gradient(f, x);
    ## The length of the input vector `x` passed to `f`.
input_dimension::Int,
## An upper bound for the function `f` over its domain.
upper_bound::Float64,
## The number of iterations to run Kelley's algorithm for before stopping.
iteration_limit::Int,
## The absolute tolerance ϵ to use for convergence.
tolerance::Float64 = 1e-6,
)
## Step (1):
K = 1
model = JuMP.Model(HiGHS.Optimizer)
JuMP.set_silent(model)
JuMP.@variable(model, θ <= upper_bound)
JuMP.@variable(model, x[1:input_dimension])
JuMP.@objective(model, Max, θ)
x_k = fill(NaN, input_dimension)
lower_bound, upper_bound = -Inf, Inf
while true
## Step (2):
JuMP.optimize!(model)
x_k .= JuMP.value.(x)
## Step (3):
upper_bound = JuMP.objective_value(model)
        lower_bound = max(lower_bound, f(x_k))
println("K = $K : $(lower_bound) <= f(x*) <= $(upper_bound)")
## Step (4):
JuMP.@constraint(model, θ <= f(x_k) + ∇f(x_k)' * (x .- x_k))
## Step (5):
K = K + 1
## Step (6):
if K > iteration_limit
println("-- Termination status: iteration limit --")
break
elseif abs(upper_bound - lower_bound) < tolerance
println("-- Termination status: converged --")
break
end
end
println("Found solution: x_K = ", x_k)
return
end
# Let's run our algorithm to see what happens:
kelleys_cutting_plane(;
input_dimension = 2,
upper_bound = 10.0,
iteration_limit = 20,
) do x
return -(x[1] - 1)^2 + -(x[2] + 2)^2 + 1.0
end
# ## L-Shaped theory
# The L-Shaped method is a way of solving two-stage stochastic programs by
# Benders' decomposition. It takes the problem:
# ```math
# \begin{aligned}
# V = \max\limits_{x,y_\omega} \;\; & -2x + \mathbb{E}_\omega[5y_\omega - 0.1(x - y_\omega)] \\
# & y_\omega \le x & \quad \forall \omega \in \Omega \\
# & 0 \le y_\omega \le d_\omega & \quad \forall \omega \in \Omega \\
# & x \ge 0.
# \end{aligned}
# ```
# and decomposes it into a second-stage problem:
# ```math
# \begin{aligned}
# V_2(\bar{x}, d_\omega) = \max\limits_{x,x^\prime,y_\omega} \;\; & 5y_\omega - 0.1x^\prime \\
# & y_\omega \le x \\
# & x^\prime = x - y_\omega \\
# & 0 \le y_\omega \le d_\omega \\
# & x = \bar{x} & [\lambda]
# \end{aligned}
# ```
# and a first-stage problem:
# ```math
# \begin{aligned}
# V = \max\limits_{x,\theta} \;\; & -2x + \theta \\
# & \theta \le \mathbb{E}_\omega[V_2(x, \omega)] \\
# & x \ge 0
# \end{aligned}
# ```
# Then, because $V_2$ is convex with respect to $\bar{x}$ for fixed $\omega$,
# we can use a set of feasible points $\{x^k\}$ to construct an outer approximation:
# ```math
# \begin{aligned}
# V^K = \max\limits_{x,\theta} \;\; & -2x + \theta \\
# & \theta \le \mathbb{E}_\omega[V_2(x^k, \omega) + \nabla V_2(x^k, \omega)^\top(x - x^k)] & \quad k = 1,\ldots,K\\
# & x \ge 0 \\
# & \theta \le M
# \end{aligned}
# ```
# where $M$ is an upper bound on possible values of $V_2$ so that the problem
# has a bounded solution.
# It is also useful to see that because $\bar{x}$ appears only on the right-hand
# side of a linear program, $\nabla V_2(x^k, \omega) = \lambda^k$.
# Ignoring how we choose $x^k$ for now, we can construct a lower and upper bound
# on the optimal solution:
# $$-2x^K + \mathbb{E}_\omega[V_2(x^K, \omega)] = \underbar{V} \le V \le \overline{V} = V^K$$
# Thus, we need some way of cleverly choosing a sequence of $x^k$ so that the
# lower bound converges to the upper bound.
# 1. Start with $K=1$
# 2. Solve $V^{K-1}$ to get $x^K$
# 3. Set $\overline{V} = V^k$
# 4. Solve $V_2(x^K, \omega)$ for all $\omega$ and store the optimal objective
# value and dual solution $\lambda^K$
# 5. Set $\underbar{V} = -2x^K + \mathbb{E}_\omega[V_2(x^K, \omega)]$
# 6. If $\underbar{V} \approx \overline{V}$, STOP
# 7. Add new constraint $\theta \le \mathbb{E}_\omega[V_2(x^K, \omega) +\lambda^K (x - x^K)]$
# 8. Increment $K$, GOTO 2
# The next section implements this algorithm in Julia.
# ## L-Shaped implementation
# Here's a function to solve the second-stage problem:
function solve_second_stage(x̅, d_ω)
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, x_in)
@variable(model, x_out >= 0)
fix(x_in, x̅)
@variable(model, 0 <= u_sell <= d_ω)
@constraint(model, x_out == x_in - u_sell)
@constraint(model, u_sell <= x_in)
@objective(model, Max, 5 * u_sell - 0.1 * x_out)
optimize!(model)
return (
V = objective_value(model),
λ = reduced_cost(x_in),
x = value(x_out),
u = value(u_sell),
)
end
solve_second_stage(200, 170)
# Here's the first-stage subproblem:
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, x_in == 0)
@variable(model, x_out >= 0)
@variable(model, u_make >= 0)
@constraint(model, x_out == x_in + u_make)
M = 5 * maximum(d)
@variable(model, θ <= M)
@objective(model, Max, -2 * u_make + θ)
# Importantly, to ensure we have a bounded solution, we need to add an upper
# bound to the variable `θ`.
kIterationLimit = 100
for k in 1:kIterationLimit
println("Solving iteration k = $k")
## Step 2
optimize!(model)
xᵏ = value(x_out)
println(" xᵏ = $xᵏ")
## Step 3
ub = objective_value(model)
println(" V̅ = $ub")
## Step 4
ret = [solve_second_stage(xᵏ, d[ω]) for ω in Ω]
## Step 5
lb = value(-2 * u_make) + sum(p * r.V for (p, r) in zip(P, ret))
println(" V̲ = $lb")
## Step 6
if ub - lb < 1e-6
println("Terminating with near-optimal solution")
break
end
## Step 7
c = @constraint(
model,
θ <= sum(p * (r.V + r.λ * (x_out - xᵏ)) for (p, r) in zip(P, ret)),
)
println(" Added cut: $c")
end
# To get the first-stage solution, we do:
optimize!(model)
xᵏ = value(x_out)
# To compute a second-stage solution, we do:
solve_second_stage(xᵏ, 170.0)
# ## Policy Graph
# Now let's see how we can formulate and train a policy for the two-stage
# newsvendor problem using `SDDP.jl`. Under the hood, `SDDP.jl` implements the
# exact algorithm that we just wrote by hand.
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 5 * maximum(d), # The `M` in θ <= M
optimizer = HiGHS.Optimizer,
) do subproblem::JuMP.Model, stage::Int
@variable(subproblem, x >= 0, SDDP.State, initial_value = 0)
if stage == 1
@variable(subproblem, u_make >= 0)
@constraint(subproblem, x.out == x.in + u_make)
@stageobjective(subproblem, -2 * u_make)
else
@variable(subproblem, u_sell >= 0)
@constraint(subproblem, u_sell <= x.in)
@constraint(subproblem, x.out == x.in - u_sell)
SDDP.parameterize(subproblem, d, P) do ω
set_upper_bound(u_sell, ω)
return
end
@stageobjective(subproblem, 5 * u_sell - 0.1 * x.out)
end
return
end
SDDP.train(model; log_every_iteration = true)
# One way to query the optimal policy is with [`SDDP.DecisionRule`](@ref):
first_stage_rule = SDDP.DecisionRule(model; node = 1)
#-
solution_1 = SDDP.evaluate(first_stage_rule; incoming_state = Dict(:x => 0.0))
# Here's the second stage:
second_stage_rule = SDDP.DecisionRule(model; node = 2)
solution = SDDP.evaluate(
second_stage_rule;
incoming_state = Dict(:x => solution_1.outgoing_state[:x]),
noise = 170.0, # A value of d[ω], can be out-of-sample.
controls_to_record = [:u_sell],
)
# ## Simulation
# Querying the decision rules is tedious. It's often more useful to simulate the
# policy:
simulations = SDDP.simulate(
model,
10, #= number of replications =#
[:x, :u_sell, :u_make]; #= variables to record =#
skip_undefined_variables = true,
);
# `simulations` is a vector with 10 elements
length(simulations)
# and each element is a vector with two elements (one for each stage)
length(simulations[1])
# The first stage contains:
simulations[1][1]
# The second stage contains:
simulations[1][2]
# We can compute aggregated statistics across the simulations:
objectives = map(simulations) do simulation
return sum(data[:stage_objective] for data in simulation)
end
μ, t = SDDP.confidence_interval(objectives)
println("Simulation ci : $μ ± $t")
# ## Risk aversion revisited
# SDDP.jl contains a number of risk measures. One example is:
0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()
# You can construct a risk-averse policy by passing a risk measure to the
# `risk_measure` keyword argument of [`SDDP.train`](@ref).
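# For example, a sketch (not executed here):
# ```julia
# SDDP.train(
#     model;
#     risk_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase(),
#     iteration_limit = 100,
# )
# ```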
# We can explore how the optimal decision changes with risk by creating a
# function:
function solve_newsvendor(risk_measure::SDDP.AbstractRiskMeasure)
model = SDDP.LinearPolicyGraph(;
stages = 2,
sense = :Max,
upper_bound = 5 * maximum(d),
optimizer = HiGHS.Optimizer,
) do subproblem, node
@variable(subproblem, x >= 0, SDDP.State, initial_value = 0)
if node == 1
@stageobjective(subproblem, -2 * x.out)
else
@variable(subproblem, u_sell >= 0)
@constraint(subproblem, u_sell <= x.in)
@constraint(subproblem, x.out == x.in - u_sell)
SDDP.parameterize(subproblem, d, P) do ω
set_upper_bound(u_sell, ω)
return
end
@stageobjective(subproblem, 5 * u_sell - 0.1 * x.out)
end
return
end
SDDP.train(model; risk_measure = risk_measure, print_level = 0)
first_stage_rule = SDDP.DecisionRule(model; node = 1)
solution = SDDP.evaluate(first_stage_rule; incoming_state = Dict(:x => 0.0))
return solution.outgoing_state[:x]
end
# Now we can see how many units a decision maker would order using `CVaR`:
solve_newsvendor(SDDP.CVaR(0.4))
# as well as a decision-maker who cares only about the worst-case outcome:
solve_newsvendor(SDDP.WorstCase())
# In general, the decision-maker will be somewhere between the two extremes.
# The [`SDDP.Entropic`](@ref) risk measure has a single parameter that lets us
# explore the space of policies between the two extremes.
# When the parameter is small, the measure acts like [`SDDP.Expectation`](@ref),
# and when it is large, it acts like [`SDDP.WorstCase`](@ref).
# Here is what we get if we solve our problem multiple times for different
# values of the risk aversion parameter ``\gamma``:
Γ = [10^i for i in -4:0.5:1]
buy = [solve_newsvendor(SDDP.Entropic(γ)) for γ in Γ]
Plots.plot(
Γ,
buy;
xaxis = :log,
xlabel = "Risk aversion parameter γ",
ylabel = "Number of pies to make",
legend = false,
)
# ## Things to try
# There are a number of things you can try next:
# * Experiment with different buy and sales prices
# * Experiment with different distributions of demand
# * Explore how the optimal policy changes if you use a different risk measure
# * What happens if you can only buy and sell integer numbers of newspapers?
# Try this by adding `Int` to the variable definitions:
# `@variable(subproblem, buy >= 0, Int)`
# * What happens if you use a different upper bound? Try an invalid one like
# `-100`, and a very large one like `1e12`.
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 13755 |
# Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Example: deterministic to stochastic
# The purpose of this tutorial is to explain how we can go from a deterministic
# time-staged optimal control model in JuMP to a multistage stochastic
# optimization model in `SDDP.jl`. As a motivating problem, we consider the
# hydro-thermal problem with a single reservoir.
# ## Packages
# This tutorial requires the following packages:
using JuMP
using SDDP
import CSV
import DataFrames
import HiGHS
import Plots
# ## Data
# First, we need some data for the problem. For this tutorial, we'll write CSV
# files to a temporary directory from Julia. If you have an existing file, you
# could change the filename to point to that instead.
dir = mktempdir()
filename = joinpath(dir, "example_reservoir.csv")
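# For example, pointing `filename` at an existing file would look like this
# sketch (a hypothetical path):
# ```julia
# filename = "/path/to/my_reservoir_data.csv"
# ```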
# Here is the data
csv_data = """
week,inflow,demand,cost
1,3,7,10.2\n2,2,7.1,10.4\n3,3,7.2,10.6\n4,2,7.3,10.9\n5,3,7.4,11.2\n
6,2,7.6,11.5\n7,3,7.8,11.9\n8,2,8.1,12.3\n9,3,8.3,12.7\n10,2,8.6,13.1\n
11,3,8.9,13.6\n12,2,9.2,14\n13,3,9.5,14.5\n14,2,9.8,14.9\n15,3,10.1,15.3\n
16,2,10.4,15.8\n17,3,10.7,16.2\n18,2,10.9,16.6\n19,3,11.2,17\n20,3,11.4,17.4\n
21,3,11.6,17.7\n22,2,11.7,18\n23,3,11.8,18.3\n24,2,11.9,18.5\n25,3,12,18.7\n
26,2,12,18.9\n27,3,12,19\n28,2,11.9,19.1\n29,3,11.8,19.2\n30,2,11.7,19.2\n
31,3,11.6,19.2\n32,2,11.4,19.2\n33,3,11.2,19.1\n34,2,10.9,19\n35,3,10.7,18.9\n
36,2,10.4,18.8\n37,3,10.1,18.6\n38,2,9.8,18.5\n39,3,9.5,18.4\n40,3,9.2,18.2\n
41,2,8.9,18.1\n42,3,8.6,17.9\n43,2,8.3,17.8\n44,3,8.1,17.7\n45,2,7.8,17.6\n
46,3,7.6,17.5\n47,2,7.4,17.5\n48,3,7.3,17.5\n49,2,7.2,17.5\n50,3,7.1,17.6\n
51,3,7,17.7\n52,3,7,17.8\n
"""
write(filename, csv_data);
# And here we read it into a DataFrame:
data = CSV.read(filename, DataFrames.DataFrame)
# It's easier to visualize the data if we plot it:
Plots.plot(
Plots.plot(data[!, :inflow]; ylabel = "Inflow"),
Plots.plot(data[!, :demand]; ylabel = "Demand"),
Plots.plot(data[!, :cost]; ylabel = "Cost", xlabel = "Week");
layout = (3, 1),
legend = false,
)
# The number of weeks will be useful later:
T = size(data, 1)
# ## Deterministic JuMP model
# To start, we construct a deterministic model in pure JuMP.
# Create a JuMP model, using `HiGHS` as the optimizer:
model = Model(HiGHS.Optimizer)
set_silent(model)
# `x_storage[t]`: the amount of water in the reservoir at the start of stage `t`:
reservoir_max = 320.0
@variable(model, 0 <= x_storage[1:T+1] <= reservoir_max)
# We need an initial condition for `x_storage[1]`. Fix it to 300 units:
reservoir_initial = 300
fix(x_storage[1], reservoir_initial; force = true)
# `u_flow[t]`: the amount of water to flow through the turbine in stage `t`:
flow_max = 12
@variable(model, 0 <= u_flow[1:T] <= flow_max)
# `u_spill[t]`: the amount of water to spill from the reservoir in stage `t`,
# bypassing the turbine:
@variable(model, 0 <= u_spill[1:T])
# `u_thermal[t]`: the amount of thermal generation in stage `t`:
@variable(model, 0 <= u_thermal[1:T])
# `ω_inflow[t]`: the amount of inflow to the reservoir in stage `t`:
@variable(model, ω_inflow[1:T])
# For this model, our inflow is fixed, so we fix it to the data we have:
for t in 1:T
fix(ω_inflow[t], data[t, :inflow])
end
# The water balance constraint says that the water in the reservoir at the start
# of stage `t+1` is the water in the reservoir at the start of stage `t`, less
# the amount flowed through the turbine, `u_flow[t]`, less the amount spilled,
# `u_spill[t]`, plus the amount of inflow, `ω_inflow[t]`, into the reservoir:
@constraint(
model,
[t in 1:T],
x_storage[t+1] == x_storage[t] - u_flow[t] - u_spill[t] + ω_inflow[t],
)
# We also need a `supply = demand` constraint. In practice, the units of this
# would be in MWh, and there would be a conversion factor between the amount of
# water flowing through the turbine and the power output. To simplify, we assume
# that power and water have the same units, so that one "unit" of demand is
# equal to one "unit" of the reservoir `x_storage[t]`:
@constraint(model, [t in 1:T], u_flow[t] + u_thermal[t] == data[t, :demand])
# Our objective is to minimize the cost of thermal generation:
@objective(model, Min, sum(data[t, :cost] * u_thermal[t] for t in 1:T))
# Let's optimize and check the solution
optimize!(model)
solution_summary(model)
# The total cost is:
objective_value(model)
# Here's a plot of demand and generation:
Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
Plots.plot!(value.(u_thermal); label = "Thermal")
Plots.plot!(value.(u_flow); label = "Hydro")
# And here's the storage over time:
Plots.plot(value.(x_storage); label = "Storage", xlabel = "Week")
# ## Deterministic SDDP model
# For the next step, we show how to decompose our JuMP model into SDDP.jl. It
# should obtain the same solution.
model = SDDP.LinearPolicyGraph(;
stages = T,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(
sp,
0 <= x_storage <= reservoir_max,
SDDP.State,
initial_value = reservoir_initial,
)
@variable(sp, 0 <= u_flow <= flow_max)
@variable(sp, 0 <= u_thermal)
@variable(sp, 0 <= u_spill)
@variable(sp, ω_inflow)
fix(ω_inflow, data[t, :inflow])
@constraint(sp, x_storage.out == x_storage.in - u_flow - u_spill + ω_inflow)
@constraint(sp, u_flow + u_thermal == data[t, :demand])
@stageobjective(sp, data[t, :cost] * u_thermal)
return
end
# Can you see how the JuMP model maps to this syntax? We have created a
# [`SDDP.LinearPolicyGraph`](@ref) with `T` stages, we're minimizing, and we're
# using `HiGHS.Optimizer` as the optimizer.
# A few bits might be non-obvious:
#
# * We need to provide a lower bound for the objective function. Since our costs
# are always positive, a valid lower bound for the total cost is `0.0`.
# * We define `x_storage` as a state variable using `SDDP.State`. A state
# variable is any variable that flows through time, and for which we need to
# know the value of it in stage `t-1` to compute the best action in stage `t`.
# The state variable `x_storage` is actually two decision variables,
# `x_storage.in` and `x_storage.out`, which represent `x_storage[t]` and
# `x_storage[t+1]` respectively.
# * We need to use `@stageobjective` instead of `@objective`.
# Instead of calling `JuMP.optimize!`, SDDP.jl uses a `train` method. With our
# machine learning hat on, you can think of SDDP.jl as training a function for
# each stage that accepts the current reservoir state as input and returns the
# optimal actions as output. It is also an iterative algorithm, so we need to
# specify when it should terminate:
SDDP.train(model; iteration_limit = 10)
# As a quick sanity check, did we get the same cost as our JuMP model?
SDDP.calculate_bound(model)
# That's good. Next, we want to check the values of the decision variables. This
# isn't as straightforward as it was for the JuMP model. Instead, we need to _simulate_ the policy,
# and then extract the values of the decision variables from the results of the
# simulation.
# Since our model is deterministic, we need only 1 replication of the
# simulation, and we want to record the values of the `x_storage`, `u_flow`, and
# `u_thermal` variables:
simulations = SDDP.simulate(
model,
1, # Number of replications
[:x_storage, :u_flow, :u_thermal],
);
# The `simulations` vector is too big to show. But it contains one element for
# each replication, and each replication contains one dictionary for each stage.
# For example, the data corresponding to the tenth stage in the first
# replication is:
simulations[1][10]
# Let's grab the trace of the `u_thermal` and `u_flow` variables in the first
# replication, and then plot them:
r_sim = [sim[:u_thermal] for sim in simulations[1]]
u_sim = [sim[:u_flow] for sim in simulations[1]]
Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
Plots.plot!(r_sim; label = "Thermal")
Plots.plot!(u_sim; label = "Hydro")
# Perfect. That's the same as we got before.
# Now let's look at `x_storage`. This is a little more complicated, because we
# need to grab the outgoing value of the state variable in each stage:
x_sim = [sim[:x_storage].out for sim in simulations[1]]
Plots.plot(x_sim; label = "Storage", xlabel = "Week")
# ## Stochastic SDDP model
# Now we add some randomness to our model. In each stage, we assume that the
# inflow could be: 2 units lower, with 30% probability; the same as before, with
# 40% probability; or 5 units higher, with 30% probability.
model = SDDP.LinearPolicyGraph(;
stages = T,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(
sp,
0 <= x_storage <= reservoir_max,
SDDP.State,
initial_value = reservoir_initial,
)
@variable(sp, 0 <= u_flow <= flow_max)
@variable(sp, 0 <= u_thermal)
@variable(sp, 0 <= u_spill)
@variable(sp, ω_inflow)
## <--- This bit is new
Ω, P = [-2, 0, 5], [0.3, 0.4, 0.3]
SDDP.parameterize(sp, Ω, P) do ω
fix(ω_inflow, data[t, :inflow] + ω)
return
end
## --->
@constraint(sp, x_storage.out == x_storage.in - u_flow - u_spill + ω_inflow)
@constraint(sp, u_flow + u_thermal == data[t, :demand])
@stageobjective(sp, data[t, :cost] * u_thermal)
return
end
# Can you see the differences?
# Let's train our new model. We need more iterations because of the
# stochasticity:
SDDP.train(model; iteration_limit = 100)
# Now simulate the policy. This time we do 100 replications because the policy
# is now stochastic instead of deterministic:
simulations =
SDDP.simulate(model, 100, [:x_storage, :u_flow, :u_thermal, :ω_inflow]);
# And let's plot the use of thermal generation in each replication:
plot = Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
for simulation in simulations
Plots.plot!(plot, [sim[:u_thermal] for sim in simulation]; label = "")
end
plot
# Viewing and interpreting static plots like this is difficult, particularly as
# the number of simulations grows. SDDP.jl includes an interactive
# `SpaghettiPlot` that makes things easier:
plot = SDDP.SpaghettiPlot(simulations)
SDDP.add_spaghetti(plot; title = "Storage") do sim
return sim[:x_storage].out
end
SDDP.add_spaghetti(plot; title = "Hydro") do sim
return sim[:u_flow]
end
SDDP.add_spaghetti(plot; title = "Inflow") do sim
return sim[:ω_inflow]
end
SDDP.plot(
plot,
"spaghetti_plot.html";
## We need this to build the documentation. Set to true if running locally.
open = false,
)
# ```@raw html
# <iframe src="../spaghetti_plot.html" style="width:100%;height:500px;"></iframe>
# ```
# !!! info
# If you have trouble viewing the plot, you can
# [open it in a new window](spaghetti_plot.html).
# ## Cyclic graphs
# One major problem with our model is that the reservoir is empty at the end of
# the time horizon. This is because our model does not consider the cost of
# future years after the `T` weeks.
# We can fix this using a cyclic policy graph. One way to construct a graph is
# with the [`SDDP.UnicyclicGraph`](@ref) constructor:
SDDP.UnicyclicGraph(0.7; num_nodes = 2)
# This graph has two nodes, and a loop from node 2 back to node 1 with
# probability 0.7.
# We can construct a cyclic policy graph as follows:
graph = SDDP.UnicyclicGraph(0.95; num_nodes = T)
model = SDDP.PolicyGraph(
graph;
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(
sp,
0 <= x_storage <= reservoir_max,
SDDP.State,
initial_value = reservoir_initial,
)
@variable(sp, 0 <= u_flow <= flow_max)
@variable(sp, 0 <= u_thermal)
@variable(sp, 0 <= u_spill)
@variable(sp, ω_inflow)
Ω, P = [-2, 0, 5], [0.3, 0.4, 0.3]
SDDP.parameterize(sp, Ω, P) do ω
fix(ω_inflow, data[t, :inflow] + ω)
return
end
@constraint(sp, x_storage.out == x_storage.in - u_flow - u_spill + ω_inflow)
@constraint(sp, u_flow + u_thermal == data[t, :demand])
@stageobjective(sp, data[t, :cost] * u_thermal)
return
end
# Notice how the only thing that has changed is our graph; the subproblems
# remain the same.
# Let's train a policy:
SDDP.train(model; iteration_limit = 100)
# When we simulate now, each trajectory will be a different length, because
# each cycle has a 95% probability of continuing and a 5% probability of
# stopping.
simulations = SDDP.simulate(model, 3);
length.(simulations)
# We can simulate a fixed number of cycles by passing a `sampling_scheme`:
simulations = SDDP.simulate(
model,
100,
[:x_storage, :u_flow];
sampling_scheme = SDDP.InSampleMonteCarlo(;
max_depth = 5 * T,
terminate_on_dummy_leaf = false,
),
);
length.(simulations)
# Let's visualize the policy:
Plots.plot(
SDDP.publication_plot(simulations; ylabel = "Storage") do sim
return sim[:x_storage].out
end,
SDDP.publication_plot(simulations; ylabel = "Hydro") do sim
return sim[:u_flow]
end;
layout = (2, 1),
)
# ## Next steps
# Our model is very basic. There are many aspects that we could improve:
#
# * Can you add a second reservoir to make a river chain?
#
# * Can you modify the problem and data to use proper units, including a
# conversion between the volume of water flowing through the turbine and the
# electrical power output?
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 36345 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # An introduction to SDDP.jl
# SDDP.jl is a solver for multistage stochastic optimization problems. By
# **multistage**, we mean problems in which an agent makes a sequence of
# decisions over time. By **stochastic**, we mean that the agent is making
# decisions in the presence of uncertainty that is gradually revealed over the
# multiple stages.
# !!! tip
# Multistage stochastic programming has a lot in common with fields like
# stochastic optimal control, approximate dynamic programming, Markov
# decision processes, and reinforcement learning. If it helps, you can think
# of SDDP as Q-learning in which we approximate the value function using
# linear programming duality.
# This tutorial is in two parts. First, it is an introduction to the background
# notation and theory we need, and second, it solves a simple multistage
# stochastic programming problem.
# ## What is a node?
# A common feature of multistage stochastic optimization problems is that they
# model an agent controlling a system over time. To simplify things initially,
# we're going to start by describing what happens at an instant in time at which
# the agent makes a decision. Only after this will we extend our problem to
# multiple stages and the notion of time.
# A **node** is a place at which the agent makes a decision.
# !!! tip
# For readers with a stochastic programming background, "node" is synonymous
# with "stage" in this section. However, for reasons that will become clear
# shortly, there can be more than one "node" per instant in time, which is
# why we prefer the term "node" over "stage."
# ### States, controls, and random variables
# The system that we are modeling can be described by three types of variables.
# 1. **State** variables track a property of the system over time.
# Each node has an associated _incoming_ state variable (the value of the
# state at the start of the node), and an _outgoing_ state variable (the
# value of the state at the end of the node).
#
# Examples of state variables include the volume of water in a reservoir, the
# number of units of inventory in a warehouse, or the spatial position of a
# moving vehicle.
#
# Because state variables track the system over time, each node must have the
# same set of state variables.
#
# We denote state variables by the letter $x$ for the incoming state variable
# and $x^\prime$ for the outgoing state variable.
#
# 2. **Control** variables are actions taken (implicitly or explicitly) by the
# agent within a node which modify the state variables.
#
# Examples of control variables include releases of water from the reservoir,
# sales or purchasing decisions, and acceleration or braking of the vehicle.
#
# Control variables are local to a node $i$, and they can differ between
#    nodes. For example, some control variables may only be available within certain
# nodes.
#
# We denote control variables by the letter $u$.
#
# 3. **Random** variables are finite, discrete, exogenous random variables that
# the agent observes at the start of a node, before the control variables are
# decided.
#
# Examples of random variables include rainfall inflow into a reservoir,
# probabilistic perishing of inventory, and steering errors in a vehicle.
#
# Random variables are local to a node $i$, and they can differ between
# nodes. For example, some nodes may have random variables, and some nodes
# may not.
#
# We denote random variables by the Greek letter $\omega$ and the sample
# space from which they are drawn by $\Omega_i$. The probability of sampling
# $\omega$ is denoted $p_{\omega}$ for simplicity.
#
# Importantly, the random variable associated with node $i$ is independent of
# the random variables in all other nodes.
# ### Dynamics
# In a node $i$, the three variables are related by a **transition function**,
# which maps the incoming state, the controls, and the random variables to the
# outgoing state as follows: $x^\prime = T_i(x, u, \omega)$.
# As a result of entering a node $i$ with the incoming state $x$, observing
# random variable $\omega$, and choosing control $u$, the agent incurs a cost
# $C_i(x, u, \omega)$. (If the agent is a maximizer, this can be a profit, or a
# negative cost.) We call $C_i$ the **stage objective**.
# To choose their control variables in node $i$, the agent uses a **decision**
# **rule** $u = \pi_i(x, \omega)$, which is a function that maps the incoming
# state variable and observation of the random variable to a control $u$. This
# control must satisfy some feasibility requirements $u \in U_i(x, \omega)$.
# Here is a schematic which we can use to visualize a single node:
# 
# ## Policy graphs
# Now that we have a node, we need to connect multiple nodes together to form a
# multistage stochastic program. We call the graph created by connecting nodes
# together a **policy graph**.
# The simplest type of policy graph is a **linear policy graph**. Here's a
# linear policy graph with three nodes:
# 
# Here we have dropped the notations inside each node and replaced them by a
# label (1, 2, and 3) to represent nodes `i=1`, `i=2`, and `i=3`.
# In addition to nodes 1, 2, and 3, there is also a root node (the circle), and
# three arcs. Each arc has an origin node and a destination node, like `1 => 2`,
# and a corresponding probability of transitioning from the origin to the
# destination. Unless specified, we assume that the arc probabilities are
# uniform over the number of outgoing arcs. Thus, in this picture the arc
# probabilities are all 1.0.
# State variables flow along the arcs of the graph. Thus, the outgoing state
# variable $x^\prime$ from node 1 becomes the incoming state variable $x$ to
# node 2, and so on.
# We denote the set of nodes by $\mathcal{N}$, the root node by $R$, and the
# probability of transitioning from node $i$ to node $j$ by $p_{ij}$. (If no arc
# exists, then $p_{ij} = 0$.) We define the set of successors of node $i$ as
# $i^+ = \{j \in \mathcal{N} | p_{ij} > 0\}$.
# Each node in the graph corresponds to a place at which the agent makes a
# decision, and we call moments in time at which the agent makes a decision
# **stages**. By convention, we try to draw policy graphs from left-to-right,
# with the stages as columns. There can be more than one node in a stage! Here's
# an example of a structure we call a **Markovian policy graph**:
# 
# Here each column represents a moment in time, the squiggly lines represent
# stochastic rainfall, and the rows represent the world in two discrete states:
# El Niño and La Niña. In the El Niño states, the distribution of the rainfall
# random variable is different to the distribution of the rainfall random
# variable in the La Niña states, and there is some switching probability
# between the two states that can be modelled by a Markov chain.
# Moreover, policy graphs can have cycles! This allows them to model infinite
# horizon problems. Here's another example, taken from the paper
# [Dowson (2020)](https://doi.org/10.1002/net.21932):
# 
# The columns represent time, and the rows represent different states of the
# world. In this case, the rows represent different prices that milk can be sold
# for at the end of each year. The squiggly lines denote a multivariate random
# variable that models the weekly amount of rainfall that occurs.
# !!! note
# The sum of probabilities on the outgoing arcs of node $i$ can be less than
# 1, i.e., $\sum\limits_{j\in i^+} p_{ij} \le 1$. What does this mean?
# One interpretation is that the probability is a [discount factor](https://en.wikipedia.org/wiki/Discounting).
# Another interpretation is that there is an implicit "zero" node that we
# have not modeled, with $p_{i0} = 1 - \sum\limits_{j\in i^+} p_{ij}$.
# This zero node has $C_0(x, u, \omega) = 0$, and $0^+ = \varnothing$.
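# As a preview, here is a sketch of a two-node graph in which the probabilities
# on the outgoing arcs of node 1 sum to 0.9, that is, a 10% chance of stopping
# after the first node (these constructors are not needed for the rest of this
# tutorial):
# ```julia
# graph = SDDP.Graph(:root)
# SDDP.add_node(graph, :node_1)
# SDDP.add_node(graph, :node_2)
# SDDP.add_edge(graph, :root => :node_1, 1.0)
# SDDP.add_edge(graph, :node_1 => :node_2, 0.9)
# ```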
# ## More notation
# Recall that each node $i$ has a **decision rule** $u = \pi_i(x, \omega)$,
# which is a function that maps the incoming state variable and observation of
# the random variable to a control $u$.
# The set of decision rules, with one element for each node in the policy graph,
# is called a **policy**.
# The goal of the agent is to find a policy that minimizes the expected cost of
# starting at the root node with some initial condition $x_R$, and proceeding
# from node to node along the probabilistic arcs until they reach a node with no
# outgoing arcs (or it reaches an implicit "zero" node).
# ```math
# \min_{\pi} \mathbb{E}_{i \in R^+, \omega \in \Omega_i}[V_i^\pi(x_R, \omega)],
# ```
# where
# ```math
# V_i^\pi(x, \omega) = C_i(x, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)],
# ```
# where $u = \pi_i(x, \omega) \in U_i(x, \omega)$, and
# $x^\prime = T_i(x, u, \omega)$.
# The expectations are a bit complicated, but they are equivalent to:
# ```math
# \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)] = \sum\limits_{j \in i^+} p_{ij} \sum\limits_{\varphi \in \Omega_j} p_{\varphi}V_j(x^\prime, \varphi).
# ```
# An optimal policy is the set of decision rules that the agent can use to make
# decisions and achieve the smallest expected cost.
# ## Assumptions
# !!! warning
# This section is important!
# The space of problems you can model with this framework is very large. Too
# large, in fact, for us to form tractable solution algorithms for! Stochastic
# dual dynamic programming requires the following assumptions in order to work:
# **Assumption 1: finite nodes**
#
# There is a finite number of nodes in $\mathcal{N}$.
#
# **Assumption 2: finite random variables**
#
# The sample space $\Omega_i$ is finite and discrete for each node
# $i\in\mathcal{N}$.
#
# **Assumption 3: convex problems**
#
# Given fixed $\omega$, $C_i(x, u, \omega)$ is a convex function,
# $T_i(x, u, \omega)$ is linear, and $U_i(x, \omega)$ is a non-empty,
# bounded convex set with respect to $x$ and $u$.
#
# **Assumption 4: no infinite loops**
#
# For all loops in the policy graph, the product of the arc transition
# probabilities around the loop is strictly less than 1.
#
# **Assumption 5: relatively complete recourse**
#
# This is a technical but important assumption. See [Relatively complete recourse](@ref)
# for more details.
# !!! note
# SDDP.jl relaxes assumption (3) to allow for integer state and control
# variables, but we won't go into the details here. Assumption (4)
# essentially means that we obtain a discounted-cost solution for
# infinite-horizon problems, instead of an average-cost solution; see
# [Dowson (2020)](https://doi.org/10.1002/net.21932) for details.
# ## Dynamic programming and subproblems
# Now that we have formulated our problem, we need some ways of computing
# optimal decision rules. One way is to just use a heuristic like "choose a
# control randomly from the set of feasible controls." However, such a policy
# is unlikely to be optimal.
# A better way of obtaining an optimal policy is to use [Bellman's principle of
# optimality](https://en.wikipedia.org/wiki/Bellman_equation#Bellman's_principle_of_optimality),
# a.k.a Dynamic Programming, and define a recursive **subproblem** as follows:
# ```math
# \begin{aligned}
# V_i(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]\\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega) \\
# & \bar{x} = x.
# \end{aligned}
# ```
# Our decision rule, $\pi_i(x, \omega)$, solves this optimization problem and
# returns a $u^*$ corresponding to an optimal solution.
#
# !!! note
# We add $\bar{x}$ as a decision variable, along with the fishing constraint
# $\bar{x} = x$ for two reasons: it makes it obvious that formulating a
# problem with $x \times u$ results in a bilinear program instead of a
# linear program (see Assumption 3), and it simplifies the implementation of
# the SDDP algorithm.
# These subproblems are very difficult to solve exactly, because they involve
# recursive optimization problems with lots of nested expectations.
# Therefore, instead of solving them exactly, SDDP.jl works by iteratively
# approximating the expectation term of each subproblem, which is also called
# the cost-to-go term. For now, you don't need to understand the details, other
# than that there is a nasty cost-to-go term that we deal with
# behind-the-scenes.
# The subproblem view of a multistage stochastic program is also important,
# because it provides a convenient way of communicating the different parts of
# the broader problem, and it is how we will communicate the problem to SDDP.jl.
# All we need to do is drop the cost-to-go term and fishing constraint, and
# define a new subproblem `SP` as:
# ```math
# \begin{aligned}
# \texttt{SP}_i(x, \omega) : \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) \\
# & x^\prime = T_i(\bar{x}, u, \omega) \\
# & u \in U_i(\bar{x}, \omega).
# \end{aligned}
# ```
# !!! note
# When we talk about formulating a **subproblem** with SDDP.jl, this is the
# formulation we mean.
# We've retained the transition function and uncertainty set because they help
# to motivate the different components of the subproblem. However, in general,
# the subproblem can be more general. A better (less restrictive) representation
# might be:
# ```math
# \begin{aligned}
# \texttt{SP}_i(x, \omega) : \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, x^\prime, u, \omega) \\
# & (\bar{x}, x^\prime, u) \in \mathcal{X}_i(\omega).
# \end{aligned}
# ```
# Note that the outgoing state variable can appear in the objective, and we can
# add constraints involving the incoming and outgoing state variables. It
# should be obvious how to map between the two representations.
# ## Example: hydro-thermal scheduling
# Hydrothermal scheduling is the most common application of stochastic dual
# dynamic programming. To illustrate some of the basic functionality of
# `SDDP.jl`, we implement a very simple model of the hydrothermal scheduling
# problem.
# ### Problem statement
# We consider the problem of scheduling electrical generation over three weeks
# in order to meet a known demand of 150 MWh in each week.
#
# There are two generators: a thermal generator, and a hydro generator. In each
# week, the agent needs to decide how much energy to generate from thermal, and
# how much energy to generate from hydro.
#
# The thermal generator has a short-run marginal cost of \$50/MWh in the first
# stage, \$100/MWh in the second stage, and \$150/MWh in the third stage.
#
# The hydro generator has a short-run marginal cost of \$0/MWh.
#
# The hydro generator draws water from a reservoir which has a maximum capacity
# of 200 MWh. (Although water is usually measured in m³, we measure it in the
# energy-equivalent MWh to simplify things. In practice, there is a conversion
# function between m³ flowing through the turbine and MWh.) At the start of the
# first time period, the reservoir is full.
#
# In addition to the ability to generate electricity by passing water through
# the hydroelectric turbine, the hydro generator can also spill water down a
# spillway (bypassing the turbine) in order to prevent the water from
# over-topping the dam. We assume that there is no cost of spillage.
# In addition to water leaving the reservoir, water that flows into the reservoir
# through rainfall or rivers is referred to as inflow. These inflows are
# uncertain, and are the cause of the main trade-off in hydro-thermal
# scheduling: the desire to use water now to generate cheap electricity, against
# the risk that future inflows will be low, leading to blackouts or expensive
# thermal generation.
# For our simple model, we assume that the inflows can be modelled by a discrete
# distribution with the three outcomes given in the following table:
#
# | ω | 0 | 50 | 100 |
# | ---- | --- | --- | --- |
# | P(ω) | 1/3 | 1/3 | 1/3 |
# The value of the noise (the random variable) is observed by the agent at the
# start of each stage. This makes the problem a _wait-and-see_ or
# _hazard-decision_ formulation.
# The goal of the agent is to minimize the expected cost of generation over the
# three weeks.
# ### Formulating the problem
# Before going further, we need to load SDDP.jl:
using SDDP
# #### Graph structure
# First, we need to identify the structure of the policy graph. From the problem
# statement, we want to model the problem over three weeks in weekly stages.
# Therefore, the policy graph is a linear graph with three stages:
graph = SDDP.LinearGraph(3)
# #### Building the subproblem
# Next, we need to construct the associated subproblem for each node in `graph`.
# To do so, we need to provide SDDP.jl a function which takes two arguments. The
# first is `subproblem::Model`, which is an empty JuMP model. The second is
# `node`, which is the name of each node in the policy graph. If the graph is
# linear, SDDP defaults to naming the nodes using the integers in `1:T`. Here's
# an example that we are going to flesh out over the next few paragraphs:
function subproblem_builder(subproblem::Model, node::Int)
## ... stuff to go here ...
return subproblem
end
# !!! warning
# If you use a different type of graph, `node` may be a type different to
# `Int`. For example, in [`SDDP.MarkovianGraph`](@ref), `node` is a
# `Tuple{Int,Int}`.
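# For example, a builder for a Markovian graph might look like this hypothetical
# sketch (not needed for this tutorial), where the node is a
# `(stage, markov_state)` tuple:
# ```julia
# function markovian_builder(subproblem::Model, node::Tuple{Int,Int})
#     stage, markov_state = node
#     # ... build the subproblem using `stage` and `markov_state` ...
#     return subproblem
# end
# ```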
# #### State variables
# The first part of the subproblem we need to identify are the state variables.
# Since we only have one reservoir, there is only one state variable, `volume`,
# the volume of water in the reservoir [MWh].
#
# The volume had bounds of `[0, 200]`, and the reservoir was full at the start
# of time, so $x_R = 200$.
# We add state variables to our `subproblem` using JuMP's `@variable` macro.
# However, in addition to the usual syntax, we also pass `SDDP.State`, and we
# need to provide the initial value ($x_R$) using the `initial_value` keyword.
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
return subproblem
end
# The syntax for adding a state variable is a little obtuse, because `volume` is
# not a single JuMP variable. Instead, `volume` is a struct with two fields, `.in`
# and `.out`, corresponding to the incoming and outgoing state variables
# respectively.
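# Both fields can be used like ordinary JuMP variables. For example, as a
# preview of the transition function we add later in this tutorial:
# ```julia
# @constraint(subproblem, volume.out == volume.in - hydro_generation - hydro_spill + inflow)
# ```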
# !!! note
# We don't need to add the fishing constraint $\bar{x} = x$; SDDP.jl does
# this automatically.
# #### Control variables
# The next part of the subproblem we need to identify are the control variables.
# The control variables for our problem are:
# - `thermal_generation`: the quantity of energy generated from thermal
# [MWh/week]
# - `hydro_generation`: the quantity of energy generated from hydro [MWh/week]
# - `hydro_spill`: the volume of water spilled from the reservoir in each week
# [MWh/week]
# Each of these variables is non-negative.
# We add control variables to our `subproblem` as normal JuMP variables, using
# `@variable` or `@variables`:
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
return subproblem
end
# !!! tip
# Modeling is an art, and a tricky part of that art is figuring out which
# variables are state variables, and which are control variables. A good
# rule is: if you need a value of a control variable in some future node to
# make a decision, it is a state variable instead.
# #### Random variables
# The next step is to identify any random variables. In our example, we had
# - `inflow`: the quantity of water that flows into the reservoir each week
# [MWh/week]
# To add an uncertain variable to the model, we create a new JuMP variable
# `inflow`, and then call the function [`SDDP.parameterize`](@ref). The
# [`SDDP.parameterize`](@ref) function takes three arguments: the subproblem, a
# vector of realizations, and a corresponding vector of probabilities.
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
## Random variables
@variable(subproblem, inflow)
Ω = [0.0, 50.0, 100.0]
P = [1 / 3, 1 / 3, 1 / 3]
SDDP.parameterize(subproblem, Ω, P) do ω
return JuMP.fix(inflow, ω)
end
return subproblem
end
# Note how we use the JuMP function
# [`JuMP.fix`](https://jump.dev/JuMP.jl/stable/reference/variables/#JuMP.fix)
# to set the value of the `inflow` variable to `ω`.
# !!! warning
# [`SDDP.parameterize`](@ref) can only be called once in each subproblem
# definition! If your random variable is multi-variate, read
# [Add multi-dimensional noise terms](@ref).
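# As a sketch of the multi-variate case (see the linked how-to for details), one
# approach is to collect each realization into a named tuple and fix several
# variables inside the single `parameterize` call; the variable `demand` below
# is a hypothetical second random variable:
# ```julia
# Ω = [(inflow = 0.0, demand = 150.0), (inflow = 50.0, demand = 160.0)]
# SDDP.parameterize(subproblem, Ω, [0.5, 0.5]) do ω
#     JuMP.fix(inflow, ω.inflow)
#     JuMP.fix(demand, ω.demand)
#     return
# end
# ```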
# #### Transition function and constraints
# Now that we've identified our variables, we can define the transition function
# and the constraints.
# For our problem, the state variable is the volume of water in the reservoir.
# The volume of water decreases in response to water being used for hydro
# generation and spillage. So the transition function is:
# `volume.out = volume.in - hydro_generation - hydro_spill + inflow`. (Note how
# we use `volume.in` and `volume.out` to refer to the incoming and outgoing
# state variables.)
# There is also a constraint that the total generation must sum to 150 MWh.
# Both the transition function and any additional constraint are added using
# JuMP's `@constraint` and `@constraints` macro.
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
## Random variables
@variable(subproblem, inflow)
Ω = [0.0, 50.0, 100.0]
P = [1 / 3, 1 / 3, 1 / 3]
SDDP.parameterize(subproblem, Ω, P) do ω
return JuMP.fix(inflow, ω)
end
## Transition function and constraints
@constraints(
subproblem,
begin
volume.out == volume.in - hydro_generation - hydro_spill + inflow
demand_constraint, hydro_generation + thermal_generation == 150
end
)
return subproblem
end
# #### Objective function
# Finally, we need to add an objective function using `@stageobjective`. The
# objective of the agent is to minimize the cost of thermal generation. This is
# complicated by a fuel cost that depends on the `node`.
#
# One possibility is to use an `if` statement on `node` to define the correct
# objective:
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
## Random variables
@variable(subproblem, inflow)
Ω = [0.0, 50.0, 100.0]
P = [1 / 3, 1 / 3, 1 / 3]
SDDP.parameterize(subproblem, Ω, P) do ω
return JuMP.fix(inflow, ω)
end
## Transition function and constraints
@constraints(
subproblem,
begin
volume.out == volume.in - hydro_generation - hydro_spill + inflow
demand_constraint, hydro_generation + thermal_generation == 150
end
)
## Stage-objective
if node == 1
@stageobjective(subproblem, 50 * thermal_generation)
elseif node == 2
@stageobjective(subproblem, 100 * thermal_generation)
else
@assert node == 3
@stageobjective(subproblem, 150 * thermal_generation)
end
return subproblem
end
# A second possibility is to use an array of fuel costs, and use `node` to index
# the correct value:
function subproblem_builder(subproblem::Model, node::Int)
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
## Random variables
@variable(subproblem, inflow)
Ω = [0.0, 50.0, 100.0]
P = [1 / 3, 1 / 3, 1 / 3]
SDDP.parameterize(subproblem, Ω, P) do ω
return JuMP.fix(inflow, ω)
end
## Transition function and constraints
@constraints(
subproblem,
begin
volume.out == volume.in - hydro_generation - hydro_spill + inflow
demand_constraint, hydro_generation + thermal_generation == 150
end
)
## Stage-objective
fuel_cost = [50, 100, 150]
@stageobjective(subproblem, fuel_cost[node] * thermal_generation)
return subproblem
end
# ### Constructing the model
# Now that we've written our subproblem, we need to construct the full model.
# For that, we're going to need a linear solver. Let's choose HiGHS:
using HiGHS
# !!! warning
# In larger problems, you should use a more robust commercial LP solver like
# Gurobi. Read [Words of warning](@ref) for more details.
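# For example, if you have Gurobi.jl installed and licensed, you could swap the
# optimizer (a sketch; we keep HiGHS for this tutorial):
# ```julia
# import Gurobi
# model = SDDP.PolicyGraph(
#     subproblem_builder,
#     graph;
#     sense = :Min,
#     lower_bound = 0.0,
#     optimizer = Gurobi.Optimizer,
# )
# ```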
# Then, we can create a full model using [`SDDP.PolicyGraph`](@ref), passing our
# `subproblem_builder` function as the first argument, and our `graph` as the
# second:
model = SDDP.PolicyGraph(
subproblem_builder,
graph;
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
)
# * `sense`: the optimization sense. Must be `:Min` or `:Max`.
# * `lower_bound`: you _must_ supply a valid bound on the objective. For our
# problem, we know that we cannot incur a negative cost so \$0 is a valid
# lower bound.
# * `optimizer`: This is borrowed directly from JuMP's `Model` constructor:
# `Model(HiGHS.Optimizer)`
# Because linear policy graphs are the most commonly used structure, we can use
# [`SDDP.LinearPolicyGraph`](@ref) instead of passing `SDDP.LinearGraph(3)` to
# [`SDDP.PolicyGraph`](@ref).
model = SDDP.LinearPolicyGraph(
subproblem_builder;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
)
# There is also the option to use Julia's `do` syntax to avoid needing to
# define a `subproblem_builder` function separately:
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
## State variables
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Control variables
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
end)
## Random variables
@variable(subproblem, inflow)
Ω = [0.0, 50.0, 100.0]
P = [1 / 3, 1 / 3, 1 / 3]
SDDP.parameterize(subproblem, Ω, P) do ω
return JuMP.fix(inflow, ω)
end
## Transition function and constraints
@constraints(
subproblem,
begin
volume.out == volume.in - hydro_generation - hydro_spill + inflow
demand_constraint, hydro_generation + thermal_generation == 150
end
)
## Stage-objective
if node == 1
@stageobjective(subproblem, 50 * thermal_generation)
elseif node == 2
@stageobjective(subproblem, 100 * thermal_generation)
else
@assert node == 3
@stageobjective(subproblem, 150 * thermal_generation)
end
end
# !!! info
# Julia's `do` syntax is just a different way of passing an anonymous
# function `inner` to some function `outer` which takes `inner` as the first
# argument. For example, given:
# ```julia
# outer(inner::Function, x, y) = inner(x, y)
# ```
# then
# ```julia
# outer(1, 2) do x, y
# return x^2 + y^2
# end
# ```
# is equivalent to:
# ```julia
# outer((x, y) -> x^2 + y^2, 1, 2)
# ```
# For our purpose, `inner` is `subproblem_builder`, and `outer` is
# [`SDDP.PolicyGraph`](@ref).
# ## Training a policy
# Now we have a model, which is a description of the policy graph, we need to
# train a policy. Models can be trained using the [`SDDP.train`](@ref) function.
# It accepts a number of keyword arguments. `iteration_limit` terminates the
# training after the provided number of iterations.
SDDP.train(model; iteration_limit = 10)
# There's a lot going on in this printout! Let's break it down.
# The first section, "problem," gives some problem statistics. In this example
# there are 3 nodes, 1 state variable, and 27 scenarios ($3^3$). We haven't
# solved this problem before so there are no existing cuts.
# The "options" section lists some options we are using to solve the problem.
# For more information on the numerical stability report, read the
# [Numerical stability report](@ref) section.
# The "subproblem structure" section also needs explaining. This looks at all of
# the nodes in the policy graph and reports the minimum and maximum number of
# variables and each constraint type in the corresponding subproblem. In this
# case each subproblem has 7 variables and various numbers of different
# constraint types. Note that the exact numbers may not correspond to the
# formulation as you wrote it, because SDDP.jl adds some extra variables for the
# cost-to-go function.
# Then comes the iteration log, which is the main part of the printout. It has
# the following columns:
# - `iteration`: the SDDP iteration
# - `simulation`: the cost of the single forward pass simulation for that
# iteration. This value is stochastic and is not guaranteed to improve over
# time. However, it's useful to check that the units are reasonable, and that
# it is not deterministic if you intended for the problem to be stochastic,
# etc.
# - `bound`: this is a lower bound (upper if maximizing) for the value of the
# optimal policy. This bound should be monotonically improving (increasing if
# minimizing, decreasing if maximizing), but in some cases it can temporarily
# worsen due to cut selection, especially in the early iterations of the
# algorithm.
# - `time (s)`: the total number of seconds spent solving so far
# - `solves`: the total number of subproblem solves to date. This can be very
# large!
# - `pid`: the ID of the processor used to solve that iteration. This
# should be 1 unless you are using parallel computation.
# In addition, if the first character of a line is `†`, then SDDP.jl experienced
# numerical issues during the solve, but successfully recovered.
# The printout finishes with some summary statistics:
#
# - `status`: why did the solver stop?
# - `total time (s)`, `best bound`, and `total solves` are the values from the
# last iteration of the solve.
# - `simulation ci`: a confidence interval that estimates the quality of the
# policy from the `Simulation` column.
# - `numeric issues`: the number of iterations that experienced numerical
# issues.
# !!! warning
# The `simulation ci` result can be misleading if you run a small number of
# iterations, or if the initial simulations are very bad. On a more
# technical note, it is an _in-sample simulation_, which may not reflect the
# true performance of the policy. See [Obtaining bounds](@ref) for more
# details.
# ## Obtaining the decision rule
# After training a policy, we can create a decision rule using
# [`SDDP.DecisionRule`](@ref):
rule = SDDP.DecisionRule(model; node = 1)
# Then, to evaluate the decision rule, we use [`SDDP.evaluate`](@ref):
solution = SDDP.evaluate(
rule;
incoming_state = Dict(:volume => 150.0),
noise = 50.0,
controls_to_record = [:hydro_generation, :thermal_generation],
)
# ## Simulating the policy
# Once you have a trained policy, you can also simulate it using
# [`SDDP.simulate`](@ref). The return value from `simulate` is a vector with one
# element for each replication. Each element is itself a vector, with one
# element for each stage. Each element, corresponding to a particular stage in a
# particular replication, is a dictionary that records information from the
# simulation.
simulations = SDDP.simulate(
## The trained model to simulate.
model,
## The number of replications.
100,
## A list of names to record the values of.
[:volume, :thermal_generation, :hydro_generation, :hydro_spill],
)
replication = 1
stage = 2
simulations[replication][stage]
# Ignore many of the entries for now; they will be relevant later.
# One element of interest is `:volume`.
outgoing_volume = map(simulations[1]) do node
return node[:volume].out
end
# Another is `:thermal_generation`.
thermal_generation = map(simulations[1]) do node
return node[:thermal_generation]
end
# ## Obtaining bounds
# Because the optimal policy is stochastic, one common approach to quantify the
# quality of the policy is to construct a confidence interval for the expected
# cost by summing the stage objectives along each simulation.
objectives = map(simulations) do simulation
return sum(stage[:stage_objective] for stage in simulation)
end
μ, ci = SDDP.confidence_interval(objectives)
println("Confidence interval: ", μ, " ± ", ci)
# This confidence interval is an estimate for an upper bound of the policy's
# quality. We can calculate the lower bound using [`SDDP.calculate_bound`](@ref).
println("Lower bound: ", SDDP.calculate_bound(model))
# !!! tip
#     The upper and lower bounds are reversed if maximizing, i.e.,
#     [`SDDP.calculate_bound`](@ref) returns an upper bound.
# ## Custom recorders
# In addition to simulating the primal values of variables, we can also pass
# custom recorder functions. Each of these functions takes one argument, the
# JuMP subproblem corresponding to each node. This function gets called after we
# have solved each node as we traverse the policy graph in the simulation.
# For example, the dual of the demand constraint (which we named
# `demand_constraint`) corresponds to the price we should charge for
# electricity, since it represents the cost of each additional unit of demand.
# To calculate this, we can go:
simulations = SDDP.simulate(
model,
1; ## Perform a single simulation
custom_recorders = Dict{Symbol,Function}(
:price => (sp::JuMP.Model) -> JuMP.dual(sp[:demand_constraint]),
),
)
prices = map(simulations[1]) do node
return node[:price]
end
# ## Extracting the marginal water values
# Finally, we can use [`SDDP.ValueFunction`](@ref) and [`SDDP.evaluate`](@ref)
# to obtain and evaluate the value function at different points in the
# state-space.
# !!! note
# By "value function" we mean $\mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$,
# not the function $V_i(x, \omega)$.
# First, we construct a value function from the first subproblem:
V = SDDP.ValueFunction(model; node = 1)
# Then we can evaluate `V` at a point:
cost, price = SDDP.evaluate(V, Dict("volume" => 10))
# This returns the cost-to-go (`cost`), and the gradient of the cost-to-go
# function with respect to each state variable. Note that since we are
# minimizing, the price has a negative sign: each additional unit of water leads
# to a decrease in the expected long-run cost.
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5133 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Markovian policy graphs
# In our previous tutorials ([An introduction to SDDP.jl](@ref) and
# [Uncertainty in the objective function](@ref)), we formulated a simple
# hydrothermal scheduling problem with stagewise-independent random variables in
# the right-hand side of the constraints and in the objective function.
# Now, in this tutorial, we introduce some *stagewise-dependent* uncertainty
# using a Markov chain.
# ## Formulating the problem
# In this tutorial we consider a Markov chain with two *climate* states: wet and
# dry. Each Markov state is associated with an integer, in this case the wet
# climate state is Markov state `1` and the dry climate state is Markov state
# `2`. In the wet climate state, the probability of the high inflow increases to
# 50%, and the probability of the low inflow decreases to 1/6. In the dry
# climate state, the converse happens. There is also persistence in the climate
# state: the probability of remaining in the current state is 75%, and the
# probability of transitioning to the other climate state is 25%. We assume that
# the first stage starts in the wet climate state.
# Here is a picture of the model we're going to implement.
#
# 
# There are five nodes in our graph. Each node is named by a tuple `(t, i)`,
# where `t` is the stage for `t=1,2,3`, and `i` is the Markov state for `i=1,2`.
# As before, the wavy lines denote the stagewise-independent random variable.
# For each stage, we need to provide a Markov transition matrix. This is an
# `M`x`N` matrix, where the element `A[i, j]` gives the probability of
# transitioning from Markov state `i` in the previous stage to Markov state `j`
# in the current stage. The first stage is special because we assume there is a
# "zero'th" stage which has one Markov state (the round node in the graph
# above). Furthermore, the number of columns in the transition matrix of a stage
# (i.e. the number of Markov states) must equal the number of rows in the next
# stage's transition matrix. For our example, the vector of Markov transition
# matrices is given by:
T = Array{Float64,2}[[1.0]', [0.75 0.25], [0.75 0.25; 0.25 0.75]]
# !!! note
# Make sure to add the `'` after the first transition matrix so Julia can
# distinguish between a vector and a matrix.
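# For example, a quick way to see the difference (output shown as comments):
# ```julia
# size([1.0])   # (1,)   -- a vector of length one
# size([1.0]')  # (1, 1) -- a 1-by-1 matrix
# ```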
# ## Creating a model
using SDDP, HiGHS
Ω = [
(inflow = 0.0, fuel_multiplier = 1.5),
(inflow = 50.0, fuel_multiplier = 1.0),
(inflow = 100.0, fuel_multiplier = 0.75),
]
model = SDDP.MarkovianPolicyGraph(;
transition_matrices = Array{Float64,2}[
[1.0]',
[0.75 0.25],
[0.75 0.25; 0.25 0.75],
],
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
## Unpack the stage and Markov index.
t, markov_state = node
## Define the state variable.
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Define the control variables.
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
## Define the constraints
@constraints(
subproblem,
begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
thermal_generation + hydro_generation == 150.0
end
)
## Note how we can use `markov_state` to dispatch an `if` statement.
probability = if markov_state == 1 # wet climate state
[1 / 6, 1 / 3, 1 / 2]
else # dry climate state
[1 / 2, 1 / 3, 1 / 6]
end
fuel_cost = [50.0, 100.0, 150.0]
SDDP.parameterize(subproblem, Ω, probability) do ω
JuMP.fix(inflow, ω.inflow)
@stageobjective(
subproblem,
ω.fuel_multiplier * fuel_cost[t] * thermal_generation
)
end
end
# !!! tip
# For more information on [`SDDP.MarkovianPolicyGraph`](@ref)s, read
# [Create a general policy graph](@ref).
# ## Training and simulating the policy
# As in the previous three tutorials, we train the policy:
SDDP.train(model)
# Instead of performing a Monte Carlo simulation like the previous tutorials, we
# may want to simulate one particular sequence of noise realizations. This
# _historical_ simulation can also be conducted by passing a
# [`SDDP.Historical`](@ref) sampling scheme to the `sampling_scheme` keyword of
# the [`SDDP.simulate`](@ref) function.
# We can confirm that the historical sequence of nodes was visited by querying
# the `:node_index` key of the simulation results.
simulations = SDDP.simulate(
model;
sampling_scheme = SDDP.Historical([
((1, 1), Ω[1]),
((2, 2), Ω[3]),
((3, 1), Ω[2]),
]),
)
[stage[:node_index] for stage in simulations[1]]
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 6968 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Example: Markov Decision Processes
# `SDDP.jl` can be used to solve a variety of Markov Decision processes. If the
# problem has continuous state and control spaces, and the objective and
# transition function are convex, then SDDP.jl can find a globally optimal
# policy. In other cases, SDDP.jl will find a locally optimal policy.
# ## A simple example
# A simple demonstration of this is the example taken from page 98 of the book
# "Markov Decision Processes: Discrete stochastic Dynamic Programming", by
# Martin L. Putterman.
# The example, as described in Section 4.6.3 of the book, is to minimize a sum
# of squares of `N` non-negative variables, subject to a budget constraint that
# the variable values add up to `M`. Put mathematically, that is:
# ```math
# \begin{aligned}
# \min \;\; & \sum\limits_{i=1}^N x_i^2 \\
# s.t. \;\; & \sum\limits_{i=1}^N x_i = M \\
# & x_i \ge 0, \quad i \in 1,\ldots,N
# \end{aligned}
# ```
# The optimal objective value is ``M^2/N``, and the optimal solution is
# ``x_i = M / N``, which can be shown by induction.
# This can be reformulated as a Markov Decision Process by introducing a state
# variable, ``s``, which tracks the un-spent budget over ``N`` stages.
# ```math
# \begin{aligned}
# V_t(s) = \min \;\; & x^2 + V_{t+1}(s^\prime) \\
# s.t. \;\; & s^\prime = s - x \\
# & x \le s \\
# & x \ge 0 \\
# & s \ge 0
# \end{aligned}
# ```
# and in the last stage ``V_N``, there is an additional constraint that
# ``s^\prime = 0``.
# The budget of ``M`` is computed by solving for ``V_1(M)``.
# !!! info
# Since everything here is continuous and convex, SDDP.jl will find the
# globally optimal policy.
# If the reformulation from the single problem into the recursive form of the
# Markov Decision Process is not obvious, consult Puterman's book.
# We can model and solve this problem using SDDP.jl as follows:
using SDDP
import Ipopt
M, N = 5, 3
model = SDDP.LinearPolicyGraph(;
stages = N,
lower_bound = 0.0,
optimizer = Ipopt.Optimizer,
) do subproblem, node
@variable(subproblem, s >= 0, SDDP.State, initial_value = M)
@variable(subproblem, x >= 0)
@stageobjective(subproblem, x^2)
@constraint(subproblem, x <= s.in)
@constraint(subproblem, s.out == s.in - x)
if node == N
fix(s.out, 0.0; force = true)
end
return
end
SDDP.train(model)
# Check that we got the theoretical optimum:
SDDP.calculate_bound(model), M^2 / N
# And check that we found the theoretical value for each ``x_i``:
simulations = SDDP.simulate(model, 1, [:x])
for data in simulations[1]
println("x_$(data[:node_index]) = $(data[:x])")
end
# Close enough! We don't get exactly 5/3 because of numerical tolerances within
# our choice of optimization solver (in this case, Ipopt).
# ## A more complicated policy
# SDDP.jl is also capable of finding policies for other types of Markov Decision
# Processes. A classic example of a Markov Decision Process is the problem of
# finding a path through a maze.
# Here's one example of a maze. Try changing the parameters to explore different
# mazes:
M, N = 3, 4
initial_square = (1, 1)
reward, illegal_squares, penalties = (3, 4), [(2, 2)], [(3, 1), (2, 4)]
path = fill("⋅", M, N)
path[initial_square...] = "1"
for (k, v) in (illegal_squares => "▩", penalties => "†", [reward] => "*")
for (i, j) in k
path[i, j] = v
end
end
print(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\n'))
# Our goal is to get from square `1` to square `*`. If we step on a `†`, we
# incur a penalty of `1`. Squares with `▩` are blocked; we cannot move there.
# There are a variety of ways that we can solve this problem. We're going to
# solve it using a stationary binary stochastic programming formulation.
# Our state variable will be a matrix of binary variables ``x_{i,j}``, where
# each element is ``1`` if the agent is in the square and ``0`` otherwise. In
# each period, we incur a reward of ``1`` if we are in the `reward` square and a
# penalty of ``-1`` if we are in a `penalties` square. We cannot move to the
# `illegal_squares`, so those ``x_{i,j} = 0``. Feasibility between moves is
# modelled by constraints of the form:
# ```math
# x^\prime_{i,j} \le \sum\limits_{(a,b)\in P} x_{a,b}
# ```
# where ``P`` is the set of squares `(a, b)` from which it is valid to move to
# `(i, j)`.
# Because we are looking for a stationary policy, we need a unicyclic graph with
# a discount factor:
discount_factor = 0.9
graph = SDDP.UnicyclicGraph(discount_factor)
# Then we can formulate our full model:
import HiGHS
model = SDDP.PolicyGraph(
graph;
sense = :Max,
upper_bound = 1 / (1 - discount_factor),
optimizer = HiGHS.Optimizer,
) do sp, _
## Our state is a binary variable for each square
@variable(
sp,
x[i = 1:M, j = 1:N],
Bin,
SDDP.State,
initial_value = (i, j) == initial_square,
)
## Can only be in one square at a time
@constraint(sp, sum(x[i, j].out for i in 1:M, j in 1:N) == 1)
## Incur rewards and penalties
@stageobjective(
sp,
x[reward...].out - sum(x[i, j].out for (i, j) in penalties)
)
## Some squares are illegal
@constraint(sp, [(i, j) in illegal_squares], x[i, j].out <= 0)
## Constraints on valid moves
for i in 1:M, j in 1:N
moves = [(i - 1, j), (i + 1, j), (i, j), (i, j + 1), (i, j - 1)]
filter!(v -> 1 <= v[1] <= M && 1 <= v[2] <= N, moves)
@constraint(sp, x[i, j].out <= sum(x[a, b].in for (a, b) in moves))
end
return
end
# The upper bound is obtained by assuming that we reach the reward square in one
# move and stay there.
# !!! warning
# Since there are discrete decisions here, SDDP.jl is not guaranteed to find
# the globally optimal policy.
SDDP.train(model)
# Simulating a cyclic policy graph requires an explicit `sampling_scheme` that
# does not terminate early based on the cycle probability:
simulations = SDDP.simulate(
model,
1,
[:x];
sampling_scheme = SDDP.InSampleMonteCarlo(;
max_depth = 5,
terminate_on_dummy_leaf = false,
),
);
# Fill in the `path` with the time-step in which we visit the square:
for (t, data) in enumerate(simulations[1]), i in 1:M, j in 1:N
if data[:x][i, j].in > 0.5
path[i, j] = "$t"
end
end
print(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\n'))
# !!! tip
# This formulation will likely struggle as the number of cells in the maze
# increases. Can you think of an equivalent formulation that uses fewer
# state variables?
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 10283 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Objective states
# There are many applications in which we want to model a price process that
# follows some auto-regressive process. Common examples include stock prices on
# financial exchanges and spot-prices in energy markets.
# However, it is well known that these cannot be incorporated into SDDP because
# they result in cost-to-go functions that are convex with respect to some state
# variables (e.g., the reservoir levels) and concave with respect to other state
# variables (e.g., the spot price in the current stage).
# To overcome this problem, the approach in the literature has been to
# discretize the price process in order to model it using a Markovian policy
# graph like those discussed in [Markovian policy graphs](@ref).
# However, recent work offers a way to include stagewise-dependent objective
# uncertainty into the objective function of SDDP subproblems. Readers are
# directed to the following works for an introduction:
# - Downward, A., Dowson, O., and Baucke, R. (2017). Stochastic dual dynamic
# programming with stagewise dependent objective uncertainty. Optimization
# Online. [link](http://www.optimization-online.org/DB_HTML/2018/02/6454.html)
#
# - Dowson, O. PhD Thesis. University of Auckland, 2018. [link](https://researchspace.auckland.ac.nz/handle/2292/37700)
# The method discussed in the above works introduces the concept of an
# _objective state_ into SDDP. Unlike normal state variables in SDDP (e.g., the
# volume of water in the reservoir), the cost-to-go function is _concave_ with
# respect to the objective states. Thus, the method builds an outer
# approximation of the cost-to-go function in the normal state-space, and an
# inner approximation of the cost-to-go function in the objective state-space.
# !!! warning
# Support for objective states in `SDDP.jl` is experimental. Models are
#     considerably more computationally intensive, the interface is less
# user-friendly, and there are [subtle gotchas to be aware of](@ref objective_state_warnings).
# Only use this if you have read and understood the theory behind the method.
# ## One-dimensional objective states
# Let's assume that the fuel cost is not fixed, but instead evolves according to
# a multiplicative auto-regressive process: `fuel_cost[t] = ω * fuel_cost[t-1]`,
# where `ω` is drawn from the sample space `[0.75, 0.9, 1.1, 1.25]` with equal
# probability.
# An objective state can be added to a subproblem using the
# [`SDDP.add_objective_state`](@ref) function. This can only be called once per
# subproblem. If you want to add a multi-dimensional objective state, read
# [Multi-dimensional objective states](@ref). [`SDDP.add_objective_state`](@ref)
# takes a number of keyword arguments. The two required ones are
# - `initial_value`: the value of the objective state at the root node of the
# policy graph (i.e., identical to the `initial_value` when defining normal
#    state variables).
#
# - `lipschitz`: the Lipschitz constant of the cost-to-go function with respect
# to the objective state. In other words, this value is the maximum change in
# the cost-to-go function _at any point in the state space_, given a one-unit
# change in the objective state.
# There are also two optional keyword arguments: `lower_bound` and
# `upper_bound`, which give SDDP.jl hints (importantly, not constraints) about
# the domain of the objective state. Setting these bounds appropriately can
# improve the speed of convergence.
# Finally, [`SDDP.add_objective_state`](@ref) requires an update function. This
# function takes two arguments. The first is the incoming value of the objective
# state, and the second is the realization of the stagewise-independent noise
# term (set using [`SDDP.parameterize`](@ref)). The function should return the
# value of the objective state to be used in the current subproblem.
# This connection with the stagewise-independent noise term means that
# [`SDDP.parameterize`](@ref) _must_ be called in a subproblem that defines an
# objective state. Inside [`SDDP.parameterize`](@ref), the value of the
# objective state to be used in the current subproblem (i.e., after the update
# function), can be queried using [`SDDP.objective_state`](@ref).
# Here is the full model with the objective state.
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
@constraints(
subproblem,
begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
demand_constraint, thermal_generation + hydro_generation == 150.0
end
)
## Add an objective state. ω will be the same value that is called in
## `SDDP.parameterize`.
SDDP.add_objective_state(
subproblem;
initial_value = 50.0,
lipschitz = 10_000.0,
lower_bound = 50.0,
upper_bound = 150.0,
) do fuel_cost, ω
return ω.fuel * fuel_cost
end
## Create the cartesian product of a multi-dimensional random variable.
Ω = [
(fuel = f, inflow = w) for f in [0.75, 0.9, 1.1, 1.25] for
w in [0.0, 50.0, 100.0]
]
SDDP.parameterize(subproblem, Ω) do ω
## Query the current fuel cost.
fuel_cost = SDDP.objective_state(subproblem)
@stageobjective(subproblem, fuel_cost * thermal_generation)
return JuMP.fix(inflow, ω.inflow)
end
end
# After creating our model, we can train and simulate as usual.
SDDP.train(model; run_numerical_stability_report = false)
simulations = SDDP.simulate(model, 1)
print("Finished training and simulating.")
# To demonstrate how the objective states are updated, consider the sequence of
# noise observations:
[stage[:noise_term] for stage in simulations[1]]
# Thus, the fuel cost in the first stage should be `0.75 * 50 = 37.5`. The fuel
# cost in the second stage should be `1.1 * 37.5 = 41.25`. The fuel cost in the
# third stage should be `0.75 * 41.25 = 30.9375`.
# To confirm this, the values of the objective state in a simulation can be
# queried using the `:objective_state` key.
[stage[:objective_state] for stage in simulations[1]]
# ## Multi-dimensional objective states
# You can construct multi-dimensional price processes using `NTuple`s. Just
# replace every scalar value associated with the objective state by a tuple. For
# example, `initial_value = 1.0` becomes `initial_value = (1.0, 2.0)`.
# Here is an example:
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
@constraints(
subproblem,
begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
demand_constraint, thermal_generation + hydro_generation == 150.0
end
)
SDDP.add_objective_state(
subproblem;
initial_value = (50.0, 50.0),
lipschitz = (10_000.0, 10_000.0),
lower_bound = (50.0, 50.0),
upper_bound = (150.0, 150.0),
) do fuel_cost, ω
## fuel_cost is a tuple, containing the (fuel_cost[t-1], fuel_cost[t-2])
## This function returns a new tuple containing
## (fuel_cost[t], fuel_cost[t-1]). Thus, we need to compute the new
## cost:
new_cost = fuel_cost[1] + 0.5 * (fuel_cost[1] - fuel_cost[2]) + ω.fuel
## And then return the appropriate tuple:
return (new_cost, fuel_cost[1])
end
Ω = [
(fuel = f, inflow = w) for f in [-10.0, -5.0, 5.0, 10.0] for
w in [0.0, 50.0, 100.0]
]
SDDP.parameterize(subproblem, Ω) do ω
fuel_cost, _ = SDDP.objective_state(subproblem)
@stageobjective(subproblem, fuel_cost * thermal_generation)
return JuMP.fix(inflow, ω.inflow)
end
end
SDDP.train(model; run_numerical_stability_report = false)
simulations = SDDP.simulate(model, 1)
print("Finished training and simulating.")
# This time, since our objective state is two-dimensional, the objective states
# are tuples with two elements:
[stage[:objective_state] for stage in simulations[1]]
# ## [Warnings](@id objective_state_warnings)
# There are a number of things to be aware of when using objective states.
#
# - The key assumption is that price is independent of the states and actions in
# the model.
#
# That means that the price cannot appear in any `@constraint`s. Nor can you
# use any `@variable`s in the update function.
#
# - Choosing an appropriate Lipschitz constant is difficult.
#
# The points discussed in [Choosing an initial bound](@ref) are relevant.
#   The Lipschitz constant should not be chosen as large as possible (a
#   smaller constant will help with convergence and the numerical issues
#   discussed above), but if chosen too small, it may cut off the feasible
#   region and lead to a sub-optimal solution.
#
# - You need to ensure that the cost-to-go function is concave with respect to
# the objective state _before_ the update.
#
# If the update function is linear, this is always the case. In some
# situations, the update function can be nonlinear (e.g., multiplicative as
# we have above). In general, placing constraints on the price (e.g.,
# `clamp(price, 0, 1)`) will destroy concavity. [Caveat
# emptor](https://en.wikipedia.org/wiki/Caveat_emptor). It's up to you if
#   this is a problem. If it isn't, you'll get a good heuristic with no
# guarantee of global optimality.
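# To make the last point concrete, here are two illustrative update functions
# (hypothetical; they are not part of the models above):
# ```julia
# # A linear (affine) update: concavity is always preserved.
# (fuel_cost, ω) -> 0.9 * fuel_cost + ω
# # A clamped update: the nonlinearity can destroy concavity.
# (fuel_cost, ω) -> clamp(ω * fuel_cost, 0.0, 1.0)
# ```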
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3067 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Uncertainty in the objective function
# In the previous tutorial, [An introduction to SDDP.jl](@ref), we created a
# stochastic hydro-thermal scheduling model. In this tutorial, we extend the
# problem by adding uncertainty to the fuel costs.
# Previously, we assumed that the fuel cost was deterministic: \$50/MWh in the
# first stage, \$100/MWh in the second stage, and \$150/MWh in the third
# stage. For this tutorial, we assume that in addition to these base costs, the
# actual fuel cost is correlated with the inflows.
# Our new model for the uncertainty is given by the following table:
#
# | ω | 1 | 2 | 3 |
# | ---- | --- | --- | ---- |
# | P(ω) | 1/3 | 1/3 | 1/3 |
# | inflow | 0 | 50 | 100 |
# | fuel multiplier | 1.5 | 1.0 | 0.75 |
# In stage `t`, the objective is now to minimize:
#
# `fuel_multiplier * fuel_cost[t] * thermal_generation`
# ## Creating a model
# To add an uncertain objective, we can simply call [`@stageobjective`](@ref)
# from inside the [`SDDP.parameterize`](@ref) function.
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
## Define the state variable.
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
## Define the control variables.
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
## Define the constraints
@constraints(
subproblem,
begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
thermal_generation + hydro_generation == 150.0
end
)
fuel_cost = [50.0, 100.0, 150.0]
## Parameterize the subproblem.
Ω = [
(inflow = 0.0, fuel_multiplier = 1.5),
(inflow = 50.0, fuel_multiplier = 1.0),
(inflow = 100.0, fuel_multiplier = 0.75),
]
SDDP.parameterize(subproblem, Ω, [1 / 3, 1 / 3, 1 / 3]) do ω
JuMP.fix(inflow, ω.inflow)
@stageobjective(
subproblem,
ω.fuel_multiplier * fuel_cost[t] * thermal_generation
)
end
end
# ## Training and simulating the policy
# As in the previous two tutorials, we train and simulate the policy:
SDDP.train(model)
simulations = SDDP.simulate(model, 500)
objective_values =
[sum(stage[:stage_objective] for stage in sim) for sim in simulations]
using Statistics
μ = round(mean(objective_values); digits = 2)
ci = round(1.96 * std(objective_values) / sqrt(500); digits = 2)
println("Confidence interval: ", μ, " ± ", ci)
println("Lower bound: ", round(SDDP.calculate_bound(model); digits = 2))
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4692 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Alternative forward models
# This example demonstrates how to train convex and non-convex models.
# This example uses the following packages:
using SDDP
import Ipopt
import PowerModels
import Test
# ## Formulation
# For our model, we build a simple optimal power flow model with a single
# hydro-electric generator.
# The formulation of our optimal power flow problem depends on `model_type`,
# which must be one of the `PowerModels` formulations.
# (To run locally, download [`pglib_opf_case5_pjm.m`](pglib_opf_case5_pjm.m) and
# update `filename` appropriately.)
function build_model(model_type)
filename = joinpath(@__DIR__, "pglib_opf_case5_pjm.m")
data = PowerModels.parse_file(filename)
return SDDP.PolicyGraph(
SDDP.UnicyclicGraph(0.95);
sense = :Min,
lower_bound = 0.0,
optimizer = Ipopt.Optimizer,
) do sp, t
power_model = PowerModels.instantiate_model(
data,
model_type,
PowerModels.build_opf;
jump_model = sp,
)
## Now add hydro power models. Assume that generator 5 is hydro, and the
## rest are thermal.
pg = power_model.var[:it][:pm][:nw][0][:pg][5]
sp[:pg] = pg
@variable(sp, x >= 0, SDDP.State, initial_value = 10.0)
@variable(sp, deficit >= 0)
@constraint(sp, balance, x.out == x.in - pg + deficit)
@stageobjective(sp, objective_function(sp) + 1e6 * deficit)
SDDP.parameterize(sp, [0, 2, 5]) do ω
return SDDP.set_normalized_rhs(balance, ω)
end
return
end
end
# ## Training a convex model
# We can build and train a convex approximation of the optimal power flow
# problem.
# The problem with the convex model is that it does not accurately simulate the
# true dynamics of the problem. Therefore, it under-estimates the true cost of
# operation.
convex = build_model(PowerModels.DCPPowerModel)
SDDP.train(convex; iteration_limit = 10)
# To more accurately simulate the dynamics of the problem, a common approach is
# to write the cuts representing the policy to a file, and then read them into
# a non-convex model:
SDDP.write_cuts_to_file(convex, "convex.cuts.json")
non_convex = build_model(PowerModels.ACPPowerModel)
SDDP.read_cuts_from_file(non_convex, "convex.cuts.json")
# Now we can simulate `non_convex` to evaluate the policy.
result = SDDP.simulate(non_convex, 1)
# A problem with reading and writing the cuts to file is that the cuts have been
# generated from trial points of the convex model. Therefore, the policy may be
# arbitrarily bad at points visited by the non-convex model.
# ## Training a non-convex model
# We can also build and train a non-convex formulation of the optimal power flow
# problem.
# The problem with the non-convex model is that because it is non-convex,
# SDDP.jl may find a sub-optimal policy. Therefore, it may over-estimate the
# true cost of operation.
non_convex = build_model(PowerModels.ACPPowerModel)
SDDP.train(non_convex; iteration_limit = 10)
result = SDDP.simulate(non_convex, 1)
# ## Combining convex and non-convex models
# To summarize, training with the convex model constructs cuts at points that
# may never be visited by the non-convex model, and training with the non-convex
# model may construct arbitrarily poor cuts because a key assumption of SDDP is
# convexity.
# As a compromise, we can train a policy using a combination of the convex and
# non-convex models; we'll use the non-convex model to generate trial points on
# the forward pass, and we'll use the convex model to build cuts on the backward
# pass.
convex = build_model(PowerModels.DCPPowerModel)
#-
non_convex = build_model(PowerModels.ACPPowerModel)
# To do so, we train `convex` using the [`SDDP.AlternativeForwardPass`](@ref)
# forward pass, which simulates the model using `non_convex`, and we use
# [`SDDP.AlternativePostIterationCallback`](@ref) as a post-iteration callback,
# which copies cuts from the `convex` model back into the `non_convex` model.
SDDP.train(
convex;
forward_pass = SDDP.AlternativeForwardPass(non_convex),
post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),
iteration_limit = 10,
)
# In practice, if we were to simulate `non_convex` now, we should obtain a
# better policy than either of the two previous approaches.
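# For example, repeating the simulation call used earlier in this tutorial:
# ```julia
# result = SDDP.simulate(non_convex, 1)
# ```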
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 6836 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Plotting tools
# In our previous tutorials, we formulated, solved, and simulated multistage
# stochastic optimization problems. However, we haven't really investigated what
# the solution looks like. Luckily, `SDDP.jl` includes a number of plotting
# tools to help us do that. In this tutorial, we explain the tools and make some
# pretty pictures.
# ## Preliminaries
# The next two plot types help visualize the policy. Thus, we first need to
# create a policy and simulate some trajectories. So, let's take the model from
# [Markovian policy graphs](@ref), train it for 20 iterations, and then
# simulate 100 Monte Carlo realizations of the policy.
using SDDP, HiGHS
Ω = [
(inflow = 0.0, fuel_multiplier = 1.5),
(inflow = 50.0, fuel_multiplier = 1.0),
(inflow = 100.0, fuel_multiplier = 0.75),
]
model = SDDP.MarkovianPolicyGraph(;
transition_matrices = Array{Float64,2}[
[1.0]',
[0.75 0.25],
[0.75 0.25; 0.25 0.75],
],
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, node
t, markov_state = node
@variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)
@variables(subproblem, begin
thermal_generation >= 0
hydro_generation >= 0
hydro_spill >= 0
inflow
end)
@constraints(
subproblem,
begin
volume.out == volume.in + inflow - hydro_generation - hydro_spill
thermal_generation + hydro_generation == 150.0
end
)
probability =
markov_state == 1 ? [1 / 6, 1 / 3, 1 / 2] : [1 / 2, 1 / 3, 1 / 6]
fuel_cost = [50.0, 100.0, 150.0]
SDDP.parameterize(subproblem, Ω, probability) do ω
JuMP.fix(inflow, ω.inflow)
@stageobjective(
subproblem,
ω.fuel_multiplier * fuel_cost[t] * thermal_generation
)
end
end
SDDP.train(model; iteration_limit = 20, run_numerical_stability_report = false)
simulations = SDDP.simulate(
model,
100,
[:volume, :thermal_generation, :hydro_generation, :hydro_spill],
)
println("Completed $(length(simulations)) simulations.")
# Great! Now we have some data in `simulations` to visualize.
# ## Spaghetti plots
# The first plotting utility we discuss is a _spaghetti_ plot (you'll understand
# the name when you see the graph).
# To create a spaghetti plot, begin by creating a new
# [`SDDP.SpaghettiPlot`](@ref) instance as follows:
plt = SDDP.SpaghettiPlot(simulations)
# We can add plots to `plt` using the [`SDDP.add_spaghetti`](@ref) function.
SDDP.add_spaghetti(plt; title = "Reservoir volume") do data
return data[:volume].out
end
# In addition to returning values from the simulation, you can compute things:
SDDP.add_spaghetti(plt; title = "Fuel cost", ymin = 0, ymax = 250) do data
if data[:thermal_generation] > 0
return data[:stage_objective] / data[:thermal_generation]
else # No thermal generation, so return 0.0.
return 0.0
end
end
# Note that there are many keyword arguments in addition to `title`. For
# example, we fixed the minimum and maximum values of the y-axis using `ymin`
# and `ymax`. See the [`SDDP.add_spaghetti`](@ref) documentation for all the
# arguments.
# Having built the plot, we now need to display it using [`SDDP.plot`](@ref).
# ```julia
# SDDP.plot(plt, "spaghetti_plot.html")
# ```
#
# ```@raw html
# <embed type="text/html" src="../../assets/spaghetti_plot.html" width="100%">
# ```
#
# This should open a webpage that looks like [this one](../assets/spaghetti_plot.html).
# Using the mouse, you can highlight individual trajectories by hovering over
# them. This makes it possible to visualize a single trajectory across multiple
# dimensions.
# If you click on the plot, then trajectories that are close to the mouse
# pointer are shown darker and those further away are shown lighter.
# ## Publication plots
# Instead of the interactive Javascript plots, you can also create some
# publication ready plots using the [`SDDP.publication_plot`](@ref) function.
# !!! info
# You need to install the [Plots.jl](https://github.com/JuliaPlots/Plots)
# package for this to work. We used the `GR` backend (`gr()`), but any
# `Plots.jl` backend should work.
# [`SDDP.publication_plot`](@ref) implements a plot recipe to create ribbon
# plots of each variable against the stages. The first argument is the vector of
# simulation dictionaries and the second argument is the dictionary key that you
# want to plot. Standard `Plots.jl` keyword arguments such as `title` and `xlabel`
# can be used to modify the look of each plot. By default, the plot displays
# ribbons of the 0-100, 10-90, and 25-75 percentiles. The dark, solid line in the
# middle is the median (i.e., the 50th percentile).
import Plots
Plots.plot(
SDDP.publication_plot(simulations; title = "Outgoing volume") do data
return data[:volume].out
end,
SDDP.publication_plot(simulations; title = "Thermal generation") do data
return data[:thermal_generation]
end;
xlabel = "Stage",
ylims = (0, 200),
layout = (1, 2),
)
# You can save this plot as a PDF using the `Plots.jl` function `savefig`:
# ```julia
# Plots.savefig("my_picture.pdf")
# ```
# ## Plotting the value function
# You can obtain an object representing the value function of a node using
# [`SDDP.ValueFunction`](@ref).
V = SDDP.ValueFunction(model[(1, 1)])
# The value function can be evaluated using [`SDDP.evaluate`](@ref).
SDDP.evaluate(V; volume = 1)
# `evaluate` returns the height of the value function, and a subgradient with respect to the
# convex state variables.
# You can also plot the value function using [`SDDP.plot`](@ref)
# ```julia
# SDDP.plot(V, volume = 0:200, filename = "value_function.html")
# ```
#
# ```@raw html
# <embed type="text/html" src="../../assets/value_function.html" width="100%">
# ```
#
# This should open a webpage that looks like [this one](../assets/value_function.html).
# ## Convergence dashboard
# If the text-based logging isn't to your liking, you can open a visualization of
# the training by passing `dashboard = true` to [`SDDP.train`](@ref).
# ```julia
# SDDP.train(model; dashboard = true)
# ```
# By default, `dashboard = false` because there is an initial overhead
# associated with opening and preparing the plot.
# !!! warning
# The dashboard is experimental. There are known bugs associated with it,
# e.g., [SDDP.jl#226](https://github.com/odow/SDDP.jl/issues/226).
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 7849 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Words of warning
# SDDP is a powerful solution technique for multistage stochastic programming.
# However, there are a number of subtle things to be aware of before creating
# your own models.
# ## Relatively complete recourse
# Models built in SDDP.jl need a property called _relatively complete recourse_.
# One definition of relatively complete recourse is that _all_ feasible decisions
# (not necessarily optimal) in a subproblem lead to feasible decisions in future
# subproblems.
# For example, in the following problem, one feasible first stage decision is
# `x.out = 0`. But this causes an infeasibility in the second stage which requires
# `x.in >= 1`. This will throw an error about infeasibility if you try to solve.
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(;
stages = 2,
lower_bound = 0,
optimizer = HiGHS.Optimizer,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
if t == 2
@constraint(sp, x.in >= 1)
end
@stageobjective(sp, x.out)
end
try #hide
SDDP.train(model; iteration_limit = 1, print_level = 0)
catch err #hide
showerror(stderr, err) #hide
end #hide
# !!! warning
# The actual constraints causing the infeasibilities can be deceptive! A good
# strategy to debug is to comment out all constraints. Then, one-by-one,
#     un-comment the constraints and re-solve the model to check whether it finds a
# feasible solution.
# ## Numerical stability
# If you aren't aware, SDDP builds an outer-approximation to a convex function
# using cutting planes. This results in a formulation that is particularly hard
# for solvers like HiGHS, Gurobi, and CPLEX to deal with. As a result, you may
# run into weird behavior. This behavior could include:
#
# - Iterations suddenly taking a long time (the solver stalled)
# - Subproblems turning infeasible or unbounded after many iterations
# - Solvers returning "Numerical Error" statuses
# ### Problem scaling
# In almost all cases, the cause of this is poor problem scaling. For our
# purposes, poor problem scaling means having variables with very large numbers
# and variables with very small numbers in the same model.
# !!! tip
# Gurobi has an excellent [set of articles](http://www.gurobi.com/documentation/8.1/refman/numerics_gurobi_guidelines.html)
# on numerical issues and how to avoid them.
# Consider, for example, the hydro-thermal scheduling problem we have been
# discussing in previous tutorials.
# If we define the volume of the reservoir in terms of m³, then a lake might
# have a capacity of 10^10 m³: `@variable(subproblem, 0 <= volume <= 10^10)`.
# Moreover, the cost per cubic meter might be around \$0.05/m³. To calculate
# the value of water in our reservoir, we need to multiply a variable on the
# order of 10^10 by one on the order of 10⁻²! That is twelve orders of
# magnitude!
# To improve the performance of the SDDP algorithm (and reduce the chance of
# weird behavior), try to re-scale the units of the problem in order to reduce
# the largest difference in magnitude. For example, if we talk in terms of
# million m³, then we have a capacity of 10⁴ million m³, and a price of
# \$50,000 per million m³. Now things are only one order of magnitude apart.
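# For example, a minimal sketch of this re-scaling (the variable and bounds are
# illustrative only):
# ```julia
# # Volume in m³: the bound is twelve orders of magnitude above the price.
# @variable(subproblem, 0 <= volume <= 10^10)
# # Volume in millions of m³: the bound of 10⁴ and the price of 50_000 are only
# # one order of magnitude apart.
# @variable(subproblem, 0 <= volume <= 10^4)
# ```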
# ### Numerical stability report
# To aid in the diagnosis of numerical issues, you can call
# [`SDDP.numerical_stability_report`](@ref). By default, this aggregates all of
# the nodes into a single report. You can produce a stability report for each
# node by passing `by_node=true`.
using SDDP
model =
SDDP.LinearPolicyGraph(; stages = 2, lower_bound = -1e10) do subproblem, t
@variable(subproblem, x >= -1e7, SDDP.State, initial_value = 1e-5)
@constraint(subproblem, 1e9 * x.out >= 1e-6 * x.in + 1e-8)
@stageobjective(subproblem, 1e9 * x.out)
end
SDDP.numerical_stability_report(model)
# The report analyses the magnitude (in absolute terms) of the coefficients in
# the constraint matrix, the objective function, any variable bounds, and in the
# RHS of the constraints. A warning will be thrown if `SDDP.jl` detects very
# large or small values. As discussed in [Problem scaling](@ref), this is an
# indication that you should reformulate your model.
# By default, a numerical stability check is run when you call
# [`SDDP.train`](@ref), although it can be turned off by passing
# `run_numerical_stability_report = false`.
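# For example:
# ```julia
# SDDP.train(model; run_numerical_stability_report = false)
# ```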
# ### Solver-specific options
# If you have a particularly troublesome model, you should investigate setting
# solver-specific options to improve the numerical stability of each solver. For
# example, Gurobi has a [`NumericFocus`
# option](http://www.gurobi.com/documentation/8.1/refman/numericfocus.html#parameter:NumericFocus).
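# For example, here is a sketch of how you might set `NumericFocus` by passing
# an optimizer with attributes (this assumes you have Gurobi.jl installed;
# adapt the parameter name to your solver):
# ```julia
# using SDDP
# import Gurobi
# model = SDDP.LinearPolicyGraph(;
#     stages = 3,
#     lower_bound = 0.0,
#     optimizer = JuMP.optimizer_with_attributes(
#         Gurobi.Optimizer,
#         "NumericFocus" => 3,
#     ),
# ) do subproblem, t
#     # ... build the subproblem as usual ...
# end
# ```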
# ## Choosing an initial bound
# One of the important requirements when building a SDDP model is to choose an
# appropriate bound on the objective (lower if minimizing, upper if maximizing).
# However, it can be hard to choose a bound if you don't know the solution!
# (Which is very likely.)
# The bound should be as tight as possible (a tight bound will help with
# convergence and the numerical issues discussed above), but if chosen too
# tight (e.g., a lower bound that is larger than the optimal cost when
# minimizing), it may cut off the feasible region and lead to a sub-optimal
# solution.
# Consider the following simple model, where we first set `lower_bound` to `0.0`.
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x >= 0, SDDP.State, initial_value = 2)
@variable(subproblem, u >= 0)
@variable(subproblem, v >= 0)
@constraint(subproblem, x.out == x.in - u)
@constraint(subproblem, u + v == 1.5)
@stageobjective(subproblem, t * v)
end
SDDP.train(model; iteration_limit = 5, run_numerical_stability_report = false)
# Now consider the case when we set the `lower_bound` to `10.0`:
using SDDP, HiGHS
model = SDDP.LinearPolicyGraph(;
stages = 3,
sense = :Min,
lower_bound = 10.0,
optimizer = HiGHS.Optimizer,
) do subproblem, t
@variable(subproblem, x >= 0, SDDP.State, initial_value = 2)
@variable(subproblem, u >= 0)
@variable(subproblem, v >= 0)
@constraint(subproblem, x.out == x.in - u)
@constraint(subproblem, u + v == 1.5)
@stageobjective(subproblem, t * v)
end
SDDP.train(model; iteration_limit = 5, run_numerical_stability_report = false)
# How do we tell which is more appropriate? There are a few clues that you
# should look out for.
#
# - The bound converges to a value above (if minimizing) the simulated cost of
# the policy. In this case, the problem is deterministic, so it is easy to
# tell. But you can also check by performing a Monte Carlo simulation like we
#   did in [An introduction to SDDP.jl](@ref); see the sketch after this list.
#
# - The bound converges to different values when we change the bound. This is
# another clear give-away. The bound provided by the user is only used in the
# initial iterations. __It should not change the value of the converged
# policy.__ Thus, if you don't know an appropriate value for the bound, choose
# an initial value, and then increase (or decrease) the value of the bound to
# confirm that the value of the policy doesn't change.
#
# - The bound converges to a value _close_ to the bound provided by the user.
# This varies between models, but notice that `11.0` is quite close to `10.0`
# compared with `3.5` and `0.0`.
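# Here is a sketch of the Monte Carlo check mentioned in the first point above,
# reusing functions from the earlier tutorials:
# ```julia
# SDDP.train(model; iteration_limit = 20)
# simulations = SDDP.simulate(model, 500)
# objectives = map(simulations) do simulation
#     return sum(stage[:stage_objective] for stage in simulation)
# end
# μ, ci = SDDP.confidence_interval(objectives)
# println("Simulated cost: ", μ, " ± ", ci)
# println("Bound:          ", SDDP.calculate_bound(model))
# ```
# If we are minimizing and the bound is noticeably larger than the simulated
# cost, the `lower_bound` is probably cutting off part of the feasible region.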
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 24408 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module BiObjectiveSDDP
# Include the Gurobi-specific versions of get_BinvA and get_basis.
include(joinpath(@__DIR__, "gurobi.jl"))
import Printf
import SDDP
const MOI = SDDP.MOI
const JuMP = SDDP.JuMP
### Utilities for bi-objective simplex problems. Assumes that the problem is in
### the standard-form:
###
### min c'x
### s.t Ax = b
### x ≥ 0
###
### This code is based on the description of the bi-objective simplex method
### given by M. Ehrgott in his slides located at:
### https://www.lamsade.dauphine.fr/~projet_cost/ALGORITHMIC_DECISION_THEORY/pdf/Ehrgott/HanLecture2_ME.pdf
"""
get_BinvA(model::MOI.ModelLike)
Return the matrix `B⁻¹A`, where `B` is the matrix formed by the columns of the
basic variables in the constraint matrix.
Note that this typically has `n + m` columns, where `n` is the number of
variables and `m` is the number of rows in the constraint matrix because Gurobi
adds slack variables to equality constraints for some reason.
"""
function get_BinvA end
"""
get_basis(model::MOI.ModelLike)
Return the 1-indexed columns that comprise the basis.
"""
function get_basis end
# Here is where we kick things off properly.
struct BiObjectiveModel{M<:MOI.ModelLike}
model::M
c_1::Vector{Float64}
c_2::Vector{Float64}
end
function set_weighted_objective(bi_obj_model::BiObjectiveModel, lambda::Float64)
model = bi_obj_model.model
x = MOI.get(model, MOI.ListOfVariableIndices())
c = lambda .* bi_obj_model.c_1 + (1 - lambda) .* bi_obj_model.c_2
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(c, x), 0.0),
)
return
end
"""
phase_iii_biobjective_simplex_step(
bi_obj_model::BiObjectiveModel,
current_lambda::Float64,
)
Perform a Phase III step of the bi-objective simplex method, and return a tuple
`(λ⁻, λ⁺)` giving the range of weights over which the current basis remains
optimal.
"""
function phase_iii_biobjective_simplex_step(
bi_obj_model::BiObjectiveModel,
current_lambda::Float64,
)
set_weighted_objective(bi_obj_model, current_lambda)
model = bi_obj_model.model
MOI.optimize!(model)
    # Get the matrix B⁻¹A and perform some sanity checks.
BinvA = get_BinvA(model)
m, np = size(BinvA)
n = np - m
# Get the list of basis and non-basic variables, ignoring the slack
# variables.
B = get_basis(model)
N = setdiff(1:n, B)
# Now compute the c_N - c_B' B⁻¹A, but expand the cost vectors with `0.0` to
# account for the slack variables introduced by Gurobi. These are the
# reduced costs w.r.t. each objective, and are the change in the objective
# function that would occur if we were to bring the corresponding variable
# into the basis.
c_1 = copy(bi_obj_model.c_1)
c_2 = copy(bi_obj_model.c_2)
c_n = length(c_1)
if c_n < np
resize!(c_1, np)
resize!(c_2, np)
c_1[(c_n+1):end] .= 0.0
c_2[(c_n+1):end] .= 0.0
end
cb1 = c_1' .- c_1[B]' * BinvA
cb2 = c_2' .- c_2[B]' * BinvA
@assert length(cb1) == length(c_1)
@assert length(cb2) == length(c_2)
    # Wondering where the next piece of the formula comes from? Here is a
# toy example to explain it.
#
# f: min 1x + 0y + 0z
# g: min 0x + 1y + 0z
# s.t. 2x + 1y - 1z == 1
# x, y, z >= 0
#
# Decision space Objective space
# y g
# | | | |
# 1+ x A:(0, 1, 0) 1+ x A:(0, 1)
# | \ | \
# | \ B:(0.5, 0, 0) | \ B:(0.5, 0)
# 0+ x------ 0+ x--
# |-+―――――+――――――+― x |-+-------+- f
# 0 0.5 1 0 0.5
#
# To move from point A to point B, we have the change in objectives of
#
# df/dΔ = +0.5
# dg/dΔ = -1.0
#
# The objective facet is defined by the normal vector
#
# u = [-dy/dΔ / (dx/dΔ - dy/dΔ), 1 + dy/dΔ / (dx/dΔ - dy/dΔ)]
#
# or, more concretely, u = [2 / 3, 1 / 3].
#
# Therefore, the first point is valid for λ ∈ (2 / 3, 1] and the second
# for λ ∈ [0, 2 / 3).
# Set the initial bounds [λ⁻, λ⁺] over which the basis remains optimal.
λ⁻, λ⁺ = -1.0, 2.0
for i in N
tmp = -cb2[i] / (cb1[i] - cb2[i])
if cb1[i] >= 0 && cb2[i] < 0
# An entering variable that will decrease the second objective and
# increase the first objective. Lift the lower limit of the bound.
λ⁻ = max(λ⁻, tmp)
elseif cb1[i] < 0 && cb2[i] >= 0
# An entering variable that will decrease the first objective and
# increase the second objective. Drop the upper limit of the bound.
λ⁺ = min(λ⁺, tmp)
end
end
if λ⁺ == 2.0
# We couldn't find an entering variable that will decrease the first
# objective while increasing the second objective.
λ⁺ = current_lambda
end
if λ⁻ == -1.0
# We couldn't find an entering variable that will decrease the second
# objective while increasing the first objective.
λ⁻ = 0.0
end
return λ⁻, λ⁺
end
function get_next_lambda(
bi_obj_model::BiObjectiveModel,
current_lambda::Float64;
lambda_minimum_step::Float64,
lambda_atol::Float64,
)
if current_lambda == 0.0
return 1.0
elseif current_lambda < lambda_minimum_step
return 0.0
end
λ⁻, _ = phase_iii_biobjective_simplex_step(bi_obj_model, current_lambda)
if λ⁻ < current_lambda - lambda_minimum_step
# The Phase III step found a new value for lambda.
return λ⁻
end
    # Okay, so we didn't find a new value; we have a problem. The basis we
    # currently have after the solve is valid for all λ ∈ [λ⁻, current_lambda].
    # So, if we set the weight to λ⁻, then we're going to get the same basis :(.
# Instead of manually performing a simplex step, or changing the basis,
# we're going to do a bisection search in λ to find the next one. It's a
# little slower, because there might be multiple solves, but it's
# simpler for the solvers to implement.
#
# TODO(odow): improve this.
candidate = 0.0
while candidate < current_lambda - lambda_minimum_step
lambda_bar = (candidate + current_lambda) / 2
λ⁻, λ⁺ = phase_iii_biobjective_simplex_step(bi_obj_model, lambda_bar)
if isapprox(λ⁺, current_lambda, atol = lambda_atol)
# Success! We found a new lambda that is provably the next one
# in the sequence. Take a step of at least lambda_minimum_step
# unless we hit λ = 0.
candidate = λ⁻
break
else
candidate = λ⁺
end
        if isapprox(candidate, 0.0, atol = lambda_atol)
break
end
end
# Before we leave, we need to reset the model to the state we found it in.
set_weighted_objective(bi_obj_model, current_lambda)
MOI.optimize!(bi_obj_model.model)
return min(candidate, max(current_lambda - lambda_minimum_step, 0))
end
###
### Utilities for converting an arbitrary MOI model into the standard form:
###
### min c'x
### s.t Ax = b
### x ≥ 0
###
# Here's a type that is used to bridge into Ax = b; x ∈ R₊.
# We can't do SingleVariable-in-GreaterThan because it won't add the required
# slacks, leaving us with x >= l.
MOI.Utilities.@model(
VectorStandardForm,
(),
(MOI.EqualTo,),
(MOI.Nonnegatives,),
(),
(),
(MOI.ScalarAffineFunction,),
(MOI.VectorOfVariables,),
()
)
# We dis-allow free variables to bridge x free into x = y⁺ - y⁻; y⁺, y⁻ >= 0.
function MOI.supports_constraint(
::VectorStandardForm,
::Type{MOI.VectorOfVariables},
::Type{MOI.Reals},
)
return false
end
# We dis-allow SingleVariable-in-S constraints to force bridging into
# VectorOfVariables-in-Nonnegatives.
function MOI.supports_constraint(
::VectorStandardForm{Float64},
::Type{MOI.SingleVariable},
::Type{S},
) where {
S<:Union{
MOI.GreaterThan{Float64},
MOI.LessThan{Float64},
MOI.Interval{Float64},
MOI.EqualTo{Float64},
},
}
return false
end
function vec_bridged_terms(x::MOI.VariableIndex, bridged, index_map)
variable = MOI.Bridges.bridged_variable_function(bridged, index_map[x])
if typeof(variable) == MOI.SingleVariable
return [MOI.ScalarAffineTerm(1.0, variable.variable)]
end
@assert typeof(variable) <: MOI.ScalarAffineFunction
return variable.terms
end
function std_bridged_terms(
terms::Vector{MOI.ScalarAffineTerm{Float64}},
bridged,
index_map,
)
terms_out = MOI.ScalarAffineTerm{Float64}[]
for term in terms
var = index_map[term.variable_index]
for new_term in vec_bridged_terms(var, bridged, index_map)
push!(
terms_out,
MOI.ScalarAffineTerm(
term.coefficient * new_term.coefficient,
new_term.variable_index,
),
)
end
end
return terms_out
end
"""
convert_to_standard_form(
dest::MOI.ModelLike,
src::MOI.ModelLike,
)::Dict{MOI.VariableIndex,Vector{MOI.ScalarAffineTerm{Float64}}}
Convert `src` to an equivalent model in `dest`, where `dest` has the form
`min{c'x: Ax = b, x >= 0}`.
Return a dictionary that maps variables from `src`-space, to an equivalent
vector of `ScalarAffineTerm`'s (if they were summed) in `dest`-space.
"""
function convert_to_standard_form(dest::MOI.ModelLike, src::MOI.ModelLike)
x = MOI.get(src, MOI.ListOfVariableIndices())
# First step: convert model in to `min{c'x: a'x = b, x ∈ R₊}`.
vec_std = VectorStandardForm{Float64}()
vec_bridge = MOI.Bridges.full_bridge_optimizer(vec_std, Float64)
vec_map = MOI.copy_to(vec_bridge, src)
vec_terms =
Dict(xi => vec_bridged_terms(xi, vec_bridge, vec_map) for xi in x)
# Second step: shift constants to RHS.
for index in MOI.get(
vec_std,
MOI.ListOfConstraintIndices{
MOI.ScalarAffineFunction{Float64},
MOI.EqualTo{Float64},
}(),
)
constant = MOI.get(vec_std, MOI.ConstraintFunction(), index).constant
if !iszero(constant)
set = MOI.get(vec_std, MOI.ConstraintSet(), index)
MOI.set(
vec_std,
MOI.ConstraintSet(),
index,
MOI.EqualTo(set.value - constant),
)
MOI.modify(vec_std, index, MOI.ScalarConstantChange(0.0))
end
end
# Third step: convert model in to `min{c'x: a'x = b, xᵢ >= 0}`.
dest_bridge = MOI.Bridges.full_bridge_optimizer(dest, Float64)
dest_map = MOI.copy_to(dest_bridge, vec_std)
# Fourth step: reconcile the variables.
return Dict(
xi => std_bridged_terms(terms, dest_bridge, dest_map) for
(xi, terms) in vec_terms
)
end
###
### Utilities for working with bi-objective SDDP problems.
###
"""
get_next_lambda(
model::SDDP.PolicyGraph{T},
node_index::T,
noise,
λ::Float64,
dest::MOI.ModelLike,
) where {T}
"""
function get_next_lambda(
model::SDDP.PolicyGraph{T},
node_index::T,
noise::Any,
λ::Float64,
dest::MOI.ModelLike;
lambda_minimum_step::Float64,
lambda_atol::Float64,
) where {T}
# Look-up the node that we want to compute the step at.
node = model[node_index]
# Convert `node.subproblem` into the standard form.
dest_variables =
convert_to_standard_form(dest, JuMP.backend(node.subproblem))
x = MOI.get(dest, MOI.ListOfVariableIndices())
function compute_objective_vector(λ)
        # Set the objective and noise in `node.subproblem`.
SDDP.set_trade_off_weight(model, λ)
SDDP.parameterize(node, noise)
# Initialize the objective vector `c`.
c = fill(0.0, length(x))
objective = JuMP.objective_function(node.subproblem)
for (variable, coef) in objective.terms
src_index = JuMP.index(variable)
for term in dest_variables[src_index]
# TODO: here, we assume that .value is the column! This is
# true in Gurobi, but it might not be the case for all solvers.
c[term.variable_index.value] = coef * term.coefficient
end
end
return c
end
c_1 = compute_objective_vector(1.0)
c_2 = compute_objective_vector(0.0)
    # Quickly optimize `dest` to obtain a basis. Note: for sanity's sake, the
# ObjectiveValue of `dest` after this should be identical to the objective
# value of node.subproblem (modulo numerical tolerances).
MOI.set(dest, MOI.Silent(), true)
MOI.optimize!(dest)
# Get the next lambda using MOI calls defined above.
return get_next_lambda(
BiObjectiveModel(dest, c_1, c_2),
λ;
lambda_minimum_step = lambda_minimum_step,
lambda_atol = lambda_atol,
)
end
"""
surrogate_lower_bound(
model::SDDP.PolicyGraph,
optimizer;
global_lower_bound::Real,
lambda_minimum_step::Float64 = 1e-4,
lambda_atol::Float64 = 1e-4,
)
Compute the surrogate lower bound for `model` using `optimizer` to calculate
the weight update.
`global_lower_bound` must be a valid lower bound across the entire weight-space.
"""
function surrogate_lower_bound(
model::SDDP.PolicyGraph,
optimizer;
global_lower_bound::Real,
lambda_minimum_step::Float64 = 1e-4,
lambda_atol::Float64 = 1e-4,
)
key = model.root_children[1].term
node = model[key]
noise = node.noise_terms[1].term
if (length(model.root_children) != 1) || (length(node.noise_terms) != 1)
error("Need deterministic first-stage")
end
weights, bounds = Float64[], Float64[]
λ = 1.0
while true
SDDP.set_trade_off_weight(model, λ)
bound = SDDP.calculate_bound(model)
push!(weights, λ)
push!(bounds, bound)
λ = get_next_lambda(
model,
key,
noise,
λ,
optimizer();
lambda_minimum_step = lambda_minimum_step,
lambda_atol = lambda_atol,
)
if λ == 1.0
break
end
end
lower_bound = 0.0
for i in 2:length(weights)
d_bound = 0.5 * (bounds[i-1] + bounds[i]) - global_lower_bound
lower_bound += d_bound * (weights[i-1] - weights[i])
end
return lower_bound, weights, bounds
end
function print_iteration_header(io)
println(
io,
"------------------------------------------------------------------",
)
println(
io,
" BI-OBJECTIVE SDDP.jl (c) Oscar Dowson, 2019-21 ",
)
println(io)
println(io, " Iterations")
println(
io,
" Maj. Min. SDDP Lower Bound Weight Time (s) ",
)
println(
io,
"------------------------------------------------------------------",
)
return
end
function print_iteration(
io::IO,
major_iterations::Int,
minor_iterations::Int,
sddp_iterations::Int,
lower::Float64,
weight::Float64,
time::Float64,
)
println(
io,
print_value(major_iterations),
" ",
print_value(minor_iterations),
" ",
print_value(sddp_iterations),
" ",
print_value(lower),
" ",
print_value(weight),
" ",
print_value(time),
)
return
end
function print_footer(io::IO)
println(
io,
"------------------------------------------------------------------",
)
return
end
print_value(x::Real) = lpad(Printf.@sprintf("%1.6e", x), 13)
print_value(x::Int) = lpad(Printf.@sprintf("%5d", x), 6)
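# Strategies for choosing the node and noise realization at which the next
# trade-off weight λ is computed.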
abstract type AbstractLambdaUpdate end
struct RandomUpdate <: AbstractLambdaUpdate end
function lambda_update(
::RandomUpdate,
model::SDDP.PolicyGraph,
λ::Float64,
optimizer;
kwargs...,
)
key, node = rand(model.nodes)
noise = rand(node.noise_terms)
return get_next_lambda(model, key, noise.term, λ, optimizer(); kwargs...)
end
struct MinimumUpdate <: AbstractLambdaUpdate end
function lambda_update(
::MinimumUpdate,
model::SDDP.PolicyGraph,
λ::Float64,
optimizer;
kwargs...,
)
if length(model.nodes) != 2
error("Minimum update only implemented for two-stage problems.")
end
weights = Float64[]
for (key, node) in model.nodes
if length(node.noise_terms) != 1
error("Minimum update only implemented for deterministic problems.")
end
noise = node.noise_terms[1]
push!(
weights,
get_next_lambda(model, key, noise.term, λ, optimizer(); kwargs...),
)
end
return maximum(weights)
end
struct FirstStageUpdate <: AbstractLambdaUpdate end
function lambda_update(
::FirstStageUpdate,
model::SDDP.PolicyGraph,
λ::Float64,
optimizer;
kwargs...,
)
key = model.root_children[1].term
node = model[key]
noise = node.noise_terms[1]
return get_next_lambda(model, key, noise.term, λ, optimizer(); kwargs...)
end
function bi_objective_sddp(
model::SDDP.PolicyGraph,
optimizer;
bi_objective_major_iteration_limit::Int = 100,
bi_objective_minor_iteration_limit::Int = 1_000,
bi_objective_sddp_iteration_limit::Int = 10_000,
bi_objective_lower_bound::Float64,
bi_objective_lambda_atol::Float64 = 1e-4,
bi_objective_major_iteration_burn_in::Int = 10,
bi_objective_lambda_update_method::AbstractLambdaUpdate = RandomUpdate(),
bi_objective_post_train_callback::Union{Function,Nothing} = nothing,
kwargs...,
)
print_iteration_header(stdout)
start_time = time()
major_iterations = 0
minor_iterations = 0
sddp_iterations = 0
λ = 1.0
try
while true
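# A new major iteration begins whenever the trade-off weight returns to λ = 1.0.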
if λ == 1.0
major_iterations += 1
end
SDDP.set_trade_off_weight(model, λ)
SDDP.train(
model;
run_numerical_stability_report = false,
add_to_existing_cuts = true,
kwargs...,
)
minor_iterations += 1
sddp_iterations += length(model.most_recent_training_results.log)
if bi_objective_post_train_callback !== nothing
bi_objective_post_train_callback(model, λ)
end
lower_bound, _, _ = surrogate_lower_bound(
model,
optimizer;
global_lower_bound = bi_objective_lower_bound,
lambda_minimum_step = bi_objective_lambda_atol,
lambda_atol = bi_objective_lambda_atol,
)
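# During the burn-in period, use a coarser λ tolerance; it starts near 0.1 and
# shrinks as major iterations accumulate.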
tmp_bi_objective_lambda_tol =
max(
1,
bi_objective_major_iteration_burn_in - major_iterations,
) * (0.1 - bi_objective_lambda_atol) /
bi_objective_major_iteration_burn_in
λ′ = lambda_update(
bi_objective_lambda_update_method,
model,
λ,
optimizer;
lambda_minimum_step = tmp_bi_objective_lambda_tol,
lambda_atol = tmp_bi_objective_lambda_tol,
)
# Clean up the iteration.
print_iteration(
stdout,
major_iterations,
minor_iterations,
sddp_iterations,
lower_bound,
λ,
time() - start_time,
)
λ = λ′
if major_iterations >= bi_objective_major_iteration_limit
break
elseif minor_iterations >= bi_objective_minor_iteration_limit
break
elseif sddp_iterations >= bi_objective_sddp_iteration_limit
break
end
end
catch ex
if isa(ex, InterruptException)
println("Terminating: solve interrupted by user.")
else
rethrow(ex)
end
end
print_footer(stdout)
return surrogate_lower_bound(
model,
optimizer;
global_lower_bound = bi_objective_lower_bound,
lambda_minimum_step = bi_objective_lambda_atol,
lambda_atol = bi_objective_lambda_atol,
)
end
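# An affine function of the trade-off weight, v(λ') = y + Δ * λ', generated
# from a simulation run at weight λ.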
struct Hyperplane
λ::Float64
y::Float64
Δ::Float64
end
# TODO(odow): this function is a pretty inefficient O(N²) operation. We could
# make this more efficient by being clever.
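# Walk the lower envelope of the hyperplanes from λ = 0 to λ = 1, recording
# each vertex (pairwise intersection) along the way.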
function compute_vertices(hyperplanes::Vector{Hyperplane}, atol::Float64)
N = length(hyperplanes)
vertices = Tuple{Float64,Float64}[(0.0, hyperplanes[1].y)]
x_min, i_min = 0.0, 1
while x_min < 1.0
# Here is our current λ co-ordinate
x = vertices[end][1]
# and our current intercept and slope.
y, Δ = hyperplanes[i_min].y, hyperplanes[i_min].Δ
# Now loop through every hyperplane and find the minimum x intersection
# greater than our current x.
x_min, i_min = Inf, 0
for i in 1:N
# Get the intersection of the current hyperplane with the i'th one.
yᵢ, Δᵢ = hyperplanes[i].y, hyperplanes[i].Δ
x′ = if isapprox(y, yᵢ, atol = atol) && isapprox(Δ, Δᵢ; atol = atol)
1.0
else
(y - yᵢ) / (Δᵢ - Δ)
end
if x < x′ < x_min
x_min, i_min = x′, i
end
end
if x_min ≈ 1.0
y, Δ = hyperplanes[end].y, hyperplanes[end].Δ
end
push!(vertices, (x_min, y + Δ * x_min))
end
return vertices
end
struct ObjectiveSpacePoint
f₁::Float64
f₂::Float64
λ::Float64
end
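# The weighted-sum (scalarized) objective value of point `p` at weight `λ`.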
V(p::ObjectiveSpacePoint, λ::Float64) = λ * p.f₁ + (1 - λ) * p.f₂
function compute_upper_bound(
points::Vector{ObjectiveSpacePoint};
atol::Float64,
lower_bound::Float64,
)
# Each element of `hyperplanes` is a `Hyperplane(λ, intercept, slope)`.
hyperplanes =
[Hyperplane(p.λ, V(p, 0.0), V(p, 1.0) - V(p, 0.0)) for p in points]
# Drop duplicate (coincident) hyperplanes.
unique!(h -> (h.y, h.Δ), hyperplanes)
# Sort first by smallest intercept, then by largest slope, then by x.
sort!(hyperplanes, by = h -> (h.y, -h.Δ, h.λ))
# Now compute the vertices.
vertices = compute_vertices(hyperplanes, atol)
# Now compute the area between the piecewise-linear envelope and `lower_bound`
# using the trapezoidal rule.
area = 0.0
for i in 2:length(vertices)
xl, yl = vertices[i-1]
xr, yr = vertices[i]
area += (xr - xl) * ((yl + yr) / 2 - lower_bound)
end
return area
end
function surrogate_upper_bound(
model::SDDP.PolicyGraph,
optimizer;
global_lower_bound::Real,
lambda_minimum_step::Float64 = 1e-4,
lambda_atol::Float64 = 1e-4,
)
if length(model.nodes) != 2
error("Upper bound only defined for two-stage problems")
elseif any(length(node.noise_terms) > 1 for (key, node) in model.nodes)
error("Upper bound only defined for deterministic problems")
end
points = ObjectiveSpacePoint[]
λ = 1.0
while true
SDDP.set_trade_off_weight(model, λ)
simulation = SDDP.simulate(model, 1, [:objective_1, :objective_2])
push!(
points,
ObjectiveSpacePoint(
sum(s[:objective_1] for s in simulation[1]),
sum(s[:objective_2] for s in simulation[1]),
λ,
),
)
key = model.root_children[1].term
node = model[key]
noise = node.noise_terms[1].term
λ = get_next_lambda(
model,
key,
noise,
λ,
optimizer();
lambda_minimum_step = lambda_minimum_step,
lambda_atol = lambda_atol,
)
if λ == 1.0
break
end
end
return compute_upper_bound(
points;
atol = lambda_atol,
lower_bound = global_lower_bound,
)
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 40110 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#! format: off
const thermal_ub = Array{Float64, 2}[
[657 1350 36 250 250 28 529 44 255 235 386 386 145 226 131 87 204 923 923 400 100 200 169 386 28 200 272 30 168 440 400 258 258 258 64 340 1058 1058 10 197 175 206 54],
[66 485 485 350 161 72 4 20 100 132 262 363 24 126 320 20 640],
[13 11 32 11 347 152 150 13 15 220 220 13 15 138 347 149 149 15 102 15 168 13 13 103 136 53 66 186 50 156 171 533 323],
[166 166]
]
const thermal_lb = Array{Float64,2}[
[520.0 1080.0 0.0 59.3 27.1 0.0 0.0 0.0 219.78 199.99 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 399.99 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 71.7 28.8 0.0 132.98 0.0 0.0 0.0],
[0.0 0.0 0.0 210.0 0.0 27.0 0.0 9.56 25.0 79.46 147.54 228.02 0.0 49.66 105.0 5.0 0.0],
[0.0 0.0 0.0 0.0 0.7 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 223.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 348.8 0.0],
[0 0]
]
const thermal_obj = Array{Float64, 2}[
[21.49 18.96 937.0 194.79 222.22 140.58 6.27 505.92 0.01 112.46 159.97 250.87 550.66 188.89 645.3 150.0 145.68 274.54 253.83 37.8 51.93 90.69 131.68 317.98 152.8 470.34 317.98 523.35 730.54 310.41 730.54 101.33 140.34 292.49 610.33 487.56 122.65 214.48 1047.38 0.01 329.57 197.85 733.54],
[564.57 219.0 219.0 50.47 541.93 154.1 180.51 218.77 189.54 143.04 142.86 116.9 780.0 115.9 115.9 248.31 141.18],
[464.64 464.64 455.13 464.64 834.35 509.86 509.86 464.64 464.64 185.09 492.29 464.64 464.64 188.15 82.34 329.37 329.37 464.64 464.64 464.64 317.19 464.64 464.64 678.03 559.39 611.57 611.56 204.43 325.67 678.03 329.2 70.16 287.83],
[329.56 329.56]
]
const N_THERMAL = [43, 17, 33, 2]
# Sanity check on the dimensions of the thermal data.
@assert N_THERMAL == length.(thermal_obj) == length.(thermal_lb) ==
length.(thermal_ub)
const hydro_ub = [45414.3, 13081.5, 9900.9, 7629.9]
const storedEnergy_initial = [59419.3000, 5874.9000, 12859.2000, 5271.5000]
const storedEnergy_ub = [200717.6, 19617.2, 51806.1, 12744.9]
const exchange_ub = Array{Float64, 2}[
[0 7379 1000 0 4000],
[5625 0 0 0 0],
[600 0 0 0 2236],
[0 0 0 0 99999],
[3154 0 3951 3053 0]
]
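# Load-shedding (deficit) data: `deficit_obj[j]` is the cost of deficit segment
# `j`, and `deficit_ub[j]` is the maximum share of demand that may be shed in
# that segment.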
const deficit_obj = [1142.8, 2465.4, 5152.46, 5845.54]
const deficit_ub = [0.05, 0.05, 0.1, 0.8]
const demand = Array{Float64, 2}[
[45515 11692 10811 6507],
[46611 11933 10683 6564],
[47134 12005 10727 6506],
[46429 11478 10589 6556],
[45622 11145 10389 6645],
[45366 11146 10129 6669],
[45477 11055 10157 6627],
[46149 11051 10372 6772],
[46336 10917 10675 6843],
[46551 11015 10934 6815],
[46035 11156 11004 6871],
[45234 11297 10914 6701]
]
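# Energy inflow scenarios, indexed as `scenarios[system][month][ω]`.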
const scenarios = [
[[56896.8, 56451.95, 65408.16, 46580.39, 54645.97, 36916.81, 63301.09, 53645.87, 57959.52, 54200.71, 48448.49, 45010.7, 81105.82, 35019.44, 41229.43, 80514.3, 51163.61, 53329.75, 50702.92, 42240.62, 55109.13, 33675.91, 25129.81, 29190.4, 36301.34, 37791.77, 58268.57, 39743.58, 62565.51, 49713.71, 72890.69, 45726.42, 66364.28, 37928.66, 62789.08, 73042.18, 66665.7, 52993.58, 31559.83, 56002.39, 30617.05, 40899.73, 54433.46, 62568.66, 51341.31, 40927.71, 73413.29, 69900.43, 66850.87, 76182.63, 72077.01, 84213.88, 64555.38, 72950.98, 53957.75, 53046.27, 47082.36, 58093.71, 80492.3, 47948.51, 60261.41, 47993.66, 64712.19, 52583.97, 55161.47, 92429.3, 43304.33, 58739.64, 55468.74, 40817.11, 63727.25, 60146.67, 47266.3, 67074.83, 46916.31, 95400.9, 38313.51, 54999.69, 71369.67, 76664.21, 73633.5, 46999.32], [86488.31, 61922.34, 51128.33, 37113.35, 72711.15, 29950.27, 43981.97, 45561.95, 57663.45, 75154.55, 38353.62, 51036.28, 73890.02, 47726.22, 71175.26, 62788.22, 61001.38, 57412.95, 72857.05, 65235.58, 67209.56, 50100.68, 26092.14, 44292.32, 27535.59, 25749.92, 62387.31, 57549.88, 50654.11, 59386.44, 77049.72, 60836.23, 59055.5, 61202.37, 78078.5, 78991.29, 71465.94, 47197.58, 37602.63, 57691.13, 20504.58, 58197.41, 54655.96, 45842.69, 48096.68, 47690.98, 71610.48, 47429.75, 89520.11, 92447.91, 47928.35, 71909.26, 47626.61, 70620.4, 50034.14, 59045.22, 57525.75, 61449.32, 41790.65, 65572.44, 86433.67, 68600.8, 51672.08, 83801.3, 41190.08, 73474.32, 53990.6, 50665.03, 67756.3, 40550.35, 70939.21, 61771.53, 71467.42, 59049.86, 49152.53, 91724.84, 63801.6, 66431.93, 59988.07, 50373.33, 49701.05, 56717.86], [88646.94, 50742.1, 40424.25, 35392.72, 61323.17, 53709.93, 39542.99, 40185.12, 34083.82, 68610.98, 35718.51, 62628.01, 69692.0, 51495.85, 62809.69, 70220.74, 93477.21, 59883.76, 53172.92, 56491.72, 65725.0, 77701.54, 32542.38, 29444.29, 29070.61, 37872.92, 63481.8, 47988.85, 52728.54, 63441.32, 78376.69, 60248.39, 41110.96, 37183.08, 83982.68, 72883.91, 61051.15, 52788.25, 33315.79, 58386.71, 25463.7, 52687.47, 48144.5, 63161.07, 36378.46, 48314.32, 39789.9, 54422.3, 57184.51, 62386.9, 43430.01, 85259.92, 39652.55, 71335.51, 47263.74, 47293.6, 62981.06, 54515.96, 43462.15, 68127.72, 55948.83, 56153.76, 61042.16, 52448.52, 54230.46, 57821.91, 55790.78, 59436.74, 62295.26, 38312.37, 49257.88, 48780.76, 62710.63, 57188.43, 59325.77, 51971.26, 64294.7, 47939.56, 54351.96, 86775.98, 37212.98, 49482.34], [64581.71, 35954.27, 34627.46, 27179.93, 54644.39, 37115.42, 35188.12, 30857.44, 28957.8, 38145.67, 33851.76, 45004.41, 41888.5, 32537.77, 61618.73, 45492.33, 57661.37, 40768.91, 36675.8, 39891.03, 46600.38, 41833.13, 35331.16, 24828.12, 28963.44, 26392.81, 57277.26, 40083.16, 39037.46, 40510.26, 49649.42, 35895.16, 28420.51, 27653.31, 52072.86, 47120.75, 42126.42, 31221.08, 23267.58, 33811.21, 21492.44, 37357.77, 49257.71, 56164.65, 34749.3, 38150.11, 44706.32, 35701.34, 40448.63, 52746.17, 40067.9, 62428.88, 42785.49, 52326.19, 32913.93, 41161.01, 44123.86, 36565.93, 35065.17, 68823.2, 50669.57, 47121.6, 43602.65, 47373.13, 36659.8, 45289.32, 45618.72, 34198.26, 38268.72, 28944.83, 31516.06, 42862.66, 48390.75, 37943.12, 50501.94, 35807.24, 52671.46, 49262.74, 46138.46, 55543.29, 32317.51, 51632.89], [43078.74, 27000.91, 24456.72, 19080.88, 34731.77, 24512.01, 27671.57, 25654.34, 24788.09, 29380.5, 18923.09, 30031.07, 26936.13, 22540.12, 34858.01, 30581.26, 34635.02, 24931.13, 27008.57, 27616.21, 28566.65, 25574.69, 21342.69, 30732.45, 17083.18, 29745.45, 34161.48, 32373.28, 25060.83, 30213.42, 
39757.11, 26435.75, 20601.37, 21707.83, 45436.42, 34622.49, 28792.77, 22897.49, 17471.04, 24573.34, 18395.36, 23937.22, 31238.07, 34468.55, 23514.07, 29321.19, 29379.36, 28537.54, 34587.4, 35207.58, 27619.03, 39712.36, 35169.64, 37162.41, 31130.86, 39983.01, 35946.54, 28366.52, 29951.98, 40186.78, 50571.36, 32563.19, 32207.74, 34805.16, 26620.61, 32791.29, 35023.07, 26037.19, 25014.17, 22937.9, 28856.74, 27047.64, 37864.66, 31422.07, 29368.14, 29439.74, 36583.08, 31676.46, 29744.25, 34050.76, 30086.86, 29348.76], [32150.18, 25285.24, 19099.7, 14173.57, 26184.86, 17752.6, 23148.61, 20780.68, 20168.94, 21178.59, 16560.55, 29365.82, 23855.04, 17234.34, 26267.7, 26141.76, 26250.48, 22090.39, 21541.84, 20595.83, 23051.97, 22945.63, 17305.82, 23164.67, 18172.0, 32394.56, 25360.63, 28995.24, 20275.84, 24027.02, 27505.81, 22703.44, 17584.25, 16834.37, 30722.54, 25847.39, 26649.2, 18519.69, 18348.12, 20415.98, 19592.53, 19778.88, 25505.35, 29621.4, 18642.25, 33204.9, 25960.33, 27167.15, 26294.99, 28241.02, 25324.0, 39552.75, 24145.72, 28054.54, 22726.21, 34494.79, 32289.77, 23823.56, 23255.09, 29651.66, 34628.41, 31515.82, 28821.56, 25045.89, 21037.21, 39653.57, 26762.9, 24243.85, 21432.57, 20539.73, 20990.06, 22862.46, 33766.52, 28192.96, 23389.44, 24328.4, 28371.8, 25816.19, 23648.83, 28276.29, 38658.88, 38515.33], [25738.04, 19913.2, 16790.67, 11860.9, 19927.38, 14706.5, 16354.45, 18619.35, 16549.77, 16631.69, 15759.09, 23062.33, 18597.07, 14480.2, 24962.54, 26915.97, 22787.45, 17159.91, 17111.7, 17576.81, 19086.23, 17400.83, 13620.22, 15434.43, 15196.4, 21098.2, 28206.5, 22466.48, 16221.58, 20073.41, 21229.3, 17647.23, 14939.49, 15664.9, 27837.17, 21366.45, 21327.28, 15599.11, 13938.13, 19611.86, 17830.94, 21998.19, 23299.17, 23916.43, 18258.67, 26281.64, 20098.85, 24116.7, 22630.54, 26739.58, 19458.87, 38487.54, 20195.47, 23486.83, 19228.76, 24159.73, 21535.23, 21521.31, 24639.11, 24871.76, 26304.24, 23268.23, 24487.15, 23871.07, 18284.17, 28291.17, 20945.59, 22369.94, 19811.22, 17389.8, 18273.11, 19522.99, 27882.67, 21450.9, 20552.6, 23607.35, 21404.13, 28296.36, 20808.6, 24527.67, 26593.14, 31033.18], [20606.05, 16801.72, 14192.07, 9904.96, 19833.32, 13891.48, 13870.17, 16287.23, 13383.61, 13732.02, 15122.03, 16335.75, 15380.33, 12027.54, 16431.03, 18109.8, 19971.19, 16989.68, 14138.87, 13614.16, 15974.7, 14098.04, 11590.2, 11597.36, 11400.63, 23751.08, 25079.71, 16940.29, 15396.49, 17698.38, 17329.15, 15414.73, 13043.51, 13406.36, 21583.89, 17619.21, 16981.52, 15169.28, 11339.97, 14285.46, 13204.94, 19390.34, 18399.97, 18752.41, 14739.07, 25994.83, 15773.26, 18723.76, 21783.59, 21137.32, 17354.26, 26962.54, 20638.69, 19665.77, 22455.74, 18405.64, 17890.93, 25762.9, 22246.87, 19802.24, 22481.12, 20835.04, 19030.91, 16927.84, 16072.49, 21382.07, 22745.14, 15799.05, 18981.85, 14811.25, 16522.04, 15604.83, 19421.44, 17115.78, 17471.55, 19105.94, 22977.11, 24641.57, 16121.9, 24298.46, 17342.35, 19248.41], [22772.16, 15034.4, 13741.74, 11835.56, 18289.43, 17056.12, 12040.76, 14448.98, 12275.62, 12255.37, 17561.96, 16624.51, 14661.5, 10510.08, 14125.62, 14929.4, 24107.77, 13659.7, 12283.03, 11761.84, 13469.6, 14120.36, 12719.73, 10034.25, 11259.79, 16559.39, 31762.07, 20615.73, 12660.14, 13992.03, 15396.42, 16368.64, 11565.03, 10941.74, 17168.12, 17513.5, 16020.86, 13917.48, 9500.66, 19390.18, 12671.12, 18785.94, 18905.01, 16947.82, 12242.09, 29837.37, 18936.32, 19703.51, 27774.12, 22748.74, 14303.81, 22450.11, 22886.6, 18904.29, 17416.04, 18858.4, 15528.36, 26579.42, 26121.59, 17212.57, 28153.06, 
23376.16, 15224.49, 15583.27, 21217.4, 20427.43, 23452.75, 16741.52, 30198.36, 15672.8, 17535.92, 15052.75, 15273.23, 19793.78, 18612.71, 13365.51, 16452.14, 32081.91, 13964.53, 16749.78, 14773.73, 17116.17], [23767.55, 22347.14, 17339.43, 12925.78, 34902.46, 14124.93, 21610.55, 18611.16, 13874.47, 14368.61, 21513.16, 18401.39, 23147.1, 11673.81, 17407.93, 19596.07, 25084.21, 15580.89, 14990.72, 17952.25, 15543.08, 16393.5, 18436.23, 11431.06, 11965.3, 14726.15, 22453.76, 21456.65, 14941.06, 16395.21, 14685.41, 25111.82, 13330.21, 17590.17, 30383.6, 23226.29, 15479.99, 17075.9, 18972.43, 21841.24, 18849.04, 42223.31, 26037.88, 20118.15, 22821.5, 30995.92, 18894.75, 18612.61, 25497.0, 21882.33, 26321.83, 32933.12, 21215.67, 18142.29, 15945.98, 21042.48, 20583.84, 20560.02, 26845.0, 26617.84, 34139.15, 29715.86, 16112.45, 25927.78, 22152.14, 25137.8, 33871.69, 13858.76, 18798.82, 22151.35, 13284.47, 15851.55, 21942.79, 23406.24, 26217.27, 13559.07, 21131.23, 37332.1, 22115.41, 25199.08, 15185.71, 24053.0], [25831.89, 25004.76, 18503.66, 13594.84, 26176.69, 18411.58, 35511.48, 22441.18, 24967.68, 29859.02, 27124.93, 25773.5, 31708.34, 21600.95, 29777.43, 22391.65, 24543.99, 22869.29, 21100.25, 30429.5, 16296.95, 24523.57, 22523.1, 15501.59, 18872.23, 22472.39, 28037.37, 24399.38, 22887.14, 26654.33, 22534.09, 26281.36, 20988.75, 24865.56, 34692.72, 35360.85, 27010.2, 20988.36, 38522.5, 24500.92, 25310.61, 43420.75, 35193.52, 21707.73, 31387.95, 40779.06, 28225.77, 30537.62, 33284.42, 27321.85, 47896.08, 38532.69, 22792.1, 24029.92, 17271.2, 29198.04, 26072.51, 27553.75, 24614.4, 22967.04, 46357.71, 21313.7, 23953.59, 25345.95, 36517.51, 33685.87, 28944.96, 20108.6, 29306.63, 26133.38, 22837.29, 21655.14, 26349.73, 29076.36, 29204.23, 23931.0, 26396.78, 37931.62, 29379.11, 27410.53, 23276.7, 22846.94], [38566.5, 48755.6, 36070.84, 33700.23, 32033.65, 34416.72, 53788.68, 40579.16, 37058.66, 34210.67, 40816.63, 41600.78, 39227.88, 27408.38, 59081.29, 28648.63, 44644.33, 48005.17, 34140.7, 43351.87, 25642.92, 28734.88, 33971.28, 23822.86, 35724.0, 44481.07, 41970.5, 30205.39, 27119.48, 42229.06, 26492.55, 52567.64, 16616.9, 38767.96, 53610.53, 42361.49, 41348.52, 38279.56, 39293.9, 24936.41, 45488.06, 47985.13, 44074.47, 36129.72, 44509.73, 59740.8, 50561.77, 46501.2, 43322.51, 53996.58, 63642.28, 60795.26, 44335.09, 32110.3, 41561.28, 48844.35, 32901.12, 63065.14, 25343.75, 35462.89, 54242.97, 35365.63, 36427.0, 34081.72, 46049.51, 50920.23, 39745.84, 31191.19, 45301.54, 40578.93, 35212.74, 33943.97, 40568.42, 56921.92, 56973.22, 30130.6, 38489.84, 62539.4, 44712.54, 44675.39, 27895.03, 40031.75]],
[[7409.65, 5285.8, 3001.83, 4138.99, 3066.59, 10383.66, 5287.43, 8187.34, 3943.3, 10381.68, 6545.04, 4552.72, 2079.84, 6164.98, 1251.1, 8897.66, 6681.96, 5057.3, 2765.38, 5640.01, 6542.95, 3614.85, 5157.45, 9775.0, 3532.37, 7963.4, 4487.86, 4568.18, 5754.32, 2392.87, 5033.56, 4662.57, 5379.87, 3421.47, 3443.25, 10618.72, 7738.41, 4293.84, 9312.11, 5193.9, 21746.41, 4706.82, 10653.16, 8235.54, 6049.5, 12573.63, 10453.06, 4559.98, 3345.05, 7615.64, 13613.82, 4352.26, 6707.33, 3669.72, 2201.57, 10074.15, 4184.64, 10019.61, 17804.81, 3565.4, 5879.48, 5832.71, 4577.42, 25365.49, 12539.04, 10514.99, 15729.86, 5633.35, 5183.68, 13152.25, 7376.78, 7860.47, 8996.77, 5580.37, 4540.66, 6978.33, 7455.89, 5698.04, 16393.73, 12394.23, 6995.32, 9082.73], [3310.83, 8062.89, 3654.84, 9033.31, 2362.85, 3898.48, 4513.4, 13477.36, 4740.13, 7285.45, 12151.87, 10352.82, 3486.56, 3406.67, 2971.79, 20768.1, 9061.81, 8224.11, 1570.63, 5359.67, 12352.37, 3386.15, 6784.36, 6536.57, 4660.27, 8235.06, 8088.39, 3340.72, 7829.11, 4352.61, 4795.93, 5413.19, 11713.01, 4757.09, 4522.48, 20932.73, 9141.88, 3285.2, 10066.11, 4455.28, 11552.65, 12269.5, 10468.36, 9002.04, 5860.44, 7508.74, 12318.39, 3235.6, 2194.14, 5643.44, 11462.12, 6245.81, 6277.39, 8884.32, 4848.83, 9729.31, 5485.64, 14811.7, 10648.59, 3761.5, 7278.83, 9536.28, 13817.35, 13096.53, 17130.25, 22862.44, 24230.34, 8581.54, 7113.62, 20982.56, 6045.08, 9222.95, 4546.8, 2940.42, 3964.14, 7361.53, 5088.3, 5016.72, 15194.28, 21152.87, 5652.86, 7008.55], [3531.16, 8957.82, 3178.5, 5661.19, 3920.46, 3192.74, 7943.57, 4450.77, 9119.71, 4152.1, 7162.92, 8022.63, 2625.92, 6848.3, 3557.74, 14045.21, 6520.86, 6961.17, 3556.14, 8411.78, 11429.84, 2230.74, 3992.71, 7969.13, 4549.48, 3313.02, 4545.53, 8838.47, 4668.06, 3669.42, 15984.43, 6576.64, 10767.53, 3570.63, 4235.19, 13186.42, 10917.71, 2383.55, 7337.93, 4407.09, 11764.32, 9027.47, 6818.93, 9784.05, 5397.65, 8063.08, 8816.87, 3862.52, 3064.56, 9661.19, 4296.19, 4495.0, 5847.07, 6069.88, 4624.77, 3468.4, 4250.2, 7617.02, 6740.4, 2684.69, 8381.6, 8806.7, 7754.79, 7409.02, 13363.84, 9009.28, 19240.02, 5934.64, 8157.78, 11087.25, 4701.61, 8340.18, 3298.56, 2573.23, 3841.08, 9617.28, 4361.84, 4489.77, 9947.14, 12903.63, 4081.37, 12931.71], [2353.66, 22946.34, 1915.96, 7733.04, 2226.69, 2185.88, 6631.63, 6487.54, 5866.0, 8074.36, 7868.09, 10546.97, 1688.78, 3087.12, 1741.49, 5864.41, 3458.65, 7225.75, 6108.43, 3926.93, 3806.83, 1902.29, 3819.01, 6620.64, 9949.45, 11652.24, 4593.81, 3802.02, 7744.68, 3889.71, 9788.63, 3406.09, 6520.22, 6098.12, 3651.4, 5380.46, 4926.98, 2946.66, 12300.54, 3184.24, 12646.55, 7475.65, 5240.93, 4617.32, 3535.66, 5904.84, 7948.66, 1798.65, 3848.84, 4646.03, 4152.64, 2387.97, 6547.01, 10506.71, 8239.95, 9202.59, 5175.82, 7624.21, 12563.21, 2986.48, 7367.14, 5978.42, 6962.84, 4322.28, 11487.66, 3009.02, 31342.11, 8825.75, 4447.53, 9064.37, 4165.87, 4096.91, 3955.58, 6297.93, 2904.26, 8573.7, 5924.05, 1862.2, 20304.29, 11443.51, 4044.25, 7779.19], [17558.02, 17069.88, 2373.08, 6640.39, 1450.68, 5979.91, 3956.49, 14053.4, 11313.75, 8310.5, 17098.26, 11110.55, 3212.31, 1592.55, 1334.46, 5709.2, 4518.4, 11781.72, 4506.92, 5104.84, 2133.69, 1043.65, 3239.48, 18109.4, 14302.91, 12349.19, 4368.35, 2571.06, 8080.39, 3456.32, 6937.35, 4590.22, 3230.86, 6796.7, 14037.3, 3558.01, 2740.89, 2453.37, 6787.76, 7295.93, 15120.17, 3096.41, 12410.99, 4217.96, 2908.24, 7652.33, 3403.23, 1455.04, 15926.21, 6319.06, 3693.18, 2355.48, 11477.68, 7674.08, 7162.96, 28746.46, 19092.13, 12718.81, 13068.09, 2481.35, 
25132.34, 12574.9, 11651.35, 2542.44, 3187.46, 3284.15, 22128.38, 4682.77, 4893.5, 12036.91, 9303.87, 4280.55, 8789.31, 14544.79, 1575.72, 19676.18, 9801.09, 2633.78, 23860.08, 6368.47, 5820.42, 5027.03], [16500.03, 14116.42, 2586.88, 5006.4, 4870.91, 24757.16, 3074.31, 18436.3, 8302.23, 5509.57, 12544.69, 8996.38, 11259.67, 2864.72, 2111.65, 9705.3, 8521.81, 6640.67, 8437.52, 4542.81, 2130.2, 7527.64, 5456.18, 20383.85, 19819.54, 7328.12, 6529.82, 7777.59, 7536.23, 6013.21, 8535.54, 4081.35, 2425.2, 5661.62, 5779.88, 8475.7, 5158.0, 2057.6, 14537.4, 13950.47, 18291.51, 13432.11, 14519.14, 10006.67, 5811.61, 14789.01, 6019.69, 1852.84, 5792.17, 4286.76, 3951.48, 11707.22, 19177.84, 4478.51, 8143.01, 16724.45, 14358.65, 3659.4, 31530.31, 10333.43, 29102.87, 11670.46, 16081.5, 5227.65, 7949.58, 10093.94, 8687.4, 10097.05, 5551.01, 11186.02, 11277.68, 7690.47, 8274.21, 17201.4, 2327.87, 7759.36, 9819.79, 3875.86, 10948.84, 8435.22, 14993.31, 18801.11], [13120.24, 10030.45, 2757.54, 3944.6, 9554.59, 9475.58, 4707.66, 20298.31, 7357.27, 6166.76, 8463.41, 8458.0, 7872.45, 2652.39, 6656.49, 16857.63, 6573.95, 7879.54, 4984.1, 4187.22, 2800.89, 8819.05, 4336.61, 20909.6, 24727.53, 6097.58, 19239.09, 4004.02, 5157.97, 3911.69, 6409.4, 4584.93, 2249.59, 6568.88, 14177.95, 9592.74, 6272.61, 3832.52, 9938.38, 16873.78, 16428.84, 11492.41, 18006.83, 7315.45, 4677.58, 8160.76, 8118.81, 6917.4, 7952.26, 10505.13, 3118.92, 23453.1, 14574.31, 5131.47, 4667.29, 13582.0, 5885.88, 8638.57, 14918.85, 7656.1, 21361.03, 19596.13, 19587.39, 12142.76, 14952.77, 12061.01, 14055.5, 20352.48, 11777.6, 14295.29, 7238.2, 7289.36, 12705.6, 9615.78, 3154.55, 12548.16, 6702.12, 12635.41, 11679.29, 22927.29, 10156.43, 16484.93], [5954.69, 7197.01, 4112.05, 5362.07, 13882.01, 15078.02, 6795.61, 6066.34, 4336.22, 8112.59, 16886.68, 7940.07, 12915.68, 1401.03, 4058.39, 6663.63, 8236.75, 17040.28, 5295.01, 8461.83, 1378.5, 3474.52, 3507.61, 6583.59, 10870.33, 10369.29, 35617.04, 9715.58, 6528.27, 13177.69, 3631.91, 3200.75, 5859.37, 10829.09, 20506.31, 9050.58, 9737.41, 1846.95, 4687.78, 6931.81, 12516.57, 22925.03, 20596.57, 5214.97, 11164.21, 17192.65, 16108.22, 6287.81, 8234.75, 18033.61, 2752.96, 10713.65, 28527.49, 6476.5, 6438.9, 10718.83, 2589.99, 10984.03, 14606.87, 7739.52, 17353.14, 6260.07, 7238.28, 6017.42, 10574.14, 18330.91, 25167.3, 4962.26, 6591.8, 8030.21, 12134.87, 3227.84, 4562.76, 7851.69, 5173.4, 6608.72, 9306.11, 18805.57, 9296.72, 31196.1, 8135.09, 18320.07], [15399.44, 13537.18, 5915.66, 5875.27, 17292.78, 12754.63, 8251.08, 5421.77, 10006.75, 4748.17, 9616.28, 6338.66, 10938.71, 2541.8, 4048.12, 5684.05, 16548.61, 5575.83, 7714.25, 6252.89, 1558.46, 10237.83, 13271.92, 21222.47, 10114.31, 11769.31, 34397.62, 16435.26, 10833.0, 13945.63, 20895.3, 9113.18, 7687.65, 12819.22, 20988.93, 17385.88, 17547.31, 3909.91, 6856.03, 7175.87, 6900.44, 28886.9, 23298.33, 8385.69, 17614.78, 10499.01, 6933.26, 9012.05, 8563.81, 17957.71, 6226.46, 6350.63, 12862.26, 7110.82, 8733.01, 7307.65, 8510.56, 29671.54, 20474.89, 2870.43, 12969.48, 12128.0, 4644.39, 7472.78, 13396.85, 8022.32, 26930.49, 5524.87, 26638.17, 9594.72, 15017.66, 2798.56, 8558.21, 27069.78, 5406.34, 8853.16, 8238.13, 30269.06, 7007.25, 25803.98, 4971.36, 17441.16], [7850.88, 13597.18, 11670.8, 6746.6, 36584.84, 13320.9, 12200.64, 4982.65, 5970.78, 7470.02, 9492.8, 7621.29, 8322.11, 2490.22, 4395.21, 10447.3, 13686.5, 7277.16, 6365.73, 19333.71, 15075.85, 16122.96, 17587.37, 25191.28, 6334.09, 6993.78, 13910.82, 12520.11, 7378.23, 11912.55, 21803.41, 
11034.76, 21860.9, 7478.8, 18480.25, 16085.96, 7805.79, 3844.28, 7906.84, 9919.52, 7115.0, 18663.96, 14475.25, 4222.06, 20499.18, 7058.89, 11182.55, 4538.99, 25843.05, 11911.52, 7353.82, 19796.33, 12475.44, 4122.11, 8805.84, 13543.51, 6362.98, 12092.85, 24147.39, 8451.31, 7858.68, 22735.78, 11039.65, 15096.44, 18320.99, 36613.28, 25910.59, 13602.81, 24927.54, 26474.92, 19037.43, 5781.32, 14390.44, 29799.22, 5096.17, 10142.32, 22134.3, 25257.81, 6930.86, 11660.59, 7665.46, 13707.82], [4509.3, 5956.02, 4897.74, 2838.94, 9486.05, 7342.55, 12558.93, 4839.55, 14596.27, 4287.32, 12082.84, 3002.53, 5091.7, 4482.89, 2894.87, 6503.62, 5704.0, 8068.35, 3001.24, 7240.1, 11074.37, 9961.52, 14766.35, 6686.95, 3664.62, 3078.75, 10294.27, 11660.97, 3176.76, 11786.36, 18577.84, 5851.82, 19824.45, 4230.01, 11030.46, 11198.74, 5628.01, 8131.1, 12167.7, 3961.64, 2761.54, 11026.96, 9302.85, 6064.25, 9837.97, 11684.02, 10827.06, 8561.39, 19505.89, 11117.88, 7537.4, 33286.07, 12995.34, 4408.2, 10977.37, 6180.76, 4074.81, 5343.94, 17552.87, 7728.69, 8592.9, 6603.4, 12967.45, 5876.19, 11275.98, 34404.9, 7384.03, 5413.47, 8005.45, 7696.49, 17418.65, 7435.18, 12825.12, 12732.29, 7180.77, 14221.95, 19442.64, 14550.37, 5671.42, 7329.35, 4929.09, 6657.47], [5473.68, 8030.47, 2065.5, 5781.15, 10314.59, 4600.83, 5444.76, 3112.05, 20724.22, 6445.54, 9276.67, 2267.17, 2927.15, 3227.27, 3228.81, 8524.09, 7334.81, 2565.18, 2222.52, 5500.38, 6421.41, 4260.7, 6250.58, 4378.49, 3782.47, 2427.42, 6219.46, 11083.22, 2485.47, 5262.05, 8570.68, 3107.63, 9662.31, 3963.08, 15349.18, 11686.32, 7544.01, 5189.81, 5039.03, 10874.26, 2428.01, 10263.97, 5276.74, 5041.23, 18263.71, 11363.74, 7674.83, 5696.37, 13927.11, 16004.66, 10668.29, 16321.75, 8851.65, 1441.7, 10043.25, 3790.37, 2866.44, 3303.62, 7453.19, 9286.47, 5306.86, 11069.76, 7351.33, 4276.83, 8898.83, 13236.04, 5792.31, 4056.85, 6369.16, 8541.41, 16834.87, 17174.26, 5906.74, 4262.22, 7427.78, 7653.82, 5007.68, 12105.22, 16680.07, 3937.77, 4665.31, 6575.95]],
[[14125.25, 11137.33, 14894.38, 17872.77, 12961.77, 8523.07, 13395.37, 17993.41, 15030.43, 9527.25, 14352.85, 13932.39, 18874.34, 18788.32, 18242.14, 20747.54, 11025.3, 18356.93, 22517.23, 13704.31, 12840.93, 9696.21, 11746.48, 13976.56, 7957.88, 15767.78, 18055.11, 11829.15, 10992.18, 10542.52, 16792.82, 10409.15, 16252.85, 12511.3, 15316.82, 12412.16, 14674.8, 16367.04, 10164.23, 19264.71, 5270.05, 15730.58, 13662.67, 13716.82, 12656.45, 6181.17, 15114.16, 16890.29, 18895.19, 18615.94, 17437.31, 21132.97, 19163.62, 18561.38, 18700.06, 9724.1, 16159.27, 11197.38, 28071.03, 10441.34, 15200.44, 18079.55, 15088.78, 8983.27, 13805.93, 17758.16, 10343.59, 9341.51, 13158.72, 10240.83, 16854.34, 12301.54, 9778.86, 11834.89, 12967.66, 16870.53, 5820.28, 15967.6, 10283.08, 13164.8, 17121.77, 5010.98], [13168.57, 13524.3, 16900.66, 14160.46, 16846.85, 8119.92, 16599.45, 13539.99, 17196.18, 14465.81, 14219.37, 14144.77, 26692.87, 15242.93, 20706.68, 30724.15, 14769.21, 13844.6, 27899.16, 13483.77, 12112.76, 13423.51, 5439.56, 8209.26, 12214.02, 7715.27, 21173.92, 15375.77, 11386.96, 17527.71, 22630.29, 15919.25, 19438.28, 24177.46, 14803.55, 17865.17, 13618.77, 14646.72, 12920.5, 19019.59, 4499.9, 8458.2, 11239.43, 10813.58, 12213.21, 6033.53, 18114.37, 16523.37, 29515.57, 30803.18, 15454.45, 23344.96, 9709.56, 24975.85, 22754.39, 7973.19, 10963.66, 7855.45, 12452.22, 15734.95, 29990.87, 13811.95, 14585.28, 8891.07, 7080.73, 14930.59, 9773.42, 5689.05, 15046.76, 5360.91, 14634.41, 13178.36, 14874.53, 14907.54, 6989.04, 21485.49, 11437.34, 14559.93, 5666.86, 9317.22, 15436.98, 10506.04], [18892.59, 9711.88, 10348.39, 8393.24, 17225.48, 12079.7, 13607.31, 11837.59, 15825.48, 20019.52, 14671.43, 15271.09, 25196.88, 14970.56, 23718.32, 13966.86, 18108.0, 16639.52, 37416.18, 10772.55, 13977.74, 17576.12, 7863.3, 9172.11, 6848.06, 13128.2, 23075.09, 11154.34, 10193.16, 27209.32, 19210.99, 13250.44, 11265.62, 17504.06, 15911.49, 19442.08, 12862.74, 20515.12, 13701.14, 15047.35, 5320.4, 9983.59, 10975.01, 12720.26, 8454.26, 6380.8, 7080.38, 20532.21, 46244.73, 32685.37, 14648.6, 22226.8, 8414.51, 22083.72, 15554.44, 8219.33, 13621.68, 9536.48, 9685.37, 14345.67, 39291.98, 12530.75, 14956.85, 9307.98, 6496.76, 13838.83, 9553.73, 11718.1, 13521.75, 5358.91, 10688.91, 8016.64, 20160.58, 14563.16, 10601.5, 20483.03, 13472.67, 10546.41, 7878.51, 11530.87, 6448.73, 5343.64], [20907.16, 6163.4, 8674.78, 5256.22, 13641.79, 8956.44, 9788.14, 7454.47, 6540.44, 14581.88, 14622.26, 10896.27, 16194.83, 11700.7, 22854.0, 16285.17, 23037.78, 12656.67, 23015.1, 11710.85, 14452.49, 20656.51, 11262.13, 7819.65, 7841.24, 8735.76, 24664.21, 9586.62, 8777.09, 21549.12, 11103.77, 10155.95, 5226.49, 8959.51, 18009.44, 14468.29, 13044.23, 16157.84, 8099.24, 7905.27, 5177.73, 8876.0, 13725.6, 18223.25, 7474.21, 5469.9, 6813.44, 12569.93, 23461.89, 13974.78, 18947.49, 20862.27, 12461.96, 22033.14, 7629.89, 9152.12, 10385.72, 6031.19, 6656.15, 15657.14, 13271.14, 7947.66, 15333.26, 8453.53, 5737.34, 13664.62, 4845.96, 7664.07, 11027.82, 4028.13, 6945.32, 7894.69, 15946.22, 12383.54, 13306.08, 7766.22, 14219.46, 13048.96, 8453.34, 15223.69, 6345.52, 8114.03], [14299.76, 4278.01, 5985.75, 5434.2, 12534.47, 6601.81, 7269.49, 5172.17, 4810.39, 6266.04, 9244.21, 7077.85, 8975.18, 7807.51, 26010.03, 11180.11, 14050.63, 6802.52, 10792.26, 8237.76, 8121.21, 10677.56, 6190.88, 4441.4, 4732.86, 5163.96, 18876.18, 7546.74, 4211.97, 7995.22, 7008.97, 6066.53, 4381.04, 5326.07, 9081.09, 7997.79, 8219.65, 7399.67, 5165.49, 6042.38, 3787.57, 
5911.37, 6672.67, 10307.27, 6388.42, 3178.9, 6113.13, 8533.1, 11405.85, 10935.35, 8929.89, 12374.54, 6341.99, 9928.68, 5659.95, 5601.08, 6479.9, 4211.82, 4260.96, 7792.5, 8670.26, 5715.38, 6898.31, 5580.48, 4017.54, 7950.56, 3612.55, 3947.06, 5505.24, 2642.28, 3356.73, 4144.77, 8502.69, 6689.04, 7024.81, 4624.0, 5904.69, 7836.14, 3867.45, 5898.12, 3729.67, 4436.83], [7186.5, 3856.12, 4070.03, 3639.9, 6137.82, 4022.12, 4463.72, 4258.5, 3853.22, 4448.53, 5093.69, 4674.39, 6332.34, 5232.47, 13126.14, 7437.61, 7134.21, 5572.06, 7921.04, 5250.03, 5595.19, 5959.66, 3983.24, 3732.44, 3204.75, 4933.04, 8862.73, 5156.53, 3418.01, 5568.97, 5310.42, 4459.51, 3350.92, 3674.86, 5572.53, 5758.04, 4613.34, 5037.79, 3904.56, 4404.93, 2770.17, 3858.8, 4450.36, 5441.74, 3811.42, 3074.65, 4131.36, 6844.59, 8314.34, 7147.15, 6250.82, 8070.47, 4395.27, 6173.65, 4238.15, 3733.33, 3915.13, 3289.74, 3400.48, 4840.57, 5729.91, 4335.35, 4949.45, 4255.12, 3149.33, 5297.94, 2865.3, 2798.98, 3711.39, 2554.99, 2720.15, 3007.94, 4503.35, 4403.79, 3913.8, 3857.52, 3386.59, 4594.35, 3093.06, 3691.22, 3623.01, 3354.04], [5559.36, 3505.1, 3778.06, 3056.73, 4750.92, 3332.63, 3735.09, 3478.98, 3315.28, 3554.91, 4257.73, 3917.13, 5321.92, 4406.87, 7897.38, 6014.22, 5756.65, 4899.18, 6493.34, 4534.69, 4467.55, 4829.82, 3358.52, 2962.96, 2749.41, 4134.87, 6314.96, 4327.64, 3087.86, 4335.62, 4297.52, 3774.59, 2942.73, 3142.18, 4499.19, 4625.8, 3895.94, 4355.52, 3431.25, 3916.6, 2639.92, 3379.99, 3784.12, 4231.58, 3827.21, 2467.09, 3540.98, 4688.85, 6774.18, 6604.68, 5090.57, 6338.98, 3884.21, 4920.06, 3921.22, 3422.94, 3330.7, 3236.79, 3493.62, 3858.48, 4636.19, 3789.54, 4216.57, 3319.8, 2640.65, 4073.01, 2496.6, 2387.7, 2932.12, 2069.87, 2581.31, 2644.28, 3900.74, 3544.59, 3241.01, 3017.42, 2789.99, 3703.97, 2482.62, 3156.08, 2744.39, 2553.22], [4824.29, 2977.42, 3280.58, 2769.4, 3756.76, 2855.07, 3133.63, 3053.68, 3117.6, 3187.34, 3852.29, 3415.31, 4525.84, 3858.99, 6367.19, 5211.33, 4854.33, 4144.19, 5479.03, 3946.36, 3811.89, 3973.15, 2934.7, 2645.4, 2398.16, 3224.14, 5066.13, 4326.79, 2744.92, 3656.36, 3607.0, 3116.68, 2681.35, 2895.38, 3852.57, 4078.28, 3373.9, 3947.14, 3024.92, 3394.71, 2283.0, 3175.88, 3088.95, 3722.67, 2987.75, 2292.13, 2881.11, 4492.51, 5893.95, 5631.81, 4547.79, 5363.23, 3482.66, 4415.55, 3863.15, 2758.94, 3331.28, 3016.34, 3213.13, 3521.15, 4242.84, 3626.55, 3444.77, 2933.05, 2091.91, 3657.59, 2296.35, 2086.7, 3003.8, 1918.41, 2026.03, 2261.85, 3027.27, 2944.82, 3030.66, 2968.11, 2513.33, 2925.71, 2098.29, 2433.83, 2258.57, 2264.21], [4242.98, 2715.01, 2746.51, 3028.44, 3164.24, 2579.08, 2719.55, 2861.11, 2897.99, 2635.62, 3076.89, 3062.19, 3900.65, 3332.13, 5179.72, 4449.05, 4668.06, 3620.54, 4696.48, 3255.88, 3297.43, 3529.72, 2596.76, 2183.56, 2061.05, 2738.3, 4197.26, 3364.12, 2490.01, 3075.44, 3030.66, 2501.17, 2374.8, 2299.04, 3205.28, 3516.09, 2903.91, 3723.44, 2567.93, 3147.29, 2276.15, 2645.12, 2533.68, 3282.41, 2474.97, 2646.84, 2718.44, 3628.7, 5856.33, 5010.0, 4209.51, 4908.46, 4183.75, 4228.58, 3659.84, 2699.23, 3200.37, 2824.73, 3236.01, 3293.14, 4257.96, 3238.62, 3417.46, 2341.2, 2041.27, 3213.83, 2048.56, 2336.91, 2783.2, 2028.78, 1582.95, 2063.77, 2710.17, 2880.92, 2912.3, 2218.75, 2004.71, 3005.14, 2024.28, 2183.95, 1965.18, 1847.74], [4030.32, 2784.09, 3251.36, 3035.99, 3173.36, 2905.28, 2683.63, 2701.98, 3750.94, 2809.41, 3261.16, 3952.17, 4022.67, 2953.09, 5656.38, 4591.93, 4383.56, 3778.97, 4362.42, 3726.37, 3021.81, 3473.24, 3192.62, 1961.51, 1936.32, 2489.8, 4563.05, 4432.06, 
2785.5, 2748.06, 2631.61, 3281.46, 2122.57, 2609.32, 4011.19, 3541.54, 2700.06, 3758.43, 2657.26, 4667.84, 3239.04, 3881.6, 3745.82, 3265.1, 2878.74, 4519.87, 3617.98, 4461.3, 6030.3, 5325.62, 5391.92, 5028.0, 4451.2, 5486.39, 3235.86, 3095.87, 3287.97, 3095.21, 3547.57, 4173.54, 5671.84, 3811.89, 2701.62, 2780.45, 2457.62, 3326.58, 2095.74, 1881.73, 2465.35, 2299.61, 2072.96, 1527.4, 2705.88, 2729.08, 3938.35, 1796.5, 2192.38, 4096.74, 2473.2, 2748.47, 1674.08, 2016.68], [5863.03, 5519.61, 5568.33, 3004.45, 3744.12, 4501.9, 6520.07, 4012.34, 3212.78, 5819.06, 4918.94, 6668.63, 6525.95, 5059.36, 9584.5, 6339.71, 9586.15, 4339.8, 8738.34, 6127.19, 2849.24, 4827.53, 4513.54, 2941.21, 5983.21, 4325.68, 4208.66, 5842.71, 4833.37, 3245.77, 3174.55, 5614.32, 2729.49, 7933.09, 7768.94, 5395.73, 4616.41, 5854.28, 6742.1, 9338.47, 7507.03, 5717.24, 9424.97, 5692.63, 6857.94, 6495.77, 4186.79, 6169.46, 9560.0, 6674.42, 12710.18, 5059.0, 5497.27, 6924.5, 3756.12, 4531.74, 5144.7, 5364.5, 4462.48, 6205.07, 12715.29, 4035.73, 3053.11, 5087.73, 5121.75, 3881.08, 5885.8, 4865.02, 5288.73, 3598.98, 2833.41, 2496.36, 3194.49, 4077.99, 8496.46, 1903.73, 2702.64, 7847.06, 5202.23, 3931.52, 4413.1, 2618.38], [6256.49, 10225.14, 9968.78, 5006.69, 6990.38, 7883.92, 13725.98, 8782.96, 5102.14, 12219.55, 7133.7, 16553.03, 16255.56, 12042.24, 17087.28, 11613.97, 13900.61, 15970.6, 9697.68, 12177.77, 4479.88, 11082.14, 9563.26, 10451.75, 10081.6, 11649.13, 12343.92, 5038.37, 7875.39, 11855.63, 4538.32, 8825.42, 4174.36, 12581.63, 12027.99, 7563.53, 12335.86, 13610.76, 15583.87, 8853.22, 18137.54, 12049.79, 13162.29, 7521.34, 8373.87, 13694.39, 8420.08, 11193.72, 9908.45, 13797.4, 18116.56, 5517.01, 11575.38, 11449.64, 5718.81, 10749.18, 8169.43, 20432.63, 5188.9, 9603.91, 16234.51, 5838.32, 9120.64, 9733.66, 10198.24, 9220.61, 9942.07, 10074.89, 11429.73, 6486.19, 5528.8, 4818.89, 5399.51, 13018.05, 12071.65, 4718.52, 7218.15, 7808.31, 9239.02, 10549.87, 6730.56, 8943.03]],
[[11445.26, 9396.12, 9866.75, 12189.83, 12910.68, 9918.07, 8140.01, 12852.12, 9416.01, 10276.83, 11775.03, 11838.64, 24318.78, 13257.7, 18076.42, 15821.04, 7474.73, 11812.83, 15706.01, 5481.26, 7504.73, 6268.96, 7504.95, 9270.37, 6073.61, 8750.38, 21274.59, 9495.46, 12791.41, 7181.52, 10787.91, 5100.68, 10318.63, 10737.62, 7936.66, 8803.48, 7144.38, 7801.97, 9177.17, 10608.92, 3893.13, 6948.11, 8977.61, 9215.05, 8095.57, 5890.96, 9540.81, 16252.52, 13336.07, 11626.36, 15401.03, 15401.59, 8722.29, 9687.94, 20397.1, 6309.53, 9667.89, 10050.76, 23449.63, 8423.32, 7411.93, 10818.92, 11197.63, 9909.79, 10804.87, 10288.03, 7017.45, 8345.5, 15034.49, 9693.82, 19293.15, 7206.01, 8462.7, 6763.02, 10802.53, 6499.8, 5148.48, 8276.41, 12261.51, 10538.96, 14404.6, 7258.67], [14719.19, 13849.17, 13631.43, 12177.59, 18010.69, 13302.4, 11285.29, 12809.84, 13682.94, 18874.24, 12523.57, 16877.66, 29183.48, 14291.3, 22301.65, 14904.42, 12522.31, 11791.37, 18496.37, 6979.2, 9335.08, 5630.56, 5634.78, 10671.55, 9980.95, 7606.14, 20418.43, 14951.7, 18109.33, 14918.77, 16033.19, 9361.22, 12551.23, 19276.9, 10616.44, 15397.32, 9475.47, 10421.28, 12375.16, 14830.72, 5729.91, 9694.81, 8591.43, 14495.29, 14107.25, 9421.06, 19359.53, 16790.61, 26986.54, 25885.9, 16467.1, 22487.03, 9224.35, 20387.94, 18234.79, 7081.11, 11111.52, 11171.72, 15392.37, 11806.74, 22711.09, 12025.7, 14780.7, 14848.03, 9908.41, 13792.5, 9267.26, 8170.04, 16846.34, 10356.98, 18361.69, 11226.45, 23225.67, 13257.7, 10945.27, 15574.3, 10877.03, 11769.67, 12344.86, 14857.49, 17224.47, 13109.1], [23409.86, 14320.59, 13800.87, 14352.06, 18872.78, 18684.74, 12043.11, 14022.72, 11381.14, 25165.1, 14072.94, 22404.0, 24247.65, 15506.72, 20840.05, 13711.15, 17839.09, 14672.99, 17190.76, 10625.12, 8848.47, 12036.61, 7660.54, 11533.4, 9406.32, 12810.21, 27106.69, 15315.78, 17588.25, 19747.19, 16899.92, 11651.63, 12923.34, 17983.13, 14205.58, 16566.74, 13403.69, 22020.86, 14903.32, 19264.25, 7919.34, 12988.93, 13260.07, 20701.67, 14087.36, 12284.1, 16537.32, 26009.72, 24878.24, 33744.51, 14323.11, 20752.06, 10967.29, 19469.53, 19750.87, 12976.51, 16995.69, 14878.09, 18871.69, 14217.14, 16007.4, 13807.3, 20760.56, 15339.25, 12648.75, 19883.1, 12384.08, 13129.38, 21388.35, 13455.08, 15207.25, 14793.07, 24848.95, 19422.42, 13908.63, 17542.63, 15782.29, 14348.74, 12614.13, 20733.48, 16491.96, 13076.6], [23382.44, 10803.52, 14378.16, 13302.03, 23222.27, 16695.74, 13679.45, 13661.28, 11570.96, 20886.36, 15476.03, 16023.03, 22236.36, 14684.2, 21771.01, 13644.9, 22076.88, 15190.72, 14203.75, 12299.45, 11778.42, 12531.76, 9742.09, 12032.59, 10269.89, 11948.84, 32208.85, 17459.51, 16242.32, 17948.04, 13136.18, 11190.44, 9589.65, 15077.5, 18706.86, 14964.66, 15336.68, 17290.63, 12763.49, 14413.57, 11501.47, 12579.98, 16496.02, 23865.77, 17203.76, 12391.8, 15316.56, 20308.84, 19682.45, 23440.95, 18467.55, 19582.51, 16025.97, 22575.64, 17154.81, 13862.35, 17273.57, 17677.34, 15127.73, 18684.02, 15585.12, 12263.42, 18852.01, 18143.6, 14127.53, 25099.68, 8662.66, 10795.11, 18684.5, 14296.08, 15049.98, 16718.21, 22490.42, 16585.21, 21470.46, 14135.71, 18498.96, 17130.17, 16346.0, 20241.75, 11604.09, 15534.4], [10800.89, 6621.21, 9859.45, 10134.26, 15783.48, 11661.46, 9992.31, 8961.21, 7986.12, 11334.04, 9131.23, 10651.59, 14228.5, 8808.92, 16857.09, 8870.4, 11910.68, 8514.6, 9619.73, 9681.85, 8443.5, 9467.84, 6687.98, 8468.58, 7309.2, 8900.26, 17395.4, 12816.24, 10267.79, 10570.91, 8501.5, 7170.24, 7787.29, 10002.72, 13115.21, 9260.14, 10312.36, 11077.2, 7877.86, 9884.11, 
7971.67, 6841.12, 11086.58, 20710.83, 13844.01, 8233.47, 15152.03, 15167.49, 12115.32, 11980.41, 10605.6, 12706.9, 11463.02, 16391.48, 11352.5, 7530.3, 12389.59, 13869.15, 8958.14, 14252.5, 9379.34, 7939.86, 10767.56, 16137.54, 10364.88, 17667.15, 5264.44, 8816.28, 13189.92, 7687.12, 8032.19, 9773.84, 12707.06, 11950.38, 17618.47, 7989.54, 13400.39, 20147.11, 7757.33, 14585.71, 6459.52, 11058.98], [6555.49, 4734.66, 5247.05, 5947.45, 8962.09, 6010.18, 5440.07, 5081.99, 4501.39, 6801.83, 5226.83, 6365.3, 7598.31, 5250.53, 10536.52, 6555.77, 6246.19, 4899.69, 5633.05, 4891.45, 5350.89, 4886.0, 3277.94, 4925.69, 4402.2, 5504.85, 9848.68, 7103.36, 5711.0, 6070.02, 5043.51, 3942.8, 4319.38, 6253.7, 6492.65, 5492.23, 5300.07, 5895.92, 4993.92, 4607.68, 4412.15, 3593.96, 5439.76, 10133.62, 6659.7, 4774.84, 7164.48, 7671.45, 5967.57, 6109.85, 5434.13, 6613.64, 4878.95, 7327.03, 5807.04, 3670.12, 5707.73, 6380.39, 4824.95, 6986.96, 4372.81, 3783.99, 5624.07, 8726.5, 5313.22, 7426.47, 3018.46, 4258.51, 5419.19, 4594.29, 4266.15, 4677.71, 5240.86, 5011.05, 7106.89, 3965.1, 5733.27, 9562.59, 3914.1, 5838.83, 3788.82, 4911.18], [4487.15, 3104.44, 3383.62, 3729.34, 5208.33, 3881.61, 3463.18, 3290.67, 3369.41, 4651.93, 3479.82, 4221.03, 5235.62, 3515.58, 5546.02, 3862.32, 3930.2, 3368.78, 3177.84, 3626.84, 2925.88, 2795.03, 2124.77, 3061.72, 2569.27, 3383.25, 5544.76, 3833.81, 3729.67, 3712.59, 2907.87, 2438.05, 3027.46, 3437.19, 3690.67, 3647.12, 3195.82, 3452.42, 3003.94, 2881.15, 2726.49, 2462.67, 3294.29, 5132.12, 4024.76, 3108.09, 3943.35, 4264.99, 3750.04, 4023.09, 3578.9, 3816.03, 2941.25, 3871.37, 3569.3, 2414.46, 3439.66, 3907.25, 3275.13, 3601.49, 2690.43, 2539.41, 3566.59, 4102.3, 3538.63, 4258.75, 2307.56, 2686.07, 3695.43, 2997.3, 2456.86, 2741.39, 3206.27, 2934.6, 3571.18, 2511.94, 3151.33, 4193.71, 2552.29, 3496.43, 2571.52, 2992.5], [3223.12, 2097.61, 2428.81, 2481.26, 3864.63, 2750.74, 2248.83, 2383.53, 2544.88, 3457.4, 2498.48, 2892.26, 3933.36, 2533.84, 3913.94, 2706.4, 3189.74, 2270.44, 2219.1, 2274.31, 2016.42, 1844.62, 1675.86, 1847.77, 1878.96, 2543.73, 3986.44, 2627.67, 2458.92, 2409.33, 2173.83, 1851.57, 2227.47, 2291.89, 2421.13, 2416.32, 2169.87, 2450.68, 2240.0, 2044.5, 2008.9, 1893.48, 2451.84, 3353.12, 2812.2, 2131.62, 2585.51, 3128.53, 2858.7, 2996.48, 2672.92, 2786.17, 2192.46, 2689.98, 2454.78, 1892.1, 2382.88, 2713.41, 2321.69, 2742.05, 2096.39, 1974.47, 2493.21, 2698.44, 2535.42, 2866.68, 1455.42, 1933.88, 2626.68, 2105.46, 1687.18, 1945.97, 2323.11, 1852.09, 2256.41, 1800.01, 2057.27, 2410.69, 1954.7, 2247.75, 1624.93, 2038.15], [2494.37, 1673.39, 1773.43, 1469.26, 2816.89, 2031.24, 1294.99, 1508.18, 1766.44, 2642.22, 1752.41, 2234.46, 2603.72, 1883.99, 2977.59, 1844.4, 2671.99, 1759.64, 1529.58, 1757.09, 1453.12, 1458.55, 1322.78, 1282.01, 1454.08, 1953.53, 3151.81, 1931.59, 1818.74, 1790.47, 1707.78, 1558.49, 1652.87, 1641.09, 1976.34, 1813.03, 1775.23, 1898.44, 1666.93, 1725.64, 1687.07, 1587.76, 1845.2, 2652.05, 2177.2, 1739.65, 2001.38, 2381.83, 2737.01, 2514.38, 2222.0, 2380.01, 2087.48, 2040.06, 2100.17, 1569.05, 1945.04, 2235.13, 2189.02, 2126.45, 1966.64, 1788.03, 1986.06, 1934.88, 1813.6, 2244.8, 1379.33, 1617.21, 2151.83, 1638.84, 1403.71, 1467.3, 1752.64, 1403.8, 1743.75, 1372.98, 1525.33, 1901.29, 1450.41, 1584.78, 1358.73, 1587.3], [2569.03, 2055.48, 1617.7, 1797.54, 2790.11, 2043.46, 1744.23, 1645.6, 2671.58, 2805.1, 2394.35, 3813.06, 3328.2, 1522.49, 4070.01, 1502.57, 2541.82, 1827.88, 1446.88, 1840.4, 1308.22, 1297.46, 1847.49, 1032.16, 1335.97, 
1750.43, 3148.26, 2085.0, 1707.8, 1664.12, 1370.17, 1553.64, 1347.18, 2328.25, 2167.17, 1927.51, 1663.67, 1833.17, 1597.64, 2030.33, 1809.08, 1648.65, 2451.68, 2662.66, 2054.17, 2434.34, 2498.06, 2411.0, 2779.41, 2546.27, 2238.57, 2597.04, 1968.24, 2664.05, 2885.94, 1706.86, 1995.04, 2282.39, 2214.75, 2052.59, 2075.18, 2014.74, 1956.24, 1890.09, 2117.0, 2157.46, 1412.17, 1740.39, 2129.67, 1939.98, 1513.77, 1394.59, 1758.18, 1423.39, 2058.37, 1328.2, 1447.78, 2262.28, 1565.23, 2068.41, 1367.27, 1631.66], [4641.31, 3710.95, 4108.02, 2322.72, 4112.86, 3153.28, 4222.0, 2542.08, 4080.89, 4914.35, 5165.37, 5489.32, 5723.93, 3490.48, 7381.2, 2472.47, 5539.85, 2249.05, 2883.83, 2664.6, 1379.94, 1649.83, 2664.51, 1628.63, 2582.64, 5612.11, 2663.5, 2699.65, 2574.08, 2505.57, 1646.98, 2305.66, 1756.6, 3672.71, 3286.51, 2637.35, 2619.93, 2800.22, 2382.41, 3808.99, 3676.19, 2221.59, 4269.7, 3598.29, 3048.27, 3589.7, 3096.36, 2748.5, 3936.11, 3511.24, 4884.22, 2809.73, 2626.34, 4343.58, 3367.0, 2404.99, 3245.45, 4372.99, 2617.4, 2689.32, 3563.42, 2529.25, 2224.52, 3126.24, 3312.67, 2504.25, 2625.43, 3150.3, 3508.86, 3096.43, 2205.32, 2489.92, 2449.98, 1943.37, 3975.66, 1543.29, 2017.76, 4233.35, 2737.69, 4005.33, 2550.47, 2751.41], [5928.45, 6305.46, 7741.12, 5572.98, 7840.01, 5346.7, 9205.84, 5416.25, 5406.82, 8514.67, 8016.43, 12143.41, 11659.05, 7332.98, 11905.46, 4191.0, 7976.14, 11071.46, 4555.71, 4642.37, 3020.24, 4752.09, 4916.23, 5745.9, 4904.85, 12820.8, 5408.88, 3604.63, 3839.0, 5809.85, 2569.89, 4389.23, 2294.61, 6809.31, 6372.19, 4289.17, 4488.89, 10455.43, 5260.2, 3870.7, 6460.74, 4295.49, 7860.2, 5420.29, 4285.41, 6845.68, 5365.47, 5121.61, 4586.69, 8258.6, 7749.0, 3059.53, 3430.87, 8868.47, 4088.58, 6015.28, 7597.69, 13886.42, 3714.79, 4607.97, 8787.99, 4320.83, 5278.15, 6865.86, 4430.27, 4400.78, 5310.74, 7716.41, 8462.67, 6883.02, 3169.26, 3573.36, 3969.63, 6448.48, 4885.67, 3215.82, 6065.36, 6634.84, 5209.91, 7425.64, 5240.02, 5944.41]]
]
const inflow_initial = [39717.564, 6632.5141, 15897.183, 2525.938]
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 9344 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
include(joinpath(@__DIR__, "BiObjectiveSDDP.jl"))
using .BiObjectiveSDDP
using SDDP
import Gurobi
import Statistics
const OBJ_1_SCALING = 0.01
const OBJ_2_SCALING = 0.1
include("brazilian_data.jl")
function create_model(weight = nothing; stages = 60)
env = Gurobi.Env()
model = SDDP.LinearPolicyGraph(
stages = stages,
lower_bound = 0.0,
optimizer = () -> Gurobi.Optimizer(env),
) do sp, t
set_silent(sp)
month = t % 12 == 0 ? 12 : t % 12 # Year to month conversion.
@variables(
sp,
begin
0 <= storedEnergy[i = 1:4] <= storedEnergy_ub[i],
(SDDP.State, initial_value = storedEnergy_initial[i])
0 <= spillEnergy[i = 1:4]
0 <= hydroGeneration[i = 1:4] <= hydro_ub[i]
thermal_lb[i][j] <=
thermal[i = 1:4, j = 1:N_THERMAL[i]] <=
thermal_ub[i][j]
0 <= exchange[i = 1:5, j = 1:5] <= exchange_ub[i][j]
0 <=
deficit[i = 1:4, j = 1:4] <=
demand[month][i] * deficit_ub[j]
inflow[i = 1:4]
end
)
@constraints(
sp,
begin
[i = 1:4],
sum(deficit[i, :]) +
hydroGeneration[i] +
sum(thermal[i, j] for j in 1:N_THERMAL[i]) +
sum(exchange[:, i]) - sum(exchange[i, :]) ==
demand[month][i]
[i = 1:4],
storedEnergy[i].out + spillEnergy[i] + hydroGeneration[i] -
storedEnergy[i].in == inflow[i]
sum(exchange[:, 5]) == sum(exchange[5, :])
end
)
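# Stage 1 uses the (deterministic) historical inflow; later stages sample from
# the monthly inflow scenarios.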
Ω = if t == 1
[inflow_initial]
else
r = (t - 1) % 12 == 0 ? 12 : (t - 1) % 12
[
[scenarios[i][r][ω] for i in 1:4] for
ω in 1:length(scenarios[1][r])
]
end
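# The two objectives: a scaled deficit (load-shedding) cost and a scaled
# thermal generation cost.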
@expressions(
sp,
begin
objective_1,
OBJ_1_SCALING / (stages == 12 ? 1 : stages) *
sum(deficit_obj[i] * sum(deficit[i, :]) for i in 1:4)
objective_2,
OBJ_2_SCALING / (stages == 12 ? 1 : stages) * sum(
thermal_obj[i][j] * thermal[i, j] for i in 1:4 for
j in 1:N_THERMAL[i]
)
end
)
if weight === nothing
SDDP.initialize_biobjective_subproblem(sp)
end
SDDP.parameterize(sp, Ω) do ω
JuMP.fix.(inflow, ω)
if weight === nothing
SDDP.set_biobjective_functions(sp, objective_1, objective_2)
else
@stageobjective(
sp,
weight * objective_1 + (1 - weight) * objective_2,
)
end
return
end
end
return model
end
function _simulate_policy(model, keys)
simulations = Dict()
for λ in keys
BiObjectiveSDDP.set_scalarizing_weight(model, λ)
simulations[λ] = SDDP.simulate(
model,
1000,
[:storedEnergy, :objective_1, :objective_2],
)
end
return simulations
end
function _extract_objectives(simulation)
obj_1 = [sum(s[:objective_1] for s in sim) for sim in simulation]
obj_2 = [sum(s[:objective_2] for s in sim) for sim in simulation]
return obj_1, obj_2
end
function _save_simulations_to_dat(simulations, simulation_weights)
A = Matrix{Float64}(
undef,
length(simulations[first(simulation_weights)]),
2 * length(simulation_weights),
)
for (i, weight) in enumerate(simulation_weights)
obj_1, obj_2 = _extract_objectives(simulations[weight])
A[:, 2*i-1] .= obj_1
A[:, 2*i] .= obj_2
end
open("simulations.dat", "w") do io
for i in 1:size(A, 1)
println(io, join(A[i, :], " "))
end
end
open("new_data.dat", "w") do io
for (i, w) in enumerate(simulation_weights)
s = w .* A[:, 2i-1] + (1 - w) .* A[:, 2i]
μ = Statistics.mean(s)
Q = Statistics.quantile(s, [0.1, 0.9])
println(io, w, " ", μ, " ", μ - Q[1], " ", Q[2] - μ)
end
end
return
end
"""
experiment_1()
Run the first experiment where we train the policy using the true biobjective
SDDP algorithm.
"""
function experiment_1()
model = create_model(stages = 12)
env = Gurobi.Env()
lower_bound, weights, bounds = BiObjectiveSDDP.bi_objective_sddp(
model,
() -> Gurobi.Optimizer(env);
# BiObjectiveSDDP kwargs ...
bi_objective_sddp_iteration_limit = 2000,
bi_objective_lambda_atol = 0.05,
bi_objective_lower_bound = 0.0,
# SDDP.jl kwargs ...
print_level = 0,
iteration_limit = 1,
)
open("bounds.dat", "w") do io
for (w, b) in zip(weights, bounds)
println(io, "$(w) $(b)")
end
end
simulation_weights = [0.1, 0.7, 0.9]
simulations = _simulate_policy(model, simulation_weights);
_save_simulations_to_dat(simulations, simulation_weights)
return
end
"""
BoundLimit(limit::Float64, atol::Float64)
Terminate once the bound is better than, or within `atol` of, `limit`.
"""
struct BoundLimit <: SDDP.AbstractStoppingRule
limit::Float64
atol::Float64
end
SDDP.stopping_rule_status(::BoundLimit) = :bound_limit
function SDDP.convergence_test(
model::SDDP.PolicyGraph,
log::Vector{SDDP.Log},
rule::BoundLimit,
)
if model.objective_sense == MOI.MIN_SENSE
return log[end].bound >= rule.limit - rule.atol
else
return log[end].bound <= rule.limit + rule.atol
end
end
"""
experiment_2(N::Int, atol::Float64)
Run an experiment in which we time how long it takes to train a separate
fixed-weight (weighted-sum) policy for each of nine trade-off weights, stopping
each run when the bound stalls within `atol`.
"""
function experiment_2(N::Int, atol::Float64)
# Precompilation to avoid measuring that overhead!
_model = create_model(1.0)
SDDP.train(_model; iteration_limit = 1, print_level = 0)
# Now the real model
start_time = time()
for weight in [0.0, 1.0, 0.5, 0.25, 0.75, 0.125, 0.375, 0.625, 0.875]
model = create_model(weight)
SDDP.train(
model;
log_file = "experiment_2_$(weight).txt",
stopping_rules = [SDDP.BoundStalling(10, atol)],
)
bound = SDDP.calculate_bound(model)
open("experiment_2.dat", "a") do io
println(io, weight, ", ", bound, ", ", time() - start_time)
end
end
return
end
"""
experiment_3(atol::Float64)
Run an experiment in which we time how long it takes to solve the problems from
experiment 2 using the saddle cuts, training each weight until its bound
reaches the corresponding bound recorded in `experiment_2.dat`.
"""
function experiment_3(atol::Float64)
# Precompilation to avoid measuring that overhead!
_model = create_model()
SDDP.train_biobjective(
_model;
solution_limit = 1,
iteration_limit = 1,
print_level = 0,
)
# Now the real model
limits = Dict{Float64,BoundLimit}()
open("experiment_2.dat", "r") do io
for line in readlines(io)
items = parse.(Float64, String.(split(line, ",")))
limits[items[1]] = BoundLimit(items[2], atol)
end
end
model = create_model()
solutions = SDDP.train_biobjective(
model;
solution_limit = 9,
include_timing = true,
print_level = 1,
log_file_prefix = "experiment_3",
stopping_rules = (weight) -> [limits[weight]],
)
open("experiment_3.dat", "w") do io
X = [(weight, bound, time) for (weight, (bound, time)) in solutions]
sort!(X, by = x -> x[3])
for (weight, bound, time) in X
println(io, weight, ", ", bound, ", ", time)
end
end
return
end
function arg(T, key)
i = findfirst(isequal(key), ARGS)
return i === nothing ? nothing : parse(T, ARGS[i+1])
end
function help()
println("""julia brazilian_example.jl --experiment={1,2,3} -n 9 -atol 100
## Arguments
* --experiment :: choose which experiment to run
### Experiment 2
* -n :: Choose how many weights to run
* -atol :: The tolerance used by the `BoundStalling` stopping rule
## Examples
```
nohup ~/julia1.6 --project=. brazilian_example.jl --experiment=1 &
nohup ~/julia1.6 --project=. brazilian_example.jl --experiment=2 -n 9 -atol 1e2 &
nohup ~/julia1.6 --project=. brazilian_example.jl --experiment=3 &
```
""")
end
function main()
if findfirst(isequal("--experiment=1"), ARGS) !== nothing
experiment_1()
elseif findfirst(isequal("--experiment=2"), ARGS) !== nothing
experiment_2(
something(arg(Int, "-n"), 9),
something(arg(Float64, "-atol"), 1e2),
)
elseif findfirst(isequal("--experiment=3"), ARGS) !== nothing
experiment_3(something(arg(Float64, "-atol"), 1e2))
else
help()
end
return
end
if length(ARGS) > 0
main()
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1783 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file implements two solver-specific functions needed by BiObjectiveSDDP
# to compute the reduced costs of arbitrary objective vectors.
#
# See `BiObjectiveSDDP.get_BinvA` and `BiObjectiveSDDP.get_basis`.
import Gurobi
import SparseArrays
####
#### New functions for the Gurobi C API.
####
function get_basis(model::Gurobi.Optimizer)
p = Ref{Cint}()
@assert Gurobi.GRBgetintattr(model, "NumConstrs", p) == 0
bhead = zeros(Cint, p[])
ret = Gurobi.GRBgetBasisHead(model, bhead)
@assert ret == 0
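# `GRBgetBasisHead` returns 0-based column indices; convert them to 1-based
# Julia indices.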
bhead .+= 1
return bhead
end
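# A Julia mirror of Gurobi's C `GRBsvec` sparse vector struct.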
mutable struct GRBsvec
len::Cint
ind::Ptr{Cint}
val::Ptr{Cdouble}
end
function get_BinvA(model::Gurobi.Optimizer)
p = Ref{Cint}()
@assert Gurobi.GRBgetintattr(model, "NumConstrs", p) == 0
ncon = p[]
@assert Gurobi.GRBgetintattr(model, "NumVars", p) == 0
nvar = p[]
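# Build B⁻¹A one tableau row at a time using `GRBBinvRowi`, then assemble the
# rows into a sparse matrix.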
function _GRBBinvRowi(model::Gurobi.Optimizer, i::Int)
ind = zeros(Cint, ncon + nvar)
val = zeros(Cdouble, ncon + nvar)
x = GRBsvec(0, pointer(ind), pointer(val))
GC.@preserve ind val x begin
@assert Gurobi.GRBBinvRowi(model, i, pointer_from_objref(x)) == 0
end
return ind[1:x.len], val[1:x.len]
end
rows, cols, coefs = Cint[], Cint[], Cdouble[]
for i in 1:ncon
ind, val = _GRBBinvRowi(model, i - 1)
append!(rows, fill(Cint(i), length(ind)))
append!(cols, ind .+ 1)
append!(coefs, val)
end
return SparseArrays.sparse(rows, cols, coefs)
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3089 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
include(joinpath(@__DIR__, "BiObjectiveSDDP.jl"))
using .BiObjectiveSDDP
using SDDP
import Gurobi
import Random
const GUROBI_ENV = Gurobi.Env()
function create_model()
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = () -> Gurobi.Optimizer(GUROBI_ENV),
) do sp, t
set_silent(sp)
@variable(sp, x >= 0, SDDP.State, initial_value = 0.0)
if t == 1
@expression(sp, objective_1, 2 * x.out)
@expression(sp, objective_2, x.out)
else
@variable(sp, y >= 0)
@constraints(sp, begin
1.0 * x.in + y >= 1.00
0.5 * x.in + y >= 0.75
y >= 0.25
end)
@expression(sp, objective_1, y)
@expression(sp, objective_2, 3 * y)
end
SDDP.initialize_biobjective_subproblem(sp)
SDDP.parameterize(sp, [nothing]) do ω
SDDP.set_biobjective_functions(sp, objective_1, objective_2)
end
end
return model
end
function my_optimizer()
model = Gurobi.Optimizer(GUROBI_ENV)
MOI.set(model, MOI.Silent(), true)
return model
end
function main(update_method, filename)
Random.seed!(1)
bounds_for_reporting = Tuple{Float64,Float64,Float64}[]
model = create_model()
_, _, _ = BiObjectiveSDDP.bi_objective_sddp(
model,
my_optimizer;
# BiObjectiveSDDP kwargs ...
bi_objective_sddp_iteration_limit = 20,
bi_objective_lower_bound = 0.0,
bi_objective_lambda_update_method = update_method,
bi_objective_lambda_atol = 1e-6,
bi_objective_major_iteration_burn_in = 1,
bi_objective_post_train_callback = (model::SDDP.PolicyGraph, λ) ->
begin
upper_bound = BiObjectiveSDDP.surrogate_upper_bound(
model,
my_optimizer;
global_lower_bound = 0.0,
lambda_minimum_step = 1e-4,
lambda_atol = 1e-4,
)
lower_bound, _, _ = BiObjectiveSDDP.surrogate_lower_bound(
model,
my_optimizer;
global_lower_bound = 0.0,
lambda_minimum_step = 1e-4,
lambda_atol = 1e-4,
)
push!(bounds_for_reporting, (λ, lower_bound, upper_bound))
end,
# SDDP.jl kwargs ...
iteration_limit = 1,
print_level = 0,
)
open(filename, "w") do io
for (i, b) in enumerate(bounds_for_reporting)
println(io, i, ", ", join(b, ", "))
end
end
return
end
main(BiObjectiveSDDP.MinimumUpdate(), "simple_example_min.dat")
main(BiObjectiveSDDP.RandomUpdate(), "simple_example_rand.dat")
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 9054 | # Copyright 2019-21, Oscar Dowson, Lingquan Ding (@lingquant).
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This example is based on one from MSPPy:
# https://github.com/lingquant/msppy/blob/dc85a2e8fa5243b3d5096d59085d9caad3ff2ede/examples/hydro_thermal/julia/test.jl
#
# The original author was Lingquan Ding (@lingquant), but it was modified by
# Oscar Dowson (@odow) to meet the latest SDDP.jl syntax.
#
# The original model and data is from:
#
# Shapiro, A., Tekaya, W., da Costa, J. P., & Soares, M. P. (2013). Risk neutral
# and risk averse stochastic dual dynamic programming method. European journal
# of operational research, 224(2), 375–391.
using SDDP
import DelimitedFiles
import Gurobi
import JSON
import Plots
import Random
import Statistics
import StatsPlots
function _build_model(data::Dict)
N_THERMAL = length.(data["thermal_obj"])
@assert N_THERMAL == length.(data["thermal_lb"])
@assert N_THERMAL == length.(data["thermal_ub"])
I = [1, 2, 3, 4]
K(i) = 1:N_THERMAL[i]
IM = 5
I_IM = union(I, IM)
env = Gurobi.Env()
function gurobi_optimizer()
model = Gurobi.Optimizer(env)
MOI.set(model, MOI.Silent(), true)
return model
end
model = SDDP.LinearPolicyGraph(
stages = 60,
lower_bound = 0.0,
optimizer = gurobi_optimizer,
) do sp, t
set_silent(sp)
month = t % 12 == 0 ? 12 : t % 12
@variable(
sp,
0 <= v[i in I] <= data["storedEnergy_ub"][i] / 1_000,
SDDP.State,
initial_value = data["storedEnergy_initial"][i] / 1_000,
)
@variable(sp, s[i in I] >= 0.0)
@variable(sp, 0.0 <= q[i in I] <= data["hydro_ub"][i] / 1_000)
@variable(
sp,
g[i in I, k in K(i)],
lower_bound = data["thermal_lb"][i][k] / 1_000,
upper_bound = data["thermal_ub"][i][k] / 1_000,
)
@variable(
sp,
0 <= ex[i in I_IM, j in I_IM] <= data["exchange_ub"][i][j] / 1_000,
)
@variable(
sp,
df[i in I, j in I] >= 0,
upper_bound =
data["demand"][month][i] * data["deficit_ub"][j] / 1_000,
)
@stageobjective(
sp,
sum(
data["deficit_obj"][i] * sum(df[i, :]) +
sum(data["thermal_obj"][i][k] * g[i, k] for k in K(i)) for
i in I
)
)
@constraint(
sp,
[i in I],
q[i] + sum(g[i, k] for k in K(i)) + sum(df[i, :]) + sum(ex[:, i]) - sum(ex[i, :]) ==
data["demand"][month][i] / 1_000
)
@constraint(
sp,
balance[i in I],
v[i].out ==
v[i].in + data["inflow_initial"][i] / 1_000 - s[i] - q[i]
)
@constraint(sp, sum(ex[:, IM]) == sum(ex[IM, :]))
if t == 1
# Deterministic first stage with `inflow_initial`.
else
r = (t - 1) % 12 == 0 ? 12 : (t - 1) % 12
Ω = data["scenarios"]
# To simplify things, don't use all of the scenarios.
num_scenarios = min(40, length(Ω[1][r]))
SDDP.parameterize(sp, 1:num_scenarios) do ω
for i in 1:4
set_normalized_rhs(balance[i], Ω[i][r][ω] / 1_000)
end
end
end
end
return model
end
function _train_model(model::SDDP.PolicyGraph, gamma::Float64)
risk_measure = isfinite(gamma) ? SDDP.Entropic(gamma) : SDDP.WorstCase()
# Set the same random seed for all runs!
Random.seed!(1234)
psr_sampling_scheme = SDDP.PSRSamplingScheme(1_000)
try
SDDP.train(
model;
iteration_limit = 10_000,
risk_measure = risk_measure,
sampling_scheme = psr_sampling_scheme,
log_file = "$(gamma).log",
)
catch ex
@info "Ignorring error"
println("$(ex)")
end
# Set the same random seed for all runs!
Random.seed!(12345)
simulations = SDDP.simulate(
model,
1_000,
[:v, :df, :g];
sampling_scheme = psr_sampling_scheme,
)
stage_objectives = [
round(simulations[i][t][:stage_objective]; digits = 1) for
i in 1:length(simulations) for t in 1:length(simulations[i])
]
stage_objectives =
reshape(stage_objectives, length(simulations[1]), length(simulations))
open("stage_objectives_$(gamma).csv", "w") do io
return DelimitedFiles.writedlm(
io,
collect(transpose(stage_objectives)),
',',
)
end
return
end
function _plot_cumulative_density(
filenames = [
"stage_objectives_0.000000.csv",
"stage_objectives_0.000010.csv",
"stage_objectives_5.0e-5.csv",
"stage_objectives_0.000100.csv",
],
)
Plots.plot()
line_styles = [:solid, :dash, :dot, :dashdot]
for (i, f) in enumerate(filenames)
X = sum(DelimitedFiles.readdlm(f, ','); dims = 2)[:]
x = parse(Float64, match(r"stage\_objectives\_(.+)\.csv", f)[1])
x = if x ≈ 0.0
"0"
elseif x ≈ 5e-5
"5 \\times 10^{-5}"
else
"1 \\times 10^{$(Int(log10(x)))}"
end
StatsPlots.cdensity!(
X,
label = "\$\\gamma = $(x)\$",
style = line_styles[i],
color = :black,
)
end
p = Plots.plot!(
xlabel = "Cost [\$]",
ylabel = "Cumulative density",
legend = :bottomright,
size = (400, 300),
)
Plots.savefig("cumulative_density.pdf")
return p
end
function _plot_objectives(filename::String)
quantiles = [0.0, 0.01, 0.05, 0.5, 0.95, 0.99, 1.0]
matrix = DelimitedFiles.readdlm(filename, ',')
A = mapreduce(
i -> Statistics.quantile(matrix[:, i], quantiles)',
vcat,
1:size(matrix, 2),
)
x = parse(Float64, match(r"stage\_objectives\_(.+)\.csv", filename)[1])
x = if x ≈ 0.0
"0"
elseif x ≈ 5e-5
"5 \\times 10^{-5}"
else
"10^{$(Int(log10(x)))}"
end
Plots.plot(
A[:, 4],
ribbon = (A[:, 4] .- A[:, 1], A[:, 7] .- A[:, 4]),
color = :black,
fillalpha = 0.2,
legend = false,
title = "\$\\gamma = $(x)\$",
size = (300, 400),
)
Plots.plot!(
A[:, 4],
ribbon = (A[:, 4] .- A[:, 2], A[:, 6] .- A[:, 4]),
color = :black,
fillalpha = 0.2,
)
Plots.plot!(
A[:, 4],
ribbon = (A[:, 4] .- A[:, 3], A[:, 5] .- A[:, 4]),
color = :black,
fillalpha = 0.2,
)
return Plots.plot!(
A[:, 4],
color = :black,
xlabel = "Stages",
ylabel = "Stage objective (\$)",
ylim = (0, 4e4),
)
end
function _plot_objectives(
filenames::Vector{String} = [
"stage_objectives_0.000000.csv",
"stage_objectives_0.000010.csv",
"stage_objectives_5.0e-5.csv",
"stage_objectives_0.000100.csv",
],
)
p = Plots.plot(
_plot_objectives.(filenames)...,
size = (1200, 800),
margin = 5Plots.mm,
)
Plots.savefig("objectives.pdf")
return p
end
function _print_help()
return println(
"""
usage: julia [-p N] [--project=.] model.jl [--gamma=<value>] [--help] [--plot]
Solve the hydro-thermal scheduling problem with the Entropic risk measure
parameterized with γ = <value>.
Use `-p N` to run the SDDP solver in parallel mode over `N` processors.
Use `--project=.` to reproduce the example in the paper using the provided
`Manifest.toml` and `Project.toml` files.
Examples:
julia model.jl --gamma=0.5
julia -p 4 --project=. model.jl --gamma=0.5
julia -p 4 --project=. model.jl --gamma=5.0
julia --project=. model.jl --plot
""",
)
end
function main(args)
if length(args) == 0
_print_help()
elseif findfirst(isequal("--help"), args) !== nothing
_print_help()
elseif findfirst(isequal("-h"), args) !== nothing
_print_help()
else
i = findfirst(arg -> startswith(arg, "--gamma="), args)
if i !== nothing
data = JSON.parsefile(joinpath(@__DIR__, "data.json"))
model = _build_model(data)
gamma = parse(Float64, replace(args[i], "--gamma=" => ""))::Float64
_train_model(model, gamma)
end
if findfirst(isequal("--plot"), args) !== nothing
_plot_cumulative_density()
_plot_objectives()
end
end
end
# ============================================================================ #
# Script entry point! #
# ============================================================================ #
main(ARGS)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 12530 | # Copyright 2020-21, Oscar Dowson.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This code is used to build the figures contained in the paper
#
# On solving multistage stochastic programs with the entropic risk measure.
# Dowson, O., Morton, D.P., and Pagnoncelli, B.K.
using SDDP
import Distributions
import Gurobi
import Random
import Statistics
const GRB_ENV = Gurobi.Env()
function gurobi_optimizer()
model = Gurobi.Optimizer(GRB_ENV)
MOI.set(model, MOI.Silent(), true)
return model
end
function _write_matrix(io, data)
for i in 1:size(data, 1)
for j in 1:size(data, 2)
print(io, data[i, j], " ")
end
print(io, "\n")
end
return
end
###
### FIGURE 1
###
function _ent(z, p, γ)
f = [pk * exp(γ * zk) for (pk, zk) in zip(p, z)]
f ./= sum(f)
return f
end
function _eoh_cvar(z, p, γ)
y = zeros(length(z))
α = 0.0
for k in sortperm(z; rev = true)
if α >= (1 - γ)
break
end
y[k] = min(p[k], (1 - γ) - α) / (1 - γ)
α += y[k] * (1 - γ)
end
return y
end
function _nested_cvar(z1, p1, z2, p2, γ)
y2 = _eoh_cvar(z2, p2, γ)
y1 = _eoh_cvar(z1 .+ y2' * z2, p1, γ)
return vcat(y1[1] .* y2, y1[2] .* y2)
end
function build_figure_1()
@info "Building Figure 1"
z = collect(1:8)
p = fill(1 / 8, 8)
γ = 0.4
data = hcat(
z,
p,
_ent(z, p, γ),
_eoh_cvar(z, p, γ),
_nested_cvar([0, 4], [0.5, 0.5], 1:4, fill(0.25, 4), γ),
)
open(joinpath(@__DIR__, "risk_set.dat"), "w") do io
return _write_matrix(io, data)
end
return
end
build_figure_1()
###
### FIGURE 3
###
function _solve_example(risk_measure)
X = Dict(2.0 => 0.1, 1.8 => 0.9)
Y = Dict(2.2 => 0.1, 1.7 => 0.9)
Z = Dict(2.0 => 0.1, 1.0 => 0.9)
two_stage_model = SDDP.LinearPolicyGraph(
stages = 2,
sense = :Min,
lower_bound = 0.0,
optimizer = gurobi_optimizer,
) do sp, t
@variable(sp, 0 <= x <= 1, SDDP.State, Bin, initial_value = 0)
if t == 1
@stageobjective(sp, 0)
else
Ω = [(ω1, ω2, ω3) for (ω1, _) in X for (ω2, _) in Y for (ω3, _) in Z]
P = [
p1 * p2 * p3 for (_, p1) in X for (_, p2) in Y for (_, p3) in Z
]
SDDP.parameterize(sp, Ω, P) do ω
@stageobjective(sp, ω[1] * x.in + ω[2] * (1 - x.in) + ω[3])
end
end
end
three_stage_model = SDDP.LinearPolicyGraph(
stages = 3,
sense = :Min,
lower_bound = 0.0,
optimizer = gurobi_optimizer,
) do sp, t
@variable(sp, 0 <= x <= 1, SDDP.State, Bin, initial_value = 0)
if t == 1
@stageobjective(sp, 0)
elseif t == 2
Ω = [(ω1, ω2) for (ω1, _) in X for (ω2, _) in Y]
P = [p1 * p2 for (_, p1) in X for (_, p2) in Y]
SDDP.parameterize(sp, Ω, P) do ω
@stageobjective(sp, ω[1] * x.in + ω[2] * (1 - x.in))
end
elseif t == 3
SDDP.parameterize(sp, [ω3 for (ω3, _) in Z], [p3 for (_, p3) in Z]) do ω
@stageobjective(sp, ω)
end
end
end
SDDP.train(
two_stage_model,
iteration_limit = 100,
print_level = 0,
risk_measure = risk_measure,
duality_handler = SDDP.LagrangianDuality(),
)
SDDP.train(
three_stage_model,
iteration_limit = 100,
print_level = 0,
risk_measure = risk_measure,
duality_handler = SDDP.LagrangianDuality(),
)
two_stage_x = SDDP.simulate(two_stage_model, 1, [:x])[1][1][:x].out
three_stage_x = SDDP.simulate(three_stage_model, 1, [:x])[1][1][:x].out
return two_stage_x, three_stage_x
end
function build_figure_3()
@info "Building Figure 3"
open(joinpath(@__DIR__, "results_avar.dat"), "w") do io
for γ in 0.5:0.01:1.0
println(" AVaR(γ) = $γ")
two, three = _solve_example(SDDP.AVaR(1 - γ))
println(io, "$(rpad(γ, 5, '0')) $(Int(two)) $(Int(three))")
end
end
open(joinpath(@__DIR__, "results_entropic.dat"), "w") do io
for γ in vcat(0.0, 4:0.05:5, 10.0)
println(" Entropic(γ) = $γ")
two, three = _solve_example(SDDP.Entropic(γ))
println(io, "$(lpad(γ, 4)) $(Int(two)) $(Int(three))")
end
end
return
end
build_figure_3()
###
### FIGURE 6
###
mutable struct CyclicHistorical{T,S} <: SDDP.AbstractSamplingScheme
scenarios::Vector{Vector{Tuple{T,S}}}
current::Int
end
function SDDP.sample_scenario(
::SDDP.PolicyGraph{T},
sampling_scheme::CyclicHistorical{T,S};
# Ignore the other kwargs because the user is giving
# us the full scenario.
kwargs...,
) where {T,S}
if sampling_scheme.current > length(sampling_scheme.scenarios)
sampling_scheme.current = 1
end
scenario = sampling_scheme.scenarios[sampling_scheme.current]
sampling_scheme.current += 1
return scenario, false
end
function _run_instance(risk_measure)
Ω, P = [(s = 1.11, b = 1.02), (s = 1.04, b = 1.06)], [0.2, 0.8]
model = SDDP.LinearPolicyGraph(
stages = 5,
sense = :Max,
upper_bound = 5.0,
optimizer = gurobi_optimizer,
) do sp, t
set_silent(sp)
@variable(sp, stocks >= 0, SDDP.State, initial_value = 0)
@variable(sp, bonds >= 0, SDDP.State, initial_value = 1)
@variable(sp, consumption >= 0)
@constraint(
sp,
c,
stocks.out + bonds.out + consumption - stocks.in - bonds.in == 0
)
@stageobjective(sp, consumption)
if t > 1
SDDP.parameterize(sp, Ω, P) do ω
set_normalized_coefficient(c, stocks.in, -ω.s)
return set_normalized_coefficient(c, bonds.in, -ω.b)
end
end
end
Random.seed!(1234)
scenarios = [
[(t, s[t]) for t in 1:5] for
s in collect(Iterators.product([nothing], Ω, Ω, Ω, Ω))[:]
]
probabilities = prod.(collect(Iterators.product([1.0], P, P, P, P))[:])
SDDP.train(
model;
risk_measure = risk_measure,
iteration_limit = 320,
print_level = 0,
sampling_scheme = CyclicHistorical(scenarios, 1),
)
Random.seed!(4321)
simulations = SDDP.simulate(
model,
length(scenarios),
[:stocks, :consumption];
sampling_scheme = CyclicHistorical(scenarios, 1),
)
Z = [sum(stage[:stage_objective] for stage in s) for s in simulations]
data = Dict{Float64,Float64}()
for (z, p) in zip(Z, probabilities)
if !haskey(data, z)
data[z] = 0.0
end
data[z] += p
end
D = Distributions.DiscreteNonParametric(
collect(keys(data)),
collect(values(data)),
)
return (
initial_stock = simulations[1][1][:stocks].out,
consumption = Statistics.quantile(D, [0.0, 0.1, 0.5, 0.9, 1.0]),
)
end
function build_figure_6()
@info "Building Figure 6"
γ_entropic = 0:1:50
data_entropic = Dict(
γ => d for
(γ, d) in map(γ -> (γ, _run_instance(SDDP.Entropic(γ))), γ_entropic)
)
open(joinpath(@__DIR__, "figure_6_entropic.dat"), "w") do io
return _write_matrix(
io,
hcat(
γ_entropic,
hcat([data_entropic[γ].consumption for γ in γ_entropic]...)',
),
)
end
open(joinpath(@__DIR__, "figure_6_entropic_initial.dat"), "w") do io
return _write_matrix(
io,
hcat(
γ_entropic,
reshape(
[data_entropic[γ].initial_stock for γ in γ_entropic],
(length(γ_entropic), 1),
),
),
)
end
γ_avar = 0:0.01:1
data_avar = Dict(
γ => d for
(γ, d) in map(γ -> (γ, _run_instance(SDDP.AVaR(1 - γ))), γ_avar)
)
open(joinpath(@__DIR__, "figure_6_avar.dat"), "w") do io
return _write_matrix(
io,
hcat(γ_avar, hcat([data_avar[γ].consumption for γ in γ_avar]...)'),
)
end
open(joinpath(@__DIR__, "figure_6_avar_initial.dat"), "w") do io
return _write_matrix(
io,
hcat(
γ_avar,
reshape(
[data_avar[γ].initial_stock for γ in γ_avar],
(length(γ_avar), 1),
),
),
)
end
return
end
build_figure_6()
###
### FIGURE 7
###
function _compute_stocks_bonds(risk_measure)
Ω, P = [(s = 1.11, b = 1.02), (s = 1.04, b = 1.06)], [0.2, 0.8]
scenarios = [
[(t, s[t]) for t in 1:5] for
s in collect(Iterators.product([(s = 1.0, b = 1.0)], Ω, Ω, Ω, Ω))[:]
]
probabilities = prod.(collect(Iterators.product([1.0], P, P, P, P))[:])
z_stocks_only = [prod(x[2].s for x in s) for s in scenarios]
z_bonds_only = [prod(x[2].b for x in s) for s in scenarios]
q = similar(probabilities)
SDDP.adjust_probability(
risk_measure,
q,
probabilities,
SDDP.Noise{Any}[],
z_stocks_only,
false,
)
stocks_only = q' * z_stocks_only
SDDP.adjust_probability(
risk_measure,
q,
probabilities,
SDDP.Noise{Any}[],
z_bonds_only,
false,
)
bonds_only = q' * z_bonds_only
return stocks_only, bonds_only
end
function _compute_nested_avar(z, γ)
function _eoh_cvar(z, p, γ)
y = zeros(length(z))
α = 0.0
for k in sortperm(z; rev = false)
if α >= (1 - γ)
break
end
y[k] = min(p[k], (1 - γ) - α) / (1 - γ)
α += y[k] * (1 - γ)
end
if sum(y) ≈ 0.0
_, i = findmin(z)
y[i] = 1.0
end
return z' * y
end
p = [0.2, 0.8]
return _eoh_cvar(
[
_eoh_cvar(
[
_eoh_cvar(
[
_eoh_cvar(z[1] * z[1] * z[1] .* z, p, γ)
_eoh_cvar(z[1] * z[1] * z[2] .* z, p, γ)
],
p,
γ,
),
_eoh_cvar(
[
_eoh_cvar(z[1] * z[2] * z[1] .* z, p, γ),
_eoh_cvar(z[1] * z[2] * z[2] .* z, p, γ),
],
p,
γ,
),
],
p,
γ,
),
_eoh_cvar(
[
_eoh_cvar(
[
_eoh_cvar(z[2] * z[1] * z[1] .* z, p, γ)
_eoh_cvar(z[2] * z[1] * z[2] .* z, p, γ)
],
p,
γ,
),
_eoh_cvar(
[
_eoh_cvar(z[2] * z[2] * z[1] .* z, p, γ),
_eoh_cvar(z[2] * z[2] * z[2] .* z, p, γ),
],
p,
γ,
),
],
p,
γ,
),
],
p,
γ,
)
end
function build_figure_7()
@info "Building Figure 7"
open(joinpath(@__DIR__, "figure_7_entropic.dat"), "w") do io
entropic = _compute_stocks_bonds.(SDDP.Entropic.(0:1:200))
return _write_matrix(
io,
hcat(0:1:200, [e[1] for e in entropic], [e[2] for e in entropic]),
)
end
open(joinpath(@__DIR__, "figure_7_avar.dat"), "w") do io
eoh_avar = _compute_stocks_bonds.(SDDP.AVaR.(1.0:-0.01:0.0))
return _write_matrix(
io,
hcat(
0.0:0.01:1.0,
[e[1] for e in eoh_avar],
[e[2] for e in eoh_avar],
_compute_nested_avar.(Ref([1.11, 1.04]), 0:0.01:1),
_compute_nested_avar.(Ref([1.02, 1.06]), 0:0.01:1),
),
)
end
return
end
build_figure_7()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 20295 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
using SDDP
import Gurobi
import Plots
import Random
import Statistics
import StatsPlots
###
### Section 7.1
###
function _add_ddu_constraints(model::SDDP.PolicyGraph{Int}, i::Int)
node = model[i]
if get(node.ext, :_ddu_is_set, false)
return
end
nominal_P = [
child.probability * noise.probability for child in node.children for
noise in model[child.term].noise_terms
]
push!(node.bellman_function.risk_set_cuts, nominal_P)
N = length(nominal_P)
SDDP._add_locals_if_necessary(node, node.bellman_function, N)
θʲʷ = VariableRef[node.bellman_function.local_thetas[i].theta for i in 1:N]
Θ = node.bellman_function.global_theta.theta
ddu = node.subproblem.ext[:__ddu__]
for (d, y_d) in enumerate(ddu.y)
P_d = Float64[
ddu.matrices[d][i+1, child.term] * noise.probability for
child in node.children for
noise in model[child.term].noise_terms
]
slack = ddu.M * (1 - y_d)
if JuMP.objective_sense(node.subproblem) == MOI.MIN_SENSE
JuMP.@constraint(node.subproblem, Θ >= P_d' * θʲʷ - slack)
else
JuMP.@constraint(node.subproblem, Θ <= P_d' * θʲʷ + slack)
end
end
node.ext[:_ddu_is_set] = true
return
end
function add_ddu_matrices(sp, matrices::Vector{<:Matrix}; M)
N = length(matrices)
@variable(sp, __ddu__[1:N], Bin)
@constraint(sp, sum(__ddu__) == 1)
sp.ext[:__ddu__] = (M = M, y = __ddu__, matrices = matrices)
return __ddu__
end
function solve_decision_dependent_trajectory(
model::SDDP.PolicyGraph{Int},
incoming_state_value,
variables::Vector{Symbol} = Symbol[];
explore::Bool = true,
depth::Union{Nothing,Int} = nothing,
skip_undefined_variables::Bool = false,
)
for i in keys(model.nodes)
_add_ddu_constraints(model, i)
end
function sample_node(Φ::Matrix{Float64}, y::Int)
r = rand()
for j in 1:size(Φ, 2)
r -= Φ[y, j]
if r <= 0
return j
end
end
return nothing
end
sampled_states = Dict{Symbol,Float64}[]
cumulative_value = 0.0
scenario_path = Tuple{Int,Any}[]
simulation = Dict{Symbol,Any}[]
i, y = 0, 1
Φ = first(values(model.nodes)).subproblem.ext[:__ddu__].matrices
while length(scenario_path) < something(depth, Inf)
if depth === nothing
i = sample_node(Φ[y], i + 1)
if i === nothing
break
end
else
j = nothing
while j === nothing
j = sample_node(Φ[y], i + 1)
end
i = j
end
node = model[i]
ω = SDDP.sample_noise(node.noise_terms)
push!(scenario_path, (i, ω))
subproblem_results = SDDP.solve_subproblem(
model,
node,
incoming_state_value,
ω,
scenario_path,
duality_handler = nothing,
)
__ddu__ = node.subproblem.ext[:__ddu__]
y = findfirst([round(Bool, value(y)) for y in __ddu__.y])
if explore && rand() < 0.05
y = rand(1:length(__ddu__.y))
end
cumulative_value += subproblem_results.stage_objective
incoming_state_value = copy(subproblem_results.state)
push!(sampled_states, incoming_state_value)
# Record useful variables from the solve.
store = Dict{Symbol,Any}(
:node_index => i,
:noise_term => ω,
:stage_objective => subproblem_results.stage_objective,
:bellman_term =>
subproblem_results.objective -
subproblem_results.stage_objective,
# :objective_state => objective_state_vector,
# :belief => copy(current_belief),
)
# Loop through the primal variable values that the user wants.
for variable in variables
if haskey(node.subproblem.obj_dict, variable)
# Note: we broadcast the call to value for variables which are
# containers (like Array, Containers.DenseAxisArray, etc). If
# the variable is a scalar (e.g. just a plain VariableRef), the
# broadcast preserves the scalar shape; see the sketch after this function.
# TODO: what if the variable container is a dictionary? They
# should be using Containers.SparseAxisArray, but this might not
# always be the case...
store[variable] = JuMP.value.(node.subproblem[variable])
elseif skip_undefined_variables
store[variable] = NaN
else
error(
"No variable named $(variable) exists in the subproblem.",
" If you want to simulate the value of a variable, make ",
"sure it is defined in _all_ subproblems, or pass ",
"`skip_undefined_variables=true` to `simulate`.",
)
end
end
push!(simulation, store)
if depth === nothing &&
rand() <=
1 - sum(child.probability for child in node.children; init = 0)
break
end
end
return (
scenario_path = scenario_path,
sampled_states = sampled_states,
objective_states = NTuple{0,Float64}[],
belief_states = Tuple{Int,Dict{Int,Float64}}[],
cumulative_value = cumulative_value,
simulation = simulation,
)
end
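# Sketch (not part of the original file): the `JuMP.value.(...)` broadcast in
# the function above relies on Julia's broadcasting rule that a scalar input
# yields a scalar output while a container input is mapped elementwise. A
# plain-Julia illustration of the same rule:
@assert sqrt.(4.0) === 2.0               # scalar in, scalar out
@assert sqrt.([4.0, 9.0]) == [2.0, 3.0]  # container in, elementwise out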
struct DecisionDependentForwardPass <: SDDP.AbstractForwardPass end
function SDDP.forward_pass(
model::SDDP.PolicyGraph{Int},
options::SDDP.Options,
::DecisionDependentForwardPass,
)
incoming_state_value = copy(options.initial_state)
return solve_decision_dependent_trajectory(model, incoming_state_value)
end
function create_figure_10()
Φ(ρ, z) = [1 0; ρ*(1-z) z; ρ 0]
ρ = 0.9
graph = SDDP.Graph(0)
SDDP.add_node.((graph,), 1:2)
Φ̅ = Φ(ρ, 0.5)
for i in 1:3, j in 1:2
Φ̅[i, j] > 0 && SDDP.add_edge(graph, (i - 1) => j, Φ̅[i, j])
end
model = SDDP.PolicyGraph(
graph;
sense = :Max,
optimizer = Gurobi.Optimizer,
upper_bound = 7 / (1 - ρ),
) do sp, node
@variable(sp, x >= 0, SDDP.State, initial_value = 0)
@variable(sp, u_sell >= 0)
sp[:z] = z = add_ddu_matrices(sp, [Φ(ρ, 0), Φ(ρ, 1)]; M = 1e3)
@constraint(sp, con_balance, x.out == x.in - u_sell + 0.0)
if node == 1 # farm
fix.(u_sell, 0; force = true)
@stageobjective(sp, -3 * z[2])
SDDP.parameterize(sp, [0, 2, 4, 6, 8]) do ω
return set_normalized_rhs(con_balance, ω)
end
else # market
@stageobjective(sp, 1 * u_sell)
@constraint(sp, u_sell <= x.in)
SDDP.parameterize(ω -> set_upper_bound(u_sell, ω), sp, [5, 10])
end
end
Random.seed!(12345)
SDDP.train(
model;
duality_handler = SDDP.LagrangianDuality(),
cut_type = SDDP.MULTI_CUT,
forward_pass = DecisionDependentForwardPass(),
iteration_limit = 100,
log_every_iteration = true,
cut_deletion_minimum = 100,
)
Random.seed!(5678)
ret = solve_decision_dependent_trajectory(
model,
model.initial_root_state,
[:x, :u_sell, :z];
explore = false,
depth = 50,
)
stock_plot = Plots.plot(
map(d -> d[:x].out, ret.simulation);
ylabel = "Quantity in stock (\$x^\\prime\$)\n",
ylims = (0, maximum(d -> d[:x].out, ret.simulation) + 1),
color = :slategray,
legend = false,
linewidth = 3,
)
Plots.scatter!(
stock_plot,
[
(i, data[:x].out) for (i, data) in enumerate(ret.simulation) if
data[:node_index] == 1 && data[:z][2] > 0.5
],
color = "#43a047",
)
Plots.scatter!(
stock_plot,
[
(i, data[:x].out) for (i, data) in enumerate(ret.simulation) if
data[:node_index] == 1 && data[:z][2] < 0.5
],
marker = :x,
markerstrokewidth = 3,
color = "#e53935",
)
plt = Plots.plot(
stock_plot,
Plots.plot(
map(d -> d[:u_sell], ret.simulation);
ylabel = "Sales decision (\$u_{sell}\$)",
seriestype = :steppre,
linewidth = 3,
color = :slategray,
xlabel = "Simulation step",
),
xlims = (0, length(ret.simulation) + 1),
legend = false,
layout = (2, 1),
dpi = 400,
)
Plots.savefig("cheese_producer2.pdf")
return model, plt
end
function _solve_finite_cheese_producer(T::Int; create_plot::Bool = false)
function Φ(z)
a = zeros(2T + 1, 2T)
a[1, 1] = 1.0
for t in 1:(T-1)
a[2t, 2t] = z
a[2t, 2t+1] = 1.0 - z
a[2t+1, 2t+1] = 1.0
end
a[2T, 2T] = z
return a
end
graph = SDDP.Graph(0)
SDDP.add_node.((graph,), 1:2T)
Φ̅ = Φ(0.5)
for i in 1:size(Φ̅, 1), j in 1:size(Φ̅, 2)
Φ̅[i, j] > 0 && SDDP.add_edge(graph, (i - 1) => j, Φ̅[i, j])
end
model = SDDP.PolicyGraph(
graph;
sense = :Max,
optimizer = Gurobi.Optimizer,
upper_bound = 7 * T,
) do sp, node
@variable(sp, x >= 0, SDDP.State, initial_value = 0)
@variable(sp, u_sell >= 0)
sp[:z] = z = add_ddu_matrices(sp, [Φ(0), Φ(1)]; M = 200)
@constraint(sp, con_balance, x.out == x.in - u_sell + 0.0)
if isodd(node) # farm
fix.(u_sell, 0; force = true)
@stageobjective(sp, -3 * z[2])
SDDP.parameterize(sp, [0, 2, 4, 6, 8]) do ω
return set_normalized_rhs(con_balance, ω)
end
else # market
@stageobjective(sp, 1 * u_sell)
@constraint(sp, u_sell <= x.in)
SDDP.parameterize(ω -> set_upper_bound(u_sell, ω), sp, [5, 10])
end
end
Random.seed!(123456)
SDDP.train(
model;
duality_handler = SDDP.BanditDuality(
# SDDP.ContinuousConicDuality(),
SDDP.StrengthenedConicDuality(),
SDDP.LagrangianDuality(),
),
# duality_handler = SDDP.LagrangianDuality(),
cut_type = SDDP.MULTI_CUT,
forward_pass = DecisionDependentForwardPass(),
iteration_limit = 200,
log_every_iteration = true,
cut_deletion_minimum = 500,
# stopping_rules = [SDDP.SimulationStoppingRule()],
)
Random.seed!(56789)
if create_plot
ret = solve_decision_dependent_trajectory(
model,
model.initial_root_state,
[:x, :u_sell, :z];
explore = false,
)
stock_plot = Plots.plot(
map(d -> d[:x].out, ret.simulation);
ylabel = "Quantity in stock (\$x^\\prime\$)\n",
ylims = (0, maximum(d -> d[:x].out, ret.simulation) + 1),
color = :slategray,
legend = false,
linewidth = 3,
)
Plots.scatter!(
stock_plot,
[
(i, data[:x].out) for (i, data) in enumerate(ret.simulation) if
isodd(data[:node_index]) && data[:z][2] > 0.5
],
color = "#43a047",
)
Plots.scatter!(
stock_plot,
[
(i, data[:x].out) for (i, data) in enumerate(ret.simulation) if
isodd(data[:node_index]) && data[:z][2] < 0.5
],
marker = :x,
markerstrokewidth = 3,
color = "#e53935",
)
plt = Plots.plot(
stock_plot,
Plots.plot(
map(d -> d[:u_sell], ret.simulation);
ylabel = "Sales decision (\$u_{sell}\$)",
seriestype = :steppre,
linewidth = 3,
color = :slategray,
xlabel = "Simulation step",
),
xlims = (0, length(ret.simulation) + 1),
legend = false,
layout = (2, 1),
dpi = 400,
)
Plots.savefig("cheese_producer_$T.pdf")
end
simulations = map(1:1_000) do _
ret = solve_decision_dependent_trajectory(
model,
model.initial_root_state,
Symbol[];
explore = false,
)
return ret.cumulative_value
end
upper_bound = SDDP.calculate_bound(model)
return upper_bound, simulations
end
function create_figure_11()
data = Dict()
for t in 2:2:10
data[t] = _solve_finite_cheese_producer(t)
end
x = sort(collect(keys(data)))
ub = [data[xi][1] for xi in x]
μ = [data[xi][2] for xi in x]
box_y = reduce(vcat, μ)
box_x = reduce(vcat, [fill(x[i], length(μ[i])) for i in 1:length(x)])
StatsPlots.violin(
box_x,
box_y;
xlims = (1, 11),
xticks = (2:2:10),
bar_width = 1,
xlabel = "Time horizon",
ylabel = "Objective value",
label = false,
color = :grey,
alpha = 0.5,
)
Plots.scatter!(
x,
ub;
label = "Upper bound",
color = :black,
)
Plots.scatter!(
x,
Statistics.mean.(μ);
label = "Sample mean",
marker = :o,
color = :white,
)
Plots.savefig("cheese_producer_violin.pdf")
return
end
###
### Section 7.2
###
mutable struct StoppingForwardPass <: SDDP.AbstractForwardPass
name::Symbol
end
function SDDP.forward_pass(
model::SDDP.PolicyGraph,
options::SDDP.Options,
f_pass::StoppingForwardPass,
)
pass = SDDP.forward_pass(model, options, SDDP.DefaultForwardPass())
index = findfirst(s -> s[f_pass.name] < 0.5, pass.sampled_states)
n = 3 + something(index, length(pass.sampled_states))
subset(x, n) = x[1:min(length(x), n)]
return (
scenario_path = subset(pass.scenario_path, n),
sampled_states = subset(pass.sampled_states, n),
objective_states = pass.objective_states,
belief_states = subset(pass.belief_states, n),
cumulative_value = pass.cumulative_value,
)
end
function _solve_tiger_problem(ε::Float64; create_plot::Bool = false)
ρ = 0.95
graph = SDDP.Graph(
:R,
[:l, :r],
[(:R => :l, 0.5), (:R => :r, 0.5), (:l => :l, ρ), (:r => :r, ρ)],
)
if ε < 0.5
SDDP.add_ambiguity_set(graph, [:l, :r], 1e3)
end
model = SDDP.PolicyGraph(
graph;
sense = :Min,
lower_bound = -10.0,
optimizer = Gurobi.Optimizer,
) do sp, node
# s: stay, l: open left, r: open right
@variable(sp, x_s, Bin, SDDP.State, initial_value = 1)
@variable(sp, x_l, Bin, SDDP.State, initial_value = 0)
@variable(sp, x_r, Bin, SDDP.State, initial_value = 0)
@constraint(sp, x_s.out + x_l.out + x_r.out <= 1 - x_l.in - x_r.in)
@constraint(sp, x_s.out + x_l.out + x_r.out == x_s.in)
if node == :l
@stageobjective(sp, 100 * x_l.in - 10 * x_r.in + x_s.in)
SDDP.parameterize(sp, [:left, :right], [0.5 + ε, 0.5 - ε]) do ω
# println("I heard the tiger on the $ω side.")
end
elseif node == :r
@stageobjective(sp, -10 * x_l.in + 100 * x_r.in + x_s.in)
SDDP.parameterize(sp, [:left, :right], [0.5 - ε, 0.5 + ε]) do ω
# println("I heard the tiger on the $ω side.")
end
end
end
Random.seed!(12345)
SDDP.train(
model;
iteration_limit = 100,
log_every_iteration = true,
cut_deletion_minimum = 1_000,
duality_handler = SDDP.LagrangianDuality(),
forward_pass = StoppingForwardPass(:x_s),
)
lower_bound = SDDP.calculate_bound(model)
Random.seed!(4567)
sampling_scheme =
SDDP.InSampleMonteCarlo(max_depth = 50, terminate_on_dummy_leaf = false)
simulations =
SDDP.simulate(model, 100, [:x_s, :x_l, :x_r]; sampling_scheme)
objectives = map(simulations) do simulation
return sum(
ρ^(t - 1) * d[:stage_objective] for (t, d) in enumerate(simulation)
)
end
if !create_plot
μ, σ = SDDP.confidence_interval(objectives)
println("lower_bound = $lower_bound")
println("upper_bound = $μ ± $σ")
return lower_bound, objectives, model
end
simulations = simulations[1:100]
belief_plot = Plots.plot(;
xlabel = "Time step",
ylabel = "Belief(Left)",
legend = false,
xlims = (0, 12),
ymajorgrid = true,
)
plot = Plots.plot(;
xlabel = "Time step",
ylabel = "# hear left - hear right",
legend = false,
ylims = (-4, 4),
xlims = (0, 12),
ymajorgrid = true,
)
for simulation in simulations
b = Float64[0.5]
y = Int[0]
for d in simulation
push!(b, d[:belief][:l])
if d[:noise_term] == :left
push!(y, y[end] + 1)
else
push!(y, y[end] - 1)
end
if d[:x_l].out > 0.5 || d[:x_r].out > 0.5
break
end
end
Plots.plot!(
belief_plot,
0:length(b)-1,
b;
color = :grey,
linewidth = 3,
alpha = 0.2,
)
Plots.plot!(
plot,
0:length(y)-1,
y;
color = :grey,
linewidth = 3,
alpha = 0.2,
)
function correct_door(d)
return (d[:x_l].out > 0.5 && d[:node_index] == :r) ||
(d[:x_r].out > 0.5 && d[:node_index] == :l)
end
function incorrect_door(d)
return (d[:x_l].out > 0.5 && d[:node_index] == :l) ||
(d[:x_r].out > 0.5 && d[:node_index] == :r)
end
if (i = findfirst(correct_door, simulation)) !== nothing
Plots.scatter!([i], [y[i+1]], color = "#43a047", markersize = 6, alpha = 0.1)
end
if (i = findfirst(incorrect_door, simulation)) !== nothing
@show i
Plots.scatter!(
[i],
[y[i+1]];
marker = :x,
markersize = 8,
markerstrokewidth = 3,
color = "#e53935",
)
end
end
Plots.plot(belief_plot, plot, layout = (2, 1), dpi = 400)
Plots.savefig("tiger_problem_$ε.pdf")
return lower_bound, objectives, model
end
create_figure_12() = _solve_tiger_problem(0.35; create_plot = true)
function create_figure_13()
data = Dict()
for ε in [0.2, 0.3, 0.4, 0.5]
data[ε] = _solve_tiger_problem(ε)
end
x = sort(collect(keys(data)))
lb = [data[xi][1] for xi in x]
μ = [data[xi][2] for xi in x]
box_y = reduce(vcat, μ)
box_x = reduce(vcat, [fill(0.5 - x[i], length(μ[i])) for i in 1:length(x)])
StatsPlots.violin(
box_x,
# Apply a small perturbation so the violin shows up when the data are constant.
box_y .+ 0.15 * rand(length(box_y));
bar_width = 0.05,
# ylims = (-10, 25),
xlabel = "False positive rate",
ylabel = "Objective value",
label = false,
color = :grey,
alpha = 0.5,
)
Plots.scatter!(
0.5 .- x,
lb;
label = "Lower bound",
color = :black,
linewidth = 3,
)
Plots.scatter!(
0.5 .- x,
Statistics.mean.(μ);
label = "Sample mean",
marker = :o,
color = :white,
)
Plots.savefig("tiger_problem_violin.pdf")
return
end
# create_figure_10()
create_figure_11()
# create_figure_12()
# create_figure_13()
# d_5 = _solve_finite_cheese_producer(5)
# d_7 = _solve_finite_cheese_producer(7)
# d_9 = _solve_finite_cheese_producer(9)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 42773 | # Copyright 2019-20, Oscar Dowson, Lingquan Ding (@lingquant).
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This example is taken from MSPPy: https://github.com/lingquant/msppy/blob/dc85a2e8fa5243b3d5096d59085d9caad3ff2ede/examples/hydro_thermal/julia/test.jl
#
# The original author was Lingquan Ding (@lingquant), but it was modified by
# Oscar Dowson (@odow) to meet the latest SDDP.jl syntax.
#
# The original model and data is from:
#
# Shapiro, A., Tekaya, W., da Costa, J. P., & Soares, M. P. (2013). Risk neutral
# and risk averse stochastic dual dynamic programming method. European journal
# of operational research, 224(2), 375–391.
#! format: off
using SDDP, Gurobi, Test
function msppy_hydro_thermal()
thermal_ub = Array{Float64, 2}[
[657 1350 36 250 250 28 529 44 255 235 386 386 145 226 131 87 204 923 923 400 100 200 169 386 28 200 272 30 168 440 400 258 258 258 64 340 1058 1058 10 197 175 206 54],
[66 485 485 350 161 72 4 20 100 132 262 363 24 126 320 20 640],
[13 11 32 11 347 152 150 13 15 220 220 13 15 138 347 149 149 15 102 15 168 13 13 103 136 53 66 186 50 156 171 533 323],
[166 166]
]
thermal_lb = Array{Float64,2}[
[520.0 1080.0 0.0 59.3 27.1 0.0 0.0 0.0 219.78 199.99 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 399.99 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 71.7 28.8 0.0 132.98 0.0 0.0 0.0],
[0.0 0.0 0.0 210.0 0.0 27.0 0.0 9.56 25.0 79.46 147.54 228.02 0.0 49.66 105.0 5.0 0.0],
[0.0 0.0 0.0 0.0 0.7 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 223.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 348.8 0.0],
[0 0]
]
thermal_obj = Array{Float64, 2}[
[21.49 18.96 937.0 194.79 222.22 140.58 6.27 505.92 0.01 112.46 159.97 250.87 550.66 188.89 645.3 150.0 145.68 274.54 253.83 37.8 51.93 90.69 131.68 317.98 152.8 470.34 317.98 523.35 730.54 310.41 730.54 101.33 140.34 292.49 610.33 487.56 122.65 214.48 1047.38 0.01 329.57 197.85 733.54],
[564.57 219.0 219.0 50.47 541.93 154.1 180.51 218.77 189.54 143.04 142.86 116.9 780.0 115.9 115.9 248.31 141.18],
[464.64 464.64 455.13 464.64 834.35 509.86 509.86 464.64 464.64 185.09 492.29 464.64 464.64 188.15 82.34 329.37 329.37 464.64 464.64 464.64 317.19 464.64 464.64 678.03 559.39 611.57 611.56 204.43 325.67 678.03 329.2 70.16 287.83],
[329.56 329.56]
]
N_THERMAL = [43, 17, 33, 2]
# Sanity check on the dimensions of the thermal data.
@assert N_THERMAL == length.(thermal_obj) == length.(thermal_lb) ==
length.(thermal_ub)
hydro_ub = [45414.3, 13081.5, 9900.9, 7629.9]
storedEnergy_initial = [59419.3000, 5874.9000, 12859.2000, 5271.5000]
storedEnergy_ub = [200717.6, 19617.2, 51806.1, 12744.9]
exchange_ub = Array{Float64, 2}[
[0 7379 1000 0 4000],
[5625 0 0 0 0],
[600 0 0 0 2236],
[0 0 0 0 99999],
[3154 0 3951 3053 0]
]
deficit_obj = [1142.8, 2465.4, 5152.46, 5845.54]
deficit_ub = [0.05, 0.05, 0.1, 0.8]
demand = Array{Float64, 2}[
[45515 11692 10811 6507],
[46611 11933 10683 6564],
[47134 12005 10727 6506],
[46429 11478 10589 6556],
[45622 11145 10389 6645],
[45366 11146 10129 6669],
[45477 11055 10157 6627],
[46149 11051 10372 6772],
[46336 10917 10675 6843],
[46551 11015 10934 6815],
[46035 11156 11004 6871],
[45234 11297 10914 6701]
]
scenarios =[
[[56896.8, 56451.95, 65408.16, 46580.39, 54645.97, 36916.81, 63301.09, 53645.87, 57959.52, 54200.71, 48448.49, 45010.7, 81105.82, 35019.44, 41229.43, 80514.3, 51163.61, 53329.75, 50702.92, 42240.62, 55109.13, 33675.91, 25129.81, 29190.4, 36301.34, 37791.77, 58268.57, 39743.58, 62565.51, 49713.71, 72890.69, 45726.42, 66364.28, 37928.66, 62789.08, 73042.18, 66665.7, 52993.58, 31559.83, 56002.39, 30617.05, 40899.73, 54433.46, 62568.66, 51341.31, 40927.71, 73413.29, 69900.43, 66850.87, 76182.63, 72077.01, 84213.88, 64555.38, 72950.98, 53957.75, 53046.27, 47082.36, 58093.71, 80492.3, 47948.51, 60261.41, 47993.66, 64712.19, 52583.97, 55161.47, 92429.3, 43304.33, 58739.64, 55468.74, 40817.11, 63727.25, 60146.67, 47266.3, 67074.83, 46916.31, 95400.9, 38313.51, 54999.69, 71369.67, 76664.21, 73633.5, 46999.32], [86488.31, 61922.34, 51128.33, 37113.35, 72711.15, 29950.27, 43981.97, 45561.95, 57663.45, 75154.55, 38353.62, 51036.28, 73890.02, 47726.22, 71175.26, 62788.22, 61001.38, 57412.95, 72857.05, 65235.58, 67209.56, 50100.68, 26092.14, 44292.32, 27535.59, 25749.92, 62387.31, 57549.88, 50654.11, 59386.44, 77049.72, 60836.23, 59055.5, 61202.37, 78078.5, 78991.29, 71465.94, 47197.58, 37602.63, 57691.13, 20504.58, 58197.41, 54655.96, 45842.69, 48096.68, 47690.98, 71610.48, 47429.75, 89520.11, 92447.91, 47928.35, 71909.26, 47626.61, 70620.4, 50034.14, 59045.22, 57525.75, 61449.32, 41790.65, 65572.44, 86433.67, 68600.8, 51672.08, 83801.3, 41190.08, 73474.32, 53990.6, 50665.03, 67756.3, 40550.35, 70939.21, 61771.53, 71467.42, 59049.86, 49152.53, 91724.84, 63801.6, 66431.93, 59988.07, 50373.33, 49701.05, 56717.86], [88646.94, 50742.1, 40424.25, 35392.72, 61323.17, 53709.93, 39542.99, 40185.12, 34083.82, 68610.98, 35718.51, 62628.01, 69692.0, 51495.85, 62809.69, 70220.74, 93477.21, 59883.76, 53172.92, 56491.72, 65725.0, 77701.54, 32542.38, 29444.29, 29070.61, 37872.92, 63481.8, 47988.85, 52728.54, 63441.32, 78376.69, 60248.39, 41110.96, 37183.08, 83982.68, 72883.91, 61051.15, 52788.25, 33315.79, 58386.71, 25463.7, 52687.47, 48144.5, 63161.07, 36378.46, 48314.32, 39789.9, 54422.3, 57184.51, 62386.9, 43430.01, 85259.92, 39652.55, 71335.51, 47263.74, 47293.6, 62981.06, 54515.96, 43462.15, 68127.72, 55948.83, 56153.76, 61042.16, 52448.52, 54230.46, 57821.91, 55790.78, 59436.74, 62295.26, 38312.37, 49257.88, 48780.76, 62710.63, 57188.43, 59325.77, 51971.26, 64294.7, 47939.56, 54351.96, 86775.98, 37212.98, 49482.34], [64581.71, 35954.27, 34627.46, 27179.93, 54644.39, 37115.42, 35188.12, 30857.44, 28957.8, 38145.67, 33851.76, 45004.41, 41888.5, 32537.77, 61618.73, 45492.33, 57661.37, 40768.91, 36675.8, 39891.03, 46600.38, 41833.13, 35331.16, 24828.12, 28963.44, 26392.81, 57277.26, 40083.16, 39037.46, 40510.26, 49649.42, 35895.16, 28420.51, 27653.31, 52072.86, 47120.75, 42126.42, 31221.08, 23267.58, 33811.21, 21492.44, 37357.77, 49257.71, 56164.65, 34749.3, 38150.11, 44706.32, 35701.34, 40448.63, 52746.17, 40067.9, 62428.88, 42785.49, 52326.19, 32913.93, 41161.01, 44123.86, 36565.93, 35065.17, 68823.2, 50669.57, 47121.6, 43602.65, 47373.13, 36659.8, 45289.32, 45618.72, 34198.26, 38268.72, 28944.83, 31516.06, 42862.66, 48390.75, 37943.12, 50501.94, 35807.24, 52671.46, 49262.74, 46138.46, 55543.29, 32317.51, 51632.89], [43078.74, 27000.91, 24456.72, 19080.88, 34731.77, 24512.01, 27671.57, 25654.34, 24788.09, 29380.5, 18923.09, 30031.07, 26936.13, 22540.12, 34858.01, 30581.26, 34635.02, 24931.13, 27008.57, 27616.21, 28566.65, 25574.69, 21342.69, 30732.45, 17083.18, 29745.45, 34161.48, 32373.28, 25060.83, 30213.42, 
39757.11, 26435.75, 20601.37, 21707.83, 45436.42, 34622.49, 28792.77, 22897.49, 17471.04, 24573.34, 18395.36, 23937.22, 31238.07, 34468.55, 23514.07, 29321.19, 29379.36, 28537.54, 34587.4, 35207.58, 27619.03, 39712.36, 35169.64, 37162.41, 31130.86, 39983.01, 35946.54, 28366.52, 29951.98, 40186.78, 50571.36, 32563.19, 32207.74, 34805.16, 26620.61, 32791.29, 35023.07, 26037.19, 25014.17, 22937.9, 28856.74, 27047.64, 37864.66, 31422.07, 29368.14, 29439.74, 36583.08, 31676.46, 29744.25, 34050.76, 30086.86, 29348.76], [32150.18, 25285.24, 19099.7, 14173.57, 26184.86, 17752.6, 23148.61, 20780.68, 20168.94, 21178.59, 16560.55, 29365.82, 23855.04, 17234.34, 26267.7, 26141.76, 26250.48, 22090.39, 21541.84, 20595.83, 23051.97, 22945.63, 17305.82, 23164.67, 18172.0, 32394.56, 25360.63, 28995.24, 20275.84, 24027.02, 27505.81, 22703.44, 17584.25, 16834.37, 30722.54, 25847.39, 26649.2, 18519.69, 18348.12, 20415.98, 19592.53, 19778.88, 25505.35, 29621.4, 18642.25, 33204.9, 25960.33, 27167.15, 26294.99, 28241.02, 25324.0, 39552.75, 24145.72, 28054.54, 22726.21, 34494.79, 32289.77, 23823.56, 23255.09, 29651.66, 34628.41, 31515.82, 28821.56, 25045.89, 21037.21, 39653.57, 26762.9, 24243.85, 21432.57, 20539.73, 20990.06, 22862.46, 33766.52, 28192.96, 23389.44, 24328.4, 28371.8, 25816.19, 23648.83, 28276.29, 38658.88, 38515.33], [25738.04, 19913.2, 16790.67, 11860.9, 19927.38, 14706.5, 16354.45, 18619.35, 16549.77, 16631.69, 15759.09, 23062.33, 18597.07, 14480.2, 24962.54, 26915.97, 22787.45, 17159.91, 17111.7, 17576.81, 19086.23, 17400.83, 13620.22, 15434.43, 15196.4, 21098.2, 28206.5, 22466.48, 16221.58, 20073.41, 21229.3, 17647.23, 14939.49, 15664.9, 27837.17, 21366.45, 21327.28, 15599.11, 13938.13, 19611.86, 17830.94, 21998.19, 23299.17, 23916.43, 18258.67, 26281.64, 20098.85, 24116.7, 22630.54, 26739.58, 19458.87, 38487.54, 20195.47, 23486.83, 19228.76, 24159.73, 21535.23, 21521.31, 24639.11, 24871.76, 26304.24, 23268.23, 24487.15, 23871.07, 18284.17, 28291.17, 20945.59, 22369.94, 19811.22, 17389.8, 18273.11, 19522.99, 27882.67, 21450.9, 20552.6, 23607.35, 21404.13, 28296.36, 20808.6, 24527.67, 26593.14, 31033.18], [20606.05, 16801.72, 14192.07, 9904.96, 19833.32, 13891.48, 13870.17, 16287.23, 13383.61, 13732.02, 15122.03, 16335.75, 15380.33, 12027.54, 16431.03, 18109.8, 19971.19, 16989.68, 14138.87, 13614.16, 15974.7, 14098.04, 11590.2, 11597.36, 11400.63, 23751.08, 25079.71, 16940.29, 15396.49, 17698.38, 17329.15, 15414.73, 13043.51, 13406.36, 21583.89, 17619.21, 16981.52, 15169.28, 11339.97, 14285.46, 13204.94, 19390.34, 18399.97, 18752.41, 14739.07, 25994.83, 15773.26, 18723.76, 21783.59, 21137.32, 17354.26, 26962.54, 20638.69, 19665.77, 22455.74, 18405.64, 17890.93, 25762.9, 22246.87, 19802.24, 22481.12, 20835.04, 19030.91, 16927.84, 16072.49, 21382.07, 22745.14, 15799.05, 18981.85, 14811.25, 16522.04, 15604.83, 19421.44, 17115.78, 17471.55, 19105.94, 22977.11, 24641.57, 16121.9, 24298.46, 17342.35, 19248.41], [22772.16, 15034.4, 13741.74, 11835.56, 18289.43, 17056.12, 12040.76, 14448.98, 12275.62, 12255.37, 17561.96, 16624.51, 14661.5, 10510.08, 14125.62, 14929.4, 24107.77, 13659.7, 12283.03, 11761.84, 13469.6, 14120.36, 12719.73, 10034.25, 11259.79, 16559.39, 31762.07, 20615.73, 12660.14, 13992.03, 15396.42, 16368.64, 11565.03, 10941.74, 17168.12, 17513.5, 16020.86, 13917.48, 9500.66, 19390.18, 12671.12, 18785.94, 18905.01, 16947.82, 12242.09, 29837.37, 18936.32, 19703.51, 27774.12, 22748.74, 14303.81, 22450.11, 22886.6, 18904.29, 17416.04, 18858.4, 15528.36, 26579.42, 26121.59, 17212.57, 28153.06, 
23376.16, 15224.49, 15583.27, 21217.4, 20427.43, 23452.75, 16741.52, 30198.36, 15672.8, 17535.92, 15052.75, 15273.23, 19793.78, 18612.71, 13365.51, 16452.14, 32081.91, 13964.53, 16749.78, 14773.73, 17116.17], [23767.55, 22347.14, 17339.43, 12925.78, 34902.46, 14124.93, 21610.55, 18611.16, 13874.47, 14368.61, 21513.16, 18401.39, 23147.1, 11673.81, 17407.93, 19596.07, 25084.21, 15580.89, 14990.72, 17952.25, 15543.08, 16393.5, 18436.23, 11431.06, 11965.3, 14726.15, 22453.76, 21456.65, 14941.06, 16395.21, 14685.41, 25111.82, 13330.21, 17590.17, 30383.6, 23226.29, 15479.99, 17075.9, 18972.43, 21841.24, 18849.04, 42223.31, 26037.88, 20118.15, 22821.5, 30995.92, 18894.75, 18612.61, 25497.0, 21882.33, 26321.83, 32933.12, 21215.67, 18142.29, 15945.98, 21042.48, 20583.84, 20560.02, 26845.0, 26617.84, 34139.15, 29715.86, 16112.45, 25927.78, 22152.14, 25137.8, 33871.69, 13858.76, 18798.82, 22151.35, 13284.47, 15851.55, 21942.79, 23406.24, 26217.27, 13559.07, 21131.23, 37332.1, 22115.41, 25199.08, 15185.71, 24053.0], [25831.89, 25004.76, 18503.66, 13594.84, 26176.69, 18411.58, 35511.48, 22441.18, 24967.68, 29859.02, 27124.93, 25773.5, 31708.34, 21600.95, 29777.43, 22391.65, 24543.99, 22869.29, 21100.25, 30429.5, 16296.95, 24523.57, 22523.1, 15501.59, 18872.23, 22472.39, 28037.37, 24399.38, 22887.14, 26654.33, 22534.09, 26281.36, 20988.75, 24865.56, 34692.72, 35360.85, 27010.2, 20988.36, 38522.5, 24500.92, 25310.61, 43420.75, 35193.52, 21707.73, 31387.95, 40779.06, 28225.77, 30537.62, 33284.42, 27321.85, 47896.08, 38532.69, 22792.1, 24029.92, 17271.2, 29198.04, 26072.51, 27553.75, 24614.4, 22967.04, 46357.71, 21313.7, 23953.59, 25345.95, 36517.51, 33685.87, 28944.96, 20108.6, 29306.63, 26133.38, 22837.29, 21655.14, 26349.73, 29076.36, 29204.23, 23931.0, 26396.78, 37931.62, 29379.11, 27410.53, 23276.7, 22846.94], [38566.5, 48755.6, 36070.84, 33700.23, 32033.65, 34416.72, 53788.68, 40579.16, 37058.66, 34210.67, 40816.63, 41600.78, 39227.88, 27408.38, 59081.29, 28648.63, 44644.33, 48005.17, 34140.7, 43351.87, 25642.92, 28734.88, 33971.28, 23822.86, 35724.0, 44481.07, 41970.5, 30205.39, 27119.48, 42229.06, 26492.55, 52567.64, 16616.9, 38767.96, 53610.53, 42361.49, 41348.52, 38279.56, 39293.9, 24936.41, 45488.06, 47985.13, 44074.47, 36129.72, 44509.73, 59740.8, 50561.77, 46501.2, 43322.51, 53996.58, 63642.28, 60795.26, 44335.09, 32110.3, 41561.28, 48844.35, 32901.12, 63065.14, 25343.75, 35462.89, 54242.97, 35365.63, 36427.0, 34081.72, 46049.51, 50920.23, 39745.84, 31191.19, 45301.54, 40578.93, 35212.74, 33943.97, 40568.42, 56921.92, 56973.22, 30130.6, 38489.84, 62539.4, 44712.54, 44675.39, 27895.03, 40031.75]],
[[7409.65, 5285.8, 3001.83, 4138.99, 3066.59, 10383.66, 5287.43, 8187.34, 3943.3, 10381.68, 6545.04, 4552.72, 2079.84, 6164.98, 1251.1, 8897.66, 6681.96, 5057.3, 2765.38, 5640.01, 6542.95, 3614.85, 5157.45, 9775.0, 3532.37, 7963.4, 4487.86, 4568.18, 5754.32, 2392.87, 5033.56, 4662.57, 5379.87, 3421.47, 3443.25, 10618.72, 7738.41, 4293.84, 9312.11, 5193.9, 21746.41, 4706.82, 10653.16, 8235.54, 6049.5, 12573.63, 10453.06, 4559.98, 3345.05, 7615.64, 13613.82, 4352.26, 6707.33, 3669.72, 2201.57, 10074.15, 4184.64, 10019.61, 17804.81, 3565.4, 5879.48, 5832.71, 4577.42, 25365.49, 12539.04, 10514.99, 15729.86, 5633.35, 5183.68, 13152.25, 7376.78, 7860.47, 8996.77, 5580.37, 4540.66, 6978.33, 7455.89, 5698.04, 16393.73, 12394.23, 6995.32, 9082.73], [3310.83, 8062.89, 3654.84, 9033.31, 2362.85, 3898.48, 4513.4, 13477.36, 4740.13, 7285.45, 12151.87, 10352.82, 3486.56, 3406.67, 2971.79, 20768.1, 9061.81, 8224.11, 1570.63, 5359.67, 12352.37, 3386.15, 6784.36, 6536.57, 4660.27, 8235.06, 8088.39, 3340.72, 7829.11, 4352.61, 4795.93, 5413.19, 11713.01, 4757.09, 4522.48, 20932.73, 9141.88, 3285.2, 10066.11, 4455.28, 11552.65, 12269.5, 10468.36, 9002.04, 5860.44, 7508.74, 12318.39, 3235.6, 2194.14, 5643.44, 11462.12, 6245.81, 6277.39, 8884.32, 4848.83, 9729.31, 5485.64, 14811.7, 10648.59, 3761.5, 7278.83, 9536.28, 13817.35, 13096.53, 17130.25, 22862.44, 24230.34, 8581.54, 7113.62, 20982.56, 6045.08, 9222.95, 4546.8, 2940.42, 3964.14, 7361.53, 5088.3, 5016.72, 15194.28, 21152.87, 5652.86, 7008.55], [3531.16, 8957.82, 3178.5, 5661.19, 3920.46, 3192.74, 7943.57, 4450.77, 9119.71, 4152.1, 7162.92, 8022.63, 2625.92, 6848.3, 3557.74, 14045.21, 6520.86, 6961.17, 3556.14, 8411.78, 11429.84, 2230.74, 3992.71, 7969.13, 4549.48, 3313.02, 4545.53, 8838.47, 4668.06, 3669.42, 15984.43, 6576.64, 10767.53, 3570.63, 4235.19, 13186.42, 10917.71, 2383.55, 7337.93, 4407.09, 11764.32, 9027.47, 6818.93, 9784.05, 5397.65, 8063.08, 8816.87, 3862.52, 3064.56, 9661.19, 4296.19, 4495.0, 5847.07, 6069.88, 4624.77, 3468.4, 4250.2, 7617.02, 6740.4, 2684.69, 8381.6, 8806.7, 7754.79, 7409.02, 13363.84, 9009.28, 19240.02, 5934.64, 8157.78, 11087.25, 4701.61, 8340.18, 3298.56, 2573.23, 3841.08, 9617.28, 4361.84, 4489.77, 9947.14, 12903.63, 4081.37, 12931.71], [2353.66, 22946.34, 1915.96, 7733.04, 2226.69, 2185.88, 6631.63, 6487.54, 5866.0, 8074.36, 7868.09, 10546.97, 1688.78, 3087.12, 1741.49, 5864.41, 3458.65, 7225.75, 6108.43, 3926.93, 3806.83, 1902.29, 3819.01, 6620.64, 9949.45, 11652.24, 4593.81, 3802.02, 7744.68, 3889.71, 9788.63, 3406.09, 6520.22, 6098.12, 3651.4, 5380.46, 4926.98, 2946.66, 12300.54, 3184.24, 12646.55, 7475.65, 5240.93, 4617.32, 3535.66, 5904.84, 7948.66, 1798.65, 3848.84, 4646.03, 4152.64, 2387.97, 6547.01, 10506.71, 8239.95, 9202.59, 5175.82, 7624.21, 12563.21, 2986.48, 7367.14, 5978.42, 6962.84, 4322.28, 11487.66, 3009.02, 31342.11, 8825.75, 4447.53, 9064.37, 4165.87, 4096.91, 3955.58, 6297.93, 2904.26, 8573.7, 5924.05, 1862.2, 20304.29, 11443.51, 4044.25, 7779.19], [17558.02, 17069.88, 2373.08, 6640.39, 1450.68, 5979.91, 3956.49, 14053.4, 11313.75, 8310.5, 17098.26, 11110.55, 3212.31, 1592.55, 1334.46, 5709.2, 4518.4, 11781.72, 4506.92, 5104.84, 2133.69, 1043.65, 3239.48, 18109.4, 14302.91, 12349.19, 4368.35, 2571.06, 8080.39, 3456.32, 6937.35, 4590.22, 3230.86, 6796.7, 14037.3, 3558.01, 2740.89, 2453.37, 6787.76, 7295.93, 15120.17, 3096.41, 12410.99, 4217.96, 2908.24, 7652.33, 3403.23, 1455.04, 15926.21, 6319.06, 3693.18, 2355.48, 11477.68, 7674.08, 7162.96, 28746.46, 19092.13, 12718.81, 13068.09, 2481.35, 
25132.34, 12574.9, 11651.35, 2542.44, 3187.46, 3284.15, 22128.38, 4682.77, 4893.5, 12036.91, 9303.87, 4280.55, 8789.31, 14544.79, 1575.72, 19676.18, 9801.09, 2633.78, 23860.08, 6368.47, 5820.42, 5027.03], [16500.03, 14116.42, 2586.88, 5006.4, 4870.91, 24757.16, 3074.31, 18436.3, 8302.23, 5509.57, 12544.69, 8996.38, 11259.67, 2864.72, 2111.65, 9705.3, 8521.81, 6640.67, 8437.52, 4542.81, 2130.2, 7527.64, 5456.18, 20383.85, 19819.54, 7328.12, 6529.82, 7777.59, 7536.23, 6013.21, 8535.54, 4081.35, 2425.2, 5661.62, 5779.88, 8475.7, 5158.0, 2057.6, 14537.4, 13950.47, 18291.51, 13432.11, 14519.14, 10006.67, 5811.61, 14789.01, 6019.69, 1852.84, 5792.17, 4286.76, 3951.48, 11707.22, 19177.84, 4478.51, 8143.01, 16724.45, 14358.65, 3659.4, 31530.31, 10333.43, 29102.87, 11670.46, 16081.5, 5227.65, 7949.58, 10093.94, 8687.4, 10097.05, 5551.01, 11186.02, 11277.68, 7690.47, 8274.21, 17201.4, 2327.87, 7759.36, 9819.79, 3875.86, 10948.84, 8435.22, 14993.31, 18801.11], [13120.24, 10030.45, 2757.54, 3944.6, 9554.59, 9475.58, 4707.66, 20298.31, 7357.27, 6166.76, 8463.41, 8458.0, 7872.45, 2652.39, 6656.49, 16857.63, 6573.95, 7879.54, 4984.1, 4187.22, 2800.89, 8819.05, 4336.61, 20909.6, 24727.53, 6097.58, 19239.09, 4004.02, 5157.97, 3911.69, 6409.4, 4584.93, 2249.59, 6568.88, 14177.95, 9592.74, 6272.61, 3832.52, 9938.38, 16873.78, 16428.84, 11492.41, 18006.83, 7315.45, 4677.58, 8160.76, 8118.81, 6917.4, 7952.26, 10505.13, 3118.92, 23453.1, 14574.31, 5131.47, 4667.29, 13582.0, 5885.88, 8638.57, 14918.85, 7656.1, 21361.03, 19596.13, 19587.39, 12142.76, 14952.77, 12061.01, 14055.5, 20352.48, 11777.6, 14295.29, 7238.2, 7289.36, 12705.6, 9615.78, 3154.55, 12548.16, 6702.12, 12635.41, 11679.29, 22927.29, 10156.43, 16484.93], [5954.69, 7197.01, 4112.05, 5362.07, 13882.01, 15078.02, 6795.61, 6066.34, 4336.22, 8112.59, 16886.68, 7940.07, 12915.68, 1401.03, 4058.39, 6663.63, 8236.75, 17040.28, 5295.01, 8461.83, 1378.5, 3474.52, 3507.61, 6583.59, 10870.33, 10369.29, 35617.04, 9715.58, 6528.27, 13177.69, 3631.91, 3200.75, 5859.37, 10829.09, 20506.31, 9050.58, 9737.41, 1846.95, 4687.78, 6931.81, 12516.57, 22925.03, 20596.57, 5214.97, 11164.21, 17192.65, 16108.22, 6287.81, 8234.75, 18033.61, 2752.96, 10713.65, 28527.49, 6476.5, 6438.9, 10718.83, 2589.99, 10984.03, 14606.87, 7739.52, 17353.14, 6260.07, 7238.28, 6017.42, 10574.14, 18330.91, 25167.3, 4962.26, 6591.8, 8030.21, 12134.87, 3227.84, 4562.76, 7851.69, 5173.4, 6608.72, 9306.11, 18805.57, 9296.72, 31196.1, 8135.09, 18320.07], [15399.44, 13537.18, 5915.66, 5875.27, 17292.78, 12754.63, 8251.08, 5421.77, 10006.75, 4748.17, 9616.28, 6338.66, 10938.71, 2541.8, 4048.12, 5684.05, 16548.61, 5575.83, 7714.25, 6252.89, 1558.46, 10237.83, 13271.92, 21222.47, 10114.31, 11769.31, 34397.62, 16435.26, 10833.0, 13945.63, 20895.3, 9113.18, 7687.65, 12819.22, 20988.93, 17385.88, 17547.31, 3909.91, 6856.03, 7175.87, 6900.44, 28886.9, 23298.33, 8385.69, 17614.78, 10499.01, 6933.26, 9012.05, 8563.81, 17957.71, 6226.46, 6350.63, 12862.26, 7110.82, 8733.01, 7307.65, 8510.56, 29671.54, 20474.89, 2870.43, 12969.48, 12128.0, 4644.39, 7472.78, 13396.85, 8022.32, 26930.49, 5524.87, 26638.17, 9594.72, 15017.66, 2798.56, 8558.21, 27069.78, 5406.34, 8853.16, 8238.13, 30269.06, 7007.25, 25803.98, 4971.36, 17441.16], [7850.88, 13597.18, 11670.8, 6746.6, 36584.84, 13320.9, 12200.64, 4982.65, 5970.78, 7470.02, 9492.8, 7621.29, 8322.11, 2490.22, 4395.21, 10447.3, 13686.5, 7277.16, 6365.73, 19333.71, 15075.85, 16122.96, 17587.37, 25191.28, 6334.09, 6993.78, 13910.82, 12520.11, 7378.23, 11912.55, 21803.41, 
11034.76, 21860.9, 7478.8, 18480.25, 16085.96, 7805.79, 3844.28, 7906.84, 9919.52, 7115.0, 18663.96, 14475.25, 4222.06, 20499.18, 7058.89, 11182.55, 4538.99, 25843.05, 11911.52, 7353.82, 19796.33, 12475.44, 4122.11, 8805.84, 13543.51, 6362.98, 12092.85, 24147.39, 8451.31, 7858.68, 22735.78, 11039.65, 15096.44, 18320.99, 36613.28, 25910.59, 13602.81, 24927.54, 26474.92, 19037.43, 5781.32, 14390.44, 29799.22, 5096.17, 10142.32, 22134.3, 25257.81, 6930.86, 11660.59, 7665.46, 13707.82], [4509.3, 5956.02, 4897.74, 2838.94, 9486.05, 7342.55, 12558.93, 4839.55, 14596.27, 4287.32, 12082.84, 3002.53, 5091.7, 4482.89, 2894.87, 6503.62, 5704.0, 8068.35, 3001.24, 7240.1, 11074.37, 9961.52, 14766.35, 6686.95, 3664.62, 3078.75, 10294.27, 11660.97, 3176.76, 11786.36, 18577.84, 5851.82, 19824.45, 4230.01, 11030.46, 11198.74, 5628.01, 8131.1, 12167.7, 3961.64, 2761.54, 11026.96, 9302.85, 6064.25, 9837.97, 11684.02, 10827.06, 8561.39, 19505.89, 11117.88, 7537.4, 33286.07, 12995.34, 4408.2, 10977.37, 6180.76, 4074.81, 5343.94, 17552.87, 7728.69, 8592.9, 6603.4, 12967.45, 5876.19, 11275.98, 34404.9, 7384.03, 5413.47, 8005.45, 7696.49, 17418.65, 7435.18, 12825.12, 12732.29, 7180.77, 14221.95, 19442.64, 14550.37, 5671.42, 7329.35, 4929.09, 6657.47], [5473.68, 8030.47, 2065.5, 5781.15, 10314.59, 4600.83, 5444.76, 3112.05, 20724.22, 6445.54, 9276.67, 2267.17, 2927.15, 3227.27, 3228.81, 8524.09, 7334.81, 2565.18, 2222.52, 5500.38, 6421.41, 4260.7, 6250.58, 4378.49, 3782.47, 2427.42, 6219.46, 11083.22, 2485.47, 5262.05, 8570.68, 3107.63, 9662.31, 3963.08, 15349.18, 11686.32, 7544.01, 5189.81, 5039.03, 10874.26, 2428.01, 10263.97, 5276.74, 5041.23, 18263.71, 11363.74, 7674.83, 5696.37, 13927.11, 16004.66, 10668.29, 16321.75, 8851.65, 1441.7, 10043.25, 3790.37, 2866.44, 3303.62, 7453.19, 9286.47, 5306.86, 11069.76, 7351.33, 4276.83, 8898.83, 13236.04, 5792.31, 4056.85, 6369.16, 8541.41, 16834.87, 17174.26, 5906.74, 4262.22, 7427.78, 7653.82, 5007.68, 12105.22, 16680.07, 3937.77, 4665.31, 6575.95]],
[[14125.25, 11137.33, 14894.38, 17872.77, 12961.77, 8523.07, 13395.37, 17993.41, 15030.43, 9527.25, 14352.85, 13932.39, 18874.34, 18788.32, 18242.14, 20747.54, 11025.3, 18356.93, 22517.23, 13704.31, 12840.93, 9696.21, 11746.48, 13976.56, 7957.88, 15767.78, 18055.11, 11829.15, 10992.18, 10542.52, 16792.82, 10409.15, 16252.85, 12511.3, 15316.82, 12412.16, 14674.8, 16367.04, 10164.23, 19264.71, 5270.05, 15730.58, 13662.67, 13716.82, 12656.45, 6181.17, 15114.16, 16890.29, 18895.19, 18615.94, 17437.31, 21132.97, 19163.62, 18561.38, 18700.06, 9724.1, 16159.27, 11197.38, 28071.03, 10441.34, 15200.44, 18079.55, 15088.78, 8983.27, 13805.93, 17758.16, 10343.59, 9341.51, 13158.72, 10240.83, 16854.34, 12301.54, 9778.86, 11834.89, 12967.66, 16870.53, 5820.28, 15967.6, 10283.08, 13164.8, 17121.77, 5010.98], [13168.57, 13524.3, 16900.66, 14160.46, 16846.85, 8119.92, 16599.45, 13539.99, 17196.18, 14465.81, 14219.37, 14144.77, 26692.87, 15242.93, 20706.68, 30724.15, 14769.21, 13844.6, 27899.16, 13483.77, 12112.76, 13423.51, 5439.56, 8209.26, 12214.02, 7715.27, 21173.92, 15375.77, 11386.96, 17527.71, 22630.29, 15919.25, 19438.28, 24177.46, 14803.55, 17865.17, 13618.77, 14646.72, 12920.5, 19019.59, 4499.9, 8458.2, 11239.43, 10813.58, 12213.21, 6033.53, 18114.37, 16523.37, 29515.57, 30803.18, 15454.45, 23344.96, 9709.56, 24975.85, 22754.39, 7973.19, 10963.66, 7855.45, 12452.22, 15734.95, 29990.87, 13811.95, 14585.28, 8891.07, 7080.73, 14930.59, 9773.42, 5689.05, 15046.76, 5360.91, 14634.41, 13178.36, 14874.53, 14907.54, 6989.04, 21485.49, 11437.34, 14559.93, 5666.86, 9317.22, 15436.98, 10506.04], [18892.59, 9711.88, 10348.39, 8393.24, 17225.48, 12079.7, 13607.31, 11837.59, 15825.48, 20019.52, 14671.43, 15271.09, 25196.88, 14970.56, 23718.32, 13966.86, 18108.0, 16639.52, 37416.18, 10772.55, 13977.74, 17576.12, 7863.3, 9172.11, 6848.06, 13128.2, 23075.09, 11154.34, 10193.16, 27209.32, 19210.99, 13250.44, 11265.62, 17504.06, 15911.49, 19442.08, 12862.74, 20515.12, 13701.14, 15047.35, 5320.4, 9983.59, 10975.01, 12720.26, 8454.26, 6380.8, 7080.38, 20532.21, 46244.73, 32685.37, 14648.6, 22226.8, 8414.51, 22083.72, 15554.44, 8219.33, 13621.68, 9536.48, 9685.37, 14345.67, 39291.98, 12530.75, 14956.85, 9307.98, 6496.76, 13838.83, 9553.73, 11718.1, 13521.75, 5358.91, 10688.91, 8016.64, 20160.58, 14563.16, 10601.5, 20483.03, 13472.67, 10546.41, 7878.51, 11530.87, 6448.73, 5343.64], [20907.16, 6163.4, 8674.78, 5256.22, 13641.79, 8956.44, 9788.14, 7454.47, 6540.44, 14581.88, 14622.26, 10896.27, 16194.83, 11700.7, 22854.0, 16285.17, 23037.78, 12656.67, 23015.1, 11710.85, 14452.49, 20656.51, 11262.13, 7819.65, 7841.24, 8735.76, 24664.21, 9586.62, 8777.09, 21549.12, 11103.77, 10155.95, 5226.49, 8959.51, 18009.44, 14468.29, 13044.23, 16157.84, 8099.24, 7905.27, 5177.73, 8876.0, 13725.6, 18223.25, 7474.21, 5469.9, 6813.44, 12569.93, 23461.89, 13974.78, 18947.49, 20862.27, 12461.96, 22033.14, 7629.89, 9152.12, 10385.72, 6031.19, 6656.15, 15657.14, 13271.14, 7947.66, 15333.26, 8453.53, 5737.34, 13664.62, 4845.96, 7664.07, 11027.82, 4028.13, 6945.32, 7894.69, 15946.22, 12383.54, 13306.08, 7766.22, 14219.46, 13048.96, 8453.34, 15223.69, 6345.52, 8114.03], [14299.76, 4278.01, 5985.75, 5434.2, 12534.47, 6601.81, 7269.49, 5172.17, 4810.39, 6266.04, 9244.21, 7077.85, 8975.18, 7807.51, 26010.03, 11180.11, 14050.63, 6802.52, 10792.26, 8237.76, 8121.21, 10677.56, 6190.88, 4441.4, 4732.86, 5163.96, 18876.18, 7546.74, 4211.97, 7995.22, 7008.97, 6066.53, 4381.04, 5326.07, 9081.09, 7997.79, 8219.65, 7399.67, 5165.49, 6042.38, 3787.57, 
5911.37, 6672.67, 10307.27, 6388.42, 3178.9, 6113.13, 8533.1, 11405.85, 10935.35, 8929.89, 12374.54, 6341.99, 9928.68, 5659.95, 5601.08, 6479.9, 4211.82, 4260.96, 7792.5, 8670.26, 5715.38, 6898.31, 5580.48, 4017.54, 7950.56, 3612.55, 3947.06, 5505.24, 2642.28, 3356.73, 4144.77, 8502.69, 6689.04, 7024.81, 4624.0, 5904.69, 7836.14, 3867.45, 5898.12, 3729.67, 4436.83], [7186.5, 3856.12, 4070.03, 3639.9, 6137.82, 4022.12, 4463.72, 4258.5, 3853.22, 4448.53, 5093.69, 4674.39, 6332.34, 5232.47, 13126.14, 7437.61, 7134.21, 5572.06, 7921.04, 5250.03, 5595.19, 5959.66, 3983.24, 3732.44, 3204.75, 4933.04, 8862.73, 5156.53, 3418.01, 5568.97, 5310.42, 4459.51, 3350.92, 3674.86, 5572.53, 5758.04, 4613.34, 5037.79, 3904.56, 4404.93, 2770.17, 3858.8, 4450.36, 5441.74, 3811.42, 3074.65, 4131.36, 6844.59, 8314.34, 7147.15, 6250.82, 8070.47, 4395.27, 6173.65, 4238.15, 3733.33, 3915.13, 3289.74, 3400.48, 4840.57, 5729.91, 4335.35, 4949.45, 4255.12, 3149.33, 5297.94, 2865.3, 2798.98, 3711.39, 2554.99, 2720.15, 3007.94, 4503.35, 4403.79, 3913.8, 3857.52, 3386.59, 4594.35, 3093.06, 3691.22, 3623.01, 3354.04], [5559.36, 3505.1, 3778.06, 3056.73, 4750.92, 3332.63, 3735.09, 3478.98, 3315.28, 3554.91, 4257.73, 3917.13, 5321.92, 4406.87, 7897.38, 6014.22, 5756.65, 4899.18, 6493.34, 4534.69, 4467.55, 4829.82, 3358.52, 2962.96, 2749.41, 4134.87, 6314.96, 4327.64, 3087.86, 4335.62, 4297.52, 3774.59, 2942.73, 3142.18, 4499.19, 4625.8, 3895.94, 4355.52, 3431.25, 3916.6, 2639.92, 3379.99, 3784.12, 4231.58, 3827.21, 2467.09, 3540.98, 4688.85, 6774.18, 6604.68, 5090.57, 6338.98, 3884.21, 4920.06, 3921.22, 3422.94, 3330.7, 3236.79, 3493.62, 3858.48, 4636.19, 3789.54, 4216.57, 3319.8, 2640.65, 4073.01, 2496.6, 2387.7, 2932.12, 2069.87, 2581.31, 2644.28, 3900.74, 3544.59, 3241.01, 3017.42, 2789.99, 3703.97, 2482.62, 3156.08, 2744.39, 2553.22], [4824.29, 2977.42, 3280.58, 2769.4, 3756.76, 2855.07, 3133.63, 3053.68, 3117.6, 3187.34, 3852.29, 3415.31, 4525.84, 3858.99, 6367.19, 5211.33, 4854.33, 4144.19, 5479.03, 3946.36, 3811.89, 3973.15, 2934.7, 2645.4, 2398.16, 3224.14, 5066.13, 4326.79, 2744.92, 3656.36, 3607.0, 3116.68, 2681.35, 2895.38, 3852.57, 4078.28, 3373.9, 3947.14, 3024.92, 3394.71, 2283.0, 3175.88, 3088.95, 3722.67, 2987.75, 2292.13, 2881.11, 4492.51, 5893.95, 5631.81, 4547.79, 5363.23, 3482.66, 4415.55, 3863.15, 2758.94, 3331.28, 3016.34, 3213.13, 3521.15, 4242.84, 3626.55, 3444.77, 2933.05, 2091.91, 3657.59, 2296.35, 2086.7, 3003.8, 1918.41, 2026.03, 2261.85, 3027.27, 2944.82, 3030.66, 2968.11, 2513.33, 2925.71, 2098.29, 2433.83, 2258.57, 2264.21], [4242.98, 2715.01, 2746.51, 3028.44, 3164.24, 2579.08, 2719.55, 2861.11, 2897.99, 2635.62, 3076.89, 3062.19, 3900.65, 3332.13, 5179.72, 4449.05, 4668.06, 3620.54, 4696.48, 3255.88, 3297.43, 3529.72, 2596.76, 2183.56, 2061.05, 2738.3, 4197.26, 3364.12, 2490.01, 3075.44, 3030.66, 2501.17, 2374.8, 2299.04, 3205.28, 3516.09, 2903.91, 3723.44, 2567.93, 3147.29, 2276.15, 2645.12, 2533.68, 3282.41, 2474.97, 2646.84, 2718.44, 3628.7, 5856.33, 5010.0, 4209.51, 4908.46, 4183.75, 4228.58, 3659.84, 2699.23, 3200.37, 2824.73, 3236.01, 3293.14, 4257.96, 3238.62, 3417.46, 2341.2, 2041.27, 3213.83, 2048.56, 2336.91, 2783.2, 2028.78, 1582.95, 2063.77, 2710.17, 2880.92, 2912.3, 2218.75, 2004.71, 3005.14, 2024.28, 2183.95, 1965.18, 1847.74], [4030.32, 2784.09, 3251.36, 3035.99, 3173.36, 2905.28, 2683.63, 2701.98, 3750.94, 2809.41, 3261.16, 3952.17, 4022.67, 2953.09, 5656.38, 4591.93, 4383.56, 3778.97, 4362.42, 3726.37, 3021.81, 3473.24, 3192.62, 1961.51, 1936.32, 2489.8, 4563.05, 4432.06, 
2785.5, 2748.06, 2631.61, 3281.46, 2122.57, 2609.32, 4011.19, 3541.54, 2700.06, 3758.43, 2657.26, 4667.84, 3239.04, 3881.6, 3745.82, 3265.1, 2878.74, 4519.87, 3617.98, 4461.3, 6030.3, 5325.62, 5391.92, 5028.0, 4451.2, 5486.39, 3235.86, 3095.87, 3287.97, 3095.21, 3547.57, 4173.54, 5671.84, 3811.89, 2701.62, 2780.45, 2457.62, 3326.58, 2095.74, 1881.73, 2465.35, 2299.61, 2072.96, 1527.4, 2705.88, 2729.08, 3938.35, 1796.5, 2192.38, 4096.74, 2473.2, 2748.47, 1674.08, 2016.68], [5863.03, 5519.61, 5568.33, 3004.45, 3744.12, 4501.9, 6520.07, 4012.34, 3212.78, 5819.06, 4918.94, 6668.63, 6525.95, 5059.36, 9584.5, 6339.71, 9586.15, 4339.8, 8738.34, 6127.19, 2849.24, 4827.53, 4513.54, 2941.21, 5983.21, 4325.68, 4208.66, 5842.71, 4833.37, 3245.77, 3174.55, 5614.32, 2729.49, 7933.09, 7768.94, 5395.73, 4616.41, 5854.28, 6742.1, 9338.47, 7507.03, 5717.24, 9424.97, 5692.63, 6857.94, 6495.77, 4186.79, 6169.46, 9560.0, 6674.42, 12710.18, 5059.0, 5497.27, 6924.5, 3756.12, 4531.74, 5144.7, 5364.5, 4462.48, 6205.07, 12715.29, 4035.73, 3053.11, 5087.73, 5121.75, 3881.08, 5885.8, 4865.02, 5288.73, 3598.98, 2833.41, 2496.36, 3194.49, 4077.99, 8496.46, 1903.73, 2702.64, 7847.06, 5202.23, 3931.52, 4413.1, 2618.38], [6256.49, 10225.14, 9968.78, 5006.69, 6990.38, 7883.92, 13725.98, 8782.96, 5102.14, 12219.55, 7133.7, 16553.03, 16255.56, 12042.24, 17087.28, 11613.97, 13900.61, 15970.6, 9697.68, 12177.77, 4479.88, 11082.14, 9563.26, 10451.75, 10081.6, 11649.13, 12343.92, 5038.37, 7875.39, 11855.63, 4538.32, 8825.42, 4174.36, 12581.63, 12027.99, 7563.53, 12335.86, 13610.76, 15583.87, 8853.22, 18137.54, 12049.79, 13162.29, 7521.34, 8373.87, 13694.39, 8420.08, 11193.72, 9908.45, 13797.4, 18116.56, 5517.01, 11575.38, 11449.64, 5718.81, 10749.18, 8169.43, 20432.63, 5188.9, 9603.91, 16234.51, 5838.32, 9120.64, 9733.66, 10198.24, 9220.61, 9942.07, 10074.89, 11429.73, 6486.19, 5528.8, 4818.89, 5399.51, 13018.05, 12071.65, 4718.52, 7218.15, 7808.31, 9239.02, 10549.87, 6730.56, 8943.03]],
[[11445.26, 9396.12, 9866.75, 12189.83, 12910.68, 9918.07, 8140.01, 12852.12, 9416.01, 10276.83, 11775.03, 11838.64, 24318.78, 13257.7, 18076.42, 15821.04, 7474.73, 11812.83, 15706.01, 5481.26, 7504.73, 6268.96, 7504.95, 9270.37, 6073.61, 8750.38, 21274.59, 9495.46, 12791.41, 7181.52, 10787.91, 5100.68, 10318.63, 10737.62, 7936.66, 8803.48, 7144.38, 7801.97, 9177.17, 10608.92, 3893.13, 6948.11, 8977.61, 9215.05, 8095.57, 5890.96, 9540.81, 16252.52, 13336.07, 11626.36, 15401.03, 15401.59, 8722.29, 9687.94, 20397.1, 6309.53, 9667.89, 10050.76, 23449.63, 8423.32, 7411.93, 10818.92, 11197.63, 9909.79, 10804.87, 10288.03, 7017.45, 8345.5, 15034.49, 9693.82, 19293.15, 7206.01, 8462.7, 6763.02, 10802.53, 6499.8, 5148.48, 8276.41, 12261.51, 10538.96, 14404.6, 7258.67], [14719.19, 13849.17, 13631.43, 12177.59, 18010.69, 13302.4, 11285.29, 12809.84, 13682.94, 18874.24, 12523.57, 16877.66, 29183.48, 14291.3, 22301.65, 14904.42, 12522.31, 11791.37, 18496.37, 6979.2, 9335.08, 5630.56, 5634.78, 10671.55, 9980.95, 7606.14, 20418.43, 14951.7, 18109.33, 14918.77, 16033.19, 9361.22, 12551.23, 19276.9, 10616.44, 15397.32, 9475.47, 10421.28, 12375.16, 14830.72, 5729.91, 9694.81, 8591.43, 14495.29, 14107.25, 9421.06, 19359.53, 16790.61, 26986.54, 25885.9, 16467.1, 22487.03, 9224.35, 20387.94, 18234.79, 7081.11, 11111.52, 11171.72, 15392.37, 11806.74, 22711.09, 12025.7, 14780.7, 14848.03, 9908.41, 13792.5, 9267.26, 8170.04, 16846.34, 10356.98, 18361.69, 11226.45, 23225.67, 13257.7, 10945.27, 15574.3, 10877.03, 11769.67, 12344.86, 14857.49, 17224.47, 13109.1], [23409.86, 14320.59, 13800.87, 14352.06, 18872.78, 18684.74, 12043.11, 14022.72, 11381.14, 25165.1, 14072.94, 22404.0, 24247.65, 15506.72, 20840.05, 13711.15, 17839.09, 14672.99, 17190.76, 10625.12, 8848.47, 12036.61, 7660.54, 11533.4, 9406.32, 12810.21, 27106.69, 15315.78, 17588.25, 19747.19, 16899.92, 11651.63, 12923.34, 17983.13, 14205.58, 16566.74, 13403.69, 22020.86, 14903.32, 19264.25, 7919.34, 12988.93, 13260.07, 20701.67, 14087.36, 12284.1, 16537.32, 26009.72, 24878.24, 33744.51, 14323.11, 20752.06, 10967.29, 19469.53, 19750.87, 12976.51, 16995.69, 14878.09, 18871.69, 14217.14, 16007.4, 13807.3, 20760.56, 15339.25, 12648.75, 19883.1, 12384.08, 13129.38, 21388.35, 13455.08, 15207.25, 14793.07, 24848.95, 19422.42, 13908.63, 17542.63, 15782.29, 14348.74, 12614.13, 20733.48, 16491.96, 13076.6], [23382.44, 10803.52, 14378.16, 13302.03, 23222.27, 16695.74, 13679.45, 13661.28, 11570.96, 20886.36, 15476.03, 16023.03, 22236.36, 14684.2, 21771.01, 13644.9, 22076.88, 15190.72, 14203.75, 12299.45, 11778.42, 12531.76, 9742.09, 12032.59, 10269.89, 11948.84, 32208.85, 17459.51, 16242.32, 17948.04, 13136.18, 11190.44, 9589.65, 15077.5, 18706.86, 14964.66, 15336.68, 17290.63, 12763.49, 14413.57, 11501.47, 12579.98, 16496.02, 23865.77, 17203.76, 12391.8, 15316.56, 20308.84, 19682.45, 23440.95, 18467.55, 19582.51, 16025.97, 22575.64, 17154.81, 13862.35, 17273.57, 17677.34, 15127.73, 18684.02, 15585.12, 12263.42, 18852.01, 18143.6, 14127.53, 25099.68, 8662.66, 10795.11, 18684.5, 14296.08, 15049.98, 16718.21, 22490.42, 16585.21, 21470.46, 14135.71, 18498.96, 17130.17, 16346.0, 20241.75, 11604.09, 15534.4], [10800.89, 6621.21, 9859.45, 10134.26, 15783.48, 11661.46, 9992.31, 8961.21, 7986.12, 11334.04, 9131.23, 10651.59, 14228.5, 8808.92, 16857.09, 8870.4, 11910.68, 8514.6, 9619.73, 9681.85, 8443.5, 9467.84, 6687.98, 8468.58, 7309.2, 8900.26, 17395.4, 12816.24, 10267.79, 10570.91, 8501.5, 7170.24, 7787.29, 10002.72, 13115.21, 9260.14, 10312.36, 11077.2, 7877.86, 9884.11, 
7971.67, 6841.12, 11086.58, 20710.83, 13844.01, 8233.47, 15152.03, 15167.49, 12115.32, 11980.41, 10605.6, 12706.9, 11463.02, 16391.48, 11352.5, 7530.3, 12389.59, 13869.15, 8958.14, 14252.5, 9379.34, 7939.86, 10767.56, 16137.54, 10364.88, 17667.15, 5264.44, 8816.28, 13189.92, 7687.12, 8032.19, 9773.84, 12707.06, 11950.38, 17618.47, 7989.54, 13400.39, 20147.11, 7757.33, 14585.71, 6459.52, 11058.98], [6555.49, 4734.66, 5247.05, 5947.45, 8962.09, 6010.18, 5440.07, 5081.99, 4501.39, 6801.83, 5226.83, 6365.3, 7598.31, 5250.53, 10536.52, 6555.77, 6246.19, 4899.69, 5633.05, 4891.45, 5350.89, 4886.0, 3277.94, 4925.69, 4402.2, 5504.85, 9848.68, 7103.36, 5711.0, 6070.02, 5043.51, 3942.8, 4319.38, 6253.7, 6492.65, 5492.23, 5300.07, 5895.92, 4993.92, 4607.68, 4412.15, 3593.96, 5439.76, 10133.62, 6659.7, 4774.84, 7164.48, 7671.45, 5967.57, 6109.85, 5434.13, 6613.64, 4878.95, 7327.03, 5807.04, 3670.12, 5707.73, 6380.39, 4824.95, 6986.96, 4372.81, 3783.99, 5624.07, 8726.5, 5313.22, 7426.47, 3018.46, 4258.51, 5419.19, 4594.29, 4266.15, 4677.71, 5240.86, 5011.05, 7106.89, 3965.1, 5733.27, 9562.59, 3914.1, 5838.83, 3788.82, 4911.18], [4487.15, 3104.44, 3383.62, 3729.34, 5208.33, 3881.61, 3463.18, 3290.67, 3369.41, 4651.93, 3479.82, 4221.03, 5235.62, 3515.58, 5546.02, 3862.32, 3930.2, 3368.78, 3177.84, 3626.84, 2925.88, 2795.03, 2124.77, 3061.72, 2569.27, 3383.25, 5544.76, 3833.81, 3729.67, 3712.59, 2907.87, 2438.05, 3027.46, 3437.19, 3690.67, 3647.12, 3195.82, 3452.42, 3003.94, 2881.15, 2726.49, 2462.67, 3294.29, 5132.12, 4024.76, 3108.09, 3943.35, 4264.99, 3750.04, 4023.09, 3578.9, 3816.03, 2941.25, 3871.37, 3569.3, 2414.46, 3439.66, 3907.25, 3275.13, 3601.49, 2690.43, 2539.41, 3566.59, 4102.3, 3538.63, 4258.75, 2307.56, 2686.07, 3695.43, 2997.3, 2456.86, 2741.39, 3206.27, 2934.6, 3571.18, 2511.94, 3151.33, 4193.71, 2552.29, 3496.43, 2571.52, 2992.5], [3223.12, 2097.61, 2428.81, 2481.26, 3864.63, 2750.74, 2248.83, 2383.53, 2544.88, 3457.4, 2498.48, 2892.26, 3933.36, 2533.84, 3913.94, 2706.4, 3189.74, 2270.44, 2219.1, 2274.31, 2016.42, 1844.62, 1675.86, 1847.77, 1878.96, 2543.73, 3986.44, 2627.67, 2458.92, 2409.33, 2173.83, 1851.57, 2227.47, 2291.89, 2421.13, 2416.32, 2169.87, 2450.68, 2240.0, 2044.5, 2008.9, 1893.48, 2451.84, 3353.12, 2812.2, 2131.62, 2585.51, 3128.53, 2858.7, 2996.48, 2672.92, 2786.17, 2192.46, 2689.98, 2454.78, 1892.1, 2382.88, 2713.41, 2321.69, 2742.05, 2096.39, 1974.47, 2493.21, 2698.44, 2535.42, 2866.68, 1455.42, 1933.88, 2626.68, 2105.46, 1687.18, 1945.97, 2323.11, 1852.09, 2256.41, 1800.01, 2057.27, 2410.69, 1954.7, 2247.75, 1624.93, 2038.15], [2494.37, 1673.39, 1773.43, 1469.26, 2816.89, 2031.24, 1294.99, 1508.18, 1766.44, 2642.22, 1752.41, 2234.46, 2603.72, 1883.99, 2977.59, 1844.4, 2671.99, 1759.64, 1529.58, 1757.09, 1453.12, 1458.55, 1322.78, 1282.01, 1454.08, 1953.53, 3151.81, 1931.59, 1818.74, 1790.47, 1707.78, 1558.49, 1652.87, 1641.09, 1976.34, 1813.03, 1775.23, 1898.44, 1666.93, 1725.64, 1687.07, 1587.76, 1845.2, 2652.05, 2177.2, 1739.65, 2001.38, 2381.83, 2737.01, 2514.38, 2222.0, 2380.01, 2087.48, 2040.06, 2100.17, 1569.05, 1945.04, 2235.13, 2189.02, 2126.45, 1966.64, 1788.03, 1986.06, 1934.88, 1813.6, 2244.8, 1379.33, 1617.21, 2151.83, 1638.84, 1403.71, 1467.3, 1752.64, 1403.8, 1743.75, 1372.98, 1525.33, 1901.29, 1450.41, 1584.78, 1358.73, 1587.3], [2569.03, 2055.48, 1617.7, 1797.54, 2790.11, 2043.46, 1744.23, 1645.6, 2671.58, 2805.1, 2394.35, 3813.06, 3328.2, 1522.49, 4070.01, 1502.57, 2541.82, 1827.88, 1446.88, 1840.4, 1308.22, 1297.46, 1847.49, 1032.16, 1335.97, 
1750.43, 3148.26, 2085.0, 1707.8, 1664.12, 1370.17, 1553.64, 1347.18, 2328.25, 2167.17, 1927.51, 1663.67, 1833.17, 1597.64, 2030.33, 1809.08, 1648.65, 2451.68, 2662.66, 2054.17, 2434.34, 2498.06, 2411.0, 2779.41, 2546.27, 2238.57, 2597.04, 1968.24, 2664.05, 2885.94, 1706.86, 1995.04, 2282.39, 2214.75, 2052.59, 2075.18, 2014.74, 1956.24, 1890.09, 2117.0, 2157.46, 1412.17, 1740.39, 2129.67, 1939.98, 1513.77, 1394.59, 1758.18, 1423.39, 2058.37, 1328.2, 1447.78, 2262.28, 1565.23, 2068.41, 1367.27, 1631.66], [4641.31, 3710.95, 4108.02, 2322.72, 4112.86, 3153.28, 4222.0, 2542.08, 4080.89, 4914.35, 5165.37, 5489.32, 5723.93, 3490.48, 7381.2, 2472.47, 5539.85, 2249.05, 2883.83, 2664.6, 1379.94, 1649.83, 2664.51, 1628.63, 2582.64, 5612.11, 2663.5, 2699.65, 2574.08, 2505.57, 1646.98, 2305.66, 1756.6, 3672.71, 3286.51, 2637.35, 2619.93, 2800.22, 2382.41, 3808.99, 3676.19, 2221.59, 4269.7, 3598.29, 3048.27, 3589.7, 3096.36, 2748.5, 3936.11, 3511.24, 4884.22, 2809.73, 2626.34, 4343.58, 3367.0, 2404.99, 3245.45, 4372.99, 2617.4, 2689.32, 3563.42, 2529.25, 2224.52, 3126.24, 3312.67, 2504.25, 2625.43, 3150.3, 3508.86, 3096.43, 2205.32, 2489.92, 2449.98, 1943.37, 3975.66, 1543.29, 2017.76, 4233.35, 2737.69, 4005.33, 2550.47, 2751.41], [5928.45, 6305.46, 7741.12, 5572.98, 7840.01, 5346.7, 9205.84, 5416.25, 5406.82, 8514.67, 8016.43, 12143.41, 11659.05, 7332.98, 11905.46, 4191.0, 7976.14, 11071.46, 4555.71, 4642.37, 3020.24, 4752.09, 4916.23, 5745.9, 4904.85, 12820.8, 5408.88, 3604.63, 3839.0, 5809.85, 2569.89, 4389.23, 2294.61, 6809.31, 6372.19, 4289.17, 4488.89, 10455.43, 5260.2, 3870.7, 6460.74, 4295.49, 7860.2, 5420.29, 4285.41, 6845.68, 5365.47, 5121.61, 4586.69, 8258.6, 7749.0, 3059.53, 3430.87, 8868.47, 4088.58, 6015.28, 7597.69, 13886.42, 3714.79, 4607.97, 8787.99, 4320.83, 5278.15, 6865.86, 4430.27, 4400.78, 5310.74, 7716.41, 8462.67, 6883.02, 3169.26, 3573.36, 3969.63, 6448.48, 4885.67, 3215.82, 6065.36, 6634.84, 5209.91, 7425.64, 5240.02, 5944.41]]
]
inflow_initial = [39717.564, 6632.5141, 15897.183, 2525.938]
env = Gurobi.Env()
model = SDDP.LinearPolicyGraph(
stages = 120,
lower_bound = 0.0,
optimizer = () -> Gurobi.Optimizer(env),
) do sp, t
set_optimizer_attribute(sp, "OutputFlag", 0)
month = t % 12 == 0 ? 12 : t % 12 # Year to month conversion.
@variable(sp,
0 <= storedEnergy[i = 1:4] <= storedEnergy_ub[i],
SDDP.State, initial_value = storedEnergy_initial[i])
@variables(sp, begin
0 <= spillEnergy[i = 1:4]
0 <= hydroGeneration[i = 1:4] <= hydro_ub[i]
thermal_lb[i][j] <= thermal[i = 1:4, j = 1:N_THERMAL[i]] <= thermal_ub[i][j]
0 <= exchange[i = 1:5, j = 1:5] <= exchange_ub[i][j]
0 <= deficit[i = 1:4, j = 1:4] <= demand[month][i] * deficit_ub[j]
inflow[i = 1:4] == inflow_initial[i]
end)
@stageobjective(sp,
sum(deficit_obj[i] * sum(deficit[i, :]) for i in 1:4) +
sum(thermal_obj[i][j] * thermal[i, j] for i in 1:4 for j in 1:N_THERMAL[i]))
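# Constraints below: demand balance for each of the four regions, a stored-energy
# balance for each reservoir system, and flow conservation at the fifth
# (transshipment) exchange node.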
@constraints(sp, begin
[i = 1:4], sum(deficit[i, :]) + hydroGeneration[i] +
sum(thermal[i, j] for j in 1:N_THERMAL[i]) +
sum(exchange[:, i]) - sum(exchange[i, :]) == demand[month][i]
[i = 1:4], storedEnergy[i].out + spillEnergy[i] +
hydroGeneration[i] - storedEnergy[i].in == inflow[i]
sum(exchange[:, 5]) == sum(exchange[5, :])
end)
if t != 1 # t=1 is handled in the @variable constructor.
r = (t - 1) % 12 == 0 ? 12 : (t - 1) % 12
SDDP.parameterize(sp, 1:length(scenarios[1][r])) do ω
for i in 1:4
JuMP.fix(inflow[i], scenarios[i][r][ω])
end
end
end
end
SDDP.train(model, time_limit = 200, print_level = 2, cut_deletion_minimum = 50)
end
msppy_hydro_thermal()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3109 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors. #src
# This Source Code Form is subject to the terms of the Mozilla Public #src
# License, v. 2.0. If a copy of the MPL was not distributed with this #src
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #src
# # Complicated Hydro-thermal
using SDDP, JSON, GLPK, Test
const DATA = JSON.parsefile(joinpath(@__DIR__, "complicated_hydro.json"))
const T = 12
const PRICES = [18 + round(5 * sin(0.5 * (t - 2 - 1)), digits = 2) for t = 1:T]
const FLOW_KNOTS = [50.0, 60.0, 70.0]
const POWER_KNOTS = [55.0, 65.0, 70.0]
model = SDDP.LinearPolicyGraph(
stages = T,
sense = :Min;
lower_bound = 0,
optimizer = GLPK.Optimizer,
) do subproblem, t
@variable(subproblem, 0 <= volume[1:3] <= 200, SDDP.State, initial_value = 50)
@variable(
subproblem,
inflow[i = 1:3],
SDDP.State,
initial_value = DATA["initial_inflow"][1][i]
)
@variables(subproblem, begin
thermal_generation >= 0
thermal_cost >= 0
hydro_flow[1:3] >= 0
hydro_spill[1:3] >= 0
pour[1:3] >= 0
hydro_generation >= 0
0 <= dispatch[1:3, 1:3] <= 1
ω[1:3]
end)
@constraints(
subproblem,
begin
thermal_cost >= 10 * thermal_generation + 0
thermal_cost >= 20 * thermal_generation - 500
thermal_cost >= 50 * thermal_generation - 3_500
volume[1].out ==
volume[1].in + inflow[1].out - hydro_flow[1] - hydro_spill[1] + pour[1]
[i = 2:3],
volume[i].out ==
volume[i].in + inflow[i].out - hydro_flow[i] - hydro_spill[i] +
pour[i] +
hydro_flow[i-1] +
hydro_spill[i-1]
hydro_generation ==
sum(sum(POWER_KNOTS[j] * dispatch[i, j] for j = 1:3) for i = 1:3)
[i = 1:3], hydro_flow[i] == sum(FLOW_KNOTS[j] * dispatch[i, j] for j = 1:3)
[i = 1:3], sum(dispatch[i, j] for j = 1:3) <= 1
hydro_generation + thermal_generation >= 600
end
)
@stageobjective(
subproblem,
thermal_cost - PRICES[t] * (hydro_generation + thermal_generation) +
10_000 * sum(pour)
)
if t == 1
@constraint(subproblem, [i = 1:3], volume[i].out >= 30)
for i = 1:3
JuMP.fix(inflow[i].out, DATA["initial_inflow"][2][i])
end
else
for i = 1:3
R = DATA["ar_matrix"]["$(t-1)"]["$(i-1)"]
@constraint(
subproblem,
inflow[i].out ==
sum(get(R, "$(j-1)", 0.0) * inflow[j].in for j = 1:3) + ω[i]
)
end
SDDP.parameterize(subproblem, [1, 2]) do ϕ
for i = 1:3
JuMP.fix(ω[i], DATA["RHS_noise"][i][ϕ][t])
end
end
end
if t == T
@constraint(subproblem, [i = 1:3], volume[i].out >= 30)
end
end
SDDP.train(model; iteration_limit = 50, cut_deletion_minimum = 1_000_000)
@test SDDP.calculate_bound(model) ≈ 129_469 atol = 1
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 7030 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
using SDDP, GLPK, Test, Gurobi, Plots, StatsPlots
function river_chain_example(;
ar1::Bool = true,
N::Int = 2,
lipschitz = 1e6,
lower_bound = -50_000,
)
env = Gurobi.Env()
model = SDDP.LinearPolicyGraph(
stages = 12,
optimizer = () -> Gurobi.Optimizer(env),
lower_bound = lower_bound,
) do sp, t
set_optimizer_attribute(sp, "OutputFlag", 0)
flow_knots = [50.0, 60.0, 70.0]
power_knots = [55.0, 65.0, 70.0]
b = [
61.261,
56.716,
59.159,
66.080,
72.131,
76.708,
76.665,
76.071,
76.832,
69.970,
69.132,
67.176,
]
Ω = [-4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5]
@variable(sp, 0 <= volume[1:N] <= 200, SDDP.State, initial_value = 100)
@variables(sp, begin
0 <= flow[1:N] <= 70.0
0 <= spill[1:N]
0 <= generation
0 <= dispatch[1:N, 1:3] <= 1
end)
@constraints(
sp,
begin
volume[1].out == volume[1].in - flow[1] - spill[1]
[i = 2:N],
volume[i].out == volume[i].in - flow[i] - spill[i] + flow[i-1] + spill[i-1]
generation == sum(power_knots[j] * dispatch[i, j] for i = 1:N, j = 1:3)
[i = 1:N], flow[i] == sum(flow_knots[j] * dispatch[i, j] for j = 1:3)
[i = 1:N], sum(dispatch[i, j] for j = 1:3) <= 1
[i = 1:N], flow[i] <= volume[i].in
end
)
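# Objective state: the spot price. With `ar1 = true` it follows a first-order
# process p′ = 0.5p + 0.5b[t] + ω; otherwise a second-order variant that also
# depends on the previous price is used. Here b[t] is the stage-t mean price and
# ω is a shock drawn from Ω.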
if ar1
SDDP.add_objective_state(
sp,
initial_value = 61.261,
lower_bound = 40.0,
upper_bound = 100.0,
lipschitz = lipschitz,
) do p, ω
if t == 1
return p
else
return 0.5 * p[1] + 0.5 * b[t] + ω
end
end
SDDP.parameterize(sp, Ω) do ω
p′ = SDDP.objective_state(sp)
@stageobjective(sp, 1_000 * sum(spill) - p′ * generation)
end
else
SDDP.add_objective_state(
sp,
initial_value = (61.261, 61.261),
lower_bound = (40.0, 40.0),
upper_bound = (100.0, 100.0),
lipschitz = (lipschitz, lipschitz),
) do p, ω
if t == 1
return p
else
return 0.5 * p[1] + 0.5 * b[t] - 0.5 * (p[1] - p[2]) + ω, p[1]
end
end
SDDP.parameterize(sp, Ω) do ω
p′, p = SDDP.objective_state(sp)
@stageobjective(sp, 1_000 * sum(spill) - p′ * generation)
end
end
end
return model
end
function example_one()
model = river_chain_example(ar1 = true, N = 2)
SDDP.train(
model;
iteration_limit = 2000,
stopping_rules = [SDDP.Statistical(num_replications = 250, iteration_period = 250)],
)
# Now plot the saddle function.
node = model[6]
x = 0.0:10.0:200.0
p = 50.0:5.0:100.0
A = zeros(length(x), length(p))
for (j, pj) in enumerate(p)
for (i, xi) in enumerate(x)
objective_state_vector =
SDDP.update_objective_state(node.objective_state, [pj], 0.0)
subproblem_results = SDDP.solve_subproblem(
model,
node,
Dict(Symbol("volume[1]") => 50.0, Symbol("volume[2]") => xi),
0.0,
Tuple{Int,Any}[],
require_duals = false,
)
A[i, j] = subproblem_results.objective
end
end
open("saddlefunction.dat", "w") do io
for (i, xi) in enumerate(x)
for (j, pj) in enumerate(p)
println(io, xi, " ", pj, " ", A[i, j])
end
println(io)
end
end
end
function example_two()
model = river_chain_example(ar1 = false, N = 5, lower_bound = -150_000.0)
SDDP.train(model; iteration_limit = 5_000)
simulations = SDDP.simulate(model, 1_000, [:generation, :volume])
profits = map(sim -> sum(s[:stage_objective] for s in sim), simulations)
min_profit, min_index = findmin(profits)
max_profit, max_index = findmax(profits)
function pretty_plot(f, simulations; kwargs...)
plt = SDDP.publication_plot(f, simulations)
high = f.(simulations[max_index])
plot!(high, linestyle = :dash, color = :red, width = 3)
low = f.(simulations[min_index])
plot!(low, linestyle = :dot, color = :green, width = 3)
plot!(; legend = false, xlabel = "Stage", kwargs...)
return plt
end
spot_price = pretty_plot(
simulations;
title = "(a) Spot Price",
ylabel = "Spot Price\n(\$/MWh)",
) do data
return data[:objective_state][1]
end
stored_energy = pretty_plot(
simulations;
title = "(b) Total Stored Energy",
ylabel = "Volume\n(m³)",
) do data
return sum(data[:volume][i].out for i = 1:5)
end
total_generation = pretty_plot(
simulations;
title = "(b) Total Generation",
ylabel = "Energy\n(MWh)",
) do data
return data[:generation]
end
profit = StatsPlots.density(
profits ./ 1_000;
title = "(d) Distribution of Profit",
xlabel = "Profit (000's)",
width = 3,
color = "#00467F",
legend = false,
yticks = false,
)
Plots.vline!([min_profit / 1_000], color = :green, linestyle = :dot, width = 3)
Plots.vline!([max_profit / 1_000], color = :red, linestyle = :dash, width = 3)
plot(spot_price, stored_energy, total_generation, profit)
savefig("example_two.pdf")
end
function example_lipschitz()
A = zeros(500, 5)
for (col, α) in enumerate([0.0, 10.0, 100.0, 1_000])
model = river_chain_example(ar1 = false, N = 5, lipschitz = α)
SDDP.train(model; iteration_limit = 500)
log = model.most_recent_training_results.log
for row = 1:500
A[row, col] = log[row].bound
end
end
open("lipschitz_experiment.dat", "w") do io
for row = 1:500
print(io, rpad(row, 4))
for col = 1:5
print(io, A[row, col], " ")
end
println(io)
end
end
end
if length(ARGS) > 0
if ARGS[1] == "example_one"
example_one()
elseif ARGS[1] == "example_two"
example_two()
elseif ARGS[1] == "example_lipschitz"
example_lipschitz()
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 5513 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file implements the example described in
# Dowson, O., Morton, D.P., & Pagnoncelli, B. (2019). Partially observable
# multistage stochastic programming.
using SDDP, GLPK, Random, Statistics, Test
const demand_values = [1.0, 2.0]
const demand_probs = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2], :H => [0.5, 0.5])
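# Demand is either 1.0 or 2.0 units. In hidden state A the high demand (2.0)
# occurs with probability 0.8, in state B with probability 0.2, and the
# expected-value node :H uses a 50/50 split.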
"Create the policy graph for the problem."
function build_graph(model_name)
if model_name == "hidden" || model_name == "visible"
graph = SDDP.Graph(
:root_node,
[:Ad, :Ah, :Bd, :Bh],
[
(:root_node => :Ad, 0.5),
(:root_node => :Bd, 0.5),
(:Ad => :Ah, 1.0),
(:Ah => :Ad, 0.9),
(:Bd => :Bh, 1.0),
(:Bh => :Bd, 0.9),
],
)
if model_name == "hidden"
SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)
SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)
end
return graph
elseif model_name == "expected_value"
graph = SDDP.Graph(
:root_node,
[:D, :H],
[(:root_node => :D, 1.0), (:D => :H, 1.0), (:H => :D, 0.9)],
)
return graph
else
error("Invalid option: model_name=$(model_name)")
end
end
function solve_inventory_management_problem(model_name, risk_measure)
graph = build_graph(model_name)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
optimizer = GLPK.Optimizer,
) do subproblem, node
@variables(subproblem, begin
0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)
buy >= 0
demand
end)
@constraint(subproblem, demand == inventory.in - inventory.out + buy)
if node == :Ad || node == :Bd || node == :D
JuMP.fix(demand, 0)
@stageobjective(subproblem, buy)
else
SDDP.parameterize(subproblem, demand_values, demand_probs[node]) do ω
JuMP.fix(demand, ω)
end
@stageobjective(subproblem, 2 * buy + inventory.out)
end
end
Random.seed!(123)
SDDP.train(model; risk_measure = risk_measure, iteration_limit = 200)
simulations =
simulate_policy(model, model_name; terminate_on_leaf = false, discount = true)
return (model = model, simulations = simulations)
end
function simulate_policy(model, model_name; terminate_on_leaf::Bool, discount::Bool)
# Simulate policy using same realizations for each policy.
Ad, Ah, Bd, Bh = :Ad, :Ah, :Bd, :Bh
if model_name == "expected_value"
Ad, Ah, Bd, Bh = :D, :H, :D, :H
end
Random.seed!(1234)
scenarios = Any[]
for replication = 1:2000
actual_node = rand() < 0.5 ? :A : :B
num_stages = if terminate_on_leaf
ceil(Int, log(rand()) / log(0.9))
else
50
end
scenario = Tuple{Symbol,Union{Nothing,Float64}}[]
for stage = 1:num_stages
r = rand()
if actual_node == :A
push!(scenario, (Ad, nothing))
push!(scenario, (Ah, r < demand_probs[:Ah][1] ? 1 : 2))
else
push!(scenario, (Bd, nothing))
push!(scenario, (Bh, r < demand_probs[:Bh][1] ? 1 : 2))
end
end
push!(scenarios, scenario)
end
simulations = Any[]
for scenario in scenarios
push!(
simulations,
SDDP.simulate(
model,
1,
[:inventory, :buy],
sampling_scheme = SDDP.Historical(scenario),
)[1],
)
end
function calculate_objective(simulation)
if discount
y = 0.0
ρ = 1.0
for (i, s) in enumerate(simulation)
y += ρ * s[:stage_objective]
if !isodd(i)
ρ *= 0.9
end
end
return y
else
return sum(s[:stage_objective] for s in simulation)
end
end
objectives = calculate_objective.(simulations)
return (simulations = simulations, objectives = objectives)
end
function quantile_data(data...)
return hcat([
Statistics.quantile(data_i, [0.0, 0.25, 0.5, 0.75, 1.0]) for data_i in data
]...)
end
function run_paper_analysis()
visible = solve_inventory_management_problem("visible", SDDP.Expectation())
hidden = solve_inventory_management_problem("hidden", SDDP.Expectation())
expected_value =
solve_inventory_management_problem("expected_value", SDDP.Expectation())
risk_averse_expected_value =
solve_inventory_management_problem("expected_value", SDDP.ModifiedChiSquared(0.25))
quantiles = quantile_data(
visible.simulations.objectives,
hidden.simulations.objectives,
expected_value.simulations.objectives,
risk_averse_expected_value.simulations.objectives,
)
open("quantiles.dat", "w") do io
for i = 1:size(quantiles, 1)
println(io, join(quantiles[i, :], " "))
end
end
end
if length(ARGS) > 0
@assert ARGS[1] == "--run"
run_paper_analysis()
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3717 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This example, known in the POMDP literature as the "Tiger Problem," is taken
# from the paper:
# Kaelbling, L. P., Littmany, M. L., & Cassandra, A. R. (1998). Planning and
# Acting in Partially Observable Stochastic Domains. Artificial Intelligence,
# 101, 99–134.
# Note: Because of numerical issues associated with the discount factor being
# close to 1, we use Gurobi instead of GLPK. GLPK complains about the basis
# being close to singular.
using SDDP
using Gurobi
using Test
function tiger_problem()
graph = SDDP.Graph(
:root_node,
[:Dl, :Dr, :Hl, :Hr],
[
(:root_node => :Dl, 0.5),
(:root_node => :Dr, 0.5),
(:Dl => :Hl, 1.0),
(:Hl => :Dl, 0.98),
(:Dr => :Hr, 1.0),
(:Hr => :Dr, 0.98),
],
)
SDDP.add_ambiguity_set(graph, [:Dl, :Dr], 1e3)
SDDP.add_ambiguity_set(graph, [:Hl, :Hr], 1e3)
model = SDDP.PolicyGraph(
graph,
lower_bound = -1000.0,
optimizer = () -> Gurobi.Optimizer(),
) do subproblem, node
set_optimizer_attribute(subproblem, "OutputFlag", 0)
@variable(subproblem, 0 <= x[[:s, :l, :r]] <= 1, SDDP.State, initial_value = 1)
if node == :Dl || node == :Dr
@stageobjective(subproblem, x[:s].out - x[:l].out - x[:r].out)
@constraints(subproblem, begin
x[:s].out <= x[:s].in
x[:l].out + x[:r].out <= x[:s].in
end)
elseif node == :Hl
@stageobjective(subproblem, 100 * x[:l].in - 10 * x[:r].in)
@constraint(subproblem, x[:s].out <= 1 - x[:l].in - x[:r].in)
@constraint(subproblem, x[:s].out <= x[:s].in)
SDDP.parameterize(subproblem, [:left, :right], [0.85, 0.15]) do ω
# println("I heard the tiger on the $ω side.")
end
elseif node == :Hr
@stageobjective(subproblem, -10 * x[:l].in + 100 * x[:r].in)
@constraint(subproblem, x[:s].out <= 1 - x[:l].in - x[:r].in)
@constraint(subproblem, x[:s].out <= x[:s].in)
SDDP.parameterize(subproblem, [:left, :right], [0.15, 0.85]) do ω
# println("I heard the tiger on the $ω side.")
end
end
# Dummy constraints to force the state variables to be binary.
@variable(subproblem, 0 <= u[[:s, :l, :r]] <= 1, Bin)
@constraint(subproblem, [i = [:s, :l, :r]], x[i].out == u[i])
end
# Train the policy.
SDDP.train(model; iteration_limit = 50, print_level = 1, dashboard = true)
simulations = SDDP.simulate(
model,
100,
[:x],
sampling_scheme = SDDP.InSampleMonteCarlo(
max_depth = 30,
terminate_on_dummy_leaf = false,
),
)
plt = SDDP.SpaghettiPlot(simulations)
SDDP.add_spaghetti(plt, cumulative = true) do data
data[:stage_objective]
end
SDDP.add_spaghetti(plt, title = "Stopping state", ymin = 0, ymax = 1) do data
data[:x][:s].out
end
SDDP.add_spaghetti(plt, title = "Open left", ymin = 0, ymax = 1) do data
data[:x][:l].out
end
SDDP.add_spaghetti(plt, title = "Open right", ymin = 0, ymax = 1) do data
data[:x][:r].out
end
SDDP.add_spaghetti(plt, title = "Belief-L", ymin = 0, ymax = 1) do data
data[:belief][:Dl] + data[:belief][:Hl]
end
SDDP.save(plt)
end
tiger_problem()
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3103 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This should kind of work, but it doesn't.
using SDDP, Ipopt, Test
function infinite_ball_on_beam()
graph = SDDP.Graph(
:root_node,
[:time_step],
[(:root_node => :time_step, 1.0), (:time_step => :time_step, 0.999)],
)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0.0,
optimizer = Ipopt.Optimizer,
sense = :Min,
) do subproblem, node
set_optimizer_attribute(subproblem, "print_level", 0)
Δt = 0.1 # time-step (s)
m = 0.1 # mass (kg)
J = 0.5 # moment of inertia (kg m²)
g = 9.81 # gravitational acceleration (m s⁻²)
τ = 1 # Maximum torque (alternative value: 3).
@variables(subproblem, begin
# Beam position.
r, SDDP.State, (initial_value = 1)
# Rate of change of beam position.
r′, SDDP.State, (initial_value = 0)
# Angle of beam.
θ, SDDP.State, (initial_value = -0.1745)
# Rate of change of angle of beam.
θ′, SDDP.State, (initial_value = 0)
# Control variable: torque to apply to beam.
-τ <= u <= τ
end)
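# Forward-Euler discretization of the ball-and-beam dynamics with time step Δt;
# the nonlinear terms appear in the @NLconstraints block below.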
@constraints(subproblem, begin
r.out - r.in == Δt * r′.in
θ.out - θ.in == Δt * θ′.in
end)
@NLconstraints(
subproblem,
begin
r′.out - r′.in == Δt * (r.in * θ′.in^2 - g * sin(θ.in))
θ′.out - θ′.in ==
Δt * (-2 * m * r.in * r′.in + -1 * m * g * r.in * cos(θ.in) + u) /
(m * r.in^2 + J)
end
)
@stageobjective(
subproblem,
100 * r.out^2 + r′.out^2 + θ.out^2 + θ′.out^2 + 0.01 * u^2
)
end
return model
end
using Random
Random.seed!(1234)
model = infinite_ball_on_beam()
SDDP.train(
model,
iteration_limit = 2_000,
print_level = 1,
cycle_discretization_delta = 0.001,
)
maximum_depth = 0
for depth = 10:10:100
try
simulation = SDDP.simulate(model; max_depth = depth, terminate_on_cycle = false)
global maximum_depth = depth
catch
print("Max_depth = $(maximum_depth)")
break
end
end
simulation = SDDP.simulate(
model,
1,
[:r, :θ, :u],
max_depth = maximum_depth,
terminate_on_cycle = false,
)
using Plots
plot(
plot(
[s[:r].out for s in simulation[1]],
ylabel = "Displacement",
ylims = (-1, 3),
xlims = (1, maximum_depth),
),
plot(
[s[:θ].out for s in simulation[1]],
ylabel = "Angle",
ylims = (-1, 1),
xlims = (1, maximum_depth),
),
plot(
[s[:u] for s in simulation[1]],
ylabel = "Torque",
ylims = (-3, 3),
xlims = (1, maximum_depth),
),
layout = (3, 1),
size = (1500, 500),
)
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4520 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
using SDDP, GLPK, Test, Distributions, Statistics
function infinite_lin_HD()
graph = SDDP.Graph(
:root_node,
[:week],
[(:root_node => :week, 1.0), (:week => :week, 0.95)],
)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0,
optimizer = GLPK.Optimizer,
) do subproblem, node
@variable(subproblem, -10 <= state <= 10, SDDP.State, initial_value = 0)
@variables(subproblem, begin
0 <= order_quantity
0 <= lost_demand
0 <= disposed_units
0 <= backordered_units
0 <= held_units
demand
end)
@constraint(subproblem, backordered_units >= -state.out)
@constraint(subproblem, held_units >= state.out)
@constraint(
subproblem,
state.out == state.in + order_quantity - demand + lost_demand - disposed_units
)
# Truncated normal on [0, 10] with mean 5 and sd 2.
Pg = rand(Distributions.TruncatedNormal(5, 2, 0, 10), 50)
sort!(Pg)
SDDP.parameterize(subproblem, Pg) do ω
JuMP.fix(demand, ω)
end
@stageobjective(
subproblem,
20 * order_quantity + # Ordering cost cp.
2 * held_units + # Holding cost ch.
10 * backordered_units + # Backorder cost cb.
10 * disposed_units + # Disposal cost cd.
100 * lost_demand # Lost demand cost cl.
)
end
return model
end
function infinite_lin_DH()
graph = SDDP.Graph(
:root_node,
[:decision, :recourse],
[
(:root_node => :decision, 1.0),
(:decision => :recourse, 1.0),
(:recourse => :decision, 0.95),
],
)
model = SDDP.PolicyGraph(
graph;
lower_bound = 0,
optimizer = GLPK.Optimizer,
) do subproblem, node
@variable(subproblem, -10 <= state <= 10, SDDP.State, initial_value = 0)
@variable(subproblem, 0 <= order_quantity, SDDP.State, initial_value = 0)
if node == :decision
@constraint(subproblem, state.out == state.in)
@stageobjective(subproblem, 20 * order_quantity.out)
else
@variables(subproblem, begin
0 <= lost_demand
0 <= disposed_units
0 <= backordered_units
0 <= held_units
demand
end)
@constraints(
subproblem,
begin
state.out ==
state.in + order_quantity.in - demand + lost_demand - disposed_units
backordered_units >= -state.out
held_units >= state.out
end
)
Pg = rand(Distributions.TruncatedNormal(5, 2, 0, 10), 50)
sort!(Pg)
SDDP.parameterize(subproblem, Pg) do ω
JuMP.fix(demand, ω)
end
@stageobjective(
subproblem,
2 * held_units + # Holding cost ch.
10 * backordered_units + # Backorder cost cb.
10 * disposed_units + # Disposal cost cd.
100 * lost_demand # Lost demand cost cl.
)
end
end
return model
end
using Random
Random.seed!(1234)
begin
model = infinite_lin_HD()
SDDP.train(model, iteration_limit = 75, print_level = 1)
results = SDDP.simulate(model, 500)
objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]
sample_mean = round(Statistics.mean(objectives); digits = 2)
sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)
println("HD Confidence_interval = $(sample_mean) ± $(sample_ci)")
end
Random.seed!(1234)
begin
model = infinite_lin_DH()
SDDP.train(model, iteration_limit = 75, print_level = 1)
results = SDDP.simulate(model, 500)
objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]
sample_mean = round(Statistics.mean(objectives); digits = 2)
sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)
println("DH Confidence_interval = $(sample_mean) ± $(sample_ci)")
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 12710 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
using SDDP, Test, JSON, Gurobi, Plots, Random
"""
infinite_powder(; discount_factor = 0.75, stocking_rate::Float64 = NaN,
data_filename = "powder_data.json")
Create an instance of the infinite horizon POWDER model. If `stocking_rate =
NaN`, we use the value from the file `data_filename`.
"""
function infinite_powder(;
discount_factor = 0.95,
stocking_rate::Float64 = NaN,
data_filename = "powder_data.json",
)
data = JSON.parsefile(joinpath(@__DIR__, data_filename))
# Allow over-ride of the stocking rate contained in data.
if !isnan(stocking_rate)
data["stocking_rate"] = stocking_rate
end
# ===== Markovian Graph =====
transition = Array{Float64,2}[]
for transition_matrix in data["transition"]
push!(
transition,
convert(
Array{Float64,2},
Base.reshape(
vcat(transition_matrix...),
length(transition_matrix[1]),
length(transition_matrix),
),
),
)
end
graph = SDDP.MarkovianGraph(transition)
for markov_state = 1:size(transition[end], 2)
SDDP.add_edge(
graph,
(data["number_of_weeks"], markov_state) => (1, 1),
discount_factor,
)
end
gurobi_env = Gurobi.Env()
model = SDDP.PolicyGraph(
graph,
sense = :Max,
upper_bound = 1e6,
optimizer = () -> Gurobi.Optimizer(gurobi_env),
) do subproblem, index
set_optimizer_attribute(subproblem, "OutputFlag", 0)
# Unpack the node index.
stage, markov_state = index
# ========== Data Initialization ==========
# Data for Fat Evaluation Index penalty
cow_per_day = data["stocking_rate"] * 7
# Data for grass growth model two
Pₘ = data["maximum_pasture_cover"]
gₘ = data["maximum_growth_rate"]
Pₙ = data["number_of_pasture_cuts"]
g(p) = 4 * gₘ / Pₘ * p * (1 - p / Pₘ)
g′(p) = 4 * gₘ / Pₘ * (1 - 2 * p / Pₘ)
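# g(p) is a concave quadratic growth curve with maximum gₘ at p = Pₘ/2; g′ is
# its derivative, used below to build tangent-line (outer) approximations of
# the growth model.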
# ========== State Variables ==========
@variables(
subproblem,
begin
# Pasture cover (kgDM/ha). Note: to avoid numerical difficulties, we
# increase the lower bound so that it is not zero. This avoids the
# situation where pasture_cover=0 and thus growth=0, effectively
# killing all grass for all time.
(
10 <= pasture_cover <= data["maximum_pasture_cover"],
SDDP.State,
initial_value = data["initial_pasture_cover"],
)
# Quantity of supplement in storage (kgDM/ha).
(
stored_supplement >= 0,
SDDP.State,
initial_value = data["initial_storage"],
)
# Soil moisture (mm).
(
0 <= soil_moisture <= data["maximum_soil_moisture"],
SDDP.State,
initial_value = data["initial_soil_moisture"],
)
# Number of cows milking (cows/ha).
(
0 <= cows_milking <= data["stocking_rate"],
SDDP.State,
initial_value = data["stocking_rate"],
)
(
0 <= milk_production <= data["maximum_milk_production"],
SDDP.State,
initial_value = 0.0,
)
end
)
# ========== Control Variables ==========
@variables(subproblem, begin
supplement >= 0 # Quantity of supplement to buy and feed (kgDM).
harvest >= 0 # Quantity of pasture to harvest (kgDM/ha).
feed_storage >= 0 # Feed herd grass from storage (kgDM).
feed_pasture >= 0 # Feed herd grass from pasture (kgDM).
evapotranspiration >= 0 # The actual evapotranspiration rate.
rainfall # Rainfall (mm); dummy variable for parameterization.
grass_growth >= 0 # The potential grass growth rate.
energy_for_milk_production >= 0 # Energy for milk production (MJ).
weekly_milk_production >= 0 # Weekly milk production (kgMS/week).
fei_penalty >= 0 # Fat Evaluation Index penalty ($)
end)
# ========== Parameterize model on uncertainty ==========
SDDP.parameterize(subproblem, data["niwa_data"][stage]) do ω
JuMP.set_upper_bound(evapotranspiration, ω["evapotranspiration"])
JuMP.fix(rainfall, ω["rainfall"])
end
@constraints(
subproblem,
begin
# ========== State constraints ==========
pasture_cover.out ==
pasture_cover.in + 7 * grass_growth - harvest - feed_pasture
stored_supplement.out ==
stored_supplement.in + data["harvesting_efficiency"] * harvest -
feed_storage
# This is a <= to account for the maximum soil moisture; excess
# water is assumed to drain away.
soil_moisture.out <= soil_moisture.in - evapotranspiration + rainfall
# ========== Energy balance ==========
data["pasture_energy_density"] * (feed_pasture + feed_storage) +
data["supplement_energy_density"] * supplement >=
data["stocking_rate"] * (
data["energy_for_pregnancy"][stage] +
data["energy_for_maintenance"] +
data["energy_for_bcs_dry"][stage]
) +
cows_milking.in * (
data["energy_for_bcs_milking"][stage] -
data["energy_for_bcs_dry"][stage]
) +
energy_for_milk_production
# ========== Milk production models ==========
# Upper bound on the energy that can be used for milk production.
energy_for_milk_production <=
data["max_milk_energy"][stage] * cows_milking.in
# Conversion between energy and physical milk
weekly_milk_production ==
energy_for_milk_production / data["energy_content_of_milk"][stage]
# Lower bound on milk production.
weekly_milk_production >= data["min_milk_production"] * cows_milking.in
# ========== Pasture growth models ==========
# Model One: grass_growth ~ evapotranspiration
grass_growth <= data["soil_fertility"][stage] * evapotranspiration / 7
# Model Two: grass_growth ~ pasture_cover
[p′ = range(0, stop = Pₘ, length = Pₙ)],
grass_growth <= g(p′) + g′(p′) * (pasture_cover.in - p′)
# ========== Fat Evaluation Index Penalty ==========
fei_penalty >= cow_per_day * (0.00 + 0.25 * (supplement / cow_per_day - 3))
fei_penalty >= cow_per_day * (0.25 + 0.50 * (supplement / cow_per_day - 4))
fei_penalty >= cow_per_day * (0.75 + 1.00 * (supplement / cow_per_day - 5))
end
)
# ========== Lactation cycle over the season ==========
if stage == data["number_of_weeks"]
@constraint(subproblem, cows_milking.out == data["stocking_rate"])
elseif data["maximum_lactation"] <= stage < data["number_of_weeks"]
@constraint(subproblem, cows_milking.out == 0)
else
@constraint(subproblem, cows_milking.out <= cows_milking.in)
end
# ========== Milk revenue cover penalty ==========
if stage == data["number_of_weeks"]
@constraint(subproblem, milk_production.out == 0.0)
@expression(
subproblem,
milk_revenue,
data["prices"][stage][markov_state] * milk_production.in
)
else
@constraint(
subproblem,
milk_production.out == milk_production.in + weekly_milk_production
)
@expression(subproblem, milk_revenue, 0.0)
end
# ========== Stage Objective ==========
@stageobjective(
subproblem,
milk_revenue - data["supplement_price"] * supplement -
data["harvest_cost"] * harvest - fei_penalty +
# Artificial term to encourage max soil moisture.
1e-4 * soil_moisture.out
)
end
return model
end
function visualize_policy(model, filename)
simulations = SDDP.simulate(
model,
1_000,
[
:cows_milking,
:pasture_cover,
:soil_moisture,
:grass_growth,
:supplement,
:weekly_milk_production,
:fei_penalty,
],
sampling_scheme = SDDP.InSampleMonteCarlo(
terminate_on_cycle = false,
terminate_on_dummy_leaf = false,
max_depth = 52 * 5,
),
)
xticks = (1:26:5*52, repeat(["Aug", "Feb"], outer = 5))
plot(
SDDP.publicationplot(
simulations,
data -> data[:cows_milking].out,
title = "(a)",
ylabel = "Cows Milking (cows/ha)",
xticks = xticks,
),
SDDP.publicationplot(
simulations,
data -> data[:pasture_cover].out / 1000,
ylabel = "Pasture Cover (t/ha)",
title = "(b)",
xticks = xticks,
),
SDDP.publicationplot(
simulations,
data -> data[:noise_term]["evapotranspiration"],
ylabel = "Evapotranspiration (mm)",
xlabel = " ",
title = "(c)",
xticks = xticks,
),
layout = (1, 3),
size = (1500, 300),
)
savefig(filename * ".pdf")
end
function estimate_statistical_bound(model, filename)
# Simulate to estimate the lower (statistical) bound. Note that we need to
# set `terminate_on_dummy_leaf = true`.
bound_simulations = SDDP.simulate(
model,
1_000,
sampling_scheme = SDDP.InSampleMonteCarlo(
terminate_on_cycle = false,
terminate_on_dummy_leaf = true,
),
)
objectives = [sum(x[:stage_objective] for x in sim) for sim in bound_simulations]
open(filename * ".json", "w") do io
write(io, JSON.json(objectives))
end
end
# The experiments can be run by calling `julia paper.jl run`.
if length(ARGS) > 0
if ARGS[1] == "run"
model = infinite_powder(discount_factor = 0.95, stocking_rate = 3.0)
Random.seed!(123)
SDDP.train(
model,
iteration_limit = 1_000,
print_level = 1,
log_file = "powder_complete.log",
)
Random.seed!(456)
visualize_policy(model, "powder_visualization")
model = infinite_powder(discount_factor = 0.95, stocking_rate = 3.0)
for loop = 1:5
Random.seed!(123 * loop)
SDDP.train(
model,
iteration_limit = 200,
print_level = 1,
log_file = "powder_$(loop).log",
)
Random.seed!(456 * loop)
estimate_statistical_bound(model, "powder_bound_$(loop)")
end
elseif ARGS[1] == "summarize"
using Statistics
function modified_cox(X, α = 1.96)
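# Modified Cox method: a confidence interval for the mean of log-normally
# distributed data, computed from the sample mean and variance of log(X).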
N = length(X)
logX = log.(X)
μ = Statistics.mean(logX)
σ² = Statistics.var(logX)
half_width = α * sqrt(σ² / N + σ²^2 / (2N - 2))
return exp(μ + σ² / 2 - half_width), exp(μ + σ² / 2 + half_width)
end
function normal(X, α = 1.96)
N = length(X)
μ = Statistics.mean(X)
σ = Statistics.std(X)
return μ + α * σ / sqrt(N), μ - α * σ / sqrt(N)
end
for i = 1:5
data = JSON.parsefile("powder_bound_$(i).json", use_mmap = false)
println(i, " ", modified_cox(data))
end
for i = 1:5
data = JSON.parsefile("powder_bound_$(i).json", use_mmap = false)
println(i, " ", normal(data))
end
end
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 28729 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})
A single scenario for testing.
See also: [`ValidationScenarios`](@ref).
"""
struct ValidationScenario{T,S}
scenario::Vector{Tuple{T,S}}
end
"""
ValidationScenarios{T,S}(scenarios::Vector{ValidationScenario{T,S}})
An [`AbstractSamplingScheme`](@ref) based on a vector of scenarios.
Each scenario is a vector of `Tuple{T, S}` where the first element is the node
to visit and the second element is the realization of the stagewise-independent
noise term. Pass `nothing` if the node is deterministic.
"""
mutable struct ValidationScenarios{T,S} <: AbstractSamplingScheme
scenarios::Vector{ValidationScenario{T,S}}
last::Int
SHA256::String
function ValidationScenarios(
scenarios::Vector{ValidationScenario{T,S}};
SHA256::String = "",
) where {T,S}
return new{T,S}(scenarios, 0, SHA256)
end
end
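# A minimal usage sketch (the node indices and noise values are hypothetical,
# not taken from any particular model): two fixed test scenarios on a linear
# graph with integer node indices and scalar noise could be constructed as
#
#     ValidationScenarios([
#         ValidationScenario([(1, 0.0), (2, 7.5)]),
#         ValidationScenario([(1, 0.0), (2, 12.5)]),
#     ])
#
# and passed wherever an `AbstractSamplingScheme` is accepted.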
function sample_scenario(
model::PolicyGraph{T},
sampling_scheme::ValidationScenarios{T,S};
kwargs...,
) where {T,S}
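# Cycle through the stored scenarios in order, wrapping back to the first once
# the last scenario has been returned.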
sampling_scheme.last += 1
if sampling_scheme.last > length(sampling_scheme.scenarios)
sampling_scheme.last = 1
end
return sampling_scheme.scenarios[sampling_scheme.last].scenario, false
end
function _throw_if_belief_states(model::PolicyGraph)
if length(model.belief_partition) != 0
error("StochOptFormat does not support belief states.")
end
return
end
function _throw_if_objective_states(model::PolicyGraph)
for (_, node) in model.nodes
if node.objective_state !== nothing
error("StochOptFormat does not support objective states.")
end
end
return
end
function _throw_if_exisiting_cuts(model::PolicyGraph)
for (_, node) in model.nodes
if length(node.bellman_function.global_theta.cuts) != 0
error(
"StochOptFormat does not support writing after a call to " *
"`SDDP.train`.",
)
end
end
return
end
function _validation_scenarios(
model::PolicyGraph,
num_scenarios::Int,
scenario_map,
sampling_scheme::AbstractSamplingScheme,
)
scenarios = map(1:num_scenarios) do _
scenario, _ = sample_scenario(model, sampling_scheme)
return ValidationScenario(scenario)
end
return _validation_scenarios(
model,
ValidationScenarios(scenarios),
scenario_map,
sampling_scheme,
)
end
function _validation_scenarios(
::PolicyGraph,
validation_scenarios::ValidationScenarios,
scenario_map,
::AbstractSamplingScheme,
)
return map(validation_scenarios.scenarios) do scenario
return map(scenario.scenario) do (node, ω)
return Dict("node" => "$node", "support" => scenario_map[node][ω])
end
end
end
"""
Base.write(
io::IO,
model::PolicyGraph;
validation_scenarios::Union{Nothing,Int,ValidationScenarios} = nothing,
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
kwargs...
)
Write `model` to `io` in the StochOptFormat file format.
Pass an `Int` to `validation_scenarios` (default `nothing`) to specify the
number of test scenarios to generate using the `sampling_scheme` sampling
scheme. Alternatively, pass a [`ValidationScenarios`](@ref) object to manually
specify the test scenarios to use.
Any additional `kwargs` passed to `write` will be stored in the top-level of the
resulting StochOptFormat file. Valid arguments include `name`, `author`, `date`,
and `description`.
## Compatibility
!!! warning
THIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. YOU
SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).
In addition to potential changes to the underlying format, only a subset of
possible modifications are supported. These include:
- `JuMP.fix`
- `JuMP.set_lower_bound`
- `JuMP.set_upper_bound`
- `JuMP.set_normalized_rhs`
- Changes to the constant or affine terms in a stage objective.
If your model uses something other than this, this function will silently write
an incorrect formulation of the problem.
## Examples
```julia
open("my_model.sof.json", "w") do io
write(
io,
model;
validation_scenarios = 10,
name = "MyModel",
author = "@odow",
date = "2020-07-20",
description = "Example problem for the SDDP.jl documentation",
)
end
```
"""
function Base.write(
io::IO,
model::PolicyGraph{T};
validation_scenarios::Union{Nothing,Int,ValidationScenarios{T,S}} = nothing,
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
kwargs...,
) where {T,S}
_throw_if_belief_states(model)
_throw_if_objective_states(model)
_throw_if_exisiting_cuts(model)
nodes = Dict{String,Any}()
subproblems = Dict{String,Any}()
scenario_map = Dict{T,Any}()
for (node_name, node) in model.nodes
_add_node_to_dict(node, node_name, nodes, subproblems, scenario_map)
end
sof = Dict{String,Any}(
"version" => Dict("major" => 1, "minor" => 0),
"root" => Dict{String,Any}(
"name" => "$(model.root_node)",
"state_variables" => Dict{String,Any}(
"$(k)" => v for (k, v) in model.initial_root_state
),
"successors" => Dict(
"$(child.term)" => child.probability for
child in model.root_children
),
),
"nodes" => nodes,
"subproblems" => subproblems,
)
if validation_scenarios !== nothing
sof["validation_scenarios"] = _validation_scenarios(
model,
validation_scenarios,
scenario_map,
sampling_scheme,
)
end
for (k, v) in kwargs
sof["$(k)"] = v
end
return Base.write(io, JSON.json(sof))
end
function _add_node_to_dict(
node::Node,
node_name,
nodes::Dict,
subproblems::Dict,
scenario_map::Dict,
)
s_node_name = "$node_name"
random_variables = String[]
realizations = Dict{String,Any}[
Dict{String,Any}(
"probability" => noise.probability,
"support" => Dict{String,Float64}(),
) for noise in node.noise_terms
]
undo_reformulation =
_reformulate_uncertainty(node, realizations, random_variables)
nodes[s_node_name] = Dict{String,Any}("subproblem" => s_node_name)
if !isempty(realizations)
nodes[s_node_name]["realizations"] = realizations
end
if !isempty(node.children)
nodes[s_node_name]["successors"] = Dict(
"$(child.term)" => child.probability for child in node.children
)
end
subproblems[s_node_name] = Dict{String,Any}(
"state_variables" => Dict(
"$state_name" =>
Dict("in" => name(state.in), "out" => name(state.out))
for (state_name, state) in node.states
),
"subproblem" => _subproblem_to_dict(node.subproblem),
)
if !isempty(random_variables)
subproblems[s_node_name]["random_variables"] = random_variables
end
undo_reformulation()
scenario_map[node_name] = Dict(
noise.term => realizations[i]["support"] for
(i, noise) in enumerate(node.noise_terms)
)
return
end
"""
_reformulate_uncertainty(
node::Node,
realizations,
random_variables,
)
Convert any lower and upper variable bounds that depend on the uncertainty
into linear constraints with a random variable.
Fixed variables are recorded as random variables, but no transformation is done.
Given an affine stage objective with stagewise-independent uncertainty,
reformulate it into a quadratic stage objective by replacing the random
coefficients with random decision variables.
Return a function that undoes the reformulation when called with no arguments.
"""
function _reformulate_uncertainty(
node::Node,
realizations::Vector,
random_variables::Vector{String},
)
# Storage for things that are changing.
variable_bound_storage = Dict{VariableRef,Any}[]
changing_variable_lower_bound = Set{VariableRef}()
changing_variable_upper_bound = Set{VariableRef}()
changing_variable_fixed_bound = Set{VariableRef}()
objective_storage = AffExpr[]
changing_objective_constant = false
changing_objective_coefficient = Set{VariableRef}()
constraint_rhs_storage = Dict{ConstraintRef,Float64}[]
changing_constraint_rhs = Set{ConstraintRef}()
# Collect terms that are changing
for noise in node.noise_terms
node.parameterize(noise.term)
# Collect changing variable bounds.
_collect_changing_variable_bounds(
node,
variable_bound_storage,
changing_variable_lower_bound,
changing_variable_upper_bound,
changing_variable_fixed_bound,
)
# Collect changing objective terms.
changing_objective_constant = _collect_changing_objective(
node,
objective_storage,
changing_objective_constant,
changing_objective_coefficient,
)
# Collect changing RHS terms.
_collect_changing_constraint_rhs(
node,
constraint_rhs_storage,
changing_constraint_rhs,
)
end
added_variables = VariableRef[]
added_constraints = ConstraintRef[]
# Reformulate the objective function.
_reformulate_objective(
node,
realizations,
random_variables,
added_variables,
objective_storage,
changing_objective_constant,
changing_objective_coefficient,
)
# Reformulate fixed variables.
for x in changing_variable_fixed_bound
_reformulate_fixed_bound(
node,
realizations,
random_variables,
added_variables,
added_constraints,
variable_bound_storage,
x,
)
end
# Reformulate lower bounded variables.
for x in changing_variable_lower_bound
_reformulate_lower_bound(
node,
realizations,
random_variables,
added_variables,
added_constraints,
variable_bound_storage,
x,
)
end
# Reformulate upper bounded variables.
for x in changing_variable_upper_bound
_reformulate_upper_bound(
node,
realizations,
random_variables,
added_variables,
added_constraints,
variable_bound_storage,
x,
)
end
# Reformulate changing RHS term.
for ci in changing_constraint_rhs
_reformulate_constraint_rhs(
node,
realizations,
random_variables,
added_variables,
constraint_rhs_storage,
ci,
)
end
return () -> begin
delete(node.subproblem, added_variables)
delete.(Ref(node.subproblem), added_constraints)
set_objective_function(node.subproblem, node.stage_objective)
return
end
end
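# A sketch of the bound reformulation performed by the helpers below
# (illustrative names only; `x`, `y`, and `u` are not identifiers used in the
# implementation). A variable whose upper bound changes with the uncertainty,
#
#   set_upper_bound(x, u(ω))            # bound depends on the realization ω
#
# is rewritten as
#
#   y = @variable(subproblem)           # new random variable
#   @constraint(subproblem, x <= y)     # static constraint
#   delete_upper_bound(x)
#   realization["support"][new_name] = u(ω)   # per-realization support of y
#
# Lower bounds and fixed values are handled analogously.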
function _collect_changing_variable_bounds(
node,
variable_bound_storage,
changing_variable_lower_bound,
changing_variable_upper_bound,
changing_variable_fixed_bound,
)
bound = Dict{VariableRef,Any}()
for x in all_variables(node.subproblem)
l, u, f = -Inf, Inf, 0.0
if has_lower_bound(x)
l = lower_bound(x)
end
if has_upper_bound(x)
u = upper_bound(x)
end
if is_fixed(x)
f = fix_value(x)
end
if length(variable_bound_storage) >= 1
if variable_bound_storage[1][x].l != l
push!(changing_variable_lower_bound, x)
end
if variable_bound_storage[1][x].u != u
push!(changing_variable_upper_bound, x)
end
if variable_bound_storage[1][x].f != f
push!(changing_variable_fixed_bound, x)
end
end
bound[x] = (l = l, u = u, f = f)
end
push!(variable_bound_storage, bound)
return
end
function _collect_changing_objective(
node,
objective_storage,
changing_objective_constant,
changing_objective_coefficient,
)
push!(objective_storage, convert(AffExpr, node.stage_objective))
if length(objective_storage) > 1
obj = objective_storage[end]
if obj.constant != objective_storage[1].constant
changing_objective_constant = true
end
for k in _dict_diff_keys(objective_storage[1].terms, obj.terms)
push!(changing_objective_coefficient, k)
end
end
return changing_objective_constant
end
function _collect_changing_constraint_rhs(
node,
constraint_rhs_storage,
changing_constraint_rhs,
)
rhs = Dict{ConstraintRef,Float64}()
sets =
(MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.EqualTo{Float64})
for (F, S) in list_of_constraint_types(node.subproblem)
if F == VariableRef || !(S in sets)
continue
end
for ci in all_constraints(node.subproblem, F, S)
obj = constraint_object(ci)
rhs[ci] = MOI.constant(obj.set)
if length(constraint_rhs_storage) >= 1
if constraint_rhs_storage[1][ci] != rhs[ci]
push!(changing_constraint_rhs, ci)
end
end
end
end
push!(constraint_rhs_storage, rhs)
return
end
function _reformulate_objective(
node::Node,
realizations::Vector,
random_variables::Vector{String},
added_variables::Vector{VariableRef},
objective_storage::Vector,
changing_objective_constant::Bool,
changing_objective_coefficient::Set{VariableRef},
)
objective = convert(QuadExpr, copy(node.stage_objective))
# Reformulate a changing objective constant.
if changing_objective_constant
new_name = "_SDDPjl_random_objective_constant_"
y = _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
for (r, o) in zip(realizations, objective_storage)
r["support"][new_name] = o.constant
end
objective.aff.constant = 0.0
objective.aff.terms[y] = 1.0
end
    # Reformulate any changing objective coefficients. If there are none, the
    # objective keeps its original coefficients.
    if length(changing_objective_coefficient) > 0
for x in changing_objective_coefficient
new_name = "_SDDPjl_random_objective_$(name(x))_"
y = _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
for (r, o) in zip(realizations, objective_storage)
r["support"][new_name] = get(o.terms, x, 0.0)
end
delete!.(Ref(objective.aff.terms), x)
add_to_expression!(objective, 1.0, y, x)
end
end
# Set the objective function to be written out.
if length(objective.terms) == 0
set_objective_function(node.subproblem, objective.aff)
else
set_objective_function(node.subproblem, objective)
end
return
end
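# A sketch of the objective reformulation above (illustrative names): an affine
# stage objective with a random coefficient and constant,
#
#   @stageobjective(sp, c(ω) * x + b(ω))
#
# becomes the quadratic objective `y_c * x + y_b`, where `y_c` and `y_b` are
# new decision variables whose per-realization supports are `c(ω)` and `b(ω)`.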
function _reformulate_fixed_bound(
::Node,
realizations::Vector,
random_variables::Vector{String},
::Vector{VariableRef},
::Vector,
variable_bound_storage::Vector,
x::VariableRef,
)
for (realization, bound) in zip(realizations, variable_bound_storage)
realization["support"][name(x)] = bound[x].f
end
push!(random_variables, name(x))
unfix(x)
return
end
function _reformulate_lower_bound(
node::Node,
realizations::Vector,
random_variables::Vector{String},
added_variables::Vector{VariableRef},
added_constraints::Vector,
variable_bound_storage::Vector,
x::VariableRef,
)
new_name = "_SDDPjl_lower_bound_$(name(x))_"
y = _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
c = @constraint(node.subproblem, x >= y)
push!(added_constraints, c)
delete_lower_bound(x)
for (realization, bound) in zip(realizations, variable_bound_storage)
realization["support"][new_name] = bound[x].l
end
return
end
function _reformulate_upper_bound(
node::Node,
realizations::Vector,
random_variables::Vector{String},
added_variables::Vector{VariableRef},
added_constraints::Vector,
variable_bound_storage::Vector,
x::VariableRef,
)
new_name = "_SDDPjl_upper_bound_$(name(x))_"
y = _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
c = @constraint(node.subproblem, x <= y)
push!(added_constraints, c)
delete_upper_bound(x)
for (realization, bound) in zip(realizations, variable_bound_storage)
realization["support"][new_name] = bound[x].u
end
return
end
function _reformulate_constraint_rhs(
node,
realizations,
random_variables,
added_variables,
constraint_rhs_storage,
ci,
)
new_name = "_SDDPjl_rhs_$(name(ci))_"
y = _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
set_normalized_coefficient(ci, y, -1.0)
set_normalized_rhs(ci, 0.0)
for (realization, rhs) in zip(realizations, constraint_rhs_storage)
realization["support"][new_name] = rhs[ci]
end
return
end
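# For example (illustrative): a constraint `2x + 3u <= b(ω)` with a random
# right-hand side is rewritten above as `2x + 3u - y <= 0`, and `b(ω)` is
# stored as the per-realization support of the new random variable `y`.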
function _add_new_random_variable(
node,
new_name,
random_variables,
added_variables,
)
y = @variable(node.subproblem, base_name = new_name)
push!(added_variables, y)
push!(random_variables, new_name)
return y
end
function _dict_diff_keys(x::AbstractDict{K,V}, y::AbstractDict{K,V}) where {K,V}
diff = Set{K}()
for (k, v) in x
if haskey(y, k)
if v != y[k]
push!(diff, k)
end
else
push!(diff, k)
end
end
for k in keys(y)
if !haskey(x, k)
push!(diff, k)
end
end
return diff
end
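# For example, `_dict_diff_keys(Dict(:a => 1, :b => 2), Dict(:a => 1, :b => 3, :c => 4))`
# returns a `Set` containing `:b` (values differ) and `:c` (missing from the
# first dictionary).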
function _subproblem_to_dict(subproblem::JuMP.Model)
dest_model = MOI.FileFormats.Model(; format = MOI.FileFormats.FORMAT_MOF)
MOI.copy_to(dest_model, backend(subproblem))
io = IOBuffer()
Base.write(io, dest_model)
seekstart(io)
return JSON.parse(io; dicttype = Dict{String,Any})
end
function _load_mof_model(sp::JuMP.Model, data::Dict, subproblem_name::String)
model = MOI.FileFormats.Model(; format = MOI.FileFormats.FORMAT_MOF)
io = IOBuffer()
subproblem = JSON.json(data["subproblems"][subproblem_name]["subproblem"])
Base.write(io, subproblem)
seekstart(io)
MOI.read!(io, model)
MOI.copy_to(sp, model)
return
end
"""
Base.read(
io::IO,
::Type{PolicyGraph};
bound::Float64 = 1e6,
)::Tuple{PolicyGraph,ValidationScenarios}
Return a tuple containing a [`PolicyGraph`](@ref) object and a
[`ValidationScenarios`](@ref) read from `io` in the StochOptFormat file format.
See also: [`evaluate`](@ref).
## Compatibility
!!! warning
This function is experimental. Things may change between commits. You should
not rely on this functionality as a long-term file format (yet).
In addition to potential changes to the underlying format, only a subset of
possible modifications are supported. These include:
- Additive random variables in the constraints or in the objective
- Multiplicative random variables in the objective
If your model uses features other than these, this function may throw an error
or silently build a non-convex model.
## Examples
```julia
open("my_model.sof.json", "r") do io
model, validation_scenarios = read(io, PolicyGraph)
end
```
"""
function Base.read(io::IO, ::Type{PolicyGraph}; bound::Float64 = 1e6)
data = JSON.parse(io; dicttype = Dict{String,Any})
graph = Graph("__root__")
for from_node in keys(data["nodes"])
add_node(graph, from_node)
end
for (to_node, probability) in get(data["root"], "successors", Any[])
add_edge(graph, "__root__" => to_node, probability)
end
for (from_node, node) in data["nodes"]
for (to_node, probability) in get(node, "successors", Any[])
add_edge(graph, from_node => to_node, probability)
end
end
proportion_min = sum(
node["subproblem"]["objective"]["sense"] == "min" for
(_, node) in data["subproblems"]
)
proportion_min /= length(data["subproblems"])
model_sense = proportion_min >= 0.5 ? MOI.MIN_SENSE : MOI.MAX_SENSE
function subproblem_builder(sp::Model, node_name::String)
subproblem_name = data["nodes"][node_name]["subproblem"]
_load_mof_model(sp, data, subproblem_name)
node = get_node(sp)
for (s, state) in
data["subproblems"][subproblem_name]["state_variables"]
node.states[Symbol(s)] = State(
variable_by_name(node.subproblem, state["in"]),
variable_by_name(node.subproblem, state["out"]),
)
end
Ω, P = Dict[], Float64[]
for realization in get(data["nodes"][node_name], "realizations", Any[])
push!(P, realization["probability"])
push!(Ω, get(realization, "support", Dict()))
end
if objective_sense(sp) != model_sense
@warn(
"Flipping the objective sense of node $(node_name) so that " *
"it matches the majority of the subproblems."
)
end
obj_sgn = objective_sense(sp) == model_sense ? 1 : -1
objective_coefficients, objf = _convert_objective_function(
sp,
convert(
Vector{String},
get(
data["subproblems"][subproblem_name],
"random_variables",
String[],
),
),
)
parameterize(sp, Ω, P) do ω
if ω !== nothing
for (k, v) in ω
x = get(objective_coefficients, k, nothing)
if x !== nothing
if objf isa AffExpr
objf.terms[x.var] = x.aff + v * x.coef
else
objf.aff.terms[x.var] = x.aff + v * x.coef
end
end
fix(variable_by_name(sp, k), v)
end
end
@stageobjective(sp, obj_sgn * objf)
end
end
model = if model_sense == MOI.MIN_SENSE
PolicyGraph(
subproblem_builder,
graph;
sense = :Min,
lower_bound = -abs(bound),
)
else
PolicyGraph(
subproblem_builder,
graph;
sense = :Max,
upper_bound = abs(bound),
)
end
for (k, v) in data["root"]["state_variables"]
model.initial_root_state[Symbol(k)] = v
end
seekstart(io)
SHA256 = bytes2hex(SHA.sha2_256(io))
return model, _validation_scenarios(data, SHA256)
end
function _validation_scenarios(data::Dict, SHA256::String)
if !haskey(data, "validation_scenarios")
return nothing
end
scenarios = map(data["validation_scenarios"]) do scenario
items = map(scenario) do item
support = get(item, "support", Any[])
return (item["node"], isempty(support) ? nothing : support)
end
return ValidationScenario(items)
end
return ValidationScenarios(scenarios; SHA256 = SHA256)
end
function _convert_objective_function(sp::Model, rvs::Vector{String})
return _convert_objective_function(sp, rvs, objective_function(sp))
end
function _convert_objective_function(::Model, ::Vector{String}, objf)
return Dict{String,Any}(), objf
end
function _convert_objective_function(
::Model,
rvs::Vector{String},
objf::QuadExpr,
)
terms = Dict{String,Any}()
aff_obj = copy(objf.aff)
quad_terms = empty(copy(objf.terms))
for (k, v) in objf.terms
a, b = name(k.a), name(k.b)
if a in rvs
terms[a] = (var = k.b, coef = v, aff = get(aff_obj.terms, a, 0.0))
elseif b in rvs
terms[b] = (var = k.a, coef = v, aff = get(aff_obj.terms, b, 0.0))
else
quad_terms[k] = v
end
end
if length(terms) == length(objf.terms)
return terms, aff_obj
end
return terms, QuadExpr(aff_obj, quad_terms)
end
"""
write_to_file(
model::PolicyGraph,
filename::String;
compression::MOI.FileFormats.AbstractCompressionScheme =
MOI.FileFormats.AutomaticCompression(),
kwargs...
)
Write `model` to `filename` in the StochOptFormat file format.
Pass an argument to `compression` to override the default of automatically
detecting the file compression to use based on the extension of `filename`.
See [`Base.write(::IO, ::PolicyGraph)`](@ref) for information on the
keyword arguments that can be provided.
!!! warning
This function is experimental. See the full warning in
[`Base.write(::IO, ::PolicyGraph)`](@ref).
## Examples
```julia
write_to_file(model, "my_model.sof.json"; validation_scenarios = 10)
```
"""
function write_to_file(
model::PolicyGraph,
filename::String;
compression::MOI.FileFormats.AbstractCompressionScheme = MOI.FileFormats.AutomaticCompression(),
kwargs...,
)
return MOI.FileFormats.compressed_open(filename, "w", compression) do io
return Base.write(io, model; kwargs...)
end
end
"""
read_from_file(
filename::String;
compression::MOI.FileFormats.AbstractCompressionScheme =
MOI.FileFormats.AutomaticCompression(),
kwargs...
)::Tuple{PolicyGraph, ValidationScenarios}
Return a tuple containing a [`PolicyGraph`](@ref) object and a
[`ValidationScenarios`](@ref) read from `filename` in the StochOptFormat file format.
Pass an argument to `compression` to override the default of automatically
detecting the file compression to use based on the extension of `filename`.
See [`Base.read(::IO, ::Type{PolicyGraph})`](@ref) for information on the
keyword arguments that can be provided.
!!! warning
This function is experimental. See the full warning in
[`Base.read(::IO, ::Type{PolicyGraph})`](@ref).
## Examples
```julia
model, validation_scenarios = read_from_file("my_model.sof.json")
```
"""
function read_from_file(
filename::String;
compression::MOI.FileFormats.AbstractCompressionScheme = MOI.FileFormats.AutomaticCompression(),
kwargs...,
)
return MOI.FileFormats.compressed_open(filename, "r", compression) do io
return Base.read(io, PolicyGraph; kwargs...)
end
end
"""
evaluate(
model::PolicyGraph{T},
validation_scenarios::ValidationScenarios{T,S},
) where {T,S}
Evaluate the performance of the policy contained in `model` after a call to
[`train`](@ref) on the scenarios specified by `validation_scenarios`.
## Examples
```julia
model, validation_scenarios = read_from_file("my_model.sof.json")
train(model; iteration_limit = 100)
simulations = evaluate(model, validation_scenarios)
```
"""
function evaluate(
model::PolicyGraph{T},
validation_scenarios::ValidationScenarios{T,S},
) where {T,S}
validation_scenarios.last = 0
simulations = simulate(
model,
length(validation_scenarios.scenarios);
sampling_scheme = validation_scenarios,
custom_recorders = Dict{Symbol,Function}(
:primal =>
(sp) -> begin
Dict{String,Float64}(
name(x) => value(x) for
x in all_variables(sp) if !isempty(name(x))
)
end,
),
)
return Dict(
"problem_sha256_checksum" => validation_scenarios.SHA256,
"scenarios" => [
[
Dict{String,Any}(
"objective" => s[:stage_objective],
"primal" => s[:primal],
) for s in sim
] for sim in simulations
],
)
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 3250 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# ============================================================================ #
#
# Code to implement a JuMP variable extension.
#
# Usage:
# julia> @variable(subproblem, 0 <= x[i=1:2] <= i,
# SDDP.State, initial_value = i)
#
# julia> x
# 2-element Array{State{VariableRef},1}:
# State(x[1]_in,x[1]_out)
# State(x[2]_in,x[2]_out)
#
# julia> x[1].in
# x[1]_in
#
# julia> typeof(x[1].in)
# VariableRef
#
# julia> x[2].out
# x[2]_out
#
# Assuming subproblem has been solved, and there exists a primal solution
# julia> x_values = JuMP.value.(x)
# 2-element Array{State{Float64},1}:
# State(0.0,1.0)
# State(1.2,3.0)
#
# julia> x_values[1].out
# 1.0
# ============================================================================ #
struct StateInfo
in::JuMP.VariableInfo
out::JuMP.VariableInfo
initial_value::Float64
kwargs::Any
end
function JuMP.build_variable(
_error::Function,
info::JuMP.VariableInfo,
::Type{State};
initial_value = NaN,
kwargs...,
)
if isnan(initial_value)
_error(
"When creating a state variable, you must set the " *
"`initial_value` keyword to the value of the state variable at" *
" the root node.",
)
end
return StateInfo(
JuMP.VariableInfo(
false,
NaN, # lower bound
false,
NaN, # upper bound
false,
NaN, # fixed value
false,
NaN, # start value
false,
false, # binary and integer
),
info,
initial_value,
kwargs,
)
end
function JuMP.add_variable(
subproblem::JuMP.Model,
state_info::StateInfo,
name::String,
)
state = State(
JuMP.add_variable(
subproblem,
JuMP.ScalarVariable(state_info.in),
name * "_in",
),
JuMP.add_variable(
subproblem,
JuMP.ScalarVariable(state_info.out),
name * "_out",
),
)
node = get_node(subproblem)
sym_name = Symbol(name)
@assert !haskey(node.states, sym_name) # JuMP prevents duplicate names.
node.states[sym_name] = state
graph = get_policy_graph(subproblem)
graph.initial_root_state[sym_name] = state_info.initial_value
return state
end
function JuMP.value(state::State{JuMP.VariableRef})
return State(JuMP.value(state.in), JuMP.value(state.out))
end
# Overload for broadcast syntax such as `JuMP.value.([state_1, state_2])`.
Broadcast.broadcastable(state::State{JuMP.VariableRef}) = Ref(state)
# ==============================================================================
function JuMP.set_optimizer(model::SDDP.PolicyGraph, optimizer)
for node in values(model.nodes)
set_optimizer(node.subproblem, optimizer)
node.optimizer = optimizer
set_silent(node.subproblem)
end
return
end
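# For example, to attach an optimizer to every node of an existing policy graph
# (assuming the HiGHS package is installed):
#
#   julia> JuMP.set_optimizer(model, HiGHS.Optimizer)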
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 13315 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module MSPFormat
import JSON
import JuMP
import ..SDDP
function _parse_lattice(filename::String)
data = JuMP.MOI.FileFormats.compressed_open(
JSON.parse,
filename,
"r",
JuMP.MOI.FileFormats.AutomaticCompression(),
)
graph = SDDP.Graph("root")
for key in keys(data)
SDDP.add_node(graph, key)
end
for (key, value) in data
for child in sort(collect(keys(value["successors"])))
SDDP.add_edge(graph, key => child, value["successors"][child])
end
end
# MSPFormat doesn't have explicit root -> stage 1 arcs. Assume uniform.
# Also, MSPFormat uses 0-indexed stages.
stage_zero = String[key for (key, value) in data if value["stage"] == 0]
for key in stage_zero
SDDP.add_edge(graph, "root" => key, 1 / length(stage_zero))
end
return _reduce_lattice(graph, data)
end
"""
_reduce_lattice(graph, data)
This function takes a graph and associated data from `_parse_lattice`, which is
assumed to represent a Markovian lattice, and tries to collapse nodes in the
graph to be stagewise independent (if possible).
For now, a trivial way to detect stagewise independence is to group nodes by
their set of outgoing arcs (their entry in `graph.nodes`), and then check that
each group contains exactly the nodes of a stage.
"""
function _reduce_lattice(graph, data)
arcs_to_node = Dict{UInt64,Vector{String}}()
for (node, arcs) in graph.nodes
push!(get!(() -> String[], arcs_to_node, hash(arcs)), node)
end
for v in values(arcs_to_node)
sort!(v)
end
nodes_by_stage = Dict{Int,Vector{String}}()
for (node, d) in data
push!(get!(() -> String[], nodes_by_stage, d["stage"]), node)
end
for v in values(nodes_by_stage)
sort!(v)
end
if !all(n -> n in Set(values(arcs_to_node)), values(nodes_by_stage))
# Model is not stagewise independent
graph_data = Dict(
k => Dict(
"stage" => v["stage"],
"sample_space" => [v["state"]],
"probability" => [1.0],
) for (k, v) in data
)
return graph, graph_data
end
new_graph = SDDP.Graph("root")
for t in keys(nodes_by_stage)
SDDP.add_node(new_graph, "$t")
end
for t in keys(nodes_by_stage)
if t == 0
SDDP.add_edge(new_graph, "root" => "$t", 1.0)
else
SDDP.add_edge(new_graph, "$(t-1)" => "$t", 1.0)
end
end
graph_data = Dict(
"$t" => Dict{String,Any}(
"stage" => t,
"sample_space" => Any[],
"probability" => Float64[],
) for t in keys(nodes_by_stage)
)
parent = "root"
while !isempty(graph.nodes[parent])
for (node, probability) in sort(graph.nodes[parent])
t = string(data[node]["stage"])
push!(graph_data[t]["sample_space"], data[node]["state"])
push!(graph_data[t]["probability"], probability)
parent = node
end
end
return new_graph, graph_data
end
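# For example, if every node in stage `t` of the lattice has an identical set
# of successors, the lattice collapses to a linear graph with one node per
# stage, and each original node's `state` becomes an element of that stage's
# `sample_space` with the corresponding arc probability.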
# Use a default of 0.0 for any missing keys.
_get_constant(terms::String, state::Dict) = get(state, terms, 0.0)
_get_constant(::String, ::Nothing) = nothing
_get_constant(key::Number, ::Union{Dict,Nothing}) = key
function _get_constant(terms::Vector, state::Union{Dict,Nothing} = nothing)
if length(terms) == 1
# Special case: if `terms = Any[1.0]` or `terms = Any["inf"]`, then we
# don't need complicated recursive logic. Bail early.
if terms[1] isa Number
return terms[1]
elseif terms[1] == "inf"
return Inf
elseif terms[1] == "-inf"
return -Inf
elseif terms[1] isa String
value = _get_constant(terms[1], state)
return something(value, terms)
end
end
result = nothing
for term in terms
@assert term isa Dict
if haskey(term, "ADD")
value = _get_constant(term["ADD"], state)
if value === nothing
return terms
end
result = something(result, 0.0) + value
else
@assert haskey(term, "MUL")
value = _get_constant(term["MUL"], state)
if value === nothing
return terms
end
result = something(result, 1.0) * value
end
end
return result::Number
end
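# For example (using the MSPFormat expression encoding handled above):
#
#   julia> _get_constant(Any[1.0])      # a plain constant
#   1.0
#
#   julia> _get_constant(Any["inf"])
#   Inf
#
#   julia> _get_constant(Any[Dict("ADD" => "inflow")], Dict("inflow" => 5.0))
#   5.0
#
# If the expression references the random state but no state is given, the
# original vector of terms is returned unevaluated.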
function _set_type(rhs::Number, type)
if type == "EQ"
return JuMP.MOI.EqualTo{Float64}(rhs)
elseif type == "LEQ"
return JuMP.MOI.LessThan{Float64}(rhs)
else
@assert type == "GEQ"
return JuMP.MOI.GreaterThan{Float64}(rhs)
end
end
# If the RHS is not a Number, it must be a random expression. Use a default RHS
# of 0.0.
_set_type(::Any, type) = _set_type(0.0, type)
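# For example, `_set_type(1.0, "LEQ")` returns `MOI.LessThan{Float64}(1.0)`. A
# non-numeric right-hand side (a random expression) falls back to a constant of
# `0.0`; the realized value is substituted later via `set_normalized_rhs` in
# `parameterize`.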
function _build_lhs(stage::Integer, sp::JuMP.Model, terms::Vector{Any})
if maximum(term["stage"] for term in terms) != stage
# Skip constraints which are not relevant for this stage.
return nothing, nothing
end
# For now, we assume the LHS is affine.
lhs = JuMP.AffExpr(0.0)
# lhs_data will store random coefficient terms for each variable.
lhs_data = Dict{JuMP.VariableRef,Any}()
for term in terms
# Lookup variable by name from the JuMP model.
x = sp[Symbol(term["name"])]
if x isa JuMP.VariableRef
@assert term["stage"] == stage
else
# `x` is a state, so we need to distinguish whether we want the
# `.in` or `.out` variables.
@assert x isa SDDP.State
if term["stage"] == stage
x = x.out
else
@assert term["stage"] == stage - 1
x = x.in
end
end
coef = _get_constant(term["coefficient"])
if coef isa Vector{Any}
lhs_data[x] = coef # Store the random data
# Set lhs += 1.0 * x for now. This will get updated in parameterize.
lhs += x
else
lhs += coef * x
end
end
return lhs, lhs_data
end
# MSPFormat does not store an explicit list of state variables. Detect state
# variables by finding a variable name that appears in the same constraint with
# two different `stage` values.
function _state_variables(problem)
states = Set{String}()
for constraint in problem["constraints"]
terms = constraint["lhs"]
stage = maximum(term["stage"] for term in terms)
for term in terms
if term["stage"] != stage
push!(states, term["name"])
end
end
end
return sort(collect(states))
end
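# For example, a constraint whose `lhs` contains the terms
# `Dict("name" => "volume", "stage" => 1)` and
# `Dict("name" => "volume", "stage" => 0)` marks "volume" as a state variable,
# because it links stage 0 to stage 1.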
"""
read_from_file(
problem_filename::String,
lattice_filename::String;
bound::Float64 = 1e6,
)
Return a [`SDDP.PolicyGraph`](@ref) built from the MSPFormat files
`problem_filename` and `lattice_filename`, which point to the `.problem.json`
and `.lattice.json` files respectively.
!!! warning
This function is experimental and may change in any future commit.
## Keyword arguments
* `bound::Float64 = 1e6`. The absolute value of the lower bound (if minimizing)
or the upper bound (if maximizing).
"""
function read_from_file(
problem_filename::String,
lattice_filename::String;
bound::Float64 = 1e6,
)
graph, graph_data = _parse_lattice(lattice_filename)
problem = JSON.parsefile(problem_filename)
state_variables = _state_variables(problem)
initial_values = Dict{Symbol,Float64}()
model = SDDP.PolicyGraph(
graph;
sense = problem["maximize"] ? :Max : :Min,
lower_bound = problem["maximize"] ? -Inf : -bound,
upper_bound = problem["maximize"] ? bound : Inf,
) do sp, node
ω_lower_bound = Dict{JuMP.VariableRef,Any}()
ω_upper_bound = Dict{JuMP.VariableRef,Any}()
ω_objective = Dict{JuMP.VariableRef,Any}()
ω_lhs_coefficient = Dict{JuMP.ConstraintRef,Any}()
ω_rhs_coefficient = Dict{JuMP.ConstraintRef,Any}()
stage = graph_data[node]["stage"]
stage_objective = JuMP.AffExpr(0.0)
for variable in problem["variables"]
if variable["stage"] != stage
continue
end
lower_bound = _get_constant(variable["lb"])
upper_bound = _get_constant(variable["ub"])
objective = _get_constant(variable["obj"])
sym_name = Symbol(variable["name"])
if !haskey(initial_values, sym_name)
initial_values[sym_name] = 0.0
end
if lower_bound isa Number && isfinite(lower_bound)
initial_values[sym_name] =
max(initial_values[sym_name], lower_bound)
end
if upper_bound isa Number && isfinite(upper_bound)
initial_values[sym_name] =
min(initial_values[sym_name], upper_bound)
end
x = if variable["name"] in state_variables
sp[sym_name] = JuMP.@variable(
sp,
variable_type = SDDP.State,
initial_value = initial_values[sym_name],
base_name = "$sym_name",
)
sp[sym_name].out
else
sp[sym_name] = JuMP.@variable(sp, base_name = "$sym_name")
end
if variable["type"] == "BINARY"
set_binary(x)
elseif variable["type"] == "INTEGER"
set_integer(x)
else
@assert variable["type"] == "CONTINUOUS"
end
if lower_bound isa Number && isfinite(lower_bound)
JuMP.set_lower_bound(x, lower_bound)
elseif lower_bound isa Vector{Any}
ω_lower_bound[x] = lower_bound
end
if upper_bound isa Number && isfinite(upper_bound)
JuMP.set_upper_bound(x, upper_bound)
elseif upper_bound isa Vector{Any}
ω_upper_bound[x] = upper_bound
end
if objective isa Number
stage_objective += objective * x
elseif objective isa Vector{Any}
ω_objective[x] = objective
end
end
for str_name in state_variables
sym_name = Symbol(str_name)
if !haskey(JuMP.object_dictionary(sp), sym_name)
sp[sym_name] = JuMP.@variable(
sp,
variable_type = SDDP.State,
initial_value = 0.0,
base_name = "$sym_name",
)
end
end
for constraint in problem["constraints"]
lhs, lhs_data = _build_lhs(stage, sp, constraint["lhs"])
if lhs === nothing
continue
end
rhs = _get_constant(constraint["rhs"])
set = _set_type(rhs, constraint["type"])
con = JuMP.@constraint(sp, lhs in set)
if rhs isa Vector{Any}
ω_rhs_coefficient[con] = rhs
end
if lhs_data !== nothing
ω_lhs_coefficient[con] = lhs_data
end
end
Ω = graph_data[node]["sample_space"]
P = graph_data[node]["probability"]
SDDP.parameterize(sp, Ω, P) do ω
SDDP.@stageobjective(
sp,
stage_objective + sum(
x * _get_constant(terms, ω) for (x, terms) in ω_objective
)
)
for (x, terms) in ω_lower_bound
JuMP.set_lower_bound(x, _get_constant(terms, ω))
end
for (x, terms) in ω_upper_bound
JuMP.set_upper_bound(x, _get_constant(terms, ω))
end
for (con, lhs_data) in ω_lhs_coefficient
for (x, terms) in lhs_data
JuMP.set_normalized_coefficient(con, x, _get_constant(terms, ω))
end
end
for (con, terms) in ω_rhs_coefficient
JuMP.set_normalized_rhs(con, _get_constant(terms, ω))
end
end
return
end
return model
end
"""
read_from_file(problem_name::String; kwargs...)
A utility for reading MSPFormat files that saves writing out both the problem
and lattice filenames if they are in the same location and differ only by the
suffix.
It is equivalent to a call like:
```julia
read_from_file(problem_name * ".problem.json", problem_name * ".lattice.json")
```
In addition, this function searches for compressed `.gz` versions of the lattice
file, since it may be very large.
"""
function read_from_file(problem_name::String; kwargs...)
problem_filename = problem_name * ".problem.json"
lattice_filename = problem_name * ".lattice.json"
if !isfile(lattice_filename)
lattice_filename *= ".gz"
end
return read_from_file(problem_filename, lattice_filename; kwargs...)
end
end # module
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 1783 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module SDDP
import Reexport
Reexport.@reexport using JuMP
import Distributed
import HTTP
import JSON
import MutableArithmetics
import Printf
import Random
import SHA
import Statistics
import TimerOutputs
# Work-around for https://github.com/JuliaPlots/RecipesBase.jl/pull/55
# Change this back to `import RecipesBase` once the fix is tagged.
using RecipesBase
export @stageobjective
# Modelling interface.
include("user_interface.jl")
include("modeling_aids.jl")
# Default definitions for SDDP related modular utilities.
include("plugins/headers.jl")
# Tools for overloading JuMP functions
include("binary_expansion.jl")
include("JuMP.jl")
# Printing utilities.
include("cyclic.jl")
include("print.jl")
# The core SDDP code.
include("algorithm.jl")
# Specific plugins.
include("plugins/risk_measures.jl")
include("plugins/sampling_schemes.jl")
include("plugins/bellman_functions.jl")
include("plugins/stopping_rules.jl")
include("plugins/local_improvement_search.jl")
include("plugins/duality_handlers.jl")
include("plugins/parallel_schemes.jl")
include("plugins/backward_sampling_schemes.jl")
include("plugins/forward_passes.jl")
# Visualization related code.
include("visualization/publication_plot.jl")
include("visualization/spaghetti_plot.jl")
include("visualization/dashboard.jl")
include("visualization/value_functions.jl")
# Other solvers.
include("deterministic_equivalent.jl")
include("biobjective.jl")
include("alternative_forward.jl")
include("Experimental.jl")
include("MSPFormat.jl")
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 54374 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
macro _timeit_threadsafe(timer, label, block)
code = quote
# TimerOutputs is not thread-safe, so run it only if there is a single
# thread.
if Threads.nthreads() == 1
TimerOutputs.@timeit $timer $label $block
else
$block
end
end
return esc(code)
end
# to_nodal_form is an internal helper function so users can pass arguments like:
# risk_measure = SDDP.Expectation(),
# risk_measure = Dict(1=>Expectation(), 2=>WorstCase())
# risk_measure = (node_index) -> node_index == 1 ? Expectation() : WorstCase()
# It will return a dictionary with a key for each node_index in the policy
# graph, and a corresponding value of whatever the user provided.
function to_nodal_form(model::PolicyGraph{T}, element) where {T}
# Note: we don't copy element here, so if element is mutable, you should use
# to_nodal_form(model, x -> new_element()) instead. A good example is
# Vector{T}; use to_nodal_form(model, i -> T[]).
store = Dict{T,typeof(element)}()
for node_index in keys(model.nodes)
store[node_index] = element
end
return store
end
function to_nodal_form(model::PolicyGraph{T}, builder::Function) where {T}
store = Dict{T,Any}()
for node_index in keys(model.nodes)
store[node_index] = builder(node_index)
end
V = typeof(first(values(store)))
for val in values(store)
V = promote_type(V, typeof(val))
end
return Dict{T,V}(key => val for (key, val) in store)
end
function to_nodal_form(model::PolicyGraph{T}, dict::Dict{T,V}) where {T,V}
for key in keys(model.nodes)
if !haskey(dict, key)
error("Missing key: $(key).")
end
end
return dict
end
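# For example, for a linear policy graph with nodes `1`, `2`, and `3`, each of
#
#   to_nodal_form(model, Expectation())
#   to_nodal_form(model, Dict(1 => Expectation(), 2 => Expectation(), 3 => Expectation()))
#   to_nodal_form(model, i -> Expectation())
#
# returns a `Dict` mapping every node index to an `Expectation()` risk measure.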
# Internal function: returns a dictionary with a key for each node, where the
# value is a list of other nodes that contain the same children. This is useful
# because on the backward pass we can add cuts to nodes with the same children
# without having to re-solve the children.
function get_same_children(model::PolicyGraph{T}) where {T}
tmp = Dict{Set{T},Set{T}}()
for (key, node) in model.nodes
children = Set(child.term for child in node.children)
if length(children) == 0
continue
elseif haskey(tmp, children)
push!(tmp[children], key)
else
tmp[children] = Set{T}([key])
end
end
same_children = Dict{T,Vector{T}}(key => T[] for key in keys(model.nodes))
for set in values(tmp)
for v in set
same_children[v] = collect(setdiff(set, Ref(v)))
end
end
return same_children
end
# Internal struct: storage for SDDP options and cached data. Users shouldn't
# interact with this directly.
struct Options{T}
# The initial state to start from the root node.
initial_state::Dict{Symbol,Float64}
# The sampling scheme to use on the forward pass.
sampling_scheme::AbstractSamplingScheme
backward_sampling_scheme::AbstractBackwardSamplingScheme
# Storage for the set of possible sampling states at each node. We only use
# this if there is a cycle in the policy graph.
starting_states::Dict{T,Vector{Dict{Symbol,Float64}}}
# Risk measure to use at each node.
risk_measures::Dict{T,AbstractRiskMeasure}
# The delta by which to check if a state is close to a previously sampled
# state.
cycle_discretization_delta::Float64
# Flag to add cuts to similar nodes.
refine_at_similar_nodes::Bool
# The node transition matrix.
Φ::Dict{Tuple{T,T},Float64}
# A list of nodes that contain a subset of the children of node i.
similar_children::Dict{T,Vector{T}}
stopping_rules::Vector{AbstractStoppingRule}
dashboard_callback::Function
print_level::Int
start_time::Float64
log::Vector{Log}
log_file_handle::Any
log_frequency::Union{Int,Function}
forward_pass::AbstractForwardPass
duality_handler::AbstractDualityHandler
# A callback called after the forward pass.
forward_pass_callback::Any
post_iteration_callback::Any
last_log_iteration::Ref{Int}
# For threading
lock::ReentrantLock
# Internal function: users should never construct this themselves.
function Options(
model::PolicyGraph{T},
initial_state::Dict{Symbol,Float64};
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
backward_sampling_scheme::AbstractBackwardSamplingScheme = CompleteSampler(),
risk_measures = Expectation(),
cycle_discretization_delta::Float64 = 0.0,
refine_at_similar_nodes::Bool = true,
stopping_rules::Vector{AbstractStoppingRule} = SDDP.AbstractStoppingRule[],
dashboard_callback::Function = (a, b) -> nothing,
print_level::Int = 0,
start_time::Float64 = 0.0,
log::Vector{Log} = Log[],
log_file_handle = IOBuffer(),
log_frequency::Union{Int,Function} = 1,
forward_pass::AbstractForwardPass = DefaultForwardPass(),
duality_handler::AbstractDualityHandler = ContinuousConicDuality(),
forward_pass_callback = x -> nothing,
post_iteration_callback = result -> nothing,
) where {T}
return new{T}(
initial_state,
sampling_scheme,
backward_sampling_scheme,
to_nodal_form(model, x -> Dict{Symbol,Float64}[]),
to_nodal_form(model, risk_measures),
cycle_discretization_delta,
refine_at_similar_nodes,
build_Φ(model),
get_same_children(model),
stopping_rules,
dashboard_callback,
print_level,
start_time,
log,
log_file_handle,
log_frequency,
forward_pass,
duality_handler,
forward_pass_callback,
post_iteration_callback,
Ref{Int}(0), # last_log_iteration
ReentrantLock(),
)
end
end
# Internal function: set the incoming state variables of node to the values
# contained in state.
function set_incoming_state(node::Node, state::Dict{Symbol,Float64})
for (state_name, value) in state
JuMP.fix(node.states[state_name].in, value)
end
return
end
# Internal function: get the values of the outgoing state variables in node.
# Requires node.subproblem to have been solved with PrimalStatus ==
# FeasiblePoint.
function get_outgoing_state(node::Node)
values = Dict{Symbol,Float64}()
for (name, state) in node.states
        # To fix some cases of numerical infeasibilities, if the outgoing value
# is outside its bounds, project the value back onto the bounds. There
# is a pretty large (×5) penalty associated with this check because it
# typically requires a call to the solver. It is worth reducing
# infeasibilities though.
outgoing_value = JuMP.value(state.out)
if JuMP.has_upper_bound(state.out)
current_bound = JuMP.upper_bound(state.out)
if current_bound < outgoing_value
outgoing_value = current_bound
end
end
if JuMP.has_lower_bound(state.out)
current_bound = JuMP.lower_bound(state.out)
if current_bound > outgoing_value
outgoing_value = current_bound
end
end
values[name] = outgoing_value
end
return values
end
# Internal function: set the objective of node to the stage objective, plus the
# cost/value-to-go term.
function set_objective(node::Node{T}) where {T}
objective_state_component = get_objective_state_component(node)
belief_state_component = get_belief_state_component(node)
if objective_state_component != JuMP.AffExpr(0.0) ||
belief_state_component != JuMP.AffExpr(0.0)
node.stage_objective_set = false
end
if !node.stage_objective_set
JuMP.set_objective(
node.subproblem,
JuMP.objective_sense(node.subproblem),
@expression(
node.subproblem,
node.stage_objective +
objective_state_component +
belief_state_component +
bellman_term(node.bellman_function)
)
)
end
node.stage_objective_set = true
return
end
# Internal function: overload for the case where JuMP.value fails on a
# Real number.
stage_objective_value(stage_objective::Real) = stage_objective
stage_objective_value(stage_objective) = JuMP.value(stage_objective)
"""
write_subproblem_to_file(
node::Node,
filename::String;
throw_error::Bool = false,
)
Write the subproblem contained in `node` to the file `filename`.
The `throw_error` is an argument used internally by SDDP.jl. If set, an error
will be thrown.
## Example
```julia
SDDP.write_subproblem_to_file(model[1], "subproblem_1.lp")
```
"""
function write_subproblem_to_file(
node::Node,
filename::String;
throw_error::Bool = false,
)
model = MOI.FileFormats.Model(; filename = filename)
MOI.copy_to(model, JuMP.backend(node.subproblem))
MOI.write_to_file(model, filename)
if throw_error
error(
"Unable to retrieve solution from node $(node.index).\n\n",
" Termination status : $(JuMP.termination_status(node.subproblem))\n",
" Primal status : $(JuMP.primal_status(node.subproblem))\n",
" Dual status : $(JuMP.dual_status(node.subproblem)).\n\n",
"The current subproblem was written to `$(filename)`.\n\n",
"There are two common causes of this error:\n",
" 1) you have a mistake in your formulation, or you violated\n",
" the assumption of relatively complete recourse\n",
" 2) the solver encountered numerical issues\n\n",
"See https://odow.github.io/SDDP.jl/stable/tutorial/warnings/ for more information.",
)
end
return
end
"""
parameterize(node::Node, noise)
Parameterize node `node` with the noise `noise`.
"""
function parameterize(node::Node, noise)
node.parameterize(noise)
set_objective(node)
return
end
function _has_primal_solution(node::Node)
status = JuMP.primal_status(node.subproblem)
return status in (JuMP.FEASIBLE_POINT, JuMP.NEARLY_FEASIBLE_POINT)
end
function attempt_numerical_recovery(model::PolicyGraph, node::Node)
if JuMP.mode(node.subproblem) == JuMP.DIRECT
@warn(
"Unable to recover in direct mode! Remove `direct = true` when " *
"creating the policy graph."
)
else
model.ext[:numerical_issue] = true
MOI.Utilities.reset_optimizer(node.subproblem)
optimize!(node.subproblem)
end
if !_has_primal_solution(node)
model.ext[:numerical_issue] = true
# We use the `node.index` in the filename because two threads could both
# try to write the cuts to file at the same time. If, after writing this
# file, a second thread finds an infeasibility of the same node, it
# doesn't matter if we over-write this file.
filename = "model_infeasible_node_$(node.index).cuts.json"
@info "Writing cuts to the file `$filename`"
write_cuts_to_file(model, filename)
write_subproblem_to_file(
node,
"subproblem_$(node.index).mof.json";
throw_error = true,
)
end
return
end
"""
_initialize_solver(node::Node; throw_error::Bool)
After passing a model to a different process, we need to set the optimizer
again.
If `throw_error`, throw an error if the model is in direct mode.
See also: [`_uninitialize_solver`](@ref).
"""
function _initialize_solver(node::Node; throw_error::Bool)
if mode(node.subproblem) == DIRECT
if throw_error
error(
"Cannot use asynchronous solver with optimizers in direct mode.",
)
end
elseif MOI.Utilities.state(backend(node.subproblem)) == MOIU.NO_OPTIMIZER
if node.optimizer === nothing
error(
"""
You must supply an optimizer for the policy graph, either by passing
one to the `optimizer` keyword argument to `PolicyGraph`, or by
using `JuMP.set_optimizer(model, optimizer)`.
""",
)
end
set_optimizer(node.subproblem, node.optimizer)
set_silent(node.subproblem)
end
return
end
"""
_initialize_solver(model::PolicyGraph; throw_error::Bool)
After passing a model to a different process, we need to set the optimizer
again.
If `throw_error`, throw an error if the model is in direct mode.
See also: [`_uninitialize_solver`](@ref).
"""
function _initialize_solver(model::PolicyGraph; throw_error::Bool)
for (_, node) in model.nodes
_initialize_solver(node; throw_error = throw_error)
end
return
end
"""
_uninitialize_solver(model; throw_error::Bool)
Before passing a model to a different process, we need to drop the inner solver
in case it has some C pointers that we cannot serialize (e.g., HiGHS).
If `throw_error`, throw an error if the model is in direct mode.
See also: [`_initialize_solver`](@ref).
"""
function _uninitialize_solver(model::PolicyGraph; throw_error::Bool)
for (_, node) in model.nodes
if mode(node.subproblem) == DIRECT
if throw_error
error(
"Cannot use asynchronous solver with optimizers in direct mode.",
)
end
elseif MOI.Utilities.state(backend(node.subproblem)) !=
MOIU.NO_OPTIMIZER
MOI.Utilities.drop_optimizer(node.subproblem)
end
end
return
end
# Internal function: solve the subproblem associated with node given the
# incoming state variables state and realization of the stagewise-independent
# noise term noise.
function solve_subproblem(
model::PolicyGraph{T},
node::Node{T},
state::Dict{Symbol,Float64},
noise,
scenario_path::Vector{Tuple{T,S}};
duality_handler::Union{Nothing,AbstractDualityHandler},
) where {T,S}
_initialize_solver(node; throw_error = false)
# Parameterize the model. First, fix the value of the incoming state
# variables. Then parameterize the model depending on `noise`. Finally,
# set the objective.
set_incoming_state(node, state)
parameterize(node, noise)
pre_optimize_ret = if node.pre_optimize_hook !== nothing
node.pre_optimize_hook(
model,
node,
state,
noise,
scenario_path,
duality_handler,
)
else
nothing
end
JuMP.optimize!(node.subproblem)
lock(model.lock) do
model.ext[:total_solves] = get(model.ext, :total_solves, 0) + 1
return
end
if JuMP.primal_status(node.subproblem) == JuMP.MOI.INTERRUPTED
# If the solver was interrupted, the user probably hit CTRL+C but the
# solver gracefully exited. Since we're in the middle of training or
# simulation, we need to throw an interrupt exception to keep the
# interrupt percolating up to the user.
throw(InterruptException())
end
if !_has_primal_solution(node)
attempt_numerical_recovery(model, node)
end
state = get_outgoing_state(node)
stage_objective = stage_objective_value(node.stage_objective)
@_timeit_threadsafe model.timer_output "get_dual_solution" begin
objective, dual_values = get_dual_solution(node, duality_handler)
end
if node.post_optimize_hook !== nothing
node.post_optimize_hook(pre_optimize_ret)
end
return (
state = state,
duals = dual_values,
objective = objective,
stage_objective = stage_objective,
)
end
# Internal function to get the objective state at the root node.
function initialize_objective_state(first_node::Node)
objective_state = first_node.objective_state
if objective_state !== nothing
initial_objective_state = objective_state.initial_value
return initial_objective_state, length(initial_objective_state)
else
return nothing, 0
end
end
# Internal function: update the objective state given incoming `current_state`
# and `noise`.
update_objective_state(::Nothing, ::Any, ::Any) = nothing
function update_objective_state(obj_state, current_state, noise)
if length(current_state) == 1
obj_state.state = (obj_state.update(current_state[1], noise),)
else
obj_state.state = obj_state.update(current_state, noise)
end
return obj_state.state
end
# Internal function: calculate the initial belief state.
function initialize_belief(model::PolicyGraph{T}) where {T}
current_belief = Dict{T,Float64}(keys(model.nodes) .=> 0.0)
current_belief[model.root_node] = 1.0
return current_belief
end
# Internal function: calculate the minimum distance between the state `state`
# and the list of states in `starting_states` using the distance measure `norm`.
function distance(
starting_states::Vector{Dict{Symbol,Float64}},
state::Dict{Symbol,Float64},
norm::Function = inf_norm,
)
if length(starting_states) == 0
return Inf
end
return minimum(norm.(starting_states, Ref(state)); init = Inf)
end
# Internal function: the norm to use when checking the distance between two
# possible starting states. We're going to use: d(x, y) = |x - y| / (1 + |y|).
function inf_norm(x::Dict{Symbol,Float64}, y::Dict{Symbol,Float64})
norm = 0.0
for (key, value) in y
if abs(x[key] - value) > norm
norm = abs(x[key] - value) / (1 + abs(value))
end
end
return norm
end
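# For example, `inf_norm(Dict(:x => 1.0), Dict(:x => 3.0))` returns
# `|1 - 3| / (1 + |3|) = 0.5`.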
# Internal function: perform a backward pass of the SDDP algorithm along the
# scenario_path, refining the bellman function at sampled_states. Assumes that
# scenario_path does not end in a leaf node (i.e., the forward pass was solved
# with include_last_node = false)
function backward_pass(
model::PolicyGraph{T},
options::Options,
scenario_path::Vector{Tuple{T,NoiseType}},
sampled_states::Vector{Dict{Symbol,Float64}},
objective_states::Vector{NTuple{N,Float64}},
belief_states::Vector{Tuple{Int,Dict{T,Float64}}},
) where {T,NoiseType,N}
# TODO(odow): improve storage type.
cuts = Dict{T,Vector{Any}}(index => Any[] for index in keys(model.nodes))
for index in length(scenario_path):-1:1
outgoing_state = sampled_states[index]
objective_state = get(objective_states, index, nothing)
partition_index, belief_state = get(belief_states, index, (0, nothing))
items = BackwardPassItems(T, Noise)
if belief_state !== nothing
# Update the cost-to-go function for partially observable model.
for (node_index, belief) in belief_state
if iszero(belief)
continue
end
solve_all_children(
model,
model[node_index],
items,
belief,
belief_state,
objective_state,
outgoing_state,
options.backward_sampling_scheme,
scenario_path[1:index],
options.duality_handler,
options,
)
end
# We need to refine our estimate at all nodes in the partition.
for node_index in model.belief_partition[partition_index]
node = model[node_index]
lock(node.lock)
try
# Update belief state, etc.
current_belief = node.belief_state::BeliefState{T}
for (idx, belief) in belief_state
current_belief.belief[idx] = belief
end
new_cuts = refine_bellman_function(
model,
node,
node.bellman_function,
options.risk_measures[node_index],
outgoing_state,
items.duals,
items.supports,
items.probability .* items.belief,
items.objectives,
)
push!(cuts[node_index], new_cuts)
finally
unlock(node.lock)
end
end
else
node_index, _ = scenario_path[index]
node = model[node_index]
if length(node.children) == 0
continue
end
solve_all_children(
model,
node,
items,
1.0,
belief_state,
objective_state,
outgoing_state,
options.backward_sampling_scheme,
scenario_path[1:index],
options.duality_handler,
options,
)
new_cuts = refine_bellman_function(
model,
node,
node.bellman_function,
options.risk_measures[node_index],
outgoing_state,
items.duals,
items.supports,
items.probability,
items.objectives,
)
push!(cuts[node_index], new_cuts)
if options.refine_at_similar_nodes
# Refine the bellman function at other nodes with the same
# children, e.g., in the same stage of a Markovian policy graph.
for other_index in options.similar_children[node_index]
copied_probability = similar(items.probability)
other_node = model[other_index]
for (idx, child_index) in enumerate(items.nodes)
copied_probability[idx] =
get(options.Φ, (other_index, child_index), 0.0) *
items.supports[idx].probability
end
new_cuts = refine_bellman_function(
model,
other_node,
other_node.bellman_function,
options.risk_measures[other_index],
outgoing_state,
items.duals,
items.supports,
copied_probability,
items.objectives,
)
push!(cuts[other_index], new_cuts)
end
end
end
end
return cuts
end
struct BackwardPassItems{T,U}
"Given a (node, noise) tuple, index the element in the array."
cached_solutions::Dict{Tuple{T,Any},Int}
duals::Vector{Dict{Symbol,Float64}}
supports::Vector{U}
nodes::Vector{T}
probability::Vector{Float64}
objectives::Vector{Float64}
belief::Vector{Float64}
function BackwardPassItems(T, U)
return new{T,U}(
Dict{Tuple{T,Any},Int}(),
Dict{Symbol,Float64}[],
U[],
T[],
Float64[],
Float64[],
Float64[],
)
end
end
function solve_all_children(
model::PolicyGraph{T},
node::Node{T},
items::BackwardPassItems,
belief::Float64,
belief_state,
objective_state,
outgoing_state::Dict{Symbol,Float64},
backward_sampling_scheme::AbstractBackwardSamplingScheme,
scenario_path,
duality_handler::Union{Nothing,AbstractDualityHandler},
options,
) where {T}
length_scenario_path = length(scenario_path)
for child in node.children
if isapprox(child.probability, 0.0; atol = 1e-6)
continue
end
child_node = model[child.term]
lock(child_node.lock)
try
@_timeit_threadsafe model.timer_output "prepare_backward_pass" begin
restore_duality = prepare_backward_pass(
child_node,
options.duality_handler,
options,
)
end
for noise in sample_backward_noise_terms_with_state(
backward_sampling_scheme,
child_node,
outgoing_state,
)
if length(scenario_path) == length_scenario_path
push!(scenario_path, (child.term, noise.term))
else
scenario_path[end] = (child.term, noise.term)
end
if haskey(items.cached_solutions, (child.term, noise.term))
sol_index = items.cached_solutions[(child.term, noise.term)]
push!(items.duals, items.duals[sol_index])
push!(items.supports, items.supports[sol_index])
push!(items.nodes, child_node.index)
push!(items.probability, items.probability[sol_index])
push!(items.objectives, items.objectives[sol_index])
push!(items.belief, belief)
else
# Update belief state, etc.
if belief_state !== nothing
current_belief = child_node.belief_state::BeliefState{T}
current_belief.updater(
current_belief.belief,
belief_state,
current_belief.partition_index,
noise.term,
)
end
if objective_state !== nothing
update_objective_state(
child_node.objective_state,
objective_state,
noise.term,
)
end
@_timeit_threadsafe model.timer_output "solve_subproblem" begin
subproblem_results = solve_subproblem(
model,
child_node,
outgoing_state,
noise.term,
scenario_path;
duality_handler = duality_handler,
)
end
push!(items.duals, subproblem_results.duals)
push!(items.supports, noise)
push!(items.nodes, child_node.index)
push!(
items.probability,
child.probability * noise.probability,
)
push!(items.objectives, subproblem_results.objective)
push!(items.belief, belief)
items.cached_solutions[(child.term, noise.term)] =
length(items.duals)
end
end
@_timeit_threadsafe model.timer_output "prepare_backward_pass" begin
restore_duality()
end
finally
unlock(child_node.lock)
end
end
if length(scenario_path) == length_scenario_path
# No-op. There weren't any children to solve.
else
# Drop the last element (i.e., the one we added).
pop!(scenario_path)
end
return
end
"""
SDDP.calculate_bound(
model::PolicyGraph,
state::Dict{Symbol,Float64} = model.initial_root_state;
risk_measure::AbstractRiskMeasure = Expectation(),
)
Calculate the lower bound (if minimizing, otherwise upper bound) of the problem
model at the point state, assuming the risk measure at the root node is
risk_measure.
"""
function calculate_bound(
model::PolicyGraph{T},
root_state::Dict{Symbol,Float64} = model.initial_root_state;
risk_measure::AbstractRiskMeasure = Expectation(),
) where {T}
# Initialization.
noise_supports = Any[]
probabilities = Float64[]
objectives = Float64[]
current_belief = initialize_belief(model)
# Solve all problems that are children of the root node.
for child in model.root_children
if isapprox(child.probability, 0.0; atol = 1e-6)
continue
end
node = model[child.term]
lock(node.lock)
try
for noise in node.noise_terms
if node.objective_state !== nothing
update_objective_state(
node.objective_state,
node.objective_state.initial_value,
noise.term,
)
end
# Update belief state, etc.
if node.belief_state !== nothing
belief = node.belief_state::BeliefState{T}
partition_index = belief.partition_index
belief.updater(
belief.belief,
current_belief,
partition_index,
noise.term,
)
end
subproblem_results = solve_subproblem(
model,
node,
root_state,
noise.term,
Tuple{T,Any}[(child.term, noise.term)];
duality_handler = nothing,
)
push!(objectives, subproblem_results.objective)
push!(probabilities, child.probability * noise.probability)
push!(noise_supports, noise.term)
end
finally
unlock(node.lock)
end
end
# Now compute the risk-adjusted probability measure:
risk_adjusted_probability = similar(probabilities)
offset = adjust_probability(
risk_measure,
risk_adjusted_probability,
probabilities,
noise_supports,
objectives,
model.objective_sense == MOI.MIN_SENSE,
)
# Finally, calculate the risk-adjusted value.
return sum(
obj * prob for (obj, prob) in zip(objectives, risk_adjusted_probability)
) + offset
end
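# For example, after training, the risk-adjusted bound at the root state can be
# queried directly with `SDDP.calculate_bound(model)`, or at a different state
# with `SDDP.calculate_bound(model, Dict(:x => 1.0))` (assuming a state
# variable named `x`).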
struct IterationResult{T}
pid::Int
bound::Float64
cumulative_value::Float64
has_converged::Bool
status::Symbol
cuts::Dict{T,Vector{Any}}
numerical_issue::Bool
end
function iteration(model::PolicyGraph{T}, options::Options) where {T}
model.ext[:numerical_issue] = false
@_timeit_threadsafe model.timer_output "forward_pass" begin
forward_trajectory = forward_pass(model, options, options.forward_pass)
options.forward_pass_callback(forward_trajectory)
end
@_timeit_threadsafe model.timer_output "backward_pass" begin
cuts = backward_pass(
model,
options,
forward_trajectory.scenario_path,
forward_trajectory.sampled_states,
forward_trajectory.objective_states,
forward_trajectory.belief_states,
)
end
@_timeit_threadsafe model.timer_output "calculate_bound" begin
bound = calculate_bound(model)
end
lock(options.lock)
try
push!(
options.log,
Log(
length(options.log) + 1,
bound,
forward_trajectory.cumulative_value,
time() - options.start_time,
max(Threads.threadid(), Distributed.myid()),
lock(() -> model.ext[:total_solves], model.lock),
duality_log_key(options.duality_handler),
lock(() -> model.ext[:numerical_issue], model.lock),
),
)
has_converged, status =
convergence_test(model, options.log, options.stopping_rules)
return IterationResult(
max(Threads.threadid(), Distributed.myid()),
bound,
forward_trajectory.cumulative_value,
has_converged,
status,
cuts,
lock(() -> model.ext[:numerical_issue], model.lock),
)
finally
unlock(options.lock)
end
end
"""
termination_status(model::PolicyGraph)::Symbol
Query the reason why the training stopped.
"""
function termination_status(model::PolicyGraph)
if model.most_recent_training_results === nothing
return :model_not_solved
end
return model.most_recent_training_results.status
end
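# For example, after `SDDP.train(model; iteration_limit = 10)` has finished,
# `SDDP.termination_status(model)` returns `:iteration_limit`; before any call
# to `train` it returns `:model_not_solved`.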
"""
SDDP.train(model::PolicyGraph; kwargs...)
Train the policy for `model`.
## Keyword arguments
- `iteration_limit::Int`: number of iterations to conduct before termination.
- `time_limit::Float64`: number of seconds to train before termination.
- `stopping_rules`: a vector of [`SDDP.AbstractStoppingRule`](@ref)s. Defaults
to [`SimulationStoppingRule`](@ref).
- `print_level::Int`: control the level of printing to the screen. Defaults to
`1`. Set to `0` to disable all printing.
- `log_file::String`: filepath at which to write a log of the training
progress. Defaults to `SDDP.log`.
- `log_frequency::Int`: control the frequency with which the logging is
outputted (iterations/log). It must be at least `1`. Defaults to `1`.
- `log_every_seconds::Float64`: control the frequency with which the logging is
outputted (seconds/log). Defaults to `0.0`.
- `log_every_iteration::Bool`: overrides `log_frequency` and `log_every_seconds`
to force every iteration to be printed. Defaults to `false`.
- `run_numerical_stability_report::Bool`: generate (and print) a numerical
stability report prior to solve. Defaults to `true`.
- `refine_at_similar_nodes::Bool`: if SDDP can detect that two nodes have the
same children, it can cheaply add a cut discovered at one to the other. In
almost all cases this should be set to `true`.
- `cut_deletion_minimum::Int`: the minimum number of cuts to cache before
deleting cuts from the subproblem. The impact on performance is solver
specific; however, smaller values result in smaller subproblems (and
therefore quicker solves), at the expense of more time spent performing cut
selection.
- `risk_measure`: the risk measure to use at each node. Defaults to
[`Expectation`](@ref).
- `sampling_scheme`: a sampling scheme to use on the forward pass of the
algorithm. Defaults to [`InSampleMonteCarlo`](@ref).
- `backward_sampling_scheme`: a backward pass sampling scheme to use on the
backward pass of the algorithm. Defaults to `CompleteSampler`.
- `cut_type`: choose between `SDDP.SINGLE_CUT` and `SDDP.MULTI_CUT` versions of
SDDP.
- `dashboard::Bool`: open a visualization of the training over time. Defaults
to `false`.
- `parallel_scheme::AbstractParallelScheme`: specify a scheme for solving in
parallel. Defaults to `Threaded()`.
- `forward_pass::AbstractForwardPass`: specify a scheme to use for the forward
passes.
- `forward_pass_resampling_probability::Union{Nothing,Float64}`: set to a value
in `(0, 1)` to enable [`RiskAdjustedForwardPass`](@ref). Defaults to
`nothing` (disabled).
- `add_to_existing_cuts::Bool`: set to `true` to allow training a model that
was previously trained. Defaults to `false`.
- `duality_handler::AbstractDualityHandler`: specify a duality handler to use
when creating cuts.
- `post_iteration_callback::Function`: a callback with the signature
`post_iteration_callback(::IterationResult)` that is evaluated after each
iteration of the algorithm.
There is also a special option for infinite horizon problems:
- `cycle_discretization_delta`: the maximum distance between states allowed on
the forward pass. This is for advanced users only and needs to be used in
conjunction with a different `sampling_scheme`.
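## Example
A minimal training sketch (assumes `model` is an [`SDDP.PolicyGraph`](@ref) built
elsewhere; the limits shown are illustrative only):
```julia
SDDP.train(
    model;
    iteration_limit = 10,
    time_limit = 60.0,
    log_every_iteration = true,
)
```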
"""
function train(
model::PolicyGraph;
iteration_limit::Union{Int,Nothing} = nothing,
time_limit::Union{Real,Nothing} = nothing,
print_level::Int = 1,
log_file::String = "SDDP.log",
log_frequency::Int = 1,
log_every_seconds::Float64 = log_frequency == 1 ? -1.0 : 0.0,
log_every_iteration::Bool = false,
run_numerical_stability_report::Bool = true,
stopping_rules = AbstractStoppingRule[],
risk_measure = SDDP.Expectation(),
sampling_scheme = SDDP.InSampleMonteCarlo(),
cut_type = SDDP.SINGLE_CUT,
cycle_discretization_delta::Float64 = 0.0,
refine_at_similar_nodes::Bool = true,
cut_deletion_minimum::Int = 1,
backward_sampling_scheme::AbstractBackwardSamplingScheme = SDDP.CompleteSampler(),
dashboard::Bool = false,
parallel_scheme::AbstractParallelScheme = Serial(),
forward_pass::AbstractForwardPass = DefaultForwardPass(),
forward_pass_resampling_probability::Union{Nothing,Float64} = nothing,
add_to_existing_cuts::Bool = false,
duality_handler::AbstractDualityHandler = SDDP.ContinuousConicDuality(),
forward_pass_callback::Function = (x) -> nothing,
post_iteration_callback = result -> nothing,
)
if any(node -> node.objective_state !== nothing, values(model.nodes))
# FIXME(odow): Threaded is broken for objective states
parallel_scheme = Serial()
end
if forward_pass isa AlternativeForwardPass ||
forward_pass isa RegularizedForwardPass
# FIXME(odow): Threaded is broken for these forward passes
parallel_scheme = Serial()
end
if log_frequency <= 0
msg = "`log_frequency` must be at least `1`. Got $log_frequency."
throw(ArgumentError(msg))
end
if log_every_iteration
log_frequency = 1
log_every_seconds = 0.0
end
function log_frequency_f(log::Vector{Log})
if mod(length(log), log_frequency) != 0
return false
end
last = options.last_log_iteration[]
if last == 0
return true
elseif last == length(log)
return false
end
seconds = log_every_seconds
if log_every_seconds < 0.0
if log[end].time <= 10
seconds = 1.0
elseif log[end].time <= 120
seconds = 5.0
else
seconds = 30.0
end
end
return log[end].time - log[last].time >= seconds
end
if !add_to_existing_cuts && model.most_recent_training_results !== nothing
@warn("""
Re-training a model with existing cuts!
Are you sure you want to do this? The output from this training may be
misleading because the policy is already partially trained.
If you meant to train a new policy with different settings, you must
build a new model.
If you meant to refine a previously trained policy, turn off this
warning by passing `add_to_existing_cuts = true` as a keyword argument
to `SDDP.train`.
In a future release, this warning may turn into an error.
""")
end
if forward_pass_resampling_probability !== nothing
forward_pass = RiskAdjustedForwardPass(;
forward_pass = forward_pass,
risk_measure = risk_measure,
resampling_probability = forward_pass_resampling_probability,
)
end
# Reset the TimerOutput.
TimerOutputs.reset_timer!(model.timer_output)
log_file_handle = open(log_file, "a")
log = Log[]
if print_level > 0
print_helper(print_banner, log_file_handle)
print_helper(
print_problem_statistics,
log_file_handle,
model,
model.most_recent_training_results !== nothing,
parallel_scheme,
risk_measure,
sampling_scheme,
)
end
if run_numerical_stability_report
report = sprint(
io -> numerical_stability_report(
io,
model;
print = print_level > 0,
),
)
print_helper(print, log_file_handle, report)
end
if print_level > 0
print_helper(print_iteration_header, log_file_handle)
end
# Convert the vector to a Vector{AbstractStoppingRule}. Otherwise, if the user
# gives something like stopping_rules = [SDDP.IterationLimit(100)], the vector
# will be concretely typed and we can't push a TimeLimit to it.
stopping_rules = convert(Vector{AbstractStoppingRule}, stopping_rules)
# Add the limits as stopping rules. An IterationLimit or TimeLimit may
# already exist in stopping_rules, but that doesn't matter.
if iteration_limit !== nothing
push!(stopping_rules, IterationLimit(iteration_limit))
end
if time_limit !== nothing
push!(stopping_rules, TimeLimit(time_limit))
end
# If no stopping rule exists, add the default rule.
if isempty(stopping_rules)
push!(stopping_rules, SimulationStoppingRule())
end
# Update the nodes with the selected cut type (SINGLE_CUT or MULTI_CUT)
# and the cut deletion minimum.
if cut_deletion_minimum < 0
cut_deletion_minimum = typemax(Int)
end
for (_, node) in model.nodes
node.bellman_function.cut_type = cut_type
node.bellman_function.global_theta.deletion_minimum =
cut_deletion_minimum
for oracle in node.bellman_function.local_thetas
oracle.deletion_minimum = cut_deletion_minimum
end
end
dashboard_callback = if dashboard
launch_dashboard()
else
(::Any, ::Any) -> nothing
end
options = Options(
model,
model.initial_root_state;
sampling_scheme,
backward_sampling_scheme,
risk_measures = risk_measure,
cycle_discretization_delta,
refine_at_similar_nodes,
stopping_rules,
dashboard_callback,
print_level,
start_time = time(),
log,
log_file_handle,
log_frequency = log_frequency_f,
forward_pass,
duality_handler,
forward_pass_callback,
post_iteration_callback,
)
status = :not_solved
try
status = master_loop(parallel_scheme, model, options)
catch ex
# Unwrap exceptions from tasks. If there are multiple exceptions,
# rethrow only the last one.
if ex isa CompositeException
ex = last(ex.exceptions)
end
if ex isa TaskFailedException
ex = ex.task.exception
end
if ex isa InterruptException
status = :interrupted
interrupt(parallel_scheme)
else
close(log_file_handle)
throw(ex)
end
finally
# And close the dashboard callback if necessary.
dashboard_callback(nothing, true)
end
training_results = TrainingResults(status, log)
model.most_recent_training_results = training_results
if print_level > 0
log_iteration(options; force_if_needed = true)
print_helper(print_footer, log_file_handle, training_results)
if print_level > 1
print_helper(
TimerOutputs.print_timer,
log_file_handle,
model.timer_output,
)
# Annoyingly, TimerOutputs doesn't end the print section with `\n`,
# so we do it here.
print_helper(println, log_file_handle)
end
end
close(log_file_handle)
return
end
# Internal function: helper to conduct a single simulation. Users should use the
# documented, user-facing function SDDP.simulate instead.
function _simulate(
model::PolicyGraph{T},
variables::Vector{Symbol};
sampling_scheme::AbstractSamplingScheme,
custom_recorders::Dict{Symbol,Function},
duality_handler::Union{Nothing,AbstractDualityHandler},
skip_undefined_variables::Bool,
incoming_state::Dict{Symbol,Float64},
) where {T}
# Sample a scenario path.
scenario_path, _ = sample_scenario(model, sampling_scheme)
# Storage for the simulation results.
simulation = Dict{Symbol,Any}[]
current_belief = initialize_belief(model)
# A cumulator for the stage-objectives.
cumulative_value = 0.0
# Objective state interpolation.
objective_state_vector, N =
initialize_objective_state(model[scenario_path[1][1]])
objective_states = NTuple{N,Float64}[]
for (depth, (node_index, noise)) in enumerate(scenario_path)
node = model[node_index]
lock(node.lock)
try
# Objective state interpolation.
objective_state_vector = update_objective_state(
node.objective_state,
objective_state_vector,
noise,
)
if objective_state_vector !== nothing
push!(objective_states, objective_state_vector)
end
if node.belief_state !== nothing
belief = node.belief_state::BeliefState{T}
partition_index = belief.partition_index
current_belief = belief.updater(
belief.belief,
current_belief,
partition_index,
noise,
)
else
current_belief = Dict(node_index => 1.0)
end
# Solve the subproblem.
subproblem_results = solve_subproblem(
model,
node,
incoming_state,
noise,
scenario_path[1:depth];
duality_handler = duality_handler,
)
# Add the stage-objective
cumulative_value += subproblem_results.stage_objective
# Record useful variables from the solve.
store = Dict{Symbol,Any}(
:node_index => node_index,
:noise_term => noise,
:stage_objective => subproblem_results.stage_objective,
:bellman_term =>
subproblem_results.objective -
subproblem_results.stage_objective,
:objective_state => objective_state_vector,
:belief => copy(current_belief),
)
if objective_state_vector !== nothing && N == 1
store[:objective_state] = store[:objective_state][1]
end
# Loop through the primal variable values that the user wants.
for variable in variables
if haskey(node.subproblem.obj_dict, variable)
# Note: we broadcast the call to value for variables which are
# containers (like Array, Containers.DenseAxisArray, etc). If
# the variable is a scalar (e.g. just a plain VariableRef), the
# broadcast preserves the scalar shape.
# TODO: what if the variable container is a dictionary? They
# should be using Containers.SparseAxisArray, but this might not
# always be the case...
store[variable] = JuMP.value.(node.subproblem[variable])
elseif skip_undefined_variables
store[variable] = NaN
else
error(
"No variable named $(variable) exists in the subproblem.",
" If you want to simulate the value of a variable, make ",
"sure it is defined in _all_ subproblems, or pass ",
"`skip_undefined_variables=true` to `simulate`.",
)
end
end
# Loop through any custom recorders that the user provided.
for (sym, recorder) in custom_recorders
store[sym] = recorder(node.subproblem)
end
# Add the store to our list.
push!(simulation, store)
# Set outgoing state as the incoming state for the next node.
incoming_state = copy(subproblem_results.state)
finally
unlock(node.lock)
end
end
return simulation
end
function _initial_state(model::PolicyGraph)
return Dict(String(k) => v for (k, v) in model.initial_root_state)
end
"""
simulate(
model::PolicyGraph,
number_replications::Int = 1,
variables::Vector{Symbol} = Symbol[];
sampling_scheme::AbstractSamplingScheme =
InSampleMonteCarlo(),
custom_recorders = Dict{Symbol, Function}(),
duality_handler::Union{Nothing,AbstractDualityHandler} = nothing,
skip_undefined_variables::Bool = false,
parallel_scheme::AbstractParallelScheme = Serial(),
incoming_state::Dict{String,Float64} = _initial_state(model),
)::Vector{Vector{Dict{Symbol,Any}}}
Perform a simulation of the policy model with `number_replications` replications.
## Return data structure
Returns a vector with one element for each replication. Each element is a vector
with one element for each node in the scenario that was sampled. Each element in
that vector is a dictionary containing information about the subproblem that was
solved.
In that dictionary there are four special keys:
- `:node_index`, which records the index of the sampled node in the policy model
- `:noise_term`, which records the noise observed at the node
- `:stage_objective`, which records the stage-objective of the subproblem
- `:bellman_term`, which records the cost/value-to-go of the node.
The sum of `:stage_objective + :bellman_term` will equal the objective value of
the solved subproblem.
In addition to the special keys, the dictionary will contain the result of
`key => JuMP.value(subproblem[key])` for each `key` in `variables`. This is
useful to obtain the primal value of the state and control variables.
## Positional arguments
- `model`: the model to simulate
- `number_replications::Int = 1`: the number of simulation replications to
conduct, that is, the length of the simulation vector that is returned by
this function. If omitted, this defaults to `1`.
- `variables::Vector{Symbol} = Symbol[]`: a list of the variable names to
record the value of in each stage.
## Keyword arguments
- `sampling_scheme`: the sampling scheme used when simulating.
- `custom_recorders`: see `Custom recorders` section below.
- `duality_handler`: the [`SDDP.AbstractDualityHandler`](@ref) used to compute
dual variables. If you do not require dual variables (or if they are not
available), pass `duality_handler = nothing`.
- `skip_undefined_variables`: If you attempt to simulate the value of a
variable that is only defined in some of the stage problems, an error will be
thrown. To override this (and return a `NaN` instead), pass
`skip_undefined_variables = true`.
- `parallel_scheme::AbstractParallelScheme`: specify a scheme for simulating in
parallel. Defaults to [`Serial`](@ref).
- `incoming_state`: pass an initial value of the state variable, if it differs
from that at the root node. Each key should be the string name of the state
variable.
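## Example
A minimal sketch (assumes `model` has already been trained and that `x` is an
`SDDP.State` variable declared in every subproblem):
```julia
simulations = SDDP.simulate(model, 10, [:x])
# Outgoing value of the state `x` at stage 2 of replication 1:
x_stage_2 = simulations[1][2][:x].out
```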
## Custom recorders
For more complicated data, the `custom_recorders` keyword argument can be used.
For example, to record the dual of a constraint named `my_constraint`, pass the
following:
```julia
simulation_results = SDDP.simulate(model, 2;
custom_recorders = Dict{Symbol, Function}(
:constraint_dual => sp -> JuMP.dual(sp[:my_constraint])
)
)
```
The value of the dual in the first stage of the second replication can be
accessed as:
```julia
simulation_results[2][1][:constraint_dual]
```
"""
function simulate(
model::PolicyGraph,
number_replications::Int = 1,
variables::Vector{Symbol} = Symbol[];
sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),
custom_recorders = Dict{Symbol,Function}(),
duality_handler::Union{Nothing,AbstractDualityHandler} = nothing,
skip_undefined_variables::Bool = false,
parallel_scheme::AbstractParallelScheme = Serial(),
incoming_state::Dict{String,Float64} = _initial_state(model),
)
return _simulate(
model,
parallel_scheme,
number_replications,
variables;
sampling_scheme = sampling_scheme,
custom_recorders = custom_recorders,
duality_handler = duality_handler,
skip_undefined_variables = skip_undefined_variables,
incoming_state = Dict(Symbol(k) => v for (k, v) in incoming_state),
)
end
"""
DecisionRule(model::PolicyGraph{T}; node::T)
Create a decision rule for node `node` in `model`.
## Example
```julia
rule = SDDP.DecisionRule(model; node = 1)
```
"""
struct DecisionRule{T}
model::PolicyGraph{T}
node::Node{T}
function DecisionRule(model::PolicyGraph{T}; node::T) where {T}
return new{T}(model, model[node])
end
end
function Base.show(io::IO, pi::DecisionRule)
print(io, "A decision rule for node $(pi.node.index)")
return
end
"""
evaluate(
rule::DecisionRule;
incoming_state::Dict{Symbol,Float64},
noise = nothing,
controls_to_record = Symbol[],
)
Evaluate the decision rule `rule` at the point described by the `incoming_state`
and `noise`.
If the node is deterministic, omit the `noise` argument.
Pass a list of symbols to `controls_to_record` to save the optimal primal
solution corresponding to the names registered in the model.
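## Example
A usage sketch (assumes a trained `model` with a state variable `x` and a
control variable `u`; the node index and values are illustrative only):
```julia
rule = SDDP.DecisionRule(model; node = 1)
solution = SDDP.evaluate(
    rule;
    incoming_state = Dict(:x => 1.0),
    controls_to_record = [:u],
)
```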
"""
function evaluate(
rule::DecisionRule{T};
incoming_state::Dict{Symbol,Float64},
noise = nothing,
controls_to_record = Symbol[],
) where {T}
ret = solve_subproblem(
rule.model,
rule.node,
incoming_state,
noise,
Tuple{T,Any}[];
duality_handler = nothing,
)
return (
stage_objective = ret.stage_objective,
outgoing_state = ret.state,
controls = Dict(
c => value.(rule.node.subproblem[c]) for c in controls_to_record
),
)
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2212 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
AlternativeForwardPass(
forward_model::SDDP.PolicyGraph{T};
forward_pass::AbstractForwardPass = DefaultForwardPass(),
)
A forward pass that simulates using `forward_model`, which may be different from
the model used in the backward pass.
When using this forward pass, you should almost always pass
[`SDDP.AlternativePostIterationCallback`](@ref) to the `post_iteration_callback`
argument of [`SDDP.train`](@ref).
This forward pass is most useful when the `forward_model` is non-convex and we
use a convex approximation of the model in the backward pass.
For example, in optimal power flow models, we can use an AC-OPF formulation as
the `forward_model` and a DC-OPF formulation as the backward model.
For more details see the paper:
Rosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva,
T., and Dowson, O. (2021). Assessing the cost of network simplifications in
long-term hydrothermal dispatch planning models. IEEE Transactions on
Sustainable Energy. 13(1), 196-206.
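## Example
A usage sketch (assumes `forward_model` and `backward_model` are two
[`SDDP.PolicyGraph`](@ref)s defined over the same graph; the iteration limit is
illustrative only):
```julia
SDDP.train(
    backward_model;
    forward_pass = SDDP.AlternativeForwardPass(forward_model),
    post_iteration_callback = SDDP.AlternativePostIterationCallback(forward_model),
    iteration_limit = 10,
)
```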
"""
struct AlternativeForwardPass{T} <: AbstractForwardPass
model::PolicyGraph{T}
forward_pass::AbstractForwardPass
function AlternativeForwardPass(
model::PolicyGraph{T};
forward_pass::AbstractForwardPass = DefaultForwardPass(),
) where {T}
return new{T}(model, forward_pass)
end
end
function forward_pass(
::PolicyGraph{T},
options::Options,
pass::AlternativeForwardPass{T},
) where {T}
return forward_pass(pass.model, options, pass.forward_pass)
end
"""
AlternativePostIterationCallback(forward_model::PolicyGraph)
A post-iteration callback that should be used whenever [`SDDP.AlternativeForwardPass`](@ref)
is used.
"""
struct AlternativePostIterationCallback{T}
model::PolicyGraph{T}
end
function (callback::AlternativePostIterationCallback)(result::IterationResult)
slave_update(callback.model, result)
return
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2098 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors, Lea Kapelevich.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
_bitsrequired(x::T) where {T<:Integer} = floor(T, log(x) * inv(log(2))) + 1
"""
binexpand(x::Int, maximum::Int)
Returns a vector of binary coefficients for the binary expansion of `x`.
Length of the result is determined by the number of bits required to represent
`maximum` in binary.
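## Example
A worked sketch: representing `maximum = 7` needs three bits, so the result has
length three and is ordered from least- to most-significant bit:
```julia
binexpand(5, 7)  # returns [1, 0, 1] because 5 = 1*2^0 + 0*2^1 + 1*2^2
```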
"""
function binexpand(x::T, maximum::T) where {T<:Integer}
if x < 0
error(
"Cannot perform binary expansion on a negative number." *
"Initial values of state variables must be nonnegative.",
)
elseif maximum <= 0
error(
"Cannot perform binary expansion on zero-length " *
"vector. Upper bounds of state variables must be positive.",
)
end
y = zeros(T, _bitsrequired(maximum))
@inbounds for i in length(y):-1:1
k = 2^(i - 1)
if x >= k
y[i] = 1
x -= k
end
end
if x > 0
error("Unable to expand binary. Overflow of $x.")
end
return y
end
"""
binexpand(x::Float64, maximum::Float64, eps::Float64 = 0.1)
Returns a vector of binary coefficients for the binary expansion of `x`.
Length of the result is determined by the number of bits required to represent
`maximum` in binary to precision `eps`.
"""
function binexpand(x::Float64, maximum::Float64, eps::Float64 = 0.1)
@assert eps > 0
return binexpand(round(Int, x / eps), round(Int, maximum / eps))
end
"""
bincontract(y::Vector{T}) where {T}
For vector `y`, evaluates ∑ᵢ 2ⁱ⁻¹yᵢ.
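## Example
A worked sketch, inverting the `binexpand` example above:
```julia
bincontract([1, 0, 1])  # returns 5 = 1*2^0 + 0*2^1 + 1*2^2
```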
"""
function bincontract(y::Vector{T}) where {T}
x = zero(T)
@inbounds for i in eachindex(y)
x += 2^(i - 1) * y[i]
end
return x
end
"""
bincontract(y::Vector, eps::Float64)
For vector `y`, evaluates ∑ᵢ 2ⁱ⁻¹yᵢ * `eps`.
"""
bincontract(y::Vector, eps::Float64) = bincontract(y) * eps
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 4297 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
set_biobjective_functions(subproblem, objective_1, objective_2)
Set the biobjective functions in `subproblem`.
This must be called from inside [`SDDP.parameterize`](@ref).
We recommend you define both objectives as JuMP expressions.
See also: [`initialize_biobjective_subproblem`](@ref).
!!! warning
This function is experimental! It may change in any future release.
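## Example
A sketch of the intended usage (assumes [`initialize_biobjective_subproblem`](@ref)
has been called, and that `subproblem` defines the variables appearing in the two
expressions; all names are illustrative):
```julia
SDDP.parameterize(subproblem, [1]) do ω
    objective_1 = @expression(subproblem, 2x + y)
    objective_2 = @expression(subproblem, x + 3y)
    SDDP.set_biobjective_functions(subproblem, objective_1, objective_2)
end
```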
"""
function set_biobjective_functions(subproblem, objective_1, objective_2)
λ = SDDP.objective_state(subproblem)
@stageobjective(subproblem, λ * objective_1 + (1 - λ) * objective_2)
return
end
"""
initialize_biobjective_subproblem(subproblem)
Run some initialization code to setup a biobjective problem.
This must be called outside [`SDDP.parameterize`](@ref).
!!! warning
This function is experimental! It may change in any future release.
"""
function initialize_biobjective_subproblem(subproblem)
SDDP.add_objective_state(
subproblem;
initial_value = 0.0,
lower_bound = 0.0,
upper_bound = 1.0,
lipschitz = 1e6,
) do y, _
return y
end
return
end
"""
set_trade_off_weight(model::SDDP.PolicyGraph, weight::Float64)
Set the trade-off weight of a bi-objective problem to `weight`.
!!! warning
This function is experimental! It may change in any future release.
"""
function set_trade_off_weight(model::SDDP.PolicyGraph, weight::Float64)
@assert 0 <= weight <= 1
for (_, node) in model.nodes
node.objective_state.initial_value = (weight,)
node.objective_state.state = (weight,)
end
return
end
"""
train_biobjective(
model::SDDP.PolicyGraph;
solution_limit::Int,
include_timing::Bool = false,
kwargs...,
)
Train a biobjective problem using a variation of the non-inferior set estimation
method.
## Arguments
* `solution_limit` is the maximum number of unique policies to return.
* `kwargs` are passed to [`SDDP.train`](@ref) when solving the scalarized
problems.
## Returns
Returns a dictionary mapping trade-off weights to their scalarized objective
value.
If `include_timing`, returns a dictionary mapping trade-off weights to a tuple
of the scalarized objective value and the solution time to date.
!!! warning
This function is experimental! It may change in any future release.
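## Example
A usage sketch (assumes `model` was set up with
[`initialize_biobjective_subproblem`](@ref) and [`set_biobjective_functions`](@ref);
the keyword limits are illustrative and are forwarded to [`SDDP.train`](@ref)):
```julia
solutions = SDDP.train_biobjective(
    model;
    solution_limit = 5,
    iteration_limit = 10,
)
```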
"""
function train_biobjective(
model::SDDP.PolicyGraph;
solution_limit::Int,
include_timing::Bool = false,
log_file_prefix::String = "SDDP",
stopping_rules::Function = weight -> SDDP.AbstractStoppingRule[],
kwargs...,
)
start_time = time()
solutions = if include_timing
Dict{Float64,Tuple{Float64,Float64}}()
else
Dict{Float64,Float64}()
end
value(bound) = include_timing ? (bound, time() - start_time) : bound
for weight in (0.0, 1.0)
set_trade_off_weight(model, weight)
SDDP.train(
model;
add_to_existing_cuts = true,
run_numerical_stability_report = false,
log_file = "$(log_file_prefix)_$(weight).log",
stopping_rules = stopping_rules(weight),
kwargs...,
)
solutions[weight] = value(SDDP.calculate_bound(model))
end
queue = Tuple{Float64,Float64}[(0.0, 1.0)]
while length(queue) > 0 && length(solutions) < solution_limit
(a, b) = popfirst!(queue)
w = 0.5 * (a + b)
set_trade_off_weight(model, w)
SDDP.train(
model;
add_to_existing_cuts = true,
run_numerical_stability_report = false,
log_file = "$(log_file_prefix)_$(w).log",
stopping_rules = stopping_rules(w),
kwargs...,
)
bound = SDDP.calculate_bound(model)
solutions[w] = value(bound)
best_bound = 0.5 * (solutions[a][1] + solutions[b][1])
if !isapprox(best_bound, bound; rtol = 1e-4)
push!(queue, (a, w))
push!(queue, (w, b))
end
end
return solutions
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 2585 | # This source code is licensed under the Creative Commons ShareAlike license 3.
# For more details, see:
# https://en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License
"""
is_cyclic(G::PolicyGraph{T}) where {T}
Return `true` or `false` if the graph `G` contains a cycle.
We implement Tarjan's strongly connected components algorithm to detect cycles
in a directed graph in O(|V| + |E|) time. See this Wiki for details
https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
The notation here follows the pseudocode in the Wikipedia article, rather than
the typical JuMP style guide.
Since we're only checking for cyclic graphs, we can stop as soon as one is
found. A cyclic graph has a strongly connected component with at least two
elements, or it has a node that connects to itself. That means we don't need
to store the set of all strongly connected components.
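## Example
A usage sketch (assumes `model` is a `PolicyGraph`; for a linear policy graph
this returns `false`):
```julia
SDDP.is_cyclic(model)
```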
"""
function is_cyclic(G::PolicyGraph{T}) where {T}
index_counter = 0
S = T[]
low_link = Dict{T,Int}()
index = Dict{T,Int}()
on_stack = Dict{T,Bool}()
function strong_connect(v)
index[v] = index_counter
low_link[v] = index_counter
index_counter += 1
push!(S, v)
on_stack[v] = true
for child in G[v].children
w = child.term
if v == w
# Cycle detected: Type I: a node that loops to itself.
return true
end
if !haskey(index, w)
if strong_connect(w)
# A cycle was detected further down the tree. Propagate it
# upwards.
return true
end
low_link[v] = min(low_link[v], low_link[w])
elseif on_stack[w]
low_link[v] = min(low_link[v], index[w])
end
end
if low_link[v] == index[v]
scc = T[]
w = G.root_node
while v != w
w = pop!(S)
on_stack[w] = false
push!(scc, w)
end
if length(scc) > 1
# Cycle detected: Type II: a strongly connected component with
# more than one element.
return true
end
end
return false # No cycle detected.
end
for v in keys(G.nodes)
if !haskey(index, v)
if strong_connect(v)
# Cycle detected!
return true
end
end
end
return false
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 7503 | # Copyright (c) 2017-24, Oscar Dowson and SDDP.jl contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
function throw_detequiv_error(msg::String)
return error("Unable to formulate deterministic equivalent: ", msg)
end
struct ScenarioTreeNode{T}
node::Node{T}
noise::Any
probability::Float64
children::Vector{ScenarioTreeNode{T}}
states::Dict{Symbol,State{JuMP.VariableRef}}
end
struct ScenarioTree{T}
children::Vector{ScenarioTreeNode{T}}
end
function add_node_to_scenario_tree(
parent::Vector{ScenarioTreeNode{T}},
pg::PolicyGraph{T},
node::Node{T},
probability::Float64,
check_time_limit::Function,
) where {T}
if node.objective_state !== nothing
throw_detequiv_error("Objective states detected!")
elseif node.belief_state !== nothing
throw_detequiv_error("Belief states detected!")
elseif length(node.bellman_function.global_theta.cuts) > 0
throw_detequiv_error(
"Model has been used for training. Can only form deterministic " *
"equivalent on a fresh model.",
)
else
check_time_limit()
end
for noise in node.noise_terms
scenario_node = ScenarioTreeNode(
node,
noise.term,
probability * noise.probability,
ScenarioTreeNode{T}[],
Dict{Symbol,State{JuMP.VariableRef}}(),
)
for child in node.children
add_node_to_scenario_tree(
scenario_node.children,
pg,
pg[child.term],
probability * noise.probability * child.probability,
check_time_limit,
)
end
push!(parent, scenario_node)
end
return
end
function copy_and_replace_variables(
src::Vector,
map::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return copy_and_replace_variables.(src, Ref(map))
end
function copy_and_replace_variables(
src::Real,
::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return src
end
function copy_and_replace_variables(
src::JuMP.VariableRef,
src_to_dest_variable::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return src_to_dest_variable[src]
end
function copy_and_replace_variables(
src::JuMP.GenericAffExpr,
src_to_dest_variable::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return JuMP.GenericAffExpr(
src.constant,
Pair{VariableRef,Float64}[
src_to_dest_variable[key] => val for (key, val) in src.terms
],
)
end
function copy_and_replace_variables(
src::JuMP.GenericQuadExpr,
src_to_dest_variable::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return JuMP.GenericQuadExpr(
copy_and_replace_variables(src.aff, src_to_dest_variable),
Pair{UnorderedPair{VariableRef},Float64}[
UnorderedPair{VariableRef}(
src_to_dest_variable[pair.a],
src_to_dest_variable[pair.b],
) => coef for (pair, coef) in src.terms
],
)
end
function copy_and_replace_variables(
src::Any,
::Dict{JuMP.VariableRef,JuMP.VariableRef},
)
return throw_detequiv_error(
"`copy_and_replace_variables` is not implemented for functions like `$(src)`.",
)
end
function add_scenario_to_ef(
model::JuMP.Model,
child::ScenarioTreeNode,
check_time_limit::Function,
)
check_time_limit()
node = child.node
parameterize(node, child.noise)
# Add variables:
src_variables = JuMP.all_variables(node.subproblem)
x = @variable(model, [1:length(src_variables)])
var_src_to_dest = Dict{JuMP.VariableRef,JuMP.VariableRef}()
for (src, dest) in zip(src_variables, x)
var_src_to_dest[src] = dest
name = JuMP.name(src)
if !isempty(name)
# append node index to original variable name
JuMP.set_name(dest, string(name, "#", node.index))
else
# append node index to original variable index
var_name = string("_[", index(src).value, "]")
JuMP.set_name(dest, string(var_name, "#", node.index))
end
end
# Add constraints:
for (F, S) in JuMP.list_of_constraint_types(node.subproblem)
for con in JuMP.all_constraints(node.subproblem, F, S)
obj = JuMP.constraint_object(con)
new_func = copy_and_replace_variables(obj.func, var_src_to_dest)
@constraint(model, new_func in obj.set)
end
end
# Add objective:
current = JuMP.objective_function(model)
subproblem_objective =
copy_and_replace_variables(node.stage_objective, var_src_to_dest)
JuMP.set_objective_function(
model,
current + child.probability * subproblem_objective,
)
# Add state variables to child.states:
for (key, state) in node.states
child.states[key] =
State(var_src_to_dest[state.in], var_src_to_dest[state.out])
end
# Recurse down the tree.
for child_2 in child.children
add_scenario_to_ef(model, child_2, check_time_limit)
end
return
end
function add_linking_constraints(
model::JuMP.Model,
node::ScenarioTreeNode,
check_time_limit::Function,
)
check_time_limit()
for child in node.children
for key in keys(node.states)
@constraint(model, node.states[key].out == child.states[key].in)
end
add_linking_constraints(model, child, check_time_limit)
end
return
end
"""
deterministic_equivalent(
pg::PolicyGraph{T},
optimizer = nothing;
time_limit::Union{Real,Nothing} = 60.0,
)
Form a JuMP model that represents the deterministic equivalent of the problem.
## Examples
```julia
deterministic_equivalent(model)
```
```julia
deterministic_equivalent(model, HiGHS.Optimizer)
```
"""
function deterministic_equivalent(
pg::PolicyGraph{T},
optimizer = nothing;
time_limit::Union{Real,Nothing} = 60.0,
) where {T}
# Step 0: helper function for the time limit.
start_time = time()
time_limit = time_limit === nothing ? typemax(Float64) : Float64(time_limit)
function check_time_limit()
if time() - start_time > time_limit::Float64
throw_detequiv_error("Time limit exceeded!")
end
end
# Step 1: convert the policy graph into a scenario tree.
if is_cyclic(pg)
throw_detequiv_error("Cyclic policy graph detected!")
end
tree = ScenarioTree{T}(ScenarioTreeNode{T}[])
for child in pg.root_children
add_node_to_scenario_tree(
tree.children,
pg,
pg[child.term],
child.probability,
check_time_limit,
)
end
# Step 2: create an extensive-form JuMP model and add subproblems.
model = optimizer === nothing ? JuMP.Model() : JuMP.Model(optimizer)
set_objective_sense(model, pg.objective_sense)
for child in tree.children
add_scenario_to_ef(model, child, check_time_limit)
end
# Step 3: add linking constraints between the nodes in the scenario tree.
for child in tree.children
add_linking_constraints(model, child, check_time_limit)
for (key, value) in pg.initial_root_state
JuMP.fix(child.states[key].in, value; force = true)
end
end
# Return the model
return model
end
| SDDP | https://github.com/odow/SDDP.jl.git |
|
[
"MPL-2.0"
] | 1.8.1 | 8e9842a0dc6b76edadab3486b07be9ee16f01ea0 | code | 6444 | # The function `_lattice_approximation` is derived from a function of the same name in the
# `ScenTrees.jl` package by Kipngeno Kirui and released under the MIT license.
# The reproduced function, and other functions contained only in this file, are also
# released under MIT.
#
# Copyright (c) 2019 Kipngeno Kirui <[email protected]>
# Copyright (c) 2019 Oscar Dowson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
function find_min(x::Vector{T}, y::T) where {T<:Real}
best_i = 0
best_z = Inf
for i in 1:length(x)
z = abs(x[i] - y)
if z < best_z
best_i = i
best_z = z
end
end
return best_z, best_i
end
function _lattice_approximation(
f::Function,
states::Vector{Int},
scenarios::Int,
)
return _lattice_approximation(
f,
states,
scenarios,
[f()::Vector{Float64} for _ in 1:scenarios],
)
end
function _quantiles(x, N)
if N == 1
return [Statistics.mean(x)]
end
return Statistics.quantile(x, range(0.01, 0.99; length = N))
end
function _lattice_approximation(
f::Function,
states::Vector{Int},
scenarios::Int,
simulations::Vector{Vector{Float64}},
)
simulation_matrix = reduce(hcat, simulations)
support = map(1:length(states)) do t
return _quantiles(@view(simulation_matrix[t, :]), states[t])
end
probability = [zeros(states[t-1], states[t]) for t in 2:length(states)]
prepend!(probability, Ref(zeros(1, states[1])))
distance = 0.0
for (n, path) in enumerate(simulations)
dist, last_index = 0.0, 1
for t in 1:length(states)
for i in 1:length(states[t])
if sum(@view probability[t][:, i]) < 1.3 * sqrt(n) / states[t]
support[t][i] = path[t]
end
end
min_dist, best_idx = find_min(support[t], path[t])
dist += min_dist^2
probability[t][last_index, best_idx] += 1.0
support[t][best_idx] -=
min_dist * (support[t][best_idx] - path[t]) / (3000 + n)^0.75
last_index = best_idx
end
distance = (distance * (n - 1) + dist) / n
end
for p in probability
p ./= sum(p; dims = 2)
if any(isnan, p)
p[vec(isnan.(sum(p; dims = 2))), :] .= 0.0
end
end
return support, probability
end
"""
_allocate_support_budget(f, budget, scenarios)
Allocate the `budget` nodes amongst the stages for a Markovian approximation.
By default, we distribute nodes based on the relative variance of the stages.
"""
function _allocate_support_budget(
f::Function,
budget::Int,
scenarios::Int,
)::Vector{Int}
return _allocate_support_budget(
[f()::Vector{Float64} for _ in 1:scenarios],
budget,
scenarios,
)
end
function _allocate_support_budget(
simulations::Vector{Vector{Float64}},
budget::Int,
scenarios::Int,
)::Vector{Int}
stage_var = Statistics.var(simulations)
states = ones(Int, length(stage_var))
if budget < length(stage_var)
@warn(
"Budget for nodes is less than the number of stages. Using one " *
"node per stage.",
)
return states
end
s = sum(stage_var)
if s ≈ 0.0
# If the sum of the variances is 0, then the simulator must be
# deterministic. Regardless of the budget, return a single Markov state
# for each stage.
return states
end
for i in 1:length(states)
states[i] = max(1, round(Int, budget * stage_var[i] / s))
end
while sum(states) != budget
if sum(states) > budget
states[argmax(states)] -= 1
else
states[argmin(states)] += 1
end
end
return states
end
_allocate_support_budget(::Any, budget::Vector{Int}, ::Int) = budget
"""
MarkovianGraph(
simulator::Function;
budget::Union{Int,Vector{Int}},
scenarios::Int = 1000,
)
Construct a Markovian graph by fitting Markov chain to scenarios generated by
`simulator()`.
`budget` is the total number of nodes in the resulting Markov chain. This can
either be specified as a single `Int`, in which case we will attempt to
intelligently distribute the nodes between stages. Alternatively, `budget` can
be a `Vector{Int}`, which details the number of Markov states to have in each
stage.
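## Example
A usage sketch (the three-stage `simulator` below is illustrative only):
```julia
simulator() = cumsum(rand(3))
graph = SDDP.MarkovianGraph(simulator; budget = 10, scenarios = 100)
```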
"""
function MarkovianGraph(
simulator::Function;
budget::Union{Int,Vector{Int}},
scenarios::Int = 1000,
)
scenarios = max(scenarios, 10)
simulations = [simulator()::Vector{Float64} for _ in 1:scenarios]
states = _allocate_support_budget(simulations, budget, scenarios)
support, probability =
_lattice_approximation(simulator, states, scenarios, simulations)
g = Graph((0, 0.0))
for (i, si) in enumerate(support[1])
_add_node_if_missing(g, (1, si))
_add_to_or_create_edge(g, (0, 0.0) => (1, si), probability[1][1, i])
end
for t in 2:length(support)
for (j, sj) in enumerate(support[t])
_add_node_if_missing(g, (t, sj))
for (i, si) in enumerate(support[t-1])
_add_to_or_create_edge(
g,
(t - 1, si) => (t, sj),
probability[t][i, j],
)
end
end
end
return g
end
| SDDP | https://github.com/odow/SDDP.jl.git |