licenses (sequence, length 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, length 2-8) | text (string, length 25-67.1M) | package_name (string, length 2-41) | repo (string, length 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 3330 | """
$(@__MODULE__).bitsize(T::Type) -> Int
$(@__MODULE__).bitsize(x::T) where T -> Int
Return the size of the internal binary representation of `T` in bits.
For `Bool` the function returns `1`.
See also `Base.sizeof`.
"""
bitsize(::T) where T = bitsize(T)
bitsize(::Type{T}) where T = 8*sizeof(T)
bitsize(::Type{Bool}) = 1
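# Quick illustration (not part of the package): bitsize reports bits, not
# bytes, and Bool is special-cased to a single bit.
let
    @assert bitsize(UInt8) == 8
    @assert bitsize(Int64) == 64
    @assert bitsize(true) == 1
end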
"""
$(@__MODULE__).top_set_bit(x::AbstractBitInteger) -> Int
Return the position of the highest set bit in `x` (counting from `1`),
or return `0` if `x` is `0`.
This function is analogous to Julia's internal function `Base.top_set_bit`,
but it is also fast and correct for bit integers defined by `BitIntegers.jl`.
See also `Base.top_set_bit`, [`$(@__MODULE__).AbstractBitInteger`](@ref).
"""
top_set_bit(x::T) where T <: AbstractBitInteger = bitsize(T) - leading_zeros(x)
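# Worked example (illustration only, assuming AbstractBitInteger from the
# surrounding package covers the Base bit integer types): the highest set bit
# of 0b00101000 is 2^5, i.e. position 6 counting from 1, and 0 maps to 0.
let
    @assert top_set_bit(UInt8(0b00101000)) == 6
    @assert top_set_bit(UInt8(0)) == 0
end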
"""
$(@__MODULE__).unsafe_shl(x::U, i::Integer) where U <: AbstractBitInteger -> U
This is a fast, but unsafe version of the left bit shift operator `x << i`.
The shift `i` is assumed to be between `0` and `bitsize(x)-1`.
See also [`$(@__MODULE__).bitsize`](@ref), [`$(@__MODULE__).AbstractBitInteger`](@ref).
"""
@generated function unsafe_shl(x::U, i::Integer) where U <: AbstractBitInteger
b = bitsize(U)
ir = """
%r = shl i$b %0, %1
ret i$b %r
"""
quote
$(Expr(:meta, :inline))
Base.llvmcall($ir, U, Tuple{U, U}, x, i % U)
end
end
"""
$(@__MODULE__).unsafe_lshr(x::U, i::Integer) where U <: AbstractBitInteger -> U
This is a fast, but unsafe version of the logical (or unsigned) right bit shift operator `x >>> i`.
The shift `i` is assumed to be between `0` and `bitsize(x)-1`.
See also [`$(@__MODULE__).bitsize`](@ref), [`$(@__MODULE__).AbstractBitInteger`](@ref).
"""
@generated function unsafe_lshr(x::U, i::Integer) where U <: AbstractBitInteger
b = bitsize(U)
ir = """
%r = lshr i$b %0, %1
ret i$b %r
"""
quote
$(Expr(:meta, :inline))
Base.llvmcall($ir, U, Tuple{U, U}, x, i % U)
end
end
blsi(x::Integer) = x & -x            # extract lowest set bit, compiles to single blsi instruction
blsr(x::Integer) = x & (x-one(x))    # reset lowest set bit, compiles to single blsr instruction
blsmsk(x::Integer) = x ⊻ (x-one(x))  # get mask up to lowest set bit, compiles to single blsmsk instruction
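# Quick check of the three bit tricks above (illustration only):
let m = 0b01101000
    @assert blsi(m)   == 0b00001000   # lowest set bit isolated
    @assert blsr(m)   == 0b01100000   # lowest set bit cleared
    @assert blsmsk(m) == 0b00001111   # mask up to and including lowest set bit
end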
"""
$(@__MODULE__).pdep(x::Unsigned, y::U) where U <: Unsigned -> U
Assume that `y` has exactly `m` `1`-bits. Then `pdep(x, y)` replaces these bits by the `m` lowest bits
of `x` (in order) and returns the result. The remaining bits of `x` are ignored.
On `x86_64` and `i686` machines, this function uses the corresponding instruction from the
[BMI2](https://en.wikipedia.org/wiki/X86_Bit_manipulation_instruction_set#BMI2) instruction set
if possible. Without hardware support it is much slower.
"""
function pdep(x::Unsigned, y::U) where U <: Unsigned
a = zero(U)
while !iszero(y)
b = blsi(y)
a |= b & -(isodd(x) % U)
y ⊻= b
x >>>= 1
end
a
end
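# Illustration of the generic fallback above (not part of the package): with
# y = 0b1010, the two lowest bits of x are scattered to the positions of the
# set bits of y, that is, to bit positions 2 and 4.
let y = UInt8(0b1010)
    @assert pdep(UInt8(0b01), y) == 0b0010
    @assert pdep(UInt8(0b11), y) == 0b1010
end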
using CpuId: cpufeature
if (Sys.ARCH == :x86_64 || Sys.ARCH == :i686) && cpufeature(:BMI2)
const llvm_pdep = "llvm.x86.bmi.pdep.$(bitsize(UInt))"
pdep(x::Unsigned, y::U) where U <: Union{UInt8,UInt16,UInt32,UInt} =
ccall(llvm_pdep, llvmcall, UInt, (UInt, UInt), x % UInt, y % UInt) % U
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 19156 | #
# PackedVector
#
import Base: ==, getindex, setindex, length, size, empty, iterate, rest, split_rest,
iszero, zero, +, -, *, convert
export PackedVector, bits
"""
PackedVector{U<:Unsigned,M,T<:Union{Base.BitInteger,Bool}} <: AbstractSmallVector{T}
PackedVector{U,M,T}()
PackedVector{U,M,T}(iter)
PackedVector{U,M}(v::AbstractVector{T})
PackedVector{U,M}(t::Tuple)
PackedVector(v::SmallVector{M,T})
This type of immutable vector stores the elements in a common bit mask of type `U`
with `M` bits for each entry. The range of allowed values is `-2^(M-1):2^(M-1)-1`
if `T <: Signed`, `0:2^M-1` if `T <: Unsigned` and `false:true` if `T == Bool`.
Apart from that, the official element type `T` is only used when retrieving an entry.
The capacity, that is, the number of elements that can be stored, is given
by `bitsize(U)÷M`.
The element type `T` can be omitted when creating the `PackedVector` from an `AbstractVector`
or from a tuple. In the latter case, `T` is determined by promoting the element types of the tuple.
If no argument is given, then an empty vector is returned.
If the `PackedVector` is created from a `SmallVector` `v` and the parameters `U` and `M`
are omitted, then `M` is set to `bitsize(T)` and `U` is chosen such that the capacity
of the resulting vector is at least the capacity of `v`.
Overflow or underflow during addition or subtraction of vectors do not throw an error.
The same applies to multiplication by a scalar of type `T`. Scalar multiplication by
other types returns a `Vector`.
Compared to a `SmallVector`, a `PackedVector` may have faster insert and delete operations.
Arithmetic operations are usually slower unless `M` is the size of a hardware integer.
See also [`capacity`](@ref capacity(::Type{<:PackedVector})), [`$(@__MODULE__).bitsize`](@ref).
# Examples
```jldoctest
julia> v = PackedVector{UInt64,5,Int8}(-5:5:10)
4-element PackedVector{UInt64, 5, Int8}:
-5
0
5
10
julia> capacity(v)
12
julia> w = PackedVector{UInt64,5,Int8}([1, 2, 3, 4])
4-element PackedVector{UInt64, 5, Int8}:
1
2
3
4
julia> v+w
4-element PackedVector{UInt64, 5, Int8}:
-4
2
8
14
julia> Int8(2)*v
4-element PackedVector{UInt64, 5, Int8}:
-10
0
10
-12
```
"""
struct PackedVector{U<:Unsigned,M,T<:Union{BitInteger,Bool}} <: AbstractSmallVector{T}
m::U
n::Int
end
PackedVector{U,M,T}(v::PackedVector{U,M,T}) where {U,M,T} = v
@inline function PackedVector{U,M,T}(w::Union{AbstractVector,Tuple}) where {U,M,T}
v = PackedVector{U,M,T}()
@boundscheck if !isempty(w)
checklength(v, length(w))
x, y = extrema(w)
checkvalue(M, convert(T, x))
checkvalue(M, convert(T, y))
end
for x in Iterators.reverse(w)
@inbounds v = pushfirst(v, x)
end
v
end
@propagate_inbounds function PackedVector{U,M,T}(iter) where {U,M,T}
v = PackedVector{U,M,T}()
for x in iter
v = push(v, x)
end
v
end
PackedVector{U,M}(v::AbstractVector{T}) where {U,M,T} = PackedVector{U,M,T}(v)
function PackedVector{U,M}(v::V) where {U, M, V <: Tuple}
T = promote_type(fieldtypes(V)...)
PackedVector{U,M,T}(v)
end
@propagate_inbounds function PackedVector{U,M,S}(v::SmallVector{N,T}) where {U,M,S,N, T <: BitInteger}
if bitsize(T) == M && (T <: Signed) == (S <: Signed)
@boundscheck begin
c = capacity(PackedVector{U,M,S})
c >= N || length(v) <= c || error("vector cannot have more than $c elements")
end
PackedVector{U,M,S}(bits(v.b) % U, length(v))
else
invoke(PackedVector{U,M,S}, Tuple{AbstractVector{T}}, v)
end
end
function PackedVector(v::SmallVector{N,T}) where {N, T <: BitInteger}
m = bits(v.b)
M = bitsize(T)
U = typeof(m)
PackedVector{U,M,T}(v)
end
(::Type{V})() where V <: PackedVector = zeros(V, 0)
"""
bits(v::PackedVector{U}) where U -> U
Return the bit mask used internally to store the elements of the vector `v`.
"""
bits(v::PackedVector) = v.m
length(v::PackedVector) = v.n
size(v::PackedVector) = (length(v),)
capacity(::Type{<:PackedVector{U,M}}) where {U,M} = bitsize(U) ÷ M
==(v::PackedVector{U,M,T}, w::PackedVector{U,M,T}) where {U,M,T} =
v.n == w.n && v.m == w.m
empty(v::PackedVector{U,M,T}, ::Type{S} = T) where {U,M,T,S} = PackedVector{U,M,S}()
iszero(v::PackedVector) = iszero(v.m)
zero(v::V) where V <: PackedVector = zeros(V, length(v))
function zeros(::Type{V}, n::Integer) where {U, V <: PackedVector{U}}
n <= capacity(V) || error("vector cannot have more than $(capacity(V)) elements")
V(zero(U), n)
end
Base.@assume_effects :total all_ones(::Type{U}, M) where U =
foldl((m, i) -> m | unsafe_shl(one(U), i), 0:M:bitsize(U)-1; init = zero(U))
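# all_ones(U, M) sets the lowest bit of every M-bit field (illustration only):
let
    @assert all_ones(UInt16, 4) == 0x1111
    @assert all_ones(UInt16, 8) == 0x0101
end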
function ones(::Type{V}, n::Integer) where {U, M, V <: PackedVector{U,M}}
n <= capacity(V) || error("vector cannot have more than $(capacity(V)) elements")
mask = M*n == bitsize(U) ? ~zero(U) : unsafe_shl(one(U), M*n) - one(U)
# TODO: same test elsewhere!!! should be OK
m = all_ones(U, M) & mask
V(m, n)
end
function iterate(v::PackedVector, w = v)
if isempty(w)
nothing
else
w, x = popfirst(w)
x, w
end
end
rest(v::PackedVector, w = v) = w
if VERSION >= v"1.9"
@inline function split_rest(v::PackedVector, n::Int, w = v)
m = length(w)-n
@boundscheck (n >= 0 && m >= 0) || error("impossible number of elements requested")
@inbounds w[1:m], w[m+1:end]
end
end
@inline function checkvalue(M, x::T) where T
bitsize(T) == M && return
bitsize(T) < M && error("type $T has fewer than $M bits")
if T <: Signed
-one(T) << (M-1) <= x < one(T) << (M-1) || error("value $x out of range for $M bits signed integer")
else
x < one(T) << M || error("value $x out of range for $M bits unsigned integer")
end
nothing
end
function checklength(v::PackedVector, m = 1)
c = capacity(v)
length(v) <= c-m || error("vector cannot have more than $c elements")
nothing
end
# TODO: can we omit the conversion to U?
@inline maskvalue(::Type{U}, M, x::Union{Unsigned,Bool}) where U = x % U
@inline function maskvalue(::Type{U}, M, x::T) where {U, T <: Signed}
mask = one(T) << M - one(T)
unsigned(x & mask) % U
end
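# Worked example (illustration only): signed values are stored as M-bit
# two's complement, so -5 in a 5-bit field becomes 0b11011 = 0x1b.
let
    @assert maskvalue(UInt8, 5, Int8(-5)) == 0x1b
end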
@inline function getindex(v::PackedVector{U,M,T}, i::Int) where {U,M,T}
@boundscheck checkbounds(v, i)
x = unsafe_lshr(v.m, M*(i-1)) % T
mask = one(T) << M - one(T)
signbit = one(T) << (M-1)
if T == Bool
x
elseif T <: Unsigned || iszero(x & signbit)
x & mask
else
x | ~mask
end
end
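# Sign extension at work (illustration only, using the raw two-field struct
# constructor): the stored 5-bit pattern 0b11011 reads back as Int8(-5).
let v = PackedVector{UInt64,5,Int8}(UInt64(0b11011), 1)
    @assert v[1] == -5
end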
@inline function getindex(v::V, r::AbstractUnitRange{<:Integer}) where {U <: Unsigned, M, V <: PackedVector{U,M}}
@boundscheck checkbounds(v, r)
l = length(r)
l == capacity(v) && return v
mask = unsafe_shl(one(U), M*l) - one(U)
m = unsafe_lshr(v.m, M*(first(r)-1)) & mask
V(m, l)
end
@inline function getindex(v::V, ii::AbstractVector{<:Integer}) where V <: PackedVector
@boundscheck begin
c = capacity(v)
length(ii) <= c || error("vector cannot have more than $c elements")
checkbounds(v, ii)
end
@inbounds V(@inbounds(v[i]) for i in ii)
end
@inline function setindex(v::PackedVector{U,M,T}, x, i::Int) where {U,M,T}
x = convert(T, x)
@boundscheck begin
checkbounds(v, i)
checkvalue(M, x)
end
s = M*(i-1)
mask = one(U) << M - one(U)
y = maskvalue(U, M, x)
m = (v.m & ~unsafe_shl(mask, s)) | unsafe_shl(y, s)
PackedVector{U,M,T}(m, v.n)
end
@inline function insert(v::PackedVector{U,M,T}, i::Integer, x) where {U,M,T}
x = convert(T, x)
@boundscheck begin
isone(i) || checkbounds(v, i-1)
checklength(v)
checkvalue(M, x)
end
s = M*(i-1)
mask = unsafe_shl(one(U), s) - one(U)
m1 = v.m & mask
m2 = (v.m & ~mask) << M
y = maskvalue(U, M, x)
m = m1 | m2 | unsafe_shl(y, s)
PackedVector{U,M,T}(m, v.n+1)
end
@inline function deleteat(v::PackedVector{U,M,T}, i::Integer) where {U,M,T}
@boundscheck checkbounds(v, i)
s = M*(i-1)
mask = unsafe_shl(one(U), s) - one(U)
m1 = v.m >> M & ~mask
m2 = v.m & mask
PackedVector{U,M,T}(m1 | m2, v.n-1)
end
@propagate_inbounds popat(v::PackedVector, i::Integer) =
deleteat(v, i), @inbounds v[i]
@propagate_inbounds push(v::PackedVector, xs...) = append(v, xs)
# TODO: needed?
@inline function push(v::PackedVector{U,M,T}, x) where {U,M,T}
x = convert(T, x)
@boundscheck begin
checklength(v)
checkvalue(M, x)
end
s = M*v.n
y = maskvalue(U, M, x)
m = v.m | unsafe_shl(y, s)
PackedVector{U,M,T}(m, v.n+1)
end
@inline function pop(v::PackedVector{U,M,T}) where {U,M,T}
@boundscheck checkbounds(v, 1)
s = M*(v.n-1)
mask = unsafe_shl(one(U), s) - one(U)
m = v.m & mask
PackedVector{U,M,T}(m, v.n-1), @inbounds v[v.n]
end
pushfirst(v::PackedVector) = v
@inline function pushfirst(v::PackedVector{U,M,T}, x) where {U <: Unsigned, M, T <: Union{BitInteger,Bool}}
x = convert(T, x)
@boundscheck begin
checklength(v)
checkvalue(M, x)
end
y = maskvalue(U, M, x)
m = v.m << M | y
PackedVector{U,M,T}(m, v.n+1)
end
@propagate_inbounds pushfirst(v::PackedVector, xs...) = prepend(v, xs)
@inline function popfirst(v::PackedVector{U,M,T}) where {U,M,T}
@boundscheck checkbounds(v, 1)
PackedVector{U,M,T}(v.m >> M, v.n-1), @inbounds v[1]
end
append(v::PackedVector, ws...) = foldl(append, ws; init = v)
@propagate_inbounds append(v::V, w) where V <: PackedVector = append(v, V(w))
@inline function append(v::PackedVector{U,M,T}, w::PackedVector{W,M,T}) where {U <: Unsigned, M, T <: Union{BitInteger,Bool}, W}
isempty(w) && return v # otherwise we cannot use unsafe_shl
@boundscheck checklength(v, w.n)
m = v.m | unsafe_shl(w.m % U, M*v.n)
PackedVector{U,M,T}(m, v.n+w.n)
end
prepend(v::PackedVector, ws...) = foldr((w, v) -> prepend(v, w), ws; init = v)
@propagate_inbounds prepend(v::V, w) where V <: PackedVector = append(V(w), v)
@inline function duplicate(v::PackedVector{U,M,T}, i::Integer) where {U,M,T}
@boundscheck begin
checkbounds(v, i)
checklength(v)
end
mask = unsafe_shl(one(U), M*i) - one(U)
m1 = v.m & mask
m2 = (v.m & ~(mask >>> M)) << M
PackedVector{U,M,T}(m1 | m2, v.n+1)
end
@generated function bitcast_add(v::PackedVector{U,M,T}, w::PackedVector{U,M,T}) where {U,M,T}
b = bitsize(U)
n = b ÷ M
c = n * M
ir = c == b ? """
%a2 = bitcast i$c %0 to <$n x i$M>
%b2 = bitcast i$c %1 to <$n x i$M>
%c2 = add <$n x i$M> %a2, %b2
%c1 = bitcast <$n x i$M> %c2 to i$c
ret i$b %c1
""" : """
%a1 = trunc i$b %0 to i$c
%a2 = bitcast i$c %a1 to <$n x i$M>
%b1 = trunc i$b %1 to i$c
%b2 = bitcast i$c %b1 to <$n x i$M>
%c2 = add <$n x i$M> %a2, %b2
%c1 = bitcast <$n x i$M> %c2 to i$c
%c0 = zext i$c %c1 to i$b
ret i$b %c0
"""
quote
$(Expr(:meta, :inline))
m = Base.llvmcall($ir, U, Tuple{U, U}, v.m, w.m)
PackedVector{U,M,T}(m, v.n)
end
end
@generated function bitcast_sub(v::PackedVector{U,M,T}, w::PackedVector{U,M,T}) where {U,M,T}
b = bitsize(U)
n = b ÷ M
c = n * M
ir = c == b ? """
%a2 = bitcast i$c %0 to <$n x i$M>
%b2 = bitcast i$c %1 to <$n x i$M>
%c2 = sub <$n x i$M> %a2, %b2
%c1 = bitcast <$n x i$M> %c2 to i$c
ret i$b %c1
""" : """
%a1 = trunc i$b %0 to i$c
%a2 = bitcast i$c %a1 to <$n x i$M>
%b1 = trunc i$b %1 to i$c
%b2 = bitcast i$c %b1 to <$n x i$M>
%c2 = sub <$n x i$M> %a2, %b2
%c1 = bitcast <$n x i$M> %c2 to i$c
%c0 = zext i$c %c1 to i$b
ret i$b %c0
"""
quote
$(Expr(:meta, :inline))
m = Base.llvmcall($ir, U, Tuple{U, U}, v.m, w.m)
PackedVector{U,M,T}(m, v.n)
end
end
@generated function bitcast_mul(c::T, v::PackedVector{U,M,T}) where {U,M,T}
b = bitsize(U)
n = b ÷ M
(bitsize(T) == M && n*M == b) || error("not implemented")
ir = """
%a = bitcast i$b %1 to <$n x i$M>
%b1 = insertelement <$n x i$M> poison, i$M %0, i32 0
%b2 = shufflevector <$n x i$M> %b1, <$n x i$M> poison, <$n x i32> zeroinitializer
%c2 = mul <$n x i$M> %a, %b2
%c1 = bitcast <$n x i$M> %c2 to i$b
ret i$b %c1
"""
quote
$(Expr(:meta, :inline))
m = Base.llvmcall($ir, U, Tuple{T, U}, c, v.m)
PackedVector{U,M,T}(m, v.n)
end
end
+(v::PackedVector) = v
@inline function +(v::V, w::V) where {U, M, V <: PackedVector{U,M}}
@boundscheck length(v) == length(w) || error("vectors must have the same length")
M >= 8 && ispow2(M) && return bitcast_add(v, w)
mask = one(U) << M - one(U)
ones0 = all_ones(U, 2*M)
ones1 = ones0 << M
mask0 = mask*ones0
mask1 = mask*ones1
m0 = (v.m & mask0 + w.m & mask0) & mask0
m1 = (v.m & mask1 + w.m & mask1) & mask1
V(m0 | m1, v.n)
end
@inline function +(v::V, w::V) where {U, V <: PackedVector{U,1,<:BitInteger}}
@boundscheck length(v) == length(w) || error("vectors must have the same length")
V(v.m ⊻ w.m, v.n)
end
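# Demo of the even/odd field splitting above (illustration only): carries
# wrap within each 4-bit field instead of leaking into the neighbouring one.
let V = PackedVector{UInt16,4,UInt8}
    @assert V([15, 1]) + V([1, 1]) == V([0, 2])   # 15 + 1 wraps to 0 mod 16
end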
-(v::PackedVector) = @inbounds zero(v)-v
@inline function -(v::V, w::V) where {U, M, V <: PackedVector{U,M}}
@boundscheck length(v) == length(w) || error("vectors must have the same length")
M >= 8 && ispow2(M) && return bitcast_sub(v, w)
mask = one(U) << M - one(U)
ones0 = all_ones(U, 2*M)
ones1 = ones0 << M
mask0 = mask*ones0
mask1 = mask*ones1
wc = ~w.m
m0 = (v.m & mask0 + wc & mask0 + ones0) & mask0
m1 = (v.m & mask1 + wc & mask1 + ones1) & mask1
V(m0 | m1, v.n)
end
-(vs::V...) where {U, V <: PackedVector{U,1,<:BitInteger}} = +(vs...)
@inline function *(c::T, v::PackedVector{U,M,T}) where {U, M, T <: BitInteger}
@boundscheck checkvalue(M, c)
bitsize(T) == M && return bitcast_mul(c, v)
mask = one(U) << M - one(U)
ones0 = all_ones(U, 2*M)
ones1 = ones0 << M
mask0 = mask*ones0
mask1 = mask*ones1
cm = maskvalue(U, M, c)
m0 = (cm * (v.m & mask0)) & mask0
m1 = (cm * (v.m & mask1)) & mask1
PackedVector{U,M,T}(m0 | m1, v.n)
end
*(c::T, v::PackedVector{U,1,T}) where {U, T <: BitInteger} = isodd(c) ? v : zero(v)
*(v::PackedVector{U,M,T}, c::T) where {U, M, T <: BitInteger} = c*v
"""
$(@__MODULE__).unsafe_add(v::V, w::V) where V <: PackedVector -> V
Add `v` and `w` and return the result. It is not checked that `v` and `w` have the same length.
No overflow or underflow is allowed in any component, nor are sign changes in the case of signed integers.
This function is much faster than regular addition.
See also [`unsafe_sub`](@ref).
"""
unsafe_add(v::V, w::V) where V <: PackedVector = V(v.m+w.m, v.n)
"""
$(@__MODULE__).unsafe_sub(v::V, w::V) where V <: PackedVector -> V
Subtract `w` from `v` and return the result. It is not checked that `v` and `w` have the same length.
No overflow or underflow is allowed in any component, nor are sign changes in the case of signed integers.
This function is much faster than regular subtraction.
See also [`unsafe_add`](@ref).
"""
unsafe_sub(v::V, w::V) where V <: PackedVector = V(v.m-w.m, v.n)
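# Usage sketch (illustration only): as long as no field overflows, adding the
# raw bit masks agrees with regular elementwise addition.
let V = PackedVector{UInt16,4,UInt8}
    @assert unsafe_add(V([1, 2]), V([3, 4])) == V([1, 2]) + V([3, 4])
end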
@generated function sum_split(v::PackedVector{U,M,T}) where {U,M,T}
@assert M > 1
c = capacity(v)
l = top_set_bit(c-1)
quote
m = v.m
Base.Cartesian.@nexprs $l i -> begin
n = M*2^(i-1)
ones2 = all_ones(U, 2*n)
mask = one(U) << n - one(U)
mask2 = mask * ones2
m1 = m & mask2
if T <: Signed && isodd($c >> (i-1))
m2 = unsigned((signed(m) >> n)) & mask2
else
m2 = (m >> n) & mask2
end
if T <: Signed
signmask = ones2 << (M-1 + i-1)
m1 |= (m1 & signmask) << 1
m2 |= (m2 & signmask) << 1
end
m = m1 + m2
if T <: Signed
m &= ~(signmask << 2)
end
end
if bitsize(T) > bitsize(Int)
TT = T
elseif T <: Signed
TT = Int
else
TT = UInt
end
if T <: Unsigned
m % TT
else
k = bitsize(U) - (M+$l)
(signed(m << k) >> k) % TT
end
end
end
function sum_count(v::PackedVector{U,M,T}) where {U,M,T}
o = all_ones(U, M)
t = ntuple(Val(M)) do i
c = count_ones(v.m & (o << (i-1))) << (i-1)
T <: Signed && i == M ? -c : c
end
s = sum(t)
if bitsize(T) > bitsize(Int)
s % T
elseif T <: Signed
s
else
unsigned(s)
end
end
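# Plane-by-plane summation (illustration only): bit plane i contributes
# count_ones(plane) << (i-1), and for signed T the top plane counts negatively.
let v = PackedVector{UInt32,4,UInt8}([3, 5, 7])
    @assert sum_count(v) == 15
end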
@generated inttype(::Val{M}) where M = Symbol(:Int, M)
@generated uinttype(::Val{M}) where M = Symbol(:UInt, M)
function sum(v::PackedVector{U,M,T}) where {U,M,T}
if M >= 8 && M <= 64 && ispow2(M)
S = T <: Signed ? inttype(Val(M)) : uinttype(Val(M))
w = PackedVector{U,M,S}(v.m, v.n)
s = sum(SmallVector(w))
bitsize(T) <= bitsize(s) ? s : s % T
else
log2u = top_set_bit(bitsize(U))-1
if M <= log2u + (T <: Signed ? 1 : -2)
sum_count(v)
else
sum_split(v)
end
end
end
function maximum(v::PackedVector{U,M,T}) where {U,M,T}
if M >= 8 && ispow2(M)
S = T <: Signed ? inttype(Val(M)) : uinttype(Val(M))
w = PackedVector{U,M,S}(v.m, v.n)
maximum(SmallVector(w)) % T
else
invoke(maximum, Tuple{AbstractVector{T}}, v)
end
end
function minimum(v::PackedVector{U,M,T}) where {U,M,T}
if M >= 8 && ispow2(M)
S = T <: Signed ? inttype(Val(M)) : uinttype(Val(M))
w = PackedVector{U,M,S}(v.m, v.n)
minimum(SmallVector(w)) % T
else
invoke(minimum, Tuple{AbstractVector{T}}, v)
end
end
function support(v::PackedVector{U,M}) where {U,M}
S = SmallBitSet{UInt}
capacity(v) <= capacity(S) || length(v) <= capacity(S) ||
error("$S can only contain integers between 1 and $(capacity(S))")
mask = one(U) << M - one(U)
m = zero(UInt)
b = one(m)
for i in 1:length(v)
if !iszero(v.m & mask)
m |= b
end
mask <<= M
b <<= 1
end
convert(S, m)
end
support(v::PackedVector{U,1}) where U = convert(SmallBitSet{UInt}, bits(v))
#
# conversion to SmallVector
#
@propagate_inbounds function SmallVector{N,T}(v::PackedVector{U,M,S}) where {N,T,U,M,S}
if bitsize(T) == M && (T <: Signed) == (S <: Signed)
@boundscheck if N < capacity(PackedVector{U,M,S})
length(v) <= N || error("vector cannot have more than $N elements")
end
b = _convert(Values{N,T}, v.m)
SmallVector{N,T}(b, length(v))
else
invoke(SmallVector{N,T}, Tuple{AbstractVector{S}}, v)
end
end
function SmallVector(v::PackedVector{U,M,T}) where {U,M,T}
N = capacity(v)
SmallVector{N,T}(v)
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 19793 | #
# small sets
#
export SmallBitSet, bits, delete, pop, push
using Base: hasfastin
import Base: show, ==, hash, copy, convert,
empty, isempty, in, first, last, iterate,
length, issubset, maximum, minimum, extrema,
union, intersect, setdiff, symdiff, filter
isinteger(x) = x isa Number && Base.isinteger(x)
"""
SmallBitSet{U<:Unsigned} <: AbstractSet{Int}
SmallBitSet{U}([iter])
SmallBitSet([iter])
`SmallBitSet{U}` is an immutable set that can hold integers between `1` and the bit length of `U`.
Called without an argument, it returns an empty set. If `U` is omitted, then `UInt` is taken.
All non-mutating functions for sets are supported. The non-mutating analogs
[`push`](@ref push(::SmallBitSet, ::Vararg{Any})), [`pop`](@ref pop(::SmallBitSet)) and
[`delete`](@ref) of the corresponding `!`-functions are also provided.
"""
struct SmallBitSet{U<:Unsigned} <: AbstractSet{Int}
mask::U
global _SmallBitSet(mask::U) where U = new{U}(mask)
end
function show(io::IO, s::SmallBitSet{U}) where U
print(io, "SmallBitSet")
get(io, :typeinfo, Any) == SmallBitSet{U} || print(io, '{', U, '}')
print(io, "([")
join(io, s, ", ")
print(io, "])")
end
==(s::SmallBitSet, t::SmallBitSet) = s.mask == t.mask
copy(s::SmallBitSet) = s
"""
bits(s::SmallBitSet{U}) where U -> U
Return the bit mask used internally to store the elements of the set `s`.
See also [`convert(::Type{SmallBitSet}, ::Integer)`](@ref).
"""
bits(s::SmallBitSet) = s.mask
"""
capacity(::Type{<:SmallBitSet}) -> Int
capacity(s::SmallBitSet) -> Int
Return the largest number that the given set or `SmallBitSet` type can store.
"""
capacity(::Type{<:SmallBitSet}),
capacity(::SmallBitSet)
capacity(::Type{SmallBitSet{U}}) where U = bitsize(U)
capacity(::Type{SmallBitSet}) = capacity(SmallBitSet{UInt})
"""
fasthash(s::SmallBitSet [, h0::UInt]) -> UInt
Return a hash for `s` that can be computed fast. This hash is consistent across
all `SmallBitSet`s, but it is not compatible with the `hash` used for sets.
See also `Base.hash`.
# Examples
```jldoctest
julia> s = SmallBitSet(1:3);
julia> fasthash(s)
0x828a4cc485149963
julia> fasthash(s) == hash(s)
false
julia> t = SmallBitSet{UInt16}(s);
julia> fasthash(s) == fasthash(t)
true
```
"""
fasthash(s::SmallBitSet, h0::UInt) = hash(bits(s), h0)
"""
convert(::Type{SmallBitSet{U}}, mask::Integer) where U -> SmallBitSet{U}
convert(::Type{SmallBitSet}, mask::Integer) -> SmallBitSet{UInt}
Convert a bit mask to a `SmallBitSet` of the given type. This is the inverse operation to `bits`.
See also [`bits`](@ref).
# Examples
```jldoctest
julia> s = SmallBitSet{UInt16}([1, 5, 6]);
julia> u = bits(s)
0x0031
julia> convert(SmallBitSet, u)
SmallBitSet{UInt64} with 3 elements:
1
5
6
```
"""
convert(::Type{SmallBitSet{U}}, ::Integer) where U <: Unsigned,
convert(::Type{SmallBitSet}, ::Integer)
convert(::Type{SmallBitSet{U}}, mask::Integer) where U = _SmallBitSet(U(mask))
convert(::Type{SmallBitSet}, mask::Integer) = convert(SmallBitSet{UInt}, mask)
@propagate_inbounds function _push(mask::U, iter) where U
for n in iter
@boundscheck if !isinteger(n) || n <= 0 || n > bitsize(U)
error("SmallBitSet{$U} can only contain integers between 1 and $(bitsize(U))")
end
mask |= one(U) << (Int(n)-1)
end
_SmallBitSet(mask)
end
SmallBitSet(args...) = SmallBitSet{UInt}(args...)
SmallBitSet{U}(s::SmallBitSet) where U = _SmallBitSet(s.mask % U)
SmallBitSet{U}() where U = _SmallBitSet(zero(U))
@propagate_inbounds SmallBitSet{U}(iter) where U = _push(zero(U), iter)
function SmallBitSet{U}(r::AbstractUnitRange{<:Integer}) where U
r0, r1 = first(r), last(r)
if r0 <= 0 || r1 > bitsize(U)
error("SmallBitSet{$U} can only contain integers between 1 and $(bitsize(U))")
end
if r1 < r0
_SmallBitSet(zero(U))
else
m = one(U) << (r1-r0+1) - one(U)
_SmallBitSet(m << (r0-1))
end
end
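# Mask construction for ranges (illustration only): the elements 3:5 occupy
# bit positions 3 to 5, giving the mask 0b00011100.
let s = SmallBitSet{UInt8}(3:5)
    @assert bits(s) == 0x1c
end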
isempty(s::SmallBitSet) = iszero(bits(s))
"""
empty(s::S) where S <: SmallBitSet -> S
Return an empty `SmallBitSet` of the same type as `s`.
"""
empty(s::SmallBitSet)
empty(s::S) where S <: SmallBitSet = S()
default(::Type{S}) where S <: SmallBitSet = S()
length(s::SmallBitSet) = count_ones(bits(s))
# from https://discourse.julialang.org/t/faster-way-to-find-all-bit-arrays-of-weight-n/113658/12
iterate(s::SmallBitSet, m = bits(s)) =
iszero(m) ? nothing : (trailing_zeros(m)+1, blsr(m))
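# Iteration peels off the lowest set bit in each step via trailing_zeros and
# blsr (illustration only): the mask 0b10100 yields 3, then 5.
let s = convert(SmallBitSet{UInt8}, 0b10100)
    @assert collect(s) == [3, 5]
end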
@inline function first(s::SmallBitSet)
@boundscheck isempty(s) && error("collection must be non-empty")
trailing_zeros(bits(s))+1
end
@inline function last(s::SmallBitSet)
@boundscheck isempty(s) && error("collection must be non-empty")
top_set_bit(bits(s))
end
function minimum(s::SmallBitSet; init = missing)
if !isempty(s)
@inbounds first(s)
elseif init !== missing
init
else
error("collection must be non-empty unless `init` is given")
end
end
function maximum(s::SmallBitSet; init = missing)
if !isempty(s)
@inbounds last(s)
elseif init !== missing
init
else
error("collection must be non-empty unless `init` is given")
end
end
extrema(v::SmallBitSet; init::Tuple{Any,Any} = (missing, missing)) =
(minimum(v; init = init[1]), maximum(v; init = init[2]))
# hasfastin(::Type{<:SmallBitSet}) = true
# this is the default for AbstractSet
function in(n, s::SmallBitSet{U}) where U <: Unsigned
if isinteger(n)
n = Int(n)
!iszero(s.mask & one(U) << (n-1))
else
false
end
end
issubset(s::SmallBitSet, t::SmallBitSet) = isempty(setdiff(s, t))
"""
push(s::S, xs...) where S <: SmallBitSet -> S
Return the `SmallBitSet` obtained from `s` by adding the other arguments `xs`.
See also `Base.push!`, `BangBang.push!!`.
"""
@propagate_inbounds push(s::SmallBitSet, ns...) = _push(s.mask, ns)
"""
pop(s::S) where S <: SmallBitSet -> Tuple{S, Int}
Return the pair `(t, x)` where `x` is the largest element from `s` and
`t` is the set `s` with `x` deleted. The set `s` must be non-empty.
See also `Base.pop!`, `BangBang.pop!!`.
"""
@inline function pop(s::SmallBitSet)
@boundscheck isempty(s) && error("collection must be non-empty")
n = last(s)
delete(s, n), n
end
"""
pop(s::S, x) where S <: SmallBitSet -> Tuple{S, Int}
Return the pair `(t, x)` where `t` is the set `s` with `x` deleted.
The set `s` must contain `x`.
See also `Base.pop!`, `BangBang.pop!!`.
"""
@inline function pop(s::SmallBitSet, n)
@boundscheck n in s || error("set does not contain the element")
delete(s, n), n
end
"""
pop(s::S, x, default::T) where S <: SmallBitSet -> Tuple{S, Union{Int,T}}
If `s` contains `x`, return the pair `(t, x)` where `t` is the set `s` with `x` deleted.
Otherwise return `(s, default)`.
See also `Base.pop!`, `BangBang.pop!!`.
"""
function pop(s::SmallBitSet, n, default)
n in s ? (delete(s, n), Int(n)) : (s, default)
end
"""
delete(s::S, x) where S <: SmallBitSet -> S
If `s` contains `x`, return the set obtained by deleting that element.
Otherwise return `s`.
See also `Base.delete!`, `BangBang.delete!!`.
"""
function delete(s::SmallBitSet{U}, n) where U
if isinteger(n)
m = one(U) << (Int(n)-1)
_SmallBitSet(s.mask & ~m)
else
s
end
end
function filter(f::F, s::SmallBitSet) where F
m = bits(s)
q = zero(m)
while !iszero(m)
p = blsr(m)
n = trailing_zeros(m)+1
if f(n)
q |= m ⊻ p
end
m = p
end
_SmallBitSet(q)
end
union(s::SmallBitSet, t::SmallBitSet) = _SmallBitSet(s.mask | t.mask)
union(s::SmallBitSet, ts::SmallBitSet...) = foldl(union, ts; init = s)
intersect(s::SmallBitSet{U}, t::SmallBitSet) where U <: Unsigned = _SmallBitSet(s.mask & (t.mask % U))
function intersect(s::SmallBitSet{U}, t) where U <: Unsigned
u = _SmallBitSet(zero(U))
for n in (hasfastin(t) ? s : t)
if n in (hasfastin(t) ? t : s)
@inbounds u = push(u, n)
end
end
u
end
intersect(s::SmallBitSet, ts...) = foldl(intersect, ts; init = s)
setdiff(s::SmallBitSet{U}, t::SmallBitSet) where U <: Unsigned = _SmallBitSet(s.mask & ~(t.mask % U))
function setdiff(s::SmallBitSet, t)
if hasfastin(t)
u = s
for n in s
if n in t
u = delete(u, n)
end
end
return u
else
foldl(delete, t; init = s)
end
end
setdiff(s::SmallBitSet, ts...) = foldl(setdiff, ts; init = s)
symdiff(s::SmallBitSet, t::SmallBitSet) = _SmallBitSet(s.mask ⊻ t.mask)
symdiff(s::SmallBitSet, ts::SmallBitSet...) = foldl(symdiff, ts; init = s)
#
# subset iterators
#
export compositions, subsets, shuffles, shuffle_signbit
using Base: Generator
import Base: eltype, length, size, getindex
struct Shuffles{N,S}
set::S
ks::NTuple{N,Int}
end
"""
shuffles(s::S, ks::Vararg{Integer,N}) where {S <: SmallBitSet, N}
shuffles(ks::Vararg{Integer,N}) where N
In the first form, return an iterator that yields all `ks`-compositions of the set `s`
together with the sign of the permutation that puts the elements back into an increasing order.
See `compositions` and `shuffle_signbit` for details.
The iterator returns tuples `(t, s)`, where `t` is of type `NTuple{N, S}`
and the sign bit `s` is of type `Bool` where `false` means `+1` and `true` means `-1`.
The partition sizes in `ks` must be non-negative and add up to `length(s)`.
In the second form the set `s` is taken to be `SmallBitSet(1:sum(ks))`.
See also [`compositions`](@ref), [`shuffle_signbit`](@ref).
# Examples
```jldoctest
julia> collect(shuffles(SmallBitSet([2, 4, 5]), 1, 2))
3-element Vector{Tuple{Tuple{SmallBitSet{UInt64}, SmallBitSet{UInt64}}, Bool}}:
((SmallBitSet([2]), SmallBitSet([4, 5])), 0)
((SmallBitSet([4]), SmallBitSet([2, 5])), 1)
((SmallBitSet([5]), SmallBitSet([2, 4])), 0)
julia> all(s == shuffle_signbit(a, b) for ((a, b), s) in shuffles(1, 2))
true
```
"""
function shuffles(ks::Integer...)
any(signbit, ks) && error("part sizes must be non-negative")
sum(ks; init = 0) <= bitsize(UInt) || error("at most $(bitsize(UInt)) elements supported")
Shuffles(missing, ks)
end,
function shuffles(s::SmallBitSet, ks::Integer...)
sum(ks; init = 0) == length(s) || error("part lengths must add up to size of the set")
any(signbit, ks) && error("part sizes must be non-negative")
Shuffles(s, ks)
end
eltype(sh::Shuffles{N,Missing}) where N = Tuple{NTuple{N,SmallBitSet{UInt}}, Bool}
eltype(sh::Shuffles{N,S}) where {N, S <: SmallBitSet} = Tuple{NTuple{N,S}, Bool}
length(sh::Shuffles{0}) = 1
function length(sh::Shuffles{N}) where N
foldl(sh.ks[2:end]; init = (1, sh.ks[1])) do (p, k), l
p*binomial(k+l, k), k+l
end |> first
end
iterate(sh::Shuffles{0}) = ((), false), nothing
iterate(sh::Shuffles{0}, _) = nothing
iterate(sh::Shuffles{1}) = ((sh.set,), false), nothing
iterate(sh::Shuffles{1,Missing}) = ((SmallBitSet(1:sh.ks[1]),), false), nothing
iterate(sh::Shuffles{1}, _) = nothing
@inline iterate(sh::Shuffles{2}) = any(signbit, sh.ks) ? nothing : _iterate(sh)
@inline function _iterate(sh::Shuffles{2,S}; signint = UInt(0)) where S
k, l = sh.ks
U = S == Missing ? UInt : typeof(bits(sh.set))
mask = U(1) << k - U(1)
lastmask = mask << l
set = S == Missing ? SmallBitSet{U}(1:k+l) : sh.set # TODO: is SmallBitSet{U}(...) too slow?
part1 = _SmallBitSet(S == Missing ? mask : pdep(mask, bits(set)))
part2 = symdiff(part1, set)
signbit = isodd(signint)
state = (; mask, lastmask, signint, set, part2)
((part1, part2), signbit), (state,)
end
@inline function iterate(sh::Shuffles{2,S}, (state,)) where S
(; mask, lastmask, signint, set) = state
# see also https://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
# and https://discourse.julialang.org/t/faster-way-to-find-all-bit-arrays-of-weight-n/113658/12
mask == lastmask && return nothing
p = mask + blsi(mask)
t = trailing_zeros(mask)
q = unsafe_lshr(mask ⊻ p, t) >>> 2
# q = unsafe_lshr(blsmsk(p), t) >>> 2
# t+2 can be the bit size of mask, so we can't use unsafe_lshr with t+2
mask = p | q
signint ⊻= ~(t & count_ones(q))
part1 = _SmallBitSet(S == Missing ? mask : pdep(mask, bits(set)))
part2 = symdiff(part1, set)
signbit = isodd(signint)
state = (; mask, lastmask, signint, set, part2)
((part1, part2), signbit), (state,)
end
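# The mask update above is Gosper's hack for the next bit permutation
# (illustration only): starting from 0b0011 it produces 0b0101, 0b0110, ...
let ps = collect(shuffles(2, 2))
    masks = [bits(p[1][1]) for p in ps]
    @assert masks[1:3] == [0b0011, 0b0101, 0b0110]
end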
@inline iterate(sh::Shuffles) = _iterate(sh)
@inline function _iterate(sh::Shuffles{N}) where N
sh2 = Shuffles(sh.set, (sh.ks[1]+sh.ks[2], sh.ks[3:end]...))
((set1, _), _), states_rest = _iterate(sh2)
((part1, part2), _), (state1,) = _iterate(Shuffles(set1, sh.ks[1:2]))
states = (state1, states_rest...)
parts = (part1, map(state -> state.part2, states)...)
signbit = false
(parts, signbit), states
end
@inline function iterate(sh::Shuffles{N}, states) where N
ts1 = iterate(Shuffles(states[1].set, sh.ks[1:2]), (states[1],))
if ts1 === nothing
sh_rest = Shuffles(states[2].set, (sh.ks[1]+sh.ks[2], sh.ks[3:end]...))
ts_rest = iterate(sh_rest, states[2:end])
ts_rest === nothing && return nothing
((set1, _), _), states_rest = ts_rest
((part1, _), signbit), (state1,) = _iterate(Shuffles(set1, sh.ks[1:2]); signint = states_rest[1].signint)
states = (state1, states_rest...)
else
((part1, _), signbit), (state1,) = ts1
states = (state1, states[2:end]...)
end
parts = (part1, map(state -> state.part2, states)...)
(parts, signbit), states
end
"""
shuffle_signbit(ss::SmallBitSet...) -> Bool
Return `true` if an odd number of transpositions is needed to transform the elements of the
sets `ss` into an increasing sequence, and `false` otherwise. The sets are considered as
increasing sequences and assumed to be disjoint.
See also [`shuffles`](@ref).
# Examples
```
julia> s, t, u = SmallBitSet([2, 3, 8]), SmallBitSet([1, 4, 6]), SmallBitSet([5, 7]);
julia> shuffle_signbit(s, t), shuffle_signbit(s, t, u)
(true, false)
```
"""
shuffle_signbit(ss::Vararg{SmallBitSet,N}) where N =
shuffle_signbit(ss[N-1], ss[N]) ⊻ (@inline shuffle_signbit(ss[1:N-2]..., ss[N-1] ∪ ss[N]))
shuffle_signbit() = false
shuffle_signbit(::SmallBitSet) = false
function shuffle_signbit(s::SmallBitSet, t::SmallBitSet)
m = bits(s)
p = zero(m)
while !iszero(m)
# p ⊻= blsi(m)-one(m)
p ⊻= blsmsk(m)
m = blsr(m)
end
isodd(count_ones(p & bits(t)))
end
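# Sanity check (illustration only): merging {2} before {1} needs one
# transposition (odd), while {1} before {2} needs none (even).
let
    @assert shuffle_signbit(SmallBitSet([2]), SmallBitSet([1]))
    @assert !shuffle_signbit(SmallBitSet([1]), SmallBitSet([2]))
end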
"""
compositions(s::S, ks::Vararg{Integer,N}) where {S <: SmallBitSet, N}
compositions(ks::Vararg{Integer,N}) where N
In the first form, return an iterator that yields all `ks`-compositions of the set `s`, that is,
all ordered partitions of `s` into `N` sets of size `ks[1]` to `ks[N]`, respectively. The element type
is `NTuple{N, S}`. The partition sizes in `ks` must be non-negative and add up to `length(s)`.
In the second form the set `s` is taken to be `SmallBitSet(1:sum(ks))`.
This gives an iterator over all set compositions of the integer `sum(ks)`.
See also [`subsets`](@ref subsets(::SmallBitSet, ::Integer)),
[`shuffles`](@ref shuffles(::Vararg{Integer,N}) where N).
# Examples
```jldoctest
julia> collect(compositions(SmallBitSet([2, 4, 5]), 1, 2))
3-element Vector{Tuple{SmallBitSet{UInt64}, SmallBitSet{UInt64}}}:
(SmallBitSet([2]), SmallBitSet([4, 5]))
(SmallBitSet([4]), SmallBitSet([2, 5]))
(SmallBitSet([5]), SmallBitSet([2, 4]))
julia> collect(compositions(1, 1, 1))
6-element Vector{Tuple{SmallBitSet{UInt64}, SmallBitSet{UInt64}, SmallBitSet{UInt64}}}:
(SmallBitSet([1]), SmallBitSet([2]), SmallBitSet([3]))
(SmallBitSet([2]), SmallBitSet([1]), SmallBitSet([3]))
(SmallBitSet([1]), SmallBitSet([3]), SmallBitSet([2]))
(SmallBitSet([3]), SmallBitSet([1]), SmallBitSet([2]))
(SmallBitSet([2]), SmallBitSet([3]), SmallBitSet([1]))
(SmallBitSet([3]), SmallBitSet([2]), SmallBitSet([1]))
julia> collect(compositions(SmallBitSet([2, 4, 5]), 1, 0, 2))
3-element Vector{Tuple{SmallBitSet{UInt64}, SmallBitSet{UInt64}, SmallBitSet{UInt64}}}:
(SmallBitSet([2]), SmallBitSet([]), SmallBitSet([4, 5]))
(SmallBitSet([4]), SmallBitSet([]), SmallBitSet([2, 5]))
(SmallBitSet([5]), SmallBitSet([]), SmallBitSet([2, 4]))
julia> collect(compositions(SmallBitSet()))
1-element Vector{Tuple{}}:
()
```
"""
compositions(args...) = Generator(first, shuffles(args...))
eltype(g::Generator{<:Shuffles, typeof(first)}) = fieldtype(eltype(g.iter), 1)
struct Subsets{T,S<:SmallBitSet} <: AbstractVector{S}
set::T
length::Int
end
"""
subsets(s::S) where S <: SmallBitSet -> AbstractVector{S}
subsets(n::Integer) -> AbstractVector{SmallBitSet{UInt}}
In the first form, return a vector of length `2^length(s)` whose elements are the subsets of the set `s`.
In the second form the set `s` is taken to be `SmallBitSet(1:n)`.
See also [`subsets(::Integer, ::Integer)`](@ref).
# Examples
```jldoctest
julia> collect(subsets(SmallBitSet{UInt8}([3, 5])))
4-element Vector{SmallBitSet{UInt8}}:
SmallBitSet([])
SmallBitSet([3])
SmallBitSet([5])
SmallBitSet([3, 5])
julia> collect(subsets(2))
4-element Vector{SmallBitSet{UInt64}}:
SmallBitSet([])
SmallBitSet([1])
SmallBitSet([2])
SmallBitSet([1, 2])
julia> subsets(2)[2]
SmallBitSet{UInt64} with 1 element:
1
```
"""
function subsets(n::T) where T <: Integer
n >= 0 || error("argument must be non-negative")
n <= bitsize(UInt)-2 || error("at most $(bitsize(UInt)-2) elements supported")
Subsets{T,SmallBitSet{UInt}}(n, n >= 0 ? unsafe_shl(1, n) : 0)
end,
function subsets(s::S) where {U <: Unsigned, S <: SmallBitSet{U}}
bitsize(U) < bitsize(UInt) || length(s) <= bitsize(UInt)-2 ||
error("at most $(bitsize(UInt)-2) elements supported")
Subsets{S,S}(s, unsafe_shl(1, length(s)))
end
show(io::IO, ss::Subsets) = print(io, "Subsets(", ss.set, ')')
show(io::IO, ::MIME"text/plain", ss::Subsets) = print(io, "Subsets(", ss.set, ')')
size(ss::Subsets) = (ss.length,)
@inline function getindex(ss::Subsets{<:Integer}, i::Int)
@boundscheck checkbounds(ss, i)
_SmallBitSet((i-1) % UInt)
end
@inline function getindex(ss::Subsets{<:SmallBitSet}, i::Int)
@boundscheck checkbounds(ss, i)
_SmallBitSet(pdep((i-1) % UInt, bits(ss.set)))
end
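# Indexing spreads the binary digits of i-1 over the set's elements via pdep
# (illustration only): i = 6 gives i-1 = 0b101, selecting the 1st and 3rd
# elements of the underlying set.
let ss = subsets(SmallBitSet{UInt8}([2, 4, 7]))
    @assert ss[6] == SmallBitSet([2, 7])
end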
"""
subsets(s::SmallBitSet, k::Integer)
subsets(n::Integer, k::Integer)
In the first form, return an iterator that yields all `k`-element subsets of the set `s`.
The element type is the type of `s`.
If `k` is negative or larger than `length(s)`, then the iterator is empty.
In the second form the set `s` is taken to be `SmallBitSet(1:n)`.
See also [`subsets(::Integer)`](@ref), [`shuffles`](@ref shuffles(::Vararg{Integer,N}) where N).
# Example
```jldoctest
julia> collect(subsets(SmallBitSet{UInt8}(2:2:8), 3))
4-element Vector{SmallBitSet{UInt8}}:
SmallBitSet([2, 4, 6])
SmallBitSet([2, 4, 8])
SmallBitSet([2, 6, 8])
SmallBitSet([4, 6, 8])
julia> collect(subsets(3, 2))
3-element Vector{SmallBitSet{UInt64}}:
SmallBitSet([1, 2])
SmallBitSet([1, 3])
SmallBitSet([2, 3])
julia> collect(subsets(3, 4))
SmallBitSet{UInt64}[]
```
"""
function subsets(n::Integer, k::Integer)
n >= 0 || error("first argument must be non-negative")
n <= bitsize(UInt) || error("at most $(bitsize(UInt)) elements supported")
Generator(first∘first, Shuffles(missing, (k, n-k)))
end,
function subsets(s::SmallBitSet, k::Integer)
Generator(first∘first, Shuffles(s, (k, length(s)-k)))
end
eltype(::Generator{Shuffles{2,Missing}, typeof(first∘first)}) = SmallBitSet{UInt}
eltype(::Generator{Shuffles{2,S}, typeof(first∘first)}) where S <: SmallBitSet = S
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 17153 | #
# small vectors
#
export SmallVector, fasthash, sum_fast
import Base: ==, Tuple, empty,
length, size, getindex, setindex, rest, split_rest,
zero, map,
+, -, *, sum, prod, maximum, minimum, extrema
"""
SmallVector{N,T} <: AbstractSmallVector{T}
SmallVector{N,T}()
SmallVector{N,T}(iter)
SmallVector{N}(v::AbstractVector{T})
SmallVector{N}(t::Tuple)
SmallVector(v::PackedVector{T})
`SmallVector{N,T}` is an immutable vector type that can hold up to `N` elements of type `T`.
Here `N` can be any (small) positive integer. However, at least for bit integer
and hardware float types, one usually takes `N` to be a power of `2`.
The element type `T` can be omitted when creating the `SmallVector` from an `AbstractVector`
or from a tuple. In the latter case, `T` is determined by promoting the element types of the tuple.
If no argument is given, then an empty vector is returned.
If the `SmallVector` is created from a `PackedVector` `v` and the parameter `N` is omitted,
then it is set to the capacity of `v`.
The unused elements of a `SmallVector{N,T}` are filled with the value `default(T)`, which is
predefined for several types including `Number`. Default values for other types must be defined
explicitly.
Addition and subtraction of two `SmallVector`s are possible even if the vectors have different
capacities. (Of course, their lengths must agree.) The capacity of the result is the smaller
of the arguments' capacities in this case.
See also [`capacity`](@ref), [`$(@__MODULE__).default`](@ref), `promote_type`.
# Examples
```jldoctest
julia> v = SmallVector{8,Int8}(2*x for x in 1:3)
3-element SmallVector{8, Int8}:
2
4
6
julia> w = SmallVector{9}((1, 2.5, 4))
3-element SmallVector{9, Float64}:
1.0
2.5
4.0
julia> v+w
3-element SmallVector{8, Float64}:
3.0
6.5
10.0
```
"""
struct SmallVector{N,T} <: AbstractSmallVector{T}
b::Values{N,T}
n::Int
end
capacity(::Type{<:SmallVector{N}}) where N = N
function Base.FastMath.eq_fast(v::SmallVector{N1,T1}, w::SmallVector{N2,T2}) where
{N1, T1<:Union{FastInteger,FastFloat}, N2, T2<:Union{FastInteger,FastFloat}}
length(v) == length(w) && iszero(padded_sub(v.b, w.b))
end
function ==(v::SmallVector{N1}, w::SmallVector{N2}) where {N1,N2}
N = min(N1, N2)
length(v) == length(w) && all(ntuple(i -> v.b[i] == w.b[i], Val(N)))
end
==(v::SmallVector{N1,T1}, w::SmallVector{N2,T2}) where {N1,T1<:FastInteger,N2,T2<:FastInteger} = @fastmath v == w
==(v::SmallVector{N1,T1}, w::SmallVector{N2,T2}) where {N1,T1<:FastInteger,N2,T2<:FastFloat} = @fastmath v == w
==(v::SmallVector{N1,T1}, w::SmallVector{N2,T2}) where {N1,T1<:FastFloat,N2,T2<:FastInteger} = @fastmath v == w
"""
fasthash(v::SmallVector [, h0::UInt]) -> UInt
Return a hash for `v` that may be computed faster than the standard `hash`
for vectors. This new hash is consistent across all `SmallVector`s
of the same element type, but it may not be compatible with `hash` or
with `fasthash` for a `SmallVector` having a different element type.
Currently, `fasthash` differs from `hash` only if the element type of `v`
is a bit integer type with at most 32 bits, `Bool` or `Char`.
See also `Base.hash`.
# Examples
```jldoctest
julia> v = SmallVector{8,Int8}([1, 5, 6]);
julia> fasthash(v)
0x6466067ab41d0916
julia> fasthash(v) == hash(v)
false
julia> w = SmallVector{16,Int8}(v); fasthash(v) == fasthash(w)
true
julia> w = SmallVector{8,Int16}(v); fasthash(v) == fasthash(w)
false
```
"""
fasthash(v::SmallVector, h0::UInt)
function fasthash(v::SmallVector{N,T}, h0::UInt) where {N,T}
if (T <: BitInteger && bitsize(T) <= 32) || T == Bool || T == Char
Base.hash_integer(bits(v.b), hash(length(v), h0))
else
hash(v, h0)
end
end
convert(::Type{V}, v::AbstractVector) where {N, V <: SmallVector{N}} = V(v)
Tuple(v::SmallVector) = ntuple(i -> v[i], length(v))
# this seems to be fast for length(v) <= 10
length(v::SmallVector) = v.n
size(v::SmallVector) = (length(v),)
rest(v::SmallVector, (r, i) = (Base.OneTo(length(v)), 0)) = @inbounds v[i+1:last(r)]
if VERSION >= v"1.9"
@inline function split_rest(v::SmallVector, n::Int, (r, i) = (Base.OneTo(length(v)), 0))
m = length(r)-n
@boundscheck (n >= 0 && m >= i) || error("impossible number of elements requested")
@inbounds v[i+1:m], v[m+1:end]
end
end
@inline function getindex(v::SmallVector, i::Int)
@boundscheck checkbounds(v, i)
@inbounds v.b[i]
end
#=
@propagate_inbounds getindex(v::V, ii::AbstractVector{<:Integer}) where V <: SmallVector =
V(v[i] for i in ii)
=#
@inline function getindex(v::SmallVector{N,T}, ii::AbstractVector{<:Integer}) where {N,T}
n = length(ii)
@boundscheck begin
n <= N || error("vector cannot have more than $N elements")
checkbounds(v, ii)
end
t = ntuple(Val(N)) do i
@inbounds i <= n ? v[ii[i]] : default(T)
end
SmallVector(Values{N,T}(t), n)
end
@inline function setindex(v::SmallVector, x, i::Integer)
@boundscheck checkbounds(v, i)
SmallVector((@inbounds _setindex(v.b, x, i)), length(v))
end
@inline function addindex(v::SmallVector, x, i::Integer)
@boundscheck checkbounds(v, i)
@inbounds v += setindex(zero(v), x, i)
end
"""
empty(v::V) where V <: SmallVector -> V
empty(v::SmallVector{N}, U::Type) where {N,U} -> SmallVector{N,U}
Called with one argument, return an empty `SmallVector` of the same type as `v`.
Called with two arguments, return an empty `SmallVector` with the same capacity as `v`
and element type `U`.
"""
empty(v::SmallVector),
empty(v::SmallVector, ::Type)
empty(v::SmallVector{N,T}, ::Type{U} = T) where {N,T,U} = SmallVector{N,U}()
default(::Type{SmallVector{N,T}}) where {N,T} = SmallVector{N,T}()
zero(v::SmallVector) = SmallVector(zero(v.b), length(v))
function zeros(::Type{SmallVector{N,T}}, n::Integer) where {N,T}
n <= N || error("vector cannot have more than $N elements")
SmallVector(zero(Values{N,T}), n)
end
function ones(::Type{SmallVector{N,T}}, n::Integer) where {N,T}
n <= N || error("vector cannot have more than $N elements")
t = ntuple(Val(N)) do i
i <= n ? one(T) : zero(T)
end
SmallVector{N,T}(Values{N,T}(t), n)
end
SmallVector{N,T}() where {N,T} = SmallVector{N,T}(default(Values{N,T}), 0)
function SmallVector{N,T}(v::SmallVector{M}) where {N,T,M}
M <= N || length(v) <= N || error("vector cannot have more than $N elements")
t = ntuple(i -> i <= M ? convert(T, v.b[i]) : default(T), Val(N))
SmallVector{N,T}(t, length(v))
end
function SmallVector{N,T}(v::Union{AbstractVector,Tuple}) where {N,T}
n = length(v)
n <= N || error("vector cannot have more than $N elements")
i1 = firstindex(v)
t = ntuple(i -> i <= n ? convert(T, @inbounds(v[i+i1-1])) : default(T), Val(N))
SmallVector{N,T}(t, n)
end
function SmallVector{N,T}(g::Base.Generator{<:Union{AbstractVector,Tuple}}) where {N,T}
v = g.iter
n = length(v)
n <= N || error("vector cannot have more than $N elements")
i1 = firstindex(v)
t = ntuple(i -> i <= n ? convert(T, g.f(@inbounds(v[i+i1-1]))) : default(T), Val(N))
SmallVector{N,T}(t, n)
end
function SmallVector{N,T}(iter) where {N,T}
b = default(Values{N,T})
n = 0
for (i, x) in enumerate(iter)
(n = i) <= N || error("vector cannot have more than $N elements")
b = @inbounds _setindex(b, x, i)
end
SmallVector(b, n)
end
SmallVector{N}(v::AbstractVector{T}) where {N,T} = SmallVector{N,T}(v)
function SmallVector{N}(v::V) where {N, V <: Tuple}
T = promote_type(fieldtypes(V)...)
SmallVector{N,T}(v)
end
+(v::SmallVector) = v
@inline function +(v::SmallVector, w::SmallVector)
@boundscheck length(v) == length(w) || error("vectors must have the same length")
SmallVector(padded_add(v.b, w.b), length(v))
end
-(v::SmallVector) = SmallVector(-v.b, length(v))
@inline function -(v::SmallVector, w::SmallVector)
@boundscheck length(v) == length(w) || error("vectors must have the same length")
SmallVector(padded_sub(v.b, w.b), length(v))
end
Base.FastMath.mul_fast(c, v::SmallVector) = SmallVector(c*v.b, length(v))
*(c::Integer, v::SmallVector{N}) where N = @fastmath c*v
function *(c::Number, v::SmallVector{N}) where N
# multiplication by Inf and NaN does not preserve zero padding
c0 = zero(c)
n = length(v)
t = ntuple(i -> (i <= n ? c : c0) * v.b[i], Val(N))
SmallVector(Values{N}(t), n)
end
*(v::SmallVector, c::Number) = c*v
function sum(v::SmallVector{N,T}) where {N,T}
if T <: Base.BitSignedSmall
sum(Int, v.b)
elseif T <: Base.BitUnsignedSmall
sum(UInt, v.b)
elseif T <: Base.BitInteger
sum(v.b)
else
n = length(v)
n == 0 && return zero(T)
@inbounds s = v[1]
for i in 2:n
@inbounds s += v[i]
end
s
end
end
"""
sum_fast(v::SmallVector{N,T}) where {N,T}
Return the sum of the elements of `v` using `@fastmath` arithmetic
if `T` is `Float32` or `Float64`. Otherwise return `sum(v)`.
See also `Base.@fastmath`.
# Example
```jldoctest
julia> v = SmallVector{4}([-0.0, -0.0])
2-element SmallVector{4, Float64}:
-0.0
-0.0
julia> sum(v), sum_fast(v)
(-0.0, 0.0)
```
"""
sum_fast(v::SmallVector) = sum(v)
sum_fast(v::SmallVector{N,T}) where {N, T <: FastFloat} = @fastmath foldl(+, v.b)
function prod(v::SmallVector{N,T}) where {N,T}
if T <: Base.BitInteger
b = padtail(v.b, length(v), one(T))
if T <: Base.BitSignedSmall
prod(Int, b)
elseif T <: Base.BitUnsignedSmall
prod(UInt, b)
else
prod(b)
end
else
n = length(v)
n == 0 && return one(T)
@inbounds s = v[1]
for i in 2:n
@inbounds s *= v[i]
end
s
end
end
function maximum(v::SmallVector{N,T}; init = missing) where {N,T}
if isempty(v)
if init === missing
error("collection must be non-empty unless `init` is given")
else
return init
end
elseif T <: Unsigned && T <: Base.HWReal
maximum(v.b)
elseif T <: Integer && T <: Base.HWReal
@inbounds maximum(padtail(v.b, length(v), typemin(T)))
else
invoke(maximum, Tuple{AbstractVector}, v)
end
end
function minimum(v::SmallVector{N,T}; init = missing) where {N,T}
if isempty(v)
if init === missing
error("collection must be non-empty unless `init` is given")
else
return init
end
elseif T <: Integer && T <: Base.HWReal
@inbounds minimum(padtail(v.b, length(v), typemax(T)))
else
invoke(minimum, Tuple{AbstractVector}, v)
end
end
extrema(v::SmallVector; init::Tuple{Any,Any} = (missing, missing)) =
(minimum(v; init = init[1]), maximum(v; init = init[2]))
@propagate_inbounds push(v::SmallVector, xs...) = append(v, xs)
# TODO: needed?
@inline function push(v::SmallVector{N}, x) where N
n = length(v)
@boundscheck n < N || error("vector cannot have more than $N elements")
@inbounds SmallVector(_setindex(v.b, x, n+1), n+1)
end
@inline function pop(v::SmallVector{N,T}) where {N,T}
n = length(v)
@boundscheck iszero(n) && error("vector must not be empty")
@inbounds SmallVector(_setindex(v.b, default(T), n), n-1), v[n]
end
@inline function pushfirst(v::SmallVector{N}, xs...) where N
n = length(xs)+length(v)
@boundscheck n <= N || error("vector cannot have more $N elements")
SmallVector(pushfirst(v.b, xs...), n)
end
@inline function popfirst(v::SmallVector)
n = length(v)
@boundscheck iszero(n) && error("vector must not be empty")
c, x = popfirst(v.b)
SmallVector(c, n-1), x
end
@inline function insert(v::SmallVector{N}, i::Integer, x) where N
n = length(v)
@boundscheck begin
1 <= i <= n+1 || throw(BoundsError(v, i))
n < N || error("vector cannot have more than $N elements")
end
@inbounds SmallVector(insert(v.b, i, x), n+1)
end
@inline function duplicate(v::SmallVector{N,T}, i::Integer) where {N,T}
@boundscheck begin
checkbounds(v, i)
length(v) < N || error("vector cannot have more than $N elements")
end
t = ntuple(Val(N)) do j
j <= i ? v.b[j] : v.b[j-1]
end
SmallVector(Values{N,T}(t), length(v)+1)
end
@propagate_inbounds deleteat(v::SmallVector, i::Integer) = first(popat(v, i))
@inline function popat(v::SmallVector, i::Integer)
n = length(v)
@boundscheck checkbounds(v, i)
c, x = @inbounds popat(v.b, i)
SmallVector(c, n-1), x
end
@propagate_inbounds append(v::SmallVector, ws...) = foldl(append, ws; init = v)
@propagate_inbounds append(v::SmallVector, w) = foldl(push, w; init = v)
@inline function append(v::SmallVector{N,T}, w::Union{AbstractVector,Tuple}) where {N,T}
n = length(v)
m = n+length(w)
@boundscheck m <= N || error("vector cannot have more than $N elements")
t = ntuple(Val(N)) do i
@inbounds n < i <= m ? convert(T, w[i-n]) : v.b[i]
end
SmallVector{N,T}(Values{N,T}(t), m)
end
@propagate_inbounds function prepend(v::SmallVector, ws...)
foldr((w, v) -> prepend(v, w), ws; init = v)
end
@inline function prepend(v::SmallVector{N,T}, w::Union{AbstractVector,Tuple}) where {N,T}
n = length(v)
m = n+length(w)
@boundscheck m <= N || error("vector cannot have more than $N elements")
SmallVector{N,T}(prepend(v.b, w), m)
end
prepend(v::SmallVector{N,T}, w) where {N,T} = append(SmallVector{N,T}(w), v)
support(v::SmallVector) = convert(SmallBitSet{UInt}, bits(map(!iszero, v.b)))
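# The support is read straight off a Bool mask of the padded storage
# (illustration only, assuming the full package context):
let v = SmallVector{8,Int8}([0, 3, 0, 2])
    @assert support(v) == SmallBitSet([2, 4])
end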
"""
map(f, v::SmallVector...) -> SmallVector
Apply `f` to the argument vectors elementwise and stop when one of them is exhausted.
Note that the capacity of the resulting `SmallVector` is the minimum of the argument
vectors' capacities.
See also [`capacity`](@ref), `Base.map(f, v::AbstractVector...)`,
[Section "Broadcasting"](@ref sec-broadcasting).
# Examples
```jldoctest
julia> v = SmallVector{8}(1:3); w = SmallVector{4}(2.0:4.0); map(*, v, w)
3-element SmallVector{4, Float64}:
2.0
6.0
12.0
julia> v = SmallVector{8}('a':'e'); w = SmallVector{4}('x':'z'); map(*, v, w)
3-element SmallVector{4, String}:
"ax"
"by"
"cz"
```
"""
function map(f::F, vs::Vararg{SmallVector,M}) where {F,M}
n = minimum(length, vs)
_map(f, n, vs...)
end
function map_fast(f::F, n, vs::Vararg{SmallVector{N},M}) where {F,N,M}
bs = map(v -> v.b, vs)
SmallVector(map(f, bs...), n)
end
function map_fast_pad(f::F, n, vs::Vararg{SmallVector{N},M}) where {F,N,M}
bs = map(v -> v.b, vs)
b = map(f, bs...)
SmallVector(padtail(b, n), n)
end
#
# broadcast
#
using Base.Broadcast: AbstractArrayStyle, DefaultArrayStyle, Broadcasted, flatten
import Base.Broadcast: BroadcastStyle
"""
$(@__MODULE__).SmallVectorStyle <: Broadcast.AbstractArrayStyle{1}
The broadcasting style used for `SmallVector`.
See also [`SmallVector`](@ref), `Broadcast.AbstractArrayStyle`.
"""
struct SmallVectorStyle <: AbstractArrayStyle{1} end
BroadcastStyle(::Type{<:SmallVector}) = SmallVectorStyle()
BroadcastStyle(::SmallVectorStyle, ::DefaultArrayStyle{0}) = SmallVectorStyle()
BroadcastStyle(::SmallVectorStyle, ::DefaultArrayStyle{N}) where N = DefaultArrayStyle{N}()
function copy(bc::Broadcasted{SmallVectorStyle})
bcflat = flatten(bc)
i = findfirst(x -> x isa SmallVector, bcflat.args)
n = length(bcflat.args[i])
_map(bcflat.f, n, bcflat.args...)
end
_eltype(v::Union{AbstractVector,Tuple}) = eltype(v)
_eltype(x::T) where T = T
_capacity(v::SmallVector) = capacity(v)
_capacity(_) = typemax(Int)
_getindex(v::SmallVector, i) = @inbounds v.b[i]
_getindex(v::Tuple, i) = i <= length(v) ? @inbounds(v[i]) : default(v[1])
_getindex(x, i) = x
function _map(f::F, n, vs::Vararg{Any,M}) where {F,M}
N = minimum(_capacity, vs)
TT = map(_eltype, vs)
U = Core.Compiler.return_type(f, Tuple{TT...})
if isconcretetype(U)
tt = ntuple(Val(N)) do i
ntuple(j -> _getindex(vs[j], i), Val(M))
end
t = ntuple(Val(N)) do i
i <= n ? f(tt[i]...) : default(U)
end
SmallVector(Values{N,U}(t), n)
else
VT = map(vs) do v
T = typeof(v)
T <: SmallVector ? AbstractVector{eltype(T)} : T
end
w = invoke(map, Tuple{F,VT...}, f, vs...)
SmallVector{N}(w)
end
end
_map(f::Union{typeof.(
(&, round, floor, ceil, trunc, abs, abs2, sign, sqrt)
)...}, n, vs::SmallVector{N}...) where N = map_fast(f, n, vs...)
_map(::typeof(*), n, vs::SmallVector{N,<:Integer}...) where N = map_fast(*, n, vs...)
_map(::typeof(signbit), n, v::SmallVector{N,<:Integer}) where N = map_fast(signbit, n, v)
_map(f::Union{typeof.(
(+, -, *, ~, |, xor, nand, nor, ==, !=, <, >, <=, >=, ===, isequal, signbit)
)...}, n, vs::SmallVector{N}...) where N = map_fast_pad(f, n, vs...)
_map(::typeof(/), n,
v::SmallVector{N,<:Union{Integer,AbstractFloat}},
w::SmallVector{N,<:Union{Integer,AbstractFloat}}) where N =
map_fast_pad(/, n, v, w)
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 6328 | #
# extensions of StaticVectors.jl
#
using StaticVectors
using Base: BitInteger, @assume_effects
@inline function _setindex(v::Values{N,T}, x, i::Integer) where {N,T}
@boundscheck checkbounds(v, i)
t = ntuple(Val(N)) do j
ifelse(j == i, convert(T, x), v[j])
end
Values{N,T}(t)
end
function padtail(v::Values{N,T}, i::Integer, x = default(T)) where {N,T}
t = ntuple(Val(N)) do j
ifelse(j <= i, v[j], convert(T, x))
end
Values{N,T}(t)
end
pushfirst(v::Values, xs...) = prepend(v, xs)
function prepend(v::Values{N,T}, w::Union{AbstractVector,Tuple}) where {N,T}
n = length(w)
t = ntuple(Val(N)) do i
@inbounds i <= n ? convert(T, w[i]) : v[i-n]
end
Values{N,T}(t)
end
popfirst(v::Values) = popat(v, 1)
@inline function insert(v::Values{N,T}, i::Integer, x) where {N,T}
@boundscheck checkbounds(v, i)
v = Tuple(v)
t = ntuple(Val(N)) do j
if j < i
v[j]
elseif j == i
convert(T, x)
else
v[j-1]
end
end
Values{N,T}(t)
end
@propagate_inbounds deleteat(v::Values, i::Integer) = first(popat(v, i))
@inline function popat(v::Values{N,T}, i::Integer) where {N,T}
@boundscheck checkbounds(v, i)
t = ntuple(Val(N)) do j
if j < i
v[j]
elseif j < N
v[j+1]
else
default(T)
end
end
Values{N,T}(t), v[i]
end
"""
$(@__MODULE__).default(::Type{T}) where T -> T
$(@__MODULE__).default(::T) where T -> T
Return the default value of type `T` used for filling unused elements of a `SmallVector`.
This must be defined as `zero(T)` if `T` supports algebraic operations. Otherwise it can
be any value of type `T`.
This function has methods for number types, bits types (including `Char`, `SmallVector`
and `SmallBitSet` types), `String` and `Symbol`. Methods for other types must be defined
explicitly.
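For example, `default(Int) == 0`, `default(Char) == Char(0)` and `default(String) == ""`.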
See also `Base.isbitstype`.
"""
default(::T) where T = default(T)
function default(::Type{T}) where T
if isbitstype(T)
default_bitstype(T)
elseif Int <: T
0
else
error("no default value defined for type $T")
end
end
Base.@assume_effects :total function default_bitstype(::Type{T}) where T
m8, m1 = divrem(Base.packedsize(T), 8)
t8 = ntuple(Returns(UInt64(0)), Val(m8))
t1 = ntuple(Returns(UInt8(0)), Val(m1))
reinterpret(T, (t8, t1))
end
default(::Type{T}) where T <: Number = zero(T)
default(::Type{Char}) = Char(0)
default(::Type{String}) = ""
default(::Type{Symbol}) = Symbol()
default(::Type{Values{N,T}}) where {N,T} = Values(ntuple(Returns(default(T)), Val(N)))
function padded_add(v::TupleVector{N1,T1}, w::TupleVector{N2,T2}) where {N1,T1,N2,T2}
T = promote_type(T1, T2)
N = min(N1, N2)
Values{N,T}(ntuple(i -> v[i]+w[i], Val(N)))
end
function padded_sub(v::TupleVector{N1,T1}, w::TupleVector{N2,T2}) where {N1,T1,N2,T2}
T = promote_type(T1, T2)
N = min(N1, N2)
Values{N,T}(ntuple(i -> v[i]-w[i], Val(N)))
end
#
# bit conversions
#
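# `vec`/`unvec` wrap and unwrap tuple elements in `VecElement` so that they can
# be passed to and from `llvmcall` as LLVM vectors.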
vec(t::NTuple{N}) where N = ntuple(i -> VecElement(t[i]), Val(N))
unvec(t::NTuple{N,VecElement}) where N = ntuple(i -> t[i].value, Val(N))
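# `bits(v)` reinterprets the elements of `v` as a single unsigned integer,
# zero-extended to the next power-of-two bit size.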
@generated function bits(v::TupleVector{N,T}) where {N, T <: Union{BitInteger,Char}}
s = bitsize(T)
b = N*s
c = nextpow(2, b)
U = Symbol(:UInt, c)
if b == c
ir = """
%b = bitcast <$N x i$s> %0 to i$b
ret i$b %b
"""
else
ir = """
%b = bitcast <$N x i$s> %0 to i$b
%c = zext i$b %b to i$c
ret i$c %c
"""
end
quote
$(Expr(:meta, :inline))
Base.llvmcall($ir, $U, Tuple{NTuple{N, VecElement{T}}}, vec(Tuple(v)))
end
end
@generated function bits(v::TupleVector{N,T}) where {N, T <: Union{Int128,UInt128}}
n = nextpow(2, N)
U = Symbol(:UInt, n*128)
z = ntuple(Returns(zero(T)), n-N)
quote
t = (v.v..., $z...)
reinterpret($U, t)
end
end
@generated function bits(v::TupleVector{N,Bool}) where N
c = max(nextpow(2, N), 8)
U = Symbol(:UInt, c)
if N == c
ir = """
%b = trunc <$N x i8> %0 to <$N x i1>
%c = bitcast <$N x i1> %b to i$N
ret i$N %c
"""
else
ir = """
%a = trunc <$N x i8> %0 to <$N x i1>
%b = bitcast <$N x i1> %a to i$N
%c = zext i$N %b to i$c
ret i$c %c
"""
end
quote
$(Expr(:meta, :inline))
Base.llvmcall($ir, $U, Tuple{NTuple{N, VecElement{Bool}}}, vec(Tuple(v)))
end
end
@generated function _convert(::Type{Values{N,T}}, x::U) where {N, T <: BitInteger, U <: Unsigned}
s = bitsize(T)
b = N*s
c = bitsize(U)
if b == c
ir = """
%b = bitcast i$b %0 to <$N x i$s>
ret <$N x i$s> %b
"""
elseif b > c
ir = """
%b = zext i$c %0 to i$b
%a = bitcast i$b %b to <$N x i$s>
ret <$N x i$s> %a
"""
else
ir = """
%b = trunc i$c %0 to i$b
%a = bitcast i$b %b to <$N x i$s>
ret <$N x i$s> %a
"""
end
quote
$(Expr(:meta, :inline))
v = Base.llvmcall($ir, NTuple{N, VecElement{T}}, Tuple{$U}, x)
Values{N,T}(unvec(v))
end
end
@generated function _convert(::Type{Values{N,Bool}}, x::U) where {N, U <: Unsigned}
c = bitsize(U)
N2 = nextpow(2, N) # work around an LLVM bug
if N2 == c
ir = """
%b = bitcast i$N2 %0 to <$N2 x i1>
%c = zext <$N2 x i1> %b to <$N2 x i8>
ret <$N2 x i8> %c
"""
elseif N2 > c
ir = """
%a = zext i$c %0 to i$N2
%b = bitcast i$N2 %a to <$N2 x i1>
%c = zext <$N2 x i1> %b to <$N2 x i8>
ret <$N2 x i8> %c
"""
else
ir = """
%a = trunc i$c %0 to i$N2
%b = bitcast i$N2 %a to <$N2 x i1>
%c = zext <$N2 x i1> %b to <$N2 x i8>
ret <$N2 x i8> %c
"""
end
quote
$(Expr(:meta, :inline))
v2 = Base.llvmcall($ir, NTuple{$N2, VecElement{Bool}}, Tuple{$U}, x)
v = ntuple(Base.Fix1(getindex, v2), Val(N))
Values{N,Bool}(unvec(v))
end
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 1910 | using BangBang
@testset "SmallBitSet !!" begin
s = SmallBitSet(1:3)
t = SmallBitSet(3:5)
x = 6
@test_inferred push!!(s, x) push(s, x)
@test_inferred pop!!(s) pop(s)
@test_inferred delete!!(s, x) delete(s, x)
@test_broken filter!!(isodd, s) == filter(isodd, s)
@test_inferred union!!(s, t) union(s, t)
@test_inferred intersect!!(s, t) intersect(s, t)
@test_inferred setdiff!!(s, t) setdiff(s, t)
@test_inferred symdiff!!(s, t) symdiff(s, t)
end
@testset "SmallVector !!" begin
v = SmallVector{8}(1:6)
w = SmallVector{8}(4:9)
x = -1
i = 5
@test_inferred setindex!!(v, x, i) setindex(v, x, i)
@test_inferred push!!(v, x) push(v, x)
@test_inferred pushfirst!!(v, x) pushfirst(v, x)
@test_inferred pop!!(v) pop(v)
@test_inferred popfirst!!(v) popfirst(v)
@test_broken insert!!(v, i, x) == insert(v, i, x)
@test_inferred deleteat!!(v, i) deleteat(v, i)
@test_inferred append!!(v, (x,)) append(v, (x,))
@test_broken prepend!!(v, (x,)) == prepend(v, (x,))
@test_broken filter!!(isodd, v) == filter(isodd, v)
@test_broken isdefined(BangBang, :map!!)
@test_inferred add!!(v, w) v+w
end
@testset "PackedVector !!" begin
v = PackedVector{UInt64,8,Int8}(1:6)
w = PackedVector{UInt64,8,Int8}(4:9)
x = -1
i = 5
@test_inferred setindex!!(v, x, i) setindex(v, x, i)
@test_inferred push!!(v, x) push(v, x)
@test_inferred pushfirst!!(v, x) pushfirst(v, x)
@test_inferred pop!!(v) pop(v)
@test_inferred popfirst!!(v) popfirst(v)
@test_broken insert!!(v, i, x) == insert(v, i, x)
@test_inferred deleteat!!(v, i) deleteat(v, i)
@test_inferred append!!(v, (x,)) append(v, (x,))
@test_broken prepend!!(v, (x,)) == prepend(v, (x,))
@test_broken filter!!(isodd, v) == filter(isodd, v)
@test_broken isdefined(BangBang, :map!!)
@test_inferred add!!(v, w) v+w
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 495 | using SmallCollections: bitsize, top_set_bit
BitIntegers.@define_integers 440
@testset "bitsize" begin
for T in (Int8, UInt16, Int32, UInt64, Int128, UInt256, Int440)
@test_inferred bitsize(T) 8*sizeof(T)
end
end
@testset "top_set_bit" begin
for T in (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, UInt512, UInt440)
@test_inferred top_set_bit(T(0)) 0
m = bitsize(T)
x = T(1) << (m-3) - T(3)
@test_inferred top_set_bit(x) m-3
end
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 12074 | using SmallCollections: bitsize
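# does the value `x` fit into `N` bits of its (signed or unsigned) type?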
function checkvalue(::Type{Bool}, N, x::T) where T
@assert bitsize(T) >= N
if bitsize(T) == N
true
elseif T <: Signed
-one(T) << (N-1) <= x < one(T) << (N-1)
else
x < one(T) << N
end
end
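# a PackedVector is valid if all bits above its n*N used bits are zero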
function isvalid(v::PackedVector{U,N,T}) where {U,N,T}
n = length(v)
mask = one(U) << (n*N) - one(U)
iszero(v.m & ~mask)
end
function packed_rand(N, T)
if T <: Unsigned || T == Bool
rand(T(0):T(BigInt(2)^N-1))
else
rand(T(-BigInt(2)^(N-1)):T(BigInt(2)^(N-1)-1))
end
end
packed_rand(N, T, n) = T[packed_rand(N, T) for _ in 1:n]
@testset "PackedVector" begin
for T in (Bool, Int8, UInt16, Int64, UInt128), N in (1, 2, 5, 8, max(1, bitsize(T)÷2-1), bitsize(T)), U in (UInt8, UInt32, UInt64, UInt128)
bitsize(T) < N && continue
c = bitsize(U)÷N
c == 0 && continue
for m in (0, 1, round(Int, 0.7*c), c-1, c)
u = packed_rand(N, T, m)
v = @inferred PackedVector{U,N,T}(u)
@test v === @inferred copy(v)
@test_inferred capacity(v) c Int
@test_inferred v == u true
@test isvalid(v)
@test_inferred collect(v) u Vector{T}
v2 = PackedVector{U,N,T}(u)
@test_inferred v == v2 true
if (N+1)*m <= bitsize(U) && N+1 <= bitsize(T)
v3 = PackedVector{U,N+1,T}(u)
@test_inferred v == v3 true
end
if !isempty(u)
i = rand(1:m)
x = packed_rand(N, T)
while x == v[i]
x = packed_rand(N, T)
end
v5 = setindex(v, x, i)
@test_inferred v == v5 false
end
if m < c
v6 = PackedVector{U,N,T}(push!(copy(u), packed_rand(N, T)))
@test_inferred v == v6 false
end
if !isempty(u)
u7 = copy(u)
pop!(u7)
v7 = PackedVector{U,N,T}(u7)
@test_inferred v == v7 false
end
@test_inferred hash(v) hash(u) UInt
v8 = PackedVector{U,N,T}(u)
# @test_inferred fasthash(v) fasthash(v8) UInt
@test_inferred length(v) length(u) Int
@test_inferred PackedVector{U,N,T}() PackedVector{U,N,T}(())
@test_inferred empty(v) PackedVector{U,N,T}()
@test_inferred empty(v, Int) PackedVector{U,N,Int}()
end
end
end
@testset "PackedVector indices" begin
for T in (Bool, Int8, UInt16, Int64, UInt128), N in (1, 2, 5, 8, max(1, bitsize(T)÷2-1), bitsize(T)), U in (UInt8, UInt32, UInt64, UInt128)
bitsize(T) < N && continue
c = bitsize(U)÷N
c == 0 && continue
for m in (0, 1, round(Int, 0.7*c), c-1, c)
u = packed_rand(N, T, m)
v = @inferred PackedVector{U,N,T}(u)
if isempty(u)
@test_throws Exception first(v)
@test_throws Exception last(v)
else
@test_inferred first(v) first(u) T
@test_inferred last(v) last(u) T
end
x = packed_rand(N, T)
for i in -1:length(u)+1
if 1 <= i <= length(u)
@test_inferred v[i] u[i] T
w = @test_inferred setindex(v, x, i) setindex!(copy(u), x, i) v
@test isvalid(w)
else
@test_throws Exception v[i]
@test_throws Exception setindex(v, x, i)
end
end
for i in 0:m, j in i-1:m+1
if checkbounds(Bool, u, i:j)
w = @test_inferred v[i:j] u[i:j] v
@test isvalid(w)
else
@test_throws Exception v[i:j]
end
end
end
end
end
@testset "PackedVector zeros" begin
for T in (Bool, Int8, UInt16, Int64, UInt128), N in (1, 2, 5, 8, max(1, bitsize(T)÷2-1), bitsize(T)), U in (UInt8, UInt32, UInt64, UInt128)
bitsize(T) < N && continue
c = bitsize(U)÷N
c == 0 && continue
for m in (0, 1, round(Int, 0.7*c), c-1, c)
u = zeros(T, m)
v = PackedVector{U,N,T}(u)
@test_inferred iszero(v) true
w = @test_inferred zero(v) u v
@test isvalid(w)
w = @test_inferred zeros(PackedVector{U,N,T}, m) v
@test isvalid(w)
T <: Signed && N == 1 && continue
w = @test_inferred ones(PackedVector{U,N,T}, m) ones(T, m) PackedVector{U,N,T}
@test isvalid(w)
end
end
end
@testset "PackedVector push/pop" begin
for T in (Bool, Int8, UInt16, Int64, UInt128),
N in (1, 2, 5, 8, max(1, bitsize(T)÷2-1), bitsize(T)),
U in (UInt8, UInt32, UInt64, UInt128)
bitsize(T) < N && continue
c = bitsize(U)÷N
c == 0 && continue
for m in (0, 1, round(Int, 0.7*c), c-1, c)
u = packed_rand(N, T, m)
v = @inferred PackedVector{U,N,T}(u)
x = packed_rand(N, T)
y = packed_rand(N, T)
@test_inferred push(v) v
@test_inferred pushfirst(v) v
if length(u) == c
@test_throws Exception push(v, x)
@test_throws Exception push(v, x, y)
@test_throws Exception pushfirst(v, x)
@test_throws Exception pushfirst(v, x, y)
else
w = @test_inferred push(v, x) push!(copy(u), x) v
@test isvalid(w)
w = @test_inferred pushfirst(v, x) pushfirst!(copy(u), x) v
@test isvalid(w)
if length(u) <= c-2
w = @test_inferred push(v, x, y) push(push(v, x), y)
@test isvalid(w)
w = @test_inferred pushfirst(v, x, y) pushfirst(pushfirst(v, y), x)
@test isvalid(w)
end
end
if isempty(u)
@test_throws Exception pop(v)
@test_throws Exception popfirst(v)
else
w, _ = @test_inferred pop(v) (deleteat!(copy(u), length(u)), last(u)) (v, last(v))
@test isvalid(w)
w, _ = @test_inferred popfirst(v) (deleteat!(copy(u), 1), first(u)) (v, first(v))
@test isvalid(w)
end
for i in (-1, 0, 1, 2, length(u), length(u)+1)
if 1 <= i <= length(u)+1 && length(u) < c
w = @test_inferred insert(v, i, x) insert!(copy(u), i, x) v
@test isvalid(w)
if i <= length(u)
w = @test_inferred duplicate(v, i) insert(v, i, v[i])
@test isvalid(w)
else
@test_throws Exception duplicate(v, i)
end
else
@test_throws Exception insert(v, i, x)
@test_throws Exception duplicate(v, i)
end
if 1 <= i <= length(u)
w = @test_inferred deleteat(v, i) deleteat!(copy(u), i) v
@test isvalid(w)
else
@test_throws Exception deleteat(v, i)
end
end
@test_inferred append(v) v
@test_inferred prepend(v) v
xy = [x, y]
if length(u) <= c-2
w = @test_inferred append(v, PackedVector{U,N,T}(xy)) push(v, x, y)
@test isvalid(w)
w = @test_inferred append(v, xy[i] for i in 1:2) push(v, x, y)
@test isvalid(w)
w = @test_inferred append(v, (x,), [y]) push(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, PackedVector{U,N,T}(xy)) pushfirst(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, xy[i] for i in 1:2) pushfirst(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, (x,), [y]) pushfirst(v, x, y)
@test isvalid(w)
else
@test_throws Exception append(v, PackedVector{U,N,T}(xy))
@test_throws Exception append(v, xy[i] for i in 1:2)
@test_throws Exception append(v, (x,), [y])
@test_throws Exception prepend(v, PackedVector{U,N,T}(xy))
@test_throws Exception prepend(v, xy[i] for i in 1:2)
@test_throws Exception prepend(v, (x,), [y])
end
if T <: Integer
@test_inferred filter(isodd, v) filter(isodd, u) v
end
end
end
end
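# reduce every entry modulo 2^N (with sign extension for signed types),
# mirroring how PackedVector entries overflow in arithmetic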
function red_mod(N, v::AbstractVector{T}) where T <: Integer
k = bitsize(T)-N
map(x -> (x << k) >> k, v)
end
@testset "PackedVector add/mul" begin
for U in (UInt8, UInt16, UInt32, UInt64, UInt128),
T in (Int8, UInt8, Int16, UInt16, Int32, UInt32),
N in 1:bitsize(T)
c = bitsize(U)÷N
c == 0 && continue
for n in 0:c
u1 = packed_rand(N, T, n)
v1 = PackedVector{U,N,T}(u1)
u2 = packed_rand(N, T, n)
v2 = PackedVector{U,N,T}(u2)
w = @test_inferred +v1 red_mod(N, +u1) v1
@test isvalid(w)
w = @test_inferred -v1 red_mod(N, -u1) v1
@test isvalid(w)
w = @test_inferred v1+v2 red_mod(N, u1+u2) v1
@test isvalid(w)
w = @test_inferred v1-v2 red_mod(N, u1-u2) v1
@test isvalid(w)
cc = packed_rand(N, T)
w = @test_inferred cc*v1 red_mod(N, cc*u1) v1
@test isvalid(w)
for i in -1:length(u1)+1
x = packed_rand(N, T)
if checkbounds(Bool, u1, i) && checkvalue(Bool, N, u1[i]+x)
w = @test_inferred addindex(v1, x, i) setindex!(copy(u1), u1[i]+x, i) v1
@test isvalid(w)
else
@test_throws Exception addindex(v1, x, i)
end
end
end
end
end
@testset "PackedVector sum/max" begin
for U in (UInt8, UInt16, UInt32, UInt64, UInt128),
T in (Int8, UInt8, Int16, UInt16, Int32, UInt32),
N in 1:bitsize(T)
c = bitsize(U)÷N
c == 0 && continue
for n in 0:c
u = packed_rand(N, T, n)
v = PackedVector{U,N,T}(u)
@test_inferred sum(v) sum(u)
for f in (maximum, minimum)
if isempty(u)
@test_throws Exception f(v)
@test_inferred f(v; init = zero(T)) f(u; init = zero(T))
else
@test_inferred f(v) f(u)
end
end
end
end
end
@testset "PackedVector rest" begin
v = PackedVector{UInt64,4,Int16}(1:2)
w..., = v
@test w == v && typeof(w) == typeof(v) && isvalid(w)
x1, w... = v
@test w == v[2:end] && typeof(w) == typeof(v) && isvalid(w)
x1, x2, w... = v
@test w == v[3:end] && typeof(w) == typeof(v) && isvalid(w)
@test_throws Exception x1, x2, x3, w... = v
if VERSION >= v"1.9"
v = PackedVector{UInt32,5,UInt8}(1:3)
w..., y1 = v
@test w == v[1:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
x1, w..., y1 = v
@test w == v[2:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
x1, x2, w..., y1 = v
@test w == v[3:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
@test_throws Exception x1, x2, x3, w..., y1 = v
v = PackedVector{UInt128,7,Int32}(1:4)
w..., y1, y2 = v
@test w == v[1:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
x1, w..., y1, y2 = v
@test w == v[2:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
x1, x2, w..., y1, y2 = v
@test w == v[3:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
@test_throws Exception x1, x2, x3, w..., y1, y2 = v
end
end
@testset "PackedVector support" begin
for U in (UInt8, UInt32, UInt128), T in (Int8, UInt32), N in 1:bitsize(T)
c = bitsize(U)÷N
c == 0 && continue
for m in 0:min(c, bitsize(UInt))
u = T <: Unsigned ? rand(T(0):T(1), m) : rand(T(-1):T(0), m)
v = @inferred PackedVector{U,N,T}(u)
@test_inferred support(v) Set{Int}(i for i in 1:m if u[i] != 0) SmallBitSet
end
end
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 741 | using Test, SmallCollections, BitIntegers
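# `@test_inferred cmd good [type]` checks that `cmd` is type stable (via `@inferred`),
# that its result is equal to `good`, and that the result has the expected type
# (`typeof(good)` by default, or the given `type`). It returns the result.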
macro test_inferred(cmd, good)
esc(quote
let result = @inferred($cmd), good = $good
@test isequal(result, good)
@test typeof(result) == typeof(good)
result
end
end)
end
macro test_inferred(cmd, good, type)
esc(quote
let result = @inferred($cmd), good = $good, type = $type
@test isequal(result, good)
if type isa Type
@test result isa type
else
@test result isa typeof(type)
end
result
end
end)
end
include("bits.jl")
include("smallbitset.jl")
include("smallvector.jl")
include("packedvector.jl")
include("bangbang.jl")
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 10922 | using SmallCollections: bitsize
unsigned_types = (UInt8, UInt64, UInt256, UInt440)
@testset "SmallBitSet" begin
@test_inferred SmallBitSet([1,2,3]) SmallBitSet{UInt64}([1,2,3])
for U in unsigned_types
s = SmallBitSet{U}()
@test_inferred isempty(s) true
@test_inferred empty(s) SmallBitSet{U}()
m = bitsize(U)
@test_throws Exception SmallBitSet{U}([1, 2, 'x'])
@test_throws Exception SmallBitSet{U}([1, 2, m+1])
@test_throws Exception SmallBitSet{U}([0, 1, 2])
t = Set{Int}(rand(1:m, rand(0:m)))
s = @inferred SmallBitSet{U}(t)
@test_inferred capacity(s) m
@test_inferred isempty(s) isempty(t)
@test_inferred length(s) == length(t) true
@test_inferred s == t true
@test_inferred copy(s) === s true
@test_inferred s == s true
v = collect(Float32, t)
@test_inferred SmallBitSet{U}(v) s
s3 = @inferred SmallBitSet{UInt512}(s)
@test s3 == s
@test_inferred hash(s) hash(t)
@test_inferred fasthash(s) fasthash(s3)
@test_inferred Set(s) t
end
end
@testset "SmallBitSet first/min etc" begin
for U in unsigned_types
m = bitsize(U)
t = Set{Int}(rand(1:m, rand(1:m)))
s = @inferred SmallBitSet{U}(t)
if isempty(t)
@test_throws Exception minimum(s)
@test_inferred minimum(s; init = m+1) minimum(t; init = m+1)
@test_throws Exception maximum(s)
@test_inferred maximum(s; init = 0) maximum(t; init = 0)
@test_throws Exception extrema(s)
@test_inferred extrema(s; init = (m+1, 0)) extrema(t; init = (m+1, 0))
else
@test_inferred first(s) minimum(t)
@test_inferred minimum(s) minimum(t)
@test_inferred last(s) maximum(t)
@test_inferred maximum(s) maximum(t)
@test_inferred extrema(s) extrema(t)
end
end
end
@testset "SmallBitSet in/subset" begin
for U in unsigned_types
m = bitsize(U)
t = Set{Int}(rand(1:m, rand(0:m)))
s = @inferred SmallBitSet{U}(t)
for i in 1:m
@test_inferred i in s i in t
@test_inferred Int16(i) in s i in t
@test_inferred Float64(i) in s i in t
end
@test_inferred 1.5 in s false
@test_inferred 'x' in s false
t2 = Set{Int}(rand(1:m, rand(0:2)))
s2 = @inferred SmallBitSet{U}(t2)
@test_inferred issubset(s2, s) issubset(t2, t)
@test_inferred issubset(s2, t) issubset(t2, t)
@test_inferred issubset(t2, s) issubset(t2, t)
end
end
@testset "SmallBitSet push/pop etc" begin
for U in unsigned_types
m = bitsize(U)
t = Set{Int}(rand(1:m, rand(0:m)))
s = @inferred SmallBitSet{U}(t)
i = rand(1:m)
@test_inferred push(s, i) push!(copy(t), i) SmallBitSet{U}
@test_inferred push(s, Float32(i)) push!(copy(t), i) SmallBitSet{U}
@test_throws Exception push(s, 0)
@test_throws Exception push(s, m+1)
@test_throws Exception push(s, 'x')
@test_inferred push(s, 3, 4, 5, 6) push!(copy(t), 3, 4, 5, 6) SmallBitSet{U}
if !isempty(t)
i = maximum(t)
@test_inferred pop(s) (delete!(copy(t), i), i) Tuple{SmallBitSet{U}, Int}
i = rand(t)
@test_inferred pop(s, i) (delete!(copy(t), i), i) Tuple{SmallBitSet{U}, Int}
@test_inferred pop(s, Float64(i)) (delete!(copy(t), i), i) Tuple{SmallBitSet{U}, Float64}
@test_inferred pop(s, 0, -1) (s, -1)
@test_throws Exception pop(s, 0)
@test_throws Exception pop(s, 'x')
@test_inferred delete(s, i) delete!(copy(t), i) SmallBitSet{U}
@test_inferred delete(s, m+1) s
end
@test_inferred filter(isodd, s) filter(isodd, t) s
end
end
@testset "SmallBitSet union etc" begin
for U1 in unsigned_types, U2 in unsigned_types
m1 = bitsize(U1)
m2 = bitsize(U2)
U = promote_type(U1, U2)
t1 = Set{Int}(rand(1:m1, rand(0:m1)))
s1 = @inferred SmallBitSet{U1}(t1)
t2 = Set{Int}(rand(1:m2, rand(0:m2)))
s2 = @inferred SmallBitSet{U2}(t2)
@test_inferred union(s1, s2) union(t1, t2) SmallBitSet{U}
@test_inferred intersect(s1, s2) intersect(t1, t2) SmallBitSet{U1}
@test_inferred intersect(s1, t2) intersect(t1, t2) SmallBitSet{U1}
@test_inferred setdiff(s1, s2) setdiff(t1, t2) SmallBitSet{U1}
@test_inferred setdiff(s1, t2) setdiff(t1, t2) SmallBitSet{U1}
@test_inferred symdiff(s1, s2) symdiff(t1, t2) SmallBitSet{U}
end
end
@testset "subsets(n,k)" begin
for n in [-1, 0, 1, 2, 10], k in [-1, 0, 1, n-1, n, n+1]
if n < 0
@test_throws Exception subsets(n, k)
continue
end
ss = @inferred subsets(n, k)
if 0 <= k <= n
@test_inferred length(ss) binomial(n, k)
else
@test_inferred length(ss) 0
end
@test eltype(ss) == SmallBitSet{UInt}
ssv = @inferred collect(ss)
@test length(ssv) == length(ss) == length(unique(ssv))
length(ss) == 0 && continue
@test unique(map(length, ssv)) == [k]
end
for U in unsigned_types, a in map(SmallBitSet{U}, [Int[], [3], [bitsize(U)-3, bitsize(U)], [2, 4, 6, 7]])
n = length(a)
for k in [-1, 0, 1, n-1, n, n+1]
ss = @inferred subsets(a, k)
if 0 <= k <= n
@test_inferred length(ss) binomial(n, k)
else
@test_inferred length(ss) 0
end
@test eltype(ss) == SmallBitSet{U}
ssv = @inferred collect(ss)
@test length(ssv) == length(ss) == length(unique(ssv))
length(ss) == 0 && continue
@test unique(map(length, ssv)) == [k]
end
end
end
@testset "subsets(n)" begin
for n in [-1, 0, 1, 2, 10]
if n < 0
@test_throws Exception subsets(n)
continue
end
ss = subsets(n)
@test_inferred length(ss) 2^n
@test eltype(ss) == SmallBitSet{UInt}
ssv = @inferred collect(ss)
@test length(ssv) == length(ss) == length(unique(ssv))
@test_throws BoundsError ss[firstindex(ss)-1]
@test_throws BoundsError ss[lastindex(ss)+1]
end
for U in unsigned_types, a in map(SmallBitSet{U}, [Int[], [3], [bitsize(U)-3, bitsize(U)], [2, 4, 6, 7]])
ss = subsets(a)
@test_inferred length(ss) 2^length(a)
@test eltype(ss) == SmallBitSet{U}
ssv = @inferred collect(ss)
@test length(ssv) == length(ss) == length(unique(ssv))
@test_throws BoundsError ss[firstindex(ss)-1]
@test_throws BoundsError ss[lastindex(ss)+1]
end
end
@testset "shuffles" begin
function test_shuffles(ks)
N = length(ks)
sh = @inferred shuffles(ks...)
a = SmallBitSet(1:sum(ks; init = 0))
@test @inferred(length(sh)) == factorial(big(sum(ks; init = 0))) ÷ prod(map(factorial∘big, ks); init = 1)
@test @inferred(eltype(sh)) == Tuple{NTuple{N, SmallBitSet{UInt}}, Bool}
@test all(map(length, t) == ks && s isa Bool &&
(isempty(t) ? isempty(a) : (union(t...) == a)) &&
shuffle_signbit(t...) == s for (t, s) in sh)
@test allunique(sh)
end
function test_shuffles(a::S, ks::NTuple{N,Int}) where {S <: SmallBitSet, N}
test_shuffles(ks)
sh = @inferred shuffles(a, ks...)
@test @inferred(length(sh)) == factorial(big(sum(ks; init = 0))) ÷ prod(map(factorial∘big, ks); init = 1)
@test @inferred(eltype(sh)) == Tuple{NTuple{N, S}, Bool}
@test all(t isa NTuple{N, S} && s isa Bool &&
map(length, t) == ks &&
(isempty(t) ? isempty(a) : (union(t...) == a)) &&
shuffle_signbit(t...) == s for (t, s) in sh)
@test allunique(sh)
end
for U in unsigned_types, (v, ks) in [(Int[], ()), (Int[], (0,)), (Int[], (0, 0)),
(bitsize(U)-4:2:bitsize(U), (1, 1, 1)),
(3:2:11, (5,)), (3:2:11, (2, 3)), (3:2:11, (0, 2, 3)), (3:2:11, (2, 0, 3)), (3:2:11, (2, 3, 0)),
(20:2:38, (2, 3, 2, 3)), (20:2:38, (1, 4, 0, 2, 3))]
maximum(v; init = 0) <= bitsize(U) || continue
a = SmallBitSet{U}(v)
test_shuffles(a, ks)
end
@test_throws Exception shuffles(-1, 2)
@test_throws Exception shuffles(bitsize(UInt)-1, 2)
for U in unsigned_types
@test_throws Exception shuffles(SmallBitSet{U}(2:2:6))
@test_throws Exception shuffles(SmallBitSet{U}(2:2:6), -1, 2, 2)
@test_throws Exception shuffles(SmallBitSet{U}(2:2:6), 3, 4)
@test (shuffles(SmallBitSet{U}(1:bitsize(U)), bitsize(U)-2, 2); true)
end
# check that unsafe_lshr in iterate for Shuffles is safe
@test collect(subsets(bitsize(UInt), 1)) == [SmallBitSet((k,)) for k in 1:bitsize(UInt)]
end
@testset "compositions" begin
function test_compositions(ks)
N = length(ks)
sh = @inferred compositions(ks...)
a = SmallBitSet(1:sum(ks; init = 0))
@test @inferred(length(sh)) == factorial(big(sum(ks; init = 0))) ÷ prod(map(factorial∘big, ks); init = 1)
@test @inferred(eltype(sh)) == NTuple{N, SmallBitSet{UInt}}
@test all(map(length, t) == ks && (isempty(t) ? isempty(a) : (union(t...) == a)) for t in sh)
@test allunique(sh)
end
function test_compositions(a::S, ks::NTuple{N,Int}) where {S <: SmallBitSet, N}
test_compositions(ks)
sh = @inferred compositions(a, ks...)
@test @inferred(length(sh)) == factorial(big(sum(ks; init = 0))) ÷ prod(map(factorial∘big, ks); init = 1)
@test @inferred(eltype(sh)) == NTuple{N, S}
@test all(t isa NTuple{N, S} && map(length, t) == ks && (isempty(t) ? isempty(a) : (union(t...) == a)) for t in sh)
@test allunique(sh)
end
for U in unsigned_types, (v, ks) in [(Int[], ()), (Int[], (0,)), (Int[], (0, 0)),
(bitsize(U)-4:2:bitsize(U), (1, 1, 1)),
(3:2:11, (5,)), (3:2:11, (2, 3)), (3:2:11, (0, 2, 3)), (3:2:11, (2, 0, 3)), (3:2:11, (2, 3, 0)),
(20:2:38, (2, 3, 2, 3)), (20:2:38, (1, 4, 0, 2, 3))]
maximum(v; init = 0) <= bitsize(U) || continue
a = SmallBitSet{U}(v)
test_compositions(a, ks)
end
@test_throws Exception compositions(-1, 2)
@test_throws Exception compositions(bitsize(UInt)-1, 2)
for U in unsigned_types
@test_throws Exception compositions(SmallBitSet{U}(2:2:6))
@test_throws Exception compositions(SmallBitSet{U}(2:2:6), -1, 2, 2)
@test_throws Exception compositions(SmallBitSet{U}(2:2:6), 3, 4)
@test (compositions(SmallBitSet{U}(1:bitsize(U)), bitsize(U)-2, 2); true)
end
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | code | 16578 | using SmallCollections: default
using Base.FastMath: eq_fast
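# a SmallVector is valid if all unused slots of its fixed-size buffer hold the default element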
function isvalid(v::SmallVector{N,T}) where {N,T}
n = length(v)
0 <= n <= N && all(==(default(T)), view(v.b, n+1:N))
end
using StructEqualHash: @struct_equal_hash
struct A
x::Char
y::Int
end
@struct_equal_hash A
test_types = (Int8, UInt64, Int128, UInt256, Float32, Float64, Char, String, Symbol, A)
Base.rand(::Type{String}) = string(rand(Char, 3)...)
Base.rand(::Type{Symbol}) = Symbol(rand(Char, 3)...)
Base.rand(::Type{A}) = A(map(rand, fieldtypes(A))...)
Base.rand(::Type{T}, n::Integer) where T <: Union{String,Symbol,A} = T[rand(T) for _ in 1:n]
@testset "SmallVector" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
u = rand(T, m)
v = @inferred SmallVector{N,T}(u)
@test v === @inferred copy(v)
@test_inferred capacity(v) N Int
@test_inferred v == u true
@test isvalid(v)
@test_inferred collect(v) u Vector{T}
v2 = SmallVector{N,T}(u)
@test_inferred v == v2 true
v3 = SmallVector{2*N,T}(u)
@test_inferred v == v3 true
if T <: Number
@test_inferred eq_fast(v, v2) true
@test_inferred eq_fast(v, v3) true
v4 = SmallVector{N,Float64}(u)
@test_inferred v == v4 true
@test_inferred eq_fast(v, v4) true
end
if !isempty(u)
i = rand(1:m)
x = rand(T)
while x == v[i]
x = rand(T)
end
v5 = setindex(v, x, i)
@test_inferred v == v5 false
if T <: Number
@test_inferred eq_fast(v, v5) false
end
end
v6 = SmallVector{N+2,T}(push!(copy(u), rand(T)))
@test_inferred v == v6 false
if T <: Number
@test_inferred eq_fast(v, v6) false
end
if !isempty(u)
u7 = copy(u)
pop!(u7)
v7 = SmallVector{N+2,T}(u7)
@test_inferred v == v7 false
if T <: Number
@test_inferred eq_fast(v, v7) false
end
end
@test_inferred hash(v) hash(u) UInt
v8 = SmallVector{2*N,T}(u)
@test_inferred fasthash(v) fasthash(v8) UInt
@test_inferred length(v) length(u) Int
@test_inferred SmallVector{N,T}() SmallVector{N,T}(())
@test_inferred empty(v) SmallVector{N,T}()
@test_inferred empty(v, Char) SmallVector{N,Char}()
end
end
@testset "SmallVector indices" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
u = rand(T, m)
v = @inferred SmallVector{N,T}(u)
if isempty(u)
@test_throws Exception first(v)
@test_throws Exception last(v)
else
@test_inferred first(v) first(u) T
@test_inferred last(v) last(u) T
end
x = rand(T)
for i in -1:length(u)+1
if 1 <= i <= length(u)
@test_inferred v[i] u[i] T
w = @test_inferred setindex(v, x, i) setindex!(copy(u), x, i) v
@test isvalid(w)
if T <: Number
w = @test_inferred addindex(v, x, i) setindex!(copy(u), u[i]+x, i) v
@test isvalid(w)
end
else
@test_throws Exception v[i]
@test_throws Exception setindex(v, x, i)
T <: Number && @test_throws Exception addindex(v, x, i)
end
end
for i in 0:m, j in i-1:m+1
if checkbounds(Bool, u, i:j)
w = @test_inferred v[i:j] u[i:j] v
@test isvalid(w)
else
@test_throws Exception v[i:j]
end
end
end
end
@testset "SmallVector zeros" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
T <: Number || continue
u = zeros(T, m)
v = SmallVector{N,T}(u)
@test_inferred iszero(v) true
w = @test_inferred zero(v) u v
@test isvalid(w)
w = @test_inferred zeros(SmallVector{N,T}, m) v
@test isvalid(w)
w = @test_inferred ones(SmallVector{N,T}, m) ones(T, m) SmallVector{N,T}
@test isvalid(w)
end
end
@testset "SmallVector push/pop" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
u = rand(T, m)
v = @inferred SmallVector{N,T}(u)
x = rand(T)
y = rand(T)
@test_inferred push(v) v
@test_inferred pushfirst(v) v
if length(u) == N
@test_throws Exception push(v, x)
@test_throws Exception push(v, x, y)
@test_throws Exception pushfirst(v, x)
@test_throws Exception pushfirst(v, x, y)
else
w = @test_inferred push(v, x) push!(copy(u), x) v
@test isvalid(w)
w = @test_inferred pushfirst(v, x) pushfirst!(copy(u), x) v
@test isvalid(w)
if length(u) <= N-2
w = @test_inferred push(v, x, y) push(push(v, x), y)
@test isvalid(w)
w = @test_inferred pushfirst(v, x, y) pushfirst(pushfirst(v, y), x)
@test isvalid(w)
end
end
if isempty(u)
@test_throws Exception pop(v)
@test_throws Exception popfirst(v)
else
w, _ = @test_inferred pop(v) (deleteat!(copy(u), length(u)), last(u)) (v, last(v))
@test isvalid(w)
w, _ = @test_inferred popfirst(v) (deleteat!(copy(u), 1), first(u)) (v, first(v))
@test isvalid(w)
end
for i in (-1, 0, 1, 2, length(u), length(u)+1)
if 1 <= i <= length(u)+1 && length(u) < N
w = @test_inferred insert(v, i, x) insert!(copy(u), i, x) v
@test isvalid(w)
if i <= length(u)
w = @test_inferred duplicate(v, i) insert(v, i, v[i])
@test isvalid(w)
else
@test_throws Exception duplicate(v, i)
end
else
@test_throws Exception insert(v, i, x)
@test_throws Exception duplicate(v, i)
end
if 1 <= i <= length(u)
w = @test_inferred deleteat(v, i) deleteat!(copy(u), i) v
@test isvalid(w)
else
@test_throws Exception deleteat(v, i)
end
end
@test_inferred append(v) v
@test_inferred prepend(v) v
xy = [x, y]
if length(u) <= N-2
w = @test_inferred append(v, SmallVector{4}(xy)) push(v, x, y)
@test isvalid(w)
w = @test_inferred append(v, xy[i] for i in 1:2) push(v, x, y)
@test isvalid(w)
w = @test_inferred append(v, (x,), [y]) push(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, SmallVector{4}(xy)) pushfirst(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, xy[i] for i in 1:2) pushfirst(v, x, y)
@test isvalid(w)
w = @test_inferred prepend(v, (x,), [y]) pushfirst(v, x, y)
@test isvalid(w)
else
@test_throws Exception append(v, SmallVector{4}(xy))
@test_throws Exception append(v, xy[i] for i in 1:2)
@test_throws Exception append(v, (x,), [y])
@test_throws Exception prepend(v, SmallVector{4}(xy))
@test_throws Exception prepend(v, xy[i] for i in 1:2)
@test_throws Exception prepend(v, (x,), [y])
end
if T <: Integer
@test_inferred filter(isodd, v) filter(isodd, u) v
end
end
end
@testset "SmallVector add/mul" begin
for N in (1, 2, 9, 16), T1 in test_types, m in (1, round(Int, 0.7*N), N-1, N)
T1 <: Number || continue
if T1 <: Unsigned
u1 = rand(T1(1):T1(9), m)
else
u1 = rand(T1(-9):T1(9), m)
end
v1 = SmallVector{N}(u1)
for op in (+, -)
w = @test_inferred op(v1) op(u1) SmallVector{N,T1}
@test isvalid(w)
end
for T2 in test_types
T2 <: Number || continue
if T2 <: Unsigned
u2 = rand(T2(1):T2(9), m)
c = rand(T2(1):T2(9))
else
u2 = rand(T2(-9):T2(9), m)
c = rand(T2(-9):T2(9))
end
v2 = SmallVector{N}(u2)
T = promote_type(T1, T2)
for op in (+, -)
w = @test_inferred op(v1, v2) op(u1, u2) SmallVector{N,T}
@test isvalid(w)
v3 = SmallVector{N+4}(u2)
w = @test_inferred op(v1, v3) op(u1, u2) SmallVector{N,T}
@test isvalid(w)
w = @test_inferred op(v3, v1) op(u2, u1) SmallVector{N,T}
@test isvalid(w)
end
w = @test_inferred c*v1 c*u1 SmallVector{N,T}
@test isvalid(w)
w = @test_inferred Base.FastMath.mul_fast(c, v1) c*u1 SmallVector{N,T}
@test isvalid(w)
@test_inferred v1*c c*v1
end
end
N = 8
T = Float64
u = T[-Inf, -1, 0, 1, Inf, NaN]
v = SmallVector{8,Float64}(u)
for c in (Inf, -Inf, NaN)
w = @test_inferred c*v c*u SmallVector{N,T}
@test isvalid(w)
end
end
@testset failfast = false "SmallVector sum/max" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
T <: Number || continue
if T <: Unsigned
u = rand(T(1):T(9), m)
else
u = rand(T(-9):T(9), m)
end
v = SmallVector{N}(u)
for f in (maximum, minimum)
if isempty(u)
@test_throws Exception f(v)
@test_inferred f(v; init = zero(T)) f(u; init = zero(T))
else
@test_inferred f(v) f(u)
end
end
if isempty(u)
@test_throws Exception extrema(v)
@test_inferred extrema(v; init = (one(T), zero(T))) extrema(u; init = (one(T), zero(T)))
else
@test_inferred extrema(v) extrema(u)
end
@test_inferred sum(v) sum(u)
s = @inferred sum_fast(v)
@test abs(s-sum(u)) < 1e-5
@test_inferred prod(v) prod(u)
T <: AbstractFloat || continue
u = fill(-T(0), m)
v = SmallVector{N}(u)
@test_inferred sum(v) sum(u)
@test_inferred prod(-v) prod(-u)
end
for N in (5, 16), T in (Float32, Float64), x in (Inf, -Inf, NaN)
u = T[x, -1, 0, 1]
v = SmallVector{N}(u)
@test_inferred maximum(v) maximum(u)
@test_inferred minimum(v) minimum(u)
@test_inferred sum(v) sum(u)
if isnan(prod(u))
@test_inferred isnan(prod(v)) true
else
@test_inferred prod(v) prod(u)
end
end
for N in (5, 16), T in (Float32, Float64)
u = T[NaN, -1, 0, 1]
v = SmallVector{N}(u)
@test_inferred isnan(maximum(v)) true
@test_inferred isnan(minimum(v)) true
@test_inferred isnan(sum(v)) true
@test_inferred isnan(prod(v)) true
end
end
@testset "SmallVector map" begin
f(x) = Int32(2)*x
f(x, y) = 2*x + y
g(x) = iszero(x) ? 1 : 1.0
for N in (1, 2, 9, 16), T1 in test_types, m in (0, 1, round(Int, 0.7*N), N-1)
T1 <: Number || continue
u1 = rand(T1, m)
v1 = SmallVector{N}(u1)
u3 = map(f, u1)
w = @test_inferred map(f, v1) u3 SmallVector{N,eltype(u3)}
@test isvalid(w)
for T2 in test_types
T2 <: Number || continue
u2 = rand(T2, m+1)
v2 = SmallVector{N+2}(u2)
u4 = map(f, u1, u2)
w = @test_inferred map(f, v1, v2) u4 SmallVector{N,eltype(u4)}
@test isvalid(w)
end
end
for m in (0, 1, 3)
v = SmallVector{8}(1:m)
u = collect(v)
u5 = map(g, u)
w = map(g, v)
@test w == u5
@test eltype(w) == eltype(u5)
end
end
@testset "SmallVector rest" begin
v = SmallVector{8}(1:2)
w..., = v
@test w == v && typeof(w) == typeof(v) && isvalid(w)
x1, w... = v
@test w == v[2:end] && typeof(w) == typeof(v) && isvalid(w)
x1, x2, w... = v
@test w == v[3:end] && typeof(w) == typeof(v) && isvalid(w)
@test_throws Exception x1, x2, x3, w... = v
if VERSION >= v"1.9"
v = SmallVector{8,UInt8}(1:3)
w..., y1 = v
@test w == v[1:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
x1, w..., y1 = v
@test w == v[2:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
x1, x2, w..., y1 = v
@test w == v[3:end-1] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end]
@test_throws Exception x1, x2, x3, w..., y1 = v
v = SmallVector{8,Int16}(1:4)
w..., y1, y2 = v
@test w == v[1:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
x1, w..., y1, y2 = v
@test w == v[2:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
x1, x2, w..., y1, y2 = v
@test w == v[3:end-2] && typeof(w) == typeof(v) && isvalid(w) && y1 === v[end-1] && y2 === v[end]
@test_throws Exception x1, x2, x3, w..., y1, y2 = v
end
end
@testset "SmallVector support" begin
for N in (1, 2, 9, 16), T in test_types, m in (0, 1, round(Int, 0.7*N), N-1, N)
T <: Number || continue
u = rand(0:2, m)
v = @inferred SmallVector{N,T}(u)
@test_inferred support(v) Set{Int}(i for i in 1:m if u[i] != 0) SmallBitSet
end
end
@testset "broadcast" begin
N = 8
for T in (Int, Float64), m in (0, 1, 3, 8)
u = collect(T, 1:m)
v = SmallVector{N}(u)
t = Tuple(u)
c = T(2)
uu = m < N ? push!(copy(u), c) : copy(u)
vv = SmallVector{N}(uu)
w = @test_inferred v .+ v u .+ u SmallVector{N,T}
@test isvalid(w)
w = @test_inferred v .- v u .- u SmallVector{N,T}
@test isvalid(w)
w = @test_inferred v .* v u .* u SmallVector{N,T}
@test isvalid(w)
w = @test_inferred (c .* v) (c .* u) SmallVector{N,T}
@test isvalid(w)
w = abs.(-v)
@test w == v && w isa SmallVector{N,T} && isvalid(w)
f(x, y) = x + 2*y
# @test_inferred map(f, v, v) map(f, u, u) SmallVector{N,T}
# @test_inferred map(f, vv, v) map(f, uu, u) SmallVector{N,T}
w = f.(v, v)
@test w == f.(u, u) && w isa SmallVector{N,T}
w = f.(v, c)
@test w == f.(u, c) && w isa SmallVector{N,T}
if m > 0
w = f.(v, t)
@test w == f.(u, t) && w isa SmallVector{N,T}
end
end
for T in (Int16, Float32)
if T <: Integer
u1 = T[2, -1, 4, -3, 7]
u2 = T[-3, 9, 4, -1, -5, 6]
else
u1 = 10 .* rand(T, 5) .- 5
u2 = 10 .* rand(T, 6) .- 5
end
v1 = SmallVector{8,T}(u1)
v2 = SmallVector{8,T}(u2)
for f in (+, -, *, /, ==, !=, <, >, <=, >=, ===, isequal)
ww = map(f, u1, u2)
w = @test_inferred map(f, v1, v2) ww SmallVector{8,eltype(ww)}
@test isvalid(w)
end
for f in (round, floor, ceil, trunc, abs, abs2, sign, sqrt, signbit)
if f === sqrt
uu = map(abs, u1)
vv = map(abs, v1)
ww = map(f, uu)
w = @test_inferred map(f, vv) ww SmallVector{8,eltype(ww)}
@test isvalid(w)
else
ww = map(f, u1)
w = @test_inferred map(f, v1) ww SmallVector{8,eltype(ww)}
@test isvalid(w)
end
end
T <: Integer || continue
for f in (&, |, xor, nand, nor)
w = @test_inferred map(f, v1, v2) map(f, u1, u2) SmallVector{8,T}
@test isvalid(w)
end
for f in (~,)
w = @test_inferred map(f, v1) map(f, u1) SmallVector{8,T}
@test isvalid(w)
end
end
u = [7, 8]
v = SmallVector{3}(u)
a = [1 2; 3 4]
@test a .+ v == a .+ u
u = ['a', 'b', 'c']
v = SmallVector{5}(u)
w = v .* 'x'
@test w == u .* 'x' && w isa SmallVector{5,String}
end
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | docs | 6756 | # SmallCollections.jl
This Julia package defines three immutable collections types, `SmallVector`,
`PackedVector` and `SmallBitSet`. They don't allocate and are often much faster
than their allocating counterparts `Vector` and `BitSet`. Unlike the static vectors
provided by [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl),
[StaticVectors.jl](https://github.com/chakravala/StaticVectors.jl) and
[SIMD.jl](https://github.com/eschnett/SIMD.jl),
the length of a `SmallVector` or `PackedVector` is variable with a user-defined limit.
If the package [BangBang.jl](https://github.com/JuliaFolds2/BangBang.jl)
is loaded, then many functions defined by this package are also available
in a `!!`-form. For example, both `push` and `push!!` add an element
to a `SmallVector`, `PackedVector` or `SmallBitSet`.
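For instance (a small illustrative snippet):

```julia
julia> using SmallCollections, BangBang

julia> push!!(SmallVector{8}([1, 2, 3]), 4)
4-element SmallVector{8, Int64}:
 1
 2
 3
 4
```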
Below are [examples](#examples) and [benchmarks](#benchmarks). For details see
the [documentation](https://matthias314.github.io/SmallCollections.jl/stable/).
## Examples
### `SmallVector`
A vector of type `SmallVector{N,T}` can hold up to `N` elements of type `T`.
Both `N` and `T` can be arbitrary. (If `T` is not a concrete type, however,
then creating a small vector does allocate.)
```julia
julia> v = SmallVector{8,Int8}(2*x for x in 1:3)
3-element SmallVector{8, Int8}:
2
4
6
julia> setindex(v, 7, 3)
3-element SmallVector{8, Int8}:
2
4
7
julia> w = SmallVector{9}((1, 2.5, 4))
3-element SmallVector{9, Float64}:
1.0
2.5
4.0
julia> v+2*w
3-element SmallVector{8, Float64}:
4.0
9.0
14.0
```
Non-numeric element types are possible. (One may have to define
a default element used for padding via `SmallCollections.default(T)`;
for `Char`, `String` and `Symbol` it is pre-defined.)
```julia
julia> u = SmallVector{6}(['a', 'b', 'c'])
3-element SmallVector{6, Char}:
'a': ASCII/Unicode U+0061 (category Ll: Letter, lowercase)
'b': ASCII/Unicode U+0062 (category Ll: Letter, lowercase)
'c': ASCII/Unicode U+0063 (category Ll: Letter, lowercase)
julia> popfirst(u)
(['b', 'c'], 'a')
julia> map(uppercase, u)
3-element SmallVector{6, Char}:
'A': ASCII/Unicode U+0041 (category Lu: Letter, uppercase)
'B': ASCII/Unicode U+0042 (category Lu: Letter, uppercase)
'C': ASCII/Unicode U+0043 (category Lu: Letter, uppercase)
```
### `PackedVector`
A `PackedVector` can store bit integers or `Bool` values.
The elements of a `PackedVector{U,M,T}` are stored in a common bit mask of type `U`
with `M` bits for each entry. When retrieving elements, they are of type `T`.
Compared to a `SmallVector`, a `PackedVector` may have faster insert and delete operations.
Arithmetic operations are usually slower unless `M` is the size of a hardware integer.
```julia
julia> v = PackedVector{UInt64,5,Int}(4:6)
3-element PackedVector{UInt64, 5, Int64}:
4
5
6
julia> capacity(v) # 64 bits available, 5 for each entry
12
julia> pushfirst(v, 7)
4-element PackedVector{UInt64, 5, Int64}:
7
4
5
6
julia> duplicate(v, 2)
4-element PackedVector{UInt64, 5, Int64}:
4
5
5
6
julia> 3*v # note the overflow in the last entry
3-element PackedVector{UInt64, 5, Int64}:
12
15
-14
```
### `SmallBitSet`
The default `SmallBitSet` type `SmallBitSet{UInt64}` can hold integers
between `1` and `64`.
```julia
julia> s = SmallBitSet([1, 4, 7])
SmallBitSet{UInt64} with 3 elements:
1
4
7
julia> t = SmallBitSet([3, 4, 5])
SmallBitSet{UInt64} with 3 elements:
3
4
5
julia> union(s, t)
SmallBitSet{UInt64} with 5 elements:
1
3
4
5
7
julia> push(s, 9)
SmallBitSet{UInt64} with 4 elements:
1
4
7
9
julia> filter(iseven, s)
SmallBitSet{UInt64} with 1 element:
4
```
Smaller or larger sets are possible by choosing a different unsigned bit integer
as bitmask type, for example `UInt16` or `UInt128` or types like `UInt256` defined
by the package [BitIntegers.jl](https://github.com/rfourquet/BitIntegers.jl).
```julia
julia> using BitIntegers
julia> SmallBitSet{UInt256}(n for n in 1:256 if isodd(n) && isinteger(sqrt(n)))
SmallBitSet{UInt256} with 8 elements:
1
9
25
49
81
121
169
225
```
## Benchmarks
### `SmallVector`
The timings are for pairwise adding the elements of two `Vector`s,
each containing 1000 vectors with element type `T`.
For `Vector` and `SmallVector` the length of each pair of elements is **variable** and
chosen randomly between 1 and `N`. For `SVector{N,T}` (from StaticArrays.jl),
`Values{N,T}` (from StaticVectors.jl) and `Vec{N,T}` (from SIMD.jl) the vectors have
**fixed** length `N`.
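A minimal sketch of the `SmallVector` workload (illustrative only; the actual code
is in the `benchmark` directory, and `@b` is the timing macro from Chairmarks):

```julia
using SmallCollections, Chairmarks

N, T = 8, Float64
xs = [SmallVector{N,T}(rand(T, rand(1:N))) for _ in 1:1000]
ys = [SmallVector{N,T}(rand(T, length(x))) for x in xs]

@b map(+, xs, ys)  # pairwise addition of all 1000 pairs
```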
| `(N, T)` | `Vector{T}` | `SmallVector{N,T}` | `SVector{N,T}` | `Values{N,T}` | `Vec{N,T}` |
| ---: | ---: | ---: | ---: | ---: | ---: |
| (8, Float64) | 66.682 μs | 3.341 μs | 3.452 μs | 3.197 μs | 3.046 μs |
| (8, Int64) | 48.642 μs | 4.962 μs | 3.196 μs | 4.551 μs | 2.954 μs |
| (16, Int32) | 49.449 μs | 3.866 μs | 3.284 μs | 3.623 μs | 3.757 μs |
| (32, Int16) | 55.027 μs | 5.046 μs | 4.212 μs | 3.618 μs | 3.548 μs |
### `PackedVector`
Here we compare a `PackedVector{UInt128, 4, Int8}` (that can hold 32 elements) to a `SmallVector{32, Int8}`
and to a `Vector{Int8}` with 30 elements.
The function `duplicate(v, i)` is equivalent to `insert(v, i+1, v[i])`.
For the operations listed in the table below we have chosen the mutating variant for `Vector`.
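For example:

```julia
julia> v = PackedVector{UInt128,4,Int8}([1, 2, 3]);

julia> duplicate(v, 2) == insert(v, 3, v[2])
true
```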
| operation | `Vector` | `SmallVector` | `PackedVector` |
| ---: | ---: | ---: | ---: |
| getindex | 2.902 ns | 2.647 ns | 3.167 ns |
| setindex | 2.638 ns | 5.279 ns | 6.861 ns |
| add | 12.419 ns | 2.375 ns | 4.222 ns |
| scalar_mul | 9.762 ns | 4.749 ns | 4.223 ns |
| push | 8.241 ns | 5.541 ns | 8.970 ns |
| pushfirst | 8.750 ns | 4.221 ns | 4.223 ns |
| pop | 8.600 ns | 6.000 ns | 4.933 ns |
| popfirst | 11.267 ns | 4.667 ns | 3.867 ns |
| insert | 12.928 ns | 24.804 ns | 7.328 ns |
| deleteat | 12.933 ns | 18.200 ns | 5.667 ns |
| duplicate | 13.546 ns | 20.845 ns | 4.486 ns |
### `SmallBitSet`
The timings are for taking the pairwise union of the elements of two `Vector`s,
each containing 1000 sets of the indicated type.
Each set contains up to `b` integers between 1 and `b = 8*sizeof(U)-1`.
| `U` | `Set{Int16}` | `BitSet` | `SmallBitSet` |
| ---: | ---: | ---: | ---: |
| UInt8 | 366.256 μs | 69.439 μs | 95.698 ns |
| UInt16 | 801.736 μs | 68.195 μs | 311.559 ns |
| UInt32 | 1.537 ms | 68.354 μs | 400.259 ns |
| UInt64 | 2.836 ms | 68.751 μs | 640.833 ns |
| UInt128 | 5.686 ms | 68.846 μs | 1.540 μs |
| UInt256 | 11.579 ms | 69.398 μs | 2.441 μs |
| UInt512 | 23.819 ms | 92.041 μs | 4.866 μs |
Versions: Julia v1.10.4,
Chairmarks v1.2.1,
SmallCollections v0.3.0,
StaticArrays v1.9.7,
StaticVectors v1.0.5,
SIMD v3.5.0,
BitIntegers v0.3.1
Computer: Intel Core i3-10110U CPU @ 2.10GHz with 8GB RAM
The benchmark code can be found in the `benchmark` directory.
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.3.0 | bab377f9dc046a953c6213d038900456a5113d83 | docs | 3943 | ```@meta
DocTestSetup = quote
using SmallCollections
# for jldoctest outside of docstrings
end
```
# SmallCollections.jl
```@docs
SmallCollections
```
## [`AbstractSmallVector`](@id sec-abstractsmallvector)
```@docs
AbstractSmallVector
capacity(::Type{<:AbstractSmallVector})
zeros
ones
setindex
addindex
push(::AbstractSmallVector, ::Vararg)
pop(::AbstractSmallVector)
pushfirst
popfirst
insert
duplicate
deleteat
popat
append
prepend
support
```
### [`SmallVector`](@id sec-smallvector)
```@docs
SmallVector
empty(::SmallVector)
fasthash(::SmallVector, ::UInt)
sum_fast
map
```
### [`PackedVector`](@id sec-packedvector)
```@docs
PackedVector
bits(::PackedVector)
SmallCollections.unsafe_add
SmallCollections.unsafe_sub
```
### [Broadcasting](@id sec-broadcasting)
Broadcasting is supported for `SmallVector`. The result is again a `SmallVector`
if at least one argument is a `SmallVector` and all other arguments (if any) are
`Tuple`s or scalars. The capacity of the result is the minimum of the capacities
of the `SmallVector` arguments. Broadcasted assignments to a `SmallVector` are
of course not possible.
See also [`map`](@ref), [`capacity`](@ref capacity(::Type{<:AbstractSmallVector})),
[`SmallCollections.SmallVectorStyle`](@ref).
#### Examples
```jldoctest
julia> v = SmallVector{8}(1:3); w = SmallVector{6}(2:4); v .* w .- 1.0
3-element SmallVector{6, Float64}:
1.0
5.0
11.0
julia> v = SmallVector{8}(1:3); w = [2, 3, 4]; v .* w
3-element Vector{Int64}:
2
6
12
julia> v = SmallVector{8}('a':'c'); t = ('p', 'q', 'r'); uppercase.(v .* t .* 'x')
3-element SmallVector{8, String}:
"APX"
"BQX"
"CRX"
```
## [`SmallBitSet`](@id sec-smallbitset)
```@docs
SmallBitSet
bits(::SmallBitSet)
convert(::Type{SmallBitSet}, ::Integer)
capacity(::Type{<:SmallBitSet})
fasthash(::SmallBitSet, ::UInt)
empty(::SmallBitSet)
push(::SmallBitSet, ::Vararg)
pop(::SmallBitSet)
pop(::SmallBitSet, ::Any)
pop(::SmallBitSet, ::Any, ::Any)
delete
```
### Subsets and shuffles
When used with a `SmallBitSet` as first argument, the following functions internally use
the function [`pdep`](@ref SmallCollections.pdep).
As discussed in the docstring for `pdep`, performance is much better if the processor supports the BMI2 instruction set.
The same applies to `shuffles` with more than two parts, even if the first argument is not a `SmallBitSet`.
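For example, `subsets(n, k)` iterates over all `binomial(n, k)` subsets of `1:n`
with `k` elements:

```jldoctest
julia> length(subsets(4, 2)) == binomial(4, 2)
true

julia> all(s -> length(s) == 2, subsets(4, 2))
true
```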
```@docs
subsets(::Integer)
subsets(::Integer, ::Integer)
compositions
shuffles(::Vararg{Integer})
shuffle_signbit
```
## [BangBang support](@id sec-bangbang)
If the package [`BangBang.jl`](https://github.com/JuliaFolds2/BangBang.jl)
is loaded, then the functions
[`push`](@ref push(::SmallBitSet, ::Vararg)),
[`pop`](@ref pop(::SmallBitSet)),
[`delete`](@ref),
`union`,
`intersect`,
`setdiff` and
`symdiff`
for `SmallBitSet` as well as
[`setindex`](@ref),
[`push`](@ref push(::SmallVector, ::Vararg)),
[`pushfirst`](@ref),
[`pop`](@ref pop(::SmallVector)),
[`popfirst`](@ref),
[`deleteat`](@ref) and
[`append`](@ref)
for `AbstractSmallVector`
are also available in `!!`-form.
For example, `setindex!!` with an `AbstractSmallVector` as first argument calls `setindex`.
(`BangBang.jl` does not define `insert!!`, `prepend!!`, `filter!!` and `map!!`.)
Moreover, `add!!(v::AbstractSmallVector, w::AbstractSmallVector)` is a synonym for `v+w`.
This allows to write efficient code that works for both mutable and immutable arguments.
For example, the function
```julia
f!!(v, ws...) = foldl(add!!, ws; init = v)
```
adds up its arguments, mutating the first argument `v` if possible.
## Non-exported names
### Public names
```@docs
SmallCollections.bitsize
SmallCollections.default
SmallCollections.SmallVectorStyle
```
### Internal names
These names are not public and may change in future versions.
```@docs
SmallCollections.AbstractBitInteger
SmallCollections.top_set_bit
SmallCollections.unsafe_shl
SmallCollections.unsafe_lshr
SmallCollections.pdep
```
| SmallCollections | https://github.com/matthias314/SmallCollections.jl.git |
|
[
"MIT"
] | 0.1.0 | 1cc566369a372ca6026519003c50d6df90c58095 | code | 3281 | using TreeSitterHighlight
using Markdown
const treesitter_javascript =
"~/Projects/tree-sitter-javascript/build_so/libtreesitter_javascript.so" |> expanduser
const treesitter_julia =
"~/Projects/tree-sitter-julia/build_so/libtreesitter_julia.so" |> expanduser
captures = [
"symbol",
"include",
"variable",
"comment",
"tag",
"function",
"string",
"keyword",
"punctuation",
]
highlighter = Highlighter(Dict(capture => "class=$capture" for capture in captures))
js_scope = "source.js"
js_injection_regex = "^javascript"
js = Language(
:javascript,
@ccall treesitter_javascript.tree_sitter_javascript()::Ptr{Nothing}
)
js_highlights_query = read("./queries/javascript/highlights.scm", String)
js_injections_query = read("./queries/javascript/injections.scm", String)
js_locals_query = read("./queries/javascript/locals.scm", String)
add_language!(
highlighter,
js;
scope = js_scope,
injection_regex = js_injection_regex,
highlights_query = js_highlights_query,
injections_query = js_injections_query,
locals_query = js_locals_query,
)
jl_scope = "source.jl"
jl_injection_regex = "^julia"
jl = Language(:julia, @ccall treesitter_julia.tree_sitter_julia()::Ptr{Nothing})
jl_highlights_query = read("./queries/julia/highlights.scm", String)
jl_injection_query = read("./queries/julia/injections.scm", String)
jl_locals_query = read("./queries/julia/locals.scm", String)
add_language!(
highlighter,
jl;
scope = jl_scope,
injection_regex = jl_injection_regex,
highlights_query = jl_highlights_query,
injections_query = jl_injection_query,
locals_query = jl_locals_query,
)
scopes = Dict{String,String}("javascript" => js_scope, "julia" => jl_scope)
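# Override HTML rendering of fenced code blocks so that julia and javascript
# snippets are emitted with tree-sitter syntax highlighting.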
function Markdown.html(io::IO, code::Markdown.Code)
if code.language ∈ ("julia", "javascript")
write(io, "<pre>")
write(io, highlight(highlighter, code.code; scope = scopes[code.language]))
write(io, "</pre>")
else
write(io, code.code)
end
end
readme = read("./README.md", String) |> Markdown.parse
generate_index() =
open("index.html", "w") do f
write(
f,
"""
<html>
<head>
<style>
body {
margin: 0;
padding: 0;
font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif;
}
h1, h2, h3, p {
margin: 0;
padding-top: 5px;
padding-left: 20px;
padding-right: 20px;
}
pre {
padding: 20px;
background: hsla(46, 90%, 98%, 1);
color: #41323f;
}
pre p {
white-space: pre-wrap;
margin: 0;
}
.symbol, .symbol * {
color: #815ba4 !important;
}
.comment {
color: #e96ba8;
}
.keyword, .include {
color: #ef6155;
}
.string {
color: #da5616;
}
.function, .function * {
color: #cc80ac !important;
}
.number {
color: #815ba4;
}
.punctuation {
color: #41323f;
}
.variable {
color: #5668a4;
}
</style>
</head>
<body>
""",
)
show(f, MIME"text/html"(), readme)
write(
f,
"""
</body>
</html>
""",
)
end
generate_index()
| TreeSitterHighlight | https://github.com/Pangoraw/TreeSitterHighlight.jl.git |
|
[
"MIT"
] | 0.1.0 | 1cc566369a372ca6026519003c50d6df90c58095 | code | 5891 | module TreeSitterHighlight
include("./libtree_sitter_highlight.jl")
import .LibTreeSitterHighlight as LTSH
function maybe_throw_ts_error(maybe_err)
err(name) = error("Got error of type $name")
if maybe_err == LibTreeSitterHighlight.TSHighlightOk
# pass
elseif maybe_err == LibTreeSitterHighlight.TSHighlightUnknownScope
err("TSHighlightError::TSHighlightUnknownScope")
elseif maybe_err == LibTreeSitterHighlight.TSHighlightTimeout
err("TSHighlightError::TSHighlightTimeout")
elseif maybe_err == LibTreeSitterHighlight.TSHighlightInvalidLanguage
err("TSHighlightError::TSHighlightInvalidLanguage")
elseif maybe_err == LibTreeSitterHighlight.TSHighlightInvalidUtf8
err("TSHighlightError::TSHighlightInvalidUtf8")
elseif maybe_err == LibTreeSitterHighlight.TSHighlightInvalidRegex
err("TSHighlightError::TSHighlightInvalidRegex")
elseif maybe_err == LibTreeSitterHighlight.TSHighlightInvalidQuery
err("TSHighlightError::TSHighlightInvalidQuery")
end
end
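# evaluate a LibTreeSitterHighlight call and throw a Julia error unless it
# returns TSHighlightOk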
macro tscall(call)
quote
local res = $(esc(call))
maybe_throw_ts_error(res)
end
end
struct Language
name::Symbol
language::Ptr{Nothing}
end
mutable struct Highlighter
# Keep these strings around
names::Vector{String}
attributes::Vector{String}
ptr::Ptr{LTSH.TSHighlighter}
languages::Vector{Language}
function Highlighter(names, attributes, ptr)
finalizer(LTSH.ts_highlighter_delete, new(names, attributes, ptr, Language[]))
end
end
"""
    Highlighter(names_attributes::Dict{String,String})
A `Highlighter` is the object needed to perform syntax highlighting. The keys of
`names_attributes` are the capture names whose matches should be wrapped in spans,
and the values are the HTML attributes inserted into each of these spans.
```julia
julia> highlighter = Highlighter(Dict(
"keyword" => "class=keyword",
))
Highlighter(Language[])
```
"""
function Highlighter(names_attributes::Dict{String,String})
names = collect(keys(names_attributes))
attributes = collect(values(names_attributes))
Highlighter(names, attributes)
end
function Highlighter(names, attributes)
    @assert length(names) == length(attributes) "There should be an attribute for each name (got $(length(names)) names and $(length(attributes)) attributes)."
res = LTSH.ts_highlighter_new(names, attributes, length(names))
if res == C_NULL
error("Failed to create an Highlighter")
end
Highlighter(names, attributes, res)
end
function Base.show(io::IO, highlighter::Highlighter)
write(io, "Highlighter(")
show(io, highlighter.languages)
write(io, ")")
end
Base.unsafe_convert(::Type{Ptr{LTSH.TSHighlighter}}, highlighter::Highlighter) =
highlighter.ptr
"""
add_language!(
highlighter::Highlighter,
language::Language;
scope::String,
highlights_query::Union{Nothing,String}=nothing,
injections_query::Union{Nothing,String}=nothing,
locals_query::Union{Nothing,String} = nothing,
injection_regex::String = string("^", language.name),
)
Adds a language definition to the given highlighter.
"""
function add_language!(
highlighter::Highlighter,
language::Language;
scope::String,
highlights_query::Union{Nothing,String} = nothing,
injections_query::Union{Nothing,String} = nothing,
locals_query::Union{Nothing,String} = nothing,
injection_regex::String = string("^", language.name),
)
push!(highlighter.languages, language)
# lang = Ref{TSLanguage}(language.language)
@tscall LTSH.ts_highlighter_add_language(
highlighter,
scope,
injection_regex,
language.language,
something(highlights_query, C_NULL),
something(injections_query, C_NULL),
something(locals_query, C_NULL),
highlights_query === nothing ? 0 : length(highlights_query),
injections_query === nothing ? 0 : length(injections_query),
locals_query === nothing ? 0 : length(locals_query),
)
highlighter
end
mutable struct HighlightBuffer
ptr::Ptr{LTSH.TSHighlightBuffer}
function HighlightBuffer(ptr)
finalizer(LTSH.ts_highlight_buffer_delete, new(ptr))
end
end
function HighlightBuffer()
res = LTSH.ts_highlight_buffer_new()
if res == C_NULL
error("Could not create HighlightBuffer")
end
HighlightBuffer(res)
end
Base.unsafe_convert(::Type{Ptr{LTSH.TSHighlightBuffer}}, buffer::HighlightBuffer) =
buffer.ptr
function Base.convert(::Type{String}, buffer::HighlightBuffer)
ptr = convert(Ptr{Cchar}, LTSH.ts_highlight_buffer_content(buffer))
len = LTSH.ts_highlight_buffer_len(buffer)
unsafe_string(ptr, len)
end
"""
highlight(highlighter::Highlighter, source_code::String; scope::String)::String
Highlights the given source code in scope and returns an HTML string.
"""
function highlight(highlighter, source_code; scope)
buffer = HighlightBuffer()
@tscall LTSH.ts_highlighter_highlight(
highlighter,
scope,
source_code,
length(source_code),
buffer,
C_NULL,
)
# output_line_count = LTSH.ts_highlight_buffer_line_count(buffer)
# output_line_offsets = LTSH.ts_highlight_buffer_line_offsets(buffer)
# output_line_offsets = Base.unsafe_wrap(Array{Cuint}, output_line_offsets, output_line_count)
# TODO: look into string indexing
# lines = [
# output_string[start:end_]
# for (start, end_) in zip(output_line_offsets[begin:end-1], output_line_offsets[begin+1:end])
# ]
# for (i, line) in enumerate(lines)
# println("line $i #", line)
# end
convert(String, buffer)
end
export add_language!, Highlighter, highlight, Language
end # module
| TreeSitterHighlight | https://github.com/Pangoraw/TreeSitterHighlight.jl.git |
|
[
"MIT"
] | 0.1.0 | 1cc566369a372ca6026519003c50d6df90c58095 | code | 5093 | module LibTreeSitterHighlight
using tree_sitter_highlight_jll: libtree_sitter_highlight
using CEnum: @cenum
@cenum TSHighlightError begin
TSHighlightOk
TSHighlightUnknownScope
TSHighlightTimeout
TSHighlightInvalidLanguage
TSHighlightInvalidUtf8
TSHighlightInvalidRegex
TSHighlightInvalidQuery
end
struct TSHighlighter end
struct TSHighlightBuffer end
struct TSLanguage end
# TSHighlighter *ts_highlighter_new(
# const char **highlight_names,
# const char **attribute_strings,
# uint32_t highlight_count
# );
function ts_highlighter_new(highlight_names, attribute_strings, highlight_count)
@ccall libtree_sitter_highlight.ts_highlighter_new(
highlight_names::Ptr{Ptr{Cchar}},
attribute_strings::Ptr{Ptr{Cchar}},
highlight_count::Cuint,
)::Ptr{TSHighlighter}
end
# // Delete a syntax highlighter.
# void ts_highlighter_delete(TSHighlighter *);
function ts_highlighter_delete(highlighter)
@ccall libtree_sitter_highlight.ts_highlighter_delete(
highlighter::Ptr{TSHighlighter},
)::Cvoid
end
# // Add a `TSLanguage` to a highlighter. The language is associated with a
# // scope name, which can be used later to select a language for syntax
# // highlighting. Along with the language, you must provide a JSON string
# // containing the compiled PropertySheet to use for syntax highlighting
# // with that language. You can also optionally provide an 'injection regex',
# // which is used to detect when this language has been embedded in a document
# // written in a different language.
# TSHighlightError ts_highlighter_add_language(
# TSHighlighter *self,
# const char *scope_name,
# const char *injection_regex,
# const TSLanguage *language,
# const char *highlight_query,
# const char *injection_query,
# const char *locals_query,
# uint32_t highlight_query_len,
# uint32_t injection_query_len,
# uint32_t locals_query_len
# );
function ts_highlighter_add_language(
self,
scope_name,
injection_regex,
language,
highlight_query,
injection_query,
locals_query,
highlight_query_len,
injection_query_len,
locals_query_len,
)
@ccall libtree_sitter_highlight.ts_highlighter_add_language(
self::Ptr{TSHighlighter},
scope_name::Cstring,
injection_regex::Cstring,
language::Ptr{TSLanguage},
highlight_query::Cstring,
injection_query::Cstring,
locals_query::Cstring,
highlight_query_len::Cuint,
injection_query_len::Cuint,
locals_query_len::Cuint,
)::TSHighlightError
end
# // Compute syntax highlighting for a given document. You must first
# // create a `TSHighlightBuffer` to hold the output.
# TSHighlightError ts_highlighter_highlight(
# const TSHighlighter *self,
# const char *scope_name,
# const char *source_code,
# uint32_t source_code_len,
# TSHighlightBuffer *output,
# const size_t *cancellation_flag
# );
function ts_highlighter_highlight(
self,
scope_name,
source_code,
source_code_len,
output,
cancellation_flag,
)
@ccall libtree_sitter_highlight.ts_highlighter_highlight(
self::Ptr{TSHighlighter},
scope_name::Cstring,
source_code::Cstring,
source_code_len::Cuint,
output::Ptr{TSHighlightBuffer},
cancellation_flag::Ptr{Csize_t},
)::TSHighlightError
end
# // TSHighlightBuffer: This struct stores the HTML output of syntax
# // highlighting. It can be reused for multiple highlighting calls.
# TSHighlightBuffer *ts_highlight_buffer_new();
function ts_highlight_buffer_new()
@ccall libtree_sitter_highlight.ts_highlight_buffer_new()::Ptr{TSHighlightBuffer}
end
# // Delete a highlight buffer.
# void ts_highlight_buffer_delete(TSHighlightBuffer *);
function ts_highlight_buffer_delete(highlighter)
@ccall libtree_sitter_highlight.ts_highlight_buffer_delete(
highlighter::Ptr{TSHighlightBuffer},
)::Cvoid
end
# // Access the HTML content of a highlight buffer.
# const uint8_t *ts_highlight_buffer_content(const TSHighlightBuffer *);
function ts_highlight_buffer_content(highlighter)
@ccall libtree_sitter_highlight.ts_highlight_buffer_content(
highlighter::Ptr{TSHighlightBuffer},
    )::Ptr{UInt8} # the C API returns `const uint8_t *`
end
# const uint32_t *ts_highlight_buffer_line_offsets(const TSHighlightBuffer *);
function ts_highlight_buffer_line_offsets(highlight_buffer)
@ccall libtree_sitter_highlight.ts_highlight_buffer_line_offsets(
highlight_buffer::Ptr{TSHighlightBuffer},
)::Ptr{Cuint}
end
# uint32_t ts_highlight_buffer_len(const TSHighlightBuffer *);
function ts_highlight_buffer_len(highlight_buffer)
@ccall libtree_sitter_highlight.ts_highlight_buffer_len(
highlight_buffer::Ptr{TSHighlightBuffer},
)::Cuint
end
# uint32_t ts_highlight_buffer_line_count(const TSHighlightBuffer *);
function ts_highlight_buffer_line_count(highlight_buffer)
@ccall libtree_sitter_highlight.ts_highlight_buffer_line_count(
highlight_buffer::Ptr{TSHighlightBuffer},
)::Cuint
end
end # module LibTreeSitterHighlight
| TreeSitterHighlight | https://github.com/Pangoraw/TreeSitterHighlight.jl.git |
|
[
"MIT"
] | 0.1.0 | 1cc566369a372ca6026519003c50d6df90c58095 | docs | 1587 | # TreeSitterHighlight.jl
> [🌠 View this readme rendered using TreeSitterHighlight.jl!](https://htmlview.glitch.me/?https://gist.github.com/Pangoraw/9ffaff45a2a0165dc9f10a6fdc116660)
A Julia package to export static HTML for highlighted code based on [`tree-sitter/highlight`](https://github.com/tree-sitter/tree-sitter/tree/master/highlight).
## Usage
To highlight a source file, you will need:
- A Tree-sitter language.
- Highlights queries that return a category for each match.
- Injections queries that specify when the language should switch.
```julia
using TreeSitterHighlight, tree_sitter_javascript_jll
highlighter = Highlighter(Dict(
"keyword" => "class=keyword",
))
libts_js = tree_sitter_javascript_jll.libtreesitter_javascript_path
language = Language(
:javascript,
@ccall libts_js.tree_sitter_javascript()::Ptr{Nothing}
)
scope = "source.js"
add_language!(
highlighter,
language;
scope,
injection_regex,
highlights_query,
injections_query,
locals_query,
)
function highlight_js_code(code::String)::String
TreeSitterHighlight.highlight(highlighter, code; scope)
end
highlight_js_code("""
function main(msg) {
console.log(msg)
}
""")
```
## Gallery
This readme is using a [Pluto](https://github.com/JuliaPluto/Pluto.jl) inspired theme and all the code highlighting is made using TreeSitterHighlight.jl!
### Javascript
```javascript
/**
 * Prints a hello world message to the console
**/
function main() {
let world = "world"
const message = `Hello, ${world} !`;
console.log(message);
}
main();
```
| TreeSitterHighlight | https://github.com/Pangoraw/TreeSitterHighlight.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 264 | using Documenter, StanOptimize
makedocs(
modules = [StanOptimize],
format = Documenter.HTML(),
checkdocs = :exports,
sitename = "StanOptimize.jl",
pages = Any["index.md"]
)
deploydocs(
repo = "github.com/StanJulia/StanOptimize.jl.git",
)
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 947 | ######### CmdStan optimize example ###########
using StanOptimize
bernoulli_model = "
data {
int<lower=1> N;
array[N] int<lower=0,upper=1> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
";
#data = Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1])
data = (N = 10, y = [0, 1, 0, 1, 0, 0, 0, 0, 0, 1])
init = (theta = 0.5,)
#init = Dict("theta" => 0.5)
tmpdir = joinpath(@__DIR__, "tmp")
stanmodel = OptimizeModel("bernoulli", bernoulli_model, tmpdir);
rc = stan_optimize(stanmodel; num_chains=4, data, init);
if success(rc)
optim1, cnames = read_optimize(stanmodel)
println()
display(optim1)
println()
end
# Same with saved iterations
stanmodel = OptimizeModel("bernoulli", bernoulli_model);
rc2 = stan_optimize(stanmodel; data, save_iterations=true);
if success(rc2)
optim2, cnames = read_optimize(stanmodel)
println()
display(optim2)
println()
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 831 | """
$(SIGNATURES)
Helper infrastructure to compile and optimize models using `cmdstan`.
"""
module StanOptimize
using Reexport
using Parameters, NamedTupleTools
using DocStringExtensions: FIELDS, SIGNATURES, TYPEDEF
@reexport using StanBase
import StanBase: update_model_file, par, handle_keywords!
import StanBase: executable_path, ensure_executable, stan_compile
import StanBase: update_json_files
import StanBase: data_file_path, init_file_path, sample_file_path
import StanBase: generated_quantities_file_path, log_file_path
import StanBase: diagnostic_file_path, setup_diagnostics
include("stanmodel/OptimizeModel.jl")
include("stanrun/cmdline.jl")
include("stanrun/stan_run.jl")
include("stansamples/read_optimize.jl")
stan_optimize = stan_run
export
OptimizeModel,
stan_optimize,
read_optimize
end # module
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 6406 | import Base: show
mutable struct OptimizeModel <: CmdStanModels
name::AbstractString; # Name of the Stan program
model::AbstractString; # Stan language model program
num_threads::Int64; # Number of C++ threads
num_cpp_chains::Int64; # Number of C++ chains in each exec process
# Sample fields
num_chains::Int64; # Number of (Julia level) chains
seed::Int; # Seed section of cmd to run cmdstan
init_bound::Int; # Bound for initial param values
refresh::Int; # Rate to stream to output
# Algorithm fields
algorithm::Symbol; # :bfgs, :lbfgs or :newton (Default :lbfgs)
# BFGS/L-BFGS specific fields
init_alpha::Float64; # Line search step size for first iteration
tol_obj::Float64; # Convergence tolerance
tol_rel_obj::Float64; # Relative convergence tolerance
tol_grad::Float64; # Convergence tolerance on norm of gradient
tol_rel_grad::Float64; # Relative convergence tolerance
tol_param::Float64; # Convergence tolerance on param changes
# L-BFGS specific fields
history_size::Int; # Amount of history to keep for L-BFGS
# Newton
iter::Int; # Total number of iterations
save_iterations::Bool; # Stream optimization progress to output
# Output files
output_base::AbstractString; # Used for file paths to be created
# Tmpdir setting
tmpdir::AbstractString; # Holds all created files
# Cmdstan path
    exec_path::AbstractString;             # Path to the cmdstan executable
# Data and init file paths
data_file::Vector{AbstractString}; # Array of data files input to cmdstan
init_file::Vector{AbstractString}; # Array of init files input to cmdstan
# Generated command line vector
cmds::Vector{Cmd}; # Array of cmds to be spawned/pipelined
# Files created by cmdstan
sample_file::Vector{String}; # Sample file array (.csv)
log_file::Vector{String}; # Log file array
diagnostic_file::Vector{String}; # Diagnostic file array
# CMDSTAN_HOME
cmdstan_home::AbstractString; # Directory where cmdstan can be found
end
"""
# OptimizeModel
Create an OptimizeModel and compile Stan Language Model.
### Required arguments
```julia
* `name::AbstractString` : Name for the model
* `model::AbstractString` : Stan model source
```
### Optional arguments
```julia
* `tmpdir=mktempdir()` : Directory where output files are stored
```
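### Example
```julia
# Sketch: `bernoulli_model` is a Stan program string, as in the package examples.
sm = OptimizeModel("bernoulli", bernoulli_model, mktempdir())
```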
"""
function OptimizeModel(name::AbstractString, model::AbstractString,
tmpdir = mktempdir())
!isdir(tmpdir) && mkdir(tmpdir)
update_model_file(joinpath(tmpdir, "$(name).stan"), strip(model))
output_base = joinpath(tmpdir, name)
exec_path = executable_path(output_base)
cmdstan_home = CMDSTAN_HOME
error_output = IOBuffer()
is_ok = cd(cmdstan_home) do
success(pipeline(`$(make_command()) -f $(cmdstan_home)/makefile -C $(cmdstan_home) $(exec_path)`;
stderr = error_output))
end
if !is_ok
throw(StanModelError(model, String(take!(error_output))))
end
OptimizeModel(name, model,
# num_threads, num_cpp_chains
1, 1,
# num_chains
4,
-1, # seed
2, # init_bound
100, # refresh
:lbfgs, # algorithm (:lbfgs, :bfgs or :newton)
0.001, # init_alpha
9.9999999999999998e-13, # tol_obj
10000.0, # tol_rel_obj
1e-8, # tol_grad
10000000.0, # tol_rel_grad
        1e-8,                    # tol_param
5, # history_size for L-BFGS
2000, # Newton iterations
false, # save_iterations
output_base, # Output settings
tmpdir, # Tmpdir settings
exec_path, # exec_path
AbstractString[], # Data files
AbstractString[], # Init files
Cmd[], # Command lines
String[], # Sample .csv files
String[], # Log files
String[], # Diagnostic files
cmdstan_home # Path to cmdstan binary
)
end
function Base.show(io::IO, ::MIME"text/plain", m::OptimizeModel)
println("\nC++ threads and chains per forked process:")
println(io, " num_threads = $(m.num_threads)")
println(io, " num_cpp_chains = $(m.num_cpp_chains)")
println(io, "\nSample section:")
println(io, " name = ", m.name)
println(io, " num_chains = ", m.num_chains)
println(io, " seed = ", m.seed)
println(io, " init_bound = ", m.init_bound)
println(io, " refresh = ", m.refresh)
println(io, "\nAlgorithm section:")
println(io, " algorithm = ", m.algorithm)
if m.algorithm in [:lbfgs, :bfgs]
println(io, " init_alpha = ", m.init_alpha)
println(io, " tol_obj = ", m.tol_obj)
println(io, " tol_rel_obj = ", m.tol_rel_obj)
println(io, " tol_grad = ", m.tol_grad)
println(io, " tol_rel_grad = ", m.tol_rel_grad)
println(io, " tol_param = ", m.tol_param)
if m.algorithm == :lbfgs
println(io, " history_size = ", m.history_size)
end
elseif m.algorithm == :newton
println(io, " iter = ", m.iter)
println(io, " save_iterations = ", m.save_iterations)
end
println(io, "\nOther:")
println(io, " output_base = ", m.output_base)
println(io, " tmpdir = ", m.tmpdir)
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 2675 | """
# cmdline
Recursively parse the model to construct command line.
### Method
```julia
cmdline(m)
```
### Required arguments
```julia
* `m::CmdStanModel` : CmdStanSampleModel
```
### Related help
```julia
?OptimizeModel                         : Create an OptimizeModel
?stan_optimize : Execute an OptimizeModel
```
"""
function cmdline(m::OptimizeModel, id)
#=
`/Users/rob/.julia/dev/StanOptimize/examples/Bernoulli/tmp/bernoulli
optimize
algorithm=lbfgs init_alpha=0.001 tol_obj=1.0e-8 tol_rel_
obj=10000.0 tol_grad=1.0e-8 tol_rel_grad=1.0e7 tol_param=1.0e-8
history_size=5 iter=2000 save_iterations=1 random seed=-1 init=2
id=1 data file=/Users/rob/.julia/dev/StanOptimize/examples/Bernoulli/tmp/bernoulli_data_1.R
output file=/Users/rob/.julia/dev/StanOptimize/examples/Bernoulli/tmp/bernoulli_chain_1.csv
refresh=100`
=#
cmd = ``
if isa(m, OptimizeModel)
# Handle the model name field for unix and windows
cmd = `$(m.exec_path)`
# Sample() specific portion of the model
cmd = `$cmd optimize`
cmd = `$cmd algorithm=$(m.algorithm)`
if m.algorithm in [:lbfgs, :bfgs]
cmd = `$cmd init_alpha=$(m.init_alpha)`
cmd = `$cmd tol_obj=$(m.tol_obj)`
cmd = `$cmd tol_rel_obj=$(m.tol_rel_obj)`
cmd = `$cmd tol_grad=$(m.tol_grad)`
cmd = `$cmd tol_rel_grad=$(m.tol_rel_grad)`
cmd = `$cmd tol_param=$(m.tol_param)`
if m.algorithm == :lbfgs
cmd = `$cmd history_size=$(m.history_size)`
end
elseif m.algorithm == :newton
cmd = `$cmd iter=$(m.iter)`
        if m.save_iterations
cmd = `$cmd save_iterations=1`
else
cmd = `$cmd save_iterations=0`
end
end
# Common to all models
cmd = `$cmd random seed=$(m.seed)`
# Init file required?
if length(m.init_file) > 0 && isfile(m.init_file[id])
cmd = `$cmd init=$(m.init_file[id])`
else
cmd = `$cmd init=$(m.init_bound)`
end
# Data file required?
if length(m.data_file) > 0 && isfile(m.data_file[id])
cmd = `$cmd id=$(id) data file=$(m.data_file[id])`
end
# Output options
cmd = `$cmd output`
if length(m.sample_file) > 0
cmd = `$cmd file=$(m.sample_file[id])`
end
if length(m.diagnostic_file) > 0
cmd = `$cmd diagnostic_file=$(m.diagnostic_file[id])`
end
cmd = `$cmd refresh=$(m.refresh)`
end
cmd
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 3079 |
"""
`stan_optimize(...)`
Optimize a StanJulia OptimizationModel <: CmdStanModel
# Extended help
### Dispatch arguments
```julia
* `m:: OptimizeModel` # CmdStanModel subtype
* `use_json=true` # Use JSON3 for data and init files
```
### Keyword arguments
```julia
* `init` : Init dict
* `data` : Data dict
```
$(SIGNATURES)
### Returns
```julia
* `rc` # Return code, 0 is success.
```
See extended help for other keyword arguments ( `??stan_optimize` ).
# Extended help
### Additional configuration keyword arguments
```julia
* `num_chains=4` # Update number of chains.
* `num_threads=1`                      # Update number of threads.
* `seed=-1`                            # Set seed value.
* `init_bound=2`                       # Boundary for initialization
* `refresh=100`                        # Rate to stream to output
* `algorithm=:lbfgs`                   # Algorithms: :lbfgs, :bfgs or :newton.
* `init_alpha=0.001`                   # Line search step size first iteration.
* `tol_obj=1e-12`                      # Convergence tolerance
* `tol_rel_obj=10000.0`                # Relative convergence tolerance
* `tol_grad=1e-8`                      # Convergence tolerance on norm of gradient
* `tol_rel_grad=1e7`                   # Relative convergence tolerance on gradient norm
* `tol_param=1e-8`                     # Convergence tolerance on param changes
* `history_size=5`                     # Amount of history to keep for L-BFGS
* `iter=2000`                          # Total number of Newton iterations
* `save_iterations=false`              # Stream iterations to output
```
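### Example
```julia
# Sketch, following the Bernoulli example shipped with this package:
om = OptimizeModel("bernoulli", bernoulli_model)
rc = stan_optimize(om; data, init)
```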
"""
function stan_run(m::T, use_json=true; kwargs...) where {T <: CmdStanModels}
handle_keywords!(m, kwargs)
# Remove existing sample files
for id in 1:m.num_chains
sfile = sample_file_path(m.output_base, id)
isfile(sfile) && rm(sfile)
end
if use_json
:init in keys(kwargs) && update_json_files(m, kwargs[:init],
m.num_chains, "init")
:data in keys(kwargs) && update_json_files(m, kwargs[:data],
m.num_chains, "data")
else
:init in keys(kwargs) && update_R_files(m, kwargs[:init],
m.num_chains, "init")
:data in keys(kwargs) && update_R_files(m, kwargs[:data],
m.num_chains, "data")
end
m.cmds = [stan_cmds(m, id; kwargs...) for id in 1:m.num_chains]
#println(typeof(m.cmds))
#println()
#println(m.cmds)
run(pipeline(par(m.cmds), stdout=m.log_file[1]))
end
"""
Generate a cmdstan command line (a run `cmd`).
$(SIGNATURES)
Internal, not exported.
"""
function stan_cmds(m::T, id::Integer; kwargs...) where {T <: CmdStanModels}
append!(m.sample_file, [sample_file_path(m.output_base, id)])
append!(m.log_file, [log_file_path(m.output_base, id)])
if length(m.diagnostic_file) > 0
append!(m.diagnostic_file, [diagnostic_file_path(m.output_base, id)])
end
cmdline(m, id)
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 2150 | using Unicode, DelimitedFiles
"""
# read_optimize
Read optimize output file created by cmdstan.
### Method
```julia
read_optimize(m::OptimizeModel)
```
### Required arguments
```julia
* `m::OptimizeModel` # OptimizeModel object
```
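### Example
```julia
# Sketch: assumes `stan_optimize(m; data)` completed successfully.
optim, cnames = read_optimize(m)
optim["theta"][end] # final optimized value of theta
```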
"""
function read_optimize(m::OptimizeModel)
## Collect the results in a Dict
cnames = String[]
res_type = "chain"
## tdict contains the arrays of values ##
tdict = Dict()
for i in 1:m.num_chains
if isfile("$(m.output_base)_$(res_type)_$(i).csv")
# A result type file for chain i is present ##
instream = open("$(m.output_base)_$(res_type)_$(i).csv")
if i == 1
str = read(instream, String)
sstr = split(str)
tdict[:stan_version] = "$(parse(Int, sstr[4])).$(parse(Int, sstr[8])).$(parse(Int, sstr[12]))"
close(instream)
instream = open("$(m.output_base)_$(res_type)_$(i).csv")
end
# After reopening the file, skip all comment lines
skipchars(isspace, instream, linecomment='#')
line = Unicode.normalize(readline(instream), newline2lf=true)
# Extract samples variable names
idx = split(strip(line), ",")
index = [idx[k] for k in 1:length(idx)]
cnames = convert.(String, idx)
# Read optimized values
      for _ in 1:m.iter # the iteration counter itself is unused
line = Unicode.normalize(readline(instream), newline2lf=true)
flds = Float64[]
if eof(instream) && length(line) < 2
close(instream)
break
else
flds = parse.(Float64, split(strip(line), ","))
for k in 1:length(index)
if index[k] in keys(tdict)
# For all subsequent chains the entry should already be in tdict
append!(tdict[index[k]], flds[k])
else
# First chain
tdict[index[k]] = [flds[k]]
end
end
end
end
end
end
(tdict, cnames)
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 386 | using StanOptimize, Test
if haskey(ENV, "JULIA_CMDSTAN_HOME") || haskey(ENV, "CMDSTAN")
@testset "Bernoulli optimize" begin
include(joinpath(@__DIR__, "../examples/Bernoulli/bernoulli.jl"))
@test optim1["theta"][end] ≈ 0.3 atol=0.1
@test optim2["theta"][end] ≈ 0.3 atol=0.1
end
else
println("\nJULIA_CMDSTAN_HOME and CMDSTAN not set. Skipping tests")
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 363 | ####
#### Coverage summary, printed as "(percentage) covered".
####
#### Useful for CI environments that just want a summary (eg a Gitlab setup).
####
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
covered_lines, total_lines = get_summary(process_folder())
percentage = covered_lines / total_lines * 100
println("($(percentage)%) covered")
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | code | 266 | # only push coverage from one bot
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.1" || exit(0)
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
Codecov.submit(Codecov.process_folder())
end
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | docs | 2068 | # StanOptimize.jl
| **Project Status** | **Build Status** |
|:---------------------------:|:-----------------:|
|![][project-status-img] | ![][CI-build] |
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://stanjulia.github.io/StanOptimize.jl/latest
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://stanjulia.github.io/StanOptimize.jl/stable
[CI-build]: https://github.com/stanjulia/StanOptimize.jl/workflows/CI/badge.svg?branch=master
[issues-url]: https://github.com/stanjulia/StanOptimize.jl/issues
[project-status-img]: https://img.shields.io/badge/lifecycle-stable-green.svg
## Important note
StanOptimize.jl v3 is a breaking change.
While in StanOptimize.jl v2 one could select Bfgs as optimization algorithm:
```Julia
om = OptimizeModel("bernoulli", bernoulli_model;
method=StanOptimize.Optimize(;method=StanOptimize.Bfgs()),
#tmpdir = joinpath(@__DIR__, "tmp"));
tmpdir = mktempdir());
rc = stan_optimize(om; data, init)
```
In StanOptimize.jl v3:
```Julia
om = OptimizeModel("bernoulli", bernoulli_model)
rc = stan_optimize(om; data, init, algorithm=:bfgs)
```
See `??OptimizeModel` and `??stan_optimize`.
## Installation
This package is registered. Install with
```julia
pkg> add StanOptimize
```
You need a working [cmdstan](https://mc-stan.org/users/interfaces/cmdstan.html) installation, the path of which you should specify in either `CMDSTAN` or `JULIA_CMDSTAN_HOME`, e.g. in your `~/.julia/config/startup.jl` have a line like
```julia
# CmdStan setup
ENV["CMDSTAN"] = expanduser("~/src/cmdstan-2.35.0/") # replace with your path
```
This package is derived from Tamas Papp's [StanRun.jl]() package.
## Usage
It is recommended that you start your Julia process with multiple worker processes to take advantage of parallel sampling, eg
```sh
julia -p auto
```
Otherwise, `stan_sample` will use a single process.
Use this package like this:
```julia
using StanOptimize
```
See the docstrings (in particular `?StanOptimize`) for more.
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"MIT"
] | 4.4.3 | 1054fed941789b86a5975e70ca9783164510cf7a | docs | 43 | # StanOptimize
*Documentation goes here.*
| StanOptimize | https://github.com/StanJulia/StanOptimize.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 28096 | module MathOptInterfaceOSQP
include("modcaches.jl")
using .ModificationCaches
using SparseArrays
using MathOptInterface
using MathOptInterface.Utilities
using LinearAlgebra: rmul!
export Optimizer, OSQPSettings, OSQPModel
const MOI = MathOptInterface
const MOIU = MathOptInterface.Utilities
const CI = MOI.ConstraintIndex
const VI = MOI.VariableIndex
const SparseTriplets = Tuple{Vector{Int},Vector{Int},Vector{<:Any}}
const Affine = MOI.ScalarAffineFunction{Float64}
const Quadratic = MOI.ScalarQuadraticFunction{Float64}
const VectorAffine = MOI.VectorAffineFunction{Float64}
const Interval = MOI.Interval{Float64}
const LessThan = MOI.LessThan{Float64}
const GreaterThan = MOI.GreaterThan{Float64}
const EqualTo = MOI.EqualTo{Float64}
const IntervalConvertible = Union{Interval,LessThan,GreaterThan,EqualTo}
const Zeros = MOI.Zeros
const Nonnegatives = MOI.Nonnegatives
const Nonpositives = MOI.Nonpositives
const SupportedVectorSets = Union{Zeros,Nonnegatives,Nonpositives}
import OSQP
lower(::Zeros, i::Int) = 0.0
lower(::Nonnegatives, i::Int) = 0.0
lower(::Nonpositives, i::Int) = -Inf
upper(::Zeros, i::Int) = 0.0
upper(::Nonnegatives, i::Int) = Inf
upper(::Nonpositives, i::Int) = 0.0
# TODO: just use ∈ on 0.7 (allocates on 0.6):
function _contains(haystack, needle)
for x in haystack
x == needle && return true
end
return false
end
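"""
    Optimizer(; kwargs...)
Create an OSQP `MathOptInterface` optimizer. Keyword arguments are forwarded as
raw OSQP solver settings, e.g. (a sketch; any valid OSQP setting can be used):
```julia
optimizer = Optimizer(verbose = false, eps_abs = 1e-6)
```
"""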
mutable struct Optimizer <: MOI.AbstractOptimizer
inner::OSQP.Model
hasresults::Bool
results::OSQP.Results
is_empty::Bool
silent::Bool
settings::Dict{Symbol,Any} # need to store these, because they should be preserved if empty! is called
sense::MOI.OptimizationSense
objconstant::Float64
constrconstant::Vector{Float64}
modcache::ProblemModificationCache{Float64}
warmstartcache::WarmStartCache{Float64}
rowranges::Dict{Int,UnitRange{Int}}
function Optimizer(; kwargs...)
inner = OSQP.Model()
hasresults = false
results = OSQP.Results()
is_empty = true
sense = MOI.MIN_SENSE
objconstant = 0.0
constrconstant = Float64[]
modcache = ProblemModificationCache{Float64}()
warmstartcache = WarmStartCache{Float64}()
rowranges = Dict{Int,UnitRange{Int}}()
optimizer = new(
inner,
hasresults,
results,
is_empty,
false,
Dict{Symbol,Any}(:verbose => true),
sense,
objconstant,
constrconstant,
modcache,
warmstartcache,
rowranges,
)
for (key, value) in kwargs
MOI.set(optimizer, MOI.RawOptimizerAttribute(String(key)), value)
end
return optimizer
end
end
MOI.get(::Optimizer, ::MOI.SolverName) = "OSQP"
MOI.supports(::Optimizer, ::MOI.Silent) = true
function MOI.set(optimizer::Optimizer, ::MOI.Silent, value::Bool)
optimizer.silent = value
if !MOI.is_empty(optimizer)
if optimizer.silent
OSQP.update_settings!(optimizer.inner; :verbose => false)
else
OSQP.update_settings!(
optimizer.inner;
:verbose => optimizer.settings[:verbose],
)
end
end
end
MOI.get(optimizer::Optimizer, ::MOI.Silent) = optimizer.silent
MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true
function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, limit::Real)
MOI.set(model, OSQPSettings.TimeLimit(), limit)
return
end
function MOI.set(model::Optimizer, attr::MOI.TimeLimitSec, ::Nothing)
delete!(model.settings, :time_limit)
if !MOI.is_empty(model)
OSQP.update_settings!(model.inner, time_limit = 0.0)
end
return
end
function MOI.get(model::Optimizer, ::MOI.TimeLimitSec)
return get(model.settings, :time_limit, nothing)
end
hasresults(optimizer::Optimizer) = optimizer.hasresults
function MOI.empty!(optimizer::Optimizer)
optimizer.inner = OSQP.Model()
optimizer.hasresults = false
optimizer.results = OSQP.Results()
optimizer.sense = MOI.MIN_SENSE # model parameter, so needs to be reset
optimizer.objconstant = 0.0
optimizer.constrconstant = Float64[]
optimizer.modcache = ProblemModificationCache{Float64}()
optimizer.warmstartcache = WarmStartCache{Float64}()
empty!(optimizer.rowranges)
return optimizer
end
MOI.is_empty(optimizer::Optimizer) = optimizer.inner.isempty
function MOI.copy_to(dest::Optimizer, src::MOI.ModelLike)
MOI.empty!(dest)
idxmap = MOIU.IndexMap(dest, src)
assign_constraint_row_ranges!(dest.rowranges, idxmap, src)
dest.sense, P, q, dest.objconstant = processobjective(src, idxmap)
A, l, u, dest.constrconstant =
processconstraints(src, idxmap, dest.rowranges)
settings = copy(dest.settings)
if dest.silent
settings[:verbose] = false
end
OSQP.setup!(dest.inner; P = P, q = q, A = A, l = l, u = u, settings...)
dest.modcache = ProblemModificationCache(P, q, A, l, u)
dest.warmstartcache = WarmStartCache{Float64}(size(A, 2), size(A, 1))
processprimalstart!(dest.warmstartcache.x, src, idxmap)
processdualstart!(dest.warmstartcache.y, src, idxmap, dest.rowranges)
return idxmap
end
"""
Set up index map from `src` variables and constraints to `dest` variables and constraints.
"""
function MOIU.IndexMap(dest::Optimizer, src::MOI.ModelLike)
idxmap = MOIU.IndexMap()
vis_src = MOI.get(src, MOI.ListOfVariableIndices())
for i in eachindex(vis_src)
idxmap[vis_src[i]] = VI(i)
end
i = 0
for (F, S) in MOI.get(src, MOI.ListOfConstraintTypesPresent())
MOI.supports_constraint(dest, F, S) ||
throw(MOI.UnsupportedConstraint{F,S}())
cis_src = MOI.get(src, MOI.ListOfConstraintIndices{F,S}())
for ci in cis_src
i += 1
idxmap[ci] = CI{F,S}(i)
end
end
return idxmap
end
function assign_constraint_row_ranges!(
rowranges::Dict{Int,UnitRange{Int}},
idxmap::MOIU.IndexMap,
src::MOI.ModelLike,
)
startrow = 1
for (F, S) in MOI.get(src, MOI.ListOfConstraintTypesPresent())
cis_src = MOI.get(src, MOI.ListOfConstraintIndices{F,S}())
for ci_src in cis_src
set = MOI.get(src, MOI.ConstraintSet(), ci_src)
ci_dest = idxmap[ci_src]
endrow = startrow + MOI.dimension(set) - 1
rowranges[ci_dest.value] = startrow:endrow
startrow = endrow + 1
end
end
end
function constraint_rows(
rowranges::Dict{Int,UnitRange{Int}},
ci::CI{<:Any,<:MOI.AbstractScalarSet},
)
rowrange = rowranges[ci.value]
length(rowrange) == 1 || error()
return first(rowrange)
end
function constraint_rows(
rowranges::Dict{Int,UnitRange{Int}},
ci::CI{<:Any,<:MOI.AbstractVectorSet},
)
return rowranges[ci.value]
end
function constraint_rows(optimizer::Optimizer, ci::CI)
return constraint_rows(optimizer.rowranges, ci)
end
"""
Return objective sense, as well as matrix `P`, vector `q`, and scalar `c` such that objective function is `1/2 x' P x + q' x + c`.
"""
function processobjective(src::MOI.ModelLike, idxmap)
sense = MOI.get(src, MOI.ObjectiveSense())
n = MOI.get(src, MOI.NumberOfVariables())
q = zeros(n)
if sense != MOI.FEASIBILITY_SENSE
function_type = MOI.get(src, MOI.ObjectiveFunctionType())
if function_type == MOI.ScalarAffineFunction{Float64}
faffine = MOI.get(
src,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
)
P = spzeros(n, n)
processlinearterms!(q, faffine.terms, idxmap)
c = faffine.constant
elseif function_type == MOI.ScalarQuadraticFunction{Float64}
fquadratic = MOI.get(
src,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
)
I = [
Int(idxmap[term.variable_1].value) for
term in fquadratic.quadratic_terms
]
J = [
Int(idxmap[term.variable_2].value) for
term in fquadratic.quadratic_terms
]
V = [term.coefficient for term in fquadratic.quadratic_terms]
upper_triangularize!(I, J, V)
P = sparse(I, J, V, n, n)
processlinearterms!(q, fquadratic.affine_terms, idxmap)
c = fquadratic.constant
else
throw(
MOI.UnsupportedAttribute(
MOI.ObjectiveFunction{function_type}(),
),
)
end
sense == MOI.MAX_SENSE && (rmul!(P, -1); rmul!(q, -1); c = -c)
else
P = spzeros(n, n)
q = zeros(n)
c = 0.0
end
return sense, P, q, c
end
function processlinearterms!(
q,
terms::Vector{<:MOI.ScalarAffineTerm},
idxmapfun::Function = identity,
)
# This is currently needed to avoid depwarns. TODO: make this nice again:
if q isa VectorModificationCache
q[:] = 0
else
q .= 0
end
for term in terms
var = term.variable
coeff = term.coefficient
q[idxmapfun(var).value] += coeff
end
end
function processlinearterms!(
q,
terms::Vector{<:MOI.ScalarAffineTerm},
idxmap::MOIU.IndexMap,
)
return processlinearterms!(q, terms, var -> idxmap[var])
end
function upper_triangularize!(I::Vector{Int}, J::Vector{Int}, V::Vector)
n = length(V)
(length(I) == length(J) == n) || error()
for i in 1:n
if I[i] > J[i]
I[i], J[i] = J[i], I[i]
end
end
end
function processconstraints(
src::MOI.ModelLike,
idxmap,
rowranges::Dict{Int,UnitRange{Int}},
)
m = mapreduce(length, +, values(rowranges), init = 0)
l = Vector{Float64}(undef, m)
u = Vector{Float64}(undef, m)
constant = Vector{Float64}(undef, m)
bounds = (l, u)
I = Int[]
J = Int[]
V = Float64[]
for (F, S) in MOI.get(src, MOI.ListOfConstraintTypesPresent())
processconstraints!(
(I, J, V),
bounds,
constant,
src,
idxmap,
rowranges,
F,
S,
)
end
l .-= constant
u .-= constant
n = MOI.get(src, MOI.NumberOfVariables())
A = sparse(I, J, V, m, n)
return (A, l, u, constant)
end
function processconstraints!(
triplets::SparseTriplets,
bounds::Tuple{<:Vector,<:Vector},
constant::Vector{Float64},
src::MOI.ModelLike,
idxmap,
rowranges::Dict{Int,UnitRange{Int}},
F::Type{<:MOI.AbstractFunction},
S::Type{<:MOI.AbstractSet},
)
cis_src = MOI.get(src, MOI.ListOfConstraintIndices{F,S}())
for ci in cis_src
s = MOI.get(src, MOI.ConstraintSet(), ci)
f = MOI.get(src, MOI.ConstraintFunction(), ci)
rows = constraint_rows(rowranges, idxmap[ci])
processconstant!(constant, rows, f)
processlinearpart!(triplets, f, rows, idxmap)
processconstraintset!(bounds, rows, s)
end
return nothing
end
function processconstant!(c::Vector{Float64}, row::Int, f::Affine)
c[row] = MOI.constant(f, Float64)
return nothing
end
function processconstant!(
c::Vector{Float64},
rows::UnitRange{Int},
f::VectorAffine,
)
for (i, row) in enumerate(rows)
c[row] = f.constants[i]
end
end
function processlinearpart!(
triplets::SparseTriplets,
f::MOI.ScalarAffineFunction,
row::Int,
idxmap,
)
(I, J, V) = triplets
for term in f.terms
var = term.variable
coeff = term.coefficient
col = idxmap[var].value
push!(I, row)
push!(J, col)
push!(V, coeff)
end
end
function processlinearpart!(
triplets::SparseTriplets,
f::MOI.VectorAffineFunction,
rows::UnitRange{Int},
idxmap,
)
(I, J, V) = triplets
for term in f.terms
row = rows[term.output_index]
var = term.scalar_term.variable
coeff = term.scalar_term.coefficient
col = idxmap[var].value
push!(I, row)
push!(J, col)
push!(V, coeff)
end
end
function processconstraintset!(
bounds::Tuple{<:Vector,<:Vector},
row::Int,
s::IntervalConvertible,
)
return processconstraintset!(bounds, row, MOI.Interval(s))
end
function processconstraintset!(
bounds::Tuple{<:Vector,<:Vector},
row::Int,
interval::Interval,
)
l, u = bounds
l[row] = interval.lower
u[row] = interval.upper
return nothing
end
function processconstraintset!(
bounds::Tuple{<:Vector,<:Vector},
rows::UnitRange{Int},
s::S,
) where {S<:SupportedVectorSets}
l, u = bounds
for (i, row) in enumerate(rows)
l[row] = lower(s, i)
u[row] = upper(s, i)
end
end
function processprimalstart!(x, src::MOI.ModelLike, idxmap)
has_primal_start = false
for attr in MOI.get(src, MOI.ListOfVariableAttributesSet())
if attr isa MOI.VariablePrimalStart
has_primal_start = true
end
end
if has_primal_start
vis_src = MOI.get(src, MOI.ListOfVariableIndices())
for vi in vis_src
value = MOI.get(src, MOI.VariablePrimalStart(), vi)
            if value !== nothing
x[idxmap[vi].value] = value
end
end
end
end
function processdualstart!(
y,
src::MOI.ModelLike,
idxmap,
rowranges::Dict{Int,UnitRange{Int}},
)
for (F, S) in MOI.get(src, MOI.ListOfConstraintTypesPresent())
has_dual_start = false
for attr in MOI.get(src, MOI.ListOfConstraintAttributesSet{F,S}())
if attr isa MOI.ConstraintDualStart
has_dual_start = true
end
end
if has_dual_start
cis_src = MOI.get(src, MOI.ListOfConstraintIndices{F,S}())
for ci in cis_src
rows = constraint_rows(rowranges, idxmap[ci])
dual = MOI.get(src, MOI.ConstraintDualStart(), ci)
                if dual !== nothing
for (i, row) in enumerate(rows)
y[row] = -dual[i] # opposite dual convention
end
end
end
end
end
end
## Standard optimizer attributes:
MOI.get(optimizer::Optimizer, ::MOI.ObjectiveSense) = optimizer.sense
function MOI.get(optimizer::Optimizer, a::MOI.NumberOfVariables)
return OSQP.dimensions(optimizer.inner)[1]
end
function MOI.get(optimizer::Optimizer, a::MOI.ListOfVariableIndices)
return [VI(i) for i in 1:MOI.get(optimizer, MOI.NumberOfVariables())] # TODO: support for UnitRange would be nice
end
## Solver-specific optimizer attributes:
module OSQPSettings
using MathOptInterface
using OSQP
export OSQPAttribute, isupdatable
abstract type OSQPAttribute <: MathOptInterface.AbstractOptimizerAttribute end
# TODO: just use ∈ on 0.7 (allocates on 0.6):
function _contains(haystack, needle)
for x in haystack
x == needle && return true
end
return false
end
for setting in fieldnames(OSQP.Settings)
Attribute =
Symbol(mapreduce(uppercasefirst, *, split(String(setting), '_'))) # to camelcase
@eval begin
export $Attribute
struct $Attribute <: OSQPAttribute end
Base.Symbol(::$Attribute) = $(QuoteNode(setting))
function isupdatable(::$Attribute)
return $(_contains(OSQP.UPDATABLE_SETTINGS, setting))
end
end
end
end # module
using .OSQPSettings
_symbol(param::MOI.RawOptimizerAttribute) = Symbol(param.name)
_symbol(a::OSQPAttribute) = Symbol(a)
function OSQPSettings.isupdatable(param::MOI.RawOptimizerAttribute)
return _contains(OSQP.UPDATABLE_SETTINGS, _symbol(param))
end
function MOI.set(
optimizer::Optimizer,
a::Union{OSQPAttribute,MOI.RawOptimizerAttribute},
value,
)
(isupdatable(a) || MOI.is_empty(optimizer)) ||
throw(MOI.SetAttributeNotAllowed(a))
setting = _symbol(a)
optimizer.settings[setting] = value
if !MOI.is_empty(optimizer)
OSQP.update_settings!(optimizer.inner; setting => value)
end
end
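# Example (sketch): settings can be set either through the generated attribute
# types or through `MOI.RawOptimizerAttribute`, e.g.
#
#     MOI.set(optimizer, OSQPSettings.MaxIter(), 4000)
#     MOI.set(optimizer, MOI.RawOptimizerAttribute("eps_abs"), 1e-6)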
function MOI.get(
optimizer::Optimizer,
a::Union{OSQPAttribute,MOI.RawOptimizerAttribute},
)
return optimizer.settings[_symbol(a)]
end
## Optimizer methods:
function MOI.optimize!(optimizer::Optimizer)
processupdates!(optimizer.inner, optimizer.modcache)
processupdates!(optimizer.inner, optimizer.warmstartcache)
OSQP.solve!(optimizer.inner, optimizer.results)
optimizer.hasresults = true
# Copy previous solution into warm start cache without setting the dirty bit:
copyto!(optimizer.warmstartcache.x.data, optimizer.results.x)
copyto!(optimizer.warmstartcache.y.data, optimizer.results.y)
return nothing
end
## Optimizer attributes:
MOI.get(optimizer::Optimizer, ::MOI.RawSolver) = optimizer.inner
MOI.get(optimizer::Optimizer, ::MOI.ResultCount) = optimizer.hasresults ? 1 : 0
function MOI.supports(
::Optimizer,
::MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}},
)
return true
end
MOI.supports(::Optimizer, ::MOI.ObjectiveFunction{Quadratic}) = true
MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true
function MOI.set(
optimizer::Optimizer,
a::MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}},
obj::MOI.ScalarAffineFunction{Float64},
)
MOI.is_empty(optimizer) && throw(MOI.SetAttributeNotAllowed(a))
optimizer.modcache.P[:] = 0
processlinearterms!(optimizer.modcache.q, obj.terms)
optimizer.objconstant = MOI.constant(obj)
return nothing
end
function MOI.set(
optimizer::Optimizer,
a::MOI.ObjectiveFunction{Quadratic},
obj::Quadratic,
)
MOI.is_empty(optimizer) && throw(MOI.SetAttributeNotAllowed(a))
cache = optimizer.modcache
cache.P[:] = 0
for term in obj.quadratic_terms
row = term.variable_1.value
col = term.variable_2.value
coeff = term.coefficient
row > col && ((row, col) = (col, row)) # upper triangle only
if !(CartesianIndex(row, col) in cache.P.cartesian_indices_set)
throw(
MOI.SetAttributeNotAllowed(
a,
"This nonzero entry was not in the sparsity pattern of the objective function provided at `MOI.copy_to` and OSQP does not support changing the sparsity pattern.",
),
)
end
cache.P[row, col] += coeff
end
processlinearterms!(optimizer.modcache.q, obj.affine_terms)
optimizer.objconstant = MOI.constant(obj)
return nothing
end
function MOI.get(optimizer::Optimizer, a::MOI.ObjectiveValue)
MOI.check_result_index_bounds(optimizer, a)
rawobj = optimizer.results.info.obj_val + optimizer.objconstant
return ifelse(optimizer.sense == MOI.MAX_SENSE, -rawobj, rawobj)
end
error_not_solved() = error("Problem is unsolved.")
function check_has_results(optimizer::Optimizer)
if !hasresults(optimizer)
error_not_solved()
end
end
# Since these aren't explicitly returned by OSQP, I feel like it would be better to have a fallback method compute these:
function MOI.get(optimizer::Optimizer, ::MOI.SolveTimeSec)
check_has_results(optimizer)
return optimizer.results.info.run_time
end
function MOI.get(optimizer::Optimizer, ::MOI.RawStatusString)
return string(optimizer.results.info.status)
end
function MOI.get(optimizer::Optimizer, ::MOI.TerminationStatus)
hasresults(optimizer) || return MOI.OPTIMIZE_NOT_CALLED
osqpstatus = optimizer.results.info.status
if osqpstatus == :Unsolved
return MOI.OPTIMIZE_NOT_CALLED
elseif osqpstatus == :Interrupted
return MOI.INTERRUPTED
elseif osqpstatus == :Dual_infeasible
return MOI.DUAL_INFEASIBLE
elseif osqpstatus == :Primal_infeasible
return MOI.INFEASIBLE
elseif osqpstatus == :Max_iter_reached
return MOI.ITERATION_LIMIT
elseif osqpstatus == :Solved
return MOI.OPTIMAL
elseif osqpstatus == :Solved_inaccurate
return MOI.ALMOST_OPTIMAL
elseif osqpstatus == :Primal_infeasible_inaccurate
return MOI.ALMOST_INFEASIBLE
else
@assert osqpstatus == :Non_convex
return MOI.INVALID_MODEL
end
end
function MOI.get(optimizer::Optimizer, a::MOI.PrimalStatus)
if a.result_index > MOI.get(optimizer, MOI.ResultCount())
return MOI.NO_SOLUTION
end
osqpstatus = optimizer.results.info.status
if osqpstatus == :Unsolved
return MOI.NO_SOLUTION
elseif osqpstatus == :Primal_infeasible
# FIXME is it `NO_SOLUTION` (e.g. `NaN`s) or `INFEASIBLE_POINT` (e.g. current primal solution that we know is infeasible with the dual certificate)
return MOI.NO_SOLUTION
elseif osqpstatus == :Solved
return MOI.FEASIBLE_POINT
elseif osqpstatus == :Primal_infeasible_inaccurate
return MOI.UNKNOWN_RESULT_STATUS
elseif osqpstatus == :Dual_infeasible
return MOI.INFEASIBILITY_CERTIFICATE
else # :Interrupted, :Max_iter_reached, :Solved_inaccurate, :Non_convex (TODO: good idea? use OSQP.SOLUTION_PRESENT?)
return MOI.NO_SOLUTION
end
end
function MOI.get(optimizer::Optimizer, a::MOI.DualStatus)
if a.result_index > MOI.get(optimizer, MOI.ResultCount())
return MOI.NO_SOLUTION
end
osqpstatus = optimizer.results.info.status
if osqpstatus == :Unsolved
return MOI.NO_SOLUTION
elseif osqpstatus == :Dual_infeasible
# FIXME is it `NO_SOLUTION` (e.g. `NaN`s) or `INFEASIBLE_POINT` (e.g. current dual solution that we know is infeasible with the dual certificate)
return MOI.NO_SOLUTION
elseif osqpstatus == :Primal_infeasible
return MOI.INFEASIBILITY_CERTIFICATE
elseif osqpstatus == :Primal_infeasible_inaccurate
return MOI.NEARLY_INFEASIBILITY_CERTIFICATE
elseif osqpstatus == :Solved
return MOI.FEASIBLE_POINT
else # :Interrupted, :Max_iter_reached, :Solved_inaccurate, :Non_convex (TODO: good idea? use OSQP.SOLUTION_PRESENT?)
return MOI.NO_SOLUTION
end
end
## Variables:
function MOI.is_valid(optimizer::Optimizer, vi::VI)
return vi.value ∈ 1:MOI.get(optimizer, MOI.NumberOfVariables())
end
## Variable attributes:
function MOI.get(optimizer::Optimizer, a::MOI.VariablePrimal, vi::VI)
MOI.check_result_index_bounds(optimizer, a)
x = ifelse(
_contains(OSQP.SOLUTION_PRESENT, optimizer.results.info.status),
optimizer.results.x,
optimizer.results.dual_inf_cert,
)
return x[vi.value]
end
function MOI.set(
optimizer::Optimizer,
a::MOI.VariablePrimalStart,
vi::VI,
value,
)
MOI.is_empty(optimizer) && throw(MOI.SetAttributeNotAllowed(a))
return optimizer.warmstartcache.x[vi.value] = value
end
## Constraints:
function MOI.is_valid(optimizer::Optimizer, ci::CI)
MOI.is_empty(optimizer) && return false
return ci.value ∈ keys(optimizer.rowranges)
end
function MOI.set(
optimizer::Optimizer,
a::MOI.ConstraintDualStart,
ci::CI,
value,
)
MOI.is_empty(optimizer) && throw(MOI.SetAttributeNotAllowed(a))
rows = constraint_rows(optimizer, ci)
for (i, row) in enumerate(rows)
optimizer.warmstartcache.y[row] = -value[i] # opposite dual convention
end
return nothing
end
# function modification:
function MOI.set(
optimizer::Optimizer,
attr::MOI.ConstraintFunction,
ci::CI{Affine,<:IntervalConvertible},
f::Affine,
)
MOI.is_valid(optimizer, ci) || throw(MOI.InvalidIndex(ci))
row = constraint_rows(optimizer, ci)
optimizer.modcache.A[row, :] = 0
for term in f.terms
col = term.variable.value
coeff = term.coefficient
optimizer.modcache.A[row, col] += coeff
end
Δconstant = optimizer.constrconstant[row] - f.constant
optimizer.constrconstant[row] = f.constant
optimizer.modcache.l[row] += Δconstant
optimizer.modcache.u[row] += Δconstant
return nothing
end
function MOI.set(
optimizer::Optimizer,
attr::MOI.ConstraintFunction,
ci::CI{VectorAffine,<:SupportedVectorSets},
f::VectorAffine,
)
MOI.is_valid(optimizer, ci) || throw(MOI.InvalidIndex(ci))
rows = constraint_rows(optimizer, ci)
for row in rows
optimizer.modcache.A[row, :] = 0
end
for term in f.terms
row = rows[term.output_index]
col = term.scalar_term.variable.value
coeff = term.scalar_term.coefficient
optimizer.modcache.A[row, col] += coeff
end
for (i, row) in enumerate(rows)
Δconstant = optimizer.constrconstant[row] - f.constants[i]
optimizer.constrconstant[row] = f.constants[i]
optimizer.modcache.l[row] += Δconstant
optimizer.modcache.u[row] += Δconstant
end
end
# set modification:
function MOI.set(
optimizer::Optimizer,
attr::MOI.ConstraintSet,
ci::CI{Affine,S},
s::S,
) where {S<:IntervalConvertible}
MOI.is_valid(optimizer, ci) || throw(MOI.InvalidIndex(ci))
interval = S <: Interval ? s : MOI.Interval(s)
row = constraint_rows(optimizer, ci)
constant = optimizer.constrconstant[row]
optimizer.modcache.l[row] = interval.lower - constant
optimizer.modcache.u[row] = interval.upper - constant
return nothing
end
function MOI.set(
optimizer::Optimizer,
attr::MOI.ConstraintSet,
ci::CI{VectorAffine,S},
s::S,
) where {S<:SupportedVectorSets}
MOI.is_valid(optimizer, ci) || throw(MOI.InvalidIndex(ci))
rows = constraint_rows(optimizer, ci)
for (i, row) in enumerate(rows)
constant = optimizer.constrconstant[row]
optimizer.modcache.l[row] = lower(s, i) - constant
optimizer.modcache.u[row] = upper(s, i) - constant
end
return nothing
end
# partial function modification:
function MOI.modify(
optimizer::Optimizer,
ci::CI{Affine,<:IntervalConvertible},
change::MOI.ScalarCoefficientChange,
)
MOI.is_valid(optimizer, ci) || throw(MOI.InvalidIndex(ci))
row = constraint_rows(optimizer, ci)
optimizer.modcache.A[row, change.variable.value] = change.new_coefficient
return nothing
end
# TODO: MultirowChange?
function MOI.supports_constraint(
optimizer::Optimizer,
::Type{Affine},
::Type{<:IntervalConvertible},
)
return true
end
function MOI.supports_constraint(
optimizer::Optimizer,
::Type{VectorAffine},
::Type{<:SupportedVectorSets},
)
return true
end
## Constraint attributes:
function MOI.get(optimizer::Optimizer, a::MOI.ConstraintDual, ci::CI)
MOI.check_result_index_bounds(optimizer, a)
y = ifelse(
_contains(OSQP.SOLUTION_PRESENT, optimizer.results.info.status),
optimizer.results.y,
optimizer.results.prim_inf_cert,
)
rows = constraint_rows(optimizer, ci)
return -y[rows]
end
# Objective modification
function MOI.modify(
optimizer::Optimizer,
attr::MOI.ObjectiveFunction,
change::MOI.ScalarConstantChange,
)
MOI.is_empty(optimizer) && throw(MOI.ModifyObjectiveNotAllowed(change))
constant = change.new_constant
if optimizer.sense == MOI.MAX_SENSE
constant = -constant
end
return optimizer.objconstant = constant
end
function MOI.modify(
optimizer::Optimizer,
attr::MOI.ObjectiveFunction,
change::MOI.ScalarCoefficientChange,
)
MOI.is_empty(optimizer) && throw(MOI.ModifyObjectiveNotAllowed(change))
coef = change.new_coefficient
if optimizer.sense == MOI.MAX_SENSE
coef = -coef
end
return optimizer.modcache.q[change.variable.value] = coef
end
# There is currently no ScalarQuadraticCoefficientChange.
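# `OSQPModel` (generated below via `MOIU.@model`) is a model type that can cache
# a problem before it is copied to the `Optimizer`, e.g. (a sketch):
#
#     cached = MOIU.CachingOptimizer(OSQPModel{Float64}(), Optimizer())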
MOIU.@model(
OSQPModel, # modelname
(), # scalarsets
(MOI.Interval, MOI.LessThan, MOI.GreaterThan, MOI.EqualTo), # typedscalarsets
(MOI.Zeros, MOI.Nonnegatives, MOI.Nonpositives), # vectorsets
(), # typedvectorsets
(), # scalarfunctions
(MOI.ScalarAffineFunction, MOI.ScalarQuadraticFunction), # typedscalarfunctions
(), # vectorfunctions
(MOI.VectorAffineFunction,) # typedvectorfunctions
)
end # module
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 259 | module OSQP
export OSQPMathProgBaseInterface
using SparseArrays
using LinearAlgebra
using OSQP_jll
include("constants.jl")
include("types.jl")
include("interface.jl")
include("MOI_wrapper.jl")
const Optimizer = MathOptInterfaceOSQP.Optimizer
end # module
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 990 | const QDLDL_SOLVER = 0
const MKL_PARDISO_SOLVER = 1
# Define OSQP infinity constants
const OSQP_INFTY = 1e30
# OSQP return values
# https://github.com/oxfordcontrol/osqp/blob/master/include/constants.h
const status_map = Dict{Int,Symbol}(
4 => :Dual_infeasible_inaccurate,
3 => :Primal_infeasible_inaccurate,
2 => :Solved_inaccurate,
1 => :Solved,
-2 => :Max_iter_reached,
-3 => :Primal_infeasible,
-4 => :Dual_infeasible,
-5 => :Interrupted,
-6 => :Time_limit_reached,
-7 => :Non_convex,
-10 => :Unsolved,
)
const SOLUTION_PRESENT = [:Solved_inaccurate, :Solved, :Max_iter_reached]
# UPDATABLE_DATA
const UPDATABLE_DATA = [:q, :l, :u, :Px, :Px_idx, :Ax, :Ax_idx]
# UPDATABLE_SETTINGS
const UPDATABLE_SETTINGS = [
:max_iter,
:eps_abs,
:eps_rel,
:eps_prim_inf,
:eps_dual_inf,
:time_limit,
:rho,
:alpha,
:delta,
:polish,
:polish_refine_iter,
:verbose,
:check_termination,
:warm_start,
]
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 20378 | # Wrapper for the low level functions defined in https://github.com/oxfordcontrol/osqp/blob/master/include/osqp.h
# Ensure compatibility between Julia versions with @gc_preserve
@static if isdefined(Base, :GC)
import Base.GC: @preserve
else
macro preserve(args...)
body = args[end]
return esc(body)
end
end
"""
Model()
Initialize OSQP model
"""
mutable struct Model
workspace::Ptr{OSQP.Workspace}
lcache::Vector{Float64} # to facilitate converting l to use OSQP_INFTY
ucache::Vector{Float64} # to facilitate converting u to use OSQP_INFTY
    isempty::Bool # a flag to keep track of the model's setup status
function Model()
model = new(C_NULL, Float64[], Float64[], true)
finalizer(OSQP.clean!, model)
return model
end
end
"""
    setup!(model; P, q, A, l, u, settings...)
Perform OSQP solver setup of model `model`, using the problem data `P`, `q`, `A`, `l`, `u` (all passed as keyword arguments).
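A minimal sketch using the small QP from the OSQP documentation:
```julia
using OSQP, SparseArrays
model = OSQP.Model()
P = sparse([4.0 1.0; 1.0 2.0])          # quadratic cost (upper triangle is taken internally)
q = [1.0, 1.0]                          # linear cost
A = sparse([1.0 1.0; 1.0 0.0; 0.0 1.0]) # constraint matrix
l = [1.0, 0.0, 0.0]                     # lower bounds
u = [1.0, 0.7, 0.7]                     # upper bounds
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, verbose = false)
results = OSQP.solve!(model)
```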
"""
function setup!(
model::OSQP.Model;
P::Union{SparseMatrixCSC,Nothing} = nothing,
q::Union{Vector{Float64},Nothing} = nothing,
A::Union{SparseMatrixCSC,Nothing} = nothing,
l::Union{Vector{Float64},Nothing} = nothing,
u::Union{Vector{Float64},Nothing} = nothing,
settings...,
)
# Check problem dimensions
if P === nothing
if q !== nothing
n = length(q)
elseif A !== nothing
n = size(A, 2)
else
error("The problem does not have any variables!")
end
else
n = size(P, 1)
end
if A === nothing
m = 0
else
m = size(A, 1)
end
# Check if parameters are nothing
if ((A === nothing) & ((l !== nothing) | (u !== nothing))) |
((A !== nothing) & ((l === nothing) & (u === nothing)))
error("A must be supplied together with l and u")
end
if (A !== nothing) & (l === nothing)
l = -Inf * ones(m)
end
if (A !== nothing) & (u === nothing)
u = Inf * ones(m)
end
if P === nothing
P = sparse([], [], [], n, n)
end
if q === nothing
q = zeros(n)
end
if A === nothing
A = sparse([], [], [], m, n)
l = zeros(m)
u = zeros(m)
end
# Check if dimensions are correct
if length(q) != n
error("Incorrect dimension of q")
end
if length(l) != m
error("Incorrect dimensions of l")
end
if length(u) != m
error("Incorrect dimensions of u")
end
# Constructing upper triangular from P
if !istriu(P)
P = triu(P)
end
# Convert lower and upper bounds from Julia infinity to OSQP infinity
u = min.(u, OSQP_INFTY)
l = max.(l, -OSQP_INFTY)
# Resize caches
resize!(model.lcache, m)
resize!(model.ucache, m)
# Create managed matrices to avoid segfaults (See SCS.jl)
managedP = OSQP.ManagedCcsc(P)
managedA = OSQP.ManagedCcsc(A)
# Get managed pointers (Ref) Pdata and Adata
Pdata = Ref(OSQP.Ccsc(managedP))
Adata = Ref(OSQP.Ccsc(managedA))
# Create OSQP settings
settings_dict = Dict{Symbol,Any}()
if !isempty(settings)
for (key, value) in settings
settings_dict[key] = value
end
end
stgs = OSQP.Settings(settings_dict)
@preserve managedP Pdata managedA Adata q l u begin
# Create OSQP data using the managed matrices pointers
data = OSQP.Data(
n,
m,
Base.unsafe_convert(Ptr{OSQP.Ccsc}, Pdata),
Base.unsafe_convert(Ptr{OSQP.Ccsc}, Adata),
pointer(q),
pointer(l),
pointer(u),
)
# Perform setup
workspace = Ref{Ptr{OSQP.Workspace}}()
exitflag = ccall(
(:osqp_setup, OSQP.osqp),
Cc_int,
(Ptr{Ptr{OSQP.Workspace}}, Ptr{OSQP.Data}, Ptr{OSQP.Settings}),
workspace,
Ref(data),
Ref(stgs),
)
model.workspace = workspace[]
end
if exitflag != 0
error("Error in OSQP setup")
end
return model.isempty = false
end
function solve!(model::OSQP.Model, results::Results = Results())
model.isempty && throw(
ErrorException(
"You are trying to solve an empty model. Please setup the model before calling solve!().",
),
)
ccall(
(:osqp_solve, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace},),
model.workspace,
)
workspace = unsafe_load(model.workspace)
info = results.info
copyto!(info, unsafe_load(workspace.info))
solution = unsafe_load(workspace.solution)
data = unsafe_load(workspace.data)
n = data.n
m = data.m
resize!(results, n, m)
has_solution = false
for status in SOLUTION_PRESENT
info.status == status && (has_solution = true; break)
end
if has_solution
# If solution exists, copy it
unsafe_copyto!(pointer(results.x), solution.x, n)
unsafe_copyto!(pointer(results.y), solution.y, m)
fill!(results.prim_inf_cert, NaN)
fill!(results.dual_inf_cert, NaN)
else
# else fill with NaN and return certificates of infeasibility
fill!(results.x, NaN)
fill!(results.y, NaN)
if info.status == :Primal_infeasible ||
info.status == :Primal_infeasible_inaccurate
unsafe_copyto!(pointer(results.prim_inf_cert), workspace.delta_y, m)
fill!(results.dual_inf_cert, NaN)
elseif info.status == :Dual_infeasible ||
info.status == :Dual_infeasible_inaccurate
fill!(results.prim_inf_cert, NaN)
unsafe_copyto!(pointer(results.dual_inf_cert), workspace.delta_x, n)
else
fill!(results.prim_inf_cert, NaN)
fill!(results.dual_inf_cert, NaN)
end
end
if info.status == :Non_convex
info.obj_val = NaN
end
return results
end
function version()
return unsafe_string(ccall((:osqp_version, OSQP.osqp), Cstring, ()))
end
function clean!(model::OSQP.Model)
exitflag = ccall(
(:osqp_cleanup, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace},),
model.workspace,
)
if exitflag != 0
error("Error in OSQP cleanup")
end
end
function update_q!(model::OSQP.Model, q::Vector{Float64})
(n, m) = OSQP.dimensions(model)
if length(q) != n
error("q must have length n = $(n)")
end
exitflag = ccall(
(:osqp_update_lin_cost, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}),
model.workspace,
q,
)
if exitflag != 0
error("Error updating q")
end
end
function update_l!(model::OSQP.Model, l::Vector{Float64})
(n, m) = OSQP.dimensions(model)
if length(l) != m
error("l must have length m = $(m)")
end
model.lcache .= max.(l, -OSQP_INFTY)
exitflag = ccall(
(:osqp_update_lower_bound, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}),
model.workspace,
model.lcache,
)
if exitflag != 0
error("Error updating l")
end
end
function update_u!(model::OSQP.Model, u::Vector{Float64})
(n, m) = OSQP.dimensions(model)
if length(u) != m
error("u must have length m = $(m)")
end
model.ucache .= min.(u, OSQP_INFTY)
exitflag = ccall(
(:osqp_update_upper_bound, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}),
model.workspace,
model.ucache,
)
if exitflag != 0
error("Error updating u")
end
end
function update_bounds!(
model::OSQP.Model,
l::Vector{Float64},
u::Vector{Float64},
)
(n, m) = OSQP.dimensions(model)
if length(l) != m
error("l must have length m = $(m)")
end
if length(u) != m
error("u must have length m = $(m)")
end
model.lcache .= max.(l, -OSQP_INFTY)
model.ucache .= min.(u, OSQP_INFTY)
exitflag = ccall(
(:osqp_update_bounds, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}, Ptr{Cdouble}),
model.workspace,
model.lcache,
model.ucache,
)
if exitflag != 0
error("Error updating bounds l and u")
end
end
prep_idx_vector_for_ccall(idx::Nothing, n::Int, namesym::Symbol) = C_NULL
function prep_idx_vector_for_ccall(idx::Vector{Int}, n::Int, namesym::Symbol)
if length(idx) != n
error("$(namesym) and $(namesym)_idx must have the same length")
end
idx .-= 1 # Shift indexing to match C
return idx
end
restore_idx_vector_after_ccall!(idx::Nothing) = nothing
function restore_idx_vector_after_ccall!(idx::Vector{Int})
idx .+= 1 # Unshift indexing
return nothing
end
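# Worked example of the in-place index shift round-trip (values are
# illustrative only):
#
#     idx = [1, 3, 5]
#     prep_idx_vector_for_ccall(idx, 3, :P)   # idx is now [0, 2, 4] (0-based for C)
#     # ... ccall that consumes idx ...
#     restore_idx_vector_after_ccall!(idx)    # idx is back to [1, 3, 5]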
function update_P!(
model::OSQP.Model,
Px::Vector{Float64},
Px_idx::Union{Vector{Int},Nothing},
)
Px_idx_prepped = prep_idx_vector_for_ccall(Px_idx, length(Px), :P)
exitflag = ccall(
(:osqp_update_P, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}, Ptr{Cc_int}, Cc_int),
model.workspace,
Px,
Px_idx_prepped,
length(Px),
)
restore_idx_vector_after_ccall!(Px_idx)
if exitflag != 0
error("Error updating P")
end
end
function update_A!(
model::OSQP.Model,
Ax::Vector{Float64},
Ax_idx::Union{Vector{Int},Nothing},
)
Ax_idx_prepped = prep_idx_vector_for_ccall(Ax_idx, length(Ax), :A)
exitflag = ccall(
(:osqp_update_A, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}, Ptr{Cc_int}, Cc_int),
model.workspace,
Ax,
Ax_idx_prepped,
length(Ax),
)
restore_idx_vector_after_ccall!(Ax_idx)
if exitflag != 0
error("Error updating A")
end
end
function update_P_A!(
model::OSQP.Model,
Px::Vector{Float64},
Px_idx::Union{Vector{Int},Nothing},
Ax::Vector{Float64},
Ax_idx::Union{Vector{Int},Nothing},
)
Px_idx_prepped = prep_idx_vector_for_ccall(Px_idx, length(Px), :P)
Ax_idx_prepped = prep_idx_vector_for_ccall(Ax_idx, length(Ax), :A)
exitflag = ccall(
(:osqp_update_P_A, OSQP.osqp),
Cc_int,
(
Ptr{OSQP.Workspace},
Ptr{Cdouble},
Ptr{Cc_int},
Cc_int,
Ptr{Cdouble},
Ptr{Cc_int},
Cc_int,
),
model.workspace,
Px,
Px_idx_prepped,
length(Px),
Ax,
Ax_idx_prepped,
length(Ax),
)
restore_idx_vector_after_ccall!(Ax_idx)
restore_idx_vector_after_ccall!(Px_idx)
if exitflag != 0
error("Error updating P and A")
end
end
function update!(
model::OSQP.Model;
q = nothing,
l = nothing,
u = nothing,
Px = nothing,
Px_idx = nothing,
Ax = nothing,
Ax_idx = nothing,
)
# q
if q !== nothing
update_q!(model, q)
end
# l and u
if l !== nothing && u !== nothing
update_bounds!(model, l, u)
elseif l !== nothing
update_l!(model, l)
elseif u !== nothing
update_u!(model, u)
end
# P and A
if Px !== nothing && Ax !== nothing
update_P_A!(model, Px, Px_idx, Ax, Ax_idx)
elseif Px !== nothing
update_P!(model, Px, Px_idx)
elseif Ax !== nothing
update_A!(model, Ax, Ax_idx)
end
end
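# Usage sketch (mirroring the patterns exercised in the test suite; any subset
# of the keyword arguments may be passed):
#
#     OSQP.update!(model, q = [10.0; 20.0])          # new linear cost
#     OSQP.update!(model, l = l_new, u = u_new)      # both bounds, updated jointly
#     OSQP.update!(model, Px = triu(P_new).nzval)    # new values of triu(P), same sparsity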
function update_settings!(model::OSQP.Model; kwargs...)
if isempty(kwargs)
return
else
data = Dict{Symbol,Any}()
for (key, value) in kwargs
if !(key in UPDATABLE_SETTINGS)
error("$(key) cannot be updated or is not recognized")
else
data[key] = value
end
end
end
# Get arguments
max_iter = get(data, :max_iter, nothing)
eps_abs = get(data, :eps_abs, nothing)
eps_rel = get(data, :eps_rel, nothing)
eps_prim_inf = get(data, :eps_prim_inf, nothing)
eps_dual_inf = get(data, :eps_dual_inf, nothing)
rho = get(data, :rho, nothing)
alpha = get(data, :alpha, nothing)
delta = get(data, :delta, nothing)
polish = get(data, :polish, nothing)
polish_refine_iter = get(data, :polish_refine_iter, nothing)
verbose = get(data, :verbose, nothing)
    scaled_termination = get(data, :scaled_termination, nothing)
check_termination = get(data, :check_termination, nothing)
warm_start = get(data, :warm_start, nothing)
time_limit = get(data, :time_limit, nothing)
# Update individual settings
if max_iter !== nothing
exitflag = ccall(
(:osqp_update_max_iter, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
max_iter,
)
if exitflag != 0
error("Error updating max_iter")
end
end
if eps_abs !== nothing
exitflag = ccall(
(:osqp_update_eps_abs, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
eps_abs,
)
if exitflag != 0
error("Error updating eps_abs")
end
end
if eps_rel !== nothing
exitflag = ccall(
(:osqp_update_eps_rel, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
eps_rel,
)
if exitflag != 0
error("Error updating eps_rel")
end
end
if eps_prim_inf !== nothing
exitflag = ccall(
(:osqp_update_eps_prim_inf, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
eps_prim_inf,
)
if exitflag != 0
error("Error updating eps_prim_inf")
end
end
if eps_dual_inf !== nothing
exitflag = ccall(
(:osqp_update_eps_dual_inf, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
eps_dual_inf,
)
if exitflag != 0
error("Error updating eps_dual_inf")
end
end
if rho !== nothing
exitflag = ccall(
(:osqp_update_rho, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
rho,
)
if exitflag != 0
error("Error updating rho")
end
end
if alpha !== nothing
exitflag = ccall(
(:osqp_update_alpha, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
alpha,
)
if exitflag != 0
error("Error updating alpha")
end
end
if delta !== nothing
exitflag = ccall(
(:osqp_update_delta, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
delta,
)
if exitflag != 0
error("Error updating delta")
end
end
if polish !== nothing
exitflag = ccall(
(:osqp_update_polish, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
polish,
)
if exitflag != 0
error("Error updating polish")
end
end
if polish_refine_iter !== nothing
exitflag = ccall(
(:osqp_update_polish_refine_iter, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
polish_refine_iter,
)
if exitflag != 0
error("Error updating polish_refine_iter")
end
end
if verbose !== nothing
exitflag = ccall(
(:osqp_update_verbose, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
verbose,
)
if exitflag != 0
error("Error updating verbose")
end
end
if scaled_termination !== nothing
exitflag = ccall(
(:osqp_update_scaled_termination, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
scaled_termination,
)
if exitflag != 0
error("Error updating scaled_termination")
end
end
if check_termination !== nothing
exitflag = ccall(
(:osqp_update_check_termination, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
check_termination,
)
if exitflag != 0
error("Error updating check_termination")
end
end
if warm_start !== nothing
exitflag = ccall(
(:osqp_update_warm_start, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cc_int),
model.workspace,
warm_start,
)
if exitflag != 0
error("Error updating warm_start")
end
end
if time_limit !== nothing
exitflag = ccall(
(:osqp_update_time_limit, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Cdouble),
model.workspace,
time_limit,
)
if exitflag != 0
error("Error updating time_limit")
end
end
return nothing
end
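# Usage sketch (only keys listed in UPDATABLE_SETTINGS are accepted; examples
# taken from the test suite):
#
#     OSQP.update_settings!(model, max_iter = 80)
#     OSQP.update_settings!(model, eps_abs = 1e-20, eps_rel = 1e-20,
#                           time_limit = 1e-6, check_termination = 0)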
function warm_start_x!(model::OSQP.Model, x::Vector{Float64})
(n, m) = OSQP.dimensions(model)
length(x) == n || error("Wrong dimension for variable x")
exitflag = ccall(
(:osqp_warm_start_x, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}),
model.workspace,
x,
)
exitflag == 0 || error("Error in warm starting x")
return nothing
end
function warm_start_y!(model::OSQP.Model, y::Vector{Float64})
(n, m) = OSQP.dimensions(model)
length(y) == m || error("Wrong dimension for variable y")
exitflag = ccall(
(:osqp_warm_start_y, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}),
model.workspace,
y,
)
exitflag == 0 || error("Error in warm starting y")
return nothing
end
function warm_start_x_y!(
model::OSQP.Model,
x::Vector{Float64},
y::Vector{Float64},
)
(n, m) = OSQP.dimensions(model)
length(x) == n || error("Wrong dimension for variable x")
length(y) == m || error("Wrong dimension for variable y")
exitflag = ccall(
(:osqp_warm_start, OSQP.osqp),
Cc_int,
(Ptr{OSQP.Workspace}, Ptr{Cdouble}, Ptr{Cdouble}),
model.workspace,
x,
y,
)
exitflag == 0 || error("Error in warm starting x and y")
return nothing
end
function warm_start!(
model::OSQP.Model;
x::Union{Vector{Float64},Nothing} = nothing,
y::Union{Vector{Float64},Nothing} = nothing,
)
if x isa Vector{Float64} && y isa Vector{Float64}
warm_start_x_y!(model, x, y)
elseif x isa Vector{Float64}
warm_start_x!(model, x)
elseif y isa Vector{Float64}
warm_start_y!(model, y)
end
end
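# Usage sketch (vector lengths must match the problem dimensions; the values
# below are illustrative only):
#
#     OSQP.warm_start!(model, x = [50.0; 30.0])                           # primal only
#     OSQP.warm_start!(model, x = [50.0; 30.0], y = [-2.0; -2; -2; -2])   # both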
# Auxiliary low-level functions
"""
    dimensions(model::OSQP.Model)

Return the problem dimensions `(n, m)` of an OSQP model, where `n` is the
number of variables and `m` is the number of constraints.
"""
function dimensions(model::OSQP.Model)
if model.workspace == C_NULL
error("Workspace has not been setup yet")
end
workspace = unsafe_load(model.workspace)
data = unsafe_load(workspace.data)
return data.n, data.m
end
function linsys_solver_str_to_int!(settings_dict::Dict{Symbol,Any})
# linsys_str = pop!(settings_dict, :linsys_solver)
linsys_str = get(settings_dict, :linsys_solver, nothing)
if linsys_str !== nothing
# Check type
if !isa(linsys_str, String)
error("linsys_solver is required to be a string")
end
# Convert to lower case
linsys_str = lowercase(linsys_str)
if linsys_str == "qdldl"
settings_dict[:linsys_solver] = QDLDL_SOLVER
elseif linsys_str == "mkl pardiso"
settings_dict[:linsys_solver] = MKL_PARDISO_SOLVER
elseif linsys_str == ""
settings_dict[:linsys_solver] = QDLDL_SOLVER
else
@warn("Linear system solver not recognized. Using default QDLDL")
settings_dict[:linsys_solver] = QDLDL_SOLVER
end
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 6121 | module ModificationCaches
using LinearAlgebra
using SparseArrays
export VectorModificationCache,
MatrixModificationCache,
ProblemModificationCache,
WarmStartCache,
processupdates!
import OSQP
mutable struct VectorModificationCache{T}
data::Vector{T}
dirty::Bool
function VectorModificationCache(data::Vector{T}) where {T}
return new{T}(copy(data), false)
end
end
function Base.setindex!(cache::VectorModificationCache, x, i::Integer)
return (cache.dirty = true; cache.data[i] = x)
end
function Base.setindex!(cache::VectorModificationCache, x, ::Colon)
return (cache.dirty = true; cache.data .= x)
end
Base.getindex(cache::VectorModificationCache, i::Integer) = cache.data[i]
function processupdates!(
model,
cache::VectorModificationCache,
updatefun::Function,
)
if cache.dirty
updatefun(model, cache.data)
cache.dirty = false
end
end
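# Usage sketch for the vector cache (mirroring the tests; `updatefun` is one
# of the low-level updaters such as OSQP.update_q!):
#
#     qcache = VectorModificationCache(q)
#     qcache[3] = 5.0                                  # marks the cache dirty
#     processupdates!(model, qcache, OSQP.update_q!)   # pushes q to the solver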
struct MatrixModificationCache{T}
cartesian_indices::Vector{CartesianIndex{2}}
cartesian_indices_set::Set{CartesianIndex{2}} # to speed up checking whether indices are out of bounds in setindex!
cartesian_indices_per_row::Dict{Int,Vector{CartesianIndex{2}}}
modifications::Dict{CartesianIndex{2},T}
vals::Vector{T}
inds::Vector{Int}
function MatrixModificationCache(S::SparseMatrixCSC{T}) where {T}
cartesian_indices = Vector{CartesianIndex{2}}(undef, nnz(S))
cartesian_indices_per_row = Dict{Int,Vector{CartesianIndex{2}}}()
sizehint!(cartesian_indices_per_row, nnz(S))
@inbounds for col in 1:S.n, k in S.colptr[col]:(S.colptr[col+1]-1) # from sparse findn
row = S.rowval[k]
I = CartesianIndex(row, col)
cartesian_indices[k] = I
push!(
get!(() -> CartesianIndex{2}[], cartesian_indices_per_row, row),
I,
)
end
        modifications = Dict{CartesianIndex{2},T}()
sizehint!(modifications, nnz(S))
return new{T}(
cartesian_indices,
Set(cartesian_indices),
cartesian_indices_per_row,
modifications,
T[],
Int[],
)
end
end
function Base.setindex!(
cache::MatrixModificationCache,
x,
row::Integer,
col::Integer,
)
I = CartesianIndex(row, col)
@boundscheck I ∈ cache.cartesian_indices_set || throw(
ArgumentError("Changing the sparsity pattern is not allowed."),
)
return cache.modifications[I] = x
end
function Base.setindex!(cache::MatrixModificationCache, x::Real, ::Colon)
# used to zero out the entire matrix
@boundscheck x == 0 || throw(
ArgumentError("Changing the sparsity pattern is not allowed."),
)
for I in cache.cartesian_indices
cache.modifications[I] = 0
end
end
function Base.setindex!(
cache::MatrixModificationCache,
x::Real,
row::Integer,
::Colon,
)
# used to zero out a row
@boundscheck x == 0 || throw(
ArgumentError("Changing the sparsity pattern is not allowed."),
)
for I in cache.cartesian_indices_per_row[row]
cache.modifications[I] = 0
end
end
function Base.getindex(
cache::MatrixModificationCache,
row::Integer,
col::Integer,
)
return cache.modifications[CartesianIndex(row, col)]
end
function processupdates!(
model,
cache::MatrixModificationCache,
updatefun::Function,
)
dirty = !isempty(cache.modifications)
if dirty
nmods = length(cache.modifications)
resize!(cache.vals, nmods)
resize!(cache.inds, nmods)
count = 1
for i in eachindex(cache.cartesian_indices)
I = cache.cartesian_indices[i]
if haskey(cache.modifications, I)
cache.vals[count] = cache.modifications[I]
cache.inds[count] = i
count += 1
end
end
updatefun(model, cache.vals, cache.inds)
empty!(cache.modifications)
end
end
# More OSQP-specific from here on:
struct ProblemModificationCache{T}
P::MatrixModificationCache{T}
q::VectorModificationCache{T}
A::MatrixModificationCache{T}
l::VectorModificationCache{T}
u::VectorModificationCache{T}
ProblemModificationCache{T}() where {T} = new{T}()
function ProblemModificationCache(
P::SparseMatrixCSC,
q::Vector{T},
A::SparseMatrixCSC,
l::Vector{T},
u::Vector{T},
) where {T}
MC = MatrixModificationCache
VC = VectorModificationCache
return new{T}(MC(triu(P)), VC(q), MC(A), VC(l), VC(u))
end
end
function processupdates!(model::OSQP.Model, cache::ProblemModificationCache)
if cache.l.dirty && cache.u.dirty
# Special case because setting just l or u may cause an 'upper bound must be greater than or equal to lower bound' error
OSQP.update_bounds!(model, cache.l.data, cache.u.data)
cache.l.dirty = false
cache.u.dirty = false
end
processupdates!(model, cache.P, OSQP.update_P!)
processupdates!(model, cache.q, OSQP.update_q!)
processupdates!(model, cache.A, OSQP.update_A!)
processupdates!(model, cache.l, OSQP.update_l!)
return processupdates!(model, cache.u, OSQP.update_u!)
end
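# Sketch of the intended staging pattern (names mirror the tests in this
# repository; `model` is an OSQP.Model already set up with P, q, A, l, u):
#
#     cache = ProblemModificationCache(P, q, A, l, u)
#     cache.q[3] = 5.0               # stage a cost change
#     cache.A[row, col] = val        # stage a matrix change (same sparsity)
#     processupdates!(model, cache)  # flush all staged changes to the solver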
struct WarmStartCache{T}
x::VectorModificationCache{T}
y::VectorModificationCache{T}
WarmStartCache{T}() where {T} = new{T}()
function WarmStartCache{T}(n::Integer, m::Integer) where {T}
return new{T}(
VectorModificationCache(zeros(T, n)),
VectorModificationCache(zeros(T, m)),
)
end
end
function processupdates!(model::OSQP.Model, cache::WarmStartCache)
if cache.x.dirty && cache.y.dirty
# Special case because setting warm start for x only zeroes the stored warm start for y and vice versa.
OSQP.warm_start_x_y!(model, cache.x.data, cache.y.data)
cache.x.dirty = false
cache.y.dirty = false
end
processupdates!(model, cache.x, OSQP.warm_start_x!)
return processupdates!(model, cache.y, OSQP.warm_start_y!)
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 6428 | # Types defined in types.h
# https://github.com/oxfordcontrol/osqp/blob/master/include/types.h
# Integer type from C
if Sys.WORD_SIZE == 64 # 64bit system
const Cc_int = Clonglong
else # 32bit system
const Cc_int = Cint
end
struct Ccsc
nzmax::Cc_int
m::Cc_int
n::Cc_int
p::Ptr{Cc_int}
i::Ptr{Cc_int}
x::Ptr{Cdouble}
nz::Cc_int
end
struct ManagedCcsc
nzmax::Cc_int
m::Cc_int
n::Cc_int
p::Vector{Cc_int}
i::Vector{Cc_int}
x::Vector{Cdouble}
nz::Cc_int
end
# Construct ManagedCcsc matrix from SparseMatrixCSC
function ManagedCcsc(M::SparseMatrixCSC)
# Get dimensions
m = M.m
n = M.n
# Get vectors of data, rows indices and column pointers
x = convert(Array{Float64,1}, M.nzval)
# C is 0 indexed
i = convert(Array{Cc_int,1}, M.rowval .- 1)
# C is 0 indexed
p = convert(Array{Cc_int,1}, M.colptr .- 1)
# Create new ManagedCcsc matrix
return ManagedCcsc(length(M.nzval), m, n, p, i, x, -1)
end
function Base.convert(::Type{SparseMatrixCSC}, c::OSQP.Ccsc)
m = c.m
n = c.n
nzmax = c.nzmax
nzval = [unsafe_load(c.x, i) for i in 1:nzmax]
rowval = [unsafe_load(c.i, i) for i in 1:nzmax] .+ 1
colptr = [unsafe_load(c.p, i) for i in 1:(n+1)] .+ 1
return SparseMatrixCSC(m, n, colptr, rowval, nzval)
end
# Returns a Ccsc matrix. The vectors are *not* GC tracked in the struct.
# Use this only when you know that the managed matrix will outlive the Ccsc
# matrix.
function Ccsc(m::ManagedCcsc)
return Ccsc(
m.nzmax,
m.m,
m.n,
pointer(m.p),
pointer(m.i),
pointer(m.x),
m.nz,
)
end
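# Usage sketch: because Ccsc only holds raw pointers, the ManagedCcsc must be
# kept alive (e.g. with GC.@preserve) for as long as the Ccsc is in use, as in
# the roundtrip test of this package:
#
#     mc = ManagedCcsc(S)    # S::SparseMatrixCSC
#     GC.@preserve mc begin
#         c = Ccsc(mc)
#         # ... pass `c` to C code ...
#     end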
struct Solution
x::Ptr{Cdouble}
y::Ptr{Cdouble}
end
# Internal C type for info
# N.B. This is not the one returned to the user!
struct CInfo
iter::Cc_int
    # The C struct stores the status as a 32-byte character array; we mirror
    # it here as a tuple of 32 Cchars
# TODO: Find a better way to do this
status::NTuple{32,Cchar}
status_val::Cc_int
status_polish::Cc_int
obj_val::Cdouble
pri_res::Cdouble
dua_res::Cdouble
setup_time::Cdouble
solve_time::Cdouble
update_time::Cdouble
polish_time::Cdouble
run_time::Cdouble
rho_updates::Cc_int
rho_estimate::Cdouble
end
struct Data
n::Cc_int
m::Cc_int
P::Ptr{Ccsc}
A::Ptr{Ccsc}
q::Ptr{Cdouble}
l::Ptr{Cdouble}
u::Ptr{Cdouble}
end
struct Settings
rho::Cdouble
sigma::Cdouble
scaling::Cc_int
adaptive_rho::Cc_int
adaptive_rho_interval::Cc_int
adaptive_rho_tolerance::Cdouble
adaptive_rho_fraction::Cdouble
max_iter::Cc_int
eps_abs::Cdouble
eps_rel::Cdouble
eps_prim_inf::Cdouble
eps_dual_inf::Cdouble
alpha::Cdouble
linsys_solver::Cint # Enum type
delta::Cdouble
polish::Cc_int
polish_refine_iter::Cc_int
verbose::Cc_int
scaled_termination::Cc_int
check_termination::Cc_int
warm_start::Cc_int
time_limit::Cdouble
end
function Settings()
s = Ref{OSQP.Settings}()
ccall(
(:osqp_set_default_settings, OSQP.osqp),
Nothing,
(Ref{OSQP.Settings},),
s,
)
return s[]
end
function Settings(settings_dict::Dict{Symbol,Any})
# function Settings(settings::Base.Iterators.IndexValue)
# function Settings(settings::Array{Any, 1})
default_settings = OSQP.Settings()
# Convert linsys_solver string to number
linsys_solver_str_to_int!(settings_dict)
# Get list with elements of default and user settings
# If setting is in the passed settings (settings_dict),
# then convert type to the right type. Otherwise just take
# the default one
settings_list = [
setting in keys(settings_dict) ?
convert(
fieldtype(typeof(default_settings), setting),
settings_dict[setting],
) : getfield(default_settings, setting) for
setting in fieldnames(typeof(default_settings))
]
# Create new settings with new dictionary
s = OSQP.Settings(settings_list...)
return s
end
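# Usage sketch (keys must be Symbols matching Settings field names; anything
# not supplied keeps its OSQP default):
#
#     s = OSQP.Settings(Dict{Symbol,Any}(:max_iter => 4000, :eps_abs => 1e-8))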
struct Workspace
data::Ptr{OSQP.Data}
linsys_solver::Ptr{Nothing}
pol::Ptr{Nothing}
rho_vec::Ptr{Cdouble}
rho_inv_vec::Ptr{Cdouble}
constr_type::Ptr{Cc_int}
# Iterates
x::Ptr{Cdouble}
y::Ptr{Cdouble}
z::Ptr{Cdouble}
xz_tilde::Ptr{Cdouble}
x_prev::Ptr{Cdouble}
z_prev::Ptr{Cdouble}
# Primal and dual residuals
Ax::Ptr{Cdouble}
Px::Ptr{Cdouble}
Aty::Ptr{Cdouble}
# Primal infeasibility
delta_y::Ptr{Cdouble}
Atdelta_y::Ptr{Cdouble}
# Dual infeasibility
delta_x::Ptr{Cdouble}
Pdelta_x::Ptr{Cdouble}
Adelta_x::Ptr{Cdouble}
# Scaling
D_temp::Ptr{Cdouble}
D_temp_A::Ptr{Cdouble}
E_temp::Ptr{Cdouble}
settings::Ptr{OSQP.Settings}
scaling::Ptr{Nothing}
solution::Ptr{OSQP.Solution}
info::Ptr{OSQP.CInfo}
timer::Ptr{Nothing}
first_run::Cc_int
summary_printed::Cc_int
end
mutable struct Info
iter::Int64
status::Symbol
status_val::Int64
status_polish::Int64
obj_val::Float64
pri_res::Float64
dua_res::Float64
setup_time::Float64
solve_time::Float64
update_time::Float64
polish_time::Float64
run_time::Float64
rho_updates::Int64
rho_estimate::Float64
Info() = new()
end
function copyto!(info::Info, cinfo::CInfo)
info.iter = cinfo.iter
info.status = OSQP.status_map[cinfo.status_val]
info.status_val = cinfo.status_val
info.status_polish = cinfo.status_polish
info.obj_val = cinfo.obj_val
info.pri_res = cinfo.pri_res
info.dua_res = cinfo.dua_res
info.setup_time = cinfo.setup_time
info.solve_time = cinfo.solve_time
info.update_time = cinfo.update_time
info.polish_time = cinfo.polish_time
info.run_time = cinfo.run_time
info.rho_updates = cinfo.rho_updates
info.rho_estimate = cinfo.rho_estimate
return info
end
mutable struct Results
x::Vector{Float64}
y::Vector{Float64}
info::OSQP.Info
prim_inf_cert::Vector{Float64}
dual_inf_cert::Vector{Float64}
end
Results() = Results(Float64[], Float64[], Info(), Float64[], Float64[])
function Base.resize!(results::Results, n::Int, m::Int)
resize!(results.x, n)
resize!(results.y, m)
resize!(results.prim_inf_cert, m)
resize!(results.dual_inf_cert, n)
return results
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 26092 | module TestOSQP
using Test
using LinearAlgebra
using Random
using SparseArrays
using MathOptInterface
const MOI = MathOptInterface
const MOIU = MOI.Utilities
using OSQP
using OSQP.MathOptInterfaceOSQP
const Affine = MOI.ScalarAffineFunction{Float64}
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
const config = MOI.Test.Config(
atol = 1e-4,
rtol = 1e-4,
exclude = Any[
MOI.ConstraintBasisStatus,
MOI.VariableBasisStatus,
MOI.ConstraintName,
MOI.VariableName,
MOI.ObjectiveBound,
MOI.DualObjectiveValue,
],
)
function defaultoptimizer()
optimizer = OSQP.Optimizer()
MOI.set(optimizer, MOI.Silent(), true)
MOI.set(optimizer, OSQPSettings.EpsAbs(), 1e-8)
MOI.set(optimizer, OSQPSettings.EpsRel(), 1e-16)
MOI.set(optimizer, OSQPSettings.MaxIter(), 10000)
MOI.set(optimizer, OSQPSettings.AdaptiveRhoInterval(), 25) # required for deterministic behavior
return optimizer
end
function bridged_optimizer()
optimizer = defaultoptimizer()
cached = MOIU.CachingOptimizer(
MOIU.UniversalFallback(OSQPModel{Float64}()),
optimizer,
)
return MOI.Bridges.full_bridge_optimizer(cached, Float64)
end
function test_runtests()
model = bridged_optimizer()
MOI.Test.runtests(
model,
config,
exclude = String[
"test_attribute_SolverVersion",
# Expected test failures:
# MathOptInterface.jl issue #1431
"test_model_LowerBoundAlreadySet",
"test_model_UpperBoundAlreadySet",
# FIXME
# See https://github.com/jump-dev/MathOptInterface.jl/issues/1773
"test_infeasible_MAX_SENSE",
"test_infeasible_MAX_SENSE_offset",
"test_infeasible_MIN_SENSE",
"test_infeasible_MIN_SENSE_offset",
"test_infeasible_affine_MAX_SENSE",
"test_infeasible_affine_MAX_SENSE_offset",
"test_infeasible_affine_MIN_SENSE",
"test_infeasible_affine_MIN_SENSE_offset",
# FIXME
# See https://github.com/jump-dev/MathOptInterface.jl/issues/1759
"test_unbounded_MAX_SENSE",
"test_unbounded_MAX_SENSE_offset",
"test_unbounded_MIN_SENSE",
"test_unbounded_MIN_SENSE_offset",
# FIXME
"test_model_copy_to_UnsupportedAttribute",
# Segfault
"test_model_copy_to_UnsupportedConstraint",
],
)
return
end
function test_ProblemModificationCache()
rng = MersenneTwister(1234)
n = 15
m = 10
q = randn(rng, n)
P = (X = sprandn(rng, n, n, 0.1); X' * X)
P += eps() * I # needed for a test later on
l = -rand(rng, m)
u = rand(rng, m)
A = sprandn(rng, m, n, 0.6)
modcache = MathOptInterfaceOSQP.ProblemModificationCache(P, q, A, l, u)
model = OSQP.Model()
OSQP.setup!(
model;
P = P,
q = q,
A = A,
l = l,
u = u,
verbose = false,
eps_abs = 1e-8,
eps_rel = 1e-16,
)
baseresults = OSQP.solve!(model)
@test baseresults.info.status == :Solved
# Modify q, ensure that updating results in the same solution as calling setup! with the modified q
@test !modcache.q.dirty
modcache.q[3] = 5.0
@test modcache.q.dirty
@test modcache.q.data[3] == 5.0
MathOptInterfaceOSQP.processupdates!(model, modcache)
@test !modcache.q.dirty
qmod_update_results = OSQP.solve!(model)
@test !isapprox(baseresults.x, qmod_update_results.x; atol = 1e-1) # ensure that new results are significantly different
model2 = OSQP.Model()
OSQP.setup!(
model2;
P = P,
q = modcache.q.data,
A = A,
l = l,
u = u,
verbose = false,
eps_abs = 1e-8,
eps_rel = 1e-16,
)
qmod_setup_results = OSQP.solve!(model2)
@test qmod_update_results.x ≈ qmod_setup_results.x atol = 1e-7
# Modify A, ensure that updating results in the same solution as calling setup! with the modified A and q
(rows, cols, _) = findnz(A)
Amodindex = rand(rng, 1:nnz(A))
row = rows[Amodindex]
col = cols[Amodindex]
val = randn(rng)
modcache.A[row, col] = val
MathOptInterfaceOSQP.processupdates!(model, modcache)
@test isempty(modcache.A.modifications)
Amod_update_results = OSQP.solve!(model)
@test !isapprox(baseresults.x, Amod_update_results.x; atol = 1e-1) # ensure that new results are significantly different
@test !isapprox(qmod_update_results.x, Amod_update_results.x; atol = 1e-1) # ensure that new results are significantly different
Amod = copy(A)
Amod[row, col] = val
model3 = OSQP.Model()
OSQP.setup!(
model3;
P = P,
q = modcache.q.data,
A = Amod,
l = l,
u = u,
verbose = false,
eps_abs = 1e-8,
eps_rel = 1e-16,
)
Amod_setup_results = OSQP.solve!(model3)
@test Amod_update_results.x ≈ Amod_setup_results.x atol = 1e-7
# MatrixModificationCache: colon indexing
modcache.P[:] = 0.0
for i in 1:n
modcache.P[i, i] = 1.0
end
MathOptInterfaceOSQP.processupdates!(model, modcache)
Pmod_update_results = OSQP.solve!(model)
model4 = OSQP.Model()
Pmod = sparse(1.0I, n, n)
OSQP.setup!(
model4;
P = Pmod,
q = modcache.q.data,
A = Amod,
l = l,
u = u,
verbose = false,
eps_abs = 1e-8,
eps_rel = 1e-16,
)
Pmod_setup_results = OSQP.solve!(model4)
@test Pmod_update_results.x ≈ Pmod_setup_results.x atol = 1e-7
# Modifying the sparsity pattern is not allowed
nzinds = map(CartesianIndex, zip(rows, cols))
    zinds = setdiff(vec(CartesianIndices(A)), nzinds)
for zind in zinds
@test_throws ArgumentError modcache.A[zind[1], zind[2]] = randn(rng)
end
@test_throws ArgumentError modcache.A[:] = 1
end
function _test_optimizer_modification(
modfun::Base.Callable,
model::MOI.ModelLike,
optimizer::T,
idxmap::MOIU.IndexMap,
cleanoptimizer::T,
config::MOI.Test.Config,
) where {T<:MOI.AbstractOptimizer}
# apply modfun to both the model and the optimizer
modfun(model)
modfun(optimizer)
# copy model into clean optimizer
cleanidxmap = MOI.copy_to(cleanoptimizer, model)
@test cleanidxmap.var_map == idxmap.var_map
@test cleanidxmap.con_map == idxmap.con_map
# call optimize! on both optimizers
MOI.optimize!(optimizer)
MOI.optimize!(cleanoptimizer)
# compare results
atol = config.atol
rtol = config.rtol
@test MOI.get(optimizer, MOI.TerminationStatus()) ==
MOI.get(cleanoptimizer, MOI.TerminationStatus())
@test MOI.get(optimizer, MOI.PrimalStatus()) ==
MOI.get(cleanoptimizer, MOI.PrimalStatus())
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈
MOI.get(cleanoptimizer, MOI.ObjectiveValue()) atol = atol rtol = rtol
if MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
modelvars = MOI.get(model, MOI.ListOfVariableIndices())
for v_model in modelvars
v_optimizer = idxmap[v_model]
@test MOI.get(optimizer, MOI.VariablePrimal(), v_optimizer) ≈
MOI.get(cleanoptimizer, MOI.VariablePrimal(), v_optimizer) atol =
atol rtol = rtol
end
end
@test MOI.get(optimizer, MOI.DualStatus()) ==
MOI.get(cleanoptimizer, MOI.DualStatus())
if MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
for (F, S) in MOI.get(model, MOI.ListOfConstraintTypesPresent())
cis_model = MOI.get(model, MOI.ListOfConstraintIndices{F,S}())
for ci_model in cis_model
ci_optimizer = idxmap[ci_model]
@test MOI.get(optimizer, MOI.ConstraintDual(), ci_optimizer) ≈
MOI.get(
cleanoptimizer,
MOI.ConstraintDual(),
ci_optimizer,
) atol = atol rtol = rtol
end
end
end
end
function zero_warm_start!(optimizer::MOI.ModelLike, vars, cons)
for vi in vars
MOI.set(optimizer, MOI.VariablePrimalStart(), vi, 0.0)
end
for ci in cons
MOI.set(optimizer, MOI.ConstraintDualStart(), ci, -0.0)
end
end
term(c, x::MOI.VariableIndex) = MOI.ScalarAffineTerm(c, x)
function term(c, x::MOI.VariableIndex, y::MOI.VariableIndex)
return MOI.ScalarQuadraticTerm(c, x, y)
end
function test_no_CachingOptimizer_problem_modification_after_copy_to()
# Initial setup: modified version of MOI.Test.linear1test
# min -x
    # s.t. x + y <= 1 (x + y - 1 ∈ Nonpositives)
# x, y >= 0 (x, y ∈ Nonnegatives)
model = OSQPModel{Float64}()
MOI.empty!(model)
v = MOI.add_variables(model, 2)
x, y = v
cf = MOI.ScalarAffineFunction(
[term.([0.0, 0.0], v); term.([1.0, 1.0], v); term.([0.0, 0.0], v)],
0.0,
)
c = MOI.add_constraint(model, cf, MOI.Interval(-Inf, 1.0))
vc1 = MOI.add_constraint(model, 1.0v[1], MOI.Interval(0.0, Inf))
vc2 = MOI.add_constraint(model, 1.0v[2], MOI.Interval(0.0, Inf))
objf = MOI.ScalarAffineFunction(
[term.([0.0, 0.0], v); term.([-1.0, 0.0], v); term.([0.0, 0.0], v)],
0.0,
)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
objf,
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
optimizer = defaultoptimizer()
idxmap = MOI.copy_to(optimizer, model)
@test MOI.get(optimizer, MOI.ObjectiveSense()) == MOI.MIN_SENSE
@test MOI.get(optimizer, MOI.NumberOfVariables()) == 2
@test MOI.get(optimizer, MOI.ListOfVariableIndices()) ==
[MOI.VariableIndex(1), MOI.VariableIndex(2)]
@test MOI.is_valid(optimizer, MOI.VariableIndex(2))
@test !MOI.is_valid(optimizer, MOI.VariableIndex(3))
MOI.optimize!(optimizer)
# ensure that unmodified model is correct
atol = config.atol
rtol = config.rtol
@test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ -1 atol = atol rtol = rtol
@test MOI.get(optimizer, MOI.VariablePrimal(), getindex.(Ref(idxmap), v)) ≈
[1, 0] atol = atol rtol = rtol
@test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[c]) ≈ -1 atol = atol rtol =
rtol
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[vc1]) ≈ 0 atol = atol rtol =
rtol
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[vc2]) ≈ 1 atol = atol rtol =
rtol
# test default warm start
itercold = optimizer.results.info.iter
MOI.optimize!(optimizer)
iterwarm = optimizer.results.info.iter
@test iterwarm < itercold
# test allocations
allocs = @allocated MOI.optimize!(optimizer)
@test allocs == 0
# ensure that solving a second time results in the same answer after zeroing warm start
zero_warm_start!(optimizer, values(idxmap.var_map), values(idxmap.con_map))
_test_optimizer_modification(
m -> (),
model,
optimizer,
idxmap,
defaultoptimizer(),
MOI.Test.Config(atol = 0.0, rtol = 0.0),
)
function mapfrommodel(
::MOI.AbstractOptimizer,
x::Union{MOI.VariableIndex,<:MOI.ConstraintIndex},
)
return idxmap[x]
end
function mapfrommodel(
::MOI.ModelLike,
x::Union{MOI.VariableIndex,<:MOI.ConstraintIndex},
)
return x
end
# change objective to min -2y
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
newobjf =
MOI.ScalarAffineFunction([term(-2.0, mapfrommodel(m, y))], 0.0)
F = typeof(newobjf)
return MOI.set(m, MOI.ObjectiveFunction{F}(), newobjf)
end
# add a constant to the objective
objval_before = MOI.get(optimizer, MOI.ObjectiveValue())
objconstant = 1.5
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
attr = MOI.ObjectiveFunction{Affine}()
return MOI.modify(m, attr, MOI.ScalarConstantChange(objconstant))
end
objval_after = MOI.get(optimizer, MOI.ObjectiveValue())
@test objval_after ≈ objval_before + objconstant atol = 1e-8
# change objective to min -y using ScalarCoefficientChange
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
attr = MOI.ObjectiveFunction{Affine}()
return MOI.modify(
m,
attr,
MOI.ScalarCoefficientChange(mapfrommodel(m, y), -1.0),
)
end
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈
0.5 * objval_before + objconstant atol = 1e-8
# change x + y <= 1 to x + 2 y + 0.5 <= 1
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
attr = MOI.ConstraintFunction()
ci = mapfrommodel(m, c)
return MOI.set(
m,
attr,
ci,
MOI.ScalarAffineFunction(
term.([1.0, 1.0, 1.0], mapfrommodel.(Ref(m), [x, x, y])),
0.5,
),
)
end
# change back to x + y <= 1 using ScalarCoefficientChange
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
ci = mapfrommodel(m, c)
return MOI.modify(
m,
ci,
MOI.ScalarCoefficientChange(mapfrommodel(m, y), 1.0),
)
end
# flip the feasible set around from what it was originally and minimize +x
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
config,
) do m
# objective
newobjf = convert(MOI.ScalarAffineFunction{Float64}, mapfrommodel(m, x))
F = typeof(newobjf)
MOI.set(m, MOI.ObjectiveFunction{F}(), newobjf)
# c
attr = MOI.ConstraintFunction()
ci = mapfrommodel(m, c)
MOI.set(
m,
attr,
ci,
MOI.ScalarAffineFunction(
term.([1.0, 1.0], mapfrommodel.(Ref(m), [x, y])),
0.0,
),
)
attr = MOI.ConstraintSet()
MOI.set(m, attr, ci, MOI.Interval(-1.0, Inf))
# vc1
attr = MOI.ConstraintSet()
ci = mapfrommodel(m, vc1)
MOI.set(m, attr, ci, MOI.Interval(-Inf, 0.0))
# vc2
attr = MOI.ConstraintSet()
ci = mapfrommodel(m, vc2)
return MOI.set(m, attr, ci, MOI.Interval(-Inf, 0.0))
end
testflipped = function ()
@test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ -1 atol = atol rtol =
rtol
@test MOI.get(
optimizer,
MOI.VariablePrimal(),
getindex.(Ref(idxmap), v),
) ≈ [-1, 0] atol = atol rtol = rtol
@test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[c]) ≈ 1 atol =
atol rtol = rtol
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[vc1]) ≈ 0 atol =
atol rtol = rtol
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[vc2]) ≈ -1 atol =
atol rtol = rtol
end
testflipped()
# update settings
@test optimizer.results.info.status_polish == 0
MOI.set(optimizer, OSQPSettings.Polish(), true)
MOI.optimize!(optimizer)
@test optimizer.results.info.status_polish == 1
return testflipped()
end
function no_CachingOptimizer_Vector_problem_modification_after_copy_to()
# from basic.jl:
model = OSQPModel{Float64}()
x = MOI.add_variables(model, 2)
P11 = 11.0
q = [3.0, 4.0]
u = [0.0, 0.0, -15, 100, 80]
A = sparse(Float64[-1 0; 0 -1; -1 -3; 2 5; 3 4])
I, J, coeffs = findnz(A)
objf = MOI.ScalarQuadraticFunction(
[term(2 * P11, x[1], x[1]), term(0.0, x[1], x[2])],
term.(q, x),
0.0,
)
MOI.set(model, MOI.ObjectiveFunction{typeof(objf)}(), objf)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
cf = MOI.VectorAffineFunction(
MOI.VectorAffineTerm.(
Int64.(I),
term.(coeffs, map(j -> getindex(x, j), J)),
),
-u,
)
c = MOI.add_constraint(model, cf, MOI.Nonpositives(length(u)))
optimizer = defaultoptimizer()
idxmap = MOI.copy_to(optimizer, model)
@test MOI.get(optimizer, MOI.ObjectiveSense()) == MOI.MIN_SENSE
@test MOI.get(optimizer, MOI.NumberOfVariables()) == 2
@test MOI.get(optimizer, MOI.ListOfVariableIndices()) ==
[MOI.VariableIndex(1), MOI.VariableIndex(2)]
@test MOI.is_valid(optimizer, MOI.VariableIndex(2))
@test !MOI.is_valid(optimizer, MOI.VariableIndex(3))
MOI.optimize!(optimizer)
# check result before modification
atol = config.atol
rtol = config.rtol
@test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ 20.0 atol = atol rtol =
rtol
@test MOI.get(optimizer, MOI.VariablePrimal(), getindex.(Ref(idxmap), x)) ≈
[0.0; 5.0] atol = atol rtol = rtol
@test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(optimizer, MOI.ConstraintDual(), idxmap[c]) ≈
-[1.666666666666; 0.0; 1.3333333; 0.0; 0.0] atol = atol rtol = rtol
# test allocations
allocs = @allocated MOI.optimize!(optimizer)
@test allocs == 0
function mapfrommodel(
::MOI.AbstractOptimizer,
x::Union{MOI.VariableIndex,<:MOI.ConstraintIndex},
)
return idxmap[x]
end
function mapfrommodel(
::MOI.ModelLike,
x::Union{MOI.VariableIndex,<:MOI.ConstraintIndex},
)
return x
end
# make random modifications to constraints
randvectorconfig = MOI.Test.Config(atol = Inf, rtol = 1e-4)
rng = MersenneTwister(1234)
for i in 1:100
newcoeffs = copy(coeffs)
modindex = rand(rng, 1:length(newcoeffs))
newcoeffs[modindex] = 0
newconst = round.(5 .* (rand(rng, length(u)) .- 0.5); digits = 2)
_test_optimizer_modification(
model,
optimizer,
idxmap,
defaultoptimizer(),
randvectorconfig,
) do m
attr = MOI.ConstraintFunction()
ci = mapfrommodel(m, c)
newcf = MOI.VectorAffineFunction(
MOI.VectorAffineTerm.(
Int64.(I),
term.(newcoeffs, map(j -> getindex(x, j), J)),
),
newconst,
)
return MOI.set(m, attr, ci, newcf)
end
end
end
function test_no_CachingOptimizer_Warm_starting()
# define QP:
# min 1/2 x'Px + q'x
# s.t. Ax <= u <=> -Ax + u in Nonnegatives
# Ax >= l <=> Ax - l in Nonnegatives
l = [1.0; 0; 0]
u = [1.0; 0.7; 0.7]
model = MOIU.UniversalFallback(OSQPModel{Float64}())
optimizer = defaultoptimizer()
x = MOI.add_variables(model, 2)
objective_function =
1.0x[1] + 1.0x[2] + 2.0x[1] * x[1] + 1.0x[1] * x[2] + 1.0x[2] * x[2]
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
objective_function,
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
Aneg = [
MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(-1.0, x[1])),
MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(-1.0, x[2])),
MOI.VectorAffineTerm(2, MOI.ScalarAffineTerm(-1.0, x[1])),
MOI.VectorAffineTerm(3, MOI.ScalarAffineTerm(-1.0, x[2])),
]
MOI.add_constraint(
model,
MOI.VectorAffineFunction(Aneg, u),
MOI.Nonnegatives(3),
)
A = [
MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, x[1])),
MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, x[2])),
MOI.VectorAffineTerm(2, MOI.ScalarAffineTerm(1.0, x[1])),
MOI.VectorAffineTerm(3, MOI.ScalarAffineTerm(1.0, x[2])),
]
con1 = MOI.add_constraint(
model,
MOI.VectorAffineFunction(A, -u),
MOI.Nonpositives(3),
)
con2 = MOI.add_constraint(
model,
MOI.VectorAffineFunction(A, -l),
MOI.Nonnegatives(3),
)
MOI.empty!(optimizer)
idxmap = MOI.copy_to(optimizer, model)
MOI.optimize!(optimizer)
# solve once to get optimal solution
x_sol = MOI.get(optimizer, MOI.VariablePrimal(), getindex.(Ref(idxmap), x))
y_c1 = MOI.get(optimizer, MOI.ConstraintDual(), idxmap[con1])
y_c2 = MOI.get(optimizer, MOI.ConstraintDual(), idxmap[con2])
y_c1_rows = OSQP.MathOptInterfaceOSQP.constraint_rows(
optimizer.rowranges,
idxmap[con1],
)
y_c2_rows = OSQP.MathOptInterfaceOSQP.constraint_rows(
optimizer.rowranges,
idxmap[con2],
)
# provide warm start values to the model
MOI.set.(model, MOI.VariablePrimalStart(), x, x_sol)
MOI.set.(model, MOI.ConstraintDualStart(), [con1, con2], [y_c1, y_c2])
MOI.empty!(optimizer)
idxmap = MOI.copy_to(optimizer, model)
# check that internal variables are set correctly
@test optimizer.warmstartcache.x.data == x_sol
@test optimizer.warmstartcache.y.data[y_c1_rows] == -y_c1
@test optimizer.warmstartcache.y.data[y_c2_rows] == -y_c2
end
function test_vector_equality_constraint()
# Minimize ||A x - b||^2 = x' A' A x - (2 * A' * b)' x + b' * b
# subject to C x = d
generate_problem_data = function (rng, n, m)
A = rand(rng, n, n)
b = rand(rng, n)
C = rand(rng, m, n)
d = rand(rng, m)
C⁺ = pinv(C)
Q = I - C⁺ * C
expected = Q * (pinv(A * Q) * (b - A * C⁺ * d)) + C⁺ * d # note: can be quite badly conditioned
@test C * expected ≈ d atol = 1e-12
P = Symmetric(sparse(triu(A' * A)))
q = -2 * A' * b
r = b' * b
return A, b, C, d, P, q, r, expected
end
make_objective = function (P, q, r, x)
I, J, coeffs = findnz(P.data)
return MOI.ScalarQuadraticFunction(
term.(
2 * coeffs,
map(i -> x[i]::MOI.VariableIndex, I),
map(j -> x[j]::MOI.VariableIndex, J),
),
term.(q, x),
r,
)
end
make_constraint_fun = function (C, d, x)
I, J, coeffs = findnz(sparse(C))
        return MOI.VectorAffineFunction(
MOI.VectorAffineTerm.(
Int64.(I),
term.(coeffs, map(j -> getindex(x, j)::MOI.VariableIndex, J)),
),
-d,
)
end
check_results = function (optimizer, idxmap, x, A, b, expected)
@test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get.(
Ref(optimizer),
Ref(MOI.VariablePrimal()),
getindex.(Ref(idxmap), x),
) ≈ expected atol = 1e-4
@test MOI.get(optimizer, MOI.ObjectiveValue()) ≈
norm(A * expected - b)^2 atol = 1e-4
end
n = 8
m = 2
rng = MersenneTwister(1234)
A, b, C, d, P, q, r, expected = generate_problem_data(rng, n, m)
model = OSQPModel{Float64}()
x = MOI.add_variables(model, n)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
make_objective(P, q, r, x),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
c = MOI.add_constraint(
model,
make_constraint_fun(C, d, x),
MOI.Zeros(length(d)),
)
optimizer = defaultoptimizer()
idxmap = MOI.copy_to(optimizer, model)
MOI.optimize!(optimizer)
check_results(optimizer, idxmap, x, A, b, expected)
x = [idxmap[xi] for xi in x]
for i in 1:10
A, b, C, d, P, q, r, expected = generate_problem_data(rng, n, m)
MOI.set(
optimizer,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
make_objective(P, q, r, x),
)
attr = MOI.ConstraintFunction()
MOI.set(optimizer, attr, idxmap[c], make_constraint_fun(C, d, x))
attr = MOI.ConstraintSet()
MOI.set(optimizer, attr, idxmap[c], MOI.Zeros(length(d))) # noop, but ok
MOI.optimize!(optimizer)
check_results(optimizer, idxmap, x, A, b, expected)
end
end
function test_RawSolver()
optimizer = defaultoptimizer()
let inner = MOI.get(optimizer, MOI.RawSolver())
@test inner.workspace == C_NULL
end
@test MOI.get(optimizer, MOI.SolverName()) == "OSQP"
model = OSQPModel{Float64}()
MOI.empty!(model)
x = MOI.add_variable(model)
c = MOI.add_constraint(model, 1.0x, MOI.GreaterThan(2.0))
obj = convert(MOI.ScalarAffineFunction{Float64}, x)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(obj)}(), obj)
MOI.copy_to(optimizer, model)
inner = MOI.get(optimizer, MOI.RawSolver())
@test inner.workspace != C_NULL
end
end # module
TestOSQP.runtests()
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 6271 | function setup_basic()
# Simple QP problem
problem = Dict()
problem[:P] = sparse([11.0 0.0; 0.0 0.0])
problem[:q] = [3.0; 4]
problem[:A] = sparse([-1 0; 0 -1; -1 -3; 2 5; 3 4])
problem[:u] = [0.0; 0.0; -15; 100; 80]
problem[:l] = -Inf * ones(length(problem[:u]))
problem[:n] = size(problem[:P], 1)
problem[:m] = size(problem[:A], 1)
options = Dict(
:verbose => false,
:eps_abs => 1e-09,
:eps_rel => 1e-09,
:check_termination => 1,
:polish => false,
:max_iter => 4000,
:rho => 0.1,
:adaptive_rho => false,
:warm_start => true,
)
return problem, options
end
tol = 1e-5
@testset "basic" begin
@testset "basic_QP" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
results = OSQP.solve!(model)
@test isapprox(norm(results.x - [0.0; 5.0]), 0.0, atol = tol)
@test isapprox(
norm(results.y - [1.666666666666; 0.0; 1.3333333; 0.0; 0.0]),
0.0,
atol = tol,
)
@test isapprox(results.info.obj_val, 20.0, atol = tol)
end
@testset "update_q" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
OSQP.update!(model, q = [10.0; 20.0])
results = OSQP.solve!(model)
@test isapprox(norm(results.x - [0.0; 5.0]), 0.0, atol = tol)
@test isapprox(
norm(results.y - [3.33333333; 0.0; 6.66666666; 0.0; 0.0]),
0.0,
atol = tol,
)
@test isapprox(results.info.obj_val, 100.0, atol = tol)
end
@testset "update_l" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
OSQP.update!(model, l = -100 * ones(problem[:m]))
results = OSQP.solve!(model)
@test isapprox(norm(results.x - [0.0; 5.0]), 0.0, atol = tol)
@test isapprox(
norm(results.y - [1.6666666666; 0.0; 1.333333333333; 0.0; 0.0]),
0.0,
atol = tol,
)
@test isapprox(results.info.obj_val, 20.0, atol = tol)
end
@testset "update_u" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
OSQP.update!(model, u = 1000 * ones(problem[:m]))
results = OSQP.solve!(model)
@test isapprox(
norm(results.x - [-1.51515152e-01, -3.33282828e+02]),
0.0,
atol = tol,
)
@test isapprox(
norm(results.y - [0.0; 0.0; 1.333333333333; 0.0; 0.0]),
0.0,
atol = tol,
)
@test isapprox(results.info.obj_val, -1333.459595961, atol = tol)
end
@testset "update_max_iter" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
OSQP.update_settings!(model, max_iter = 80)
results = OSQP.solve!(model)
@test results.info.status == :Max_iter_reached
end
@testset "update_check_termination" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
OSQP.update_settings!(model, check_termination = false)
results = OSQP.solve!(model)
@test results.info.iter == options[:max_iter]
end
@testset "update_rho" begin
problem, options = setup_basic()
# Setup default problem
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
results_default = OSQP.solve!(model)
# Setup different rho and update to same rho
new_opts = copy(options)
new_opts[:rho] = 0.7
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
new_opts...,
)
OSQP.update_settings!(model, rho = options[:rho])
results_new_rho = OSQP.solve!(model)
@test results_default.info.iter == results_new_rho.info.iter
end
@testset "time_limit" begin
problem, options = setup_basic()
model = OSQP.Model()
OSQP.setup!(
model;
P = problem[:P],
q = problem[:q],
A = problem[:A],
l = problem[:l],
u = problem[:u],
options...,
)
results = OSQP.solve!(model)
@test results.info.status == :Solved
# Ensure solver will time out
OSQP.update_settings!(
model,
eps_abs = 1e-20,
eps_rel = 1e-20,
time_limit = 1e-6,
max_iter = 1000000,
check_termination = 0,
)
results_time_limit = OSQP.solve!(model)
@test results_time_limit.info.status == :Time_limit_reached
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 1739 | function setup_dual_infeasibility()
options = Dict(
:verbose => false,
:eps_abs => 1e-05,
:eps_rel => 1e-05,
:eps_prim_inf => 1e-15,
:check_termination => 1,
)
return options
end
tol = 1e-5
@testset "dual_infeasibility" begin
@testset "dual_infeasible_lp" begin
P = spzeros(2, 2)
q = [2.0; -1.0]
A = sparse(I, 2, 2)
u = Inf * ones(2)
l = [0.0; 0.0]
options = setup_dual_infeasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test results.info.status == :Dual_infeasible
end
@testset "dual_infeasible_qp" begin
P = sparse(Diagonal([4.0; 0.0]))
q = [0.0; 2]
A = sparse([1.0 1.0; -1.0 1])
u = [2.0; 3]
l = -Inf * ones(2)
options = setup_dual_infeasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test results.info.status == :Dual_infeasible
end
@testset "primal_dual_infeasible" begin
P = spzeros(2, 2)
q = [-1.0; -1.0]
A = sparse([1.0 -1.0; -1.0 1; 1.0 0; 0.0 1])
u = Inf * ones(4)
l = [1.0, 1.0, 0.0, 0.0]
options = setup_dual_infeasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Warm start to avoid infeasibility detection at first step
OSQP.warm_start!(model, x = [50.0; 30.0], y = [-2.0; -2; -2; -2])
results = OSQP.solve!(model)
@test results.info.status == :Dual_infeasible
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 669 | function setup_feasibility()
options = Dict(
:verbose => false,
:eps_abs => 1e-06,
:eps_rel => 1e-06,
:max_iter => 5000,
)
return options
end
tol = 1e-3
@testset "feasibility" begin
@testset "feasibility_problem" begin
n = 30
m = 30
P = spzeros(n, n)
q = zeros(n)
A = sprandn(m, n, 0.8)
u = randn(m)
l = u
options = setup_feasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test isapprox(norm(A * results.x - u), 0.0, atol = tol)
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 499 | using SparseArrays: SparseMatrixCSC, sparse
using LinearAlgebra: I
using OSQP: Ccsc, ManagedCcsc
@testset "sparse matrix interface roundtrip" begin
    jl = sparse(Matrix{Bool}(I, 5, 5))
mc = ManagedCcsc(jl)
GC.@preserve mc begin
c = Ccsc(mc)
jl2 = convert(SparseMatrixCSC, c)
@test jl == jl2
end
end
# Check model error handling
@testset "Model error handling" begin
model = OSQP.Model()
@test_throws ErrorException OSQP.solve!(model)
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 1266 |
@testset "non_convex" begin
@testset "non_convex_small_sigma" begin
# Setup problem
P = sparse([2.0 5.0; 5.0 1.0])
q = [3.0; 4]
A = sparse([-1 0; 0 -1; -1 -3; 2 5; 3 4])
u = [0.0; 0.0; -15; 100; 80]
l = -Inf * ones(length(u))
options = Dict(:verbose => false, :sigma => 1e-06)
model = OSQP.Model()
        # Setup should fail because (P + sigma * I) has a negative eigenvalue
        setup_succeeded = true
        try
            OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
        catch
            setup_succeeded = false
        end
        @test !setup_succeeded
end
@testset "non_convex_big_sigma" begin
# Setup workspace with new sigma
P = sparse([2.0 5.0; 5.0 1.0])
q = [3.0; 4]
A = sparse([-1 0; 0 -1; -1 -3; 2 5; 3 4])
u = [0.0; 0.0; -15; 100; 80]
l = -Inf * ones(length(u))
options = Dict(:verbose => false, :sigma => 5.0)
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Solve problem
results = OSQP.solve!(model)
@test isnan(results.info.obj_val)
@test results.info.status == :Non_convex
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 2839 | function setup_polishing()
    options = Dict(
        :verbose => false,
        :polish => true,
        :eps_abs => 1e-03,
        :eps_rel => 1e-03,
        :max_iter => 5000,
    )
return options
end
tol = 1e-3
@testset "polishing" begin
@testset "polishing_problem" begin
P = sparse(Diagonal([11.0; 0.0]))
q = [3.0; 4.0]
A = sparse([-1.0 0.0; 0.0 -1.0; -1.0 -3; 2.0 5.0; 3.0 4.0])
u = [0.0; 0.0; -15.0; 100.0; 80]
l = -Inf * ones(length(u))
(m, n) = size(A)
# Solve problem
options = setup_polishing()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
x_test = [9.90341e-11; 5.0]
y_test = [1.66667; 0.0; 1.33333; 1.20431e-14; 1.49741e-14]
obj_test = 20.0
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status_polish == 1
end
@testset "polishing_unconstrained" begin
seed!(1)
n = 10
m = n
P = sparse(Diagonal(rand(n)) + 0.2 * sparse(I, n, n))
q = randn(n)
A = sparse(I, n, n)
l = -100 * ones(m)
u = 100 * ones(m)
# Solve problem
options = setup_polishing()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
# Explicit solution
invP = inv(Array(P))
x_test = -invP * q
y_test = zeros(m)
obj_test = -0.5 * q' * invP * q
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, zeros(m), atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status_polish == 1
end
@testset "polish_random" begin
# load randomly generated problem with known accurate solution from Mosek
problem_data = FileIO.load(
joinpath(@__DIR__, "problem_data/random_polish_qp.jld2"),
)
P = problem_data["P"]
q = problem_data["q"]
A = problem_data["A"]
u = problem_data["u"]
l = problem_data["l"]
options = setup_polishing()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test isapprox(results.x, problem_data["x_test"], atol = tol)
@test isapprox(results.y, problem_data["y_test"], atol = tol)
@test isapprox(
results.info.obj_val,
problem_data["obj_test"],
atol = tol,
)
@test results.info.status_polish == 1
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 1483 | function setup_primal_infeasibility()
options = Dict(
:verbose => false,
:eps_abs => 1e-05,
:eps_rel => 1e-05,
:eps_dual_inf => 1e-18,
:scaling => true,
)
return options
end
tol = 1e-5
@testset "primal_infeasibility" begin
@testset "primal_infeasible_problem" begin
seed!(1)
n = 50
m = 500
P = sprandn(n, n, 0.6)
P = P' * P
q = randn(n)
A = sprandn(m, n, 0.6)
u = 3 .+ randn(m)
l = -3 .+ randn(m)
# Make problem infeasible
A[Int(n / 2), :] = A[Int(n / 2)+1, :]
l[Int(n / 2)] = u[Int(n / 2)+1] + 10 * rand()
u[Int(n / 2)] = l[Int(n / 2)] + 0.5
options = setup_primal_infeasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test results.info.status == :Primal_infeasible
end
@testset "primal_dual_infeasible_problem" begin
seed!(1)
n = 2
m = 4
P = spzeros(n, n)
q = [-1.0, -1]
A = sparse([1.0 -1; -1.0 1.0; 1.0 0.0; 0.0 1.0])
l = [1.0, 1.0, 0.0, 0.0]
u = Inf * ones(m)
options = setup_primal_infeasibility()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test results.info.status == :Primal_infeasible
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 455 | using OSQP
using Test, SparseArrays, LinearAlgebra
using Random: seed!
using FileIO
tests = [
"basic.jl",
"dual_infeasibility.jl",
"feasibility.jl",
"non_convex.jl",
"polishing.jl",
"primal_infeasibility.jl",
"unconstrained.jl",
"warm_start.jl",
"update_matrices.jl",
"MOI_wrapper.jl",
"interface.jl",
]
println("Running tests:")
for curtest in tests
println(" Test: $(curtest)")
include(curtest)
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 1046 | function setup_unconstrained()
options = Dict(
:verbose => false,
:eps_abs => 1e-08,
:eps_rel => 1e-08,
:eps_dual_inf => 1e-18,
)
return options
end
tol = 1e-5
@testset "unconstrained" begin
@testset "unconstrained_problem" begin
seed!(1)
n = 30
m = 0
P = sparse(Diagonal(rand(n)) + 0.2 * sparse(I, n, n))
q = randn(n)
A = spzeros(m, n)
u = Float64[]
l = Float64[]
# Explicit solution
invP = inv(Array(P))
x_test = -invP * q
y_test = zeros(m)
obj_test = -0.5 * q' * invP * q
options = setup_unconstrained()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 21713 | function setup_update_matrices()
options = Dict(
:verbose => false,
:eps_abs => 1e-08,
:eps_rel => 1e-08,
:polish => false,
:check_termination => 1,
)
seed!(1)
n = 5
m = 8
p = 0.7
Pt = sprandn(n, n, p)
Pt_new = copy(Pt)
P = Pt * Pt' + sparse(I, n, n)
# PtI = findall(!iszero, P)
# (Pti, Ptj) = (getindex.(PtI, 1), getindex.(PtI, 2))
Ptx = copy(Pt.nzval)
Pt_newx = Ptx + 0.1 * randn(length(Ptx))
# Pt_new = sparse(Pi, Pj, Pt_newx)
P_new = Pt_new * Pt_new' + sparse(I, n, n)
q = randn(n)
A = sprandn(m, n, p)
(Ai, Aj, _) = findnz(A)
Ax = copy(A.nzval)
A_newx = Ax + randn(length(Ax))
A_new = sparse(Ai, Aj, A_newx)
# A_new = copy(A)
# A_new.nzval += randn(length(A_new.nzval))
l = zeros(m)
u = 30 .+ randn(m)
problem = Dict()
problem[:P] = P
problem[:P_new] = P_new
problem[:q] = q
problem[:A] = A
problem[:A_new] = A_new
problem[:l] = l
problem[:u] = u
problem[:m] = m
problem[:n] = n
return problem, options
end
tol = 1e-5
# This unit test relies on a precomputed reference solution for a randomly generated problem (done in Julia v1.0).
# However, in newer versions of Julia the random number stream changed, leading to a different problem being generated.
if VERSION < v"1.1"
@testset "update_matrices" begin
@testset "solve" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
x_test = [-0.324865, 0.598681, -0.066646, -0.00653471, -0.0736556]
y_test = [
-2.72542e-10,
-1.927e-12,
-0.547374,
-2.25907e-10,
-0.645086,
-4.09874e-9,
-1.4083e-11,
-1.26234e-11,
]
obj_test = -0.6736977821770016
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrix
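# OSQP stores only the upper-triangular part of P, so pass the nonzeros of triu(P_new)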
Pnew_triu = triu(P_new)
Pnew_triu_idx = collect(1:length(Pnew_triu.nzval))
OSQP.update!(model, Px = Pnew_triu.nzval, Px_idx = Pnew_triu_idx)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
x_test = [-0.324865, 0.598681, -0.066646, -0.00653471, -0.0736556]
y_test = [
-2.72542e-10,
-1.927e-12,
-0.547374,
-2.25907e-10,
-0.645086,
-4.09874e-9,
-1.4083e-11,
-1.26234e-11,
]
obj_test = -0.6736977821770016
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P_allind" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrix
Pnew_triu = triu(P_new)
OSQP.update!(model, Px = Pnew_triu.nzval)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
x_test = [-0.324865, 0.598681, -0.066646, -0.00653471, -0.0736556]
y_test = [
-2.72542e-10,
-1.927e-12,
-0.547374,
-2.25907e-10,
-0.645086,
-4.09874e-9,
-1.4083e-11,
-1.26234e-11,
]
obj_test = -0.6736977821770016
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_A" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrix
A_new_idx = collect(1:length(A_new.nzval))
OSQP.update!(model, Ax = A_new.nzval, Ax_idx = A_new_idx)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_A_allind" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrix
OSQP.update!(model, Ax = A_new.nzval)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P_A_indP_indA" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrices P and A
P_new_triu = triu(P_new)
P_new_triu_idx = collect(1:length(P_new_triu.nzval))
A_new_idx = collect(1:length(A_new.nzval))
OSQP.update!(
model,
Px = P_new_triu.nzval,
Px_idx = P_new_triu_idx,
Ax = A_new.nzval,
Ax_idx = A_new_idx,
)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P_A_indP" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrices P and A
P_new_triu = triu(P_new)
P_new_triu_idx = collect(1:length(P_new_triu.nzval))
OSQP.update!(
model,
Px = P_new_triu.nzval,
Px_idx = P_new_triu_idx,
Ax = A_new.nzval,
)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P_A_indA" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrices P and A
P_new_triu = triu(P_new)
A_new_idx = collect(1:length(A_new.nzval))
OSQP.update!(
model,
Px = P_new_triu.nzval,
Ax = A_new.nzval,
Ax_idx = A_new_idx,
)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
@testset "update_P_A_allind" begin
problem, options = setup_update_matrices()
(n, m, P, q, A, l, u) = (
problem[:n],
problem[:m],
problem[:P],
problem[:q],
problem[:A],
problem[:l],
problem[:u],
)
P_new = problem[:P_new]
A_new = problem[:A_new]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
# Update matrices P and A
P_new_triu = triu(P_new)
OSQP.update!(model, Px = P_new_triu.nzval, Ax = A_new.nzval)
results = OSQP.solve!(model)
# # Solve with Gurobi
# using Gurobi
# env = Gurobi.Env()
# setparam!(env, "OutputFlag", 1)
# model = Gurobi.Model(env, "model")
# add_cvars!(model, q); update_model!(model)
# add_constrs!(model, A_new, repmat(['<'], m), u); update_model!(model)
# add_constrs!(model, A_new, repmat(['>'], m), l); update_model!(model)
# add_qpterms!(model, P_new); update_model!(model)
# optimize(model)
# x_test = get_solution(model)
# obj_test= get_objval(model)
# y_test = -Gurobi.get_dblattrarray(model, "Pi", 1, 2 * m)
# y_test = y_test[m+1:end] + y_test[1:m]
#
# println("x_test = $(x_test)")
# println("y_test = $(y_test)")
# println("obj_test = $(obj_test)")
#
x_test = [-0.0318085, 0.067069, -0.0242966, -0.0593736, -0.0321274]
y_test = [
-2.16989e-10,
-1.01848,
-0.516013,
-0.0227263,
-3.19721e-10,
-1.04482,
-3.4596e-10,
-4.51608e-10,
]
obj_test = -0.02187014865840703
@test isapprox(results.x, x_test, atol = tol)
@test isapprox(results.y, y_test, atol = tol)
@test isapprox(results.info.obj_val, obj_test, atol = tol)
@test results.info.status == :Solved
end
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | code | 1274 | function setup_warm_start()
options = Dict(
:verbose => false,
:eps_abs => 1e-08,
:eps_rel => 1e-08,
:polish => false,
:adaptive_rho => false,
:check_termination => 1,
)
return options
end
tol = 1e-5
@testset "warm_start" begin
@testset "warm_start_problem" begin
seed!(1)
n = 100
m = 200
P = sprandn(n, n, 0.9)
P = P' * P
q = randn(n)
A = sprandn(m, n, 0.9)
u = rand(m) * 2
l = -rand(m) * 2
options = setup_warm_start()
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, options...)
results = OSQP.solve!(model)
# Store optimal values
x_opt = results.x
y_opt = results.y
tot_iter = results.info.iter
# Warm start with zeros to check if number of iterations is the same
OSQP.warm_start!(model, x = zeros(n), y = zeros(m))
results = OSQP.solve!(model)
@test results.info.iter == tot_iter
# Warm start with optimal values and check that total number of iter < 10
OSQP.warm_start!(model, x = x_opt, y = y_opt)
results = OSQP.solve!(model)
@test results.info.iter <= 10
end
end
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | docs | 1832 | Version 0.5.2 (24 May 2019): OSQP v0.5.0
----------------------------------------------
* Updated package manager system (#52): now all operating systems use precompiled shared libraries.
* Fixed issues #44, #51, #49, #50
Version 0.5.0 (10 December 2018): OSQP v0.5.0
----------------------------------------------
* Upgraded OSQP to 0.5.0 version
* Added `update_time` info structure
Version 0.4.0 (29 October 2018): OSQP v0.4.1
----------------------------------------------
* Upgraded OSQP to 0.4.1 version
Version 0.3.0 (4 September 2018): OSQP v0.4.0
----------------------------------------------
* Bugfixes for Julia 1.0
Version 0.2.0 (24 July 2018): OSQP v0.4.0
-----------------------------------------
* Upgraded OSQP to 0.4.0 version
Version 0.1.5 (12 June 2018): OSQP v0.3.1
-----------------------------------------
* Upgraded OSQP to 0.3.1 version
Version 0.1.5 (7 June 2018): OSQP v0.3.0
-----------------------------------------
* Added support for MathOptInterface 0.3 (adapted to https://github.com/JuliaOpt/MathOptInterface.jl/pull/351)
* Dropped support for MathOptInterface 0.2
Version 0.1.4 (5 March 2018): OSQP v0.3.0
-----------------------------------------
* Updated OSQP version
* Added `time_limit` option
Version 0.1.3 (12 February 2018): OSQP v0.2.1
----------------------------------------------
* Added MathProgBase interface
Version 0.1.2 (30 January 2018): OSQP v0.2.1
----------------------------------------------
* Updated matrix updates indexing to match Julia one
* Fixed issue [#2](https://github.com/oxfordcontrol/OSQP.jl/issues/2)
Version 0.1.1 (25 November 2017): OSQP v0.2.1
----------------------------------------------
* Update for new OSQP version
Version 0.1.0 (23 November 2017): OSQP v0.2.0
----------------------------------------------
* First release. Interface wrapped
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | 50faf456a64ac1ca097b78bcdf288d94708adcdd | docs | 1366 | # OSQP.jl
[](https://github.com/osqp/OSQP.jl/actions)
[](http://codecov.io/github/osqp/OSQP.jl?branch=master)
[OSQP.jl](https://github.com/osqp/OSQP.jl) is a Julia wrapper for
[OSQP](https://osqp.org/): the Operator Splitting QP Solver.
## License
OSQP.jl is licensed under the [Apache-2.0 license](https://github.com/osqp/OSQP.jl/blob/master/LICENSE.md).
The upstream solver, [osqp/osqp](https://github.com/osqp/osqp) is also licensed
under the [Apache-2.0 license](https://github.com/osqp/osqp/blob/master/LICENSE).
## Installation
Install OSQP.jl using the Julia package manager
```julia
import Pkg
Pkg.add("OSQP")
```
## Problem class
The OSQP (Operator Splitting Quadratic Program) solver is a numerical
optimization package for solving problems in the form
```
minimize 0.5 x' P x + q' x
subject to l <= A x <= u
```
where `x in R^n` is the optimization variable. The objective function is defined
by a positive semidefinite matrix `P in S^n_+` and vector `q in R^n`. The linear
constraints are defined by matrix `A in R^{m x n}` and vectors
`l in R^m U {-inf}^m`, `u in R^m U {+inf}^m`.
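## Example
A minimal usage sketch solving a small QP (the problem data below is illustrative, not from any particular application):
```julia
using OSQP, SparseArrays
# Problem data: minimize 0.5 x'Px + q'x  subject to  l <= Ax <= u
P = sparse([4.0 1.0; 1.0 2.0])
q = [1.0, 1.0]
A = sparse([1.0 1.0; 1.0 0.0; 0.0 1.0])
l = [1.0, 0.0, 0.0]
u = [1.0, 0.7, 0.7]
model = OSQP.Model()
OSQP.setup!(model; P = P, q = q, A = A, l = l, u = u, verbose = false)
results = OSQP.solve!(model)
results.x            # primal solution
results.info.status  # solver status, e.g. :Solved
```
This mirrors the `OSQP.setup!`/`OSQP.solve!` pattern used in the package's own test suite; solver settings such as `eps_abs` or `verbose` are passed as extra keyword arguments to `setup!`.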
## Documentation
Detailed documentation is available at [https://osqp.org/](https://osqp.org/).
| OSQP | https://github.com/osqp/OSQP.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 5011 | using Mimi
# Aggregate damages across damage functions
@defcomp DamageAggregator_NewSectorDamages begin
fund_regions = Index()
country = Index()
energy_countries = Index()
domestic_countries = Index()
domestic_idxs_country_dim = Parameter{Int}(index=[domestic_countries])
domestic_idxs_energy_countries_dim = Parameter{Int}(index=[domestic_countries])
# internally compute for speed
domestic_idxs_country_dim_int = Variable{Int}(index=[domestic_countries])
domestic_idxs_energy_countries_dim_int = Variable{Int}(index=[domestic_countries])
# inclusion of different damages
# By default the individual sectoral damage calculations are ON, including
# SLR which runs after the main model, while global damage function calculations
# are OFF.
include_cromar_mortality = Parameter{Bool}(default=true)
include_ag = Parameter{Bool}(default=true)
include_slr = Parameter{Bool}(default=true)
include_energy = Parameter{Bool}(default=true)
include_new_sector = Parameter{Bool}(default=true)
include_dice2016R2 = Parameter{Bool}(default=false)
include_hs_damage = Parameter{Bool}(default=false)
damage_cromar_mortality = Parameter(index=[time,country], unit="US\$2005/yr")
damage_ag = Parameter(index=[time,fund_regions], unit="billion US\$2005/yr")
damage_energy = Parameter(index=[time,energy_countries], unit="billion US\$2005/yr")
damage_new_sector = Parameter(index=[time,energy_countries], unit="billion US\$2005/yr")
damage_dice2016R2 = Parameter(index=[time], unit="billion US\$2005/yr")
damage_hs = Parameter(index=[time], unit="billion US\$2005/yr")
gdp = Parameter(index=[time,country], unit="billion US\$2005/yr")
total_damage = Variable(index=[time], unit="US\$2005/yr")
total_damage_share = Variable(index=[time])
total_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
# global annual aggregates - for interim model outputs and partial SCCs
cromar_mortality_damage = Variable(index=[time], unit="US\$2005/yr")
agriculture_damage = Variable(index=[time], unit="US\$2005/yr")
energy_damage = Variable(index=[time], unit="US\$2005/yr")
new_sector_damage = Variable(index=[time], unit="US\$2005/yr")
# domestic annual aggregates - for interim model outputs and partial SCCs
cromar_mortality_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
agriculture_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
energy_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
new_sector_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
function init(p,v,d)
# convert to integers for indexing - do once here for speed
v.domestic_idxs_country_dim_int[:] = Int.(p.domestic_idxs_country_dim)
v.domestic_idxs_energy_countries_dim_int[:] = Int.(p.domestic_idxs_energy_countries_dim)
end
function run_timestep(p, v, d, t)
# global annual aggregates - for interim model outputs and partial SCCs
v.cromar_mortality_damage[t] = sum(p.damage_cromar_mortality[t,:])
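# ag, energy, and new-sector damages are reported in billion US$2005/yr, so scale by 1e9 to US$2005/yr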
v.agriculture_damage[t] = sum(p.damage_ag[t,:]) * 1e9
v.energy_damage[t] = sum(p.damage_energy[t,:]) * 1e9
v.new_sector_damage[t] = sum(p.damage_new_sector[t,:]) * 1e9
v.total_damage[t] =
(p.include_cromar_mortality ? v.cromar_mortality_damage[t] : 0.) +
(p.include_ag ? v.agriculture_damage[t] : 0.) +
(p.include_energy ? v.energy_damage[t] : 0.) +
(p.include_new_sector ? v.new_sector_damage[t] : 0.) +
(p.include_dice2016R2 ? p.damage_dice2016R2[t] * 1e9 : 0.) +
(p.include_hs_damage ? p.damage_hs[t] * 1e9 : 0.)
gdp = sum(p.gdp[t,:]) * 1e9
v.total_damage_share[t] = v.total_damage[t] / gdp
# domestic annual aggregates - for interim model outputs and partial SCCs
v.cromar_mortality_damage_domestic[t] = sum(p.damage_cromar_mortality[t, v.domestic_idxs_country_dim_int])
v.agriculture_damage_domestic[t] = p.damage_ag[t,1] * 1e9
v.energy_damage_domestic[t] = sum(p.damage_energy[t, v.domestic_idxs_energy_countries_dim_int] * 1e9)
v.new_sector_damage_domestic[t] = sum(p.damage_new_sector[t, v.domestic_idxs_country_dim_int] * 1e9)
# Calculate domestic damages
v.total_damage_domestic[t] =
(p.include_cromar_mortality ? v.cromar_mortality_damage_domestic[t] : 0.) +
(p.include_ag ? v.agriculture_damage_domestic[t] : 0.) +
(p.include_energy ? v.energy_damage_domestic[t] : 0.) +
(p.include_new_sector ? v.new_sector_damage_domestic[t] : 0.)
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 3965 | using Pkg
# Instantiate environment
Pkg.activate(joinpath(@__DIR__, ".."))
Pkg.instantiate()
using Mimi
using MimiGIVE
using DataFrames
using Query
using VegaLite
include("new_sector_damages.jl")
include("DamageAggregator_NewSectorDamages.jl")
include("main_model.jl")
include("mcs.jl")
include("scc.jl")
# Run the model
m = get_modified_model()
run(m)
# Explore the results in graphic form via the explorer and the plot functions
explore(m)
Mimi.plot(m, :NewSectorDamages, :damages)
# Examine results in tabular form, or plot them yourself with Vegalite
df = getdataframe(m, :NewSectorDamages, :damages) |> @filter(_.time >= 2020) |> DataFrame
df.time = string.(df.time)
df |> @vlplot(:line, x = "time:t", y = :damages, color = :country, width = 500, height = 500)
# Run a Monte Carlo Simulation - NOTE be careful, at a high value of n any
# country-disaggregated variable will save to an extremely large file
save_list = [
(:DamageAggregator, :total_damage),
(:DamageAggregator, :total_damage_share),
(:DamageAggregator, :cromar_mortality_damage),
(:DamageAggregator, :agriculture_damage),
(:DamageAggregator, :energy_damage),
(:DamageAggregator, :new_sector_damage),
(:global_netconsumption, :net_consumption),
(:global_netconsumption, :net_cpc),
(:global_netconsumption, :global_gdp),
(:global_netconsumption, :global_population),
(:temperature, :T),
(:glaciers_small_icecaps, :gsic_sea_level) ,
(:antarctic_icesheet, :ais_sea_level),
(:greenland_icesheet, :greenland_sea_level),
(:thermal_expansion, :te_sea_level),
(:landwater_storage, :lws_sea_level)
]
output_dir = joinpath(@__DIR__, "..", "output", "mcs", "MCS_main_output")
mkpath(output_dir)
results = run_modified_mcs(trials=10, save_list=save_list, output_dir=output_dir);
# Explore the results in graphic form via the explorer and the plot functions
explore(results)
Mimi.plot(results, :DamageAggregator, :new_sector_damage)
# Examine results in tabular form, or plot them yourself with Vegalite
getdataframe(results, :DamageAggregator, :new_sector_damage)
# Compute SCC
output_dir = joinpath(@__DIR__, "..", "output", "scc", "SCC_main_output")
mkpath(output_dir)
results = compute_modified_scc(
year=2020,
n=10,
discount_rates = [(label = "DICE discount rate", prtp = 0.015, eta = 1.45), (label = "2.0%", prtp = exp(0.001972641) - 1, eta = 1.244458999)],
output_dir=output_dir,
save_list=save_list,
compute_sectoral_values = true,
save_md = true
);
# Access the computed SCC values
scc_df = DataFrame(:region => [], :sector => [], :discount_rate_label => [], :expected_scc => [], :se_expected_scc => [])
results_scc = results[:scc] # results is a dictionary, :scc is a key to this dictionary
for (k,v) in results_scc
# results_scc is a dictionary; we iterate over its keys (k) and values (v)
# --- the keys are each a NamedTuple with elements region, sector, dr_label, prtp, and eta
# --- the values are each a NamedTuple with elements expected_scc, se_expected_scc, and sccs (a vector of the sccs)
append!(scc_df, DataFrame(
:region => k.region,
:sector => k.sector,
:discount_rate_label => k.dr_label,
:expected_scc => v[:expected_scc],
:se_expected_scc => v[:se_expected_scc]
)
)
end
show(scc_df)
# Access the marginal damages (undiscounted)
# marginal damages for the global region for sector new_sector
mds = results[:mds][(region = :globe, sector = :new_sector)]
mds_df = DataFrame(mds, :auto)
rename!(mds_df, Symbol.(2020:2300))
insertcols!(mds_df, 1, :trial => 1:10)
mds_df = stack(mds_df, Not(:trial))
rename!(mds_df, [:trial, :time, :value])
mds_df |> @vlplot(:line, x = "time:t", y = :value, color = "trial:n")
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 903 | using Mimi
using MimiGIVE
include("new_sector_damages.jl")
include("DamageAggregator_NewSectorDamages.jl")
function get_modified_model()
# Obtain MimiGIVE model
m = MimiGIVE.get_model()
# Add new damage sector component
add_comp!(m, NewSectorDamages, first = 2020, after = :energy_damages)
# Replace Damage Aggregator component with modified one
replace!(m, :DamageAggregator => DamageAggregator_NewSectorDamages)
# Need to set this damage aggregator to run from 2020 to 2300, currently picks up
# 1750 to 2300 from replace!
Mimi.set_first_last!(m, :DamageAggregator, first=2020);
# Connections
connect_param!(m, :NewSectorDamages => :temperature, :temperature => :T)
connect_param!(m, :NewSectorDamages => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :DamageAggregator => :damage_new_sector, :NewSectorDamages => :damages)
return m
end | MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 2639 | using Mimi
using MimiGIVE
using Distributions
using Dates
function get_modified_mcs(trials; args...)
mcs = MimiGIVE.get_mcs(trials; args...) # get the original MCS
# add new sector uncertainty
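# the distribution is centered on the component default (a = 0.005) with a standard
# deviation of half the mean, so nearly all draws are positive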
Mimi.add_RV!(mcs, :rv_new_sector_a, Normal(0.005, 0.005/2)) # add random variable
Mimi.add_transform!(mcs, :NewSectorDamages, :a, :(=), :rv_new_sector_a) # connect random variable to parameter
return mcs
end
function run_modified_mcs(;trials::Int64 = 10000,
output_dir::Union{String, Nothing} = nothing,
save_trials::Bool = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
m::Mimi.Model = get_modified_model(),
save_list::Vector = [],
results_in_memory::Bool = true,
)
m = deepcopy(m) # in the case that an `m` was provided, be careful that we don't modify the original
trials < 2 && error("Must run `run_mcs` function with a `trials` argument greater than 1 due to a Mimi specification about SampleStores. TO BE FIXED SOON!")
# Set up output directories
output_dir = output_dir === nothing ? joinpath(@__DIR__, "../output/mcs/", "MCS $(Dates.format(now(), "yyyy-mm-dd HH-MM-SS")) MC$trials") : output_dir
isdir("$output_dir/results") || mkpath("$output_dir/results")
trials_output_filename = save_trials ? joinpath("$output_dir/trials.csv") : nothing
socioeconomics_module = MimiGIVE._get_module_name(m, :Socioeconomic)
if socioeconomics_module == :MimiSSPs
socioeconomics_source = :SSP
elseif socioeconomics_module == :MimiRFFSPs
socioeconomics_source = :RFF
end
# Get an instance of the mcs
mcs = get_modified_mcs(trials;
socioeconomics_source = socioeconomics_source,
mcs_years = Mimi.time_labels(m),
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
save_list = save_list,
)
# run monte carlo trials
results = run(mcs,
m,
trials;
trials_output_filename = trials_output_filename,
results_output_dir = "$output_dir/results",
results_in_memory = results_in_memory
)
return results
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 681 | using Mimi
@defcomp NewSectorDamages begin
country = Index()
temperature = Parameter(index=[time], unit="degC")
gdp = Parameter(index=[time, country], unit="billion US\$2005/yr")
a = Parameter(default=0.005)
damfrac = Variable(index=[time, country])
damages = Variable(index=[time, country], unit="billion US\$2005/yr")
function run_timestep(p, v, d, t)
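# damage share: damfrac = 1 - 1/(1 + a*T^2), approximately a*T^2 for small T and
# saturating toward 1 (100% of GDP) as T grows; temperatures below zero incur no damages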
if p.temperature[t] < 0.
v.damfrac[t] = 0.
else
v.damfrac[t] = 1 - (1/(1+(p.a * p.temperature[t]^2)))
end
for c in d.country
v.damages[t,c] = v.damfrac[t] * p.gdp[t,c]
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 31199 | using Mimi
using MimiGIVE
using Distributions
using Dates
using Query
"""
Compute the SC of a gas in USD \$2005
"""
function compute_modified_scc(
m::Model = get_modified_model();
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
prtp::Union{Float64,Nothing} = 0.015,
eta::Union{Float64,Nothing} = 1.45,
discount_rates = nothing,
certainty_equivalent = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
n = 0,
gas::Symbol = :CO2,
save_list::Vector = [],
output_dir::Union{String, Nothing} = nothing,
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
post_mcs_creation_function = nothing,
pulse_size::Float64 = 1.
)
hfc_list = [:HFC23, :HFC32, :HFC43_10, :HFC125, :HFC134a, :HFC143a, :HFC227ea, :HFC245fa]
gases_list = [:CO2, :CH4, :N2O, hfc_list ...]
m = deepcopy(m) # in the case that an `m` was provided, be careful that we don't modify the original
year === nothing ? error("Must specify an emission year. Try `compute_scc(m, year=2020)`.") : nothing
!(last_year in MimiGIVE._model_years) ? error("Invalid value of $last_year for last_year. last_year must be within the model's time index $(MimiGIVE._model_years).") : nothing
!(year in MimiGIVE._model_years) ? error("Cannot compute the scc for year $year, year must be within the model's time index $(MimiGIVE._model_years).") : nothing
!(gas in gases_list) ? error("Invalid value of $gas for gas, gas must be one of $(gases_list).") : nothing
n>0 && certainty_equivalent && !save_cpc && error("certainty_equivalent=true also requires save_cpc=true")
mm = MimiGIVE.get_marginal_model(m; year = year, gas = gas, pulse_size = pulse_size)
if n==0
return MimiGIVE._compute_scc(mm,
year=year,
last_year=last_year,
prtp=prtp,
eta=eta,
discount_rates=discount_rates,
gas=gas,
domestic=compute_domestic_values,
CIAM_foresight=CIAM_foresight,
CIAM_GDPcap=CIAM_GDPcap,
pulse_size=pulse_size
)
else
isnothing(discount_rates) ? error("To run the Monte Carlo compute_scc function (n != 0), please use the `discount_rates` argument.") : nothing
# Set up output directories
output_dir = output_dir === nothing ? joinpath(@__DIR__, "../output/mcs-SC/", "MCS $(Dates.format(now(), "yyyy-mm-dd HH-MM-SS")) MC$n") : output_dir
isdir("$output_dir/results") || mkpath("$output_dir/results")
return _compute_modified_scc_mcs(mm,
n,
year = year,
last_year = last_year,
discount_rates = discount_rates,
certainty_equivalent = certainty_equivalent,
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
gas = gas,
save_list = save_list,
output_dir = output_dir,
save_md = save_md,
save_cpc = save_cpc,
save_slr_damages = save_slr_damages,
compute_sectoral_values = compute_sectoral_values,
compute_domestic_values = compute_domestic_values,
CIAM_foresight = CIAM_foresight,
CIAM_GDPcap = CIAM_GDPcap,
post_mcs_creation_function = post_mcs_creation_function,
pulse_size = pulse_size
)
end
end
# Post-trial function to run after each trial within the MCS
function modified_post_trial_func(mcs::SimulationInstance, trialnum::Int, ntimesteps::Int, tup)
# Unpack the payload object
scc_values, intermediate_ce_scc_values, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, options = Mimi.payload2(mcs)
# Compute some useful indices
year_index = findfirst(isequal(year), MimiGIVE._model_years)
last_year_index = findfirst(isequal(last_year), MimiGIVE._model_years)
# Access the models
base, marginal = mcs.models
damages_base = base[:DamageAggregator, :total_damage]
damages_marginal = marginal[:DamageAggregator, :total_damage]
if options.compute_domestic_values
damages_base_domestic = base[:DamageAggregator, :total_damage_domestic]
damages_marginal_domestic = marginal[:DamageAggregator, :total_damage_domestic]
end
# Compute marginal damages
# Units Note:
# main_mds and non-ciam sectoral damages: we explicitly need to handle both pulse size and molecular mass so we use gas_units_multiplier
# slr_mds: within the _compute_ciam_marginal_damages function we handle both pulse size and molecular mass
gas_units_multiplier = MimiGIVE.scc_gas_molecular_conversions[gas] ./ (MimiGIVE.scc_gas_pulse_size_conversions[gas] .* options.pulse_size)
include_slr = base[:DamageAggregator, :include_slr]
if include_slr
ciam_mds = MimiGIVE._compute_ciam_marginal_damages(base, marginal, gas, ciam_base, ciam_modified, segment_fingerprints; CIAM_foresight=options.CIAM_foresight, CIAM_GDPcap=options.CIAM_GDPcap, pulse_size=options.pulse_size) # NamedTuple with globe and domestic
# zero out the CIAM marginal damages from start year (2020) through emissions
# year - they will be non-zero due to foresight but saved marginal damages
# should be zeroed out pre-emissions year
ciam_mds.globe[1:year_index] .= 0.
ciam_mds.domestic[1:year_index] .= 0.
end
main_mds = (damages_marginal .- damages_base) .* gas_units_multiplier
slr_mds = include_slr ? ciam_mds.globe : fill(0., length(MimiGIVE._model_years))
total_mds = main_mds .+ slr_mds
if options.compute_domestic_values
main_mds_domestic = (damages_marginal_domestic .- damages_base_domestic) .* gas_units_multiplier
slr_mds_domestic = include_slr ? ciam_mds.domestic : fill(0., length(MimiGIVE._model_years))
total_mds_domestic = main_mds_domestic .+ slr_mds_domestic
end
if options.compute_sectoral_values
cromar_mortality_mds = (marginal[:DamageAggregator, :cromar_mortality_damage] .- base[:DamageAggregator, :cromar_mortality_damage]) .* gas_units_multiplier
agriculture_mds = (marginal[:DamageAggregator, :agriculture_damage] .- base[:DamageAggregator, :agriculture_damage]) .* gas_units_multiplier
energy_mds = (marginal[:DamageAggregator, :energy_damage] .- base[:DamageAggregator, :energy_damage]) .* gas_units_multiplier
new_sector_mds = (marginal[:DamageAggregator, :new_sector_damage] .- base[:DamageAggregator, :new_sector_damage]) .* gas_units_multiplier
if options.compute_domestic_values
cromar_mortality_mds_domestic = (marginal[:DamageAggregator, :cromar_mortality_damage_domestic] .- base[:DamageAggregator, :cromar_mortality_damage_domestic]) .* gas_units_multiplier
agriculture_mds_domestic = (marginal[:DamageAggregator, :agriculture_damage_domestic] .- base[:DamageAggregator, :agriculture_damage_domestic]) .* gas_units_multiplier
energy_mds_domestic = (marginal[:DamageAggregator, :energy_damage_domestic] .- base[:DamageAggregator, :energy_damage_domestic]) .* gas_units_multiplier
new_sector_mds_domestic = (marginal[:DamageAggregator, :new_sector_damage_domestic] .- base[:DamageAggregator, :new_sector_damage_domestic]) .* gas_units_multiplier
end
end
# Save marginal damages
if options.save_md
# global
md_values[(region=:globe, sector=:total)][trialnum, :] = total_mds[MimiGIVE._damages_idxs]
if options.compute_sectoral_values
md_values[(region=:globe, sector=:cromar_mortality)][trialnum, :] = cromar_mortality_mds[MimiGIVE._damages_idxs]
md_values[(region=:globe, sector=:agriculture)][trialnum, :] = agriculture_mds[MimiGIVE._damages_idxs]
md_values[(region=:globe, sector=:energy)][trialnum, :] = energy_mds[MimiGIVE._damages_idxs]
md_values[(region=:globe, sector=:slr)][trialnum, :] = slr_mds[MimiGIVE._damages_idxs]
md_values[(region=:globe, sector=:new_sector)][trialnum, :] = new_sector_mds[MimiGIVE._damages_idxs]
end
# domestic
if options.compute_domestic_values
md_values[(region=:domestic, sector=:total)][trialnum, :] = total_mds_domestic[MimiGIVE._damages_idxs]
if options.compute_sectoral_values
md_values[(region=:domestic, sector=:cromar_mortality)][trialnum, :] = cromar_mortality_mds_domestic[MimiGIVE._damages_idxs]
md_values[(region=:domestic, sector=:agriculture)][trialnum, :] = agriculture_mds_domestic[MimiGIVE._damages_idxs]
md_values[(region=:domestic, sector=:energy)][trialnum, :] = energy_mds_domestic[MimiGIVE._damages_idxs]
md_values[(region=:domestic, sector=:slr)][trialnum, :] = slr_mds_domestic[MimiGIVE._damages_idxs]
md_values[(region=:domestic, sector=:new_sector)][trialnum, :] = new_sector_mds_domestic[MimiGIVE._damages_idxs]
end
end
end
# Save slr damages
if options.save_slr_damages
# get a dummy ciam model to be sure to accurately assign segment names to
# segment level damages
m = get_modified_model()
m_ciam, ~ = MimiGIVE.get_ciam(m)
if include_slr
# global
slr_damages[:base][trialnum,:] = ciam_mds.damages_base[MimiGIVE._damages_idxs]
slr_damages[:modified][trialnum,:] = ciam_mds.damages_modified[MimiGIVE._damages_idxs]
slr_damages[:base_lim_cnt][trialnum,:,:] = ciam_mds.base_lim_cnt
slr_damages[:modified_lim_cnt][trialnum,:,:] = ciam_mds.modified_lim_cnt
slr_damages[:base_segments_2100][trialnum, :] = ciam_mds.damages_base_segments_2100
# domestic - these Dictionary entries will only exist if we are computing
# domestic values
if options.compute_domestic_values
slr_damages[:base_domestic][trialnum,:] = ciam_mds.damages_base_domestic[MimiGIVE._damages_idxs]
slr_damages[:modified_domestic][trialnum,:] = ciam_mds.damages_modified_domestic[MimiGIVE._damages_idxs]
end
else
# global
slr_damages[:base][trialnum,:] .= 0.
slr_damages[:modified][trialnum,:] .= 0.
slr_damages[:base_lim_cnt][trialnum,:,:] .= 0.
slr_damages[:modified_lim_cnt][trialnum,:,:] .= 0.
slr_damages[:base_segments_2100][trialnum, :] .= 0.
# domestic - these Dictionary entries will only exist if we are computing
# domestic values
if options.compute_domestic_values
slr_damages[:base_domestic][trialnum,:] .= 0.
slr_damages[:modified_domestic][trialnum,:] .= 0.
end
end
end
# Get per capita consumption
# We don't care about units here because we are only going to use ratios
cpc = base[:global_netconsumption, :net_cpc]
# Save per capita consumption
if options.save_cpc
cpc_values[(region=:globe, sector=:total)][trialnum, :] = cpc[MimiGIVE._damages_idxs]
end
# Calculate the SCC for each discount rate
for dr in discount_rates
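# Ramsey-style discount factor for each year t: df = (c_0/c_t)^eta / (1 + prtp)^(t - year),
# where c is global net per-capita consumption and c_0 is its value in the emission year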
df = [((cpc[year_index]/cpc[i])^dr.eta * 1/(1+dr.prtp)^(t-year) for (i,t) in enumerate(MimiGIVE._model_years) if year<=t<=last_year)...]
if options.certainty_equivalent
df_ce = [((1. / cpc[i])^dr.eta * 1/(1+dr.prtp)^(t-year) for (i,t) in enumerate(MimiGIVE._model_years) if year<=t<=last_year)...] # only used if options.certainty_equivalent=true
end
# totals (sector=:total)
scc = sum(df .* total_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* total_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
end
# domestic totals (sector=:total)
if options.compute_domestic_values
scc = sum(df .* total_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* total_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
end
end
# sectoral
if options.compute_sectoral_values
scc = sum(df .* cromar_mortality_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* agriculture_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* energy_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* slr_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* new_sector_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:new_sector, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* cromar_mortality_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* agriculture_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* energy_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* slr_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* new_sector_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:new_sector, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
end
# sectoral domestic (region=:domestic)
if options.compute_domestic_values
scc = sum(df .* cromar_mortality_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* agriculture_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* energy_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* slr_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
scc = sum(df .* new_sector_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :new_sector, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* cromar_mortality_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* agriculture_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* energy_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* slr_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* new_sector_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :new_sector, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta)][trialnum] = intermediate_ce_scc
end
end
end
end
end
# Internal function to compute the SCC in a Monte Carlo Simulation
function _compute_modified_scc_mcs(mm::MarginalModel,
n;
year::Int,
last_year::Int,
discount_rates,
certainty_equivalent::Bool,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
gas::Symbol,
save_list::Vector,
output_dir::String,
save_md::Bool,
save_cpc::Bool,
save_slr_damages::Bool,
compute_sectoral_values::Bool,
compute_domestic_values::Bool,
CIAM_foresight::Symbol,
CIAM_GDPcap::Bool,
post_mcs_creation_function,
pulse_size::Float64
)
models = [mm.base, mm.modified]
socioeconomics_module = MimiGIVE._get_module_name(mm.base, :Socioeconomic)
if socioeconomics_module == :MimiSSPs
socioeconomics_source = :SSP
elseif socioeconomics_module == :MimiRFFSPs
socioeconomics_source = :RFF
end
mcs = get_modified_mcs(n;
socioeconomics_source=socioeconomics_source,
mcs_years = MimiGIVE._model_years,
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
save_list = save_list
)
if post_mcs_creation_function!==nothing
post_mcs_creation_function(mcs)
end
regions = compute_domestic_values ? [:globe, :domestic] : [:globe]
sectors = compute_sectoral_values ? [:total, :cromar_mortality, :agriculture, :energy, :slr, :new_sector] : [:total]
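# preallocate result containers, one entry per (region, sector, discount rate) combination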
scc_values = Dict((region=r, sector=s, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta) => Vector{Float64}(undef, n) for dr in discount_rates, r in regions, s in sectors)
intermediate_ce_scc_values = certainty_equivalent ? Dict((region=r, sector=s, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta) => Vector{Float64}(undef, n) for dr in discount_rates, r in regions, s in sectors) : nothing
md_values = save_md ? Dict((region=r, sector=s) => Array{Float64}(undef, n, length(MimiGIVE._damages_years)) for r in regions, s in sectors) : nothing
cpc_values = save_cpc ? Dict((region=r, sector=s) => Array{Float64}(undef, n, length(MimiGIVE._damages_years)) for r in [:globe], s in [:total]) : nothing # just global and total for now
if save_slr_damages
# global
slr_damages = Dict(
:base => Array{Float64}(undef, n, length(MimiGIVE._damages_years)),
:modified => Array{Float64}(undef, n, length(MimiGIVE._damages_years)),
:base_lim_cnt => Array{Float64}(undef, n, length(MimiGIVE._damages_years), 145), # 145 CIAM countries
:modified_lim_cnt => Array{Float64}(undef, n, length(MimiGIVE._damages_years), 145), # 145 CIAM countries
:base_segments_2100 => Array{Float64}(undef, n, 11835) # 11,835 segments
)
# domestic
# optionally add arrays to hold the domestic base and modified damages
if compute_domestic_values
slr_damages[:base_domestic] = Array{Float64}(undef, n, length(MimiGIVE._damages_years))
slr_damages[:modified_domestic] = Array{Float64}(undef, n, length(MimiGIVE._damages_years))
end
else
slr_damages = nothing
end
ciam_base, segment_fingerprints = MimiGIVE.get_ciam(mm.base)
ciam_modified, _ = MimiGIVE.get_ciam(mm.modified)
ciam_base = Mimi.build(ciam_base)
ciam_modified = Mimi.build(ciam_modified)
# set some computation options
options = (
compute_sectoral_values=compute_sectoral_values,
compute_domestic_values=compute_domestic_values,
save_md=save_md,
save_cpc=save_cpc,
save_slr_damages=save_slr_damages,
CIAM_foresight=CIAM_foresight,
CIAM_GDPcap=CIAM_GDPcap,
certainty_equivalent=certainty_equivalent,
pulse_size=pulse_size
)
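# bundle shared state into the simulation payload so modified_post_trial_func can unpack it each trial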
payload = [scc_values, intermediate_ce_scc_values, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, options]
Mimi.set_payload2!(mcs, payload)
# Run all model years even if taking a shorter last_year - this runs unnecessary
# timesteps but simplifies accumulation
sim_results = run(mcs,
models,
n,
post_trial_func = modified_post_trial_func,
results_in_memory = false,
results_output_dir = "$output_dir/results"
)
# unpack the payload object
scc_values, intermediate_ce_scc_values, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, options = Mimi.payload2(sim_results)
# Write out the slr damages to disk in the same place that variables from the save_list would be written out
if save_slr_damages
isdir("$output_dir/results/model_1") || mkpath("$output_dir/results/model_1")
isdir("$output_dir/results/model_2") || mkpath("$output_dir/results/model_2")
# global
df = DataFrame(slr_damages[:base], :auto) |>
i -> rename!(i, Symbol.(MimiGIVE._damages_years)) |>
i -> insertcols!(i, 1, :trial => 1:n) |>
i -> stack(i, Not(:trial)) |>
i -> rename!(i, [:trial, :time, :slr_damages]) |>
save("$output_dir/results/model_1/slr_damages.csv")
df = DataFrame(slr_damages[:modified], :auto) |>
i -> rename!(i, Symbol.(MimiGIVE._damages_years)) |>
i -> insertcols!(i, 1, :trial => 1:n) |>
i -> stack(i, Not(:trial)) |>
i -> rename!(i, [:trial, :time, :slr_damages]) |>
save("$output_dir/results/model_2/slr_damages.csv")
segments = Symbol.(dim_keys(ciam_base, :segments))
df = DataFrame(slr_damages[:base_segments_2100], :auto) |>
i -> rename!(i, segments) |>
i -> insertcols!(i, 1, :trial => 1:n) |>
i -> stack(i, Not(:trial)) |>
i -> rename!(i, [:trial, :segment, :slr_damages_2100]) |>
save("$output_dir/results/model_1/slr_damages_2100_by_segment.csv")
# domestic
if compute_domestic_values
df = DataFrame(slr_damages[:base_domestic], :auto) |>
i -> rename!(i, Symbol.(MimiGIVE._damages_years)) |>
i -> insertcols!(i, 1, :trial => 1:n) |>
i -> stack(i, Not(:trial)) |>
i -> rename!(i, [:trial, :time, :slr_damages_domestic]) |>
save("$output_dir/results/model_1/slr_damages_domestic.csv")
df = DataFrame(slr_damages[:modified_domestic], :auto) |>
i -> rename!(i, Symbol.(MimiGIVE._damages_years)) |>
i -> insertcols!(i, 1, :trial => 1:n) |>
i -> stack(i, Not(:trial)) |>
i -> rename!(i, [:trial, :time, :slr_damages_domestic]) |>
save("$output_dir/results/model_2/slr_damages_domestic.csv")
end
ciam_country_names = Symbol.(dim_keys(ciam_base, :ciam_country))
df = DataFrame(:trial => [], :time => [], :country => [], :capped_flag => [])
for trial in 1:n # loop over trials
trial_df = DataFrame(slr_damages[:base_lim_cnt][trial,:,:], :auto) |>
i -> rename!(i, ciam_country_names) |>
i -> insertcols!(i, 1, :time => MimiGIVE._damages_years) |>
i -> stack(i, Not(:time)) |>
i -> insertcols!(i, 1, :trial => fill(trial, length(MimiGIVE._damages_years) * 145)) |>
i -> rename!(i, [:trial, :time, :country, :capped_flag]) |>
i -> @filter(i, _.capped_flag == 1) |>
DataFrame
append!(df, trial_df)
end
df |> save("$output_dir/results/slr_damages_base_lim_counts.csv")
df = DataFrame(:trial => [], :time => [], :country => [], :capped_flag => [])
for trial in 1:n # loop over trials
trial_df = DataFrame(slr_damages[:modified_lim_cnt][trial,:,:], :auto) |>
i -> rename!(i, ciam_country_names) |>
i -> insertcols!(i, 1, :time => MimiGIVE._damages_years) |>
i -> stack(i, Not(:time)) |>
i -> insertcols!(i, 1, :trial => fill(trial, length(MimiGIVE._damages_years) * 145)) |>
i -> rename!(i, [:trial, :time, :country, :capped_flag]) |>
i -> @filter(i, _.capped_flag == 1) |>
DataFrame
append!(df, trial_df)
end
df |> save("$output_dir/results/slr_damages_modified_lim_counts.csv")
end
expected_mu_in_year_of_emission = Dict()
if certainty_equivalent
year_index = findfirst(isequal(year), MimiGIVE._damages_years)
# In this case the normalization from utils to $ hasn't happened in the post trial function
# and instead we now do this here, based on expected per capita consumption in the year
# of the marginal emission pulse
cpc_in_year_of_emission = view(cpc_values[(region=:globe, sector=:total)], :, year_index)
for k in keys(scc_values)
expected_mu_in_year_of_emission[k] = mean(1 ./ (cpc_in_year_of_emission .^ k.eta))
end
end
# Construct the returned result object
result = Dict()
# add an :scc dictionary: keys are NamedTuples with fields (region, sector, dr_label, prtp, eta);
# values are NamedTuples holding the expected SCC, its standard error, and the vector of SCC draws
result[:scc] = Dict()
for (k,v) in scc_values
if certainty_equivalent
result[:scc][k] = (
expected_scc = mean(v),
se_expected_scc = std(v) / sqrt(n),
ce_scc = mean(intermediate_ce_scc_values[k]) ./ expected_mu_in_year_of_emission[k],
ce_sccs= intermediate_ce_scc_values[k] ./ expected_mu_in_year_of_emission[k],
sccs = v,
)
else
result[:scc][k] = (
expected_scc = mean(v),
se_expected_scc = std(v) / sqrt(n),
sccs = v
)
end
end
# add a :mds dictionary: keys are NamedTuples with fields (region, sector);
# values are n x 281 matrices covering the years 2020:2300
if save_md
result[:mds] = Dict()
for (k,v) in md_values
result[:mds][k] = v
end
end
# add a :cpc dictionary: keys are NamedTuples with fields (region, sector);
# values are n x 281 matrices covering the years 2020:2300
if save_cpc
result[:cpc] = Dict()
for (k,v) in cpc_values
result[:cpc][k] = v
end
end
return result
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1739 | module MimiGIVE
using Mimi
# External Components
using MimiFAIRv1_6_2 # Climate module
using MimiSSPs # SSP socioeconomic projections and emissions
using MimiRFFSPs # RFF socioeconomic projections and emissions
using MimiBRICK # Sea Level Rise
using Mimi_NAS_pH # Ocean PH
using MimiCIAM # Sea Level Rise Damages
using MimiMooreEtAlAgricultureImpacts # Agriculture Damages
# Constants
# (10/25/2021) BEA Table 1.1.9, line 1 GDP annual values as linked here: https://apps.bea.gov/iTable/iTable.cfm?reqid=19&step=3&isuri=1&select_all_years=0&nipa_table_list=13&series=a&first_year=2005&last_year=2020&scale=-99&categories=survey&thetable=
const pricelevel_2010_to_2005 = 87.504/96.166
const pricelevel_2005_to_2020 = 113.648/87.504
const pricelevel_1995_to_2005 = 87.504/71.823
const pricelevel_2006_to_2005 = 87.504/90.204
const pricelevel_2011_to_2005 = 87.504/98.164
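# usage: multiplying a value in year-X dollars by pricelevel_X_to_2005 converts it to
# 2005 dollars, e.g. usd2005 = usd2010 * pricelevel_2010_to_2005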
# Utilities
include("utils/utils.jl")
include("utils/lsl_downscaling.jl")
include("utils/ciam_params.jl")
# Local Helper Components
include("components/Agriculture_helper_components.jl")
include("components/PerCapitaGDP.jl")
include("components/VSL.jl")
include("components/DamageAggregator.jl")
include("components/netconsumption.jl")
include("components/identity.jl")
include("components/GlobalTempNorm.jl")
include("components/OceanHeatAccumulator.jl")
include("components/GlobalSLRNorm.jl")
include("components/Damages_RegionAggregatorSum.jl")
# Local Damage Components
include("components/energy_damages.jl")
include("components/cromar_mortality_damages.jl")
include("components/dice2016R2_damages.jl")
include("components/howard_sterner_damages.jl")
# Primary API
include("main_model.jl")
include("main_mcs.jl")
include("main_ciam.jl")
include("scc.jl")
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 9971 | using Mimi, CSVFiles, DataFrames, Query, StatsBase, XLSX, Interpolations, DelimitedFiles, Distributions
function get_ciam(m_give::Mimi.Model)
# --------------------------------------------------------------------------
# Model Parameters
# load the list of countries to use for CIAM, these are CIAM countries intersected
# with available CIAM components
ciam_countries = (load(joinpath(@__DIR__, "..", "data", "Dimension_ciam_countries.csv")) |> @select(:CountryISO) |> DataFrame |> Matrix)[:]
countries = Mimi.dim_keys(m_give, :country)
segments = Mimi.dim_keys(m_give, :segments)
# get segment fingerprint information
segment_fingerprints = load(joinpath(@__DIR__, "../data/CIAM/segment_fingerprints.csv")) |>
DataFrame |>
@filter(_.rgn in ciam_countries) |> # only the segments in the coastal countries we are using
DataFrame
segments != segment_fingerprints.segments && error("The segments in segment_fingerprints key need to match the segments in m_give.")
# time parameters
tstep = 10 # this is assumed within the slrcost component -- DO NOT CHANGE
period_length = 50
start_year = 2020
end_year = 2300
times = start_year:tstep:end_year
adaptPers = convert.(Int64, indexin(unique([start_year, start_year:period_length:end_year..., end_year]), times))
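# With the defaults above, adaptation periods begin in 2020, 2070, 2120, 2170, 2220, 2270, and 2300,
# i.e. adaptPers == [1, 6, 11, 16, 21, 26, 29] as indices into the decadal `times` vector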
# --------------------------------------------------------------------------
# Model Construction
m = Model()
set_dimension!(m, :time, length(times))
set_dimension!(m, :ciam_country, ciam_countries) # countries used in CIAM that overlap with socioeconomics
set_dimension!(m, :segments, segments) # segments in the ciam_countries
set_dimension!(m, :adaptPers, length(adaptPers))
# Add CIAM components
add_comp!(m, MimiCIAM.slrcost)
# --------------------------------------------------------------------------
# The Rest of the Parameters
rgns, segs, ciam_params = get_ciam_params(;first = start_year, tstep = tstep, last = end_year,
adaptation_firsts = adaptPers, ciam_countries = ciam_countries,
xsc_params_path = joinpath(@__DIR__,"..","data","CIAM", "xsc_ciam_countries.csv")
)
# Check Dimensions
Mimi.dim_keys(m, :ciam_country) != rgns && error("The countries in the xsc key need to match the countries in m_give.")
Mimi.dim_keys(m, :segments) != segs && error("The segments in xsc key need to match the segments in m_give.")
for (k,v) in ciam_params
# these are parameters we don't need to set; the correct one for the run
# is held in "surgeexposure"
if !(k in ["surgeexposure_dc-gtsr", "surgeexposure_gtsr"])
update_param!(m, :slrcost, Symbol(k), v)
end
end
# Set dummy variables
update_param!(m, :slrcost, :lslr, zeros(length(times), length(segments))) # local sea level rise in meters
update_param!(m, :slrcost, :pop, zeros(length(times), length(ciam_countries))) # population in millions
update_param!(m, :slrcost, :ypcc, zeros(length(times), length(ciam_countries))) # ypcc in USD in $2010/yr/person
update_param!(m, :slrcost, :vsl_ciam_country, zeros(length(times), length(ciam_countries)))
return m, segment_fingerprints
end
function update_ciam!(m, m_give, segment_fingerprints)
# time parameters
tstep = 10 # this is assumed within the slrcost component -- DO NOT CHANGE
start_year = 2020
end_year = 2300
normalization_year = 2000 # normalize sea level rise to 2000
times = start_year:tstep:end_year
# --------------------------------------------------------------------------
# Parameters from GIVE Model m_give
# Indices from GIVE Model m_give to slrcost
m_give_years = Mimi.dim_keys(m_give, :time)
time_idxs = convert.(Int64, indexin(start_year:tstep:end_year, m_give_years))
idx_2000 = findfirst(i -> i == normalization_year, m_give_years)
country_idxs = convert.(Int64, indexin(Mimi.dim_keys(m, :ciam_country), Mimi.dim_keys(m_give, :country)))
segments = Mimi.dim_keys(m, :segments)
# Downscale GMSL to LSL for CIAM segments
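# For each segment s and time t: lslr[t,s] = Σ_c fp_c(s) * (SL_c(t) - SL_c(2000)),
# where c ranges over the components {GSIC, GIS, AIS, TE, LWS} and fp_c(s) is the
# segment's fingerprint for component c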
lslr = zeros(length(times), length(segments))
for i in 1:length(times)
lslr[i,:] = segment_fingerprints.fpGSIC_loc .* (m_give[:glaciers_small_icecaps, :gsic_sea_level][time_idxs][i] .- m_give[:glaciers_small_icecaps, :gsic_sea_level][idx_2000]) +
segment_fingerprints.fpGIS_loc .* (m_give[:greenland_icesheet, :greenland_sea_level][time_idxs][i] .- m_give[:greenland_icesheet, :greenland_sea_level][idx_2000])+
segment_fingerprints.fpAIS_loc .* (m_give[:antarctic_icesheet, :ais_sea_level][time_idxs][i] .- m_give[:antarctic_icesheet, :ais_sea_level][idx_2000])+
segment_fingerprints.fpTE_loc .* (m_give[:thermal_expansion, :te_sea_level][time_idxs][i] .- m_give[:thermal_expansion, :te_sea_level][idx_2000])+
segment_fingerprints.fpLWS_loc .* (m_give[:landwater_storage, :lws_sea_level][time_idxs][i] .- m_give[:landwater_storage, :lws_sea_level][idx_2000])
end
update_param!(m, :slrcost, :lslr, lslr) # local sea level rise in meters
# Socioeconomics
# (1) select the ciam countries from the full set of countries
# (2) convert GDP and Population into Per Capita GDP (2010\$ USD per person
# per year) for the slrcost component
# (3) get the VSL for each CIAM country
population = m_give[:Socioeconomic, :population][time_idxs,country_idxs] # millions
gdp = m_give[:Socioeconomic, :gdp][time_idxs,country_idxs] .* 1/pricelevel_2010_to_2005 # billion US $2005/yr -> billion US $2010/yr
ypcc = gdp ./ population .* 1000 # USD $2010/yr/person
update_param!(m, :slrcost, :pop, population) # population in millions
update_param!(m, :slrcost, :ypcc, ypcc) # ypcc in USD in $2010/yr/person
# Calculate the VSL
# CIAM slrcost component expects vsl in millions of US $2010 dollars
vsl_ciam_country = m_give[:VSL, :vsl][time_idxs,country_idxs] .* 1/pricelevel_2010_to_2005 ./ 1e6 # US $2005/yr -> millions of US $2010/yr
update_param!(m, :slrcost, :vsl_ciam_country, vsl_ciam_country)
end
function compute_PerfectForesight_OptimalCosts_typestable(protect_cost, retreat_cost, no_adapt_cost, ntsteps, nsegments)
# These are the decision options, each is a permutation of choice and level,
# that we will allow. Note we ignore ProtectCost0 and RetreatCost0, with idxs
5 and 6 respectively, because we use allowMaintain = false.
decision_options = [(label = :ProtectCost10, choice = :ProtectCost, level = 10, idx = 1),
(label = :ProtectCost100, choice = :ProtectCost, level = 100, idx = 2),
(label = :ProtectCost1000, choice = :ProtectCost, level = 1000, idx = 3),
(label = :ProtectCost10000, choice = :ProtectCost, level = 10000, idx = 4),
(label = :RetreatCost1, choice = :RetreatCost, level = 1, idx = 1),
(label = :RetreatCost10, choice = :RetreatCost, level = 10, idx = 2),
(label = :RetreatCost100, choice = :RetreatCost, level = 100, idx = 3),
(label = :RetreatCost1000, choice = :RetreatCost, level = 1000, idx = 4),
(label = :RetreatCost10000, choice = :RetreatCost, level = 10000, idx = 5),
(label = :NoAdaptCost0, choice = :NoAdaptCost, level = 1, idx = 1)
]
noptions = length(decision_options)
# this will hold the optimal costs for each segment after considering Perfect Foresight
optimal_costs = Array{Float64}(undef, ntsteps, nsegments)
# Preallocate this array and reuse for each segment
npv = Vector{Float64}(undef, noptions)
# Precompute discount factor
df = [(1.04)^((1-t)*10) for t in 1:ntsteps]
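# e.g. df[1] = 1.0 and df[2] = 1.04^(-10) ≈ 0.676, i.e. a 4%/yr discount rate over 10-year steps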
# loop over segments finding the optimal decision for each in light of perfect foresight
# NPV and filling in the optimal costs with the undiscounted costs for that decision
for segment in 1:nsegments
npv[1:4] .= (sum(protect_cost[t,segment,level] * df[t] * 10 for t in 1:ntsteps) for level in 1:4) # remove the Maintain level (allowMaintain = false for these runs) which is index 5
npv[5:9] .= (sum(retreat_cost[t,segment,level] * df[t] * 10 for t in 1:ntsteps) for level in 1:5) # remove the Maintain level (allowMaintain = false for these runs) which is index 6
npv[10] = sum(no_adapt_cost[t,segment] * df[t] * 10 for t in 1:ntsteps)
optimal_decision = decision_options[findmin(npv)[2]]
if optimal_decision.choice == :ProtectCost
optimal_costs[:, segment] .= view(protect_cost, :, segment, optimal_decision.idx)
elseif optimal_decision.choice == :RetreatCost
optimal_costs[:, segment] .= view(retreat_cost, :, segment, optimal_decision.idx)
elseif optimal_decision.choice == :NoAdaptCost
optimal_costs[:, segment] .= view(no_adapt_cost, :, segment) # no_adapt_cost has no level dimension
else
error("Unknown option.")
end
end
return optimal_costs
end
# NPV foresight correction
# This correction accounts for the fact that the new version of CIAM considers NPV
# over the current adaptation period (50 years), whereas the previous
# GAMS version assumes NPV is known across the entire model time horizon (2000-2100,
# for example).
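# Illustrative usage of the function below (a sketch; `ciam_inst` stands for a CIAM
# Mimi.ModelInstance that has already been run):
#   optimal_costs = compute_PerfectForesight_OptimalCosts(ciam_inst) # ntsteps x nsegments matrix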
function compute_PerfectForesight_OptimalCosts(m::Mimi.ModelInstance)
ntsteps = length(Mimi.dim_keys(m, :time))
nsegments = length(Mimi.dim_keys(m, :segments))
optimal_costs = compute_PerfectForesight_OptimalCosts_typestable(
m[:slrcost, :ProtectCost],
m[:slrcost, :RetreatCost],
m[:slrcost, :NoAdaptCost],
ntsteps,
nsegments,
)
return optimal_costs
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 25818 | using Distributions, Dates, Mimi, CSVFiles, DataFrames, MimiMooreEtAlAgricultureImpacts, StatsBase
import Mimi: SampleStore, add_RV!, add_transform!, add_save!
"""
get_mcs(trials;
socioeconomics_source::Symbol = :RFF,
mcs_years = 1750:2300,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
save_list::Vector = [],
Agriculture_gtap::String = "midDF"
)
Return a Monte Carlo Simulation definition of type Mimi.SimulationDefinition that
holds all random variables and distributions, as assigned to model component/parameter
pairs, that will be used in a Monte Carlo Simulation.
- `trials` (required) - number of trials to be run, used for presampling
- `socioeconomics_source` (default :RFF) - which source the Socioeconomics component uses
- `fair_parameter_set` (default :random) - :random means FAIR mcs samples will be
chosen randomly from the provided sets, while :deterministic means they will
be based on the vector provided to the `fair_parameter_set_ids` keyword argument.
- `fair_parameter_set_ids` (default nothing) - if `fair_parameter_set` is set
to :deterministic, this `n` element vector provides the fair parameter set ids
that will be run, otherwise it is set to `nothing` and ignored.
- `rffsp_sampling` (default :random) - which sampling strategy to use for the RFF
SPs, :random means RFF SPs will be chosen randomly, while :deterministic means they
will be based on the vector provided to the `rffsp_sampling_ids` keyword argument.
- `rffsp_sampling_ids` (default nothing) - if `rffsp_sampling` is set to :deterministic,
this `n` element vector provides the RFF SP ids that will be run, otherwise it is
set to `nothing` and ignored.
- `save_list` (default []) - which parameters and variables to save for each trial,
entered as a vector of Tuples (:component_name, :variable_name)
- Agriculture_gtap (default midDF) - specify the `Agriculture_gtap` input parameter as one of
`["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]`, indicating which
gtap damage function the component should use.
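
A minimal illustrative call (a sketch; the trial count and saved variable are placeholders):

    mcs = get_mcs(100; socioeconomics_source = :RFF, save_list = [(:temperature, :T)])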
"""
function get_mcs(trials;
socioeconomics_source::Symbol = :RFF,
mcs_years = 1750:2300,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
save_list::Vector = [],
Agriculture_gtap::String = "midDF"
)
# check some argument conditions
if fair_parameter_set == :deterministic
isnothing(fair_parameter_set_ids) && error("If `fair_parameter_set` is :deterministic, must provide a `fair_parameter_set_ids` vector.")
length(fair_parameter_set_ids) !== trials && error("The length of the provided `fair_parameter_set_ids` vector must be equal to the number of trials ($trials) run.")
(sum(fair_parameter_set_ids .> 2237) > 0 || sum(fair_parameter_set_ids .< 1) > 0) && error("FAIR parameter set ids must be between 1 and 2237, inclusive.")
end
if rffsp_sampling == :deterministic
isnothing(rffsp_sampling_ids) && error("If `rffsp_sampling` is :deterministic, must provide a `rffsp_sampling_ids` vector.")
length(rffsp_sampling_ids) !== trials && error("The length of the provided `rffsp_sampling_ids` vector must be equal to the number of trials ($trials) run.")
(sum(rffsp_sampling_ids .> 10_000) > 0 || sum(rffsp_sampling_ids .< 1) > 0) && error("RFF SP sample ids must be between 1 and 10,000, inclusive.")
end
# define the Monte Carlo Simulation and add some simple random variables
mcs = @defsim begin
dice2016R2_damage.a2 = Normal(0.00236, 0.00236/2) # Nordhaus (2017, PNAS) DICE2016 RV
end
# Howard and Sterner (2017) Damage specification table 2 column 3
hs_μ_3 = [ 0.595382733860703; 0.259851128136597]
hs_σ_3 = [ 0.0322523508274087 -0.0373892045213768
-0.0373892045213768 0.063496518648112]
hs_distribution_3 = MvNormal(hs_μ_3, hs_σ_3)
hs_coefficients_3 = rand(hs_distribution_3, trials)
add_RV!(mcs, :rv_hs_damage_t2_base_3, SampleStore(hs_coefficients_3[1,:]))
add_transform!(mcs, :hs_damage, :t2_base_3, :(=), :rv_hs_damage_t2_base_3)
add_RV!(mcs, :rv_hs_damage_t2_cat_3, SampleStore(hs_coefficients_3[2,:]))
add_transform!(mcs, :hs_damage, :t2_cat_3, :(=), :rv_hs_damage_t2_cat_3)
# Howard and Sterner (2017) Damage specification table 2 column 4
hs_μ_4 = [ 0.595382733860703; 0.259851128136597; 0.113324887895228]
hs_σ_4 = [ 0.0362838946808348 -0.0420628550865489 0.
-0.0420628550865489 0.0714335834791260 0.
0. 0. 0.0157459807497214]
hs_distribution_4 = MvNormal(hs_μ_4, hs_σ_4)
hs_coefficients_4 = rand(hs_distribution_4, trials)
add_RV!(mcs, :rv_hs_damage_t2_base_4, SampleStore(hs_coefficients_4[1,:]))
add_transform!(mcs, :hs_damage, :t2_base_4, :(=), :rv_hs_damage_t2_base_4)
add_RV!(mcs, :rv_hs_damage_t2_cat_4, SampleStore(hs_coefficients_4[2,:]))
add_transform!(mcs, :hs_damage, :t2_cat_4, :(=), :rv_hs_damage_t2_cat_4)
add_RV!(mcs, :rv_hs_damage_t2_prod_4, SampleStore(hs_coefficients_4[3,:]))
add_transform!(mcs, :hs_damage, :t2_prod_4, :(=), :rv_hs_damage_t2_prod_4)
# Howard and Sterner (2017) Damage specification table 2 column 7
hs_μ_7 = [ 0.318149737017145; 0.362274271711041]
hs_σ_7 = [ 0.00953254601993184 -0.00956576259414058
-0.00956576259414058 0.00970956896549987]
hs_distribution_7 = MvNormal(hs_μ_7, hs_σ_7)
hs_coefficients_7 = rand(hs_distribution_7, trials)
add_RV!(mcs, :rv_hs_damage_t2_base_7, SampleStore(hs_coefficients_7[1,:]))
add_transform!(mcs, :hs_damage, :t2_base_7, :(=), :rv_hs_damage_t2_base_7)
add_RV!(mcs, :rv_hs_damage_t2_cat_7, SampleStore(hs_coefficients_7[2,:]))
add_transform!(mcs, :hs_damage, :t2_cat_7, :(=), :rv_hs_damage_t2_cat_7)
# Howard and Sterner (2017) Damage specification table 2 column 8
hs_μ_8 = [ 0.318149737017145; 0.362274271711041; 0.398230480262918]
hs_σ_8 = [ 0.0104404075456397 -0.0104767876031064 0.
-0.0104767876031064 0.010634289819357 0.
0. 0. 0.0563560833680617]
hs_distribution_8 = MvNormal(hs_μ_8, hs_σ_8)
hs_coefficients_8 = rand(hs_distribution_8, trials)
add_RV!(mcs, :rv_hs_damage_t2_base_8, SampleStore(hs_coefficients_8[1,:]))
add_transform!(mcs, :hs_damage, :t2_base_8, :(=), :rv_hs_damage_t2_base_8)
add_RV!(mcs, :rv_hs_damage_t2_cat_8, SampleStore(hs_coefficients_8[2,:]))
add_transform!(mcs, :hs_damage, :t2_cat_8, :(=), :rv_hs_damage_t2_cat_8)
add_RV!(mcs, :rv_hs_damage_t2_prod_8, SampleStore(hs_coefficients_8[3,:]))
add_transform!(mcs, :hs_damage, :t2_prod_8, :(=), :rv_hs_damage_t2_prod_8)
# add the socioeconomics RV if the socioeconomics source is Mimi RFF SPs
# Use SampleStore for a deterministic RFF SP sampling approach, otherwise
# use an EmpiricalDistribution across all ids (equal probability is assumed
# if probabilities not provided)
if socioeconomics_source == :RFF
distrib = rffsp_sampling == :random ? EmpiricalDistribution(collect(1:10_000)) : SampleStore(rffsp_sampling_ids)
add_RV!(mcs, :socio_id_rv, distrib)
add_transform!(mcs, :Socioeconomic, :id, :(=), :socio_id_rv)
end
# add BRICK random variables - assign one Normally distributed RV per year
for year in mcs_years
rv_name = Symbol("rv_landwater_storage_$year")
add_RV!(mcs, rv_name, Normal(0.0003, 0.00018))
add_transform!(mcs, :landwater_storage, :lws_random_sample, :(=), rv_name, [year])
end
BRICK_parameters = load(joinpath(@__DIR__, "..", "data", "BRICK_posterior_parameters_10k.csv")) |> DataFrame
BRICK_uncertain_parameters = [
(source_name=:thermal_s0, comp_name=:thermal_expansion, param_name=:te_s₀),
(source_name=:thermal_alpha, comp_name=:thermal_expansion, param_name=:te_α),
(source_name=:glaciers_v0, comp_name=:glaciers_small_icecaps, param_name=:gsic_v₀),
(source_name=:glaciers_s0, comp_name=:glaciers_small_icecaps, param_name=:gsic_s₀),
(source_name=:glaciers_beta0, comp_name=:glaciers_small_icecaps, param_name=:gsic_β₀),
(source_name=:glaciers_n, comp_name=:glaciers_small_icecaps, param_name=:gsic_n),
(source_name=:greenland_v0, comp_name=:greenland_icesheet, param_name=:greenland_v₀),
(source_name=:greenland_a, comp_name=:greenland_icesheet, param_name=:greenland_a),
(source_name=:greenland_b, comp_name=:greenland_icesheet, param_name=:greenland_b),
(source_name=:greenland_alpha, comp_name=:greenland_icesheet, param_name=:greenland_α),
(source_name=:greenland_beta, comp_name=:greenland_icesheet, param_name=:greenland_β),
(source_name=:anto_alpha, comp_name=:antarctic_ocean, param_name=:anto_α),
(source_name=:anto_beta, comp_name=:antarctic_ocean, param_name=:anto_β),
(source_name=:ais_gamma, comp_name=:antarctic_icesheet, param_name=:ais_γ),
(source_name=:ais_alpha, comp_name=:antarctic_icesheet, param_name=:ais_α),
(source_name=:ais_mu, comp_name=:antarctic_icesheet, param_name=:ais_μ),
(source_name=:ais_v, comp_name=:antarctic_icesheet, param_name=:ais_ν),
(source_name=:ais_precip0, comp_name=:antarctic_icesheet, param_name=:ais_precipitation₀),
(source_name=:ais_kappa, comp_name=:antarctic_icesheet, param_name=:ais_κ),
(source_name=:ais_flow0, comp_name=:antarctic_icesheet, param_name=:ais_iceflow₀),
(source_name=:ais_runoff_height0, comp_name=:antarctic_icesheet, param_name=:ais_runoffline_snowheight₀),
(source_name=:ais_c, comp_name=:antarctic_icesheet, param_name=:ais_c),
(source_name=:ais_bedheight0, comp_name=:antarctic_icesheet, param_name=:ais_bedheight₀),
(source_name=:ais_slope, comp_name=:antarctic_icesheet, param_name=:ais_slope),
(source_name=:ais_lambda, comp_name=:antarctic_icesheet, param_name=:λ),
(source_name=:ais_temp_threshold, comp_name=:antarctic_icesheet, param_name=:temperature_threshold),
(source_name=:antarctic_s0, comp_name=:antarctic_icesheet, param_name=:ais_sea_level₀), # DOUBLE CHECK
]
for p in BRICK_uncertain_parameters
rv_name = Symbol("rv_brick_$(p.source_name)")
add_RV!(mcs, rv_name, SampleStore(BRICK_parameters[:,p.source_name]))
add_transform!(mcs, p.comp_name, p.param_name, :(=), rv_name)
end
# add Agriculture mcs over gtap region damage function parameterizations
ag_sample_stores = MimiMooreEtAlAgricultureImpacts.get_probdists_gtap_df(Agriculture_gtap, trials)
# If ag sample stores are available for a given Agriculture_gtap damage function
# then ag_sample_stores will hold them; otherwise the call above prints a
# warning and returns `nothing`.
if !isnothing(ag_sample_stores)
for coef in [1,2,3] # three coefficients defined with an anonymous dimension
for (i, region) in enumerate(["USA","CAN","WEU","JPK","ANZ","EEU","FSU","MDE","CAM","LAM","SAS","SEA","CHI","MAF","SSA","SIS"]) # fund regions for ag
rv_name = Symbol("rv_gtap_coef$(coef)_$region")
add_RV!(mcs, rv_name, ag_sample_stores[i, coef])
add_transform!(mcs, :Agriculture, :gtap_df, :(=), rv_name, [region, coef])
end
end
end
# add Cromar uncertainty based on coefficients from Cromar et al.
cromar_coeffs = load(joinpath(@__DIR__, "..", "data", "CromarMortality_damages_coefficients.csv")) |> DataFrame
cromar_mapping_raw = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_cromar_mortality_regions.csv")) |> DataFrame
# Get one random variable per region
for (i, region) in enumerate(cromar_coeffs[!, "Cromar Region Name"])
rv_name = Symbol("rv_β_mortality_$(region)")
add_RV!(mcs, rv_name, Normal(cromar_coeffs[i, "Pooled Beta"], cromar_coeffs[i, "Pooled SE"]))
end
# add one transform per country assigning each to the appropriate regional random variable
for row in 1:size(cromar_mapping_raw, 1)
rv_name = Symbol("rv_β_mortality_$(cromar_mapping_raw.cromar_region[row])")
add_transform!(mcs, :CromarMortality, :β_mortality, :(=), rv_name, [cromar_mapping_raw.ISO3[row]])
end
# add the FAIR random variables and transforms - note this could be done within
# the @defsim macro but we use the dictionary to make this less verbose
# Note that if a parameter component is not included in add_transform!, the
# parameters are shared model parameters, and each line will create ONE random
# variable and assign all component parameters connected to that shared model
# parameter to the value taken on by that random variable
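# Schematic of the add_RV!/add_transform! pattern used below (names illustrative):
#   add_RV!(mcs, :rv_example, SampleStore(values)) # define a random variable over presampled values
#   add_transform!(mcs, :example_parameter, :(=), :rv_example) # assign it to a shared model parameter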
fair_samples_map, fair_samples = get_fair_mcs_params(trials; fair_parameter_set=fair_parameter_set, fair_parameter_set_ids=fair_parameter_set_ids)
fair_samples_left = deepcopy(fair_samples) # we know we've added everything when this is empty!
# add and assign all random variables for single dimensional parameters
for (k,v) in fair_samples_left
if size(v, 2) == 1 # one column of values
rv_name = Symbol("rv_$k")
add_RV!(mcs, rv_name, SampleStore(fair_samples[k][!, 1]))
add_transform!(mcs, k, :(=), rv_name)
delete!(fair_samples_left, k)
end
end
# assign one random variable per year with a unique distribution from fair_samples
# assume the F_solar parameter set defines values starting in 1750 with 361 years total
for year in 1750:2110
rv_name = Symbol("rv_F_solar_$year")
add_RV!(mcs, rv_name, SampleStore(fair_samples[:F_solar][!,string(year)]))
add_transform!(mcs, :F_solar, :(=), rv_name, [year])
end
delete!(fair_samples_left, :F_solar)
# Radiative forcing scaling - one distribution per "other" greenhouse gas, and
# one per ods
for gas in names(fair_samples[:scale_other_ghg])
rv_name = Symbol("rv_scale_other_ghg_$(gas)")
add_RV!(mcs, rv_name, SampleStore(fair_samples[:scale_other_ghg][!, gas]))
add_transform!(mcs, :scale_other_ghg, :(=), rv_name, [gas])
end
delete!(fair_samples_left, :scale_other_ghg)
for ods in names(fair_samples[:scale_ods])
rv_name = Symbol("rv_scale_ods_$(ods)")
add_RV!(mcs, rv_name, SampleStore(fair_samples[:scale_ods][!, ods]))
add_transform!(mcs, :scale_ods, :(=), rv_name, [ods])
end
delete!(fair_samples_left, :scale_ods)
# ocean_heat_capacity takes an anonymous dim of 2 (deep and mixed, should label
# explicitly) - anonymous dims are named with Ints 1 and 2
rv_name = Symbol("rv_ocean_heat_capacity_1")
add_RV!(mcs, rv_name, SampleStore(fair_samples[:ocean_heat_capacity][!, "1"]))
add_transform!(mcs, :ocean_heat_capacity, :(=), rv_name, [1])
rv_name = Symbol("rv_ocean_heat_capacity_2")
add_RV!(mcs, rv_name, SampleStore(fair_samples[:ocean_heat_capacity][!, "2"]))
add_transform!(mcs, :ocean_heat_capacity, :(=), rv_name, [2])
delete!(fair_samples_left, :ocean_heat_capacity)
# check if we've added all FAIR parameters
isempty(fair_samples_left) ? nothing : error("The following FAIR mcs uncertain parameters have not been added to the simulation: $(keys(fair_samples_left))")
# add the requested saved variables
for i in save_list
add_save!(mcs, i)
end
return mcs
end
"""
run_mcs(;trials::Int64 = 10000,
output_dir::Union{String, Nothing} = nothing,
save_trials::Bool = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
m::Mimi.Model = get_model(),
save_list::Vector = [],
results_in_memory::Bool = true,
)
Return the results of a Monte Carlo Simulation with the defined number of trials
and save data into the `output_dir` folder, optionally also saving trials if
`save_trials` is set to `true`. If no model is provided, use the default model
returned by get_model().
- `trials` (default 10,000) - number of trials to be run, used for presampling
- `output_dir` (default constructed folder name) - folder to hold results
- `save_trials` (default false) - whether to save all random variables for all trials to trials.csv
- `fair_parameter_set` (default :random) - :random means FAIR mcs samples will be
chosen randomly from the provided sets, while :deterministic means they will
be based on the vector provided to the `fair_parameter_set_ids` keyword argument.
- `fair_parameter_set_ids` - (default nothing) - if `fair_parameter_set` is set
to :deterministic, this `n` element vector provides the fair parameter set ids
that will be run, otherwise it is set to `nothing` and ignored.
- `rffsp_sampling` (default :random) - which sampling strategy to use for the RFF
SPs, :random means RFF SPs will be chosen randomly, while :deterministic means they
will be based on the vector provided to the `rffsp_sampling_ids` keyword argument.
- `rffsp_sampling_ids` - (default nothing) - if `rffsp_sampling` is set to :deterministic,
this `n` element vector provides the RFF SP ids that will be run, otherwise it is
set to `nothing` and ignored.
- `m` (default get_model()) - the model to run the simulation for
- `save_list` (default []) - which parameters and variables to save for each trial,
entered as a vector of Tuples (:component_name, :variable_name)
- `results_in_memory` (default true) - this should be turned off if you are running
into memory problems; data will be streamed out to disk but not saved in memory
to the mcs object
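
A minimal illustrative call (a sketch; the trial count and saved variable are placeholders):

    results = run_mcs(trials = 100, save_list = [(:temperature, :T)])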
"""
function run_mcs(;trials::Int64 = 10000,
output_dir::Union{String, Nothing} = nothing,
save_trials::Bool = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
m::Mimi.Model = get_model(),
save_list::Vector = [],
results_in_memory::Bool = true,
)
m = deepcopy(m) # in the case that an `m` was provided, be careful that we don't modify the original
trials < 2 && error("Must run `run_mcs` function with a `trials` argument greater than 1 due to a Mimi specification about SampleStores. TO BE FIXED SOON!")
# Set up output directories
output_dir = output_dir === nothing ? joinpath(@__DIR__, "../output/mcs/", "MCS $(Dates.format(now(), "yyyy-mm-dd HH-MM-SS")) MC$trials") : output_dir
isdir("$output_dir/results") || mkpath("$output_dir/results")
trials_output_filename = save_trials ? joinpath("$output_dir/trials.csv") : nothing
socioeconomics_module = _get_module_name(m, :Socioeconomic)
if socioeconomics_module == :MimiSSPs
socioeconomics_source = :SSP
elseif socioeconomics_module == :MimiRFFSPs
socioeconomics_source = :RFF
end
Agriculture_gtap = _get_mooreag_gtap(m)
# Get an instance of the mcs
mcs = get_mcs(trials;
socioeconomics_source = socioeconomics_source,
mcs_years = Mimi.time_labels(m),
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
save_list = save_list,
Agriculture_gtap = Agriculture_gtap
)
# run monte carlo trials
results = run(mcs,
m,
trials;
trials_output_filename = trials_output_filename,
results_output_dir = "$output_dir/results",
results_in_memory = results_in_memory
)
return results
end
"""
get_fair_mcs_params(n::Int;
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Nothing, Vector{Int}}
)
Return the FAIR mcs parameters mapped from parameter name to string name, and a dictionary
using the parameter names as keys and a DataFrame holding the values as a value.
If fair_parameter_set is :random (default), then FAIR mcs samples will be chosen
randomly from the provided sets. If it is set to :deterministic, they will be the vector
provided by fair_parameter_set_ids.
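
A minimal illustrative call (a sketch):

    names_map, params = get_fair_mcs_params(100; fair_parameter_set_ids = nothing)
    params[:F2x] # DataFrame of 100 sampled values for the FAIR F2x parameter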
"""
function get_fair_mcs_params(n::Int;
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Nothing, Vector{Int}}
)
# check some argument conditions
if fair_parameter_set == :deterministic
isnothing(fair_parameter_set_ids) && error("If `fair_parameter_set` is :deterministic, must provide a `fair_parameter_set_ids` vector.")
length(fair_parameter_set_ids) !== n && error("The length of the provided `fair_parameter_set_ids` vector must be equal to the number of trials ($n) run.")
(sum(fair_parameter_set_ids .> 2237) > 0 || sum(fair_parameter_set_ids .< 1) > 0) && error("FAIR parameter set ids must be between 1 and 2237, inclusive.")
end
names_map = get_fair_mcs_params_map()
params_dict = Dict()
if fair_parameter_set == :deterministic
samples = fair_parameter_set_ids
elseif fair_parameter_set == :random
samples = sample(collect(1:2237), n, replace=true) # randomly sample n of the 2237 available parameter sets, with replacement
end
for (k,v) in names_map
values = load(joinpath(@__DIR__, "..", "data", "FAIR_mcs", "fair_mcs_params_$v.csv")) |> DataFrame # load the deterministic set of 2237 parameters
push!(params_dict, k => values[samples,:])
end
return names_map, params_dict
end
"""
get_fair_mcs_params_map()
Return a dictionary of FAIR elements with the FAIR v1.6.2 parameter name being
the component, parameter pair and the value being the parameter csv name.
"""
function get_fair_mcs_params_map()
return Dict(
:β_CO => "b_aero_CO",
:scale_CH₄ => "scale_CH4",
:F_solar => "F_solar",
:Ψ_CH₄ => "b_tro3_CH4",
:scale_N₂O => "scale_N2O",
:CO₂_pi => "C_pi",
:deep_ocean_efficacy => "deep_ocean_efficacy",
:scale_bcsnow => "scale_bcsnow",
:scale_aerosol_direct_OC => "scale_aerosol_direct_OC",
:b_SOx => "ghan_params_SOx",
:feedback => "ozone_feedback",
:scale_O₃ => "scale_O3",
:b_POM => "ghan_params_b_POM",
:r0_co2 => "r0",
:β_NH3 => "b_aero_NH3",
:lambda_global => "lambda_global",
:scale_landuse => "scale_landuse",
:scale_volcanic => "scale_volcanic",
:scale_aerosol_direct_SOx => "scale_aerosol_direct_SOx",
:β_NOx => "b_aero_NOx",
:Ψ_N₂O => "b_tro3_N2O",
:ocean_heat_capacity => "ocean_heat_capacity",
:β_OC => "b_aero_OC",
:scale_solar => "scale_solar",
:rC_co2 => "rc",
:scale_aerosol_direct_BC => "scale_aerosol_direct_BC",
:scale_CH₄_H₂O => "scale_CH4_H2O",
:scale_aerosol_indirect => "scale_aerosol_indirect",
:scale_ods => "scale_ods",
:Ψ_CO => "b_tro3_CO",
:scale_aerosol_direct_NOx_NH3 => "scale_aerosol_direct_NOx_NH3",
:scale_other_ghg => "scale_other_ghg",
:Ψ_NMVOC => "b_tro3_NMVOC",
:F2x => "F2x",
:β_SOx => "b_aero_SOx",
:β_NMVOC => "b_aero_NMVOC",
:rT_co2 => "rt",
:β_BC => "b_aero_BC",
:scale_CO₂ => "scale_CO2",
:Ψ_ODS => "b_tro3_ODS",
:scale_aerosol_direct_CO_NMVOC => "scale_aerosol_direct_CO_NMVOC",
:Ψ_NOx => "b_tro3_NOx",
:ocean_heat_exchange => "ocean_heat_exchange",
:ϕ => "ghan_params_Pi"
)
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 41465 | using Mimi, CSVFiles, DataFrames, Query, StatsBase, XLSX, Interpolations, DelimitedFiles, Distributions
"""
get_model(; Agriculture_gtap::String = "midDF",
socioeconomics_source::Symbol = :RFF,
SSP_scenario::Union{Nothing, String} = nothing,
RFFSPsample::Union{Nothing, Int} = nothing,
Agriculture_floor_on_damages::Bool = true,
Agriculture_ceiling_on_benefits::Bool = false,
vsl::Symbol= :epa
)
Get a GIVE model with the given argument settings.
-- Socioeconomic --
- socioeconomics_source (default :RFF) - The options are :RFF, which uses data from
the RFF socioeconomic projections, or :SSP, which uses data from one of the
Shared Socioeconomic Pathways
- SSP_scenario (default to nothing) - This setting is used only if one is using
the SSPs as the socioeconomics_source, and the current options are "SSP119",
"SSP126", "SSP245", "SSP370", "SSP585", and this will be used as follows.
See the SSPs component here: https://github.com/anthofflab/MimiSSPs.jl for more information.
(1) Select the population and GDP trajectories for 2020 through 2300, mapping
each RCMIP scenario to the SSP (SSP1, 2, 3, 5 respectively)
(2) Choose the ar6 scenario for data from 1750 - 2019 and the RCMIP emissions
scenario from the MimiSSPs component to pull Leach et al. RCMIP scenario
data for 2020 to 2300 for CO2, CH4, and N2O.
(NOTE) that if the socioeconomics_source is :RFF this will not be consequential
and ssp245 will be used for the ar6 data from 1750 - 2019 and trace gases
from 2020 onwards, while emissions for CO2, CH4, and N2O will come from
the MimiRFFSPs component.
- RFFSPsample (default to nothing, which will use the default in MimiRFFSPs) - choose
the sample for which to run the RFF SP. See the RFFSPs component here:
https://github.com/rffscghg/MimiRFFSPs.jl.
-- Agriculture --
- Agriculture_gtap (default midDF) - specify the `Agriculture_gtap` input parameter as one of
`["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]`, indicating which
gtap damage function the component should use.
- Agriculture_floor_on_damages (default true) - If `Agriculture_gtap_floor_on_damages` = true, then
the agricultural damages (negative values of the `agcost` variable) in each
timestep will not be allowed to exceed 100% of the size of the agricultural
sector in each region.
- Agriculture_ceiling_on_benefits (default false) - If `Agriculture_gtap_ceiling_on_benefits` = true,
then the agricultural benefits (positive values of the `agcost` variable) in
each timestep will not be allowed to exceed 100% of the size of the agricultural
sector in each region.
-- Other --
- vsl (default :epa) - specify the source of the value of statistical life (VSL) being used in the model
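
A minimal illustrative call (a sketch; the scenario is one of the options listed above):

    m = get_model(socioeconomics_source = :SSP, SSP_scenario = "SSP245")
    run(m)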
"""
function get_model(; Agriculture_gtap::String = "midDF",
socioeconomics_source::Symbol = :RFF,
SSP_scenario::Union{Nothing, String} = nothing,
RFFSPsample::Union{Nothing, Int} = nothing,
Agriculture_floor_on_damages::Bool = true,
Agriculture_ceiling_on_benefits::Bool = false,
vsl::Symbol= :epa
)
# --------------------------------------------------------------------------
# MODEL - Check Arguments
# --------------------------------------------------------------------------
if socioeconomics_source == :SSP && isnothing(SSP_scenario)
error("The socioeconomics_source argument :SSP requires setting a SSP_scenario")
end
if socioeconomics_source == :RFF && !isnothing(SSP_scenario)
@warn("You have set a SSP_scenario to a non-nothing value, but note that setting the socioeconomics_source argument to :RFF means that this will have no effect on the model.")
end
# Restrictions on arguments
socioeconomics_source_options = [:SSP, :RFF]
socioeconomics_source in socioeconomics_source_options ? nothing : error("The socioeconomics_source must be one of $(socioeconomics_source_options)")
Agriculture_gtap in MimiMooreEtAlAgricultureImpacts.gtaps ? nothing : error("Unknown GTAP dataframe specification: \"$Agriculture_gtap\". Must be one of the following: $(MimiMooreEtAlAgricultureImpacts.gtaps)")
SSP_scenario_options = [nothing, "SSP119", "SSP126", "SSP245", "SSP370", "SSP585"]
SSP_scenario in SSP_scenario_options ? nothing : error("The SSP_scenario must be one of $(SSP_scenario_options)")
# --------------------------------------------------------------------------
# MODEL - Model Data and Settings
# --------------------------------------------------------------------------
# dimensions and countries/regions lists
countries = (load(joinpath(@__DIR__, "..", "data", "Dimension_countries.csv")) |> @select(:CountryISO) |> DataFrame |> Matrix)[:]
ciam_countries = (load(joinpath(@__DIR__, "..", "data", "Dimension_ciam_countries.csv")) |> @select(:CountryISO) |> DataFrame |> Matrix)[:]
fund_regions = (load(joinpath(@__DIR__, "..", "data", "Dimension_fund_regions.csv")) |> @select(:fund_region) |> DataFrame |> Matrix)[:]
gcam_regions = (load(joinpath(@__DIR__, "..", "data", "Dimension_gcam_energy_regions.csv")) |> @select(:gcam_energy_region) |> DataFrame |> Matrix)[:] # not currently a dimension in model
cromar_regions = (load(joinpath(@__DIR__, "..", "data", "Dimension_cromar_mortality_regions.csv")) |> @select(:cromar_mortality_region) |> DataFrame |> Matrix)[:] # not currently a dimension in model
domestic_countries = ["USA", "PRI"] # Country ISO3 codes to be accumulated for domestic
# Create country-region (FUND derived) mapping for Agriculture damage function
ag_mapping = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_fund_regions.csv")) |> DataFrame
ag_mapping.ISO3 != countries && error("FUND mapping file ISO3 column must match model countries vector exactly.")
sort(unique(ag_mapping.fundregion)) != sort(fund_regions) && error("FUND mapping file fund_regions column must match model fund_regions vector exactly (when both are sorted).")
ag_mapping = ag_mapping.fundregion
# Create country-region mapping for GCAM energy damage function.
energy_mapping = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_gcam_energy_regions.csv")) |> DataFrame
energy_mapping.ISO3 != countries && error("GCAM mapping file ISO3 column must match model countries vector exactly.")
sort(unique(energy_mapping.gcamregion)) != sort(gcam_regions) && error("GCAM mapping file gcam_regions column must match model gcamregions vector exactly (when both are sorted).")
energy_mapping = energy_mapping.gcamregion
# Create country-region mapping for Cromar et al. temperature-mortality damage function.
cromar_mapping = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_cromar_mortality_regions.csv")) |> DataFrame
cromar_mapping.ISO3 != countries && error("Cromar mortality mapping file ISO3 column must match model countries vector exactly.")
sort(unique(cromar_mapping.cromar_region)) != sort(cromar_regions) && error("Cromar mortality mapping file cromar_region column must match model cromar_regions vector exactly (when both are sorted).")
cromar_mapping = cromar_mapping.cromar_region
# BRICK Fingerprinting
segment_fingerprints = load(joinpath(@__DIR__, "../data/CIAM/segment_fingerprints.csv")) |>
DataFrame |>
@filter(_.rgn in ciam_countries) |> # reduce to the segments in the coastal countries we are using
DataFrame
# get the ar6 forcing scenario to be used for the FAIR model and Mortality component
if socioeconomics_source == :RFF
ar6_scenario = "ssp245" # use SSP245 emissions scenario as the basis for trace gases for RFF SP
elseif socioeconomics_source == :SSP
ar6_scenario = lowercase(SSP_scenario)
end
# Baseline mortality uses SSP2 as a proxy for SSP4 and SSP1 as a proxy for
# SSP5 per instructions from the literature
mortality_SSP_map = Dict("SSP1" => "SSP1", "SSP2" => "SSP2", "SSP3" => "SSP3", "SSP4" => "SSP2", "SSP5" => "SSP1")
# Grab the SSP name from the full scenario ie. SSP2 from SSP245
if socioeconomics_source == :SSP
SSP = SSP_scenario[1:4]
else
SSP = nothing
end
# --------------------------------------------------------------------------
# Model Construction
# --------------------------------------------------------------------------
# component first and lasts
model_first = 1750
brick_first = 1850
damages_first = 2020
model_last = 2300
# Start with an instance of the FAIR model.
m = MimiFAIRv1_6_2.get_model(start_year=model_first, end_year=model_last, ar6_scenario = ar6_scenario)
# Set Dimensions
set_dimension!(m, :time, model_first:model_last) # used in all components - already set in FAIR but reset for clarity
set_dimension!(m, :country, countries) # used in most components
set_dimension!(m, :fund_regions, fund_regions) # Agriculture components
set_dimension!(m, :segments, segment_fingerprints.segments) # BRICK components
set_dimension!(m, :ag_mapping_input_regions, countries) # Agriculture Aggregator components
set_dimension!(m, :ag_mapping_output_regions, fund_regions) # Agriculture Aggregator components
set_dimension!(m, :energy_countries, countries) # Countries used in energy damage function
set_dimension!(m, :domestic_countries, domestic_countries) # Country ISO3 codes to be accumulated for domestic
# Add Socioeconomics component BEFORE the FAIR model to allow for emissions feedbacks after damages_first year
if socioeconomics_source == :RFF
add_comp!(m, MimiRFFSPs.SPs, :Socioeconomic, first = damages_first, before = :ch4_cycle);
elseif socioeconomics_source == :SSP
add_comp!(m, MimiSSPs.SSPs, :Socioeconomic, first = damages_first, before = :ch4_cycle);
end
# Add PerCapitaGDP component
add_comp!(m, PerCapitaGDP, :PerCapitaGDP, first=damages_first, after = :Socioeconomic);
# Add VSL component
add_comp!(m, VSL, :VSL, first=damages_first, after = :PerCapitaGDP);
# We add an identity component that simply passes values through here
# This makes it easier to later insert the marginal emission modification component
# between two components that don't use backup data
add_comp!(m, IdentityComponent_co2, :co2_emissions_identity, before = :co2_cycle);
add_comp!(m, IdentityComponent_ch4, :ch4_emissions_identity, before = :ch4_cycle);
add_comp!(m, IdentityComponent_n2o, :n2o_emissions_identity, before = :n2o_cycle);
# Add Temperature Normalization Components
add_comp!(m, GlobalTempNorm, :TempNorm_1880, after = :temperature); # Howard and Sterner
add_comp!(m, GlobalTempNorm, :TempNorm_1900, after = :TempNorm_1880); # DICE
add_comp!(m, GlobalTempNorm, :TempNorm_1850to1900, after = :TempNorm_1900); # Useful Reference to IPCC
add_comp!(m, GlobalTempNorm, :TempNorm_1995to2005, after = :TempNorm_1850to1900); # Agriculture
# Add Ocean Heat Accumulator to Link FAIR and BRICK
add_comp!(m, OceanHeatAccumulator, after = :TempNorm_1995to2005);
# Add BRICK components
add_comp!(m, MimiBRICK.antarctic_ocean, first = brick_first, after = :OceanHeatAccumulator);
add_comp!(m, MimiBRICK.antarctic_icesheet, first = brick_first, after = :antarctic_ocean);
add_comp!(m, MimiBRICK.glaciers_small_icecaps, first = brick_first, after = :antarctic_icesheet);
add_comp!(m, MimiBRICK.greenland_icesheet, first = brick_first, after = :glaciers_small_icecaps);
add_comp!(m, MimiBRICK.thermal_expansion, first = brick_first, after = :greenland_icesheet);
add_comp!(m, MimiBRICK.landwater_storage, first = brick_first, after = :thermal_expansion);
add_comp!(m, MimiBRICK.global_sea_level, first = brick_first, after = :landwater_storage);
# Add SLR Normalization components
add_comp!(m, GlobalSLRNorm, :GlobalSLRNorm_1900, first = brick_first, after = :global_sea_level)
# Add OceanPH components
add_comp!(m, Mimi_NAS_pH.ocean_pH, :OceanPH, after = :GlobalSLRNorm_1900);
# Add CromarMortality component
add_comp!(m, cromar_mortality_damages, :CromarMortality, first = damages_first, after = :OceanPH)
# Add Agriculture components
add_comp!(m, Agriculture_RegionAggregatorSum, :Agriculture_aggregator_population, first = damages_first, after = :CromarMortality);
add_comp!(m, Agriculture_RegionAggregatorSum, :Agriculture_aggregator_gdp, first = damages_first, after = :Agriculture_aggregator_population);
add_comp!(m, MimiMooreEtAlAgricultureImpacts.Agriculture, :Agriculture, first = damages_first, after = :Agriculture_aggregator_gdp);
add_comp!(m, AgricultureDamagesDisaggregator, :AgricultureDamagesDisaggregator, first = damages_first, after = :Agriculture)
# add aggregators for 1990 population and GDP if we are using the GIVE model
socioeconomics_source == :RFF ? add_comp!(m, Agriculture_RegionAggregatorSum_NoTime, :Agriculture_aggregator_pop90, first = damages_first, after = :Agriculture_aggregator_gdp) : nothing
socioeconomics_source == :RFF ? add_comp!(m, Agriculture_RegionAggregatorSum_NoTime, :Agriculture_aggregator_gdp90, first = damages_first, after = :Agriculture_aggregator_pop90) : nothing
# Add a Regional Per Capita GDP component that takes inputs from the Agiculture aggregator components
add_comp!(m, RegionalPerCapitaGDP, :RegionalPerCapitaGDP, first=damages_first, after = :AgricultureDamagesDisaggregator);
# Add Energy components
add_comp!(m, energy_damages, :energy_damages, first = damages_first, after = :RegionalPerCapitaGDP);
# Add DICE2016R2 damage component
add_comp!(m, dice2016R2_damage, :dice2016R2_damage, first = damages_first, after = :energy_damages);
# Add Howard and Sterner damage components
add_comp!(m, hs_damage, :hs_damage, first = damages_first, after = :dice2016R2_damage);
# Add DamageAggregator component and regional damages aggregator helper function
add_comp!(m, Damages_RegionAggregatorSum, first = damages_first);
add_comp!(m, DamageAggregator, first = damages_first);
# Add net consumption components (global and regional)
add_comp!(m, GlobalNetConsumption, :global_netconsumption, first = damages_first, after=:DamageAggregator)
add_comp!(m, RegionalNetConsumption, :regional_netconsumption, first = damages_first, after=:global_netconsumption)
add_comp!(m, CountryNetConsumption, :country_netconsumption, first = damages_first, after = :regional_netconsumption)
# --------------------------------------------------------------------------
# Shared Model Parameters
# --------------------------------------------------------------------------
add_shared_param!(m, :model_country_names, countries, dims = [:country])
add_shared_param!(m, :model_fund_regions, fund_regions, dims = [:fund_regions])
# Agriculture
add_shared_param!(m, :model_ag_mapping_input_regions, countries, dims = [:ag_mapping_input_regions])
add_shared_param!(m, :model_ag_mapping_output_regions, fund_regions, dims = [:ag_mapping_output_regions])
add_shared_param!(m, :model_ag_mapping, ag_mapping, dims = [:ag_mapping_input_regions])
# BRICK
add_shared_param!(m, :model_brick_seawater_freeze, -1.8)
# Mortality
if socioeconomics_source == :SSP
mortality_data = load(joinpath(@__DIR__, "..", "data", "Mortality_cdr_spp_country_extensions_annual.csv")) |>
DataFrame |>
@filter(_.year in damages_first:model_last && _.scenario == mortality_SSP_map[SSP]) |>
DataFrame |>
@select(:year, :ISO, :cdf) |>
DataFrame |>
@orderby(:ISO) |>
DataFrame |>
i -> unstack(i, :year, :ISO, :cdf) |>
DataFrame |>
i -> select!(i, Not(:year))
# make sure the columns match the mortality countries
names(mortality_data) == countries ? nothing : error("Countries in mortality data must match model countries.")
add_shared_param!(m, :model_ssp_baseline_mortality_rate, vcat(fill(NaN, (length(model_first:damages_first-1), size(mortality_data)[2])), mortality_data |> Matrix), dims = [:time, :country]) # Pad with NaN b/c starting component in later year.
end
# --------------------------------------------------------------------------
# Component-Specific Parameters and Connections
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# BRICK
# --------------------------------------------------------------------------
# ----- Ocean Heat Accumulator ----- #
connect_param!(m, :OceanHeatAccumulator, :del_ohc, :temperature, :del_ohc)
# ----- Antarctic Ocean ----- #
update_param!(m, :antarctic_ocean, :anto_α, 0.28)
update_param!(m, :antarctic_ocean, :anto_β, 0.95)
# ----- Antarctic Ice Sheet ----- #
update_param!(m, :antarctic_icesheet, :ais_ρ_ice, 917.0)
update_param!(m, :antarctic_icesheet, :ais_ρ_seawater, 1030.0)
update_param!(m, :antarctic_icesheet, :ais_ρ_rock, 4000.0)
update_param!(m, :antarctic_icesheet, :ais_sea_level₀, 0.0)
update_param!(m, :antarctic_icesheet, :ais_ocean_temperature₀, 0.72)
update_param!(m, :antarctic_icesheet, :ais_radius₀, 1.864e6)
update_param!(m, :antarctic_icesheet, :ais_bedheight₀, 781.0)
update_param!(m, :antarctic_icesheet, :ais_slope, 0.0006)
update_param!(m, :antarctic_icesheet, :ais_μ, 11.0)
update_param!(m, :antarctic_icesheet, :ais_runoffline_snowheight₀, 1400.0)
update_param!(m, :antarctic_icesheet, :ais_c, 100.0)
update_param!(m, :antarctic_icesheet, :ais_precipitation₀, 0.37)
update_param!(m, :antarctic_icesheet, :ais_κ, 0.062)
update_param!(m, :antarctic_icesheet, :ais_ν, 0.0086)
update_param!(m, :antarctic_icesheet, :ais_iceflow₀, 1.2)
update_param!(m, :antarctic_icesheet, :ais_γ, 2.9)
update_param!(m, :antarctic_icesheet, :ais_α, 0.23)
update_param!(m, :antarctic_icesheet, :ais_temperature_coefficient, 0.8365)
update_param!(m, :antarctic_icesheet, :ais_temperature_intercept, 15.42)
update_param!(m, :antarctic_icesheet, :ais_local_fingerprint, -1.18)
update_param!(m, :antarctic_icesheet, :ocean_surface_area, 3.619e14)
update_param!(m, :antarctic_icesheet, :temperature_threshold, -15.0)
update_param!(m, :antarctic_icesheet, :λ, 0.0093)
update_param!(m, :antarctic_icesheet, :include_ais_DSL, true)
# ----- Glaciers & Small Ice Caps ----- #
update_param!(m, :glaciers_small_icecaps, :gsic_β₀, 0.0013)
update_param!(m, :glaciers_small_icecaps, :gsic_v₀, 0.376)
update_param!(m, :glaciers_small_icecaps, :gsic_s₀, -0.0138)
update_param!(m, :glaciers_small_icecaps, :gsic_n, 0.847)
update_param!(m, :glaciers_small_icecaps, :gsic_teq, -0.15)
# ----- Greenland Ice Sheet ----- #
update_param!(m, :greenland_icesheet, :greenland_a, -1.37)
update_param!(m, :greenland_icesheet, :greenland_b, 8.06)
update_param!(m, :greenland_icesheet, :greenland_α, 0.0008)
update_param!(m, :greenland_icesheet, :greenland_β, 0.00009)
update_param!(m, :greenland_icesheet, :greenland_v₀, 7.52)
# ----- Thermal Expansion ----- #
update_param!(m, :thermal_expansion, :te_A, 3.619e14)
update_param!(m, :thermal_expansion, :te_C, 3991.86795711963)
update_param!(m, :thermal_expansion, :te_ρ, 1027.0)
update_param!(m, :thermal_expansion, :te_α, 0.16)
update_param!(m, :thermal_expansion, :te_s₀, 0.0)
update_param!(m, :thermal_expansion, :ocean_heat_mixed, zeros(length(model_first:model_last)))
connect_param!(m, :thermal_expansion, :ocean_heat_interior, :OceanHeatAccumulator, :del_ohc_accum)
# ----- Landwater Storage ----- #
update_param!(m, :landwater_storage, :lws₀, 0.0)
update_param!(m, :landwater_storage, :first_projection_year, 2018)
update_param!(m, :landwater_storage, :lws_random_sample, fill(0.0003, model_last-model_first+1))
# ----- Set Parameters With Common Values Across Components ----- #
connect_param!(m, :antarctic_icesheet, :seawater_freeze, :model_brick_seawater_freeze)
connect_param!(m, :antarctic_ocean, :seawater_freeze, :model_brick_seawater_freeze)
update_param!(m, :GlobalSLRNorm_1900, :norm_range_start, 1900)
update_param!(m, :GlobalSLRNorm_1900, :norm_range_end, 1900)
# --------------------------------------------------------------------------
# Create Component Connections
connect_param!(m, :global_sea_level => :slr_glaciers_small_ice_caps, :glaciers_small_icecaps => :gsic_sea_level)
connect_param!(m, :global_sea_level => :slr_greeland_icesheet, :greenland_icesheet => :greenland_sea_level)
connect_param!(m, :global_sea_level => :slr_antartic_icesheet, :antarctic_icesheet => :ais_sea_level)
connect_param!(m, :global_sea_level => :slr_thermal_expansion, :thermal_expansion => :te_sea_level)
connect_param!(m, :global_sea_level => :slr_landwater_storage, :landwater_storage => :lws_sea_level)
connect_param!(m, :antarctic_icesheet => :antarctic_ocean_temperature, :antarctic_ocean => :anto_temperature)
connect_param!(m, :antarctic_icesheet => :global_sea_level, :global_sea_level => :sea_level_rise)
connect_param!(m, :antarctic_icesheet => :global_surface_temperature, :temperature => :T)
connect_param!(m, :antarctic_ocean => :global_surface_temperature, :temperature => :T)
connect_param!(m, :glaciers_small_icecaps => :global_surface_temperature, :temperature => :T)
connect_param!(m, :greenland_icesheet => :global_surface_temperature, :temperature => :T)
connect_param!(m, :GlobalSLRNorm_1900 => :global_slr, :global_sea_level => :sea_level_rise)
# --------------------------------------------------------------------------
# OceanPH
# --------------------------------------------------------------------------
update_param!(m, :OceanPH, :β1, -0.3671)
update_param!(m, :OceanPH, :β2, 10.2328)
update_param!(m, :OceanPH, :pH_0, 8.123)
connect_param!(m, :OceanPH => :atm_co2_conc, :co2_cycle => :co2)
# --------------------------------------------------------------------------
# Socioeconomic
# --------------------------------------------------------------------------
if socioeconomics_source == :SSP
update_param!(m, :Socioeconomic, :SSP_source, "Benveniste") # only available source to 2300 at this time in MimiSSPs
update_param!(m, :Socioeconomic, :SSP, SSP) # select the SSP from RCMIP name ie. SSP2
update_param!(m, :Socioeconomic, :emissions_source, "Leach") # only available source to 2300 at this time in MimiSSPs
update_param!(m, :Socioeconomic, :emissions_scenario, SSP_scenario) # full name ie. SSP245
elseif socioeconomics_source == :RFF
isnothing(RFFSPsample) ? nothing : update_param!(m, :Socioeconomic, :id, RFFSPsample)
end
connect_param!(m, :Socioeconomic, :country_names, :model_country_names)
# Feedback of Socioeconomic Emissions back to FAIR
# Load IPCC AR6 emissions scenario used for FAIRv1.6.2 ensemble runs (options = "ssp119", "ssp126", "ssp245", "ssp370", "ssp460", "ssp585").
ar6_emissions_raw = DataFrame(load(joinpath(@__DIR__, "..", "data", "FAIR_ar6", "AR6_emissions_"*ar6_scenario*"_1750_2300.csv")))
# Subset AR6 emissions to proper years.
emission_indices = indexin(collect(model_first:model_last), ar6_emissions_raw.Year)
ar6_emissions = ar6_emissions_raw[emission_indices, :]
# Here we couple the identity component co2_emissions to the SSP output, and then the
# FAIR emissions component to that identity component co2_emissions
connect_param!(m, :co2_emissions_identity => :input_co2, :Socioeconomic => :co2_emissions, ar6_emissions.FossilCO2 .+ ar6_emissions.OtherCO2)
connect_param!(m, :co2_cycle => :E_co2, :co2_emissions_identity => :output_co2)
# do the same for n2o_emissions
connect_param!(m, :n2o_emissions_identity => :input_n2o, :Socioeconomic => :n2o_emissions, ar6_emissions.N2O)
connect_param!(m, :n2o_cycle => :fossil_emiss_N₂O, :n2o_emissions_identity => :output_n2o)
# do the same for ch4_emissions
connect_param!(m, :ch4_emissions_identity => :input_ch4, :Socioeconomic => :ch4_emissions, ar6_emissions.CH4)
connect_param!(m, :ch4_cycle => :fossil_emiss_CH₄, :ch4_emissions_identity => :output_ch4)
# Land Use CO2 Emissions - FAIRv1.6.2 component :landuse_forcing and parameter :landuse_emiss
#
# For the SSPs, the MimiSSPs component does not break carbon dioxide out by industrial
# and other, so we will simply let FAIR1.6.2 run with its original settings for land use CO2, which is
# consistent with Leach (FAIRv2.0), where land use CO2 is simply not broken out separately in that dataset.
# For the RFF SPs we can either let them run with the middle-of-the-road and best-matched (pop and emissions)
# ssp245 AR6 scenario, or explicitly connect new data. Currently we do the former.
# --------------------------------------------------------------------------
# PerCapitaGDP
# --------------------------------------------------------------------------
connect_param!(m, :PerCapitaGDP => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :PerCapitaGDP => :population, :Socioeconomic => :population)
# --------------------------------------------------------------------------
# VSL
# --------------------------------------------------------------------------
if vsl==:fund
update_param!(m, :VSL, :α, 4.99252262888626e6 * pricelevel_1995_to_2005) # convert from FUND USD $1995 to USD $2005
update_param!(m, :VSL, :y₀, 24_962.6131444313 * pricelevel_1995_to_2005) # convert from FUND USD $1995 to USD $2005
elseif vsl==:epa
update_param!(m, :VSL, :α, 7.73514707e6) # 2020 EPA VSL in 2005$. See DataExplainer.ipynb for information
update_param!(m, :VSL, :y₀, 48_726.60) # 2020 U.S. income per capita in 2005$; See DataExplainer.ipynb for information
elseif vsl==:uba
update_param!(m, :VSL, :α, 5_920_000. / pricelevel_2005_to_2020) # 2020 UBA VSL in 2005$
update_param!(m, :VSL, :y₀, 44_646.78) # 2020 German income per capita in 2005$
else
error("Invalid vsl argument of $vsl.")
end
update_param!(m, :VSL, :ϵ, 1.0)
connect_param!(m, :VSL => :pc_gdp, :PerCapitaGDP => :pc_gdp)
# --------------------------------------------------------------------------
# Temperature Normalization Components
# --------------------------------------------------------------------------
# TempNorm_1880 - Normalize temperature to deviation from 1880 for Howard and Sterner damage function
update_param!(m, :TempNorm_1880, :norm_range_start, 1880)
update_param!(m, :TempNorm_1880, :norm_range_end, 1880)
connect_param!(m, :TempNorm_1880 => :global_temperature, :temperature => :T)
# TempNorm_1900 - Normalize temperature to deviation from 1900 for DICE2016 damage function
update_param!(m, :TempNorm_1900, :norm_range_start, 1900)
update_param!(m, :TempNorm_1900, :norm_range_end, 1900)
connect_param!(m, :TempNorm_1900 => :global_temperature, :temperature => :T)
# TempNorm_1850to1900 - Normalize temperature to deviation from 1850 to 1900 for IPCC Comparison Graphics
update_param!(m, :TempNorm_1850to1900, :norm_range_start, 1850)
update_param!(m, :TempNorm_1850to1900, :norm_range_end, 1900)
connect_param!(m, :TempNorm_1850to1900 => :global_temperature, :temperature => :T)
# TempNorm_1995to2005 - Normalize temperature to deviation from 1995 to 2005 for Agriculture Component
update_param!(m, :TempNorm_1995to2005, :norm_range_start, 1995)
update_param!(m, :TempNorm_1995to2005, :norm_range_end, 2005)
connect_param!(m, :TempNorm_1995to2005 => :global_temperature, :temperature => :T)
# --------------------------------------------------------------------------
# Cromar et al. Temperature-Mortality Damages
# --------------------------------------------------------------------------
# Assign Cromar et al. regional temperature mortality coefficients to appropriate countries.
# Load raw data.
cromar_coeffs = load(joinpath(@__DIR__, "..", "data", "CromarMortality_damages_coefficients.csv")) |> DataFrame
cromar_mapping_raw = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_cromar_mortality_regions.csv")) |> DataFrame
# Initialize an array to store country-level coefficients
country_β_mortality = zeros(length(cromar_mapping_raw.ISO3))
# Loop through the regions and assign regional coefficients to proper sets of countries.
for r = 1:length(cromar_regions)
# Find country indices for region "r"
r_index = findall(x->x==cromar_regions[r], cromar_mapping_raw.cromar_region)
# Find index for region "r" coefficient.
β_index = findfirst(x->x==cromar_regions[r], cromar_coeffs[!, "Cromar Region Name"])
# Assign all countries in that region the proper coefficient.
country_β_mortality[r_index] .= cromar_coeffs[β_index, "Pooled Beta"]
end
# Get indices to reorder Cromar countries mapped to the countries dimension (they may already be in the correct order; this is a safety check)
cromar_indices = indexin(countries, cromar_mapping_raw.ISO3)
country_β_mortality = country_β_mortality[cromar_indices]
update_param!(m, :CromarMortality, :β_mortality, country_β_mortality)
if socioeconomics_source == :SSP
connect_param!(m, :CromarMortality, :baseline_mortality_rate, :model_ssp_baseline_mortality_rate) # shared model parameter
elseif socioeconomics_source == :RFF
connect_param!(m, :CromarMortality => :baseline_mortality_rate, :Socioeconomic => :deathrate)
end
connect_param!(m, :CromarMortality => :population, :Socioeconomic => :population)
connect_param!(m, :CromarMortality => :temperature, :temperature => :T)
connect_param!(m, :CromarMortality => :vsl, :VSL => :vsl)
# --------------------------------------------------------------------------
# Agriculture Aggregators
# --------------------------------------------------------------------------
connect_param!(m, :Agriculture_aggregator_population, :input_region_names, :model_ag_mapping_input_regions)
connect_param!(m, :Agriculture_aggregator_population, :output_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :Agriculture_aggregator_population, :input_output_mapping, :model_ag_mapping)
connect_param!(m, :Agriculture_aggregator_population => :input, :Socioeconomic => :population)
connect_param!(m, :Agriculture_aggregator_gdp, :input_region_names, :model_ag_mapping_input_regions)
connect_param!(m, :Agriculture_aggregator_gdp, :output_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :Agriculture_aggregator_gdp, :input_output_mapping, :model_ag_mapping)
connect_param!(m, :Agriculture_aggregator_gdp => :input, :Socioeconomic => :gdp)
if socioeconomics_source == :RFF
connect_param!(m, :Agriculture_aggregator_gdp90, :input_region_names, :model_ag_mapping_input_regions)
connect_param!(m, :Agriculture_aggregator_gdp90, :output_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :Agriculture_aggregator_gdp90, :input_output_mapping, :model_ag_mapping)
connect_param!(m, :Agriculture_aggregator_gdp90 => :input, :Socioeconomic => :gdp1990)
connect_param!(m, :Agriculture_aggregator_pop90, :input_region_names, :model_ag_mapping_input_regions)
connect_param!(m, :Agriculture_aggregator_pop90, :output_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :Agriculture_aggregator_pop90, :input_output_mapping, :model_ag_mapping)
connect_param!(m, :Agriculture_aggregator_pop90 => :input, :Socioeconomic => :population1990)
end
# --------------------------------------------------------------------------
# Agriculture
# --------------------------------------------------------------------------
fund_regions != MimiMooreEtAlAgricultureImpacts.fund_regions && error("FUND regions for RFF Model do not match FUND regions for Agriculture.")
# Handle in pop and gdp 1990 baseline values
if socioeconomics_source == :SSP
data1990 = load(joinpath(@__DIR__, "..", "data", "Benveniste_SSPs", "Agriculture_1990vals.csv")) |>
DataFrame |>
@filter(_.SSP == SSP) |>
DataFrame
idxs = indexin(data1990.fund_region, fund_regions) # get the ordering of 1990 regions matched to fund regions in model
any(isnothing, idxs) && error("FUND regions for RFF Model do not match FUND regions for Agriculture 1990 values.")
data1990 = data1990[idxs,:] # reorder based on idxs
update_param!(m, :Agriculture, :pop90, data1990.pop)
update_param!(m, :Agriculture, :gdp90, data1990.gdp)
elseif socioeconomics_source == :RFF
connect_param!(m, :Agriculture => :pop90, :Agriculture_aggregator_pop90 => :output)
connect_param!(m, :Agriculture => :gdp90, :Agriculture_aggregator_gdp90 => :output)
end
# Access which of the 5 possible DFs to use for the damage function
gtap_idx = findfirst(isequal(Agriculture_gtap), MimiMooreEtAlAgricultureImpacts.gtaps)
gtap_df = MimiMooreEtAlAgricultureImpacts.gtap_df_all[:, :, gtap_idx]
update_param!(m, :Agriculture, :gtap_df, gtap_df)
update_param!(m, :Agriculture, :gtap_name, Agriculture_gtap)
update_param!(m, :Agriculture, :floor_on_damages, Agriculture_floor_on_damages)
update_param!(m, :Agriculture, :ceiling_on_benefits, Agriculture_ceiling_on_benefits)
update_param!(m, :Agriculture, :agrish0, Array{Float64, 1}(readdlm(joinpath(MimiMooreEtAlAgricultureImpacts.fund_datadir, "agrish0.csv"), ',', skipstart=1)[:,2]))
connect_param!(m, :Agriculture => :population, :Agriculture_aggregator_population => :output)
connect_param!(m, :Agriculture => :income, :Agriculture_aggregator_gdp => :output)
connect_param!(m, :Agriculture => :temp, :TempNorm_1995to2005 => :global_temperature_norm)
# --------------------------------------------------------------------------
# Regional Per Capita GDP
# --------------------------------------------------------------------------
connect_param!(m, :RegionalPerCapitaGDP => :population, :Agriculture_aggregator_population => :output)
connect_param!(m, :RegionalPerCapitaGDP => :gdp, :Agriculture_aggregator_gdp => :output)
# --------------------------------------------------------------------------
# Agriculture Damages Disaggregator
# --------------------------------------------------------------------------
connect_param!(m, :AgricultureDamagesDisaggregator, :mapping, :model_ag_mapping)
connect_param!(m, :AgricultureDamagesDisaggregator, :fund_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :AgricultureDamagesDisaggregator => :gdp_fund_region, :Agriculture_aggregator_gdp => :output)
connect_param!(m, :AgricultureDamagesDisaggregator => :gdp_country, :Socioeconomic => :gdp)
connect_param!(m, :AgricultureDamagesDisaggregator => :damages_ag_fund_region, :Agriculture => :agcost)
# --------------------------------------------------------------------------
# Energy
# --------------------------------------------------------------------------
# Assign GCAM regional energy damage coefficients to appropriate countries.
# Load raw data.
energy_coeffs = load(joinpath(@__DIR__, "..", "data", "energy_damages_gcam_region_coefficients.csv")) |> DataFrame
gcam_mapping_raw = load(joinpath(@__DIR__, "..", "data", "Mapping_countries_to_gcam_energy_regions.csv")) |> DataFrame
# Initialize an array to store country-level coefficients
country_β_energy = zeros(length(gcam_mapping_raw.ISO3))
# Loop through the regions and assign regional coefficients to proper subset of countries.
for r = 1:length(gcam_regions)
# Find country indices for region "r"
r_index = findall(x->x==gcam_regions[r], gcam_mapping_raw.gcamregion)
# Find index for region "r" coefficient.
β_index = findfirst(x->x==gcam_regions[r], energy_coeffs.gcam_region)
# Assign all countries in that region the proper coefficient.
country_β_energy[r_index] .= energy_coeffs[β_index, "coefficient"]
end
set_param!(m, :energy_damages, :β_energy, country_β_energy)
connect_param!(m, :energy_damages => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :energy_damages => :temperature, :temperature => :T)
# --------------------------------------------------------------------------
# DICE2016R2 Damages
# --------------------------------------------------------------------------
connect_param!(m, :dice2016R2_damage => :temperature, :TempNorm_1900 => :global_temperature_norm)
connect_param!(m, :dice2016R2_damage => :gdp, :Socioeconomic => :gdp)
# --------------------------------------------------------------------------
# Howard and Sterner Damages
# --------------------------------------------------------------------------
connect_param!(m, :hs_damage => :temperature, :TempNorm_1880 => :global_temperature_norm)
connect_param!(m, :hs_damage => :gdp, :Socioeconomic => :gdp)
# --------------------------------------------------------------------------
# Damage Aggregation
# --------------------------------------------------------------------------
# small regional damage aggregator helper component
connect_param!(m, :Damages_RegionAggregatorSum, :input_region_names, :model_ag_mapping_input_regions)
connect_param!(m, :Damages_RegionAggregatorSum, :output_region_names, :model_ag_mapping_output_regions)
connect_param!(m, :Damages_RegionAggregatorSum, :input_output_mapping, :model_ag_mapping)
connect_param!(m, :Damages_RegionAggregatorSum => :damage_cromar_mortality, :CromarMortality => :mortality_costs)
connect_param!(m, :Damages_RegionAggregatorSum => :damage_energy, :energy_damages => :energy_costs_dollar)
# main damage aggregator
connect_param!(m, :DamageAggregator => :damage_ag, :Agriculture => :agcost)
connect_param!(m, :DamageAggregator => :damage_ag_countries, :AgricultureDamagesDisaggregator => :damages_ag_country)
connect_param!(m, :DamageAggregator => :damage_cromar_mortality, :CromarMortality => :mortality_costs)
connect_param!(m, :DamageAggregator => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :DamageAggregator => :damage_energy, :energy_damages => :energy_costs_dollar)
connect_param!(m, :DamageAggregator => :damage_dice2016R2, :dice2016R2_damage => :damages)
connect_param!(m, :DamageAggregator => :damage_hs, :hs_damage => :damages)
connect_param!(m, :DamageAggregator => :damage_cromar_mortality_regions, :Damages_RegionAggregatorSum => :damage_cromar_mortality_regions)
connect_param!(m, :DamageAggregator => :damage_energy_regions, :Damages_RegionAggregatorSum => :damage_energy_regions)
domestic_idxs_country_dim = Int.(indexin(dim_keys(m, :domestic_countries), dim_keys(m, :country)))
update_param!(m, :DamageAggregator, :domestic_idxs_country_dim, domestic_idxs_country_dim)
domestic_idxs_energy_countries_dim = Int.(indexin(dim_keys(m, :domestic_countries), dim_keys(m, :energy_countries)))
update_param!(m, :DamageAggregator, :domestic_idxs_energy_countries_dim, domestic_idxs_energy_countries_dim)
# --------------------------------------------------------------------------
# Net Consumption
# --------------------------------------------------------------------------
# global
connect_param!(m, :global_netconsumption => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :global_netconsumption => :population, :Socioeconomic => :population)
connect_param!(m, :global_netconsumption => :total_damage, :DamageAggregator => :total_damage)
# regional
connect_param!(m, :regional_netconsumption => :population, :Agriculture_aggregator_population => :output)
connect_param!(m, :regional_netconsumption => :gdp, :Agriculture_aggregator_gdp => :output)
connect_param!(m, :regional_netconsumption => :total_damage, :DamageAggregator => :total_damage_regions)
# country
connect_param!(m, :country_netconsumption => :gdp, :Socioeconomic => :gdp)
connect_param!(m, :country_netconsumption => :population, :Socioeconomic => :population)
connect_param!(m, :country_netconsumption => :total_damage, :DamageAggregator => :total_damage_countries)
return m
end
using Dates, CSVFiles, DataFrames, FileIO, Mimi, Query
include("utils/scc_constants.jl")
include("utils/scc_streaming.jl")
"""
compute_scc(m::Model = get_model();
year::Union{Int, Nothing} = nothing,
last_year::Int = _model_years[end],
prtp::Union{Float64,Nothing} = 0.015,
eta::Union{Float64,Nothing} = 1.45,
discount_rates = nothing,
certainty_equivalent = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
n = 0,
gas::Symbol = :CO2,
save_list::Vector = [],
output_dir::Union{String, Nothing} = nothing,
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_disaggregated_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
post_mcs_creation_function = nothing,
pulse_size::Float64 = 1.
)
Compute the social cost (SC) of a gas for the GIVE model in USD \$2005.
- `m` (default get_model()) - if no model is provided, the default model from MimiGIVE.get_model() is used.
- `year` (default nothing) - year for which to calculate the SC (year of the emissions pulse)
- `last_year` (default 2300) - last year to include in damages summation
- `prtp` (default 0.015) and `eta` (default 1.45) - Ramsey discounting parameterization
- `discount_rates` (default nothing) - a vector of Named Tuples, i.e. [(label = "dr1", prtp = 0.03, eta = 1.45, ew = :consumption_region, ew_norm_region = "USA"), (label = "dr2", prtp = 0.015, eta = 1.45, ew = nothing, ew_norm_region = nothing)] - required if running n > 0
- `certainty_equivalent` (default false) - whether to compute the certainty equivalent or expected SCC
- `fair_parameter_set` (default :random) - :random means FAIR mcs samples will be chosen randomly from the provided sets, while :deterministic means they will be based on the vector provided to the `fair_parameter_set_ids` keyword argument.
- `fair_parameter_set_ids` - (default nothing) - if `fair_parameter_set` is set
to :deterministic, this `n` element vector provides the fair parameter set ids
that will be run, otherwise it is set to `nothing` and ignored.
- `rffsp_sampling` (default :random) - which sampling strategy to use for the RFF SPs, :random means RFF SPs will be chosen randomly, while :deterministic means they will be based on the vector provided to the `rffsp_sampling_ids` keyword argument.
- `rffsp_sampling_ids` - (default nothing) - if `rffsp_sampling` is set to :deterministic, this `n` element vector provides the RFF SP ids that will be run, otherwise it is set to `nothing` and ignored.
- `n` (default 0) - if `n` is 0, the deterministic version will be run; otherwise, a Monte Carlo simulation will be run.
- `gas` (default :CO2) - the gas for which to compute the SC; options are :CO2, :CH4, :N2O, and the HFCs (:HFC23, :HFC32, :HFC43_10, :HFC125, :HFC134a, :HFC143a, :HFC227ea, :HFC245fa).
- `save_list` (default []) - which parameters and variables to save for each trial, entered as a vector of Tuples (:component_name, :variable_name)
- `output_dir` (default constructed folder name) - folder to hold results
- `save_md` (default is false) - save and return the marginal damages from a monte carlo simulation
- `save_cpc` (default is false) - save and return the per capita consumption from a monte carlo simulation
- `save_slr_damages` (default is false) - save global sea level rise damages from CIAM to disk
- `compute_sectoral_values` (default is false) - compute and return sectoral values as well as total
- `compute_disaggregated_values` (default is false) - compute spatially disaggregated marginal damages, sectoral damages, and socioeconomic variables
- `compute_domestic_values` (default is false) - compute and return domestic values in addition to global
- `CIAM_foresight`(default is :perfect) - Use limited foresight (:limited) or perfect foresight (:perfect) for MimiCIAM cost calculations
- `CIAM_GDPcap` (default is false) - Limit SLR damages to country-level annual GDP
- `pulse_size` (default 1.) - This determines the size of the additional pulse of emissions. Default of `1.` implies the standard pulse size of 1Gt of C for CO2, 1Mt of CH4, and 1Mt of N2O.
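
# Examples

A minimal usage sketch (the keyword values here are illustrative, not
recommendations):

```julia
using MimiGIVE

# deterministic SCC for a CO2 pulse in 2020, default Ramsey discounting
scc = MimiGIVE.compute_scc(year=2020)

# Monte Carlo run; `discount_rates` is required whenever n > 0
drs = [(label="dr1", prtp=0.015, eta=1.45, ew=nothing, ew_norm_region=nothing)]
results = MimiGIVE.compute_scc(year=2020, n=10, discount_rates=drs)
```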
"""
function compute_scc(m::Model = get_model();
year::Union{Int, Nothing} = nothing,
last_year::Int = _model_years[end],
prtp::Union{Float64,Nothing} = 0.015,
eta::Union{Float64,Nothing} = 1.45,
discount_rates = nothing,
certainty_equivalent = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
n = 0,
gas::Symbol = :CO2,
save_list::Vector = [],
output_dir::Union{String, Nothing} = nothing,
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_disaggregated_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
post_mcs_creation_function = nothing,
pulse_size::Float64 = 1.
)
hfc_list = [:HFC23, :HFC32, :HFC43_10, :HFC125, :HFC134a, :HFC143a, :HFC227ea, :HFC245fa]
gases_list = [:CO2, :CH4, :N2O, hfc_list ...]
m = deepcopy(m) # in the case that an `m` was provided, be careful that we don't modify the original
year === nothing ? error("Must specify an emission year. Try `compute_scc(m, year=2020)`.") : nothing
!(last_year in _model_years) ? error("Invalid value of $last_year for last_year. last_year must be within the model's time index $_model_years.") : nothing
!(year in _model_years) ? error("Cannot compute the scc for year $year, year must be within the model's time index $_model_years.") : nothing
!(gas in gases_list) ? error("Invalid value of $gas for gas, gas must be one of $(gases_list).") : nothing
# post-process the provided discount rates to allow for backwards compatibility
# with Named Tuples that did not include equity weighting args ew and ew_norm_region
if discount_rates !== nothing
# create new Vector of discount rates that include equity weighting fields
discount_rates_compatible = Array{NamedTuple}(undef, length(discount_rates))
for (i, dr) in enumerate(discount_rates)
if !hasfield(typeof(dr), :ew) # deprecated version without the ew specification
discount_rates_compatible[i] = (label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=nothing, ew_norm_region=nothing)
else
discount_rates_compatible[i] = dr
end
end
# replace discount rates with the augmented ones
discount_rates = copy(discount_rates_compatible)
ew_calcs = any(dr -> !isnothing(dr.ew), discount_rates)
(compute_domestic_values && ew_calcs) ? error("Equity weighting cannot be used when calculating domestic values. More specifically, the `compute_domestic_values` argument cannot be set to `true` if the `ew` field of a discount rate Named Tuple is not nothing.") : nothing
end
mm = get_marginal_model(m; year = year, gas = gas, pulse_size = pulse_size)
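# `mm` is a Mimi.MarginalModel holding a base model plus a copy with an
# additional emissions pulse of `pulse_size` in `year`; differencing the two
# runs downstream yields marginal damages.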
if n==0
return _compute_scc(mm,
year = year,
last_year = last_year,
prtp = prtp,
eta = eta,
discount_rates = discount_rates,
gas = gas,
domestic = compute_domestic_values,
CIAM_foresight = CIAM_foresight,
CIAM_GDPcap = CIAM_GDPcap,
pulse_size = pulse_size
)
else
isnothing(discount_rates) ? error("To run the Monte Carlo compute_scc function (n != 0), please use the `discount_rates` argument.") : nothing
# Set up output directories
output_dir = output_dir === nothing ? joinpath(@__DIR__, "../output/mcs-SC/", "MCS $(Dates.format(now(), "yyyy-mm-dd HH-MM-SS")) MC$n") : output_dir
isdir("$output_dir/results") || mkpath("$output_dir/results")
return _compute_scc_mcs(mm,
n,
year = year,
last_year = last_year,
discount_rates = discount_rates,
certainty_equivalent = certainty_equivalent,
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
gas = gas,
save_list = save_list,
output_dir = output_dir,
save_md = save_md,
save_cpc = save_cpc,
save_slr_damages = save_slr_damages,
compute_sectoral_values = compute_sectoral_values,
compute_disaggregated_values = compute_disaggregated_values,
compute_domestic_values = compute_domestic_values,
CIAM_foresight = CIAM_foresight,
CIAM_GDPcap = CIAM_GDPcap,
post_mcs_creation_function = post_mcs_creation_function,
pulse_size = pulse_size
)
end
end
# Internal function to compute the SCC from a MarginalModel in a deterministic run
function _compute_scc(mm::MarginalModel;
year::Int,
last_year::Int,
prtp,
eta,
discount_rates,
gas::Symbol,
domestic::Bool,
CIAM_foresight::Symbol,
CIAM_GDPcap::Bool,
pulse_size::Float64
)
year_index = findfirst(isequal(year), _model_years)
last_year_index = findfirst(isequal(last_year), _model_years)
# Run all model years even if taking a shorter last_year - this runs
# unnecessary timesteps but simplifies accumulation
run(mm)
# at this point create identical copies ciam_base and ciam_modified, they will
# be updated in _compute_ciam_marginal_damages with update_ciam!
ciam_base, segment_fingerprints = get_ciam(mm.base)
ciam_modified, _ = get_ciam(mm.base)
ciam_base = Mimi.build(ciam_base)
ciam_modified = Mimi.build(ciam_modified)
# calculate ciam marginal damages (for globe, country, and domestic) only if
# we are including slr
if mm.base[:DamageAggregator, :include_slr]
all_ciam_marginal_damages = _compute_ciam_marginal_damages(mm.base, mm.modified, gas, ciam_base, ciam_modified, segment_fingerprints; CIAM_foresight=CIAM_foresight, CIAM_GDPcap=CIAM_GDPcap, pulse_size=pulse_size)
# zero out the CIAM marginal damages from start year (2020) through emissions
# year - they will be non-zero due to foresight but saved marginal damages
# should be zeroed out pre-emissions year
all_ciam_marginal_damages.globe[1:year_index] .= 0.
all_ciam_marginal_damages.domestic[1:year_index] .= 0.
all_ciam_marginal_damages.country[1:year_index, :] .= 0.
end
# Units Note:
# main_marginal_damages: the marginal model will handle pulse size, we handle molecular mass conversion explicitly
# ciam_marginal_damages: within the _compute_ciam_marginal_damages function we handle both pulse size and molecular mass
if domestic
main_marginal_damages = mm[:DamageAggregator, :total_damage_domestic] .* scc_gas_molecular_conversions[gas]
ciam_marginal_damages = mm.base[:DamageAggregator, :include_slr] ? all_ciam_marginal_damages.domestic : fill(0., length(_model_years))
else
main_marginal_damages = mm[:DamageAggregator, :total_damage] .* scc_gas_molecular_conversions[gas]
ciam_marginal_damages = mm.base[:DamageAggregator, :include_slr] ? all_ciam_marginal_damages.globe : fill(0., length(_model_years))
end
marginal_damages = main_marginal_damages .+ ciam_marginal_damages
# We don't care about units here because we are only going to use ratios
cpc = mm.base[:global_netconsumption, :net_cpc]
if discount_rates!==nothing
sccs = Dict{NamedTuple{(:dr_label,:prtp,:eta,:ew,:ew_norm_region),Tuple{Any,Float64,Float64,Union{Nothing, Symbol},Union{Nothing, String}}}, Float64}()
for dr in discount_rates
if isnothing(dr.ew) # no equity weighting
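# Standard Ramsey discounting: the weight on marginal damages in year t is
#   df_t = (cpc_t0 / cpc_t)^eta * (1 + prtp)^-(t - t0)
# where t0 is the pulse year, so consumption growth (through eta) and pure
# time preference (through prtp) both shrink the weight on future damages.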
df = [((cpc[year_index]/cpc[i])^dr.eta * 1/(1+dr.prtp)^(t-year) for (i,t) in enumerate(_model_years) if year<=t<=last_year)...]
scc = sum(df .* marginal_damages[year_index:last_year_index])
elseif dr.ew==:gdp_country # equity weight using gdp per capita
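# Equity weighting: each region's dollar damages are converted to utility
# units by dividing by pc_gdp^eta (i.e., multiplying by the marginal utility
# c^-eta of an isoelastic utility function), discounted with the pure rate
# of time preference only, and then re-monetized by multiplying by the
# normalization region's pc_gdp^eta in the pulse year (the `scc = ...` line
# at the end of this branch).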
ag_marginal_damages = mm[:Agriculture, :agcost] .* scc_gas_molecular_conversions[gas] * 1e9 # fund regions
en_marginal_damages = mm[:energy_damages, :energy_costs_dollar] .* scc_gas_molecular_conversions[gas] * 1e9 # country
health_marginal_damages = mm[:CromarMortality, :mortality_costs] .* scc_gas_molecular_conversions[gas] # country
# note: slr_marginal_damages is allocated below
pc_gdp_for_health = mm.base[:PerCapitaGDP, :pc_gdp]
n_regions_for_health = size(pc_gdp_for_health, 2)
health_scc_in_utils = sum(
health_marginal_damages[i,r] / pc_gdp_for_health[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions_for_health if year<=t<=last_year
)
pc_gdp_for_ag = mm.base[:Agriculture, :income] ./ mm.base[:Agriculture, :population] .* 1000.0
n_regions_for_ag = size(pc_gdp_for_ag, 2)
ag_scc_in_utils = sum(
ag_marginal_damages[i,r] / pc_gdp_for_ag[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions_for_ag if year<=t<=last_year
)
pc_gdp_for_en = mm.base[:PerCapitaGDP, :pc_gdp]
n_regions_for_en = size(pc_gdp_for_en, 2)
en_scc_in_utils = sum(
en_marginal_damages[i,r] / pc_gdp_for_en[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions_for_en if year<=t<=last_year
)
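# CIAM reports per capita GDP (ypcc) on a decadal step for the 145 coastal
# countries; build an annual 1750-2300 series by padding 1750-2019 with
# zeros (SLR marginal damages are zero there), repeating each decadal value
# 10 times, and appending the final model year.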
pc_gdp_for_slr = [fill(0., 2020-1750, 145); repeat(ciam_base[:slrcost, :ypcc][1:end-1,:], inner=(10,1)); ciam_base[:slrcost, :ypcc][end:end,:]]
n_regions_for_slr = size(pc_gdp_for_slr, 2)
slr_marginal_damages = mm.base[:DamageAggregator, :include_slr] ? all_ciam_marginal_damages.country : fill(0., length(_model_years), n_regions_for_slr) # 145 countries (coastal only), only run ciam if needed
slr_scc_in_utils = sum(
slr_marginal_damages[i,r] / pc_gdp_for_slr[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions_for_slr if year<=t<=last_year
)
# sum up total utils for included sectors to calculate scc
total_utils =
(mm.base[:DamageAggregator, :include_cromar_mortality] ? health_scc_in_utils : 0.) +
(mm.base[:DamageAggregator, :include_ag] ? ag_scc_in_utils : 0.) +
(mm.base[:DamageAggregator, :include_energy] ? en_scc_in_utils : 0.) +
(mm.base[:DamageAggregator, :include_slr] ? slr_scc_in_utils : 0.)
normalization_region_index = findfirst(isequal(dr.ew_norm_region), dim_keys(mm.base, :country))
scc = mm.base[:PerCapitaGDP, :pc_gdp][year_index,normalization_region_index]^dr.eta * total_utils
elseif dr.ew==:consumption_region || dr.ew==:consumption_country # equity weight using consumption
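# Same equity-weighting logic as the GDP branch above, but marginal utility
# is evaluated at net per capita consumption, with damages and consumption
# at either FUND-region or country resolution depending on `ew`.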
if dr.ew==:consumption_region
net_cpc_component_name = :regional_netconsumption
spatial_key_name = :fund_regions # dimension key name for fund regions
non_slr_marginal_damages = mm[:DamageAggregator, :total_damage_regions] .* scc_gas_molecular_conversions[gas] # fund regions
pc_consumption = mm.base[net_cpc_component_name, :net_cpc]
n_regions = size(pc_consumption, 2)
slr_marginal_damages = zeros(551, n_regions) # all regions initialized to 0; 551 = number of model years (1750-2300)
if mm.base[:DamageAggregator, :include_slr] # only run ciam if including slr
all_countries = mm.base[:Damages_RegionAggregatorSum, :input_region_names]
idxs = indexin(dim_keys(ciam_base, :ciam_country), all_countries) # subset for the slr cost coastal countries
mapping = mm.base[:Damages_RegionAggregatorSum, :input_output_mapping_int][idxs] # mapping from ciam coastal countries to region index
# mm.base[:Damages_RegionAggregatorSum, :input_region_names][idxs] == dim_keys(ciam_base, :ciam_country) # this check should be true
n_ciam_countries = length(idxs)
# aggregate from ciam countries to fund regions
for i in 1:n_ciam_countries
slr_marginal_damages[:, mapping[i]] += all_ciam_marginal_damages.country[:,i]
end
end
elseif dr.ew==:consumption_country
spatial_key_name = :country # dimension key name for countries
net_cpc_component_name = :country_netconsumption
non_slr_marginal_damages = mm[:DamageAggregator, :total_damage_countries] .* scc_gas_molecular_conversions[gas]
pc_consumption = mm.base[net_cpc_component_name, :net_cpc]
n_regions = size(pc_consumption, 2)
slr_marginal_damages = zeros(551, n_regions) # all countries initialized to 0
if mm.base[:DamageAggregator, :include_slr] # only run if including slr
idxs = indexin(dim_keys(ciam_base, :ciam_country), dim_keys(mm.base, :country)) # subset for the slr cost coastal countries
slr_marginal_damages[:,idxs] .= all_ciam_marginal_damages.country # insert country values into matching rows for marginal damages Matrix
end
end
marginal_damages = non_slr_marginal_damages .+ slr_marginal_damages
scc_in_utils = sum(
marginal_damages[i,r] / pc_consumption[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
normalization_region_index = findfirst(isequal(dr.ew_norm_region), dim_keys(mm.base, spatial_key_name))
scc = pc_consumption[year_index,normalization_region_index]^dr.eta * (scc_in_utils)
else
error("$(dr.ew) is not a valid option for equity weighting method, must be nothing, :gdp_country, :consumption_region, or :consumption_country.")
end # end ew conditional
# fill in the computed scc value
sccs[(dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)] = scc
end # end discount rates loop
return sccs
else
# Note that to use equity weighting, users must use the Named Tuple format of the discount_rates argument
df = [((cpc[year_index]/cpc[i])^eta * 1/(1+prtp)^(t-year) for (i,t) in enumerate(_model_years) if year<=t<=last_year)...]
scc = sum(df .* marginal_damages[year_index:last_year_index])
return scc
end
end
# Post-trial function to run after each trial within the MCS
function post_trial_func(mcs::SimulationInstance, trialnum::Int, ntimesteps::Int, tup)
# Unpack the payload object
scc_values, intermediate_ce_scc_values, norm_cpc_values_ce, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, streams, options = Mimi.payload2(mcs)
# Compute some useful indices
year_index = findfirst(isequal(year), _model_years)
last_year_index = findfirst(isequal(last_year), _model_years)
# Access the models
base, marginal = mcs.models
# Compute marginal damages
# Units Note:
# main_mds and non-ciam sectoral damages: we explicitly need to handle both pulse size and molecular mass so we use gas_units_multiplier
# slr_mds: within the _compute_ciam_marginal_damages function we handle both pulse size and molecular mass
# Create a marginal model to use for computation of the marginal damages from
# non-slr sectors, and IMPORTANTLY include the gas_units_multiplier as the
# `delta` attribute such that it is used to scale results and can be used for
# marginal damages calculations
gas_units_multiplier = scc_gas_molecular_conversions[gas] ./ (scc_gas_pulse_size_conversions[gas] .* options.pulse_size)
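# gas_units_multiplier combines the molecular-mass conversion (e.g., mass
# of C to mass of CO2) with the rescaling from the full pulse mass (Gt or
# Mt, times pulse_size) down to one tonne, so marginal damages come out in
# USD per tonne of the pulsed gas. Passing 1/gas_units_multiplier as the
# MarginalModel `delta` below means (modified - base)/delta applies this
# scaling automatically.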
post_trial_mm = Mimi.MarginalModel(base, marginal, 1/gas_units_multiplier)
include_slr = base[:DamageAggregator, :include_slr]
if include_slr
# return a NamedTuple with globe and domestic and country as well as other helper values
ciam_mds = _compute_ciam_marginal_damages(base, marginal, gas, ciam_base, ciam_modified, segment_fingerprints; CIAM_foresight=options.CIAM_foresight, CIAM_GDPcap=options.CIAM_GDPcap, pulse_size=options.pulse_size)
# zero out the CIAM marginal damages from start year (2020) through emissions
# year - they will be non-zero due to foresight but saved marginal damages
# should be zeroed out pre-emissions year
ciam_mds.globe[1:year_index] .= 0.
ciam_mds.domestic[1:year_index] .= 0.
ciam_mds.country[1:year_index, :] .= 0.
end
main_mds = post_trial_mm[:DamageAggregator, :total_damage]
slr_mds = include_slr ? ciam_mds.globe : fill(0., length(_model_years))
total_mds = main_mds .+ slr_mds
if options.compute_domestic_values
main_mds_domestic = post_trial_mm[:DamageAggregator, :total_damage_domestic]
slr_mds_domestic = include_slr ? ciam_mds.domestic : fill(0., length(_model_years))
total_mds_domestic = main_mds_domestic .+ slr_mds_domestic
end
if options.compute_sectoral_values
cromar_mortality_mds = post_trial_mm[:DamageAggregator, :cromar_mortality_damage]
agriculture_mds = post_trial_mm[:DamageAggregator, :agriculture_damage]
energy_mds = post_trial_mm[:DamageAggregator, :energy_damage]
if options.compute_domestic_values
cromar_mortality_mds_domestic = post_trial_mm[:DamageAggregator, :cromar_mortality_damage_domestic]
agriculture_mds_domestic = post_trial_mm[:DamageAggregator, :agriculture_damage_domestic]
energy_mds_domestic = post_trial_mm[:DamageAggregator, :energy_damage_domestic]
end
end
# stream out sectoral damages disaggregated by country along with the socioeconomics
if options.compute_disaggregated_values
_stream_disagg_damages(base, streams["output_dir"], trialnum, streams)
_stream_disagg_socioeconomics(base, streams["output_dir"], trialnum, streams)
if include_slr
_stream_disagg_damages_slr(ciam_base, ciam_mds.damages_base_country, streams["output_dir"], trialnum, streams)
_stream_disagg_md(base, marginal, ciam_base, ciam_mds.country, streams["output_dir"], trialnum, streams; gas_units_multiplier=gas_units_multiplier)
else
_stream_disagg_md(base, marginal, nothing, nothing, streams["output_dir"], trialnum, streams; gas_units_multiplier=gas_units_multiplier)
end
end
# Save marginal damages
if options.save_md
# global
md_values[(region=:globe, sector=:total)][trialnum, :] = total_mds[_damages_idxs]
if options.compute_sectoral_values
md_values[(region=:globe, sector=:cromar_mortality)][trialnum, :] = cromar_mortality_mds[_damages_idxs]
md_values[(region=:globe, sector=:agriculture)][trialnum, :] = agriculture_mds[_damages_idxs]
md_values[(region=:globe, sector=:energy)][trialnum, :] = energy_mds[_damages_idxs]
md_values[(region=:globe, sector=:slr)][trialnum, :] = slr_mds[_damages_idxs]
end
# domestic
if options.compute_domestic_values
md_values[(region=:domestic, sector=:total)][trialnum, :] = total_mds_domestic[_damages_idxs]
if options.compute_sectoral_values
md_values[(region=:domestic, sector=:cromar_mortality)][trialnum, :] = cromar_mortality_mds_domestic[_damages_idxs]
md_values[(region=:domestic, sector=:agriculture)][trialnum, :] = agriculture_mds_domestic[_damages_idxs]
md_values[(region=:domestic, sector=:energy)][trialnum, :] = energy_mds_domestic[_damages_idxs]
md_values[(region=:domestic, sector=:slr)][trialnum, :] = slr_mds_domestic[_damages_idxs]
end
end
end
# Save slr damages
if options.save_slr_damages
# get a dummy ciam model to be sure to accurately assign segment names to
# segment level damages
m = MimiGIVE.get_model()
m_ciam, _ = MimiGIVE.get_ciam(m)
if include_slr
# global
slr_damages[:base][trialnum,:] = ciam_mds.damages_base[_damages_idxs]
slr_damages[:modified][trialnum,:] = ciam_mds.damages_modified[_damages_idxs]
slr_damages[:base_lim_cnt][trialnum,:,:] = ciam_mds.base_lim_cnt
slr_damages[:modified_lim_cnt][trialnum,:,:] = ciam_mds.modified_lim_cnt
slr_damages[:base_segments_2100][trialnum, :] = ciam_mds.damages_base_segments_2100
# domestic - these Dictionary entries will only exist if we are computing
# domestic values
if options.compute_domestic_values
slr_damages[:base_domestic][trialnum,:] = ciam_mds.damages_base_domestic[_damages_idxs]
slr_damages[:modified_domestic][trialnum,:] = ciam_mds.damages_modified_domestic[_damages_idxs]
end
else
# global
slr_damages[:base][trialnum,:] .= 0.
slr_damages[:modified][trialnum,:] .= 0.
slr_damages[:base_lim_cnt][trialnum,:,:] .= 0.
slr_damages[:modified_lim_cnt][trialnum,:,:] .= 0.
slr_damages[:base_segments_2100][trialnum, :] .= 0.
# domestic - these Dictionary entries will only exist if we are computing
# domestic values
if options.compute_domestic_values
slr_damages[:base_domestic][trialnum,:] .= 0.
slr_damages[:modified_domestic][trialnum,:] .= 0.
end
end
end
# Get per capita consumption
# We don't care about units here because we are only going to use ratios
cpc = base[:global_netconsumption, :net_cpc]
# Save per capita consumption
if options.save_cpc
cpc_values[(region=:globe, sector=:total)][trialnum, :] = cpc[_damages_idxs]
end
# Calculate the SCC for each discount rate
for dr in discount_rates
if isnothing(dr.ew) # no equity weighting
df = [((cpc[year_index]/cpc[i])^dr.eta * 1/(1+dr.prtp)^(t-year) for (i,t) in enumerate(_model_years) if year<=t<=last_year)...]
if options.certainty_equivalent
df_ce = [((1. / cpc[i])^dr.eta * 1/(1+dr.prtp)^(t-year) for (i,t) in enumerate(_model_years) if year<=t<=last_year)...] # only used if options.certainty_equivalent=true
end
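# For the certainty equivalent, df_ce omits the cpc[year_index]^eta
# numerator: each trial stores the SCC in utility units (the intermediate
# values) and the pulse-year consumption (norm_cpc_values_ce) separately,
# so the expectation can be taken over utilities across trials before
# re-monetizing after the simulation.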
# totals (sector=:total)
scc = sum(df .* total_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* total_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
norm_cpc_values_ce[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = cpc[year_index]
end
# domestic totals (sector=:total)
if options.compute_domestic_values
scc = sum(df .* total_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* total_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
end
end
# sectoral
if options.compute_sectoral_values
scc = sum(df .* cromar_mortality_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* agriculture_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* energy_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* slr_mds[year_index:last_year_index])
scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* cromar_mortality_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* agriculture_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* energy_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* slr_mds[year_index:last_year_index])
intermediate_ce_scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
end
# sectoral domestic (region=:domestic)
if options.compute_domestic_values
scc = sum(df .* cromar_mortality_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* agriculture_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* energy_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = sum(df .* slr_mds_domestic[year_index:last_year_index])
scc_values[(region=:domestic, sector= :slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc = sum(df_ce .* cromar_mortality_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* agriculture_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* energy_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
intermediate_ce_scc = sum(df_ce .* slr_mds_domestic[year_index:last_year_index])
intermediate_ce_scc_values[(region=:domestic, sector= :slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = intermediate_ce_scc
end
end
end
elseif dr.ew==:gdp_region || dr.ew==:gdp_country # equity weight with gdp
if dr.ew==:gdp_region
pc_gdp_component_name = :RegionalPerCapitaGDP # used later for equity weighting
spatial_key_name = :fund_regions # dimension key name for fund regions
en_marginal_damages = post_trial_mm[:DamageAggregator, :damage_energy_regions] .* 1e9 # fund regions
health_marginal_damages = post_trial_mm[:DamageAggregator, :damage_cromar_mortality_regions] # fund regions
# don't care about units here because just using ratios
pc_gdp = base[pc_gdp_component_name, :pc_gdp]
n_regions = size(pc_gdp, 2)
slr_marginal_damages = zeros(551, n_regions)
if post_trial_mm.base[:DamageAggregator, :include_slr] # only run ciam if including slr
all_countries = base[:Damages_RegionAggregatorSum, :input_region_names]
idxs = indexin(dim_keys(ciam_base, :ciam_country), all_countries) # subset for the slr cost coastal countries
mapping = post_trial_mm.base[:Damages_RegionAggregatorSum, :input_output_mapping_int][idxs] # mapping from ciam coastal countries to region index
# base[:Damages_RegionAggregatorSum, :input_region_names][idxs] == dim_keys(ciam_base, :ciam_country) # this check should be true
n_ciam_countries = length(idxs)
# aggregate from ciam countries to fund regions
for i in 1:n_ciam_countries
slr_marginal_damages[:, mapping[i]] += ciam_mds.country[:,i]
end
end
elseif dr.ew==:gdp_country
pc_gdp_component_name = :PerCapitaGDP # used later for equity weighting
spatial_key_name = :country # dimension key name for fund regions
en_marginal_damages = post_trial_mm[:energy_damages, :energy_costs_dollar] .* 1e9
health_marginal_damages = post_trial_mm[:DamageAggregator, :damage_cromar_mortality]
# don't care about units here because just using ratios
pc_gdp = base[pc_gdp_component_name, :pc_gdp]
n_regions = size(pc_gdp, 2)
slr_marginal_damages = zeros(551, n_regions) # all countries initialized to 0
if post_trial_mm.base[:DamageAggregator, :include_slr] # only run if including slr
idxs = indexin(dim_keys(ciam_base, :ciam_country), dim_keys(post_trial_mm.base, spatial_key_name)) # subset for the slr cost coastal countries
slr_marginal_damages[:,idxs] .= ciam_mds.country # insert country values into matching rows for marginal damages Matrix
end
end
health_scc_in_utils = sum(
health_marginal_damages[i,r] / pc_gdp[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
# do this regardless of regional choice # TODO review how this impacts country vs. region approach to equity weighting
ag_marginal_damages = post_trial_mm[:Agriculture, :agcost] .* 1e9 # fund regions
pc_gdp_for_ag = base[:Agriculture, :income] ./ base[:Agriculture, :population] .* 1000.0
n_regions_for_ag = size(pc_gdp_for_ag, 2)
ag_scc_in_utils = sum(
ag_marginal_damages[i,r] / pc_gdp_for_ag[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions_for_ag if year<=t<=last_year
)
en_scc_in_utils = sum(
en_marginal_damages[i,r] / pc_gdp[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
slr_scc_in_utils = sum(
slr_marginal_damages[i,r] / pc_gdp[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
# sum up total utils for included sectors to calculate scc
total_utils =
(base[:DamageAggregator, :include_cromar_mortality] ? health_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_ag] ? ag_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_energy] ? en_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_slr] ? slr_scc_in_utils : 0.)
normalization_region_index = findfirst(isequal(dr.ew_norm_region), dim_keys(base, spatial_key_name))
scc = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]^dr.eta * total_utils
scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = total_utils
norm_cpc_values_ce[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]
end
# sectoral
if options.compute_sectoral_values
scc = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]^dr.eta * health_scc_in_utils
scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]^dr.eta * ag_scc_in_utils
scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]^dr.eta * en_scc_in_utils
scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[pc_gdp_component_name, :pc_gdp][year_index,normalization_region_index]^dr.eta * slr_scc_in_utils
scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = health_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = ag_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = en_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = slr_scc_in_utils
end
end
elseif dr.ew==:consumption_region || dr.ew==:consumption_country # equity weight with consumption
if dr.ew==:consumption_region
net_cpc_component_name = :regional_netconsumption # used later for equity weighting
spatial_key_name = :fund_regions # dimension key name for fund regions
ag_marginal_damages = post_trial_mm[:Agriculture, :agcost] .* 1e9 # fund regions
en_marginal_damages = post_trial_mm[:DamageAggregator, :damage_energy_regions] .* 1e9 # fund regions
health_marginal_damages = post_trial_mm[:DamageAggregator, :damage_cromar_mortality_regions] # fund regions
# don't care about units here because just using ratios
pc_consumption = base[net_cpc_component_name, :net_cpc]
n_regions = size(pc_consumption, 2)
slr_marginal_damages = zeros(551, n_regions)
if post_trial_mm.base[:DamageAggregator, :include_slr] # only run ciam if including slr
all_countries = base[:Damages_RegionAggregatorSum, :input_region_names]
idxs = indexin(dim_keys(ciam_base, :ciam_country), all_countries) # subset for the slr cost coastal countries
mapping = post_trial_mm.base[:Damages_RegionAggregatorSum, :input_output_mapping_int][idxs] # mapping from ciam coastal countries to region index
# base[:Damages_RegionAggregatorSum, :input_region_names][idxs] == dim_keys(ciam_base, :ciam_country) # this check should be true
n_ciam_countries = length(idxs)
# aggregate from ciam countries to fund regions
for i in 1:n_ciam_countries
slr_marginal_damages[:, mapping[i]] += ciam_mds.country[:,i]
end
end
elseif dr.ew==:consumption_country
net_cpc_component_name = :country_netconsumption # used later in script for equity weighting
spatial_key_name = :country # dimension key name for countries
ag_marginal_damages = post_trial_mm[:AgricultureDamagesDisaggregator, :damages_ag_country] .* 1e9
en_marginal_damages = post_trial_mm[:energy_damages, :energy_costs_dollar] .* 1e9
health_marginal_damages = post_trial_mm[:DamageAggregator, :damage_cromar_mortality]
# don't care about units here because just using ratios
pc_consumption = base[net_cpc_component_name, :net_cpc]
n_regions = size(pc_consumption, 2)
slr_marginal_damages = zeros(551, n_regions) # all countries initialized to 0
if post_trial_mm.base[:DamageAggregator, :include_slr] # only run if including slr
idxs = indexin(dim_keys(ciam_base, :ciam_country), dim_keys(post_trial_mm.base, spatial_key_name)) # subset for the slr cost coastal countries
slr_marginal_damages[:,idxs] .= ciam_mds.country # insert country values into matching rows for marginal damages Matrix
end
end
if any(x->x<=0, skipmissing(pc_consumption))
scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = missing
if options.compute_sectoral_values
scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = missing
scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = missing
scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = missing
scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = missing
end
else
health_scc_in_utils = sum(
health_marginal_damages[i,r] / pc_consumption[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
ag_scc_in_utils = sum(
ag_marginal_damages[i,r] / pc_consumption[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
en_scc_in_utils = sum(
en_marginal_damages[i,r] / pc_consumption[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
slr_scc_in_utils = sum(
slr_marginal_damages[i,r] / pc_consumption[i,r]^dr.eta * 1/(1+dr.prtp)^(t-year)
for (i,t) in enumerate(_model_years), r in 1:n_regions if year<=t<=last_year
)
# sum up total utils for included sectors to calculate scc
total_utils =
(base[:DamageAggregator, :include_cromar_mortality] ? health_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_ag] ? ag_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_energy] ? en_scc_in_utils : 0.) +
(base[:DamageAggregator, :include_slr] ? slr_scc_in_utils : 0.)
normalization_region_index = findfirst(isequal(dr.ew_norm_region), dim_keys(base, spatial_key_name))
scc = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]^dr.eta * total_utils
scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc_values[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = total_utils
norm_cpc_values_ce[(region=:globe, sector=:total, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]
end
# sectoral
if options.compute_sectoral_values
scc = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]^dr.eta * health_scc_in_utils
scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]^dr.eta * ag_scc_in_utils
scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]^dr.eta * en_scc_in_utils
scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
scc = base[net_cpc_component_name, :net_cpc][year_index,normalization_region_index]^dr.eta * slr_scc_in_utils
scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = scc
if options.certainty_equivalent
intermediate_ce_scc_values[(region=:globe, sector=:cromar_mortality, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = health_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:agriculture, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = ag_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:energy, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = en_scc_in_utils
intermediate_ce_scc_values[(region=:globe, sector=:slr, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region)][trialnum] = slr_scc_in_utils
end
end
end
else
error("$(dr.ew) is not a valid option for equity weighting method, must be nothing, :gdp_region, :gdp_country, :consumption_region, or :consumption_country.")
end # end ew conditional
end # end discount rates loop
end
# Internal function to compute the SCC in a Monte Carlo Simulation
function _compute_scc_mcs(mm::MarginalModel,
n;
year::Int,
last_year::Int,
discount_rates,
certainty_equivalent::Bool,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
gas::Symbol,
save_list::Vector,
output_dir::String,
save_md::Bool,
save_cpc::Bool,
save_slr_damages::Bool,
compute_sectoral_values::Bool,
compute_disaggregated_values::Bool,
compute_domestic_values::Bool,
CIAM_foresight::Symbol,
CIAM_GDPcap::Bool,
post_mcs_creation_function,
pulse_size::Float64
)
models = [mm.base, mm.modified]
socioeconomics_module = _get_module_name(mm.base, :Socioeconomic)
if socioeconomics_module == :MimiSSPs
socioeconomics_source = :SSP
elseif socioeconomics_module == :MimiRFFSPs
socioeconomics_source = :RFF
end
Agriculture_gtap = _get_mooreag_gtap(mm.base)
mcs = get_mcs(n;
socioeconomics_source=socioeconomics_source,
mcs_years = _model_years,
fair_parameter_set = fair_parameter_set,
fair_parameter_set_ids = fair_parameter_set_ids,
rffsp_sampling = rffsp_sampling,
rffsp_sampling_ids = rffsp_sampling_ids,
save_list = save_list,
Agriculture_gtap = Agriculture_gtap
)
if post_mcs_creation_function!==nothing
post_mcs_creation_function(mcs)
end
regions = compute_domestic_values ? [:globe, :domestic] : [:globe]
sectors = compute_sectoral_values ? [:total, :cromar_mortality, :agriculture, :energy, :slr] : [:total]
if compute_disaggregated_values
streams = Dict()
streams["output_dir"] = output_dir
else
streams = nothing
end
# create a set of subdirectories for streaming spatially and sectorally
# disaggregated damages files - one per region
if compute_disaggregated_values
top_path = joinpath(output_dir, "results", "disaggregated_values")
# clear out streams folders
ispath(top_path) ? rm(top_path, recursive=true) : nothing
mkpath(joinpath(top_path, "damages_cromar_mortality"))
mkpath(joinpath(top_path, "damages_energy"))
mkpath(joinpath(top_path, "damages_agriculture"))
mm.base[:DamageAggregator, :include_slr] && mkpath(joinpath(top_path, "damages_slr")) # slr only if we are including sea level rise
mkpath(joinpath(top_path, "socioeconomics_country"))
mkpath(joinpath(top_path, "socioeconomics_region"))
mkpath(joinpath(top_path, "mds_country_no_ag"))
mkpath(joinpath(top_path, "mds_region_ag_only"))
# DataFrames with metadata
DataFrame( :variable => [:damages, :md, :population, :pc_gdp],
:units => ["USD 2005", "USD 2005 per tonne of CO2", "millions of persons", "USD 2005 per capita"],
:notes => ["baseline run", "difference between pulse run and baseline run", "baseline run", "baseline run"]
) |> save(joinpath(top_path, "disaggregated_values_README.csv"))
end
scc_values = Dict((region=r, sector=s, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region) => Vector{Union{Float64, Missing}}(undef, n) for dr in discount_rates, r in regions, s in sectors)
intermediate_ce_scc_values = certainty_equivalent ? Dict((region=r, sector=s, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region) => Vector{Float64}(undef, n) for dr in discount_rates, r in regions, s in sectors) : nothing
norm_cpc_values_ce = certainty_equivalent ? Dict((region=r, sector=s, dr_label=dr.label, prtp=dr.prtp, eta=dr.eta, ew=dr.ew, ew_norm_region=dr.ew_norm_region) => Vector{Float64}(undef, n) for dr in discount_rates, r in regions, s in sectors) : nothing
md_values = save_md ? Dict((region=r, sector=s) => Array{Float64}(undef, n, length(_damages_years)) for r in regions, s in sectors) : nothing
cpc_values = save_cpc ? Dict((region=r, sector=s) => Array{Float64}(undef, n, length(_damages_years)) for r in [:globe], s in [:total]) : nothing # just global and total for now
if save_slr_damages
# global
slr_damages = Dict(
:base => Array{Float64}(undef, n, length(_damages_years)),
:modified => Array{Float64}(undef, n, length(_damages_years)),
:base_lim_cnt => Array{Float64}(undef, n, length(_damages_years), 145), # 145 CIAM countries
:modified_lim_cnt => Array{Float64}(undef, n, length(_damages_years), 145), # 145 CIAM countries
:base_segments_2100 => Array{Float64}(undef, n, 11835) # 11,835 segments
)
# domestic
# optionally add arrays to hold the domestic base and modified damages
if compute_domestic_values
slr_damages[:base_domestic] = Array{Float64}(undef, n, length(_damages_years))
slr_damages[:modified_domestic] = Array{Float64}(undef, n, length(_damages_years))
end
else
slr_damages = nothing
end
ciam_base, segment_fingerprints = get_ciam(mm.base)
ciam_modified, _ = get_ciam(mm.base)
ciam_base = Mimi.build(ciam_base)
ciam_modified = Mimi.build(ciam_modified)
# set some computation options
options = (
compute_sectoral_values=compute_sectoral_values,
compute_disaggregated_values=compute_disaggregated_values,
compute_domestic_values=compute_domestic_values,
save_md=save_md,
save_cpc=save_cpc,
save_slr_damages=save_slr_damages,
CIAM_foresight=CIAM_foresight,
CIAM_GDPcap=CIAM_GDPcap,
certainty_equivalent=certainty_equivalent,
pulse_size=pulse_size
)
payload = [scc_values, intermediate_ce_scc_values, norm_cpc_values_ce, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, streams, options]
Mimi.set_payload2!(mcs, payload)
# Run all model years even if a shorter last_year is requested - this runs
# unnecessary timesteps but simplifies accumulation
sim_results = run(mcs,
models,
n,
post_trial_func = post_trial_func,
results_in_memory = false,
results_output_dir = "$output_dir/results"
)
# unpack the payload object
scc_values, intermediate_ce_scc_values, norm_cpc_values_ce, md_values, cpc_values, slr_damages, year, last_year, discount_rates, gas, ciam_base, ciam_modified, segment_fingerprints, streams, options = Mimi.payload2(sim_results)
if !isnothing(streams)
delete!(streams, "output_dir")
close.(values(streams)) # use broadcasting to close all streams
end
# Write out the slr damages to disk in the same place that variables from the save_list would be written out
if save_slr_damages
isdir("$output_dir/results/model_1") || mkpath("$output_dir/results/model_1")
isdir("$output_dir/results/model_2") || mkpath("$output_dir/results/model_2")
# global
df = DataFrame(slr_damages[:base], :auto) |>
i -> rename!(i, Symbol.(_damages_years)) |>
i -> insertcols!(i, 1, :trialnum => 1:n) |>
i -> stack(i, Not(:trialnum)) |>
i -> rename!(i, [:trialnum, :time, :slr_damages]) |>
save("$output_dir/results/model_1/slr_damages.csv")
df = DataFrame(slr_damages[:modified], :auto) |>
i -> rename!(i, Symbol.(_damages_years)) |>
i -> insertcols!(i, 1, :trialnum => 1:n) |>
i -> stack(i, Not(:trialnum)) |>
i -> rename!(i, [:trialnum, :time, :slr_damages]) |>
save("$output_dir/results/model_2/slr_damages.csv")
segments = Symbol.(dim_keys(ciam_base, :segments))
df = DataFrame(slr_damages[:base_segments_2100], :auto) |>
i -> rename!(i, segments) |>
i -> insertcols!(i, 1, :trialnum => 1:n) |>
i -> stack(i, Not(:trialnum)) |>
i -> rename!(i, [:trialnum, :segment, :slr_damages_2100]) |>
save("$output_dir/results/model_1/slr_damages_2100_by_segment.csv")
# domestic
if compute_domestic_values
df = DataFrame(slr_damages[:base_domestic], :auto) |>
i -> rename!(i, Symbol.(_damages_years)) |>
i -> insertcols!(i, 1, :trialnum => 1:n) |>
i -> stack(i, Not(:trialnum)) |>
i -> rename!(i, [:trialnum, :time, :slr_damages_domestic]) |>
save("$output_dir/results/model_1/slr_damages_domestic.csv")
df = DataFrame(slr_damages[:modified_domestic], :auto) |>
i -> rename!(i, Symbol.(_damages_years)) |>
i -> insertcols!(i, 1, :trialnum => 1:n) |>
i -> stack(i, Not(:trialnum)) |>
i -> rename!(i, [:trialnum, :time, :slr_damages_domestic]) |>
save("$output_dir/results/model_2/slr_damages_domestic.csv")
end
ciam_country_names = Symbol.(dim_keys(ciam_base, :ciam_country))
df = DataFrame(:trialnum => [], :time => [], :country => [], :capped_flag => [])
for trial in 1:n # loop over trials
trial_df = DataFrame(slr_damages[:base_lim_cnt][trial,:,:], :auto) |>
i -> rename!(i, ciam_country_names) |>
i -> insertcols!(i, 1, :time => _damages_years) |>
i -> stack(i, Not(:time)) |>
i -> insertcols!(i, 1, :trialnum => fill(trial, length(_damages_years) * 145)) |>
i -> rename!(i, [:trialnum, :time, :country, :capped_flag]) |>
i -> @filter(i, _.capped_flag == 1) |>
DataFrame
append!(df, trial_df)
end
df |> save("$output_dir/results/slr_damages_base_lim_counts.csv")
df = DataFrame(:trialnum => [], :time => [], :country => [], :capped_flag => [])
ciam_country_names = Symbol.(dim_keys(ciam_base, :ciam_country))
for trial in 1:n # loop over trials
trial_df = DataFrame(slr_damages[:modified_lim_cnt][trial,:,:], :auto) |>
i -> rename!(i, ciam_country_names) |>
i -> insertcols!(i, 1, :time => _damages_years) |>
i -> stack(i, Not(:time)) |>
i -> insertcols!(i, 1, :trialnum => fill(trial, length(_damages_years) * 145)) |>
i -> rename!(i, [:trialnum, :time, :country, :capped_flag]) |>
i -> @filter(i, _.capped_flag == 1) |>
DataFrame
append!(df, trial_df)
end
df |> save("$output_dir/results/slr_damages_modified_lim_counts.csv")
end
# Construct the returned result object
result = Dict()
# add an :scc dictionary, where keys are NamedTuples with fields (region, sector, dr_label, prtp, eta, ew, ew_norm_region) and values are NamedTuples holding summary statistics and the n-element vector of trial SCC values
result[:scc] = Dict()
for (k,v) in scc_values
if certainty_equivalent
# In this case the normalization from utils to $ hasn't happened in the post trial function
# and instead we now do this here, based on expected per capita consumption in the year
# of the marginal emission pulse
cpc_in_year_of_emission = norm_cpc_values_ce[k]
expected_mu_in_year_of_emission = mean(1 ./ (cpc_in_year_of_emission .^ k.eta))
result[:scc][k] = (
expected_scc = mean(v),
se_expected_scc = std(v) / sqrt(n),
ce_scc = mean(intermediate_ce_scc_values[k]) ./ expected_mu_in_year_of_emission,
ce_sccs= intermediate_ce_scc_values[k] ./ expected_mu_in_year_of_emission,
sccs = v,
)
else
result[:scc][k] = (
expected_scc = mean(skipmissing(v)),
se_expected_scc = std(skipmissing(v)) / sqrt(n),
sccs = v
)
end
end
# add a :mds dictionary, where keys are NamedTuples with fields (region, sector) and values are n x 281 (2020:2300) matrices
if save_md
result[:mds] = Dict()
for (k,v) in md_values
result[:mds][k] = v
end
end
# add a :cpc dictionary, where keys are NamedTuples with fields (region, sector) and values are n x 281 (2020:2300) matrices
if save_cpc
result[:cpc] = Dict()
for (k,v) in cpc_values
result[:cpc][k] = v
end
end
return result
end
function _compute_ciam_marginal_damages(base, modified, gas, ciam_base, ciam_modified, segment_fingerprints; CIAM_foresight, CIAM_GDPcap, pulse_size)
gas_units_multiplier = scc_gas_molecular_conversions[gas] ./ (scc_gas_pulse_size_conversions[gas] .* pulse_size) # adjust for the (1) molecular mass and (2) pulse size
update_ciam!(ciam_base, base, segment_fingerprints)
update_ciam!(ciam_modified, modified, segment_fingerprints)
run(ciam_base)
run(ciam_modified)
# Adjust to use perfect foresight if CIAM_foresight == :perfect
if CIAM_foresight == :perfect
OptimalCost_base = compute_PerfectForesight_OptimalCosts(ciam_base)
OptimalCost_modified = compute_PerfectForesight_OptimalCosts(ciam_modified)
elseif CIAM_foresight == :limited
OptimalCost_base = ciam_base[:slrcost, :OptimalCost]
OptimalCost_modified = ciam_modified[:slrcost, :OptimalCost]
else
error("CIAM_foresight must be either :limited or :perfect.")
end
# Aggregate to Country-Level Damages
# Obtain a key mapping segment ids to ciam country ids, both of which
# line up with the orders of dim_keys of ciam_base
# IMPORTANT: the segment ID here refers to the row, or equivalently
# the index of the segment in a CIAM model created using MimiGIVE.get_ciam(),
# NOT the segid field in many of the key files
xsc = ciam_base[:slrcost, :xsc]::Dict{Int, Tuple{Int, Int, Int}}
ciam_country_mapping = DataFrame(:segment_id => collect(keys(xsc)), :ciam_country_id => first.(collect(values(xsc))))
num_ciam_countries = length(dim_keys(ciam_base, :ciam_country))
OptimalCost_base_country = Array{Float64}(undef, length(_damages_years), num_ciam_countries)
OptimalCost_modified_country = Array{Float64}(undef, length(_damages_years), num_ciam_countries)
for country in 1:num_ciam_countries # 145 consecutive Region IDs mapping to the 145 countries in ciam_base dimension ciam_country
rows = [findall(i -> i == country, ciam_country_mapping.ciam_country_id)...] # rows of the mapping DataFrame that have this ciam country
matching_segment_ids = [ciam_country_mapping.segment_id[rows]...] # the actual segment IDs that map to this ciam country
base_damages = sum(view(OptimalCost_base, :, matching_segment_ids), dims=2)
OptimalCost_base_country[:, country] = [repeat(base_damages[1:end-1], inner=10); base_damages[end]] # repeat to annual from decadal
modified_damages = sum(view(OptimalCost_modified, :, matching_segment_ids), dims=2)
OptimalCost_modified_country[:, country] = [repeat(modified_damages[1:end-1], inner=10); modified_damages[end]] # repeat to annual from decadal
end
# Limit Country-Level Sea Level Rise Damages to Country-Level GDP
if CIAM_GDPcap
# Obtain annual country-level GDP, select 2020:2300 and CIAM countries, convert from $2005 to $2010 to match CIAM
gdp = base[:Socioeconomic, :gdp][_damages_idxs, indexin(dim_keys(ciam_base, :ciam_country), dim_keys(base, :country))] ./ pricelevel_2010_to_2005
base_lim_cnt = Int64.(OptimalCost_base_country .> gdp)
modified_lim_cnt = Int64.(OptimalCost_modified_country .> gdp)
OptimalCost_base_country = min.(OptimalCost_base_country, gdp)
OptimalCost_modified_country = min.(OptimalCost_modified_country, gdp)
else
base_lim_cnt = fill(0., length(_damages_years), num_ciam_countries)
modified_lim_cnt = fill(0., length(_damages_years), num_ciam_countries)
end
# domestic
domestic_countries = ["USA", "PRI"] # Country ISO3 codes to be accumulated for domestic
domestic_idxs = indexin(domestic_countries, dim_keys(ciam_base, :ciam_country))
damages_base_domestic = vec(sum(OptimalCost_base_country[:,domestic_idxs],dims=2)) .* pricelevel_2010_to_2005 # Unit of CIAM is billion USD $2010, convert to billion USD $2005
damages_modified_domestic = vec(sum(OptimalCost_modified_country[:,domestic_idxs],dims=2)) .* pricelevel_2010_to_2005 # Unit of CIAM is billion USD $2010, convert to billion USD $2005
damages_marginal_domestic = (damages_modified_domestic .- damages_base_domestic) .* gas_units_multiplier # adjust for the (1) molecular mass and (2) pulse size
damages_marginal_domestic = damages_marginal_domestic .* 1e9 # Unit at this point is billion USD $2005, we convert to USD here
# global
damages_base = vec(sum(OptimalCost_base_country,dims=2)) .* pricelevel_2010_to_2005 # Unit of CIAM is billion USD $2010, convert to billion USD $2005
damages_modified = vec(sum(OptimalCost_modified_country,dims=2)) .* pricelevel_2010_to_2005 # Unit of CIAM is billion USD $2010, convert to billion USD $2005
damages_marginal = (damages_modified .- damages_base) .* gas_units_multiplier # adjust for the (1) molecular mass and (2) pulse size
damages_marginal = damages_marginal .* 1e9 # Unit at this point is billion USD $2005, we convert to USD here
# country
damages_marginal_country = (OptimalCost_modified_country .- OptimalCost_base_country) .* pricelevel_2010_to_2005 .* gas_units_multiplier # Adjust for the (1) price level (2) molecular mass and (3) pulse size
damages_marginal_country = damages_marginal_country .* 1e9 # Unit at this point is billion USD $2005, we convert to USD here
# CIAM starts in 2020 so pad with zeros at the beginning
return (globe = [fill(0., 2020 - _model_years[1]); damages_marginal], # USD $2005
domestic = [fill(0., 2020 - _model_years[1]); damages_marginal_domestic], # USD $2005
country = [fill(0., 2020 - _model_years[1], num_ciam_countries); damages_marginal_country], # USD $2005
damages_base = [fill(0., 2020 - _model_years[1]); damages_base], # billion USD $2005
damages_modified = [fill(0., 2020 - _model_years[1]); damages_modified], # billion USD $2005
damages_base_domestic = [fill(0., 2020 - _model_years[1]); damages_base_domestic], # billion USD $2005
damages_modified_domestic = [fill(0., 2020 - _model_years[1]); damages_modified_domestic], # billion USD $2005
base_lim_cnt = base_lim_cnt, # 2020:2300 x countries
modified_lim_cnt = modified_lim_cnt, # 2020:2300 x countries
damages_base_segments_2100 = OptimalCost_base[9, :] .* pricelevel_2010_to_2005, # billion USD $2005, 2100 is index 9 in 2020:10:2300, this is uncapped segment-level baseline damages in 2100
damages_base_country = OptimalCost_base_country .* pricelevel_2010_to_2005 # Unit of CIAM is billion USD $2010, convert to billion USD $2005
)
end
"""
get_marginal_model(m::Model; year::Union{Int, Nothing} = nothing, gas::Symbol, pulse_size::Float64)
Create a Mimi MarginalModel where the provided m is the base model, and the marginal
model has additional emissions in year `year`. The marginal model will have an additional
`pulse_size` of emissions in the specified `year` for gas `gas`, which will be in
units of GtC for CO2, MtN2 for N2O, and MtCH4 for CH4. If no Model m is provided,
the default model from MimiGIVE.get_model() is used as the base model.
"""
function get_marginal_model(m::Model; year::Union{Int, Nothing} = nothing, gas::Symbol, pulse_size::Float64)
year === nothing ? error("Must specify an emission year. Try `get_marginal_model(m, year=2020)`.") : nothing
!(year in _model_years) ? error("Cannot add marginal emissions in $year, year must be within the model's time index $_model_years.") : nothing
# NOTE: the pulse size will be used as the `delta` parameter for
# the `MarginalModel` and, thus, allow computation of the SCC to return units of
# dollars per ton, as long as `pulse_size` is interpreted as baseline units
# of the given gas, which is units of GtC for CO2, MtN2 for N2O, and MtCH4 for CH4.
mm = create_marginal_model(m, scc_gas_pulse_size_conversions[gas] .* pulse_size)
add_marginal_emissions!(mm.modified, year, gas, pulse_size)
return mm
end
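#=
Illustrative usage sketch (not part of the package; the pulse size here is
hypothetical): create a marginal model from the default MimiGIVE model and run
both the base and pulsed versions.

    m = MimiGIVE.get_model()
    mm = get_marginal_model(m; year=2020, gas=:CO2, pulse_size=0.0001)
    run(mm)
    # Indexing the MarginalModel, e.g. mm[:DamageAggregator, :total_damage],
    # returns (modified - base) / delta per Mimi's MarginalModel conventions.
=#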
"""
add_marginal_emissions!(m::Model, year::Int, gas::Symbol, pulse_size::Float64)
Add a marginal emission component to model `m`, adding `pulse_size` additional
emissions in the specified `year` for gas `gas`, in units of GtC
for CO2, MtN2 for N2O, MtCH4 for CH4, and kt for HFCs.
"""
function add_marginal_emissions!(m::Model, year::Int, gas::Symbol, pulse_size::Float64)
time = Mimi.dim_keys(m, :time)
pulse_year_index = findfirst(i -> i == year, time)
hfc_list = [:HFC23, :HFC32, :HFC43_10, :HFC125, :HFC134a, :HFC143a, :HFC227ea, :HFC245fa]
if gas == :CO2
add_comp!(m, Mimi.adder, :marginalemission, before=:co2_cycle)
addem = zeros(length(time))
addem[pulse_year_index] = pulse_size # GtC in this year
set_param!(m, :marginalemission, :add, addem)
connect_param!(m, :marginalemission => :input, :co2_emissions_identity => :output_co2)
connect_param!(m, :co2_cycle => :E_co2, :marginalemission => :output)
elseif gas == :CH4
add_comp!(m, Mimi.adder, :marginalemission, before=:ch4_cycle)
addem = zeros(length(time))
addem[pulse_year_index] = pulse_size # MtCH4 in this year
set_param!(m, :marginalemission, :add, addem)
connect_param!(m, :marginalemission => :input, :ch4_emissions_identity => :output_ch4)
connect_param!(m, :ch4_cycle => :fossil_emiss_CH₄, :marginalemission => :output)
elseif gas == :N2O
add_comp!(m, Mimi.adder, :marginalemission, before=:n2o_cycle)
addem = zeros(length(time))
addem[pulse_year_index] = pulse_size # MtN2 in this year
set_param!(m, :marginalemission, :add, addem)
connect_param!(m, :marginalemission => :input, :n2o_emissions_identity => :output_n2o)
connect_param!(m, :n2o_cycle => :fossil_emiss_N₂O, :marginalemission => :output)
elseif gas in hfc_list
# get gas index
other_ghg_gases = Mimi.dim_keys(m, :other_ghg)
gas_index = findfirst(i -> i == gas, Symbol.(other_ghg_gases))
# perturb hfc emissions
# for now this will return :emiss_other_ghg because it is treated as a
# shared parameter in MimiFAIRv1_6_2, and thus also in this model, but this
# line keeps us robust if it becomes an unshared parameter.
model_param_name = Mimi.get_model_param_name(m, :other_ghg_cycles, :emiss_other_ghg)
# obtain the base emissions values from the model - the following line
# allows us to do so without running the model. If we had run the model,
# we could use deepcopy(m[:other_ghg_cycles, :emiss_other_ghg])
new_emissions = deepcopy(Mimi.model_param(m, model_param_name).values.data)
# update emissions parameter with a pulse
new_emissions[pulse_year_index, gas_index] += pulse_size # add pulse in kt hfc
update_param!(m, :emiss_other_ghg, new_emissions)
else
error("Gas `" + gas + "` is not supported.")
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 4483 | using Mimi
# Aggregate from countries to FUND regions using sum function
@defcomp Agriculture_RegionAggregatorSum begin
ag_mapping_input_regions = Index()
ag_mapping_output_regions = Index()
input_output_mapping = Parameter{String}(index=[ag_mapping_input_regions]) # one element per input region containing its corresponding output region
input_output_mapping_int = Variable{Int}(index=[ag_mapping_input_regions]) # internally computed for speed up
input_region_names = Parameter{Vector{String}}(index=[ag_mapping_input_regions])
output_region_names = Parameter{Vector{String}}(index=[ag_mapping_output_regions])
input = Parameter(index=[time, ag_mapping_input_regions])
output = Variable(index=[time, ag_mapping_output_regions])
function init(p,v,d)
idxs = indexin(p.input_output_mapping, p.output_region_names)
!isnothing(findfirst(i -> isnothing(i), idxs)) ? error("All provided region names in the Agriculture_RegionAggregatorSum's input_output_mapping Parameter must exist in the output_region_names Parameter.") : nothing
v.input_output_mapping_int[:] = idxs
end
function run_timestep(p,v,d,t)
v.output[t, :] .= 0.
for i in d.ag_mapping_input_regions
v.output[t, v.input_output_mapping_int[i]] += p.input[t,i]
end
end
end
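#=
Minimal sketch of the aggregation pattern used above (hypothetical data):
`indexin` maps each input region to the position of its output region, and
inputs are then accumulated by that integer index.

    input_regions = ["USA", "CAN", "DEU", "FRA"]
    output_regions = ["NAM", "EUR"]
    mapping = ["NAM", "NAM", "EUR", "EUR"]          # one output region per input region
    mapping_int = indexin(mapping, output_regions)  # [1, 1, 2, 2]
    input = [1.0, 2.0, 3.0, 4.0]
    output = zeros(length(output_regions))
    for i in eachindex(input)
        output[mapping_int[i]] += input[i]
    end
    # output == [3.0, 7.0]
=#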
# Version of above with no time dimension
@defcomp Agriculture_RegionAggregatorSum_NoTime begin
ag_mapping_input_regions = Index()
ag_mapping_output_regions = Index()
input_output_mapping = Parameter{String}(index=[ag_mapping_input_regions]) # one element per input region containing its corresponding output region
input_output_mapping_int = Variable{Int}(index=[ag_mapping_input_regions]) # internally computed for speed up
input_region_names = Parameter{Vector{String}}(index=[ag_mapping_input_regions])
output_region_names = Parameter{Vector{String}}(index=[ag_mapping_output_regions])
input = Parameter(index=[ag_mapping_input_regions])
output = Variable(index=[ag_mapping_output_regions])
function init(p,v,d)
idxs = indexin(p.input_output_mapping, p.output_region_names)
!isnothing(findfirst(i -> isnothing(i), idxs)) ? error("All provided region names in the Agriculture_RegionAggregatorSum's input_output_mapping Parameter must exist in the output_region_names Parameter.") : nothing
v.input_output_mapping_int[:] = idxs
# can simply fill in the data here because there is no time dimension
v.output[:] .= 0.
for i in d.ag_mapping_input_regions
v.output[v.input_output_mapping_int[i]] += p.input[i]
end
end
function run_timestep(p,v,d,t)
# blank
end
end
# Component to disaggregate the agricultural damages from agriculture regions (FUND
# regions) to individual ISO3 countries
@defcomp AgricultureDamagesDisaggregator begin
ag_mapping_input_regions = Index()
ag_mapping_output_regions = Index()
# Mapping
mapping = Parameter{String}(index=[ag_mapping_input_regions]) # one element per country containing its corresponding region
mapping_int = Variable{Int}(index=[ag_mapping_input_regions]) # internally computed for speed up
fund_region_names = Parameter{Vector{String}}(index=[ag_mapping_output_regions])
# GDP input
gdp_country = Parameter(index=[time, ag_mapping_input_regions], unit="billion US\$2005/yr")
gdp_fund_region = Parameter(index=[time, ag_mapping_output_regions], unit="billion US\$2005/yr")
# Damages input
damages_ag_fund_region = Parameter(index=[time, ag_mapping_output_regions])
# Disaggregation
gdp_share = Variable(index=[time, ag_mapping_input_regions]) # share of region's GDP in a given country in a given year
damages_ag_country = Variable(index=[time, ag_mapping_input_regions])
function init(p,v,d)
idxs = indexin(p.mapping, p.fund_region_names)
!isnothing(findfirst(i -> isnothing(i), idxs)) ? error("All provided region names in the AgricultureDamagesDisaggregator's mapping Parameter must exist in the region_names Parameter.") : nothing
v.mapping_int[:] = idxs
end
function run_timestep(p,v,d,t)
for c in d.country
v.gdp_share[t,c] = p.gdp_country[t, c] / p.gdp_fund_region[t, v.mapping_int[c]]
v.damages_ag_country[t,c] = p.damages_ag_fund_region[t, v.mapping_int[c]] * v.gdp_share[t,c]
end
end
end
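#=
Worked sketch of the GDP-share disaggregation above (hypothetical numbers):
a region's agricultural damages are split across its countries in proportion
to each country's share of regional GDP.

    gdp_country = [8.0, 2.0]        # two countries in one FUND region
    gdp_region = sum(gdp_country)   # 10.0
    damages_region = 5.0
    shares = gdp_country ./ gdp_region          # [0.8, 0.2]
    damages_country = damages_region .* shares  # [4.0, 1.0]
=#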
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 5828 | using Mimi
# Aggregate damages across damage functions
@defcomp DamageAggregator begin
fund_regions = Index()
country = Index()
energy_countries = Index()
domestic_countries = Index()
domestic_idxs_country_dim = Parameter{Int}(index=[domestic_countries])
domestic_idxs_energy_countries_dim = Parameter{Int}(index=[domestic_countries])
# internally compute for speed
domestic_idxs_country_dim_int = Variable{Int}(index=[domestic_countries])
domestic_idxs_energy_countries_dim_int = Variable{Int}(index=[domestic_countries])
# inclusion of different damages
# By default the individual sectoral damage calculations are ON, including
# SLR which runs after the main model, while global damage function calculations
# are OFF.
include_cromar_mortality = Parameter{Bool}(default=true)
include_ag = Parameter{Bool}(default=true)
include_slr = Parameter{Bool}(default=true)
include_energy = Parameter{Bool}(default=true)
include_dice2016R2 = Parameter{Bool}(default=false)
include_hs_damage = Parameter{Bool}(default=false)
damage_cromar_mortality = Parameter(index=[time,country], unit="US\$2005/yr")
damage_ag = Parameter(index=[time,fund_regions], unit="billion US\$2005/yr")
damage_ag_countries = Parameter(index=[time,country], unit="billion US\$2005/yr") # ag damages disaggregated via method in AgricultureDamagesDisaggregator
damage_energy = Parameter(index=[time,energy_countries], unit="billion US\$2005/yr")
damage_dice2016R2 = Parameter(index=[time], unit="billion US\$2005/yr")
damage_hs = Parameter(index=[time], unit="billion US\$2005/yr")
# damages aggregated by fund regions
damage_cromar_mortality_regions = Parameter(index=[time,fund_regions], unit="US\$2005/yr")
damage_energy_regions = Parameter(index=[time,fund_regions], unit="billion US\$2005/yr")
gdp = Parameter(index=[time,country], unit="billion US\$2005/yr")
total_damage = Variable(index=[time], unit="US\$2005/yr")
total_damage_regions = Variable(index=[time, fund_regions], unit="US\$2005/yr")
total_damage_countries = Variable(index=[time, country], unit="US\$2005/yr") # ag damages disaggregated via method in AgricultureDamagesDisaggregator
total_damage_share = Variable(index=[time])
total_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
# global annual aggregates - for interim model outputs and partial SCCs
cromar_mortality_damage = Variable(index=[time], unit="US\$2005/yr")
agriculture_damage = Variable(index=[time], unit="US\$2005/yr")
energy_damage = Variable(index=[time], unit="US\$2005/yr")
# domestic annual aggregates - for interim model outputs and partial SCCs
cromar_mortality_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
agriculture_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
energy_damage_domestic = Variable(index=[time], unit="US\$2005/yr")
function init(p,v,d)
# convert to integers for indexing - do once here for speed
v.domestic_idxs_country_dim_int[:] = Int.(p.domestic_idxs_country_dim)
v.domestic_idxs_energy_countries_dim_int[:] = Int.(p.domestic_idxs_energy_countries_dim)
end
function run_timestep(p, v, d, t)
# regional annual aggregates
for r in d.fund_regions
v.total_damage_regions[t,r] =
(p.include_cromar_mortality ? p.damage_cromar_mortality_regions[t,r] : 0.) +
(p.include_ag ? p.damage_ag[t,r] * 1e9 : 0.) +
(p.include_energy ? p.damage_energy_regions[t,r] * 1e9 : 0.)
end
# country level aggregates where ag damages disaggregated via method in
# AgricultureDamagesDisaggregator
num_countries = length(d.country)
v.total_damage_countries[t,:] =
(p.include_cromar_mortality ? p.damage_cromar_mortality[t,:] : fill(0., num_countries)) +
(p.include_ag ? p.damage_ag_countries[t,:] * 1e9 : fill(0., num_countries)) +
(p.include_energy ? p.damage_energy[t,:] * 1e9 : fill(0., num_countries))
# global annual aggregates - for interim model outputs and partial SCCs
v.cromar_mortality_damage[t] = sum(p.damage_cromar_mortality[t,:])
v.agriculture_damage[t] = sum(p.damage_ag[t,:]) * 1e9
v.energy_damage[t] = sum(p.damage_energy[t,:]) * 1e9
v.total_damage[t] =
(p.include_cromar_mortality ? v.cromar_mortality_damage[t] : 0.) +
(p.include_ag ? v.agriculture_damage[t] : 0.) +
(p.include_energy ? v.energy_damage[t] : 0.) +
(p.include_dice2016R2 ? p.damage_dice2016R2[t] * 1e9 : 0.) +
(p.include_hs_damage ? p.damage_hs[t] * 1e9 : 0.)
gdp = sum(p.gdp[t,:]) * 1e9
v.total_damage_share[t] = v.total_damage[t] / gdp
# domestic annual aggregates - for interim model outputs and partial SCCs
v.cromar_mortality_damage_domestic[t] = sum(p.damage_cromar_mortality[t, v.domestic_idxs_country_dim_int])
v.agriculture_damage_domestic[t] = p.damage_ag[t,1] * 1e9 # FUND region 1 is the USA
v.energy_damage_domestic[t] = sum(p.damage_energy[t, v.domestic_idxs_energy_countries_dim_int] * 1e9)
# Calculate domestic damages
v.total_damage_domestic[t] =
(p.include_cromar_mortality ? v.cromar_mortality_damage_domestic[t] : 0.) +
(p.include_ag ? v.agriculture_damage_domestic[t] : 0.) +
(p.include_energy ? v.energy_damage_domestic[t] : 0.)
end
end
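#=
Illustrative sketch (hedged, assumes a built MimiGIVE model `m`): the
`include_*` switches are ordinary Mimi parameters, so the aggregator can be
reconfigured before a run, e.g. to use the DICE2016R2 global damage function
instead of the sectoral damage functions.

    m = MimiGIVE.get_model()
    update_param!(m, :DamageAggregator, :include_cromar_mortality, false)
    update_param!(m, :DamageAggregator, :include_ag, false)
    update_param!(m, :DamageAggregator, :include_energy, false)
    update_param!(m, :DamageAggregator, :include_slr, false)
    update_param!(m, :DamageAggregator, :include_dice2016R2, true)
    run(m)
=#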
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1879 | using Mimi
# Component to support summing damages across fund regions -- used to support the
# DamageAggregator Component
@defcomp Damages_RegionAggregatorSum begin
ag_mapping_input_regions = Index()
ag_mapping_output_regions = Index()
input_output_mapping = Parameter{String}(index=[ag_mapping_input_regions]) # one element per input region containing its corresponding output region
input_output_mapping_int = Variable{Int}(index=[ag_mapping_input_regions]) # internally computed for speed up
input_region_names = Parameter{Vector{String}}(index=[ag_mapping_input_regions])
output_region_names = Parameter{Vector{String}}(index=[ag_mapping_output_regions])
damage_cromar_mortality = Parameter(index=[time,ag_mapping_input_regions], unit="US\$2005/yr")
damage_energy = Parameter(index=[time,ag_mapping_input_regions], unit="billion US\$2005/yr")
damage_cromar_mortality_regions = Variable(index=[time,ag_mapping_output_regions], unit="US\$2005/yr")
damage_energy_regions = Variable(index=[time,ag_mapping_output_regions], unit="billion US\$2005/yr")
function init(p,v,d)
idxs = indexin(p.input_output_mapping, p.output_region_names)
!isnothing(findfirst(i -> isnothing(i), idxs)) ? error("All provided region names in the Damages_RegionAggregatorSum's input_output_mapping Parameter must exist in the output_region_names Parameter.") : nothing
v.input_output_mapping_int[:] = idxs
end
function run_timestep(p,v,d,t)
v.damage_cromar_mortality_regions[t, :] .= 0.
v.damage_energy_regions[t, :] .= 0.
for i in d.ag_mapping_input_regions
v.damage_cromar_mortality_regions[t, v.input_output_mapping_int[i]] += p.damage_cromar_mortality[t,i]
v.damage_energy_regions[t, v.input_output_mapping_int[i]] += p.damage_energy[t,i]
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1186 | using Mimi
# Normalize global sea level rise (SLR) to a provided range of years
# template component to be used by any component needing SLR normalization
@defcomp GlobalSLRNorm begin
global_slr = Parameter(index=[time], unit = "m") # total sea level rise from all components (includes landwater storage for projection periods).
norm_range_start = Parameter() # the first year of the range of years to normalize to
norm_range_end = Parameter() # the last year of the range of years to normalize to
global_slr_norm = Variable(index=[time], unit = "m") # Global sea level rise deviation normalized to the new baseline (m).
global_slr_norm_range_mean = Variable(unit="m")
function run_timestep(p, v, d, t)
if gettime(t) == p.norm_range_end
t_values = TimestepValue.(collect(p.norm_range_start:1:p.norm_range_end)) # Mimi errors if you use a `:` to index with timesteps. This is a workaround for now.
v.global_slr_norm_range_mean = mean(p.global_slr[t_values])
end
if gettime(t) >= p.norm_range_end
v.global_slr_norm[t] = p.global_slr[t] - v.global_slr_norm_range_mean
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1194 | using Mimi
# Normalize global temperature to a provided range of years
# template component to be used by any component needing temperature normalization
@defcomp GlobalTempNorm begin
global_temperature = Parameter(index=[time], unit = "degC") # Global temperature deviation (°C).
norm_range_start = Parameter() # the first year of the range of years to normalize to
norm_range_end = Parameter() # the last year of the range of years to normalize to
global_temperature_norm = Variable(index=[time], unit = "degC") # Global temperature deviation normalized to the new baseline (°C).
global_temperature_norm_range_mean = Variable(unit="degC")
function run_timestep(p, v, d, t)
if gettime(t) == p.norm_range_end
t_values = TimestepValue.(collect(p.norm_range_start:1:p.norm_range_end)) # Mimi errors if you use a `:` to index with timesteps. This is a workaround for now.
v.global_temperature_norm_range_mean = mean(p.global_temperature[t_values])
end
if gettime(t) >= p.norm_range_end
v.global_temperature_norm[t] = p.global_temperature[t] - v.global_temperature_norm_range_mean
end
end
end
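#=
Worked sketch of the normalization logic above (hypothetical series): once the
end of the normalization window is reached, the window mean is stored and
subtracted from all subsequent values.

    using Statistics: mean
    temps = [0.8, 0.9, 1.0, 1.2, 1.5]     # hypothetical annual values
    norm_mean = mean(temps[1:3])          # window covers the first three years => 0.9
    normed = temps[3:end] .- norm_mean    # ≈ [0.1, 0.3, 0.6]
=#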
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 847 | using Mimi
# Accumulate the ocean heat content from BRICK over time
# Wong et al., 2017
@defcomp OceanHeatAccumulator begin
del_ohc = Parameter(index=[time], unit="J") # year over year Ocean heat content anomaly
del_ohc_accum = Variable(index=[time], unit="1e22 J") # accumulated Ocean heat content anomaly
function run_timestep(p, v, d, t)
# The BRICK TE component multiplies ocean heat by 1e22 because it assumes
# SNEASY units. FAIR ocean heat is already in units of 10^22, so this
# divides by 1e22 so it can be re-scaled again in the BRICK TE component.
if is_first(t)
v.del_ohc_accum[t] = 0. # FAIR won't provide del_ohc for first period so leave at 0.
else
v.del_ohc_accum[t] = v.del_ohc_accum[t-1] + (p.del_ohc[t] ./ 1e22)
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1343 | using Mimi
# Calculate the per capita GDP
@defcomp PerCapitaGDP begin
country = Index()
gdp = Parameter(index=[time, country], unit="billion US\$2005/yr")
population = Parameter(index=[time, country], unit="million")
pc_gdp = Variable(index=[time, country], unit = "US\$2005/yr/person") # Country-level per capita GDP ($/person).
global_pc_gdp = Variable(index=[time], unit = "US\$2005/yr/person")
function run_timestep(p, v, d, t)
# calculate global per capita gdp
v.global_pc_gdp[t] = sum(p.gdp[t,:]) / sum(p.population[t,:]) * 1e3
# calculate country level per capita gdp
for c in d.country
v.pc_gdp[t, c] = (p.gdp[t, c]) ./ (p.population[t, c]) * 1e3
end
end
end
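#=
Unit check for the `* 1e3` factor above (hypothetical numbers): GDP is in
billion US$2005/yr and population in millions of persons, so

    gdp = 20_000.0           # billion US$/yr
    pop = 8_000.0            # million persons
    pc  = gdp / pop * 1e3    # 2500.0 US$/person/yr, since 1e9 / 1e6 = 1e3
=#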
@defcomp RegionalPerCapitaGDP begin
fund_regions = Index()
gdp = Parameter(index=[time, fund_regions], unit="billion US\$2005/yr")
population = Parameter(index=[time, fund_regions], unit="million")
pc_gdp = Variable(index=[time, fund_regions], unit = "US\$2005/yr/person") # Region-level per capita GDP ($/person).
function run_timestep(p, v, d, t)
# calculate region level per capita gdp
for r in d.fund_regions
v.pc_gdp[t, r] = (p.gdp[t, r]) ./ (p.population[t, r]) * 1e3
end
end
end | MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 788 | using Mimi
# Calculate the value of a statistical life
# follows equations from FUND
@defcomp VSL begin
country = Index()
α = Parameter(unit = "US\$2005") # VSL scaling parameter
ϵ = Parameter() # Income elasticity of the value of a statistical life.
y₀ = Parameter(unit = "US\$2005") # Normalization constant.
pc_gdp = Parameter(index=[time, country], unit = "US\$2005/yr/person") # Country-level per capita GDP ($/person).
vsl = Variable(index=[time, country], unit = "US\$2005/yr") # Value of a statistical life ($).
function run_timestep(p, v, d, t)
for c in d.country
v.vsl[t,c] = p.α * (p.pc_gdp[t,c] / p.y₀) ^ p.ϵ
end
end
end
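#=
Worked sketch of the VSL scaling above (hypothetical parameter values): the
VSL scales with per capita income raised to the income elasticity ϵ.

    α  = 9.0e6     # US$ at the normalization income y₀
    y₀ = 5.0e4     # US$/person/yr
    ϵ  = 1.0
    pc_gdp = 2.5e4
    vsl = α * (pc_gdp / y₀)^ϵ   # 4.5e6: half the income means half the VSL when ϵ = 1
=#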
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 2567 | using Mimi
# Calculate temperature mortality damages
# Cromar et al. 2021
@defcomp cromar_mortality_damages begin
country = Index() # Index for countries in the regions used for the Cromar et al. temperature-mortality damage functions.
β_mortality = Parameter(index=[country]) # Coefficient relating global temperature to change in mortality rates.
baseline_mortality_rate = Parameter(index=[time, country], unit = "deaths/1000 persons/yr") # Crude death rate in a given country (deaths per 1,000 population).
temperature = Parameter(index=[time], unit="degC") # Global average surface temperature anomaly relative to pre-industrial (°C).
population = Parameter(index=[time, country], unit="million") # Population in a given country (millions of persons).
vsl = Parameter(index=[time, country], unit="US\$2005/yr") # Value of a statistical life ($).
mortality_change = Variable(index=[time, country]) # Change in a country's baseline mortality rate due to combined effects of cold and heat (with positive values indicating increasing mortality rates).
mortality_costs = Variable(index=[time, country], unit="US\$2005/yr") # Costs of temperature mortality based on the VSL ($).
excess_death_rate = Variable(index=[time, country], unit = "deaths/1000 persons/yr") # Change in a country's baseline death rate due to combined effects of cold and heat (additional deaths per 1,000 population).
excess_deaths = Variable(index=[time, country], unit="persons") # Additional deaths that occur in a country due to the combined effects of cold and heat (individual persons).
function run_timestep(p, v, d, t)
for c in d.country
# Calculate change in a country's baseline mortality rate due to combined effects of heat and cold.
v.mortality_change[t,c] = p.β_mortality[c] * p.temperature[t]
# Calculate additional deaths per 1,000 population due to cold and heat.
v.excess_death_rate[t,c] = p.baseline_mortality_rate[t,c] * v.mortality_change[t,c]
# Calculate additional deaths that occur due to cold and heat (assumes population units in millions of persons so converts to thousands to match deathrates units).
v.excess_deaths[t,c] = (p.population[t,c] .* 1000) * v.excess_death_rate[t,c]
# Multiply excess deaths by the VSL.
v.mortality_costs[t,c] = p.vsl[t,c] * v.excess_deaths[t,c]
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 699 | using Mimi
# Calculate global damages
# Nordhaus, 2017
@defcomp dice2016R2_damage begin
country = Index()
temperature = Parameter(index=[time], unit="degC")
gdp = Parameter(index=[time, country])
a2 = Parameter(default=0.00236)
damfrac = Variable(index=[time])
damages = Variable(index=[time])
function run_timestep(p, v, d, t)
if p.temperature[t] < 0.
v.damfrac[t] = 0.
else
v.damfrac[t] = 1 - (1/(1+(p.a2 * p.temperature[t]^2))) # bounding transform keeps damages below 100% of GDP, only relevant for bad draws of the mcs.
end
v.damages[t] = v.damfrac[t] * sum(p.gdp[t,:])
end
end
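#=
Worked sketch of the bounding transform above: for the default a2 = 0.00236
the damage fraction stays strictly below 1 even for extreme temperature draws.

    a2 = 0.00236
    damfrac(T) = 1 - 1 / (1 + a2 * T^2)
    damfrac(3.0)    # ≈ 0.021, about 2.1% of GDP
    damfrac(20.0)   # ≈ 0.486, still bounded below 100%
=#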
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1395 | using Mimi
# Calculate energy-use damages
# Clarke et al. 2018
@defcomp energy_damages begin
energy_countries = Index() # Index for countries in the GCAM regions used for energy damage functions.
β_energy = Parameter(index=[energy_countries]) # Coefficient relating global temperature to change in energy expenditures as a share of GDP.
gdp = Parameter(index=[time, energy_countries], unit="billion US\$2005/yr") # Country-level GDP (billions US $2005 / yr").
temperature = Parameter(index=[time], unit="degC") # Global average surface temperature anomaly relative to pre-industrial (°C).
energy_costs_dollar = Variable(index=[time, energy_countries], unit="billion US\$2005/yr") # Change in energy expenditures in dollars (billions US $2005 / yr).
energy_costs_share = Variable(index=[time, energy_countries]) # Change in energy expenditures as a share of GDP (Δ gdp share / °C).
function run_timestep(p, v, d, t)
for c in d.energy_countries
# Calculate additional energy expenditures as a share of GDP (coefficient gives percentages, so divide by 100 to get share).
v.energy_costs_share[t,c] = p.β_energy[c] * p.temperature[t] / 100.0
# Calculate additional energy expenditures in monetary terms.
v.energy_costs_dollar[t,c] = v.energy_costs_share[t,c] * p.gdp[t,c]
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 3038 | using Mimi
# Calculate global damages
# Howard, P. H., & Sterner, T. (2017)
@defcomp hs_damage begin
country = Index()
temperature = Parameter(index=[time], unit="degC")
gdp = Parameter(index=[time, country])
effects = Parameter{Symbol}(default = :base)
add25pct = Parameter{Bool}(default = false)
specification = Parameter{Int64}(default = 7)
t2_base_3 = Parameter{Float64}(default = 0.595382733860703)
t2_prod_3 = Parameter{Float64}(default = 0.)
t2_cat_3 = Parameter{Float64}(default = 0.259851128136597)
t2_base_4 = Parameter{Float64}(default = 0.595382733860703)
t2_prod_4 = Parameter{Float64}(default = 0.113324887895228)
t2_cat_4 = Parameter{Float64}(default = 0.259851128136597)
t2_base_7 = Parameter{Float64}(default = 0.318149737017145)
t2_prod_7 = Parameter{Float64}(default = 0.)
t2_cat_7 = Parameter{Float64}(default = 0.362274271711041)
t2_base_8 = Parameter{Float64}(default = 0.318149737017145)
t2_prod_8 = Parameter{Float64}(default = 0.398230480262918)
t2_cat_8 = Parameter{Float64}(default = 0.362274271711041)
t2 = Variable()
t2_base = Variable()
t2_prod = Variable()
t2_cat = Variable()
damfrac = Variable(index=[time])
damages = Variable(index=[time])
function run_timestep(p, v, d, t)
if p.specification == 3
v.t2_base = p.t2_base_3
v.t2_prod = p.t2_prod_3
v.t2_cat = p.t2_cat_3
elseif p.specification == 4
v.t2_base = p.t2_base_4
v.t2_prod = p.t2_prod_4
v.t2_cat = p.t2_cat_4
elseif p.specification == 7
v.t2_base = p.t2_base_7
v.t2_prod = p.t2_prod_7
v.t2_cat = p.t2_cat_7
elseif p.specification == 8
v.t2_base = p.t2_base_8
v.t2_prod = p.t2_prod_8
v.t2_cat = p.t2_cat_8
else
error("Invalid effects argument of p.hs_specification")
end
# effects options
if p.effects == :base
v.t2 = v.t2_base
elseif p.effects == :productivity
(p.specification == 3 || p.specification == 7) ? error("Invalid effects argument :productivity, this effect is not estimated in the Howard and Sterner (2017) specification $(p.specification).") : (v.t2 = v.t2_base + v.t2_prod)
elseif p.effects == :catastrophic
v.t2 = v.t2_base + v.t2_cat
elseif p.effects == :total
v.t2 = v.t2_base + v.t2_prod + v.t2_cat
else
error("Invalid effects argument of p.effects.")
end
# 25 percent adder option
v.t2 = p.add25pct ? v.t2*1.25 : v.t2
# damage function
v.damfrac[t] = 1-(1/(1+(v.t2/100) * p.temperature[t]^2))
v.damages[t] = v.damfrac[t] * sum(p.gdp[t,:])
end
end
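#=
Illustrative sketch (hedged): with the default specification 7 and
effects = :total, the quadratic coefficient combines the base and catastrophic
terms (t2_prod_7 defaults to 0), giving roughly

    t2 = 0.318149737017145 + 0.0 + 0.362274271711041   # ≈ 0.680 (% of GDP per °C²)
    damfrac(T) = 1 - 1 / (1 + (t2 / 100) * T^2)
    damfrac(3.0)    # ≈ 0.058, about 5.8% of GDP at 3 °C of warming
=#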
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 712 | using Mimi
# Create identity components to assist with adding pulse of emissions
@defcomp IdentityComponent_co2 begin
input_co2 = Parameter(index=[time])
output_co2 = Variable(index=[time])
function run_timestep(p, v, d, t)
v.output_co2[t] = p.input_co2[t]
end
end
@defcomp IdentityComponent_n2o begin
input_n2o = Parameter(index=[time])
output_n2o = Variable(index=[time])
function run_timestep(p, v, d, t)
v.output_n2o[t] = p.input_n2o[t]
end
end
@defcomp IdentityComponent_ch4 begin
input_ch4 = Parameter(index=[time])
output_ch4 = Variable(index=[time])
function run_timestep(p, v, d, t)
v.output_ch4[t] = p.input_ch4[t]
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 2900 | using Mimi
# Calculate global net consumption
@defcomp GlobalNetConsumption begin
country = Index()
gdp = Parameter(index=[time,country], unit="billion US\$2005/yr")
population = Parameter(index=[time, country], unit="million")
total_damage = Parameter(index=[time], unit="US\$2005/yr")
net_consumption = Variable(index=[time])
net_cpc = Variable(index=[time])
global_gdp = Variable(index=[time])
global_population = Variable(index=[time])
function run_timestep(p, v, d, t)
# Sum the population and gdp of all countries for the current timestep
v.global_population[t] = sum(p.population[t,:])
v.global_gdp[t] = sum(p.gdp[t,:])
# Convert damages to billions
total_damage = p.total_damage[t] / 1e9
# Compute net consumption as GDP - damages
v.net_consumption[t] = v.global_gdp[t] - total_damage
# Multiply by 1e3 because net_consumption is in billion, and population is in million
v.net_cpc[t] = v.net_consumption[t] * 1e3 / v.global_population[t]
end
end
# Calculate regional net consumption
@defcomp RegionalNetConsumption begin
fund_regions = Index()
gdp = Parameter(index=[time,fund_regions], unit="billion US\$2005/yr")
population = Parameter(index=[time, fund_regions], unit="million")
total_damage = Parameter(index=[time,fund_regions], unit="US\$2005/yr")
net_consumption = Variable(index=[time,fund_regions])
net_cpc = Variable(index=[time,fund_regions])
function run_timestep(p, v, d, t)
for r in d.fund_regions
# Convert damages to billions
total_damage = p.total_damage[t,r] / 1e9
# Compute net consumption as GDP - damages
v.net_consumption[t,r] = p.gdp[t,r] - total_damage
# Multiply by 1e3 because net_consumption is in billion, and population is in million
v.net_cpc[t,r] = v.net_consumption[t,r] * 1e3 / p.population[t,r]
end
end
end
# Calculate country level net consumption
@defcomp CountryNetConsumption begin
country = Index()
gdp = Parameter(index=[time,country], unit="billion US\$2005/yr")
population = Parameter(index=[time, country], unit="million")
total_damage = Parameter(index=[time,country], unit="US\$2005/yr")
net_consumption = Variable(index=[time,country])
net_cpc = Variable(index=[time,country])
function run_timestep(p, v, d, t)
for c in d.country
# Convert damages to billions
total_damage = p.total_damage[t,c] / 1e9
# Compute net consumption as GDP - damages
v.net_consumption[t,c] = p.gdp[t,c] - total_damage
# Multiply by 1e3 because net_consumption is in billion, and population is in million
v.net_cpc[t,c] = v.net_consumption[t,c] * 1e3 / p.population[t,c]
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 5253 | using MimiCIAM, Query, DataFrames, CSVFiles
# Supporting functions to support setting CIAM parameters
# Adapted from scripts in MimiCIAM.jl
"""
prep_ciam_xsc(xsc_params_path::String)
Process the segment-country mapping file (xsc) in CIAM: (1) read the CSV and
return dictionaries and arrays describing the mapping, and (2) filter the xsc
data to the desired segments/regions.
"""
function prep_ciam_xsc(xsc_params_path::String)
xsc_params = load(xsc_params_path) |> DataFrame
# Read in csv and convert to dictionary format
xsc_char = Dict{Any,Any}(xsc_params.seg[i] => (xsc_params.rgn[i],xsc_params.greenland[i], xsc_params.island[i]) for i in 1:length(xsc_params.seg))
# Create region and segment indices
rgns = sort(unique([i[1] for i in collect(values(xsc_char))]))
segs = string.(sort(unique(collect(keys(xsc_char)))))
xsc_ind = Dict{Int,Tuple{Int,Int,Int}}() # numeric seg -> (numeric rgn, greenland bool, island bool)
xsc_segmap = Dict{Any,Any}() # Numeric seg/rgn -> char seg/rgn
xsc_rgnmap = Dict{Any,Any}()
for i in 1:length(segs)
r = xsc_char[segs[i]][1] # Region character
grn = xsc_char[segs[i]][2] # Greenland indicator: 0 = non-Greenland, 1 = Greenland
isl = xsc_char[segs[i]][3] # Island indicator: 0 = non-island, 1 = island
r_ind = MimiCIAM.findind(r, rgns) # Region index
new_val = (r_ind, grn, isl) # New tuple w/ region index instead of character
# Build XSC Seg/rgn Maps
r2 = rgns[r_ind] # New region char
s = segs[i]
xsc_segmap[i] = s
if !(r2 in values(xsc_rgnmap))
xsc_rgnmap[r_ind] = r2
end
xsc_ind[i] = new_val
end
return (xsc_ind, rgns, segs, xsc_rgnmap)
end
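#=
Illustrative sketch of the returned values (entries shown are hypothetical):

    xsc_ind    # Dict{Int,Tuple{Int,Int,Int}}, e.g. 1 => (12, 0, 0)
               # numeric segment => (region index, greenland bool, island bool)
    rgns       # sorted country codes, e.g. ["ARG", "AUS", ...]
    segs       # sorted segment name strings
    xsc_rgnmap # numeric region index => country code, e.g. 12 => "CAN"
=#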
"""
get_ciam_params(;tstep::Int64, first::Int64, last::Int64, ciam_countries::Vector, xsc_params_path::String, adaptation_firsts::Array)
Obtain the CIAM parameters for the ciam_countries using the key in xsc_params_path
for a model with time dimension first:tstep:last and adaptation starting in `adaptation_firsts`.
"""
function get_ciam_params(;tstep::Int64, first::Int64, last::Int64, ciam_countries::Vector, xsc_params_path::String, adaptation_firsts::Array)
# --------------------------------------------------------------------------
# Get CIAM Default Parameters
# Pull in main parameters and select just our countries
ciam_params = MimiCIAM.load_ciam_params()
for (k,v) in ciam_params
if "country" in names(v)
filter!(row -> row.country in ciam_countries, ciam_params[k])
end
end
# Process XSC (segment-country mapping dictionary)
xsc_ind, rgns, segs, xsc_rgnmap = prep_ciam_xsc(xsc_params_path)
rgns != ciam_countries ? error("The provided ciam_countries in the get_ciam_params function must match those in the provided xsc_params_path file.") : nothing
# Process params using xsc
MimiCIAM.parse_ciam_params!(ciam_params, rgns, segs, 0)
# --------------------------------------------------------------------------
# Adjust, Delete, and Add Parameters
# --> Delete Parameters that never get used
for p in ["s1000", "s100", "s10", "smax", "land_appr_canada", "ypc_usa", "gtapland_canada", "wbvm", "fundland_canada", "refpopdens_usa"]
delete!(ciam_params, p)
end
# --> Time Related
ciam_params["tstep"] = tstep # Length of individual time-step (years)
ciam_params["at"] = adaptation_firsts # times that start each adaptation period
ciam_params["ntsteps"] = length(first:tstep:last)
# --> Metadata; not used in run
ciam_params["rcp"] = 0
ciam_params["percentile"] = 50
ciam_params["ssp"] = 0
# --> Default Settings
ciam_params["fixed"] = true
ciam_params["noRetreat"] = false
ciam_params["allowMaintain"] = false
ciam_params["popinput"] = 0
ciam_params["discountrate"] = 0.04
# --> IDs and Dimensions
# Dynamically find indices corresponding to USA and CAN and manually set time steps
# If the lengths are 0, then assume those segments are not used. Note that
# if including Greenland, need Canada too as a reference for land appreciation
rgn_ind_canada = [k for (k,v) in xsc_rgnmap if v=="CAN"]
rgn_ind_canada = (length(rgn_ind_canada) > 0) ? rgn_ind_canada[1] : 0
rgn_ind_usa = [k for (k,v) in xsc_rgnmap if v=="USA"]
rgn_ind_usa = (length(rgn_ind_usa) > 0) ? rgn_ind_usa[1] : 0
segID = MimiCIAM.segStr_to_segID(segs)
ciam_params["segID"] = segID
ciam_params["xsc"] = xsc_ind
ciam_params["rgn_ind_canada"] = rgn_ind_canada
ciam_params["rgn_ind_usa"] = rgn_ind_usa
# --> Population and GDP Parameters - need to be connected to Socioeconomics
delete!(ciam_params, "pop") # pop = Parameter(index = [time, regions]) # Population of region (million people)
delete!(ciam_params, "ypcc") # ypcc = Parameter(index = [time, regions]) # GDP per capita per region ($2010 per capita)
# --> Storm Damage Parameters - we adjust these to be consistent with the VSL
# component, so remove these two parameters (see calc of vsl_ciam_country in
# main_ciam.jl))
delete!(ciam_params, "vslel")
delete!(ciam_params, "vslmult")
return (rgns, segs, ciam_params)
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 7220 | using Query, NetCDF, StatsBase, DataFrames, CSVFiles
# Supporting Functions to Downscale BRICK from GMSL to LSL
# Adapted from https://github.com/raddleverse/CIAM_uncertainty_propagation
"""
get_fingerprints(;fp_file::String = joinpath(@__DIR__, "../../data/CIAM/FINGERPRINTS_SLANGEN_Bakker.nc"))
Retrieve BRICK fingerprints from NetCDF file
"""
function get_fingerprints(;fp_file::String = joinpath(@__DIR__, "../../data/CIAM/FINGERPRINTS_SLANGEN_Bakker.nc"))
fplat = ncread(fp_file,"lat")
fplon = ncread(fp_file,"lon")
fpAIS = ncread(fp_file,"AIS")
fpGSIC = ncread(fp_file,"GLAC")
fpGIS = ncread(fp_file,"GIS")
ncclose()
return fplat,fplon,fpAIS,fpGSIC,fpGIS
end
"""
get_segment_fingerprints(;fp_file::String = joinpath(@__DIR__, "../../data/CIAM/FINGERPRINTS_SLANGEN_Bakker.nc"),
segIDs_file::String = joinpath(@__DIR__, "../../data/CIAM/diva_segment_latlon.csv"),
fp_segments_file::String = joinpath(@__DIR__, "../../data/CIAM/segment_fingerprints.csv"))
Get segment specific fingerprints for segments in segIDs_file using fingerprints in
fp_file as the baseline information. Write out these segment specific fingerprints.
"""
function get_segment_fingerprints(;fp_file::String = joinpath(@__DIR__, "../../data/CIAM/FINGERPRINTS_SLANGEN_Bakker.nc"),
segIDs_file::String = joinpath(@__DIR__, "../../data/CIAM/diva_segment_latlon.csv"),
fp_segments_file::String = joinpath(@__DIR__, "../../data/CIAM/segment_fingerprints.csv"))
# getfingerprints from FINGERPRINTS_SLANGEN_Bakker
# the fplat and fplon are -90 to 90 and 0 to 360 respectively
(fplat,fplon,fpAIS,fpGSIC,fpGIS) = get_fingerprints(fp_file = fp_file)
# segment data
ciamlonlat = load(segIDs_file) |> DataFrame |> i -> sort!(i, :segments)
ciamlonlat.longi[findall(i -> i < 0, ciamlonlat.longi)] .+= 360 # Convert Longitude to degrees East, CIAM Lat is already in (-90,90) by default
df = DataFrame(:segments => [], :segid => [], :lon => [], :lat => [], :rgn => [],
:fpGIS_loc => [],
:fpAIS_loc => [],
:fpGSIC_loc => [],
:fpTE_loc => [],
:fpLWS_loc => []
)
for i in 1:size(ciamlonlat,1)
lon = ciamlonlat.longi[i]
lat = ciamlonlat.lati[i]
segid = ciamlonlat.segid[i]
segment = ciamlonlat.segments[i]
rgn = ciamlonlat.rgn[i]
# Find fingerprint degrees nearest to lat,lon
ilat = findall(isequal(minimum(abs.(fplat.-lat))),abs.(fplat.-lat))
ilon = findall(isequal(minimum(abs.(fplon.-lon))),abs.(fplon.-lon))
# Take average of closest lat/lon values
fpAIS_flat = collect(skipmissing(Iterators.flatten(fpAIS[ilon,ilat])))
fpGSIC_flat = collect(skipmissing(Iterators.flatten(fpGSIC[ilon,ilat])))
fpGIS_flat = collect(skipmissing(Iterators.flatten(fpGIS[ilon,ilat])))
fpAIS_loc = mean(fpAIS_flat[isnan.(fpAIS_flat).==false],dims=1)[1] # [1] converts Vector to Float64
fpGSIC_loc = mean(fpGSIC_flat[isnan.(fpGSIC_flat).==false],dims=1)[1] # [1] converts Vector to Float64
fpGIS_loc = mean(fpGIS_flat[isnan.(fpGIS_flat).==false],dims=1)[1] # [1] converts Vector to Float64
fpTE_loc = 1.0
fpLWS_loc=1.0
# Keep searching nearby lat/lon values if fingerprint value is NaN unless limit is hit
inc = 1
while (isnan(fpAIS_loc) || isnan(fpGIS_loc) || isnan(fpGSIC_loc)) && inc < 5
newlonStart = next_lon.(fplon[ilon], inc, :decrease)[1]
newlatStart = next_lat.(fplat[ilat], inc, :decrease)[1]
newlonEnd = next_lon.(fplon[ilon], inc, :increase)[1]
newlatEnd = next_lat.(fplat[ilat], inc, :increase)[1]
latInd1 = minimum(findall(isequal(minimum(abs.(fplat.-newlatStart))),abs.(fplat.-newlatStart)))
latInd2 = maximum(findall(isequal(minimum(abs.(fplat.-newlatEnd))),abs.(fplat.-newlatEnd)))
lonInd1 = minimum(findall(isequal(minimum(abs.(fplon.-newlonStart))),abs.(fplon.-newlonStart)))
lonInd2 = maximum(findall(isequal(minimum(abs.(fplon.-newlonEnd))),abs.(fplon.-newlonEnd)))
if latInd2 < latInd1
latInds=[latInd1; 1:latInd2]
else
latInds=latInd1:latInd2
end
if lonInd2 < lonInd1
lonInds=[lonInd1; 1:lonInd2]
else
lonInds = lonInd1:lonInd2
end
fpAIS_flat = collect(skipmissing(Iterators.flatten(fpAIS[lonInds,latInds])))
fpGSIC_flat = collect(skipmissing(Iterators.flatten(fpGSIC[lonInds,latInds])))
fpGIS_flat = collect(skipmissing(Iterators.flatten(fpGIS[lonInds,latInds])))
fpAIS_loc = mean(fpAIS_flat[isnan.(fpAIS_flat).==false],dims=1)[1]
fpGSIC_loc = mean(fpGSIC_flat[isnan.(fpGSIC_flat).==false],dims=1)[1]
fpGIS_loc = mean(fpGIS_flat[isnan.(fpGIS_flat).==false],dims=1)[1]
inc = inc + 1
end
# If still NaN, throw an error
if isnan(fpAIS_loc) || isnan(fpGIS_loc) || isnan(fpGSIC_loc)
println("Error: no fingerprints found for ($(lon),$(lat))")
return nothing
end
#append to the DataFrame
append!(df, DataFrame(:segments => segment, :segid => segid, :lon => lon, :lat => lat, :rgn => rgn,
:fpGIS_loc => fpGIS_loc,
:fpAIS_loc => fpAIS_loc,
:fpGSIC_loc => fpGSIC_loc,
:fpTE_loc => fpTE_loc,
:fpLWS_loc => fpLWS_loc)
)
end # End lonlat tuple
df |> save(fp_segments_file)
end
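#=
Minimal sketch of the nearest-grid-cell lookup used above (hypothetical grid):
the closest fingerprint cell minimizes the absolute difference in latitude (and
analogously longitude); the expanding-window loop widens this search by `inc`
degrees per iteration whenever the nearest cell is NaN (e.g. over masked cells).

    fplat = collect(-89.5:1.0:89.5)
    lat = 40.2
    ilat = findall(isequal(minimum(abs.(fplat .- lat))), abs.(fplat .- lat))
    # fplat[ilat] == [40.5], the nearest grid latitude
=#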
# Small helper functions for dealing with sea level fingerprints near land
"""
next_lat(lat::Float64, inc::Int64, direction::Symbol)
Increment latitude by `inc` in either positive direction (`direction=:increase`)
or in the negative direction (`direction=:decrease`).
Assumes latitude runs from -90 to 90 (deg N).
"""
function next_lat(lat::Float64, inc::Int64, direction::Symbol)
if lat < -90 || lat > 90
error("Latitude must be between -90 and 90")
end
if direction == :increase
new_lat = lat + inc
if new_lat > 90
new_lat = new_lat - 180 #wrap around
end
elseif direction == :decrease
new_lat = lat - inc
if new_lat < -90
new_lat = new_lat + 180
end
end
return new_lat
end
"""
next_lon(lon::Float64, inc::Int64, direction::Symbol)
Increment longitude by `inc` in either positive direction
(`direction=:increase`) or in the negative direction (`direction=:decrease`).
Assumes longitude runs from 0 to 360 (deg E).
"""
function next_lon(lon::Float64, inc::Int64, direction::Symbol)
if lon < 0 || lon > 360
error("Longitude must be between 0 and 360")
end
if direction == :increase
new_lon = lon + inc
if new_lon > 360
new_lon = new_lon - 360
end
elseif direction == :decrease
new_lon = lon - inc
if new_lon < 0
new_lon = new_lon + 360
end
end
return new_lon
end
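#=
Usage sketch of the wraparound helpers above:

    next_lon(359.0, 2, :increase)   # 1.0, wraps past 360 °E
    next_lat(-89.0, 3, :decrease)   # 88.0, wraps past the south pole
=#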
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 11802 | using JSON, CSVFiles, DataFrames
# Process all FAIR Monte Carlo Simulation constrained parameter sets from JSON
# to CSV files to use directly by functions in main_mcs.jl
# Pairings of parameters to csv files names, for reference, also produced by
# `get_fair_mcs_params_map` in `main_mcs.jl`
# Dict(
# :β_CO => "b_aero_CO",
# :scale_CH₄ => "scale_CH4",
# :F_solar => "F_solar",
# :Ψ_CH₄ => "b_tro3_CH4",
# :scale_N₂O => "scale_N2O",
# :CO₂_pi => "C_pi",
# :deep_ocean_efficacy => "deep_ocean_efficacy",
# :scale_bcsnow => "scale_bcsnow",
# :scale_aerosol_direct_OC => "scale_aerosol_direct_OC",
# :b_SOx => "ghan_params_SOx",
# :feedback => "ozone_feedback",
# :scale_O₃ => "scale_O3",
# :b_POM => "ghan_params_b_POM",
# :r0_co2 => "r0",
# :β_NH3 => "b_aero_NH3",
# :lambda_global => "lambda_global",
# :scale_landuse => "scale_landuse",
# :scale_volcanic => "scale_volcanic",
# :scale_aerosol_direct_SOx => "scale_aerosol_direct_SOx",
# :β_NOx => "b_aero_NOx",
# :Ψ_N₂O => "b_tro3_N2O",
# :ocean_heat_capacity => "ocean_heat_capacity",
# :β_OC => "b_aero_OC",
# :scale_solar => "scale_solar",
# :rC_co2 => "rc",
# :scale_aerosol_direct_BC => "scale_aerosol_direct_BC",
# :scale_CH₄_H₂O => "scale_CH4_H2O",
# :scale_aerosol_indirect => "scale_aerosol_indirect",
# :scale_ods => "scale_ods",
# :Ψ_CO => "b_tro3_CO",
# :scale_aerosol_direct_NOx_NH3 => "scale_aerosol_direct_NOx_NH3",
# :scale_other_ghg => "scale_other_ghg",
# :Ψ_NMVOC => "b_tro3_NMVOC",
# :F2x => "F2x",
# :β_SOx => "b_aero_SOx",
# :β_NMVOC => "b_aero_NMVOC",
# :rT_co2 => "rt",
# :β_BC => "b_aero_BC",
# :scale_CO₂ => "scale_CO2",
# :Ψ_ODS => "b_tro3_ODS",
# :scale_aerosol_direct_CO_NMVOC => "scale_aerosol_direct_CO_NMVOC",
# :Ψ_NOx => "b_tro3_NOx",
# :ocean_heat_exchange => "ocean_heat_exchange",
# :ϕ => "ghan_params_Pi"
# )
n = 2237 # total number of available samples
fair_params = JSON.parsefile(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair-1.6.2-wg3-params.json"));
# Names of minor greenhouse gases and ozone-depleting substances (used for indexing).
other_ghg_names = ["CF4", "C2F6", "C6F14", "HFC23", "HFC32", "HFC43_10", "HFC125", "HFC134a", "HFC143a", "HFC227ea", "HFC245fa", "SF6"]
ods_names = ["CFC_11", "CFC_12", "CFC_113", "CFC_114", "CFC_115", "CARB_TET", "MCF", "HCFC_22", "HCFC_141B", "HCFC_142B", "HALON1211", "HALON1202", "HALON1301", "HALON2402", "CH3BR", "CH3CL"]
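# The scalar parameters handled below all follow the same extract-and-save
# pattern; this small helper (a sketch added for illustration, not called by
# the script) captures it:
function save_scalar_fair_param(fair_params, n::Int, p::String)
    DataFrame(p => [fair_params[i][p] for i in 1:n]) |>
        save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end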
# Carbon cycle
for p in ["r0", "rt", "rc"]
DataFrame(p => [fair_params[i][p] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
# Forcing from a doubling of CO₂.
for p in ["F2x"]
DataFrame(p => [fair_params[i][p] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
# Ozone radiative forcing feedback.
for p in ["ozone_feedback"]
DataFrame(p => [fair_params[i][p] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
# Pre-industrial CO₂ concentration (other concentrations fixed across samples).
for p in ["C_pi"] # Choose first element of vector of 31 elements
DataFrame(p => [fair_params[i][p][1] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
# Solar radiative forcing.
# Assume the F_solar parameter set defines values starting in 1750, with 361 years total.
for p in ["F_solar"]
arr = [fair_params[i][p] for i in 1:n]
arr = reduce(hcat, arr)' # 361 years per sample - flatten out from vector of vectors to a matrix
df = DataFrame(arr, :auto) |>
i -> rename!(i, Symbol.(1750:2110))
df |> save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
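# Aside: `reduce(hcat, arr)'` above turns a length-n vector of per-sample
# vectors into an n-row matrix, e.g.
#   reduce(hcat, [[1, 2, 3], [4, 5, 6]])'  # -> 2×3 matrix, one sample per row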
# Temperature component
for p in ["ocean_heat_exchange", "deep_ocean_efficacy", "lambda_global", "ocean_heat_capacity"]
if p == "ocean_heat_capacity"
arr = [fair_params[i][p] for i in 1:n]
arr = reduce(hcat, arr)' # 2 members (deep and mixed) per sample - flatten out from vector of vectors to a matrix
df = DataFrame(arr, :auto)
rename!(df, ["1", "2"])
else
df = DataFrame(p => [fair_params[i][p] for i in 1:n])
end
df |> save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_$p.csv"))
end
# "ghan_params" for aerosol indirect forcing effect.
DataFrame(:ghan_params_Pi => [fair_params[i]["ghan_params"][1] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_ghan_params_Pi.csv"))
DataFrame(:ghan_params_SOx => [fair_params[i]["ghan_params"][2] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_ghan_params_SOx.csv"))
DataFrame(:ghan_params_b_POM => [fair_params[i]["ghan_params"][3] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_ghan_params_b_POM.csv"))
# Radiative forcing scaling terms (based on the ordering of forcing agents in the Python code) - select from a vector of 45 elements
# NOTE for :scale_contrails: Default FAIR has contrail forcing switched off. But
# the data used does sample a scaling term. Currently not included in this model.
DataFrame(:scale_CO2 => [fair_params[i]["scale"][1] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_CO2.csv"))
DataFrame(:scale_CH4 => [fair_params[i]["scale"][2] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_CH4.csv"))
DataFrame(:scale_N2O => [fair_params[i]["scale"][3] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_N2O.csv"))
DataFrame(:scale_O3 => [fair_params[i]["scale"][32] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_O3.csv"))
DataFrame(:scale_CH4_H2O => [fair_params[i]["scale"][34] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_CH4_H2O.csv"))
DataFrame(:scale_aerosol_direct_SOx => [fair_params[i]["scale"][36] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_direct_SOx.csv"))
DataFrame(:scale_aerosol_direct_CO_NMVOC => [fair_params[i]["scale"][37] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_direct_CO_NMVOC.csv"))
DataFrame(:scale_aerosol_direct_NOx_NH3 => [fair_params[i]["scale"][38] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_direct_NOx_NH3.csv"))
DataFrame(:scale_aerosol_direct_BC => [fair_params[i]["scale"][39] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_direct_BC.csv"))
DataFrame(:scale_aerosol_direct_OC => [fair_params[i]["scale"][40] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_direct_OC.csv"))
DataFrame(:scale_aerosol_indirect => [fair_params[i]["scale"][41] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_aerosol_indirect.csv"))
DataFrame(:scale_bcsnow => [fair_params[i]["scale"][42] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_bcsnow.csv"))
DataFrame(:scale_landuse => [fair_params[i]["scale"][43] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_landuse.csv"))
DataFrame(:scale_volcanic => [fair_params[i]["scale"][44] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_volcanic.csv"))
DataFrame(:scale_solar => [fair_params[i]["scale"][45] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_solar.csv"))
scale_other_ghg = [fair_params[i]["scale"][4:15] for i in 1:n]
scale_other_ghg = reduce(hcat, scale_other_ghg)'
scale_other_ghg = DataFrame(scale_other_ghg, :auto) |>
i -> rename!(i, other_ghg_names) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_other_ghg.csv"))
scale_ods = [fair_params[i]["scale"][16:31] for i in 1:n]
scale_ods = reduce(hcat, scale_ods)'
scale_ods = DataFrame(scale_ods, :auto) |>
i -> rename!(i, ods_names) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_scale_ods.csv"))
# Ozone radiative forcing - select from a vector of 6 elements
DataFrame(:b_tro3_CH4 => [fair_params[i]["b_tro3"][1] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_CH4.csv"))
DataFrame(:b_tro3_N2O => [fair_params[i]["b_tro3"][2] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_N2O.csv"))
DataFrame(:b_tro3_ODS => [fair_params[i]["b_tro3"][3] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_ODS.csv"))
DataFrame(:b_tro3_CO => [fair_params[i]["b_tro3"][4] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_CO.csv"))
DataFrame(:b_tro3_NMVOC => [fair_params[i]["b_tro3"][5] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_NMVOC.csv"))
DataFrame(:b_tro3_NOx => [fair_params[i]["b_tro3"][6] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_tro3_NOx.csv"))
# Aerosol direct forcing.
DataFrame(:b_aero_SOx => [fair_params[i]["b_aero"][1] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_SOx.csv"))
DataFrame(:b_aero_CO => [fair_params[i]["b_aero"][2] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_CO.csv"))
DataFrame(:b_aero_NMVOC => [fair_params[i]["b_aero"][3] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_NMVOC.csv"))
DataFrame(:b_aero_NOx => [fair_params[i]["b_aero"][4] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_NOx.csv"))
DataFrame(:b_aero_BC => [fair_params[i]["b_aero"][5] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_BC.csv"))
DataFrame(:b_aero_OC => [fair_params[i]["b_aero"][6] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_OC.csv"))
DataFrame(:b_aero_NH3 => [fair_params[i]["b_aero"][7] for i in 1:n]) |>
save(joinpath(@__DIR__, "..", "..", "data", "FAIR_mcs", "fair_mcs_params_b_aero_NH3.csv"))
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 1763 | # constants needed for SCC calculations
const _model_years = collect(1750:2300)
const _damages_years = collect(2020:2300)
const _damages_idxs = indexin(_damages_years, _model_years)
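# e.g. the first damages year, 2020, sits at position 271 of the model years,
# since indexin([2020], collect(1750:2300)) == [271]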
const scc_gas_molecular_conversions = Dict(:CO2 => 12/44, # C to CO2
:N2O => 28/44, # N2 to N2O,
:CH4 => 1., # CH4 to CH4
:HFC23 => 1., # HFC23 to HFC23
:HFC32 => 1., # HFC32 to HFC32
:HFC43_10 => 1., # HFC43_10 to HFC43_10
:HFC125 => 1., # HFC125 to HFC125
:HFC134a => 1., # HFC134a to HFC134a
:HFC143a => 1., # HFC143a to HFC143a
:HFC227ea => 1., # HFC227ea to HFC227ea
:HFC245fa => 1.) # HFC245fa to HFC245fa
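# Illustrative note (an assumption about downstream usage, not asserted by this
# file): these molecular conversions are combined with the pulse size
# conversions below into a single per-tonne multiplier, along the lines of
#   scc_gas_molecular_conversions[:CO2] / scc_gas_pulse_size_conversions[:CO2]
#   # ≈ (12/44) / 1e9, i.e. per tonne of CO2 for a 1 Gt pulse specified in C units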
const scc_gas_pulse_size_conversions = Dict(:CO2 => 1e9, # Gt to t
:N2O => 1e6, # Mt to t
:CH4 => 1e6, # Mt to t
:HFC23 => 1e3, # kt to t
:HFC32 => 1e3, # kt to t
:HFC43_10 => 1e3, # kt to t
:HFC125 => 1e3, # kt to t
:HFC134a => 1e3, # kt to t
:HFC143a => 1e3, # kt to t
:HFC227ea => 1e3, # kt to t
:HFC245fa => 1e3) # kt to t | MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 9298 | using FileIO, CSVFiles, DataFrames, Query
# Helper functions for streaming disaggregated data within the compute_scc function
function _stream_disagg_damages(m::Mimi.Model, output_dir::String, trialnum::Int, streams::Dict)
# println("Streaming out trialnum $trialnum ...")
cromar_mortality_damages = getdataframe(m, :DamageAggregator, :damage_cromar_mortality) |>
@filter(_.time > 2019) |>
@rename(:damage_cromar_mortality => :damages) |>
DataFrame |>
i -> insertcols!(i, 1, :trialnum => trialnum)
energy_damages = getdataframe(m, :DamageAggregator, :damage_energy) |>
@filter(_.time > 2019) |>
@mutate(damage_energy = _.damage_energy * 1e9) |> # billions of USD to USD
@rename(:damage_energy => :damages, :energy_countries => :country) |>
DataFrame |>
i -> insertcols!(i, 1, :trialnum => trialnum)
ag_damages = getdataframe(m, :DamageAggregator, :damage_ag) |>
@filter(_.time > 2019) |>
@mutate(damage_ag = _.damage_ag * 1e9) |> # billions of USD to USD
@rename(:damage_ag => :damages, :fund_regions => :region) |>
DataFrame |>
i -> insertcols!(i, 1, :trialnum => trialnum)
for country in unique(cromar_mortality_damages.country)
filename = joinpath("$output_dir/results/disaggregated_values/damages_cromar_mortality/$(country).csv")
trial_df = cromar_mortality_damages |> @filter(_.country == country) |> @select(:trialnum, :time, :damages) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
for country in unique(energy_damages.country)
filename = joinpath("$output_dir/results/disaggregated_values/damages_energy/$(country).csv")
trial_df = energy_damages |> @filter(_.country == country) |> @select(:trialnum, :time, :damages) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
for region in unique(ag_damages.region)
filename = joinpath("$output_dir/results/disaggregated_values/damages_agriculture/$(region).csv")
trial_df = ag_damages |> @filter(_.region == region) |> @select(:trialnum, :time, :damages) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
end
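# The haskey/savestreaming pattern above recurs in every helper in this file;
# it could be factored into a small utility along these lines (a sketch, not
# currently used by the code below):
function _append_to_stream!(streams::Dict, filename::String, trial_df::DataFrame)
    if haskey(streams, filename)
        write(streams[filename], trial_df)                     # append rows to the open stream
    else
        streams[filename] = savestreaming(filename, trial_df)  # open the file and write the first rows
    end
end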
function _stream_disagg_socioeconomics(m::Mimi.Model, output_dir::String, trialnum::Int, streams::Dict)
# println("Streaming out trialnum $trialnum ...")
country_pop = getdataframe(m, :Socioeconomic, :population) |> @filter(_.time > 2019) |> DataFrame # millions
country_pc_gdp = getdataframe(m, :PerCapitaGDP, :pc_gdp) |> @filter(_.time > 2019) |> DataFrame # USD per capita
country_data = innerjoin(country_pop, country_pc_gdp, on = [:time, :country]) |> i -> insertcols!(i, 1, :trialnum => trialnum)
for country in unique(country_data.country)
filename = joinpath("$output_dir/results/disaggregated_values/socioeconomics_country/$(country).csv")
trial_df = country_data |> @filter(_.country == country) |> @select(:trialnum, :time, :population, :pc_gdp) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
region_pop = getdataframe(m, :Agriculture, :population) |> @filter(_.time > 2019) |> DataFrame # millions
region_gdp = getdataframe(m, :Agriculture, :income) |> @filter(_.time > 2019) |> DataFrame # billions USD
region_data = innerjoin(region_pop, region_gdp, on = [:time, :fund_regions])
region_data = insertcols!(region_data, :pc_gdp => (region_data.income ./ region_data.population) * 1e3) |>
@select(:time, :fund_regions, :population, :pc_gdp) |>
DataFrame |>
i -> insertcols!(i, 1, :trialnum => trialnum)
for region in unique(region_data.fund_regions)
filename = joinpath("$output_dir/results/disaggregated_values/socioeconomics_region/$(region).csv")
trial_df = region_data |> @filter(_.fund_regions == region) |> @select(:trialnum, :time, :population, :pc_gdp) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
end
# note we pass a ModelInstance because ciam_base and ciam_modified are instances
function _stream_disagg_damages_slr(m::Mimi.ModelInstance, data::Array, output_dir::String, trialnum::Int, streams::Dict)
slr_damages = DataFrame(data, dim_keys(m, :ciam_country)) |>
i -> insertcols!(i, 1, :time => _damages_years) |>
i -> stack(i, Not(:time)) |>
@filter(_.time > 2019) |>
@rename(:variable => :country, :value => :damages) |>
@mutate(damages = _.damages * 1e9) |> # billions of USD to USD
DataFrame |>
i -> insertcols!(i, 1, :trialnum => trialnum)
for country in unique(slr_damages.country)
filename = joinpath("$output_dir/results/disaggregated_values/damages_slr/$(country).csv")
trial_df = slr_damages |> @filter(_.country == country) |> @select(:trialnum, :time, :damages) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
end
function _stream_disagg_md(m_base::Mimi.Model, m_modified::Mimi.Model, ciam_base::Union{Nothing, Mimi.ModelInstance}, md_ciam::Union{Nothing, Array},
output_dir::String, trialnum::Int, streams::Dict; gas_units_multiplier::Float64)
    # get marginal damages in 2005 USD, adjusting for (1) molecular mass and (2) pulse size, as well as converting billions of USD to USD for ag and energy
md_cromar_mortality = (view(m_modified[:DamageAggregator, :damage_cromar_mortality], _damages_idxs,:) .- view(m_base[:DamageAggregator, :damage_cromar_mortality], _damages_idxs,:)) .* gas_units_multiplier
md_energy = (view(m_modified[:DamageAggregator, :damage_energy], _damages_idxs,:) .- view(m_base[:DamageAggregator, :damage_energy], _damages_idxs,:)) .* 1e9 .* gas_units_multiplier
md_ag = (view(m_modified[:DamageAggregator, :damage_ag], _damages_idxs,:) .- view(m_base[:DamageAggregator, :damage_ag], _damages_idxs,:)) .* 1e9 .* gas_units_multiplier
# save agriculture
md_ag_df = DataFrame(md_ag, dim_keys(m_base, :fund_regions)) |>
i -> insertcols!(i, 1, :time => _damages_years) |>
i -> stack(i, Not(:time)) |>
@rename(:variable => :region, :value => :md) |>
DataFrame |>
i-> insertcols!(i, 1, :trialnum => trialnum)
for region in unique(md_ag_df.region)
filename = joinpath("$output_dir/results/disaggregated_values/mds_region_ag_only/$(region).csv")
trial_df = md_ag_df |> @filter(_.region == region) |> @select(:trialnum, :time, :md) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
# save country level mds
# aggregate ciam marginal damages
md_ciam_all_countries = fill(0., size(md_cromar_mortality))
if !isnothing(ciam_base)
country_idxs = indexin(dim_keys(ciam_base, :ciam_country), dim_keys(m_base, :country))
md_ciam_all_countries[:, country_idxs] = md_ciam[_damages_idxs,:]
end
md_country_df = DataFrame(md_cromar_mortality .+ md_energy .+ md_ciam_all_countries, dim_keys(m_base, :country)) |>
i -> insertcols!(i, 1, :time => _damages_years) |>
i -> stack(i, Not(:time)) |>
@rename(:variable => :country, :value => :md) |>
DataFrame |>
i-> insertcols!(i, 1, :trialnum => trialnum)
for country in unique(md_country_df.country)
filename = joinpath("$output_dir/results/disaggregated_values/mds_country_no_ag/$(country).csv")
trial_df = md_country_df |> @filter(_.country == country) |> @select(:trialnum, :time, :md) |> DataFrame
if haskey(streams, filename)
write(streams[filename], trial_df)
else
streams[filename] = savestreaming(filename, trial_df)
end
end
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 794 | using Interpolations, Mimi
"""
Return the name of the module that defines the component `comp_name` in model `m`.
This is a small helper function useful for internals like the mcs.
"""
function _get_module_name(m::Model, comp_name::Symbol)
return nameof(m.md.namespace[comp_name].comp_id.module_obj)
end
"""
Return the name of the Moore agriculture GTAP damage function specification in
model `m`. This is a small helper function useful for internals like the mcs.
"""
function _get_mooreag_gtap(m::Model)
# model may not have been run yet, so need to get model parameter name to look
# up the value
model_param_name = Mimi.get_model_param_name(m, :Agriculture, :gtap_name)
Agriculture_gtap = Mimi.model_param(m, model_param_name).value
return Agriculture_gtap
end
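# Hypothetical usage sketch (requires a constructed MimiGIVE model):
#   m = MimiGIVE.get_model()
#   _get_module_name(m, :Agriculture)  # -> name of the module defining the component
#   _get_mooreag_gtap(m)               # -> e.g. "midDF" (the default specification)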
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 623 | using Test, MimiGIVE
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
@testset "MimiGIVE" begin
@info("test_get_model.jl")
@time include("test_get_model.jl")
@info("test_compute_scc.jl")
@time include("test_compute_scc.jl")
@info("test_regression_deterministic.jl")
@time include("test_regression_deterministic.jl")
    if v"1.10" <= VERSION < v"1.11" # random number generator not always stable between Julia versions
@info("test_regression_mcs.jl")
@time include("test_regression_mcs.jl")
end
@info("test_disaggregated_values.jl")
@time include("test_save_disaggregated_values.jl")
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 6871 | using MimiGIVE
using Random
include("utils.jl")
# This script saves a set of validation data into a subfolder of the
# validation_data folder suffixed with `validation_label`.
# label of folder to be created
validation_label = "current"
##------------------------------------------------------------------------------
## Model Data
##------------------------------------------------------------------------------
savevars = [
(compname = :DamageAggregator, varname = :total_damage),
(compname = :DamageAggregator, varname = :total_damage_share),
(compname = :DamageAggregator, varname = :total_damage_domestic),
(compname = :DamageAggregator, varname = :cromar_mortality_damage),
(compname = :DamageAggregator, varname = :agriculture_damage),
(compname = :DamageAggregator, varname = :energy_damage),
(compname = :DamageAggregator, varname = :cromar_mortality_damage_domestic),
(compname = :DamageAggregator, varname = :agriculture_damage_domestic),
(compname = :DamageAggregator, varname = :energy_damage_domestic),
(compname = :global_netconsumption, varname = :net_consumption),
(compname = :global_netconsumption, varname = :net_cpc),
(compname = :global_netconsumption, varname = :global_gdp),
(compname = :global_netconsumption, varname = :global_population),
(compname = :temperature, varname = :T),
(compname = :glaciers_small_icecaps, varname = :gsic_sea_level) ,
(compname = :antarctic_icesheet, varname = :ais_sea_level),
(compname = :greenland_icesheet, varname = :greenland_sea_level),
(compname = :thermal_expansion, varname = :te_sea_level),
(compname = :landwater_storage, varname = :lws_sea_level)
]
# default model
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model")
isdir(outdir) || mkpath(outdir)
m = MimiGIVE.get_model()
save_model_data(m, savevars::Vector, outdir::String)
# SSP245
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "SSP245_model")
isdir(outdir) || mkpath(outdir)
m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245")
save_model_data(m, savevars::Vector, outdir::String)
##------------------------------------------------------------------------------
## Compute SCC Data
##------------------------------------------------------------------------------
discount_rates = [
# Constant discount rates
(label = "CR 1%", prtp = 0.01, eta = 0.0), (label = "CR 2%", prtp = 0.02, eta = 0.0), (label = "CR 2.5%", prtp = 0.025, eta = 0.0), (label = "CR 3%", prtp = 0.03, eta = 0.0), (label = "CR 5%", prtp = 0.05, eta = 0.0),
# Some Ramsey discount rates
(label = "DICE2016", prtp = 0.015, eta = 1.45), (label = "OtherRamsey", prtp = 0.01, eta = 1.)
]
# default model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model_SCC_2020")
isdir(outdir) || mkpath(outdir)
save_scc_data(outdir; m = MimiGIVE.get_model(), year = 2020, discount_rates = discount_rates, gas = :CO2)
save_scc_data(outdir; m = MimiGIVE.get_model(), year = 2020, discount_rates = discount_rates, gas = :CH4)
save_scc_data(outdir; m = MimiGIVE.get_model(), year = 2020, discount_rates = discount_rates, gas = :N2O)
# SSP245, SC-CO2 and SC-CH4 and SC-N2O in year 2020
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "SSP245_model_SCC_2020")
isdir(outdir) || mkpath(outdir)
save_scc_data(outdir; m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245"), year = 2020, discount_rates = discount_rates, gas = :CO2)
save_scc_data(outdir; m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245"), year = 2020, discount_rates = discount_rates, gas = :CH4)
save_scc_data(outdir; m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245"), year = 2020, discount_rates = discount_rates, gas = :N2O)
##------------------------------------------------------------------------------
## Compute SCC MCS Data
##------------------------------------------------------------------------------
discount_rates = [
# Constant discount rates
(label = "CR 1%", prtp = 0.01, eta = 0.0), (label = "CR 2%", prtp = 0.02, eta = 0.0), (label = "CR 2.5%", prtp = 0.025, eta = 0.0), (label = "CR 3%", prtp = 0.03, eta = 0.0), (label = "CR 5%", prtp = 0.05, eta = 0.0),
# Some Ramsey discount rates
(label = "DICE2016", prtp = 0.015, eta = 1.45), (label = "OtherRamsey", prtp = 0.01, eta = 1.)
]
save_list = [
(:DamageAggregator, :total_damage),
(:DamageAggregator, :total_damage_share),
(:DamageAggregator, :total_damage_domestic),
(:DamageAggregator, :cromar_mortality_damage),
(:DamageAggregator, :agriculture_damage),
(:DamageAggregator, :energy_damage),
(:DamageAggregator, :cromar_mortality_damage_domestic),
(:DamageAggregator, :agriculture_damage_domestic),
(:DamageAggregator, :energy_damage_domestic),
(:global_netconsumption, :net_consumption),
(:global_netconsumption, :net_cpc),
(:global_netconsumption, :global_gdp),
(:global_netconsumption, :global_population),
(:temperature, :T),
(:glaciers_small_icecaps, :gsic_sea_level) ,
(:antarctic_icesheet, :ais_sea_level),
(:greenland_icesheet, :greenland_sea_level),
(:thermal_expansion, :te_sea_level),
(:landwater_storage, :lws_sea_level)
]
n = 3
seed = 999
# default model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
for gas in [:CO2, :CH4, :N2O]
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model_MCS_SCC_2020", "$gas")
isdir(outdir) || mkpath(outdir)
m = MimiGIVE.get_model()
save_scc_mcs_data(seed, outdir, n;
m = m, year = 2020, discount_rates = discount_rates, gas = gas,
save_list = save_list, save_md = true, save_cpc = true, save_slr_damages = true,
compute_sectoral_values = true, compute_domestic_values = true)
end
# SSP245, SC-CO2 and SC-CH4 and SC-N2O in year 2020
for gas in [:CO2, :CH4, :N2O]
outdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "SSP245_model_MCS_SCC_2020", "$gas")
isdir(outdir) || mkpath(outdir)
m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245")
save_scc_mcs_data(seed, outdir, n;
m = m, year = 2020, discount_rates = discount_rates, gas = gas,
save_list = save_list, save_md = true, save_cpc = true, save_slr_damages = true,
compute_sectoral_values = true, compute_domestic_values = true)
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 5510 | module TestComputeSCC
using MimiGIVE
using Test
import MimiGIVE: get_model, compute_scc
# function compute_scc(m::Model=get_model();
# year::Union{Int, Nothing} = nothing,
# last_year::Int = _model_years[end],
# prtp::Union{Float64,Nothing} = 0.015,
# eta::Union{Float64,Nothing}=1.45,
# discount_rates=nothing,
# certainty_equivalent=false,
# fair_parameter_set::Symbol = :random,
# fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
# rffsp_sampling::Symbol = :random,
# rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
# n=0,
# gas::Symbol = :CO2,
# save_list::Vector = [],
# output_dir::Union{String, Nothing} = nothing,
# save_md::Bool = false,
# save_cpc::Bool = false,
# save_slr_damages::Bool = false,
# compute_sectoral_values::Bool = false,
# compute_domestic_values::Bool = false,
# CIAM_foresight::Symbol = :perfect,
# CIAM_GDPcap::Bool = false,
# post_mcs_creation_function=nothing,
# pulse_size::Float64=1.
# )
##------------------------------------------------------------------------------
## API - test that basic cases run without error - more of the API is tested in
## the regression testing files (test_regression_deterministic.jl and test_regression_mcs.jl)
##------------------------------------------------------------------------------
# deterministic
scc = compute_scc(year = 2020)
scc_rff = compute_scc(get_model(; socioeconomics_source=:RFF, RFFSPsample=1000), year = 2020)
scc_ssp = compute_scc(get_model(; socioeconomics_source=:SSP, SSP_scenario="SSP126"), year = 2020)
# deterministic - non-default options
scc = compute_scc(year = 2025, last_year = 2200, prtp = 0.02, eta = 1.5, gas = :CH4,
CIAM_foresight = :limited, CIAM_GDPcap = true, pulse_size = 10.)
# monte carlo
drs = [(label = "label", prtp = 0.015, eta = 1.45)]
scc = compute_scc(year = 2020, n = 5, discount_rates = drs)
scc_rff = compute_scc(get_model(; socioeconomics_source=:RFF, RFFSPsample=1000), year = 2020, n = 5, discount_rates=drs)
scc_ssp = compute_scc(get_model(; socioeconomics_source=:SSP, SSP_scenario="SSP126"), year = 2020, n = 5, discount_rates=drs)
# monte carlo - non-default options
drs = [(label = "CR 1%", prtp = 0.01, eta = 0.0),
(label = "CR 2%", prtp = 0.02, eta = 0.0),
(label = "CR 3%", prtp = 0.03, eta = 0.0)]
scc = compute_scc(year = 2025, last_year = 2200, discount_rates = drs, gas = :CH4,
CIAM_foresight = :limited, CIAM_GDPcap = true, pulse_size = 10., n = 5)
##------------------------------------------------------------------------------
## keyword arguments and values
##------------------------------------------------------------------------------
# year and last_year
@test compute_scc(year = 2020) < compute_scc(year = 2025) < compute_scc(year = 2030)
@test compute_scc(year = 2020) > compute_scc(year = 2020; last_year=2200)
# discount rate
@test compute_scc(year = 2020, prtp = 0.01, eta = 0.0) > compute_scc(year = 2020, prtp = 0.02, eta = 0.0) > compute_scc(year = 2020, prtp = 0.03, eta = 0.0)
drs = [(label = "CR 1%", prtp = 0.01, eta = 0.0, ew = nothing, ew_norm_region = nothing),
(label = "CR 2%", prtp = 0.02, eta = 0.0, ew = nothing, ew_norm_region = nothing),
(label = "CR 3%", prtp = 0.03, eta = 0.0, ew = nothing, ew_norm_region = nothing)]
sccs = compute_scc(year = 2020; discount_rates = drs)
@test sccs[(dr_label = "CR 1%", prtp = 0.01, eta = 0.0, ew = nothing, ew_norm_region = nothing)] > sccs[(dr_label = "CR 2%", prtp = 0.02, eta = 0.0, ew = nothing, ew_norm_region = nothing)] > sccs[(dr_label = "CR 3%", prtp = 0.03, eta = 0.0, ew = nothing, ew_norm_region = nothing)]
# deprecated form of discount rates - internally should add the ew and ew_norm_region fields to the discount rates Named Tuples
drs = [(label = "CR 1%", prtp = 0.01, eta = 0.0),
(label = "CR 2%", prtp = 0.02, eta = 0.0),
(label = "CR 3%", prtp = 0.03, eta = 0.0)]
sccs = compute_scc(year = 2020; discount_rates = drs)
@test haskey(sccs, (dr_label = "CR 1%", prtp = 0.01, eta = 0.0, ew = nothing, ew_norm_region = nothing))
@test haskey(sccs, (dr_label = "CR 2%", prtp = 0.02, eta = 0.0, ew = nothing, ew_norm_region = nothing))
@test haskey(sccs, (dr_label = "CR 3%", prtp = 0.03, eta = 0.0, ew = nothing, ew_norm_region = nothing))
# gas
@test compute_scc(year = 2020, gas = :CO2) < compute_scc(year = 2020, gas = :CH4) < compute_scc(year = 2020, gas = :N2O)
# pulse size
scc_0_5 = compute_scc(year = 2020, pulse_size = 0.5)
scc_1_0 = compute_scc(year = 2020, pulse_size = 1.)
scc_1_5 = compute_scc(year = 2020, pulse_size = 1.5)
@test scc_0_5 ≈ scc_1_0 rtol = 1e-3
@test scc_0_5 ≈ scc_1_5 rtol = 1e-3
# CIAM parameters
# use lower discount rate to see differences in the out years
scc_limited = compute_scc(year = 2020, prtp = 0.01, eta = 0.0, CIAM_foresight = :limited)
scc_perfect = compute_scc(year = 2020, prtp = 0.01, eta = 0.0, CIAM_foresight = :perfect)
@test scc_perfect < scc_limited
scc_nocap = compute_scc(year = 2020, prtp = 0.01, eta = 0.0, CIAM_GDPcap = false)
scc_GDPcap = compute_scc(year = 2020, prtp = 0.01, eta = 0.0, CIAM_GDPcap = true)
@test scc_GDPcap == scc_nocap # no difference for this (default) trial
m = get_model(; RFFSPsample=1798) # known difference with this trial
scc_nocap = compute_scc(m; year = 2020, prtp = 0.01, eta = 0.0, CIAM_GDPcap = false)
scc_GDPcap = compute_scc(m; year = 2020, prtp = 0.01, eta = 0.0, CIAM_GDPcap = true)
@test scc_GDPcap < scc_nocap
end # module
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 5436 | module TestGetModel
using MimiGIVE
using Test
using MimiMooreEtAlAgricultureImpacts
import MimiGIVE: get_model, compute_scc
# function get_model(;
# Agriculture_gtap::String = "midDF",
# socioeconomics_source::Symbol = :RFF,
# SSP_scenario::Union{Nothing, String} = nothing,
# RFFSPsample::Union{Nothing, Int} = nothing,
# Agriculture_floor_on_damages::Bool = true,
# Agriculture_ceiling_on_benefits::Bool = false,
# vsl::Symbol= :epa
# )
##------------------------------------------------------------------------------
## API - test that run without error
##------------------------------------------------------------------------------
m = get_model()
run(m)
# RFF socioeconomics
for Agriculture_gtap in ["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]
for RFFSPsample in [1,2]
for Agriculture_floor_on_damages in [true, false]
for Agriculture_ceiling_on_benefits in [true, false]
for vsl in [:epa, :fund]
get_model(;Agriculture_gtap = Agriculture_gtap,
socioeconomics_source = :RFF,
RFFSPsample = RFFSPsample,
Agriculture_floor_on_damages = Agriculture_floor_on_damages,
Agriculture_ceiling_on_benefits = Agriculture_ceiling_on_benefits,
vsl = vsl)
end
end
end
end
end
# SSP socioeconomics
for Agriculture_gtap in ["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]
for SSP_scenario in ["SSP126", "SSP245", "SSP370", "SSP585"]
for Agriculture_floor_on_damages in [true, false]
for Agriculture_ceiling_on_benefits in [true, false]
for vsl in [:epa, :fund]
get_model(;Agriculture_gtap = Agriculture_gtap,
socioeconomics_source = :SSP,
SSP_scenario = SSP_scenario,
Agriculture_floor_on_damages = Agriculture_floor_on_damages,
Agriculture_ceiling_on_benefits = Agriculture_ceiling_on_benefits,
vsl = vsl)
end
end
end
end
end
# some errors
@test_throws ErrorException get_model(; socioeconomics_source = :SSP) # missing SSP scenario
@test_throws ErrorException get_model(; socioeconomics_source = :foo) # not a legal socioeconomics_source option
@test_throws ErrorException get_model(; Agriculture_gtap = "foo") # not a legal gtap spec option
@test_throws ErrorException get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP8") # not a legal SSP scenario option
@test_throws ErrorException get_model(; vsl = :foo) # not a legal vsl option
##------------------------------------------------------------------------------
## keyword arguments and values
##------------------------------------------------------------------------------
# Agriculture GTAP Parameter (Agriculture_gtap)
sccs = []
agcosts = []
for Agriculture_gtap in ["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]
m = get_model(; Agriculture_gtap=Agriculture_gtap)
run(m)
append!(sccs, compute_scc(m, year=2020))
append!(agcosts, sum(skipmissing(m[:Agriculture, :agcost])))
gtap_idx = findfirst(isequal(Agriculture_gtap), MimiMooreEtAlAgricultureImpacts.gtaps)
@test m[:Agriculture, :gtap_df] == MimiMooreEtAlAgricultureImpacts.gtap_df_all[:, :, gtap_idx]
end
@test allunique(sccs)
@test allunique(agcosts)
@test agcosts[4] > agcosts[5] > agcosts[3] # lowDF > midDF > highDF
@test sccs[4] > sccs[5] > sccs[3] # lowDF > midDF > highDF
# socioeconomics_source and SSP_scenario and RFFSPsample
sccs = []
co2_emissions = []
gdp = []
pop = []
for id in [1,2,3]
m_rff = get_model(;RFFSPsample=id)
run(m_rff)
append!(sccs, compute_scc(m_rff, year=2020))
push!(co2_emissions, m_rff[:Socioeconomic, :co2_emissions])
push!(gdp, m_rff[:Socioeconomic, :gdp_global])
push!(pop, m_rff[:Socioeconomic, :population_global])
@test(m_rff[:Socioeconomic, :id] == id)
end
for ssp in ["SSP126", "SSP245", "SSP370", "SSP585"]
m_ssp = get_model(;socioeconomics_source=:SSP, SSP_scenario=ssp)
run(m_ssp)
append!(sccs, compute_scc(m_ssp, year=2020))
push!(co2_emissions, m_ssp[:Socioeconomic, :co2_emissions])
push!(gdp, m_ssp[:Socioeconomic, :gdp_global])
push!(pop, m_ssp[:Socioeconomic, :population_global])
@test(m_ssp[:Socioeconomic, :SSP] == ssp[1:4])
@test(m_ssp[:Socioeconomic, :emissions_scenario] == ssp)
end
@test allunique(sccs)
for i in 1:length(gdp), j in 1:length(gdp) # equivalent to allunique for two arrays
if i !== j
        @test gdp[i] != gdp[j]
        @test pop[i] != pop[j]
        @test co2_emissions[i] != co2_emissions[j]
end
end
@test compute_scc(get_model(;socioeconomics_source=:SSP, SSP_scenario="SSP585"), year = 2020) > compute_scc(get_model(;socioeconomics_source=:SSP, SSP_scenario="SSP126"), year = 2020)
@test compute_scc(get_model(;socioeconomics_source=:SSP, SSP_scenario="SSP245"), year = 2020) > compute_scc(get_model(;socioeconomics_source=:SSP, SSP_scenario="SSP126"), year = 2020)
# vsl
m_epa = get_model(vsl=:epa)
m_fund = get_model(vsl=:fund)
run(m_epa)
run(m_fund)
@test collect(skipmissing(m_epa[:VSL, :vsl])) != collect(skipmissing(m_fund[:VSL, :vsl]))
end # module
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 3458 | module TestRegressionDeterministic
using MimiGIVE
include("utils.jl")
# label of validation data to compare AGAINST
validation_label = "current"
##------------------------------------------------------------------------------
## Validate Model Data
##------------------------------------------------------------------------------
savevars = [
(compname = :DamageAggregator, varname = :total_damage),
(compname = :DamageAggregator, varname = :total_damage_share),
(compname = :DamageAggregator, varname = :total_damage_domestic),
(compname = :DamageAggregator, varname = :cromar_mortality_damage),
(compname = :DamageAggregator, varname = :agriculture_damage),
(compname = :DamageAggregator, varname = :energy_damage),
(compname = :DamageAggregator, varname = :cromar_mortality_damage_domestic),
(compname = :DamageAggregator, varname = :agriculture_damage_domestic),
(compname = :DamageAggregator, varname = :energy_damage_domestic),
(compname = :global_netconsumption, varname = :net_consumption),
(compname = :global_netconsumption, varname = :net_cpc),
(compname = :global_netconsumption, varname = :global_gdp),
(compname = :global_netconsumption, varname = :global_population),
(compname = :temperature, varname = :T),
(compname = :glaciers_small_icecaps, varname = :gsic_sea_level) ,
(compname = :antarctic_icesheet, varname = :ais_sea_level),
(compname = :greenland_icesheet, varname = :greenland_sea_level),
(compname = :thermal_expansion, varname = :te_sea_level),
(compname = :landwater_storage, varname = :lws_sea_level)
]
# default model
validationdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model")
m = MimiGIVE.get_model()
validate_model_data(m, savevars, validationdir)
# SSP245
validationdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "SSP245_model")
m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245")
validate_model_data(m, savevars, validationdir)
##------------------------------------------------------------------------------
## Validate SCC Data
##------------------------------------------------------------------------------
discount_rates = [
# Constant discount rates
(label = "CR 1%", prtp = 0.01, eta = 0.0), (label = "CR 2%", prtp = 0.02, eta = 0.0), (label = "CR 2.5%", prtp = 0.025, eta = 0.0), (label = "CR 3%", prtp = 0.03, eta = 0.0), (label = "CR 5%", prtp = 0.05, eta = 0.0),
# Some Ramsey discount rates
(label = "DICE2016", prtp = 0.015, eta = 1.45), (label = "OtherRamsey", prtp = 0.01, eta = 1.)
]
for gas in [:CO2, :N2O, :CH4]
# default model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
validationdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model_SCC_2020")
m = MimiGIVE.get_model()
validate_scc_data(validationdir; m = m, year = 2020, discount_rates = discount_rates, gas = gas)
# SSP245 model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
validationdir = joinpath(@__DIR__, "validation_data","validation_data_$validation_label", "SSP245_model_SCC_2020")
m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245")
validate_scc_data(validationdir; m = m, year = 2020, discount_rates = discount_rates, gas = gas)
end
end # module
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 4889 | module TestRegressionMCS
using MimiGIVE
include("utils.jl")
# label of validation data to compare AGAINST
validation_label = "current"
##------------------------------------------------------------------------------
## Validate Model Data
##------------------------------------------------------------------------------
savevars = [
(compname = :DamageAggregator, varname = :total_damage),
(compname = :DamageAggregator, varname = :total_damage_share),
(compname = :DamageAggregator, varname = :total_damage_domestic),
(compname = :DamageAggregator, varname = :cromar_mortality_damage),
(compname = :DamageAggregator, varname = :agriculture_damage),
(compname = :DamageAggregator, varname = :energy_damage),
(compname = :DamageAggregator, varname = :cromar_mortality_damage_domestic),
(compname = :DamageAggregator, varname = :agriculture_damage_domestic),
(compname = :DamageAggregator, varname = :energy_damage_domestic),
(compname = :global_netconsumption, varname = :net_consumption),
(compname = :global_netconsumption, varname = :net_cpc),
(compname = :global_netconsumption, varname = :global_gdp),
(compname = :global_netconsumption, varname = :global_population),
(compname = :temperature, varname = :T),
(compname = :glaciers_small_icecaps, varname = :gsic_sea_level) ,
(compname = :antarctic_icesheet, varname = :ais_sea_level),
(compname = :greenland_icesheet, varname = :greenland_sea_level),
(compname = :thermal_expansion, varname = :te_sea_level),
(compname = :landwater_storage, varname = :lws_sea_level)
]
##------------------------------------------------------------------------------
## Validate SCC MCS Data
##------------------------------------------------------------------------------
discount_rates = [
# Constant discount rates
(label = "CR 1%", prtp = 0.01, eta = 0.0), (label = "CR 2%", prtp = 0.02, eta = 0.0), (label = "CR 2.5%", prtp = 0.025, eta = 0.0), (label = "CR 3%", prtp = 0.03, eta = 0.0), (label = "CR 5%", prtp = 0.05, eta = 0.0),
# Some Ramsey discount rates
(label = "DICE2016", prtp = 0.015, eta = 1.45), (label = "OtherRamsey", prtp = 0.01, eta = 1.)
]
save_list = [
(:DamageAggregator, :total_damage),
(:DamageAggregator, :total_damage_share),
(:DamageAggregator, :total_damage_domestic),
(:DamageAggregator, :cromar_mortality_damage),
(:DamageAggregator, :agriculture_damage),
(:DamageAggregator, :energy_damage),
(:DamageAggregator, :cromar_mortality_damage_domestic),
(:DamageAggregator, :agriculture_damage_domestic),
(:DamageAggregator, :energy_damage_domestic),
(:global_netconsumption, :net_consumption),
(:global_netconsumption, :net_cpc),
(:global_netconsumption, :global_gdp),
(:global_netconsumption, :global_population),
(:temperature, :T),
(:glaciers_small_icecaps, :gsic_sea_level) ,
(:antarctic_icesheet, :ais_sea_level),
(:greenland_icesheet, :greenland_sea_level),
(:thermal_expansion, :te_sea_level),
(:landwater_storage, :lws_sea_level)
]
n = 3
seed = 999
# default model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
for gas in [:CO2, :N2O, :CH4]
validationdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "default_model_MCS_SCC_2020", "$gas")
m = MimiGIVE.get_model()
validate_scc_mcs_data(seed, validationdir, n;
m = m,
year = 2020,
discount_rates = discount_rates,
gas = gas,
save_list = save_list,
save_md = true,
save_cpc = true,
save_slr_damages = true,
compute_sectoral_values = true,
compute_domestic_values = true,
)
end
# SSP245 model, SC-CO2 and SC-CH4 and SC-N2O in year 2020
for gas in [:CO2, :N2O, :CH4]
validationdir = joinpath(@__DIR__, "validation_data", "validation_data_$validation_label", "SSP245_model_MCS_SCC_2020", "$gas")
m = MimiGIVE.get_model(; socioeconomics_source = :SSP, SSP_scenario = "SSP245")
validate_scc_mcs_data(seed, validationdir, n;
m = m,
year = 2020,
discount_rates = discount_rates,
gas = gas,
save_list = save_list,
save_md = true,
save_cpc = true,
save_slr_damages = true,
compute_sectoral_values = true,
compute_domestic_values = true,
)
end
end # module | MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 7162 | module TestSaveDisaggregatedValues
using MimiGIVE
using Test
using Query
using DataFrames
using CSVFiles
using Mimi
atol = 1e-9
rtol = 1e-4
output_dir = mktempdir()
n = 3
m = MimiGIVE.get_model()
discount_rates = [(label="Ramsey", prtp=0.015, eta=1.45)]
results = MimiGIVE.compute_scc(m,
year=2020,
discount_rates = discount_rates,
output_dir = output_dir,
save_md = true,
compute_sectoral_values = true,
compute_disaggregated_values = true,
compute_domestic_values = true,
save_slr_damages = true,
n = n,
save_list = [(:Socioeconomic, :population),
(:Socioeconomic, :gdp),
(:DamageAggregator, :cromar_mortality_damage),
(:DamageAggregator, :agriculture_damage),
(:DamageAggregator, :energy_damage),
]
)
# Many of these tests check internal consistency: they verify that the
# disaggregated results sum to the aggregated results, which are produced via
# the `save_list` or the internal marginal damages calculations in the
# post-trial function. This will raise a flag if either piece of functionality
# changes without the other; visual checks via figures are also encouraged.
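# The generic pattern, sketched here with placeholder names `disagg_df` and
# `agg_df`: group the per-country/per-region files by (time, trialnum), sum the
# damages, and compare against the aggregated save-list output, e.g.
#   disagg_total = disagg_df |>
#       @groupby({_.time, _.trialnum}) |>
#       @map({key(_)..., damages = sum(_.damages)}) |> DataFrame
#   @test disagg_total.damages ≈ agg_df.damages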
##
## Sectoral Damages
##
# agriculture
df1 = DataFrame()
for region in dim_keys(m, :fund_regions)
append!(df1,
load(joinpath(output_dir, "results", "disaggregated_values", "damages_agriculture", "$(region).csv")) |>
DataFrame |>
i -> insertcols!(i, :region => Symbol(region))
)
end
df1 = df1 |> @groupby({_.time, _.trialnum}) |> @map({key(_)..., damages = sum(_.damages)}) |> DataFrame
sort!(df1, [:trialnum, :time])
df2 = load(joinpath(output_dir, "results", "model_1", "DamageAggregator_agriculture_damage.csv")) |> @filter(_.time >= 2020) |> DataFrame
sort!(df2, [:trialnum, :time])
@test df1.damages ≈ df2.agriculture_damage
# mortality
df1 = DataFrame()
for country in dim_keys(m, :country)
append!(df1,
load(joinpath(output_dir, "results", "disaggregated_values", "damages_cromar_mortality", "$(country).csv")) |>
DataFrame |>
i -> insertcols!(i, :country => Symbol(country))
)
end
df1 = df1 |> @groupby({_.time, _.trialnum}) |> @map({key(_)..., damages = sum(_.damages)}) |> DataFrame
sort!(df1, [:trialnum, :time])
df2 = load(joinpath(output_dir, "results", "model_1", "DamageAggregator_cromar_mortality_damage.csv")) |> @filter(_.time >= 2020) |> DataFrame
sort!(df2, [:trialnum, :time])
@test df1.damages ≈ df2.cromar_mortality_damage
# energy
df1 = DataFrame()
for country in dim_keys(m, :country)
append!(df1,
load(joinpath(output_dir, "results", "disaggregated_values", "damages_energy", "$(country).csv")) |>
DataFrame |>
i -> insertcols!(i, :country => Symbol(country))
)
end
df1 = df1 |> @groupby({_.time, _.trialnum}) |> @map({key(_)..., damages = sum(_.damages)}) |> DataFrame
sort!(df1, [:trialnum, :time])
df2 = load(joinpath(output_dir, "results", "model_1", "DamageAggregator_energy_damage.csv")) |> @filter(_.time >= 2020) |> DataFrame
sort!(df2, [:trialnum, :time])
@test df1.damages ≈ df2.energy_damage
#slr
df1 = DataFrame()
for country in dim_keys(m, :country)
filepath = joinpath(output_dir, "results", "disaggregated_values", "damages_slr", "$(country).csv")
if isfile(filepath) # some countries not included
append!(df1,
load(filepath) |>
DataFrame |>
i -> insertcols!(i, :country => Symbol(country))
)
end
end
df1 = df1 |> @groupby({_.time, _.trialnum}) |> @map({key(_)..., damages = sum(_.damages)}) |> DataFrame
sort!(df1, [:trialnum, :time])
df2 = load(joinpath(output_dir, "results", "model_1", "slr_damages.csv")) |> @filter(_.time >= 2020) |> DataFrame
sort!(df2, [:trialnum, :time])
@test (df1.damages ./ 1e9) ≈ df2.slr_damages # convert disaggregated data into billions of USD
##
## Socioeconomics
##
# country_test = ["ABW", "CHI", "ZAF"] # random countries to test
gdp_savelist = load(joinpath(output_dir, "results", "model_1", "Socioeconomic_gdp.csv")) |>
DataFrame |>
@filter(_.time >= 2020) |>
DataFrame
pop_savelist = load(joinpath(output_dir, "results", "model_1", "Socioeconomic_population.csv")) |>
DataFrame |>
@filter(_.time >= 2020) |>
DataFrame
df_savelist = innerjoin(gdp_savelist, pop_savelist, on = [:time, :country, :trialnum])
insertcols!(df_savelist, :gdppc => df_savelist.gdp ./ df_savelist.population .* 1e3)
sort!(df_savelist, [:trialnum, :country, :time])
for country in Mimi.dim_keys(m, :country)
disagg_values = load(joinpath(output_dir, "results", "disaggregated_values", "socioeconomics_country", "$country.csv")) |> DataFrame
savelist_values = df_savelist |> @filter(_.country == country) |> DataFrame
@test disagg_values.population ≈ savelist_values.population
@test disagg_values.pc_gdp ≈ savelist_values.gdppc
end
##
## Marginal Damages
##
# compare domestic agriculture (FUND region) saved via original methods to the
# disaggregated values added functionality
md_ag_domestic = DataFrame(results[:mds][(region = :domestic, sector = :agriculture)], Symbol.(2020:2300))
md_ag_domestic = md_ag_domestic |>
i -> insertcols!(i, 1, :trialnum => 1:3) |>
i -> stack(i, Not(:trialnum)) |>
i -> sort!(i, :trialnum) |>
DataFrame
md_usa = load(joinpath(output_dir, "results", "disaggregated_values", "mds_region_ag_only", "USA.csv")) |> DataFrame
@test md_usa.md ≈ md_ag_domestic.value
# compare domestic all other damages (USA + PRI) saved via original methods to the
# disaggregated values added functionality
md_nonag_domestic = DataFrame(results[:mds][(region = :domestic, sector = :cromar_mortality)] .+ results[:mds][(region = :domestic, sector = :energy)] .+ results[:mds][(region = :domestic, sector = :slr)],
Symbol.(2020:2300))
md_nonag_domestic = md_nonag_domestic |>
i -> insertcols!(i, 1, :trialnum => 1:3) |>
i -> stack(i, Not(:trialnum)) |>
i -> sort!(i, :trialnum) |>
DataFrame
md_usa = load(joinpath(output_dir, "results", "disaggregated_values", "mds_country_no_ag", "USA.csv")) |> DataFrame
md_pri = load(joinpath(output_dir, "results", "disaggregated_values", "mds_country_no_ag", "PRI.csv")) |> DataFrame
md_usa_pri = copy(md_usa)
md_usa_pri.md = md_usa.md .+ md_pri.md
@test md_usa_pri.md ≈ md_nonag_domestic.value
end # module
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | code | 18650 | using MimiGIVE
using CSVFiles
using DataFrames
using Mimi
using Test
using Query
using Random
"""
save_model_data(m::Model, savevars::Vector, outdir::String)
Save the data from model `m` indicated by the `savevars` vector into output
folder `outdir`. The `savevars` vector should hold Named Tuples (compname, varname).
"""
function save_model_data(m::Model, savevars::Vector, outdir::String)
run(m)
for tup in savevars
filename = string(tup.compname, "_", tup.varname, ".csv")
getdataframe(m, tup.compname, tup.varname) |> save(joinpath(outdir, filename))
end
end
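# Hypothetical usage sketch (mirrors how the validation scripts call this):
#   savevars = [(compname = :temperature, varname = :T)]
#   save_model_data(MimiGIVE.get_model(), savevars, mktempdir())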
"""
save_scc_data(outdir::String;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates = [(label="default", prtp=0.15, eta=1.45)],
gas::Symbol = :CO2,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64=1.
)
Save the data from the user-specified SCC computation using model `m` into output
folder `outdir`.
"""
function save_scc_data(outdir::String;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates = [(label="default", prtp=0.15, eta=1.45)],
gas::Symbol = :CO2,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64=1.
)
df = DataFrame(:dr_label => [], :prtp => [], :eta => [], :sector => [], :scc => [])
# global
results = MimiGIVE.compute_scc(m; year=year, last_year=last_year, discount_rates=discount_rates,
gas=gas, CIAM_foresight=CIAM_foresight, CIAM_GDPcap=CIAM_GDPcap,
pulse_size=pulse_size)
for (k,v) in results
append!(df, DataFrame(:dr_label => k.dr_label, :prtp => k.prtp, :eta => k.eta, :sector => :global, :scc => v))
end
# check which sectors are true
run(m)
included_sectors = []
for sector in [:energy, :ag, :cromar_mortality, :slr]
if m[:DamageAggregator, Symbol(:include_, sector)]
push!(included_sectors, sector) # add to the list
update_param!(m, :DamageAggregator, Symbol(:include_, sector), false) # turn it off
end
end
# sectoral
for sector in included_sectors
update_param!(m, :DamageAggregator, Symbol(:include_, sector), true)
results = MimiGIVE.compute_scc(m; year=year, last_year=last_year, discount_rates=discount_rates,
gas=gas, CIAM_foresight=CIAM_foresight, CIAM_GDPcap=CIAM_GDPcap,
pulse_size=pulse_size)
for (k,v) in results
append!(df, DataFrame(:dr_label => k.dr_label, :prtp => k.prtp, :eta => k.eta, :sector => sector, :scc => v))
end
update_param!(m, :DamageAggregator, Symbol(:include_, sector), false)
end
# turn back on so m is unchanged
    for sector in included_sectors
        # these sectors were on before being toggled off above, so re-enable
        # each one unconditionally to leave m unchanged
        update_param!(m, :DamageAggregator, Symbol(:include_, sector), true)
    end
df |> save(joinpath(outdir, "SCC-$gas.csv"))
end
"""
    save_scc_mcs_data(seed::Int, outdir::String, n::Int; kwargs...)

Save the data from the user-specified SCC Monte Carlo Simulation computation
using model `m` into output folder `outdir`.
"""
function save_scc_mcs_data(seed::Int, outdir::String, n::Int;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates=nothing,
certainty_equivalent=false,
gas::Symbol = :CO2,
save_list::Vector = [],
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64 = 1.
)
Random.seed!(seed)
results = MimiGIVE.compute_scc(m;
n = n,
year = year,
last_year = last_year,
discount_rates = discount_rates,
certainty_equivalent = certainty_equivalent,
fair_parameter_set = :deterministic,
fair_parameter_set_ids = collect(1:n),
rffsp_sampling = :deterministic,
rffsp_sampling_ids = collect(1:n),
gas = gas,
save_list = save_list,
output_dir = outdir,
save_md = save_md,
save_cpc = save_cpc,
save_slr_damages = save_slr_damages,
compute_sectoral_values = compute_sectoral_values,
compute_domestic_values = compute_domestic_values,
CIAM_foresight = CIAM_foresight,
CIAM_GDPcap = CIAM_GDPcap,
pulse_size = pulse_size
)
# above will save out the save list variables for model1 and model2, we now
# need to save the scc information
# scc
scc_outdir = joinpath(outdir, "scc")
mkpath(scc_outdir)
df_expected_scc = DataFrame(:region => [], :sector => [], :dr_label => [], :prtp => [], :eta => [], :scc => [])
df_se_expected_scc = DataFrame(:region => [], :sector => [], :dr_label => [], :prtp => [], :eta => [], :se => [])
df_sccs = DataFrame(:region => [], :sector => [], :dr_label => [], :prtp => [], :eta => [], :scc => [], :trial => [])
for (k,v) in results[:scc]
append!(df_expected_scc, DataFrame(:region => k.region, :sector => k.sector, :dr_label => k.dr_label, :prtp => k.prtp, :eta => k.eta, :scc => v.expected_scc))
append!(df_se_expected_scc, DataFrame(:region => k.region, :sector => k.sector, :dr_label => k.dr_label, :prtp => k.prtp, :eta => k.eta, :se => v.se_expected_scc))
append!(df_sccs, DataFrame(:region => k.region, :sector => k.sector, :dr_label => k.dr_label, :prtp => k.prtp, :eta => k.eta, :scc => v.sccs, :trial => collect(1:length(v.sccs))))
end
df_expected_scc|> save(joinpath(scc_outdir, "expected_scc.csv"))
df_se_expected_scc|> save(joinpath(scc_outdir, "se_expected_scc.csv"))
df_sccs|> save(joinpath(scc_outdir, "sccs.csv"))
# marginal damages
mds_outdir = joinpath(outdir, "mds")
mkpath(mds_outdir)
for (k,v) in results[:mds]
df = DataFrame(v, :auto)
rename!(df, Symbol.(year:last_year))
insertcols!(df, 1, :trial => 1:size(df,1))
df = stack(df, Not(:trial))
df |> save(joinpath(mds_outdir, "mds-$(k.region)-$(k.sector).csv"))
end
# consumption per capita
cpc_outdir = joinpath(outdir, "cpc")
mkpath(cpc_outdir)
region = :globe
sector = :total
df = DataFrame(results[:cpc][(region = region, sector = sector)], :auto)
rename!(df, Symbol.(year:last_year))
insertcols!(df, 1, :trial => 1:size(df,1))
df = stack(df, Not(:trial))
df |> save(joinpath(cpc_outdir, "cpc-$region-$sector.csv"))
end
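# A hedged usage sketch of the function above (the seed, directory, trial count,
# and discount rates here are hypothetical):
#   save_scc_mcs_data(42, "output/scc_mcs", 100;
#       discount_rates = [(label = "Ramsey", prtp = 0.015, eta = 1.45)],
#       save_md = true, save_cpc = true)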
"""
validate_model_data(m::Model, savevars::Vector, validationdir::String)
Validate the model `m`'s data indicated by the `savevars` vector against the
data in the `validationdir`. The `savevars` vector should hold NamedTuples
with fields (compname, varname).
"""
function validate_model_data(m::Model, savevars::Vector, validationdir::String)
# TOLERANCE
rtol = 1e-9 # use relative tolerance for model data since we can't assume orders of magnitude
run(m)
for tup in savevars
# load validation data
filename = string(tup.compname, "_", tup.varname, ".csv")
validation_df = load(joinpath(validationdir, filename)) |> DataFrame
# get the model data
m_df = getdataframe(m, tup.compname, tup.varname)
# test each column
for col in names(validation_df)
@test collect(skipmissing(validation_df[!, col])) ≈ collect(skipmissing(m_df[!, col])) rtol = rtol
end
end
end
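# A hedged usage sketch (the component/variable names and path are hypothetical):
#   validate_model_data(m, [(compname = :temperature, varname = :T)], "validation/model_data")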
"""
validate_scc_data(validationdir::String;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates = [(label="default", prtp=0.15, eta=1.45)],
gas::Symbol = :CO2,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64=1.
)
Validate the data from the user-specified SCC computation using model `m`
against the data in the `validationdir`.
"""
function validate_scc_data(validationdir::String;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates = [(label="default", prtp=0.15, eta=1.45)],
gas::Symbol = :CO2,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64=1.
)
# TOLERANCE
atol = 1e-3 # for SCC dollar values
# load validation data
filename = "SCC-$gas.csv"
validation_df = load(joinpath(validationdir, filename)) |> DataFrame
# get the global model data
results = MimiGIVE.compute_scc(m; year=year, last_year=last_year, discount_rates=discount_rates,
gas=gas, CIAM_foresight=CIAM_foresight, CIAM_GDPcap=CIAM_GDPcap,
pulse_size=pulse_size)
# test each discount rate/sector combination
for (k,v) in results
validation_scc = validation_df |>
@filter(_.dr_label == k.dr_label && _.sector == "global") |>
DataFrame
validation_scc = (validation_scc.scc)[1]
@test validation_scc ≈ v atol = atol
end
# check which sectors are true
run(m)
included_sectors = []
for sector in [:energy, :ag, :cromar_mortality, :slr]
if m[:DamageAggregator, Symbol(:include_, sector)]
push!(included_sectors, sector) # add to the list
update_param!(m, :DamageAggregator, Symbol(:include_, sector), false) # turn it off
end
end
for sector in included_sectors
# get the sectoral model data
update_param!(m, :DamageAggregator, Symbol(:include_, sector), true)
results = MimiGIVE.compute_scc(m; year=year, last_year=last_year, discount_rates=discount_rates,
gas=gas, CIAM_foresight=CIAM_foresight, CIAM_GDPcap=CIAM_GDPcap,
pulse_size=pulse_size)
# test each discount rate/sector combination
for (k,v) in results
validation_scc = validation_df |>
@filter(_.dr_label == k.dr_label && _.sector == string(sector)) |>
DataFrame
validation_scc = (validation_scc.scc)[1]
@test validation_scc ≈ v atol = atol
end
update_param!(m, :DamageAggregator, Symbol(:include_, sector), false)
end
# turn back on so m is unchanged
for sector in included_sectors
if m[:DamageAggregator, Symbol(:include_, sector)]
update_param!(m, :DamageAggregator, Symbol(:include_, sector), true) # turn it on
end
end
end
"""
validate_scc_mcs_data(seed::Int, validationdir::String, n::Int;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates=nothing,
certainty_equivalent=false,
gas::Symbol = :CO2,
save_list::Vector = [],
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64 = 1.
)
Validate the data from the user-specified SCC Monte Carlo Simulation computation
using model `m` against the data in the `validationdir`.
"""
function validate_scc_mcs_data(seed::Int, validationdir::String, n::Int;
m::Model=MimiGIVE.get_model(),
year::Union{Int, Nothing} = nothing,
last_year::Int = MimiGIVE._model_years[end],
discount_rates=nothing,
certainty_equivalent=false,
gas::Symbol = :CO2,
save_list::Vector = [],
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
pulse_size::Float64 = 1.
)
# TOLERANCE
atol = 1e-3 # for SCC dollar values
rtol = 1e-4 # use relative tolerance for non-SCC values
# get the model data
tmpdir = tempdir()
Random.seed!(seed)
results = MimiGIVE.compute_scc(m;
n = n,
year = year,
last_year = last_year,
discount_rates = discount_rates,
certainty_equivalent = certainty_equivalent,
fair_parameter_set = :deterministic,
fair_parameter_set_ids = collect(1:n),
rffsp_sampling = :deterministic,
rffsp_sampling_ids = collect(1:n),
gas = gas,
save_list = save_list,
output_dir = tmpdir,
save_md = save_md,
save_cpc = save_cpc,
save_slr_damages = save_slr_damages,
compute_sectoral_values = compute_sectoral_values,
compute_domestic_values = compute_domestic_values,
CIAM_foresight = CIAM_foresight,
CIAM_GDPcap = CIAM_GDPcap,
pulse_size = pulse_size
)
# save list - just compare model_1 for now, model1 is sufficiently tested
# by testing the scc values
for el in save_list
validation_df = load(joinpath(validationdir, "results", "model_1", "$(el[1])_$(el[2]).csv")) |> DataFrame
m_df = load(joinpath(tmpdir, "results", "model_1", "$(el[1])_$(el[2]).csv")) |> DataFrame
for col in names(validation_df) # test each column
@test collect(skipmissing(validation_df[!, col])) ≈ collect(skipmissing(m_df[!, col])) rtol = rtol
end
end
# sccs
validation_df_expected_scc = load(joinpath(validationdir, "scc", "expected_scc.csv")) |> DataFrame
validation_df_se_expected_scc = load(joinpath(validationdir, "scc", "se_expected_scc.csv")) |> DataFrame
validation_df_sccs = load(joinpath(validationdir, "scc", "sccs.csv")) |> DataFrame
for (k,v) in results[:scc]
validation_vals = validation_df_expected_scc |>
@filter(_.dr_label == k.dr_label && _.region == String.(k.region) && _.sector == String.(k.sector)) |>
DataFrame
validation_vals = (validation_vals.scc)[1]
@test validation_vals ≈ v.expected_scc atol = atol
validation_vals = validation_df_se_expected_scc |>
@filter(_.dr_label == k.dr_label && _.region == String.(k.region) && _.sector == String.(k.sector)) |>
DataFrame
validation_vals = (validation_vals.se)[1]
@test validation_vals ≈ v.se_expected_scc atol = atol
validation_vals = validation_df_sccs |>
@filter(_.dr_label == k.dr_label && _.region == String.(k.region) && _.sector == String.(k.sector)) |>
DataFrame
validation_vals = validation_vals.scc
@test validation_vals ≈ v.sccs atol = atol
end
# marginal damages
for (k,v) in results[:mds]
m_df = DataFrame(v, :auto)
rename!(m_df, Symbol.(year:last_year))
insertcols!(m_df, 1, :trial => 1:size(m_df,1))
m_df = stack(m_df, Not(:trial))
validation_df = load(joinpath(validationdir, "mds", "mds-$(k.region)-$(k.sector).csv")) |> DataFrame
@test validation_df[!, :value] ≈ m_df[!, :value] rtol = rtol
end
# consumption per capita
region = :globe
sector = :total
m_df = DataFrame(results[:cpc][(region = region, sector = sector)], :auto)
rename!(m_df, Symbol.(year:last_year))
insertcols!(m_df, 1, :trial => 1:size(m_df,1))
m_df = stack(m_df, Not(:trial))
validation_df = load(joinpath(validationdir, "cpc", "cpc-$region-$sector.csv")) |> DataFrame
@test validation_df[!, :value] ≈ m_df[!, :value] rtol = rtol
end
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 2.1.0 | 2aab61557fbfb0092a4adc435e20720ab6406ca7 | docs | 31385 | # MimiGIVE.jl
This package holds the scripts to run the GIVE integrated assessment model.
# 1. Preparing the Software Environment
You need to install [Julia](https://julialang.org/) version 1.6 to run this model.
To add the package to your current environment, run the following command at the julia package REPL:
```julia
pkg> add MimiGIVE
```
You probably also want to install the Mimi package into your julia environment, so that you can use some of the tools from that package:
```julia
pkg> add Mimi
```
# 2. Running the Model
## 2a. Fully Coupled Model
The model uses the Mimi framework; reading the Mimi documentation first is highly recommended in order to understand the code structure. The basic way to access the model, run it, and explore the results is the following:
```julia
using Mimi
using MimiGIVE
# Create the model using default specifications
m = MimiGIVE.get_model()
# Run the model
run(m)
# Explore interactive plots of all the model output.
explore(m)
# Access a specific variable's data as an array.
co2_emissions = m[:co2_cycle, :E_co2]
# Access a specific variable's data in a Dataframe
co2_emissions = getdataframe(m, :co2_cycle, :E_co2)
```
The `get_model` function above has the signature and options as follows:
```julia
function get_model(; Agriculture_gtap::String = "midDF",
socioeconomics_source::Symbol = :RFF,
SSP_scenario::Union{Nothing, String} = nothing,
RFFSPsample::Union{Nothing, Int} = nothing,
Agriculture_floor_on_damages::Bool = true,
Agriculture_ceiling_on_benefits::Bool = false,
vsl::Symbol= :epa
)
```
The relevant arguments above are described as:
**Socioeconomic**
- socioeconomics_source (default :RFF) - The options are :RFF, which uses data from
the RFF socioeconomic projections, or :SSP, which uses data from one of the
Shared Socioeconomic Pathways
- SSP_scenario (default to nothing) - This setting is used only if one is using
the SSPs as the socioeconomics_source, and the current options are "SSP119",
"SSP126", "SSP245", "SSP370", "SSP585", and this will be used as follows.
See the SSPs component here: https://github.com/anthofflab/MimiSSPs.jl for more information.
(1) Select the population and GDP trajectories for 2020 through 2300, mapping
each RCMIP scenario to the SSP (SSP1, 2, 3, 5 respectively)
(2) Choose the ar6 scenario for data from 1750 - 2019 and the RCMIP emissions
scenario from the MimiSSPs component to pull Leach et al. RCMIP scenario
data for 2020 to 2300 for CO2, CH4, and N2O.
(NOTE) If the socioeconomics_source is :RFF this will not be consequential:
ssp245 will be used for the ar6 data from 1750 - 2019 and trace gases
from 2020 onwards, while emissions for CO2, CH4, and N2O will come from
the MimiRFFSPs component.
- RFFSPsample (default nothing, which will use the default sample in MimiRFFSPs) - choose
the sample for which to run the RFF SP. See the RFFSPs component here:
https://github.com/rffscghg/MimiRFFSPs.jl.
**Agriculture**
- Agriculture_gtap (default midDF) - specify the `Agriculture_gtap` input parameter as one of
`["AgMIP_AllDF", "AgMIP_NoNDF", "highDF", "lowDF", "midDF"]`, indicating which
gtap damage function the component should use.
- Agriculture_floor_on_damages (default true) - If `Agriculture_floor_on_damages` = true, then
the agricultural damages (negative values of the `agcost` variable) in each
timestep will not be allowed to exceed 100% of the size of the agricultural
sector in each region.
- Agriculture_ceiling_on_benefits (default false) - If `Agriculture_ceiling_on_benefits` = true,
then the agricultural benefits (positive values of the `agcost` variable) in
each timestep will not be allowed to exceed 100% of the size of the agricultural
sector in each region.
**Other**
- vsl (default :epa) - Specify the source of the value of statistical life (VSL) being used in the model. The default `:epa` uses the 2017 VSL used in U.S. EPA analyses. Alternatively, one could use `:fund`. Both are described in the `DataExplainer` along with references to the underlying values.
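For example, a few illustrative configurations using these keyword arguments (the specific values are examples, not recommendations):
```julia
# default RFF socioeconomics
m_default = MimiGIVE.get_model()
# SSP-based socioeconomics and emissions
m_ssp = MimiGIVE.get_model(socioeconomics_source = :SSP, SSP_scenario = "SSP245")
# a specific RFF SP sample, using the FUND VSL
m_sample = MimiGIVE.get_model(RFFSPsample = 1000, vsl = :fund)
```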
## 2b. Append Offline MimiCIAM Coupling
The MimiCIAM sea level rise damages component is currently "offline" coupled to the main model due to integration barriers based on model construction and need for foresight. To run MimiCIAM using the outputs of the GIVE model, first create and run a GIVE model as above in Section 2a., and then use the `get_ciam` and `update_ciam!` functions to obtain a CIAM model parameterized by the outputs of the GIVE model, as follows:
```julia
using Mimi
using MimiGIVE
# Create the default GIVE model
m = MimiGIVE.get_model()
# Run the model
run(m)
# Get the default CIAM model
m_ciam, segment_fingerprints = MimiGIVE.get_ciam(m)
# Update the CIAM model with MimiGIVE specific parameters
MimiGIVE.update_ciam!(m_ciam, m, segment_fingerprints)
# Run the CIAM model
run(m_ciam)
# Access and explore the results of CIAM using `getindex`, `getdataframe`, and
# `explore` as above.
# NOTE: `explore` may be unwieldy and/or slow for CIAM, as the dimensionality
# is large with ~12,000 coastal segments; we recommend accessing variables
# individually instead.
explore(m_ciam)
```
# 3. Running a Monte Carlo Simulation (MCS)
You can run a Monte Carlo Simulation on the GIVE model to explore the effects of parametric uncertainty. This functionality leverages the MCS functionality of the Mimi package, and it is recommended that you review the documentation in that package for background and context.
## 3a. API
Running a basic Monte Carlo Simulation on the default model `m = MimiGIVE.get_model()` with 100 trials can be carried out as follows.
```julia
using Mimi
using MimiGIVE
mcs_results = MimiGIVE.run_mcs(trials = 100)
```
The built-in Monte Carlo Simulation details can be found in `src/main_mcs.jl` and the primary function has the signature as follows:
```julia
function run_mcs(;trials::Int64 = 10000,
output_dir::Union{String, Nothing} = nothing,
save_trials::Bool = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
m::Mimi.Model = get_model(),
save_list::Vector = [],
results_in_memory::Bool = true
)
```
This function returns the results of a Monte Carlo Simulation with the defined number of `trials` and saves data into the `output_dir` folder, optionally also saving the parameter settings for each individual trial if `save_trials` is set to `true`. If no model `m` is provided, we will run with the default model from `get_model()`. The rest of the arguments are described as follows:
- `trials` (default 10,000) - number of trials to be run, used for presampling
- `output_dir` (default constructed folder name) - folder to hold results
- `save_trials` (default false) - whether to save all random variables for all trials to trials.csv
- `fair_parameter_set` (default :random) - :random means FAIR mcs samples will be chosen randomly from the provided sets, while :deterministic means they will be based on the vector provided to the `fair_parameter_set_ids` keyword argument.
- `fair_parameter_set_ids` - (default nothing) - if `fair_parameter_set` is set to :deterministic, this `n` element vector provides the fair parameter set ids that will be run, otherwise it is set to `nothing` and ignored.
- `rffsp_sampling` (default :random) - which sampling strategy to use for the RFF SPs: :random means RFF SPs will be chosen randomly, while :deterministic means they will be based on the vector provided to the `rffsp_sampling_ids` keyword argument.
- `rffsp_sampling_ids` - (default nothing) - if `rffsp_sampling` is set to :deterministic, this `n` element vector provides the RFF SP ids that will be run, otherwise it is set to `nothing` and ignored.
- `m` (default get_model()) - the model to run the simulation for
- `save_list` (default []) - which parameters and variables to save for each trial, entered as a vector of Tuples (:component_name, :variable_name)
- `results_in_memory` (default true) - this should be turned off if you are running into memory problems, data will be streamed out to disk but not saved in memory to the mcs object
For a more in-depth analysis, try exploring some saved values like temperature `T` and total CO2 emissions `co2`, with
```julia
save_list = [(:temperature, :T), (:co2_cycle, :co2)]
# default model
mcs_results = MimiGIVE.run_mcs(trials = 100, save_list = save_list)
explore(mcs_results)
# specific model and save the trials values
m = MimiGIVE.get_model(socioeconomics_source=:SSP, SSP_scenario = "SSP585")
mcs_results = MimiGIVE.run_mcs(trials = 100, save_trials = true, m = m, save_list = save_list)
explore(mcs_results)
```
Note that if `results_in_memory` is set to `false`, you will not be able to explore your saved results from the `mcs_results` object, but instead read them in from the CSV files in the `output_dir`.
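As a minimal sketch, a streamed `save_list` variable can be read back in as follows (the `results/model_1` layout follows the pattern used by the validation utilities in this repository and may differ for a plain `run_mcs` call; `output_dir` and the `(:temperature, :T)` entry are hypothetical):
```julia
using DataFrames, CSVFiles
df = load(joinpath(output_dir, "results", "model_1", "temperature_T.csv")) |> DataFrame
```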
## 3b. More Details on Saving Results and Memory
**run_mcs function arguments**
- `output_dir` (default is `nothing` and then constructed under the hood) - if this is not entered, a default folder will be constructed within your `output` folder with the date, time, and trials number. Any saved data will be saved to this folder, including `trials.csv` if `save_trials = true` (a large `n` will make this too big :)) and anything in the `save_list` as described below.
- `save_list` (default is empty vector `[]`) is a vector of Tuples, each holding two Symbols; an example is below. Note this can be any variable or parameter you see in any component of the model (all also shown in the explorer). If you are wondering about a specific parameter that is set with a random variable, take a look at the `get_mcs` function and see where a given random variable is assigned to a component/parameter pair. **Inquire with model developers if you are curious about how to export something specific!**
```julia
mcs = run_mcs(m, trials = 100, save_list = [(:temperature, :T), (:co2_cycle, :co2)])
```
- `results_in_memory` (default is `true`) - if this is `true`, you will be able to access the data in the `save_list` from the returned `mcs` object with `getdataframe(mcs, :component, :variable/parameter)`, or `explore(mcs)`, but this will build up a large dataframe in memory. If this becomes a problem, just turn this flag off as follows, and your data will **only** be streamed to files in `output_dir`.
```julia
mcs = run_mcs(m, trials = 100, save_list = [(:temperature, :T), (:co2_cycle, :co2)], results_in_memory = false)
```
**compute_scc function arguments**
This function uses similar arguments to above, with the following differences:
- `results_in_memory` is automatically set to `false`, without the option to turn it on via a keyword argument. This can be changed in the source code if desired.
- `output_dir` will hold **two** folders, `model_1` and `model_2`, which correspond to the `base` and `marginal` models, i.e. `base` and base + pulse of gas (`marginal`). This may be helpful for looking at temperature trajectories and marginal damages. Remember that the pulse units are important; take a look at these two dictionaries from `scc.jl`, which may help with conversions.
```julia
const scc_gas_molecular_conversions = Dict(:CO2 => 12/44, # C to CO2
:N2O => 28/44, # N2 to N2O,
:CH4 => 1.) # CH4 to CH4
const scc_gas_pulse_size_conversions = Dict(:CO2 => 1e9, # Gt to t
:N2O => 1e6, # Mt to t
:CH4 => 1e6) # Mt to t
```
## 3c. Uncertain Parameters
Currently, this Monte Carlo Simulation includes the following uncertain parameters:
**Climate**
- The implementation of FAIRv1.6.2 uses the 2237 constrained parameter sets used in the AR6 (see the description of details [here](https://github.com/rffscghg/MimiGIVE.jl/blob/main/docs/DataExplainer.ipynb) under the FAIR v1.6.2 heading).
- Sea Level Rise - The BRICK model varies a land water storage parameter.
**Damages**
- Socioeconomics: uncertainty using a Uniform distribution across all 10,000 scenarios when RFF scenarios are enabled
- Agriculture: uncertainty using a Triangular distribution across damage function parameters
- Mortality: uncertainty using a resampled parameterization of the damage function for Cromar et al.
- Global Damage Functions: uncertainty in Nordhaus (2017) and Howard and Sterner (2017) is derived from the parametric uncertainty as stated in the corresponding publication and replication code.
# 4. Calculating the SCC
We provide a user-facing API call `compute_scc` to compute the Social Cost of CO2 **in $2005 USD** for this model. The signature of this function is as follows:
```julia
function compute_scc(m::Model = get_model();
year::Union{Int, Nothing} = nothing,
last_year::Int = _model_years[end],
prtp::Union{Float64,Nothing} = 0.015,
eta::Union{Float64,Nothing} = 1.45,
discount_rates = nothing,
certainty_equivalent = false,
fair_parameter_set::Symbol = :random,
fair_parameter_set_ids::Union{Vector{Int}, Nothing} = nothing,
rffsp_sampling::Symbol = :random,
rffsp_sampling_ids::Union{Vector{Int}, Nothing} = nothing,
n = 0,
gas::Symbol = :CO2,
save_list::Vector = [],
output_dir::Union{String, Nothing} = nothing,
save_md::Bool = false,
save_cpc::Bool = false,
save_slr_damages::Bool = false,
compute_sectoral_values::Bool = false,
compute_disaggregated_values::Bool = false,
compute_domestic_values::Bool = false,
CIAM_foresight::Symbol = :perfect,
CIAM_GDPcap::Bool = false,
post_mcs_creation_function = nothing,
pulse_size::Float64 = 1.
)
```
This function computes the social cost of a gas for an emissions pulse in `year` **in $2005 USD** for the provided MimiGIVE model, which can be specified as above with particular settings. If no model is provided, the default model from MimiGIVE.get_model() is used. Furthermore, the `DamageAggregator` component allows users to decide which damages are included in the aggregated damages used for the SCC. The rest of the arguments are described as follows:
- `m` (default get_model()) - if no model is provided, the default model from MimiGIVE.get_model() is used.
- `year` (default nothing) - year for which to calculate the SC (year of pulse)
- `last_year` (default 2300) - last year to include in damages summation
- `prtp` (default 0.015) and `eta` (default 1.45) - Ramsey discounting parameterization
- `discount_rates` (default nothing) - a vector of NamedTuples, e.g. [(label = "dr1", prtp = 0.03, eta = 1.45, ew = :consumption_region, ew_norm_region = "USA"), (label = "dr2", prtp = 0.015, eta = 1.45, ew = nothing, ew_norm_region = nothing)] - required if running n > 1
- `certainty_equivalent` (default false) - whether to compute the certainty equivalent or expected SCC
- `fair_parameter_set` (default :random) - :random means FAIR mcs samples will be chosen randomly from the provided sets, while :deterministic means they will be based on the vector provided to the `fair_parameter_set_ids` keyword argument.
- `fair_parameter_set_ids` - (default nothing) - if `fair_parameter_set` is set to :deterministic, this `n` element vector provides the fair parameter set ids that will be run, otherwise it is set to `nothing` and ignored.
- `rffsp_sampling` (default :random) - which sampling strategy to use for the RFF SPs: :random means RFF SPs will be chosen randomly, while :deterministic means they will be based on the vector provided to the `rffsp_sampling_ids` keyword argument.
- `rffsp_sampling_ids` - (default nothing) - if `rffsp_sampling` is set to :deterministic, this `n` element vector provides the RFF SP ids that will be run, otherwise it is set to `nothing` and ignored.
- `n` (default 0) - If `n` is 0, the deterministic version will be run; otherwise, a Monte Carlo Simulation will be run.
- `gas` (default :CO2) - the gas for which to compute the SC, options are :CO2, :CH4, and :N2O
- `save_list` (default []) - which parameters and variables to save for each trial, entered as a vector of Tuples (:component_name, :variable_name)
- `output_dir` (default constructed folder name) - folder to hold results
- `save_md` (default is false) - save and return the marginal damages from a monte carlo simulation
- `save_cpc` (default is false) - save and return the per capita consumption from a monte carlo simulation
- `save_slr_damages` (default is false) - save global sea level rise damages from CIAM to disk
- `compute_sectoral_values` (default is false) - compute and return sectoral values as well as total
- `compute_disaggregated_values` (default is false) - compute spatially disaggregated marginal damages, sectoral damages, and socioeconomic variables
- `compute_domestic_values` (default is false) - compute and return domestic values in addition to global
- `CIAM_foresight` (default is :perfect) - Use limited foresight (:limited) or perfect foresight (:perfect) for MimiCIAM cost calculations
- `CIAM_GDPcap` (default is false) - Limit SLR damages to country-level annual GDP
- `pulse_size` (default 1.) - This determines the size of the additional pulse of emissions. Default of `1.` implies the standard pulse size of 1Gt of C for CO2, 1Mt of CH4, and 1Mt of N2O.
**Discount Rate Note**: In `scc.jl`, the rate of pure time preference `prtp` is treated as a discrete variable, such that `prtp` is applied in the discrete form `1/(1+prtp)^(t-year)`. Note that if using a `prtp` value calculated as a continuous time rate, like those in Rennert et al. (2021), one must transform this discount factor with `prtp_discrete = exp(prtp) - 1`. Please be in touch with the developers if you need assistance or further explanation!
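For example, converting a continuous-time rate of 2% to its discrete counterpart:
```julia
prtp_continuous = 0.02
prtp_discrete = exp(prtp_continuous) - 1  # ≈ 0.0202
```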
The social cost of a gas can be calculated either deterministically or through a Monte Carlo Simulation which incorporates parametric uncertainty and is the recommended approach. Details follow in sections 4a and 4b.
## 4a. Deterministic SCC Calculation
Running a deterministic SCC calculation uses a subset of the possible arguments to `compute_scc` to compute a single cost from default parameter specifications.
Some example use cases include:
```julia
# Compute a simple baseline case
scc = MimiGIVE.compute_scc(year=2020)
# Compute the SCC for a different SSP/emissions scenario combination using the default sources of data (Benveniste and Leach, respectively) and a different discounting scheme parameterization
m = MimiGIVE.get_model(socioeconomics_source=:SSP, SSP_scenario="SSP585")
MimiGIVE.compute_scc(m, year=2020, prtp=0.03, eta=0.)
# Compute the partial SCC for ag:
m = MimiGIVE.get_model()
update_param!(m, :DamageAggregator, :include_ag, true)
update_param!(m, :DamageAggregator, :include_cromar_mortality, false)
update_param!(m, :DamageAggregator, :include_slr, false)
update_param!(m, :DamageAggregator, :include_energy, false)
MimiGIVE.compute_scc(m, year=2020, prtp=0.03, eta=0.)
```
You can also pass `compute_scc` a vector of `NamedTuple`s to the `discount_rates` argument if you would like to compute the SCC for a few different discounting schemes. Each `NamedTuple` should have five elements:
- label - a `String` label for the discount rate
- prtp - a `Float64` for the pure rate of time preference Ramsey parameter
- eta - a `Float64` for the risk aversion Ramsey parameter
- ew - a member of `[nothing, :gdp, :consumption_region, :consumption_country]` indicating whether to equity weight, and if so, whether to use gdp or consumption to do so
- ew_norm_region - a `String` dictating the normalization region for equity weighting (a country if using `:gdp` or `:consumption_country` or a FUND region if using `:consumption_region`)
For example:
```julia
discount_rates = [(label="Ramsey", prtp=0.015, eta=1.45, ew=nothing, ew_norm_region=nothing), (label="Constant 2%", prtp=0.02, eta=0., ew=nothing, ew_norm_region=nothing)]
MimiGIVE.compute_scc(m, year=2020, discount_rates = discount_rates)
```
**Returned `result` Object Structure**
If only one discount rate specification is provided, the `compute_scc(...)` function run deterministically will return a single number. If a vector of discount rates is provided via the `discount_rates` argument, then the returned object is a Dictionary with keys being `NamedTuples` with elements (dr_label, prtp, eta, ew, ew_norm_region) corresponding to the `discount_rates` elements (label, prtp, eta, ew, ew_norm_region).
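A sketch of accessing that Dictionary, assuming the two-rate `discount_rates` vector from the example above (field names and order as described; the exact key construction may vary):
```julia
result = MimiGIVE.compute_scc(m, year = 2020, discount_rates = discount_rates)
key = (dr_label = "Ramsey", prtp = 0.015, eta = 1.45, ew = nothing, ew_norm_region = nothing)
scc_ramsey = result[key]
```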
## 4b. Monte Carlo Simulation (MCS) SCC Calculation
As described in Section 3, you can run the model using a Monte Carlo Simulation. This functionality can be used to compute a distribution of SCCs using the `n` parameter of `compute_scc`. If `n` is 0, as is default, the deterministic SCC will be calculated. If `n` is 2 or greater, a Monte Carlo Simulation will be run. Note that currently for this option *you must use the `discount_rates` argument*.
The simplest case of running a Monte Carlo Simulation would look like:
```julia
discount_rates = [(label="Ramsey", prtp=0.015, eta=1.45), (label="Constant 2%", prtp=0.02, eta=0.)]
result = MimiGIVE.compute_scc(year = 2020, discount_rates = discount_rates, n = 5)
```
**Optional Extra Output Specifications**
- **Marginal Damages** (only relevant for Monte Carlo Simulation): Set keyword argument `save_md` to `true` to include undiscounted marginal damages in the returned results.
- **Net Per Capita Consumption** (only relevant for Monte Carlo Simulation): Set keyword argument `save_cpc` to `true` to include net per capita consumption in the returned results.
- **Sectorally Disaggregated Values** (only relevant for Monte Carlo Simulation): Set keyword argument `compute_sectoral_values` to `true` to compute sectorally disaggregated values. Calculations of the disaggregated sectoral SCCs will use global consumption to calculate discount factors, and thus the discount factors are consistent between the global and sectoral calculations of the SCC. To compute an isolated sectoral SCC one may run a separate simulation with only that sector's damages turned on.
- **Within U.S. Borders Values** - Set keyword argument `compute_domestic_values` to `true` to include SCC (and optional marginal damage) values disaggregated to the within-borders USA damages. Calculations of the disaggregated within U.S. borders SCC will use global consumption to calculate discount factors, and thus the discount factors are consistent between the global and within borders calculations of the SCC.
If all four of these are set to `true`, one would run something like:
```julia
discount_rates = [(label="Ramsey", prtp=0.015, eta=1.45, ew=nothing, ew_norm_region=nothing), (label="Constant 2%", prtp=0.02, eta=0., ew=nothing, ew_norm_region=nothing)]
result = MimiGIVE.compute_scc(year = 2020, discount_rates = discount_rates, n = 5, compute_sectoral_values = true, compute_domestic_values = true, save_md = true, save_cpc = true)
```
**Spatially and Sectorally Disaggregated Baseline Damages**
One can additionally set the `compute_disaggregated_values` flag to `true` to get country-level (or FUND-region-level for agriculture) values streamed out to file, including baseline run sectoral damages, marginal damages, and socioeconomic variables. These will be output to a `disaggregated_values` folder along with a small README file detailing units and important notes. Output variables include:
- damages for all four damage functions at the lowest spatial resolution available (FUND region for agriculture, country for all others)
- marginal damages for (1) agriculture at the FUND region level and (2) all other sectors summed at the country level
- population and gdp per capita at both the country and FUND region level
**Returned `result` Object Structure**
The object returned by `result = MimiGIVE.compute_scc(...)` for an MCS is a `Dictionary` with 1-3 keys: `:scc` (always), `:mds` (if `save_md` is set to `true`), and `:cpc` (if `save_cpc` is set to `true`). The structure of the values returned by these keys is as follows:
- `results[:scc]` accesses a Dictionary with keys being `NamedTuples` with elements (region, sector, dr_label, prtp, eta) and values which are `NamedTuples` with elements (expected_scc, se_expected_scc, sccs), as well as ce_scc and ce_sccs if certainty_equivalent=true
- `results[:mds]` accesses a Dictionary with keys being `NamedTuples` with elements (region, sector) and values which are matrices of size num trials x 281 years (2020:2300) of undiscounted marginal damages in USD $2011
- `results[:cpc]` accesses a Dictionary with keys being `NamedTuples` with elements (region, sector) and values which are matrices of size num trials x 281 years (2020:2300) of net per capita consumption in USD $2011
Below we show examples of accessing these values:
```julia
discount_rates = [(label="Ramsey", prtp=0.015, eta=1.45, ew=nothing, ew_norm_region=nothing), (label="Constant 2%", prtp=0.02, eta=0., ew=nothing, ew_norm_region=nothing)]
# run the simulation with all optional outputs
result = MimiGIVE.compute_scc(year = 2020, discount_rates = discount_rates, n = 5, compute_sectoral_values = true, compute_domestic_values = true, save_md = true, save_cpc = true)
# print out information on the calculated SCCs
for (k,v) in result[:scc]
println("Specification: $(k.region) SCC in $(k.sector) sector, using discount rate $(k.dr_label) specified by prtp = $(k.prtp) and eta = $(k.eta):")
println(" --> Expected SCC = $(v.expected_scc) with standard error $(v.se_expected_scc)")
end
# compare global and within US borders marginal damages
mds_global = result[:mds][(region=:globe, sector=:total)]
mds_domestic = result[:mds][(region=:domestic, sector=:total)]
# compare global and within US borders agriculture marginal damages
mds_global_ag = result[:mds][(region=:globe, sector=:agriculture)]
mds_domestic_ag = result[:mds][(region=:domestic, sector=:agriculture)]
# access net per capita consumption
net_cpc = result[:cpc][(region=:globe, sector=:total)]
```
**IMPORTANT: Please look at section 3 above for details and arguments of the Monte Carlo Simulation.**
# 5. Model Structure and References
Below we list the main structure of the model, for information on direct input data see docs/DataExplainer.ipynb.
## Climate
### BRICK Sea Level Rise
- Citation (1): Wong, T. E., Bakker, A. M., Ruckert, K., Applegate, P., Slangen, A., & Keller, K. (2017). BRICK v0. 2, a simple, accessible, and transparent model framework for climate and regional sea-level projections. Geoscientific Model Development, 10(7), 2741-2760.
- Citation (2): Improved Climate Modeling Reduces Extreme Social Cost of Carbon Estimates. _Submitted to Nature Climate Change_
- Scripts: [MimiBRICK.jl](https://github.com/raddleverse/MimiBRICK.jl)
### FAIR Climate
- Citation: Smith, C. J., Forster, P. M., Allen, M., Leach, N., Millar, R. J., Passerello, G. A., and Regayre, L. A.: FAIR v1.3: A simple emissions-based impulse response and carbon cycle model, Geosci. Model Dev., https://doi.org/10.5194/gmd-11-2273-2018, 2018.; Millar, R. J., Nicholls, Z. R., Friedlingstein, P., and Allen, M. R.: A modified impulse-response representation of the global near-surface air temperature and atmospheric concentration response to carbon dioxide emissions, Atmos. Chem. Phys., 17, 7213-7228, https://doi.org/10.5194/acp-17-7213-2017, 2017.; specific v1.6.2 used in AR6
- Scripts: FAIR with the version noted by [IPCC-WG1](https://github.com/IPCC-WG1/Chapter-7) maps to PyPI's [FAIR version 1.6.2](https://pypi.org/project/fair/1.6.2) at the repository [OMS-NetZero/FAIR](https://github.com/OMS-NetZero/FAIR/tree/v1.6.2) for the equations
### Ocean Acidification
- Citation: This package provides a simplified expression to calculate globally averaged ocean pH. It follows Equation 7 from [Appendix F](https://www.nap.edu/read/24651/chapter/17) of "Valuing Climate Damages: Updating Estimation of the Social Cost of Carbon Dioxide": National Academies of Sciences, Engineering, and Medicine. 2017. Valuing Climate Damages: Updating Estimation of the Social Cost of Carbon Dioxide. Washington, DC: The National Academies Press. https://doi.org/10.17226/24651.
- Scripts: https://github.com/FrankErrickson/Mimi_NAS_pH.jl
## Socioeconomics (Population, GDP, and Emissions)
### RFF (CH4, CO2, and N2O)
- Citation: Rennert, K., Prest, B. C., Pizer, W. A., Newell, R. G., Anthoff, D., Kingdon, C., ... & Errickson, F. (2022). The social cost of carbon: advances in long-term probabilistic projections of population, GDP, emissions, and discount rates. Brookings Papers on Economic Activity, 2021(2), 223-305.
- Home Repository: [MimiRFFSPs.jl](https://github.com/rffscghg/MimiRFFSPs.jl)
### Shared Socioeconomic Pathways (SSPs) (CH4, CO2, SF6, and N2O)
- Citation: see MimiSSPs.jl for complete list, including Benveniste et al., 2020, Leach et al., 2021, Kikstra et al., 2021, and Riahi et al., 2017
- Home Repository: [MimiSSPs.jl](https://github.com/anthofflab/MimiSSPs.jl)
## Damages
### Sea Level Rise
- Citation: Diaz, D. B. (2016). Estimating global damages from sea level rise with the Coastal Impact and Adaptation Model (CIAM). Climatic Change, 137(1), 143-156.
- Home Repository: [MimiCIAM.jl](https://github.com/raddleverse/MimiCIAM.jl)
### Agriculture
- Citation: Moore, F.C., Baldos, U., Hertel, T. et al. New science of climate change impacts on agriculture implies higher social cost of carbon. Nat Commun 8, 1607 (2017). https://doi.org/10.1038/s41467-017-01792-x
- Home Repository: [MimiMooreEtAlAgricultureImpacts.jl](https://github.com/rffscghg/MimiMooreEtAlAgricultureImpacts.jl)
### Mortality
- Citation: Cromar, K., Howard, P., Vásquez, V. N., & Anthoff, D. (2021). Health impacts of climate change as contained in economic models estimating the social cost of carbon dioxide. GeoHealth, 5(8), e2021GH000405.
### Energy
- Citation: Clarke, L., Eom, J., Marten, E. H., Horowitz, R., Kyle, P., Link, R., ... & Zhou, Y. (2018). Effects of long-term climate change on global building energy expenditures. Energy Economics, 72, 667-677.
### Global Damages Functions
_Non-default options to use for comparisons etc._
- Citation: Nordhaus, W. D. (2017). Revisiting the social cost of carbon. Proceedings of the National Academy of Sciences, 114(7), 1518-1523.
- Citation: Howard, P. H., & Sterner, T. (2017). Few and not so far between: a meta-analysis of climate damage estimates. Environmental and Resource Economics, 68(1), 197-225.
| MimiGIVE | https://github.com/rffscghg/MimiGIVE.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 892 | # Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using InverseFunctions
# Doctest setup
DocMeta.setdocmeta!(
InverseFunctions,
:DocTestSetup,
:(using InverseFunctions);
recursive=true,
)
makedocs(
sitename = "InverseFunctions",
modules = [InverseFunctions],
format = Documenter.HTML(
prettyurls = !("local" in ARGS),
canonical = "https://JuliaMath.github.io/InverseFunctions.jl/stable/"
),
pages = [
"Home" => "index.md",
"API" => "api.md",
"LICENSE" => "LICENSE.md",
],
doctest = ("fixdoctests" in ARGS) ? :fix : true,
linkcheck = !("nonstrict" in ARGS),
warnonly = ("nonstrict" in ARGS),
)
deploydocs(
repo = "github.com/JuliaMath/InverseFunctions.jl.git",
forcepush = true,
push_preview = true,
)
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 549 | module InverseFunctionsDatesExt
using Dates
import InverseFunctions: inverse
inverse(::typeof(Dates.datetime2epochms)) = Dates.epochms2datetime
inverse(::typeof(Dates.epochms2datetime)) = Dates.datetime2epochms
inverse(::typeof(Dates.date2epochdays)) = Dates.epochdays2date
inverse(::typeof(Dates.epochdays2date)) = Dates.date2epochdays
inverse(::typeof(datetime2unix)) = unix2datetime
inverse(::typeof(unix2datetime)) = datetime2unix
inverse(::typeof(datetime2julian)) = julian2datetime
inverse(::typeof(julian2datetime)) = datetime2julian
end
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 476 | module InverseFunctionsTestExt
using Test: @test, @testset
using InverseFunctions: InverseFunctions, inverse
function InverseFunctions.test_inverse(f, x; compare=isapprox, kwargs...)
@testset "test_inverse: $f with input $x" begin
y = f(x)
inverse_f = inverse(f)
@test compare(inverse_f(y), x; kwargs...)
inverse_inverse_f = inverse(inverse_f)
@test compare(inverse_inverse_f(x), y; kwargs...)
end
return nothing
end
end
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 1370 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
"""
InverseFunctions
Lightweight package that defines an interface to invert functions.
"""
module InverseFunctions
include("functions.jl")
include("inverse.jl")
include("setinverse.jl")
"""
InverseFunctions.test_inverse(f, x; compare=isapprox, kwargs...)
Test if [`inverse(f)`](@ref) is implemented correctly.
The function tests (as a `Test.@testset`) if
* `compare(inverse(f)(f(x)), x) == true` and
* `compare(inverse(inverse(f))(x), f(x)) == true`.
`kwargs...` are forwarded to `compare`.
!!! note
On Julia >= 1.9, you have to load the `Test` standard library to be able to use
this function.
"""
function test_inverse end
@static if !isdefined(Base, :get_extension)
include("../ext/InverseFunctionsDatesExt.jl")
include("../ext/InverseFunctionsTestExt.jl")
end
# Better error message if users forget to load Test
if isdefined(Base, :get_extension) && isdefined(Base.Experimental, :register_error_hint)
function __init__()
Base.Experimental.register_error_hint(MethodError) do io, exc, _, _
if exc.f === test_inverse &&
(Base.get_extension(InverseFunctions, :InverseFunctionsTestExt) === nothing)
print(io, "\nDid you forget to load Test?")
end
end
end
end
end # module
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 2719 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
"""
square(x::Real)
Inverse of `sqrt(x)` for non-negative `x`.
"""
function square(x)
if is_real_type(typeof(x)) && x < zero(x)
throw(DomainError(x, "`square` is defined as the inverse of `sqrt` and can only be evaluated for non-negative values"))
end
return x^2
end
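# Inverse of `Base.Fix2(^, p)` evaluated at x: returns the base b with b^p == x.
# For real x, a negative x is handled when p is an odd integer; for complex x,
# only exponents of the form p = 1/n are invertible here.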
function invpow_arg2(x::Number, p::Real)
if is_real_type(typeof(x))
x ≥ zero(x) ? x^inv(p) : # x ≥ 0 - trivially invertible
isinteger(p) && isodd(Integer(p)) ? copysign(abs(x)^inv(p), x) : # p odd - invertible even for x < 0
throw(DomainError(x, "inverse for x^$p is not defined at $x"))
else
# complex x^p is only invertible for p = 1/n
isinteger(inv(p)) ? x^inv(p) : throw(DomainError(x, "inverse for x^$p is not defined at $x"))
end
end
function invpow_arg1(b::Real, x::Real)
# b < 0 should never happen in actual use: this check is done in inverse(f)
if b ≥ zero(b) && x ≥ zero(x)
log(b, x)
else
throw(DomainError(x, "inverse for $b^x is not defined at $x"))
end
end
function invlog_arg1(b::Real, x::Real)
# exception may happen here: check cannot be done in inverse(f) because of log(Real, Complex)
b > zero(b) && !isone(b) || throw(DomainError(x, "inverse for log($b, x) is not defined at $x"))
b^x
end
invlog_arg1(b::Number, x::Number) = b^x
function invlog_arg2(b::Real, x::Real)
# exception may happen here: check cannot be done in inverse(f) because of log(Complex, Real)
x > zero(x) && !isone(x) || throw(DomainError(x, "inverse for log($b, x) is not defined at $x"))
x^inv(b)
end
invlog_arg2(b, x) = x^inv(b)
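# Inverses of `Base.Fix2(divrem, d)` and `Base.Fix2(fldmod, d)`: reassemble the
# original number from a (quotient, remainder) pair, after checking that the pair
# is consistent with the sign conventions of `divrem`/`fldmod`.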
function invdivrem((q, r)::NTuple{2,Number}, divisor::Number)
res = muladd(q, divisor, r)
if abs(r) ≤ abs(divisor) && (iszero(r) || sign(r) == sign(res))
res
else
throw(DomainError((q, r), "inverse for divrem(x) is not defined at this point"))
end
end
function invfldmod((q, r)::NTuple{2,Number}, divisor::Number)
if abs(r) ≤ abs(divisor) && (iszero(r) || sign(r) == sign(divisor))
muladd(q, divisor, r)
else
throw(DomainError((q, r), "inverse for fldmod(x) is not defined at this point"))
end
end
# check if T is a real-Number type
# this is not the same as T <: Real which immediately excludes custom Number subtypes such as unitful numbers
# also, isreal(x) != is_real_type(typeof(x)): the former is true for complex numbers with zero imaginary part
@inline is_real_type(@nospecialize _::Type{<:Real}) = true
@inline is_real_type(::Type{T}) where {T<:Number} = real(T) == T
@inline is_real_type(@nospecialize _::Type) = false
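# For example: is_real_type(Float64) == true and is_real_type(ComplexF64) == false,
# while custom real-valued Number subtypes (e.g. the unitful quantities exercised
# in the test suite) also return true.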
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 5499 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
"""
inverse(f)
Return the inverse of function `f`.
`inverse` supports mapped and broadcasted functions (via
`Base.Broadcast.BroadcastFunction` or `Base.Fix1`) and function composition
(requires Julia >= 1.6).
# Examples
```jldoctest
julia> foo(x) = inv(exp(-x) + 1);
julia> inv_foo(y) = log(y / (1 - y));
julia> InverseFunctions.inverse(::typeof(foo)) = inv_foo;
julia> InverseFunctions.inverse(::typeof(inv_foo)) = foo;
julia> x = 4.2;
julia> inverse(foo)(foo(x)) ≈ x
true
julia> inverse(inverse(foo)) === foo
true
julia> broadcast_foo = VERSION >= v"1.6" ? Base.Broadcast.BroadcastFunction(foo) : Base.Fix1(broadcast, foo);
julia> X = rand(10);
julia> inverse(broadcast_foo)(broadcast_foo(X)) ≈ X
true
julia> bar = log ∘ foo;
julia> VERSION < v"1.6" || inverse(bar)(bar(x)) ≈ x
true
```
# Implementation
Implementations of `inverse(::typeof(f))` have to satisfy
* `inverse(f)(f(x)) ≈ x` for all `x` in the domain of `f`, and
* `inverse(inverse(f))` is defined and `inverse(inverse(f))(x) ≈ f(x)` for all `x` in the domain of `f`.
You can check your implementation with [`InverseFunctions.test_inverse`](@ref).
"""
inverse(f)
export inverse
"""
struct NoInverse{F}
An instance `NoInverse(f)` signifies that `inverse(f)` is not defined.
"""
struct NoInverse{F}
f::F
end
export NoInverse
NoInverse(::Type{F}) where F = NoInverse{Type{F}}(F)
(f::NoInverse)(x) = error("inverse of ", f.f, " is not defined")
inverse(f) = NoInverse(f)
inverse(f::NoInverse) = f.f
inverse(::typeof(inverse)) = inverse
@static if VERSION >= v"1.6"
function inverse(f::Base.ComposedFunction)
inv_inner = inverse(f.inner)
inv_outer = inverse(f.outer)
if inv_inner isa NoInverse || inv_outer isa NoInverse
NoInverse(f)
else
Base.ComposedFunction(inv_inner, inv_outer)
end
end
function inverse(bf::Base.Broadcast.BroadcastFunction)
inv_f_kernel = inverse(bf.f)
if inv_f_kernel isa NoInverse
NoInverse(bf)
else
Base.Broadcast.BroadcastFunction(inv_f_kernel)
end
end
end
function inverse(mapped_f::Base.Fix1{<:Union{typeof(map),typeof(broadcast)}})
inv_f_kernel = inverse(mapped_f.x)
if inv_f_kernel isa NoInverse
NoInverse(mapped_f)
else
Base.Fix1(mapped_f.f, inv_f_kernel)
end
end
inverse(::typeof(identity)) = identity
inverse(::typeof(inv)) = inv
inverse(::typeof(adjoint)) = adjoint
inverse(::typeof(transpose)) = transpose
inverse(::typeof(conj)) = conj
inverse(::typeof(!)) = !
inverse(::typeof(+)) = +
inverse(::typeof(-)) = -
inverse(f::Base.Fix1{typeof(+)}) = Base.Fix2(-, f.x)
inverse(f::Base.Fix2{typeof(+)}) = Base.Fix2(-, f.x)
inverse(f::Base.Fix1{typeof(-)}) = Base.Fix1(-, f.x)
inverse(f::Base.Fix2{typeof(-)}) = Base.Fix1(+, f.x)
inverse(f::Base.Fix1{typeof(*)}) = iszero(f.x) ? throw(DomainError(f.x, "Cannot invert multiplication by zero")) : Base.Fix1(\, f.x)
inverse(f::Base.Fix2{typeof(*)}) = iszero(f.x) ? throw(DomainError(f.x, "Cannot invert multiplication by zero")) : Base.Fix2(/, f.x)
inverse(f::Base.Fix1{typeof(/)}) = Base.Fix2(\, f.x)
inverse(f::Base.Fix2{typeof(/)}) = Base.Fix2(*, f.x)
inverse(f::Base.Fix1{typeof(\)}) = Base.Fix1(*, f.x)
inverse(f::Base.Fix2{typeof(\)}) = Base.Fix1(/, f.x)
inverse(::typeof(deg2rad)) = rad2deg
inverse(::typeof(rad2deg)) = deg2rad
inverse(::typeof(exp)) = log
inverse(::typeof(log)) = exp
inverse(::typeof(exp2)) = log2
inverse(::typeof(log2)) = exp2
inverse(::typeof(exp10)) = log10
inverse(::typeof(log10)) = exp10
inverse(::typeof(expm1)) = log1p
inverse(::typeof(log1p)) = expm1
inverse(::typeof(sinh)) = asinh
inverse(::typeof(tanh)) = atanh
inverse(::typeof(coth)) = acoth
inverse(::typeof(csch)) = acsch
inverse(::typeof(asinh)) = sinh
inverse(::typeof(atanh)) = tanh
inverse(::typeof(acoth)) = coth
inverse(::typeof(acsch)) = csch
inverse(::typeof(sqrt)) = square
inverse(::typeof(square)) = sqrt
inverse(::typeof(cbrt)) = Base.Fix2(^, 3)
inverse(f::Base.Fix2{typeof(^)}) = iszero(f.x) ? throw(DomainError(f.x, "Cannot invert x^$(f.x)")) : Base.Fix2(invpow_arg2, f.x)
inverse(f::Base.Fix2{typeof(^), <:Integer}) = isodd(f.x) ? Base.Fix2(invpow_arg2, f.x) : throw(DomainError(f.x, "Cannot invert x^$(f.x)"))
inverse(f::Base.Fix2{typeof(invpow_arg2)}) = Base.Fix2(^, f.x)
inverse(f::Base.Fix1{typeof(^), <:Real}) = f.x > zero(f.x) ? Base.Fix1(invpow_arg1, f.x) : throw(DomainError(f.x, "Cannot invert $(f.x)^x"))
inverse(f::Base.Fix1{typeof(^)}) = Base.Fix1(invpow_arg1, f.x)
inverse(f::Base.Fix1{typeof(invpow_arg1)}) = Base.Fix1(^, f.x)
inverse(f::Base.Fix1{typeof(log)}) = isone(f.x) ? throw(DomainError(f.x, "Cannot invert log($(f.x), x)")) : Base.Fix1(invlog_arg1, f.x)
inverse(f::Base.Fix1{typeof(invlog_arg1)}) = Base.Fix1(log, f.x)
inverse(f::Base.Fix2{typeof(log)}) = isone(f.x) ? throw(DomainError(f.x, "Cannot invert log(x, $(f.x))")) : Base.Fix2(invlog_arg2, f.x)
inverse(f::Base.Fix2{typeof(invlog_arg2)}) = Base.Fix2(log, f.x)
inverse(f::Base.Fix2{typeof(divrem)}) = Base.Fix2(invdivrem, f.x)
inverse(f::Base.Fix2{typeof(invdivrem)}) = Base.Fix2(divrem, f.x)
inverse(f::Base.Fix2{typeof(fldmod)}) = Base.Fix2(invfldmod, f.x)
inverse(f::Base.Fix2{typeof(invfldmod)}) = Base.Fix2(fldmod, f.x)
inverse(::typeof(reim)) = Base.splat(complex)
inverse(::typeof(Base.splat(complex))) = reim
inverse(::typeof(reverse)) = reverse
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 1590 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
"""
struct FunctionWithInverse{F,InvF} <: Function
A function with an inverse.
Do not construct directly, use [`setinverse(f, invf)`](@ref) instead.
"""
struct FunctionWithInverse{F,InvF} <: Function
f::F
invf::InvF
end
FunctionWithInverse(::Type{F}, invf::InvF) where {F,InvF} = FunctionWithInverse{Type{F},InvF}(F,invf)
FunctionWithInverse(f::F, ::Type{InvF}) where {F,InvF} = FunctionWithInverse{F,Type{InvF}}(f,InvF)
FunctionWithInverse(::Type{F}, ::Type{InvF}) where {F,InvF} = FunctionWithInverse{Type{F},Type{InvF}}(F,InvF)
(f::FunctionWithInverse)(x) = f.f(x)
inverse(f::FunctionWithInverse) = setinverse(f.invf, f.f)
"""
setinverse(f, invf)
Return a function that behaves like `f` and uses `invf` as its inverse.
Useful in cases where no inverse is defined for `f` or to set an inverse that
is only valid within a given context, e.g. only for a limited argument
range that is guaranteed by the use case but not in general.
For example, `asin` is not a valid inverse of `sin` for arbitrary arguments
of `sin`, but can be a valid inverse if the use case guarantees that the
argument of `sin` will always be within `-π` and `π`:
```jldoctest
julia> foo = setinverse(sin, asin);
julia> x = π/3;
julia> foo(x) == sin(x)
true
julia> inverse(foo)(foo(x)) ≈ x
true
julia> inverse(foo) === setinverse(asin, sin)
true
```
"""
setinverse(f, invf) = FunctionWithInverse(_unwrap_f(f), _unwrap_f(invf))
export setinverse
_unwrap_f(f) = f
_unwrap_f(f::FunctionWithInverse) = f.f
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 506 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
import Test
import InverseFunctions
import Documenter
Test.@testset "Package InverseFunctions" begin
include("test_functions.jl")
include("test_inverse.jl")
include("test_setinverse.jl")
# doctests
Documenter.DocMeta.setdocmeta!(
InverseFunctions,
:DocTestSetup,
:(using InverseFunctions);
recursive=true,
)
Documenter.doctest(InverseFunctions)
end # testset
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 289 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
using Test
using InverseFunctions
@testset "square" begin
for x in (0.0, 0.73)
@test InverseFunctions.square(x) ≈ x * x
end
@test_throws DomainError InverseFunctions.square(-1)
end
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 7602 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
using Test
using InverseFunctions
using Unitful
using Dates
foo(x) = inv(exp(-x) + 1)
inv_foo(y) = log(y / (1 - y))
InverseFunctions.inverse(::typeof(foo)) = inv_foo
InverseFunctions.inverse(::typeof(inv_foo)) = foo
struct Bar{MT<:AbstractMatrix}
A::MT
end
(f::Bar)(x) = f.A * x
InverseFunctions.inverse(f::Bar) = Bar(inv(f.A))
@static if VERSION >= v"1.6"
_bc_func(f) = Base.Broadcast.BroadcastFunction(f)
else
_bc_func(f) = Base.Fix1(broadcast, f)
end
@testset "inverse" begin
f_without_inverse(x) = 1
@test inverse(f_without_inverse) isa NoInverse
@test_throws ErrorException inverse(f_without_inverse)(42)
@test inverse(inverse(f_without_inverse)) === f_without_inverse
for f in (f_without_inverse ∘ exp, exp ∘ f_without_inverse, _bc_func(f_without_inverse), Base.Fix1(broadcast, f_without_inverse), Base.Fix1(map, f_without_inverse))
@test inverse(f) == NoInverse(f)
@test inverse(inverse(f)) == f
end
@test @inferred(inverse(Complex)) isa NoInverse{Type{Complex}}
@test @inferred(NoInverse(Complex)) isa NoInverse{Type{Complex}}
InverseFunctions.test_inverse(inverse, log, compare = ===)
end
@testset "maths" begin
InverseFunctions.test_inverse(!, false)
x = rand()
for f in (
foo, inv_foo, log, log2, log10, log1p, sqrt,
Base.Fix2(^, 3*rand() - 0.5), Base.Fix2(^, rand(float.([-10:-1; 1:10]))), Base.Fix1(^, rand()), Base.Fix1(log, rand()), Base.Fix1(log, 1/rand()), Base.Fix2(log, rand()),
)
InverseFunctions.test_inverse(f, x)
end
for f in (
+, -, exp, exp2, exp10, expm1, cbrt, deg2rad, rad2deg, conj,
sinh, tanh, coth, csch, asinh, atanh, acsch, # all invertible hyperbolic functions aside from acoth
Base.Fix1(+, rand()), Base.Fix2(+, rand()), Base.Fix1(-, rand()), Base.Fix2(-, rand()),
Base.Fix1(*, rand()), Base.Fix2(*, rand()), Base.Fix1(/, rand()), Base.Fix2(/, rand()), Base.Fix1(\, rand()), Base.Fix2(\, rand()),
Base.Fix2(^, rand(-11:2:11)),
)
InverseFunctions.test_inverse(f, x)
InverseFunctions.test_inverse(f, -x)
end
# acoth only defined for |x| > 1
InverseFunctions.test_inverse(acoth, 1 + x)
InverseFunctions.test_inverse(acoth, -1 - x)
InverseFunctions.test_inverse(conj, 2 - 3im)
InverseFunctions.test_inverse(reverse, [10, 20, 30])
x = rand(0:10)
for f in (Base.Fix2(divrem, rand([-5:-1; 1:5])), Base.Fix2(fldmod, rand([-5:-1; 1:5])), Base.Fix2(divrem, 0.123), Base.Fix2(fldmod, 0.123))
compare = (a, b) -> all(isapprox.(a, b))
InverseFunctions.test_inverse(f, x; compare=compare)
InverseFunctions.test_inverse(f, -x; compare=compare)
InverseFunctions.test_inverse(f, x/9; compare=compare)
InverseFunctions.test_inverse(f, -x/9; compare=compare)
end
# ensure that inverses have domains compatible with original functions
@test_throws DomainError inverse(sqrt)(-1.0)
InverseFunctions.test_inverse(sqrt, complex(-1.0))
InverseFunctions.test_inverse(sqrt, complex(1.0))
@test_throws DomainError inverse(Base.Fix1(*, 0))
@test_throws DomainError inverse(Base.Fix2(^, 0))
@test_throws DomainError inverse(Base.Fix1(log, -2))(5)
@test_throws DomainError inverse(Base.Fix1(log, 2))(-5)
InverseFunctions.test_inverse(inverse(Base.Fix1(log, 2)), complex(-5))
@test_throws DomainError inverse(Base.Fix2(^, 0.5))(-5)
@test_throws DomainError inverse(Base.Fix2(^, 0.51))(complex(-5))
@test_throws DomainError inverse(Base.Fix2(^, 2))(complex(-5))
InverseFunctions.test_inverse(Base.Fix2(^, 0.5), complex(-5))
@test_throws DomainError inverse(Base.Fix2(^, 2))
@test_throws DomainError inverse(Base.Fix2(^, -4))
InverseFunctions.test_inverse(Base.Fix2(^, 2.0), 4)
@test_throws DomainError inverse(Base.Fix1(^, 2.0))(-4)
@test_throws DomainError inverse(Base.Fix1(^, -2.0))(4)
@test_throws DomainError inverse(Base.Fix1(^, 0))(4)
@test_throws DomainError inverse(Base.Fix1(log, -2))(4)
@test_throws DomainError inverse(Base.Fix1(log, 1))(4)
@test_throws DomainError inverse(Base.Fix2(^, 0))(4)
@test_throws DomainError inverse(Base.Fix2(log, -2))(4)
@test_throws DomainError inverse(Base.Fix2(log, 1))(4)
InverseFunctions.test_inverse(Base.Fix2(^, -1), complex(-5.))
@test_throws DomainError inverse(Base.Fix2(^, 2))(-5)
@test_throws DomainError inverse(Base.Fix1(^, 2))(-5)
@test_throws DomainError inverse(Base.Fix1(^, -2))(3)
@test_throws DomainError inverse(Base.Fix2(divrem, 5))((-3, 2))
@test_throws DomainError inverse(Base.Fix2(fldmod, 5))((-3, -2))
InverseFunctions.test_inverse(inverse(Base.Fix2(divrem, 5)), (-3, -2); compare=(==))
InverseFunctions.test_inverse(inverse(Base.Fix2(fldmod, 5)), (-3, 2); compare=(==))
InverseFunctions.test_inverse(reim, -3; compare=(==))
InverseFunctions.test_inverse(reim, -3+2im; compare=(==))
InverseFunctions.test_inverse(Base.splat(complex), (-3, 2); compare=(==))
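# matrix-valued arguments: linear-algebra functions and affine maps, tested on a random 5×5 matrix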
A = rand(5, 5)
for f in (
identity, inv, adjoint, transpose,
log, sqrt, +, -, exp,
Base.Fix1(+, rand(5, 5)), Base.Fix2(+, rand(5, 5)), Base.Fix1(-, rand(5, 5)), Base.Fix2(-, rand(5, 5)),
Base.Fix1(*, rand()), Base.Fix2(*, rand()), Base.Fix1(*, rand(5, 5)), Base.Fix2(*, rand(5, 5)),
Base.Fix2(/, rand()), Base.Fix1(/, rand(5, 5)), Base.Fix2(/, rand(5, 5)),
Base.Fix1(\, rand()), Base.Fix1(\, rand(5, 5)), Base.Fix2(\, rand(5, 5)),
)
if f != log || VERSION >= v"1.6"
# exp(log(A::AbstractMatrix)) ≈ A is broken on at least Julia v1.0
InverseFunctions.test_inverse(f, A)
end
end
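# broadcast/map wrappers around foo, applied to a scalar, a constant array, and a random vector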
X = rand(5)
for f in (_bc_func(foo), Base.Fix1(broadcast, foo), Base.Fix1(map, foo))
for x in (x, fill(x, 3), X)
InverseFunctions.test_inverse(f, x)
end
end
InverseFunctions.test_inverse(Bar(rand(3,3)), rand(3))
@static if VERSION >= v"1.6"
InverseFunctions.test_inverse(log ∘ foo, x)
end
end
@testset "unitful" begin
# most inverses simply propagate to the underlying mathematical functions and have no issues with unitful numbers;
# only those that treat plain real numbers specially need to be tested here
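# e.g. sqrt(4.0u"m^2") == 2.0u"m", so the inverse must square a quantity back to 4.0u"m^2"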
x = rand()u"m"
InverseFunctions.test_inverse(sqrt, x)
@test_throws DomainError inverse(sqrt)(-x)
InverseFunctions.test_inverse(Base.Fix2(^, 3), x)
InverseFunctions.test_inverse(Base.Fix2(^, 3), -x)
InverseFunctions.test_inverse(Base.Fix2(^, -3.5), x)
@test_throws DomainError inverse(Base.Fix2(^, 2))(-x)
end
@testset "dates" begin
InverseFunctions.test_inverse(Dates.date2epochdays, Date(2020, 1, 2); compare = ===)
InverseFunctions.test_inverse(Dates.datetime2epochms, DateTime(2020, 1, 2, 12, 34, 56); compare = ===)
InverseFunctions.test_inverse(Dates.epochdays2date, Int64(1234); compare = ===)
InverseFunctions.test_inverse(Dates.epochms2datetime, Int64(1234567890); compare = ===)
InverseFunctions.test_inverse(datetime2unix, DateTime(2020, 1, 2, 12, 34, 56); compare = ===)
InverseFunctions.test_inverse(unix2datetime, 1234.56; compare = ===)
InverseFunctions.test_inverse(datetime2julian, DateTime(2020, 1, 2, 12, 34, 56); compare = ===)
InverseFunctions.test_inverse(julian2datetime, 1234.56; compare = ===)
end
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |
|
[
"MIT"
] | 0.1.17 | a779299d77cd080bf77b97535acecd73e1c5e5cb | code | 1472 | # This file is a part of InverseFunctions.jl, licensed under the MIT License (MIT).
using Test
using InverseFunctions
@testset "setinverse" begin
@test @inferred(setinverse(Complex, Real)) isa InverseFunctions.FunctionWithInverse{Type{Complex},Type{Real}}
@test @inferred(InverseFunctions.FunctionWithInverse(Complex, Real)) isa InverseFunctions.FunctionWithInverse{Type{Complex},Type{Real}}
@test @inferred(InverseFunctions.FunctionWithInverse(Real, identity)) isa InverseFunctions.FunctionWithInverse{Type{Real},typeof(identity)}
@test @inferred(InverseFunctions.FunctionWithInverse(identity, Real)) isa InverseFunctions.FunctionWithInverse{typeof(identity),Type{Real}}
InverseFunctions.test_inverse(setinverse(Complex, Real), 4.2)
InverseFunctions.test_inverse(setinverse(Real, identity), 4.2)
InverseFunctions.test_inverse(setinverse(identity, Real), 4.2)
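# nested wrappers collapse: setinverse keeps only the outermost function/inverse pair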
@test @inferred(setinverse(sin, asin)) === InverseFunctions.FunctionWithInverse(sin, asin)
@test @inferred(setinverse(sin, setinverse(asin, sqrt))) === InverseFunctions.FunctionWithInverse(sin, asin)
@test @inferred(setinverse(setinverse(sin, sqrt), asin)) === InverseFunctions.FunctionWithInverse(sin, asin)
@test @inferred(setinverse(setinverse(sin, asin), setinverse(asin, sqrt))) === InverseFunctions.FunctionWithInverse(sin, asin)
InverseFunctions.test_inverse(setinverse(sin, asin), π/4)
InverseFunctions.test_inverse(setinverse(asin, sin), 0.5)
end
| InverseFunctions | https://github.com/JuliaMath/InverseFunctions.jl.git |