licenses (sequence, 1–3 entries) | version (string, 677 distinct values) | tree_hash (string, length 40) | path (string, 1 distinct value) | type (string, 2 distinct values) | size (string, length 2–8) | text (string, length 25–67.1M) | package_name (string, length 2–41) | repo (string, length 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.2 | 512b75d09aab52e93192e68de612fc472f001979 | code | 5317 |
MRANGE(::Type{Float64}) = 10000000
MRANGE(::Type{Float32}) = 10000
IntF(::Type{Float64}) = Int64
IntF(::Type{Float32}) = Int32
@testset "Accuracy (max error in ulp) for $T" for T in (Float32, Float64)
println("Accuracy tests for $T")
xx = map(T, vcat(-10:0.0002:10, -1000:0.1:1000))
fun_table = Dict(SLEEF.exp => Base.exp)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -1000:0.02:1000))
fun_table = Dict(SLEEF.asinh => Base.asinh, SLEEF.atanh => Base.atanh)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(1:0.0002:10, 1:0.02:1000))
fun_table = Dict(SLEEF.acosh => Base.acosh)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = T[]
for i = 1:10000
s = reinterpret(T, reinterpret(IntF(T), T(pi)/4 * i) - IntF(T)(20))
e = reinterpret(T, reinterpret(IntF(T), T(pi)/4 * i) + IntF(T)(20))
d = s
while d <= e
append!(xx, d)
d = reinterpret(T, reinterpret(IntF(T), d) + IntF(T)(1))
end
end
xx = append!(xx, -10:0.0002:10)
xx = append!(xx, -MRANGE(T):200.1:MRANGE(T))
fun_table = Dict(SLEEF.sin => Base.sin, SLEEF.cos => Base.cos, SLEEF.tan => Base.tan)
tol = 1
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.sin_fast => Base.sin, SLEEF.cos_fast => Base.cos, SLEEF.tan_fast => Base.tan)
tol = 4
test_acc(T, fun_table, xx, tol)
global sin_sincos_fast(x) = (SLEEF.sincos_fast(x))[1]
global cos_sincos_fast(x) = (SLEEF.sincos_fast(x))[2]
fun_table = Dict(sin_sincos_fast => Base.sin, cos_sincos_fast => Base.cos)
tol = 4
test_acc(T, fun_table, xx, tol)
global sin_sincos(x) = (SLEEF.sincos(x))[1]
global cos_sincos(x) = (SLEEF.sincos(x))[2]
fun_table = Dict(sin_sincos => Base.sin, cos_sincos => Base.cos)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-1:0.00002:1))
fun_table = Dict(SLEEF.asin_fast => Base.asin, SLEEF.acos_fast => Base.acos)
tol = 3
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.asin => Base.asin, SLEEF.acos => Base.acos)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -10000:0.2:10000, -10000:0.201:10000))
fun_table = Dict(SLEEF.atan_fast => Base.atan)
tol = 3
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.atan => Base.atan)
tol = 1
test_acc(T, fun_table, xx, tol)
xx1 = map(Tuple{T,T}, [zip(-10:0.050:10, -10:0.050:10)...])
xx2 = map(Tuple{T,T}, [zip(-10:0.051:10, -10:0.052:10)...])
xx3 = map(Tuple{T,T}, [zip(-100:0.51:100, -100:0.51:100)...])
xx4 = map(Tuple{T,T}, [zip(-100:0.51:100, -100:0.52:100)...])
xx = vcat(xx1, xx2, xx3, xx4)
fun_table = Dict(SLEEF.atan_fast => Base.atan)
tol = 2.5
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.atan => Base.atan)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(0.0001:0.0001:10, 0.001:0.1:10000, 1.1.^(-1000:1000), 2.1.^(-1000:1000)))
fun_table = Dict(SLEEF.log_fast => Base.log)
tol = 3
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.log => Base.log)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(0.0001:0.0001:10, 0.0001:0.1:10000))
fun_table = Dict(SLEEF.log10 => Base.log10, SLEEF.log2 => Base.log2)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(0.0001:0.0001:10, 0.0001:0.1:10000, 10.0.^-(0:0.02:300), -10.0.^-(0:0.02:300)))
fun_table = Dict(SLEEF.log1p => Base.log1p)
tol = 1
test_acc(T, fun_table, xx, tol)
xx1 = map(Tuple{T,T}, [(x,y) for x = -100:0.20:100, y = 0.1:0.20:100])[:]
xx2 = map(Tuple{T,T}, [(x,y) for x = -100:0.21:100, y = 0.1:0.22:100])[:]
xx3 = map(Tuple{T,T}, [(x,y) for x = 2.1, y = -1000:0.1:1000])[:]
xx = vcat(xx1, xx2, xx3)
fun_table = Dict(SLEEF.pow => Base.:^)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10000:0.2:10000, 1.1.^(-1000:1000), 2.1.^(-1000:1000)))
fun_table = Dict(SLEEF.cbrt_fast => Base.cbrt)
tol = 2
test_acc(T, fun_table, xx, tol)
fun_table = Dict(SLEEF.cbrt => Base.cbrt)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -120:0.023:1000, -1000:0.02:2000))
fun_table = Dict(SLEEF.exp2 => Base.exp2)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -35:0.023:1000, -300:0.01:300))
fun_table = Dict(SLEEF.exp10 => Base.exp10)
tol = 1
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -1000:0.021:1000, -1000:0.023:1000,
10.0.^-(0:0.02:300), -10.0.^-(0:0.02:300), 10.0.^(0:0.021:300), -10.0.^-(0:0.021:300)))
fun_table = Dict(SLEEF.expm1 => Base.expm1)
tol = 2
test_acc(T, fun_table, xx, tol)
xx = map(T, vcat(-10:0.0002:10, -1000:0.02:1000))
fun_table = Dict(SLEEF.sinh => Base.sinh, SLEEF.cosh => Base.cosh, SLEEF.tanh => Base.tanh)
tol = 1
test_acc(T, fun_table, xx, tol)
@testset "xilogb at arbitrary values" begin
xd = Dict{T,Int}(T(1e-30) => -100, T(2.31e-11) => -36, T(-1.0) => 0, T(1.0) => 0,
T(2.31e11) => 37, T(1e30) => 99)
for (i,j) in xd
@test SLEEF.ilogb(i) === j
end
end
end
| SLEEF | https://github.com/musm/SLEEF.jl.git |
|
[
"MIT"
] | 0.5.2 | 512b75d09aab52e93192e68de612fc472f001979 | code | 10161 | @testset "exceptional $T" for T in (Float32, Float64)
@testset "exceptional $xatan" for xatan in (SLEEF.atan_fast, SLEEF.atan)
@test xatan(T(0.0), T(-0.0)) === T(pi)
@test xatan(T(-0.0), T(-0.0)) === -T(pi)
@test ispzero(xatan(T(0.0), T(0.0)))
@test isnzero(xatan(T(-0.0), T(0.0)))
@test xatan( T(Inf), -T(Inf)) === T(3*pi/4)
@test xatan(-T(Inf), -T(Inf)) === T(-3*pi/4)
@test xatan( T(Inf), T(Inf)) === T(pi/4)
@test xatan(-T(Inf), T(Inf)) === T(-pi/4)
y = T(0.0)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5]
for x in xa
@test xatan(y,x) === T(pi)
end
y = T(-0.0)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5]
for x in xa
@test xatan(y,x) === T(-pi)
end
ya = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5]
xa = T[T(0.0), T(-0.0)]
for x in xa, y in ya
@test xatan(y,x) === T(-pi/2)
end
ya = T[100000.5, 100000, 3, 2.5, 2, 1.5, 1.0, 0.5]
xa = T[T(0.0), T(-0.0)]
for x in xa, y in ya
@test xatan(y,x) === T(pi/2)
end
y = T(Inf)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, -0.0, +0.0, 0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
for x in xa
@test xatan(y,x) === T(pi/2)
end
y = T(-Inf)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, -0.0, +0.0, 0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
for x in xa
@test xatan(y,x) === T(-pi/2)
end
ya = T[0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
x = T(Inf)
for y in ya
@test ispzero(xatan(y,x))
end
ya = T[-0.5, -1.5, -2.0, -2.5, -3.0, -100000, -100000.5]
x = T(Inf)
for y in ya
@test isnzero(xatan(y,x))
end
ya = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, -0.0, +0.0, 0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5, NaN]
x = T(NaN)
for y in ya
@test isnan(xatan(y,x))
end
y = T(NaN)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, -0.0, +0.0, 0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5, NaN]
for x in xa
@test isnan(xatan(y,x))
end
end # denormal/nonumber atan
@testset "exceptional xpow" begin
@test SLEEF.pow(T(1), T(NaN)) === T(1)
@test SLEEF.pow( T(NaN), T(0)) === T(1)
@test SLEEF.pow(-T(1), T(Inf)) === T(1)
@test SLEEF.pow(-T(1), T(-Inf)) === T(1)
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5]
ya = T[-100000.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 100000.5]
for x in xa, y in ya
@test isnan(SLEEF.pow(x,y))
end
x = T(NaN)
ya = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
for y in ya
@test isnan(SLEEF.pow(x,y))
end
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5, -0.0, +0.0, 0.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
y = T(NaN)
for x in xa
@test isnan(SLEEF.pow(x,y))
end
x = T(0.0)
ya = T[1, 3, 5, 7, 100001]
for y in ya
@test ispzero(SLEEF.pow(x,y))
end
x = T(-0.0)
ya = T[1, 3, 5, 7, 100001]
for y in ya
@test isnzero(SLEEF.pow(x,y))
end
xa = T[0.0, -0.0]
ya = T[0.5, 1.5, 2.0, 2.5, 4.0, 100000, 100000.5]
for x in xa, y in ya
@test ispzero(SLEEF.pow(x,y))
end
xa = T[-0.999, -0.5, -0.0, +0.0, +0.5, +0.999]
y = T(-Inf)
for x in xa
@test SLEEF.pow(x,y) === T(Inf)
end
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
y = T(-Inf)
for x in xa
@test ispzero(SLEEF.pow(x,y))
end
xa = T[-0.999, -0.5, -0.0, +0.0, +0.5, +0.999]
y = T(Inf)
for x in xa
@test ispzero(SLEEF.pow(x,y))
end
xa = T[-100000.5, -100000, -3, -2.5, -2, -1.5, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
y = T(Inf)
for x in xa
@test SLEEF.pow(x,y) === T(Inf)
end
x = T(-Inf)
ya = T[-100001, -5, -3, -1]
for y in ya
@test isnzero(SLEEF.pow(x,y))
end
x = T(-Inf)
ya = T[-100000.5, -100000, -4, -2.5, -2, -1.5, -0.5]
for y in ya
@test ispzero(SLEEF.pow(x,y))
end
x = T(-Inf)
ya = T[1, 3, 5, 7, 100001]
for y in ya
@test SLEEF.pow(x,y) === T(-Inf)
end
x = T(-Inf)
ya = T[0.5, 1.5, 2, 2.5, 3.5, 4, 100000, 100000.5]
for y in ya
@test SLEEF.pow(x,y) === T(Inf)
end
x = T(Inf)
ya = T[-100000.5, -100000, -3, -2.5, -2, -1.5, -1.0, -0.5]
for y in ya
@test ispzero(SLEEF.pow(x,y))
end
x = T(Inf)
ya = T[0.5, 1, 1.5, 2.0, 2.5, 3.0, 100000, 100000.5]
for y in ya
@test SLEEF.pow(x,y) === T(Inf)
end
x = T(0.0)
ya = T[-100001, -5, -3, -1]
for y in ya
@test SLEEF.pow(x,y) === T(Inf)
end
x = T(-0.0)
ya = T[-100001, -5, -3, -1]
for y in ya
@test SLEEF.pow(x,y) === T(-Inf)
end
xa = T[0.0, -0.0]
ya = T[-100000.5, -100000, -4, -2.5, -2, -1.5, -0.5]
for x in xa, y in ya
@test SLEEF.pow(x,y) === T(Inf)
end
xa = T[1000, -1000]
ya = T[1000, 1000.5, 1001]
for x in xa, y in ya
@test cmpdenorm(SLEEF.pow(x,y), Base.:^(BigFloat(x),BigFloat(y)))
end
end # denormal/nonumber pow
fun_table = Dict(SLEEF.sin_fast => Base.sin, SLEEF.sin => Base.sin)
@testset "exceptional $xtrig" for (xtrig, trig) in fun_table
xa = T[NaN, -0.0, 0.0, Inf, -Inf]
for x in xa
@test cmpdenorm(xtrig(x), trig(BigFloat(x)))
end
end
fun_table = Dict(SLEEF.cos_fast => Base.cos, SLEEF.cos => Base.cos)
@testset "exceptional $xtrig" for (xtrig, trig) in fun_table
xa = T[NaN, -0.0, 0.0, Inf, -Inf]
for x in xa
@test cmpdenorm(xtrig(x), trig(BigFloat(x)))
end
end
@testset "exceptional sin in $xsincos"for xsincos in (SLEEF.sincos_fast, SLEEF.sincos)
xa = T[NaN, -0.0, 0.0, Inf, -Inf]
for x in xa
q = xsincos(x)[1]
@test cmpdenorm(q, Base.sin(BigFloat(x)))
end
end
@testset "exceptional cos in $xsincos"for xsincos in (SLEEF.sincos_fast, SLEEF.sincos)
xa = T[NaN, -0.0, 0.0, Inf, -Inf]
for x in xa
q = xsincos(x)[2]
@test cmpdenorm(q, Base.cos(BigFloat(x)))
end
end
@testset "exceptional $xtan" for xtan in (SLEEF.tan_fast, SLEEF.tan)
xa = T[NaN, Inf, -Inf, -0.0, 0.0, pi/2, -pi/2]
for x in xa
@test cmpdenorm(xtan(x), Base.tan(BigFloat(x)))
end
end
fun_table = Dict(SLEEF.asin => Base.asin, SLEEF.asin_fast => Base.asin, SLEEF.acos => Base.acos, SLEEF.acos_fast => Base.acos)
@testset "exceptional $xatrig" for (xatrig, atrig) in fun_table
xa = T[NaN, Inf, -Inf, 2, -2, 1, -1, -0.0, 0.0]
for x in xa
@test cmpdenorm(xatrig(x), atrig(BigFloat(x)))
end
end
@testset "exceptional $xatan" for xatan in (SLEEF.atan, SLEEF.atan_fast)
xa = T[NaN, Inf, -Inf, -0.0, 0.0]
for x in xa
@test cmpdenorm(xatan(x), Base.atan(BigFloat(x)))
end
end
@testset "exceptional exp" begin
xa = T[NaN, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.exp(x), Base.exp(BigFloat(x)))
end
end
@testset "exceptional sinh" begin
xa = T[NaN, 0.0, -0.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.sinh(x), Base.sinh(BigFloat(x)))
end
end
@testset "exceptional cosh" begin
xa = T[NaN, 0.0, -0.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.cosh(x), Base.cosh(BigFloat(x)))
end
end
@testset "exceptional tanh" begin
xa = T[NaN, 0.0, -0.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.tanh(x), Base.tanh(BigFloat(x)))
end
end
@testset "exceptional asinh" begin
xa = T[NaN, 0.0, -0.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.asinh(x), Base.asinh(BigFloat(x)))
end
end
@testset "exceptional acosh" begin
xa = T[NaN, 0.0, -0.0, 1.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.acosh(x), Base.acosh(BigFloat(x)))
end
end
@testset "exceptional atanh" begin
xa = T[NaN, 0.0, -0.0, 1.0, -1.0, Inf, -Inf, 10000, -10000]
for x in xa
@test cmpdenorm(SLEEF.atanh(x), Base.atanh(BigFloat(x)))
end
end
@testset "exceptional $xcbrt" for xcbrt = (SLEEF.cbrt, SLEEF.cbrt_fast)
xa = T[NaN, Inf, -Inf, 0.0, -0.0]
for x in xa
@test cmpdenorm(xcbrt(x), Base.cbrt(BigFloat(x)))
end
end
@testset "exceptional exp2" begin
xa = T[NaN, Inf, -Inf]
for x in xa
@test cmpdenorm(SLEEF.exp2(x), Base.exp2(BigFloat(x)))
end
end
@testset "exceptional exp10" begin
xa = T[NaN, Inf, -Inf]
for x in xa
@test cmpdenorm(SLEEF.exp10(x), Base.exp10(BigFloat(x)))
end
end
@testset "exceptional expm1" begin
xa = T[NaN, Inf, -Inf, 0.0, -0.0]
for x in xa
@test cmpdenorm(SLEEF.expm1(x), Base.expm1(BigFloat(x)))
end
end
@testset "exceptional $xlog" for xlog in (SLEEF.log, SLEEF.log_fast)
xa = T[NaN, Inf, -Inf, 0, -1]
for x in xa
@test cmpdenorm(xlog(x), Base.log(BigFloat(x)))
end
end
@testset "exceptional log10" begin
xa = T[NaN, Inf, -Inf, 0, -1]
for x in xa
@test cmpdenorm(SLEEF.log10(x), Base.log10(BigFloat(x)))
end
end
@testset "exceptional log2" begin
xa = T[NaN, Inf, -Inf, 0, -1]
for x in xa
@test cmpdenorm(SLEEF.log2(x), Base.log2(BigFloat(x)))
end
end
@testset "exceptional log1p" begin
xa = T[NaN, Inf, -Inf, 0.0, -0.0, -1.0, -2.0]
for x in xa
@test cmpdenorm(SLEEF.log1p(x), Base.log1p(BigFloat(x)))
end
end
@testset "exceptional ldexp" begin
for i = -10000:10000
a = SLEEF.ldexp(T(1.0), i)
b = Base.ldexp(BigFloat(1.0), i)
@test (isfinite(b) && a == b || cmpdenorm(a,b))
end
end
@testset "exceptional ilogb" begin
@test SLEEF.ilogb(+T(Inf)) == SLEEF.INT_MAX
@test SLEEF.ilogb(-T(Inf)) == SLEEF.INT_MAX
@test SLEEF.ilogb(+T(0.0)) == SLEEF.FP_ILOGB0
@test SLEEF.ilogb(-T(0.0)) == SLEEF.FP_ILOGB0
@test SLEEF.ilogb( T(NaN)) == SLEEF.FP_ILOGBNAN
end
end #exceptional
| SLEEF | https://github.com/musm/SLEEF.jl.git |
|
[
"MIT"
] | 0.5.2 | 512b75d09aab52e93192e68de612fc472f001979 | code | 4300 | using SLEEF
using Test
using Printf
using Base.Math: significand_bits
isnzero(x::T) where {T <: AbstractFloat} = signbit(x)
ispzero(x::T) where {T <: AbstractFloat} = !signbit(x)
function cmpdenorm(x::Tx, y::Ty) where {Tx <: AbstractFloat, Ty <: AbstractFloat}
sizeof(Tx) < sizeof(Ty) ? y = Tx(y) : x = Ty(x) # cast larger type to smaller type
(isnan(x) && isnan(y)) && return true
(isnan(x) || isnan(y)) && return false
(isinf(x) != isinf(y)) && return false
(x == Tx(Inf) && y == Ty(Inf)) && return true
(x == Tx(-Inf) && y == Ty(-Inf)) && return true
if y == 0
(ispzero(x) && ispzero(y)) && return true
(isnzero(x) && isnzero(y)) && return true
return false
end
(!isnan(x) && !isnan(y) && !isinf(x) && !isinf(y)) && return sign(x) == sign(y)
return false
end
# The following compares the ulp difference between x and y.
# First it promotes them to the larger of the two types, then casts both down to T.
const infh(::Type{Float64}) = 1e300
const infh(::Type{Float32}) = 1e37
function countulp(T, x::AbstractFloat, y::AbstractFloat)
X, Y = promote(x, y)
x, y = T(X), T(Y) # Cast to smaller type
(isnan(x) && isnan(y)) && return 0
(isnan(x) || isnan(y)) && return 10000
if isinf(x)
(sign(x) == sign(y) && abs(y) > infh(T)) && return 0 # relaxed infinity handling
return 10001
end
(x == Inf && y == Inf) && return 0
(x == -Inf && y == -Inf) && return 0
if y == 0
x == 0 && return 0
return 10002
end
if isfinite(x) && isfinite(y)
return T(abs(X - Y) / ulp(y))
end
return 10003
end
const DENORMAL_MIN(::Type{Float64}) = 2.0^-1074
const DENORMAL_MIN(::Type{Float32}) = 2f0^-149
function ulp(x::T) where {T<:AbstractFloat}
x = abs(x)
x == T(0.0) && return DENORMAL_MIN(T)
val, e = frexp(x)
return max(ldexp(T(1.0), e - significand_bits(T) - 1), DENORMAL_MIN(T))
end
countulp(x::T, y::T) where {T <: AbstractFloat} = countulp(T, x, y)
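# For example (a quick sanity check of the helper above): adjacent floats
# differ by exactly one unit in the last place, so
# countulp(1.0, nextfloat(1.0)) == 1.0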
# get rid of annoying warnings from the overwritten functions
macro nowarn(expr)
quote
_stderr = stderr
tmp = tempname()
stream = open(tmp, "w")
redirect_stderr(stream)
result = $(esc(expr))
redirect_stderr(_stderr)
close(stream)
result
end
end
# override the domain checking that Base adheres to
using Base.MPFR: ROUNDING_MODE
for f in (:sin, :cos, :tan, :asin, :acos, :atan, :asinh, :acosh, :atanh, :log, :log10, :log2, :log1p)
@eval begin
import Base.$f
@nowarn function ($f)(x::BigFloat)
z = BigFloat()
ccall($(string(:mpfr_, f), :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, ROUNDING_MODE[])
return z
end
end
end
strip_module_name(f::Function) = last(split(string(f), '.')) # strip module name from function f
# test the accuracy of a function where fun_table is a Dict mapping the function you want
# to test to a reference function
# xx is an array of values (which may be tuples for multiple-argument functions)
# tol is the acceptable tolerance to test against
function test_acc(T, fun_table, xx, tol; debug = false, tol_debug = 5)
@testset "accuracy $(strip_module_name(xfun))" for (xfun, fun) in fun_table
rmax = 0.0
rmean = 0.0
xmax = map(zero, first(xx))
for x in xx
q = xfun(x...)
c = fun(map(BigFloat, x)...)
u = countulp(T, q, c)
rmax = max(rmax, u)
xmax = rmax == u ? x : xmax
rmean += u
if debug && u > tol_debug
@printf("%s = %.20g\n%s = %.20g\nx = %.20g\nulp = %g\n", strip_module_name(xfun), q, strip_module_name(fun), T(c), x, ulp(T(c)))
end
end
rmean = rmean / length(xx)
t = @test trunc(rmax, digits=1) <= tol
fmtxloc = isa(xmax, Tuple) ? string('(', join((@sprintf("%.5f", x) for x in xmax), ", "), ')') : @sprintf("%.5f", xmax)
println(rpad(strip_module_name(xfun), 18, " "), ": max ", @sprintf("%f", rmax),
rpad(" at x = " * fmtxloc, 40, " "),
": mean ", @sprintf("%f", rmean))
end
end
function runtests()
@testset "SLEEF" begin
include("exceptional.jl")
include("accuracy.jl")
end
end
runtests()
| SLEEF | https://github.com/musm/SLEEF.jl.git |
|
[
"MIT"
] | 0.5.2 | 512b75d09aab52e93192e68de612fc472f001979 | docs | 2015 | <div align="center"> <img
src="https://rawgit.com/musm/SLEEF.jl/master/doc/src/assets/logo.svg"
alt="SLEEF Logo" width="380"></img> </div>
A pure Julia port of the [SLEEF math library](https://github.com/shibatch/SLEEF)
**History**
- Release [v0.4.0](https://github.com/musm/SLEEF.jl/releases/tag/v0.4.0) based on SLEEF v2.110
- Release [v0.3.0](https://github.com/musm/SLEEF.jl/releases/tag/v0.3.0) based on SLEEF v2.100
- Release [v0.2.0](https://github.com/musm/SLEEF.jl/releases/tag/v0.2.0) based on SLEEF v2.90
- Release [v0.1.0](https://github.com/musm/SLEEF.jl/releases/tag/v0.1.0) based on SLEEF v2.80
<br><br>
[](https://travis-ci.org/musm/SLEEF.jl)
[](https://ci.appveyor.com/project/musm/SLEEF-jl/branch/master)
[](https://coveralls.io/github/musm/SLEEF.jl?branch=master)
[](http://codecov.io/github/musm/SLEEF.jl?branch=master)
# Usage
To use `SLEEF.jl`
```julia
pkg> add SLEEF
julia> using SLEEF
julia> SLEEF.exp(3.0)
20.085536923187668
julia> SLEEF.exp(3f0)
20.085537f0
```
The available functions include (within 1 ulp)
```julia
sin, cos, tan, asin, acos, atan, sincos, sinh, cosh, tanh,
asinh, acosh, atanh, log, log2, log10, log1p, ilogb, exp, exp2, exp10, expm1, ldexp, cbrt, pow
```
Faster variants (within 3 ulp)
```julia
sin_fast, cos_fast, tan_fast, sincos_fast, asin_fast, acos_fast, atan_fast, atan2_fast, log_fast, cbrt_fast
```
## Notes
The trigonometric functions are tested to return values with specified
accuracy when the argument is within the following range:
- Double (Float64) precision trigonometric functions : `[-1e+14, 1e+14]`
- Single (Float32) precision trigonometric functions : `[-39000, 39000]`
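As a quick illustration (a hypothetical spot check, not taken from the test suite), results inside these ranges track `Base` closely:
```julia
julia> SLEEF.sin(1.0e6) ≈ Base.sin(1.0e6)
true
```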
| SLEEF | https://github.com/musm/SLEEF.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 667 | using PlotlyJS, DataFrames, CSV
Z_data = CSV.read("example_measurements.csv",DataFrame)
freqs = Z_data[:,3]
measurements = Z_data[:,1] .+ (Z_data[:,2] .* im)
# Nyquist plot to see what the original data looks like.
Plots.plot(real(measurements),-imag(measurements), label="Nyquist")
# Find the optimal lambda parameter using Saccoccio et al.'s Re-Im cross-validation test functions, this may take several minutes.
@time λ_opt = find_optimal_lambda(freqs, measurements)
# DRT calculation.
relaxation_times, peak_amplitudes, taus_out, drt = compute_DRT(freqs, measurements, λ = λ_opt)
# Visualisation.
plot_DRT(relaxation_times, peak_amplitudes, taus_out, drt)
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 3255 | # In this script we validate the DRT deconvolution method through comparison with two circuits for which the
# expression for the analytical DRT is available.
using DistributedRelaxationTimes, Plots
# I) Zarc model
# function to calculate the impedance of the Zarc model.
function zarc_impedance(R_inf,R_ct,τ_0,ϕ,f)
Z = R_inf + (R_ct/(1 + (2*π*im*f*τ_0)^ϕ))
return Z
end
# function for the analytical DRT calculation for the Zarc model.
function zarc_analytical_DRT(R_ct,τ_0,ϕ,f) #R_inf unused
τ = 1/f
gamma = (R_ct/(2*π))*(sin((1-ϕ)*π)/(cosh(ϕ*log(τ/τ_0)) - cos((1-ϕ)*π)))
return gamma
end
# define frequencies and parameters of the Zarc model.
frequencies_test = [10.0^i for i in LinRange(-2, 6, 100)]
R_inf, R_ct, τ_0, ϕ = [20,100,0.015,0.65]
# simulate and visualize the EIS measurements of the Zarc model.
measurements_test = [zarc_impedance(R_inf,R_ct,τ_0,ϕ,f_) for f_ in frequencies_test]
plot(real(measurements_test),-imag(measurements_test),label= "Zarc Nyquist")
# simulate and visualize the analytical DRT of the Zarc model.
analytical_DRT_test = [zarc_analytical_DRT(R_ct,τ_0,ϕ,f_) for f_ in frequencies_test]
taus = 1 ./ frequencies_test
scatter(taus,analytical_DRT_test,xaxis=:log, label = "Zarc analytical DRT")
# Calculate the DRT from the impedance measurements and compare with the analytical solution.
tau_relax_test , amps_test , taus_test , drt_test = compute_DRT(frequencies_test,measurements_test,λ = 10^-6, width_coeff = 0.1 ,method= "re_im")
plot!(taus_test , drt_test,xaxis = :log, label = "Zarc numerical DRT")
xlabel!("τ (s)")
ylabel!("γ (Ω)")
# II) Double Zarc model
# function to calculate the impedance of the double Zarc model.
function double_zarc_impedance(R_inf,R_ct,τ_0,ϕ,τ_02,f)
Z = R_inf + (R_ct/(1 + (2*π*im*f*τ_0)^ϕ)) + (R_ct/(1 + (2*π*im*f*τ_02)^ϕ))
return Z
end
# function for the analytical DRT calculation for the double Zarc model.
function double_zarc_analytical_DRT(R_ct,τ_0,τ_02,ϕ,f) #R_inf unused
τ = 1/f
gamma = (R_ct/(2*π))*sin((1-ϕ)*π) *(1/(cosh(ϕ*log(τ/τ_0)) - cos((1-ϕ)*π)) + 1/(cosh(ϕ*log(τ/τ_02)) - cos((1-ϕ)*π)))
return gamma
end
# Define additional parameters for double Zarc model.
τ_0,τ_02 = 0.02, 0.0008
# simulate and visualize the EIS measurements of the double Zarc model.
measurements_test2 = [double_zarc_impedance(R_inf,R_ct,τ_0,ϕ,τ_02,f_) for f_ in frequencies_test]
plot(real(measurements_test2),-imag(measurements_test2),label= "double Zarc Nyquist") # Note that the relaxation processes are badly resolved in the Nyquist plot.
# simulate and visualize the analytical DRT of the double Zarc model.
analytical_DRT_test = [double_zarc_analytical_DRT(R_ct,τ_0,τ_02,ϕ,f_) for f_ in frequencies_test]
taus = 1 ./ frequencies_test
scatter(taus,analytical_DRT_test,xaxis=:log, label="analytical DRT") # With two clear peaks, the relaxation processes are well resolved here.
# Calculate the DRT from the impedance measurements and compare with the analytical solution.
tau_relax_test_double , amps_test_double , taus_test_double , drt_test_double = compute_DRT(frequencies_test,measurements_test2,λ = 10^-6, width_coeff = 0.1 ,method= "re_im")
plot!(taus_test_double , drt_test_double, xaxis = :log, label="numerical DRT")
xlabel!("τ (s)")
ylabel!("γ (Ω)")
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 449 | module DistributedRelaxationTimes
export compute_DRT, find_optimal_lambda, plot_DRT
using Optim, QuadGK, Roots, FindPeaks1D
using Statistics, ToeplitzMatrices, LinearAlgebra, KernelFunctions
using Plots
include("construct_matrices.jl")
include("optimisation_functions.jl")
include("hyperparameter_tuning.jl")
include("calculate_shape_factor.jl")
include("compute_DRT.jl")
include("visualisations.jl")
end | DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 538 | """
calculate_shape_factor(frequencies,coefficient, rbf_kernel)
Calculates the shape factor of the RBF using the Full Width at Half Maximum (FWHM).
The inputs are the `frequencies`, the width coefficient hyperparameter `coefficient` and
the used RBF function `rbf_kernel`.
"""
function calculate_shape_factor(frequencies,coefficient, rbf_kernel)
rbf_fwhm(x) = rbf_kernel(x, 0) - 0.5
fwhm_coeff = 2find_zero(rbf_fwhm, 1);
D_f = mean(diff(log.(1 ./ frequencies)))
ϵ = fwhm_coeff * coefficient / D_f
return ϵ
end | DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 4424 | """
compute_DRT(frequencies, measurements; <keyword arguments>)
Calculate the Distribution of Relaxation Times using RBF discretization and Tikhonov regularisation.
The essential inputs are a set of frequencies and the impedance measurements conducted at those frequencies.
There are also a number of keyword arguments to fine-tune the calculation.
## Keyword arguments
- `method::String="im"`: the part of the measurements used to calculate the DRT.
- `rbf_kernel = SqExponentialKernel()`: The RBF used to discretize the DRT.
- `width_coeff::Float64=0.10`: the hyperparameter influencing the shape factor of the RBF.
- `λ::Float64=1e-2`: a hyperparameter tuning the degree of regularisation.
- `peak_strictness::Float64=0.01`: A measure to avoid artifacts in the DRT by removing peaks
with amplitude less than a given percentage of the highest peak.
"""
function compute_DRT(frequencies, measurements;
method="im",
width_coeff=0.10,
rbf_kernel=SqExponentialKernel(),
λ=1e-2,
peak_strictness=0.01)
ϵ = calculate_shape_factor(frequencies, width_coeff, rbf_kernel)
# Get the real and imaginary parts of the impedance measurements.
Z_exp_imag = imag(measurements)
Z_exp_real = real(measurements)
# Calculate the matrices for the reconstruction of the real and imaginary parts of the impedance spectra.
Z_drt_imag = construct_Z_imag(frequencies, ϵ, rbf_kernel)
Z_drt_real = construct_Z_real(frequencies, ϵ, rbf_kernel)
# Select the appropriate objective function for DRT calculation using the real part of the measurements, the imaginary part or both.
if method == "re_im"
obj = x -> joint_objective(Z_drt_imag, -Z_exp_imag, Z_drt_real, Z_exp_real, x, λ)
elseif method == "im"
obj = x -> objective(Z_drt_imag, -Z_exp_imag, x, λ)
elseif method == "re"
obj = x -> objective(Z_drt_real, Z_exp_real, x, λ)
end
# Initialize the weight vector θ, with values offset slightly from zero (boundary).
n = length(frequencies) + 1
θ = fill(0.05, n)
# Optimize the weights θ with the restriction θ >= 0 (no negative peaks).
upper = 1e8ones(n) # Arbitrarily large upper bound.
lower = zeros(n)
results = optimize(obj, lower, upper, θ, Fminbox(BFGS()), autodiff=:forward)
# The first value corresponds to R_inf (unless "im" is used), while the rest are the DRT weights.
θ_hat = results.minimizer[2:end]
# Get the frequencies over which the DRT is calculated (higher resolution than input frequencies through interpolation).
taumax = ceil(maximum(log10.(1 ./ frequencies))) + 1
taumin = floor(minimum(log10.(1 ./ frequencies))) - 1
out_frequencies = [10.0^i for i in range(-taumin, -taumax, length = 10length(frequencies))]
# Calculate the DRT over these frequencies, using the estimated weight vector θ_hat.
drt = drt_interpolation(out_frequencies, frequencies, θ_hat, ϵ, rbf_kernel)
# Find the indices of the peaks in the DRT profile, with removal of possible artifacts.
pkindices = get_peak_inds(drt, peak_strictness)
taus_out = 1 ./ out_frequencies
relaxation_times = taus_out[pkindices]
peak_amplitudes = drt[pkindices]
return relaxation_times, peak_amplitudes, taus_out, drt
end
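# A minimal usage sketch (`frequencies` and `measurements` stand for the
# user's input vectors; the keyword values shown are illustrative):
# relaxation_times, peak_amplitudes, taus_out, drt =
#     compute_DRT(frequencies, measurements; method="re_im", λ=1e-3)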
"""
drt_interpolation(out_frequencies,frequencies, θ, ϵ, rbf_kernel)
Calculates the DRT (defined on the whole real line) from the weights `θ`, the RBF information, and the frequencies.
"""
function drt_interpolation(out_frequencies, frequencies, θ, ϵ, rbf_kernel)
out_drt = Vector{Float64}(undef, length(out_frequencies))
x0 = -log.(frequencies)
x = -log.(out_frequencies)
for k in eachindex(out_frequencies)
out_drt[k] = (transpose(θ) * rbf_kernel.(ϵ.*x[k], ϵ.*x0))[1]
end
return out_drt
end
"""
get_peak_inds(drt,strictness)
Find the peaks in the DRT. Possible artifacts are eliminated depending
on the value of the strictness argument.
"""
function get_peak_inds(drt, strictness)
pkindices = findpeaks1d(drt)[1]
amplitudes = drt[pkindices]
max_amplitude = maximum(amplitudes)
to_remove = amplitudes .≤ strictness*max_amplitude
return pkindices[.!to_remove]
end | DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 3659 | """
integration_Z_imag(ϵ, fᵣ, fc, rbf_kernel)
Performs the numerical integration required to calculate the values
of the reconstructed imaginary impedance values in the DRT equation.
"""
function integration_Z_imag(ϵ, fᵣ, fc, rbf_kernel)
tmp = 2π * (fᵣ / fc)
integrand = x -> tmp / (1 / exp(x) + (tmp^2) * exp(x)) * rbf_kernel(x * ϵ, 0)
out_integral = quadgk(integrand, -Inf, Inf, rtol=1e-9)
return out_integral[1]
end
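# Substituting u = 2π(fᵣ/fc)·exp(x), the integrand above reduces to the
# standard imaginary-part DRT kernel u/(1 + u²), weighted by the RBF
# rbf_kernel(x*ϵ, 0).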
"""
integration_Z_real(ϵ,fᵣ,fc, rbf_kernel)
Performs the numerical integration required to calculate the values
of the reconstructed real impedance values in the DRT equation.
"""
function integration_Z_real(ϵ, fᵣ, fc, rbf_kernel)
tmp = 2π * (fᵣ / fc)
integrand = x -> (1 / (1 + (tmp^2) * exp(2 * x))) * rbf_kernel(x * ϵ, 0)
out_integral = quadgk(integrand, -Inf, Inf, rtol=1e-9)
return out_integral[1]
end
"""
construct_Z_imag(freq, ϵ, rbf_kernel)
Calculates the matrix to be multiplied with the weights `Θ` to obtain the imaginary part
of the reconstructed impedance values in the DRT equation.
As this matrix can be Toeplitz factorized, it can be efficiently constructed using
the first column and first rows.
"""
function construct_Z_imag(freq, ϵ, rbf_kernel)
#Initialize first row and column of out_Z_im
R = zeros(1, length(freq))
C = zeros(length(freq), 1)
# Calculate the values of the first column.
for i in eachindex(freq)
freq_R = freq[i]
freq_C = freq[1]
C[i,1] = integration_Z_imag(ϵ, freq_R, freq_C, rbf_kernel)
end
# Calculate the values of the first row.
for j in eachindex(freq)
freq_R = freq[1]
freq_C = freq[j]
R[1,j] = integration_Z_imag(ϵ, freq_R, freq_C, rbf_kernel)
end
temp_Z_im = Toeplitz(vec(C), vec(R))
out_Z_im = hcat(zeros((length(freq), 1)), temp_Z_im)
return out_Z_im
end
"""
construct_Z_real(freq, ϵ, rbf_kernel)
Calculates the matrix to be multiplied with the weights `Θ` to obtain
the real part of the reconstructed impedance values in the DRT equation.
As this matrix can be Toeplitz factorized, it can be efficiently constructed
using the first column and first rows.
"""
function construct_Z_real(freq, ϵ, rbf_kernel)
#Initialize first row and column of out_Z_re
R = zeros(1, length(freq))
C = zeros(length(freq), 1)
# Calculate the values of the first column.
for i in eachindex(freq)
freq_R = freq[i]
freq_C = freq[1]
C[i,1] = integration_Z_real(ϵ, freq_R, freq_C, rbf_kernel)
end
# Calculate the values for the first row.
for j in eachindex(freq)
freq_R = freq[1]
freq_C = freq[j]
R[1,j] = integration_Z_real(ϵ, freq_R, freq_C, rbf_kernel)
end
# Construct the Toeplitz matrix using the first column and row.
temp_Z_re = Toeplitz(vec(C), vec(R))
out_Z_re = hcat(ones(length(freq)), temp_Z_re)
return out_Z_re
end
# Slow functions that don't make use of the Toeplitz factorization.
function construct_Z_real_full(freq, ϵ, rbf_kernel)
out_Z = [integration_Z_real(ϵ, freq[i], freq[j], rbf_kernel) for i in eachindex(freq), j in eachindex(freq)]
return [ones(length(freq)) out_Z]
end
function construct_Z_im_full(freq, ϵ, rbf_kernel)
out_Z = [integration_Z_imag(ϵ, freq[i], freq[j], rbf_kernel) for i in eachindex(freq), j in eachindex(freq)]
return [zeros(length(freq)) out_Z]
end
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 5694 | """
find_optimal_lambda(frequencies, measurements;
method="re_im_cv",
width_coeff=0.1,
rbf_kernel=SqExponentialKernel())
Suggests values for the hyperparameter `λ` using Saccoccio et al.'s discrepancy or Re-Im cross-validation methods.
The function's inputs and keyword arguments are similar to those of the `compute_DRT` function,
with the exception of the `method` keyword argument, which allows users to choose between
- `re_im_cv` : Re-Im-crossvalidation
- `discrepancy`: minimisation of the discrepancy between the weights `θ` calculated
using the real and imaginary parts of the impedance spectrum.
"""
function find_optimal_lambda(frequencies, measurements;
method="re_im_cv",
width_coeff=0.1,
rbf_kernel=SqExponentialKernel()) #TODO: method based on found peaks
ϵ = calculate_shape_factor(frequencies, width_coeff, rbf_kernel)
Z_exp_real = real(measurements)
Z_exp_imag = imag(measurements)
#Select a range of λ values to evaluate.
lambda_values = [10.0^i for i in -6:1]
#initialize Vector of hyperparameter objective values.
hyperparam_objective_values = Vector{Float64}(undef, length(lambda_values))
Z_drt_imag = construct_Z_imag(frequencies, ϵ, rbf_kernel)
Z_drt_real = construct_Z_real(frequencies, ϵ, rbf_kernel)
#initialize thetas.
n = length(frequencies) + 1
θ = fill(0.05, n)
upper = 1e8ones(n)
lower = zeros(n)
for i in eachindex(lambda_values)
λ = lambda_values[i]
# Get the estimated θ vectors using the real and imaginary parts of the impedance.
results_imag = optimize(x -> objective(Z_drt_imag, -Z_exp_imag, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
results_real = optimize(x -> objective(Z_drt_real, Z_exp_real, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
# Select the relevant parts.
θ_imag = results_imag.minimizer
θ_real = results_real.minimizer
#reconstructed impedances where real part is calculated with thetas from imaginary part, and vice versa.
re_im_cv_Re = norm(Z_drt_real*θ_imag - Z_exp_real)^2
re_im_cv_Im = norm(Z_drt_imag*θ_real + Z_exp_imag)^2
#The differences between θ_imag and θ_real, as well as their reconstructed Z's should be minimized.
if method == "re_im_cv"
hyperparam_objective_values[i] = re_im_cv_Re + re_im_cv_Im
elseif method == "discrepancy"
hyperparam_objective_values[i] = norm(θ_imag - θ_real)^2
end
end
min_idx = findmin(hyperparam_objective_values)[2]
return lambda_values[min_idx]
end
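# Example usage (with hypothetical input vectors), as in the package tests:
# λ_opt = find_optimal_lambda(frequencies, measurements, method = "discrepancy")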
function find_optimal_wc(frequencies, measurements;
method="re_im_cv",
λ=1e-2,
rbf_kernel=SqExponentialKernel()) #TODO: method based on found peaks
Z_exp_real = real(measurements)
Z_exp_imag = imag(measurements)
#Select a range of width coefficient values to evaluate (heuristic candidate range).
wc_values = collect(0.05:0.05:0.5)
#initialize Vector of hyperparameter objective values.
hyperparam_objective_values = Vector{Float64}(undef, length(wc_values))
#initialize thetas.
n = length(frequencies) + 1
θ = fill(0.05, n)
upper = 1e8ones(n)
lower = zeros(n)
for i in eachindex(wc_values)
#The matrices depend on the shape factor, so they must be rebuilt for each candidate.
ϵ = calculate_shape_factor(frequencies, wc_values[i], rbf_kernel)
Z_drt_imag = construct_Z_imag(frequencies, ϵ, rbf_kernel)
Z_drt_real = construct_Z_real(frequencies, ϵ, rbf_kernel)
# Get the estimated θ vectors using the real and imaginary parts of the impedance.
results_imag = optimize(x -> objective(Z_drt_imag, -Z_exp_imag, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
results_real = optimize(x -> objective(Z_drt_real, Z_exp_real, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
# Select the relevant parts.
θ_imag = results_imag.minimizer
θ_real = results_real.minimizer
#reconstructed impedances where real part is calculated with thetas from imaginary part, and vice versa.
re_im_cv_Re = norm(Z_drt_real*θ_imag - Z_exp_real)^2
re_im_cv_Im = norm(Z_drt_imag*θ_real + Z_exp_imag)^2
#The differences between θ_imag and θ_real, as well as their reconstructed Z's, should be minimized.
if method == "re_im_cv"
hyperparam_objective_values[i] = re_im_cv_Re + re_im_cv_Im
elseif method == "discrepancy"
hyperparam_objective_values[i] = norm(θ_imag - θ_real)^2
end
end
min_idx = findmin(hyperparam_objective_values)[2]
return wc_values[min_idx]
end
function DRT_residuals(frequencies, measurements;
λ = 10^-2,
width_coeff=0.1,
rbf_kernel=SqExponentialKernel())
ϵ = calculate_shape_factor(frequencies, width_coeff, rbf_kernel)
Z_exp_real = real(measurements)
Z_exp_imag = imag(measurements)
Z_drt_imag = construct_Z_imag(frequencies, ϵ, rbf_kernel)
Z_drt_real = construct_Z_real(frequencies, ϵ, rbf_kernel)
#initialize thetas.
n = length(frequencies) + 1
θ = fill(0.05, n)
upper = 1e8ones(n)
lower = zeros(n)
# Get the estimated θ vectors using the real and imaginary parts of the impedance.
results_imag = optimize(x -> objective(Z_drt_imag, -Z_exp_imag, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
results_real = optimize(x -> objective(Z_drt_real, Z_exp_real, x, λ), lower, upper, θ,
Fminbox(BFGS()),autodiff=:forward);
# Select the relevant parts.
θ_imag = results_imag.minimizer
θ_real = results_real.minimizer
return Z_drt_real*θ_real, -Z_drt_imag*θ_imag, Z_exp_real, Z_exp_imag
end
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 1301 | """
regulariser(λ ,Θ)
Calculates the regularisation term using the L2-norm of
the weight vector `Θ` and the tuning hyperparameter `λ`.
"""
function regulariser(λ, θ)
return λ * norm(θ)^2
end
"""
objective(X, Y, θ, λ)
Objective function for the Tikhonov regularisation, where `X` is the matrix
for the reconstruction of the real or imaginary impedance values,
`Y` is the real or imaginary part of the impedance measurements, `θ` is
a vector of weights to be optimised, and `λ` is the regularisation hyperparameter.
"""
function objective(X, Y, θ, λ)
return norm(X * θ - Y)^2 + regulariser(λ, θ)
end
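# A small worked example (toy values, for illustration only): with
# X = [1.0 0.0; 0.0 1.0], Y = [1.0, 2.0] and θ = [1.0, 2.0], the data term
# norm(X*θ - Y)^2 vanishes, so objective(X, Y, θ, 0.1) reduces to the
# regularisation term 0.1*norm(θ)^2 ≈ 0.5.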
"""
joint_objective(X₁, Y₁, X₂, Y₂, θ, λ, weights)
Objective function for the DRT calculation using both the real (`Y₁`) and imaginary (`Y₂`)
parts of the impedance measurements.
- `X₁` and `X₂` are the matrices for the reconstruction of the real and imaginary impedance values, respectively.
- `θ` is a vector of weights to be optimised.
- `λ` is the regularisation hyperparameter.
- `weights` provides the option to provide more weight to the real or imaginary parts during the optimisation.
"""
function joint_objective(X₁, Y₁, X₂, Y₂, θ, λ, weights = [1,1])
return weights[1] * objective(X₁, Y₁, θ, λ) + weights[2] * objective(X₂, Y₂, θ, λ)
end | DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 1723 | # methods with dashed lines at the peaks
"""
plot_DRT(peak_times, peak_amps, τs, γs)
Visualises the output of the `compute_DRT` function.
Keyword arguments:
- `lab::String="DRT"`: the label of the plot.
- `title_::String="Distribution of relaxation times"`: the title of the plot.
- `color::Symbol=:auto`: the color of the DRT plot.
- `style::Symbol=:solid`: the linestyle of the DRT plot.
"""
function plot_DRT(peak_times, peak_amps, τs, γs; lab="DRT",
title_ = "Distribution of relaxation times",color = :auto, style= :solid)
fig = Plots.plot(τs,γs,xaxis=:log,label = lab,ylabel = "γ (Ω)",xlabel = "τ (s)", title = title_,linecolor=color,linestyle= style)
for (time,amp) in zip(peak_times,peak_amps)
Plots.plot!([time,time],[0,amp],linestyle=:dash,linewidth = 0.5, linecolor=:black, label=nothing)
end
return fig
end
function plot_DRT!(peak_times,peak_amps,τs,γs;lab="DRT",color = :auto, style= :solid)
fig = Plots.plot!(τs,γs,xaxis=:log,label = lab,ylabel = "γ (Ω)",xlabel = "τ (s)",linecolor=color,linestyle= style)
for (time,amp) in zip(peak_times,peak_amps)
Plots.plot!([time,time],[0,amp],linestyle=:dash,linewidth = 0.5, linecolor=:black, label=nothing)
end
return fig
end
function plot_DRT(τs,γs;lab="DRT",title_ = "Distribution of relaxation times",color = :auto, style= :solid)
fig = Plots.plot(τs,γs,xaxis=:log,label = lab,ylabel = "γ (Ω)",xlabel = "τ (s)", title = title_,linecolor=color,linestyle= style)
return fig
end
function plot_DRT!(τs,γs;lab="DRT",color = :auto,style = :solid)
fig = Plots.plot!(τs,γs,xaxis=:log,label = lab,ylabel = "γ (Ω)",xlabel = "τ (s)",linecolor=color,linestyle= style)
return fig
end
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | code | 4662 | using DistributedRelaxationTimes, Test
frequencies_ = [10.0^i for i in LinRange(-1, 5, 100)]
measurements = [5919.90084073586 - 15.794826681804063im, 5919.868918957754 - 18.159981630878193im, 5919.826721493486 - 20.879215074112448im, 5919.77094110946 - 24.005490361641563im, 5919.697206534505 - 27.5996700536246im, 5919.59974042323 - 31.731682036223145im, 5919.470907592793 - 36.481851523944236im, 5919.300618498117 - 41.94241925382893im, 5919.075541848076 - 48.21926686986334im, 5918.778065802768 - 55.43387008291376im, 5918.38492835473 - 63.72549788892651im, 5917.865413085487 - 73.25367072511777im, 5917.178975070375 - 84.2008800878181im, 5916.272121651965 - 96.77555401915463im, 5915.074322421108 - 111.21522277959507im, 5913.492660580024 - 127.78979070506573im, 5911.404863326102 - 146.8047445389881im, 5908.650263466139 - 168.6040122409868im, 5905.01815386162 - 193.5720108632083im, 5900.232914230937 - 222.1341623748456im, 5893.935244284243 - 254.7547799128479im, 5885.658880364992 - 291.9306958671349im, 5874.802395994789 - 334.1782808516779im, 5860.596240193616 - 382.01057171273965im, 5842.066282228876 - 435.9001282151594im, 5817.997131602745 - 496.2221448530169im, 5786.901777588199 - 563.1716852011452im, 5747.008971929933 - 636.649541823888im, 5696.286199425884 - 716.1146246637753im, 5632.5229447709735 - 800.40902838821im, 5553.503164244866 - 887.5771603050828im, 5457.291421987694 - 974.7229870738107im, 5342.63571539089 - 1057.9750960758556im, 5209.4450731665065 - 1132.6446972778447im, 5059.236125230311 - 1193.6450875217138im, 4895.386133169998 - 1236.172561118586im, 4723.026713879386 - 1256.5333240114528im, 4548.501342878661 - 1252.8884687705508im, 4378.476213125283 - 1225.659170623928im, 4218.953164799103 - 1177.4357324203618im, 4074.481664971828 - 1112.4265929000287im, 3947.7701493274017 - 1035.6542684768967im, 3839.722530710712 - 952.1595081247917im, 3749.780265405823 - 866.4069866223913im, 3676.396150822044 - 781.9638497571525im, 3617.4939566926623 - 701.420400138136im, 3570.8317812956043 - 626.4733612567127im, 3534.245902976201 - 558.0896645518244im, 3505.788210543568 - 496.6898904301924im, 3483.784725125271 - 442.316077721325im, 3466.8432339785336 - 394.76882902583475im, 3453.8323835170477 - 353.71120514267693im, 3443.847637151783 - 318.7431703076497im, 3436.173566411038 - 289.4525620921043im, 3430.24764536956 - 265.4486049253349im, 3425.627926154446 - 246.38310254978558im, 3421.965311336009 - 231.96331698846592im, 3418.980231849205 - 221.95948932826835im, 3416.4430939302847 - 216.20909421612302im, 3414.1576734237437 - 214.61925725131152im, 3411.946581771727 - 217.16827186968976im, 3409.637927337527 - 223.90678639410245im, 3407.052304085562 - 234.95894874507883im, 3403.989233741757 - 250.523554971262im, 3400.2121579676777 - 270.8750095898293im, 3395.431025444823 - 296.3636315873987im, 3389.281458997103 - 327.41448706936364im, 3381.299451678079 - 364.52344909395174im, 3370.890588381016 - 408.2485221315609im, 3357.293027215301 - 459.19356735128684im, 3339.534079521559 - 517.9803876365069im, 3316.3814761692647 - 585.2036991342916im, 3286.2927015970045 - 661.3619922702401im, 3247.369626855728 - 746.7561190194042im, 3197.3315815837877 - 841.3475891914335im, 3133.5281243349123 - 944.5717302553313im, 3053.0221738121836 - 1055.1096556297796im, 2952.781645138942 - 1170.6404236111093im, 2830.016468891848 - 1287.6225607516105im, 2682.6772534815427 - 1401.1888600099994im, 2510.0812678967413 - 1505.266278330185im, 2313.5507090656965 - 1593.0272636293244im, 2096.8626356612517 - 1657.7095022691499im, 1866.275791227017 - 1693.7003981626112im, 1629.9786274825053 - 1697.6182705539127im, 
1397.0038356021628 - 1669.035951938821im, 1175.8894122499416 - 1610.573950641444im, 973.4896731229098 - 1527.326198286415im, 794.2658164284339 - 1425.8394209818277im, 640.1666399505157 - 1312.996963129836im, 510.9874097117643 - 1195.110547896894im, 404.9803160737285 - 1077.3688437226629im, 319.49886763057725 - 963.637281562148im, 251.536931927523 - 856.5140708070351im, 198.10898333966395 - 757.5274334450082im, 156.4775353551784 - 667.3811117725145im, 124.26151768696323 - 586.1900054869882im, 99.46465196353648 - 513.6781103222461im, 80.45690042721759 - 449.3310955471872im, 65.93273498232111 - 392.50667235657im]
λ_opt = find_optimal_lambda(frequencies_, measurements,method = "discrepancy")
tau_relax , amps , taus , drt = compute_DRT(frequencies_,measurements,λ = λ_opt, width_coeff = 0.1 ,method= "re_im")
@testset "DRT.jl" begin
#test Re-Im discrepancy method for λ estimation.
@test λ_opt == 1
#number of peaks for the example measurements should be 2.
@test length(tau_relax) == 2
end
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"MIT"
] | 0.1.0 | c233307f6162237f169a26e50d2d5596b0e60602 | docs | 3085 | # DistributedRelaxationTimes.jl
A Julia package to calculate the **Distribution of Relaxation Times (DRT)** of a given set of electrochemical impedance spectroscopy measurements and frequency values. The current implementation uses the Tikhonov regularisation approach with radial basis function discretization.
# Usage
The DRT is a method to analyse **electrochemical impedance spectroscopy (EIS)** measurements. Two of its main benefits are
- that it does not require a specific prior equivalent electrical circuit model assumption.
- that it is capable of resolving polarisation processes with similar time constants, which are indistinguishable when using traditional EIS data representation methods, such as Nyquist and Bode plots.
The main function exported in this module is the `compute_DRT` function. Its two essential arguments are a set of frequencies and the complex-valued impedances measured at those frequencies.
```julia
using DistributedRelaxationTimes, DataFrames, CSV
# Read the measurements.
#assuming the measurements in working directory.
Z_data = CSV.read("example_measurements.csv",DataFrame)
#Obtain the frequencies and measurements from the loaded data.
frequencies = Z_data[:,3]
measurements = Z_data[:,1] .+ (Z_data[:,2] .* im)
# Calculate the DRT using default keyword argument values.
peak_taus, peak_amplitudes, taus_out, drt = compute_DRT(frequencies, measurements)
```
The `compute_DRT` function has four outputs: the relaxation times of the DRT peaks, their amplitudes, the time values over which the DRT is calculated, and the calculated DRT, respectively. Apart from the two essential arguments, the `compute_DRT` function also accepts several keyword arguments:
- `method` : Determines whether the DRT is calculated using the real part of the measurements ("re"), the imaginary part of the measurements ("im"), or both ("re_im"). The default value is "re_im".
- `rbf_kernel` : The radial basis function used for the discretisation of the DRT. The default value is KernelFunctions.jl's `SqExponentialKernel()`.
- `width_coeff` : hyperparameter related to the shape factor of the radial basis functions, lower values lead to wider DRT peaks. The default value is 0.08.
- `λ` : The hyperparameter tuning the regularisation during ridge regression. The default value is 1e-2.
While there is no universal way to automatically find the most suitable value for the regularisation hyperparameter `λ`, the discrepancy minimisation and Re-Im crossvalidation methods proposed by [Saccoccio et al.](https://www.sciencedirect.com/science/article/abs/pii/S0013468614018763) are implemented in this package as the `find_optimal_lambda` function.
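For example, combining the two steps above:
```julia
# Estimate a suitable regularisation strength, then pass it to the DRT calculation.
λ_opt = find_optimal_lambda(frequencies, measurements)
peak_taus, peak_amplitudes, taus_out, drt = compute_DRT(frequencies, measurements, λ = λ_opt)
```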
Some basic plotting functionality is provided to visualise the outputs of the `compute_DRT` function. The function `plot_DRT` does this as follows (using the outputs calculated earlier):
```julia
#With optional arguments for a label "example" and a green color of the plot.
plot_DRT(peak_taus, peak_amplitudes, taus_out, drt, lab = "example", color = "green")
```
| DistributedRelaxationTimes | https://github.com/MaximeVH/DistributedRelaxationTimes.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 716 | using BifrostTools
using Documenter
DocMeta.setdocmeta!(BifrostTools, :DocTestSetup, :(using BifrostTools); recursive=true)
makedocs(;
modules=[BifrostTools],
authors="meudnaes <[email protected]>, eilifso <[email protected]> and contributors",
sitename="BifrostTools.jl",
format=Documenter.HTML(;
canonical="https://ITA-Solar.github.io/BifrostTools.jl",
edit_link="develop",
assets=String[],
),
pages=[
"Home" => "index.md",
"Installation" => "install.md",
"Example Usage" => "usage.md",
"Documentation" => "documentation.md"
],
)
deploydocs(;
repo="github.com/ITA-Solar/BifrostTools.jl",
devbranch="develop",
)
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 1121 | module BifrostTools
using FortranFiles: FortranFile, read, readlines
using OffsetArrays
using DelimitedFiles
using Printf
using Interpolations
using Mmap
using LoopVectorization
include("mesh.jl")
include("experiment.jl")
include("utils.jl")
include("stagger_operators.jl")
include("read_params_snap_aux.jl")
include("eos_tables.jl")
include("unit_conversion.jl")
#-------------------------------------------------------------------------------
# Exports
#-------------------------------------------------------------------------------
# Structs
export BifrostMesh
export BifrostExperiment
export EOSTables
# mesh.jl
export make_uniform_axes
export get_axes
export mesh2file
# read_params_snap_aux.jl
export read_params
export get_var, get_snap_numbers, get_electron_density
# eos_tables.jl
export get_eostable
export eos_interpolate
# stagger_operators.jl
# Basic stagger operations with optional BC extrapolation
export xup
export dxup
export xdn
export dxdn
export yup
export dyup
export ydn
export dydn
export zup
export dzup
export zdn
export dzdn
export destaggeroperation
end # module BifrostTools
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 3235 |
struct EOSTables
tabparamsf::String
tabparamsf_root::String
params::Dict{String,String}
nRhoBin::Int32
RhoAxis::Vector{Float32}
nEiBin::Int32
EiAxis::Vector{Float32}
RhoEi_recl::Int32
RhoEiRadTable_recl::Int
nTgBin::Int32
TgAxis::Vector{Float32}
nNeBin::Int32
NeAxis::Vector{Float32}
NeTgRadTable_recl::Int
nRadBins::Int32
function EOSTables(tabparams::String)
tabparamsf = normpath(tabparams)
tabparamsf_root = dirname(tabparamsf)
p = read_params(tabparams)
RhoMin = log(parse(Float64, p["RhoMin"]))
lnRhor = log(parse(Float64, p["RhoMax"])) - RhoMin
nRhoBin = parse(Int, p["nRhoBin"])
lnRho = [RhoMin + Float32(i - 1) / Float32(nRhoBin - 1) * lnRhor for i = 1:nRhoBin]
EiMin = log(parse(Float64, p["EiMin"]))
lnEir = log(parse(Float64, p["EiMax"])) - EiMin
nEiBin = parse(Int,p["nEiBin"])
lnEi = [EiMin + Float32(i - 1) / Float32(nEiBin - 1) * lnEir for i = 1:nEiBin]
nRadBins = parse(Int, p["nRadBins"])
RhoEi_recl = nEiBin * nRhoBin * 4
RhoEiRadTable_recl = nEiBin * nRhoBin * nRadBins
nTBin = -1
lnTg = [-1]
if haskey(p, "TMin") && haskey(p, "TMax")
TMin = log(parse(Float64, p["TMin"]))
lnTgr = log(parse(Float64, p["TMax"])) - TMin
nTBin = parse(Int, p["nTBin"])
lnTg = [TMin + Float32(i - 1) / Float32(nTBin - 1) * lnTgr for i = 1:nTBin]
end
nNeBin = -1
lnNe = [-1]
NeTgRadTable_recl = -1
if haskey(p, "NeMin") && haskey(p, "NeMax")
NeMin = log(parse(Float64, p["NeMin"]))
lnNer = log(parse(Float64, p["NeMax"])) - NeMin
nNeBin = parse(Int, p["nNeBin"])
lnNe = [NeMin + Float32(i - 1) / Float32(nNeBin - 1) * lnNer for i = 1:nNeBin]
NeTgRadTable_recl = nNeBin * nTBin * nRadBins * 2
end
new(
tabparamsf,
tabparamsf_root,
p,
nRhoBin,
lnRho,
nEiBin,
lnEi,
RhoEi_recl,
RhoEiRadTable_recl,
nTBin,
lnTg,
nNeBin,
lnNe,
NeTgRadTable_recl,
nRadBins,
)
end
end
function get_eostable(t::EOSTables)
f = FortranFile(
joinpath(t.tabparamsf_root, t.params["EOSTableFile"]),
"r",
access="direct",
recl=t.RhoEi_recl * 4,
)
var = read(f, rec=1, (Float32, (t.nEiBin, t.nRhoBin, 4)))
return var
end
# --- interpolate from eos
function eos_interpolate(eos::EOSTables, nvar::Int)
lnRho = log(parse(Float32,eos.params["RhoMax"]) / parse(Float32,eos.params["RhoMin"]))
dlnRho = lnRho / (parse(Float32,eos.params["nRhoBin"]) - 1)
lnEi = log(parse(Float32,eos.params["EiMax"]) / parse(Float32,eos.params["EiMin"]))
dlnEi = lnEi / (parse(Float32,eos.params["nEiBin"]) - 1)
eia = eos.EiAxis[1]:dlnEi:eos.EiAxis[end]
rhoa = eos.RhoAxis[1]:dlnRho:eos.RhoAxis[end]
tab = get_eostable(eos)
return cubic_spline_interpolation((eia, rhoa), tab[:, :, nvar], extrapolation_bc=Line())
end
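# A usage sketch (the table-parameter file name is hypothetical):
# eos = EOSTables("tabparam.in")
# itp = eos_interpolate(eos, 1)   # interpolant for the first table variable
# itp(lnEi, lnRho)                # evaluate at a (ln Ei, ln rho) point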
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 2079 |
struct BifrostExperiment
mesh ::BifrostMesh
expname ::String
expdir ::String
snaps ::Vector{Int64}
snapsize ::Tuple{Int64, Int64, Int64}
num_snaps ::Int64
num_primary_vars::Int64
function BifrostExperiment(
expname::String="none",
expdir::String=pwd()
;
mesh_file=nothing
)
if expname=="none"
expname = splitpath(expdir)[end]
end
filenames = readdir(expdir)
# Find mesh-file
if isnothing(mesh_file)
mesh_file = ""
mesh_match = false
for filename in filenames
match_result = match(r"^" * expname * r".*\.mesh$", filename)
if !isnothing(match_result)
if mesh_match
error("Found multiple mesh files.")
else
mesh_file *= match_result.match
mesh_match = true
end
end
end
else
mesh_match = true
end
if mesh_match
mesh = BifrostMesh(string(expdir, "/", mesh_file))
else
error("Did not find mesh file with expname '$expname' in $expdir")
end
# Find number of snaps
snaps = get_snap_numbers(expdir, expname; filenames=filenames,
findall=true)
# Get some snap-independent parameters
params_file = string(
joinpath(expdir,expname),
"_", lpad(snaps[1],3,"0"),
".idl"
)
params = read_params(params_file)
snapsize, num_vars = get_snapsize_and_numvars(params)
new(mesh, expname, expdir, sort(snaps), snapsize, length(snaps),
num_vars[1]
)
end
end
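# A usage sketch (experiment name and directory are hypothetical):
# xp = BifrostExperiment("my_experiment", "/path/to/simulation")
# xp.snaps      # snapshot numbers found in the directory
# xp.snapsize   # (mx, my, mz) of the simulation grid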
function get_axes(
xp::BifrostExperiment
;
units="code"
)
params = read_params(xp, xp.snaps[1])
return convert_axesunits(xp.mesh, params; units=units)
end
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 3300 | """
BifrostMesh
Stores Bifrost grid information in struct
"""
struct BifrostMesh
mx::Int64
x::Vector{Float32}
xmdn::Vector{Float32}
dxidxup::Vector{Float32}
dxidxdn::Vector{Float32}
my::Int64
y::Vector{Float32}
ymdn::Vector{Float32}
dyidyup::Vector{Float32}
dyidydn::Vector{Float32}
mz::Int64
z::Vector{Float32}
zmdn::Vector{Float32}
dzidzup::Vector{Float32}
dzidzdn::Vector{Float32}
n::Int64
function BifrostMesh(expdir::String)
# Check if `expdir` is path to mesh_file or folder of experiment
expname = splitpath(expdir)[end]
if split(expname, ".")[end] == "mesh"
mesh_file = expdir
else
mesh_file = joinpath(expdir, expname*".mesh")
end
f = open(mesh_file, "r")
l = readlines(f)
# -- x direction
mx = parse.(Int64, l[1])
x = parse.(Float32, split(l[2]))
xmdn = parse.(Float32, split(l[3]))
dxidxup = parse.(Float32, split(l[4]))
dxidxdn = parse.(Float32, split(l[5]))
# -- y direction
my = parse.(Int64, l[6])
y = parse.(Float32, split(l[7]))
ymdn = parse.(Float32, split(l[8]))
dyidyup = parse.(Float32, split(l[9]))
dyidydn = parse.(Float32, split(l[10]))
# -- z direction
mz = parse.(Int64, l[11])
z = parse.(Float32, split(l[12]))
zmdn = parse.(Float32, split(l[13]))
dzidzup = parse.(Float32, split(l[14]))
dzidzdn = parse.(Float32, split(l[15]))
new(
mx,
x,
xmdn,
dxidxup,
dxidxdn,
my,
y,
ymdn,
dyidyup,
dyidydn,
mz,
z,
zmdn,
dzidzup,
dzidzdn,
mx * my * mz
)
end
end
function mesh2file(M::BifrostMesh, file_name::String ="bifrost.mesh")
open(file_name,"w") do io
println(io, @sprintf "%d" M.mx)
println(io, join([@sprintf "%e" x for x in M.x], " "))
println(io, join([@sprintf "%e" x for x in M.xmdn], " "))
println(io, join([@sprintf "%e" x for x in M.dxidxup], " "))
println(io, join([@sprintf "%e" x for x in M.dxidxdn], " "))
println(io, @sprintf "%d" M.my)
println(io, join([@sprintf "%e" x for x in M.y], " "))
println(io, join([@sprintf "%e" x for x in M.ymdn], " "))
println(io, join([@sprintf "%e" x for x in M.dyidyup], " "))
println(io, join([@sprintf "%e" x for x in M.dyidydn], " "))
println(io, @sprintf "%d" M.mz)
println(io, join([@sprintf "%e" x for x in M.z], " "))
println(io, join([@sprintf "%e" x for x in M.zmdn], " "))
println(io, join([@sprintf "%e" x for x in M.dzidzup], " "))
println(io, join([@sprintf "%e" x for x in M.dzidzdn], " "))
end
end
function make_uniform_axes(
mesh ::BifrostMesh,
new_mx::Integer,
new_my::Integer,
new_mz::Integer,
)
# Get new mesh-axes
new_x = collect(LinRange(mesh.x[1], mesh.x[end], new_mx))
new_y = collect(LinRange(mesh.y[1], mesh.y[end], new_my))
new_z = collect(LinRange(mesh.z[1], mesh.z[end], new_mz))
return new_x, new_y, new_z
end
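# Usage sketch: resample the (possibly non-uniform) mesh axes onto a uniform
# grid, e.g. before interpolating snapshot data. The grid sizes are arbitrary.
#
#   x_u, y_u, z_u = make_uniform_axes(xp.mesh, 256, 256, 256)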
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 24845 |
const destaggeroperation = Dict(
"px" => xup,
"py" => yup,
"pz" => zup,
"bx" => xup,
"by" => yup,
"bz" => zup,
"ex" => yupzup,
"ey" => zupxup,
"ez" => xupyup,
"ix" => yupzup,
"iy" => zupxup,
"iz" => xupyup
)
"""
read_params(file_name::String)
Reads and returns parameters `params` of a Bifrost simulation snapshot given
the path `file_name` to the simulation snapshot. The input file should have the
format 'name_xxx.idl' where 'name' is the simulation name and 'xxx' is the
snapshot number
"""
function read_params(file_name::String)
params = Dict{String,String}()
open(file_name, "r") do file
for line in eachline(file)
line = strip(line)
if !isempty(line) && line[1] ≠ ';'
line = replace(line, "\"" => "")
line = replace(line, "'" => "")
key, val = split(strip(line), '=')
params[strip(key)] = strip(val)
end
end
end
# special case for the ixy1 variable, lives in a separate file
if "aux" in keys(params)
params["aux"] = replace(params["aux"], " ixy1" => "")
end
return params
end
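# Usage sketch (hypothetical file): all parameter values are returned as
# strings and must be parsed by the caller.
#
#   params = read_params("/path/to/cb24oi/cb24oi_700.idl")
#   mx = parse(Int, params["mx"])
#   t  = parse(Float64, params["t"])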
"""
read_params(expname::String, snap::Integer, expdir::String)
"""
function read_params(
expname::String,
snap::Integer,
expdir::String
)
isnap = lpad(snap,3,"0")
idl_file = string(joinpath(expdir,expname),"_",isnap,".idl")
read_params(idl_file)
end
"""
read_params(xp::BifrostExperiment, snap::Integer)
"""
function read_params(
xp::BifrostExperiment,
snap::Integer,
)
read_params(xp.expname, snap, xp.expdir)
end
"""
get_snap(
expname::String,
snap ::Int,
expdir ::String,
)
Reads Bifrost *.snap binary file as an Array in dimension: (mx,my,mz,nvar).
Takes experiment name, experiment directory, and snap number as arguments.
Returns `snapdata` (the data) and `params` (the snap parameters).
Assumes single floating point precision by default.
Variables of `snapdata`:
snapdata[:,:,:,1] : r, density
snapdata[:,:,:,2] : px, x-component of momentum
snapdata[:,:,:,3] : py, y-component of momentum
snapdata[:,:,:,4] : pz, z-component of momentum
snapdata[:,:,:,5] : e, energy
if params["do_mhd"] == 1 # numvars = 8, otherwise numvars = 5
snapdata[:,:,:,6] : bx, x-component of magnetic field
snapdata[:,:,:,7] : by, y-component of magnetic field
snapdata[:,:,:,8] : bz, z-component of magnetic field
Warning:
    variables are given in code units.
"""
function get_snap(
expname ::String,
snap ::Int,
expdir ::String,
precision::DataType=Float32
)
# Parse filenames
basename = string(joinpath(expdir, expname),"_$(lpad(snap,3,"0"))")
idl_filename = string(basename, ".idl")
snap_filename = string(basename, ".snap")
params = read_params(idl_filename)
return get_snap(snap_filename, params, precision), params
end
"""
get_snap(
file_name::String,
params ::Dict{String,String}
)
"""
function get_snap(
file_name::String,
params ::Dict{String,String},
precision::DataType=Float32
)
datadims = 4 # 3 spatial dimensions and 1 variable dimension
snapsize, numvars, _ = get_snapsize_and_numvars(params)
file = open(file_name)
# Use Julia standard-library memory-mapping to extract file values
snapdata = mmap(file, Array{precision, datadims}, (snapsize..., numvars))
close(file)
return snapdata
end
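# Usage sketch (hypothetical experiment): the returned 4D array is
# memory-mapped; the fourth axis indexes the primary variables in the order
# given by `primary_vars`, e.g. density is variable 1.
#
#   snapdata, params = get_snap("cb24oi", 700, "/path/to/cb24oi")
#   rho = snapdata[:, :, :, 1]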
"""
get_aux(
file_name::String,
params ::Dict{String,String}
)
Reads Bifrost *.aux binary file using memory-mapping. The returned
`auxdata` array will have dimensions (mx,my,mz,nvars) where nvars is the
number of aux-variables. Assumes single floating point precision by default.
"""
function get_aux(
file_name::String,
params ::Dict{String,String},
precision::DataType=Float32
)
datadims = 4
snapsize, _, numauxvars = get_snapsize_and_numvars(params)
if numauxvars == 0
return
else
file = open(file_name)
# Use Julia standard-library memory-mapping to extract file values
auxdata = mmap(file, Array{precision, datadims},
(snapsize..., numauxvars)
)
close(file)
return auxdata
end
end
"""
get_var(
xp::BifrostExperiment,
snap::Union{<:Integer, AbstractVector{<:Integer}},
variable::String,
args...
;
kwargs...
)
Load a `variable` from one or multiple snapshots of `xp`.
# Available variables
The primary variables:
- "r": density
- "px": x-component of momentum
- "py": y-component of momentum
- "pz": z-component of momentum
- "e": energy
`if params["do_mhd"] == true`
- "bx": x-component of magnetic field
- "by": y-component of magnetic field
- "bz": z-component of magnetic field
Auxiliary variables (variables in params["aux"]):
- "p": pressure
- "tg": gas temperature
...
# Optional keyword-arguments
Converts variables to "si" or "cgs" units: `units="si"` or `units="cgs"`.
To load a slice of the variable, give e.g. `slicex=[32, 410]` or `slicey=40:90`
# Example usage:
```{julia}
expname = "cb24oi"
expdir = "/mn/stornext/d21/RoCS/matsc/3d/run/cb24oi"
snap = 700
xp = BifrostExperiment(expname, expdir)
# Load pressure for the full cube in SI units
pressure = get_var(xp, snap, "p"; units="si")
# Load gas density in a slice along the xy-plane in cgs units
rho = get_var(xp, snap, "r"; units="cgs", slicez=[100])
```
"""
function get_var(
xp ::BifrostExperiment,
snap ::Union{<:Integer, AbstractVector{<:Integer}},
variable::String,
args...
;
kwargs...
)
get_var(xp.expname, snap, xp.expdir, variable, args...; kwargs...)
end
"""
get_var(
expname ::String,
snap ::Union{<:Integer, AbstractVector{<:Integer}},
expdir ::String,
variable::String,
args...
;
kwargs...
)
Load a `variable` from one or multiple snapshots of a Bifrost experiment with
experiment directory `expdir` and experiment name `expname`.
"""
function get_var(
expname ::String,
snaps ::Union{<:Integer, AbstractVector{<:Integer}},
expdir ::String,
variable::String,
args...
;
precision::DataType=Float32,
kwargs...
)
if variable == "t"
# The special case of getting the snapshot time
return get_time(expname,snaps,expdir;kwargs...)
end
# Allocate space for variable
data = Vector{Array{precision, 3}}(undef, length(snaps))
# Check if user wants data to be destaggered. If so we have to
# call get_and_destagger_var. If not, we may call get_var
if get(kwargs, :destagger, false)
# Check if destagger-operation is passed as a keyword-argument.
# If not, use default operation corresponding to the requested
# variable.
# get correct boundary condition if it is not given
# only for b-field and velocity vector
if !haskey(kwargs,:periodic)
if variable in ["bx","by","px","py"]
kwargs = addtokwargs(
;periodic=true,
kwargs...
)
elseif variable in ["bz","pz"]
kwargs = addtokwargs(
;periodic=false,
kwargs...
)
end
end
if get(kwargs,:destaggeroperation,false)
get_function = get_and_destagger_var
elseif variable in keys(destaggeroperation)
get_function = get_and_destagger_var
kwargs = addtokwargs(
;destaggeroperation=destaggeroperation[variable],
kwargs...
)
else
error("Destaggering of $variable is not implemented. "*
"Set the keyword-argument `destaggeroperation`"
)
end
else
get_function = get_var
end
# What we need in params should be constant for experiment
params = read_params(expname,snaps[1],expdir)
varnr, file_ext = get_varnr_and_file_extension(params, variable)
# Loop over snapshots
for (i,snap) in enumerate(snaps)
tmp_file = string(joinpath(expdir,expname),"_",lpad(snap,3,"0"),file_ext)
data[i] = get_function(
tmp_file,
params,
varnr,
;
precision=precision,
kwargs...)
end
# -------------------------------------------------------------------------
# Below is where you extend the functionality of get_var by handling
# arbitrary keyword arguments. Please put your new implementation into a
# new function.
# -------------------------------------------------------------------------
# UNITS: Scale from code units to something else
# If multiple snapshots: Assumes the same conversion factor for all
if haskey(kwargs,:units)
data = convert_units(data, variable, params, kwargs[:units])
end
# ORIENTATION: Rotate coordinate system
if haskey(kwargs,:rotate_about)
data = rotate(data, variable, kwargs[:rotate_about])
end
# SQUEEZE: Drop empty dimensions
# Allocates new data with fewer dims, and copies this into data
if get(kwargs,:squeeze,false)
# number of non-empty dimension
dims = count( size(data[1]) .≠ 1 )
if dims == 0
new_data = Vector{precision}(undef,length(snaps))
else
new_data = Vector{Array{precision,dims}}(undef,length(snaps))
end
for i in eachindex(data)
new_data[i] = squeeze(data[i])
end
data = new_data
end
    # UNWRAP: Return the bare 3D array if a single snapshot was requested
if length(snaps) == 1
data = data[1]
end
# -------------------------------------------------------------------------
# Additional kwargs and functionality go under here
# -------------------------------------------------------------------------
return data
end
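# Usage sketch (hypothetical experiment): load the x-momentum of several
# snapshots, destaggered to cell centers and converted to SI units. With
# `destagger=true` the stagger operation and boundary condition are inferred
# from the variable name unless given explicitly.
#
#   px = get_var("cb24oi", 700:710, "/path/to/cb24oi", "px";
#                destagger=true, units="si")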
"""
get_var(
filename ::String,
params ::Dict{String,String},
varnr ::Integer,
precision ::DataType=Float32;
slicex ::AbstractVector{<:Integer}=Int[],
slicey ::AbstractVector{<:Integer}=Int[],
slicez ::AbstractVector{<:Integer}=Int[]
)
Load variable nr. `varnr` from `filename`. The variable could be either
primary or auxiliary. Slicing the snapshot is optional. Assumes single
precision snapshot by default.
"""
function get_var(
filename ::String,
params ::Dict{String,String},
varnr ::Integer
;
precision ::DataType=Float32,
slicex ::AbstractVector{<:Integer}=Int[],
slicey ::AbstractVector{<:Integer}=Int[],
slicez ::AbstractVector{<:Integer}=Int[],
kwargs...
)
    datadims = 3 # 3 spatial dimensions (a single variable)
snapsize = get_snapsize(params)
# Calculate offset in file
offset = get_variable_offset_in_file(precision, snapsize, varnr)
file = open(filename)
# Do slicing or not (returns the mmap)
# Use Julia standard-library memory-mapping to extract file values
if isempty(slicex) && isempty(slicey) && isempty(slicez)
# do not slice the variable
data = mmap(file,
Array{precision, datadims},
snapsize,
offset
)
else
isempty(slicex) && ( slicex = 1:snapsize[1] )
isempty(slicey) && ( slicey = 1:snapsize[2] )
isempty(slicez) && ( slicez = 1:snapsize[3] )
# slice the variable
data = mmap(file,
Array{precision, datadims},
snapsize,
offset
)[slicex,slicey,slicez]
end
close(file)
return data
end
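# Usage sketch (low-level): read variable 5 (the energy) from a .snap file and
# keep only a single horizontal plane. `snap_file` and `params` are assumed to
# come from the matching snapshot and `read_params`.
#
#   e_plane = get_var(snap_file, params, 5; slicez=[32])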
function get_time(
expname::String,
snap ::Union{<:Integer, AbstractVector{<:Integer}},
expdir ::String
;
units::String="code",
kwargs...
)
nsnaps = length(snap)
if typeof(snap) <: Integer
params = read_params(expname,snap,expdir)
data = parse(Float64, params["t"])
else
data = Vector{Float64}(undef, nsnaps)
# Load the variable directly from params
for (i,snap) in enumerate(snap)
params = read_params(expname,snap,expdir)
time = parse(Float64, params["t"])
data[i] = time
end
end
if units != "code"
data = convert_timeunits(data, params)
end
return data
end
"""
    function get_and_destagger_var(
        filename::String,
        params::Dict{String,String},
        varnr::Integer,
        ;
        destaggeroperation::Function,
        periodic::Bool=false,
order::Int=6,
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
kwargs...
)
Function to load a staggered variable and interpolate it to cell center.
The staggered variables that typically need to be interpolated are the velocity
and magnetic field components. Normally you need to use `destaggeroperation=zup`
for vz and bz with `periodic=false`, and `destaggeroperation=xup` for vx and bx
with `periodic=true` (same for y direction).
"""
function get_and_destagger_var(
filename::String,
params::Dict{String,String},
varnr::Integer,
;
destaggeroperation::Function,
periodic::Bool=false,
order::Int=6,
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
kwargs...
)
if isempty(slicex) && isempty(slicey) && isempty(slicez)
        # load the entire variable and destagger it in the desired direction
        data = get_var(filename, params, varnr)
data = destaggeroperation(data,periodic,order)
else
if destaggeroperation in (xup, xdn)
# Load var
data = get_var(filename,params,varnr;
slicey=slicey,slicez=slicez,kwargs...
)
if isempty(slicex)
# All indices in 'x' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
else
# Call to the function that slices in x
data = destaggeroperation(data,slicex,periodic,order)
end
elseif destaggeroperation in (yup, ydn)
data = get_var(filename,params,varnr;
slicex=slicex,slicez=slicez,kwargs...
)
if isempty(slicey)
# All indices in 'y' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
else
# Call to the function that slices in y
data = destaggeroperation(data,slicey,periodic,order)
end
elseif destaggeroperation in (zup, zdn)
data = get_var(filename, params, varnr;
slicex=slicex, slicey=slicey, kwargs...
)
if isempty(slicez)
# All indices in 'z' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
else
# Call to the function that slices in z
data = destaggeroperation(data,slicez,periodic,order)
end
#
# POSSIBLE TO SIMPLIFY THIS?
# Always passing slice to the operation and handling it there?
#
elseif destaggeroperation == yupzup
data = get_var(filename, params, varnr; slicex=slicex, kwargs...)
if isempty(slicez) && isempty(slicey)
# All indices in 'z' and 'y' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
elseif isempty(slicey)
# Call to the function that slices in z
data = zup(data, slicez, periodic, order)
data = yup(data, periodic, order)
elseif isempty(slicez)
# All indices in 'z' are loaded, don't worry about slicing
data = zup(data, periodic, order)
data = yup(data, slicey, periodic, order)
else
# Call to the function that slices in z and y
data = zup(data, slicez, periodic, order)
data = yup(data, slicey, periodic, order)
end
elseif destaggeroperation == zupxup
data = get_var(filename, params, varnr; slicey=slicey, kwargs...)
if isempty(slicez) && isempty(slicex)
# All indices in 'z' and 'x' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
elseif isempty(slicez)
# Call to the function that slices in x
data = xup(data, slicex, periodic, order)
data = zup(data, periodic, order)
elseif isempty(slicex)
                # All indices in 'x' are loaded, don't worry about slicing
data = xup(data, periodic, order)
data = zup(data, slicez, periodic, order)
else
# Call to the function that slices in x and z
data = xup(data, slicex, periodic, order)
data = zup(data, slicez, periodic, order)
end
elseif destaggeroperation == xupyup
data = get_var(filename, params, varnr; slicez=slicez, kwargs...)
if isempty(slicex) && isempty(slicey)
# All indices in 'x' and 'y' are loaded, don't worry about slicing
data = destaggeroperation(data,periodic,order)
elseif isempty(slicex)
# Call to the function that slices in y
data = yup(data, slicey, periodic, order)
data = xup(data, periodic, order)
elseif isempty(slicey)
# All indices in 'y' are loaded, don't worry about slicing
data = yup(data, periodic, order)
data = xup(data, slicex, periodic, order)
else
# Call to the function that slices in y and x.
data = yup(data, slicey, periodic, order)
data = xup(data, slicex, periodic, order)
end
else
error("Destaggering of varnnr $varnr is not implemented. "*
"Set the keyword-argument `destaggeroperation`"
)
end
end
return data
end
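# Usage sketch (low-level): destagger bz (primary variable 8) to cell centers.
# `zup` with `periodic=false` is the usual choice for vertical components.
#
#   bz = get_and_destagger_var(snap_file, params, 8;
#                              destaggeroperation=zup, periodic=false)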
"""
rotate(
data ::AbstractArray,
variable ::String,
rotation_axis::String,
)
Rotate the data about a `rotation_axis`.
"""
function rotate(
data ::AbstractArray,
variable ::String,
rotation_axis::String,
)
xcomponents = ("px", "bx", "ex", "ix")
ycomponents = ("py", "by", "ey", "iy")
zcomponents = ("pz", "bz", "ez", "iz")
if variable in ("r", "p", "tg")
return data # Scalar fields, do nothing
else
if rotation_axis == "x"
if variable in xcomponents
return data
elseif variable in ycomponents || variable in zcomponents
return -data
end
else
error("Rotation about $rotation_axis-axis is not implemented")
end
end
end
function rotate(
data ::AbstractVector,
variable ::String,
rotation_axis::String,
)
return [rotate(data[i], variable, rotation_axis) for i in eachindex(data)]
end
"""
function get_electron_density(
xp::BifrostExperiment,
snap::Integer,
kwargs...)
Function to calculate the electron density from a snapshot `snap`. Supports
slicing. Gas density `rho` and internal energy `e` are optional arguments and
can be passed (but they MUST be in cgs units). If these quantities already
exist, passing them will speed up the calculation of electron density.
`kwargs`:
units::String="si",
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
rho::Array{AbstractFloat,3}=Float32[;;;],
e::Array{AbstractFloat,3}=Float32[;;;],
tabfile::String="tabparam.in"
"""
function get_electron_density(
xp::BifrostExperiment,
snaps::Union{<:Integer, AbstractVector{<:Integer}};
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
kwargs...)
if typeof(snaps) <: Integer
var = get_electron_density(xp.expname,snaps,xp.expdir;
slicex=slicex,slicey=slicey,slicez=slicez,kwargs...)
return var
elseif typeof(snaps) <: AbstractVector{<:Integer}
var = Vector{Array{Float32,3}}(undef, length(snaps))
for (i,snap) in enumerate(snaps)
var[i] = get_electron_density(xp.expname,snap,xp.expdir;
slicex=slicex,slicey=slicey,slicez=slicez,kwargs...)
end
return var
end
end
"""
function get_electron_density(
expname::String,
snap::Integer,
expdir::String;
units::String="si",
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
rho::Array{T,3}=Float32[;;;],
e::Array{T,3}=Float32[;;;],
tabfile::String="tabparam.in"
) where {T<:AbstractFloat}
"""
function get_electron_density(
expname::String,
snap::Integer,
expdir::String;
units::String="si",
slicex::AbstractVector{<:Integer}=Int[],
slicey::AbstractVector{<:Integer}=Int[],
slicez::AbstractVector{<:Integer}=Int[],
rho::Array{T,3}=Float32[;;;],
e::Array{T,3}=Float32[;;;],
tabfile::String="tabparam.in"
) where {T<:AbstractFloat}
params = read_params(expname,snap,expdir)
# rho in g/cm^3
if isempty(rho)
varnr, file_ext = get_varnr_and_file_extension(params, "r")
tmp_file = string(joinpath(expdir,expname), "_", lpad(snap,3,"0"), file_ext)
rho = get_var(
tmp_file,
params,
varnr,
slicex=slicex,
slicey=slicey,
slicez=slicez
)
rho = convert_units(rho, "r", params, "cgs")
end
# internal energy in ergs
if isempty(e)
varnr, file_ext = get_varnr_and_file_extension(params, "e")
tmp_file = string(joinpath(expdir,expname), "_", lpad(snap,3,"0"), file_ext)
e = get_var(
tmp_file,
params,
varnr,
slicex=slicex,
slicey=slicey,
slicez=slicez
)
e = convert_units(e, "e", params, "cgs")
end
# Calculate internal energy per mass
ee = e ./ rho
# construct the EOS tables for interpolation of electron density
tabfile = joinpath(expdir,tabfile)
eos = EOSTables(tabfile)
if maximum(rho) > parse(Float64,eos.params["RhoMax"])
@warn "tab_interp: density outside table bounds. "*
"Table rho max=$(@sprintf("%.3e", parse(Float64,eos.params["RhoMax"]))), requested rho max=$(@sprintf("%.3e", maximum(rho)))"
end
    if minimum(rho) < parse(Float64,eos.params["RhoMin"])
@warn "tab_interp: density outside table bounds. "*
"Table rho min=$(@sprintf("%.3e", parse(Float64,eos.params["RhoMin"]))), requested rho min=$(@sprintf("%.3e", minimum(rho)))"
end
if maximum(ee) > parse(Float64,eos.params["EiMax"])
@warn "tab_interp: energy outside table bounds. "*
"Table Ei max=$(@sprintf("%.3e", parse(Float64,eos.params["EiMax"]))), requested ee max=$(@sprintf("%.3e", maximum(ee)))"
end
if minimum(ee) < parse(Float64,eos.params["EiMin"])
@warn "tab_interp: energy outside table bounds. "*
"Table Ei min=$(@sprintf("%.3e", parse(Float64,eos.params["EiMin"]))), requested ee min=$(@sprintf("%.3e", minimum(ee)))"
end
# Create interpolation table, takes the log of coordinates
itp_table = eos_interpolate(eos,3)
x = log.(ee)
y = log.(rho)
ne = itp_table.(x, y)
# take exp to remove log
ne = exp.(ne)
# Convert to si on request (cm^-3 --> m^-3)
if lowercase(units) == "si"
ne .*= 1f6
end
return ne
end
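# Usage sketch (hypothetical experiment): electron density of a sub-slab in SI
# units. Passing precomputed `rho` and `e` (which must be in cgs units) avoids
# re-reading them from file.
#
#   ne = get_electron_density(xp, 700; units="si", slicez=1:32)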
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 43411 |
# --- stagger operations
"""
function xup(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point upwards in the x-direction
"""
function xup(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
# inflate matrix in x direction
out = zeros(T, (n[1] + 5, n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[3:n[1]+2, :, :] .= arr[:, :, :]
if periodic
tmp[1:2, :, :] .= arr[end-1:end, :, :]
tmp[n[1]+3:end, :, :] .= arr[1:3, :, :]
else
# extrapolate bottom
for i = 1:2
@inbounds tmp[3-i, :, :] = 2.0 .* tmp[3, :, :] .- tmp[3+i, :, :]
end
# extrapolate top
for i = 1:3
@inbounds tmp[n[1]+2+i, :, :] = 2.0 .* tmp[n[1]+2, :, :] .- tmp[n[1]+2-i, :, :]
end
end
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 3:n[1]+2
out[i, j, k] =
a * (tmp[i, j, k] + tmp[i+1, j, k]) +
b * (tmp[i-1, j, k] + tmp[i+2, j, k]) +
c * (tmp[i-2, j, k] + tmp[i+3, j, k])
end
end
end
return out[3:end-3, :, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 1:n[1]-1
out[i, j, k] = 0.5f0 * (arr[i, j, k] + arr[i+1, j, k])
end
end
end
if periodic
out[end, :, :] .= 0.5f0 .* (arr[1, :, :] .+ arr[end, :, :])
else
out[end, :, :] .= arr[end, :, :]
end
return out
end
end
end
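# Minimal sketch of the stagger shift: for a linear ramp the interpolation is
# exact, so every value lands half a grid point upwards.
#
#   arr = reshape(Float32.(1:8), 8, 1, 1)
#   xup(arr, false)                # ≈ reshape(Float32.(1.5:8.5), 8, 1, 1)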
function xup(
arr::Array{T,3},
slicex::AbstractVector{<:Integer},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
# inflate matrix in x direction
out = similar(arr, (length(slicex), n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[3:n[1]+2, :, :] .= arr[:, :, :]
if periodic
tmp[1:2, :, :] .= arr[end-1:end, :, :]
tmp[n[1]+3:end, :, :] .= arr[1:3, :, :]
else
# extrapolate bottom
for i = 1:2
@inbounds tmp[3-i, :, :] = 2.0 .* tmp[3, :, :] .- tmp[3+i, :, :]
end
# extrapolate top
for i = 1:3
@inbounds tmp[n[1]+2+i, :, :] = 2.0 .* tmp[n[1]+2, :, :] .- tmp[n[1]+2-i, :, :]
end
end
slicex = slicex .+ 2
for k = 1:n[3]
for j = 1:n[2]
for (i,slice) in enumerate(slicex)
@inbounds out[i, j, k] =
a * (tmp[slice, j, k] + tmp[slice+1, j, k]) +
b * (tmp[slice-1, j, k] + tmp[slice+2, j, k]) +
c * (tmp[slice-2, j, k] + tmp[slice+3, j, k])
end
end
end
return out
else # order 2
out = similar(arr, (length(slicex), n[2], n[3]))
if n[1] in slicex
index = findfirst(x->x==n[1], slicex)
if periodic
out[index, :, :] .= 0.5f0 .* (arr[1, :, :] .+ arr[end, :, :])
else
out[index, :, :] .= arr[end, :, :]
end
end
for k = 1:n[3]
for j = 1:n[2]
for (i,slice) in enumerate(slicex)
                        if slice < n[1]
@inbounds out[i, j, k] = 0.5f0 * (arr[slice, j, k] + arr[slice+1, j, k])
end
end
end
end
return out
end
end
end
"""
dxup(
arr::Array{T,3},
        dx::Vector{T},
        periodic::Bool=true,
order::Int=6
)
Computes the spatial derivative in the x-direction of every entry in `arr`
shifted a half grid point upwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dxup(
arr::Array{T,3},
dx::Vector{T},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
# inflate matrix in x direction
out = zeros(T, (n[1] + 5, n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[3:n[1]+2, :, :] .= arr[:, :, :]
if periodic
tmp[1:2, :, :] .= arr[end-1:end, :, :]
tmp[n[1]+3:end, :, :] .= arr[1:3, :, :]
else
# extrapolate bottom
for i = 1:2
@inbounds tmp[3-i, :, :] = 2.0 .* tmp[3, :, :] .- tmp[3+i, :, :]
end
# extrapolate top
for i = 1:3
@inbounds tmp[n[1]+2+i, :, :] = 2.0 .* tmp[n[1]+2, :, :] .- tmp[n[1]+2-i, :, :]
end
end
@turbo for k = 1:n[3]
for j = 1:n[2]
for i in 3:n[1]+2
out[i, j, k] =
dx[i-2] * (
a * (tmp[i+1, j, k] - tmp[i, j, k]) +
b * (tmp[i+2, j, k] - tmp[i-1, j, k]) +
c * (tmp[i+3, j, k] - tmp[i-2, j, k])
)
end
end
end
return out[3:end-3, :, :]
        else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 1:n[1]-1
out[i, j, k] = dx[i] * (arr[i+1, j, k] - arr[i, j, k])
end
end
end
if periodic
out[end, :, :] .= dx[end] .* (arr[1, :, :] .- arr[end, :, :])
end
return out
end
end
end
"""
function xdn(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point downwards in the x-direction
"""
function xdn(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
# inflate matrix in x direction
out = zeros(T, (n[1] + 5, n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[4:n[1]+3, :, :] .= arr[:, :, :]
if periodic
tmp[1:3, :, :] .= arr[end-2:end, :, :]
tmp[n[1]+4:end, :, :] .= arr[1:2, :, :]
else
# extrapolate bottom
for i = 1:3
@inbounds tmp[4-i, :, :] = 2.0 .* tmp[4, :, :] .- tmp[4+i, :, :]
end
# extrapolate top
for i = 1:2
@inbounds tmp[n[1]+3+i, :, :] = 2.0 .* tmp[n[1]+3, :, :] .- tmp[n[1]+3-i, :, :]
end
end
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 4:n[1]+3
out[i, j, k] =
a * (tmp[i-1, j, k] + tmp[i, j, k]) +
b * (tmp[i-2, j, k] + tmp[i+1, j, k]) +
c * (tmp[i-3, j, k] + tmp[i+2, j, k])
end
end
end
return out[4:end-2, :, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 2:n[1]
out[i, j, k] = 0.5f0 * (arr[i-1, j, k] + arr[i, j, k])
end
end
end
if periodic
out[1, :, :] .= 0.5f0 .* (arr[end, :, :] .+ arr[1, :, :])
else
out[1, :, :] .= arr[1, :, :]
end
return out
end
end
end
function xdn(
arr::Array{T,3},
slicex::AbstractVector{<:Integer},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
# inflate matrix in x direction
out = similar(arr, (length(slicex), n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[4:n[1]+3, :, :] .= arr[:, :, :]
if periodic
tmp[1:3, :, :] .= arr[end-2:end, :, :]
tmp[n[1]+4:end, :, :] .= arr[1:2, :, :]
else
# extrapolate bottom
for i = 1:3
@inbounds tmp[4-i, :, :] = 2.0 .* tmp[4, :, :] .- tmp[4+i, :, :]
end
# extrapolate top
for i = 1:2
@inbounds tmp[n[1]+3+i, :, :] = 2.0 .* tmp[n[1]+3, :, :] .- tmp[n[1]+3-i, :, :]
end
end
slicex = slicex .+ 3
for k=1:n[3]
for j = 1:n[2]
for (i,slice) in enumerate(slicex)
@inbounds out[i, j, k] =
a * (tmp[slice-1, j, k] + tmp[slice, j, k]) +
b * (tmp[slice-2, j, k] + tmp[slice+1, j, k]) +
c * (tmp[slice-3, j, k] + tmp[slice+2, j, k])
end
end
end
return out
else # order 2
out = similar(arr, (length(slicex), n[2], n[3]))
if 1 in slicex
index = findfirst(x->x==1, slicex)
if periodic
out[index, :, :] .= 0.5f0 .* (arr[end, :, :] .+ arr[1, :, :])
else
out[index, :, :] .= arr[1, :, :]
end
end
            for k = 1:n[3]
                for j = 1:n[2]
                    for (i,slice) in enumerate(slicex)
                        if slice > 1
@inbounds out[i, j, k] = 0.5f0 * (arr[slice-1, j, k] + arr[slice, j, k])
end
end
end
end
return out
end
end
end
"""
dxdn(
arr::Array{T,3},
        dx::Vector{T},
        periodic::Bool=true,
order::Int=6
)
Computes the spatial derivative in the x-direction of every entry in `arr`
shifted a half grid point downwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dxdn(
arr::Array{T,3},
dx::Vector{T},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[1] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
# inflate matrix in x direction
out = zeros(T, (n[1] + 5, n[2], n[3]))
tmp = zeros(T, (n[1] + 5, n[2], n[3]))
tmp[4:n[1]+3, :, :] .= arr[:, :, :]
if periodic
tmp[1:3, :, :] .= arr[end-2:end, :, :]
tmp[n[1]+4:end, :, :] .= arr[1:2, :, :]
else
# extrapolate bottom
for i = 1:3
@inbounds tmp[4-i, :, :] = 2.0 .* tmp[4, :, :] .- tmp[4+i, :, :]
end
# extrapolate top
for i = 1:2
@inbounds tmp[n[1]+3+i, :, :] = 2.0 .* tmp[n[1]+3, :, :] .- tmp[n[1]+3-i, :, :]
end
end
for k = 1:n[3]
@turbo for j = 1:n[2]
for i = 4:n[1]+3
out[i, j, k] =
dx[i-3] * (
a * (tmp[i, j, k] - tmp[i-1, j, k]) +
b * (tmp[i+1, j, k] - tmp[i-2, j, k]) +
c * (tmp[i+2, j, k] - tmp[i-3, j, k])
)
end
end
end
return out[4:end-2, :, :]
        else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]
for i = 2:n[1]
out[i, j, k] = dx[i] * (arr[i, j, k] - arr[i-1, j, k])
end
end
end
if periodic
out[1, :, :] .= dx[1] .* (arr[1, :, :] .- arr[end, :, :])
end
return out
end
end
end
"""
function yup(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point upwards in the y-direction
"""
function yup(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in y direction
out = zeros(T, (n[1], n[2] + 5, n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
tmp[:, 3:n[2]+2, :] .= arr[:, :, :]
if periodic
tmp[:, 1:2, :] .= arr[:, end-1:end, :]
tmp[:, n[2]+3:end, :] .= arr[:, 1:3, :]
else
# extrapolate bottom
for j = 1:2
@inbounds tmp[:, 3-j, :] = 2.0 .* tmp[:, 3, :] .- tmp[:, 3+j, :]
end
# extrapolate top
for j = 1:3
@inbounds tmp[:, n[2]+2+j, :] = 2.0 .* tmp[:, n[2]+2, :] .- tmp[:, n[2]+2-j, :]
end
end
@turbo for k = 1:n[3]
for j = 3:n[2]+2
for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j, k] + tmp[i, j+1, k]) +
b * (tmp[i, j-1, k] + tmp[i, j+2, k]) +
c * (tmp[i, j-2, k] + tmp[i, j+3, k])
end
end
end
return out[:, 3:end-3, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]-1
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j, k] + arr[i, j+1, k])
end
end
end
if periodic
out[:, end, :] .= 0.5f0 .* (arr[:, end, :] .+ arr[:, 1, :])
else
out[:, end, :] .= arr[:, end, :]
end
return out
end
end
end
function yup(
arr::Array{T,3},
slicey::AbstractVector{<:Integer},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in y direction
out = similar(arr, (n[1], length(slicey), n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
tmp[:, 3:n[2]+2, :] .= arr[:, :, :]
if periodic
tmp[:, 1:2, :] .= arr[:, end-1:end, :]
tmp[:, n[2]+3:end, :] .= arr[:, 1:3, :]
else
# extrapolate bottom
for j = 1:2
@inbounds tmp[:, 3-j, :] = 2.0 .* tmp[:, 3, :] .- tmp[:, 3+j, :]
end
# extrapolate top
for j = 1:3
@inbounds tmp[:, n[2]+2+j, :] = 2.0 .* tmp[:, n[2]+2, :] .- tmp[:, n[2]+2-j, :]
end
end
slicey = slicey .+ 2
for k = 1:n[3]
for (j,slice) in enumerate(slicey)
@turbo for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, slice, k] + tmp[i, slice+1, k]) +
b * (tmp[i, slice-1, k] + tmp[i, slice+2, k]) +
c * (tmp[i, slice-2, k] + tmp[i, slice+3, k])
end
end
end
return out
else # order 2
out = similar(arr, (n[1], length(slicey), n[3]))
if n[2] in slicey
index = findfirst(x->x==n[2], slicey)
if periodic
out[:, index, :] .= 0.5f0 .* (arr[:, end, :] .+ arr[:, 1, :])
else
out[:, index, :] .= arr[:, end, :]
end
end
for k = 1:n[3]
for (j,slice) in enumerate(slicey)
                    if slice < n[2]
@turbo for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, slice, k] + arr[i, slice+1, k])
end
end
end
end
return out
end
end
end
"""
dyup(
arr::Array{T,3},
        dy::Vector{T},
        periodic::Bool=true,
order::Int=6
)
Computes the spatial derivative in the y-direction of every entry in `arr`
shifted a half grid point upwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dyup(
arr::Array{T,3},
dy::Vector{T},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
            # inflate matrix in y direction
out = zeros(T, (n[1], n[2] + 5, n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
            tmp[:, 3:n[2]+2, :] .= arr[:, :, :]
if periodic
tmp[:, 1:2, :] .= arr[:, end-1:end, :]
tmp[:, n[2]+3:end, :] .= arr[:, 1:3, :]
else
# extrapolate bottom
for j = 1:2
@inbounds tmp[:, 3-j, :] .= 2.0 .* tmp[:, 3, :] .- tmp[:, 3+j, :]
end
# extrapolate top
for j = 1:3
@inbounds tmp[:, n[2]+2+j, :] .= 2.0 .* tmp[:, n[2]+2, :] .- tmp[:, n[2]+2-j, :]
end
end
@turbo for k = 1:n[3]
for j = 3:n[2]+2
for i = 1:n[1]
out[i, j, k] =
dy[j-2] * (
a * (tmp[i, j+1, k] - tmp[i, j, k]) +
b * (tmp[i, j+2, k] - tmp[i, j-1, k]) +
c * (tmp[i, j+3, k] - tmp[i, j-2, k])
)
end
end
end
return out[:, 3:end-3, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 1:n[2]-1
for i = 1:n[1]
out[i, j, k] = dy[j] * (arr[i, j+1, k] - arr[i, j, k])
end
end
end
if periodic
out[:, end, :] .= dy[end] .* (arr[:, 1, :] - arr[:, end, :])
end
return out
end
end
end
"""
function ydn(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point downwards in the y-direction
"""
function ydn(
arr::Array{T,3},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in y direction
out = zeros(T, (n[1], n[2] + 5, n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
tmp[:, 4:n[2]+3, :] .= arr[:, :, :]
if periodic
tmp[:, 1:3, :] .= arr[:, end-2:end, :]
tmp[:, n[2]+4:end, :] .= arr[:, 1:2, :]
else
# extrapolate bottom
for j = 1:3
@inbounds tmp[:, 4-j, :] .= 2.0 .* tmp[:, 4, :] .- tmp[:, 4+j, :]
end
# extrapolate top
for j = 1:2
@inbounds tmp[:, n[2]+3+j, :] .= 2.0 .* tmp[:, n[2]+3, :] .- tmp[:, n[2]+3-j, :]
end
end
@turbo for k = 1:n[3]
for j = 4:n[2]+3
for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j-1, k] + tmp[i, j, k]) +
b * (tmp[i, j-2, k] + tmp[i, j+1, k]) +
c * (tmp[i, j-3, k] + tmp[i, j+2, k])
end
end
end
return out[:, 4:end-2, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 2:n[2]
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j-1, k] + arr[i, j, k])
end
end
end
if periodic
out[:, 1, :] .= 0.5f0 .* (arr[:, end, :] .+ arr[:, 1, :])
else
out[:, 1, :] .= arr[:, 1, :]
end
return out
end
end
end
function ydn(
arr::Array{T,3},
slicey::AbstractVector{<:Integer},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in y direction
out = similar(arr, (n[1], length(slicey), n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
tmp[:, 4:n[2]+3, :] .= arr[:, :, :]
if periodic
tmp[:, 1:3, :] .= arr[:, end-2:end, :]
tmp[:, n[2]+4:end, :] .= arr[:, 1:2, :]
else
# extrapolate bottom
for j = 1:3
@inbounds tmp[:, 4-j, :] .= 2.0 .* tmp[:, 4, :] .- tmp[:, 4+j, :]
end
# extrapolate top
for j = 1:2
@inbounds tmp[:, n[2]+3+j, :] .= 2.0 .* tmp[:, n[2]+3, :] .- tmp[:, n[2]+3-j, :]
end
end
slicey = slicey .+ 3
            for k = 1:n[3]
                for (j,slice) in enumerate(slicey)
                    @turbo for i = 1:n[1]
                        out[i, j, k] =
                            a * (tmp[i, slice-1, k] + tmp[i, slice, k]) +
                            b * (tmp[i, slice-2, k] + tmp[i, slice+1, k]) +
                            c * (tmp[i, slice-3, k] + tmp[i, slice+2, k])
                    end
                end
            end
            return out
else # order 2
out = similar(arr, (n[1], length(slicey), n[3]))
            if 1 in slicey
                index = findfirst(x->x==1, slicey)
                if periodic
                    out[:, index, :] .= 0.5f0 .* (arr[:, end, :] .+ arr[:, 1, :])
                else
                    out[:, index, :] .= arr[:, 1, :]
                end
            end
            for k = 1:n[3]
                for (j,slice) in enumerate(slicey)
                    if slice > 1
@turbo for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, slice-1, k] + arr[i, slice, k])
end
end
end
end
return out
end
end
end
"""
dydn(
arr::Array{T,3},
        dy::Vector{T},
        periodic::Bool=true,
order::Int=6
)
Computes the spatial derivative in the y-direction of every entry in `arr`
shifted a half grid point downwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dydn(
arr::Array{T,3},
dy::Vector{T},
periodic::Bool=true,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[2] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
            # inflate matrix in y direction
out = zeros(T, (n[1], n[2] + 5, n[3]))
tmp = zeros(T, (n[1], n[2] + 5, n[3]))
tmp[:, 4:n[2]+3, :] .= arr[:, :, :]
if periodic
tmp[:, 1:3, :] .= arr[:, end-2:end, :]
tmp[:, n[2]+4:end, :] .= arr[:, 1:2, :]
else
# extrapolate bottom
for j = 1:3
@inbounds tmp[:, 4-j, :] .= 2.0 .* tmp[:, 4, :] .- tmp[:, 4+j, :]
end
# extrapolate top
for j = 1:2
@inbounds tmp[:, n[2]+3+j, :] .= 2.0 .* tmp[:, n[2]+3, :] .- tmp[:, n[2]+3-j, :]
end
end
@turbo for k = 1:n[3]
for j = 4:n[2]+3
for i = 1:n[1]
out[i, j, k] =
dy[j-3] * (
a * (tmp[i, j, k] - tmp[i, j-1, k]) +
b * (tmp[i, j+1, k] - tmp[i, j-2, k]) +
c * (tmp[i, j+2, k] - tmp[i, j-3, k])
)
end
end
end
return out[:, 4:end-2, :]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]
for j = 2:n[2]
for i = 1:n[1]
out[i, j, k] = dy[j] * (arr[i, j, k] - arr[i, j-1, k])
end
end
end
if periodic
out[:, 1, :] .= dy[1] .* (arr[:, 1, :] .- arr[:, end, :])
end
return out
end
end
end
"""
function zup(
arr::Array{T,3},
        periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point upwards in the z-direction
"""
function zup(
arr::Array{T,3},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in z direction
out = zeros(T, (n[1], n[2], n[3] + 5))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 3:n[3]+2] .= arr[:, :, :]
if periodic
tmp[:, :, 1:2] .= arr[:, :, end-1:end]
tmp[:, :, n[3]+3:end] .= arr[:, :, 1:3]
else
# extrapolate bottom
for k = 1:2
@inbounds tmp[:, :, 3-k] = 2.0 .* tmp[:, :, 3] .- tmp[:, :, 3+k]
end
# extrapolate top
for k = 1:3
@inbounds tmp[:, :, n[3]+2+k] = 2.0 .* tmp[:, :, n[3]+2] .- tmp[:, :, n[3]+2-k]
end
end
@turbo for k = 3:n[3]+2
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j, k] + tmp[i, j, k+1]) +
b * (tmp[i, j, k-1] + tmp[i, j, k+2]) +
c * (tmp[i, j, k-2] + tmp[i, j, k+3])
end
end
end
return out[:, :, 3:end-3]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]-1
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j, k] + arr[i, j, k+1])
end
end
end
if periodic
out[:, :, end] .= 0.5f0 .* (arr[:, :, end] .+ arr[:, :, 1])
else
out[:, :, end] .= arr[:, :, end]
end
return out
end
end
end
function zup(
arr::Array{T,3},
slicez::AbstractVector{<:Integer},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
out = similar(arr, n[1], n[2], length(slicez))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 3:n[3]+2] .= arr[:, :, :]
if periodic
tmp[:, :, 1:2] .= arr[:, :, end-1:end]
tmp[:, :, n[3]+3:end] .= arr[:, :, 1:3]
else
# extrapolate bottom
for k = 1:2
@inbounds tmp[:, :, 3-k] = 2.0 .* tmp[:, :, 3] .- tmp[:, :, 3+k]
end
# extrapolate top
for k = 1:3
@inbounds tmp[:, :, n[3]+2+k] = 2.0 .* tmp[:, :, n[3]+2] .- tmp[:, :, n[3]+2-k]
end
end
slicez = slicez .+ 2
for (k,slice) in enumerate(slicez)
for j = 1:n[2]
@turbo for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j, slice] + tmp[i, j, slice+1]) +
b * (tmp[i, j, slice-1] + tmp[i, j, slice+2]) +
c * (tmp[i, j, slice-2] + tmp[i, j, slice+3])
end
end
end
return out
else # order 2
out = similar(arr, (n[1], n[2], length(slicez)))
if n[3] in slicez
index = findfirst(x->x==n[3], slicez)
if periodic
out[:, :, index] .= 0.5f0 .* (arr[:, :, end] .+ arr[:, :, 1])
else
out[:, :, index] .= arr[:, :, end]
end
            end
            for (k,slice) in enumerate(slicez)
                if slice < n[3]
@turbo for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j, slice] + arr[i, j, slice+1])
end
end
end
end
return out
end
end
end
"""
dzup(
arr::Array{T,3},
dz::Vector{T},
periodic::Bool=false,
order::Int=6
)
Computes the spatial derivative in the z-direction of every entry in `arr`
shifted a half grid point upwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dzup(
arr::Array{T,3},
dz::Vector{T},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
            # inflate matrix in z direction
out = zeros(T, (n[1], n[2], n[3] + 5))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 3:n[3]+2] .= arr[:, :, :]
if periodic
tmp[:, :, 1:2] .= arr[:, :, end-1:end]
tmp[:, :, n[3]+3:end] .= arr[:, :, 1:3]
else
# extrapolate bottom
for k = 1:2
@inbounds tmp[:, :, 3-k] .= 2.0 .* tmp[:, :, 3] .- tmp[:, :, 3+k]
end
# extrapolate top
for k = 1:3
@inbounds tmp[:, :, n[3]+2+k] .= 2.0 .* tmp[:, :, n[3]+2] .- tmp[:, :, n[3]+2-k]
end
end
@turbo for k = 3:n[3]+2
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] =
dz[k-2] * (
a * (tmp[i, j, k+1] - tmp[i, j, k]) +
b * (tmp[i, j, k+2] - tmp[i, j, k-1]) +
c * (tmp[i, j, k+3] - tmp[i, j, k-2])
)
end
end
end
return out[:, :, 3:end-3]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 1:n[3]-1
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = dz[k] * (arr[i, j, k+1] - arr[i, j, k])
end
end
end
if periodic
out[:, :, end] .= dz[end] .* (arr[:, :, 1] .- arr[:, :, end])
end
return out
end
end
end
"""
function zdn(
arr::Array{T,3},
        periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
Stagger operation on `arr` by a 5th order polynomial interpolation,
shifting the variable half a grid point downwards in the z-direction
"""
function zdn(
arr::Array{T,3},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
            # inflate matrix in z direction
out = zeros(T, (n[1], n[2], n[3] + 5))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 4:n[3]+3] .= arr[:, :, :]
if periodic
tmp[:, :, 1:3] .= arr[:, :, end-2:end]
tmp[:, :, n[3]+4:end] .= arr[:, :, 1:2]
else
# extrapolate bottom
for k = 1:3
@inbounds tmp[:, :, 4-k] .= 2.0 .* tmp[:, :, 4] .- tmp[:, :, 4+k]
end
# extrapolate top
for k = 1:2
@inbounds tmp[:, :, n[3]+3+k] .= 2.0 .* tmp[:, :, n[3]+3] .- tmp[:, :, n[3]+3-k]
end
end
@turbo for k = 4:n[3]+3
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j, k-1] + tmp[i, j, k]) +
b * (tmp[i, j, k-2] + tmp[i, j, k+1]) +
c * (tmp[i, j, k-3] + tmp[i, j, k+2])
end
end
end
return out[:, :, 4:end-2]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 2:n[3]
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j, k-1] + arr[i, j, k])
end
end
end
if periodic
out[:, :, 1] .= 0.5f0 .* (arr[:, :, end] .+ arr[:, :, 1])
else
out[:, :, 1] .= arr[:, :, 1]
end
return out
end
end
end
function zdn(
arr::Array{T,3},
slicez::AbstractVector{<:Integer},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return arr[:, :, :]
else
if order == 6
c = 3.0 / 256.0
b = -25.0 / 256.0
a = 0.5 - b - c
out = similar(arr, n[1], n[2], length(slicez))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 4:n[3]+3] .= arr[:, :, :]
if periodic
tmp[:, :, 1:3] .= arr[:, :, end-2:end]
tmp[:, :, n[3]+4:end] .= arr[:, :, 1:2]
else
# extrapolate bottom
for k = 1:3
@inbounds tmp[:, :, 4-k] .= 2.0 .* tmp[:, :, 4] .- tmp[:, :, 4+k]
end
# extrapolate top
for k = 1:2
@inbounds tmp[:, :, n[3]+3+k] .= 2.0 .* tmp[:, :, n[3]+3] .- tmp[:, :, n[3]+3-k]
end
end
slicez = slicez .+ 3
for (k,slice) in enumerate(slicez)
for j = 1:n[2]
@turbo for i = 1:n[1]
out[i, j, k] =
a * (tmp[i, j, slice-1] + tmp[i, j, slice]) +
b * (tmp[i, j, slice-2] + tmp[i, j, slice+1]) +
c * (tmp[i, j, slice-3] + tmp[i, j, slice+2])
end
end
end
return out
else # order 2
out = similar(arr, (n[1], n[2], length(slicez)))
if 1 in slicez
index = findfirst(x->x==1, slicez)
if periodic
out[:, :, index] .= 0.5f0 .* (arr[:, :, end] .+ arr[:, :, 1])
else
out[:, :, index] .= arr[:, :, 1]
end
            end
            for (k,slice) in enumerate(slicez)
                if slice > 1
@turbo for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = 0.5f0 * (arr[i, j, slice-1] + arr[i, j, slice])
end
end
end
end
return out
end
end
end
"""
dzdn(
arr::Array{T,3},
dz::Vector{T},
periodic::Bool=false,
order::Int=6
)
Computes the spatial derivative in the z-direction of every entry in `arr`
shifted a half grid point downwards. Defaults to the 6th order accurate Bifrost
derivative with `order=6`, optional 2nd order accurate derivative with keyword
`order=2`
"""
function dzdn(
arr::Array{T,3},
dz::Vector{T},
periodic::Bool=false,
order::Int=6
) where {T<:AbstractFloat}
n = size(arr)
@assert length(n) == 3
if (n[3] == 1)
return zeros(T, (n[1], n[2], n[3]))
else
if order == 6
c = (-1.0 + (3.0^5 - 3.0) / (3.0^3 - 3.0)) / (5.0^5 - 5.0 - 5.0 * (3.0^5 - 3))
b = (-1.0 - 120.0 * c) / 24.0
a = (1.0 - 3.0 * b - 5.0 * c)
            # inflate matrix in z direction
out = zeros(T, (n[1], n[2], n[3] + 5))
tmp = zeros(T, (n[1], n[2], n[3] + 5))
tmp[:, :, 4:n[3]+3] .= arr[:, :, :]
if periodic
tmp[:, :, 1:3] .= arr[:, :, end-2:end]
tmp[:, :, n[3]+4:end] .= arr[:, :, 1:2]
else
# extrapolate bottom
for k = 1:3
@inbounds tmp[:, :, 4-k] .= 2.0 .* tmp[:, :, 4] .- tmp[:, :, 4+k]
end
# extrapolate top
for k = 1:2
@inbounds tmp[:, :, n[3]+3+k] .= 2.0 .* tmp[:, :, n[3]+3] .- tmp[:, :, n[3]+3-k]
end
end
@turbo for k = 4:n[3]+3
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] =
dz[k-3] * (
a * (tmp[i, j, k] - tmp[i, j, k-1]) +
b * (tmp[i, j, k+1] - tmp[i, j, k-2]) +
c * (tmp[i, j, k+2] - tmp[i, j, k-3])
)
end
end
end
return out[:, :, 4:end-2]
else # order 2
out = zeros(T, (n[1], n[2], n[3]))
@turbo for k = 2:n[3]
for j = 1:n[2]
for i = 1:n[1]
out[i, j, k] = dz[k] * (arr[i, j, k] - arr[i, j, k-1])
end
end
end
if periodic
out[:, :, 1] .= dz[1] .* (arr[:, :, 1] .- arr[:, :, end])
end
return out
end
end
end
"""
yupzup(data, args...)
For destaggering variables located at cell edges, in particular along the x-axis.
"""
function yupzup(data, args...)
yup(zup(data, args...), args...)
end
"""
zupxup(data, args...)
For destaggering variables located at cell edges, in particular along the y-axis.
"""
function zupxup(data, args...)
zup(xup(data, args...), args...)
end
"""
xupyup(data, args...)
For destaggering variables located at cell edges, in particular along the z-axis.
"""
function xupyup(data, args...)
xup(yup(data, args...), args...)
end
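# Sketch: edge-centered variables need two shifts to reach cell centers, which
# is what the `destaggeroperation` table encodes ("ex" => yupzup, etc.). Note
# that the same `periodic` flag is applied to both shifts.
#
#   ex_center = yupzup(ex_data, false, 6)   # ex_data: assumed edge-centered array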
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 5465 |
"""
# Overview
Script for converting from Bifrost's simulation units to cgs or SI units.
Based on https://github.com/ITA-Solar/Bifrost/blob/develop/IDL/util/br_make_fits.pro
Many auxiliary variables are missing conversion factors. Feel free to add.
## Note on the electric field
The conversion of electric field from cgs to SI is based on dimensional analysis of
Ohm's law. In cgs-units, Ohm's law reads
ηJ = E + (u x B) / c.
In SI-units, one omits the lightspeed constant
ηJ = E + u x B.
Hence the division/multiplication by c during conversion.
"""
"""
cgs_to_SI_conversion_factors
Factors for converting some physical quantities from cgs-units to SI-units.
"""
const cgs_to_SI_conversion_factors = Dict(
# Pressure: g/s^2/cm * 1f-3 kg/g * 1f2 cm/m = 1f-1 kg/s^2/m
"p" => 1e-1,
# Gas density: g/cm^3 * 1f-3 kg/g * 1f6 cm^3/m^3 = 1f3 kg/m^3
"r" => 1e3,
# Momentum: g/cm^2/s * 1f-3 kg/g * 1f4 cm^2/m^2 = 1f1 kg/m^2/s
"px" => 1e1,
"py" => 1e1,
"pz" => 1e1,
# Bulk velocity: cm/s * 1f-2 m/cm = 1f-2 m/s
"ux" => 1e-2,
"uy" => 1e-2,
"uz" => 1e-2,
# Internal energy: erg/cm^3 * 1f-7 J/erg * 1f6 cm^3/m = 1f-1 J/m^3
"e" => 1e-1,
    # Dissipation coefficients/energy terms: erg/cm^3/s * 1f-7 J/erg * 1f6 cm^3/m^3 = 1f-1 W/m^3
"qvisc" => 1e-1,
"qjoule" => 1e-1,
"qpdv" => 1e-1,
"qrdiff" => 1e-1,
"qediff" => 1e-1,
"qeadv" => 1e-1,
# Magnetic field: G * 1f-4 T/G = 1f-4 T
"bx" => 1e-4,
"by" => 1e-4,
"bz" => 1e-4,
# Electric field: statV/cm * 1f-2 m/cm * 1f-4 T/G * c[cm/s] = 2.998f4 V/m
"ex" => 2.99792458e4,
"ey" => 2.99792458e4,
"ez" => 2.99792458e4,
# Temperature: K = K
"tg" => 1.0,
# Position
"x" => 1e-2,
"y" => 1e-2,
"z" => 1e-2,
)
const c_in_cgs = 2.99792458e10
"""
convert_units(
data ::AbstractArray,
variable::String,
params ::Dict{String,String},
units ::String,
)
Convert the `data` from code `units` to cgs or SI. Conversion factor depends
on `variable` and snapshot `params`.
"""
function convert_units(
data ::AbstractArray,
variable::String,
params ::Dict{String,String},
units ::String,
)
# Working floating precision
# Data is a vector of snapshots and has type Vector{AbstractArray}.
wfp = eltype(eltype(data))
if lowercase(units) == "si"
conversionfactor = code_to_cgs(variable, params)
try conversionfactor *= cgs_to_SI_conversion_factors[variable]
catch
error(
"Conversion to SI-units of variable $variable from CGS-units is" *
" not implemented."
)
end
return data * wfp(conversionfactor)
elseif lowercase(units) == "cgs"
return data * wfp(code_to_cgs(variable, params))
elseif lowercase(units) == "code"
# Do nothing
return data
else
throw(ErrorException("Unit conversion '$units' does not exits"))
end
end
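# Usage sketch: convert a density array from code units to SI. This multiplies
# by u_r from the snapshot params and by 1e3 (g/cm^3 -> kg/m^3).
#
#   rho_si = convert_units(rho_code, "r", params, "si")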
"""
code_to_cgs(
variable::String,
params ::Dict{String,String},
)
Conversion factor of `variable` from code units to cgs units.
"""
function code_to_cgs(
variable::String,
params ::Dict{String,String},
)
if variable == "r" # Density
return parse(Float64, params["u_r"])
elseif variable == "e" # Energy
return parse(Float64, params["u_e"])
elseif variable == "tg" # Gas temperature
return 1.0 # nothing to do
elseif variable == "p" # Pressure
return parse(Float64, params["u_p"])
elseif variable in ("px", "py", "pz") # Momentum
return parse(Float64, params["u_r"])*parse(Float32,params["u_u"])
elseif variable in ("bx", "by", "bz") # Magnetic field
return parse(Float64, params["u_B"])
#elseif variable in ("ix", "iy", "iz") # Current density
# not implemented yet
elseif variable in ("ex", "ey", "ez") # Electric field
u_u = parse(Float64, params["u_u"])
u_B = parse(Float64, params["u_B"])
return u_u * u_B / c_in_cgs
elseif variable in ("qvisc", "qjoule", "qpdv", "qrdiff", "qediff", "qeadv")
return parse(Float64, params["u_e"])/parse(Float64, params["u_t"])
else
throw(ErrorException(
"Conversion to cgs-units of variable $variable is not implemented."
))
end
end
"""
    convert_timeunits(
        t     ::Union{AbstractArray, AbstractFloat},
        params::Dict{String,String}
    )
Converts snapshot time to seconds
"""
function convert_timeunits(
t ::Union{AbstractArray, AbstractFloat},
params::Dict{String,String}
)
t *= parse(Float64, params["u_t"])
end
function convert_axesunits(
mesh::BifrostMesh,
params::Dict{String, String}
;
units::String="code"
)
wfp = eltype(mesh.x)
if units == "cgs"
u_l = parse(Float64, params["u_l"])
conversionfactor = wfp(u_l)
elseif units == "si"
u_l = parse(Float64, params["u_l"])
conversionfactor = wfp(u_l*cgs_to_SI_conversion_factors["x"])
elseif units == "code"
conversionfactor = wfp(1.0)
else
throw(ErrorException("Unit conversion '$units' is not implemented"))
end
return mesh.x*conversionfactor,
mesh.y*conversionfactor,
mesh.z*conversionfactor
end
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 5825 |
"""
primary_vars
The primary variables and their storage order in a Bifrost .snap binary file.
"""
const primary_vars = Dict(
"r" => 1,
"px" => 2,
"py" => 3,
"pz" => 4,
"e" => 5,
"bx" => 6,
"by" => 7,
"bz" => 8,
)
"""
get_snapsize_and_numvars(
params::Dict{String,String},
)
Returns snapsize (mx, my, mz), number of primary variables and number of
auxiliary variables, given the snapshot-parameters.
"""
function get_snapsize_and_numvars(
params::Dict{String,String},
)
return get_snapsize(params), get_numvars(params)
end
"""
get_snapsize(
params::Dict{String,String},
)
Returns snapsize (mx, my, mz) given the snapshot-parameters.
"""
function get_snapsize(
params::Dict{String,String},
)
mx = parse(Int, params["mx"])
my = parse(Int, params["my"])
mz = parse(Int, params["mz"])
snapsize::Tuple{Int64, Int64, Int64} = mx, my, mz
return snapsize
end
"""
get_snapsize(
mesh::BifrostMesh,
)
Returns snapsize (mx, my, mz) given a Bifrost-mesh.
"""
function get_snapsize(
mesh::BifrostMesh,
)
return (mesh.mx, mesh.my, mesh.mz)
end
"""
get_numvars(
params::Dict{String,String},
)
Returns number of primary variables and number of
auxiliary variables, given the snapshot-parameters.
"""
function get_numvars(
params::Dict{String,String},
)
if parse(Int, params["do_mhd"]) == 1
numvars = 8
else
numvars = 5
end
numauxvars::Int64 = length(split(params["aux"]))
return numvars, numauxvars
end
"""
get_snap_numbers(
expdir::String,
expname::String="none"
;
findall::Bool=false,
filenames::Vector{String}=String[]
)
Finds all files in the format 'expname_XXX.snap' in the experiment directory
`exp_dir`, and returns a vector of the snapshots XXX. If `expname` is not
given, it is assumed that the name of the simulation directory matches the
experiment name.
"""
function get_snap_numbers(
xp::BifrostExperiment;
findall=false,
filenames::Vector{String}=String[]
)
get_snap_numbers(xp.expdir,xp.expname,findall=findall,filenames=filenames)
end
function get_snap_numbers(
expdir::String,
expname::String="none"
;
findall::Bool=false,
filenames::Vector{String}=String[]
)
if expname=="none"
expname = splitpath(expdir)[end]
end
if isempty(filenames)
filenames = readdir(expdir)
end
if ! findall
# Regex magic to match the filenames with 'expname' and '.snap'
pattern = r"^" * expname * r"_(\d+)\.snap$"
else
# wildcard that finds all files on format 'abXYcd_xyz.snap'
pattern = r"^.*_(\d+)\.snap$"
end
# Initialize an empty list to store the XXX numbers
snaps = Vector{Int}()
# Loop through the filenames and extract XXX numbers
for filename in filenames
match_result = match(pattern, filename)
if match_result ≠ nothing
            isnap = parse(Int, match_result.captures[1])
push!(snaps, isnap)
end
end
return sort(snaps)
end
"""
get_varnr_and_file_extension(
params::Dict{String,String},
variable::String
)
Given the snapshot `params` and desired `variable`, return
its index in the binary file, as well as the extension of this file.
(Either ".aux" or ".snap").
"""
function get_varnr_and_file_extension(
params ::Dict{String,String},
variable::String,
)
if variable in keys(primary_vars)
file_ext = ".snap"
varnr = primary_vars[variable]
elseif variable in split(params["aux"])
file_ext = ".aux"
indices = findall(x -> x == variable, split(params["aux"]))
if length(indices) > 1
error("Multiple matches for given aux-variable name.")
elseif length(indices) == 0
throw(ErrorException("Auxiliary variable not found in file."))
end
varnr = indices[1]
else
throw(ErrorException("Variable $variable does not exist"))
end
return varnr, file_ext
end
"""
get_variable_offset_in_file(
precision::DataType,
snapsize::Tuple{Integer, Integer, Integer},
varnr ::Integer
)
Given the precision and size of a snapshot, find the offset for reading the
variable with index `varnr` directly from file. Offset given in number of bytes.
"""
function get_variable_offset_in_file(
precision::DataType,
snapsize::Tuple{Integer, Integer, Integer},
varnr ::Integer
)
if precision == Float32
bytes_per_value = 4
elseif precision == Float64
bytes_per_value = 8
else
throw(ArgumentError("Unsupported precision $precision; expected Float32 or Float64"))
end
values_per_variable = snapsize[1]*snapsize[2]*snapsize[3]
offset = bytes_per_value*values_per_variable*(varnr - 1)
return offset
end
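# Worked example: for Float32 data (4 bytes per value) with snapsize (48, 48, 64),
# the variable with varnr = 3 begins at byte offset 4 * (48*48*64) * 2 = 1_179_648.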
"""
get_basename(
expname ::String,
snap ::Union{<:Integer, AbstractVector{<:Integer}},
expdir ::String,
)
Return the basename of snapshots in the experiment `expname`, located in the
directory `expdir`. Also return the filename (without file extension) of the
first snapshot of the experiment.
"""
function get_basename(
expname ::String,
snap ::Union{<:Integer, AbstractVector{<:Integer}},
expdir ::String,
)
isnap = lpad(snap[1],3,"0")
basename = joinpath(expdir, expname)
return basename, string(basename, "_", isnap)
end
"""
addtokwargs(;kwargs...)
Add keyword-arguments to your `Base.Pairs` of kwargs.
"""
function addtokwargs(;kwargs...)
kwargs
end
"""
squeeze(a::AbstractArray)
Drop singleton dimensions of array `a`
"""
function squeeze(a::AbstractArray)
shape = size(a)[findall(size(a) .≠ 1)]
reshape(a, shape)
end | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 530 | @testset "calculate electron density" begin
tmp_array = Float32[
3.049999f14, 3.0051822f14, 2.9655743f14, 2.9357018f14, 2.9122838f14,
2.8965733f14, 2.8885182f14, 2.885963f14, 2.8863922f14, 2.891092f14,
2.904146f14, 2.929381f14, 2.9665926f14, 3.0098403f14, 3.0582764f14,
3.110285f14, 3.1600604f14, 3.207982f14, 3.2572512f14, 3.3144383f14
]
ne = get_electron_density(expname,xp.snaps[1],expdir,
slicex=[10],slicey=5:24,slicez=[10])
@test ne[1,:,1] == tmp_array
end | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 2833 | @testset "experiment and mesh" begin
@test xp.snaps == [1]
# (nx, ny, nz)
@test xp.snapsize == BifrostTools.get_snapsize(xp.mesh) == (48, 48, 64)
# 8 primary, 2 aux
@test xp.num_primary_vars == 8
@test xp.num_snaps == 1
# Check experiment name and directory
@test xp.expname == "en48"
@test xp.expdir == joinpath(BASE_FOLDER,"test","sp.n064")
# Check mesh loading
@test xp.mesh.x == Float32.([0.0, 0.506649, 1.013298, 1.519947, 2.026596,
2.533245, 3.039894, 3.546543, 4.053192, 4.55984, 5.066489, 5.573138,
6.079787, 6.586436, 7.093085, 7.599734, 8.106383, 8.613032, 9.11968,
9.626329, 10.13298, 10.63963, 11.14628, 11.65293, 12.15957, 12.66622,
13.17287, 13.67952, 14.18617, 14.69282, 15.19947, 15.70612, 16.21277,
16.71942, 17.22606, 17.73271, 18.23936, 18.74601, 19.25266, 19.75931,
20.26596, 20.77261, 21.27925, 21.7859, 22.29255, 22.7992, 23.30585,
23.8125])
@test xp.mesh.y == Float32.([0.000000e+00, 5.066490e-01, 1.013298e+00,
1.519947e+00, 2.026596e+00, 2.533245e+00, 3.039894e+00, 3.546543e+00,
4.053192e+00, 4.559840e+00, 5.066489e+00, 5.573138e+00, 6.079787e+00,
6.586436e+00, 7.093085e+00, 7.599734e+00, 8.106383e+00, 8.613032e+00,
9.119680e+00, 9.626329e+00, 1.013298e+01, 1.063963e+01, 1.114628e+01,
1.165293e+01, 1.215957e+01, 1.266622e+01, 1.317287e+01, 1.367952e+01,
1.418617e+01, 1.469282e+01, 1.519947e+01, 1.570612e+01, 1.621277e+01,
1.671942e+01, 1.722606e+01, 1.773271e+01, 1.823936e+01, 1.874601e+01,
1.925266e+01, 1.975931e+01, 2.026596e+01, 2.077261e+01, 2.127925e+01,
2.178590e+01, 2.229255e+01, 2.279920e+01, 2.330585e+01, 2.381250e+01])
@test xp.mesh.dzidzdn == Float32.([1.011856e+00, 1.011856e+00,
1.151191e+00, 1.246242e+00, 1.357512e+00, 1.485157e+00,
1.622413e+00, 1.772980e+00, 1.937252e+00, 2.115924e+00,
2.309373e+00, 2.517943e+00, 2.741309e+00, 2.979138e+00,
3.230086e+00, 3.492481e+00, 3.764105e+00, 4.041862e+00,
4.322001e+00, 4.600794e+00, 4.873609e+00, 5.136231e+00,
5.384175e+00, 5.613945e+00, 5.822441e+00, 6.007303e+00,
6.167999e+00, 6.303841e+00, 6.415701e+00, 6.505291e+00,
6.574116e+00, 6.625549e+00, 6.661535e+00, 6.684680e+00,
6.698149e+00, 6.703615e+00, 6.703403e+00, 6.699659e+00,
6.693029e+00, 6.685138e+00, 6.676098e+00, 6.666643e+00,
6.655931e+00, 6.644954e+00, 6.631567e+00, 6.614876e+00,
6.593908e+00, 6.566373e+00, 6.530425e+00, 6.484511e+00,
6.425321e+00, 6.350657e+00, 6.259104e+00, 6.147660e+00,
6.016158e+00, 5.862573e+00, 5.687376e+00, 5.491169e+00,
5.275891e+00, 5.043420e+00, 4.798181e+00, 4.539168e+00,
4.368103e+00, 3.862005e+00])
end
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 1450 | @testset "reading primary" begin
rho = get_var(xp,xp.snaps,"r")
tmp_array = Float32[
4.517059f-9, 4.6568345f-9, 4.8888467f-9, 5.1624958f-9, 5.4335088f-9,
5.5271556f-9, 5.5458482f-9, 5.5393974f-9, 5.546665f-9, 5.586004f-9,
5.703038f-9, 5.9570238f-9, 6.1816396f-9, 6.4527703f-9, 6.774606f-9,
7.119954f-9, 7.509514f-9, 7.911374f-9, 8.388659f-9, 8.831276f-9,
9.330005f-9, 9.9333075f-9, 1.07750155f-8, 1.1793163f-8, 1.3059435f-8,
1.4809497f-8, 1.7210718f-8, 2.0800567f-8, 2.6005944f-8, 3.4854107f-8,
5.2834523f-8, 9.635602f-8, 1.9742674f-7, 4.3695874f-7, 9.646677f-7,
2.2240474f-6, 4.748619f-6, 1.0463934f-5, 2.5050658f-5, 6.4452746f-5,
0.00016707876, 0.0004727985, 0.0013061843, 0.003768678, 0.012945924,
0.05415829, 0.22439207, 0.884576, 2.0215719, 2.5673914, 4.0931287,
6.104089, 8.702029, 12.321905, 17.355167, 23.90907, 32.994244,
45.009403, 61.117073, 82.6865, 111.65955, 150.39403, 202.42265, 277.7372
]
@test rho[10,10,:] == tmp_array
end
@testset "reading aux" begin
T = get_var(xp,xp.snaps,"tg")
tmp_array = Float32[
911071.6, 839773.9, 754957.4, 650684.8, 563146.94,
482063.88, 428093.47, 382720.2, 346727.2, 322469.6
]
@test T[20,30,5:14] == tmp_array
end
@test length(params) == 199
@test get_var(xp,xp.snaps,"t")[1] == 0.100002506
@test get_var(xp,xp.snaps[1],"t") == 0.100002506 | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 494 | using Test
using BifrostTools
using Interpolations
BASE_FOLDER = dirname(dirname(pathof(BifrostTools)))
expdir = joinpath(BASE_FOLDER,"test","sp.n064")
expname = "en48"
xp = BifrostExperiment(expname,expdir)
params = read_params(expname,xp.snaps[1],expdir)
@testset verbose = true "Total" begin
include("experiment.jl")
include("read_params_snap_aux.jl")
include("stagger_operators.jl")
include("unit_conversion.jl")
include("eos_tables.jl")
include("utils.jl")
end
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 2207 | # Test 5th order interpolation with 5th order polynomial
function p5(x::Real)
Float32(-0.001x^5 + 0.03x^4 - 0.3x^3 + 1.5x^2 - 2x + 5)
end
function dpdx(x::Real)
Float32(-0.005x^4 + 0.12x^3 - 0.9x^2 + 3x - 2)
end
# Second order polynomial to test boundaries
function p2(x::Real)
Float32(-0.5x^2 + 2x - 3)
end
@testset "interpolation" begin
@testset "Extrapolation" begin
x_stagger = Float32.(1:10) .- 0.5f0
x_stagger = reshape(x_stagger,(10,1,1))
x = xup(x_stagger,false)
@test x[:,1,1] == Float32.(1:10)
x = xdn(x_stagger,false)
@test x[:,1,1] == Float32.(0:9)
end
@testset "5th order exact interpolation" begin
x = Float32.(1:10) .- 0.5f0
y = p5.(x)
# Reshape to 3D array, check against inner parts
# x direction
p_x = reshape(y,(10,1,1))
@test xup(p_x,false)[3:end-3,1,1] ≈ p5.(x .+ 0.5)[3:end-3]
@test xdn(p_x,false)[4:end-2,1,1] ≈ p5.(x .- 0.5)[4:end-2]
# y direction
p_y = reshape(y,(1,10,1))
@test yup(p_y,false)[1,3:end-3,1] ≈ p5.(x .+ 0.5)[3:end-3]
@test ydn(p_y,false)[1,4:end-2,1] ≈ p5.(x .- 0.5)[4:end-2]
# z direction
p_z = reshape(y,(1,1,10))
@test zup(p_z,false)[1,1,3:end-3] ≈ p5.(x .+ 0.5)[3:end-3]
@test zdn(p_z,false)[1,1,4:end-2] ≈ p5.(x .- 0.5)[4:end-2]
end
@testset "6th order exact derivative" begin
x = Float32.(1:10) .- 0.5f0
dx = ones(Float32,10)
y = p5.(x)
# Reshape to 3D array, check against inner parts
# x direction
p_x = reshape(y,(10,1,1))
@test dxup(p_x,dx,false)[3:end-3,1,1] ≈ dpdx.(x .+ 0.5)[3:end-3]
@test dxdn(p_x,dx,false)[4:end-2,1,1] ≈ dpdx.(x .- 0.5)[4:end-2]
# y direction
p_y = reshape(y,(1,10,1))
@test dyup(p_y,dx,false)[1,3:end-3,1] ≈ dpdx.(x .+ 0.5)[3:end-3]
@test dydn(p_y,dx,false)[1,4:end-2,1] ≈ dpdx.(x .- 0.5)[4:end-2]
# z direction
p_z = reshape(y,(1,1,10))
@test dzup(p_z,dx,false)[1,1,3:end-3] ≈ dpdx.(x .+ 0.5)[3:end-3]
@test dzdn(p_z,dx,false)[1,1,4:end-2] ≈ dpdx.(x .- 0.5)[4:end-2]
end
end | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 2338 | # Solar units
u_l = 1e8
u_t = 1e2
u_r = 1e-7
u_p = 1e5
u_u = 1e6
u_kr = 1e1
u_ee = 1e12
u_e = 1e5
u_te = 1e11
u_B = 1.121e3
@test 1.0*u_t == BifrostTools.convert_timeunits(1.0,params)
@testset "unit conversions" begin
# Simplest test data
data = [1.0]
conversions = Dict(
"e" => u_e,
"px" => u_r * u_u,
"py" => u_r * u_u,
"pz" => u_r * u_u,
"r" => u_r,
"bx" => u_B,
"by" => u_B,
"bz" => u_B,
"p" => u_p,
"tg" => 1e0,
"ex" => u_u * u_B / 2.99792458e10,
"ey" => u_u * u_B / 2.99792458e10,
"ez" => u_u * u_B / 2.99792458e10,
"qvisc" => u_e / u_t,
"qjoule" => u_e / u_t,
"qpdv" => u_e / u_t,
"qrdiff" => u_e / u_t,
"qediff" => u_e / u_t,
"qeadv" => u_e / u_t
)
test_cgs_to_si_conversions = Dict(
# Pressure: g/s^2/cm * 1f-3 kg/g * 1f2 cm/m = 1f-1 kg/s^2/m
"p" => 1e-1,
# Gas density: g/cm^3 * 1f-3 kg/g * 1f6 cm^3/m^3 = 1f3 kg/m^3
"r" => 1e3,
# Momentum: g/cm^2/s * 1f-3 kg/g * 1f4 cm^2/m^2 = 1f1 kg/m^2/s
"px" => 1e1,
"py" => 1e1,
"pz" => 1e1,
# Bulk velocity: cm/s * 1f-2 m/cm = 1f-2 m/s
"ux" => 1e-2,
"uy" => 1e-2,
"uz" => 1e-2,
# Internal energy: erg/cm^3 * 1f-7 J/erg * 1f6 cm^3/m^3 = 1f-1 J/m^3
"e" => 1e-1,
# Dissipation coefficients/energy terms: erg/cm^3/s * 1f-7 J/erg * 1f6 cm^3/m^3 = 1f-1 W/m^3
"qvisc" => 1e-1,
"qjoule" => 1e-1,
"qpdv" => 1e-1,
"qrdiff" => 1e-1,
"qediff" => 1e-1,
"qeadv" => 1e-1,
# Magnetic field: G * 1f-4 T/G = 1f-4 T
"bx" => 1e-4,
"by" => 1e-4,
"bz" => 1e-4,
# Electric field: statV/cm * 1f-2 m/cm * 1f-4 T/G * c[cm/s] = 2.998f4 V/m
"ex" => 2.99792458e4,
"ey" => 2.99792458e4,
"ez" => 2.99792458e4,
# Temperature: K = K
"tg" => 1.0,
)
@test all([
BifrostTools.convert_units(data, key, params, "cgs")[1] == value
for (key, value) in conversions
])
@test all([
BifrostTools.convert_units(data, key, params, "code") == data
for key in keys(conversions)
])
@test all([
BifrostTools.convert_units(data, key, params, "si")[1] == value*test_cgs_to_si_conversions[key]
for (key, value) in conversions
])
end
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | code | 346 | @testset "utility functions" begin
@test size(BifrostTools.squeeze(ones(1,1,10,1,1,10,1))) == (10,10)
basename,snapname = BifrostTools.get_basename(expname,xp.snaps[1],expdir)
@test splitpath(snapname)[end] == "en48_001"
@test splitpath(basename)[end] == "en48"
@test BifrostTools.get_snap_numbers(xp) == [1]
end | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 2305 | # BifrostTools.jl contributions
BifrostTools.jl includes the most basic functions to read Bifrost simulation input, and do interpolations and derivatives on the staggered mesh. If you want more functionality, feel free to open a pull request or raise an issue in the repository.
### Contributing code
If you want to contribute code, fork the repository and open a pull request. It is best to open a PR directly into the develop branch.
We are not strict with coding style, but please document new functions you add with docstrings. It is also a huge bonus if you can add testing for new functions, but this is not necessary.
### Bugs
If you find bugs in the code, you have three options:
1. raise an issue in the repository
2. message the developers (Elias and Eilif)
3. fix it yourself in a pull request (if you are familiar with Julia)
### Git commit message manners
The commit message is mainly for other people, so they should be able to understand it now and six months later. Commit messages cannot be longer than one sentence (line) and should start with a tag identifier (see the end of this section).
Use the imperative form of verbs rather than past tense when referring to changes introduced by the commit in question. For example, "Remove property X", not "Removed property X" and not "I removed...". This tense makes picking, reviewing or reverting commits more readable.
Use following tags for commit messages:
[DEV] : Code development (including additions and deletions)
[ADD] : Adding new feature
[DEL] : Removing files, routines
[FIX] : Fixes that occur during development, but which have essentially no impact on previous work
[BUG] : Bug with significant impact on previous work -- `grep`-ing should give restricted list
[OPT] : Optimisation
[DBG] : Debugging
[ORG] : Organisational, no changes to functionality
[SYN] : Typos and misspellings (including simple syntax error fixes)
[DOC] : Documentation only
[REP] : Repository related changes (e.g., changes in the ignore list, remove files)
[UTL] : Changes in utils
Commit message examples:
* "[BUG] Add missing initialisation to tg array"
* "[FIX] Add lowercase castig to params"
* "[CLN] Remove unnecessary allocation for dynamic arrays."
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 2634 | # BifrostTools.jl
[](https://ITA-Solar.github.io/BifrostTools.jl/dev/)
[](https://github.com/ITA-Solar/BifrostTools.jl/actions/workflows/CI.yml?query=branch%3Adevelop)
[](https://codecov.io/gh/ITA-Solar/BifrostTools.jl)
Tools for reading and working with simulation output from [The stellar atmosphere simulation code Bifrost](https://ui.adsabs.harvard.edu/abs/2011A%26A...531A.154G/abstract) in Julia.
This Julia package is created for working with *Bifrost* data **efficiently**.
Load single or multiple simulation snapshots, and analyse data with Julia speed.
This package is an extension of `Bifrost.jl`, a script written by Mikolaj Szydlarski.
## Documentation
The documentation is available at [https://ita-solar.github.io/BifrostTools.jl](https://ita-solar.github.io/BifrostTools.jl)
## Quick user guide
To install the package, open julia and add it in the package manager (the package manager is opened by typing `]`)
```julia
] add BifrostTools
```
To load the package, type the following in the REPL
```julia
using BifrostTools
```
### Using `get_var`
The function `get_var` is the main function for reading data from Bifrost simulations.
It can read single or multiple snapshots, and it can read full data cubes or slices.
It can read primary variables or auxiliary variables.
The command
```julia
variable = get_var(expname, snap, expdir, variable)
```
loads the (primary or auxiliary) variable `variable` from snapshot `snap` in the simulation `expname` located in the directory `expdir`.
By creating a `BifrostExperiment` object
```julia
brxp = BifrostExperiment(expname, expdir)
```
we can access the mesh file
```julia
brxp.mesh
```
snapshot numbers
```julia
brxp.snaps
```
and the calling signature of `get_var` can be simplified
```julia
variable = get_var(brxp, snap, variable)
```
Using optional keyword-arguments in `get_var` allows us to convert units, destagger variables, rotate the grid, and read slices of the full cube.
The command
```julia
bx = get_var(brxp, snap, "bx"; units="si", destagger=true)
```
will load the $x$-component of the magnetic field in SI units and destagger it to the cell center.
See the [documentation](https://ITA-Solar.github.io/BifrostTools.jl/dev/) for further information and more elaborate example usage.
## Contributing
Contributions are welcome. Please read [CONTRIBUTING.md](CONTRIBUTING.md)
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 41 | ```@autodocs
Modules = [BifrostTools]
``` | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 497 | ```@meta
CurrentModule = BifrostTools
```
# BifrostTools
Documentation for [BifrostTools](https://github.com/ITA-Solar/BifrostTools.jl).
BifrostTools is a Julia package for working with Bifrost simulations.
The goal of this package is to analyse *Bifrost* data **faster** than if you were to use python.
BifrostTools lets you load variables from single or multiple simulation snapshots, destagger data automatically, and calculate derivatives. It is made to analyse data efficiently.
| BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 240 | # Installation
To install the package, open julia and add it in the package manager (the package manager is opened by typing `]`)
```julia
] add BifrostTools
```
Now you can use BifrostTools.jl in Julia!
```julia
using BifrostTools
``` | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.1 | 7c7efbce8bf43d86fdf7072ceef1378c5afb71aa | docs | 6012 | # Example usage
To load the package, type the following in the REPL
```julia
using BifrostTools
```
## Using `BifrostExperiment`
In this example, we look at the simulation *cb24oi*.
We start by defining the path to the simulation directory and the name of the simulation.
```julia
expdir = "/mn/stornext/d21/RoCS/matsc/3d/run/cb24oi/"
expname = "cb24oi"
```
These variables can be passed to the `BifrostExperiment` structure, which creates an instance that lets us access the mesh file, snapshot numbers, and so on.
```julia
xp = BifrostExperiment(expname,expdir)
# Mesh file that holds grid info etc.
mesh = xp.mesh
x = mesh.x
y = mesh.y
z = mesh.z
# vector with snap numbers
snaps = xp.snaps
```
## Reading data from the simulation with `get_var`
Reading data from the snapshot or aux files is handled through the `get_var` function. Due to `Julia`'s multiple dispatch functionality, there are several ways to call the `get_var` function, but we recommend using the following function to simplify the calling signature:
```julia
get_var(
xp::BifrostExperiment,
snap::Union{<:Integer, AbstractVector{<:Integer}},
variable::String,
args...
;
kwargs...
)
```
This function loads a *variable* from one or multiple snapshots of `xp`. If multiple snapshots are loaded, the variable is returned as a `Vector` with one entry per snapshot. The available variables are the following.
The primary variables:
- "r": density
- "px": x-component of momentum
- "py": y-component of momentum
- "pz": z-component of momentum
- "e": energy
If `params["do_mhd"]` is `true` in the params file, we also have the magnetic field:
- "bx": x-component of magnetic field
- "by": y-component of magnetic field
- "bz": z-component of magnetic field
and auxiliary variables (variables in `params["aux"]`):
- "p": pressure
- "tg": gas temperature
- Q terms
The following are optional keyword-arguments (a combined sketch follows the list):
- `units::String`: Converts variables to "si" or "cgs" units. `units="si"` or `units="cgs"`.
- `destagger::Bool`: Performs 5th-order interpolation to center the variable, and should only be used when reading variables that are staggered (e.g. velocity or magnetic field). The function uses the default direction (destaggeroperation) associated with the variable unless otherwise stated by the `destaggeroperation` keyword.
- `destaggeroperation::String`: Determines which direction to destagger the variable. This is by default handled automatically by the `destaggeroperation` dictionary.
- `rotate_about::String`: Rotate the coordinate system to the normal "right hand system" with the *z*-axis pointing upwards by passing `rotate_about="x"`
- `slicex::AbstractVector{<:Integer}`: Load a slice or slices in x-axis. Give e.g. `slicex=[32, 410]` or `slicex=40:90`
- `slicey::AbstractVector{<:Integer}`: Load a slice or slices in y-axis
- `slicez::AbstractVector{<:Integer}`: Load a slice or slices in z-axis
- `squeeze::Bool`: Removes singleton dimensions (dimensions with length 1)
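As a minimal sketch combining several of these keywords (the snapshot number and index ranges below are purely illustrative):
```julia
# Load a slice of the density cube in SI units and drop singleton dimensions
rho_slice = get_var(xp, 700, "r"; units="si", slicex=100:200, slicey=[32], squeeze=true)
```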
### Loading a single snapshot
With `xp` as defined above, we define a snapshot that we want to investigate. When loading the full cube in code units, the variables are memory mapped, making them fast to load.
```julia
snap = 700
# Load some quantities for the full cube in code units
pressure = get_var(xp, snap, "p")
density = get_var(xp, snap, "r")
temperature = get_var(xp, snap, "tg")
```
### Converting units
If we want *si* or *cgs* units:
```julia
snap = 700
# Load some quantities for the full cube in si or cgs units
pressure = get_var(xp, snap, "p", units="cgs")
rho = get_var(xp, snap, "r", units="si")
# The temperature is already stored in Kelvin, so no conversion is needed
temperature = get_var(xp, snap, "tg")
```
### Reading a slice of the full cube
If we're only interested in a small part of the cube, we can use the slicing functionality of `get_var`. Use the `squeeze` keyword to drop singleton dimensions.
We can load only the surface
```julia
idz = argmin(abs.(mesh.z))
rho = get_var(xp, snap, "r"; units="si", slicez=[idz], squeeze=true)
temperature = get_var(xp, snap, "tg"; units="si", slicez=[idz], squeeze=true)
```
or a smaller cube around the surface
```julia
rho = get_var(xp, snap, "r",
units="si", slicex=100:200, slicey=400:500, slicez=[idz-20:idz+20])
temperature = get_var(xp, snap, "tg",
units="si", slicex=100:200, slicey=400:500, slicez=[idz-20:idz+20])
```
### Interpolating staggered variables to the grid center
Interpolating staggered variables (destaggering) can be handled through `get_var`. This is recommended because `get_var` can determine the interpolation direction, and if you want to slice a variable, it takes care of the correct ordering of interpolating and slicing.
```julia
# Read and destagger vertical momentum in si units
pz = get_var(xp, snap, "pz", units="si", destagger=true)
```
### Reading multiple snapshots
If you want to get the time evolution of a quantity, you can simply pass a vector of snapshots.
[//]: # "The `get_var` function used Julia's threads functionality to read multiple snapshots in parallel. Had some trouble with this functionality, might be reintroduced."
```julia
snaps = 100:150
rho = get_var(xp, snaps, "r", units="si", slicez=[idz], squeeze=true)
# Calculate vertical velocity
vz = get_var(xp, snaps, "pz", units="si", slicez=[idz], squeeze=true) ./ rho
```
### Rotating grid
Rotate about the x-axis to get vector quantities on a *z*-axis that points upwards.
```julia
# When we rotate about the x-axis, this is what happens to the grid
z = -mesh.z
y = -mesh.y
# Load x-component of B-field and rotate about x-axis
bx = get_var(xp, snap, "bx", units="si", destagger=true, rotate_about="x")
```
## Loading the simulation parameters with `read_params`
The `read_params` function reads the params file. It can be called by giving the full filename, like the following
```julia
snap = 500
params_file = joinpath(expdir,string(expname,"_",snap,".idl"))
params = read_params(params_file)
```
or
```julia
read_params(expname,snap,expdir)
``` | BifrostTools | https://github.com/ITA-Solar/BifrostTools.jl.git |
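The returned `params` maps parameter names to strings, so values must be parsed before use. A minimal sketch, assuming the standard Bifrost parameter keys `mx` and `u_t` are present:
```julia
params = read_params(expname, snap, expdir)
mx = parse(Int, params["mx"]) # grid size in x
u_t = parse(Float64, params["u_t"]) # time unit used for cgs conversions
```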
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1152 | using Documenter, RiskAdjustedLinearizations
makedocs(
modules = [RiskAdjustedLinearizations],
doctest = false,
strict = false,
clean = false,
format = Documenter.HTML(; # prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://juliadocs.github.io/Documenter.jl/stable/",
assets=String[],
),
sitename = "RiskAdjustedLinearizations.jl",
authors = "William Chen",
linkcheck = false,
pages = ["Home" => "index.md",
"Risk-Adjusted Linearizations" => "risk_adjusted_linearization.md",
"Sparse Arrays and Jacobians" => "sparse_arrays_jacs.md",
"Numerical Algorithms" => "numerical_algorithms.md",
"Example" => "example.md",
"Caching" => "caching.md",
"Diagnostics" => "diagnostics.md",
"Tips" => "tips.md",
]
)
deploydocs(;
repo = "github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git",
target = "build",
devbranch = "main",
versions = ["stable" => "v^", "v#.#"],
)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 4797 | using UnPack, OrderedCollections, LinearAlgebra, JLD2, SparseArrays
# Load guesses
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_sss.jld2"), "r")
zguess = vec(sssout["z_rss"])
yguess = vec(sssout["y_rss"])
Psiguess = sssout["Psi_rss"]
mutable struct CoeurdacierReyWinant{T <: Real}
σr::T # SD of interest rate shocks
σy::T # SD of endowment shocks
β::T # intertemporal discount rate
γ::T # risk aversion coefficient
θ::T # endowment level whose log is the long-run endowment (yy = log(θ))
ρr::T # persistence of interest rate
ρy::T # persistence of endowment
rr::T # long-run interest rate
yy::T # long-run endowment
end
function CoeurdacierReyWinant(; σr::T = .025, σy::T = .025, β::T = .96, γ::T = 2.,
θ::T = 1., ρr::T = .9, ρy::T = .9, rr::T = .01996, yy::T = log(θ)) where {T <: Real}
return CoeurdacierReyWinant{T}(σr, σy, β, γ, θ, ρr, ρy, rr, yy)
end
function crw(m::CoeurdacierReyWinant{T}; Ψ = nothing, sparse_jacobian::Vector{Symbol} = Symbol[],
sparse_arrays::Bool = false) where {T <: Real}
@unpack σr, σy, β, γ, θ, ρr, ρy, rr, yy = m
# Nₜ = exp(rₜ) * Aₜ₋₁ + Yₜ, where Aₜ is foreign assets and Yₜ is the endowment
# The jump variables are consumption, expected return on assets Xₜ = 𝔼ₜ[Rₜ₊₁], and
# Wₜ = 𝔼ₜ[Yₜ₊₁]
S = OrderedDict{Symbol, Int}(:N => 1, :r => 2, :y => 3) # State variables
J = OrderedDict{Symbol, Int}(:c => 1, :x => 2, :w => 3) # Jump variables
SH = OrderedDict{Symbol, Int}(:εr => 1, :εy => 2) # Exogenous shocks
Nz = length(S)
Ny = length(J)
Nε = length(SH)
function μ(F, z, y) # note that y here refers to jump variables
F[S[:N]] = exp(y[J[:w]]) + exp(y[J[:x]]) * (z[S[:N]] - exp(y[J[:c]]))
F[S[:r]] = (1 - ρr) * rr + ρr * z[S[:r]]
F[S[:y]] = (1 - ρy) * yy + ρy * z[S[:y]]
end
function ξ(F, z, y)
F[J[:c]] = log(β) + γ * y[J[:c]] # Euler equation
F[J[:x]] = -y[J[:x]] # rₜ₊₁ - xₜ, rational expectations
F[J[:w]] = -y[J[:w]] # yₜ₊₁ - wₜ
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Λ(F, z, y)
F[S[:N], J[:x]] = z[S[:N]] - exp(y[J[:c]])
F[S[:N], J[:w]] = 1.
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z, y)
F[S[:r], SH[:εr]] = σr
F[S[:y], SH[:εy]] = σy
end
Γ₅ = zeros(T, Ny, Nz)
Γ₅[J[:c], S[:r]] = 1.
Γ₅[J[:x], S[:r]] = 1.
Γ₅[J[:w], S[:y]] = 1.
Γ₆ = zeros(T, Ny, Ny)
Γ₆[J[:c], J[:c]] = -γ
if sparse_arrays
Γ₅ = sparse(Γ₅)
Γ₆ = sparse(Γ₆)
end
z = zguess
y = yguess
if isnothing(Ψ)
Ψ = Psiguess
end
if sparse_arrays
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, crw_ccgf, z, y, Ψ, Nε; sparse_jacobian = sparse_jacobian,
Λ_cache_init = dims -> spzeros(dims...),
Σ_cache_init = dims -> spzeros(dims...),
jump_dependent_shock_matrices = true)
else
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, crw_ccgf, z, y, Ψ, Nε; sparse_jacobian = sparse_jacobian,
jump_dependent_shock_matrices = true)
end
end
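# Evaluates consumption Cₜ implied by the affine approximation yₜ ≈ y + Ψ(zₜ - z),
# where index 1 corresponds to the jump variable :c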
crw_cₜ(m, zₜ) = exp(m.y[1] + (m.Ψ * (zₜ - m.z))[1])
# Evaluates m_{t + 1} + r_{t + 1}
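# Note: the parameters β and γ are read from the global `m_crw` defined in example_crw.jl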
function crw_logSDFxR(m, zₜ, εₜ₊₁, Cₜ)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
return log(m_crw.β) - m_crw.γ * (yₜ₊₁[1] - log(Cₜ)) + zₜ₊₁[2]
end
# Calculate 𝔼ₜ[exp(mₜ₊₁ + rₜ₊₁)] via quadrature
std_norm_mean = zeros(2)
std_norm_sig = ones(2)
crw_𝔼_quadrature(f::Function) = gausshermite_expectation(f, std_norm_mean, std_norm_sig, 10)
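# Example usage (as in example_crw.jl): pass the quadrature rule to the diagnostics, e.g.
# euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks)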
# Calculate implied state variable(s)
function crw_endo_states(m, zₜ, zₜ₋₁, c_impl)
# rₜ, yₜ are exogenous while Nₜ = exp(rₜ) * Aₜ₋₁ + Yₜ is entirely pre-determined.
# Thus, our implied state variable will be foreign asset Aₜ = Nₜ - Cₜ.
# zₜ₋₁ may be the previous period's implied state, so we start from there
# to calculate Aₜ₋₁.
yₜ₋₁ = m.y + m.Ψ * (zₜ₋₁ - m.z) # Calculate implied jump variables last period
Cₜ₋₁ = exp(yₜ₋₁[1]) # to get the implied consumption last period.
Aₜ₋₁ = zₜ₋₁[1] - Cₜ₋₁ # Given that consumption, we compute implied foreign assets yesterday.
Nₜ = exp(zₜ[2]) * Aₜ₋₁ + exp(zₜ[3]) # Now we can get implied resources available today.
return vcat(zₜ, Nₜ - exp(c_impl)) # This gives us implied foreign assets today, along with other state variables
end
function crw_ccgf(F, α, z)
# F .= .5 * diag(α * α') # slower but this is the underlying math
sum!(F, α.^2) # faster implementation
F .*= .5
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1577 | # This script actually solves the CoeurdacierReyWinant model with a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, RiskAdjustedLinearizations, Test, JLD2
include("crw.jl")
# Settings
diagnostics = true
# Set up
m_crw = CoeurdacierReyWinant()
m = crw(m_crw)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference/crw_sss.jld2"), "r")
# Small perturbation b/c initialized at the stochastic steady state from a saved file
m.z .= 1.1 * m.z
m.y .= 1.1 * m.y
m.Ψ .= 1.1 * m.Ψ
# Solve!
solve!(m, m.z, m.y, m.Ψ; algorithm = :homotopy, step = .5)
# Only homotopy seems to work for this model. The relaxation algorithm
# has trouble finding an answer with smaller error than 1e-3
# solve!(m, m.z, m.y, m.Ψ; algorithm = :relaxation, verbose = :high, ftol = 5e-5, damping = .9)
@test isapprox(sssout["z_rss"], m.z)
@test isapprox(sssout["y_rss"], m.y)
@test isapprox(sssout["Psi_rss"], m.Ψ)
if diagnostics
# See crw.jl for the definition of the functions
# crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, and crw_endo_states
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_shocks.jld2"), "r")["shocks"]
@test abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks, summary_statistic = x -> norm(x, Inf))) < 3e-5
c_err, endo_states_err = dynamic_euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, crw_endo_states, 1, shocks)
@test c_err < 2e-5
@test endo_states_err < 1e-3
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 8752 | using UnPack, OrderedCollections, LinearAlgebra, JLD2, LabelledArrays, SparseArrays
# Representative agent version, which is essentially Bansal and Yaron (2004)
mutable struct BansalYaron2004{T <: Real}
p::LArray{T, 1, Array{T, 1}, (:μ_y, :ρ_x, :σ_x, :ρ_σ, :σ_y, :ς, :β, :ψ, :γ)}
N_approx::LArray{Int64, 1, Array{Int64, 1}, (:q, :ω)}
S::OrderedDict{Symbol, Int}
J::OrderedDict{Symbol, Int}
E::OrderedDict{Symbol, Int}
SH::OrderedDict{Symbol, Int}
end
# Parameters are based off Schorfheide et al. (2016) and converted from a monthly frequency to quarterly
function BansalYaron2004(; μ_y::T = 0.0016 * 3., ρ_x::T = 0.99^3, σ_x::T = sqrt((0.74 * sqrt(1. - ρ_x^2))^2 * 3.), ρ_σ::T = 0.99^3,
σ_y::T = sqrt(0.0021^2 * 3.), ς::T = sqrt(0.0014^2 * 3.),
β::T = 0.999^3, ψ::T = 2., γ::T = 9.,
N_approx::LArray{Int64, 1, Array{Int64, 1}, (:q, :ω)} = (@LArray [1, 1] (:q, :ω))) where {T <: Real}
@assert all(N_approx[k] > 0 for k in keys(N_approx)) "N_approx must be at least 1 for all variables."
# Create indexing dictionaries
S_init = [:yy, :x, :σ²_y] # State variables
J_init = [:q, :v, :ce, :ω] # Jump variables
SH_init = [:ε_y, :ε_x, :ε_σ²] # Exogenous shocks
E_init = [:value_fnct, :certainty_equiv, :ez_fwd_diff, :cap_ret] # Equations
for var in [:q, :ω]
inds = (var == :q) ? (1:N_approx[var]) : (0:(N_approx[var] - 1))
push!(J_init, [Symbol(:d, var, "$(i)") for i in inds]...)
push!(J_init, [Symbol(:p, var, "$(i)") for i in 1:N_approx[var]]...)
push!(E_init, [Symbol(:eq_d, var, "$(i)") for i in inds]...)
push!(E_init, [Symbol(:eq_p, var, "$(i)") for i in 1:N_approx[var]]...)
end
S = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(S_init))
J = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(J_init))
E = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(E_init))
SH = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(SH_init))
para = @LArray [μ_y, ρ_x, σ_x, ρ_σ, σ_y, ς, β, ψ, γ] (:μ_y, :ρ_x, :σ_x, :ρ_σ, :σ_y, :ς, :β, :ψ, :γ)
return BansalYaron2004{T}(para, N_approx, S, J, E, SH)
end
function bansal_yaron_2004(m::BansalYaron2004{T}; sparse_arrays::Bool = false,
sparse_jacobian::Vector{Symbol} = Symbol[]) where {T <: Real}
# Unpack parameters and indexing dictionaries
@unpack p, N_approx, S, J, E, SH = m
@unpack yy, x, σ²_y = S
@unpack q, v, ce, ω = J
@unpack ε_y, ε_x, ε_σ² = SH
@unpack value_fnct, certainty_equiv, ez_fwd_diff, cap_ret = E
Nz = length(S)
Ny = length(J)
Nε = length(SH)
## Define nonlinear equations
# Some helper functions
m_ξ(z, y) = log(p.β) - (p.ψ - p.γ) * y[ce] - p.γ * p.μ_y
function m_fwd!(i, Γ₅, Γ₆)
Γ₅[i, yy] = -p.γ
Γ₆[i, v] = (p.ψ - p.γ)
end
function μ(F, z, y) # note that y here refers to jump variables
F[yy] = z[x]
F[x] = p.ρ_x * z[x]
F[σ²_y] = (1. - p.ρ_σ) * p.σ_y^2 + p.ρ_σ * z[σ²_y]
end
function ξ(F, z, y)
m_ξv = m_ξ(z, y) # evaluate SDF
F[value_fnct] = 1. / (1. - p.ψ) * (log(1. - p.β) + y[ω]) - y[v]
F[certainty_equiv] = 1. / (1. - p.ψ) * (log(1. - p.β) - log(p.β) + log(exp(y[ω]) - 1.)) - y[ce]
## Forward-difference equations separately handled b/c recursions
F[cap_ret] = y[q] - log(sum([exp(y[J[Symbol("dq$(i)")]]) for i in 1:N_approx[:q]]) +
exp(y[J[Symbol("pq$(N_approx[:q])")]]))
F[ez_fwd_diff] = y[ω] - log(sum([exp(y[J[Symbol("dω$(i)")]]) for i in 0:(N_approx[:ω] - 1)]) +
exp(y[J[Symbol("pω$(N_approx[:ω])")]]))
# Set initial boundary conditions
F[E[:eq_dq1]] = p.μ_y - y[J[:dq1]] + m_ξv
F[E[:eq_pq1]] = p.μ_y - y[J[:pq1]] + m_ξv
F[E[:eq_dω0]] = y[J[:dω0]]
F[E[:eq_pω1]] = p.μ_y - y[J[:pω1]] + m_ξv
# Recursions for forward-difference equations
for i in 2:N_approx[:q]
F[E[Symbol("eq_dq$(i)")]] = p.μ_y - y[J[Symbol("dq$(i)")]] + m_ξv
F[E[Symbol("eq_pq$(i)")]] = p.μ_y - y[J[Symbol("pq$(i)")]] + m_ξv
end
for i in 2:N_approx[:ω]
F[E[Symbol("eq_dω$(i-1)")]] = p.μ_y - y[J[Symbol("dω$(i-1)")]] + m_ξv
F[E[Symbol("eq_pω$(i)")]] = p.μ_y - y[J[Symbol("pω$(i)")]] + m_ξv
end
end
# The cache is initialized as zeros so we only need to fill non-zero elements
Λ = sparse_arrays ? spzeros(T, Nz, Ny) : zeros(T, Nz, Ny)
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F[yy, ε_y] = sqrt(z[σ²_y])
F[x, ε_x] = sqrt(z[σ²_y]) * p.σ_x
F[σ²_y, ε_σ²] = sqrt(z[σ²_y]) * p.ς
end
function ccgf(F, α, z)
# F .= .5 * RiskAdjustedLinearizations.diag(α * α') # slower but this is the underlying math
sum!(F, α.^2) # faster implementation
F .*= .5
end
if sparse_arrays
Γ₅ = spzeros(T, Ny, Nz)
Γ₆ = spzeros(T, Ny, Ny)
else
Γ₅ = zeros(T, Ny, Nz)
Γ₆ = zeros(T, Ny, Ny)
end
# Forward difference equations: boundary conditions
m_fwd!(E[:eq_dq1], Γ₅, Γ₆)
Γ₅[E[:eq_dq1], yy] = one(T)
m_fwd!(E[:eq_pq1], Γ₅, Γ₆)
Γ₅[E[:eq_pq1], yy] = one(T)
Γ₆[E[:eq_pq1], q] = one(T)
m_fwd!(E[:eq_pω1], Γ₅, Γ₆)
Γ₅[E[:eq_pω1], yy] = one(T)
Γ₆[E[:eq_pω1], ω] = one(T)
# Forward difference equations: recursions
for i in 2:N_approx[:q]
m_fwd!(E[Symbol("eq_dq$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dq$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_dq$(i)")], J[Symbol("dq$(i-1)")]] = one(T)
m_fwd!(E[Symbol("eq_pq$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pq$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pq$(i)")], J[Symbol("pq$(i-1)")]] = one(T)
end
for i in 2:N_approx[:ω]
m_fwd!(E[Symbol("eq_dω$(i-1)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dω$(i-1)")], yy] = one(T)
Γ₆[E[Symbol("eq_dω$(i-1)")], J[Symbol("dω$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_pω$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pω$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pω$(i)")], J[Symbol("pω$(i-1)")]] = one(T)
end
z, y = create_deterministic_guess(m)
Ψ = zeros(T, Ny, Nz)
if sparse_arrays
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian,
Λ_cache_init = dims -> spzeros(dims...), Σ_cache_init = dims -> spzeros(dims...))
else
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian)
end
end
function create_deterministic_guess(m::BansalYaron2004{T}) where {T <: Real}
## Set up
# Unpack parameters and indexing dictionaries
@unpack p, N_approx, S, J, E, SH = m
@unpack yy, x, σ²_y = S
@unpack q, v, ce, ω = J
@unpack ε_y, ε_x, ε_σ² = SH
@unpack value_fnct, certainty_equiv, ez_fwd_diff, cap_ret = E
Nz = length(S)
Ny = length(J)
# Initialize deterministic steady state guess vectors
z = Vector{T}(undef, Nz)
y = Vector{T}(undef, Ny)
## Compute guesses
# Steady state values of state variables known ex-ante
z[yy] = 0.
z[x] = 0.
z[σ²_y] = p.σ_y^2
# Now make guesses for remaining quantities
Y0 = 1. # long-run value is long-run value of X, which is 1
Ω0 = 1. / (1. - (p.β * Y0 * exp(p.μ_y)) ^ (1. - p.ψ))
V0 = ((1. - p.β) * Ω0) ^ (1. / (1. - p.ψ))
CE0 = ((1. - p.β) / p.β * (Ω0 - 1.)) ^ (1. / (1. - p.ψ))
M0 = p.β * (p.β * Ω0 / (Ω0 - 1.)) ^ ((p.ψ - p.γ) / (1. - p.ψ)) * (Y0 * exp(p.μ_y)) ^ (-p.γ)
Q0 = exp(p.μ_y) * M0 * Y0 / (1. - exp(p.μ_y) * M0 * Y0)
y[q] = log(Q0)
y[v] = log(V0)
y[ce] = log(CE0)
y[ω] = log(Ω0)
y[J[:dq1]] = convert(T, log(exp(p.μ_y) * M0 * Y0))
y[J[:pq1]] = convert(T, log(exp(p.μ_y) * M0 * Y0 * Q0))
y[J[:dω0]] = zero(T)
y[J[:pω1]] = convert(T, log(exp(p.μ_y) * M0 * Y0 * Ω0))
for i in 2:N_approx[:q]
y[J[Symbol("dq$(i)")]] = convert(T, log(M0) + μ_y + log(Y0) + y[J[Symbol("dq$(i-1)")]])
y[J[Symbol("pq$(i)")]] = convert(T, log(M0) + μ_y + log(Y0) + y[J[Symbol("pq$(i-1)")]])
end
for i in 2:N_approx[:ω]
y[J[Symbol("dω$(i-1)")]] = convert(T, μ_y + log(M0) + log(Y0) + y[J[Symbol("dω$(i-2)")]])
y[J[Symbol("pω$(i)")]] = convert(T, μ_y + log(M0) + log(Y0) + y[J[Symbol("pω$(i-1)")]])
end
return z, y
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2621 | # This script actually solves the CoeurdacierReyWinant model with a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, RiskAdjustedLinearizations, Test, JLD2
include("bansal_yaron_2004.jl")
include("het_risk_aversion.jl")
# What to do?
solve_rep_agent = false
solve_het_agent = true
check_mat = false
# Set up
if solve_rep_agent
m_rep = BansalYaron2004()
m = bansal_yaron_2004(m_rep; sparse_arrays = true, sparse_jacobian = [:μ, :ξ])
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Solve!
solve!(m, m.z, m.y; algorithm = :deterministic)
solve!(m; algorithm = :relaxation)
# Now solve with a higher risk aversion, using previous steady state as a guess
# Note the parameters used in the equations for the RiskAdjustedLinearization
# are from the field `m_rep.p`, which is a LabelledArray and thus allows us to
# mutate the parameters without re-compiling the functions contained in `m`.
m_rep.p[:γ] = 9.5
solve!(m; algorithm = :relaxation)
m_rep.p[:γ] = 9. # restore the original parameter so, for example, the check_mat test passes
end
if solve_het_agent
m_het = HetRiskAversion()
m = het_risk_aversion(m_het; sparse_arrays = false) # , sparse_jacobian = [:μ, :ξ])
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Solve!
# solve!(m; algorithm = :homotopy, step = .1)
#= # Now solve with a higher risk aversion, using previous steady state as a guess
# Note the parameters used in the equations for the RiskAdjustedLinearization
# are from the field `m_rep.p`, which is a LabelledArray and thus allows us to
# mutate the parameters without re-compiling the functions contained in `m`.
m_het.p[:γ] = 9.5
solve!(m; algorithm = :relaxation)
m_het.p[:γ] = 9. # restore the original parameter so, for example, the check_mat test passes=#
end
if check_mat
using MAT
matout = matread("dynare_ss.mat")
@unpack J, S = m_rep
solve!(m, z0, y0; algorithm = :deterministic)
@test log(matout["Q"]) ≈ m.y[J[:q]] atol=1e-6
@test log(matout["Omega"]) ≈ m.y[J[:ω]] atol=1e-6
@test log(matout["V"]) ≈ m.y[J[:v]] atol=1e-6
@test log(matout["CE"]) ≈ m.y[J[:ce]] atol=1e-6
@test log(matout["PQ"]) ≈ m.y[J[:pq1]] atol=1e-6
@test log(matout["DQ"]) ≈ m.y[J[:dq1]] atol=1e-6
@test log(matout["POmega"]) ≈ m.y[J[:pω1]] atol=1e-6
@test log(matout["DOmega"]) ≈ m.y[J[:dω0]] atol=1e-6
@test log(matout["X"]) ≈ m.z[S[:x]] atol=1e-6
@test log(matout["Y"]) ≈ m.z[S[:yy]] atol=1e-6
@test matout["SigSq"] ≈ m.z[S[:σ²_y]] atol=1e-6
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 20396 | using UnPack, OrderedCollections, LinearAlgebra, JLD2, LabelledArrays, SparseArrays, RiskAdjustedLinearizations, NLsolve
if !isdefined(Main, :BansalYaron2004) || !isdefined(Main, :bansal_yaron_2004)
include("bansal_yaron_2004.jl")
end
# BansalYaron2004 with heterogeneous risk aversion
# FIRST, we implement w/out extra forward expectations of the wealth share, to reduce the complexity.
mutable struct HetRiskAversion{T <: Real}
p::LArray{T, 1, Array{T, 1}, (:μ_y, :ρ_x, :σ_x, :ρ_σ, :σ_y, :ς, :λ₁, :β, :ψ, :γ₁, :γ₂,
:δ, :τ̅₁)}
N_approx::LArray{Int64, 1, Array{Int64, 1}, (:q1, :q2, :ω1, :ω2)}
S::OrderedDict{Symbol, Int}
J::OrderedDict{Symbol, Int}
E::OrderedDict{Symbol, Int}
SH::OrderedDict{Symbol, Int}
end
# Parameters are based off Schorfheide et al. (2016) and converted from a monthly frequency to quarterly
function HetRiskAversion(; μ_y::T = 0.0016 * 3., ρ_x::T = 0.99^3, σ_x::T = sqrt((0.74 * sqrt(1. - ρ_x^2))^2 * 3.), ρ_σ::T = 0.99^3,
σ_y::T = sqrt(0.0021^2 * 3.), ς::T = sqrt(0.0014^2 * 3.), λ₁::T = .5,
β::T = 0.999^3, ψ::T = 2., γ₁::T = 8.9, γ₂::T = (9. - λ₁ * γ₁) / (1. - λ₁), # target 9 for average risk aversion
δ::T = .9, τ̅₁::T = 1e-8, N_approx::LArray{Int64, 1, Array{Int64, 1}, (:q1, :q2, :ω1, :ω2)} =
LVector(q1 = 1, q2 = 1, ω1 = 1, ω2 = 1)
) where {T <: Real}
@assert all([N_approx[k] > 0 for k in keys(N_approx)]) "N_approx must be at least 1 for all variables."
# Create indexing dictionaries
S_init = [:yy, :x, :σ²_y, :W1, :r₋₁, :b1₋₁, :s1₋₁] # State variables
J_init = [:q, :v1, :v2, :ce1, :ce2, :ω1, :ω2, :r, :c1, :c2, # Jump variables
:b1, :b2, :s1, :s2, :Θ1, :Θ2, :logΘ1, :Θ1_t1]
#= J_init = [:q, :v1, :v2, :ce1, :ce2, :ω1, :ω2, :r, :c1, :c2, # Jump variables
:b1, :b2, :s1, :s2, :Θ1, :Θ2, :logΘ1, :logΘ2,
:Θ1_t1, :Θ2_t1]=#
SH_init = [:ε_y, :ε_x, :ε_σ²] # Exogenous shocks
E_init = [:value_fnct1, :value_fnct2, # Equations
:certainty_equiv1, :certainty_equiv2,
:ez_fwd_diff1, :ez_fwd_diff2,
:euler1, :euler2, :cap_ret1, :cap_ret2,
:budget_constraint1, # :budget_constraint2,
:wealth_per_agent1, :wealth_per_agent2,
:eq_logΘ1, # :eq_logΘ2,
:expected_wealth_per_agent1,
# :expected_wealth_per_agent2,
# :consumption_mc, :bond_mc, :share_mc]
:consumption_mc, :bond_mc, :share_mc] # bond market clears by Walras' law
for var in [:q1, :q2, :ω1, :ω2]
inds = (var in [:q1, :q2]) ? (1:N_approx[var]) : (0:(N_approx[var] - 1))
push!(J_init, [Symbol(:d, var, "_t$(i)") for i in inds]...)
push!(J_init, [Symbol(:p, var, "_t$(i)") for i in 1:N_approx[var]]...)
push!(E_init, [Symbol(:eq_d, var, "_t$(i)") for i in inds]...)
push!(E_init, [Symbol(:eq_p, var, "_t$(i)") for i in 1:N_approx[var]]...)
end
S = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(S_init))
J = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(J_init))
E = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(E_init))
SH = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(SH_init))
p = @LArray [μ_y, ρ_x, σ_x, ρ_σ, σ_y, ς, λ₁, β, ψ, γ₁, γ₂, δ, τ̅₁] (:μ_y, :ρ_x, :σ_x, :ρ_σ, :σ_y, :ς, :λ₁, :β, :ψ, :γ₁, :γ₂, :δ, :τ̅₁)
return HetRiskAversion{T}(p, N_approx, S, J, E, SH)
end
function het_risk_aversion(m::HetRiskAversion{T}; sparse_arrays::Bool = false,
sparse_jacobian::Vector{Symbol} = Symbol[],
m_rep = nothing, algorithm::Symbol = :relaxation) where {T <: Real}
# Unpack parameters and indexing dictionaries
@unpack p, N_approx, S, J, E, SH = m
@unpack yy, x, σ²_y, W1, r₋₁, b1₋₁, s1₋₁ = S
@unpack q, v1, v2, ce1, ce2, ω1, ω2, r, c1, c2, b1, b2, s1, s2 = J
# @unpack Θ1, Θ2, logΘ1, logΘ2, Θ1_t1, Θ2_t1, logΘ1, logΘ2 = J
@unpack Θ1, Θ2, logΘ1, Θ1_t1 = J
@unpack ε_y, ε_x, ε_σ² = SH
@unpack value_fnct1, value_fnct2, certainty_equiv1, certainty_equiv2 = E
@unpack ez_fwd_diff1, ez_fwd_diff2, cap_ret1, cap_ret2 = E
# @unpack euler1, euler2, budget_constraint1, budget_constraint2 = E
@unpack euler1, euler2, budget_constraint1 = E
# @unpack wealth_per_agent1, wealth_per_agent2, eq_logΘ1, eq_logΘ2 = E
@unpack wealth_per_agent1, wealth_per_agent2, eq_logΘ1 = E
# @unpack expected_wealth_per_agent1, expected_wealth_per_agent2 = E
@unpack expected_wealth_per_agent1 = E
@unpack consumption_mc, bond_mc, share_mc = E
# @unpack consumption_mc, share_mc = E
Nz = length(S)
Ny = length(J)
Nε = length(SH)
## Define nonlinear equations
# Some helper functions
m1_ξ(z, y) = log(p.β) + p.γ₁ * y[c1] - (p.ψ - p.γ₁) * y[ce1] - p.γ₁ * p.μ_y
m2_ξ(z, y) = log(p.β) + p.γ₂ * y[c2] - (p.ψ - p.γ₂) * y[ce2] - p.γ₂ * p.μ_y
function m1_fwd!(row, Γ₅, Γ₆)
Γ₅[row, yy] = -p.γ₁
Γ₆[row, c1] = -p.γ₁
Γ₆[row, v1] = (p.ψ - p.γ₁)
end
function m2_fwd!(row, Γ₅, Γ₆)
Γ₅[row, yy] = -p.γ₂
Γ₆[row, c2] = -p.γ₂
Γ₆[row, v2] = (p.ψ - p.γ₂)
end
function μ(F, z, y) # note that y here refers to jump variables
# Exogenous states
F[yy] = z[x]
F[x] = p.ρ_x * z[x]
F[σ²_y] = (1. - p.ρ_σ) * p.σ_y^2 + p.ρ_σ * z[σ²_y]
# Endogenous states
F[W1] = p.λ₁ * y[Θ1_t1]
F[r₋₁] = y[r]
F[b1₋₁] = y[b1]
F[s1₋₁] = y[s1]
end
function ξ(F, z, y)
## Preferences
m1_ξv = m1_ξ(z, y) # evaluate SDF for type 1
m2_ξv = m2_ξ(z, y) # evaluate SDF for type 2
F[value_fnct1] = 1. / (1. - p.ψ) * (log(1. - p.β) + y[ω1]) - y[v1]
F[value_fnct2] = 1. / (1. - p.ψ) * (log(1. - p.β) + y[ω2]) - y[v2]
F[certainty_equiv1] = 1. / (1. - p.ψ) * (log(1. - p.β) - log(p.β) + log(exp(y[ω1]) - 1.)) - y[ce1]
F[certainty_equiv2] = 1. / (1. - p.ψ) * (log(1. - p.β) - log(p.β) + log(exp(y[ω2]) - 1.)) - y[ce2]
## Euler equations
F[euler1] = y[r] + m1_ξv
F[euler2] = y[r] + m2_ξv
## Market-clearing equations and budget constraints
Q = exp(y[q])
C1 = exp(y[c1])
C2 = exp(y[c2])
B1 = -exp(y[b1]) # note that y[b1] = log(-B1) since B1 < 0
B2 = exp(y[b2])
S1 = exp(y[s1])
S2 = exp(y[s2])
λ₂ = 1. - p.λ₁
F[consumption_mc] = log(p.λ₁ * C1 + λ₂ * C2)
F[bond_mc] = log(-p.λ₁ * B1) - log(λ₂ * B2)
F[share_mc] = log(p.λ₁ * S1 + λ₂ * S2)
# W2 = 1. - z[W1]
F[budget_constraint1] = log(z[W1] / p.λ₁ * (1. + Q) / (C1 + B1 + Q * S1))
# F[budget_constraint1] = log(z[W1]) - log(p.λ₁) + log(1. + Q) - log(C1 + B1 + Q * S1)
# F[budget_constraint2] = log(W2 / λ₂ * (1. + Q) / (C2 + B2 + Q * S2))
# F[budget_constraint2] = log(W2) - log(λ₂) + log(1. + Q) - log(C2 + B2 + Q * S2)
## Wealth per agent (Θ) equations
S1₋₁ = exp(z[s1₋₁])
S2₋₁ = (1. - p.λ₁ * S1₋₁) / λ₂
B1₋₁ = -exp(z[b1₋₁]) # note that z[b1₋₁] = log(-B1₋₁) since B1₋₁ < 0
B2₋₁ = -p.λ₁ * B1₋₁ / λ₂
R₋₁ = exp(z[r₋₁])
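# τ₂ is chosen so that transfers net to zero across agents:
# λ₁*τ̅₁*(agent 1 wealth) + λ₂*τ₂*(agent 2 wealth) = 0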
τ₂ = -(p.τ̅₁ * p.λ₁ * (R₋₁ * B1₋₁ + (1. + Q) * S1₋₁)) / (λ₂ * (R₋₁ * B2₋₁ + (1. + Q) * S2₋₁))
F[wealth_per_agent1] = log(1. - p.τ̅₁) + log(S1₋₁ + R₋₁ * B1₋₁ / (1. + Q)) - log(y[Θ1])
# F[wealth_per_agent2] = log(R₋₁ * B2₋₁ / (1. + Q) + S2₋₁ + p.τ̅₁ * (R₋₁ * B1₋₁ / (1. + Q) + S1₋₁)) - log(y[Θ2])
F[wealth_per_agent2] = log(1. - τ₂) + log(S2₋₁ + R₋₁ * B2₋₁ / (1. + Q)) - log(y[Θ2])
F[eq_logΘ1] = y[logΘ1] - log(y[Θ1])
# F[eq_logΘ2] = y[logΘ2] - log(y[Θ2])
F[expected_wealth_per_agent1] = -log(y[Θ1_t1])
# F[expected_wealth_per_agent2] = -log(y[Θ2_t1])
## Forward-difference equations separately handled b/c recursions
F[cap_ret1] = y[q] - log(sum([exp(y[J[Symbol("dq1_t$(i)")]]) for i in 1:N_approx[:q1]]) +
exp(y[J[Symbol("pq1_t$(N_approx[:q1])")]]))
F[cap_ret2] = y[q] - log(sum([exp(y[J[Symbol("dq2_t$(i)")]]) for i in 1:N_approx[:q2]]) +
exp(y[J[Symbol("pq2_t$(N_approx[:q2])")]]))
F[ez_fwd_diff1] = y[ω1] - log(sum([exp(y[J[Symbol("dω1_t$(i)")]]) for i in 0:(N_approx[:ω1] - 1)]) +
exp(y[J[Symbol("pω1_t$(N_approx[:ω1])")]]))
F[ez_fwd_diff2] = y[ω2] - log(sum([exp(y[J[Symbol("dω2_t$(i)")]]) for i in 0:(N_approx[:ω2] - 1)]) +
exp(y[J[Symbol("pω2_t$(N_approx[:ω2])")]]))
# Set initial boundary conditions
F[E[:eq_dq1_t1]] = p.μ_y - y[J[:dq1_t1]] + m1_ξv
F[E[:eq_pq1_t1]] = p.μ_y - y[J[:pq1_t1]] + m1_ξv
F[E[:eq_dq2_t1]] = p.μ_y - y[J[:dq2_t1]] + m2_ξv
F[E[:eq_pq2_t1]] = p.μ_y - y[J[:pq2_t1]] + m2_ξv
F[E[:eq_dω1_t0]] = y[J[:dω1_t0]]
F[E[:eq_pω1_t1]] = p.μ_y - y[J[:pω1_t1]] + m1_ξv
F[E[:eq_dω2_t0]] = y[J[:dω2_t0]]
F[E[:eq_pω2_t1]] = p.μ_y - y[J[:pω2_t1]] + m2_ξv
# Recursions for forward-difference equations
for i in 2:N_approx[:q1]
F[E[Symbol("eq_dq1_t$(i)")]] = p.μ_y - y[J[Symbol("dq1_t$(i)")]] + m1_ξv
F[E[Symbol("eq_pq1_t$(i)")]] = p.μ_y - y[J[Symbol("pq1_t$(i)")]] + m1_ξv
end
for i in 2:N_approx[:q2]
F[E[Symbol("eq_dq2_t$(i)")]] = p.μ_y - y[J[Symbol("dq2_t$(i)")]] + m2_ξv
F[E[Symbol("eq_pq2_t$(i)")]] = p.μ_y - y[J[Symbol("pq2_t$(i)")]] + m2_ξv
end
for i in 2:N_approx[:ω1]
F[E[Symbol("eq_dω1_t$(i-1)")]] = p.μ_y - y[J[Symbol("dω1_t$(i-1)")]] + m1_ξv
F[E[Symbol("eq_pω1_t$(i)")]] = p.μ_y - y[J[Symbol("pω1_t$(i)")]] + m1_ξv
end
for i in 2:N_approx[:ω2]
F[E[Symbol("eq_dω2_t$(i-1)")]] = p.μ_y - y[J[Symbol("dω2_t$(i-1)")]] + m2_ξv
F[E[Symbol("eq_pω2_t$(i)")]] = p.μ_y - y[J[Symbol("pω2_t$(i)")]] + m2_ξv
end
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Λ(F, z)
# Heteroskedastic risk arises b/c the wealth share is a state variable
F[W1, Θ1] = p.λ₁
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F[yy, ε_y] = sqrt(z[σ²_y]) # Take square root b/c Σ is not a variance-covariance matrix.
F[x, ε_x] = sqrt(z[σ²_y]) * p.σ_x # It is instead the "volatility" loading on the martingale difference sequences.
F[σ²_y, ε_σ²] = sqrt(z[σ²_y]) * p.ς
end
function ccgf(F, A, z)
# F .= .5 * diag(α * α') # slower but this is the underlying math
sum!(F, A.^2) # faster implementation
F .*= .5
end
if sparse_arrays
Γ₅ = spzeros(T, Ny, Nz)
Γ₆ = spzeros(T, Ny, Ny)
else
Γ₅ = zeros(T, Ny, Nz)
Γ₆ = zeros(T, Ny, Ny)
end
# Equations other than forward difference equations
m1_fwd!(euler1, Γ₅, Γ₆)
m2_fwd!(euler2, Γ₅, Γ₆)
Γ₆[expected_wealth_per_agent1, logΘ1] = one(T)
# Forward difference equations: boundary conditions
m1_fwd!(E[:eq_dq1_t1], Γ₅, Γ₆)
Γ₅[E[:eq_dq1_t1], yy] = one(T)
m1_fwd!(E[:eq_pq1_t1], Γ₅, Γ₆)
Γ₅[E[:eq_pq1_t1], yy] = one(T)
Γ₆[E[:eq_pq1_t1], q] = one(T)
m1_fwd!(E[:eq_pω1_t1], Γ₅, Γ₆)
Γ₅[E[:eq_pω1_t1], yy] = one(T)
Γ₆[E[:eq_pω1_t1], c1] = one(T)
Γ₆[E[:eq_pω1_t1], ω1] = one(T)
m2_fwd!(E[:eq_dq2_t1], Γ₅, Γ₆)
Γ₅[E[:eq_dq2_t1], yy] = one(T)
m2_fwd!(E[:eq_pq2_t1], Γ₅, Γ₆)
Γ₅[E[:eq_pq2_t1], yy] = one(T)
Γ₆[E[:eq_pq2_t1], q] = one(T)
m2_fwd!(E[:eq_pω2_t1], Γ₅, Γ₆)
Γ₅[E[:eq_pω2_t1], yy] = one(T)
Γ₆[E[:eq_pω2_t1], c2] = one(T)
Γ₆[E[:eq_pω2_t1], ω2] = one(T)
# Forward difference equations: recursions
for i in 2:N_approx[:q1]
m1_fwd!(E[Symbol("eq_dq1_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dq1_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_dq1_t$(i)")], J[Symbol("dq1_t$(i-1)")]] = one(T)
m1_fwd!(E[Symbol("eq_pq1_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pq1_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pq1_t$(i)")], J[Symbol("pq1_t$(i-1)")]] = one(T)
end
for i in 2:N_approx[:ω1]
m1_fwd!(E[Symbol("eq_dω1_t$(i-1)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dω1_t$(i-1)")], yy] = one(T)
Γ₆[E[Symbol("eq_dω1_t$(i-1)")], c1] = one(T)
Γ₆[E[Symbol("eq_dω1_t$(i-1)")], J[Symbol("dω1_t$(i-2)")]] = one(T)
m1_fwd!(E[Symbol("eq_pω1_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pω1_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pω1_t$(i)")], c1] = one(T)
Γ₆[E[Symbol("eq_pω1_t$(i)")], J[Symbol("pω1_t$(i-1)")]] = one(T)
end
for i in 2:N_approx[:q2]
m2_fwd!(E[Symbol("eq_dq2_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dq2_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_dq2_t$(i)")], J[Symbol("dq2_t$(i-1)")]] = one(T)
m2_fwd!(E[Symbol("eq_pq2_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pq2_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pq2_t$(i)")], J[Symbol("pq2_t$(i-1)")]] = one(T)
end
for i in 2:N_approx[:ω2]
m2_fwd!(E[Symbol("eq_dω2_t$(i-1)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dω2_t$(i-1)")], yy] = one(T)
Γ₆[E[Symbol("eq_dω2_t$(i-1)")], c2] = one(T)
Γ₆[E[Symbol("eq_dω2_t$(i-1)")], J[Symbol("dω2_t$(i-2)")]] = one(T)
m2_fwd!(E[Symbol("eq_pω2_t$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pω2_t$(i)")], yy] = one(T)
Γ₆[E[Symbol("eq_pω2_t$(i)")], c2] = one(T)
Γ₆[E[Symbol("eq_pω2_t$(i)")], J[Symbol("pω2_t$(i-1)")]] = one(T)
end
z, y, Ψ = create_guess(m, (isnothing(m_rep) ?
BansalYaron2004(μ_y = p.μ_y, ρ_x = p.ρ_x, σ_x = p.σ_x,
ρ_σ = p.ρ_σ, σ_y = p.σ_y, ς = p.ς,
β = p.β, ψ = p.ψ, γ = p.λ₁ * p.γ₁ + (1. - p.λ₁) * p.γ₂,
N_approx = LVector(q = N_approx[:q1], ω = N_approx[:ω1])) : m_rep); algorithm = algorithm)
if sparse_arrays
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian,
Λ_cache_init = dims -> spzeros(dims...),
Σ_cache_init = dims -> Diagonal(Vector{eltype(z)}(undef, dims[1])))
else
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian)
end
end
function create_guess(m::HetRiskAversion{T}, m_rep::BansalYaron2004; algorithm::Symbol = :relaxation) where {T <: Real}
## Set up
# Get guesses from BansalYaron2004 representative agent model
ral_rep = bansal_yaron_2004(m_rep)
solve!(ral_rep, algorithm = algorithm, verbose = :none)
yrep = ral_rep.y
zrep = ral_rep.z
Ψrep = ral_rep.Ψ
Jr = m_rep.J
Sr = m_rep.S
# Unpack parameters and indexing dictionaries
@unpack p, N_approx, S, J, E, SH = m
@unpack yy, x, σ²_y, W1, r₋₁, b1₋₁, s1₋₁ = S
@unpack q, v1, v2, ce1, ce2, ω1, ω2, r = J
@unpack c1, c2, b1, b2, s1, s2 = J
# @unpack Θ1, Θ2, logΘ1, logΘ2, Θ1_t1, Θ2_t1, logΘ1, logΘ2 = J
@unpack Θ1, Θ2, logΘ1, Θ1_t1 = J
# Initialize deterministic steady state guesses
Nz = length(S)
Ny = length(J)
z = Vector{T}(undef, Nz)
y = Vector{T}(undef, Ny)
Ψ = zeros(T, Ny, Nz)
## Compute guesses for z, y
# Steady state values of state variables known ex-ante
z[yy] = 0.
z[x] = 0.
z[σ²_y] = p.σ_y^2
# Guesses based on representative agent model's solution
y[ω1] = yrep[Jr[:ω]]
y[ω2] = yrep[Jr[:ω]]
y[q] = yrep[Jr[:q]]
y[v1] = yrep[Jr[:v]]
y[ce1] = yrep[Jr[:ce]]
y[v2] = yrep[Jr[:v]]
y[ce2] = yrep[Jr[:ce]]
# Guesses for consumption and portfolio choice
S1 = (1. / p.γ₁) / (1. / p.γ₁ + 1. / p.γ₂) / p.λ₁
S2 = (1. - p.λ₁ * S1) / (1. - p.λ₁)
C1 = .99 * S1 # .99 just for now b/c heterogeneity is small
C2 = (1. - p.λ₁ * C1) / (1. - p.λ₁)
M0 = m_rep.p.β * (yrep[Jr[:v]] / yrep[Jr[:ce]])^(m_rep.p.ψ - m_rep.p.γ) *
exp(-m_rep.p.γ * m_rep.p.μ_y) # make a guess for SDF
R = 1. / M0 # just guess it's 1 / SDF
Q = exp(yrep[Jr[:q]])
B1 = (S1 * ((1. - p.τ̅₁) - p.τ̅₁ * Q) - C1) / (1. - (1. - p.τ̅₁) * R)
B2 = -p.λ₁ * B1 / (1. - p.λ₁)
τ₂ = -(p.τ̅₁ * (R * B1 + (1 + Q) * S1)) / (R * B2 + (1 + Q) * S2)
Θ10 = (1. - p.τ̅₁) * (S1 + R * B1 / (1. + Q))
Θ20 = (1. - τ₂) * (S2 + R * B2 / (1. + Q))
y[s1] = log(S1)
y[s2] = log(S2)
y[c1] = log(C1)
y[c2] = log(C2)
y[b1] = log(-B1) # agent 1 leveraged -> B10 < 0
y[b2] = log(B2)
y[Θ1] = Θ10
y[Θ2] = Θ20
y[logΘ1] = log(Θ10)
# y[logΘ2] = log(Θ20)
y[Θ1_t1] = Θ10 # in a steady state, the one-period ahead expectation agrees with the current day value.
# y[Θ2_t1] = Θ20
y[r] = log(R)
z[W1] = p.λ₁ * Θ10
z[r₋₁] = y[r]
z[b1₋₁] = y[b1]
z[s1₋₁] = y[s1]
M10 = m_rep.p.β * (yrep[Jr[:v]] / yrep[Jr[:ce]])^(m_rep.p.ψ - p.γ₁) * # use agent 1's risk aversion here
exp(-m_rep.p.γ * m_rep.p.μ_y) # make a guess for SDF
M20 = m_rep.p.β * (yrep[Jr[:v]] / yrep[Jr[:ce]])^(m_rep.p.ψ - p.γ₂) * # use agent 2's risk aversion here
exp(-m_rep.p.γ * m_rep.p.μ_y) # make a guess for SDF
Y0 = 1. # steady state growth rate in endowment
y[J[:dq1_t1]] = convert(T, log(exp(p.μ_y) * M10 * Y0))
y[J[:pq1_t1]] = convert(T, log(exp(p.μ_y) * M10 * Y0 * Q))
y[J[:dω1_t0]] = zero(T)
y[J[:pω1_t1]] = convert(T, log(exp(p.μ_y) * M10 * Y0 * exp(y[ω1])))
y[J[:dq2_t1]] = convert(T, log(exp(p.μ_y) * M20 * Y0))
y[J[:pq2_t1]] = convert(T, log(exp(p.μ_y) * M20 * Y0 * Q))
y[J[:dω2_t0]] = zero(T)
y[J[:pω2_t1]] = convert(T, log(exp(p.μ_y) * M20 * Y0 * exp(y[ω2])))
for i in 2:N_approx[:q1]
y[J[Symbol("dq1_t$(i)")]] = convert(T, log(M10) + μ_y + log(Y0) + y[J[Symbol("dq1_t$(i-1)")]])
y[J[Symbol("pq1_t$(i)")]] = convert(T, log(M10) + μ_y + log(Y0) + y[J[Symbol("pq1_t$(i-1)")]])
end
for i in 2:N_approx[:q2]
y[J[Symbol("dq2_t$(i)")]] = convert(T, log(M20) + μ_y + log(Y0) + y[J[Symbol("dq2_t$(i-1)")]])
y[J[Symbol("pq2_t$(i)")]] = convert(T, log(M20) + μ_y + log(Y0) + y[J[Symbol("pq2_t$(i-1)")]])
end
for i in 2:N_approx[:ω1]
y[J[Symbol("dω1_t$(i-1)")]] = convert(T, μ_y + log(M10) + log(Y0) + y[J[Symbol("dω1_t$(i-2)")]])
y[J[Symbol("pω1_t$(i)")]] = convert(T, μ_y + log(M10) + log(Y0) + y[J[Symbol("pω1_t$(i-1)")]])
end
for i in 2:N_approx[:ω2]
y[J[Symbol("dω2_t$(i-1)")]] = convert(T, μ_y + log(M20) + log(Y0) + y[J[Symbol("dω2_t$(i-2)")]])
y[J[Symbol("pω2_t$(i)")]] = convert(T, μ_y + log(M20) + log(Y0) + y[J[Symbol("pω2_t$(i-1)")]])
end
## Populate Ψ
# Parts inferred from representative agent model
Ψ[q, 1:3] = Ψrep[Jr[:q], 1:3] # indexes 1-3 are the common states for yy, x, and σ²_y
Ψ[v1, 1:3] = Ψrep[Jr[:v], 1:3]
Ψ[v2, 1:3] = Ψrep[Jr[:v], 1:3]
Ψ[ce1, 1:3] = Ψrep[Jr[:ce], 1:3]
Ψ[ce2, 1:3] = Ψrep[Jr[:ce], 1:3]
Ψ[ω1, 1:3] = Ψrep[Jr[:ω], 1:3]
Ψ[ω2, 1:3] = Ψrep[Jr[:ω], 1:3]
for i in 2:N_approx[:q1]
Ψ[J[Symbol("dq1_t$(i)")], 1:3] = Ψrep[Jr[Symbol("dq$(i)")], 1:3]
Ψ[J[Symbol("pq1_t$(i)")], 1:3] = Ψrep[Jr[Symbol("pq$(i)")], 1:3]
end
for i in 2:N_approx[:q2]
Ψ[J[Symbol("dq2_t$(i)")], 1:3] = Ψrep[Jr[Symbol("dq$(i)")], 1:3]
Ψ[J[Symbol("pq2_t$(i)")], 1:3] = Ψrep[Jr[Symbol("pq$(i)")], 1:3]
end
for i in 2:N_approx[:ω1]
Ψ[J[Symbol("dω1_t$(i-1)")], 1:3] = Ψrep[Jr[Symbol("dω$(i-1)")], 1:3]
Ψ[J[Symbol("pω1_t$(i)")], 1:3] = Ψrep[Jr[Symbol("pω$(i)")], 1:3]
end
for i in 2:N_approx[:ω2]
Ψ[J[Symbol("dω2_t$(i-1)")], 1:3] = Ψrep[Jr[Symbol("dω$(i-1)")], 1:3]
Ψ[J[Symbol("pω2_t$(i)")], 1:3] = Ψrep[Jr[Symbol("pω$(i)")], 1:3]
end
# Dependence of the jumps on the wealth share and lagged states is left at zero in the initial guess for Ψ
return z, y, Ψ
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2816 | using BenchmarkTools, RiskAdjustedLinearizations, MATLAB
include(joinpath(dirname(@__FILE__), "..", "rbc_cc", "rbc_cc.jl"))
# Settings: what do you want to do?
autodiff = false
time_matlab = true
# Set up
autodiff_method = autodiff ? :forward : :central
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc, 0)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Use deterministic steady state as guesses
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
zdet = copy(m.z)
ydet = copy(m.y)
Ψdet = copy(m.Ψ)
if time_matlab
println("Relaxation algorithm in MATLAB")
mat"""
genaffine_rbc_cc_relaxation;
"""
end
println("Relaxation algorithm in Julia")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method, verbose = :none)
end
println("Relaxation algorithm with Anderson acceleration")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, use_anderson = true, m = 3, autodiff = autodiff_method, verbose = :none)
end
sparsity, colorvec = compute_sparsity_pattern(m, :relaxation; sparsity_detection = false)
jac_cache = preallocate_jac_cache(m, :relaxation; sparsity_detection = false)
println("Relaxation with matrix coloring of sparse Jacobians")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity, colorvec = colorvec, verbose = :none)
end
println("Relaxation with matrix coloring of sparse Jacobians and caching")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache, verbose = :none)
end
if time_matlab
println("Homotopy algorithm in MATLAB")
mat"""
genaffine_rbc_cc_homotopy;
"""
end
println("Homotopy algorithm in Julia")
@btime begin # called the "continuation" method in the original paper, but is called homotopy in the original code
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method, verbose = :none)
end
sparsity, colorvec = compute_sparsity_pattern(m, :homotopy; sparsity_detection = false)
jac_cache = preallocate_jac_cache(m, :homotopy; sparsity_detection = false)
println("Homotopy with matrix coloring of sparse Jacobians")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity, colorvec = colorvec, verbose = :none)
end
println("Homotopy with matrix coloring of sparse Jacobians and caching")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache, verbose = :none)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2914 | using BenchmarkTools, RiskAdjustedLinearizations, MATLAB
include(joinpath(dirname(@__FILE__), "..", "wachter_disaster_risk", "wachter.jl"))
# Settings: what do you want to do?
autodiff = false
time_matlab = true
# Set up
autodiff_method = autodiff ? :forward : :central
m_wachter = WachterDisasterRisk()
m = inplace_wachter_disaster_risk(m_wachter)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Use deterministic steady state as guesses
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
zdet = copy(m.z)
ydet = copy(m.y)
Ψdet = copy(m.Ψ)
if time_matlab
println("Relaxation algorithm in MATLAB")
mat"""
genaffine_wac_disaster_relaxation;
"""
end
println("Relaxation algorithm in Julia")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method, verbose = :none)
end
println("Relaxation algorithm with Anderson acceleration")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, use_anderson = true, m = 3, autodiff = autodiff_method, verbose = :none)
end
sparsity, colorvec = compute_sparsity_pattern(m, :relaxation; sparsity_detection = false)
jac_cache = preallocate_jac_cache(m, :relaxation; sparsity_detection = false)
println("Relaxation with matrix coloring of sparse Jacobians")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity, colorvec = colorvec, verbose = :none)
end
println("Relaxation with matrix coloring of sparse Jacobians and caching")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache, verbose = :none)
end
if time_matlab
println("Homotopy algorithm in MATLAB")
mat"""
genaffine_wac_disaster_homotopy;
"""
end
println("Homotopy algorithm in Julia")
@btime begin # called the "continuation" method in the original paper, but is called homotopy in the original code
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method, verbose = :none)
end
sparsity, colorvec = compute_sparsity_pattern(m, :homotopy; sparsity_detection = false)
jac_cache = preallocate_jac_cache(m, :homotopy; sparsity_detection = false)
println("Homotopy with matrix coloring of sparse Jacobians")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity, colorvec = colorvec, verbose = :none)
end
println("Homotopy with matrix coloring of sparse Jacobians and caching")
@btime begin
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache, verbose = :none)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 27933 | using UnPack, OrderedCollections, ForwardDiff, JLD2, NLsolve
mutable struct NKEZDisaster{T <: Real, S, N}
β::T
γ::T
ψ::T
ν::T
ν̅::T
χ::T
δ::T
α::T
ϵ::T
θ::T
π_ss::T
ϕ_r::T
ϕ_π::T
ϕ_y::T
χ_y::T
ρ_β::T
ρ_l::T
ρ_r::T
σ_β::T
σ_l::T
σ_r::T
μ_a::T
σ_a::T
κ_a::T
disaster_occur_spec::Symbol
disaster_intensity_spec::Symbol
disaster_para::NamedTuple{S, NTuple{N, T}}
N_approx::NamedTuple{(:q, :s₁, :s₂, :ω), NTuple{4, Int}}
S::OrderedDict{Symbol, Int}
J::OrderedDict{Symbol, Int}
E::OrderedDict{Symbol, Int}
SH::OrderedDict{Symbol, Int}
end
# Absent a better way, I assume (1) each specification of disaster risk
# has a unique name and (2) disaster_para has correctly named parameters,
# given the specification's name. To see the process implied by
# `disaster_occur_spec` and `disaster_intensity_spec`, see
# the functions `infer_ccgf` and `infer_X̅` at the end of this file.
function NKEZDisaster(disaster_occur_spec::Symbol = :PoissonNormalMixture,
disaster_intensity_spec::Symbol = :CoxIngersollRoss,
disaster_para::NamedTuple{S1, NTuple{N1, T}} =
(σ_k = .01, ρ_p = .08^(1. / 4.), p = .0355 / 4., σ_p = .0114 / 4. / (.02 / sqrt(4.)) / sqrt(.0355 / 4.));
β::T = .99, γ::T = 3.8, ψ::T = 1. / .75, ν::T = 1., ν̅::T = 0.72,
χ::T = 4., δ::T = 0.025, α::T = 0.33, ϵ::T = 10., θ::T = 0.7,
π_ss::T = 0., ϕ_r::T = 0.5, ϕ_π::T = 1.3, ϕ_y::T = 0.25,
χ_y::T = 1.6, ρ_β::T = 0.1, ρ_l::T = 0.1, ρ_r::T = 0.,
σ_β::T = sqrt((log(β) / 4.)^2 * (1. - ρ_β^2)),
σ_l::T = 0.01, σ_r::T = 0.01, μ_a::T = 0.0125,
σ_a::T = 0.01, κ_a::T = 1.,
N_approx::NamedTuple{(:q, :s₁, :s₂, :ω), NTuple{4, Int}} =
(q = 1, s₁ = 1, s₂ = 1, ω = 1)) where {T <: Real, S1, N1}
@assert all(N_approx[k] > 0 for k in keys(N_approx)) "N_approx must be at least 1 for all variables."
## Create Indexing dictionaries.
# Note that for the exogenous shock
# state variables, instead of e.g. η_L and η_A, I use η_l and η_a
# since the uppercase variable will not appear in the jumps/states.
S_init = [:k₋₁, :logΔ₋₁, :r₋₁, :output₋₁, :η_β, :η_l, :η_r, :a, :η_k] # State Variables
J_init = [:output, :c, :l, :v, :ce, :ω, :ℓ, :β̅, :w, :r, :π, :q, :x,
:rk, :rq, :mc, :s₁, :s₂, :logΔ] # Jump variables
E_init = [:value_fnct, :certainty_equiv, :ez_fwd_diff,
:eq_β̅, :wage, :labor_disutility, :euler, :cap_ret,
:eq_mc, :kl_ratio, :eq_s₁, :eq_s₂,
:tobin, :eq_rq, :phillips_curve, :price_dispersion,
:mp, :output_market_clear, :production] # Equations
SH_init = [:ε_β, :ε_l, :ε_r, :ε_a, :ε_k, :ε_p] # Exogenous shocks
# Add approximations for forward-difference equations
for var in [:q, :s₁, :s₂, :ω]
inds = (var == :q) ? (1:N_approx[var]) : (0:(N_approx[var] - 1))
push!(J_init, [Symbol(:d, var, "$(i)") for i in inds]...)
push!(J_init, [Symbol(:p, var, "$(i)") for i in 1:N_approx[var]]...)
push!(E_init, [Symbol(:eq_d, var, "$(i)") for i in inds]...)
push!(E_init, [Symbol(:eq_p, var, "$(i)") for i in 1:N_approx[var]]...)
end
# Specify random process(es) for whether a disaster occurs or not
if disaster_occur_spec in [:PoissonNormalMixture, :Bernoulli]
# Nothing needs to be added to the state vector for these specifications
end
# Specify random process(es) for "intensity" (size or frequency) of a disaster.
if disaster_intensity_spec in [:CoxIngersollRoss, :TwoStateMarkovChain,
:TruncatedCoxIngersollRoss]
push!(S_init, :p)
elseif disaster_intensity_spec in [:LogAR1]
push!(S_init, :logp)
end
S = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(S_init))
J = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(J_init))
E = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(E_init))
SH = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(SH_init))
return NKEZDisaster(β, γ, ψ, ν, ν̅, χ, δ, α, ϵ, θ, π_ss, ϕ_r, ϕ_π, ϕ_y,
χ_y, ρ_β, ρ_l, ρ_r, σ_β, σ_l, σ_r, μ_a, σ_a, κ_a,
disaster_occur_spec, disaster_intensity_spec, disaster_para,
N_approx, S, J, E, SH)
end
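# Usage sketch (illustrative, not part of the original file): the positional arguments
# select the disaster occurrence and intensity processes, and `disaster_para` must carry
# the parameter names those specifications read (see infer_𝔼η_k, infer_μ_disaster_intensity,
# and infer_ccgf below). For example, a Bernoulli occurrence paired with a two-state
# Markov chain intensity would be constructed along these lines (values are placeholders):
#
#     m_bern = NKEZDisaster(:Bernoulli, :TwoStateMarkovChain,
#                           (η_ = -.15, p_ = .005, p̅ = .05, ρ_p = .95, ρ̅_p = .9))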
function nk_ez_disaster(m::NKEZDisaster{T, SNK, NNK}) where {T <: Real, SNK, NNK}
# Get parameters
@unpack β, γ, ψ, ν, ν̅, χ, δ, α, ϵ, θ, π_ss, ϕ_r, ϕ_π, ϕ_y = m
@unpack χ_y, ρ_β, ρ_l, ρ_r, σ_β, σ_l, σ_r, μ_a, σ_a, κ_a = m
@unpack disaster_occur_spec, disaster_intensity_spec, disaster_para = m
r_ss = infer_r_ss(m)
X̅ = infer_X̅(m)
𝔼η_k = infer_𝔼η_k(m)
# Unpack indexing dictionaries
@unpack N_approx, S, J, E, SH = m
@unpack k₋₁, logΔ₋₁, r₋₁, output₋₁, η_β, η_l, η_r, a, η_k = S
@unpack output, c, l, v, ce, ω, ℓ, β̅, w, r = J
@unpack π, q, x, rk, rq, mc, s₁, s₂, logΔ = J
@unpack value_fnct, certainty_equiv, ez_fwd_diff = E
@unpack eq_β̅, wage, labor_disutility, euler, cap_ret, eq_mc = E
@unpack kl_ratio, eq_s₁, eq_s₂, tobin, eq_rq = E
@unpack phillips_curve, price_dispersion, mp = E
@unpack output_market_clear, production = E
@unpack ε_β, ε_l, ε_r, ε_a, ε_k, ε_p = SH
if disaster_intensity_spec in [:CoxIngersollRoss, :TwoStateMarkovChain,
:TruncatedCoxIngersollRoss]
p = m.S[:p]
disaster_intensity_var = p
elseif disaster_intensity_spec in [:LogAR1]
logp = m.S[:logp]
disaster_intensity_var = logp
end
Nz = length(S)
Ny = length(J)
Nε = length(SH)
## Define nonlinear equations
# Some helper functions
_Φ(Xin, Kin) = X̅ ^ (1. / χ) / (1. - 1. / χ) * (Xin / Kin) ^ (1. - 1. / χ) - X̅ / (χ * (χ - 1.))
_Φ′(Xin, Kin) = X̅ ^ (1. / χ) * (Xin / Kin) ^ (- 1. / χ)
Φ(z, y) = _Φ(exp(y[x]), exp(z[η_k] + z[k₋₁]))
Φ′(z, y) = _Φ′(exp(y[x]), exp(z[η_k] + z[k₋₁]))
m_ξ(z, y) = z[η_β] + log(β) - y[β̅] + γ * y[c] -
(1. - γ) * y[ℓ] - (ψ - γ) * y[ce] - γ * μ_a
μ_y_bgp(z, y) = μ_a + κ_a * 𝔼η_k # calculate growth rate of output along balanced growth path
function m_fwd!(i, Γ₅, Γ₆)
Γ₆[i, β̅] = 1.
Γ₅[i, a] = -γ
Γ₆[i, c] = -γ
Γ₆[i, ℓ] = (1. - γ)
Γ₆[i, v] = (ψ - γ)
end
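# Note: m_fwd! loads the time-(t+1) components of the Epstein-Zin log SDF
# (+β̅ₜ₊₁, -γ aₜ₊₁, -γ cₜ₊₁, +(1 - γ) ℓₜ₊₁, +(ψ - γ) vₜ₊₁) into row i of Γ₅ / Γ₆;
# the time-t components enter through m_ξ above.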
pstar(y) = log(ϵ / (ϵ - 1.)) + y[s₁] - y[s₂]
μ_η_k = infer_μ_disaster_occur(m)
μ_disi = infer_μ_disaster_intensity(m)
Σ_disi = infer_Σ_disaster_intensity(m)
function μ(F, z, y)
# Expected value of η_k conditional on time t
μ_η_k_v = μ_η_k(z, y)
F[k₋₁] = log(1. - δ + Φ(z, y)) + z[η_k] + z[k₋₁]
F[logΔ₋₁] = y[logΔ]
F[r₋₁] = y[r]
F[output₋₁] = y[output]
F[η_β] = ρ_β * z[η_β]
F[η_l] = ρ_l * z[η_l]
F[η_r] = ρ_r * z[η_r]
F[a] = κ_a * μ_η_k_v
F[η_k] = μ_η_k_v
F[disaster_intensity_var] = μ_disi(z, y)
end
function ξ(F, z, y)
F_type = eltype(F)
## Pre-evaluate (just once) some terms
Φv = Φ(z, y)
Φ′v = Φ′(z, y)
pstarv = pstar(y)
m_ξv = m_ξ(z, y)
## Non-forward-difference equations
F[value_fnct] = 1. / (1. - ψ) * (y[β̅] + y[ω]) - y[v]
F[certainty_equiv] = 1. / (1. - ψ) * (y[β̅] - (z[η_β] + log(β)) + log(exp(y[ω]) - 1.)) - y[ce]
F[wage] = log(ψ) + z[η_l] + log(ν̅) + y[c] + ν * y[l] - (1. - ψ) / ψ * y[ℓ] - y[w]
F[labor_disutility] = ψ / (1. - ψ) * log(1. + (ψ - 1.) * exp(z[η_l]) * ν̅ *
exp((1. + ν) * y[l]) / (1. + ν)) - y[ℓ]
F[euler] = y[r] + m_ξv
F[eq_mc] = (1. - α) * (y[w] - log(1. - α)) + α * (y[rk] - log(α)) - y[mc]
F[kl_ratio] = log(α) - log(1. - α) + y[w] - y[rk] - (z[η_k] + z[k₋₁] - y[l])
F[tobin] = log(Φ′v) + y[q]
F[eq_rq] = log(1. - δ + Φv - Φ′v * exp(y[x] - (z[η_k] + z[k₋₁]))) - y[rq]
F[phillips_curve] = (1. - ϵ) * y[π] - log((1. - θ) * exp((1. - ϵ) * (pstarv + y[π])) + θ)
F[price_dispersion] = y[logΔ] - ϵ * y[π] - log((1. - θ) * exp(-ϵ * (pstarv + y[π])) + θ * exp(z[logΔ₋₁]))
F[mp] = (1. - ϕ_r) * r_ss + ϕ_r * z[r₋₁] +
(1. - ϕ_r) .* (ϕ_π * (y[π] - π_ss) + ϕ_y *
(y[output] - z[output₋₁] + (μ_a + z[a] - μ_y_bgp(z, y)))) + z[η_r] - y[r]
F[output_market_clear] = y[output] - log(exp(y[c]) + exp(y[x]))
F[production] = log(exp(α * (z[η_k] + z[k₋₁]) + (1. - α) * y[l]) - χ_y) - y[logΔ] - y[output]
F[eq_β̅] = log(1. - exp(z[η_β]) * β) - y[β̅]
## Forward-difference equations separately handled b/c recursions
F[cap_ret] = y[q] - log(sum([exp(y[J[Symbol("dq$(i)")]]) for i in 1:N_approx[:q]]) +
exp(y[J[Symbol("pq$(N_approx[:q])")]]))
F[eq_s₁] = y[s₁] - log(sum([exp(y[J[Symbol("ds₁$(i)")]]) for i in 0:(N_approx[:s₁] - 1)]) +
exp(y[J[Symbol("ps₁$(N_approx[:s₁])")]]))
F[eq_s₂] = y[s₂] - log(sum([exp(y[J[Symbol("ds₂$(i)")]]) for i in 0:(N_approx[:s₂] - 1)]) +
exp(y[J[Symbol("ps₂$(N_approx[:s₂])")]]))
F[ez_fwd_diff] = y[ω] - log(sum([exp(y[J[Symbol("dω$(i)")]]) for i in 0:(N_approx[:ω] - 1)]) +
exp(y[J[Symbol("pω$(N_approx[:ω])")]]))
# Set initial boundary conditions
F[E[:eq_dq1]] = -y[J[:dq1]] + m_ξv
F[E[:eq_pq1]] = -y[J[:pq1]] + m_ξv
F[E[:eq_ds₁0]] = y[J[:ds₁0]] - y[mc] - y[output]
F[E[:eq_ps₁1]] = μ_a + log(θ) - y[J[:ps₁1]] + m_ξv
F[E[:eq_ds₂0]] = y[J[:ds₂0]] - y[output]
F[E[:eq_ps₂1]] = μ_a + log(θ) - y[J[:ps₂1]] + m_ξv
F[E[:eq_dω0]] = y[J[:dω0]]
F[E[:eq_pω1]] = μ_a - y[c] - y[J[:pω1]] + m_ξv
# Recursions for forward-difference equations
for i in 2:N_approx[:q]
F[E[Symbol("eq_dq$(i)")]] = -y[J[Symbol("dq$(i)")]] + m_ξv
F[E[Symbol("eq_pq$(i)")]] = -y[J[Symbol("pq$(i)")]] + m_ξv
end
for i in 2:N_approx[:s₁]
F[E[Symbol("eq_ds₁$(i-1)")]] = μ_a + log(θ) - y[J[Symbol("ds₁$(i-1)")]] + m_ξv
F[E[Symbol("eq_ps₁$(i)")]] = μ_a + log(θ) - y[J[Symbol("ps₁$(i)")]] + m_ξv
end
for i in 2:N_approx[:s₂]
F[E[Symbol("eq_ds₂$(i-1)")]] = μ_a + log(θ) - y[J[Symbol("ds₂$(i-1)")]] + m_ξv
F[E[Symbol("eq_ps₂$(i)")]] = μ_a + log(θ) - y[J[Symbol("ps₂$(i)")]] + m_ξv
end
for i in 2:N_approx[:ω]
F[E[Symbol("eq_dω$(i-1)")]] = μ_a - y[c] - y[J[Symbol("dω$(i-1)")]] + m_ξv
F[E[Symbol("eq_pω$(i)")]] = μ_a - y[c] - y[J[Symbol("pω$(i)")]] + m_ξv
end
end
# The cache is initialized as zeros so we only need to fill non-zero elements
Λ = zeros(T, Nz, Ny)
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F_type = eltype(F)
# AR(1) processes
F[η_β, ε_β] = σ_β
F[η_l, ε_l] = σ_l
F[η_r, ε_r] = σ_r
# Productivity process
F[a, ε_a] = σ_a
F[a, ε_k] = κ_a
# Disaster risk
F[η_k, ε_k] = one(F_type)
F[disaster_intensity_var, ε_p] = Σ_disi(z)
end
ccgf = infer_ccgf(m)
## Forward-looking variables
Γ₅ = zeros(T, Ny, Nz)
Γ₆ = zeros(T, Ny, Ny)
# Equations w/out SDF terms and are not forward-difference equations
Γ₆[euler, π] = -one(T)
# Equations with SDF terms but are not forward-difference equations
m_fwd!(euler, Γ₅, Γ₆)
# Forward difference equations: boundary conditions
m_fwd!(E[:eq_dq1], Γ₅, Γ₆)
Γ₅[E[:eq_dq1], η_k] = one(T)
Γ₆[E[:eq_dq1], rk] = one(T)
m_fwd!(E[:eq_pq1], Γ₅, Γ₆)
Γ₅[E[:eq_pq1], η_k] = one(T)
Γ₆[E[:eq_pq1], q] = one(T)
Γ₆[E[:eq_pq1], rq] = one(T)
m_fwd!(E[:eq_ps₁1], Γ₅, Γ₆)
Γ₅[E[:eq_ps₁1], a] = one(T)
Γ₆[E[:eq_ps₁1], π] = convert(T, ϵ)
Γ₆[E[:eq_ps₁1], s₁] = one(T)
m_fwd!(E[:eq_ps₂1], Γ₅, Γ₆)
Γ₅[E[:eq_ps₂1], a] = one(T)
Γ₆[E[:eq_ps₂1], π] = convert(T, ϵ - 1.)
Γ₆[E[:eq_ps₂1], s₂] = one(T)
m_fwd!(E[:eq_pω1], Γ₅, Γ₆)
Γ₆[E[:eq_pω1], c] = one(T)
Γ₅[E[:eq_pω1], a] = one(T)
Γ₆[E[:eq_pω1], ω] = one(T)
# Forward difference equations: recursions
for i in 2:N_approx[:q]
m_fwd!(E[Symbol("eq_dq$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_dq$(i)")], η_k] = one(T)
Γ₆[E[Symbol("eq_dq$(i)")], rq] = one(T)
Γ₆[E[Symbol("eq_dq$(i)")], J[Symbol("dq$(i-1)")]] = one(T)
m_fwd!(E[Symbol("eq_pq$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_pq$(i)")], η_k] = one(T)
Γ₆[E[Symbol("eq_pq$(i)")], rq] = one(T)
Γ₆[E[Symbol("eq_pq$(i)")], J[Symbol("pq$(i-1)")]] = one(T)
end
for i in 2:N_approx[:s₁]
m_fwd!(E[Symbol("eq_ds₁$(i-1)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_ds₁$(i-1)")], a] = one(T)
Γ₆[E[Symbol("eq_ds₁$(i-1)")], π] = convert(T, ϵ)
Γ₆[E[Symbol("eq_ds₁$(i-1)")], J[Symbol("ds₁$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_ps₁$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_ps₁$(i)")], a] = one(T)
Γ₆[E[Symbol("eq_ps₁$(i)")], π] = convert(T, ϵ)
Γ₆[E[Symbol("eq_ps₁$(i)")], J[Symbol("ps₁$(i-1)")]] = one(T)
end
for i in 2:N_approx[:s₂]
m_fwd!(E[Symbol("eq_ds₂$(i-1)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_ds₂$(i-1)")], a] = one(T)
Γ₆[E[Symbol("eq_ds₂$(i-1)")], π] = convert(T, ϵ) - one(T)
Γ₆[E[Symbol("eq_ds₂$(i-1)")], J[Symbol("ds₂$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_ps₂$(i)")], Γ₅, Γ₆)
Γ₅[E[Symbol("eq_ps₂$(i)")], a] = one(T)
Γ₆[E[Symbol("eq_ps₂$(i)")], π] = convert(T, ϵ) - one(T)
Γ₆[E[Symbol("eq_ps₂$(i)")], J[Symbol("ps₂$(i-1)")]] = one(T)
end
for i in 2:N_approx[:ω]
m_fwd!(E[Symbol("eq_dω$(i-1)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_dω$(i-1)")], c] = one(T)
Γ₅[E[Symbol("eq_dω$(i-1)")], a] = one(T)
Γ₆[E[Symbol("eq_dω$(i-1)")], J[Symbol("dω$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_pω$(i)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_pω$(i)")], c] = one(T)
Γ₅[E[Symbol("eq_pω$(i)")], a] = one(T)
Γ₆[E[Symbol("eq_pω$(i)")], J[Symbol("pω$(i-1)")]] = one(T)
end
## Mapping from states to jump variables
Ψ = zeros(T, Ny, Nz)
## Deterministic steady state as initial guess
z, y = create_deterministic_ss_guess(m)
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε)
end
function create_deterministic_ss_guess(m::NKEZDisaster{T, SNK, NNK}) where {T <: Real, SNK, NNK}
## Set up
# Get parameters
@unpack β, γ, ψ, ν, ν̅, χ, δ, α, ϵ, θ, π_ss, ϕ_r, ϕ_π, ϕ_y = m
@unpack χ_y, ρ_β, ρ_l, ρ_r, σ_β, σ_l, σ_r, μ_a, σ_a, κ_a = m
@unpack disaster_occur_spec, disaster_intensity_spec, disaster_para = m
r_ss = infer_r_ss(m)
X̅ = infer_X̅(m)
𝔼η_k = infer_𝔼η_k(m)
# Unpack indexing dictionaries
@unpack N_approx, S, J, E, SH = m
@unpack k₋₁, logΔ₋₁, r₋₁, output₋₁, η_β, η_l, η_r, a, η_k = S
@unpack output, c, l, v, ce, ω, ℓ, β̅, w, r = J
@unpack π, q, x, rk, rq, mc, s₁, s₂, logΔ = J
## Create guesses for deterministic steady state
Nz = length(S)
Ny = length(J)
z = Vector{T}(undef, Nz)
y = Vector{T}(undef, Ny)
# AR(1) start at 0
η_β0 = 0.
η_l0 = 0.
η_r0 = 0.
# Disaster shock assumed to occur deterministically
# and equals the unconditional expected value
η_k0 = 𝔼η_k
A0 = exp(κ_a * η_k0)
# Variables known outright
Ω0 = 1. / (1. - (β * A0 * exp(μ_a)) ^ (1. - ψ))
V0 = ((1. - β) * Ω0) ^ (1. / (1. - ψ))
𝒞ℰ0 = ((1. - β) / β * (Ω0 - 1.)) ^ (1. / (1. - ψ))
M0 = β * (β * Ω0 / (Ω0 - 1.)) ^ ((ψ - γ) / (1. - ψ)) * (A0 * exp(μ_a)) ^ (-γ)
R0 = exp(r_ss)
Q0 = 1.
Rq0 = 1. / exp(η_k0) - X̅
Rk0 = 1. / (M0 * exp(η_k0)) - Rq0
expβ̅ = 1. - exp(η_β0) * β
# Guesses
L0 = .5548
Δ0 = 1. # true if π_ss = 0, otherwise this is only a reasonable guess
ℒ0 = (1. + (ψ - 1.) * exp(η_l0) * ν̅ * L0^(1. + ν) / (1. + ν))^(ψ / (1. - ψ))
# Implied values given guesses
C0_fnct = Cin -> Cin[1] + X̅ * (α / (1. - α) * ψ * ν̅ * Cin[1] * L0^ν / ℒ0^((1. - ψ) / ψ) / Rk0 * L0) -
((α / (1. - α) * ψ * ν̅ * Cin[1] * L0^ν / ℒ0^((1. - ψ) / ψ) / Rk0) ^ α * L0 - χ_y) / Δ0
C0_guess = NaN
for theguess in .5:.5:10.
try
C0_fnct([theguess])
C0_guess = theguess
catch e
end
end
C0 = nlsolve(C0_fnct, [C0_guess]).zero[1]
W0 = ψ * exp(η_l0) * ν̅ * C0 * L0^ν / ℒ0^((1. - ψ) / ψ)
MC0 = (1. / (1. - α)) ^ (1. - α) * (1. / α) ^ α * W0 ^ (1. - α) * Rk0 ^ α
K0 = (α / (1. - α) * W0 / Rk0 * L0) / exp(η_k0)
X0 = X̅ * exp(η_k0) * K0
Y0 = ((exp(η_k0) * K0) ^ α * L0 ^ (1. - α) - χ_y) / Δ0
Π0 = exp(π_ss)
S₁0 = MC0 * Y0 / (1. - exp(μ_a) * θ * M0 * A0 * Π0 ^ ϵ)
S₂0 = Y0 / (1. - exp(μ_a) * θ * M0 * A0 * Π0 ^ (ϵ - 1.))
z[1:9] .= [convert(T, x) for x in log.([K0, Δ0, R0, Y0, exp.([η_β0, η_l0, η_r0, log(A0), η_k0])...])]
# Guess for the disaster intensity state (assumed here to be its unconditional mean;
# written for the CoxIngersollRoss and LogAR1 cases, whose parameters include the mean p)
z[end] = convert(T, disaster_intensity_spec == :LogAR1 ? log(disaster_para[:p]) : disaster_para[:p])
y[1:19] = [convert(T, x) for x in log.([Y0, C0, L0, V0, 𝒞ℰ0, Ω0, ℒ0, expβ̅, W0, R0, Π0, Q0, X0, Rk0, Rq0,
MC0, S₁0, S₂0, Δ0])]
y[J[:dq1]] = convert(T, log(M0 * Rk0))
y[J[:pq1]] = convert(T, log(Rq0 * M0 * Q0))
y[J[:ds₁0]] = convert(T, log(MC0 * Y0))
y[J[:ps₁1]] = convert(T, log(exp(μ_a) * θ * M0 * A0 * Π0^ϵ * S₁0))
y[J[:ds₂0]] = convert(T, log(Y0))
y[J[:ps₂1]] = convert(T, log(exp(μ_a) * θ * M0 * A0 * Π0^(ϵ - 1.) * S₂0))
# NEED TO ADD GUESSES FOR omega
for i in 2:N_approx[:q]
y[J[Symbol("dq$(i)")]] = convert(T, log(M0) + η_k0 + log(Rq0) + y[J[Symbol("dq$(i-1)")]])
y[J[Symbol("pq$(i)")]] = convert(T, log(M0) + η_k0 + log(Rq0) + y[J[Symbol("pq$(i-1)")]])
end
for i in 2:N_approx[:s₁]
y[J[Symbol("ds₁$(i-1)")]] = convert(T, μ_a + log(θ) + log(M0) + log(A0) + ϵ * π_ss + y[J[Symbol("ds₁$(i-2)")]])
y[J[Symbol("ps₁$(i)")]] = convert(T, μ_a + log(θ) + log(M0) + log(A0) + ϵ * π_ss + y[J[Symbol("ps₁$(i-1)")]])
end
for i in 2:N_approx[:s₂]
y[J[Symbol("ds₂$(i-1)")]] = convert(T, μ_a + log(θ) + log(M0) + log(A0) + (ϵ - 1.) * π_ss + y[J[Symbol("ds₂$(i-2)")]])
y[J[Symbol("ps₂$(i)")]] = convert(T, μ_a + log(θ) + log(M0) + log(A0) + (ϵ - 1.) * π_ss + y[J[Symbol("ps₂$(i-1)")]])
end
return z, y
end
# Infer the value of η_k in the stochastic steady state
function infer_𝔼η_k(m::NKEZDisaster)
d = m.disaster_para
𝔼η_k = if m.disaster_occur_spec == :PoissonNormalMixture
# η_{k, t} ∼ N(-jₜ, jₜ σ_k^2), jₜ ∼ Poisson(pₜ₋₁)
# ⇒ 𝔼[ η_{k, t} ] = 𝔼[ 𝔼[η_{k, t} ∣ jₜ] ] = 𝔼[ -jₜ ]
# = -𝔼[ 𝔼[ jₜ ∣ pₜ₋₁] ] = -𝔼[ pₜ₋₁ ].
if m.disaster_intensity_spec == :CoxIngersollRoss
# pₜ₋₁ ∼ discretized CIR process w/unconditional mean p
# -𝔼[ pₜ₋₁ ] = -p
-d[:p]
elseif m.disaster_intensity_spec == :TwoStateMarkovChain
# pₜ₋₁ ∼ Markov Chain with states p_ and p̅;
# (respective) persistence probabilities ρ_ and ρ̅
# -𝔼[ pₜ₋₁ ] = -(ergodic mean)
-((1. - d[:ρ̅_p]) * d[:p_] + (1. - d[:ρ_p]) * d[:p̅]) / (2. - (d[:ρ_p] + d[:ρ̅_p]))
end
elseif m.disaster_occur_spec == :Bernoulli
# η_{k, t} ∼ Bernoulli(pₜ₋₁) taking values η_ w/probability pₜ₋₁ and zero otherwise.
# ⇒ 𝔼[ η_{k, t} ] = 𝔼[ 𝔼[ η_{k, t} ∣ pₜ₋₁ ] ] = η_ 𝔼[ pₜ₋₁]
if m.disaster_intensity_spec == :CoxIngersollRoss
# pₜ₋₁ ∼ discretized CIR process w/unconditional mean p
# η_ 𝔼[ pₜ₋₁ ] = η_ p
d[:η_] * d[:p]
elseif m.disaster_intensity_spec == :TwoStateMarkovChain
# pₜ₋₁ ∼ Markov Chain with states p_ and p̅;
# (respective) persistence probabilities ρ_ and ρ̅
# η_ 𝔼[ pₜ₋₁ ] = η_ (ergodic mean)
d[:η_] * ((1. - d[:ρ̅_p]) * d[:p_] + (1. - d[:ρ_p]) * d[:p̅]) / (2. - (d[:ρ_p] + d[:ρ̅_p]))
end
end
return 𝔼η_k
end
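# Worked example (illustrative): under the default specification
# (:PoissonNormalMixture occurrence with :CoxIngersollRoss intensity and p = .0355 / 4.),
# infer_𝔼η_k returns -p = -.0355 / 4 ≈ -0.0089, i.e. disasters destroy about
# 0.9% of capital per quarter on average.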
# Infer steady state investment rate given the disaster shock specification
function infer_X̅(m::NKEZDisaster)
return m.χ / (m.χ + 1.) * (1. / exp(infer_𝔼η_k(m)) + m.δ - 1.)
end
# Figure out the steady state interest rate
# given EZ preferences and the disaster shock specification
function infer_r_ss(m::NKEZDisaster)
# a = σ_a ε_a + κ_a * η_k
# ⇒ in stochastic steady state, a = κ_a 𝔼[ η_k ]
𝔼η_k = infer_𝔼η_k(m)
A = exp(m.κ_a * 𝔼η_k)
# Stochastic steady state is the expected state,
# conditional on shocks always equaling zero.
Ω̃ = 1. / (1. - (m.β * A * exp(m.μ_a))^(1. - m.ψ))
M = m.β * (m.β * Ω̃ / (Ω̃ - 1.))^((m.ψ - m.γ) / (1. - m.ψ)) * (A * exp(m.μ_a))^(-m.γ)
return m.π_ss - log(M)
end
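# The formula follows from the steady-state Euler equation r = π - log M
# (see F[euler] and m_fwd! above, where inflation enters with a -1 loading),
# with M evaluated at the steady state implied by always-zero shocks.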
# Infer state transition equations for
# the disaster shock occurrence η_k
function infer_μ_disaster_occur(m::NKEZDisaster)
d = m.disaster_para
# Define expected disaster shock proportion to pₜ b/c conditionally linear in pₜ
𝔼ₜη_k_div_pₜ = if m.disaster_occur_spec == :PoissonNormalMixture
# η_{k, t} ∼ N(-jₜ, jₜ σ_k^2), jₜ ∼ Poisson(pₜ₋₁)
# ⇒ 𝔼ₜ[ η_{k, t + 1} ] = 𝔼ₜ[ 𝔼[η_{k, t + 1} ∣ jₜ₊₁] ] = 𝔼ₜ[ -jₜ₊₁ ]
# = -𝔼ₜ[ 𝔼[ jₜ₊₁ ∣ pₜ] ] = -𝔼ₜ[ pₜ ] = -pₜ.
-1.
elseif m.disaster_occur_spec == :Bernoulli
# η_{k, t} ∼ Bernoulli(pₜ₋₁) taking values η_ w/probability pₜ₋₁ and zero otherwise.
# ⇒ 𝔼ₜ[ η_{k, t + 1} ] = 𝔼ₜ[ 𝔼[ η_{k, t + 1} ∣ pₜ ] ] = η_ 𝔼ₜ[ pₜ] = η_ pₜ
d[:η_]
end
𝔼ₜη_k = if m.disaster_intensity_spec in [:CoxIngersollRoss, :TwoStateMarkovChain, :TruncatedCoxIngersollRoss]
state_i = m.S[:p]
_𝔼ₜη_k_linear(z, y) = 𝔼ₜη_k_div_pₜ * z[state_i]
elseif m.disaster_intensity_spec in [:LogAR1]
state_i = m.S[:logp]
_𝔼ₜη_k_loglinear(z, y) = 𝔼ₜη_k_div_pₜ * exp(z[state_i])
end
return 𝔼ₜη_k
end
# Infer state transition equations for
# the disaster shock intensity p
function infer_μ_disaster_intensity(m::NKEZDisaster)
d = m.disaster_para
mdisi = m.disaster_intensity_spec
μ_p = if mdisi == :CoxIngersollRoss
state_i = m.S[:p]
@inline _μ_p_cir(z, y) = (1. - d[:ρ_p]) * d[:p] + d[:ρ_p] * z[state_i]
elseif mdisi == :TwoStateMarkovChain
state_i = m.S[:p]
@inline function _μ_p_2mc(z, y)
if z[state_i] == d[:p̅]
d[:ρ̅_p] * d[:p̅] + (1. - d[:ρ̅_p]) * d[:p_]
else
d[:ρ_p] * d[:p_] + (1. - d[:ρ_p]) * d[:p̅]
end
end
elseif mdisi == :TruncatedCoxIngersollRoss
error("TruncatedCoxIngersollRoss not implemented yet")
elseif mdisi == :LogAR1
state_i = m.S[:logp]
@inline _μ_p_logar1(z, y) = (1 - d[:ρ_p]) * log(d[:p]) + d[:ρ_p] * z[state_i]
end
return μ_p
end
function infer_Σ_disaster_intensity(m::NKEZDisaster)
d = m.disaster_para
mdisi = m.disaster_intensity_spec
Σ_p = if mdisi in [:CoxIngersollRoss, :TruncatedCoxIngersollRoss]
state_i = m.S[:p]
@inline _Σ_p_cir(z) = sqrt(z[state_i]) * d[:σ_p]
elseif mdisi == :TwoStateMarkovChain
@inline _Σ_p_2mc(z) = one(eltype(z))
elseif mdisi == :LogAR1
state_i = m.S[:logp]
@inline _Σ_p_logar1(z) = d[:σ_p]
end
return Σ_p
end
# Infer the desired CCGF function
function infer_ccgf(m::NKEZDisaster)
d = m.disaster_para
S = m.S
SH = m.SH
not_dis_keys = setdiff(collect(keys(SH)), [:ε_k, :ε_p])
not_dis_inds = [SH[i] for i in not_dis_keys]
ccgf = if m.disaster_occur_spec == :PoissonNormalMixture
# apply Poisson mgf to C_2(A) = -A + σ_j^2 A^2 / 2
# Poisson mgf w/intensity pₜ₋₁ is exp((exp(s) - 1) pₜ₋₁)
# and then subtract s * E_t[\eta_{k, t + 1}]
if m.disaster_intensity_spec == :CoxIngersollRoss
function _ccgf_poissonnormalmixture_cir(F, A, z)
F .= sum(A[:, vcat(not_dis_inds, SH[:ε_p])].^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= ((exp.(-A_k + A_k.^2 .* (d[:σ_k] ^ 2 / 2.)) .- 1.) + A_k) .* z[S[:p]] # ε_k
end
elseif m.disaster_intensity_spec == :TwoStateMarkovChain
function _ccgf_poissonnormalmixture_2smc(F, A, z)
F .= sum((@view A[:, not_dis_inds]).^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= ((exp.(-A_k + A_k.^2 .* (d[:σ_k] ^ 2 / 2.)) .- 1.) + A_k) .* z[S[:p]] # ε_k
# ε_p
A_p = @view A[:, SH[:ε_p]]
if z[S[:p]] == d[:p̅]
F .+= log.((1. - d[:ρ̅_p]) .* exp.(A_p * d[:p_]) + d[:ρ̅_p] .* exp.(A_p * d[:p̅])) -
A_p .* (d[:ρ̅_p] * d[:p̅] + (1. - d[:ρ̅_p]) * d[:p_])
else
F .+= log.((1. - d[:ρ_p]) .* exp.(A_p .* d[:p̅]) + d[:ρ_p] .* exp.(A_p .* d[:p_])) -
A_p .* (d[:ρ_p] * d[:p_] + (1. - d[:ρ_p]) * d[:p̅])
end
end
elseif m.disaster_intensity_spec == :LogAR1
function _ccgf_poissonnormalmixture_logar1(F, A, z)
F .= sum(A[:, vcat(not_dis_inds, SH[:ε_p])].^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= ((exp.(-A_k + A_k.^2 .* (d[:σ_k] ^ 2 / 2.)) .- 1.) + A_k) .* exp(z[S[:logp]]) # ε_k
end
end
elseif m.disaster_occur_spec == :Bernoulli
if m.disaster_intensity_spec == :CoxIngersollRoss
function _ccgf_bernoulli_cir(F, A, z)
F .= sum(A[:, vcat(not_dis_inds, SH[:ε_p])].^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= log.((1. .- z[S[:p]]) .+ z[S[:p]] .* exp.(A_k .* d[:η_])) .-
A_k .* (d[:η_] * z[S[:p]]) # ε_k
end
elseif m.disaster_intensity_spec == :TwoStateMarkovChain
function _ccgf_bernoulli_2smc(F, A, z)
F .= sum((@view A[:, not_dis_inds]).^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= log.((1. .- z[S[:p]]) .+ z[S[:p]] .* exp.(A_k .* d[:η_])) .-
A_k .* (d[:η_] * z[S[:p]]) # ε_k
# ε_p
A_p = @view A[:, SH[:ε_p]]
if z[S[:p]] == d[:p̅]
F .+= log.((1. - d[:ρ̅_p]) .* exp.(A_p * d[:p_]) + d[:ρ̅_p] .* exp.(A_p * d[:p̅])) -
A_p .* (d[:ρ̅_p] * d[:p̅] + (1. - d[:ρ̅_p]) * d[:p_])
else
F .+= log.((1. - d[:ρ_p]) .* exp.(A_p * d[:p̅]) + d[:ρ_p] .* exp.(A_p * d[:p_])) -
A_p .* (d[:ρ_p] * d[:p_] + (1. - d[:ρ_p]) * d[:p̅])
end
end
elseif m.disaster_intensity_spec == :LogAR1
function _ccgf_bernoulli_logar1(F, A, z)
F .= sum(A[:, vcat(not_dis_inds, SH[:ε_p])].^2, dims = 2) .* .5 # Gaussian parts
A_k = @view A[:, SH[:ε_k]]
F .+= log.((1. - exp(z[S[:logp]])) .+ exp(z[S[:logp]]) .* exp.(A_k .* d[:η_])) .-
A_k .* (d[:η_] * exp(z[S[:logp]])) # ε_k
end
end
end
if isnothing(ccgf)
error("Either the specification of the disaster shock's occurrence ($(m.disaster_occur_spec)) or intensity " *
"$(m.disaster_intensity_spec) is not recognized.")
else
return ccgf
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 11190 | # This script solves the NKCapital model with a risk-adjusted linearization.
using RiskAdjustedLinearizations, JLD2, LinearAlgebra, Test, SparseArrays
# Settings
define_functions = true
testing = true # check model's solution under default parameters against saved output
nlsolve_sparsity = true # exploit sparsity in calls to nlsolve
autodiff = false
algorithm = :relaxation
euler_equation_errors = false
test_price_dispersion = false # check if price dispersion in steady state is always bounded below by 1
plot_irfs = false
horizon = 40 # horizon for IRFs
N_approx = 5 # Number of periods ahead used for forward-difference equations
n_GH = 5 # number of nodes for Gauss-Hermite quadrature
if define_functions
include("nk_with_capital.jl")
end
# Set up
m_nk = NKCapital(; N_approx = N_approx) # create parameters
m = nk_capital(m_nk) # instantiate risk-adjusted linearization
autodiff_method = autodiff ? :forward : :central
if nlsolve_sparsity
solve!(m; algorithm = :deterministic, autodiff = autodiff_method) # ensure Ψ is nonzero
jac_cache = preallocate_jac_cache(m, algorithm; sparsity_detection = false)
else
jac_cache = nothing
end
# Solve!
solve!(m; algorithm = algorithm, autodiff = autodiff_method,
jac_cache = jac_cache, sparse_jacobian = nlsolve_sparsity)
if testing
out = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "nk_with_capital_output.jld2"), "r")
test_m_nk = NKCapital(; N_approx = 1) # create parameters
test_m = nk_capital(test_m_nk) # instantiate risk-adjusted linearization
solve!(test_m; algorithm = :deterministic, verbose = :none)
zdet = copy(test_m.z)
ydet = copy(test_m.y)
Psidet = copy(test_m.Ψ)
@test test_m.z ≈ out["z_det"]
@test test_m.y ≈ out["y_det"]
@test test_m.Ψ ≈ out["Psi_det"]
solve!(test_m; algorithm = :relaxation, verbose = :none)
@test test_m.z ≈ out["z"]
@test test_m.y ≈ out["y"]
@test test_m.Ψ ≈ out["Psi"]
test_5_m_nk = NKCapital(; N_approx = 5) # create parameters
test_5_m = nk_capital(test_5_m_nk) # instantiate risk-adjusted linearization
solve!(test_5_m; algorithm = :relaxation, verbose = :none)
@test test_5_m.z ≈ out["z_5"]
@test test_5_m.y ≈ out["y_5"]
@test test_5_m.Ψ ≈ out["Psi_5"]
end
if test_price_dispersion
π_ss_vec = log.(range(1 - .005, stop = 1 + .005, length = 10)) # On annualized basis, range from -2% to 2% target inflation
det_soln = Dict()
sss_soln = Vector{RiskAdjustedLinearization}(undef, length(π_ss_vec))
for (i, π_ss) in enumerate(π_ss_vec)
local m_nk = NKCapital(; π_ss = π_ss)
local m = nk_capital(m_nk)
solve!(m; algorithm = :deterministic, verbose = :none)
det_soln[i] = Dict()
det_soln[i][:z] = copy(m.z)
det_soln[i][:y] = copy(m.y)
det_soln[i][:Ψ] = copy(m.Ψ)
solve!(m; algorithm = algorithm, verbose = :none)
sss_soln[i] = m
end
det_v = exp.([det_soln[i][:z][2] for i in 1:length(det_soln)]) # z[2] is v₋₁, the price dispersion state
sss_v = exp.([sss_soln[i].z[2] for i in 1:length(sss_soln)])
@test all(det_v .> 1.)
@test all(sss_v .> 1.)
end
if euler_equation_errors
# Load saved shocks for the Euler equation error calculation
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "nk_with_capital_shocks.jld2"), "r")["shocks"][:, 1:50]
# Calculate Euler equation for bonds
@test abs(euler_equation_error(m, nk_cₜ, (a, b, c, d) -> nk_log_euler(a, b, c, d; β = m_nk.β, γ = m_nk.γ, J = m_nk.J),
nk_𝔼_quadrature, shocks, summary_statistic = x -> norm(x, Inf))) ≈ 0.
# Can calculate the Euler equation error for q, s₁, and s₂ as well by treating these variables as "consumption variables"
# but need to do the Euler equation error calculation "semi-manually" b/c of the forward difference equations
impl_output = Dict()
for k in [:dq, :pq, :ds₁, :ps₁, :ds₂, :ps₂]
impl_output[k] = Dict()
end
_states, _jumps = simulate(m, size(shocks, 2), shocks, m.z)
q_ral = _jumps[m_nk.J[:q], :]
s₁_ral = _jumps[m_nk.J[:s₁], :]
s₂_ral = _jumps[m_nk.J[:s₂], :]
for i in 1:m_nk.N_approx
impl_output[:dq][i] = log.(euler_equation_error(m, (m, zₜ) -> nk_dqₜ(m, zₜ, i, m_nk.J),
(a, b, c, d) -> nk_log_dq(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, i = i, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
impl_output[:pq][i] = log.(euler_equation_error(m, (m, zₜ) -> nk_pqₜ(m, zₜ, i, m_nk.J),
(a, b, c, d) -> nk_log_pq(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, i = i, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
impl_output[:ds₁][i - 1] = log.((i == 1) ? [nk_ds₁ₜ(m, _states[:, t], i - 1, m_nk.J) for t in 1:size(_states, 2)] :
euler_equation_error(m, (m, zₜ) -> nk_ds₁ₜ(m, zₜ, i - 1, m_nk.J),
(a, b, c, d) -> nk_log_ds₁(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, θ = m_nk.θ, ϵ = m_nk.ϵ,
i = i - 1, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
impl_output[:ps₁][i] = log.(euler_equation_error(m, (m, zₜ) -> nk_ps₁ₜ(m, zₜ, i, m_nk.J),
(a, b, c, d) -> nk_log_ps₁(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, θ = m_nk.θ, ϵ = m_nk.ϵ,
i = i, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
impl_output[:ds₂][i - 1] = log.((i == 1) ? [nk_ds₂ₜ(m, _states[:, t], i - 1, m_nk.J) for t in 1:size(_states, 2)] :
euler_equation_error(m, (m, zₜ) -> nk_ds₂ₜ(m, zₜ, i - 1, m_nk.J),
(a, b, c, d) -> nk_log_ds₂(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, θ = m_nk.θ, ϵ = m_nk.ϵ,
i = i - 1, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
impl_output[:ps₂][i] = log.(euler_equation_error(m, (m, zₜ) -> nk_ps₂ₜ(m, zₜ, i, m_nk.J),
(a, b, c, d) -> nk_log_ps₂(a, b, c, d; β = m_nk.β,
γ = m_nk.γ, θ = m_nk.θ, ϵ = m_nk.ϵ,
i = i, J = m_nk.J, S = m_nk.S),
nk_𝔼_quadrature, shocks, return_soln = true))
end
q_impl = log.(sum([exp.(x) for x in collect(values(impl_output[:dq]))]) + exp.(impl_output[:pq][m_nk.N_approx]))
s₁_impl = log.(sum([exp.(x) for x in collect(values(impl_output[:ds₁]))]) + exp.(impl_output[:ps₁][m_nk.N_approx]))
s₂_impl = log.(sum([exp.(x) for x in collect(values(impl_output[:ds₂]))]) + exp.(impl_output[:ps₂][m_nk.N_approx]))
@test maximum(abs.((exp.(q_impl) - exp.(q_ral)) ./ exp.(q_ral))) < .1
@test maximum(abs.((exp.(s₁_impl) - exp.(s₁_ral)) ./ exp.(s₁_ral))) < .1
@test maximum(abs.((exp.(s₂_impl) - exp.(s₂_ral)) ./ exp.(s₂_ral))) < .1
end
if plot_irfs
# Show IRFs of interesting variables (discount rate, labor supply, productivity, and MP shocks)
m_nk = NKCapital()
m = nk_capital(m_nk)
solve!(m; algorithm = algorithm, autodiff = autodiff_method)
z_irfs = Dict()
y_irfs = Dict()
for k in keys(m_nk.SH)
z_irfs[k], y_irfs[k] = impulse_responses(m, horizon, m_nk.SH[k], 1.) # 1 positive standard deviation shock
end
using Plots
plot_dicts = Dict()
for k in keys(m_nk.SH)
plot_dicts[k] = Dict()
plot_dicts[k][:output] = plot(1:horizon, y_irfs[k][m_nk.J[:output], :], label = "Output",
linewidth = 3, color = :black)
plot_dicts[k][:l] = plot(1:horizon, y_irfs[k][m_nk.J[:l], :], label = "Hours",
linewidth = 3, color = :black)
plot_dicts[k][:w] = plot(1:horizon, y_irfs[k][m_nk.J[:w], :], label = "Real Wage",
linewidth = 3, color = :black)
plot_dicts[k][:rk] = plot(1:horizon, y_irfs[k][m_nk.J[:rk], :], label = "Rental Rate of Capital",
linewidth = 3, color = :black)
plot_dicts[k][:k] = plot(1:horizon, z_irfs[k][m_nk.S[:k₋₁], :], label = "Capital Stock",
linewidth = 3, color = :black)
plot_dicts[k][:π] = plot(1:horizon, y_irfs[k][m_nk.J[:π], :], label = "Inflation",
linewidth = 3, color = :black)
plot_dicts[k][:q] = plot(1:horizon, y_irfs[k][m_nk.J[:q], :], label = "Price of Capital",
linewidth = 3, color = :black)
plot_dicts[k][:x] = plot(1:horizon, y_irfs[k][m_nk.J[:x], :], label = "Investment",
linewidth = 3, color = :black)
plot_dicts[k][:r] = plot(1:horizon, y_irfs[k][m_nk.J[:r], :], label = "Nominal Interest Rate",
linewidth = 3, color = :black)
# excess returns on capital (exploits properties of IRFs)
EₜRₖₜ₊₁ = exp.(y_irfs[k][m_nk.J[:rk], 2:end] .+ m.y[m_nk.J[:rk]])
EₜQₜ₊₁ = exp.(y_irfs[k][m_nk.J[:q], 2:end] .+ m.y[m_nk.J[:q]])
EₜΩₜ₊₁ = exp.(y_irfs[k][m_nk.J[:ω], 2:end] .+ m.y[m_nk.J[:ω]])
exc_ret = ((EₜRₖₜ₊₁ + EₜQₜ₊₁ .* EₜΩₜ₊₁) ./ exp.(y_irfs[k][m_nk.J[:q], 1:end - 1] .+ m.y[m_nk.J[:q]])) -
(exp.(y_irfs[k][m_nk.J[:r], 1:end - 1] - y_irfs[k][m_nk.J[:π], 1:end - 1] .+
(m.y[m_nk.J[:r]] - m.y[m_nk.J[:π]]))) .-
(((exp(m.y[m_nk.J[:rk]]) + exp.(m.y[m_nk.J[:q]] + m.y[m_nk.J[:ω]])) / exp.(m.y[m_nk.J[:q]])) -
exp(m.y[m_nk.J[:r]] - m.y[m_nk.J[:π]]))
plot_dicts[k][:real_excess_ret] = plot(1:(horizon - 1), exc_ret, label = "Real Excess Returns",
linewidth = 3, color = :black)
end
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 17126 | using UnPack, OrderedCollections, ForwardDiff, JLD2, NLsolve, SparseArrays
mutable struct NKCapital{T <: Real}
β::T
γ::T
φ::T
ν::T
χ::T
δ::T
α::T
ϵ::T
θ::T
π_ss::T
ϕ_r::T
ϕ_π::T
ϕ_y::T
ρ_β::T
ρ_l::T
ρ_a::T
ρ_r::T
σ_β::T
σ_l::T
σ_a::T
σ_r::T
N_approx::Int
S::OrderedDict{Symbol, Int}
J::OrderedDict{Symbol, Int}
E::OrderedDict{Symbol, Int}
SH::OrderedDict{Symbol, Int}
end
function NKCapital(; β::T = .99, γ::T = 3.8, φ::T = 1., ν::T = 1., χ::T = 4.,
δ::T = 0.025, α::T = 0.33, ϵ::T = 10., θ::T = 0.7,
π_ss::T = 0., ϕ_r::T = 0.5,
ϕ_π::T = 1.3, ϕ_y::T = 0.25, ρ_β::T = 0.1,
ρ_l::T = 0.1, ρ_a::T = 0.9, ρ_r::T = 0.,
σ_β::T = 0.01, σ_l::T = 0.01, σ_a::T = 0.01, σ_r::T = 0.01,
N_approx::Int = 1) where {T <: Real}
@assert N_approx > 0 "N_approx must be at least 1."
## Create Indexing dictionaries.
# Note that for the exogenous shock
# state variables, instead of e.g. η_L and η_A, I use η_l and η_a
# since the uppercase variable will not appear in the jumps/states.
S_init = [:k₋₁, :v₋₁, :r₋₁, :output₋₁, :η_β, :η_l, :η_a, :η_r] # State Variables
J_init = [:output, :c, :l, :w, :r, :π, :q, :x, :rk, :ω, :mc,
:s₁, :s₂, :v] # Jump variables (ordering matters: the steady-state guess below fills y[1:14] in this order)
E_init = [:wage, :euler, :tobin, :cap_ret,
:eq_mc, :kl_ratio, :eq_s₁, :eq_s₂,
:phillips_curve, :price_dispersion,
:mp, :output_market_clear, :production] # Equations
SH_init = [:ε_β, :ε_l, :ε_a, :ε_r] # Exogenous shocks
# Add approximations for forward-difference equations
push!(E_init, :eq_omega)
for var in [:q, :s₁, :s₂]
inds = (var == :q) ? (1:N_approx) : (0:(N_approx - 1))
push!(J_init, [Symbol(:d, var, "$(i)") for i in inds]...)
push!(J_init, [Symbol(:p, var, "$(i)") for i in 1:N_approx]...)
push!(E_init, [Symbol(:eq_d, var, "$(i)") for i in inds]...)
push!(E_init, [Symbol(:eq_p, var, "$(i)") for i in 1:N_approx]...)
end
S = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(S_init))
J = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(J_init))
E = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(E_init))
SH = OrderedDict{Symbol, Int}(k => i for (i, k) in enumerate(SH_init))
return NKCapital{T}(β, γ, φ, ν, χ, δ, α, ϵ, θ, π_ss, ϕ_r, ϕ_π, ϕ_y,
ρ_β, ρ_l, ρ_a, ρ_r, σ_β, σ_l, σ_a, σ_r,
N_approx, S, J, E, SH)
end
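# Usage sketch (illustrative, not part of the original file): N_approx controls how many
# forward-difference terms are created. For instance, NKCapital(; N_approx = 2) generates
# jumps :dq1, :dq2, :pq1, :pq2 for the price of capital and :ds₁0, :ds₁1, :ps₁1, :ps₁2
# (and likewise for s₂), matching the push! loop above.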
function nk_capital(m::NKCapital{T}; sparse_arrays::Bool = false,
sparse_jacobian::Vector{Symbol} = Symbol[]) where {T <: Real}
# Get parameters
@unpack β, γ, φ, ν, χ, δ, α, ϵ, θ, π_ss, ϕ_r, ϕ_π, ϕ_y = m
@unpack ρ_β, ρ_l, ρ_a, ρ_r, σ_β, σ_l, σ_a, σ_r = m
r_ss = π_ss - log(β)
X̄ = δ * χ / (χ + 1.)
# Unpack indexing dictionaries
@unpack N_approx, S, J, E, SH = m
@unpack k₋₁, v₋₁, r₋₁, output₋₁, η_β, η_l, η_a, η_r = S
@unpack output, c, l, w, r, π, q, x, rk, ω, mc, s₁, s₂, v = J
@unpack wage, euler, tobin, cap_ret, eq_mc, kl_ratio, eq_s₁, eq_s₂ = E
@unpack phillips_curve, price_dispersion, mp = E
@unpack output_market_clear, production, eq_omega = E
@unpack ε_β, ε_l, ε_a, ε_r = SH
Nz = length(S)
Ny = length(J)
Nε = length(SH)
## Define nonlinear equations
# Some helper functions
_Φ(Xin, Kin) = X̄ ^ (1. / χ) / (1. - 1. / χ) * (Xin / Kin) ^ (1. - 1. / χ) - X̄ / (χ * (χ - 1.))
_Φ′(Xin, Kin) = X̄ ^ (1. / χ) * (Xin / Kin) ^ (- 1. / χ)
Φ(z, y) = _Φ(exp(y[x]), exp(z[k₋₁]))
Φ′(z, y) = _Φ′(exp(y[x]), exp(z[k₋₁]))
m_ξ(z, y) = log(β) - z[η_β] + γ * y[c]
function m_fwd!(i, Γ₅, Γ₆)
Γ₅[i, η_β] = 1.
Γ₆[i, c] = -γ
end
pstar(y) = log(ϵ / (ϵ - 1.)) + y[s₁] - y[s₂]
function μ(F, z, y)
F[k₋₁] = log(1 + X̄ ^ (1. / χ) / (1. - 1. / χ) *
(exp(y[x] - z[k₋₁])) ^ (1. - 1. / χ) -
X̄ / (1. - 1. / χ)) + z[k₋₁]
F[v₋₁] = y[v]
F[r₋₁] = y[r]
F[output₋₁] = y[output]
F[η_β] = ρ_β * z[η_β]
F[η_l] = ρ_l * z[η_l]
F[η_a] = ρ_a * z[η_a]
F[η_r] = ρ_r * z[η_r]
end
function ξ(F, z, y)
F_type = eltype(F)
## Pre-evaluate (just once) some terms
Φv = Φ(z, y)
Φ′v = Φ′(z, y)
pstarv = pstar(y)
m_ξv = m_ξ(z, y)
## Non-forward-difference equations
F[wage] = log(φ) + z[η_l] + ν * y[l] - (-γ * y[c] + y[w])
F[euler] = y[r] + m_ξv
F[tobin] = y[q] + log(Φ′v)
F[eq_mc] = (1. - α) * y[w] + α * y[rk] - z[η_a] -
(1. - α) * log(1. - α) - α * log(α) - y[mc]
F[kl_ratio] = z[k₋₁] - y[l] - log(α / (1. - α)) - (y[w] - y[rk])
F[phillips_curve] = (1. - ϵ) * y[π] - log((1. - θ) * exp((1. - ϵ) * (pstarv + y[π])) + θ)
F[price_dispersion] = y[v] - ϵ * y[π] - log((1. - θ) * exp(-ϵ * (pstarv + y[π])) + θ * exp(z[v₋₁]))
F[mp] = (1. - ϕ_r) * r_ss + ϕ_r * z[r₋₁] + (1. - ϕ_r) .*
(ϕ_π * (y[π] - π_ss) + ϕ_y * (y[output] - z[output₋₁])) + z[η_r] - y[r]
F[output_market_clear] = y[output] - log(exp(y[c]) + exp(y[x]))
F[production] = z[η_a] + α * z[k₋₁] + (1. - α) * y[l] - y[v] - y[output]
## Forward-difference equations separately handled b/c recursions
F[eq_omega] = log(1. - δ + Φv - Φ′v * exp(y[x] - z[k₋₁])) - y[ω]
F[cap_ret] = y[q] - log(sum([exp(y[J[Symbol("dq$(i)")]]) for i in 1:N_approx]) +
exp(y[J[Symbol("pq$(N_approx)")]]))
F[eq_s₁] = y[s₁] - log(sum([exp(y[J[Symbol("ds₁$(i)")]]) for i in 0:(N_approx - 1)]) +
exp(y[J[Symbol("ps₁$(N_approx)")]]))
F[eq_s₂] = y[s₂] - log(sum([exp(y[J[Symbol("ds₂$(i)")]]) for i in 0:(N_approx - 1)]) +
exp(y[J[Symbol("ps₂$(N_approx)")]]))
# Set initial boundary conditions
F[E[:eq_dq1]] = -y[J[:dq1]] + m_ξv
F[E[:eq_pq1]] = -y[J[:pq1]] + m_ξv
F[E[:eq_ds₁0]] = y[J[:ds₁0]] - y[mc] - y[output]
F[E[:eq_ps₁1]] = log(θ) - y[J[:ps₁1]] + m_ξv
F[E[:eq_ds₂0]] = y[J[:ds₂0]] - y[output]
F[E[:eq_ps₂1]] = log(θ) - y[J[:ps₂1]] + m_ξv
# Recursions for forward-difference equations
for i in 2:N_approx
F[E[Symbol("eq_dq$(i)")]] = -y[J[Symbol("dq$(i)")]] + m_ξv
F[E[Symbol("eq_pq$(i)")]] = -y[J[Symbol("pq$(i)")]] + m_ξv
F[E[Symbol("eq_ds₁$(i-1)")]] = log(θ) - y[J[Symbol("ds₁$(i-1)")]] + m_ξv
F[E[Symbol("eq_ps₁$(i)")]] = log(θ) - y[J[Symbol("ps₁$(i)")]] + m_ξv
F[E[Symbol("eq_ds₂$(i-1)")]] = log(θ) - y[J[Symbol("ds₂$(i-1)")]] + m_ξv
F[E[Symbol("eq_ps₂$(i)")]] = log(θ) - y[J[Symbol("ps₂$(i)")]] + m_ξv
end
end
# The cache is initialized as zeros so we only need to fill non-zero elements
Λ = zeros(T, Nz, Ny)
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F[η_β, ε_β] = σ_β
F[η_l, ε_l] = σ_l
F[η_a, ε_a] = σ_a
F[η_r, ε_r] = σ_r
end
function ccgf(F, α, z)
# F .= .5 * diag(α * α') # slower but this is the underlying math
sum!(F, α.^2) # faster implementation
F .*= .5
end
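# Sanity check (illustrative): for Gaussian shocks the two lines in ccgf agree, e.g.
# A = [1. 2.; 3. 4.] gives .5 .* diag(A * A') == .5 .* vec(sum(A.^2, dims = 2)) == [2.5, 12.5].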
## Forward-looking variables
Γ₅ = zeros(T, Ny, Nz)
Γ₆ = zeros(T, Ny, Ny)
# Equations w/out SDF terms and are not forward-difference equations
Γ₆[euler, π] = -one(T)
# Equations with SDF terms but are not forward-difference equations
m_fwd!(euler, Γ₅, Γ₆)
# Forward difference equations: boundary conditions
m_fwd!(E[:eq_dq1], Γ₅, Γ₆)
Γ₆[E[:eq_dq1], rk] = one(T)
m_fwd!(E[:eq_pq1], Γ₅, Γ₆)
Γ₆[E[:eq_pq1], q] = one(T)
Γ₆[E[:eq_pq1], ω] = one(T)
m_fwd!(E[:eq_ps₁1], Γ₅, Γ₆)
Γ₆[E[:eq_ps₁1], π] = convert(T, ϵ)
Γ₆[E[:eq_ps₁1], s₁] = one(T)
m_fwd!(E[:eq_ps₂1], Γ₅, Γ₆)
Γ₆[E[:eq_ps₂1], π] = convert(T, ϵ) - one(T)
Γ₆[E[:eq_ps₂1], s₂] = one(T)
# Forward difference equations: recursions
for i in 2:N_approx
m_fwd!(E[Symbol("eq_dq$(i)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_dq$(i)")], ω] = one(T)
Γ₆[E[Symbol("eq_dq$(i)")], J[Symbol("dq$(i-1)")]] = one(T)
m_fwd!(E[Symbol("eq_pq$(i)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_pq$(i)")], ω] = one(T)
Γ₆[E[Symbol("eq_pq$(i)")], J[Symbol("pq$(i-1)")]] = one(T)
m_fwd!(E[Symbol("eq_ds₁$(i-1)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_ds₁$(i-1)")], π] = convert(T, ϵ)
Γ₆[E[Symbol("eq_ds₁$(i-1)")], J[Symbol("ds₁$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_ps₁$(i)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_ps₁$(i)")], π] = convert(T, ϵ)
Γ₆[E[Symbol("eq_ps₁$(i)")], J[Symbol("ps₁$(i-1)")]] = one(T)
m_fwd!(E[Symbol("eq_ds₂$(i-1)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_ds₂$(i-1)")], π] = convert(T, ϵ) - one(T)
Γ₆[E[Symbol("eq_ds₂$(i-1)")], J[Symbol("ds₂$(i-2)")]] = one(T)
m_fwd!(E[Symbol("eq_ps₂$(i)")], Γ₅, Γ₆)
Γ₆[E[Symbol("eq_ps₂$(i)")], π] = convert(T, ϵ) - one(T)
Γ₆[E[Symbol("eq_ps₂$(i)")], J[Symbol("ps₂$(i-1)")]] = one(T)
end
if sparse_arrays
Γ₅ = sparse(Γ₅)
Γ₆ = sparse(Γ₆)
end
## Mapping from states to jump variables
Ψ = zeros(T, Ny, Nz)
## Deterministic steady state as initial guess
z = Vector{T}(undef, Nz)
y = Vector{T}(undef, Ny)
# AR(1) start at 0
η_β0 = 0.
η_l0 = 0.
η_a0 = 0.
η_r0 = 0.
# Variables known outright
M0 = β
Q0 = 1.
RK0 = 1. / β + X̄ - 1.
# Guesses
L0 = .5548429
V0 = 1. # true if π_ss = 0, otherwise this is only a reasonable guess
# Implied values given guesses
C0_fnct = Cin -> Cin[1] + X̄ * (α / (1. - α) * φ * L0 ^ ν / Cin[1] ^ (-γ) / RK0 * L0) -
(α / (1. - α) * φ * L0 ^ ν / Cin[1] ^ (-γ) / RK0) ^ α * L0 / V0
C0_guess = NaN
for theguess in .5:.5:10.
try
C0_fnct([theguess])
C0_guess = theguess
catch e
end
end
C0 = nlsolve(C0_fnct, [C0_guess]).zero[1]
W0 = φ * L0 ^ ν / C0 ^ (-γ)
MC0 = (1. / (1. - α)) ^ (1. - α) * (1. / α) ^ α * W0 ^ (1. - α) * RK0 ^ α
K0 = α / (1. - α) * W0 / RK0 * L0
X0 = X̄ * K0
Y0 = K0 ^ α * L0 ^ (1. - α) / V0
S₁0 = MC0 * Y0 / (1. - θ * exp(π_ss) ^ ϵ)
S₂0 = Y0 / (1. - θ * exp(π_ss) ^ (ϵ - 1.))
Π0 = exp(π_ss)
R0 = exp(r_ss)
Ω0 = 1. - δ + _Φ(X0, K0) - _Φ′(X0, K0) * X0 / K0
z .= [convert(T, x) for x in log.([K0, V0, R0, Y0, exp.([η_β0, η_l0, η_a0, η_r0])...])]
y[1:14] = [convert(T, x) for x in log.([Y0, C0, L0, W0, R0, Π0, Q0, X0, RK0, Ω0, MC0, S₁0, S₂0, V0])]
y[J[:dq1]] = convert(T, log(M0 * RK0))
y[J[:pq1]] = convert(T, log(Ω0 * M0 * Q0))
y[J[:ds₁0]] = convert(T, log(MC0 * Y0))
y[J[:ps₁1]] = convert(T, log(θ * M0 * Π0^ϵ * S₁0))
y[J[:ds₂0]] = convert(T, log(Y0))
y[J[:ps₂1]] = convert(T, log(θ * M0 * Π0^(ϵ - 1.) * S₂0))
for i in 2:N_approx
y[J[Symbol("dq$(i)")]] = convert(T, log(M0) + log(Ω0) + y[J[Symbol("dq$(i-1)")]])
y[J[Symbol("pq$(i)")]] = convert(T, log(M0) + log(Ω0) + y[J[Symbol("pq$(i-1)")]])
y[J[Symbol("ds₁$(i-1)")]] = convert(T, log(θ) + log(M0) + ϵ * π_ss + y[J[Symbol("ds₁$(i-2)")]])
y[J[Symbol("ps₁$(i)")]] = convert(T, log(θ) + log(M0) + ϵ * π_ss + y[J[Symbol("ps₁$(i-1)")]])
y[J[Symbol("ds₂$(i-1)")]] = convert(T, log(θ) + log(M0) + (ϵ - 1.) * π_ss + y[J[Symbol("ds₂$(i-2)")]])
y[J[Symbol("ps₂$(i)")]] = convert(T, log(θ) + log(M0) + (ϵ - 1.) * π_ss + y[J[Symbol("ps₂$(i-1)")]])
end
if sparse_arrays
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian,
Σ_cache_init = dims -> spzeros(dims...),
Λ_cache_init = dims -> spzeros(dims...))
else
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian)
end
end
nk_cₜ(m, zₜ) = exp(m.y[2] + (m.Ψ * (zₜ - m.z))[2])
nk_qₜ(m, zₜ) = exp(m.y[7] + (m.Ψ * (zₜ - m.z))[7])
nk_dqₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("dq$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("dq$(i)")]])
nk_pqₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("pq$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("pq$(i)")]])
nk_s₁ₜ(m, zₜ) = exp(m.y[12] + (m.Ψ * (zₜ - m.z))[12])
nk_s₂ₜ(m, zₜ) = exp(m.y[13] + (m.Ψ * (zₜ - m.z))[13])
nk_ds₁ₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("ds₁$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("ds₁$(i)")]])
nk_ps₁ₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("ps₁$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("ps₁$(i)")]])
nk_ds₂ₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("ds₂$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("ds₂$(i)")]])
nk_ps₂ₜ(m, zₜ, i, J) = exp(m.y[J[Symbol("ps₂$(i)")]] + (m.Ψ * (zₜ - m.z))[J[Symbol("ps₂$(i)")]])
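# Note: the hard-coded indices in nk_cₜ, nk_qₜ, nk_s₁ₜ, and nk_s₂ₜ (2, 7, 12, 13)
# correspond to :c, :q, :s₁, and :s₂ under the default J ordering defined in NKCapital.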
# Evaluates Euler equation errors in log terms
function nk_log_euler(m, zₜ, εₜ₊₁, Cₜ; β::T = .99, γ::T = 3.8,
J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
return log(β) - γ * (yₜ₊₁[J[:c]] - log(Cₜ)) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]] + yₜ[J[:r]] - yₜ₊₁[J[:π]]
end
function nk_log_dq(m, zₜ, εₜ₊₁, DQₜ; β::T = .99, γ::T = 3.8,
i::Int = 1, J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 1
return mₜ₊₁ + yₜ₊₁[J[:rk]] - log(DQₜ)
else
return yₜ₊₁[J[:ω]] + mₜ₊₁ + yₜ₊₁[J[Symbol("dq$(i-1)")]] - log(DQₜ)
end
end
function nk_log_pq(m, zₜ, εₜ₊₁, PQₜ; β::T = .99, γ::T = 3.8,
i::Int = 1, J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 1
return yₜ₊₁[J[:ω]] + mₜ₊₁ + yₜ₊₁[J[:q]] - log(PQₜ)
else
return yₜ₊₁[J[:ω]] + mₜ₊₁ + yₜ₊₁[J[Symbol("pq$(i-1)")]] - log(PQₜ)
end
end
function nk_log_ds₁(m, zₜ, εₜ₊₁, DS₁ₜ; β::T = .99, γ::T = 3.8,
θ::T = 0.7, ϵ::T = 10., i::Int = 0,
J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 0
return yₜ[J[:mc]] + yₜ[J[:output]]
else
return log(θ) + mₜ₊₁ + ϵ * yₜ₊₁[J[:π]] + yₜ₊₁[J[Symbol("ds₁$(i-1)")]] - log(DS₁ₜ)
end
end
function nk_log_ps₁(m, zₜ, εₜ₊₁, PS₁ₜ; β::T = .99, γ::T = 3.8,
θ::T = 0.7, ϵ::T = 10., i::Int = 0,
J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 1
return log(θ) + mₜ₊₁ + ϵ * yₜ₊₁[J[:π]] + yₜ₊₁[J[:s₁]] - log(PS₁ₜ)
else
return log(θ) + mₜ₊₁ + ϵ * yₜ₊₁[J[:π]] + yₜ₊₁[J[Symbol("ps₁$(i-1)")]] - log(PS₁ₜ)
end
end
function nk_log_ds₂(m, zₜ, εₜ₊₁, DS₂ₜ; β::T = .99, γ::T = 3.8,
θ::T = 0.7, ϵ::T = 10., i::Int = 0,
J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 0
return yₜ[J[:output]]
else
return log(θ) + mₜ₊₁ + (ϵ - 1.) * yₜ₊₁[J[:π]] + yₜ₊₁[J[Symbol("ds₂$(i-1)")]] - log(DS₂ₜ)
end
end
function nk_log_ps₂(m, zₜ, εₜ₊₁, PS₂ₜ; β::T = .99, γ::T = 3.8,
θ::T = 0.7, ϵ::T = 10., i::Int = 0,
J::AbstractDict = NKCapital().J, S::AbstractDict = NKCapital().S) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
mₜ₊₁ = log(β) - γ * (yₜ₊₁[J[:c]] - yₜ[J[:c]]) +
zₜ₊₁[S[:η_β]] - zₜ[S[:η_β]]
if i == 1
return log(θ) + mₜ₊₁ + (ϵ - 1.) * yₜ₊₁[J[:π]] + yₜ₊₁[J[:s₂]] - log(PS₂ₜ)
else
return log(θ) + mₜ₊₁ + (ϵ - 1.) * yₜ₊₁[J[:π]] + yₜ₊₁[J[Symbol("ps₂$(i-1)")]] - log(PS₂ₜ)
end
end
# The nk_log_* functions above evaluate n-period ahead Euler equation errors in log terms.
# Calculate the expectations in those Euler equations via Gauss-Hermite quadrature
# over the model's 4 standard normal shocks.
std_norm_mean = zeros(4)
std_norm_sig = ones(4)
if !isdefined(Main, :n_GH)
n_GH = 5
end
nk_𝔼_quadrature(f::Function) = gausshermite_expectation(f, std_norm_mean, std_norm_sig, n_GH)
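# Quick sanity check (illustrative, assuming gausshermite_expectation integrates a function
# of the 4 standard normal shocks): nk_𝔼_quadrature(ε -> exp(ε[1])) should be close to
# exp(.5) ≈ 1.6487, the mean of a lognormal random variable with unit log-variance.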
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2958 | # This script actually solves the RBCCampbellCochrane model with a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, RiskAdjustedLinearizations, Test
include("rbc_cc.jl")
# Settings: what do you want to do?
time_methods = false
numerical_algorithm = :relaxation
autodiff = false
n_strips = 3
# Set up
autodiff_method = autodiff ? :forward : :central
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc, n_strips)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Solve!
solve!(m; algorithm = numerical_algorithm, autodiff = autodiff_method)
if n_strips == 0
if numerical_algorithm == :relaxation
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_iterative_output.jld2"), "r")
elseif numerical_algorithm == :homotopy
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_homotopy_output.jld2"), "r")
end
elseif n_strips == 3
if numerical_algorithm == :relaxation
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_iterative_N3_output.jld2"), "r")
elseif numerical_algorithm == :homotopy
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_homotopy_N3_output.jld2"), "r")
end
end
if n_strips in [0, 3]
@test isapprox(sssout["z_rss"], m.z, atol=1e-4)
@test isapprox(sssout["y_rss"], m.y, atol=2e-4)
@test isapprox(sssout["Psi_rss"], m.Ψ, atol=1e-4)
end
if time_methods
println("Deterministic steady state")
@btime begin
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
end
# Use deterministic steady state as guesses
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
zdet = copy(m.z)
ydet = copy(m.y)
Ψdet = copy(m.Ψ)
println("Relaxation method")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method, verbose = :none)
end
println("Relaxation method with automatic differentiation")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = :forward, verbose = :none)
end
println("Relaxation method with Anderson acceleration")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, use_anderson = true, m = 3, autodiff = autodiff_method, verbose = :none)
end
println("Homotopy method")
@btime begin # called the "continuation" method in the original paper, but is called homotopy in the original code
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method, verbose = :none)
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 6690 | using UnPack, OrderedCollections, ForwardDiff, JLD2, SparseArrays
mutable struct RBCCampbellCochraneHabits{T <: Real}
IK̄::T
β::T
δ::T
α::T
ξ₃::T
μₐ::T
σₐ::T
γ::T
ρₛ::T
S::T
end
function RBCCampbellCochraneHabits(; α::T = .36, ξ₃ = .23, δ::T = .1337, IK̄::T = (.025 * (1 + 1 / .23)) / (1 + 1 / .23),
μₐ = .0049875, σₐ = .0064 / (1 - .36), γ = 2., ρₛ = .96717, β = .989, S = .057) where {T <: Real}
return RBCCampbellCochraneHabits{T}(IK̄, β, δ, α, ξ₃, μₐ, σₐ, γ, ρₛ, S)
end
function rbc_cc(m::RBCCampbellCochraneHabits{T}, n_strips::Int = 0;
sparse_jacobian::Vector{Symbol} = Symbol[],
sparse_arrays::Bool = false) where {T <: Real}
@unpack IK̄, β, δ, α, ξ₃, μₐ, σₐ, γ, ρₛ, S = m
s = OrderedDict{Symbol, Int}(:kₐ => 1, :hats => 2) # State variables
J = OrderedDict{Symbol, Int}(:cₖ => 1, :iₖ => 2, :log_D_plus_Q => 3, :rf => 4, :q => 5, :log_E_RQ => 6) # Jump variables
SH = OrderedDict{Symbol, Int}(:εₐ => 1) # Exogenous shocks
if n_strips > 0
J[:div] = 7
J[:r_div] = 8
J[:wres] = 9
q_div_vec = Symbol[Symbol("q_div$i") for i in 1:n_strips]
for (i, q_div) in enumerate(q_div_vec)
J[q_div] = i + 9
end
end
Nz = length(s)
Ny = length(J)
Nε = length(SH)
# Some additional functions used by μ and ξ
Φ(x) = exp(μₐ) - 1. + δ / (1. - ξ₃^2) + (IK̄ / (1. - 1. / ξ₃)) * ((x / IK̄)^(1. - 1. / ξ₃))
Φ′(x) = (x / IK̄) ^ (-1. / ξ₃) # equivalently, Φ′(x) = IK̄ * IK̄ ^ (1. / ξ₃ - 1.) * x ^ (-1. / ξ₃)
function μ(F, z, y)
F_type = eltype(F)
F[s[:kₐ]] = -μₐ + z[s[:kₐ]] + log((1. - δ) + Φ(exp(y[J[:iₖ]])))
F[s[:hats]] = ρₛ * z[s[:hats]]
end
ξ = if n_strips > 0
function _ξ_withstrips(F, z, y)
mt = -log(β) - γ * (y[J[:cₖ]] + z[s[:hats]] - log(1. + Φ(exp(y[J[:iₖ]])) - δ))
mtp1 = -γ * (y[J[:cₖ]] + z[s[:hats]])
Y = exp(y[J[:cₖ]]) + exp(y[J[:iₖ]])
DivQ = α * Y - exp(y[J[:iₖ]]) + (Φ(exp(y[J[:iₖ]])) - δ) * exp(y[J[:q]])
F[J[:cₖ]] = log(Y) - (α - 1.) * z[s[:kₐ]]
F[J[:iₖ]] = -log(Φ′(exp(y[J[:iₖ]]))) - y[J[:q]]
F[J[:log_D_plus_Q]] = log(DivQ + exp(y[J[:q]])) - y[J[:log_D_plus_Q]]
F[J[:rf]] = -y[J[:q]] - y[J[:log_E_RQ]]
F[J[:q]] = log(sum(exp.(y[J[:q_div1]:end])) + exp(y[J[:r_div]])) - y[J[:q]]
F[J[:log_E_RQ]] = log(DivQ) - y[J[:div]]
F[J[:div]] = log(exp(y[J[Symbol("q_div$(n_strips)")]]) + exp(y[J[:r_div]])) - y[J[:wres]]
F[J[:r_div]] = -mt - y[J[:r_div]]
F[J[:wres]] = -mt - (-y[J[:rf]])
for i in 1:n_strips
F[J[Symbol("q_div$(i)")]] = -mt - y[J[Symbol("q_div$(i)")]]
end
end
else
function _ξ_nostrips(F, z, y)
mt = -log(β) - γ * (y[J[:cₖ]] + z[s[:hats]] - log(1. + Φ(exp(y[J[:iₖ]])) - δ))
mtp1 = -γ * (y[J[:cₖ]] + z[s[:hats]])
Y = exp(y[J[:cₖ]]) + exp(y[J[:iₖ]])
DivQ = α * Y - exp(y[J[:iₖ]]) + (Φ(exp(y[J[:iₖ]])) - δ) * exp(y[J[:q]])
F[J[:cₖ]] = log(Y) - (α - 1.) * z[s[:kₐ]]
F[J[:iₖ]] = -log(Φ′(exp(y[J[:iₖ]]))) - y[J[:q]]
F[J[:log_D_plus_Q]] = log(DivQ + exp(y[J[:q]])) - y[J[:log_D_plus_Q]]
F[J[:rf]] = -y[J[:q]] - y[J[:log_E_RQ]]
F[J[:q]] = -mt - y[J[:q]]
F[J[:log_E_RQ]] = -mt - (-y[J[:rf]])
end
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Λ(F, z)
F_type = eltype(F)
F[s[:hats], J[:cₖ]] = 1. / S * sqrt(1. - 2. * z[s[:hats]]) - 1.
end
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F_type = eltype(F)
F[s[:kₐ], SH[:εₐ]] = -σₐ
F[s[:hats], SH[:εₐ]] = 0.
end
Γ₅ = zeros(T, Ny, Nz)
if n_strips > 0
Γ₅[J[:r_div], s[:hats]] = -γ
Γ₅[J[:wres], s[:hats]] = -γ
else
Γ₅[J[:q], s[:hats]] = -γ
Γ₅[J[:log_E_RQ], s[:hats]] = -γ
end
Γ₆ = zeros(T, Ny, Ny)
Γ₆[J[:rf], J[:log_D_plus_Q]] = 1.
if n_strips > 0
Γ₆[J[:r_div], J[:cₖ]] = -γ
Γ₆[J[:r_div], J[:wres]] = 1.
Γ₆[J[:wres], J[:cₖ]] = -γ
Γ₅[J[:q_div1], s[:hats]] = -γ
Γ₆[J[:q_div1], J[:cₖ]] = -γ
Γ₆[J[:q_div1], J[:div]] = 1.
if n_strips > 1
for i in 2:n_strips
Γ₅[J[Symbol("q_div$(i)")], s[:hats]] = -γ
Γ₆[J[Symbol("q_div$(i)")], J[:cₖ]] = -γ
Γ₆[J[Symbol("q_div$(i)")], J[Symbol("q_div$(i - 1)")]] = 1.
end
end
else
Γ₆[J[:q], J[:cₖ]] = -γ
Γ₆[J[:q], J[:log_D_plus_Q]] = 1.
Γ₆[J[:log_E_RQ], J[:cₖ]] = -γ
end
if sparse_arrays
Γ₅ = sparse(Γ₅)
Γ₆ = sparse(Γ₆)
end
if n_strips > 0
z = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_dss_N3_output.jld2"), "r")["z_dss"]
y = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_dss_N3_output.jld2"), "r")["y_dss"]
if n_strips > 3
y = vcat(y, -ones(n_strips - 3))
elseif n_strips < 3
y = y[1:end - (3 - n_strips)]
end
else
z = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_det_ss_output.jld2"), "r")["z_dss"]
y = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_det_ss_output.jld2"), "r")["y_dss"]
end
Ψ = zeros(T, Ny, Nz)
if sparse_arrays
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, rbc_cc_ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian,
Λ_cache_init = dims -> spzeros(dims...),
Σ_cache_init = dims -> spzeros(dims...))
else
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, rbc_cc_ccgf, vec(z), vec(y), Ψ, Nε; sparse_jacobian = sparse_jacobian)
end
end
function rbc_cc_ccgf(F, α, z)
# F .= .5 * diag(α * α') # slower but this is the underlying math
sum!(F, α.^2) # faster implementation
F .*= .5 # fewer multiplications by doing it this way
end
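# Quick sanity check of the fast implementation (illustrative only, not part of the model;
# requires `using LinearAlgebra: diag` if uncommented):
# α = rand(3, 2); F = zeros(3)
# rbc_cc_ccgf(F, α, nothing) # the state z is unused by this ccgf
# @assert F ≈ .5 .* diag(α * α')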
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2697 | # This script times the results from using sparse arrays for caching; sparse Jacobians
# of μ, ξ, and 𝒱 ; and sparse Jacobians for calls to nlsolve.
using RiskAdjustedLinearizations, LinearAlgebra, SparseArrays
using BenchmarkTools, Test, SparseDiffTools
# Settings
define_functions = true
time_methods = true
algorithm = :relaxation # Note only relaxation works for sparse differentiation.
N_approx = 10 # Number of periods ahead used for forward-difference equations
if define_functions
include(joinpath(dirname(@__FILE__), "..", "nk_with_capital", "nk_with_capital.jl"))
end
# Set up
## Instantiate object
m_nk = NKCapital(; N_approx = N_approx) # create parameters
m = nk_capital(m_nk; sparse_arrays = true, sparse_jacobian = [:μ, :ξ])
zinit = deepcopy(m.z)
yinit = deepcopy(m.y)
Ψinit = deepcopy(m.Ψ)
## Solve for steady state once and update sparsity pattern
solve!(m; algorithm = algorithm, verbose = :none)
sparsity = Dict()
colorvec = Dict()
sparsity[:J𝒱] = sparse(m[:JV])
colorvec[:J𝒱] = isempty(sparsity[:J𝒱].nzval) ? ones(Int64, size(sparsity[:J𝒱], 2)) : matrix_colors(sparsity[:J𝒱])
update_sparsity_pattern!(m, [:𝒱]; sparsity = sparsity, colorvec = colorvec)
## Solve w/sparse array caching; sparse differentiation of Jacobians of
## μ, ξ, and 𝒱 ; and sparse differentiation of the objective functions in `nlsolve`
jac_cache = preallocate_jac_cache(m, algorithm)
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = algorithm, sparse_jacobian = true, jac_cache = jac_cache)
if time_methods
m_dense = nk_capital(m_nk)
@info "Timing solve! with varying degrees of sparsiy"
println("Dense Array Caches and Dense Jacobians")
@btime begin
update!(m_dense, zinit, yinit, Ψinit)
solve!(m_dense; algorithm = algorithm, verbose = :none)
end
# ~ 2.48 s
println("Sparse Array Caches and Sparse Jacobians for Equilibrium Functions")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = algorithm, verbose = :none)
end
# ~ 2.37 s
println("Sparse Jacobians for nlsolve")
@btime begin
update!(m_dense, zinit, yinit, Ψinit)
solve!(m_dense; algorithm = algorithm, sparse_jacobian = true,
jac_cache = jac_cache, verbose = :none)
end
# ~ 0.85 s
println("Sparse Array Caches, Sparse Jacobians for Equilibrium Functions, and Sparse Jacobians for nlsolve")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = algorithm, sparse_jacobian = true, jac_cache = jac_cache, verbose = :none)
end
# ~ 0.9s
@test m_dense.z ≈ m.z
@test m_dense.y ≈ m.y
@test m_dense.Ψ ≈ m.Ψ
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 6170 | # This script shows how to compute risk-adjusted linearizations using sparse arrays
# for caches and sparse differentiation for the Jacobians of μ, ξ, and 𝒱 .
using RiskAdjustedLinearizations, LinearAlgebra, SparseArrays
using BenchmarkTools, Test, SparseDiffTools
# Settings
define_functions = true
time_methods = true
algorithm = :relaxation # Note that while both methods work with sparse array caches, only relaxation works for sparse differentiation.
N_approx = 10 # Number of periods ahead used for forward-difference equations
if define_functions
include(joinpath(dirname(@__FILE__), "..", "nk_with_capital", "nk_with_capital.jl"))
end
# Set up
m_nk = NKCapital(; N_approx = N_approx) # create parameters
# Sparse arrays for caches
m = nk_capital(m_nk; sparse_arrays = true)
## The keyword sparse_arrays tells the nk_capital function
## to make Γ₅, Γ₆ sparse arrays and to add the keyword arguments
## `Λ_cache_init = dims -> spzeros(dims...)`
## `Σ_cache_init = dims -> spzeros(dims...)`
## when calling the constructor, i.e.
## RiskAdjustedLinearization(...; Λ_cache_init = ...)
solve!(m; algorithm = algorithm, verbose = :none)
# Risk-adjusted linearization with sparse differentiation
# for the Jacobians of μ, ξ, and 𝒱 in addition to sparse caches.
## The first approach directly uses
## the constructor of a RiskAdjustedLinearization
## to determine the sparsity pattern.
## Since the initial guess for Ψ is a matrix of zeros,
## we will only use sparse differentiation for μ and ξ at first.
## Within nk_capital, the keyword `sparse_jacobian = [:μ, :ξ]` is passed
## to the constructor, i.e.
## RiskAdjustedLinearization(...; sparse_jacobian = ...)
m_sparsejac = nk_capital(m_nk; sparse_arrays = true, sparse_jacobian = [:μ, :ξ])
## Check the caches for the Jacobians of μ and ξ are actually sparse
@test issparse(m_sparsejac[:Γ₁])
@test issparse(m_sparsejac[:Γ₂])
@test issparse(m_sparsejac[:Γ₃])
@test issparse(m_sparsejac[:Γ₄])
## Now solve the model! Note that sparse differentiation
## can be fragile sometimes and result in NaNs or undefined
## numbers appearing during calls to `nlsolve`. Re-running
## `solve!` repeatedly or reconstructing `m_sparsejac` again
## will usually lead to a successful run.
solve!(m_sparsejac; algorithm = algorithm)
@test norm(steady_state_errors(m_sparsejac), Inf) < 1e-8
## The second approach calls `update_sparsity_pattern!`
## on an existing `RiskAdjustedLinearization`
### Create dictionaries for specifying the sparsity pattern
### Here, we will tell `m_sparsejac`
### to now use sparse differentiation for J𝒱
### and to use a new sparsity pattern for the
### Jacobians of μ and ξ.
sparsity = Dict()
colorvec = Dict()
sparsity[:J𝒱] = sparse(m[:JV])
@test isempty(sparsity[:J𝒱].nzval) # However, in the case of the current NK model, the entropy actually does not depend on z.
colorvec[:J𝒱] = ones(Int64, size(sparsity[:J𝒱], 2)) # Instead, we just pass in a coloring vector of this form
jac_to_sym = (μz = :Γ₁, μy = :Γ₂, ξz = :Γ₃, ξy = :Γ₄)
for k in [:μz, :μy, :ξz, :ξy]
sparsity[k] = m_sparsejac[jac_to_sym[k]]
colorvec[k] = matrix_colors(sparsity[k])
end
@test !issparse(m_sparsejac[:JV]) # JV is not sparse currently
update_sparsity_pattern!(m_sparsejac, [:μ, :ξ]; sparsity = sparsity) # Don't have to provide the matrix coloring vector (note 𝒱 is not included
# b/c calling matrix_colors on its sparsity pattern will error)
update_sparsity_pattern!(m_sparsejac, [:μ, :ξ, :𝒱]; sparsity = sparsity, # But if the coloring vector already exists,
colorvec = colorvec) # then you may as well pass that information, too.
@test issparse(m_sparsejac[:JV]) # Now JV is sparse
### If you ever need to look at the sparsity pattern or coloring vector,
### you can call `linearized_system(m_sparsejac).sparse_jac_caches` or
### `m_sparsejac.linearization.sparse_jac_caches`, which is a
### `NamedTuple` whose values include the sparsity pattern and coloring vector,
### as well as a cache used by forwarddiff_color_jacobian!
for k in [:μz, :μy, :ξz, :ξy, :J𝒱]
@test issparse(m_sparsejac.linearization.sparse_jac_caches[k][:sparsity])
end
### Now solve again with sparse Jacobian of 𝒱, too!
solve!(m_sparsejac, m_sparsejac.z .* 1.01, m_sparsejac.y .* 1.01, m_sparsejac.Ψ .* 1.01; algorithm = algorithm)
@test norm(steady_state_errors(m_sparsejac), Inf) < 1e-8
if time_methods
m_dense = nk_capital(m_nk)
zinit = deepcopy(m_dense.z)
yinit = deepcopy(m_dense.y)
Ψinit = deepcopy(m_dense.Ψ)
solve!(m; algorithm = :deterministic, verbose = :none)
zdet = deepcopy(m.z)
ydet = deepcopy(m.y)
Ψdet = deepcopy(m.Ψ)
println("Deterministic steady state with dense caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m_dense, zinit, yinit, Ψinit)
solve!(m_dense; algorithm = :deterministic, verbose = :none)
end
println("Deterministic steady state with sparse caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, verbose = :none)
end
println("Relaxation with dense caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m_dense, zdet, ydet, Ψdet)
solve!(m_dense; algorithm = :relaxation, verbose = :none)
end
println("Relaxation with sparse caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, verbose = :none)
end
println("Homotopy with dense caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m_dense, zdet, ydet, Ψdet)
solve!(m_dense; algorithm = :homotopy, verbose = :none)
end
println("Homotopy with sparse caches for Γ₅, Γ₆, Λ, and Σ")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, verbose = :none)
end
println("Relaxation with sparse caches and differentiation")
@btime begin
update!(m_sparsejac, zdet, ydet, Ψdet)
solve!(m_sparsejac; algorithm = :relaxation, verbose = :none)
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 6485 | # This script shows how to compute risk-adjusted linearizations using sparse Jacobian methods
# for calls to nlsolve
using RiskAdjustedLinearizations, LinearAlgebra, SparseArrays, BenchmarkTools
# Settings
define_functions = true
time_methods = true
algorithm = :relaxation
autodiff = false # if true, use forward-mode automatic differentiation in calls to nlsolve
autodiff_method = autodiff ? :forward : :central
N_approx = 10 # Number of periods ahead used for forward-difference equations
if define_functions
include(joinpath(dirname(@__FILE__), "..", "nk_with_capital", "nk_with_capital.jl"))
end
# Set up
m_nk = NKCapital(; N_approx = N_approx) # create parameters
m = nk_capital(m_nk) # instantiate risk-adjusted linearization
zinit = deepcopy(m.z)
yinit = deepcopy(m.y)
Ψinit = deepcopy(m.Ψ)
# Compute steady state
solve!(m; algorithm = :deterministic, verbose = :none)
zdet = deepcopy(m.z)
ydet = deepcopy(m.y)
Ψdet = deepcopy(m.Ψ)
# Using sparsity pattern and matrix coloring to find deterministic steady state
sparsity_det, colorvec_det = compute_sparsity_pattern(m, :deterministic) # Create sparsity pattern/matrix coloring vector
update!(m, zinit, yinit, Ψinit) # need to re-initialize or else the system of equations will be solved at the initial guess
solve!(m; algorithm = :deterministic, ftol = 1e-10, # this call to solve! tries to infer the sparsity pattern
sparse_jacobian = true, verbose = :none) # by computing the Jacobian one time w/finite differences
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, ftol = 1e-10, # need to add tighter tolerance to avoid
sparse_jacobian = true, sparsity = sparsity_det, colorvec = colorvec_det, # a LAPACK exception when computing the
verbose = :none) # Schur decomposition for Ψ
# Using sparsity pattern and matrix coloring to find stochastic steady state via relaxation
# Note that the syntax is essentially the same as the deterministic case, except that
# compute_sparsity_pattern's second argument is now :relaxation
sparsity_rel, colorvec_rel = compute_sparsity_pattern(m, :relaxation)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, verbose = :none)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity_rel, colorvec = colorvec_rel,
verbose = :none)
# Using sparsity pattern and matrix coloring to find stochastic steady state via homotopy
sparsity_hom, colorvec_hom = compute_sparsity_pattern(m, :homotopy)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, verbose = :none)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity_hom, colorvec = colorvec_hom,
verbose = :none)
# Using Jacobian cache to find deterministic steady state
# Like compute_sparsity_pattern, the user only needs to select the
# algorithm for which the Jacobian cache will be used.
jac_cache_det = preallocate_jac_cache(m, :deterministic)
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, autodiff = autodiff_method,
ftol = 1e-10, sparse_jacobian = true, jac_cache = jac_cache_det,
verbose = :none)
# Using Jacobian cache to find stochastic steady state via relaxation
jac_cache_rel = preallocate_jac_cache(m, :relaxation)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, ftol = 1e-10,
sparse_jacobian = true, jac_cache = jac_cache_rel,
verbose = :none)
# Using Jacobian cache to find stochastic steady state via homotopy
jac_cache_hom = preallocate_jac_cache(m, :homotopy)
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache_hom,
verbose = :none)
if time_methods
println("Deterministic steady state with dense Jacobian")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, verbose = :none)
end
println("Deterministic steady state with sparsity pattern of Jacobian and matrix coloring vector")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, ftol = 1e-10,
sparse_jacobian = true, sparsity = sparsity_det, colorvec = colorvec_det,
verbose = :none)
end
println("Deterministic steady state with sparsity pattern of Jacobian, matrix coloring vector, and caching")
@btime begin
update!(m, zinit, yinit, Ψinit)
solve!(m; algorithm = :deterministic, autodiff = autodiff_method,
ftol = 1e-10, sparse_jacobian = true, jac_cache = jac_cache_det,
verbose = :none)
end
println("Relaxation with dense Jacobian")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, verbose = :none)
end
println("Relaxation with sparsity pattern of Jacobian and matrix coloring vector")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, sparsity = sparsity_rel, colorvec = colorvec_rel,
verbose = :none)
end
println("Relaxation with sparsity pattern of Jacobian, matrix coloring vector, and caching")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :relaxation, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache_rel,
verbose = :none)
end
println("Homotopy with dense Jacobian")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, verbose = :none)
end
println("Homotopy with sparsity pattern of Jacobian and matrix coloring vector")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy,
sparse_jacobian = true, sparsity = sparsity_hom, colorvec = colorvec_hom,
verbose = :none)
end
println("Homotopy with sparsity pattern of Jacobian, matrix coloring vector, and caching")
@btime begin
update!(m, zdet, ydet, Ψdet)
solve!(m; algorithm = :homotopy, autodiff = autodiff_method,
sparse_jacobian = true, jac_cache = jac_cache_hom,
verbose = :none)
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2437 | # This script actually solves the TextbookNK model with a risk-adjusted linearization
# and times the methods, if desired
using RiskAdjustedLinearizations, JLD2, LinearAlgebra, Test
include("textbook_nk.jl")
out = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "textbook_nk_ss_output.jld2"), "r")
# Settings
autodiff = false
algorithm = :relaxation
euler_equation_errors = false
test_price_dispersion = false # check if price dispersion in steady state is always bounded below by 1
# Set up
m_nk = TextbookNK() # create parameters
m = textbook_nk(m_nk) # instantiate risk-adjusted linearization
autodiff_method = autodiff ? :forward : :central
# Solve!
solve!(m; algorithm = :deterministic, autodiff = autodiff_method)
@test m.z ≈ out["z_det"]
@test m.y ≈ out["y_det"]
@test m.Ψ ≈ out["Psi_det"]
z_det = copy(m.z)
y_det = copy(m.y)
Ψ_det = copy(m.Ψ)
solve!(m; algorithm = algorithm, autodiff = autodiff_method)
@test m.z ≈ out["z"]
@test m.y ≈ out["y"]
@test m.Ψ ≈ out["Ψ"]
if test_price_dispersion
π̃_ss_vec = log.(range(1 - .005, stop = 1 + .005, length = 10)) # On annualized basis, range from -2% to 2% target inflation
det_soln = Dict()
sss_soln = Vector{RiskAdjustedLinearization}(undef, length(π̃_ss_vec))
for (i, π̃_ss) in enumerate(π̃_ss_vec)
local m_nk = TextbookNK(; π̃_ss = π̃_ss)
local m = textbook_nk(m_nk)
solve!(m; algorithm = :deterministic, verbose = :none)
det_soln[i] = Dict()
det_soln[i][:z] = copy(m.z)
det_soln[i][:y] = copy(m.y)
det_soln[i][:Ψ] = copy(m.Ψ)
solve!(m; algorithm = algorithm, verbose = :none)
sss_soln[i] = m
end
det_v = exp.([det_soln[i][:z][3] for i in 1:length(det_soln)])
sss_v = exp.([sss_soln[i].z[3] for i in 1:length(sss_soln)])
@test all(det_v .> 1.)
@test all(sss_v .> 1.)
end
if euler_equation_errors
# Load shocks. Using CRW ones b/c that model also has 2 standard normal random variables
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_shocks.jld2"), "r")["shocks"]
# With this simple model, the Euler equation for bonds holds exactly
@test abs(euler_equation_error(m, nk_cₜ, (a, b, c, d) -> nk_logSDFxR(a, b, c, d; β = m_nk.β, σ = m_nk.σ),
nk_𝔼_quadrature, shocks, summary_statistic = x -> norm(x, Inf))) ≈ 0.
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 4238 | using UnPack, OrderedCollections, ForwardDiff, JLD2
mutable struct TextbookNK{T <: Real}
β::T
σ::T
ψ::T
η::T
ϵ::T
ϕ::T
ρₐ::T
σₐ::T
ρᵢ::T
σᵢ::T
ϕ_π::T
π̃_ss::T
end
function TextbookNK(; β::T = .99, σ::T = 2., ψ::T = 1., η::T = 1., ϵ::T = 4.45, ϕ::T = .7,
ρₐ::T = 0.9, σₐ::T = .004, ρᵢ::T = .7, σᵢ::T = .025 / 4.,
ϕ_π::T = 1.5, π̃_ss::T = 0.) where {T <: Real}
return TextbookNK{T}(β, σ, ψ, η, ϵ, ϕ, ρₐ, σₐ, ρᵢ, σᵢ, ϕ_π, π̃_ss)
end
function textbook_nk(m::TextbookNK{T}) where {T <: Real}
@unpack β, σ, ψ, η, ϵ, ϕ, ρₐ, σₐ, ρᵢ, σᵢ, ϕ_π, π̃_ss = m
ĩ_ss = π̃_ss - log(β)
# On notation: x̃ = log(1 + x); x′ = 𝔼ₜ[xₜ₊₁]
S = OrderedDict{Symbol, Int}(:a => 1, :ĩ₋₁ => 2, :v₋₁ => 3, :i_sh => 4) # State Variables
J = OrderedDict{Symbol, Int}(:c => 1, :π̃ => 2, :n => 3, :w => 4, :mc => 5, :v => 6,
:x₁ => 7, :x₂ => 8, :ĩ => 9) # Jump variables
E = OrderedDict{Symbol, Int}(:euler => 1, :mrs => 2, :eq_mc => 3, :output => 4,
:dispersion => 5, :phillips_curve => 6, :eq_x₁ => 7,
:eq_x₂ => 8, :eq_mp => 9) # Equations
SH = OrderedDict{Symbol, Int}(:εₐ => 1, :εᵢ => 2) # Exogenous shocks
@unpack a, ĩ₋₁, v₋₁, i_sh = S
@unpack c, π̃, n, w, mc, v, x₁, x₂, ĩ = J
@unpack euler, mrs, eq_mc, output, dispersion, phillips_curve, eq_x₁, eq_x₂, eq_mp = E
@unpack εₐ, εᵢ = SH
Nz = length(S)
Ny = length(J)
Nε = length(SH)
function μ(F, z, y)
F_type = eltype(F)
F[a] = ρₐ * z[a]
F[ĩ₋₁] = y[ĩ]
F[v₋₁] = y[v]
F[i_sh] = zero(F_type)
end
function ξ(F, z, y)
F_type = eltype(F)
π̃_star = log(ϵ / (ϵ - 1.)) + y[π̃] + (y[x₁] - y[x₂])
F[euler] = log(β) + σ * y[c] + y[ĩ]
F[mrs] = log(ψ) + η * y[n] - (-σ * y[n] + y[w])
F[eq_mc] = y[w] - (z[a] + y[mc])
F[output] = y[c] - (z[a] + y[n] - y[v])
F[dispersion] = y[v] - (ϵ * y[π̃] + log((1. - ϕ) * exp(π̃_star)^(-ϵ) + ϕ * exp(z[v₋₁])))
F[phillips_curve] = (1. - ϵ) * y[π̃] - log((1. - ϕ) * exp(π̃_star)^(1 - ϵ) + ϕ)
F[eq_x₁] = log(ϕ) + log(β) - log(exp(y[x₁]) - exp((1. - σ) * y[c] + y[mc]))
F[eq_x₂] = log(ϕ) + log(β) - log(exp(y[x₂]) - exp((1. - σ) * y[c]))
F[eq_mp] = y[ĩ] - ((1. - ρᵢ) * ĩ_ss + ρᵢ * z[ĩ₋₁] + (1 - ρᵢ) * ϕ_π * (y[π̃] - π̃_ss) + z[i_sh])
end
# The cache is initialized as zeros so we only need to fill non-zero elements
Λ = zeros(T, Nz, Ny)
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F[a, εₐ] = σₐ
F[i_sh, εᵢ] = σᵢ
end
function ccgf(F, α, z)
# F .= .5 * diag(α * α') # slower but this is the underlying math
sum!(F, α.^2) # faster implementation
F .*= .5
end
Γ₅ = zeros(T, Ny, Nz)
Γ₆ = zeros(T, Ny, Ny)
Γ₆[euler, c] = -σ
Γ₆[euler, π̃] = -one(T)
Γ₆[eq_x₁, x₁] = one(T)
Γ₆[eq_x₁, π̃] = ϵ
Γ₆[eq_x₂, x₂] = one(T)
Γ₆[eq_x₂, π̃] = (ϵ - 1.)
Ψ = zeros(T, Ny, Nz)
# Deterministic steady state as initial guess
# z
a0 = 0.
ĩ₋₁0 = ĩ_ss
v₋₁0 = 0.
i_sh0 = 0.
z = [a0, ĩ₋₁0, v₋₁0, i_sh0]
# y
ĩ0 = ĩ_ss
π̃0 = π̃_ss
v0 = 0.
mc0 = log((ϵ - 1.) / ϵ)
x₁0 = log(exp(π̃0) * exp(mc0) / (1. - ϕ * β))
x₂0 = log(1. / (1. - ϕ * β))
n0 = (1 / (η + σ)) * log(1. / ψ * (exp(v0))^σ * exp(mc0))
c0 = n0 - v0
w0 = a0 + mc0
y = [c0, π̃0, n0, w0, mc0, v0, x₁0, x₂0, ĩ0]
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, vec(z), vec(y), Ψ, Nε)
end
nk_cₜ(m, zₜ) = exp(m.y[1] + (m.Ψ * (zₜ - m.z))[1])
# Evaluates euler equation in log terms
function nk_logSDFxR(m, zₜ, εₜ₊₁, Cₜ; β::T = .99, σ::T = 2.) where {T <: Real}
yₜ = m.y + m.Ψ * (zₜ - m.z)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
return log(β) - σ * (yₜ₊₁[1] - log(Cₜ)) + yₜ[9] - yₜ₊₁[2]
end
# Calculate Euler equation via quadrature
std_norm_mean = zeros(2)
std_norm_sig = ones(2)
nk_𝔼_quadrature(f::Function) = gausshermite_expectation(f, std_norm_mean, std_norm_sig, 10)
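# Illustrative usage (mirrors the check in textbook_nk_ral.jl; assumes `m` is a solved
# textbook_nk model and `shocks` is a 2 × T matrix of standard-normal draws):
# euler_equation_error(m, nk_cₜ, (a, b, c, d) -> nk_logSDFxR(a, b, c, d),
#                      nk_𝔼_quadrature, shocks)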
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2213 | # This script actually solves the WachterDisasterRisk model with a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, JLD2, RiskAdjustedLinearizations, Test
include("wachter.jl")
# Settings: what do you want to do?
time_methods = false
numerical_algorithm = :relaxation
autodiff = false
# Set up
autodiff_method = autodiff ? :forward : :central
m_wachter = WachterDisasterRisk()
m = inplace_wachter_disaster_risk(m_wachter)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
# Solve!
solve!(m; algorithm = numerical_algorithm, autodiff = autodiff_method)
if numerical_algorithm == :relaxation
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "iterative_sss_output.jld2"), "r")
elseif numerical_algorithm == :homotopy
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "homotopy_sss_output.jld2"), "r")
end
@test isapprox(sssout["z"], m.z, atol=1e-4)
@test isapprox(sssout["y"], m.y, atol=1e-4)
@test isapprox(sssout["Psi"], m.Ψ, atol=1e-4)
if time_methods
println("Deterministic steady state")
@btime begin
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
end
# Use deterministic steady state as guesses
solve!(m, z0, y0; algorithm = :deterministic, autodiff = autodiff_method, verbose = :none)
zdet = copy(m.z)
ydet = copy(m.y)
Ψdet = copy(m.Ψ)
println("Relaxation method")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, autodiff = autodiff_method, verbose = :none)
end
println("Relaxation method with Anderson acceleration")
@btime begin # called the "iterative" method in the original paper
solve!(m, zdet, ydet, Ψdet; algorithm = :relaxation, use_anderson = true, m = 3, autodiff = autodiff_method, verbose = :none)
end
println("Homotopy method")
@btime begin # called the "continuation" method in the original paper, but is called homotopy in the original code
solve!(m, zdet, ydet, Ψdet; algorithm = :homotopy, autodiff = autodiff_method, verbose = :none)
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 4705 | using UnPack, OrderedCollections, ForwardDiff
mutable struct WachterDisasterRisk{T <: Real}
μₐ::T
σₐ::T
ν::T
δ::T
ρₚ::T
pp::T
ϕₚ::T
ρ::T
γ::T
β::T
end
function WachterDisasterRisk(; μₐ::T = .0252 / 4., σₐ::T = .02 / sqrt(4.), ν::T = .3, δ::T = 0., ρₚ::T = .08^(1. / 4.), pp::T = .0355 / 4.,
ϕₚ::T = .0114 / 4. / (.02 / sqrt(4.)) / sqrt(.0355 / 4.), ρ::T = 2.0, γ::T = 3.0,
β::T = exp(-.012 / 4.)) where {T <: Real}
return WachterDisasterRisk{T}(μₐ, σₐ, ν, δ, ρₚ, pp, ϕₚ, ρ, γ, β)
end
function inplace_wachter_disaster_risk(m::WachterDisasterRisk{T}) where {T <: Real}
@unpack μₐ, σₐ, ν, δ, ρₚ, pp, ϕₚ, ρ, γ, β = m
@assert ρ != 1. # Forcing ρ to be non-unit for this example
S = OrderedDict{Symbol, Int}(:p => 1, :εc => 2, :εξ => 3) # State variables
J = OrderedDict{Symbol, Int}(:vc => 1, :xc => 2, :rf => 3) # Jump variables
SH = OrderedDict{Symbol, Int}(:εₚ => 1, :εc => 2, :εξ => 3) # Exogenous shocks
Nz = length(S)
Ny = length(J)
Nε = length(SH)
function μ(F, z, y)
F_type = eltype(F)
F[S[:p]] = (1 - ρₚ) * pp + ρₚ * z[S[:p]]
F[S[:εc]] = zero(F_type)
F[S[:εξ]] = zero(F_type)
end
function ξ(F, z, y)
F[J[:vc]] = log(β) - γ * μₐ + γ * ν * z[S[:p]] - (ρ - γ) * y[J[:xc]] + y[J[:rf]]
F[J[:xc]] = log(1. - β + β * exp((1. - ρ) * y[J[:xc]])) - (1. - ρ) * y[J[:vc]]
F[J[:rf]] = (1. - γ) * (μₐ - ν * z[S[:p]] - y[J[:xc]])
end
Λ = zeros(T, Nz, Ny)
# The cache is initialized as zeros so we only need to fill non-zero elements
function Σ(F, z)
F_type = eltype(F)
F[SH[:εₚ], SH[:εₚ]] = ((z[S[:p]] < 0) ? 0. : sqrt(z[S[:p]])) * ϕₚ * σₐ
F[SH[:εc], SH[:εc]] = one(F_type)
F[SH[:εξ], SH[:εξ]] = one(F_type)
end
function ccgf(F, α, z)
# F .= .5 .* α[:, 1].^2 + .5 * α[:, 2].^2 + (exp.(α[:, 3] + α[:, 3].^2 .* δ^2 ./ 2.) .- 1. - α[:, 3]) * z[S[:p]]
# Following lines does the above calculation but in a faster way
sum!(F, view(α, :, 1:2).^2)
F .*= .5
F .+= (exp.(α[:, 3] + view(α, :, 3).^2 .* δ^2 ./ 2.) .- 1. - view(α, :, 3)) * z[S[:p]]
end
Γ₅ = zeros(T, Ny, Nz)
Γ₅[J[:vc], S[:εc]] = (-γ * σₐ)
Γ₅[J[:vc], S[:εξ]] = (γ * ν)
Γ₅[J[:rf], S[:εc]] = (1. - γ) * σₐ
Γ₅[J[:rf], S[:εξ]] = -(1. - γ) * ν
Γ₆ = zeros(T, Ny, Ny)
Γ₆[J[:vc], J[:vc]] = (ρ - γ)
Γ₆[J[:rf], J[:vc]] = (1. - γ)
z = [pp, 0., 0.]
xc_sss = log((1. - β) / (exp((1. - ρ) * (ν * pp - μₐ)) - β)) / (1. - ρ)
vc_sss = xc_sss + ν * pp - μₐ
y = [vc_sss, xc_sss, -log(β) + γ * (μₐ - ν * pp) - (ρ - γ) * (vc_sss - xc_sss)]
Ψ = zeros(T, Ny, Nz)
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
end
function outofplace_wachter_disaster_risk(m::WachterDisasterRisk{T}) where {T}
@unpack μₐ, σₐ, ν, δ, ρₚ, pp, ϕₚ, ρ, γ, β = m
@assert ρ != 1. # Forcing ρ to be non-unit for this example
S = OrderedDict{Symbol, Int}(:p => 1, :εc => 2, :εξ => 3) # State variables
J = OrderedDict{Symbol, Int}(:vc => 1, :xc => 2, :rf => 3) # Jump variables
SH = OrderedDict{Symbol, Int}(:εₚ => 1, :εc => 2, :εξ => 3) # Exogenous shocks
Nz = length(S)
Ny = length(J)
Nε = length(SH)
function μ(z, y)
return [(1 - ρₚ) * pp + ρₚ * z[S[:p]], 0., 0.]
end
function ξ(z, y)
F = RiskAdjustedLinearizations.dualvector(y, z)
F[J[:vc]] = log(β) - γ * μₐ + γ * ν * z[S[:p]] - (ρ - γ) * y[J[:xc]] + y[J[:rf]]
F[J[:xc]] = log(1. - β + β * exp((1. - ρ) * y[J[:xc]])) - (1. - ρ) * y[J[:vc]]
F[J[:rf]] = (1. - γ) * (μₐ - ν * z[S[:p]] - y[J[:xc]])
return F
end
Λ = zeros(T, Nz, Ny) # maps endogenous risk in jumps into the states, matching the in-place version
function Σ(z)
F = zeros(eltype(z), Nz, Nz)
F[SH[:εₚ], SH[:εₚ]] = ((z[S[:p]] < 0) ? 0. : sqrt(z[S[:p]])) * ϕₚ * σₐ # guard against negative p, as in the in-place version
F[SH[:εc], SH[:εc]] = 1.
F[SH[:εξ], SH[:εξ]] = 1.
return F
end
ccgf(α, z) = .5 * α[:, 1].^2 + .5 * α[:, 2].^2 + (exp.(α[:, 3] + α[:, 3].^2 * δ.^2 ./ 2.) .- 1. - α[:, 3]) * z[S[:p]]
Γ₅ = zeros(T, Ny, Nz)
Γ₅[J[:vc], S[:εc]] = (-γ * σₐ)
Γ₅[J[:vc], S[:εξ]] = (γ * ν)
Γ₅[J[:rf], S[:εc]] = (1. - γ) * σₐ
Γ₅[J[:rf], S[:εξ]] = -(1. - γ) * ν
Γ₆ = zeros(T, Ny, Ny)
Γ₆[J[:vc], J[:vc]] = (ρ - γ)
Γ₆[J[:rf], J[:vc]] = (1. - γ)
z = [pp, 0., 0.]
xc_sss = log((1. - β) / (exp((1. - ρ) * (ν * pp - μₐ)) - β)) / (1. - ρ)
vc_sss = xc_sss + ν * pp - μₐ
y = [vc_sss, xc_sss, -log(β) + γ * (μₐ - ν * pp) - (ρ - γ) * (vc_sss - xc_sss)]
Ψ = zeros(T, Ny, Nz)
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
end
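# Illustrative usage (a sketch; both constructors build the same model and differ only in
# whether μ, ξ, and Σ write into caches in place):
# m = outofplace_wachter_disaster_risk(WachterDisasterRisk())
# solve!(m; algorithm = :relaxation)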
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1488 | isdefined(Base, :__precompile__) && __precompile__(false)
module RiskAdjustedLinearizations
import Base: show, getindex
using ArrayInterface, FastGaussQuadrature, FiniteDiff, ForwardDiff, LabelledArrays, LinearAlgebra, Printf
using SparseArrays, SparseDiffTools, SparsityDetection, UnPack
using BandedMatrices: Zeros
using NLsolve: nlsolve, OnceDifferentiable
# Utilities
include("util.jl") # in principle, several of these utility functions could work for other packages
# RiskAdjustedLinearization
include("cache_types.jl")
include("risk_adjusted_linearization.jl")
include("sparse_jacobian_helpers.jl")
# Numerical Algorithms
include("numerical_algorithms/compute_psi.jl")
include("numerical_algorithms/blanchard_kahn.jl")
include("numerical_algorithms/relaxation.jl")
include("numerical_algorithms/homotopy.jl")
include("numerical_algorithms/solve.jl")
# Simulations and Simulation-based Diagnostics
include("simulation/simulate.jl")
include("simulation/impulse_responses.jl")
include("simulation/gausshermite_expectation.jl")
include("simulation/euler_equation_error.jl")
export
# main functions
RiskAdjustedLinearization, update!, nonlinear_system, linearized_system, solve!,
steady_state_errors,
# simulation related functions
simulate, impulse_responses, gausshermite_expectation, euler_equation_error,
dynamic_euler_equation_error,
# sparsity helpers
preallocate_jac_cache, compute_sparsity_pattern, update_sparsity_pattern!
end # module
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 24129 | abstract type AbstractRALF end
# RALF1
mutable struct RALF1{LC} <: AbstractRALF
f::Function
f0::Function
cache::LC
end
get_cache_type(ral::RALF1{LC}) where {LC} = LC
function RALF1(f::Function, x1::C1, cache::AbstractArray{<: Number};
               chunksize::Int = ForwardDiff.pickchunksize(length(x1))) where {C1 <: AbstractArray{<: Number}}
if applicable(f, cache, x1)
fnew = function _f_ip(cache::LCN, x1::C1N) where {LCN <: DiffCache, C1N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1)
return target_cache
end
else
fnew = function _f_oop(cache::LCN, x1::C1N) where {LCN <: DiffCache, C1N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1)
return target_cache
end
end
return RALF1(fnew, f, dualcache(cache, Val{chunksize}))
end
function RALF1(fin::LC) where {LC <: AbstractArray{<: Number}}
    f(cache::LCN, x1::C1N) where {LCN <: AbstractArray{<: Number}, C1N <: AbstractArray{<: Number}} = cache
return RALF1{LC}(f, x -> fin, fin)
end
function (ralf::RALF1)(x1::C1) where {C1 <: AbstractArray{<: Number}}
return ralf.f(ralf.cache, x1)
end
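# Illustrative usage (names invented for this sketch): wrap an in-place function and a
# cache so it can be evaluated with both regular and ForwardDiff.Dual inputs.
# f!(F, x) = (F .= 2 .* x)
# r1 = RALF1(f!, ones(3), zeros(3))
# r1(ones(3)) # fills and returns the cache with f! evaluated at ones(3)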
# RALF2
mutable struct RALF2{LC} <: AbstractRALF
f::Function
f0::Function
cache::LC
end
get_cache_type(ral::RALF2{LC}) where {LC} = LC
function RALF2(f::Function, x1::C1, x2::C2, cache::AbstractArray{<: Number}, chunksizes::NTuple{Nc, Int} =
               (ForwardDiff.pickchunksize(min(length(x1), length(x2))), )) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number}, Nc}
if applicable(f, cache, x1, x2)
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_ip1(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_ip2(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2)
return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_ip3(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2)
return target_cache
end
else
throw(MethodError("The length of the sixth input argument, chunksizes, must be 1, 2, or 3."))
end
else
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_oop1(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_oop2(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2)
return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_oop3(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2)
return target_cache
end
else
throw(MethodError("The length of the sixth input argument, chunksizes, must be 1, 2, or 3."))
end
end
return RALF2(fnew, f, diffcache)
end
function RALF2(fin::LC) where {LC <: AbstractArray{<: Number}}
f(cache::LCN, x1::C1N, x2::C2N, select::Tuple{Int, Int}) where {LCN <: AbstractArray{<: Number}, C1N <: AbstractArray{<: Number}, C2N <: AbstractArray{<: Number}} = cache
return RALF2{LC}(f, x -> fin, fin)
end
# Using the default of (1, 1) for `select` is important. This way, we can use autodiff
# during the homotopy algorithm without requiring additional arguments when calling `update!`
# to ensure the correct cache is used.
function (ralf::RALF2)(x1::C1, x2::C2, select::Tuple{Int, Int} = (1, 1)) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number}}
return ralf.f(ralf.cache, x1, x2, select)
end
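# Illustrative usage (names invented for this sketch): the `select` tuple picks which
# dual cache is retrieved, so differentiating with respect to x1 versus x2 does not
# clobber the same cache.
# g!(F, x1, x2) = (F .= x1 .+ 2 .* x2)
# r2 = RALF2(g!, ones(3), ones(3), zeros(3))
# r2(ones(3), 2 .* ones(3)) # default select = (1, 1)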
# RALF3
mutable struct RALF3{LC} <: AbstractRALF
f::Function
f0::Function
cache::LC
end
get_cache_type(ral::RALF3{LC}) where {LC} = LC
function RALF3(f::Function, x1::C1, x2::C2, x3::C3, cache::AbstractArray{<: Number},
chunksizes::NTuple{Nc, Int} =
(ForwardDiff.pickchunksize(min(length(x1),
length(x2), length(x3))), )) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number},
                                                              C3 <: AbstractArray{<: Number}, Nc}
if applicable(f, cache, x1, x2, x3)
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_ip1(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2, x3)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_ip2(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2, x3)
return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_ip3(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2, x3)
return target_cache
end
else
throw(MethodError("The length of the seventh input argument, chunksizes, must be 1, 2, or 3."))
end
else
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_oop1(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_oop2(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3)
return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_oop3(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3)
return target_cache
end
else
throw(MethodError("The length of the seventh input argument, chunksizes, must be 1, 2, or 3."))
end
end
return RALF3(fnew, f, diffcache)
end
function RALF3(fin::LC) where {LC <: AbstractArray{<: Number}}
f(cache::LCN, x1::C1N, x2::C2N, x3::C3N, select::Tuple{Int, Int}) where {LCN <: AbstractArray{<: Number}, C1N <: AbstractArray{<: Number}, C2N <: AbstractArray{<: Number}, C3N <: AbstractArray{<: Number}} = cache
return RALF3{LC}(f, x -> fin, fin)
end
# Using the default of (1, 1) for `select` is important. This way, we can use autodiff
# during the homotopy algorithm without requiring additional arguments when calling `update!`
# to ensure the correct cache is used.
function (ralf::RALF3)(x1::C1, x2::C2, x3::C3,
select::Tuple{Int, Int} = (1, 1)) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number},
C3 <: AbstractArray{<: Number}}
return ralf.f(ralf.cache, x1, x2, x3, select)
end
# RALF4
mutable struct RALF4{LC} <: AbstractRALF
f::Function
f0::Function
cache::LC
end
get_cache_type(ral::RALF4{LC}) where {LC} = LC
function RALF4(f::Function, x1::C1, x2::C2, x3::C3, x4::C4, cache::AbstractArray{<: Number},
chunksizes::NTuple{Nc, Int} =
(ForwardDiff.pickchunksize(min(length(x1),
               length(x2), length(x3), length(x4))), )) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number},
                                                               C3 <: AbstractArray{<: Number}, C4 <: AbstractArray{<: Number}, Nc}
if applicable(f, cache, x1, x2, x3, x4)
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_ip1(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, x4, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
f(target_cache, x1, x2, x3, x4)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_ip2(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
            target_cache = get_tmp(cache, x1, x2, x3, x4, select)
            if size(target_cache) != size(cache.du)
                target_cache = reshape(target_cache, size(cache.du))
            end
            f(target_cache, x1, x2, x3, x4)
            return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_ip3(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
            target_cache = get_tmp(cache, x1, x2, x3, x4, select)
            if size(target_cache) != size(cache.du)
                target_cache = reshape(target_cache, size(cache.du))
            end
            f(target_cache, x1, x2, x3, x4)
            return target_cache
end
else
throw(MethodError("The length of the eighth input argument, chunksizes, must be 1, 2, or 3."))
end
else
if length(chunksizes) == 1 # Figure out which type of DiffCache is needed
diffcache = dualcache(cache, Val{chunksizes[1]})
fnew = function _f_oop1(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: DiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, x4, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3, x4)
return target_cache
end
elseif length(chunksizes) == 2
diffcache = twodualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]})
fnew = function _f_oop2(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: TwoDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, x4, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3, x4)
return target_cache
end
elseif length(chunksizes) == 3
diffcache = threedualcache(cache, Val{chunksizes[1]}, Val{chunksizes[2]}, Val{chunksizes[3]})
fnew = function _f_oop3(cache::LCN, x1::C1N, x2::C2N,
x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: ThreeDiffCache,
C1N <: AbstractArray{<: Number},
C2N <: AbstractArray{<: Number},
C3N <: AbstractArray{<: Number},
C4N <: AbstractArray{<: Number}}
target_cache = get_tmp(cache, x1, x2, x3, x4, select)
if size(target_cache) != size(cache.du)
target_cache = reshape(target_cache, size(cache.du))
end
target_cache .= f(x1, x2, x3, x4)
return target_cache
end
else
throw(MethodError("The length of the eighth input argument, chunksizes, must be 1, 2, or 3."))
end
end
return RALF4(fnew, f, diffcache)
end
function RALF4(fin::LC) where {LC <: AbstractArray{<: Number}}
f(cache::LCN, x1::C1N, x2::C2N, x3::C3N, x4::C4N, select::Tuple{Int, Int}) where {LCN <: AbstractArray{<: Number}, C1N <: AbstractArray{<: Number}, C2N <: AbstractArray{<: Number}, C3N <: AbstractArray{<: Number}, C4N <: AbstractArray{<: Number}} = cache
return RALF4{LC}(f, x -> fin, fin)
end
# Using the default of (1, 1) for `select` is important. This way, we can use autodiff
# during the homotopy algorithm without requiring additional arguments when calling `update!`
# to ensure the correct cache is used.
function (ralf::RALF4)(x1::C1, x2::C2, x3::C3,
x4::C4, select::Tuple{Int, Int} = (1, 1)) where {C1 <: AbstractArray{<: Number}, C2 <: AbstractArray{<: Number},
C3 <: AbstractArray{<: Number}, C4 <: AbstractArray{<: Number}}
return ralf.f(ralf.cache, x1, x2, x3, x4, select)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 33982 | # Subtypes used for the main RiskAdjustedLinearization type
mutable struct RALNonlinearSystem{L <: AbstractRALF, S <: AbstractRALF, V <: AbstractRALF}
μ::RALF2
Λ::L
Σ::S
ξ::RALF2
𝒱::V
ccgf::Function
end
Λ_eltype(m::RALNonlinearSystem{L, S}) where {L, S} = L
Σ_eltype(m::RALNonlinearSystem{L, S}) where {L, S} = S
@inline function update!(m::RALNonlinearSystem{L, S, V}, z::C1, y::C1, Ψ::C2;
select::Vector{Symbol} = Symbol[:μ, :ξ, :𝒱]) where {L, S, V <: RALF2,
C1 <: AbstractVector{<: Number},
C2 <: AbstractMatrix{<: Number}}
if :μ in select
m.μ(z, y)
end
if :ξ in select
m.ξ(z, y)
end
if :𝒱 in select
m.𝒱(z, Ψ)
end
m
end
@inline function update!(m::RALNonlinearSystem{L, S, V}, z::C1, y::C1, Ψ::C2;
select::Vector{Symbol} = Symbol[:μ, :ξ, :𝒱]) where {L, S, V <: RALF4,
C1 <: AbstractVector{<: Number}, C2 <: AbstractMatrix{<: Number}}
if :μ in select
m.μ(z, y)
end
if :ξ in select
m.ξ(z, y)
end
if :𝒱 in select
m.𝒱(z, y, Ψ, z)
end
m
end
mutable struct RALLinearizedSystem{JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number}, SJC <: AbstractDict{Symbol, NamedTuple}}
μz::RALF2
μy::RALF2
ξz::RALF2
ξy::RALF2
J𝒱::Union{RALF2, RALF3}
Γ₅::JC5
Γ₆::JC6
sparse_jac_caches::SJC
end
function RALLinearizedSystem(μz::RALF2, μy::RALF2, ξz::RALF2, ξy::RALF2, J𝒱::AbstractRALF,
Γ₅::AbstractMatrix{<: Number}, Γ₆::AbstractMatrix{<: Number})
RALLinearizedSystem(μz, μy, ξz, ξy, J𝒱, Γ₅, Γ₆, Dict{Symbol, NamedTuple}())
end
@inline function update!(m::RALLinearizedSystem, z::C1, y::C1, Ψ::C2;
                         select::Vector{Symbol} =
                         Symbol[:Γ₁, :Γ₂, :Γ₃, :Γ₄, :JV]) where {C1 <: AbstractVector{<: Number},
                                                                 C2 <: AbstractMatrix{<: Number}}
if :Γ₁ in select
m.μz(z, y)
end
if :Γ₂ in select
m.μy(z, y)
end
if :Γ₃ in select
m.ξz(z, y)
end
if :Γ₄ in select
m.ξy(z, y)
end
if :JV in select
if isa(m.J𝒱, RALF2)
m.J𝒱(z, Ψ)
else
m.J𝒱(z, y, Ψ)
end
end
m
end
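# Illustrative usage (a sketch; assumes `ral` is an existing RiskAdjustedLinearization):
# refresh only the Jacobians Γ₁ and Γ₂ at the current coefficients.
# update!(linearized_system(ral), ral.z, ral.y, ral.Ψ; select = [:Γ₁, :Γ₂])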
abstract type AbstractRiskAdjustedLinearization end
"""
RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
RiskAdjustedLinearization(nonlinear_system, linearized_system, z, y, Ψ, Nz, Ny, Nε)
Creates a first-order perturbation around the stochastic steady state of a discrete-time dynamic economic model.
The first method is the main constructor most users will want, while the second method is the default constructor.
### Inputs for First Method
- `μ::Function`: expected state transition function
- `Λ::Function` or `Λ::AbstractMatrix`: function or matrix mapping endogenous risk into state transition equations
- `Σ::Function` or `Σ::AbstractMatrix`: function or matrix mapping exogenous risk into state transition equations
- `ξ::Function`: nonlinear terms of the expectational equations
- `Γ₅::AbstractMatrix{<: Number}`: coefficient matrix on one-period ahead expectation of state variables
- `Γ₆::AbstractMatrix{<: Number}`: coefficient matrix on one-period ahead expectation of jump variables
- `ccgf::Function`: conditional cumulant generating function of the exogenous shocks
- `z::AbstractVector{<: Number}`: state variables in stochastic steady state
- `y::AbstractVector{<: Number}`: jump variables in stochastic steady state
- `Ψ::AbstractMatrix{<: Number}`: matrix linking deviations in states to deviations in jumps, i.e. ``y_t - y = \\Psi(z_t - z)``.
- `Nε::Int`: number of exogenous shocks
### Keywords for First Method
- `sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims)`: initializer for the cache of steady state vectors.
- `Λ_cache_init::Function = dims -> Matrix{T}(undef, dims)`: initializer for the cache of `Λ`
- `Σ_cache_init::Function = dims -> Matrix{T}(undef, dims)`: initializer for the cache of `Σ`
- `jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims)`: initializer for the cache of the Jacobians of `μ`, `ξ`, and `𝒱 `.
- `jump_dependent_shock_matrices::Bool = false`: if true, `Λ` and `Σ` are treated as `Λ(z, y)` and `Σ(z, y)`
to allow dependence on jumps.
- `sparse_jacobian::Vector{Symbol} = Symbol[]`: pass the symbols `:μ`, `:ξ`, and/or `:𝒱 ` to declare that
the Jacobians of these functions are sparse and should be differentiated using sparse methods from SparseDiffTools.jl
- `sparsity::AbstractDict = Dict{Symbol, Matrix}()`: a dictionary for declaring the
sparsity patterns of the Jacobians of `μ`, `ξ`, and `𝒱 `. The relevant keys are `:μz`, `:μy`, `:ξz`, `:ξy`, and `:J𝒱 `.
- `colorvec::AbstractDict = Dict{Symbol, Vector{Int}}()`: a dictionary for declaring
  the matrix coloring vectors. The relevant keys are `:μz`, `:μy`, `:ξz`, `:ξy`, and `:J𝒱 `.
- `sparsity_detection::Bool = false`: if true, use SparseDiffTools to determine the sparsity pattern.
When false (default), the sparsity pattern is estimated by differentiating the Jacobian once
with `ForwardDiff` and assuming any zeros in the calculated Jacobian are supposed to be zeros.
### Inputs for Second Method
- `nonlinear_system::RALNonlinearSystem`
- `linearized_system::RALLinearizedSystem`
- `z::AbstractVector{<: Number}`: state variables in stochastic steady state
- `y::AbstractVector{<: Number}`: jump variables in stochastic steady state
- `Ψ::AbstractMatrix{<: Number}`: matrix linking deviations in states to deviations in jumps, i.e. ``y_t - y = \\Psi(z_t - z)``.
- `Nz::Int`: number of state variables
- `Ny::Int`: number of jump variables
- `Nε::Int`: number of exogenous shocks
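
### Example (First Method)

A minimal, purely illustrative construction (the functions and values below are
invented for this sketch and do not form an economically meaningful model):

    μ(F, z, y) = (F[1] = 0.9 * z[1])      # one state with AR(1) dynamics
    ξ(F, z, y) = (F[1] = log(.99) - y[1]) # one trivial expectational equation
    Λ = zeros(1, 1); Σ = reshape([.01], 1, 1)
    Γ₅ = zeros(1, 1); Γ₆ = zeros(1, 1)
    ccgf(F, α, z) = (sum!(F, α.^2); F .*= .5) # Gaussian shocks
    m = RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf,
                                  zeros(1), [log(.99)], zeros(1, 1), 1)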
"""
mutable struct RiskAdjustedLinearization{C1 <: AbstractVector{<: Number}, C2 <: AbstractMatrix{<: Number}} <: AbstractRiskAdjustedLinearization
nonlinear::RALNonlinearSystem
linearization::RALLinearizedSystem
z::C1 # Coefficients, TODO: at some point, we may or may not want to make z, y, and Ψ also DiffCache types
y::C1
Ψ::C2
Nz::Int # Dimensions
Ny::Int
Nε::Int
end
# The following constructor is typically the main constructor for most users.
# It will call a lower-level constructor that uses automatic differentiation
# to calculate the Jacobian functions.
# Note that here we pass in the ccgf, rather than 𝒱
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
Λ_cache_init::Function = dims -> Matrix{T}(undef, dims),
Σ_cache_init::Function = dims -> Matrix{T}(undef, dims),
jump_dependent_shock_matrices::Bool = false,
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict{Symbol, AbstractMatrix} = Dict{Symbol, AbstractMatrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: Function, L, S,
X <: Function,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
# Get dimensions
Nz = length(z)
Ny = length(y)
Nzy = Nz + Ny
if Nε < 0
throw(DomainError(Nε, "Nε cannot be negative"))
end
# Create wrappers enabling caching for μ and ξ
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
_μ = RALF2(μ, z, y, sss_vector_cache_init(Nz), (max(min(Nzchunk, Nychunk), 2), Nzchunk, Nychunk))
_ξ = RALF2(ξ, z, y, sss_vector_cache_init(Ny), (max(min(Nzchunk, Nychunk), 2), Nzchunk, Nychunk))
# Apply dispatch on Λ and Σ to figure what they should be
return RiskAdjustedLinearization(_μ, Λ, Σ, _ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nz, Ny, Nε, sss_vector_cache_init = sss_vector_cache_init,
Λ_cache_init = Λ_cache_init,
Σ_cache_init = Σ_cache_init,
jump_dependent_shock_matrices = jump_dependent_shock_matrices,
jacobian_cache_init = jacobian_cache_init,
sparse_jacobian = sparse_jacobian, sparsity = sparsity,
colorvec = colorvec, sparsity_detection = sparsity_detection)
end
# Constructor that uses ForwardDiff to calculate Jacobian functions.
# Users will not typically use this constructor, however, because it requires
# various functions of the RALNonlinearSystem and RALLinearizedSystem to already
# be wrapped with either an RALF1 or RALF2 type.
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict{Symbol, AbstractMatrix} = Dict{Symbol, AbstractMatrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2, L <: RALF1, S <: RALF1,
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
jac_cache = Dict{Symbol, NamedTuple}()
# Use RALF2 wrapper to create Jacobian functions with caching for μ, ξ.
# Use the tuple to select the correct Dual cache b/c μ is in place
if :μ in sparse_jacobian
μz, μy, jac_cache[:μz], jac_cache[:μy] =
construct_μ_jacobian_function(μ, z, y;
sparsity_z = haskey(sparsity, :μz) ? sparsity[:μz] : nothing,
sparsity_y = haskey(sparsity, :μy) ? sparsity[:μy] : nothing,
colorvec_z = haskey(colorvec, :μz) ? colorvec[:μz] : nothing,
colorvec_y = haskey(colorvec, :μy) ? colorvec[:μy] : nothing,
sparsity_detection = sparsity_detection)
else
μz = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> μ(x, y, (1, 2)), z), z, y,
jacobian_cache_init((Nz, Nz)))
μy = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> μ(z, x, (2, 3)), y), z, y,
jacobian_cache_init((Nz, Ny)))
end
if :ξ in sparse_jacobian
ξz, ξy, jac_cache[:ξz], jac_cache[:ξy] =
construct_ξ_jacobian_function(ξ, z, y;
sparsity_z = haskey(sparsity, :ξz) ? sparsity[:ξz] : nothing,
sparsity_y = haskey(sparsity, :ξy) ? sparsity[:ξy] : nothing,
colorvec_z = haskey(colorvec, :ξz) ? colorvec[:ξz] : nothing,
colorvec_y = haskey(colorvec, :ξy) ? colorvec[:ξy] : nothing,
sparsity_detection = sparsity_detection)
else
ξz = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> ξ(x, y, (1, 2)), z), z, y,
jacobian_cache_init((Ny, Nz)))
ξy = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> ξ(z, x, (2, 3)), y), z, y,
jacobian_cache_init((Ny, Ny)))
end
# Check if we need to compute the left divide or not
avoid_Λ = isa(get_cache_type(Λ), AbstractMatrix) && all(Λ.cache .== 0.)
# Check if Σ's cache is sparse, which matters only if Λ is relevant b/c
# a left-divide of two sparse matrices A and B (i.e. A \ B) will not work
# b/c the LU factorization algorithm employed needs more structure on A or B.
sparse_cache_Σ = !avoid_Λ && isa(Σ.cache, AbstractMatrix) ? issparse(Σ.cache) : issparse(Σ.cache.du)
# Create RALF2 wrappers for 𝒱 and its Jacobian J𝒱
if applicable(ccgf, Γ₅, z) # Check if ccgf is in place or not
_𝒱 = function _𝒱_oop(F, z, Ψ)
Σ0 = Σ(z)
if size(Σ0) != (Nz, Nε)
Σ0 = sparse_cache_Σ ? Matrix(reshape(Σ0, Nz, Nε)) : reshape(Σ0, Nz, Nε)
end
if avoid_Λ
F .= ccgf((Γ₅ + Γ₆ * Ψ) * Σ0, z)
else
Λ0 = Λ(z)
if size(Λ0) != (Nz, Ny)
Λ0 = reshape(Λ0, Nz, Ny)
end
F .= ccgf((Γ₅ + Γ₆ * Ψ) * ((I - (Λ0 * Ψ)) \ Σ0), z)
end
end
else # in place
_𝒱 = function _𝒱_ip(F, z, Ψ)
Σ0 = Σ(z)
if size(Σ0) != (Nz, Nε)
Σ0 = reshape(Σ0, Nz, Nε)
end
if avoid_Λ
ccgf(F, (Γ₅ + Γ₆ * Ψ) * Σ0, z)
else
Λ0 = Λ(z)
if size(Λ0) != (Nz, Ny)
Λ0 = reshape(Λ0, Nz, Ny)
end
ccgf(F, (Γ₅ + Γ₆ * Ψ) * ((I - (Λ0 * Ψ)) \ Σ0), z)
end
end
end
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
𝒱 = RALF2((F, z, Ψ) -> _𝒱(F, z, Ψ), z, Ψ, sss_vector_cache_init(Ny), (max(min(Nzchunk, Nychunk), 2), Nzchunk))
if :𝒱 in sparse_jacobian
J𝒱, jac_cache[:J𝒱] = construct_𝒱_jacobian_function(𝒱, ccgf, Λ, Σ, Γ₅, Γ₆, z, Ψ;
sparsity = haskey(sparsity, :J𝒱) ? sparsity[:J𝒱] : nothing,
colorvec = haskey(colorvec, :J𝒱) ? colorvec[:J𝒱] : nothing,
sparsity_detection = sparsity_detection)
else
_J𝒱(F, z, Ψ) = ForwardDiff.jacobian!(F, x -> 𝒱(x, Ψ, (1, 2)), z)
J𝒱 = RALF2((F, z, Ψ) -> _J𝒱(F, z, Ψ), z, Ψ, jacobian_cache_init((Ny, Nz)))
end
# Form underlying RAL blocks
nonlinear_system = RALNonlinearSystem(μ, Λ, Σ, ξ, 𝒱, ccgf)
linearized_system = RALLinearizedSystem(μz, μy, ξz, ξy, J𝒱, Γ₅, Γ₆, jac_cache)
return RiskAdjustedLinearization(nonlinear_system, linearized_system, z, y, Ψ, Nz, Ny, Nε)
end
# Handles case where Λ and Σ are RALF2
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict{Symbol, AbstractMatrix} = Dict{Symbol, AbstractMatrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2, L <: RALF2, S <: RALF2,
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
jac_cache = Dict{Symbol, NamedTuple}()
# Use RALF2 wrapper to create Jacobian functions with caching for μ, ξ.
# Use the tuple to select the correct Dual cache b/c μ is in place
if :μ in sparse_jacobian
μz, μy, jac_cache[:μz], jac_cache[:μy] =
construct_μ_jacobian_function(μ, z, y;
sparsity_z = haskey(sparsity, :μz) ? sparsity[:μz] : nothing,
sparsity_y = haskey(sparsity, :μy) ? sparsity[:μy] : nothing,
colorvec_z = haskey(colorvec, :μz) ? colorvec[:μz] : nothing,
colorvec_y = haskey(colorvec, :μy) ? colorvec[:μy] : nothing,
sparsity_detection = sparsity_detection)
else
μz = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> μ(x, y, (1, 2)), z), z, y,
jacobian_cache_init((Nz, Nz)))
μy = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> μ(z, x, (2, 3)), y), z, y,
jacobian_cache_init((Nz, Ny)))
end
if :ξ in sparse_jacobian
ξz, ξy, jac_cache[:ξz], jac_cache[:ξy] =
construct_ξ_jacobian_function(ξ, z, y;
sparsity_z = haskey(sparsity, :ξz) ? sparsity[:ξz] : nothing,
sparsity_y = haskey(sparsity, :ξy) ? sparsity[:ξy] : nothing,
colorvec_z = haskey(colorvec, :ξz) ? colorvec[:ξz] : nothing,
colorvec_y = haskey(colorvec, :ξy) ? colorvec[:ξy] : nothing,
sparsity_detection = sparsity_detection)
else
ξz = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> ξ(x, y, (1, 2)), z), z, y,
jacobian_cache_init((Ny, Nz)))
ξy = RALF2((F, z, y) -> ForwardDiff.jacobian!(F, x -> ξ(z, x, (2, 3)), y), z, y,
jacobian_cache_init((Ny, Ny)))
end
# Check if we need to compute the left divide or not
avoid_Λ = isa(get_cache_type(Λ), AbstractMatrix) && all(Λ.cache .== 0.)
# Check if Σ's cache is sparse, which matters only if Λ is relevant b/c
# a left-divide of two sparse matrices A and B (i.e. A \ B) will not work
# b/c the LU factorization algorithm employed needs more structure on A or B.
sparse_cache_Σ = !avoid_Λ && isa(Σ.cache, AbstractMatrix) ? issparse(Σ.cache) : issparse(Σ.cache.du)
# Create RALF2 wrappers for 𝒱 and its Jacobian J𝒱
if applicable(ccgf, Γ₅, z) # Check if ccgf is in place or not
_𝒱 = function _𝒱_oop(F, z, y, Ψ, zₜ)
yₜ = y + Ψ * (zₜ - z)
Σ0 = Σ(zₜ, yₜ)
if size(Σ0) != (Nz, Nε)
Σ0 = reshape(Σ0, Nz, Nε)
end
if avoid_Λ
F .= ccgf((Γ₅ + Γ₆ * Ψ) * Σ0, zₜ)
else
Λ0 = Λ(zₜ, yₜ)
if size(Λ0) != (Nz, Ny)
Λ0 = reshape(Λ0, Nz, Ny)
end
F .= ccgf((Γ₅ + Γ₆ * Ψ) * ((I - (Λ0 * Ψ)) \ Σ0), zₜ)
end
end
else # in place
_𝒱 = function _𝒱_ip(F, z, y, Ψ, zₜ)
yₜ = y + Ψ * (zₜ - z)
Σ0 = Σ(zₜ, yₜ)
if size(Σ0) != (Nz, Nε)
Σ0 = reshape(Σ0, Nz, Nε)
end
if avoid_Λ
ccgf(F, (Γ₅ + Γ₆ * Ψ) * Σ0, zₜ)
else
Λ0 = Λ(zₜ, yₜ)
if size(Λ0) != (Nz, Ny)
Λ0 = reshape(Λ0, Nz, Ny)
end
ccgf(F, (Γ₅ + Γ₆ * Ψ) * ((I - (Λ0 * Ψ)) \ Σ0), zₜ)
end
end
end
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
𝒱 = RALF4((F, z, y, Ψ, zₜ) -> _𝒱(F, z, y, Ψ, zₜ), z, y, Ψ, z, sss_vector_cache_init(Ny),
(max(min(Nzchunk, Nychunk), 2), Nzchunk))
if :𝒱 in sparse_jacobian
J𝒱, jac_cache[:J𝒱] = construct_𝒱_jacobian_function(𝒱, ccgf, Λ, Σ, Γ₅, Γ₆, z, y, Ψ;
sparsity = haskey(sparsity, :J𝒱) ? sparsity[:J𝒱] : nothing,
colorvec = haskey(colorvec, :J𝒱) ? colorvec[:J𝒱] : nothing,
sparsity_detection = sparsity_detection)
else
_J𝒱(F, z, y, Ψ) = ForwardDiff.jacobian!(F, zₜ -> 𝒱(z, y, Ψ, zₜ, (4, 2)), z) # use zₜ argument to infer the cache
J𝒱 = RALF3((F, z, y, Ψ) -> _J𝒱(F, z, y, Ψ), z, y, Ψ, jacobian_cache_init((Ny, Nz)))
end
# Form underlying RAL blocks
nonlinear_system = RALNonlinearSystem(μ, Λ, Σ, ξ, 𝒱, ccgf)
linearized_system = RALLinearizedSystem(μz, μy, ξz, ξy, J𝒱, Γ₅, Γ₆, jac_cache)
return RiskAdjustedLinearization(nonlinear_system, linearized_system, z, y, Ψ, Nz, Ny, Nε)
end
# The following four constructors cover different common cases for the Λ and Σ functions.
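# In order, the four cases are: (1) Λ and Σ both functions, (2) Λ a constant matrix and
# Σ a function, (3) Λ a function and Σ a constant matrix, (4) Λ and Σ both constant matrices.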
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
Λ_cache_init::Function = dims -> Matrix{T}(undef, dims),
Σ_cache_init::Function = dims -> Matrix{T}(undef, dims),
jump_dependent_shock_matrices::Bool = false,
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2, L <: Function, S <: Function,
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
# Create wrappers enabling caching for Λ and Σ
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
if jump_dependent_shock_matrices
_Λ = RALF2(Λ, z, y, Λ_cache_init((Nz, Ny)), (max(min(Nzchunk, Nychunk), 2), Nzchunk))
_Σ = RALF2(Σ, z, y, Σ_cache_init((Nz, Nε)), (max(min(Nzchunk, Nychunk), 2), Nzchunk))
else
_Λ = RALF1(Λ, z, Λ_cache_init((Nz, Ny)))
_Σ = RALF1(Σ, z, Σ_cache_init((Nz, Nε)))
end
return RiskAdjustedLinearization(μ, _Λ, _Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nz, Ny, Nε, sss_vector_cache_init = sss_vector_cache_init,
jacobian_cache_init = jacobian_cache_init, sparse_jacobian = sparse_jacobian,
sparsity = sparsity, sparsity_detection = sparsity_detection, colorvec = colorvec)
end
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
Λ_cache_init::Function = dims -> Matrix{T}(undef, dims),
Σ_cache_init::Function = dims -> Matrix{T}(undef, dims),
jump_dependent_shock_matrices::Bool = false,
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2,
L <: AbstractMatrix{<: Number}, S <: Function,
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
# Create wrappers enabling caching for Λ and Σ
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
if jump_dependent_shock_matrices
_Λ = RALF2(Λ)
_Σ = RALF2(Σ, z, y, Σ_cache_init((Nz, Nε)), (max(min(Nzchunk, Nychunk), 2), Nzchunk))
else
_Λ = RALF1(Λ)
_Σ = RALF1(Σ, z, Σ_cache_init((Nz, Nε)))
end
return RiskAdjustedLinearization(μ, _Λ, _Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nz, Ny, Nε, sss_vector_cache_init = sss_vector_cache_init,
jacobian_cache_init = jacobian_cache_init, sparse_jacobian = sparse_jacobian,
sparsity = sparsity, sparsity_detection = sparsity_detection, colorvec = colorvec)
end
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
Λ_cache_init::Function = dims -> Matrix{T}(undef, dims),
Σ_cache_init::Function = dims -> Matrix{T}(undef, dims),
jump_dependent_shock_matrices::Bool = false,
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2, L <: Function, S <: AbstractMatrix{<: Number},
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
# Create wrappers enabling caching for Λ and Σ
Nzchunk = ForwardDiff.pickchunksize(Nz)
Nychunk = ForwardDiff.pickchunksize(Ny)
if jump_dependent_shock_matrices
_Λ = RALF2(Λ, z, y, Λ_cache_init((Nz, Ny)), (max(min(Nzchunk, Nychunk), 2), Nzchunk))
_Σ = RALF2(Σ)
else
_Λ = RALF1(Λ, z, Λ_cache_init((Nz, Ny)))
_Σ = RALF1(Σ)
end
return RiskAdjustedLinearization(μ, _Λ, _Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nz, Ny, Nε, sss_vector_cache_init = sss_vector_cache_init,
jacobian_cache_init = jacobian_cache_init, sparse_jacobian = sparse_jacobian,
sparsity = sparsity, sparsity_detection = sparsity_detection, colorvec = colorvec)
end
function RiskAdjustedLinearization(μ::M, Λ::L, Σ::S, ξ::X, Γ₅::JC5, Γ₆::JC6, ccgf::CF,
z::AbstractVector{T}, y::AbstractVector{T}, Ψ::AbstractMatrix{T},
Nz::Int, Ny::Int, Nε::Int; sss_vector_cache_init::Function = dims -> Vector{T}(undef, dims),
Λ_cache_init::Function = dims -> Matrix{T}(undef, dims),
Σ_cache_init::Function = dims -> Matrix{T}(undef, dims),
jump_dependent_shock_matrices::Bool = false,
jacobian_cache_init::Function = dims -> Matrix{T}(undef, dims),
sparse_jacobian::Vector{Symbol} = Symbol[],
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false) where {T <: Number, M <: RALF2,
L <: AbstractMatrix{<: Number}, S <: AbstractMatrix{<: Number},
X <: RALF2,
JC5 <: AbstractMatrix{<: Number},
JC6 <: AbstractMatrix{<: Number},
CF <: Function}
# Create wrappers enabling caching for Λ and Σ
_Λ = RALF1(Λ)
_Σ = RALF1(Σ)
return RiskAdjustedLinearization(μ, _Λ, _Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nz, Ny, Nε, sss_vector_cache_init = sss_vector_cache_init,
jacobian_cache_init = jacobian_cache_init, sparse_jacobian = sparse_jacobian,
sparsity = sparsity, sparsity_detection = sparsity_detection, colorvec = colorvec)
end
## Print statements for RAL objects
function Base.show(io::IO, m::AbstractRiskAdjustedLinearization)
@printf io "Risk-Adjusted Linearization of an Economic Model\n"
@printf io "No. of state variables: %i\n" m.Nz
@printf io "No. of jump variables: %i\n" m.Ny
@printf io "No. of exogenous shocks: %i\n" m.Nε
end
function Base.show(io::IO, m::RALNonlinearSystem)
@printf io "RALNonlinearSystem"
end
function Base.show(io::IO, m::RALLinearizedSystem)
@printf io "RALLinearizedSystem"
end
## Indexing for convenient access to steady state values
@inline function Base.getindex(m::RiskAdjustedLinearization, sym::Symbol)
if sym in [:μ_sss, :ξ_sss, :𝒱_sss, :Σ_sss, :Λ_sss]
m.nonlinear[sym]
elseif sym in [:Γ₁, :Γ₂, :Γ₃, :Γ₄, :Γ₅, :Γ₆, :JV]
m.linearization[sym]
else
throw(KeyError("key $sym not found"))
end
end
@inline function Base.getindex(m::RALNonlinearSystem, sym::Symbol)
if sym == :μ_sss
isnothing(m.μ.cache) ? error("μ is out of place, so its stochastic steady state value is not cached.") : m.μ.cache.du
elseif sym == :ξ_sss
isnothing(m.ξ.cache) ? error("ξ is out of place, so its stochastic steady state value is not cached.") : m.ξ.cache.du
elseif sym == :𝒱_sss
m.𝒱.cache.du
elseif sym == :Σ_sss
if isnothing(m.Σ.cache)
error("Λ is out of place, so its stochastic steady state value is not cached.")
elseif isa(m.Σ.cache, DiffCache)
m.Σ.cache.du
else
m.Σ.cache
end
elseif sym == :Λ_sss
if isnothing(m.Λ.cache)
error("Λ is out of place, so its stochastic steady state value is not cached.")
elseif isa(m.Λ.cache, DiffCache)
m.Λ.cache.du
else
m.Λ.cache
end
else
throw(KeyError("key $sym not found"))
end
end
@inline function Base.getindex(m::RALLinearizedSystem, sym::Symbol)
if sym == :Γ₁
m.μz.cache.du
elseif sym == :Γ₂
m.μy.cache.du
elseif sym == :Γ₃
m.ξz.cache.du
elseif sym == :Γ₄
m.ξy.cache.du
elseif sym == :Γ₅
m.Γ₅
elseif sym == :Γ₆
m.Γ₆
elseif sym == :JV
m.J𝒱.cache.du
else
throw(KeyError("key $sym not found"))
end
end
## Methods for using RiskAdjustedLinearization
@inline getvalues(m::RiskAdjustedLinearization) = (m.z, m.y, m.Ψ)
@inline getvecvalues(m::RiskAdjustedLinearization) = vcat(m.z, m.y, vec(m.Ψ))
@inline nonlinear_system(m::RiskAdjustedLinearization) = m.nonlinear
@inline linearized_system(m::RiskAdjustedLinearization) = m.linearization
@inline function update!(m::RiskAdjustedLinearization)
update!(nonlinear_system(m), m.z, m.y, m.Ψ)
update!(linearized_system(m), m.z, m.y, m.Ψ)
m
end
@inline function update!(m::RiskAdjustedLinearization, z::C1, y::C1, Ψ::C2;
update_cache::Bool = true) where {C1 <: AbstractVector{<: Number}, C2 <: AbstractMatrix{<: Number}}
# Update values of the affine approximation
m.z .= z
m.y .= y
m.Ψ .= Ψ
# Update the cached vectors and Jacobians
if update_cache
update!(m)
end
m
end
# Helper functions for exploiting sparsity in the Jacobians of μ, ξ, and 𝒱
"""
```
compute_sparsity_pattern(f::Function, x::AbstractVector{<: Number}, nrow::Int;
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false)
```
calculates the sparsity pattern and matrix coloring vector of the Jacobian of one of the functions μ, ξ, and 𝒱.
### Inputs
- `f`: is the function to be differentiated, e.g. `z -> 𝒱(z, Ψ, (1, 2))`
- `x`: the vector at which differentiation occurs
- `nrow`: specifies the number of rows of the Jacobian
### Keywords
- `sparsity`: sparsity pattern of the Jacobian
- `sparsity_detection`: if true, use SparsityDetection.jl to determine the sparsity pattern.
If false, then the sparsity pattern is determined by using automatic differentiation
to calculate a Jacobian and assuming any zeros are supposed to be zero.
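### Example
A sketch of a typical call, computing the pattern of the entropy's Jacobian with respect
to the states (the objects `𝒱`, `z`, `Ψ`, and `Ny` are assumed to exist already):
```
sparsity, colorvec = compute_sparsity_pattern(x -> 𝒱(x, Ψ, (1, 2)), z, Ny)
```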
"""
function compute_sparsity_pattern(f::Function, x::AbstractVector{T}, nrow::Int;
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false) where {T <: Number}
if isnothing(sparsity)
sparsity = if sparsity_detection
convert(SparseMatrixCSC{eltype(x), Int}, jacobian_sparsity(f, similar(x, nrow), x))
else
sparse(ForwardDiff.jacobian(f, x))
end
end
if isempty(nonzeros(sparsity))
# default to differentiating a dense matrix if all zeros
return sparse(ones(T, size(sparsity))), 1:length(x)
else
return sparsity, matrix_colors(sparsity)
end
end
"""
```
update_sparsity_pattern!(m::RiskAdjustedLinearization, function_name::Union{Symbol, Vector{Symbol}};
z::AbstractVector{<: Number} = m.z,
y::AbstractVector{<: Number} = m.y,
Ψ::AbstractMatrix{<: Number} = m.Ψ,
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false)
```
updates the Jacobians of μ, ξ, and/or 𝒱 in `m` with a new sparsity pattern. The Jacobians
to be updated are specified by `function_name`, e.g. `function_name = [:μ, :ξ, :𝒱]`.
If the keyword `sparsity` is empty, then the function attempts to determine the new sparsity pattern by computing
the Jacobian via automatic differentiation and assuming any zeros are supposed to be zero.
Keywords provide guesses for the coefficients ``(z, y, \\Psi)`` that are required
to calculate the Jacobians.
### Keywords
- `z`: state coefficients at steady state
- `y`: jump coefficients at steady state
- `Ψ`: coefficients for mapping from states to jumps
- `sparsity`: key-value pairs can be used to specify new sparsity patterns for the Jacobian functions
`μz`, `μy`, `ξz`, `ξy`, and `J𝒱 `.
- `colorvec`: key-value pairs can be used to specify new matrix coloring vectors for the Jacobian functions
`μz`, `μy`, `ξz`, `ξy`, and `J𝒱 `.
- `sparsity_detection`: use SparsityDetection.jl to determine the sparsity pattern.
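### Example
A sketch: re-detect the pattern of the entropy's Jacobian automatically, or supply
patterns for μ's Jacobians directly (`Sμz` and `Sμy` are hypothetical sparse matrices):
```
update_sparsity_pattern!(m, :𝒱)
update_sparsity_pattern!(m, :μ; sparsity = Dict(:μz => Sμz, :μy => Sμy))
```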
"""
function update_sparsity_pattern!(m::RiskAdjustedLinearization, function_name::Symbol;
z::AbstractVector{<: Number} = m.z,
y::AbstractVector{<: Number} = m.y,
Ψ::AbstractMatrix{<: Number} = m.Ψ,
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false)
return update_sparsity_pattern!(m, [function_name]; z = z, y = y, Ψ = Ψ,
sparsity = sparsity, colorvec = colorvec,
sparsity_detection = sparsity_detection)
end
function update_sparsity_pattern!(m::RiskAdjustedLinearization, function_names::Vector{Symbol};
z::AbstractVector{<: Number} = m.z,
y::AbstractVector{<: Number} = m.y,
Ψ::AbstractMatrix{<: Number} = m.Ψ,
sparsity::AbstractDict = Dict{Symbol, Matrix}(),
colorvec::AbstractDict = Dict{Symbol, Vector{Int}}(),
sparsity_detection::Bool = false)
if :μ in function_names
μz, μy, μz_jac_cache, μy_jac_cache =
construct_μ_jacobian_function(m.nonlinear.μ, z, y;
sparsity_z = haskey(sparsity, :μz) ? sparsity[:μz] : nothing,
sparsity_y = haskey(sparsity, :μy) ? sparsity[:μy] : nothing,
colorvec_z = haskey(colorvec, :μz) ? colorvec[:μz] : nothing,
colorvec_y = haskey(colorvec, :μy) ? colorvec[:μy] : nothing,
sparsity_detection = sparsity_detection)
m.linearization.μz = μz
m.linearization.μy = μy
m.linearization.sparse_jac_caches[:μz] = μz_jac_cache
m.linearization.sparse_jac_caches[:μy] = μy_jac_cache
end
if :ξ in function_names
ξz, ξy, ξz_jac_cache, ξy_jac_cache =
construct_ξ_jacobian_function(m.nonlinear.ξ, z, y;
sparsity_z = haskey(sparsity, :ξz) ? sparsity[:ξz] : nothing,
sparsity_y = haskey(sparsity, :ξy) ? sparsity[:ξy] : nothing,
colorvec_z = haskey(colorvec, :ξz) ? colorvec[:ξz] : nothing,
colorvec_y = haskey(colorvec, :ξy) ? colorvec[:ξy] : nothing,
sparsity_detection = sparsity_detection)
m.linearization.ξz = ξz
m.linearization.ξy = ξy
m.linearization.sparse_jac_caches[:ξz] = ξz_jac_cache
m.linearization.sparse_jac_caches[:ξy] = ξy_jac_cache
end
if :𝒱 in function_names
J𝒱, J𝒱_jac_cache = if isa(m.nonlinear.𝒱, RALF2)
construct_𝒱_jacobian_function(m.nonlinear.𝒱, m.nonlinear.ccgf, m.nonlinear.Λ, m.nonlinear.Σ,
m[:Γ₅], m[:Γ₆], z, Ψ;
sparsity = haskey(sparsity, :J𝒱) ? sparsity[:J𝒱] : nothing,
colorvec = haskey(colorvec, :J𝒱) ? colorvec[:J𝒱] : nothing,
sparsity_detection = sparsity_detection)
else
construct_𝒱_jacobian_function(m.nonlinear.𝒱, m.nonlinear.ccgf, m.nonlinear.Λ, m.nonlinear.Σ,
m[:Γ₅], m[:Γ₆], z, y, Ψ; sparsity = haskey(sparsity, :J𝒱) ? sparsity[:J𝒱] : nothing,
colorvec = haskey(colorvec, :J𝒱) ? colorvec[:J𝒱] : nothing,
sparsity_detection = sparsity_detection)
end
m.linearization.J𝒱 = J𝒱
m.linearization.sparse_jac_caches[:J𝒱] = J𝒱_jac_cache
end
m
end
## Helper functions for constructing the Jacobian functions of μ, ξ, and 𝒱
function construct_μ_jacobian_function(μ::RALF2, z::AbstractVector{T}, y::AbstractVector{T};
sparsity_z::Union{AbstractArray, Nothing} = nothing,
sparsity_y::Union{AbstractArray, Nothing} = nothing,
colorvec_z = nothing, colorvec_y = nothing,
sparsity_detection::Bool = false) where {T <: Number}
# Define (temporary) objective functions
_f_μz = z -> μ(z, y, (1, 2))
_f_μy = y -> μ(z, y, (2, 3))
# Infer sparsity patterns and matrix coloring vector
Nz = length(z)
if isnothing(sparsity_z)
sparsity_z, colorvec_z = compute_sparsity_pattern(_f_μz, z, Nz; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec_z)
colorvec_z = matrix_colors(sparsity_z)
end
if isnothing(sparsity_y)
sparsity_y, colorvec_y = compute_sparsity_pattern(_f_μy, y, Nz; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec_y)
colorvec_y = matrix_colors(sparsity_y)
end
#= # Create caches for the sparse Jacobian methods # This code is left here for when
jac_cache_μz = ForwardColorJacCache(_f_μz, z, min(m.Nz, m.Ny); # Jacobians of μ and ξ are refactored
sparsity = sparsity_μz, colorvec = colorvec_μz)
jac_cache_μy = ForwardColorJacCache(_f_μy, y, min(m.Nz, m.Ny);
sparsity = sparsity_μy, colorvec = colorvec_μy)=#
# Create RALF2 objects. Note that we cannot pre-allocate the caches for
# forwarddiff_color_jacobian! by using ForwardColorJacCache b/c the objective function
# changes as z and y change. If Jacobians of μ and ξ are refactored to be done once,
# then it'll be possible to cache.
μ_dz = similar(z)
μ_dy = similar(z)
μz = RALF2((F, z, y) -> forwarddiff_color_jacobian!(F, (F0, x) -> μ.f0(F0, x, y), z, dx = μ_dz,
colorvec = colorvec_z, sparsity = sparsity_z),
z, y, deepcopy(sparsity_z))
μy = RALF2((F, z, y) -> forwarddiff_color_jacobian!(F, (F0, x) -> μ.f0(F0, z, x), y, dx = μ_dy,
colorvec = colorvec_y, sparsity = sparsity_y),
z, y, deepcopy(sparsity_y))
# Create mini-version of the Jacobian cache
μz_jac_cache = (dx = μ_dz, sparsity = sparsity_z, colorvec = colorvec_z)
μy_jac_cache = (dx = μ_dy, sparsity = sparsity_y, colorvec = colorvec_y)
return μz, μy, μz_jac_cache, μy_jac_cache
end
function construct_ξ_jacobian_function(ξ::RALF2, z::AbstractVector{T}, y::AbstractVector{T};
sparsity_z::Union{AbstractArray, Nothing} = nothing,
sparsity_y::Union{AbstractArray, Nothing} = nothing,
colorvec_z = nothing, colorvec_y = nothing,
sparsity_detection::Bool = false) where {T <: Number}
# Define (temporary) objective functions
_f_ξz = z -> ξ(z, y, (1, 2))
_f_ξy = y -> ξ(z, y, (2, 3))
# Infer sparsity patterns and matrix coloring vector
Ny = length(y)
if isnothing(sparsity_z)
sparsity_z, colorvec_z = compute_sparsity_pattern(_f_ξz, z, Ny; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec_z)
colorvec_z = matrix_colors(sparsity_z)
end
if isnothing(sparsity_y)
sparsity_y, colorvec_y = compute_sparsity_pattern(_f_ξy, y, Ny; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec_y)
colorvec_y = matrix_colors(sparsity_y)
end
#= # Create caches for the sparse Jacobian methods # This code is left here for when
jac_cache_ξz = ForwardColorJacCache(_f_ξz, z, min(m.Nz, m.Ny);
sparsity = sparsity_ξz, colorvec = colorvec_ξz)
jac_cache_ξy = ForwardColorJacCache(_f_ξy, y, min(m.Nz, m.Ny);
sparsity = sparsity_ξy, colorvec = colorvec_ξy)=#
# Create RALF2 objects. Note that we cannot pre-allocate the caches for
# forwarddiff_color_jacobian! by using ForwardColorJacCache b/c the objective function
# changes as z and y change. If Jacobians of μ and ξ are refactored to be done once,
# then it'll be possible to cache.
ξ_dz = similar(y)
ξ_dy = similar(y)
ξz = RALF2((F, z, y) -> forwarddiff_color_jacobian!(F, (F0, x) -> ξ.f0(F0, x, y), z, dx = ξ_dz,
colorvec = colorvec_z, sparsity = sparsity_z),
z, y, deepcopy(sparsity_z))
ξy = RALF2((F, z, y) -> forwarddiff_color_jacobian!(F, (F0, x) -> ξ.f0(F0, z, x), y, dx = ξ_dy,
colorvec = colorvec_y, sparsity = sparsity_y),
z, y, deepcopy(sparsity_y))
# Create mini-version of the Jacobian cache
ξz_jac_cache = (dx = ξ_dz, sparsity = sparsity_z, colorvec = colorvec_z)
ξy_jac_cache = (dx = ξ_dy, sparsity = sparsity_y, colorvec = colorvec_y)
return ξz, ξy, ξz_jac_cache, ξy_jac_cache
end
function construct_𝒱_jacobian_function(𝒱::RALF2, ccgf::Function, Λ::RALF1, Σ::RALF1{LC}, Γ₅::AbstractArray{<: Number},
Γ₆::AbstractArray{<: Number}, z::AbstractVector{T}, Ψ::AbstractMatrix{T};
sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, sparsity_detection::Bool = false) where {T <: Number, LC}
# Define (temporary) objective functions
_f_𝒱z = z -> 𝒱(z, Ψ, (1, 2))
# Need to grab some dimensions
Ny, Nz = size(Ψ)
Nε = size(LC <: AbstractArray ? Σ.cache : Σ.cache.du, 2)
# Infer sparsity patterns and matrix coloring vector
if isnothing(sparsity)
sparsity, colorvec = compute_sparsity_pattern(_f_𝒱z, z, Ny; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec)
colorvec = matrix_colors(sparsity)
end
# Create RALF2 object. Note that we cannot pre-allocate the caches for
# forwarddiff_color_jacobian! by using ForwardColorJacCache b/c the objective function
# changes as the coefficients (z, y, Ψ) change.
#
# For 𝒱 specifically, to avoid problems reinterpreting arrays to make autodiff work,
# we redefine the 𝒱 function to use Λ.f0 and Σ.f0 rather than using the RALF objects
# (as we do when applying autodiff for dense Jacobians). If we use Λ(z) and Σ(z)
# directly, then the reinterpret step may either fail (cannot reinterpret the array
# to the new desired chunk size), or the reinterpreted array
# will have the wrong dimensions.
## Infer whether Λ and Σ are in place
FΛ0 = similar(z, Nz, Ny)
FΣ0 = similar(z, Nz, Nε)
Λ0 = if applicable(Λ.f0, FΛ0, z)
Λ.f0
else
function _Λ_op(F, z)
F .= Λ.f0(z)
end
end
Σ0 = if applicable(Σ.f0, FΣ0, z)
Σ.f0
else
function _Σ_op(F, z)
F .= Σ.f0(z)
end
end
_𝒱_sparse = if applicable(ccgf, Γ₅, z)
function _𝒱_sparse_op(F, z, Ψ)
FΛ = similar(F, Nz, Ny)
FΣ = similar(F, Nz, Nε)
Λ0(FΛ, z)
Σ0(FΣ, z)
F .= ccgf((Γ₅ + Γ₆ * Ψ) * ((I - (FΛ * Ψ)) \ FΣ), z)
end
else
function _𝒱_sparse_ip(F, z, Ψ)
FΛ = similar(F, Nz, Ny)
FΣ = similar(F, Nz, Nε)
Λ0(FΛ, z)
Σ0(FΣ, z)
ccgf(F, (Γ₅ + Γ₆ * Ψ) * ((I - (FΛ * Ψ)) \ FΣ), z)
end
end
𝒱_dz = similar(z, Ny)
J𝒱 = RALF2((F, z, Ψ) -> forwarddiff_color_jacobian!(F, (F0, x) -> _𝒱_sparse(F0, x, Ψ), z, dx = 𝒱_dz,
colorvec = colorvec, sparsity = sparsity),
z, Ψ, deepcopy(sparsity))
# Create mini-version of the Jacobian cache
J𝒱_jac_cache = (dx = 𝒱_dz, sparsity = sparsity, colorvec = colorvec)
return J𝒱, J𝒱_jac_cache
end
function construct_𝒱_jacobian_function(𝒱::RALF4, ccgf::Function, Λ::RALF2, Σ::RALF2{LC}, Γ₅::AbstractArray{<: Number},
Γ₆::AbstractArray{<: Number}, z::AbstractVector{T}, y::AbstractVector{T},
Ψ::AbstractMatrix{T};
sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, sparsity_detection::Bool = false) where {T <: Number, LC}
# Define (temporary) objective functions
_f_𝒱z = zₜ -> 𝒱(zₜ, y, Ψ, zₜ, (4, 2))
# Need to grab some dimensions
Ny, Nz = size(Ψ)
Nε = size(LC <: AbstractArray ? Σ.cache : Σ.cache.du, 2)
# Infer sparsity patterns and matrix coloring vector
if isnothing(sparsity)
sparsity, colorvec = compute_sparsity_pattern(_f_𝒱z, z, Ny; sparsity_detection = sparsity_detection)
elseif isnothing(colorvec)
colorvec = matrix_colors(sparsity)
end
# Create RALF2 objects. Note that we cannot pre-allocate the caches for
# forwarddiff_color_jacobian! by using ForwardColorJacCache b/c the objective function
# changes as z and y change. If Jacobians of μ and ξ are refactored to be done once,
# then it'll be possible to cache.
#
# See the previous version of construct_𝒱_jacobian_function for comments on
# why we re-implement 𝒱 as done below.
FΛ0 = similar(z, Nz, Ny)
FΣ0 = similar(z, Nz, Nε)
Λ0 = if applicable(Λ.f0, FΛ0, z, y)
Λ.f0
else
function _Λ_op(F, z, y)
F .= Λ.f0(z, y)
end
end
Σ0 = if applicable(Σ.f0, FΣ0, z, y)
Σ.f0
else
function _Σ_op(F, z, y)
F .= Σ.f0(z, y)
end
end
_𝒱_sparse = if applicable(ccgf, Γ₅, z)
function _𝒱_sparse_op(F, z, y, Ψ, zₜ)
FΛ = similar(F, Nz, Ny)
FΣ = similar(F, Nz, Nε)
yₜ = y + Ψ * (zₜ - z)
Λ0(FΛ, zₜ, yₜ)
Σ0(FΣ, zₜ, yₜ)
F .= ccgf((Γ₅ + Γ₆ * Ψ) * ((I - (FΛ * Ψ)) \ FΣ), zₜ)
end
else
function _𝒱_sparse_ip(F, z, y, Ψ, zₜ)
FΛ = similar(F, Nz, Ny)
FΣ = similar(F, Nz, Nε)
yₜ = y + Ψ * (zₜ - z)
Λ0(FΛ, zₜ, yₜ)
Σ0(FΣ, zₜ, yₜ)
ccgf(F, (Γ₅ + Γ₆ * Ψ) * ((I - (FΛ * Ψ)) \ FΣ), zₜ)
end
end
𝒱_dz = similar(y, Ny)
J𝒱 = RALF3((F, z, y, Ψ) -> forwarddiff_color_jacobian!(F, (F0, zₜ) -> _𝒱_sparse(F0, z, y, Ψ, zₜ), z, dx = 𝒱_dz,
colorvec = colorvec, sparsity = sparsity),
z, y, Ψ, deepcopy(sparsity))
# Create mini-version of the Jacobian cache
J𝒱_jac_cache = (dx = 𝒱_dz, sparsity = sparsity, colorvec = colorvec)
return J𝒱, J𝒱_jac_cache
end
# Helper functions for exploiting sparsity in calls to nlsolve
## Helper function for compute_sparsity_pattern
function infer_objective_function(m::RiskAdjustedLinearization, algorithm::Symbol; q::Float64 = .1)
f = if algorithm == :deterministic
(F, x) -> _deterministic_equations(F, x, m)
elseif algorithm == :relaxation
(F, x) -> _relaxation_equations(F, x, m, m.Ψ, m[:𝒱_sss])
elseif algorithm == :homotopy
if Λ_eltype(m.nonlinear) <: RALF1 && Σ_eltype(m.nonlinear) <: RALF1
(F, x) -> _homotopy_equations1(F, x, m, q)
else
(F, x) -> _homotopy_equations2(F, x, m, q)
end
end
return f
end
"""
```
compute_sparsity_pattern(m::RiskAdjustedLinearization, algorithm::Symbol; q::Float64 = .1,
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false)
```
calculates the sparsity pattern and matrix coloring vector of the Jacobian
of the nonlinear system of equations for either the deterministic or
stochastic steady state, depending on which `algorithm` is called.
### Keywords
- `q`: step size for homotopy. Should satisfy `0 < q < 1` and is only required to ensure
that the sparsity pattern is correctly determined when `algorithm = :homotopy`
and thus the dependence of the entropy `𝒱` on the coefficients `(z, y, Ψ)` matters.
- `sparsity`: sparsity pattern of the Jacobian of the nonlinear system of equations
- `sparsity_detection`: if true, use SparsityDetection.jl to determine the sparsity pattern.
If false, then the sparsity pattern is determined by using finite differences
to calculate a Jacobian and assuming any zeros are supposed to be zero.
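### Example
A sketch, given a `RiskAdjustedLinearization` `m` with reasonable guesses for the coefficients:
```
sparsity, colorvec = compute_sparsity_pattern(m, :relaxation)
```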
"""
function compute_sparsity_pattern(m::RiskAdjustedLinearization, algorithm::Symbol; q::Float64 = .1,
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false)
@assert algorithm in [:deterministic, :relaxation, :homotopy] "The algorithm must be :deterministic, :relaxation, or :homotopy"
@assert 1 > q > 0 "The step size q must satisfy 0 < q < 1."
f = infer_objective_function(m, algorithm; q = q)
input = algorithm == :homotopy ? vcat(m.z, m.y, vec(m.Ψ)) : vcat(m.z, m.y)
if isnothing(sparsity)
sparsity = if sparsity_detection
convert(SparseMatrixCSC{eltype(input), Int}, jacobian_sparsity(f, similar(input), input))
else
jac = similar(input, length(input), length(input))
FiniteDiff.finite_difference_jacobian!(jac, f, input)
sparse(jac)
end
end
colorvec = matrix_colors(sparsity)
return sparsity, colorvec
end
"""
```
preallocate_jac_cache(m::RiskAdjustedLinearization, algorithm::Symbol; q::Float64 = .1,
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false)
```
pre-allocates the cache for the Jacobian of the nonlinear system of equations
for either the deterministic or stochastic steady state, depending on which
`algorithm` is called.
### Keywords
- `q`: step size for homotopy. Should satisfy `0 < q < 1` and is only required to ensure
that the sparsity pattern is correctly determined when `algorithm = :homotopy`
and thus the dependence of the entropy `𝒱` on the coefficients `(z, y, Ψ)` matters.
- `sparsity`: the sparsity pattern of the Jacobian of the nonlinear system of equations
- `sparsity_detection`: if true, use SparsityDetection.jl to determine the sparsity pattern.
If false, then the sparsity pattern is determined by using finite differences
to calculate a Jacobian and assuming any zeros are supposed to be zero.
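### Example
A sketch of allocating the cache once so that repeated solves can reuse it:
```
jac_cache = preallocate_jac_cache(m, :deterministic)
```
The returned `FiniteDiff.JacobianCache` can then be passed along to the nonlinear solver
wherever a `jac_cache` keyword is accepted.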
"""
function preallocate_jac_cache(m::RiskAdjustedLinearization, algorithm::Symbol; q::Float64 = .1,
sparsity::Union{AbstractArray, Nothing} = nothing,
sparsity_detection::Bool = false)
sparsity, colorvec = compute_sparsity_pattern(m, algorithm; q = q,
sparsity = sparsity, sparsity_detection = sparsity_detection)
input = algorithm == :homotopy ? vcat(m.z, m.y, vec(m.Ψ)) : vcat(m.z, m.y)
return FiniteDiff.JacobianCache(input, colorvec = colorvec, sparsity = sparsity)
end
function construct_sparse_jacobian_function(m::RiskAdjustedLinearization, f::Function,
algorithm::Symbol, autodiff::Symbol;
sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, jac_cache = nothing,
sparsity_detection::Bool = false)
if isnothing(jac_cache)
# Create Jacobian function that does not assume the existence of a cache
if isnothing(sparsity) # No sparsity pattern provided, so need to make one
sparsity, colorvec = compute_sparsity_pattern(m, algorithm; sparsity = sparsity,
sparsity_detection = sparsity_detection)
elseif isnothing(colorvec) # Sparsity pattern, but no colorvec, so apply matrix_colors
colorvec = matrix_colors(sparsity)
end
nlsolve_jacobian! = if autodiff == :forward
(F, x) -> forwarddiff_color_jacobian!(F, f, x, # homotopy doesn't work with autodiff, so assuming
ForwardColorJacCache(f, x, min(m.Nz, m.Ny); # only using deterministic/relaxation,
colorvec = colorvec, sparsity = sparsity)) # hence the chunk size
else
(F, x) -> FiniteDiff.finite_difference_jacobian!(F, f, x; colorvec = colorvec,
sparsity = sparsity)
end
return nlsolve_jacobian!, sparsity
else
# Create Jacobian function that assumes the existence of a cache
nlsolve_jacobian! = if autodiff == :forward
(F, x) -> forwarddiff_color_jacobian!(F, f, x, jac_cache)
else
(F, x) -> FiniteDiff.finite_difference_jacobian!(F, f, x, jac_cache)
end
return nlsolve_jacobian!, jac_cache.sparsity
end
end
"""
```
dualarray(a::AbstractArray, b::AbstractArray)
```
returns an `Array` that has the size of input `a` and an element type consistent with the element types
of the inputs `a` and `b`. For example, suppose you want to write a function of the form
```
julia> function f(a, b)
F = similar(a)
F[1] = a[1] * b[1]
F[2] = a[2] + b[2]
end
```
If you were to automatically differentiate `f` with respect to `b`, then `F` would not have the correct element type since its type
will be the same as `a`. Rather than require the user to write
```
julia> F = if eltype(b) <: ForwardDiff.Dual
similar(a, eltype(b))
else
similar(a)
end
```
the user can use `dualarray` (or, for vectors, the convenience wrapper `dualvector`) to write
```@meta
DocTestSetup = quote
import ForwardDiff
using RiskAdjustedLinearizations, ForwardDiff
end
```
```jldoctest
julia> a = rand(3)
julia> b = ones(ForwardDiff.Dual, 5)
julia> F = RiskAdjustedLinearizations.dualvector(a, b)
3-element Array{ForwardDiff.Dual,1}:
#undef
#undef
#undef
```
```@meta
DocTestSetup = nothing
```
Note that the element types of `a` and `b` must be subtypes of `Real` (or else `ForwardDiff` will not work).
"""
@inline dualarray(a::AbstractArray{<: ForwardDiff.Dual}, b::AbstractArray{<: ForwardDiff.Dual}) = similar(a)
@inline dualarray(a::AbstractArray{<: ForwardDiff.Dual}, b::AbstractArray{<: Real}) = similar(a)
@inline dualarray(a::AbstractArray{<: Real}, b::AbstractArray{<: Real}) = similar(a)
@inline dualarray(a::AbstractArray{<: Real}, b::AbstractArray{<: ForwardDiff.Dual}) = similar(a, eltype(b))
"""
```
dualvector(a::AbstractVector, b::AbstractVector)
```
has the same behavior as `dualarray` but acts specifically on vectors. This function
is primarily for user convenience.
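For example (a minimal sketch):
```
a = rand(3)
b = zeros(ForwardDiff.Dual{Nothing, Float64, 2}, 5)
F = dualvector(a, b)    # length 3, with eltype(F) == eltype(b)
```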
"""
@inline dualvector(a::AbstractVector{<: ForwardDiff.Dual}, b::AbstractVector{<: ForwardDiff.Dual}) = similar(a)
@inline dualvector(a::AbstractVector{<: ForwardDiff.Dual}, b::AbstractVector{<: Real}) = similar(a)
@inline dualvector(a::AbstractVector{<: Real}, b::AbstractVector{<: Real}) = similar(a)
@inline dualvector(a::AbstractVector{<: Real}, b::AbstractVector{<: ForwardDiff.Dual}) = similar(a, eltype(b))
# Port of DiffCache type from old DiffEqBase, which is not in DiffEqBase nor in SciMLBase
struct DiffCache{T<:AbstractArray, S<:AbstractArray}
du::T
dual_du::S
end
function DiffCache(u::AbstractArray{T}, siz, ::Type{Val{chunk_size}}) where {T, chunk_size}
x = ArrayInterface.restructure(u,zeros(ForwardDiff.Dual{nothing,T,chunk_size}, siz...))
DiffCache(u, x)
end
dualcache(u::AbstractArray, N=Val{ForwardDiff.pickchunksize(length(u))}) = DiffCache(u, size(u), N)
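# Example of the intended pattern (a sketch): allocate a DiffCache once; `get_tmp`
# then returns `dc.du` for plain numeric inputs and the reinterpreted dual cache for
# ForwardDiff.Dual inputs, so one buffer serves both value and derivative evaluations.
# dc = dualcache(zeros(3))
# f!(F, x) = (F .= 2 .* x)
# wrapped(x) = (F = get_tmp(dc, x); f!(F, x); F)
# ForwardDiff.jacobian(wrapped, rand(3))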
function get_tmp(dc::DiffCache, u::AbstractArray{T}) where T<:ForwardDiff.Dual
x = reinterpret(T, dc.dual_du)
end
function get_tmp(dc::DiffCache{A, B}, u::AbstractArray{T}) where {T <: ForwardDiff.Dual,
A <: AbstractArray, B <: SparseMatrixCSC}
if VERSION <= v"1.5"
x = reinterpret(T, dc.dual_du)
return x
else
error("It is not possible to reinterpret sparse matrices in your current version of Julia. " *
"In order to cache sparse Jacobians in the current implementation of this package, " *
"you need to be able to reinterpret sparse matrices. Please use an earlier version " *
"of Julia (e.g. 1.3 or 1.5), or do not use the sparse Jacobian functionality.")
end
end
function get_tmp(dc::DiffCache, u::LabelledArrays.LArray{T,N,D,Syms}) where {T,N,D,Syms}
x = reinterpret(T, dc.dual_du.__x)
LabelledArrays.LArray{T,N,D,Syms}(x)
end
get_tmp(dc::DiffCache, u::AbstractArray) = dc.du
# Extend get_tmp(dc::DiffCache, ...) to allow for two arguments
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
end
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: ForwardDiff.Dual}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
end
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: Number}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2}, select::Tuple{Int, Int}) where {T1 <: Number, T2 <: Number} = dc.du
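# Note on the `select` convention used above and below (as inferred from usage):
# select[1] picks which input array's element type is used when reinterpreting the
# dual cache, while select[2] picks which dual cache to fetch (only relevant for the
# TwoDiffCache and ThreeDiffCache types defined below).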
# Extend get_tmp to allow 3 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
elseif select[1] == 3
get_tmp(dc, u3)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(dc::DiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, select::Tuple{Int, Int}) = dc.du
# Extend get_tmp to allow 4 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual, T4 <: ForwardDiff.Dual}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
elseif select[1] == 3
get_tmp(dc, u3)
elseif select[1] == 4
get_tmp(dc, u4)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
function get_tmp(dc::DiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: Number,
T3 <: Number, T4 <: ForwardDiff.Dual}
if select[1] == 1
get_tmp(dc, u1)
elseif select[1] == 2
get_tmp(dc, u2)
elseif select[1] == 3
get_tmp(dc, u3)
elseif select[1] == 4
get_tmp(dc, u4)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(dc::DiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, u4::AbstractArray{<: Number}, select::Tuple{Int, Int}) = dc.du
"""
```
TwoDiffCache
```
The TwoDiffCache type extends DiffCache from DiffEqBase to hold two Dual Array caches,
permitting the case where you need to autodiff w.r.t. 2 different lengths of arguments.
For example, suppose we have
```
function f(out, x, y) # in-place function
out .= x .* y
end
function F(cache::DiffCache, x, y) # wrapper function for f
f(get_tmp(cache, x), x, y)
return get_tmp(cache, x)
end
# Instantiate inputs and caches
x = rand(3)
y = rand(3)
cache3 = dualcache(zeros(3), Val{3})
cache6 = dualcache(zeros(3), Val{6})
```
Then the following block of code will work
```
JF3 = (G, x1, y1) -> ForwardDiff.jacobian!(G, z -> F(cache3, z, y1), x1)
JF3(rand(3, 3), x, y)
```
but it may be the case that we also sometimes need to calculate
```
JF6 = (G, x1, y1) -> ForwardDiff.jacobian!(G, z -> F(cache3, z[1:3], z[4:6]), vcat(x1, y1))
JF6(rand(3, 3), x, y)
```
This block of code will error because the chunk size needs to be 6, not 3 here.
Therefore, the correct definition of `JF6` is
```
JF6 = (G, x1, y1) -> ForwardDiff.jacobian!(G, z -> F(cache6, z[1:3], z[4:6]), vcat(x1, y1))
```
Rather than carry around two `DiffCache` objects, it is better to simply add another dual cache
to a single `DiffCache` object. In principle, this code could be generalized to `n` dual caches,
but this would require some additional thought to implement generically.
"""
struct TwoDiffCache{T <: AbstractArray, C1 <: AbstractArray, C2 <: AbstractArray}
du::T
dual_du1::C1
dual_du2::C2
end
function TwoDiffCache(u::AbstractArray{T}, siz, ::Type{Val{chunk_size1}}, ::Type{Val{chunk_size2}}) where {T, chunk_size1, chunk_size2}
x1 = ArrayInterface.restructure(u, zeros(ForwardDiff.Dual{nothing, T, chunk_size1}, siz...))
x2 = ArrayInterface.restructure(u, zeros(ForwardDiff.Dual{nothing, T, chunk_size2}, siz...))
TwoDiffCache(u, x1, x2)
end
twodualcache(u::AbstractArray, N1, N2) = TwoDiffCache(u, size(u), N1, N2)
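# Example (a sketch), matching the docstring above: two dual caches for chunk sizes 3 and 6
# cache = twodualcache(zeros(3), Val{3}, Val{6})
# get_tmp(cache, x_dual, y, (1, 2))  # second dual cache, reinterpreted to eltype(x_dual)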
# get_tmp for AbstractArray cases
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: ForwardDiff.Dual}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: Number}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
# get_tmp for no Dual cases
get_tmp(tdc::TwoDiffCache, u1::AbstractArray, u2::AbstractArray, select::Tuple{Int, Int}) = tdc.du
# Extend get_tmp to allow 4 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 3
x = reinterpret(T3, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(tdc::TwoDiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, select::Tuple{Int, Int}) = tdc.du
# Extend get_tmp to allow 4 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual, T4 <: ForwardDiff.Dual}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 3
x = reinterpret(T3, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 4
x = reinterpret(T4, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
function get_tmp(tdc::TwoDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: Number,
T3 <: Number, T4 <: ForwardDiff.Dual}
if select[1] == 1
x = reinterpret(T1, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 2
x = reinterpret(T2, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 3
x = reinterpret(T3, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
elseif select[1] == 4
x = reinterpret(T4, select[2] == 1 ? tdc.dual_du1 : tdc.dual_du2)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(tdc::TwoDiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, u4::AbstractArray{<: Number}, select::Tuple{Int, Int}) = tdc.du
"""
```
ThreeDiffCache
```
The ThreeDiffCache type extends DiffCache from DiffEqBase to permit three Dual Array caches.
"""
struct ThreeDiffCache{T <: AbstractArray, C1 <: AbstractArray, C2 <: AbstractArray, C3 <: AbstractArray}
du::T
dual_du1::C1
dual_du2::C2
dual_du3::C3
end
function ThreeDiffCache(u::AbstractArray{T}, siz, ::Type{Val{chunk_size1}},
::Type{Val{chunk_size2}}, ::Type{Val{chunk_size3}}) where {T, chunk_size1, chunk_size2, chunk_size3}
x1 = ArrayInterface.restructure(u, zeros(ForwardDiff.Dual{nothing, T, chunk_size1}, siz...))
x2 = ArrayInterface.restructure(u, zeros(ForwardDiff.Dual{nothing, T, chunk_size2}, siz...))
x3 = ArrayInterface.restructure(u, zeros(ForwardDiff.Dual{nothing, T, chunk_size3}, siz...))
ThreeDiffCache(u, x1, x2, x3)
end
threedualcache(u::AbstractArray, N1, N2, N3) = ThreeDiffCache(u, size(u), N1, N2, N3)
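# Example (a sketch): three dual caches for three different chunk sizes
# cache = threedualcache(zeros(3), Val{2}, Val{3}, Val{6})
# get_tmp(cache, x_dual, y, (1, 3))  # third dual cache, reinterpreted to eltype(x_dual)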
# get_tmp for both AbstractArray
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: ForwardDiff.Dual}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: Number}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache."))
end
return x
end
# get_tmp for no Dual cases
get_tmp(tdc::ThreeDiffCache, u1::AbstractArray, u2::AbstractArray, select::Tuple{Int, Int}) = tdc.du
# Extend get_tmp to allow 3 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
elseif select[1] == 3
x = reinterpret(T3, dual_du)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, select::Tuple{Int, Int}) = tdc.du
# Extend get_tmp to allow 4 input arguments, only done for the case required by RiskAdjustedLinearizations.jl
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual,
T3 <: ForwardDiff.Dual, T4 <: ForwardDiff.Dual}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
elseif select[1] == 3
x = reinterpret(T3, dual_du)
elseif select[1] == 4
x = reinterpret(T4, dual_du)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
function get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{T1}, u2::AbstractArray{T2},
u3::AbstractArray{T3}, u4::AbstractArray{T4},
select::Tuple{Int, Int}) where {T1 <: Number, T2 <: Number,
T3 <: Number, T4 <: ForwardDiff.Dual}
dual_du = if select[2] == 1
tdc.dual_du1
elseif select[2] == 2
tdc.dual_du2
elseif select[2] == 3
tdc.dual_du3
else
throw(MethodError("Fourth input argument to get_tmp points to a non-existent cache"))
end
if select[1] == 1
x = reinterpret(T1, dual_du)
elseif select[1] == 2
x = reinterpret(T2, dual_du)
elseif select[1] == 3
x = reinterpret(T3, dual_du)
elseif select[1] == 4
x = reinterpret(T4, dual_du)
else
throw(MethodError("Sixth input argument to get_tmp points to a non-existent cache."))
end
end
get_tmp(tdc::ThreeDiffCache, u1::AbstractArray{<: Number}, u2::AbstractArray{<: Number}, u3::AbstractArray{<: Number}, u4::AbstractArray{<: Number}, select::Tuple{Int, Int}) = tdc.du
"""
```
blanchard_kahn(m::RiskAdjustedLinearization; deterministic::Bool = false, verbose::Symbol = :high)
```
checks the Blanchard-Kahn conditions for whether a first-order perturbation is saddle-path stable or not.
If `verbose` is `:low` or `:high`, a print statement will be shown if the Blanchard-Kahn conditions are satisfied.
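### Example
A sketch, assuming `m` has already been solved:
```
blanchard_kahn(m)                        # conditions at the stochastic steady state
blanchard_kahn(m; deterministic = true)  # conditions at the deterministic steady state
```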
"""
function blanchard_kahn(m::RiskAdjustedLinearization; deterministic::Bool = false, verbose::Symbol = :high)
li = linearized_system(m)
Nz = m.Nz
Ny = m.Ny
N_zy = m.Nz + m.Ny
ztype = eltype(m.z)
AA = Matrix{ztype}(undef, N_zy, N_zy)
BB = similar(AA)
# Populate AA
AA[1:Ny, 1:Nz] = li[:Γ₅]
AA[1:Ny, (Nz + 1):end] = li[:Γ₆]
AA[(Ny + 1):end, 1:Nz] = Matrix{ztype}(I, m.Nz, m.Nz) # faster than Diagonal(Ones{ztype}(Nz)), though it allocates
AA[(Ny + 1):end, (Nz + 1):end] = Zeros{ztype}(m.Nz, m.Ny)
# Populate BB
BB[1:Ny, 1:Nz] = deterministic ? -li[:Γ₃] : -(li[:Γ₃] + li[:JV])
BB[1:Ny, (Nz + 1):end] = -li[:Γ₄]
BB[(Ny + 1):end, 1:Nz] = li[:Γ₁]
BB[(Ny + 1):end, (Nz + 1):end] = li[:Γ₂]
if count(abs.(eigen(AA, BB).values) .> 1) != m.Nz
if deterministic
throw(BlanchardKahnError("First-order perturbation around deterministic steady state is not saddle-path stable"))
else
throw(BlanchardKahnError("First-order perturbation around stochastic steady state is not saddle-path stable"))
end
else
if verbose in [:low, :high]
if deterministic
println("Blanchard-Kahn conditions for a unique locally bounded deterministic " *
"steady-state perturbation are satisfied")
else
println("Blanchard-Kahn conditions for a unique locally bounded stochastic " *
"steady-state perturbation are satisfied")
end
end
return true
end
end
mutable struct BlanchardKahnError <: Exception
msg::String
end
BlanchardKahnError() = BlanchardKahnError("First-order perturbation is not saddle-path stable")
Base.showerror(io::IO, ex::BlanchardKahnError) = print(io, ex.msg)
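# Hedged, self-contained sketch (not part of the package) of the eigenvalue count
# used in `blanchard_kahn`: saddle-path stability requires exactly Nz generalized
# eigenvalues of the pencil (AA, BB) outside the unit circle. A toy 2x2 case with
# Nz = 1 state and 1 jump:
#
#     using LinearAlgebra
#     AA = Matrix{Float64}(I, 2, 2)
#     BB = [0.5 0.0; 0.0 2.0]  # generalized eigenvalues of AA*v = λ*BB*v are 2.0 and 0.5
#     count(abs.(eigen(AA, BB).values) .> 1) == 1  # true: exactly Nz explosive roots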
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 5096 | """
```
compute_Ψ(Γ₁, Γ₂, Γ₃, Γ₄, Γ₅, Γ₆, JV = []; schur_fnct = schur!)
compute_Ψ(m::RALLinearizedSystem; zero_entropy_jacobian = false, schur_fnct = schur!)
compute_Ψ(m::RiskAdjustedLinearization; zero_entropy_jacobian = false, schur_fnct = schur!)
```
solves via QZ decomposition for ``\\Psi_n`` in the quadratic matrix equation
```math
\\begin{aligned}
0 = JV + \\Gamma_3 + \\Gamma_4\\Psi_n + (\\Gamma_5 + \\Gamma_6\\Psi_n)(\\Gamma_1 + \\Gamma_2\\Psi_n).
\\end{aligned}
```
See the documentation of `RiskAdjustedLinearizations.jl` for details about what these matrices are.
### Inputs
For the first method, all the required inputs must have type `AbstractMatrix{<: Number}`. The `JV` term is empty by default,
in which case `compute_Ψ` assumes that `JV` is the zero matrix, which corresponds to the
case of the deterministic steady state.
The second and third methods are wrappers for the first method.
Internally, there are in-place versions (`compute_Ψ!`) to avoid allocations when
`compute_Ψ!` is called repeatedly by one of the numerical algorithms (e.g. `relaxation!`).
### Keywords
- `schur_fnct::Function`: specifies which Generalized Schur algorithm is desired. By default,
`schur!` from LinearAlgebra (which wraps LAPACK) is called, but the user may want to use the Generalized Schur algorithms
from packages like `GenericLinearAlgebra.jl` and `GenericSchur.jl`.
- `zero_entropy_jacobian::Bool`: if true, then we assume the Jacobian of the entropy is all zeros (i.e. in the deterministic steady state).
This keyword allows the user to avoid adding zeros unnecessarily and/or let the cached entropy Jacobian remain undefined.
"""
function compute_Ψ(Γ₁::AbstractMatrix{S}, Γ₂::AbstractMatrix{S}, Γ₃::AbstractMatrix{S}, Γ₄::AbstractMatrix{S},
Γ₅::AbstractMatrix{S}, Γ₆::AbstractMatrix{S}, JV::AbstractMatrix{S} = Matrix{S}(undef, 0, 0);
schur_fnct::Function = schur!) where {S <: Number}
Nzy = sum(size(Γ₅))
AA = Matrix{Complex{S}}(undef, Nzy, Nzy)
BB = similar(AA)
return compute_Ψ!(AA, BB, Γ₁, Γ₂, Γ₃, Γ₄, Γ₅, Γ₆, JV; schur_fnct = schur_fnct)
end
function compute_Ψ(m::RALLinearizedSystem; zero_entropy_jacobian::Bool = false, schur_fnct::Function = schur!)
if zero_entropy_jacobian
return compute_Ψ(m[:Γ₁], m[:Γ₂], m[:Γ₃], m[:Γ₄], m[:Γ₅], m[:Γ₆]; schur_fnct = schur_fnct)
else
return compute_Ψ(m[:Γ₁], m[:Γ₂], m[:Γ₃], m[:Γ₄], m[:Γ₅], m[:Γ₆], m[:JV]; schur_fnct = schur_fnct)
end
end
@inline function compute_Ψ(m::RiskAdjustedLinearization; zero_entropy_jacobian::Bool = false, schur_fnct::Function = schur!)
return compute_Ψ(m.linearization; zero_entropy_jacobian = zero_entropy_jacobian, schur_fnct = schur_fnct)
end
function compute_Ψ!(AA::AbstractMatrix{Complex{S}}, BB::AbstractMatrix{Complex{S}},
Γ₁::AbstractMatrix{S}, Γ₂::AbstractMatrix{S}, Γ₃::AbstractMatrix{S}, Γ₄::AbstractMatrix{S},
Γ₅::AbstractMatrix{S}, Γ₆::AbstractMatrix{S}, JV::AbstractMatrix{S} = Matrix{S}(undef, 0, 0);
schur_fnct::Function = schur!) where {S <: Number}
if !isempty(JV)
Γ₃ += JV
end
Ny, Nz = size(Γ₅)
# Populate AA
AA[1:Ny, 1:Nz] = Γ₅
AA[1:Ny, (Nz + 1):end] = Γ₆
AA[(Ny + 1):end, 1:Nz] = Matrix{Complex{S}}(I, Nz, Nz) # faster but makes allocations, unlike Diagonal(Ones{Complex{S}}(Nz))
AA[(Ny + 1):end, (Nz + 1):end] = Zeros{Complex{S}}(Nz, Ny)
# Populate BB
BB[1:Ny, 1:Nz] = -Γ₃
BB[1:Ny, (Nz + 1):end] = -Γ₄
BB[(Ny + 1):end, 1:Nz] = Γ₁
BB[(Ny + 1):end, (Nz + 1):end] = Γ₂
# Compute QZ and back out Ψ
schurfact = schur_fnct(AA, BB)
ordschur!(schurfact, [abs(αᵢ) >= abs(βᵢ) for (αᵢ, βᵢ) in
zip(schurfact.α, schurfact.β)]) # eigenvalues = schurfact.β / schurfact.α
# Note: We could calculate stability conditions here but, typically,
# compute_Ψ is called enough within the relaxation algorithm
# that calculating the stability conditions repeatedly takes more time
# than just calling eigen once
return real(schurfact.Z[Nz + 1:end, 1:Nz] / schurfact.Z[1:Nz, 1:Nz])
end
function compute_Ψ!(AA::AbstractMatrix{Complex{S}}, BB::AbstractMatrix{Complex{S}},
m::RALLinearizedSystem; zero_entropy_jacobian::Bool = false, schur_fnct::Function = schur!) where {S <: Number}
if zero_entropy_jacobian
return compute_Ψ!(AA, BB, m[:Γ₁], m[:Γ₂], m[:Γ₃], m[:Γ₄], m[:Γ₅], m[:Γ₆]; schur_fnct = schur_fnct)
else
return compute_Ψ!(AA, BB, m[:Γ₁], m[:Γ₂], m[:Γ₃], m[:Γ₄], m[:Γ₅], m[:Γ₆], m[:JV]; schur_fnct = schur_fnct)
end
end
@inline function compute_Ψ!(AA::AbstractMatrix{Complex{S}}, BB::AbstractMatrix{Complex{S}},
m::RiskAdjustedLinearization; zero_entropy_jacobian::Bool = false, schur_fnct::Function = schur!) where {S <: Number}
return compute_Ψ!(AA, BB, m.linearization; zero_entropy_jacobian = zero_entropy_jacobian, schur_fnct = schur_fnct)
end
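# Hedged, self-contained toy check (not part of the package): with scalar Γ's
# (Nz = Ny = 1), the QZ recursion above can be compared against the quadratic
# formula applied to
#     0 = (Γ₃ + Γ₅Γ₁) + (Γ₄ + Γ₅Γ₂ + Γ₆Γ₁)Ψ + Γ₆Γ₂ Ψ²,
# which is the scalar expansion of the matrix equation in the docstring (with JV = 0).
#
#     using LinearAlgebra
#     Γ₁, Γ₂, Γ₃, Γ₄, Γ₅, Γ₆ = .9, .1, -.5, 1., .2, 1.
#     AA = ComplexF64[Γ₅ Γ₆; 1 0]
#     BB = ComplexF64[-Γ₃ -Γ₄; Γ₁ Γ₂]
#     sf = schur(AA, BB)
#     ordschur!(sf, [abs(α) >= abs(β) for (α, β) in zip(sf.α, sf.β)])
#     Ψ_qz = real(sf.Z[2:2, 1:1] / sf.Z[1:1, 1:1])[1]
#     a, b, c = Γ₆ * Γ₂, Γ₄ + Γ₅ * Γ₂ + Γ₆ * Γ₁, Γ₃ + Γ₅ * Γ₁
#     roots = ((-b + sqrt(b^2 - 4a * c)) / 2a, (-b - sqrt(b^2 - 4a * c)) / 2a)
#     # Ψ_qz should coincide with one of the two roots (the stable one selected
#     # by the |α| ≥ |β| ordering).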
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 8759 | """
```
homotopy!(m, xₙ₋₁; step = .1, pnorm = Inf, verbose = :none, kwargs...)
```
solves the system of equations characterizing a risk-adjusted linearization by a homotopy method with
embedding parameter ``q``, which steps from 0 to 1; the solution at ``q = 1`` is the true solution.
Currently, the only algorithm for choosing ``q`` is a simple uniform step search. Given a step size
``\\Delta``, we solve the homotopy starting from ``q = \\Delta`` and increase ``q`` by ``\\Delta``
until ``q`` reaches 1 or passes 1 (in which case, we force ``q = 1``).
### Types:
- `S1 <: Number`
### Inputs
- `m::RiskAdjustedLinearization`: object holding functions needed to calculate
the risk-adjusted linearization
- `xₙ₋₁::AbstractVector{S1}`: initial guess for ``(z, y, \\Psi)``
### Keywords
- `step::Float64`: size of the uniform steps in the embedding parameter, which runs from `step` to 1.
- `pnorm::Float64`: norm under which to evaluate the errors after homotopy succeeds.
- `sparse_jacobian::Bool = false`: if true, exploit sparsity in the Jacobian in calls to `nlsolve` using SparseDiffTools.jl.
If `jac_cache` and `sparsity` are `nothing`, then `homotopy!` will attempt to determine the sparsity pattern.
- `sparsity::Union{AbstractArray, Nothing} = nothing`: sparsity pattern for the Jacobian in calls to `nlsolve`
- `colorvec = nothing`: matrix coloring vector for sparse Jacobian in calls to `nlsolve`
- `jac_cache = nothing`: pre-allocated Jacobian cache for calls to `nlsolve` during the numerical algorithms
- `sparsity_detection::Bool = true`: If true, use SparsityDetection.jl to detect sparsity pattern (only relevant if
both `jac_cache` and `sparsity` are `nothing`). If false, then the sparsity pattern is
determined by using finite differences to calculate a Jacobian and assuming any zeros will always be zero.
Currently, SparsityDetection.jl fails to work.
- `verbose::Symbol`: verbosity of information printed out during solution.
a) `:low` -> statement when homotopy continuation succeeds
b) `:high` -> statement when homotopy continuation succeeds and for each successful iteration
"""
function homotopy!(m::RiskAdjustedLinearization, xₙ₋₁::AbstractVector{S1};
step::Float64 = .1, pnorm::Float64 = Inf,
autodiff::Symbol = :central,
sparse_jacobian::Bool = false,
sparsity::Union{AbstractArray, Nothing} = nothing, colorvec = nothing,
jac_cache = nothing, sparsity_detection::Bool = true,
verbose::Symbol = :none,
kwargs...) where {S1 <: Number}
# Set up
nl = nonlinear_system(m)
li = linearized_system(m)
_my_eqn = if Λ_eltype(nl) <: RALF1 && Σ_eltype(nl) <: RALF1 # only difference in this block and the next block
(F, x, q) -> _homotopy_equations1(F, x, m, q) # is the number of args to retrieve 𝒱_sss and JV
else
(F, x, q) -> _homotopy_equations2(F, x, m, q)
end
qguesses = step:step:1.
if qguesses[end] != 1.
qguesses = vcat(qguesses, 1.)
end
for (i, q) in enumerate(qguesses)
solve_steadystate!(m, getvecvalues(m), _my_eqn, q; autodiff = autodiff,
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
if verbose == :high
println("Success at iteration $(i) of $(length(qguesses))")
end
end
if verbose in [:low, :high]
errvec = steady_state_errors(m)
println("Homotopy succeeded!")
println("Error under norm = $(pnorm) is $(norm(errvec, pnorm)).")
end
update!(m)
return m
end
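# Hedged illustration of the schedule built in `homotopy!` above: with step = 0.3,
# the range step:step:1. yields 0.3, 0.6, 0.9, and since the last entry is not 1.0
# the endpoint is appended, so the final solve always occurs at q = 1 (the true model):
#
#     step = 0.3
#     qguesses = step:step:1.
#     qguesses[end] != 1. && (qguesses = vcat(qguesses, 1.))
#     collect(qguesses)  # [0.3, 0.6, 0.9, 1.0]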
function solve_steadystate!(m::RiskAdjustedLinearization, x0::AbstractVector{S1}, f::Function, q::Float64;
sparse_jacobian::Bool = false,
sparsity::Union{AbstractArray, Nothing} = nothing, colorvec = nothing,
jac_cache = nothing, sparsity_detection::Bool = true, autodiff::Symbol = :central,
verbose::Symbol = :none, kwargs...) where {S1 <: Real}
if sparse_jacobian # Exploit sparsity?
nlsolve_jacobian!, jac =
construct_sparse_jacobian_function(m, (F, x) -> f(F, x, q), :homotopy, autodiff;
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection)
out = nlsolve(OnceDifferentiable((F, x) -> f(F, x, q), nlsolve_jacobian!, x0, copy(x0), jac), x0; kwargs...)
else
# Need to declare chunk size to ensure no problems with reinterpreting the cache
# Potential solution: https://discourse.julialang.org/t/issue-with-pdmp-and-forwardiff-differentialequation/17925/22
# Essentially, it may require developing another cache.
out = nlsolve(OnceDifferentiable((F, x) -> f(F, x, q), x0, copy(x0), autodiff,
ForwardDiff.Chunk(ForwardDiff.pickchunksize(min(m.Nz, m.Ny)))), x0; kwargs...)
end
if out.f_converged
N_zy = m.Nz + m.Ny
m.z .= out.zero[1:m.Nz]
m.y .= out.zero[(m.Nz + 1):N_zy]
m.Ψ .= reshape(out.zero[(N_zy + 1):end], m.Ny, m.Nz)
else
if verbose == :high
println(out)
end
throw(RALHomotopyError("A solution for (z, y, Ψ) to the state transition, expectational, " *
"and linearization equations could not be found when the embedding " *
"parameter q equals $(q)"))
end
end
function _homotopy_equations1(F::AbstractArray, x::AbstractArray, m::RiskAdjustedLinearization, q::Number)
# Unpack
z = @view x[1:m.Nz]
y = @view x[(m.Nz + 1):(m.Nz + m.Ny)]
Ψ = @view x[(m.Nz + m.Ny + 1):end]
Ψ = reshape(Ψ, m.Ny, m.Nz)
# Given coefficients, update the model
update!(m.nonlinear, z, y, Ψ)
update!(m.linearization, z, y, Ψ)
# Calculate residuals
μ_sss = get_tmp(m.nonlinear.μ.cache, z, y, (1, 1)) # select the first DiffCache b/c that one
ξ_sss = get_tmp(m.nonlinear.ξ.cache, z, y, (1, 1)) # corresponds to autodiffing both z and y
𝒱_sss = get_tmp(m.nonlinear.𝒱.cache, z, Ψ, (1, 1)) # This line is different than in _homotopy_equations2
Γ₁ = get_tmp(m.linearization.μz.cache, z, y, (1, 1))
Γ₂ = get_tmp(m.linearization.μy.cache, z, y, (1, 1))
Γ₃ = get_tmp(m.linearization.ξz.cache, z, y, (1, 1))
Γ₄ = get_tmp(m.linearization.ξy.cache, z, y, (1, 1))
JV = get_tmp(m.linearization.J𝒱.cache, z, Ψ, (1, 1)) # This line is different than in _homotopy_equations2
F[1:m.Nz] = μ_sss - z
F[(m.Nz + 1):(m.Nz + m.Ny)] = ξ_sss + m.linearization[:Γ₅] * z + m.linearization[:Γ₆] * y + q * 𝒱_sss
F[((m.Nz + m.Ny) + 1):end] = Γ₃ + Γ₄ * Ψ + (m.linearization[:Γ₅] + m.linearization[:Γ₆] * Ψ) * (Γ₁ + Γ₂ * Ψ) + q * JV
end
function _homotopy_equations2(F::AbstractArray, x::AbstractArray, m::RiskAdjustedLinearization, q::Number)
# Unpack
z = @view x[1:m.Nz]
y = @view x[(m.Nz + 1):(m.Nz + m.Ny)]
Ψ = @view x[(m.Nz + m.Ny + 1):end]
Ψ = reshape(Ψ, m.Ny, m.Nz)
# Given coefficients, update the model
update!(m.nonlinear, z, y, Ψ)
update!(m.linearization, z, y, Ψ)
# Calculate residuals
μ_sss = get_tmp(m.nonlinear.μ.cache, z, y, (1, 1)) # select the first DiffCache b/c that one
ξ_sss = get_tmp(m.nonlinear.ξ.cache, z, y, (1, 1)) # corresponds to autodiffing both z and y
𝒱_sss = get_tmp(m.nonlinear.𝒱.cache, z, y, Ψ, z, (1, 1))
Γ₁ = get_tmp(m.linearization.μz.cache, z, y, (1, 1))
Γ₂ = get_tmp(m.linearization.μy.cache, z, y, (1, 1))
Γ₃ = get_tmp(m.linearization.ξz.cache, z, y, (1, 1))
Γ₄ = get_tmp(m.linearization.ξy.cache, z, y, (1, 1))
JV = get_tmp(m.linearization.J𝒱.cache, z, y, Ψ, (1, 1))
F[1:m.Nz] = μ_sss - z
F[(m.Nz + 1):(m.Nz + m.Ny)] = ξ_sss + m.linearization[:Γ₅] * z + m.linearization[:Γ₆] * y + q * 𝒱_sss
F[((m.Nz + m.Ny) + 1):end] = Γ₃ + Γ₄ * Ψ + (m.linearization[:Γ₅] + m.linearization[:Γ₆] * Ψ) * (Γ₁ + Γ₂ * Ψ) + q * JV
end
mutable struct RALHomotopyError <: Exception
msg::String
end
Base.showerror(io::IO, ex::RALHomotopyError) = print(io, ex.msg)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 11432 | """
```
relaxation!(ral, xₙ₋₁, Ψₙ₋₁; tol = 1e-10, max_iters = 1000, damping = .5, pnorm = Inf,
schur_fnct = schur!, autodiff = :central, use_anderson = false, m = 5,
verbose = :none, kwargs...)
```
solves for the coefficients ``(z, y, \\Psi)`` of a risk-adjusted linearization by the following relaxation algorithm:
1. Initialize guesses for ``(z, y, \\Psi)``
2. Do until convergence
a) Solve for ``(z, y)`` using the expectational and state transition equations and fixing ``\\Psi``.
b) Use a QZ decomposition to solve for ``\\Psi`` while fixing ``(z, y)``.
### Types:
- `S1 <: Number`
- `S2 <: Real`
- `S3 <: Real`
### Inputs
- `ral::RiskAdjustedLinearization`: object holding functions needed to calculate
the risk-adjusted linearization
- `xₙ₋₁::AbstractVector{S1}`: initial guess for ``(z, y)``
- `Ψₙ₋₁::AbstractMatrix{S1}`: initial guess for ``\\Psi``
### Keywords
- `tol::S2`: convergence tolerance of residual norm for relaxation algorithm
- `max_iters::Int`: maximum number of iterations
- `damping::S2`: guesses are updated as the weighted average
`xₙ = (1 - damping) * proposal + damping * xₙ₋₁`.
- `pnorm::S3`: norm for residual tolerance
- `schur_fnct::Function`: function for calculating the Schur factorization during QZ decomposition
- `autodiff::Symbol`: specifies whether to use automatic differentiation in `nlsolve`
(and is the same keyword as the `autodiff` keyword for `nlsolve`)
- `use_anderson::Bool`: set to true to apply Anderson acceleration to the
fixed point iteration of the relaxation algorithm
- `m::Int`: `m` coefficient if using Anderson acceleration
- `sparse_jacobian::Bool = false`: if true, exploit sparsity in the Jacobian in calls to `nlsolve` using SparseDiffTools.jl.
If `jac_cache` and `sparsity` are `nothing`, then `relaxation!` will attempt to determine the sparsity pattern.
- `sparsity::Union{AbstractArray, Nothing} = nothing`: sparsity pattern for the Jacobian in calls to `nlsolve`
- `colorvec = nothing`: matrix coloring vector for sparse Jacobian in calls to `nlsolve`
- `jac_cache = nothing`: pre-allocated Jacobian cache for calls to `nlsolve` during the numerical algorithms
- `sparsity_detection::Bool = true`: If true, use SparsityDetection.jl to detect sparsity pattern (only relevant if
both `jac_cache` and `sparsity` are `nothing`). If false, then the sparsity pattern is
determined by using finite differences to calculate a Jacobian and assuming any zeros will always be zero.
Currently, SparsityDetection.jl fails to work.
- `verbose::Symbol`: verbosity of information printed out during solution.
a) `:low` -> statement when the relaxation algorithm succeeds
b) `:high` -> statement when the relaxation algorithm succeeds and for each successful iteration
"""
function relaxation!(ral::RiskAdjustedLinearization, xₙ₋₁::AbstractVector{S1}, Ψₙ₋₁::AbstractMatrix{S1};
tol::S2 = 1e-10, max_iters::Int = 1000, damping::S2 = .5, pnorm::S3 = Inf,
schur_fnct::Function = schur!, autodiff::Symbol = :central,
use_anderson::Bool = false, m::Int = 5,
sparse_jacobian::Bool = false, sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, jac_cache = nothing,
sparsity_detection::Bool = true, verbose::Symbol = :none,
kwargs...) where {S1 <: Number, S2 <: Real, S3 <: Real}
# Set up
err = 1.
nl = nonlinear_system(ral)
li = linearized_system(ral)
Nzy = ral.Nz + ral.Ny
AA = Matrix{Complex{S1}}(undef, Nzy, Nzy) # pre-allocate these matrices to calculate QZ decomp for Ψ
BB = similar(AA)
# Initialize system of equations
_my_eqn = (F, x, Ψ, 𝒱) -> _relaxation_equations(F, x, ral, Ψ, 𝒱)
if use_anderson
# Some aliases/views will be useful
zₙ = ral.z
yₙ = ral.y
Ψₙ = ral.Ψ
𝒱ₙ₋₁ = nl[:𝒱_sss]
J𝒱ₙ₋₁ = li[:JV]
_anderson_f = function _my_anderson(F::AbstractArray{T}, xₙ₋₁::AbstractVector{T}) where {T <: Number}
zₙ₋₁ = @view xₙ₋₁[1:ral.Nz]
yₙ₋₁ = @view xₙ₋₁[(ral.Nz + 1):Nzy]
Ψₙ₋₁ = @view xₙ₋₁[(Nzy + 1):end]
Ψₙ₋₁ = reshape(Ψₙ₋₁, ral.Ny, ral.Nz)
# Calculate entropy terms 𝒱ₙ₋₁, J𝒱ₙ₋₁
update!(nl, zₙ₋₁, yₙ₋₁, Ψₙ₋₁; select = Symbol[:𝒱]) # updates nl.𝒱_sss
update!(li, zₙ₋₁, yₙ₋₁, Ψₙ₋₁; select = Symbol[:JV]) # updates li.JV
# Solve state transition and expectational equations for (zₙ, yₙ), taking 𝒱ₙ₋₁ and Ψₙ₋₁ as given
solve_steadystate!(ral, vcat(zₙ₋₁, yₙ₋₁), _my_eqn, Ψₙ₋₁, 𝒱ₙ₋₁; autodiff = autodiff, # updates ral.z and ral.y
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
# Update Γ₁, Γ₂, Γ₃, Γ₄, given (zₙ, yₙ)
update!(li, zₙ, yₙ, Ψₙ₋₁; select = Symbol[:Γ₁, :Γ₂, :Γ₃, :Γ₄]) # updates li.Γᵢ
# QZ decomposition to get Ψₙ, taking Γ₁, Γ₂, Γ₃, Γ₄, and J𝒱ₙ₋₁ as given
Ψₙ .= compute_Ψ!(AA, BB, li; schur_fnct = schur_fnct)
# Update zₙ, yₙ, and Ψₙ; then calculate error for convergence check
zₙ .= (1 - damping) .* zₙ + damping .* zₙ₋₁
yₙ .= (1 - damping) .* yₙ + damping .* yₙ₋₁
Ψₙ .= (1 - damping) .* Ψₙ + damping .* Ψₙ₋₁
err = norm(vcat(zₙ - zₙ₋₁, yₙ - yₙ₋₁, vec(Ψₙ - Ψₙ₋₁)), pnorm)
# Calculate residual
F[1:ral.Nz] = zₙ - zₙ₋₁
F[(ral.Nz + 1):Nzy] = yₙ - yₙ₋₁
F[(Nzy + 1):end] = vec(Ψₙ - Ψₙ₋₁)
return F
end
out = nlsolve(_anderson_f, vcat(xₙ₋₁, vec(Ψₙ₋₁)); m = m, ftol = tol, iterations = max_iters)
count = out.iterations
if out.f_converged
update!(ral, out.zero[1:ral.Nz], out.zero[(ral.Nz + 1):Nzy],
reshape(out.zero[(Nzy + 1):end], ral.Ny, ral.Nz); update_cache = false)
end
else
count = 1
# Some aliases/views will be useful
zₙ₋₁ = @view xₙ₋₁[1:ral.Nz]
yₙ₋₁ = @view xₙ₋₁[(ral.Nz + 1):end]
zₙ = ral.z
yₙ = ral.y
Ψₙ = ral.Ψ
𝒱ₙ₋₁ = nl[:𝒱_sss]
J𝒱ₙ₋₁ = li[:JV]
while (err > tol) && (count < max_iters)
# Calculate entropy terms 𝒱ₙ₋₁, J𝒱ₙ₋₁
update!(nl, zₙ₋₁, yₙ₋₁, Ψₙ₋₁; select = Symbol[:𝒱]) # updates nl.𝒱_sss
update!(li, zₙ₋₁, yₙ₋₁, Ψₙ₋₁; select = Symbol[:JV]) # updates li.JV
# Solve state transition and expectational equations for (zₙ, yₙ), taking 𝒱ₙ₋₁ and Ψₙ₋₁ as given
solve_steadystate!(ral, xₙ₋₁, _my_eqn, Ψₙ₋₁, 𝒱ₙ₋₁; autodiff = autodiff, # updates ral.z and ral.y
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
# Update Γ₁, Γ₂, Γ₃, Γ₄, given (zₙ, yₙ)
update!(li, zₙ, yₙ, Ψₙ₋₁; select = Symbol[:Γ₁, :Γ₂, :Γ₃, :Γ₄]) # updates li.Γᵢ
# QZ decomposition to get Ψₙ, taking Γ₁, Γ₂, Γ₃, Γ₄, and J𝒱ₙ₋₁ as given
Ψₙ .= compute_Ψ!(AA, BB, li; schur_fnct = schur_fnct)
# Update zₙ, yₙ, and Ψₙ; then calculate error for convergence check
zₙ .= (1 - damping) .* zₙ + damping .* zₙ₋₁
yₙ .= (1 - damping) .* yₙ + damping .* yₙ₋₁
Ψₙ .= (1 - damping) .* Ψₙ + damping .* Ψₙ₋₁
err = norm(vcat(zₙ - zₙ₋₁, yₙ - yₙ₋₁, vec(Ψₙ - Ψₙ₋₁)), pnorm)
# Update zₙ₋₁, yₙ₋₁, and Ψₙ₋₁ (without reallocating them)
zₙ₋₁ .= zₙ
yₙ₋₁ .= yₙ
Ψₙ₋₁ .= Ψₙ
if verbose == :high
println("Iteration $(count): error under norm=$(pnorm) is $(err)")
end
count += 1
end
end
if count == max_iters
throw(RALRelaxationError("Relaxation method to find the risk-adjusted linearization did not converge."))
else
update!(ral)
if verbose == :low
errvec = steady_state_errors(ral)
println("Convergence achieved after $(count) iterations! Error under norm = $(pnorm) is " *
"$(norm(errvec, pnorm)).")
elseif verbose == :high
errvec = steady_state_errors(ral)
println("")
println("Convergence achieved after $(count) iterations! Error under norm = $(pnorm) is " *
"$(norm(errvec, pnorm)).")
end
return ral
end
end
function solve_steadystate!(m::RiskAdjustedLinearization, x0::AbstractVector{S1},
f::Function, Ψ::AbstractMatrix{<: Number}, 𝒱::AbstractVector{<: Number};
sparse_jacobian::Bool = false, sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, jac_cache = nothing,
sparsity_detection::Bool = true, autodiff::Symbol = :central,
verbose::Symbol = :none, kwargs...) where {S1 <: Real}
# Exploit sparsity?
if sparse_jacobian
nlsolve_jacobian!, jac =
construct_sparse_jacobian_function(m, (F, x) -> f(F, x, Ψ, 𝒱), :relaxation, autodiff;
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache, sparsity_detection = sparsity_detection)
out = nlsolve(OnceDifferentiable((F, x) -> f(F, x, Ψ, 𝒱), nlsolve_jacobian!, x0, copy(x0), jac), x0; kwargs...)
else
out = nlsolve(OnceDifferentiable((F, x) -> f(F, x, Ψ, 𝒱), x0, copy(x0), autodiff,
ForwardDiff.Chunk(ForwardDiff.pickchunksize(min(m.Nz, m.Ny)))), x0; kwargs...)
end
if out.f_converged
m.z .= out.zero[1:m.Nz]
m.y .= out.zero[(m.Nz + 1):end]
else
if verbose == :high
println(out)
end
throw(RALRelaxationError())
end
end
function _relaxation_equations(F::AbstractArray, x::AbstractArray, m::RiskAdjustedLinearization,
Ψ::AbstractMatrix{<: Number}, 𝒱::AbstractVector{<: Number})
# Unpack
z = @view x[1:m.Nz]
y = @view x[(m.Nz + 1):end]
# Update μ(z, y) and ξ(z, y)
update!(m.nonlinear, z, y, Ψ; select = Symbol[:μ, :ξ])
# Calculate residuals
μ_sss = get_tmp(m.nonlinear.μ.cache, z, y, (1, 1)) # select the first DiffCache b/c that one
ξ_sss = get_tmp(m.nonlinear.ξ.cache, z, y, (1, 1)) # corresponds to autodiffing both z and y
F[1:m.Nz] = μ_sss - z
F[(m.Nz + 1):end] = ξ_sss + m.linearization[:Γ₅] * z + m.linearization[:Γ₆] * y + 𝒱
end
mutable struct RALRelaxationError <: Exception
msg::String
end
RALRelaxationError() =
RALRelaxationError("A solution for (z, y), given Ψ and 𝒱, to the state transition and expectational equations could not be found.")
Base.showerror(io::IO, ex::RALRelaxationError) = print(io, ex.msg)
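# Hedged numeric illustration of the damped update in the relaxation loop above:
# each iterate is a convex combination of the proposal and the previous guess,
#
#     damping = 0.5
#     proposal, previous = 2.0, 1.0
#     new_iterate = (1 - damping) * proposal + damping * previous  # 1.5
#
# so damping closer to 1 keeps the iterate nearer the previous guess, trading
# speed for stability of the fixed-point iteration.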
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 11359 | """
```
solve!(m; algorithm = :relaxation, autodiff = :central, verbose = :high, kwargs...)
solve!(m, z0, y0; kwargs...)
solve!(m, z0, y0, Ψ0; kwargs...)
```
computes the risk-adjusted linearization of the dynamic economic model
described by `m` and updates `m` with the solution,
e.g. the coefficients ``(z, y, \\Psi)``.
The three available `solve!` algorithms are slight variations on each other.
- Method 1: uses the `z`, `y`, and `Ψ` fields of `m` as initial guesses
for ``(z, y, \\Psi)`` and proceeds with the numerical algorithm
specified by `algorithm`
- Method 2: uses `z0` and `y0` as initial guesses for the deterministic
steady state, which is then used as the initial guess for ``(z, y, \\Psi)``
for the numerical algorithm specified by `algorithm`.
- Method 3: uses `z0`, `y0`, and `Ψ0` as initial guesses for ``(z, y, \\Psi)``
and proceeds with the numerical algorithm specified by `algorithm`.
### Inputs
- `m::RiskAdjustedLinearization`: object holding functions needed to calculate
the risk-adjusted linearization
- `z0::AbstractVector{S1}`: initial guess for ``z``
- `y0::AbstractVector{S1}`: initial guess for ``y``
- `Ψ0::AbstractMatrix{S1}`: initial guess for ``\\Psi``
- `S1 <: Real`
### Keywords
- `algorithm::Symbol = :relaxation`: which numerical algorithm to use? Can be one of `[:relaxation, :homotopy, :deterministic]`
- `autodiff::Symbol = :central`: use autodiff or not? This keyword is the same as in `nlsolve`
- `use_anderson::Bool = false`: use Anderson acceleration if the relaxation algorithm is applied. Defaults to `false`
- `step::Float64 = .1`: size of step from 0 to 1 if the homotopy algorithm is applied. Defaults to 0.1
- `sparse_jacobian::Bool = false`: if true, exploit sparsity in the Jacobian in calls to `nlsolve` using SparseDiffTools.jl.
If `jac_cache` and `sparsity` are `nothing`, then `solve!` will attempt to determine the sparsity pattern.
- `sparsity::Union{AbstractArray, Nothing} = nothing`: sparsity pattern for the Jacobian in calls to `nlsolve`
- `colorvec = nothing`: matrix coloring vector for sparse Jacobian in calls to `nlsolve`
- `jac_cache = nothing`: pre-allocated Jacobian cache for calls to `nlsolve` during the numerical algorithms
- `sparsity_detection::Bool = false`: If true, use SparsityDetection.jl to detect sparsity pattern (only relevant if
both `jac_cache` and `sparsity` are `nothing`). If false, then the sparsity pattern is
determined by using finite differences to calculate a Jacobian and assuming any zeros will always be zero.
Currently, SparsityDetection.jl fails to work.
The solution algorithms all use `nlsolve` to calculate the solution to systems of nonlinear
equations. The user can pass in any of the keyword arguments for `nlsolve` to adjust
the settings of the nonlinear solver.
For the keywords relevant to specific methods, see the docstring for the underlying method being called.
Note these methods are not exported.
- `:relaxation` -> `relaxation!`
- `:homotopy` -> `homotopy!`
- `:deterministic` -> `deterministic_steadystate!`
"""
function solve!(m::RiskAdjustedLinearization; algorithm::Symbol = :relaxation,
autodiff::Symbol = :central, use_anderson::Bool = false,
step::Float64 = .1, sparse_jacobian::Bool = false,
sparsity::Union{AbstractArray, Nothing} = nothing, colorvec = nothing,
jac_cache = nothing, sparsity_detection::Bool = false,
verbose::Symbol = :high, kwargs...)
if algorithm == :deterministic
solve!(m, m.z, m.y; algorithm = algorithm, autodiff = autodiff,
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache, sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
else
solve!(m, m.z, m.y, m.Ψ; algorithm = algorithm, autodiff = autodiff,
use_anderson = use_anderson, step = step,
sparse_jacobian = sparse_jacobian, sparsity = sparsity,
colorvec = colorvec, jac_cache = jac_cache,
sparsity_detection = sparsity_detection, verbose = verbose, kwargs...)
end
end
function solve!(m::RiskAdjustedLinearization, z0::AbstractVector{S1}, y0::AbstractVector{S1};
algorithm::Symbol = :relaxation, autodiff::Symbol = :central,
use_anderson::Bool = false, step::Float64 = .1,
sparse_jacobian::Bool = false,
sparsity::Union{AbstractArray, Nothing} = nothing, colorvec = nothing,
jac_cache = nothing, sparsity_detection::Bool = false,
verbose::Symbol = :high, kwargs...) where {S1 <: Real}
@assert algorithm in [:deterministic, :relaxation, :homotopy] "The algorithm must be :deterministic, :relaxation, or :homotopy"
# Deterministic steady state
deterministic_steadystate!(m, vcat(z0, y0); autodiff = autodiff,
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
# Calculate linearization
nl = nonlinear_system(m)
li = linearized_system(m)
update!(nl, m.z, m.y, m.Ψ; select = Symbol[:μ, :ξ])
update!(li, m.z, m.y, m.Ψ; select = Symbol[:Γ₁, :Γ₂, :Γ₃, :Γ₄])
# Back out Ψ
m.Ψ .= compute_Ψ(m; zero_entropy_jacobian = true)
# Use deterministic steady state as guess for stochastic steady state?
if algorithm == :deterministic
# Zero the entropy and Jacobian terms so they are not left undefined
m.nonlinear[:𝒱_sss] .= 0.
m.linearization[:JV] .= 0.
# Check Blanchard-Kahn
blanchard_kahn(m; deterministic = true, verbose = verbose)
else
solve!(m, m.z, m.y, m.Ψ; algorithm = algorithm, autodiff = autodiff,
use_anderson = use_anderson, step = step,
sparse_jacobian = sparse_jacobian,
jac_cache = jac_cache, sparsity = sparsity,
colorvec = colorvec, sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
end
m
end
function solve!(m::RiskAdjustedLinearization, z0::AbstractVector{S1}, y0::AbstractVector{S1}, Ψ0::AbstractMatrix{S1};
algorithm::Symbol = :relaxation, autodiff::Symbol = :central,
use_anderson::Bool = false, step::Float64 = .1,
sparse_jacobian::Bool = false, sparsity::Union{AbstractArray, Nothing} = nothing,
colorvec = nothing, jac_cache = nothing, sparsity_detection::Bool = false,
verbose::Symbol = :high, kwargs...) where {S1 <: Number}
@assert algorithm in [:relaxation, :homotopy] "The algorithm must be :relaxation or :homotopy because this function calculates the stochastic steady state"
# Stochastic steady state
if algorithm == :relaxation
N_zy = m.Nz + m.Ny
relaxation!(m, vcat(z0, y0), Ψ0; autodiff = autodiff,
use_anderson = use_anderson, sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache, sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
elseif algorithm == :homotopy
homotopy!(m, vcat(z0, y0, vec(Ψ0)); autodiff = autodiff, step = step,
sparse_jacobian = sparse_jacobian,
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache, sparsity_detection = sparsity_detection,
verbose = verbose, kwargs...)
end
# Check Blanchard-Kahn
blanchard_kahn(m; deterministic = false, verbose = verbose)
m
end
"""
```
function deterministic_steadystate!(m, x0; verbose = :none, kwargs...)
```
calculates the deterministic steady state.
### Types:
- `S1 <: Real`
### Inputs
- `m::RiskAdjustedLinearization`: object holding functions needed to calculate
the risk-adjusted linearization
- `x0::AbstractVector{S1}`: initial guess for ``(z, y)``
"""
function deterministic_steadystate!(m::RiskAdjustedLinearization, x0::AbstractVector{S1};
autodiff::Symbol = :central,
sparse_jacobian::Bool = false,
sparsity::Union{AbstractArray, Nothing} = nothing, colorvec = nothing,
jac_cache = nothing, sparsity_detection::Bool = false,
verbose::Symbol = :none, kwargs...) where {S1 <: Real}
# Set up system of equations
_my_eqn = (F, x) -> _deterministic_equations(F, x, m)
# Exploit sparsity?
if sparse_jacobian
nlsolve_jacobian!, jac =
construct_sparse_jacobian_function(m, _my_eqn, :deterministic, autodiff;
sparsity = sparsity, colorvec = colorvec,
jac_cache = jac_cache,
sparsity_detection = sparsity_detection)
out = nlsolve(OnceDifferentiable(_my_eqn, nlsolve_jacobian!, x0, copy(x0), jac), x0; kwargs...)
else
out = nlsolve(OnceDifferentiable(_my_eqn, x0, copy(x0), autodiff,
ForwardDiff.Chunk(min(m.Nz, m.Ny))), x0; kwargs...)
end
if out.f_converged
m.z .= out.zero[1:m.Nz]
m.y .= out.zero[(m.Nz + 1):end]
if verbose in [:low, :high]
println("A deterministic steady state has been found")
end
else
error("A deterministic steady state could not be found.")
end
end
function _deterministic_equations(F::AbstractVector{<: Number}, x::AbstractVector{<: Number},
m::RiskAdjustedLinearization)
# Unpack input vector
z = @view x[1:m.Nz]
y = @view x[(m.Nz + 1):end]
# Update μ(z, y) and ξ(z, y)
update!(m.nonlinear, z, y, m.Ψ; select = Symbol[:μ, :ξ])
# Calculate residuals
μ_sss = get_tmp(m.nonlinear.μ.cache, z, y, (1, 1)) # select the first DiffCache b/c that
ξ_sss = get_tmp(m.nonlinear.ξ.cache, z, y, (1, 1)) # one corresponds to autodiffing both z and y
F[1:m.Nz] = μ_sss - z
F[(m.Nz + 1):end] = ξ_sss + m.linearization[:Γ₅] * z + m.linearization[:Γ₆] * y
end
"""
```
steady_state_errors(m::RiskAdjustedLinearization, stochastic_steady_state::Bool = true)
```
calculates the errors in the system of equations characterizing the steady state.
When the second input is `true`, the steady state is the stochastic steady state,
and when it is false, the steady state is the deterministic steady state.
"""
function steady_state_errors(m::RiskAdjustedLinearization, stochastic_steady_state::Bool = true)
if stochastic_steady_state
return vcat(m[:μ_sss] - m.z, m[:ξ_sss] + m[:Γ₅] * m.z + m[:Γ₆] * m.y + m[:𝒱_sss],
vec(m[:Γ₃] + m[:Γ₄] * m.Ψ + (m[:Γ₅] + m[:Γ₆] * m.Ψ) * (m[:Γ₁] + m[:Γ₂] * m.Ψ) + m[:JV]))
else
return vcat(m[:μ_sss] - m.z, m[:ξ_sss] + m[:Γ₅] * m.z + m[:Γ₆] * m.y)
end
end
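# Hedged usage sketch, assuming `m` is an instantiated RiskAdjustedLinearization
# (e.g. built from one of the package's example models):
#
#     solve!(m; algorithm = :deterministic, verbose = :low)  # deterministic steady state only
#     solve!(m; algorithm = :relaxation)                     # stochastic steady state via relaxation
#     maximum(abs, steady_state_errors(m))                   # residuals ≈ 0 at the stochastic solution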
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 12271 | """
```
euler_equation_error(m, cₜ, logSDFxR, 𝔼_quadrature, zₜ = m.z;
c_init = NaN, return_soln = false, kwargs...)
euler_equation_error(m, cₜ, logSDFxR, 𝔼_quadrature, shock_matrix, zₜ = m.z;
c_init = NaN, summary_statistic = x -> norm(x, Inf), burnin = 0,
return_soln = false, kwargs...)
```
calculates standard Euler equation errors, as recommended by Judd (1992).
The first method calculates the error at some state `zₜ`, which defaults
to the stochastic steady state. The second method simulates the state
vector from an initial state `zₜ` (defaults to stochastic steady state)
given a sequence of drawn shocks, evaluates the Euler equation errors,
and returns some summary statistic of the errors specified by the keyword
`summary_statistic`.
The Euler equation is
```math
\\begin{aligned}
0 = \\log \\mathbb{E}_t \\exp(m_{t + 1} + r_{t + 1}) = \\log \\mathbb{E}_t[M_{t + 1} R_{t + 1}],
\\end{aligned}
```
where ``m_{t + 1} = \\log(M_{t + 1})`` is the log stochastic discount factor and ``r_{t + 1} = \\log(R_{t + 1})``
is the risk free rate.
### Inputs
- `m::RiskAdjustedLinearization`: A solved instance of a risk-adjusted linearization
- `cₜ::Function`: a function of `(m, zₜ)` that calculates consumption at state `zₜ`, given the
state-space representation implied by `m`.
- `logSDFxR::Function`: a `Function` evaluating ``m_{t + 1} + r_{t + 1}``. The `Function` must
take as input `(m, zₜ, εₜ₊₁, c)`, where `m` is a `RiskAdjustedLinearization`,
`zₜ` is a state vector at which to evaluate, `εₜ₊₁` is a draw from the distribution
of exogenous shocks, and `c` is a guess for consumption at `zₜ` implied by
the conditional expectation in the Euler equation when calculated with a quadrature rule.
Note that `c` can be either the consumption level or some transformation (e.g. log consumption),
but the user should be consistent in the definition of the `cₜ` function with the guess `c`,
i.e. both should return the same transformation of consumption (e.g. both should return the level).
- `𝔼_quadrature::Function`: a quadrature rule whose single input is a `Function` with a single
input, which is a shock `εₜ₊₁`.
- `zₜ::AbstractVector`: a state at which to evaluate the Euler equation error
- `shock_matrix::AbstractMatrix`: an `Nε × T` matrix of shocks drawn from the distribution of exogenous shocks.
### Keywords
- `c_init::Number`: an initial guess to be used when solving the "true" consumption policy using
quadrature. The default is the consumption policy according to the `RiskAdjustedLinearization`
- `summary_statistic::Function`: a `Function` used to compute a summary statistic from the
ergodic set of Euler equation errors. The default is the maximum absolute error.
- `burnin::Int`: number of periods to drop as burn-in
- `return_soln::Bool`: if true, return the solution to the nonlinear equation instead of the error
- `kwargs`: Any keyword arguments for `nlsolve` can be passed, too, e.g. `ftol` or `autodiff`
since `nlsolve` is used to calculate the "true" consumption policy.
"""
function euler_equation_error(m::RiskAdjustedLinearization, cₜ::Function, logSDFxR::Function, 𝔼_quadrature::Function,
zₜ::AbstractVector = m.z; c_init::Number = NaN,
return_soln::Bool = false, kwargs...)
# Compute expected consumption according to RAL
c_ral = cₜ(m, zₜ)
# Compute implied consumption according to the quadrature rule
out = nlsolve(c -> [log(𝔼_quadrature(εₜ₊₁ -> exp(logSDFxR(m, zₜ, εₜ₊₁, c[1]))))], [isnan(c_init) ? c_ral : c_init];
kwargs...)
if out.f_converged
c_impl = out.zero[1]
else
error("Failed to solve implied consumption.")
end
# Return error in unit-free terms
if return_soln
return c_impl
else
return (c_ral - c_impl) / c_ral
end
end
function euler_equation_error(m::RiskAdjustedLinearization, cₜ::Function, logSDFxR::Function, 𝔼_quadrature::Function,
shock_matrix::AbstractMatrix, zₜ::AbstractVector = m.z; c_init::Number = NaN,
summary_statistic::Function = x -> norm(x, Inf), burnin::Int = 0,
return_soln::Bool = false, kwargs...)
# Set up
T = size(shock_matrix, 2)
# Simulate states
states, _ = simulate(m, T, shock_matrix, zₜ)
# Compute implied consumption according to the quadrature rule for each state
# and expected consumption according to RAL
err = [euler_equation_error(m, cₜ, logSDFxR, 𝔼_quadrature, (@view states[:, t]); c_init = c_init,
return_soln = return_soln, kwargs...) for t in (burnin + 1):T]
# Return error in unit-free terms
if return_soln
return err
else
return summary_statistic(err)
end
end
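# Hedged sketch of a user-supplied `logSDFxR` (assumptions: CRRA preferences with
# discount factor β and risk aversion γ; `cₜ` returns log consumption; `log_rf`,
# which extracts the log risk-free rate from the model at state zₜ, is hypothetical
# and model-specific, not part of this package):
#
#     function my_logSDFxR(m, zₜ, εₜ₊₁, c)
#         zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)                     # one-step transition given the shock
#         return log(β) - γ * (cₜ(m, zₜ₊₁) - c) + log_rf(m, zₜ)  # mₜ₊₁ + rₜ₊₁
#     end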
# n-period ahead Euler equation errors are obtained by the following recursion.
# Compute cₜ⁽¹⁾ by solving
# 0 = log Eₜ[exp(mₜ₊₁(cₜ₊₁⁽⁰⁾, cₜ⁽¹⁾) + rₜ)],
# where cₜ₊₁⁽⁰⁾ is just the consumption function obtained from
# an RAL evaluated at a state in t + 1 (i.e., given zₜ and a
# a draw of shocks εₜ₊₁, one can calculate zₜ₊₁ and then
# evaluate cₜ₊₁⁽⁰⁾ = c + Ψ (zₜ₊₁ - z)).
# This approach implicitly defines a mapping cₜ⁽¹⁾ at every
# state zₜ since the expectation must be calculated conditional
# on the state zₜ. It follows that if cₜ⁽²⁾ solves
# 0 = log Eₜ[exp(log(β) - γ * (cₜ₊₁⁽¹⁾ - cₜ⁽²⁾) + rₜ)],
# then we calculate cₜ₊₁⁽¹⁾ by solving the first equation
# given an initial state zₜ₊₁. Since solving the first equation
# requires pushing the initial state forward by one period,
# calculating cₜ₊₁⁽¹⁾ means solving
# 0 = log Eₜ₊₁[exp(mₜ₊₂(cₜ₊₂⁽⁰⁾, cₜ₊₁⁽¹⁾) + rₜ₊₁)],
# hence cₜ⁽²⁾ is indeed a 2-period ahead Euler equation error
# b/c the recursion implicitly involves calculating what
# expected consumption is 2 periods ahead. Since
# we approximate the expectation with quadrature,
# this approach is recursive in that, we select some
# initial state zₜ, compute a set of {zₜ₊₁} implied
# by zₜ and draws εₜ₊₁, compute a new set {zₜ₊₂} for each
# zₜ₊₁ and draws εₜ₊₂ (for each zₜ₊₁), and so on until
# we have computed zₜ₊ₙ
"""
```
dynamic_euler_equation_error(m, cₜ, logSDFxR, 𝔼_quadrature, endo_states, n_aug,
shock_matrix, z₀ = m.z; c_init = NaN, summary_statistic = x -> norm(x, Inf),
burnin = 0, return_soln = false, kwargs...)
```
calculates dynamic Euler equation errors, as proposed in Den Haan (2009).
The Euler equation is
```math
\\begin{aligned}
0 = \\log \\mathbb{E}_t \\exp(m_{t + 1} + r_{t + 1}) = \\log \\mathbb{E}_t[M_{t + 1} R_{t + 1}],
\\end{aligned}
```
where ``m_{t + 1} = \\log(M_{t + 1})`` is the log stochastic discount factor and ``r_{t + 1} = \\log(R_{t + 1})``
is the risk free rate.
The dynamic errors are computed according to the following algorithm.
1. Simulate according to the risk-adjusted linearization time series for the state variables
2. Using the time series from 1, compute time series for consumption and
some state variable (usually capital) that can ensure budget constraints hold and markets
clear when computing consumption by applying quadrature.
3. Generate a second "implied" time series for consumption and the "capital" state variable,
starting from the same initial state as 2. Repeat the following steps at each time period.
(i) Compute the conditional expectation in the Euler equation using quadrature to
obtain implied consumption.
(ii) Use budget constraint/market-clearing to compute implied capital.
By default, `dynamic_euler_equation_error` returns some summary statistic of the errors
specified by the keyword `summary_statistic`.
### Inputs
- `m::RiskAdjustedLinearization`: A solved instance of a risk-adjusted linearization
- `cₜ::Function`: a function of `(m, zₜ)` that calculates consumption at state `zₜ`, given the
state-space representation implied by `m`.
- `logSDFxR::Function`: a `Function` evaluating ``m_{t + 1} + r_{t + 1}``. The `Function` must
take as input `(m, zₜ, εₜ₊₁, cₜ)`, where `m` is a `RiskAdjustedLinearization`,
`zₜ` is a state vector at which to evaluate, `εₜ₊₁` is a draw from the distribution
of exogenous shocks, and `cₜ` is a guess for consumption at `zₜ` implied by
the conditional expectation in the Euler equation when calculated with a quadrature rule.
- `𝔼_quadrature::Function`: a quadrature rule whose single input is a `Function` with a single
input, which is a shock `εₜ₊₁`.
- `endo_states::Function`: augments the state variables in the risk-adjusted linearization,
usually with one additional variable, which represents capital or assets.
- `n_aug::Int`: number of extra state variables added by `endo_states` (usually 1).
- `z₀::AbstractVector`: the initial state from which the economy is simulated
- `shock_matrix::AbstractMatrix`: an `Nε × T` matrix of shocks drawn from the distribution of exogenous shocks.
### Keywords
- `c_init::Number`: an initial guess to be used when solving the true consumption policy using
quadrature. The default is the consumption policy according to the `RiskAdjustedLinearization`
- `summary_statistic::Function`: a `Function` used to compute a summary statistic from the
ergodic set of Euler equation errors. The default is the maximum absolute error.
- `burnin::Int`: number of periods to drop as burn-in
- `kwargs`: Any keyword arguments for `nlsolve` can be passed, too, e.g. `ftol` or `autodiff`
since `nlsolve` is used to calculate the "true" consumption policy.
"""
function dynamic_euler_equation_error(m::RiskAdjustedLinearization, cₜ::Function, logSDFxR::Function,
𝔼_quadrature::Function, endo_states::Function, n_aug::Int,
shock_matrix::AbstractMatrix, z₀::AbstractVector = m.z;
c_init::Number = NaN, summary_statistic::Function = x -> norm(x, Inf),
burnin::Int = 0, return_soln::Bool = false, kwargs...)
# Set up
T = size(shock_matrix, 2)
c_impl = Vector{eltype(shock_matrix)}(undef, T)
# Simulate states and calculate consumption according to RAL
states, _ = simulate(m, T, shock_matrix, z₀)
c_ral = [cₜ(m, (@view states[:, t])) for t in 1:T]
orig_i = 1:size(states, 1)
# Additional set up
endo_states_impl = similar(states, length(orig_i) + n_aug, T)
endo_states_ral = similar(endo_states_impl)
# For each state, calculate conditional expectation using quadrature rule
# and compute the implied states
out = nlsolve(c -> [log(𝔼_quadrature(εₜ₊₁ -> exp(logSDFxR(m, (@view states[:, 1]), εₜ₊₁, c[1]))))], [isnan(c_init) ? c_ral[1] : c_init];
kwargs...) # Do period 1 separately b/c needed to initialize endo_states_impl
if out.f_converged
c_impl[1] = out.zero[1]
else
error("Failed to solve implied consumption in period 1 of $T.")
end
endo_states_impl[:, 1] = endo_states(m, (@view states[:, 1]), z₀, c_impl[1])
endo_states_ral[:, 1] = endo_states(m, (@view states[:, 1]), z₀, c_ral[1])
for t in 2:T
out = nlsolve(c -> [log(𝔼_quadrature(εₜ₊₁ -> exp(logSDFxR(m, (@view states[:, t]), εₜ₊₁, c[1]))))], [isnan(c_init) ? c_ral[t] : c_init];
kwargs...)
if out.f_converged
c_impl[t] = out.zero[1]
else
error("Failed to solve implied consumption in period $t of $T.")
end
endo_states_impl[:, t] = endo_states(m, (@view states[:, t]), (@view endo_states_impl[orig_i, t - 1]), c_impl[t])
endo_states_ral[:, t] = endo_states(m, (@view states[:, t]), (@view endo_states_ral[orig_i, t - 1]), c_ral[t])
end
# Calculate the errors
if return_soln
return c_ral[(burnin + 1):end], c_impl[(burnin + 1):end], endo_states_ral[:, (burnin + 1):end], endo_states_impl[:, (burnin + 1):end]
else
return summary_statistic(((@view c_ral[(burnin + 1):end]) - (@view c_impl[(burnin + 1):end])) ./ (@view c_ral[(burnin + 1):end])), summary_statistic(vec((@view endo_states_ral[:, (burnin + 1):end]) - (@view endo_states_impl[:, (burnin + 1):end])) ./ vec((@view endo_states_ral[:, (burnin + 1):end])))
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 7537 | function standard_normal_gausshermite(n::Int)
ϵᵢ, wᵢ = gausshermite(n) # approximates exp(-x²)
ϵᵢ .*= sqrt(2.) # Normalize ϵᵢ and wᵢ nodes to approximate standard normal
wᵢ ./= sqrt(π)
return ϵᵢ, wᵢ
end
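# Hedged self-contained check: after the rescaling above, the nodes and weights
# should reproduce standard-normal moments, e.g.
#
#     ϵᵢ, wᵢ = standard_normal_gausshermite(10)
#     sum(wᵢ) ≈ 1.            # total mass
#     sum(wᵢ .* ϵᵢ)           # mean: zero up to roundoff (compare with an atol)
#     sum(wᵢ .* ϵᵢ.^2) ≈ 1.   # variance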
"""
```
gausshermite_expectation(f, μ, σ, n = 10)
gausshermite_expectation(f, μ, Σ, n = 10)
gausshermite_expectation(f, μ, Σ, ns)
```
calculates the expectation of a function of a Gaussian random variable/vector.
The first method evaluates ``\\mathbb{E}[f(X)]`` where ``X \\sim N(\\mu, \\sigma)``,
while the other two methods evaluate ``\\mathbb{E}[f(X)]`` where
``X \\sim \\mathcal{N}(\\mu, \\Sigma)`` and ``\\Sigma`` is diagonal.
The latter two methods differ in that the first assumes the same number of
quadrature points in every dimension while the second does not.
### Inputs
- `f::Function`: some function of a random variable. If `f(x) = x`, then
`gausshermite_expectation(f, μ, σ)` calculates the mean of ``N(\\mu, \\sigma)``
using 10-point Gauss-Hermite quadrature.
- `μ::Number` or `μ::AbstractVector`: mean of the Gaussian random variable/vector.
- `σ::Number`: standard deviation of the Gaussian random variable.
- `Σ::AbstractVector`: diagonal of the variance-covariance matrix of
the Gaussian random vector.
- `n::Int`: number of quadrature points to use
- `ns::AbstractVector{Int}` or `ns::NTuple{N, Int} where N`: number of quadrature points to use
in each dimension of the Gaussian random vector.
"""
function gausshermite_expectation(f::Function, μ::Number, σ::Number, n::Int = 10)
ϵᵢ, wᵢ = gausshermite(n)
ϵᵢ .*= sqrt(2.) # Normalize ϵᵢ and wᵢ nodes to approximate standard normal
# wᵢ ./= sqrt(π) # This step done later to reduce number of computations
if μ ≈ 0.
return sum([wᵢ[i] * f(ϵᵢ[i] * σ) for i in 1:n]) / sqrt(π)
else
return sum([wᵢ[i] * f(ϵᵢ[i] * σ + μ) for i in 1:n]) / sqrt(π)
end
end
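# Hedged check for the scalar method above: if X ~ N(μ, σ), then
# E[exp(X)] = exp(μ + σ²/2), and 10-point Gauss-Hermite quadrature should
# recover this lognormal mean to high accuracy:
#
#     μ, σ = 0.1, 0.2
#     gausshermite_expectation(exp, μ, σ) ≈ exp(μ + σ^2 / 2)  # true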
function gausshermite_expectation(f::Function, μ::AbstractVector{S},
Σ::AbstractVector{<: Number}, n::Int = 10) where {S <: Number}
d = length(μ)
@assert length(Σ) == d "The length of μ and Σ must be the same."
ϵ, w = gausshermite(n)
ϵ .*= sqrt(2.) # Normalize ϵ and w nodes to approximate standard normal
# w ./= sqrt(π) # This step done later to reduce number of computations
# Evaluate over the tensor grid
feval = Array{S}(undef, (n for i in 1:d)...)
allCI = CartesianIndices(feval)
if all(μ .≈ 0.)
@simd for CI in allCI
feval[CI] = f([ϵ[i] for i in Tuple(CI)] .* Σ)
end
else
@simd for CI in allCI
feval[CI] = f([ϵ[i] for i in Tuple(CI)] .* Σ + μ)
end
end
for n_dim in 1:(d - 1)
# Iteratively integrate out each dimension, i.e. law of iterated expectations
iter = CartesianIndices(tuple(Tuple(1:n for i in 1:(d - n_dim))...,
Tuple(1:1 for i in 1:n_dim)...)) # Create CartesianIndices for all remaining dimensions
# ((1:n for i in 1:(d - n_dim + 1))..., (1 for i in 1:(n_dim - 1))...) creates a Tuple of 1:n for the dimensions
# that are not to be integrated out and uses 1s for the remaining dimensions. We want to use each dimension of feval
# from 1 to (d - n_dim) (inclusive). So on the first iteration, the tuple should be (1:n, 1:n).
# We then assign it to the dimensions of feval from 1 to (d - n_dim - 1) (inclusive) to avoid allocations
feval[iter] .= dropdims(sum(mapslices(fᵢ -> fᵢ .* w, (@view feval[((1:n for i in 1:(d - n_dim + 1))...,
(1 for i in 1:(n_dim - 1))...)...]),
dims = (d - n_dim) + 1), dims = (d - n_dim) + 1), dims = (d - n_dim) + 1)
end
# Handle final integration on its own
return sum(w .* (@view feval[:, (1 for i in 1:(d - 1))...])) / π^(d / 2)
end
function gausshermite_expectation(f::Function, μ::AbstractVector{S},
Σ::AbstractVector{<: Number}, ns::AbstractVector{Int}) where {S <: Number}
d = length(μ)
@assert length(Σ) == d "The length of μ and Σ must be the same."
ϵ = Dict{Int, Vector{S}}()
w = Dict{Int, Vector{S}}()
for i in 1:d
ϵ[i], w[i] = gausshermite(ns[i])
ϵ[i] .*= sqrt(2.) # Normalize ϵ and w nodes to approximate standard normal
# w[i] ./= sqrt(π) # This step done later to reduce number of computations
end
# Evaluate over the tensor grid
feval = Array{S}(undef, (n for n in ns)...)
allCI = CartesianIndices(feval)
if all(μ .≈ 0.)
@simd for CI in allCI
feval[CI] = f([ϵ[n_dim][gridᵢ] for (n_dim, gridᵢ) in enumerate(Tuple(CI))] .* Σ)
end
else
@simd for CI in allCI
feval[CI] = f([ϵ[n_dim][gridᵢ] for (n_dim, gridᵢ) in enumerate(Tuple(CI))] .* Σ + μ)
end
end
# Iteratively integrate out each dimension, i.e. law of iterated expectations
for n_dim in 1:(d - 1)
iter = CartesianIndices(tuple(Tuple(1:ns[i] for i in 1:(d - n_dim))...,
Tuple(1:1 for i in 1:n_dim)...))
feval[iter, 1] .= dropdims(sum(mapslices(fᵢ -> fᵢ .* w[d - n_dim + 1], (@view feval[((1:ns[i] for i in 1:(d - n_dim + 1))...,
(1 for i in 1:(n_dim - 1))...)...]),
dims = (d - n_dim) + 1), dims = (d - n_dim) + 1), dims = (d - n_dim) + 1)
end
# Handle final integration on its own
return sum(w[1] .* (@view feval[:, (1 for i in 1:(d - 1))...])) / π^(d / 2)
end
function gausshermite_expectation(f::Function, μ::AbstractVector{S},
Σ::AbstractVector{<: Number}, ns::NTuple{N, Int}) where {S<: Number, N}
d = length(μ)
@assert length(Σ) == d "The length of μ and Σ must be the same."
ϵ = Dict{Int, Vector{S}}()
w = Dict{Int, Vector{S}}()
for i in 1:d
ϵ[i], w[i] = gausshermite(ns[i])
ϵ[i] .*= sqrt(2.) # Normalize ϵ and w nodes to approximate standard normal
# w[i] ./= sqrt(π) # This step done later to reduce number of computations
end
# Evaluate over the tensor grid
feval = Array{S}(undef, (n for n in ns)...)
allCI = CartesianIndices(feval)
if all(μ .≈ 0.)
@simd for CI in allCI
feval[CI] = f([ϵ[n_dim][gridᵢ] for (n_dim, gridᵢ) in enumerate(Tuple(CI))] .* Σ)
end
else
@simd for CI in allCI
feval[CI] = f([ϵ[n_dim][gridᵢ] for (n_dim, gridᵢ) in enumerate(Tuple(CI))] .* Σ + μ)
end
end
# Iteratively integrate out each dimension, i.e. law of iterated expectations
for n_dim in 1:(d - 1)
iter = CartesianIndices(tuple(Tuple(1:ns[i] for i in 1:(d - n_dim))...,
Tuple(1:1 for i in 1:n_dim)...))
feval[iter, 1] .= dropdims(sum(mapslices(fᵢ -> fᵢ .* w[d - n_dim + 1], (@view feval[((1:ns[i] for i in 1:(d - n_dim + 1))...,
(1 for i in 1:(n_dim - 1))...)...]),
dims = (d - n_dim) + 1), dims = (d - n_dim) + 1), dims = (d - n_dim) + 1)
end
# Handle final integration on its own
return sum(w[1] .* (@view feval[:, (1 for i in 1:(d - 1))...])) / π^(d / 2)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2205 | """
```
impulse_responses(m, horizon, shock_ind, shock_size, z₀; deviations = true)
impulse_responses(m, horizon, shock_ind, shock_size; deviations = true)
```
calculates impulse responses according to a risk-adjusted linearization,
given a size for the shock, the index of the shock, and the initial state.
The second method assumes that the initial state is the stochastic steady state.
### Inputs
- `m::RiskAdjustedLinearization`: a solved risk-adjusted linearization of a dynamic economy
- `z₀::AbstractVector`: the initial state from which the economy begins
- `horizon::Int`: number of periods to be simulated
- `shock_ind::Int`: index of the shock that should be nonzero (other shocks are zero)
- `shock_size::Number`: size of the shock (can be positive or negative)
### Keywords
- `deviations::Bool`: if true, the impulse responses are returned in deviations from steady state.
### Outputs
- `states`: a matrix of the simulated path of states `z`, with type specified by the array type of `z₀`
- `jumps`: a matrix of the simulated path of jump variables `y`, with type specified by the array type of `z₀`
"""
function impulse_responses(m::RiskAdjustedLinearization, horizon::Int, shock_ind::Int,
shock_size::Number, z₀::AbstractVector;
deviations::Bool = true)
# Create shock vector and output matrices
shock = zeros(eltype(z₀), m.Nε, 1)
shock[shock_ind] = shock_size
states = similar(z₀, m.Nz, horizon)
jumps = similar(z₀, m.Ny, horizon)
# Calculate state after impact
states[:, 1], jumps[:, 1] = simulate(m, 1, shock, z₀)
# Simulate with no other shocks drawn
states[:, 2:end], jumps[:, 2:end] = simulate(m, horizon - 1, (@view states[:, 1]))
if deviations
return states .- m.z, jumps .- m.y
else
return states, jumps
end
end
function impulse_responses(m::RiskAdjustedLinearization, horizon::Int, shock_ind::Int, shock_size::Number;
deviations::Bool = true)
# Calculate starting at stochastic steady state
return impulse_responses(m, horizon, shock_ind, shock_size, m.z; deviations = deviations)
end
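# Hedged usage sketch, assuming `m` is a solved RiskAdjustedLinearization with
# at least one exogenous shock:
#
#     states, jumps = impulse_responses(m, 40, 1, -3.)  # 40-period IRF to a negative draw of shock 1
#     # With `deviations = true` (the default), the returned paths are measured
#     # relative to the stochastic steady state (m.z, m.y).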
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 7318 | """
```
simulate(m, horizon, shock_matrix, z₀)
simulate(m, horizon, z₀)
simulate(m, shock_vector, z₀)
simulate(m, horizon, shock_matrix)
simulate(m, horizon)
```
simulates the economy approximated by a risk-adjusted linearization. The first method
incorporates an arbitrary path of shocks across the horizon while the second method
assumes no shocks occur during the horizon. The third method calculates next
period's states and jump variables, given a vector of shocks. The fourth and fifth
methods are the same as the first two but assume the economy begins at the
stochastic steady state.
### Inputs
- `m::RiskAdjustedLinearization`: a solved risk-adjusted linearization of a dynamic economy
- `z₀::AbstractVector`: the initial state from which the economy begins
- `horizon::Int`: number of periods to be simulated
- `shock_matrix::AbstractMatrix`: an `Nε × T` matrix, whose columns are draws from
the distributions of exogenous shocks driving the economy approximated by `m`. The number of columns
must be at least as long as `horizon`. If the number of columns is larger, then we do not use
draws for columns `horizon + 1:T`.
### Outputs
- `states`: a matrix of the simulated path of states `z`, with type specified by the array type of `z₀`
- `jumps`: a matrix of the simulated path of jump variables `y`, with type specified by the array type of `z₀`
"""
function simulate(m::RiskAdjustedLinearization, horizon::Int, shock_matrix::AbstractMatrix, z₀::AbstractVector)
@assert horizon <= size(shock_matrix, 2) "There are not enough draws in shock_matrix (horizon <= size(shock_matrix, 2))"
# Set up
states = similar(z₀, m.Nz, horizon)
jumps = similar(z₀, m.Ny, horizon)
Γ₁ = m[:Γ₁]
Γ₂ = m[:Γ₂]
y₀ = m.y + m.Ψ * (z₀ - m.z)
# Create "shock function" which creates the matrix mapping shocks to states
shock_fnct = create_shock_function(m.nonlinear.Σ, m.nonlinear.Λ, m.z, m.y, m.Ψ)
# Iterate forward!
states[:, 1] = expected_transition(m.z, m.y, m.Ψ, Γ₁, Γ₂, z₀, y₀) + shock_fnct(z₀, shock_matrix[:, 1])
jumps[:, 1] = m.y + m.Ψ * ((@view states[:, 1]) - m.z)
for t in 2:horizon
states[:, t] = expected_transition(m.z, m.y, m.Ψ, Γ₁, Γ₂, (@view states[:, t - 1]), (@view jumps[:, t - 1])) +
shock_fnct((@view states[:, t - 1]), shock_matrix[:, t])
jumps[:, t] = m.y + m.Ψ * ((@view states[:, t]) - m.z)
end
return states, jumps
end
function simulate(m::RiskAdjustedLinearization, horizon::Int, z₀::AbstractVector)
# Set up
states = similar(z₀, m.Nz, horizon)
jumps = similar(z₀, m.Ny, horizon)
Γ₁ = m[:Γ₁]
Γ₂ = m[:Γ₂]
y₀ = m.y + m.Ψ * (z₀ - m.z)
# Iterate forward!
states[:, 1] = expected_transition(m.z, m.y, m.Ψ, Γ₁, Γ₂, z₀, y₀)
jumps[:, 1] = m.y + m.Ψ * ((@view states[:, 1]) - m.z)
for t in 2:horizon
states[:, t] = expected_transition(m.z, m.y, m.Ψ, Γ₁, Γ₂, (@view states[:, t - 1]), (@view jumps[:, t - 1]))
jumps[:, t] = m.y + m.Ψ * ((@view states[:, t]) - m.z)
end
return states, jumps
end
function simulate(m::RiskAdjustedLinearization, shock_vector::AbstractVector, z₀::AbstractVector)
# Set up
Γ₁ = m[:Γ₁]
Γ₂ = m[:Γ₂]
y₀ = m.y + m.Ψ * (z₀ - m.z)
# Create "shock function" which creates the matrix mapping shocks to states
shock_fnct = create_shock_function(m.nonlinear.Σ, m.nonlinear.Λ, m.z, m.y, m.Ψ)
# Iterate forward!
states = expected_transition(m.z, m.y, m.Ψ, Γ₁, Γ₂, z₀, y₀) + shock_fnct(z₀, shock_vector)
jumps = m.y + m.Ψ * (states - m.z)
return states, jumps
end
function expected_transition(z::AbstractVector, y::AbstractVector, Ψ::AbstractMatrix,
Γ₁::AbstractMatrix, Γ₂::AbstractMatrix, zₜ::AbstractVector, yₜ::AbstractVector)
return z + Γ₁ * (zₜ - z) + Γ₂ * (yₜ - y)
end
function simulate(m::RiskAdjustedLinearization, horizon::Int, shock_matrix::AbstractMatrix)
simulate(m, horizon, shock_matrix, m.z)
end
function simulate(m::RiskAdjustedLinearization, horizon::Int)
simulate(m, horizon, m.z)
end
# Use multiple dispatch to construct the correct shock function
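# The methods below dispatch on whether Σ and Λ are cached constant matrices (`AbstractMatrix`)
# or state-dependent functions (`DiffCache`/`TwoDiffCache` for the one- and two-argument cases).
# Whenever Λ ≈ 0, the shock loading (I - Λ * Ψ) \ Σ reduces to Σ itself.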
function create_shock_function(Σ::RALF1{S}, Λ::RALF1{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: AbstractMatrix, L <: AbstractMatrix}
R = all(Λ.cache .≈ 0.) ? Σ.cache : (I - Λ.cache * Ψ) \ Σ.cache
f = function _both_mat(z::AbstractVector, ε::AbstractVector)
R * ε
end
return f
end
function create_shock_function(Σ::RALF1{S}, Λ::RALF1{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: DiffCache, L <: AbstractMatrix}
f = if all(Λ.cache .≈ 0.)
function _only_zero_Λ_mat(z::AbstractVector, ε::AbstractVector)
Σ(z) * ε
end
else
function _only_nonzero_Λ_mat(z::AbstractVector, ε::AbstractVector)
(I - Λ.cache * Ψ) \ (Σ(z) * ε)
end
end
return f
end
function create_shock_function(Σ::RALF1{S}, Λ::RALF1{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: AbstractMatrix, L <: DiffCache}
f = function _only_Σ_mat(z::AbstractVector, ε::AbstractVector)
(I - Λ(z) * Ψ) \ (Σ.cache * ε)
end
return f
end
function create_shock_function(Σ::RALF1{S}, Λ::RALF1{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: DiffCache, L <: DiffCache}
f = function _both_fnct(z::AbstractVector, ε::AbstractVector)
(I - Λ(z) * Ψ) \ (Σ(z) * ε)
end
return f
end
function create_shock_function(Σ::RALF2{S}, Λ::RALF2{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: AbstractMatrix, L <: AbstractMatrix}
R = all(Λ.cache .≈ 0.) ? Σ.cache : (I - Λ.cache * Ψ) \ Σ.cache
f = function _both_mat(z::AbstractVector, ε::AbstractVector)
R * ε
end
return f
end
function create_shock_function(Σ::RALF2{S}, Λ::RALF2{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: TwoDiffCache, L <: AbstractMatrix}
f = if all(Λ.cache .≈ 0.)
function _only_zero_Λ_mat(zₜ::AbstractVector, ε::AbstractVector)
Σ(zₜ, y + Ψ * (zₜ - z)) * ε
end
else
function _only_nonzero_Λ_mat(zₜ::AbstractVector, ε::AbstractVector)
(I - Λ.cache * Ψ) \ (Σ(zₜ, y + Ψ * (zₜ - z)) * ε)
end
end
return f
end
function create_shock_function(Σ::RALF2{S}, Λ::RALF2{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: AbstractMatrix, L <: TwoDiffCache}
f = function _only_Σ_mat(zₜ::AbstractVector, ε::AbstractVector)
(I - Λ(zₜ, y + Ψ * (zₜ - z)) * Ψ) \ (Σ.cache * ε)
end
return f
end
function create_shock_function(Σ::RALF2{S}, Λ::RALF2{L}, z::AbstractVector,
y::AbstractVector, Ψ::AbstractMatrix) where {S <: TwoDiffCache, L <: TwoDiffCache}
f = function _both_fnct(zₜ::AbstractVector, ε::AbstractVector)
yₜ = y + Ψ * (zₜ - z)
(I - Λ(zₜ, yₜ) * Ψ) \ (Σ(zₜ, yₜ) * ε)
end
return f
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2160 | using SafeTestsets
# Start Test Script
@time begin
@time @safetestset "Utilities" begin
include("util.jl")
end
@time @safetestset "Risk-Adjusted Linearization" begin
include("risk_adjusted_linearization/constructors.jl")
end
@time @safetestset "Numerical algorithms" begin
include("numerical_algorithms/blanchard_kahn.jl")
include("numerical_algorithms/compute_psi.jl")
include("numerical_algorithms/deterministic.jl")
include("numerical_algorithms/relaxation.jl")
include("numerical_algorithms/homotopy.jl")
include("numerical_algorithms/solve.jl")
end
@time @safetestset "Examples" begin
include(joinpath(dirname(@__FILE__), "..", "examples", "rbc_cc", "example_rbc_cc.jl"))
include(joinpath(dirname(@__FILE__), "..", "examples", "wachter_disaster_risk", "example_wachter.jl"))
include(joinpath(dirname(@__FILE__), "..", "examples", "crw", "example_crw.jl")) # This example tests case of jump-dependent Σ and Λ
include(joinpath(dirname(@__FILE__), "..", "examples", "textbook_nk", "example_textbook_nk.jl"))
include(joinpath(dirname(@__FILE__), "..", "examples", "nk_with_capital", "example_nk_with_capital.jl"))
include("examples_to_test/rbc_cc.jl")
end
if VERSION <= v"1.5"
@time @safetestset "Sparse Arrays and Jacobians for Objects of Risk-Adjusted Linearizations" begin
include("sparse_ral_objects/sparse_mu_xi_V_jacobians.jl")
include("sparse_ral_objects/sparse_array_caches.jl")
end
end
@time @safetestset "Simulations, Impulse Responses, and Simulation-Based Diagnostics" begin
include("simulation/simulate_rbc_cc.jl")
include("simulation/simulate_wachter_disaster_risk.jl")
include("simulation/simulate_crw.jl")
include("simulation/impulse_responses_rbc_cc.jl")
include("simulation/impulse_responses_wachter_disaster_risk.jl")
include("simulation/impulse_responses_crw.jl")
include("simulation/gausshermite_expectation.jl")
include("simulation/euler_equation_error.jl")
end
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1677 | using RiskAdjustedLinearizations, ForwardDiff, Random, Test
Random.seed!(1793)
aR = rand(2)
bR = rand(3)
aD = rand(2)
bD = rand(3)
AR = rand(2, 2, 3)
BR = rand(3, 2, 3, 2)
AD = ForwardDiff.Dual.(rand(2, 2, 3))
BD = ForwardDiff.Dual.(rand(3, 2, 3, 2))
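# dualarray(A, B) (and dualvector(a, b)) should return an array shaped like the first argument
# whose element type can hold ForwardDiff duals whenever either argument has dual entries;
# the length checks against `similar(A, eltype(B))` below verify this behavior.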
@testset "dualarray and dualvector" begin
@test length(RiskAdjustedLinearizations.dualarray(AR, BR)) == length(AR)
@test length(RiskAdjustedLinearizations.dualarray(BR, AR)) == length(BR)
@test length(RiskAdjustedLinearizations.dualarray(AR, BD)) == length(similar(AR, eltype(BD)))
@test length(RiskAdjustedLinearizations.dualarray(BD, AR)) == length(similar(BD, eltype(AR)))
@test length(RiskAdjustedLinearizations.dualarray(AD, BD)) == length(AD)
@test length(RiskAdjustedLinearizations.dualarray(BD, AD)) == length(BD)
@test length(RiskAdjustedLinearizations.dualarray(AD, BR)) == length(AD)
@test length(RiskAdjustedLinearizations.dualarray(BR, AD)) == length(similar(BR, eltype(AD)))
@test length(RiskAdjustedLinearizations.dualvector(aR, bR)) == length(aR)
@test length(RiskAdjustedLinearizations.dualvector(bR, aR)) == length(bR)
@test length(RiskAdjustedLinearizations.dualvector(aR, bD)) == length(similar(aR, eltype(bD)))
@test length(RiskAdjustedLinearizations.dualvector(bD, aR)) == length(similar(bD, eltype(aR)))
@test length(RiskAdjustedLinearizations.dualvector(aD, bD)) == length(aD)
@test length(RiskAdjustedLinearizations.dualvector(bD, aD)) == length(bD)
@test length(RiskAdjustedLinearizations.dualvector(aD, bR)) == length(aD)
@test length(RiskAdjustedLinearizations.dualvector(bR, aD)) == length(similar(bR, eltype(aD)))
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 648 | # This script actually solves the RBC model with Campbell-Cochrane habits using a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "rbc_cc", "rbc_cc.jl"))
# Set up
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc, 0)
# Solve!
solve!(m; algorithm = :relaxation)
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "rbccc_sss_iterative_output.jld2"), "r")
@test isapprox(sssout["z_rss"], m.z, atol=1e-4)
@test isapprox(sssout["y_rss"], m.y, atol=1e-4)
@test isapprox(sssout["Psi_rss"], m.Ψ, atol=1e-4)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1215 | using RiskAdjustedLinearizations, Test, JLD2
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Get stochastic steady state
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "iterative_sss_output.jld2"), "r")
z = vec(sssout["z"])
y = vec(sssout["y"])
Ψ = sssout["Psi"]
# Create model and update it
m = WachterDisasterRisk()
ral = inplace_wachter_disaster_risk(m)
update!(ral, z, y, Ψ)
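# blanchard_kahn checks the eigenvalue conditions for the existence of a unique stable
# solution to the linearized system, both at the stochastic steady state (below) and at
# the deterministic steady state (via the `deterministic = true` keyword further down).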
@info "The following messages about Blanchard-Kahn conditions are expected."
@test RiskAdjustedLinearizations.blanchard_kahn(ral)
@test RiskAdjustedLinearizations.blanchard_kahn(ral; verbose = :low)
@test RiskAdjustedLinearizations.blanchard_kahn(ral; verbose = :none)
# Get deterministic steady state
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
update!(ral, z, y, zeros(length(y), length(z)))
RiskAdjustedLinearizations.compute_Ψ(ral; zero_entropy_jacobian = true)
@test RiskAdjustedLinearizations.blanchard_kahn(ral; deterministic = true, verbose = :low)
@test RiskAdjustedLinearizations.blanchard_kahn(ral; deterministic = true, verbose = :high)
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1220 | using RiskAdjustedLinearizations, JLD2, Test
output = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "compute_psi_output.jld2"), "r")
GAM1 = output["GAM1"]
GAM2 = output["GAM2"]
GAM3 = output["GAM3"]
GAM4 = output["GAM4"]
GAM5 = output["GAM5"]
GAM6 = output["GAM6"]
JV = output["JV"]
Psi = output["Psi"]
Psi_det = output["Psi_det"]
Nzy = sum(size(GAM5))
AA = Matrix{Complex{Float64}}(undef, Nzy, Nzy)
BB = similar(AA)
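# compute_Ψ solves for Ψ from the linearized system matrices (Γ₁, ..., Γ₆) and the entropy
# Jacobian JV via a generalized Schur (QZ) decomposition; the in-place methods reuse the
# preallocated complex matrices AA and BB.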
@testset "QZ decomposition for Ψ" begin
# Out-of-place
@test RiskAdjustedLinearizations.compute_Ψ(GAM1, GAM2, GAM3, GAM4, GAM5, GAM6, JV) ≈ Psi
@test RiskAdjustedLinearizations.compute_Ψ(GAM1, GAM2, GAM3, GAM4, GAM5, GAM6) ≈ Psi_det
@test RiskAdjustedLinearizations.compute_Ψ(GAM1, GAM2, GAM3, GAM4, GAM5, GAM6, zeros(size(JV))) ≈ Psi_det
# In-place
@test RiskAdjustedLinearizations.compute_Ψ!(AA, BB, GAM1, GAM2, GAM3, GAM4, GAM5, GAM6, JV) ≈ Psi
@test RiskAdjustedLinearizations.compute_Ψ!(AA, BB, GAM1, GAM2, GAM3, GAM4, GAM5, GAM6) ≈ Psi_det
@test RiskAdjustedLinearizations.compute_Ψ!(AA, BB, GAM1, GAM2, GAM3, GAM4, GAM5, GAM6, zeros(size(JV))) ≈ Psi_det
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 4510 | using RiskAdjustedLinearizations, Test, JLD2
using SparseArrays, SparseDiffTools, FiniteDiff
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Load in guesses and true solutions
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
# Set up RiskAdjustedLinearization
m = WachterDisasterRisk()
ral = inplace_wachter_disaster_risk(m) # has guesses for z and y already
z = copy(ral.z)
y = copy(ral.y)
Ψ = copy(ral.Ψ)
# Solve!
@test_throws AssertionError solve!(ral, ral.z, ral.y, ral.Ψ; algorithm = :deterministic, verbose = :high, autodiff = :central) # first w/finite diff nlsolve Jacobian
@info "The following series of print statements are expected."
ral.z .= z .* 1.001
ral.y .= y .* 1.001
solve!(ral, ral.z, ral.y; algorithm = :deterministic, verbose = :high, autodiff = :central) # first w/finite diff nlsolve Jacobian
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
ral.z .= z .* 1.001
ral.y .= y .* 1.001
RiskAdjustedLinearizations.deterministic_steadystate!(ral, vcat(ral.z, ral.y);
verbose = :none, autodiff = :central) # first w/finite diff nlsolve Jacobian
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
update!(ral, z, y, Ψ) # now autodiff Jacobian
ral.z .= vec(detout["z"]) * 1.001
ral.y .= vec(detout["y"]) * 1.001
solve!(ral, ral.z, ral.y; algorithm = :deterministic, verbose = :high, autodiff = :forward) # now autodiff nlsolve Jacobian
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
ral.z .= vec(detout["z"]) * 1.001
ral.y .= vec(detout["y"]) * 1.001
RiskAdjustedLinearizations.deterministic_steadystate!(ral, vcat(ral.z, ral.y);
verbose = :none, autodiff = :forward)
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
# Check sparse Jacobian
sparsity, colorvec = compute_sparsity_pattern(ral, :deterministic; sparsity_detection = false)
jac_cache = preallocate_jac_cache(ral, :deterministic; sparsity_detection = false)
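# compute_sparsity_pattern returns the Jacobian's sparsity pattern and a matrix-coloring
# vector (as used by SparseDiffTools), while preallocate_jac_cache constructs the Jacobian
# cache reused by the sparse-Jacobian solves below.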
RiskAdjustedLinearizations.deterministic_steadystate!(ral, 1.001 .* vcat(ral.z, ral.y);
verbose = :none,
sparse_jacobian = true,
sparsity = sparsity,
colorvec = colorvec)
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
RiskAdjustedLinearizations.deterministic_steadystate!(ral, 1.001 .* vcat(ral.z, ral.y);
verbose = :none,
autodiff = :forward,
sparse_jacobian = true,
sparsity = sparsity,
colorvec = colorvec)
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
RiskAdjustedLinearizations.deterministic_steadystate!(ral, 1.001 .* vcat(ral.z, ral.y);
verbose = :none,
sparse_jacobian = true,
jac_cache = jac_cache)
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
RiskAdjustedLinearizations.deterministic_steadystate!(ral, 1.001 .* vcat(ral.z, ral.y);
verbose = :none,
sparse_jacobian = true,
sparsity_detection = false)
@test maximum(abs.(ral.z - detout["z"])) < 1e-6
@test maximum(abs.(ral.y - detout["y"])) < 1e-6
# Using SparsityDetection.jl fails
@test_broken RiskAdjustedLinearizations.deterministic_steadystate!(ral, 1.001 .* vcat(ral.z, ral.y);
verbose = :none,
sparse_jacobian = true,
sparsity_detection = true)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 3450 | using RiskAdjustedLinearizations, Test, JLD2
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Load in guesses and true solutions
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "homotopy_sss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
Ψ = zeros(length(y), length(z))
# Set up RiskAdjustedLinearization
m = WachterDisasterRisk()
ral = inplace_wachter_disaster_risk(m)
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
# Solve!
@info "The following series of print statements are expected."
for i in 1:2
try # Work around occasional spurious failures of the homotopy algorithm
RiskAdjustedLinearizations.homotopy!(ral, vcat(ral.z, ral.y, vec(ral.Ψ)); verbose = :high, autodiff = :central,
step = .5, ftol = 1e-8) # first with finite diff NLsolve Jacobian
break
catch e
update!(ral, .99 * vec(sssout["z"]), .99 * vec(sssout["y"]), .99 * sssout["Psi"])
end
if i == 2 # trigger error if there actually is one
RiskAdjustedLinearizations.homotopy!(ral, vcat(ral.z, ral.y, vec(ral.Ψ)); verbose = :low, autodiff = :central,
step = .5, ftol = 1e-8) # first with finite diff NLsolve Jacobian
end
end
@test ral.z ≈ sssout["z"] atol=1e-6
@test ral.y ≈ sssout["y"] atol=1e-4
@test ral.Ψ ≈ sssout["Psi"] atol=5e-3
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ) # now autodiff Jacobian
@test_broken RiskAdjustedLinearizations.homotopy!(ral, vcat(ral.z, ral.y, vec(ral.Ψ)); verbose = :low, autodiff = :forward,
step = .5, ftol = 1e-8) # currently can't autodiff b/c problem with chunk size selection
#=@test ral.z ≈ sssout["z"] atol=1e-6
@test ral.y ≈ sssout["y"] atol=1e-4
@test ral.Ψ ≈ sssout["Psi"] atol=5e-3=#
# Check sparse Jacobian methods work
sparsity, colorvec = compute_sparsity_pattern(ral, :homotopy; sparsity_detection = false)
jac_cache = preallocate_jac_cache(ral, :homotopy; sparsity_detection = false)
RiskAdjustedLinearizations.homotopy!(ral, 1.001 .* vcat(ral.z, ral.y, vec(ral.Ψ));
verbose = :none, sparse_jacobian = true,
sparsity = sparsity, colorvec = colorvec)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
RiskAdjustedLinearizations.homotopy!(ral, 1.001 .* vcat(ral.z, ral.y, vec(ral.Ψ));
verbose = :none, sparse_jacobian = true,
jac_cache = jac_cache)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
RiskAdjustedLinearizations.homotopy!(ral, 1.001 .* vcat(ral.z, ral.y, vec(ral.Ψ));
verbose = :none, sparse_jacobian = true,
sparsity_detection = false)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
# Using SparsityDetection.jl fails
@test_broken RiskAdjustedLinearizations.homotopy!(ral, 1.001 .* vcat(ral.z, ral.y, vec(ral.Ψ));
verbose = :none, sparse_jacobian = true,
sparsity_detection = true)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 3750 | using RiskAdjustedLinearizations, Test, JLD2
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Load in guesses and true solutions
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "iterative_sss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
Ψ = zeros(length(y), length(z))
# Set up RiskAdjustedLinearization
m = WachterDisasterRisk()
ral = inplace_wachter_disaster_risk(m)
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
# Solve!
@info "The following series of print statements are expected."
RiskAdjustedLinearizations.relaxation!(ral, vcat(ral.z, ral.y), ral.Ψ; verbose = :low, autodiff = :central,
tol = 1e-10, max_iters = 1000, damping = .5, pnorm = Inf, ftol = 1e-8) # first with finite diff NLsolve Jacobian
@test ral.z ≈ sssout["z"] atol=1e-6
@test ral.y ≈ sssout["y"] atol=1e-6
@test ral.Ψ ≈ sssout["Psi"] atol=1e-6
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ) # now autodiff Jacobian
RiskAdjustedLinearizations.relaxation!(ral, vcat(ral.z, ral.y), ral.Ψ; verbose = :low, autodiff = :forward,
tol = 1e-10, max_iters = 1000, damping = .5, pnorm = Inf, ftol = 1e-8)
@test ral.z ≈ sssout["z"] atol=1e-6
@test ral.y ≈ sssout["y"] atol=1e-6
@test ral.Ψ ≈ sssout["Psi"] atol=1e-6
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ) # now use Anderson acceleration
RiskAdjustedLinearizations.relaxation!(ral, vcat(ral.z, ral.y), ral.Ψ; verbose = :low, autodiff = :central, use_anderson = true,
tol = 1e-10, max_iters = 1000, damping = .5, pnorm = Inf, ftol = 1e-8)
@test ral.z ≈ sssout["z"] atol=1e-6
@test ral.y ≈ sssout["y"] atol=1e-6
@test ral.Ψ ≈ sssout["Psi"] atol=1e-6
# Check sparse Jacobian methods work
sparsity, colorvec = compute_sparsity_pattern(ral, :relaxation; sparsity_detection = false)
jac_cache = preallocate_jac_cache(ral, :relaxation; sparsity_detection = false)
RiskAdjustedLinearizations.relaxation!(ral, 1.001 .* vcat(ral.z, ral.y), ral.Ψ;
verbose = :none, sparse_jacobian = true,
sparsity = sparsity, colorvec = colorvec)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
RiskAdjustedLinearizations.relaxation!(ral, 1.001 .* vcat(ral.z, ral.y), ral.Ψ;
verbose = :none, sparse_jacobian = true,
autodiff = :forward, sparsity = sparsity,
colorvec = colorvec)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
RiskAdjustedLinearizations.relaxation!(ral, 1.001 .* vcat(ral.z, ral.y), ral.Ψ;
verbose = :none, sparse_jacobian = true,
jac_cache = jac_cache)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
RiskAdjustedLinearizations.relaxation!(ral, 1.001 .* vcat(ral.z, ral.y), ral.Ψ;
verbose = :none, sparse_jacobian = true,
sparsity_detection = false)
@test maximum(abs.(ral.z - sssout["z"])) < 1e-6
@test maximum(abs.(ral.y - sssout["y"])) < 1e-6
# Using SparsityDetection.jl fails
@test_broken RiskAdjustedLinearizations.relaxation!(ral, 1.001 .* vcat(ral.z, ral.y), ral.Ψ;
verbose = :none, sparse_jacobian = true,
sparsity_detection = true)
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 5546 | using RiskAdjustedLinearizations, Test, JLD2
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Load in guesses and true solutions
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "iterative_sss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
Ψ = zeros(length(y), length(z))
# Set up RiskAdjustedLinearization
m = WachterDisasterRisk()
ral = inplace_wachter_disaster_risk(m)
zguess = 1.01 .* copy(ral.z)
yguess = 1.01 .* copy(ral.y)
# Solve!
@info "The following series of print statements are expected."
# relaxation w/finite diff Jacobian
solve!(ral, zguess, yguess; verbose = :high, autodiff = :central, ftol = 1e-8) # first w/ calculating the deterministic steady state
@test ral.z ≈ sssout["z"] atol=1e-5 # and then proceeding to stochastic steady state
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
solve!(ral; verbose = :none, autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ;
verbose = :none, autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
# homotopy w/finite diff Jacobian
for i in 1:10
try
solve!(ral, zguess, yguess; algorithm = :homotopy, step = .5,
verbose = :high, autodiff = :central, ftol = 1e-8) # first w/ calculating the deterministic steady state
break
catch e
zguess .= 1.01 * vec(sssout["z"])
yguess .= 1.01 * vec(sssout["y"])
end
if i == 10
solve!(ral, zguess, yguess; algorithm = :homotopy, step = .5,
verbose = :high, autodiff = :central, ftol = 1e-8) # first w/ calculating the deterministic steady state
end
end
@test ral.z ≈ sssout["z"] atol=1e-5 # and then proceeding to stochastic steady state
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
for i in 1:2
try
solve!(ral; verbose = :none, algorithm = :homotopy, step = .5,
autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
break
catch e
update!(ral, 1.01 * vec(sssout["z"]), 1.01 * vec(sssout["y"]), 1.01 * sssout["Psi"])
end
if i == 2
solve!(ral; verbose = :none, algorithm = :homotopy, step = .5,
autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
end
end
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
for i in 1:2
try
solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ;
verbose = :none, algorithm = :homotopy, step = .5,
autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
break
catch e
z .= vec(sssout["z"])
y .= 1.01 * vec(sssout["y"])
Ψ .= sssout["Psi"]
end
if i == 2
solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ;
verbose = :none, algorithm = :homotopy, step = .5,
autodiff = :central, ftol = 1e-8) # Now just go straight to solving stochastic steady state
end
end
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
# Now autodiff Jacobian
solve!(ral, zguess, yguess; verbose = :high, autodiff = :forward, ftol = 1e-8)
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
solve!(ral; verbose = :high, autodiff = :forward, ftol = 1e-8)
solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ; verbose = :none, autodiff = :forward, ftol = 1e-8)
# currently can't autodiff w/homotopy b/c chunksize inference is not working
@test_broken solve!(ral, zguess, yguess; verbose = :high, autodiff = :forward, ftol = 1e-8, algorithm = :homotopy)
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
@test_broken solve!(ral; verbose = :high, autodiff = :forward, ftol = 1e-8, algorithm = :homotopy)
@test_broken solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ; verbose = :none, autodiff = :forward, ftol = 1e-8, algorithm = :homotopy)
# relaxation w/Anderson
solve!(ral, zguess, yguess; verbose = :high, autodiff = :central,
use_anderson = true, ftol = 1e-8) # first w/ calculating the deterministic steady state
@test ral.z ≈ sssout["z"] atol=1e-5 # and then proceeding to stochastic steady state
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
update!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ)
solve!(ral; verbose = :none, autodiff = :central,
use_anderson = true, ftol = 1e-8) # Now just go straight to solving stochastic steady state
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
solve!(ral, 1.01 .* z, 1.01 .* y, 1.01 .* Ψ;
verbose = :none, autodiff = :central,
use_anderson = true, ftol = 1e-8) # Now just go straight to solving stochastic steady state
@test ral.z ≈ sssout["z"] atol=1e-5
@test ral.y ≈ sssout["y"] atol=1e-5
@test ral.Ψ ≈ sssout["Psi"] atol=1e-5
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 5079 | using JLD2, Test, RiskAdjustedLinearizations
# Use Wachter model with Disaster Risk to assess the constructors of a RiskAdjustedLinearization type
# for in-place and out-of-place functions
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
m = WachterDisasterRisk()
### In-place RiskAdjustedLinearization
## Deterministic steady state
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
Ψ = zeros(eltype(y), length(y), length(z))
ral = inplace_wachter_disaster_risk(m)
# Check outputs
update!(ral, z, y, Ψ)
nl = nonlinear_system(ral)
li = linearized_system(ral)
@testset "Evaluate WachterDisasterRisk in-place RiskAdjustedLinearization at deterministic steady state" begin
@test nl[:μ_sss] ≈ detout["MU"]
@test nl[:Λ_sss] == detout["LAM"]
@test nl[:Σ_sss] ≈ detout["SIG"]
@test nl[:ξ_sss] ≈ detout["XI"]
@test nl[:𝒱_sss] ≈ detout["V"]
@test li[:Γ₁] ≈ detout["GAM1"]
@test li[:Γ₂] ≈ detout["GAM2"]
@test li[:Γ₃] ≈ detout["GAM3"]
@test li[:Γ₄] ≈ detout["GAM4"]
@test li[:Γ₅] ≈ detout["GAM5"]
@test li[:Γ₆] ≈ detout["GAM6"]
@test li[:JV] ≈ detout["JV"]
@test ral[:μ_sss] ≈ detout["MU"]
@test ral[:Λ_sss] == detout["LAM"]
@test ral[:Σ_sss] ≈ detout["SIG"]
@test ral[:ξ_sss] ≈ detout["XI"]
@test ral[:𝒱_sss] ≈ detout["V"]
@test ral[:Γ₁] ≈ detout["GAM1"]
@test ral[:Γ₂] ≈ detout["GAM2"]
@test ral[:Γ₃] ≈ detout["GAM3"]
@test ral[:Γ₄] ≈ detout["GAM4"]
@test ral[:Γ₅] ≈ detout["GAM5"]
@test ral[:Γ₆] ≈ detout["GAM6"]
@test ral[:JV] ≈ detout["JV"]
end
## Stochastic steady state
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "iterative_sss_output.jld2"), "r")
z = vec(sssout["z"])
y = vec(sssout["y"])
Ψ = sssout["Psi"]
# Check outputs
update!(ral, z, y, Ψ)
nl = nonlinear_system(ral)
li = linearized_system(ral)
@testset "Evaluate WachterDisasterRisk in-place RiskAdjustedLinearization at stochastic steady state" begin
@test nl[:μ_sss] ≈ sssout["MU"]
@test nl[:Λ_sss] == sssout["LAM"]
@test nl[:Σ_sss] ≈ sssout["SIG"]
@test nl[:ξ_sss] ≈ sssout["XI"]
@test nl[:𝒱_sss] ≈ sssout["V"]
@test li[:Γ₁] ≈ sssout["GAM1"]
@test li[:Γ₂] ≈ sssout["GAM2"]
@test li[:Γ₃] ≈ sssout["GAM3"]
@test li[:Γ₄] ≈ sssout["GAM4"]
@test li[:Γ₅] ≈ sssout["GAM5"]
@test li[:Γ₆] ≈ sssout["GAM6"]
@test li[:JV] ≈ sssout["JV"]
@test ral[:μ_sss] ≈ sssout["MU"]
@test ral[:Λ_sss] == sssout["LAM"]
@test ral[:Σ_sss] ≈ sssout["SIG"]
@test ral[:ξ_sss] ≈ sssout["XI"]
@test ral[:𝒱_sss] ≈ sssout["V"]
@test ral[:Γ₁] ≈ sssout["GAM1"]
@test ral[:Γ₂] ≈ sssout["GAM2"]
@test ral[:Γ₃] ≈ sssout["GAM3"]
@test ral[:Γ₄] ≈ sssout["GAM4"]
@test ral[:Γ₅] ≈ sssout["GAM5"]
@test ral[:Γ₆] ≈ sssout["GAM6"]
@test ral[:JV] ≈ sssout["JV"]
end
### Out-of-place RiskAdjustedLinearization
## Deterministic steady state
detout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "det_ss_output.jld2"), "r")
z = vec(detout["z"])
y = vec(detout["y"])
Ψ = zeros(eltype(y), length(y), length(z))
ral = outofplace_wachter_disaster_risk(m)
# Check outputs
update!(ral, z, y, Ψ)
nl = nonlinear_system(ral)
li = linearized_system(ral)
@testset "Evaluate WachterDisasterRisk out-of-place RiskAdjustedLinearization at deterministic steady state" begin
@test nl.μ(z, y) ≈ detout["MU"]
@test nl[:μ_sss] ≈ detout["MU"]
@test isa(nl[:Λ_sss], AbstractArray)
@test nl.Λ(z) == detout["LAM"]
@test nl[:Σ_sss] ≈ detout["SIG"]
@test nl.Σ(z) ≈ detout["SIG"]
@test nl[:ξ_sss] ≈ detout["XI"]
@test nl.ξ(z, y) ≈ detout["XI"]
@test nl[:𝒱_sss] ≈ detout["V"]
@test nl.𝒱(z, Ψ) ≈ detout["V"]
@test li[:Γ₁] ≈ detout["GAM1"]
@test li[:Γ₂] ≈ detout["GAM2"]
@test li[:Γ₃] ≈ detout["GAM3"]
@test li[:Γ₄] ≈ detout["GAM4"]
@test li[:Γ₅] ≈ detout["GAM5"]
@test li[:Γ₆] ≈ detout["GAM6"]
@test li[:JV] ≈ detout["JV"]
end
## Stochastic steady state
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "iterative_sss_output.jld2"), "r")
z = vec(sssout["z"])
y = vec(sssout["y"])
Ψ = sssout["Psi"]
# Check outputs
update!(ral, z, y, Ψ)
nl = nonlinear_system(ral)
li = linearized_system(ral)
@testset "Evaluate WachterDisasterRisk out-of-place RiskAdjustedLinearization at stochastic steady state" begin
@test nl.μ(z, y) ≈ sssout["MU"]
@test nl[:μ_sss] ≈ sssout["MU"]
@test isa(nl[:Λ_sss], AbstractArray)
@test nl.Λ(z) == sssout["LAM"]
@test nl[:Σ_sss] ≈ sssout["SIG"]
@test nl.Σ(z) ≈ sssout["SIG"]
@test nl[:ξ_sss] ≈ sssout["XI"]
@test nl.ξ(z, y) ≈ sssout["XI"]
@test nl[:𝒱_sss] ≈ sssout["V"]
@test nl.𝒱(z, Ψ) ≈ sssout["V"]
@test li[:Γ₁] ≈ sssout["GAM1"]
@test li[:Γ₂] ≈ sssout["GAM2"]
@test li[:Γ₃] ≈ sssout["GAM3"]
@test li[:Γ₄] ≈ sssout["GAM4"]
@test li[:Γ₅] ≈ sssout["GAM5"]
@test li[:Γ₆] ≈ sssout["GAM6"]
@test li[:JV] ≈ sssout["JV"]
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 5242 | using RiskAdjustedLinearizations, JLD2, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "crw", "crw.jl"))
# Solve model
m_crw = CoeurdacierReyWinant()
m = crw(m_crw)
try
solve!(m, m.z, m.y, m.Ψ; algorithm = :homotopy, step = .5, verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_sss.jld2"), "r")
update!(m, sssout["z_rss"], sssout["y_rss"], sssout["Psi_rss"])
end
# Calculate consumption at state zₜ
crw_cₜ(m, zₜ) = exp(m.y[1] + (m.Ψ * (zₜ - m.z))[1])
# Evaluates m_{t + 1} + r_{t + 1}
function crw_logSDFxR(m, zₜ, εₜ₊₁, Cₜ)
zₜ₊₁, yₜ₊₁ = simulate(m, εₜ₊₁, zₜ)
return log(m_crw.β) - m_crw.γ * (yₜ₊₁[1] - log(Cₜ)) + zₜ₊₁[2]
end
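# Above, log(m_crw.β) - m_crw.γ * (yₜ₊₁[1] - log(Cₜ)) is the log SDF mₜ₊₁ under CRRA
# preferences, and zₜ₊₁[2] is the (log) return rₜ₊₁, stored as the second state variable.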
# Calculate 𝔼ₜ[exp(mₜ₊₁ + rₜ₊₁)] via quadrature
std_norm_mean = zeros(2)
std_norm_sig = ones(2)
crw_𝔼_quadrature(f::Function) = gausshermite_expectation(f, std_norm_mean, std_norm_sig, 10)
# Calculate implied state variable(s)
function crw_endo_states(m, zₜ, zₜ₋₁, c_impl)
# rₜ, yₜ are exogenous while Nₜ = exp(rₜ) * Aₜ₋₁ + Yₜ is entirely pre-determined.
# Thus, our implied state variable will be foreign asset Aₜ = Nₜ - Cₜ.
# zₜ₋₁ may be the previous period's implied state, so we start from there
# to calculate Aₜ₋₁.
yₜ₋₁ = m.y + m.Ψ * (zₜ₋₁ - m.z) # Calculate implied jump variables last period
Cₜ₋₁ = exp(yₜ₋₁[1]) # to get the implied consumption last period.
Aₜ₋₁ = zₜ₋₁[1] - Cₜ₋₁ # Given that consumption, we compute implied foreign assets yesterday.
Nₜ = exp(zₜ[2]) * Aₜ₋₁ + exp(zₜ[3]) # Now we can get implied resources available today.
return vcat(zₜ, Nₜ - exp(c_impl)) # This gives us implied foreign assets today, along with other state variables
end
# Load draws from bivariate standard normal
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "crw_shocks.jld2"), "r")["shocks"]
@testset "Calculate Euler Equation Errors using Gauss-Hermite quadrature" begin
# Calculate Euler Equation errors
out1 = out2 = out3 = out4 = out5 = NaN
for i in 1:100
out1, out2, out3, out4, out5 = try
abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature; c_init = m.y[1] * 1.1)),
abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, m.z * 1.1; c_init = m.y[1] * 1.1)),
abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, m.z * 1.1;
c_init = m.y[1] * 1.1, method = :newton)),
abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks,
summary_statistic = x -> norm(x, Inf))),
abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks,
summary_statistic = x -> norm(x, 2)))
catch e
NaN, NaN, NaN, NaN, NaN
end
if !isnan(out1)
break
end
if i == 100
out1, out2, out3, out4, out5 = abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature;
c_init = m.y[1] * 1.1)),
abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, m.z * 1.1; c_init = m.y[1] * 1.1)),
abs.(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, m.z * 1.1;
c_init = m.y[1] * 1.1, method = :newton)),
abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks,
summary_statistic = x -> norm(x, Inf))),
abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks,
summary_statistic = x -> norm(x, 2)))
end
end
@test out1 < 1e-10
@test out2 < 1e-3
@test out3 < 1e-3
@test out4 < .6
@test out5 < .6
out6 = euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks,
return_soln = true)
_states, _ = simulate(m, size(shocks, 2), shocks, m.z)
_impl_c = Vector{eltype(_states)}(undef, size(_states, 2))
for i in 1:length(_impl_c)
_impl_c[i] = crw_cₜ(m, (@view _states[:, i]))
end
@test abs(maximum((out6 - _impl_c) ./ out6)) < .6
c_ral, c_impl, endo_states_ral, endo_states_impl =
dynamic_euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, crw_endo_states, 1, shocks;
return_soln = true)
c_err, endo_states_err = dynamic_euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature,
crw_endo_states, 1, shocks; return_soln = false)
@test_throws DimensionMismatch dynamic_euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature,
crw_endo_states, 0, shocks; return_soln = false)
@test c_err < 2e-5
@test endo_states_err < 1e-3
@test c_err == norm((c_ral - c_impl) ./ c_ral, Inf)
@test endo_states_err == norm(vec(endo_states_ral - endo_states_impl) ./ vec(endo_states_ral), Inf)
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1693 | using RiskAdjustedLinearizations, FastGaussQuadrature, Test
@testset "Gauss-Hermite Quadrature for Expectations of Functions of Independent Normally Distributed Random Variables/Vectors" begin
f(x) = x # calculate the expected value
g(x) = 1. # calculate the probability
ϵᵢ, wᵢ = RiskAdjustedLinearizations.standard_normal_gausshermite(3)
true_eps, true_wts = gausshermite(3)
@test ϵᵢ == true_eps .* sqrt(2.)
@test wᵢ == true_wts ./ sqrt(π)
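# The scaling follows from the change of variables x = μ + √2 σ u in
# 𝔼[f(X)] = ∫ f(x) exp(-(x - μ)² / (2σ²)) / (σ√(2π)) dx ≈ Σᵢ (wᵢ / √π) f(μ + √2 σ uᵢ),
# where (uᵢ, wᵢ) are the raw Gauss-Hermite nodes and weights returned by gausshermite.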
mean51 = gausshermite_expectation(f, 5., 1., 5)
mean01 = gausshermite_expectation(f, 0., 1., 5)
mean53 = gausshermite_expectation(f, 5., 3., 5)
mean03 = gausshermite_expectation(f, 0., 3., 5)
prob = gausshermite_expectation(g, -5., .1)
@test mean51 ≈ 5.
@test mean53 ≈ 5.
@test isapprox(mean01, 0., atol = 1e-14)
@test isapprox(mean03, 0., atol = 1e-14)
@test prob ≈ 1.
h1(x) = x[1]
h2(x) = x[2]
prob1 = gausshermite_expectation(g, [.5, 5.], [1., 1.], 5)
mean11 = gausshermite_expectation(h1, [.5, 5.], [1., 1.], 5)
mean21 = gausshermite_expectation(h2, [.5, 5.], [1., 1.], 5)
prob2 = gausshermite_expectation(g, [5., -1.], [1., 1.], (5, 5))
mean12 = gausshermite_expectation(h1, [.5, 5.], [1., 1.], (5, 5))
mean22 = gausshermite_expectation(h2, [.5, 5.], [1., 1.], (5, 5))
prob3 = gausshermite_expectation(g, [5., -1., 2.], [1., 1., 2.], [5, 5, 3])
mean13 = gausshermite_expectation(h1, [.5, 5., 2.], [1., 1., 1.], [5, 5, 3])
mean23 = gausshermite_expectation(h2, [.5, 5., 2.], [1., 1., 1.], [5, 5, 3])
@test prob1 ≈ prob2 ≈ prob3 ≈ 1.
@test mean11 ≈ mean12 ≈ mean13 ≈ .5
@test mean21 ≈ mean22 ≈ mean23 ≈ 5.
end
nothing
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1935 | using RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "crw", "crw.jl"))
# Solve model
m_crw = CoeurdacierReyWinant()
m = crw(m_crw)
try
solve!(m, m.z, m.y, m.Ψ; algorithm = :homotopy, step = .5, verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_sss.jld2"), "r")
update!(m, sssout["z_rss"], sssout["y_rss"], sssout["Psi_rss"])
end
# Verify impulse responses with a zero shock is the same as simulate with no shocks
horizon = 100
@testset "Calculate impulse responses for Coeurdacier et al. (2011)" begin
# No shocks and start from steady state
state1, jump1 = simulate(m, horizon)
state2, jump2 = impulse_responses(m, horizon, 1, 0.; deviations = false)
@test state1 ≈ state2
@test jump1 ≈ jump2
@test state1 ≈ repeat(m.z, 1, horizon)
@test jump1 ≈ repeat(m.y, 1, horizon)
# No shocks but perturb away from steady state
state1, jump1 = simulate(m, horizon, 1.01 * m.z)
state2, jump2 = impulse_responses(m, horizon, 1, 0., 1.01 * m.z; deviations = false)
@test state1 ≈ state2
@test jump1 ≈ jump2
@test !(state1[:, 2] ≈ m.z)
@test !(jump1[:, 2] ≈ m.y)
# Now with shocks, from steady state
shocks = zeros(2, horizon)
shocks[1] = -3.
state1, jump1 = impulse_responses(m, horizon, 1, -3.; deviations = false)
state2, jump2 = impulse_responses(m, horizon, 1, -3., m.z; deviations = false)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = impulse_responses(m, horizon, 1, -3.; deviations = true)
state5, jump5 = impulse_responses(m, horizon, 1, -3., m.z; deviations = true)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4 .+ m.z
@test state1 ≈ state5 .+ m.z
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4 .+ m.y
@test jump1 ≈ jump5 .+ m.y
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2261 | using RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "rbc_cc", "rbc_cc.jl"))
# Solve model
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc)
try
solve!(m, m.z, m.y; verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_iterative_output.jld2"), "r")
update!(m, sssout["z_rss"], sssout["y_rss"], sssout["Psi_rss"])
end
# Verify impulse responses with a zero shock is the same as simulate with no shocks
horizon = 100
@testset "Calculate impulse responses for an RBC Model w/Campbell-Cochrane Habits" begin
# No shocks and start from steady state
state1, jump1 = simulate(m, horizon)
state2, jump2 = impulse_responses(m, horizon, 1, 0.; deviations = false)
@test state1 ≈ state2
@test jump1 ≈ jump2
@test state1 ≈ repeat(m.z, 1, horizon)
@test jump1 ≈ repeat(m.y, 1, horizon)
# No shocks but perturb away from steady state
state1, jump1 = simulate(m, horizon, 1.01 * m.z)
state2, jump2 = impulse_responses(m, horizon, 1, 0., 1.01 * m.z; deviations = false)
@test state1 ≈ state2
@test jump1 ≈ jump2
@test !(state1[:, 2] ≈ m.z)
@test !(jump1[:, 2] ≈ m.y)
# Now with shocks, from steady state
shocks = zeros(1, horizon)
shocks[1] = -3.
state1, jump1 = impulse_responses(m, horizon, 1, -3.; deviations = false)
state2, jump2 = impulse_responses(m, horizon, 1, -3., m.z; deviations = false)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = impulse_responses(m, horizon, 1, -3.; deviations = true)
state5, jump5 = impulse_responses(m, horizon, 1, -3., m.z; deviations = true)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4 .+ m.z
@test state1 ≈ state5 .+ m.z
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4 .+ m.y
@test jump1 ≈ jump5 .+ m.y
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2335 | using RiskAdjustedLinearizations, Test, LinearAlgebra
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Solve model
m_wachter = WachterDisasterRisk()
m = inplace_wachter_disaster_risk(m_wachter)
try
solve!(m, m.z, m.y; verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "iterative_sss_output.jld2"), "r")
update!(m, sssout["z"], sssout["y"], sssout["Psi"])
end
# Verify impulse responses with a zero shock is the same as simulate with no shocks
horizon = 100
@testset "Calculate impulse responses for Wachter (2013)" begin
# No shocks and start from steady state
state1, jump1 = simulate(m, horizon)
state2, jump2 = impulse_responses(m, horizon, 1, 0.; deviations = false)
state3, jump3 = impulse_responses(m, horizon, 2, 0.; deviations = false)
state4, jump4 = impulse_responses(m, horizon, 3, 0.; deviations = false)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4
@test state1 ≈ repeat(m.z, 1, horizon)
@test jump1 ≈ repeat(m.y, 1, horizon)
# No shocks but perturb away from steady state
state1, jump1 = simulate(m, horizon, 1.01 * m.z)
state2, jump2 = impulse_responses(m, horizon, 1, 0., 1.01 * m.z; deviations = false)
state3, jump3 = impulse_responses(m, horizon, 2, 0., 1.01 * m.z; deviations = false)
state4, jump4 = impulse_responses(m, horizon, 3, 0., 1.01 * m.z; deviations = false)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4
@test !(state1[:, 2] ≈ m.z)
@test !(jump1[:, 2] ≈ m.y)
# Now with shocks, from steady state
shocks = zeros(3, horizon)
shocks[2] = -3.
state1, jump1 = impulse_responses(m, horizon, 2, -3.; deviations = false)
state2, jump2 = impulse_responses(m, horizon, 2, -3., m.z; deviations = false)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = impulse_responses(m, horizon, 1, -3; deviations = false)
@test state1 ≈ state2
@test state1 ≈ state3
@test !(state1 ≈ state4)
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test !(jump1 ≈ jump4)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2010 | using RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "crw", "crw.jl"))
# Solve model
m_crw = CoeurdacierReyWinant()
m = crw(m_crw)
try
solve!(m, m.z, m.y, m.Ψ; algorithm = :homotopy, step = .5, verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_sss.jld2"), "r")
update!(m, sssout["z_rss"], sssout["y_rss"], sssout["Psi_rss"])
end
# Simulate with no shocks
horizon = 100
zero_shocks = zeros(2, horizon) # 100 fake draws
@testset "Simulate Coeurdacier et al. (2011) with no shocks" begin
state1, jump1 = simulate(m, horizon)
state2, jump2 = simulate(m, horizon, m.z)
state3, jump3 = simulate(m, horizon, zero_shocks)
state4, jump4 = simulate(m, horizon, zero_shocks, m.z)
state5, jump5 = simulate(m, horizon, 1.01 * m.z) # perturb away from steady state
state6, jump6 = simulate(m, horizon, zero_shocks, 1.01 * m.z)
state7, jump7 = simulate(m, vec(zero_shocks[:, 1]), 1.01 * m.z)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4
@test state5 ≈ state6
@test jump5 ≈ jump6
@test state1 ≈ repeat(m.z, 1, horizon) # check state1 remains at steady state
@test jump1 ≈ repeat(m.y, 1, horizon)
@test state7 ≈ state6[:, 1]
@test jump7 ≈ jump6[:, 1]
end
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "crw_shocks.jld2"), "r")["shocks"] # 100 draws from a bivariate standard normal
@testset "Simulate Coeurdacier et al. (2011) with shocks" begin
state1, jump1 = simulate(m, horizon)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = simulate(m, horizon, shocks, m.z)
state5, jump5 = simulate(m, shocks[:, 1], m.z)
@test state3 ≈ state4
@test jump3 ≈ jump4
@test !(state1 ≈ state4)
@test !(jump1 ≈ jump4)
@test state5 ≈ state4[:, 1]
@test jump5 ≈ jump4[:, 1]
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2032 | using RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "rbc_cc", "rbc_cc.jl"))
# Solve model
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc)
try
solve!(m, m.z, m.y; verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "rbccc_sss_iterative_output.jld2"), "r")
update!(m, sssout["z_rss"], sssout["y_rss"], sssout["Psi_rss"])
end
# Simulate with no shocks
horizon = 100
zero_shocks = zeros(1, horizon) # 100 fake draws
@testset "Simulate an RBC Model w/Campbell-Cochrane Habits with no shocks" begin
state1, jump1 = simulate(m, horizon)
state2, jump2 = simulate(m, horizon, m.z)
state3, jump3 = simulate(m, horizon, zero_shocks)
state4, jump4 = simulate(m, horizon, zero_shocks, m.z)
state5, jump5 = simulate(m, horizon, 1.01 * m.z) # perturb away from steady state
state6, jump6 = simulate(m, horizon, zero_shocks, 1.01 * m.z)
state7, jump7 = simulate(m, vec(zero_shocks[:, 1]), 1.01 * m.z)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4
@test state5 ≈ state6
@test jump5 ≈ jump6
@test state1 ≈ repeat(m.z, 1, horizon) # check state1 remains at steady state
@test jump1 ≈ repeat(m.y, 1, horizon)
@test state7 ≈ state6[:, 1]
@test jump7 ≈ jump6[:, 1]
end
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "rbc_cc_shocks.jld2"), "r")["shocks"] # 100 draws from a standard normal
@testset "Simulate an RBC Model w/Campbell-Cochrane Habits with shocks" begin
state1, jump1 = simulate(m, horizon)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = simulate(m, horizon, shocks, m.z)
state5, jump5 = simulate(m, shocks[:, 1], m.z)
@test state3 ≈ state4
@test jump3 ≈ jump4
@test !(state1 ≈ state4)
@test !(jump1 ≈ jump4)
@test state5 ≈ state4[:, 1]
@test jump5 ≈ jump4[:, 1]
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 2073 | using RiskAdjustedLinearizations, Test, JLD2
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "wachter_disaster_risk", "wachter.jl"))
# Solve model
m_wachter = WachterDisasterRisk()
m = inplace_wachter_disaster_risk(m_wachter)
try
solve!(m, m.z, m.y; verbose = :none)
catch e
local sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "iterative_sss_output.jld2"), "r")
update!(m, sssout["z"], sssout["y"], sssout["Psi"])
end
# Simulate with no shocks
horizon = 100
zero_shocks = zeros(3, horizon) # 100 fake draws
@testset "Simulate Wachter (2013) with no shocks" begin
state1, jump1 = simulate(m, horizon)
state2, jump2 = simulate(m, horizon, m.z)
state3, jump3 = simulate(m, horizon, zero_shocks)
state4, jump4 = simulate(m, horizon, zero_shocks, m.z)
state5, jump5 = simulate(m, horizon, 1.01 * m.z) # perturb away from steady state
state6, jump6 = simulate(m, horizon, zero_shocks, 1.01 * m.z)
state7, jump7 = simulate(m, vec(zero_shocks[:, 1]), 1.01 * m.z)
@test state1 ≈ state2
@test state1 ≈ state3
@test state1 ≈ state4
@test jump1 ≈ jump2
@test jump1 ≈ jump3
@test jump1 ≈ jump4
@test state5 ≈ state6
@test jump5 ≈ jump6
@test state1 ≈ repeat(m.z, 1, horizon) # check state1 remains at steady state
@test jump1 ≈ repeat(m.y, 1, horizon)
@test state7 ≈ state6[:, 1]
@test jump7 ≈ jump6[:, 1]
end
# 100 draws from a standard multivariate normal, technically not the right distribution but it's fine
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "wachter_shocks.jld2"), "r")["shocks"]
@testset "Simulate Wachter (2013) with shocks" begin
state1, jump1 = simulate(m, horizon)
state3, jump3 = simulate(m, horizon, shocks)
state4, jump4 = simulate(m, horizon, shocks, m.z)
state5, jump5 = simulate(m, shocks[:, 1], m.z)
@test state3 ≈ state4
@test jump3 ≈ jump4
@test !(state1 ≈ state4)
@test !(jump1 ≈ jump4)
@test state5 ≈ state4[:, 1]
@test jump5 ≈ jump4[:, 1]
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 1405 | # This script checks sparse arrays can be used as caches for Γ₅, Γ₆, Λ_sss, and Σ_sss
using BenchmarkTools, RiskAdjustedLinearizations, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "rbc_cc", "rbc_cc.jl"))
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "crw", "crw.jl"))
@testset "Sparse caching for Γ₅, Γ₆, Λ, and Σ (RBC-CC)" begin
m_rbc_cc = RBCCampbellCochraneHabits()
m = rbc_cc(m_rbc_cc, 0; sparse_arrays = true)
@test issparse(m[:Γ₅])
@test issparse(m[:Γ₆])
@test issparse(m[:Λ_sss])
@test issparse(m[:Σ_sss])
# Solve!
solve!(m; algorithm = :relaxation, verbose = :none)
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "rbccc_sss_iterative_output.jld2"), "r")
@test isapprox(sssout["z_rss"], m.z, atol=1e-4)
@test isapprox(sssout["y_rss"], m.y, atol=1e-4)
@test isapprox(sssout["Psi_rss"], m.Ψ, atol=1e-4)
end
@testset "Sparse caching for Γ₅, Γ₆, Λ, and Σ with jump-dependent shock matrices (CRW)" begin
m_crw = CoeurdacierReyWinant()
m = crw(m_crw; sparse_arrays = true)
solve!(m; algorithm = :homotopy, step = .5, verbose = :none)
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "crw_sss.jld2"), "r")
@test isapprox(sssout["z_rss"], m.z, atol=1e-4)
@test isapprox(sssout["y_rss"], m.y, atol=1e-4)
@test isapprox(sssout["Psi_rss"], m.Ψ, atol=1e-4)
end
| RiskAdjustedLinearizations | https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl.git |
|
[
"MIT"
] | 1.0.1 | ef647b0c9518902bb5d6867fdec1219d1e14ec92 | code | 9359 | using RiskAdjustedLinearizations, SparseArrays, SparseDiffTools, Test
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "rbc_cc", "rbc_cc.jl"))
include(joinpath(dirname(@__FILE__), "..", "..", "examples", "crw", "crw.jl"))
# Set up
n_strips = 0
m_rbc_cc = RBCCampbellCochraneHabits()
m_crw = CoeurdacierReyWinant()
# Test sparse Jacobians on RBC-CC
m_dense = rbc_cc(m_rbc_cc, n_strips)
z0 = copy(m_dense.z)
y0 = copy(m_dense.y)
Ψ0 = copy(m_dense.Ψ)
m = rbc_cc(m_rbc_cc, n_strips; sparse_jacobian = [:μ, :ξ, :𝒱])
@testset "Construct a RiskAdjustedLinearization that exploits sparsity in Jacobians (using RBC-CC)" begin
@test isempty(m_dense.linearization.sparse_jac_caches)
@test m.z ≈ z0
@test m.y ≈ y0
@test m.Ψ ≈ Ψ0
for k in [:μz, :μy, :ξz, :ξy, :J𝒱]
@test haskey(m.linearization.sparse_jac_caches, k)
if k != :J𝒱
@test issparse(m.linearization.sparse_jac_caches[k][:sparsity])
@test isa(m.linearization.sparse_jac_caches[k][:colorvec], AbstractVector{Int})
end
end
@test m.linearization.sparse_jac_caches[:J𝒱][:colorvec] == 1:2
@test m.linearization.sparse_jac_caches[:J𝒱][:sparsity] == ones(size(m.Ψ))
end
@testset "Update a RiskAdjustedLinearization to exploit sparsity in Jacobians (using RBC-CC)" begin
update_sparsity_pattern!(m_dense, :𝒱)
for k in [:μz, :μy, :ξz, :ξy]
@test !haskey(m_dense.linearization.sparse_jac_caches, k)
end
@test m_dense.linearization.sparse_jac_caches[:J𝒱][:colorvec] == 1:2
@test m_dense.linearization.sparse_jac_caches[:J𝒱][:sparsity] == ones(size(m.Ψ))
update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱])
for k in [:μz, :μy, :ξz, :ξy]
@test haskey(m_dense.linearization.sparse_jac_caches, k)
@test issparse(m_dense.linearization.sparse_jac_caches[k][:sparsity])
@test isa(m_dense.linearization.sparse_jac_caches[k][:colorvec], AbstractVector{Int})
end
@test_broken update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱]; sparsity_detection = true)
end
@testset "Calculate risk-adjusted linearization with sparse autodiff (using RBC-CC)" begin
# Now provide the sparsity pattern and matrix coloring vector
# to update the Jacobians of objects
rbc_cc_out = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference", "rbccc_sss_iterative_output.jld2"), "r")
m_dense = rbc_cc(m_rbc_cc, n_strips) # recompute to get dense Jacobians again
update!(m_dense, vec(rbc_cc_out["z_rss"]), vec(rbc_cc_out["y_rss"]), rbc_cc_out["Psi_rss"])
ztrue = copy(m_dense.z)
ytrue = copy(m_dense.y)
Ψtrue = copy(m_dense.Ψ)
sparsity = Dict{Symbol, SparseMatrixCSC{Float64, Int64}}()
colorvec = Dict{Symbol, Vector{Int64}}()
sparsity[:μz] = sparse(m_dense[:Γ₁])
sparsity[:μy] = sparse(m_dense[:Γ₂])
sparsity[:ξz] = sparse(m_dense[:Γ₃])
sparsity[:ξy] = sparse(m_dense[:Γ₄])
sparsity[:J𝒱] = sparse(m_dense[:JV])
for (k, v) in sparsity
colorvec[k] = matrix_colors(v)
end
# Check updating dense Jacobians works
update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱])
try # prone to weird non-deterministic behavior in nlsolve
solve!(m_dense, ztrue * 1.005, ytrue * 1.005, Ψtrue * 1.005; algorithm = :relaxation,
ftol = 1e-6, tol = 1e-5, verbose = :none)
@test norm(steady_state_errors(m_dense), Inf) < 1e-4
catch e
println("Updating dense Jacobian with sparse Jacobian methods did not pass")
end
@test_broken update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱]; sparsity_detection = true)
# Check updating sparse Jacobians w/new patterns works
update_sparsity_pattern!(m, :𝒱; sparsity = sparsity,
colorvec = colorvec)
try # prone to weird non-deterministic behavior in nlsolve
solve!(m, ztrue * 1.005, ytrue * 1.005, Ψtrue * 1.005; algorithm = :relaxation,
ftol = 1e-6, tol = 1e-5, verbose = :none)
@test norm(steady_state_errors(m), Inf) < 1e-4
catch e
println("Updating sparsity pattern of 𝒱 for an RAL w/sparse methods did not pass")
end
update_sparsity_pattern!(m, [:μ, :ξ, :𝒱]; sparsity = sparsity)
update_sparsity_pattern!(m, [:μ, :ξ, :𝒱]; sparsity = sparsity,
colorvec = colorvec)
try # prone to weird non-deterministic behavior in nlsolve
solve!(m, ztrue * 1.005, ytrue * 1.005, Ψtrue * 1.005; algorithm = :relaxation,
ftol = 1e-6, tol = 1e-5, verbose = :none)
@test norm(steady_state_errors(m), Inf) < 1e-4
catch e
println("Updating sparsity pattern of μ, ξ, and 𝒱 for an RAL w/sparse methods did not pass")
end
close(rbc_cc_out)
# caching appears to be failing somehow; the caches of μ, ξ, and 𝒱 are being set to NaN unexpectedly
@test_broken solve!(m, ztrue * 1.005, ytrue * 1.005, Ψtrue * 1.005; algorithm = :homotopy, verbose = :none)
#=
@test m.z ≈ m_dense.z atol=1e-6
@test m.y ≈ m_dense.y atol=1e-6
@test m.Ψ ≈ m_dense.Ψ atol=1e-6
=#
end
# Test sparse Jacobians on CRW
m_dense = crw(m_crw)
z0 = copy(m_dense.z)
y0 = copy(m_dense.y)
Ψ0 = copy(m_dense.Ψ)
m = crw(m_crw; Ψ = zero(Ψ0), sparse_jacobian = [:μ, :ξ, :𝒱])
m_dense.Ψ .= 0.
@testset "Construct a RiskAdjustedLinearization that exploits sparsity in Jacobians (using CRW)" begin
@test isempty(m_dense.linearization.sparse_jac_caches)
@test m.z ≈ z0
@test m.y ≈ y0
for k in [:μz, :μy, :ξz, :ξy, :J𝒱]
@test haskey(m.linearization.sparse_jac_caches, k)
if k != :J𝒱
@test issparse(m.linearization.sparse_jac_caches[k][:sparsity])
@test isa(m.linearization.sparse_jac_caches[k][:colorvec], AbstractVector{Int})
end
end
@test m.linearization.sparse_jac_caches[:J𝒱][:colorvec] == 1:3
@test m.linearization.sparse_jac_caches[:J𝒱][:sparsity] == ones(size(m.Ψ))
end
@testset "Update a RiskAdjustedLinearization to exploit sparsity in Jacobians (using CRW)" begin
update_sparsity_pattern!(m_dense, :𝒱)
for k in [:μz, :μy, :ξz, :ξy]
@test !haskey(m_dense.linearization.sparse_jac_caches, k)
end
@test m_dense.linearization.sparse_jac_caches[:J𝒱][:colorvec] == 1:3
@test m_dense.linearization.sparse_jac_caches[:J𝒱][:sparsity] == ones(size(m.Ψ))
update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱])
for k in [:μz, :μy]
@test haskey(m_dense.linearization.sparse_jac_caches, k)
@test issparse(m_dense.linearization.sparse_jac_caches[k][:sparsity])
@test isa(m_dense.linearization.sparse_jac_caches[k][:colorvec], AbstractVector{Int})
end
@test_broken update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱]; sparsity_detection = true)
end
@testset "Calculate risk-adjusted linearization with sparse autodiff (using CRW)" begin
# Now provide the sparsity pattern and matrix coloring vector
# to update the Jacobians of objects
m_dense = crw(m_crw) # recompute to get dense Jacobians again
crw_out = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "reference/crw_sss.jld2"), "r")
update!(m_dense, vec(crw_out["z_rss"]), vec(crw_out["y_rss"]), copy(crw_out["Psi_rss"]))
ztrue = copy(m_dense.z)
ytrue = copy(m_dense.y)
Ψtrue = copy(m_dense.Ψ)
sparsity = Dict{Symbol, SparseMatrixCSC{Float64, Int64}}()
colorvec = Dict{Symbol, Vector{Int64}}()
sparsity[:μz] = sparse(m_dense[:Γ₁])
sparsity[:μy] = sparse(m_dense[:Γ₂])
sparsity[:ξz] = sparse(ones(size(m_dense[:Γ₃])))
sparsity[:ξy] = sparse(m_dense[:Γ₄])
sparsity[:J𝒱] = sparse(m_dense[:JV])
for (k, v) in sparsity
if k == :ξz
colorvec[k] = 1:3
elseif k == :J𝒱
colorvec[k] = ones(Int64, 3)
else
colorvec[k] = matrix_colors(v)
end
end
# Check updating dense Jacobians works
update_sparsity_pattern!(m_dense, [:μ, :ξ])
try
solve!(m_dense, ztrue, ytrue, Ψtrue; algorithm = :relaxation, ftol = 5e-4, tol = 1e-3, verbose = :none)
@test norm(steady_state_errors(m_dense), Inf) < 1e-3
catch e
println("Updating dense Jacobian with sparse Jacobian methods did not pass")
end
# Check updating sparse Jacobians w/new patterns works
update_sparsity_pattern!(m, :𝒱; sparsity = sparsity,
colorvec = colorvec)
try
solve!(m, ztrue, ytrue, Ψtrue; algorithm = :relaxation, ftol = 5e-4, tol = 1e-3, verbose = :none)
@test norm(steady_state_errors(m), Inf) < 1e-3
catch e
println("Updating sparsity pattern of 𝒱 for an RAL w/sparse methods did not pass")
end
update_sparsity_pattern!(m, [:μ, :ξ]; sparsity = sparsity) # don't do for 𝒱 b/c its Jacobian is all zeros
update_sparsity_pattern!(m, [:μ, :ξ, :𝒱]; sparsity = sparsity,
colorvec = colorvec)
try
solve!(m, ztrue, ytrue, Ψtrue; algorithm = :relaxation, ftol = 5e-4, tol = 1e-3, verbose = :none)
@test norm(steady_state_errors(m), Inf) < 1e-3
catch e
println("Updating sparsity pattern of μ, ξ, and 𝒱 for an RAL w/sparse methods did not pass")
end
@test_broken update_sparsity_pattern!(m_dense, [:μ, :ξ, :𝒱]; sparsity_detection = true)
@test_broken solve!(m, ztrue, ytrue, Ψtrue; algorithm = :homotopy, ftol = 5e-4, tol = 1e-3, verbose = :none)
end
# RiskAdjustedLinearizations.jl
This package implements [Lopez et al. (2018) "Risk-Adjusted Linearizations of Dynamic Equilibrium Models"](https://ideas.repec.org/p/bfr/banfra/702.html) in Julia. The [original companion code](https://github.com/fvazquezgrande/gen_affine) for the paper implements the method using MATLAB's Symbolic Math Toolbox. RiskAdjustedLinearizations.jl takes advantage of Julia's speed and flexibility so that the method can be used for solving and estimating large-scale Dynamic Stochastic General Equilibrium (DSGE) models.
Timing tests indicate that this package is significantly faster than the original MATLAB code.
As examples, run the [wac_disaster.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/matlab_timing_test/wac_disaster.jl) or [rbc_cc.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/matlab_timing_test/rbc_cc.jl) scripts, which assess how long it takes to calculate a risk-adjusted linearization using the two numerical algorithms
implemented by this package and by the original authors.
The relaxation algorithm in Julia is around 50x-100x faster, while the homotopy algorithm in Julia is 3x-4x faster.
## Installation
```julia
pkg> add RiskAdjustedLinearizations
```
The package is compatible with Julia `1.x` and is tested on Linux and Windows. The package should also be compatible with MacOS.
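As a quick orientation, here is a minimal sketch of typical usage, mirroring the workflow described in the documentation; `μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε` stand for the user-defined functions, matrices, and initial guesses explained there.
```julia
using RiskAdjustedLinearizations

# Wrap the model's nonlinear equations and initial guesses.
ral = RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)

# Solve for the stochastic steady state, e.g. with the relaxation algorithm.
solve!(ral; algorithm = :relaxation)
```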
## Future Development
Please see the [issues](https://github.com/chenwilliam77/RiskAdjustedLinearizations/issues) for additional features planned for implementation.
# [Caching](@id caching)
If users create a `RiskAdjustedLinearization` with the constructor
```
RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
```
where `μ, Λ, Σ, ξ, ccgf` are either in-place or out-of-place functions, then
we use the wrapper types `RALF1` and `RALF2` to convert these
functions to non-allocating ones. The implementation of these wrappers is similar to the
implementation of `LinSolveFactorize` in
[DifferentialEquations.jl](https://diffeq.sciml.ai/stable/features/linear_nonlinear/#Implementing-Your-Own-LinSolve:-How-LinSolveFactorize-Was-Created).
See [Automated Caching via `RALF1` and `RALF2`](@ref ralfwrappers) for further details.
Unlike `LinSolveFactorize`, however, we need to be able to automatically differentiate with `RALF1` and `RALF2`
so the cache needs to be handled more carefully. To do this, we
utilize and extend the `DiffCache` type implemented by [DiffEqBase.jl](https://github.com/SciML/DiffEqBase.jl).
## [`TwoDiffCache` and `ThreeDiffCache`](@id new cache types)
The idea for `DiffCache` is that you need two caches, one for Dual numbers when applying automatic differentiation
and one for the subtype of `Number` used for the actual array (e.g. Float64). For the ``\Lambda`` and ``\Sigma`` functions,
this type works because they are functions of one input variable. The functions
``\mu_z``, ``\mu_y``, ``\xi_z``, and ``\xi_y`` also can use `DiffCache` once it is extended to work for functions with
two input variables (e.g. the chunk size should depend on the length of both input variables).
However, for the ``\mu``, ``\xi``, and ``\mathcal{V}`` functions, we actually need multiple caches for Dual numbers
that differ in their chunk size. The reason is that not only do we need to be able to evaluate them with Dual numbers but we also
need to apply automatic differentiation to calculate their Jacobians. Because all three of these functions take two input variables,
the chunk size for the cache used to evaluate the functions themselves will be different from the cache
used to calculate the Jacobians, which occur with respect to only one of the input variables.
*Note that, by default, the cache is initialized with undefined values if arrays are dense
and zeros if arrays are sparse.*
## [Automated Caching via `RALF1`, `RALF2`, `RALF3`, and `RALF4` Wrappers](@id ralfwrappers)
The `RALF1` type applies to functions with one input variable (``\Lambda`` and ``\Sigma``),
`RALF2` to functions with 2 input variables (e.g. ``\mu``, ``\mu_z``), and so on.
The way these wrappers work is that
they take a user-defined function `f` and convert it to a new in-place function whose first input argument
is a cache, which is a `DiffCache`, `TwoDiffCache`, or a `ThreeDiffCache`.
The `RALF1`, `RALF2`, `RALF3`, and `RALF4` types are callable in the same way `LinSolveFactorize` is.
For `RALF2`, the syntax `(x::RALF2)(x1, x2)` on its own would not work, however, because (1) it is not clear
which input should be used to infer whether or not to use a Dual cache and (2) there
are potentially multiple Dual caches. To get around this problem, we add an optional third argument
named `select`, which is a `Tuple{Int, Int}`. The first element specifies which input argument
to use for inferring whether a Dual cache is needed, and the second element specifies which
cache to use. By default, `select = (1, 1)`. This approach is the same for `RALF3` and `RALF4`.
The types `RALF3` and `RALF4` are only relevant if ``\Lambda`` and ``\Sigma`` depend on jump variables.
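As an illustration, consider a hypothetical `RALF2` object `xi` wrapping a user's ``\xi`` function (this sketch is not from the package's documentation, and the cache indices below are purely illustrative):
```
out = xi(z, y)          # uses the default select = (1, 1)
out = xi(z, y, (2, 2))  # infer Dual-ness from the second input and use the second cache
```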
# [Diagnostics](@id diagnostics)
To assess the quality of a risk-adjusted linearization, diagnostic tests should be run. In particular,
as Lopez et al. (2018) discuss at length, whenever forward difference equations arise
(e.g. the equation for the log wealth-consumption ratio in our implementation of Wachter (2013)),
there are infinitely many ways to write the expectational equations. Assuming computational costs
do not become too significant, users should add as many expectational equations as needed
to maximize the accuracy of the risk-adjusted linearization.
The best accuracy test is comparing the risk-adjusted linearization to the true nonlinear solution,
but this requires the true solution to be available. In many cases (e.g. high dimensions),
neither analytical nor numerical methods can deliver the true
solution. To address this problem, economists have developed a variety of accuracy tests that
only involve the chosen approximation method and quadrature.
The most popular diagnostics revolve around the Euler equation. RiskAdjustedLinearizations.jl implements wrapper functions for
performing two of these Euler equation diagnostics. The first is the so-called "Euler equation errors" test proposed
by [Judd (1992)](https://www.sciencedirect.com/science/article/abs/pii/002205319290061L). The second is
the so-called "dynamic Euler equation errors" test proposed by
[Den Haan (2009)](https://www.sciencedirect.com/science/article/abs/pii/S0165188909001298).
We defer the reader to these articles for explanations of the theory behind these tests.
A good set of slides on accuracy tests is [this deck by Den Haan](http://www.wouterdenhaan.com/numerical/slidesaccuracy.pdf).
The wrapper functions in RiskAdjustedLinearizations.jl are `euler_equation_error` and `dynamic_euler_equation_error`.
See the [Coeurdacier, Rey, and Winant (2011) script](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/crw/example_crw.jl)
for an example of how to use these functions.
```@docs
RiskAdjustedLinearizations.euler_equation_error
RiskAdjustedLinearizations.dynamic_euler_equation_error
```
To make running diagnostics even easier, we also provide user-friendly functions
for calculating Gauss-Hermite quadrature when shocks are Gaussian. Extensions
of Gauss-Hermite quadrature rules for non-Gaussian shocks (e.g. Poisson disaster risk)
should be straightforward to implement by mirroring the implementation
in RiskAdjustedLinearizations.jl.
```@docs
RiskAdjustedLinearizations.gausshermite_expectation
```
# [Example](@id example)
This example shows how to calculate the risk-adjusted linearization of the
discrete-time version of the [Wachter (2013)](http://finance.wharton.upenn.edu/~jwachter/research/Wachter2013jf.pdf)
model with disaster-risk. You can run this example using the script [examples/wachter\_disaster\_risk/example_wachter.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/wachter_disaster_risk/example_wachter.jl).
For the equivalent code in MATLAB provided by Lopez et al., see [here](https://github.com/fvazquezgrande/gen_affine/blob/main/examples/wac_disaster/genaffine_ezdis.m). See [List of Examples](@ref listexample) for short descriptions of and links to all examples in this package.
## Create a `RiskAdjustedLinearization`
### Define Nonlinear System
The user generally needs to define
- ``\mu``: expected state transition function
- ``\xi``: nonlinear terms of the expectational equations
- ccgf: conditional cumulant generating function of the exogenous shocks
- ``\Lambda``: function or matrix mapping endogenous risk into state transition equations
- ``\Sigma``: function or matrix mapping exogenous risk into state transition equations
- ``\Gamma_5``: coefficient matrix on one-period ahead expectation of state variables
- ``\Gamma_6``: coefficient matrix on one-period ahead expectation of jump variables
The quantities ``\mu``, ``\xi``, and ccgf are always functions. The quantities ``\Lambda`` and ``\Sigma`` can
either be functions or matrices. For example, in endowment economies like Wachter (2013), ``\Lambda`` is
the zero matrix since there is no endogenous risk. In other applications, ``\Sigma`` may not be state-dependent
and thus a constant matrix. The last two quantities ``\Gamma_5`` and ``\Gamma_6`` are always matrices.
These functions do not need to have type assertions for the inputs, but if the user wants to add type assertions,
then the types should be `AbstractVector{T}`, `AbstractMatrix{T}`, or `AbstractArray{T}` where `T` should
allow any subtypes of `Real` to permit automatic differentiation, e.g. `AbstractVector{T} where T <: Real`.
If more specific types than `AbstractVector`, etc., are used, then `RiskAdjustedLinearization` may not
work properly. For these reasons, it is recommended that type assertions are not added unless necessary.
In addition, you need to define initial guesses for the coefficients `z, y, Ψ` and specify the number of exogenous shocks `Nε`.
The initial guesses can be undefined if you don't want to use actual numbers yet, but
you will eventually need to provide guesses in order for the nonlinear solvers to work in
the numerical algorithms.
### Instantiate the object
Once you have the required quantities, simply call
```
ral = RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
```
### Example
The following code presents a function that defines the desired functions and matrices, given
the parameters for the model in Wachter (2013), and returns a `RiskAdjustedLinearization` object.
The code is from this script [examples/wachter\_disaster\_risk/wachter.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/wachter_disaster_risk/wachter.jl), which has examples for both in-place and out-of-place functions.
```
function inplace_wachter_disaster_risk(m::WachterDisasterRisk{T}) where {T <: Real}
@unpack μₐ, σₐ, ν, δ, ρₚ, pp, ϕₚ, ρ, γ, β = m
@assert ρ != 1. # Forcing ρ to be non-unit for this example
S = OrderedDict{Symbol, Int}(:p => 1, :εc => 2, :εξ => 3) # State variables
J = OrderedDict{Symbol, Int}(:vc => 1, :xc => 2, :rf => 3) # Jump variables
SH = OrderedDict{Symbol, Int}(:εₚ => 1, :εc => 2, :εξ => 3) # Exogenous shocks
Nz = length(S)
Ny = length(J)
Nε = length(SH)
function μ(F, z, y)
F_type = eltype(F)
F[S[:p]] = (1 - ρₚ) * pp + ρₚ * z[S[:p]]
F[S[:εc]] = zero(F_type)
F[S[:εξ]] = zero(F_type)
end
function ξ(F, z, y)
F[J[:vc]] = log(β) - γ * μₐ + γ * ν * z[S[:p]] - (ρ - γ) * y[J[:xc]] + y[J[:rf]]
F[J[:xc]] = log(1. - β + β * exp((1. - ρ) * y[J[:xc]])) - (1. - ρ) * y[J[:vc]]
F[J[:rf]] = (1. - γ) * (μₐ - ν * z[S[:p]] - y[J[:xc]])
end
Λ = zeros(T, Nz, Ny)
function Σ(F, z)
F_type = eltype(F)
F[SH[:εₚ], SH[:εₚ]] = sqrt(z[S[:p]]) * ϕₚ * σₐ
F[SH[:εc], SH[:εc]] = one(F_type)
F[SH[:εξ], SH[:εξ]] = one(F_type)
end
function ccgf(F, α, z)
F .= .5 .* α[:, 1].^2 + .5 * α[:, 2].^2 + (exp.(α[:, 3] + α[:, 3].^2 .* δ^2 ./ 2.) .- 1. - α[:, 3]) * z[S[:p]]
end
Γ₅ = zeros(T, Ny, Nz)
Γ₅[J[:vc], S[:εc]] = (-γ * σₐ)
Γ₅[J[:vc], S[:εξ]] = (γ * ν)
Γ₅[J[:rf], S[:εc]] = (1. - γ) * σₐ
Γ₅[J[:rf], S[:εξ]] = -(1. - γ) * ν
Γ₆ = zeros(T, Ny, Ny)
Γ₆[J[:vc], J[:vc]] = (ρ - γ)
Γ₆[J[:rf], J[:vc]] = (1. - γ)
z = [pp, 0., 0.]
xc_sss = log((1. - β) / (exp((1. - ρ) * (ν * pp - μₐ)) - β)) / (1. - ρ)
vc_sss = xc_sss + ν * pp - μₐ
y = [vc_sss, xc_sss, -log(β) + γ * (μₐ - ν * pp) - (ρ - γ) * (vc_sss - xc_sss)]
Ψ = zeros(T, Ny, Nz)
return RiskAdjustedLinearization(μ, Λ, Σ, ξ, Γ₅, Γ₆, ccgf, z, y, Ψ, Nε)
end
```
## Solve using a Newton-type Numerical Algorithm
To solve the model using the relaxation algorithm, just call
```
solve!(ral; algorithm = :relaxation)
```
This form of `solve!` uses the coefficients in `ral` as initial guesses. To specify
other initial guesses, call
```
solve!(ral, z0, y0, Ψ0; algorithm = :relaxation)
```
If you don't have a guess for ``\Psi``, then you can just provide guesses for ``z`` and ``y``:
```
solve!(ral, z0, y0; algorithm = :relaxation)
```
In this case, we calculate the deterministic steady state first using ``z`` and ``y``;
back out the implied ``\Psi``; and then proceed with the relaxation algorithm using
the deterministic steady state as the initial guess.
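Putting the pieces together, a hypothetical end-to-end run would look like the sketch below (it assumes the `WachterDisasterRisk` parameter type from the example script, and it assumes that the constructor supplies default parameter values):
```
# Assumes examples/wachter_disaster_risk/wachter.jl has been included.
m   = WachterDisasterRisk()             # hypothetical default constructor
ral = inplace_wachter_disaster_risk(m)  # build the RiskAdjustedLinearization
solve!(ral; algorithm = :relaxation)    # solve for (z, y, Ψ)
```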
## [Additional Examples](@ref listexample)
- [Wachter (2013)](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/blob/main/examples/wachter_disaster_risk/example_wachter.jl): discrete-time model with Epstein-Zin preferences and disaster risk (as a Poisson mixture of normals); demonstrates how to use both in-place and out-of-place functions with `RiskAdjustedLinearization`
- [Jermann (1998)/Chen (2017)](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/blob/main/examples/rbc_cc/example_rbc_cc.jl): RBC model with Campbell-Cochrane habits and multi-period approximation of forward difference equations
- [Textbook New Keynesian Model](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/blob/main/examples/textbook_nk/example_textbook_nk.jl): cashless limit of Gali (2015) textbook model with a simple Taylor rule. A Dynare script is also provided for comparison.
- [Coeurdacier, Rey, Winant (2011)](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/crw/example_crw.jl): small-open economy model whose deterministic steady state does not exist. Example also provides a tutorial on calculating Euler equation errors.
- [New Keynesian Model with Capital](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/blob/main/examples/textbook_nk/example_textbook_nk.jl): cashless limit of Gali (2015) textbook model with capital accumulation, capital adjustment costs, additional shocks, and a Taylor rule on both inflation and output growth. A Dynare script is also provided for comparison.
- [MATLAB Timing Test](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/matlab_timing_test): compare speed of Julia to MATLAB for Jermann (1998) and Wachter (2013) examples
# [RiskAdjustedLinearizations.jl](@id Home)
This package provides a user-friendly API for efficiently calculating risk-adjusted linearizations
of dynamic economic models. These linearizations are equivalent
to first-order perturbations around the stochastic steady state and are solved
by computing affine approximations.
## License
This content is released under the [MIT](https://opensource.org/licenses/MIT) License.
## Contents
```@contents
Pages = ["risk_adjusted_linearization.md", "sparse_arrays_jacs.md", "numerical_algorithms.md", "example.md", "caching.md", "diagnostics.md", "tips.md"]
```
# [Numerical Algorithms](@id numerical-algorithms)
To calculate the risk-adjusted linearization, we need to solve a system of nonlinear equations.
These equations are generally solvable using Newton-type methods. The package currently has two
available algorithms, [relaxation](@ref relaxation) and [homotopy continuation](@ref homotopy).
## `solve!`
The primary interface for calculating a risk-adjusted linearization once
a `RiskAdjustedLinearization` object is created is the function `solve!`.
The user selects the desired numerical algorithm through the `algorithm`
keyword of `solve!`.
All of the available algorithms need to solve a system of nonlinear
equations. We use `nlsolve` for this purpose, and all keyword arguments
for `nlsolve` can be passed as keyword arguments to `solve!`, e.g.
`autodiff` and `ftol`. The user can also exploit sparsity in the
Jacobian of the system of nonlinear equations to accelerate
`nlsolve` by using the keywords `sparse_jacobian`,
`sparsity`, `colorvec`, `jac_cache`, and/or `sparsity_detection`.
For details, see [Exploiting Sparsity](@ref sparsity-numerical-algo).
```@docs
RiskAdjustedLinearizations.solve!
```
## [Relaxation](@id relaxation)
The first and default numerical algorithm is a relaxation algorithm. The key problem in
solving the equations characterizing ``(z, y, \Psi)`` is that it is difficult to jointly solve the nonlinear matrix
equation for ``\Psi`` along with the steady-state equations for ``z`` and ``y`` due to the presence of the
entropy term. The relaxation algorithm splits the solution of these equations into two steps, which
allows us to calculate guesses of ``\Psi`` using linear algebra. It is in this sense that
this iterative algorithm is a relaxation algorithm.
The system of equations
characterizing the coefficients ``(z, y, \Psi)`` are solved iteratively in two separate steps.
Given previous guesses ``(z_{n - 1}, y_{n - 1}, \Psi_{n - 1})``, we calculate ``(z_n, y_n)``
such that
```math
\begin{aligned}
0 & = \mu(z_n, y_n) - z_n,\\
0 & = \xi(z_n, y_n) + \Gamma_5 z_n + \Gamma_6 y_n + \mathcal{V}(z_{n - 1}),\\
\end{aligned}
```
is satisfied. In other words, we hold the entropy term constant and update ``(z_n, y_n)`` in the remaining terms.
The coefficients are solved efficiently through `nlsolve` with ``(z_{n - 1}, y_{n - 1})`` as initial guesses.
Then we compute ``\Psi_n`` by solving
```math
\begin{aligned}
0 & = \Gamma_3 + \Gamma_4 \Psi_n + (\Gamma_5 + \Gamma_6 \Psi_n)(\Gamma_1 + \Gamma_2 \Psi_n) + J\mathcal{V}(z_{n - 1}).
\end{aligned}
```
with a [Generalized Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition#Generalized_Schur_decomposition)
(also known as QZ decomposition). Notice that we also hold the Jacobian of the entropy constant. Only after we have
a new round of ``(z_n, y_n, \Psi_n)`` do we update the entropy-related terms.
Convergence is achieved once ``(z_n, y_n, \Psi_n)`` are sufficiently close under some norm. By default,
we use the ``L^\infty`` norm (maximum absolute error).
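Schematically, the loop looks like the following pseudocode (illustrative only; `solve_steady_state` and `solve_Ψ_via_qz` are hypothetical helpers standing in for the `nlsolve` call and the QZ step described above):
```
z, y, Ψ = z₀, y₀, Ψ₀
while true
    znew, ynew = solve_steady_state(z, y, 𝒱(z))    # entropy held fixed at 𝒱(z)
    Ψnew       = solve_Ψ_via_qz(znew, ynew, J𝒱(z)) # Jacobian of entropy held fixed
    converged  = max(norm(znew - z, Inf), norm(ynew - y, Inf), norm(Ψnew - Ψ, Inf)) < tol
    z, y, Ψ    = znew, ynew, Ψnew
    converged && break
end
```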
## [Homotopy Continuation](@id homotopy)
When the deterministic steady state exists, it is typically an easy problem to solve numerically. We can therefore
use the equations characterizing the deterministic steady state for a
[homotopy continuation method](https://en.wikipedia.org/wiki/Numerical_algebraic_geometry).
Let ``q`` be the embedding parameter. Then the homotopy continuation method iteratively solves
```math
\begin{aligned}
0 & = \mu(z, y) - z,\\
0 & = \xi(z, y) + \Gamma_5 z + \Gamma_6 y + q \mathcal{V}(z),\\
0 & = \Gamma_3 + \Gamma_4 \Psi + (\Gamma_5 + \Gamma_6 \Psi)(\Gamma_1 + \Gamma_2 \Psi) + q J\mathcal{V}(z)
\end{aligned}
```
for the coefficients ``(z_q, y_q, \Psi_q)`` by increasing ``q`` from 0 to 1.
## [Blanchard-Kahn Conditions](@id blanchard-kahn)
At the end of `solve!`, we check the stochastic steady state found is
locally unique and saddle-path stable by checking what are known as the Blanchard-Kahn conditions.
Standard references for computational macroeconomics explain what these conditions are, so
we defer to them (e.g. [Blanchard-Kahn (1980)](http://dept.ku.edu/~empirics/Emp-Coffee/blanchard-kahn_eca80.pdf),
[Klein (2000)](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.335.8685&rep=rep1&type=pdf), and
[Sims (2002)](https://link.springer.com/article/10.1023/A:1020517101123)).
For the stochastic steady state, these conditions are essentially identical to the conditions for
the deterministic steady state, but the Jacobian of the expectational equations with respect to ``z_t``
also includes the Jacobian of the entropy. In the deterministic steady state, the entropy is zero,
hence the Jacobian of the entropy is zero. In the stochastic steady state, the entropy is no longer zero
and varies with ``z_t``, hence the Jacobian of the expectational equations with respect to ``z_t`` depends on entropy.
## [Exploiting Sparsity](@id sparsity-numerical-algo)
When solving for the deterministic or stochastic steady state, this package
solves a system of nonlinear equations by calling `nlsolve`, whose underlying
algorithms typically require calculating the Jacobian of the system of nonlinear equations.
For many economic models, this system is sparse because each individual equation usually depends
on a small subset of the coefficients ``(z, y, \Psi)``. To exploit this sparsity and further
accelerate computation time, we can use methods implemented by
[SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl).
For an example, please see this
[script](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods/sparse_nlsolve_jacobians.jl).
We automate the setup process by letting the user pass the keyword `sparse_jacobian = true`
to `solve!`. If this keyword is true, then there are three ways to exploit sparsity.
1. If no other keywords are passed, then `solve!` will attempt to determine the sparsity pattern.
By default, the sparsity pattern is determined by using finite differences to
calculate a Jacobian and assuming any zeros will always be zero. If the keyword
`sparsity_detection = true`, then `solve!` will try using [SparsityDetection.jl](https://github.com/JuliaDiff/SparsityDetection.jl).
Currently, the latter approach does not work with RiskAdjustedLinearizations.jl.
2. The keyword `sparsity` can be used to specify the sparsity pattern of the Jacobian. If `colorvec` is not also
passed, then the matrix coloring vector is computed based on `sparsity`.
3. The keyword `jac_cache` allows the user to specify the sparsity pattern of the Jacobian
and additionally pre-allocate the Jacobian's cache, which potentially achieves speed gains by
avoiding extra allocations when the Jacobian function is repeatedly constructed.
If `solve!` is called once, then the first two approaches are essentially the same. If `solve!`
is repeatedly called (e.g. if the model's parameters are changed), then
the latter two approaches are strictly faster because
computing the sparsity pattern or pre-allocating the Jacobian's cache only needs to be done once,
as long as the system of equations does not change.
To simplify using the `sparsity`, `colorvec`, and `jac_cache` keywords, we implement
two helper functions, `compute_sparsity_pattern` and `preallocate_jac_cache`.
The first function calculates `sparsity` and `colorvec` while the second
computes `jac_cache`. See the docstrings below and
this [example](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods/sparse_nlsolve_jacobians.jl)
for more details.
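To make the three approaches concrete, a brief sketch (the keyword names are as documented above; `S`, `cv`, and `cache` stand in for user-computed objects, e.g. from the helper functions below):
```
# 1. Let solve! infer the sparsity pattern itself.
solve!(m; algorithm = :relaxation, sparse_jacobian = true)

# 2. Supply a known sparsity pattern and, optionally, a matrix coloring vector.
solve!(m; algorithm = :relaxation, sparse_jacobian = true, sparsity = S, colorvec = cv)

# 3. Supply a pre-allocated Jacobian cache to avoid repeated allocations.
solve!(m; algorithm = :relaxation, sparse_jacobian = true, jac_cache = cache)
```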
Some additional caveats on these methods:
- Creating a cached Jacobian with automatic differentiation via `ForwardColorJacCache`
will not work because the objective function changes in each loop of the homotopy and
relaxation algorithms, so the cached `Dual` matrices will have information
on the wrong function after a loop completes. Currently, RiskAdjustedLinearizations.jl has not implemented
a way to update the information on the function required by the `Dual` matrices.
- If automatic differentiation does not work with dense Jacobians due to
problems with reinterpreting the chunk size, then it will also not work when using sparse Jacobians.
## Docstrings
```@docs
RiskAdjustedLinearizations.relaxation!
RiskAdjustedLinearizations.homotopy!
RiskAdjustedLinearizations.blanchard_kahn
RiskAdjustedLinearizations.compute_sparsity_pattern
RiskAdjustedLinearizations.preallocate_jac_cache
```
# [Risk-Adjusted Linearizations](@id risk-adjusted linearization)
## Theory
### Nonlinear Model
Most dynamic economic models can be formulated as the system of nonlinear equations
```math
\begin{aligned}
z_{t + 1} & = \mu(z_t, y_t) + \Lambda(z_t)(y_{t + 1} - \mathbb{E}_t y_{t + 1}) + \Sigma(z_t) \varepsilon_{t + 1},\\
0 & = \log\mathbb{E}_t[\exp(\xi(z_t, y_t) + \Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1})].
\end{aligned}
```
The vectors ``z_t\in \mathbb{R}^{n_z}`` and ``y_t \in \mathbb{R}^{n_y}`` are the state and jump variables, respectively.
The first vector equation comprises the transition equations of the state variables.
The second vector equation comprises the model's expectational equations, which are typically
the first-order conditions for the jump variables from agents' optimization problem.
The exogenous shocks
``\varepsilon_t \in\mathbb{R}^{n_\varepsilon}`` form a martingale difference sequence and can therefore
be non-Gaussian. Given some
differentiable mapping ``\alpha:\mathbb{R}^{n_z}\rightarrow\mathbb{R}^{n_\varepsilon}``,
the random variable ``X_t = \alpha(z_t)^T \varepsilon_{t + 1}`` has the
differentiable, conditional (on ``z_t``) cumulant generating function
```math
\begin{aligned}
ccgf[\alpha(z_t) \mid z_t] = \log\mathbb{E}_t[\exp(\alpha(z_t)^T \varepsilon_{t + 1})].
\end{aligned}
```
The functions
```math
\begin{aligned}
\xi:\mathbb{R}^{2n_y + 2n_z}\rightarrow \mathbb{R}^{n_y},& \quad \mu:\mathbb{R}^{n_y + n_z}\rightarrow \mathbb{R}^{n_z},\\
\Lambda:\mathbb{R}^{n_z} \rightarrow \mathbb{R}^{n_z \times n_y}, & \quad \Sigma:\mathbb{R}^{n_z} \rightarrow \mathbb{R}^{n_z\times n_\varepsilon},
\end{aligned}
```
are differentiable. The first two functions characterize the effects of time ``t`` variables on the expectational and
state transition equations. The function ``\Lambda`` characterizes heteroskedastic endogenous risk that depends on
innovations in jump variables while the function ``\Sigma`` characterizes exogenous risk.
Note that ``\Sigma`` is *not* the variance-covariance matrix of ``\varepsilon_t``.
The functions ``\Lambda`` and ``\Sigma`` can also depend on jump variables. Denote the jump-dependent versions as
``\tilde{\Lambda}:\mathbb{R}^{n_z\times n_y} \rightarrow \mathbb{R}^{n_z \times n_y}``
and ``\tilde{\Sigma}:\mathbb{R}^{n_z \times n_y} \rightarrow \mathbb{R}^{n_z\times n_\varepsilon}``.
If there exists a mapping ``y_t = y(z_t)``, then we define ``\Lambda(z_t) = \tilde{\Lambda}(z_t, y(z_t))``
and ``\Sigma(z_t) = \tilde{\Sigma}(z_t, y(z_t))``.
The expectational equations can be simplified as
```math
\begin{aligned}
0 & = \log\mathbb{E}_t[\exp(\xi(z_t, y_t) + \Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1})]\\
& = \log[\exp(\xi(z_t, y_t))\mathbb{E}_t[\exp(\Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1})]]\\
& = \xi(z_t, y_t) + \Gamma_5\mathbb{E}_t z_{t + 1} + \Gamma_6 \mathbb{E}_t y_{t + 1} + \log\mathbb{E}_t[\exp(\Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1})] - (\Gamma_5\mathbb{E}_t z_{t + 1} + \Gamma_6 \mathbb{E}_t y_{t + 1})\\
& = \xi(z_t, y_t) + \Gamma_5\mathbb{E}_t z_{t + 1} + \Gamma_6 \mathbb{E}_t y_{t + 1} + \mathcal{V}(\Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1}),
\end{aligned}
```
where the last term is defined to be
```math
\begin{aligned}
\mathcal{V}(x_{t + 1}) = \log\mathbb{E}_t[\exp(x_{t + 1})] - \mathbb{E}_t x_{t + 1}.
\end{aligned}
```
As Lopez et al. (2018) describe it, this quantity "is a relative entropy measure, i.e. a nonnegative measure of dispersion that generalizes variance."
### [Risk-Adjusted Linearizations by Affine Approximation](@id affine-theory)
Many economic models are typically solved by perturbation around the deterministic steady state. To break certainty equivalence so that
asset pricing is meaningful, these perturbations need to be at least third order. However, even third-order perturbations
can poorly approximate the true global solution. A key problem is that the economy may not spend much time near the
deterministic steady state, so a perturbation around this point will be inaccurate.
Instead of perturbing the model's nonlinear equations around the deterministic steady state, we could perturb around the
stochastic or "risky" steady state. This point is better for a perturbation because the economy will spend a
large amount of time near the stochastic steady state. [Lopez et al. (2018)](https://ideas.repec.org/p/bfr/banfra/702.html)
show that an affine approximation of the model's nonlinear equation is equivalent to a linearization around the
stochastic steady state. Further, they confirm that in practice this "risk-adjusted" linearization approximates
global solutions of canonical economic models very well and outperforms perturbations around the deterministic steady state.
The affine approximation of a dynamic economic model is
```math
\begin{aligned}
\mathbb{E}[z_{t + 1}] & = \mu(z, y) + \Gamma_1(z_t - z) + \Gamma_2(y_t - y),\\
0 & = \xi(z, y) + \Gamma_3(z_t - z) + \Gamma_4(y_t - y) + \Gamma_5 \mathbb{E}_t z_{t + 1} + \Gamma_6 \mathbb{E}_t y_{t + 1} + \mathcal{V}(z) + J\mathcal{V}(z)(z_t - z),
\end{aligned}
```
where ``\Gamma_1, \Gamma_2`` are the Jacobians of ``\mu`` with respect to ``z_t`` and ``y_t``, respectively;
``\Gamma_3, \Gamma_4`` are the Jacobians of ``\xi`` with respect to ``z_t`` and ``y_t``, respectively;
``\Gamma_5, \Gamma_6`` are constant matrices; ``\mathcal{V}(z)`` is the model's entropy; and
``J\mathcal{V}(z)`` is the Jacobian of the entropy;
and the state variables ``z_t`` and jump variables ``y_t`` follow
```math
\begin{aligned}
z_{t + 1} & = z + \Gamma_1(z_t - z) + \Gamma_2(y_t - y) + (I_{n_z} - \Lambda(z_t) \Psi)^{-1}\Sigma(z_t)\varepsilon_{t + 1},\\
y_t & = y + \Psi(z_t - z).
\end{aligned}
```
The unknowns ``(z, y, \Psi)`` solve the system of equations
```math
\begin{aligned}
0 & = \mu(z, y) - z,\\
0 & = \xi(z, y) + \Gamma_5 z + \Gamma_6 y + \mathcal{V}(z),\\
0 & = \Gamma_3 + \Gamma_4 \Psi + (\Gamma_5 + \Gamma_6 \Psi)(\Gamma_1 + \Gamma_2 \Psi) + J\mathcal{V}(z).
\end{aligned}
```
Under an affine approximation, the entropy term is a nonnegative function
``\mathcal{V}:\mathbb{R}^{n_z} \rightarrow \mathbb{R}_+^{n_y}`` defined such that
```math
\begin{aligned}
\mathcal{V}(z_t) \equiv \mathcal{V}_t(\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})) = \vec{ccgf}[(\Gamma_5 + \Gamma_6 \Psi)(I_{n_z} - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t) \mid z_t]
\end{aligned}
```
where the notation ``\vec{ccgf}`` means that each component ``ccgf_i[\cdot \mid \cdot]`` is a conditional cumulant-generating
function. Explicitly, define
```math
\begin{aligned}
A(z_t) = (\Gamma_5 + \Gamma_6 \Psi)(I_{n_z} - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t) = [A_1(z_t), \dots, A_{n_y}(z_t)]^T.
\end{aligned}
```
Each ``A_i(z_t)`` is a mapping from ``z_t`` to the ``i``th row vector in ``A(z_t)``. Then
``ccgf_i[\cdot \mid \cdot]`` is
```math
\begin{aligned}
ccgf_i[A_i(z_t)\mid z_t] = \log\mathbb{E}_t[\exp(A_i(z_t) \varepsilon_{t + 1})].
\end{aligned}
```
Every ``ccgf_i[\cdot \mid \cdot]`` corresponds to an expectational equation and thus
acts as a risk correction to each one. In the common case where the individual components of
``\varepsilon_{t + 1}`` are independent, ``ccgf_i`` simplifies to
```math
\begin{aligned}
ccgf_i[A_i(z_t)\mid z_t] = \sum_{j = 1}^{n_\varepsilon}\log\mathbb{E}_t[\exp(A_{ij}(z_t) \varepsilon_{j, t + 1})],
\end{aligned}
```
i.e. it is the sum of the cumulant-generating functions for each shock ``\varepsilon_{j, t + 1}``.
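For instance, when the shocks are independent standard normals, each ``ccgf_i`` is a sum of Gaussian cumulant-generating functions, so the whole vector can be written compactly (this mirrors the Gaussian example worked out in the Tips section of this documentation):
```
# Gaussian shocks: the i-th entry is the row sum of squared loadings, divided by 2.
ccgf(A, z) = sum(A.^2, dims = 2) / 2
```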
To see why ``\mathcal{V}_t(\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1}))`` can be expressed as the conditional
cumulant-generating function of ``(\Gamma_5 + \Gamma_6 \Psi)(I_{n_z} - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t)``,
observe that
```math
\begin{aligned}
\mathcal{V}_t(\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})) & = \log\mathbb{E}_t[\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})] - \mathbb{E}_t[(\Gamma_5 + \Gamma_6 \Psi)z_{t + 1}]\\
& = \log\left(\frac{\mathbb{E}_t[\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})]}{\exp(\mathbb{E}_t[(\Gamma_5 + \Gamma_6 \Psi)z_{t + 1}])}\right).
\end{aligned}
```
Since ``\mathbb{E}_t[(\Gamma_5 + \Gamma_6 \Psi)z_{t + 1}]`` is a conditional expectation, it is measurable with respect to the time-``t`` information set. Therefore, we can move the denominator of the fraction within the logarithm inside the numerator's conditional expectation.
```math
\begin{aligned}
\mathcal{V}_t(\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})) & = \log\mathbb{E}_t\left[\exp\left((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1} - \mathbb{E}_t[(\Gamma_5 + \Gamma_6 \Psi)z_{t + 1}]\right)\right]\\
& = \log\mathbb{E}_t\left[\exp\left((\Gamma_5 + \Gamma_6 \Psi)(z_{t + 1} - \mathbb{E}_t[z_{t + 1}])\right)\right].
\end{aligned}
```
Using the postulated law of motion for states,
```math
\begin{aligned}
z_{t + 1} - \mathbb{E}_t[z_{t + 1}] & = \mu(z_t, y_t) + \Lambda(z_t)(y_{t + 1} - \mathbb{E}_t[y_{t + 1}]) + \Sigma(z_t) \varepsilon_{t + 1} - \mu(z_t, y_t)\\
& = \Lambda(z_t) \Psi (z_{t + 1} - \mathbb{E}_t[z_{t + 1}]) + \Sigma(z_t) \varepsilon_{t + 1}\\
(I - \Lambda(z_t) \Psi) (z_{t + 1} - \mathbb{E}_t[z_{t + 1}]) & = \Sigma(z_t) \varepsilon_{t + 1}.
\end{aligned}
```
Therefore, the entropy term ``\mathcal{V}_t(\cdot)`` becomes
```math
\begin{aligned}
\mathcal{V}_t(\exp((\Gamma_5 + \Gamma_6 \Psi)z_{t + 1})) & = \log\mathbb{E}_t\left[\exp\left((\Gamma_5 + \Gamma_6 \Psi)(I - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t) \varepsilon_{t + 1}\right)\right].
\end{aligned}
```
The RHS is a vector of logarithms of expected values of linear combinations of the shocks ``\varepsilon_{t + 1}``
with coefficients given by the rows of ``(\Gamma_5 + \Gamma_6 \Psi)(I - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t)``.
Thus, each element of ``\mathcal{V}_t(\cdot)`` is the conditional cumulant-generating function
of the random vector ``\varepsilon_{t + 1}`` evaluated at one of the rows of
``(\Gamma_5 + \Gamma_6 \Psi)(I - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t)``, as claimed.
Refer to [Lopez et al. (2018) "Risk-Adjusted Linearizations of Dynamic Equilibrium Models"](https://ideas.repec.org/p/bfr/banfra/702.html) for more details about the theory justifying this approximation approach.
See [Deriving the conditional cumulant generating function](@ref ccgf-tips) for some guidance on calculating the ccgf, which
many users may not have seen before.
## [Implementation as `RiskAdjustedLinearization`](@id implement-ral)
We implement risk-adjusted linearizations of nonlinear dynamic economic models
through the wrapper type `RiskAdjustedLinearization`.
The user only needs to define the functions and matrices characterizing the equilibrium of the nonlinear model. Once these
functions are defined, the user can create a `RiskAdjustedLinearization` object, which will automatically
create the Jacobian functions needed to compute the affine approximation.
To ensure efficiency in speed and memory, this package takes advantage of a number of features that are easily
accessible through Julia.
1. The Jacobians are calculated using forward-mode automatic differentiation rather than symbolic differentiation.
2. The Jacobian functions are constructed to be in-place with pre-allocated caches.
3. Functions provided by the user will be converted into in-place functions with pre-allocated caches.
4. Calls to nonlinear equation solver `nlsolve` are accelerated by exploiting sparsity in Jacobians with [SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl).
5. Calculation of Jacobians of ``\mu``, ``\xi``, and ``\mathcal{V}`` with automatic differentiation is accelerated by exploiting sparsity with [SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl).
See the [Example](@ref example) for how to use the type. To compare this package's speed with the original MATLAB code,
run the [wac_disaster.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/matlab_timing_test/wac_disaster.jl) or [rbc_cc.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/matlab_timing_test/rbc_cc.jl)
scripts. These scripts assess how long it takes to calculate a risk-adjusted linearization using the two numerical algorithms
implemented by this package and by the original authors. The relaxation algorithm is generally around 50x-100x faster
while the homotopy algorithm is 3x-4x faster.
The docstring for the constructor is given below. Some of the keywords allow the user to exploit sparsity in the
objects comprising a risk-adjusted linearization. For more details on sparse methods,
see [Sparse Arrays and Jacobians](@ref sparse-arrays-jacs).
```@docs
RiskAdjustedLinearizations.RiskAdjustedLinearization
```
## Helper Types
To organize the functions comprising a risk-adjusted linearization, we create two helper types, `RALNonlinearSystem` and `RALLinearizedSystem`.
The first type holds the ``\mu``, ``\Lambda``, ``\Sigma``, ``\xi``, and ``\mathcal{V}`` functions while the second type holds
the ``\mu_z``, ``\mu_y``, ``\xi_z``, ``\xi_y``, ``J\mathcal{V}``, ``\Gamma_5``, and ``\Gamma_6`` quantities.
The `RALNonlinearSystem` type holds potentially nonlinear functions, and in particular ``\mu``, ``\xi``, and ``\mathcal{V}``,
which need to be linearized (e.g. by automatic differentiation). The `RALLinearizedSystem` holds both matrices that
are only relevant once the model is linearized, such as ``\Gamma_1`` (calculated by ``\mu_z``), as well as ``\Gamma_5`` and ``\Gamma_6``
since these latter two quantities are always constant matrices.
Aside from providing a way to organize the various functions comprising a risk-adjusted linearization, these helper types do not
have much additional functionality. The `update!` functions for a `RiskAdjustedLinearization`, for example, are implemented
underneath the hood by calling `update!` functions written for the `RALNonlinearSystem` and `RALLinearizedSystem`.
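For instance, the steady-state coefficients of an existing object can be overwritten with `update!`, as done in the package's test suite (`znew`, `ynew`, and `Ψnew` stand for user-supplied arrays of the appropriate sizes):
```
# Overwrite (z, y, Ψ); the nonlinear and linearized systems are refreshed under the hood.
update!(ral, znew, ynew, Ψnew)
```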
# [Sparse Arrays and Jacobians](@id sparse-arrays-jacs)
The risk-adjusted linearization of many economic models contains
substantial amounts of sparsity. The matrices ``\Gamma_5``
and ``\Gamma_6`` as well as the output of the functions
``\Lambda(\cdot)`` and ``\Sigma(\cdot)`` are typically sparse.
All of the Jacobians, ``\Gamma_1``, ``\Gamma_2``, ``\Gamma_3``,
``\Gamma_4``, and ``J\mathcal{V}``, are also very sparse.
To optimize performance, RiskAdjustedLinearizations.jl
allows users to leverage the sparsity of these objects.
The caches for the first set of objects can be
sparse matrices, assuming that ``\Lambda(\cdot)`` and ``\Sigma(\cdot)``
are written properly. The second set of objects are usually computed
with forward-mode automatic differentiation. By using matrix coloring techniques
implemented by [SparseDiffTools](https://github.com/JuliaDiff/SparseDiffTools.jl),
we can accelerate the calculation of these Jacobians and cache their output
as sparse matrices.
These methods can be easily used through keyword arguments of the main constructor of the
`RiskAdjustedLinearization` type.
We have also written examples which show how to use these methods and time their speed.
See the folder [examples/sparse_methods](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods).
The script [sparse\_arrays\_and\_jacobians.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods/sparse_arrays_and_jacobians.jl)
illustrates how to apply the methods described in this documentation page while
[sparse\_nlsolve\_jacobians.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods/sparse_nlsolve_jacobians.jl) describe how to use sparse automatic differentiation
to accelerate the calculation of Jacobians during calls to `nlsolve`. See [Numerical Algorithms](@ref numerical-algorithms)
for more details on the latter. Finally, the script
[combined\_sparse\_methods.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations.jl/tree/main/examples/sparse_methods/combined_sparse_methods.jl) combines these methods to achieve the fastest possible speeds with this package.
## Sparsity with ``\Gamma_5``, ``\Gamma_6``, ``\Lambda``, and ``\Sigma``
The matrices ``\Gamma_5`` and ``\Gamma_6`` are constants and can be passed in directly as
sparse matrices. The caches for ``\Lambda`` and ``\Sigma`` can be initialized as sparse matrices by using
the `Λ_cache_init` and `Σ_cache_init` keywords for `RiskAdjustedLinearization`. Each of these keywords is
a function which takes as input a `Tuple` of `Int` dimensions and allocates an array with
those dimensions. By default, the keywords have the values
```
Λ_cache_init = dims -> Matrix{Float64}(undef, dims...)
Σ_cache_init = dims -> Matrix{Float64}(undef, dims...)
```
To use `SparseMatrixCSC` arrays, the user would instead pass
```
Λ_cache_init = dims -> spzeros(dims...)
Σ_cache_init = dims -> spzeros(dims...)
```
However, the user should be aware of two caveats.
1. Using sparse arrays for caches may not always be faster
for calculating the steady state. To obtain ``\Psi``,
we need to apply the Schur decomposition, which requires dense matrices.
Thus, we still have to allocate dense versions of the sparse caches.
2. If ``\Lambda`` is nonzero, then the cache for ``\Sigma`` cannot be sparse.
The reason is that we need to compute `(I - Λ * Ψ) \ Σ`, but this calculation
will fail when `Σ` is sparse. The cache, however, can be another special
matrix type as long as the left division works. For example,
the matrix could be a `Diagonal` or `BandedMatrix`.
## Sparse Jacobians and Automatic Differentiation
To calculate a risk-adjusted linearization, we need to compute the Jacobians of ``\mu`` and ``\xi``
with respect to ``z`` and ``y`` as well as the Jacobian of ``\mathcal{V}`` with respect to ``z``.
These Jacobians are typically sparse because each equation in economic models
only has a small subset of variables. To exploit this sparsity, we utilize methods from
[SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl).
There are two ways to instruct a `RiskAdjustedLinearization` that the Jacobians of ``\mu``, ``\xi``,
and/or ``\mathcal{V}`` are sparse. The first applies during the construction of an instance
while the second occurs after an instance exists.
Note that sparse differentiation for this package is still a work in progress.
While working examples exist, the code still has bugs. The major problems
are listed below:
- Homotopy does not work yet with sparse automatic differentiation.
- `NaN`s or undefined values sometimes occur during calls to `nlsolve` within `solve!`.
However, the numerical algorithm can succeed if `solve!` is repeatedly run,
even when using the same initial guess for the coefficients ``(z, y, \Psi)``. This happens
at a sufficiently high frequency that using sparse automatic differentiation is not reliable.
### Specify Sparsity during Construction
When constructing a `RiskAdjustedLinearization`, the keyword `sparse_jacobian::Vector{Symbol}`
is a vector containing the symbols `:μ`, `:ξ`, and/or `:𝒱`. For example, if
```
sparse_jacobian = [:μ, :𝒱]
```
then the constructor will interpret that ``\mu`` has sparse Jacobians with respect to ``z`` and ``y``,
and that ``\mathcal{V}`` has a sparse Jacobian with respect to ``z``.
To implement sparse differentiation, the user needs to provide a sparsity pattern and a matrix coloring vector.
The user can use the keywords `sparsity` and `colorvec` to provide this information. These keywords
are dictionaries whose keys are the names of the Jacobians and values are the sparsity pattern and matrix coloring vector.
The relevant keys are `:μz`, `:μy`, `:ξz`, `:ξy`, and `:J𝒱`, where
- `:μz` and `:μy` are the Jacobians of `μ` with respect to ``z`` and ``y``,
- `:ξz` and `:ξy` are the Jacobians of `ξ` with respect to ``z`` and ``y``, and
- `:J𝒱` is the Jacobian of `𝒱` with respect to ``z``.
If `sparse_jacobian` is nonempty, but
one of these dictionaries is empty or does not contain the correct subset of the keys
`:μz`, `:μy`, `:ξz`, `:ξy`, and `:J𝒱`, then we attempt to determine the sparsity pattern
and/or matrix coloring vector. Once the sparsity pattern is known, the matrix coloring
vector is determined by calling `matrix_colors`.
We implement two approaches to discern the sparsity pattern. By default, we compute the dense Jacobian
once using ForwardDiff and assume that any zeros in the computed Jacobian are supposed to be zero. If this
assumption is true, then this Jacobian can be used as the sparsity pattern. Alternatively,
the user can set the keyword `sparsity_detection = true`, in which case we call `jacobian_sparsity`
from [SparsityDetection.jl](https://github.com/SciML/SparsityDetection.jl).
to determine the sparsity pattern. Currently, only the first approach works.
For ``\mu`` and ``\xi``, the first approach typically works fine. For ``\mathcal{V}``, however,
if the user guesses that ``\Psi`` is a matrix of zeros, then the Jacobian will be zero as well.
A good guess of ``\Psi`` is crucial to inferring the correct sparsity pattern of
``\mathcal{V}`` because different ``\Psi`` can imply different sparsity patterns.
For this reason, to fully exploit the sparsity in a model,
we recommend calculating the risk-adjusted linearization once using dense Jacobian methods.
The calculated Jacobians can be used subsequently as the sparsity patterns.
For reference, see the [docstring for `RiskAdjustedLinearization`](@ref implement-ral).
### Update a `RiskAdjustedLinearization` with Sparse Jacobians after Construction
Sparse Jacobians can be specified after a `RiskAdjustedLinearization` object `m` already exists
by calling `update_sparsity_pattern!(m, function_names)`.
The syntax of `update_sparsity_pattern!` is very similar to the specification of
sparse Jacobians in the constructor. The second input `function_names` is either
a `Symbol` or `Vector{Symbol}`, and it specifies the Jacobian(s) whose sparsity pattern(s) should be updated.
The relevent symbols are `:μz`, `:μy`, `:ξz`, `:ξy`, and `:J𝒱`.
If the Jacobians calculated by `m` are dense Jacobians, then `update_sparsity_pattern!`
will replace the functions computing dense Jacobians with functions that exploit sparsity.
If the Jacobians are already being calculated as sparse Jacobians,
then `update_sparsity_pattern!` can update the sparsity pattern and matrix coloring vector
being used.
If no keywords are passed, then `update_sparsity_pattern!` will
use the same methods as the constructor to infer the sparsity pattern. Either
we compute the dense Jacobian once using ForwardDiff, or we utilize SparsityDetection.
The new sparsity pattern and matrix coloring vectors can be specified using the
`sparsity` and `colorvec` keywords, just like the constructor.
Different values for ``z``, ``y``, and ``\Psi`` can also be used
when trying to infer the sparsity pattern by passing the new values as keywords.
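For instance, mirroring the package's test suite (`sparsity` and `colorvec` below are dictionaries keyed by the Jacobian names listed above):
```
# Infer the sparsity patterns of the Jacobians of μ and ξ automatically.
update_sparsity_pattern!(m, [:μ, :ξ])

# Supply user-provided sparsity patterns and matrix coloring vectors instead.
update_sparsity_pattern!(m, [:μ, :ξ, :𝒱]; sparsity = sparsity, colorvec = colorvec)
```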
```@docs
RiskAdjustedLinearizations.update_sparsity_pattern!
```
# [Tips](@id tips)
This page of the documentation holds miscellaneous tips for using the package.
## [Deriving the conditional cumulant generating function](@id ccgf-tips)
The cumulant generating function is based upon the moment-generating function. If
```math
\begin{aligned}
M_X(t) \equiv \mathbb{E}[e^{tX}],\quad \quad \quad t\in \mathbb{R},
\end{aligned}
```
is the moment-generating function of a random variable ``X``, then the cumulant-generating function is just
```math
\begin{aligned}
ccgf_X(t) \equiv \log\mathbb{E}[e^{tX}],\quad \quad \quad t\in \mathbb{R}.
\end{aligned}
```
As an example, if ``X \sim N(\mu, \sigma^2)``, then ``M_X(t) = \exp(t\mu + \sigma^2 t^2 / 2)`` and
``ccgf_X(t) = t\mu + \sigma^2 t^2 / 2``.
Risk-adjusted linearizations imply that the relative entropy measure ``\mathcal{V}(\Gamma_5 z_{t + 1} + \Gamma_6 y_{t + 1})``
becomes a vector of conditional cumulant-generating functions for the random variables ``A_i(z_t) \varepsilon_{t + 1}``,
where ``A_i(z_t)`` is the ``i``th row vector of
```math
\begin{aligned}
A(z_t) = (\Gamma_5 + \Gamma_6 \Psi)(I_{n_z} - \Lambda(z_t) \Psi)^{-1} \Sigma(z_t).
\end{aligned}
```
To create a `RiskAdjustedLinearization`, the user needs to define a function `ccgf` in the form
`ccgf(F, A, z)` or `ccgf(A, z)`, where `A` refers to the matrix ``A(z_t)`` once it has already been
evaluated at ``z_t``. In other words, the input `A` should be seen as an ``n_y \times n_\varepsilon`` matrix
of real scalars. However,
depending on the distributions of the martingale difference sequence ``\varepsilon_{t + 1}``,
writing the conditional cumulant-generating function may also require knowing the current state ``z_t``.
Let us consider two didactic examples. First, assume ``\varepsilon_{t + 1}\sim \mathcal{N}(0, I)``.
Then we claim
```
ccgf(A, z) = sum(A.^2, dims = 2) / 2
```
Based on the definition of ``\mathcal{V}(z_t)``, one may be tempted to derive the conditional cumulant-generating function
for the random vector ``A(z_t) \varepsilon_{t + 1}``. However, this is not actually what we want.
Rather, `ccgf` should just return a vector of conditional cumulant-generating functions
for the ``n_y`` random variables ``X_i = A_i(z_t)\varepsilon_{t + 1}``.
Because the individual components of ``\varepsilon_{t + 1}`` are independent and each ``\varepsilon_{i, t}`` has
a standard Normal distribution,
the moment-generating function for ``X_i`` is ``\exp\left(\frac{1}{2}\sum_{j = 1}^{n_\varepsilon} (t A_{ij})^2\right)``, hence the ``i``th cumulant-generating function is ``\frac{1}{2}\sum_{j = 1}^{n_\varepsilon} (t A_{ij})^2``.
For risk-adjusted linearizations, we evaluate at ``t = 1`` since we want the
conditional cumulant-generating function ``\log\mathbb{E}_t[\exp(A_i(z_t)\varepsilon_{t + 1})]``.
This is precisely what the code above achieves.
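For completeness, the same Gaussian ccgf in the in-place form `ccgf(F, A, z)` described above (a minimal sketch; `F` is the pre-allocated output vector):
```
function ccgf(F, A, z)
    # For independent standard normal shocks, the i-th entry is sum_j A[i, j]^2 / 2.
    F .= vec(sum(A.^2, dims = 2)) ./ 2
    return F
end
```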
Second, let us consider a more complicated example. In the [Wachter (2013) Example](@ref example),
the ccgf is
```
function ccgf(F, α, z) # α is used here instead of A
# the S term in z[S[:p]] is just an ordered dictionary mapping the symbol :p to the desired index of z
F .= .5 .* α[:, 1].^2 + .5 * α[:, 2].^2 + (exp.(α[:, 3] + α[:, 3].^2 .* δ^2 ./ 2.) .- 1. - α[:, 3]) * z[S[:p]]
end
```
Observe that the first two quantities `.5 .* α[:, 1].^2 + .5 * α[:, 2].^2` resemble what would be obtained
from a standard multivariate normal distribution. The remaining terms are more complicated because
the Wachter (2013) model involves a Poisson mixture of normal distributions. It will be instructive to spell the details out.
Consumption growth follows the exogenous process
```math
\begin{aligned}
c_{t + 1} = \mu + c_t + \sigma \varepsilon^c_{t + 1} - \theta \xi_{t + 1},
\end{aligned}
```
where ``\varepsilon_t^c \sim N(0, 1)`` is iid over time and ``\xi_t \mid j_t \sim N(j_t, j_t\delta^2)``, where the number of jumps
``j_t \sim Poisson(p_{t - 1})``, hence ``\mathbb{E}_t \xi_{t + 1} = \mathbb{E}_t j_{t + 1} = p_t``. Assume that ``\varepsilon_t^c``
and ``\varepsilon_t^\xi = \xi_t - \mathbb{E}_{t - 1}\xi_t`` are independent.
Finally, the intensity ``p_t`` follows the process
```math
\begin{aligned}
p_{t + 1} = (1 - \rho_p) p + \rho_p p_t + \sqrt{p_t} \phi_p \sigma \varepsilon_{t + 1}^p,
\end{aligned}
```
where ``\varepsilon_t^p \sim N(0, 1)`` is iid over time and independent of ``\varepsilon_t^c`` and ``\varepsilon_t^\xi``.
Note that ``\xi_t`` and
``\mathbb{E}_{t - 1}\xi_t`` are not independent because ``\mathbb{E}_{t - 1}\xi_t = p_{t - 1}`` and ``j_t \sim Poisson(p_{t - 1})``,
hence a higher ``p_{t - 1}`` implies ``\xi_t`` is more likely to be higher. Re-centering ``\xi_t`` by ``\mathbb{E}_{t - 1}\xi_t``
creates a martingale difference sequence since ``\xi_t \mid j_t`` is normal.
By independence of the components of ``\varepsilon_t = [\varepsilon_t^c, \varepsilon_t^p, \varepsilon_t^\xi]^T``,
the conditional cumulant-generating function for the ``i``th row of the ``A(z_t)`` matrix described in this
[section](@ref affine-theory) is
```math
\begin{aligned}
ccgf_i[A_i(z_t) \mid z_t] & = \log\mathbb{E}_t[\exp(A_{i1}(z_t) \varepsilon_{t + 1}^c)] + \log\mathbb{E}_t[\exp(A_{i2}(z_t) \varepsilon_{t + 1}^p)] + \log\mathbb{E}_t[\exp(A_{i3}(z_t) \varepsilon_{t + 1}^\xi)].
\end{aligned}
```
The first two terms on the RHS are for normal random variables and simplify to ``(A_{i1}(z_t)^2 + A_{i2}(z_t)^2) / 2``.
To calculate the remaining term, note that ``\mathbb{E}_{t}\xi_{t + 1} = p_t`` is already part of the information set
at ``z_t``, hence
```math
\begin{aligned}
\log\mathbb{E}_t[\exp(A_{i3}(z_t) \varepsilon_{t + 1}^\xi)] & = \log\left[\frac{1}{\exp(A_{i3}(z_t) p_t)}\mathbb{E}_t\left[\exp(A_{i3}(z_t) \xi_{t + 1})\right]\right] \\
& = \log\mathbb{E}_t\left[\exp(A_{i3}(z_t) \xi_{t + 1})\right] - A_{i3}(z_t) p_t.
\end{aligned}
```
To calculate the cumulant-generating function of ``\xi_t``, aside from direct calculation,
we can also use the results for mixture distributions in
[Villa and Escobar (2006)](https://www.jstor.org/stable/27643733?seq=2#metadata_info_tab_contents) or
[Bagui et al. (2020)](https://www.atlantis-press.com/journals/jsta/125944282/view).
Given random variables ``X`` and ``Y``, assume the conditional distribution ``X\mid Y`` and the
marginal distribution for ``Y`` are available. If we can write the moment-generating function
for the random variable ``X\mid Y`` as
```math
\begin{aligned}
M_{X \mid Y}(s) = C_1(s) \exp(C_2(s) Y),
\end{aligned}
```
then the moment-generating function of ``X`` is
```math
\begin{aligned}
M_{X}(s) = C_1(s) M_Y[C_2(s)].
\end{aligned}
```
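This follows from the law of iterated expectations:
```math
\begin{aligned}
M_X(s) = \mathbb{E}[\exp(sX)] = \mathbb{E}\left[\mathbb{E}[\exp(sX) \mid Y]\right] = \mathbb{E}[C_1(s)\exp(C_2(s) Y)] = C_1(s) M_Y[C_2(s)].
\end{aligned}
```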
In our case, we have
```math
\begin{aligned}
M_{\xi_t \mid j_t}(s) = \exp\left(s j_t + \frac{1}{2} s^2 \delta^2j_t \right),
\end{aligned}
```
hence ``C_1(s) = 1`` and ``C_2(s) = s + s^2\delta^2 / 2``. The variable ``j_t`` has a Poisson distribution
with intensity ``p_t``, which implies the moment-generating function
```math
\begin{aligned}
M_{j_t}(s) = \exp((\exp(s) - 1) p_t).
\end{aligned}
```
Thus, as desired,
```math
\begin{aligned}
\log\mathbb{E}_t\left[\exp(A_{i3}(z_t) \xi_{t + 1})\right] - A_{i3}(z_t) p_t & = (\exp(A_{i3}(z_t) + A_{i3}(z_t)^2\delta^2 / 2) - 1)p_t - A_{i3}(z_t) p_t.
\end{aligned}
```
Computing this quantity for each expectational equation yields the `ccgf` used in the [Wachter (2013) Example](@ref example).
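As a quick sanity check (illustrative, not part of the package), the closed form for the jump term can be verified by Monte Carlo, assuming Distributions.jl is available:

```
using Distributions, Statistics

A, p, δ = 0.3, 1.2, 0.4                      # arbitrary test values
j = rand(Poisson(p), 1_000_000)              # j ~ Poisson(p_t)
ξ = j .+ sqrt.(δ^2 .* j) .* randn(length(j)) # ξ | j ~ N(j, j δ²)
mc     = log(mean(exp.(A .* ξ))) - A * p     # log E_t[exp(A ξ)] - A p_t
closed = (exp(A + A^2 * δ^2 / 2) - 1) * p - A * p
println((mc, closed))                        # the two should agree up to simulation error
```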
## Writing functions compatible with automatic differentiation
- **Use an in-place function to avoid type errors.**
For example, define the `ccgf` as `ccgf(F, x)`.
You can use the element type of `F` via `eltype(F)` to ensure that you don't get a type error
from using `Float64` instead of `Dual` inside the function. If `ccgf` were out-of-place, then
depending on how the return vector is coded, you may get a type error if elements
of the return vector are zero or constant numbers. By having `F` available, you can
guarantee these numbers are converted to `Dual` types when needed without always
declaring them as `Dual` types. A minimal sketch of this pattern appears after this list.
- **Use `dualvector` or `dualarray`.**
The package provides these two helper functions
for the case where you have a function `f(x, y)` and need to automatically
differentiate with respect to `x` and `y` separately. For example, the nonlinear
terms of the expectational equation `ξ(z, y)` take this form. Within `ξ`, you can
pre-allocate the return vector by calling `F = RiskAdjustedLinearizations.dualvector(z, y)`.
The `dualvector` function will infer from `z` and `y` whether `F` should have `Dual` element types
or not, so you can avoid repeatedly writing if-else conditional blocks. The `dualarray` function
generalizes this to arbitrary `AbstractMatrix` inputs.
See the out-of-place function for `ξ` in [examples/wachter\_disaster\_risk/wachter.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/wachter_disaster_risk/wachter.jl).
- **Don't pre-allocate the return vector.**
For an out-of-place function, instead of pre-allocating the return vector at the
top of the function, just concatenate the individual elements
at the very end. Julia will figure out the appropriate element type for you. The downside of this
approach is that you won't be able to assign names to specific indices of the return vector (e.g.
to indicate which equation defines the risk-free interest rate). For small models, this disadvantage is generally not a problem.
See the definition of the out-of-place expected state transition function `μ` in
[examples/wachter\_disaster\_risk/wachter.jl](https://github.com/chenwilliam77/RiskAdjustedLinearizations/tree/main/examples/wachter_disaster_risk/wachter.jl).
- **Exponentiate all terms to write conditions in levels.**
Automatic differentiation will be faster if the equilibrium conditions are written in the form
```math
\begin{aligned}
0 & = \log(F(x)),
\end{aligned}
```
instead of in levels
```math
\begin{aligned}
1 & = F(x).
\end{aligned}
```
However, it may be easier and/or less error prone to write the equilibrium conditions
in levels. Both goals can be accomplished by (1) exponentiating all input
arguments at the beginning of the function to convert inputs from logs to levels;
(2) writing all equilibrium conditions in the form ``1 = F(x)``; and
(3) returning the output as ``\log(F(x))``. See the second sketch after this list.
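The following minimal sketch illustrates the in-place pattern from the first bullet; the function body is hypothetical, not package code.

```
function ccgf(F, x)
    T = eltype(F)      # Float64 or ForwardDiff.Dual, depending on the caller
    F[1] = x[1]^2 / 2
    F[2] = zero(T)     # a constant entry; zero(T) avoids hard-coding a Float64 zero
    return F
end
```

The second sketch shows the levels-to-logs pattern from the last bullet, using a hypothetical (deterministic) Euler equation whose inputs are passed in logs.

```
β, γ = 0.99, 2.0                    # hypothetical parameters
function euler_condition(x)
    c_t, c_tp1, R = exp.(x)         # (1) exponentiate the log inputs to obtain levels
    F = β * R * (c_tp1 / c_t)^(-γ)  # (2) equilibrium condition in levels: 1 = F(x)
    return log(F)                   # (3) return log(F) so the condition reads 0 = log(F(x))
end
```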