licenses (sequence, length 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, length 2-8) | text (string, length 25-67.1M) | package_name (string, length 2-41) | repo (string, length 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.4.4 | ccd0226f62ff59b9b1f5231646e2afa7b8002586 | docs | 1915 | # Load Data from Existing Outputs
```julia
using Mera
```
(ASCII-art "MERA" banner printed by the package on load)
## Load data from a sequence of snapshots
```julia
for i = 1:10
info = getinfo(output=i, "../../../testing/simulations/manu_sim_sf_L10", verbose=false)
#...gethydro(info)...getparticles(info)... etc.
end
```
## Load data from existing simulations in a given folder
List the content of a given folder:
```julia
path = "../../../testing/simulations/ramses_star_formation"
readdir(path)
```
9-element Array{String,1}:
".ipynb_checkpoints"
"output_00001"
"output_00003"
"output_00004"
"output_00007"
"output_00010"
"output_00013"
"output_00016"
"output_00019"
Get the relevant simulation output-numbers:
```julia
N = checkoutputs(path);
```
```julia
N.outputs
```
7-element Array{Int64,1}:
1
4
7
10
13
16
19
List of empty simulation folders:
```julia
N.missing
```
1-element Array{Int64,1}:
3
Load the data:
```julia
for i in N.outputs
println("Output: $i")
info = getinfo(output=i, path, verbose=false)
#...gethydro(info)...getparticles(info)... etc.
end
```
Output: 1
Output: 4
Output: 7
Output: 10
Output: 13
Output: 16
Output: 19
Get the physical time of all existing outputs:
```julia
gettime.(N.outputs, path, :Myr)
```
7-element Array{Float64,1}:
0.0
0.6974071892328049
0.8722968605999833
1.0432588470755855
1.2217932462903247
1.4016810597086558
1.5865234202798626
| Mera | https://github.com/ManuelBehrendt/Mera.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 4832 | module CGumbo
# immutable types corresponding to structs from gumbo.h
# also various enums from gumbo.h
struct Vector # Gumbo vector
data::Ptr{Ptr{Cvoid}}
length::Cuint
capacity::Cuint
end
struct StringPiece
data::Ptr{UInt8}
length::Csize_t
end
struct SourcePosition
line::Cuint
column::Cuint
offset::Cuint
end
struct Text
text::Ptr{UInt8}
original_text::StringPiece
start_pos::SourcePosition
end
# GumboNodeType enum
const DOCUMENT = Int32(0)
const ELEMENT = Int32(1)
const TEXT = Int32(2)
const CDATA = Int32(3)
const WHITESPACE = Int32(5)
struct Document
children::Vector
has_doctype::Bool
name::Ptr{UInt8}
public_identifier::Ptr{UInt8}
system_identifier::Ptr{UInt8}
doc_type_quirks_mode::Int32 # enum
end
struct Attribute
attr_namespace::Int32 # enum
name::Ptr{UInt8}
original_name::StringPiece
value::Ptr{UInt8}
original_value::StringPiece
name_start::SourcePosition
name_end::SourcePosition
value_start::SourcePosition
value_end::SourcePosition
end
struct Element
children::Vector
tag::Int32 # enum
tag_namespace::Int32 # enum
original_tag::StringPiece
original_end_tag::StringPiece
start_pos::SourcePosition
end_pos::SourcePosition
attributes::Vector
end
struct Node{T}
gntype::Int32 # enum
parent::Ptr{Node}
index_within_parent::Csize_t
parse_flags::Int32 # enum
v::T
end
struct Output
document::Ptr{Node}
root::Ptr{Node}
errors::Vector
end
const TAGS = [:HTML,
:head,
:title,
:base,
:link,
:meta,
:style,
:script,
:noscript,
:template,
:body,
:article,
:section,
:nav,
:aside,
:h1,
:h2,
:h3,
:h4,
:h5,
:h6,
:hgroup,
:header,
:footer,
:address,
:p,
:hr,
:pre,
:blockquote,
:ol,
:ul,
:li,
:dl,
:dt,
:dd,
:figure,
:figcaption,
:main,
:div,
:a,
:em,
:strong,
:small,
:s,
:cite,
:q,
:dfn,
:abbr,
:data,
:time,
:code,
:var,
:samp,
:kbd,
:sub,
:sup,
:i,
:b,
:u,
:mark,
:ruby,
:rt,
:rp,
:bdi,
:bdo,
:span,
:br,
:wbr,
:ins,
:del,
:image,
:img,
:iframe,
:embed,
:object,
:param,
:video,
:audio,
:source,
:track,
:canvas,
:map,
:area,
:math,
:mi,
:mo,
:mn,
:ms,
:mtext,
:mglyph,
:malignmark,
:annotation_xml,
:svg,
:foreignobject,
:desc,
:table,
:caption,
:colgroup,
:col,
:tbody,
:thead,
:tfoot,
:tr,
:td,
:th,
:form,
:fieldset,
:legend,
:label,
:input,
:button,
:select,
:datalist,
:optgroup,
:option,
:textarea,
:keygen,
:output,
:progress,
:meter,
:details,
:summary,
:menu,
:menuitem,
:applet,
:acronym,
:bgsound,
:dir,
:frame,
:frameset,
:noframes,
:isindex,
:listing,
:xmp,
:nextid,
:noembed,
:plaintext,
:rb,
:strike,
:basefont,
:big,
:blink,
:center,
:font,
:marquee,
:multicol,
:nobr,
:spacer,
:tt,
:rtc,
:unknown
]
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 473 | module Gumbo
using Gumbo_jll, Libdl
include("CGumbo.jl")
export HTMLElement,
HTMLDocument,
HTMLText,
NullNode,
HTMLNode,
attrs,
text,
tag,
children,
hasattr,
getattr,
setattr!,
parsehtml,
postorder,
preorder,
breadthfirst,
prettyprint
include("htmltypes.jl")
include("manipulation.jl")
include("comparison.jl")
include("io.jl")
include("conversion.jl")
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 1366 | # comparison functions for HTML Nodes and Documents
# TODO right now hashing and equality completely ignore
# parents. I think this is *probably* appropriate but it deserves
# some more thought. There's an argument that two HTMLElements with
# the same contents and children but different parent pointers are not
# really equal. Perhaps an auxiliary equality function could be provided
# for this purpose?
# equality
import Base: ==, isequal, hash
isequal(x::HTMLDocument, y::HTMLDocument) =
isequal(x.doctype,y.doctype) && isequal(x.root,y.root)
isequal(x::HTMLText,y::HTMLText) = isequal(x.text, y.text)
isequal(x::HTMLElement, y::HTMLElement) =
isequal(x.attributes,y.attributes) && isequal(x.children,y.children)
==(x::HTMLDocument, y::HTMLDocument) =
==(x.doctype,y.doctype) && ==(x.root,y.root)
==(x::HTMLText,y::HTMLText) = ==(x.text, y.text)
==(x::HTMLElement, y::HTMLElement) =
==(x.attributes,y.attributes) && ==(x.children,y.children)
# hashing
function hash(doc::HTMLDocument)
hash(hash(HTMLDocument),hash(hash(doc.doctype), hash(doc.root)))
end
function hash(elem::HTMLElement{T}) where {T}
h = hash(HTMLElement)
h = hash(h,hash(T))
h = hash(h,hash(attrs(elem)))
for child in children(elem)
h = hash(h,hash(child))
end
return h
end
hash(t::HTMLText) = hash(hash(HTMLText),hash(t.text))
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 4117 | using Libdl
function parsehtml(input::AbstractString; strict=false, preserve_whitespace=false)
result_ptr = ccall((:gumbo_parse,libgumbo),
Ptr{CGumbo.Output},
(Cstring,),
input)
goutput::CGumbo.Output = unsafe_load(result_ptr)
if strict && goutput.errors.length > 0
throw(InvalidHTMLException("input html was invalid"))
end
doc = document_from_gumbo(goutput, preserve_whitespace)
default_options = Libdl.dlsym(Gumbo_jll.libgumbo_handle, :kGumboDefaultOptions)
ccall((:gumbo_destroy_output,libgumbo),
Cvoid,
(Ptr{Cvoid}, Ptr{CGumbo.Output}),
default_options, result_ptr)
return doc
end
# turn a gumbo vector into a Julia vector
# of Ptr{T} where T is the struct contained
# by the gumbo vector
gvector_to_jl(T,gv::CGumbo.Vector) = unsafe_wrap(Array, convert(Ptr{Ptr{T}},gv.data),
(Int(gv.length),))
# convert a vector of pointers to GumboAttributes to
# a Dict AbstractString => AbstractString
function attributes(av::Vector{Ptr{CGumbo.Attribute}})
result = Dict{AbstractString,AbstractString}()
for ptr in av
ga::CGumbo.Attribute = unsafe_load(ptr)
result[unsafe_string(ga.name)] = unsafe_string(ga.value)
end
return result
end
function elem_tag(ge::CGumbo.Element)
tag = CGumbo.TAGS[ge.tag+1] # +1 is for 1-based julia indexing
if tag == :unknown
ot = ge.original_tag
tag = split(unsafe_string(ot.data, ot.length)[2:end-1])[1] |> Symbol
end
tag
end
function gumbo_to_jl(parent::HTMLNode, ge::CGumbo.Element, preserve_whitespace)
tag = elem_tag(ge)
attrs = attributes(gvector_to_jl(CGumbo.Attribute,ge.attributes))
children = HTMLNode[]
res = HTMLElement{tag}(children, parent, attrs)
preserve_whitespace = tag in RELEVANT_WHITESPACE || preserve_whitespace
for childptr in gvector_to_jl(CGumbo.Node{Int},ge.children)
node = load_node(childptr, preserve_whitespace)
if in(typeof(node).parameters[1], [CGumbo.Element, CGumbo.Text])
push!(children, gumbo_to_jl(res, node.v, preserve_whitespace))
end
end
res
end
function gumbo_to_jl(parent::HTMLNode, gt::CGumbo.Text, preserve_whitespace)
HTMLText(parent, unsafe_string(gt.text))
end
# this is a fallback method that should only be called to construct
# the root of a tree
gumbo_to_jl(ge::CGumbo.Element, preserve_whitespace) = gumbo_to_jl(NullNode(), ge, preserve_whitespace)
# load a GumboNode struct into memory as the appropriate Julia type
# this involves loading it once as a CGumbo.Node{Int} in order to
# figure out what the correct type actually is, and then reloading it as
# that type
function load_node(nodeptr::Ptr, preserve_whitespace=false)
precursor = unsafe_load(reinterpret(Ptr{CGumbo.Node{Int}},nodeptr))
# TODO clean this up with a Dict in the CGumbo module
correctptr = if precursor.gntype == CGumbo.ELEMENT
reinterpret(Ptr{CGumbo.Node{CGumbo.Element}},nodeptr)
elseif precursor.gntype == CGumbo.TEXT
reinterpret(Ptr{CGumbo.Node{CGumbo.Text}},nodeptr)
elseif precursor.gntype == CGumbo.DOCUMENT
reinterpret(Ptr{CGumbo.Node{CGumbo.Document}},nodeptr)
elseif preserve_whitespace && precursor.gntype == CGumbo.WHITESPACE
reinterpret(Ptr{CGumbo.Node{CGumbo.Text}},nodeptr)
else
# TODO this is super sketchy and should realistically be an
# error
nodeptr
end
unsafe_load(correctptr)
end
# transform gumbo output into Julia data
function document_from_gumbo(goutput::CGumbo.Output, preserve_whitespace)
# TODO convert some of these typeasserts to better error messages?
gnode::CGumbo.Node{CGumbo.Document} = load_node(goutput.document, preserve_whitespace)
gdoc = gnode.v
doctype = unsafe_string(gdoc.name)
groot::CGumbo.Node{CGumbo.Element} = load_node(goutput.root, preserve_whitespace)
root = gumbo_to_jl(groot.v, preserve_whitespace) # already an element
HTMLDocument(doctype, root)
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 714 | abstract type HTMLNode end
mutable struct HTMLText <: HTMLNode
parent::HTMLNode
text::AbstractString
end
# convenience method for defining without parent
HTMLText(text::AbstractString) = HTMLText(NullNode(), text)
struct NullNode <: HTMLNode end
mutable struct HTMLElement{T} <: HTMLNode
children::Vector{HTMLNode}
parent::HTMLNode
attributes::Dict{AbstractString,AbstractString}
end
# convenience method for defining an empty element
HTMLElement(T::Symbol) = HTMLElement{T}(HTMLNode[],NullNode(),Dict{AbstractString,AbstractString}())
mutable struct HTMLDocument
doctype::AbstractString
root::HTMLElement
end
struct InvalidHTMLException <: Exception
msg::AbstractString
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 3344 | const NO_ENTITY_SUBSTITUTION = Set([:script, :style])
const EMPTY_TAGS = Set([
:area,
:base,
:basefont,
:bgsound,
:br,
:command,
:col,
:embed,
Symbol("event-source"),
:frame,
:hr,
:image,
:img,
:input,
:keygen,
:link,
:menuitem,
:meta,
:param,
:source,
:spacer,
:track,
:wbr
])
const RELEVANT_WHITESPACE = Set([:pre, :textarea, :script, :style])
function substitute_text_entities(str)
str = replace(str, "&" => "&")
str = replace(str, "<" => "<")
str = replace(str, ">" => ">")
return str
end
function substitute_attribute_entities(str)
str = substitute_text_entities(str)
str = replace(str, "\""=> """)
str = replace(str, "'" => "'")
return str
end
function Base.print(io::IO, elem::HTMLElement{T}; pretty = false, depth = 0, substitution = true) where {T}
empty_tag = T in EMPTY_TAGS
ws_relevant = T in RELEVANT_WHITESPACE
has_children = !isempty(elem.children)
pretty_children = pretty && !ws_relevant
pretty && print(io, ' '^(2*depth))
print(io, '<', T)
for (name, value) in sort(collect(elem.attributes), by = first)
print(io, ' ', name, "=\"", substitute_attribute_entities(value), '"')
end
if empty_tag
print(io, '/')
end
print(io, '>')
pretty_children && has_children && print(io, '\n')
if !empty_tag
for child in elem.children
print(io, child; pretty = pretty_children, depth = depth + 1, substitution = substitution && !in(T, NO_ENTITY_SUBSTITUTION))
end
pretty && has_children && print(io, ' '^(2*depth))
print(io, "</", T, '>')
end
pretty && print(io, '\n')
return nothing
end
function Base.print(io::IO, node::HTMLText; pretty = false, depth = 0, substitution = true)
substitutor = substitution ? substitute_text_entities : identity
if !pretty
print(io, substitutor(node.text))
return nothing
end
for line in strip.(split(node.text, '\n'))
isempty(line) && continue
print(io, ' '^(2*depth), substitutor(line), '\n')
end
end
function Base.print(io::IO, doc::HTMLDocument; pretty = false)
write(io, "<!DOCTYPE ", doc.doctype, ">")
Base.print(io, doc.root, pretty = pretty)
end
prettyprint(io::IO, doc::HTMLDocument) = Base.print(io, doc, pretty = true)
prettyprint(doc::HTMLDocument) = prettyprint(stdout, doc)
prettyprint(io::IO, elem::HTMLElement) = print(io, elem, pretty = true)
prettyprint(elem::HTMLElement) = print(stdout, elem, pretty = true)
function Base.show(io::IO, elem::HTMLElement)
write(io, summary(elem), ":")
if get(io, :compact, false)
return
elseif get(io, :limit, false)
buf = IOBuffer()
print(buf, elem, pretty = true)
for (i, line) in enumerate(split(String(take!(buf)), '\n'))
if i > 20
println(io, "...")
return
end
println(io, line)
end
else
print(io, elem, pretty=true)
end
end
function Base.show(io::IO, t::HTMLText)
write(io,"HTML Text: `", t.text, '`')
end
function Base.show(io::IO, doc::HTMLDocument)
write(io, "HTML Document:\n")
write(io, "<!DOCTYPE ", doc.doctype, ">\n")
Base.show(io, doc.root)
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 1484 | # functions for accessing and manipulation HTML types
import AbstractTrees
# elements
tag(elem::HTMLElement{T}) where {T} = T
attrs(elem::HTMLElement) = elem.attributes
function setattr!(elem::HTMLElement, name::AbstractString, value::AbstractString)
elem.attributes[name] = value
end
getattr(elem::HTMLElement, name) = elem.attributes[name]
getattr(elem::HTMLElement, name, default) = get(elem.attributes, name, default)
getattr(f::Function, elem::HTMLElement, name) = get(f, elem.attributes, name)
hasattr(elem::HTMLElement, name) = name in keys(attrs(elem))
AbstractTrees.children(elem::HTMLElement) = elem.children
AbstractTrees.children(elem::HTMLText) = ()
# TODO there is a naming conflict here if you want to use both packages
# (see https://github.com/JuliaWeb/Gumbo.jl/issues/31)
#
# I still think exporting `children` from Gumbo is the right thing to
# do, since it's probably more common to be using this package alone
children = AbstractTrees.children
# indexing into an element indexes into its children
Base.getindex(elem::HTMLElement,i) = getindex(elem.children,i)
Base.setindex!(elem::HTMLElement,i,val) = setindex!(elem.children,i,val)
Base.push!(elem::HTMLElement,val) = push!(elem.children, val)
# text
text(t::HTMLText) = t.text
function text(el::HTMLElement)
io = IOBuffer()
for c in AbstractTrees.PreOrderDFS(el)
if c isa HTMLText
print(io, c.text, ' ')
end
end
return strip(String(take!(io)))
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 657 | # tests of basic utilities for working with HTML
import Gumbo: HTMLNode, NullNode
# convenience constructor works
@test HTMLElement(:body) == HTMLElement{:body}(HTMLNode[],
NullNode(),
Dict{AbstractString,AbstractString}())
# accessing tags works
@test HTMLElement(:body) |> tag == :body
let
elem = HTMLElement{:body}(HTMLNode[], NullNode(), Dict("foo" => "bar"))
@test getattr(elem, "foo") == "bar"
@test getattr(elem, "foo", "baz") == "bar"
@test getattr(elem, "bar", "qux") == "qux"
@test getattr(() -> "qux", elem, "bar") == "qux"
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 912 |
# test for comparisons and hashing
let
x = HTMLText("test")
y = HTMLText("test")
x1 = HTMLText("test1")
@test x == y
@test hash(x) == hash(y)
@test x1 != y
@test hash(x1) != hash(y)
end
let
x = HTMLElement(:div)
y = HTMLElement(:div)
@test x == y
@test hash(x) == hash(y)
push!(x, HTMLElement(:p))
@test x != y
@test hash(x) != hash(y)
push!(y, HTMLElement(:p))
@test x == y
@test hash(x) == hash(y)
setattr!(x,"class","test")
@test x != y
@test hash(x) != hash(y)
setattr!(y,"class","test")
@test x == y
@test hash(x) == hash(y)
end
let
x = HTMLDocument("html", HTMLElement(:html))
y = HTMLDocument("html", HTMLElement(:html))
@test x == y
@test hash(x) == hash(y)
x.doctype = ""
@test x != y
@test hash(x) != hash(y)
y.doctype = ""
@test x == y
@test hash(x) == hash(y)
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 1400 |
let
# roundtrip test
# TODO this could be done with Quickcheck if we had a way of
# generating "interesting" HTML documents
doc = open("$testdir/fixtures/example.html") do example
read(example, String) |> parsehtml
end
io = IOBuffer()
print(io, doc)
seek(io, 0)
newdoc = read(io, String) |> parsehtml
@test newdoc == doc
end
tests = [
"30", # regression test for issue #30
"multitext", # regression test for multiple HTMLText in one HTMLElement
"varied", # relatively complex example
"whitespace", # whitespace sensitive
"whitespace2", # whitespace sensitive
]
@testset for test in tests
let
doc = open("$testdir/fixtures/$(test)_input.html") do example
parsehtml(read(example, String), preserve_whitespace = (test == "whitespace"))
end
io = IOBuffer()
print(io, doc.root, pretty = (test != "whitespace"))
seek(io, 0)
ground_truth = read(open("$testdir/fixtures/$(test)_output.html"), String)
# Eliminate possible line ending issues
ground_truth = replace(ground_truth, "\r\n" => "\n")
@test read(io, String) == ground_truth
end
end
@testset "xml entities" begin
io = IOBuffer()
orig = """<p class="asd>&-2""><faketag></p>"""
print(io, parsehtml(orig))
@test occursin(orig, String(take!(io)))
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 877 | # basic test that parsing works correctly
testdir = dirname(@__FILE__)
@test_throws Gumbo.InvalidHTMLException parsehtml("", strict=true)
let
page = open("$testdir/fixtures/example.html") do example
read(example, String) |> parsehtml
end
@test page.doctype == "html"
root = page.root
@test tag(root[1][1]) == :meta
@test root[2][1][1].text == "A simple test page."
@test root[2][1][1].parent === root[2][1]
end
# test that nonexistent tags are parsed as their actual name and not "unknown"
let
page = parsehtml("<weird></weird")
@test tag(page.root[2][1]) == :weird
end
# test that non-standard tags, with attributes, are parsed correctly
let
page = Gumbo.parsehtml("<my-element cool></my-element>")
@test tag(page.root[2][1]) == Symbol("my-element")
@test Gumbo.attrs(page.root[2][1]) == Dict("cool" => "")
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 133 | using Test
using Gumbo
include("basics.jl")
include("comparison.jl")
include("parsing.jl")
include("traversal.jl")
include("io.jl")
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | code | 1345 | using AbstractTrees
# TODO these tests are pretty silly in that they now pretty much just
# test code in AbstractTrees.jl. They are still sort of useful though
# in that they test our implementation of `children` indirectly, and I
# an just loath to remove tests in general, so I'm going to leave them
# here for now
const ex = parsehtml("""
<html>
<head></head>
<body>
<p>a<strong>b</strong>c</p>
</body>
</html>
""")
let res = Any[]
for node in StatelessBFS(ex.root)
push!(res, node)
end
@assert tag(res[3]) == :body
@assert tag(res[4]) == :p
@assert text(last(res)) == "b"
end
let res = Any[]
for node in PreOrderDFS(ex.root)
push!(res, node)
end
@assert tag(res[3]) == :body
@assert tag(res[4]) == :p
@assert text(last(res)) == "c"
end
let res = Any[]
for node in PostOrderDFS(ex.root)
push!(res, node)
end
@assert tag(res[1]) == :head
@assert text(res[2]) == "a"
@assert tag(res[4]) == :strong
@assert tag(last(res)) == :HTML
end
isp(node::HTMLNode) = isa(node, HTMLElement) && tag(node) == :p
for itr in [PostOrderDFS, PreOrderDFS, StatelessBFS]
@assert mapreduce(isp,+,itr(ex.root)) == 1
end
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.8.2 | a1a138dfbf9df5bace489c7a9d5196d6afdfa140 | docs | 6249 | # Gumbo.jl
[JuliaHub](https://juliahub.com/ui/Packages/Gumbo/mllB2) [Build Status](https://travis-ci.org/JuliaWeb/Gumbo.jl) [Coverage](http://codecov.io/github/JuliaWeb/Gumbo.jl?branch=master) [Dependents](https://juliahub.com/ui/Packages/Gumbo/mllB2) [PkgEval](https://juliahub.com/ui/Packages/Gumbo/mllB2?t=2)
Gumbo.jl is a Julia wrapper around
[Google's gumbo library](https://github.com/google/gumbo-parser) for
parsing HTML.
Getting started is very easy:
```julia
julia> using Gumbo
julia> parsehtml("<h1> Hello, world! </h1>")
HTML Document:
<!DOCTYPE >
HTMLElement{:HTML}:
<HTML>
<head></head>
<body>
<h1>
Hello, world!
</h1>
</body>
</HTML>
```
Read on for further documentation.
## Installation
```jl
using Pkg
Pkg.add("Gumbo")
```
or activate `Pkg` mode in the REPL by typing `]`, and then:
```
add Gumbo
```
## Basic usage
The workhorse is the `parsehtml` function, which takes a single
argument, a valid UTF8 string, which is interpreted as HTML data to be
parsed, e.g.:
```julia
parsehtml("<h1> Hello, world! </h1>")
```
Parsing an HTML file named `filename` can be done using:
```julia
julia> parsehtml(read(filename, String))
```
The result of a call to `parsehtml` is an `HTMLDocument`, a type which
has two fields: `doctype`, which is the doctype of the parsed document
(this will be the empty string if no doctype is provided), and `root`,
which is a reference to the `HTMLElement` that is the root of the
document.
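For example, a minimal sketch (the empty doctype reflects input without a doctype declaration):
```julia
julia> doc = parsehtml("<h1>Hello</h1>");

julia> doc.doctype
""

julia> tag(doc.root)
:HTML
```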
Note that gumbo is a very permissive HTML parser, designed to
gracefully handle the insanity that passes for HTML out on the wild,
wild web. It will return a valid HTML document for *any* input, doing
all sorts of algorithmic gymnastics to twist what you give it into
valid HTML.
If you want an HTML validator, this is probably not your library. That
said, `parsehtml` does take an optional `Bool` keyword argument,
`strict`, which, if `true`, causes an `InvalidHTMLException` to be thrown
if the call to the gumbo C library produces any errors.
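For example, empty input produces parse errors, so (as exercised in the package's own tests) the following throws:
```julia
julia> parsehtml("", strict=true)
ERROR: InvalidHTMLException("input html was invalid")
```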
## HTML types
This library defines a number of types for representing HTML.
### `HTMLDocument`
`HTMLDocument` is what is returned from a call to `parsehtml`. It has a
`doctype` field, which contains the doctype of the parsed document,
and a `root` field, which is a reference to the root of the document.
### `HTMLNode`s
A document contains a tree of HTML Nodes, which are represented as
subtypes of the `HTMLNode` abstract type. The first of these is
`HTMLElement`.
### `HTMLElement`
```julia
mutable struct HTMLElement{T} <: HTMLNode
children::Vector{HTMLNode}
parent::HTMLNode
attributes::Dict{AbstractString, AbstractString}
end
```
`HTMLElement` is probably the most interesting and frequently used
type. An `HTMLElement` is parameterized by a symbol representing its
tag. So an `HTMLElement{:a}` is a different type from an
`HTMLElement{:body}`, etc. An empty `HTMLElement` of a given tag can be
constructed as follows:
```julia
julia> HTMLElement(:div)
HTMLElement{:div}:
<div></div>
```
`HTMLElement`s have a `parent` field, which refers to another
`HTMLNode`. `parent` will always be an `HTMLElement`, unless the
element has no parent (as is the case with the root of a document), in
which case it will be a `NullNode`, a special type of `HTMLNode` which
exists for just this purpose. Empty `HTMLElement`s constructed as in
the example above will also have a `NullNode` for a parent.
`HTMLElement`s also have `children`, which is a vector of
`HTMLNode` containing the children of this element, and
`attributes`, which is a `Dict` mapping attribute names to values.
`HTMLElement`s implement `getindex`, `setindex!`, and `push!`;
indexing into or pushing onto an `HTMLElement` operates on its
children array.
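A minimal sketch (the `:div` and `:p` tags are arbitrary examples):
```julia
julia> elem = HTMLElement(:div);

julia> push!(elem, HTMLElement(:p));

julia> tag(elem[1])
:p
```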
There are a number of convenience methods for working with `HTMLElement`s (a usage sketch follows the list):
- `tag(elem)`
get the tag of this element as a symbol
- `attrs(elem)`
return the attributes dict of this element
- `children(elem)`
return the children array of this element
- `getattr(elem, name)`
get the value of attribute `name` or raise a `KeyError`. Also
supports being called with a default value (`getattr(elem, name,
default)`) or function (`getattr(f, elem, name)`).
- `setattr!(elem, name, value)`
set the value of attribute `name` to `value`
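A short usage sketch of these helpers (the attribute names and values are arbitrary examples):
```julia
julia> elem = HTMLElement(:a);

julia> setattr!(elem, "href", "https://julialang.org");

julia> getattr(elem, "href")
"https://julialang.org"

julia> hasattr(elem, "class")
false

julia> getattr(elem, "class", "none")
"none"
```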
### `HTMLText`
```jl
mutable struct HTMLText <: HTMLNode
parent::HTMLNode
text::AbstractString
end
```
Represents text appearing in an HTML document. For example:
```julia
julia> doc = parsehtml("<h1> Hello, world! </h1>")
HTML Document:
<!DOCTYPE >
HTMLElement{:HTML}:
<HTML>
<head></head>
<body>
<h1>
Hello, world!
</h1>
</body>
</HTML>
julia> doc.root[2][1][1]
HTML Text: Hello, world!
```
This type is quite simple, just a reference to its parent and the
actual text it represents (this is also accessible by a `text`
function). You can construct `HTMLText` instances as follows:
```jl
julia> HTMLText("Example text")
HTML Text: Example text
```
Just as with `HTMLElement`s, the parent of an instance so constructed
will be a `NullNode`.
## Tree traversal
Use the iterators defined in
[AbstractTrees.jl](https://github.com/Keno/AbstractTrees.jl/), e.g.:
```julia
julia> using AbstractTrees
julia> using Gumbo
julia> doc = parsehtml("""
<html>
<body>
<div>
<p></p> <a></a> <p></p>
</div>
<div>
<span></span>
</div>
</body>
</html>
""");
julia> for elem in PreOrderDFS(doc.root) println(tag(elem)) end
HTML
head
body
div
p
a
p
div
span
julia> for elem in PostOrderDFS(doc.root) println(tag(elem)) end
head
p
a
p
div
span
div
body
HTML
julia> for elem in StatelessBFS(doc.root) println(tag(elem)) end
HTML
head
body
div
div
p
a
p
span
julia>
```
## TODOS
- support CDATA
- support comments
| Gumbo | https://github.com/JuliaWeb/Gumbo.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 374 | module SimpleTensorNetworks
using Requires
using LinearAlgebra
include("tensors.jl")
include("tensorcontract.jl")
include("simplify.jl")
include("contractionorder/contractionorder.jl")
function __init__()
@require CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" include("cuda.jl")
@require Viznet = "52a3aca4-6234-47fd-b74a-806bdf78ede9" include("viz.jl")
end
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 1211 | using .CUDA
using .CUDA: CuArray, @linearidx, GPUArrays, CUBLAS
using LinearAlgebra
export togpu
function togpu(tn::TensorNetwork)
TensorNetwork(togpu.(tn.tensors))
end
function togpu(t::LabeledTensor)
LabeledTensor(CuArray(t.array), t.labels, t.meta)
end
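# Usage sketch (mirroring the package tests): build the network on the CPU and
# move it wholesale to the GPU, e.g. `tn_gpu = TensorNetwork([tA, tB]) |> togpu`.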
function genperm(I::NTuple{N}, perm::NTuple{N}) where N
ntuple(d-> (@inbounds return I[perm[d]]), Val(N))
end
function LinearAlgebra.permutedims!(dest::GPUArrays.AbstractGPUArray, src::GPUArrays.AbstractGPUArray, perm)
LinearAlgebra.permutedims!(dest, src, Tuple(perm))
end
function LinearAlgebra.permutedims!(dest::GPUArrays.AbstractGPUArray, src::GPUArrays.AbstractGPUArray, perm::NTuple)
perm isa Tuple || (perm = Tuple(perm))
size_dest = size(dest)
size_src = size(src)
CUDA.gpu_call(vec(dest), vec(src), perm; name="permutedims!") do ctx, dest, src, perm
i = @linearidx src
I = l2c(size_src, i)
@inbounds dest[c2l(size_dest, genperm(I, perm))] = src[i]
return
end
return reshape(dest, size(dest))
end
function LinearAlgebra.rmul!(a::StridedCuArray{<:CUBLAS.CublasFloat}, k::Number)
vec(a) .*= k
return a
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 2474 | export rm_multiedges!, rm_degree12, to_simplified_tnet
using LightGraphs: SimpleGraph, add_edge!
function rm_multiedges!(tn::TensorNetwork)
n = length(tn.tensors)
for i=1:n
ti = tn.tensors[i]
for j=i+1:n
tj = tn.tensors[j]
if count(x->x ∈ ti.labels, tj.labels) > 1
# rm multi edges
tn.tensors[i], tn.tensors[j] = rm_multiedge(ti, tj)
end
end
end
return tn
end
function rm_multiedge(ti, tj, li::Tuple, lj::Tuple)
ti, tj, li, lj = rm_multiedge(ti, tj, collect(li), collect(lj))
ti, tj, (li...,), (lj...,)
end
function rm_multiedge(t1, t2)
ti, li, tj, lj = t1.array, t1.labels, t2.array, t2.labels
common_edges = li ∩ lj
dimsi = indexin(common_edges, li)
remsi = setdiff(1:length(li), dimsi)
dimsj = indexin(common_edges, lj)
remsj = setdiff(1:length(lj), dimsj)
orderi = Int[(remsi ∪ dimsi)...]
orderj = Int[(remsj ∪ dimsj)...]
# permute
ti = permutedims(ti, orderi)
tj = permutedims(tj, orderj)
li = li[orderi]
lj = lj[orderj]
# reshape
ti = reshape(ti, size(ti)[1:length(remsi)]..., :)
tj = reshape(tj, size(tj)[1:length(remsj)]..., :)
li = li[1:length(remsi)+1]
lj = lj[1:length(remsj)+1]
return similar(t1, ti, li), similar(t2, tj, lj)
end
function tn2graph(tn::TensorNetwork)
n = length(tn.tensors)
g = SimpleGraph(n)
for i=1:n
ti = tn.tensors[i]
for j=i+1:n
if any(x->x ∈ ti.labels, tn.tensors[j].labels)
add_edge!(g, i, j)
end
end
end
return g
end
function rm_degree12(tn::TensorNetwork{T}) where T
tn = copy(tn)
n = length(tn)
mask = ones(Bool, n)
has_dangling_edges = true
factor = one(T)
while has_dangling_edges && length(tn) > 1
has_dangling_edges = false
for i=1:n
mask[i] || continue
ti = tn.tensors[i]
if ndims(ti) <= 2
has_dangling_edges = true
mask[i] = false
if ndims(ti) != 0
j = findfirst(k -> mask[k] && (ti.labels[1] ∈ tn.tensors[k].labels), 1:n)
# absorb i -> j
tn.tensors[j] = ti * tn.tensors[j]
else
factor *= Array(tn.tensors[i].array)[]
end
end
end
end
return factor, TensorNetwork(tn.tensors[mask])
end
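# Usage sketch (cf. the package tests): `factor, tn2 = rm_degree12(tn)` absorbs
# degree-1 and degree-2 tensors into their neighbors and accumulates fully
# contracted scalars in `factor`, so the original contraction value is
# recovered as `factor` times the contraction of `tn2`.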
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 5599 | export tensorcontract, TensorNetwork, contract_label!, PlotMeta, contract, ContractionTree
export contract_tree
function _align_eltypes(xs::AbstractArray...)
T = promote_type(eltype.(xs)...)
return map(x->eltype(x)==T ? x : T.(x), xs)
end
function _align_eltypes(xs::AbstractArray{T}...) where T
xs
end
# batched routines
@inline _indexpos(iAs, i)::Int = findfirst(==(i), iAs)
_isunique(x) = length(unique(x)) == length(x)
# can be used in either static or dynamic invoke
@noinline function analyse_dim(iAs, iBs, iOuts)
# check indices
@assert _isunique(iAs) "indices in A matrix are not unique: $iAs"
@assert _isunique(iBs) "indices in B matrix are not unique: $iBs"
@assert _isunique(iOuts) "indices in C matrix are not unique: $iOuts"
allinds = [iAs..., iBs..., iOuts...]
@assert all(i -> count(==(i), allinds) == 2, allinds) "indices do not appear in pairs! got $iAs, $iBs and $iOuts"
mdims = setdiff(iAs, iBs)
ndims = setdiff(iBs, iAs)
kdims = iAs ∩ iBs
iABs = mdims ∪ ndims
lA = mdims ∪ kdims
lB = kdims ∪ ndims
lOut = mdims ∪ ndims
pA = indexin(lA, iAs)
pB = indexin(lB, iBs)
pOut = indexin(iOuts, lOut)
pA, pB, pOut, length(kdims)
end
@noinline function analyse_size(pA, sA, pB, sB, nc)
nA = length(sA)
nB = length(sB)
sA1 = Int[sA[pA[i]] for i=1:nA-nc]
sB2 = Int[sB[pB[i]] for i=nc+1:nB]
K = mapreduce(i->sB[pB[i]], *, 1:nc, init=1)
return prod(sA1), K, prod(sB2), [sA1..., sB2...]
end
function tensorcontract(iAs, A::AbstractArray, iBs, B::AbstractArray, iOuts)
A, B = _align_eltypes(A, B)
pA, pB, pOut, nc = analyse_dim([iAs...], [iBs...], [iOuts...])
M, K, N, sOut = analyse_size(pA, size(A), pB, size(B), nc)
Apr = reshape(_conditioned_permutedims(A, pA), M, K)
Bpr = reshape(_conditioned_permutedims(B, pB), K, N)
AB = Apr * Bpr
AB = _conditioned_permutedims(reshape(AB, sOut...), (pOut...,))
end
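# For example, ordinary matrix multiplication is the contraction (a sketch):
#   tensorcontract((1, 2), A, (2, 3), B, (1, 3)) ≈ A * B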
function _conditioned_permutedims(A::AbstractArray{T,N}, perm) where {T,N}
if any(i-> (@inbounds perm[i]!=i), 1:N)
return permutedims(A, (perm...,))
else
return A
end
end
"""
TensorNetwork{T,TT<:AbstractTensor{T}}
TensorNetwork(tensors)
A tensor network; its only field is `tensors`, a vector of `AbstractTensor` (e.g. `LabeledTensor`).
"""
struct TensorNetwork{T,TT<:AbstractTensor{T}}
tensors::Vector{TT}
end
function TensorNetwork(tensors)
T = promote_type([eltype(x.array) for x in tensors]...)
TensorNetwork(AbstractTensor{T}[tensors...])
end
Base.copy(tn::TensorNetwork) = TensorNetwork(copy(tn.tensors))
Base.length(tn::TensorNetwork) = length(tn.tensors)
struct ContractionTree
left
right
function ContractionTree(left::Union{Integer, ContractionTree}, right::Union{Integer, ContractionTree})
new(left, right)
end
end
function Base.getindex(ct::ContractionTree, i::Int)
Base.@boundscheck i==1 || i==2
i==1 ? ct.left : ct.right
end
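# Example (a sketch): contract tensors 1 and 2 first, then the result with tensor 3:
#   ct = ContractionTree(ContractionTree(1, 2), 3)
#   ct[1]  # the left subtree, ContractionTree(1, 2)
#   ct[2]  # the right leaf, 3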
function contract_tree(tn::TensorNetwork{T}, ctree; normalizer) where T
if ctree isa Integer
t = tn.tensors[ctree]
if normalizer !== nothing
normt = normalizer(t)
return normt, rmul!(copy(t), T(1/normt))
else
return one(T), t
end
else
normt1, t1 = contract_tree(tn, ctree[1]; normalizer=normalizer)
normt2, t2 = contract_tree(tn, ctree[2]; normalizer=normalizer)
normt = normt1 * normt2
t = t1 * t2
if normalizer !== nothing
normt_ = normalizer(t)
normt = normt * normt_
rmul!(t, T(1/normt_))
end
return normt, t
end
end
function contract(tn::TensorNetwork{T}, ctree; normalizer=nothing) where T
normt, t = contract_tree(tn, ctree; normalizer=normalizer)
if normt != one(T)
rmul!(t.array, normt)
end
return t
end
function contract_label!(tn::TensorNetwork{T}, label) where {T}
ts = findall(x->label ∈ x.labels, tn.tensors)
@assert length(ts) == 2 "number of tensors with the same label $label is not 2, find $ts"
t1, t2 = tn.tensors[ts[1]], tn.tensors[ts[2]]
tout = t1 * t2
deleteat!(tn.tensors, ts)
push!(tn.tensors, tout)
return tout, t1.labels ∩ t2.labels
end
using Base.Cartesian
c2l(size::NTuple{0, Int}, c::NTuple{0,Int}) = 1
@generated function c2l(size::NTuple{N, Int}, c::NTuple{N,Int}) where N
quote
res = c[1]
stride = size[1]
@nexprs $(N-1) i->begin
res += (c[i+1]-1) * stride
stride *= size[i+1]
end
return res
end
end
l2c(size::NTuple{0, Int}, l::Int) = ()
@generated function l2c(size::NTuple{N, Int}, l::Int) where N
quote
l -= 1
@nexprs $(N-1) i->begin
@inbounds l, s_i = divrem(l, size[i])
end
$(Expr(:tuple, [:($(Symbol(:s_, i))+1) for i=1:N-1]..., :(l+1)))
end
end
function Base.show(io::IO, tn::TensorNetwork)
print(io, "$(typeof(tn).name):\n $(join(["$(t.meta === nothing ? i : dispmeta(t.meta)) => $t" for (i, t) in enumerate(tn.tensors)], "\n "))")
end
function Base.show(io::IO, ::MIME"plain/text", tn::TensorNetwork)
Base.show(io, tn)
end
function adjacency_matrix(tn::TensorNetwork)
indx = Int[]
indy = Int[]
data = Int[]
for i=1:length(tn)
for j=1:length(tn)
if i!=j && any(l->l ∈ tn.tensors[i].labels, tn.tensors[j].labels)
push!(indx, i)
push!(indy, j)
push!(data, 1)
end
end
end
return sparse(indx, indy, data)
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 2590 | export LabeledTensor
# abstractions
abstract type AbstractTensor{T, N} end
"""
LabeledTensor{T,N,AT<:AbstractArray{T,N}, LT, MT} <: AbstractTensor{T, N}
LabeledTensor(array::AbstractArray, labels::AbstractVector[, meta])
Tensor with labeled legs.
When multiplying two tensors, legs with the same labels are treated as inner degrees of freedom and get contracted.
The optional argument `meta` holds meta information and defaults to `nothing`. One can use it to store additional information like a plotting layout.
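# Example
A minimal sketch; the shared label `:j` is contracted by `*`:
```julia
A = LabeledTensor(randn(2, 3), [:i, :j])
B = LabeledTensor(randn(3, 4), [:j, :k])
C = A * B   # labels [:i, :k], size (2, 4)
```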
"""
struct LabeledTensor{T,N,AT<:AbstractArray{T,N}, LT, MT} <: AbstractTensor{T, N}
array::AT
labels::Vector{LT}
meta::MT
end
Base.ndims(t::LabeledTensor) = ndims(t.array)
LinearAlgebra.norm(t::LabeledTensor, p::Real=2) = norm(t.array, p)
function LabeledTensor(tensor::AbstractArray, labels::AbstractVector)
@assert ndims(tensor) == length(labels) "dimension of tensor $(ndims(tensor)) != number of labels $(length(labels))"
LabeledTensor(tensor, labels, nothing)
end
function Base.:(*)(A::LabeledTensor, B::LabeledTensor)
labels_AB = setdiff(A.labels, B.labels) ∪ setdiff(B.labels, A.labels)
LabeledTensor(tensorcontract(A.labels, A.array, B.labels, B.array, labels_AB), labels_AB, merge_meta(A.meta, B.meta))
end
function Base.isapprox(a::LabeledTensor, b::LabeledTensor; kwargs...)
isapprox(a.array, b.array; kwargs...) && a.labels == b.labels
end
Base.size(t::LabeledTensor) = Base.size(t.array)
Base.copy(t::LabeledTensor) = LabeledTensor(copy(t.array), t.labels)
Base.similar(::Type{<:LabeledTensor}, arr::AbstractArray, labels::AbstractVector, meta=nothing) = LabeledTensor(arr, labels, meta)
Base.similar(::LabeledTensor, arr::AbstractArray, labels::AbstractVector, meta=nothing) = LabeledTensor(arr, labels, meta)
LinearAlgebra.rmul!(t::LabeledTensor, factor) = (rmul!(t.array, factor); t)
function Base.show(io::IO, lt::LabeledTensor)
print(io, "$(typeof(lt).name.name){$(eltype(lt.array))}($(join(lt.labels, ", ")))")
end
function Base.show(io::IO, ::MIME"plain/text", lt::LabeledTensor)
Base.show(io, lt)
end
function mul_dim(t::LabeledTensor, m::AbstractMatrix; dim::Int)
data = t.array
iA = ntuple(i->i, ndims(data))
iB = (dim, -dim)
iC = ntuple(i->i==dim ? -dim : i, ndims(data))
LabeledTensor(tensorcontract(iA, data, iB, m, iC), t.labels)
end
struct PlotMeta
loc::Tuple{Float64, Float64}
name::String
end
merge_meta(m1::PlotMeta, m2::PlotMeta) = PlotMeta((m1.loc .+ m2.loc) ./ 2, m1.name*m2.name)
merge_meta(m1::Nothing, m2::Nothing) = nothing
dispmeta(m::PlotMeta) = m.name
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 3965 | using .Viznet
using .Viznet.Compose
using SparseArrays
export viz_tnet
function viz_tnet(tnet::TensorNetwork; r=0.25/sqrt(length(tnet.tensors)+1), show_edgeindex=false,
node_fontsize=100pt/sqrt(length(tnet.tensors)+1),
edge_fontsize=200pt/sqrt(length(tnet.tensors)+1),
labels=1:length(tnet),
locs=spring_layout(tnet),
linecolor="skyblue",
node_edgecolor="black",
node_facecolor="white"
)
nt = length(tnet.tensors)
nb = nodestyle(:default, fill(node_facecolor), stroke(node_edgecolor), linewidth(2mm/sqrt(length(tnet.tensors)+1)); r=r)
eb = bondstyle(:default, linewidth(4mm/sqrt(length(tnet.tensors)+1)), stroke(linecolor))
tb1 = textstyle(:default, fontsize(node_fontsize))
tb2 = textstyle(:default, fontsize(edge_fontsize))
compose(Compose.context(r, r, 1-2r, 1-2r), canvas() do
for (t, loc, label) in zip(tnet.tensors, locs, labels)
nb >> loc
if !isempty(label)
tb1 >> (loc, string(label))
end
end
for i=1:nt
for j=i+1:nt
li = tnet.tensors[i].labels
lj = tnet.tensors[j].labels
loci, locj = locs[i], locs[j]
common_labels = li ∩ lj
if !isempty(common_labels)
eb >> (loci, locj)
show_edgeindex && tb2 >> ((loci .+ locj) ./ 2, join(common_labels, ", "))
end
end
end
end)
end
function Base.show(io::IO, mime::MIME"text/html", tnet::TensorNetwork)
show(io, mime, viz_tnet(tnet))
end
# copied from LightGraphs
function spring_layout(tn::TensorNetwork,
locs_x=2*rand(length(tn)).-1.0,
locs_y=2*rand(length(tn)).-1.0;
C=2.0,
MAXITER=100,
INITTEMP=2.0)
nvg = length(tn)
adj_matrix = adjacency_matrix(tn)
# The optimal distance between vertices
k = C * sqrt(4.0 / nvg)
k² = k * k
# Store forces and apply at end of iteration all at once
force_x = zeros(nvg)
force_y = zeros(nvg)
# Iterate MAXITER times
@inbounds for iter = 1:MAXITER
# Calculate forces
for i = 1:nvg
force_vec_x = 0.0
force_vec_y = 0.0
for j = 1:nvg
i == j && continue
d_x = locs_x[j] - locs_x[i]
d_y = locs_y[j] - locs_y[i]
dist² = (d_x * d_x) + (d_y * d_y)
dist = sqrt(dist²)
if !( iszero(adj_matrix[i,j]) && iszero(adj_matrix[j,i]) )
# Attractive + repulsive force
# F_d = dist² / k - k² / dist # original FR algorithm
F_d = dist / k - k² / dist²
else
# Just repulsive
# F_d = -k² / dist # original FR algorithm
F_d = -k² / dist²
end
force_vec_x += F_d*d_x
force_vec_y += F_d*d_y
end
force_x[i] = force_vec_x
force_y[i] = force_vec_y
end
# Cool down
temp = INITTEMP / iter
# Now apply them, but limit to temperature
for i = 1:nvg
fx = force_x[i]
fy = force_y[i]
force_mag = sqrt((fx * fx) + (fy * fy))
scale = min(force_mag, temp) / force_mag
locs_x[i] += force_x[i] * scale
locs_y[i] += force_y[i] * scale
end
end
# Scale to unit square
min_x, max_x = minimum(locs_x), maximum(locs_x)
min_y, max_y = minimum(locs_y), maximum(locs_y)
function scaler(z, a, b)
2.0*((z - a)/(b - a)) - 1.0
end
map!(z -> scaler(z, min_x, max_x), locs_x, locs_x)
map!(z -> scaler(z, min_y, max_y), locs_y, locs_y)
return [((x+1)/2, (y+1)/2) for (x, y) in zip(locs_x, locs_y)]
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 2199 | module ContractionOrder
using LightGraphs
using LightGraphs: SimpleEdge
function log2sumexp2(s)
ms = maximum(s)
return log2(sum(x->exp2(x - ms), s)) + ms
end
function indexof(x, v)
@inbounds for i=1:length(v)
if v[i] == x
return i
end
end
return 0
end
include("edgeinfo.jl")
include("greedy.jl")
end
using .ContractionOrder: order_greedy, TensorNetworkLayout
import .ContractionOrder: abstract_contract, log2sumexp2
using LightGraphs: SimpleEdge, src, dst
export trees_greedy, abstract_contract, log2sumexp2
function build_trees(N::Int, order::AbstractVector)
ids = collect(Any, 1:N)
for i=1:length(order)
ta, tb = src(order[i]), dst(order[i])
ids[ta] = ContractionTree(ids[ta], ids[tb])
ids[tb] = nothing
end
filter(x->x!==(nothing), ids)
end
function build_order(trees::AbstractVector)
res = SimpleEdge[]
for t in trees
build_order!(t, res)
end
return res
end
function build_order!(tree, res)
if tree isa Integer
return tree
else
a = build_order!(tree.left, res)
b = build_order!(tree.right, res)
push!(res, SimpleEdge(a, b))
return min(a, b)
end
end
function layout(tn::TensorNetwork)
graph = tn2graph(tn)
log2shapes = [[log2.(size(tn.tensors[i]))...] for i=1:length(tn)]
TensorNetworkLayout(graph, log2shapes)
end
"""
trees_greedy(tn::TensorNetwork; strategy="min_dim")
Returns a tuple of `(time complexities, space complexities, trees)`,
where `trees` is a vector of `ContractionTree` objects.
For disconnected graphs, the number of trees can be greater than 1.
`strategy` can be "min_dim", "min_dim_tri", "min_reduce", "min_reduce_tri", "max_reduce" or "max_reduce_tri".
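# Example
A sketch, assuming `tn` is a connected `TensorNetwork`:
```julia
tcs, scs, trees = trees_greedy(tn; strategy="min_reduce")
result = contract(tn, trees[1])
```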
"""
function trees_greedy(tn::TensorNetwork; kwargs...)
tc, sc, order = order_greedy(layout(tn); kwargs...)
tc, sc, build_trees(length(tn), order)
end
abstract_contract(tn::TensorNetwork, trees::ContractionTree) = abstract_contract(tn, [trees])
function abstract_contract(tn::TensorNetwork, trees::AbstractVector)
abstract_contract(layout(tn), build_order(trees))
end | SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 2950 | export EdgeInfo, edgeinfo, update!, add!, remove!, select
struct EdgeInfo{DT<:Dict}
data::DT
end
function EdgeInfo()
EdgeInfo(Dict{SimpleEdge{Int},Float64}())
end
function edgeinfo(edges, log2_shapes, neighbors, strategy)
ei = EdgeInfo()
for edge in edges
update!(ei, edge, log2_shapes, neighbors, strategy)
end
return ei
end
Base.copy(ei::EdgeInfo) = EdgeInfo(copy(ei.data))
# strategy ∈ {"min_dim", "min_dim_tri", "min_reduce", "min_reduce_tri", "max_reduce", "max_reduce_tri"}
function update!(ei::EdgeInfo, edge::SimpleEdge, log2_shapes::Vector, neighbors, strategy)
m, n = src(edge), dst(edge)
idxm_n = findfirst(==(n), neighbors[m])
log2_shapemn = log2_shapes[m][idxm_n]
log2_shapem = sum(log2_shapes[m])
log2_shapen = sum(log2_shapes[n])
if strategy == "min_reduce" || strategy == "min_reduce_tri"
value = exp2(log2_shapem + log2_shapen - 2*log2_shapemn)
elseif strategy == "max_reduce" || strategy == "max_reduce_tri"
loga = log2_shapem + log2_shapen - 2*log2_shapemn
logb = log2sumexp2([log2_shapem, log2_shapen])
value = exp2(loga) - exp2(logb) # different from the previous one!
else
value = 1.0
end
ei.data[edge] = value
return ei
end
function add!(ei::EdgeInfo, nodes::Vector{Int}, log2_shapes::Vector, neighbors, strategy)
edges = SimpleEdge{Int}[]
for node in nodes
for neighbor in neighbors[node]
edge = uedge(neighbor, node)
if !(edge ∈ edges)
push!(edges, edge)
update!(ei, edge, log2_shapes, neighbors, strategy)
end
end
end
return ei
end
function remove!(ei::EdgeInfo, nodes)
for edge in keys(ei.data)
if src(edge) in nodes || dst(edge) in nodes
pop!(ei.data, edge)
end
end
return ei
end
"""
select(ei::EdgeInfo, strategy, neighbors, edge_pool=nothing)
Select an edge from `edge_pool` (all known edges if `nothing`), picking one with the smallest value in `ei.data`; the `*_tri` strategies break ties by the number of shared neighbors (triangles).
"""
function select(ei::EdgeInfo, strategy, neighbors, edge_pool=nothing)
pool_edge_info = if edge_pool !== nothing
Dict(edge=>ei.data[edge] for edge in edge_pool)
else
ei.data
end
min_value = minimum(values(pool_edge_info))
min_edges = [edge for edge in keys(pool_edge_info) if pool_edge_info[edge] == min_value]
if strategy == "min_dim_tri" || strategy == "max_reduce_tri"
triangle_count = zeros(Int, length(min_edges))
for (i, edge) in enumerate(min_edges)
triangle_count[i] = length(neighbors[src(edge)] ∩ neighbors[dst(edge)])
end
edge = min_edges[rand(findall(==(maximum(triangle_count)), triangle_count))]
else
edge = rand(min_edges)
end
return edge
end
Base.keys(ei::EdgeInfo) = keys(ei.data)
Base.haskey(ei::EdgeInfo, ind) = haskey(ei.data, ind)
nremain(ei::EdgeInfo) = length(ei.data)
"""undirected edge"""
uedge(i, k) = i < k ? SimpleEdge(i, k) : SimpleEdge(k, i) | SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 4606 | export TensorNetworkLayout, order_greedy
export abstract_contract
struct TensorNetworkLayout
graph::SimpleGraph{Int}
log2_shapes::Vector{Vector{Float64}}
function TensorNetworkLayout(graph, log2_shapes)
@assert nv(graph) == length(log2_shapes) "size of graph ($(nv(graph))) and length of shapes ($(length(log2_shapes))) does not match!"
new(graph, log2_shapes)
end
end
LightGraphs.edges(tn::TensorNetworkLayout) = edges(tn.graph)
"""
Abstractly contract a single edge; returns the log2 time and space complexity of the step.
"""
function abstract_contract_edge!(edge, log2_shapes::Vector{Vector{Float64}}, edge_info::EdgeInfo,
neighbors::Vector{Vector{ET}}, strategy::String) where ET
haskey(edge_info, edge) || error("edge $edge not found!")
i, j = src(edge), dst(edge)
remove!(edge_info, [i, j, neighbors[i]..., neighbors[j]...])
local log2_shapei, log2_shapej, log2_shapeij
idxi_j = indexof(j, neighbors[i])
idxj_i = indexof(i, neighbors[j])
log2_shapeij = log2_shapes[i][idxi_j]
deleteat!(log2_shapes[i], idxi_j)
deleteat!(log2_shapes[j], idxj_i)
deleteat!(neighbors[i], idxi_j)
deleteat!(neighbors[j], idxj_i)
log2_shapei, log2_shapej = sum(log2_shapes[i]), sum(log2_shapes[j])
for node in neighbors[j] # rm multi-edge
idxj_n = indexof(node, neighbors[j])
idxn_j = indexof(j, neighbors[node])
if node in neighbors[i]
idxi_n = indexof(node, neighbors[i])
idxn_i = indexof(i, neighbors[node])
log2_shapes[i][idxi_n] += log2_shapes[j][idxj_n]
log2_shapes[node][idxn_i] += log2_shapes[node][idxn_j]
deleteat!(log2_shapes[node], idxn_j)
deleteat!(neighbors[node], idxn_j)
else
push!(log2_shapes[i], log2_shapes[j][idxj_n])
push!(neighbors[i], node)
neighbors[node][idxn_j] = i
end
end
add!(edge_info, [i, neighbors[i]...], log2_shapes, neighbors, strategy)
log2_tc = log2_shapei + log2_shapeij + log2_shapej
log2_sc = log2_shapei + log2_shapej
# completely remove j
empty!(log2_shapes[j])
empty!(neighbors[j])
return log2_tc, log2_sc
end
"""
order_greedy(tn::TensorNetworkLayout; strategy="min_dim")
Compute a greedy contraction order; returns `(log2_tcs, log2_scs, order)`: the per-step log2 time and space complexities, and the contracted edges in order.
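A sketch of typical use (`tn_layout` is assumed to be a `TensorNetworkLayout`):
```julia
log2_tcs, log2_scs, order = order_greedy(tn_layout; strategy="min_reduce")
```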
"""
function order_greedy(tn::TensorNetworkLayout; strategy="min_dim", edge_pool=collect(edges(tn)))
order = SimpleEdge{Int}[]
log2_shapes = deepcopy(tn.log2_shapes)
neighbors = deepcopy(tn.graph.fadjlist)
edge_info = edgeinfo(edges(tn), log2_shapes, neighbors, strategy)
log2_tcs = Float64[] # time complexity
log2_scs = Float64[] # space complexity
while !isempty(edge_pool)
edge = select(edge_info, strategy, neighbors, edge_pool)
push!(order, edge)
log2_tc_step, log2_sc_step = abstract_contract_edge!(edge, log2_shapes, edge_info, neighbors, strategy)
push!(log2_tcs, log2_tc_step)
push!(log2_scs, log2_sc_step)
deleteat!(edge_pool, indexof(edge, edge_pool))
i, j = src(edge), dst(edge)
for (l, el) in enumerate(edge_pool)
if j == src(el) || j == dst(el)
k = src(el) == j ? dst(el) : src(el)
edge_pool[l] = uedge(i, k)
end
end
edge_pool = unique(edge_pool)
end
return log2_tcs, log2_scs, order
end
function abstract_contract(tn::TensorNetworkLayout, order)
@assert check_healthy(tn) "tensor network shape mismatch!"
log2_tcs = Float64[]
log2_scs = Float64[]
log2_shapes = deepcopy(tn.log2_shapes)
neighbors = deepcopy(tn.graph.fadjlist)
edge_info = edgeinfo(edges(tn), log2_shapes, neighbors, "min_dim")
maxsize = 0.0
for edge in order
log2_tc, log2_sc = abstract_contract_edge!(edge, log2_shapes, edge_info, neighbors, "min_dim")
push!(log2_tcs, log2_tc)
push!(log2_scs, compute_log2_sc(log2_shapes))
maxsize = max(maxsize, maximum(sum.(log2_shapes)))
end
return log2_tcs, log2_scs, maxsize
end
function check_healthy(tn::TensorNetworkLayout)
res = true
for i in vertices(tn.graph)
for j in neighbors(tn.graph, i)
res = res && (edgesize(tn, i, j) == edgesize(tn, j, i))
end
end
return res
end
log2size(s) = isempty(s) ? -Inf : sum(s)
compute_log2_sc(log2_shapes) = (isempty(log2_shapes) || all(isempty, log2_shapes)) ? 0.0 : log2sumexp2(log2size.(log2_shapes))
edgesize(tn::TensorNetworkLayout, m::Int, n::Int) = tn.log2_shapes[n][indexof(m, neighbors(tn.graph, n))]
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 1202 | using Test
using SimpleTensorNetworks
using CUDA
using SimpleTensorNetworks: c2l, l2c
using Random
CUDA.allowscalar(false)
@testset "c2l" begin
for i=1:100
shape = (4,rand(1:5),rand(1:7),5,19)
target = ([rand(1:s) for s in shape]...,)
@test c2l(shape, target) == LinearIndices(shape)[target...]
end
for i=1:100
shape = (4,rand(1:5),rand(1:12),15,19)
ci = CartesianIndices(shape)
i = rand(1:prod(shape))
@test l2c(shape, i) == ci[i].I
end
@test l2c((), 1) == ()
@test c2l((), ()) == 1
end
@testset "permutedims" begin
a = randn(rand(1:3, 20)...)
A = CuArray(a)
p = randperm(20)
@test Array(permutedims(A, p)) ≈ permutedims(a, p)
end
@testset "tensor contract - GPU" begin
A = zeros(Float64, 10, 32, 21);
B = zeros(Float64, 32, 11, 5, 2, 41, 10);
tA = LabeledTensor(A, [1,2,3])
tB = LabeledTensor(B, [2,4,5,6,7,1])
tOut = tA * tB
tnet = TensorNetwork([tA, tB]) |> togpu
tOut2, contracted_labels = contract_label!(tnet, 1)
@test Array(tnet.tensors[].array) ≈ tOut.array
@test Array(tnet.tensors[].array) ≈ Array(tOut2.array)
@test contracted_labels == [1, 2]
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 406 | using SimpleTensorNetworks
using Test
@testset "contract" begin
include("tensorcontract.jl")
end
@testset "simplify" begin
include("simplify.jl")
end
@testset "contractionorder" begin
include("contractionorder/contractionorder.jl")
end
@testset "viz" begin
include("viz.jl")
end
@testset "cuda" begin
if Base.find_package("CUDA") !== nothing
include("cuda.jl")
end
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 1391 | using Test
using SimpleTensorNetworks
@testset "rm multi edges" begin
t1 = randn(2,3,4,5)
l1 = ['a', 'b', 'c', 'd']
t2 = randn(6,4,7,2,3)
l2 = ['e', 'c', 'f', 'a', 'b']
t1_, t2_ = SimpleTensorNetworks.rm_multiedge(LabeledTensor(t1, l1), LabeledTensor(t2, l2))
@test t1_.array == reshape(permutedims(t1, (4,1,2,3)), 5, :)
@test t2_.array == reshape(permutedims(t2, (1,3,4,5,2)), 6,7,:)
@test t1_.labels == ['d', 'a']
@test t2_.labels == ['e', 'f', 'a']
end
@testset "rm single nodes" begin
tn = TensorNetwork([
LabeledTensor(randn(2), ["a"]),
LabeledTensor(randn(2,2,2,2,2), ["g", "c", "a", "k", "l"]),
LabeledTensor(randn(2), ["c"]),
LabeledTensor(randn(2,2,2), ["g", "k", "l"]),
])
r1 = contract(tn, [[[1,2], 3], 4])
factor, tn = rm_degree12(tn)
@test length(tn) == 2
@test tn.tensors[1].labels == ["g", "k", "l"]
@test tn.tensors[2].labels == ["g", "k", "l"]
r2 = contract(tn, [1,2])
@test r1 ≈ r2
@test factor == 1.0
tn = TensorNetwork([
LabeledTensor(randn(2), ["a"]),
LabeledTensor(randn(2,2,2,2), ["g", "c", "a", "k"]),
LabeledTensor(randn(2), ["c"]),
LabeledTensor(randn(2,2), ["g", "k"]),
])
r1 = contract(tn, [[[1,2], 3], 4])
factor, tn = rm_degree12(tn)
@test length(tn) == 0
@test r1.array[] ≈ factor
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 1604 | using Test
using OMEinsum
using SimpleTensorNetworks
@testset "contract" begin
for (iAs, iBs, iOut) in [
[(1,2,3), (2,3,4), (1,4)],
[(1,3,2), (2,3,4), (4,1)],
[(), (2,4), (4,2)],
[(2,4), (), (4,2)],
[(2,4), (1,), (4,2,1)],
[(2,4), (2,4), ()],
[(2,4), (4,), (2,)],
]
A = asarray(randn(rand(4:12, length(iAs))...))
B = asarray(randn([(iBs[i] in iAs) ? size(A, indexin(iBs[i], collect(iAs))[]) : rand(4:12) for i=1:length(iBs)]...))
out = tensorcontract(iAs, A, iBs, B, iOut)
eincode = EinCode{(iAs, iBs), iOut}()
eout = eincode(A, B)
@test eout ≈ out
end
end
@testset "tensor contract" begin
A = zeros(Float64, 10, 32, 21);
B = zeros(Float64, 32, 11, 5, 2, 41, 10);
tA = LabeledTensor(A, [1,2,3])
tB = LabeledTensor(B, [2,4,5,6,7,1])
tOut = tA * tB
@test tOut.array == ein"abc,bdefga->cdefg"(A, B)
@test tOut.labels == [3,4,5,6,7]
tnet1 = TensorNetwork([tA])
@test tnet1 isa TensorNetwork
tnet = TensorNetwork([tA, tB])
tOut2, contracted_labels = contract_label!(tnet, 1)
@test tnet.tensors[] ≈ tOut
@test tnet.tensors[] ≈ tOut2
@test contracted_labels == [1, 2]
end
@testset "mul_dim" begin
A = zeros(Float64, 10, 32, 21);
B = zeros(Float64, 32, 11);
tA = LabeledTensor(A, [1,2,3])
tB = LabeledTensor(B, [2,4])
tA1 = SimpleTensorNetworks.mul_dim(tA, B; dim=2)
tA2 = tA * tB
@test tA1.array ≈ permutedims(tA2.array, (1,3,2))
@test tA1.labels == [1, 2, 3]
@test tA2.labels == [1, 3, 4]
end | SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 263 | using Test
using SimpleTensorNetworks
using Compose, Viznet
@testset "viz" begin
tn = TensorNetwork([LabeledTensor(randn(2,2), ['a', 'b']), LabeledTensor(randn(2,2), ['b', 'c']), LabeledTensor(randn(2,2), ['a', 'c'])])
@test viz_tnet(tn) isa Context
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 957 | using Test
using LightGraphs
using SimpleTensorNetworks
using Random
@testset "greedy" begin
include("edgeinfo.jl")
end
@testset "greedy" begin
include("greedy.jl")
end
@testset "other" begin
for seed in 1:10
Random.seed!(3)
g = random_regular_graph(10, 3)
tn = TensorNetwork([LabeledTensor(randn(2,2,2), [e for e in edges(g) if i ∈ (e.src, e.dst)]) for i=1:10])
tcs, scs, order = SimpleTensorNetworks.order_greedy(SimpleTensorNetworks.layout(tn); strategy="min_reduce")
trees = SimpleTensorNetworks.build_trees(10, order)
order_new = SimpleTensorNetworks.build_order(trees)
#@test order == order_new
tc, sc = abstract_contract(SimpleTensorNetworks.layout(tn), order)
tc2, sc2 = abstract_contract(tn, trees)
@test sort(tc) ≈ sort(tc2)
end
@testset "log2sumexp2" begin
x = randn(10)
@test log2(sum(exp2.(x))) ≈ log2sumexp2(x)
end
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 886 | using Test
using SimpleTensorNetworks.ContractionOrder
using LightGraphs: SimpleEdge
@testset "edge info" begin
egs = [SimpleEdge(1, 2), SimpleEdge(2, 3), SimpleEdge(3, 1), SimpleEdge(3, 4)]
log2_shapes = [log2.((2,2)), log2.((2,3)), log2.((3,2,6)), log2.((6,))]
neighbors = [[2,3], [1,3], [1,2,4], [3]]
strategy = "max_reduce_tri"
ei = edgeinfo(egs[1:3], log2_shapes, neighbors, strategy)
@test ei.data[SimpleEdge(3, 1)] ≈ -24.0
@test ei.data[SimpleEdge(1, 2)] ≈ -4
@test ei.data[SimpleEdge(2, 3)] ≈ -18
@test select(ei, strategy, neighbors, nothing) == SimpleEdge(3, 1)
# add!, remove!
ei2 = add!(copy(ei), [4], log2_shapes, neighbors, strategy)
ei3 = edgeinfo(egs, log2_shapes, neighbors, strategy)
for (k, v) in ei2.data
@test ei3.data[k] ≈ v
end
ei4 = remove!(copy(ei2), [4])
@test ei4.data == ei.data
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | code | 2345 | using Test, SimpleTensorNetworks.ContractionOrder
using LightGraphs
using LightGraphs: SimpleEdge
using SimpleTensorNetworks.ContractionOrder: abstract_contract_edge!, log2sumexp2
function get_shapes(edges, sizes, neighbors)
log2_shapes = Vector{Float64}[]
for i=1:length(neighbors)
si = Float64[]
for nb in neighbors[i]
ie = findfirst(e->e == SimpleEdge(i=>nb) || e == SimpleEdge(nb=>i), edges)
push!(si, sizes[ie])
end
push!(log2_shapes, si)
end
return log2_shapes
end
tolog2(shapes) = [log2.(x) for x in shapes]
@testset "abstract contract" begin
es = [SimpleEdge(1, 2), SimpleEdge(2, 3), SimpleEdge(1, 3), SimpleEdge(3, 4)]
log2_sizes = log2.([2, 3, 4, 5])
neighbors = [[2,3], [3,1], [1,2,4], [3]]
log2_shapes = get_shapes(es, log2_sizes, neighbors)
strategy = "min_reduce"
edge_info = edgeinfo(es, log2_shapes, neighbors, strategy)
edge = SimpleEdge(2, 3)
tc, sc = abstract_contract_edge!(edge, log2_shapes, edge_info,
neighbors, strategy)
@test edge_info.data[SimpleEdge(2, 4)] ≈ 8.0
@test edge_info.data[SimpleEdge(1, 2)] ≈ 5.0
@test log2_shapes == [[8], [8, 5], Int[], [5]] |> tolog2
@test neighbors == [[2], [1, 4], Int[], [2]]
@test tc ≈ log2(120)
es = [SimpleEdge(1, 2), SimpleEdge(2, 3), SimpleEdge(1, 3), SimpleEdge(3, 4)]
log2_sizes = log2.([2, 3, 4, 5])
neighbors = [[2,3], [3,1], [1,2,4], [3]]
log2_shapes = get_shapes(es, log2_sizes, neighbors)
tn = TensorNetworkLayout(SimpleGraph(4, neighbors), log2_shapes)
order = [SimpleEdge(1, 2),SimpleEdge(1, 3), SimpleEdge(1, 4)]
tcs, scs = abstract_contract(tn, order)
@test log2sumexp2(tcs) ≈ log2(89)
@test maximum(scs) ≈ log2(12*5+12+5)
end
@testset "greedy order" begin
g = random_regular_graph(10, 3)
log2_shapes = [[2,2,2] for i=1:10] |> tolog2
tn = TensorNetworkLayout(g, log2_shapes)
tcs, scs, order = order_greedy(tn; strategy="min_dim")
@test length(order) <= length(edges(g))
end
@testset "disconnected graph" begin
n = 5
g = SimpleGraph(n)
add_edge!(g, 2, 3)
tn = TensorNetworkLayout(g, [ones(degree(g, i)) for i=1:n])
tc, sc, orders = order_greedy(tn)
@test orders isa Vector
tcs, scs = abstract_contract(tn, orders)
@test maximum(tcs) == 1
end
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"MIT"
] | 0.2.1 | 6e23282395631fd2fed63a3347f729cbd4dc6e02 | docs | 265 | # SimpleTensorNetworks
[CI](https://github.com/TensorBFS/SimpleTensorNetworks.jl/actions)
A naive implementation of tensor networks. Warning: it is still a work in progress.
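
A minimal usage sketch, based on the package's own test suite (labels may be integers, characters, or other index identifiers):

```julia
using SimpleTensorNetworks

tA = LabeledTensor(randn(2, 3), ['a', 'b'])
tB = LabeledTensor(randn(3, 4), ['b', 'c'])

tOut = tA * tB   # contracts the shared label 'b'
tOut.labels      # ['a', 'c']
```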
| SimpleTensorNetworks | https://github.com/TensorBFS/SimpleTensorNetworks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 2145 | using Documenter
using DataAssimilationBenchmarks
makedocs(
sitename = "DataAssimilationBenchmarks",
format = Documenter.HTML(),
modules = [DataAssimilationBenchmarks],
pages = [
"Home" => "index.md",
"DataAssimilationBenchmarks" => Any[
"Introduction" => "home/Introduction.md",
"Getting Started" => "home/Getting Started.md",
"Global Types" => "home/DataAssimilationBenchmarks.md",
],
"Submodules" => Any[
"Models" => Any[
"L96" => "submodules/models/L96.md",
"IEEE39bus" => "submodules/models/IEEE39bus.md",
"ObsOperators" => "submodules/models/ObsOperators.md"
],
"Methods" => Any[
"DeSolvers" => "submodules/methods/DeSolvers.md",
"EnsembleKalmanSchemes" => "submodules/methods/EnsembleKalmanSchemes.md",
"XdVAR" => "submodules/methods/XdVAR.md"
],
"Experiments" => Any[
"Workflow" => "submodules/experiments/Workflow.md",
"GenerateTimeSeries" => "submodules/experiments/GenerateTimeSeries.md",
"FilterExps" => "submodules/experiments/FilterExps.md",
"SmootherExps" => "submodules/experiments/SmootherExps.md",
"SingleExperimentDriver" => "submodules/experiments/SingleExperimentDriver.md",
"ParallelExperimentDriver" => "submodules/experiments/ParallelExperimentDriver.md",
],
"Analysis" => Any[
"ProcessExperimentData" => "submodules/analysis/ProcessExperimentData.md",
"PlotExperimentData" => "submodules/analysis/PlotExperimentData.md",
],
],
"Community" => Any[
"Contributing" => "community/Contributing.md",
"Contributor Covenant Code of Conduct" => "community/CodeOfConduct.md",
],
]
)
deploydocs(
repo = "github.com:cgrudz/DataAssimilationBenchmarks.jl.git",
)
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 8035 | ##############################################################################################
module DataAssimilationBenchmarks
##############################################################################################
# imports and exports
using LinearAlgebra
export VecA, ArView, ParamDict, ParamSample, CovM, ConM, TransM, StepKwargs
##############################################################################################
# Global type union declarations for multiple dispatch and type aliases
"""
function VecA(type)
Union{Vector{T}, SubArray{T, 1}} where T <: type
end
Type constructor for union of Vectors and 1-D SubArrays. This is utilized in order to pass
columns of an ensemble matrix into integration schemes and related array operations.
"""
function VecA(type)
Union{Vector{T}, SubArray{T, 1}} where T <: type
end
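# Example (illustrative): dispatching on VecA lets one method accept either a
# materialized Vector or a non-allocating column view of an ensemble matrix:
#
#   f(x::VecA(Float64)) = sum(x)
#   ens = randn(3, 5)
#   f(ens[:, 1])        # Vector{Float64}
#   f(view(ens, :, 1))  # SubArray{Float64, 1}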
"""
function ArView(type)
Union{Array{T, 2}, SubArray{T, 2}} where T <: type
end
Type constructor for union of Arrays and SubArrays for use within ensemble conditioning
operations, integration schemes and other array operations.
"""
function ArView(type)
Union{Array{T, 2}, SubArray{T, 2}} where T <: type
end
"""
function ParamDict(type)
Union{Dict{String, Array{T}}, Dict{String, Vector{T}}} where T <: type
end
Type constructor for Dictionary of model parameters to be passed to derivative functions
by name. This allows one to pass both vector parameters (and scalars written as
vectors), as well as matrix valued parameters such as diffusion arrays.
"""
function ParamDict(type)
Union{Dict{String, Array{T}}, Dict{String, Vector{T}}} where T <: type
end
"""
ParamSample = Dict{String, Vector{UnitRange{Int64}}}
Dictionary containing key and index pairs to subset the state vector and
then merge a statistical sample of parameters that govern the equations of motion with
the ParamDict `dx_params` in parameter estimation problems.
"""
ParamSample = Dict{String, Vector{UnitRange{Int64}}}
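# Example (illustrative, mirroring the parameter estimation experiments): a
# sample of the L96 forcing parameter stored at extended-state index 41 is
# declared as
#
#   param_sample = ParamSample("F" => [41:41])
#
# so that integration routines can splice ens[41:41, j] into dx_params as "F".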
"""
function CovM(type)
Union{UniformScaling{T}, Diagonal{T, Vector{T}},
Symmetric{T, Matrix{T}}} where T <: type
end
Type constructor for union of covariance matrix types, for multiple dispatch
based on their special characteristics as symmetric, positive definite operators.
"""
function CovM(type)
Union{UniformScaling{T}, Diagonal{T, Vector{T}},
Symmetric{T, Matrix{T}}} where T <: type
end
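# Example (illustrative): all three covariance representations match
# CovM(Float64), so a single method signature covers them:
#
#   1.0I                          isa CovM(Float64)  # UniformScaling
#   Diagonal(ones(4))             isa CovM(Float64)  # Diagonal
#   Symmetric(Matrix(1.0I, 4, 4)) isa CovM(Float64)  # Symmetric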
"""
function ConM(type)
Union{UniformScaling{T}, Symmetric{T}} where T <: type
end
Type union of conditioning matrix types, which are used for optimization routines in the
transform method.
"""
function ConM(type)
Union{UniformScaling{T}, Symmetric{T}} where T <: type
end
"""
function TransM(type)
Union{Tuple{Symmetric{T,Array{T,2}},Array{T,1},Array{T,2}},
Tuple{Symmetric{T,Array{T,2}},Array{T,2},Array{T,2}}} where T <: type
end
Type union constructor for tuples representing the ensemble update step with a right
ensemble anomaly transformation, mean update weights and mean-preserving orthogonal
transformation.
"""
function TransM(type)
Union{Tuple{Symmetric{T,Array{T,2}},Array{T,1},Array{T,2}},
Tuple{Symmetric{T,Array{T,2}},Array{T,2},Array{T,2}}} where T <: type
end
"""
StepKwargs = Dict{String,Any}
Key word arguments for twin experiment time stepping. Arguments are given as:
REQUIRED:
* `dx_dt` - time derivative function with arguments x and dx_params
* `dx_params` - parameters necessary to resolve dx_dt, not including parameters to be estimated in the extended state vector;
* `h` - numerical time discretization step size
OPTIONAL:
* `diffusion` - tunes the standard deviation of the Wiener process, equal to `sqrt(h) * diffusion`;
* `diff_mat` - structure matrix for the diffusion coefficients, replaces the default uniform scaling;
* `s_infl` - ensemble anomalies of state components are scaled by this parameter for calculation of empirical covariance;
* `p_infl` - ensemble anomalies of extended-state components for parameter sample replicates are scaled by this parameter for calculation of empirical covariance, `state_dim` must be defined below;
* `state_dim` - keyword for parameter estimation, specifying the dimension of the dynamic state, less than the dimension of full extended state;
* `param_sample` - `ParamSample` dictionary for merging extended state with `dx_params`;
* `ξ` - random array size `state_dim`, can be defined in `kwargs` to provide a particular realization for method validation;
* `γ` - controls the nonlinearity of the `alternating_obs_operator`.
See [`DataAssimilationBenchmarks.ObsOperators.alternating_obs_operator`](@ref) for
a discussion of the `γ` parameter.
"""
StepKwargs = Dict{String,Any}
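# Illustrative sketch: a minimal StepKwargs dictionary for a deterministic
# Lorenz-96 step, using only the REQUIRED keys documented above:
#
#   kwargs = StepKwargs(
#       "dx_dt" => L96.dx_dt,
#       "dx_params" => Dict{String, Vector{Float64}}("F" => [8.0]),
#       "h" => 0.01,
#   )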
##############################################################################################
# imports and exports of sub-modules
include("methods/DeSolvers.jl")
include("methods/EnsembleKalmanSchemes.jl")
include("methods/XdVAR.jl")
include("models/L96.jl")
include("models/IEEE39bus.jl")
include("models/ObsOperators.jl")
include("experiments/GenerateTimeSeries.jl")
include("experiments/FilterExps.jl")
include("experiments/SmootherExps.jl")
include("experiments/SingleExperimentDriver.jl")
include("experiments/ParallelExperimentDriver.jl")
using .DeSolvers
using .EnsembleKalmanSchemes
using .L96
using .IEEE39bus
using .ObsOperators
using .GenerateTimeSeries
using .FilterExps
using .SmootherExps
using .SingleExperimentDriver
using .ParallelExperimentDriver
export DeSolvers, EnsembleKalmanSchemes, XdVAR, L96, IEEE39bus, ObsOperators,
       GenerateTimeSeries, FilterExps, SmootherExps, SingleExperimentDriver,
       ParallelExperimentDriver
##############################################################################################
# info
function Info()
print(" _____ _ ")
printstyled("_",color=9)
print(" ")
printstyled("_",color=2)
print(" _ _ ")
printstyled("_ \n",color=13)
print(" | __ \\ | | /\\ ")
printstyled("(_)",color=9)
printstyled(" (_)",color=2)
print(" | | | ")
printstyled("(_) \n",color=13)
print(" | | | | __ _| |_ __ _ / \\ ___ ___ _ _ __ ___ _| | __ _| |_ _ ___ _ __ \n")
print(" | | | |/ _` | __/ _` | / /\\ \\ / __/ __| | '_ ` _ \\| | |/ _` | __| |/ _ \\| '_ \\ \n")
print(" | |__| | (_| | || (_| |/ ____ \\\\__ \\__ \\ | | | | | | | | (_| | |_| | (_) | | | | \n")
print(" |_____/ \\__,_|\\__\\__,_/_/ \\_\\___/___/_|_| |_| |_|_|_|\\__,_|\\__|_|\\___/|_| |_| \n")
print("\n")
print(" ____ _ _ ")
printstyled(" _ ", color=12)
print("_\n")
print(" | _ \\ | | | | ")
printstyled("(_)",color=12)
print(" | \n")
print(" | |_) | ___ _ __ ___| |__ _ __ ___ __ _ _ __| | _____ _| | \n")
print(" | _ < / _ \\ '_ \\ / __| '_ \\| '_ ` _ \\ / _` | '__| |/ / __| | | | \n")
print(" | |_) | __/ | | | (__| | | | | | | | | (_| | | | <\\__ \\_| | | \n")
print(" |____/ \\___|_| |_|\\___|_| |_|_| |_| |_|\\__,_|_| |_|\\_\\___(_) |_| \n")
print(" _/ | \n")
print(" |__/ \n")
print("\n")
printstyled(" Welcome to DataAssimilationBenchmarks!\n", bold=true)
print(" Version v0.3.4, Copyright 2022 Colin Grudzien ([email protected]) et al.\n")
print(" Licensed under the Apache License, Version 2.0 \n")
print(" https://github.com/cgrudz/DataAssimilationBenchmarks/blob/master/LICENSE.md\n")
print("\n")
nothing
end
##############################################################################################
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 9933 | #######################################################################################################################
module ProcessExperimentData
########################################################################################################################
########################################################################################################################
# imports and exports
using Debugger
using Statistics
using JLD, HDF5
export process_filter_state
########################################################################################################################
########################################################################################################################
# Scripts for processing experimental output data and writing to JLD and HDF5 to read into matplotlib later
#
# These scripts are designed to try to load every file according to the standard naming conventions
# and, if a file cannot be loaded, to save Inf as a dummy value for missing or corrupted data.
# NOTE: these scripts are currently deprecated and there is only a single method provided as an example
# Future plans include merging in methods for processing data that are more consistent.
########################################################################################################################
function process_filter_state()
# creates an array of the average RMSE and spread for each experiment
# ensemble size is increasing from the origin on the horizontal axis
# inflation is increasing from the origin on the vertical axis
# time the operation
t1 = time()
# static parameters that are not varied
seed = 0
tanl = 0.05
nanl = 20000
burn = 5000
diffusion = 0.0
h = 0.01
obs_un = 1.0
obs_dim = 40
sys_dim = 40
γ = 1.0
# parameters in ranges that will be used in loops
analysis_list = [
"fore",
"filt",
]
stat_list = [
"rmse",
"spread",
]
method_list = [
"enkf",
"etkf",
"enkf-n-primal",
]
ensemble_sizes = 15:2:41
total_ensembles = length(ensemble_sizes)
inflations = LinRange(1.00, 1.10, 11)
total_inflations = length(inflations)
# define the storage dictionary here, looping over the method list
data = Dict{String, Array{Float64}}()
for method in method_list
if method == "enkf-n"
for analysis in analysis_list
for stat in stat_list
# multiplicative inflation parameter should always be one, there is no dimension for this variable
data[method * "_" * analysis * "_" * stat] = Array{Float64}(undef, total_ensembles)
end
end
else
for analysis in analysis_list
for stat in stat_list
# create storage for inflations and ensembles
data[method * "_" * analysis * "_" * stat] = Array{Float64}(undef, total_inflations, total_ensembles)
end
end
end
end
# auxiliary function to process data, producing RMSE and spread averages
function process_data(fnames::Vector{String}, method::String)
# loop ensemble size, last axis
for j in 0:(total_ensembles - 1)
if method[1:6] == "enkf-n"
try
# attempt to load the file
tmp = load(fnames[j+1])
# if successful, continue to unpack arrays and store the mean stats over
# the experiment after the burn period for stationary statistics
for analysis in analysis_list
for stat in stat_list
analysis_stat = tmp[analysis * "_" * stat]::Vector{Float64}
data[method * "_" * analyis * "_" * stat][j+1] =
mean(analysis_stat[burn+1: nanl+burn])
end
end
catch
# file is missing or corrupted, load infinity to represent an incomplete or unstable experiment
for analysis in analysis_list
for stat in stat_list
analysis_stat = tmp[analysis * "_" * stat]::Vector{Float64}
data[method * "_" * analyis * "_" * stat][j+1] = inf
end
end
end
else
# loop inflations, first axis
for i in 1:total_inflations
try
# attempt to load the file
tmp = load(fnames[i + j*total_inflations])
# if successful, continue to unpack arrays and store the mean stats over
# the experiment after the burn period for stationary statistics
for analysis in analysis_list
for stat in stat_list
analysis_stat = tmp[analysis * "_" * stat]::Vector{Float64}
data[method * "_" * analyis * "_" * stat][total_inflations + 1 - i, j + 1] =
mean(analysis_stat[burn+1: nanl+burn])
end
end
catch
# file is missing or corrupted, load infinity to represent an incomplete or unstable experiment
for analysis in analysis_list
for stat in stat_list
analysis_stat = tmp[analysis * "_" * stat]::Vector{Float64}
data[method * "_" * analyis * "_" * stat][total_inflations + 1 - i, j + 1] = inf
end
end
end
end
end
end
end
# define path to data on server
fpath = "/x/capa/scratch/cgrudzien/final_experiment_data/all_ens/"
# generate the range of experiments, storing file names as a list
for method in method_list
fnames = []
for N_ens in ensemble_sizes
if method[1:6] == "enkf-n"
# inflation is a static value of 1.0
name = method *
"_L96_state_seed_" * lpad(seed, 4, "0") *
"_diffusion_" * rpad(diffusion, 4, "0") *
"_sys_dim_" * lpad(sys_dim, 2, "0") *
"_obs_dim_" * lpad(obs_dim, 2, "0") *
"_obs_un_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl + burn, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_N_ens_" * lpad(N_ens, 3,"0") *
"_state_inflation_" * rpad(round(1.0, digits=2), 4, "0") *
".jld"
push!(fnames, fpath * method * "/diffusion_" * rpad(diffusion, 4, "0") * "/" * name)
else
# loop inflations
for infl in inflations
name = method *
"_L96_state_seed_" * lpad(seed, 4, "0") *
"_diffusion_" * rpad(diffusion, 4, "0") *
"_sys_dim_" * lpad(sys_dim, 2, "0") *
"_obs_dim_" * lpad(obs_dim, 2, "0") *
"_obs_un_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl + burn, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_N_ens_" * lpad(N_ens, 3,"0") *
"_state_inflation_" * rpad(round(infl, digits=2), 4, "0") *
".jld"
push!(fnames, fpath * method * "/diffusion_" * rpad(diffusion, 4, "0") * "/" * name)
end
end
end
# turn fnames into a string array, use this as the argument in process_data
fnames = Array{String}(fnames)
process_data(fnames, method)
end
# create jld file name with relevant parameters
jlname = "processed_filter_state" *
"_diffusion_" * rpad(diffusion, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_burn_" * lpad(burn, 5, "0") *
".jld"
# create hdf5 file name with relevant parameters
h5name = "processed_filter_state" *
"_diffusion_" * rpad(diffusion, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_burn_" * lpad(burn, 5, "0") *
".h5"
# write out file in jld
save(jlname, data)
# write out file in hdf5
h5open(h5name, "w") do file
for key in keys(data)
h5write(h5name, key, data[key])
end
end
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
########################################################################################################################
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 2648 | ##############################################################################################
module plot_var_analysis
##############################################################################################
# imports and exports
using Random, LinearAlgebra
using JLD2, HDF5, Plots, Statistics, Measures
using DataAssimilationBenchmarks
##############################################################################################
# parameters of interest
bkg_covs = ["ID", "clima"]
dims = collect(1:1:40)
s_infls = collect(1:1:199)
# pre-allocation for stabilized RMSE, initialized to a large default value
stab_rmse_id = ones(length(dims), length(s_infls)) * 20
stab_rmse_clima = ones(length(dims), length(s_infls)) * 20
# iterate through configurations of parameters of interest
for bkg_cov in bkg_covs
for dim in dims
for s_infl in s_infls
infl = s_infl*0.005;
# import data
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/D3-var-bkg-" * bkg_cov * "/"
file = "bkg-" * bkg_cov * "_L96_state_seed_0123_diff_0.000_sysD_40_obsD_" * lpad(dim, 2, "0") *
"_obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_stateInfl_" * rpad(round(infl, digits=3), 5, "0") * ".jld2"
# retrieve statistics of interest
ts = load(path * file)::Dict{String,Any}
nanl = ts["nanl"]::Int64
filt_rmse = ts["filt_rmse"]::Array{Float64, 1}
# generate statistics based on background covariance
if cmp(bkg_cov, "ID") == 0
stab_rmse_id[dim,s_infl] = mean(filter(!isinf, filter(!isnan,filt_rmse[1000:nanl])))
else
stab_rmse_clima[dim,s_infl] = mean(filter(!isinf, filter(!isnan,filt_rmse[1000:nanl])))
end
end
end
end
# transform data for plotting
infl_vals = s_infls*0.005
clima_t = transpose(stab_rmse_clima)
id_t = transpose(stab_rmse_id)
# create heatmap for background covariance using climatology
heatmap(dims,infl_vals,clima_t,clim=(0.001,5.0), title="Stabilized Filter RMSE Clima: Inflation Tuning Parameter vs. Dimensions", margin=15mm, size=(800,500), dpi = 600)
xlabel!("Dimensions")
ylabel!("Inflation Tuning Parameter")
savefig("stab_heat_clima_t")
# create heatmap for background covariance using identity
heatmap(dims,infl_vals,id_t,clim=(0.001,5.0), title="Stabilized Filter RMSE ID: Inflation Tuning Parameter vs. Dimensions", margin=15mm, size=(800,500), dpi = 600)
xlabel!("Dimensions")
ylabel!("Inflation Tuning Parameter")
savefig("stab_heat_id_t")
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 1058 | #######################################################################################################################
module Rename
########################################################################################################################
# imports and exports
using Debugger
using Glob
using JLD
export rename_files
########################################################################################################################
function rename_files()
fnames = Glob.glob("./ienks-transform/*")
for name in fnames
split_name = split(name, "_")
tmp = [split_name[1:end-7]; ["nens"]; split_name[end-4:end]]
rename = ""
string_len = (length(tmp)-1)
for i in 1:string_len
rename *= tmp[i] * "_"
end
rename *= tmp[end]
print(rename * "\n")
my_command = `mv $name $rename`
run(my_command)
end
end
########################################################################################################################
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 28174 | ##############################################################################################
module FilterExps
##############################################################################################
# imports and exports
using Random, Distributions, StatsBase
using LinearAlgebra
using JLD2, HDF5
using ..DataAssimilationBenchmarks, ..ObsOperators, ..DeSolvers, ..EnsembleKalmanSchemes,
..XdVAR, ..L96, ..IEEE39bus, ..GenerateTimeSeries
export ensemble_filter_state, ensemble_filter_param, D3_var_filter_state
##############################################################################################
# Main filtering experiments, debugged and validated for use with schemes in methods directory
##############################################################################################
"""
ensemble_filter_state((time_series::String, method::String, seed::Int64, nanl::Int64,
obs_un::Float64, obs_dim::Int64, γ::Float64, N_ens::Int64,
s_infl::Float64)::NamedTuple)
Ensemble filter state estimation twin experiment.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]::Array{Float64,2}
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
where the file name is written dynamically according to the selected parameters as follows:
method *
"_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
"""
function ensemble_filter_state((time_series, method, seed, nanl, obs_un, obs_dim,
γ, N_ens, s_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:obs_un,:obs_dim,
:γ,:N_ens,:s_infl),
<:Tuple{String,String,Int64,Int64,Float64,Int64,
Float64,Int64,Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
# set the integration step size for the ensemble at 0.01 if an SDE, if deterministic
# simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
sys_dim = length(init)
ens = rand(MvNormal(init, I), N_ens)
# define the observation range and truth reference solution
obs = obs[:, 2:nanl + 1]
truth = copy(obs)
# define kwargs for the filtering method
# and the underlying dynamical model
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"s_infl" => s_infl,
"γ" => γ,
)
# define the observation operator, observation error covariance and observations
# with error observation covariance operator taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# check if there is a diffusion structure matrix
if haskey(ts, "diff_mat")
kwargs["diff_mat"] = ts["diff_mat"]::Array{Float64,2}
end
# create storage for the forecast and analysis statistics
fore_rmse = Vector{Float64}(undef, nanl)
filt_rmse = Vector{Float64}(undef, nanl)
fore_spread = Vector{Float64}(undef, nanl)
filt_spread = Vector{Float64}(undef, nanl)
# loop over the number of observation-forecast-analysis cycles
for i in 1:nanl
# for each ensemble member
for j in 1:N_ens
# loop over the integration steps between observations
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if model == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
# compute the forecast statistics
fore_rmse[i], fore_spread[i] = analyze_ens(ens, truth[:, i])
# after the forecast step, perform assimilation of the observation
analysis = ensemble_filter(method, ens, obs[:, i], H_obs, obs_cov, kwargs)
ens = analysis["ens"]::Array{Float64,2}
# compute the analysis statistics
filt_rmse[i], filt_spread[i] = analyze_ens(ens, truth[:, i])
end
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]::Array{Float64,2}
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
name = method *
"_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
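# Illustrative usage sketch (the time series path is hypothetical; see
# GenerateTimeSeries.L96_time_series for how the input file is produced):
#
#   ensemble_filter_state((
#       time_series = "/path/to/L96_time_series_....jld2",
#       method = "etkf", seed = 1234, nanl = 4000, obs_un = 1.0,
#       obs_dim = 40, γ = 1.0, N_ens = 21, s_infl = 1.02,
#   ))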
##############################################################################################
"""
ensemble_filter_param((time_series::String, method::String, seed::Int64, nanl::Int64,
obs_un::Float64, obs_dim::Int64, γ::Float64, p_err::Float64,
p_wlk::Float64, N_ens::Int64, s_infl::Float64,
p_infl::Float64)::NamedTuple)
Ensemble filter joint state-parameter estimation twin experiment.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"state_dim" => state_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_err" => p_err,
"p_wlk" => p_wlk,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]::Array{Float64,2}
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
where the file name is written dynamically according to the selected parameters as follows:
method *
"_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_stateD_" * lpad(state_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_nens_" * lpad(N_ens, 3, "0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
"""
function ensemble_filter_param((time_series, method, seed, nanl, obs_un, obs_dim, γ, p_err,
p_wlk,N_ens, s_infl, p_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:obs_un,:obs_dim,:γ,:p_err,
:p_wlk,:N_ens,:s_infl,:p_infl),
<:Tuple{String,String,Int64,Int64,Float64,Int64,Float64,Float64,
Float64,Int64,Float64,Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
# set the integration step size for the ensemble at 0.01 if an SDE, if deterministic
# simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
if model == "L96"
param_truth = pop!(dx_params, "F")
elseif model == "IEEE39bus"
param_truth = [pop!(dx_params, "H"); pop!(dx_params, "D")]
param_truth = param_truth[:]
end
# define state and extended system dimensions
state_dim = length(init)
sys_dim = state_dim + length(param_truth)
# define the initial ensemble
ens = rand(MvNormal(init, I), N_ens)
# extend this by the parameter ensemble
# note here the covariance is supplied such that the standard deviation is a percent
# of the parameter value
param_ens = rand(MvNormal(param_truth[:], diagm(param_truth[:] * p_err).^2.0), N_ens)
# define the extended state ensemble
ens = [ens; param_ens]
# define the observation range and truth reference solution
obs = obs[:, 2:nanl + 1]
truth = copy(obs)
# define kwargs, note the possible exclusion of dx_params if it is the only parameter for
# dx_dt and this is the parameter to be estimated
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"dx_params" => dx_params,
"f_steps" => f_steps,
"step_model" => step_model!,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
"state_dim" => state_dim,
"s_infl" => s_infl,
"p_infl" => p_infl
)
# define the observation operator, observation error covariance and observations with
# error observation covariance operator currently taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# we define the parameter sample as the key name and index
# of the extended state vector pair, to be loaded in the
# ensemble integration step
if model == "L96"
param_sample = Dict("F" => [41:41])
elseif model == "IEEE39bus"
param_sample = Dict("H" => [21:30], "D" => [31:40])
end
kwargs["param_sample"] = param_sample
# create storage for the forecast and analysis statistics
fore_rmse = Vector{Float64}(undef, nanl)
filt_rmse = Vector{Float64}(undef, nanl)
para_rmse = Vector{Float64}(undef, nanl)
fore_spread = Vector{Float64}(undef, nanl)
filt_spread = Vector{Float64}(undef, nanl)
para_spread = Vector{Float64}(undef, nanl)
# loop over the number of observation-forecast-analysis cycles
for i in 1:nanl
# for each ensemble member
for j in 1:N_ens
if model == "IEEE39bus"
# we define the diffusion structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
dx_params["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
@views for k in 1:f_steps
# loop over the integration steps between observations
step_model!(ens[:, j], 0.0, kwargs)
if model == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
# compute the forecast statistics
fore_rmse[i], fore_spread[i] = analyze_ens(ens[1:state_dim, :], truth[:, i])
# after the forecast step, perform assimilation of the observation
analysis = ensemble_filter(method, ens, obs[:, i], H_obs, obs_cov, kwargs)
ens = analysis["ens"]::Array{Float64,2}
# extract the parameter ensemble for later usage
param_ens = @view ens[state_dim+1:end, :]
# compute the analysis statistics
filt_rmse[i], filt_spread[i] = analyze_ens(ens[1:state_dim, :], truth[:, i])
para_rmse[i], para_spread[i] = analyze_ens_param(param_ens, param_truth)
# include random walk for the ensemble of parameters
# with standard deviation given by the p_wlk scaling
# of the mean vector
param_mean = mean(param_ens, dims=2)
param_ens .= param_ens + p_wlk * param_mean .* rand(Normal(),
length(param_truth), N_ens)
end
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"state_dim" => state_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"gamma" => γ,
"p_err" => p_err,
"p_wlk" => p_wlk,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]::Array{Float64,2}
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
name = method *
"_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_stateD_" * lpad(state_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_nens_" * lpad(N_ens, 3, "0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
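# Illustrative usage sketch (hypothetical path; field names and order must match
# the NamedTuple signature above):
#
#   ensemble_filter_param((
#       time_series = "/path/to/L96_time_series_....jld2",
#       method = "etkf", seed = 1234, nanl = 4000, obs_un = 1.0, obs_dim = 40,
#       γ = 1.0, p_err = 0.03, p_wlk = 0.001, N_ens = 21,
#       s_infl = 1.02, p_infl = 1.0,
#   ))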
##############################################################################################
"""
    D3_var_filter_state((time_series::String, bkg_cov::String, seed::Int64, nanl::Int64,
                         obs_un::Float64, obs_dim::Int64, γ::Float64,
                         s_infl::Float64)::NamedTuple)
3D-VAR state estimation twin experiment.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"bkg_cov" => bkg_cov,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"s_infl" => round(s_infl, digits=2)
)
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/D3-var-bkg-" * bkg_cov * "/"
where the file name is written dynamically according to the selected parameters as follows:
"bkg-" * bkg_cov *
"_L96" *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_stateInfl_" * rpad(round(s_infl, digits=3), 5, "0") *
".jld2"
"""
function D3_var_filter_state((time_series, bkg_cov, seed, nanl, obs_un, obs_dim, γ,
s_infl)::NamedTuple{
(:time_series,:bkg_cov,:seed,:nanl,:obs_un,:obs_dim,:γ,
:s_infl),
<:Tuple{String,String,Int64,Int64,Float64,Int64,Float64,
Float64}})
# time the experiment
t1 = time()
# load the path, timeseries, and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
F = dx_params["F"]::Vector{Float64}
tanl = ts["tanl"]::Float64
# set the integration step size for the control trajectory
h = ts["h"]::Float64
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
# define the dynamical model derivative for this experiment - we are assuming
# Lorenz-96 model
dx_dt = L96.dx_dt
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# set seed
seed = ts["seed"]::Int64
Random.seed!(seed)
# define the initial background state as a perturbation of first true state
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
sys_dim = length(init)
x_b = rand(MvNormal(init, I))
# define the observation range and truth reference solution
obs = obs[:, 2:nanl + 1]
truth = copy(obs)
# define the state background covariance
if bkg_cov == "ID"
state_cov = s_infl * I
elseif bkg_cov == "clima"
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
state_dim = sys_dim
clima_nanl = 11000
clima_tanl = 0.50
clima_spin = 1000
clima_h = 0.01
clima_seed = 0
name = "L96_time_series_seed_" * lpad(clima_seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F[1], 4, "0") *
"_tanl_" * rpad(clima_tanl, 4, "0") *
"_nanl_" * lpad(clima_nanl, 5, "0") *
"_spin_" * lpad(clima_spin, 4, "0") *
"_h_" * rpad(clima_h, 5, "0") *
".jld2"
print("Loading background climatology from " * name * "\n")
        # note: `clima` is bound outside the try block so that the loaded (or
        # regenerated) climatology remains in scope after the block
        clima = try
            load(path * name)::Dict{String,Any}
        catch
            clima_params = (
                seed = clima_seed,
                h = clima_h,
                state_dim = state_dim,
                tanl = clima_tanl,
                nanl = clima_nanl,
                spin = clima_spin,
                diffusion = diffusion,
                F = F[1],
            )
            L96_time_series(clima_params)
            load(path * name)::Dict{String,Any}
        end
clima = clima["obs"]
state_cov = s_infl * Symmetric(cov(clima, dims=2))
end
# define kwargs for the analysis method
# and the underlying dynamical model
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
)
# define the observation operator, observation error covariance and observations
# with error observation covariance operator taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# create storage for the forecast and analysis statistics
fore_rmse = Vector{Float64}(undef, nanl)
filt_rmse = Vector{Float64}(undef, nanl)
# loop over the number of observation-forecast-analysis cycles
for i in 1:nanl
# loop over the integration steps between observations
for j in 1:f_steps
step_model!(x_b, 0.0, kwargs)
end
# compute the forecast rmse
fore_rmse[i] = rmsd(x_b, truth[:, i])
# optimized cost function input and value
x_b = D3_var_NewtonOp(x_b, obs[:, i], state_cov, H_obs, obs_cov, kwargs)
# compute the forecast rmse
filt_rmse[i] = rmsd(x_b, truth[:, i])
end
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"bkg_cov" => bkg_cov,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"h" => h,
"s_infl" => s_infl,
)
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/D3-var-bkg-" * bkg_cov * "/"
name = "bkg-" * bkg_cov *
"_L96" *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_stateInfl_" * rpad(round(s_infl, digits=3), 5, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
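# Illustrative usage sketch (hypothetical path):
#
#   D3_var_filter_state((
#       time_series = "/path/to/L96_time_series_....jld2",
#       bkg_cov = "ID", seed = 0, nanl = 3500, obs_un = 1.0,
#       obs_dim = 40, γ = 1.0, s_infl = 0.2,
#   ))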
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 10118 | ##############################################################################################
module GenerateTimeSeries
##############################################################################################
# imports and exports
using Random, Distributions
using LinearAlgebra
using JLD2, HDF5
using ..DataAssimilationBenchmarks, ..DeSolvers, ..L96, ..IEEE39bus
export L96_time_series, IEEE39bus_time_series
##############################################################################################
"""
L96_time_series((seed::Int64, h::Float64, state_dim::Int64, tanl::Float64, nanl::Int64,
spin::Int64, diffusion::Float64, F::Float64)::NamedTuple)
Simulate a "free run" time series of the [Lorenz-96 model](@ref) model
for generating an observation process and truth twin for data assimilation twin experiments.
Output from the experiment is saved in a dictionary of the form,
Dict{String, Any}(
"seed" => seed,
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"tanl" => tanl,
"nanl" => nanl,
"spin" => spin,
"state_dim" => state_dim,
"obs" => obs,
"model" => "L96"
)
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
where the file name is written dynamically according to the selected parameters as follows:
"L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
"""
function L96_time_series((seed, h, state_dim, tanl, nanl, spin, diffusion, F)::NamedTuple{
(:seed,:h,:state_dim,:tanl,:nanl,:spin,:diffusion,:F),
<:Tuple{Int64,Float64,Int64,Float64,Int64,Int64,
Float64,Float64}})
# time the experiment
t1 = time()
# define the model
dx_dt = L96.dx_dt
dx_params = Dict{String, Array{Float64}}("F" => [8.0])
# define the integration scheme
if diffusion == 0.0
# generate the observations with the Runge-Kutta scheme
step_model! = DeSolvers.rk4_step!
else
# generate the observations with the strong Taylor scheme
step_model! = L96.l96s_tay2_step!
# parameters for the order 2.0 strong Taylor scheme
p = 1
α, ρ = comput_α_ρ(p)
end
# set the number of discrete integrations steps between each observation time
f_steps = convert(Int64, tanl/h)
# set storage for the ensemble timeseries
obs = Array{Float64}(undef, state_dim, nanl)
# define the integration parameters in the kwargs dict
kwargs = Dict{String, Any}(
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"dx_dt" => dx_dt,
)
if diffusion != 0.0
kwargs["p"] = p
kwargs["α"] = α
kwargs["ρ"] = ρ
end
# seed the random generator
Random.seed!(seed)
x = rand(Normal(), state_dim)
# spin the model onto the attractor
for j in 1:spin
for k in 1:f_steps
step_model!(x, 0.0, kwargs)
end
end
# save the model state at timesteps of tanl
for j in 1:nanl
for k in 1:f_steps
step_model!(x, 0.0, kwargs)
end
obs[:, j] = x
end
data = Dict{String, Any}(
"seed" => seed,
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"tanl" => tanl,
"nanl" => nanl,
"spin" => spin,
"state_dim" => state_dim,
"obs" => obs,
"model" => "L96"
)
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
name = "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
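# Illustrative usage sketch: generate a deterministic truth twin with parameter
# values similar to those used by the experiment drivers:
#
#   L96_time_series((
#       seed = 123, h = 0.05, state_dim = 40, tanl = 0.05,
#       nanl = 6500, spin = 1500, diffusion = 0.0, F = 8.0,
#   ))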
##############################################################################################
"""
    IEEE39bus_time_series((seed::Int64, h::Float64, tanl::Float64, nanl::Int64, spin::Int64,
                           diffusion::Float64)::NamedTuple)
Simulate a "free run" time series of the [IEEE39bus](@ref) model for
generating an observation process and truth twin for data assimilation twin experiments.
Output from the experiment is saved in a dictionary of the form,
Dict{String, Any}(
"seed" => seed,
"h" => h,
"diffusion" => diffusion,
"diff_mat" => diff_mat,
"dx_params" => dx_params,
"tanl" => tanl,
"nanl" => nanl,
"spin" => spin,
"obs" => obs,
"model" => "IEEE39bus"
)
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
where the file name is written dynamically according to the selected parameters as follows:
"IEEE39bus_time_series_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
"""
function IEEE39bus_time_series((seed, h, tanl, nanl, spin, diffusion)::NamedTuple{
(:seed,:h,:tanl,:nanl,:spin,:diffusion),
<:Tuple{Int64,Float64,Float64,Int64,Int64,Float64}})
# time the experiment
t1 = time()
# set random seed
Random.seed!(seed)
# define the model
dx_dt = IEEE39bus.dx_dt
state_dim = 20
# define the model parameters
input_data = pkgdir(DataAssimilationBenchmarks) *
"/src/models/IEEE39bus_inputs/NE_EffectiveNetworkParams.jld2"
tmp = load(input_data)
dx_params = Dict{String, Array{Float64}}(
"A" => tmp["A"],
"D" => tmp["D"],
"H" => tmp["H"],
"K" => tmp["K"],
"γ" => tmp["γ"],
"ω" => tmp["ω"]
)
# define the integration scheme
step_model! = DeSolvers.rk4_step!
    # NOTE: the integration step is hard-coded to 0.01 here, overriding the h argument
    h = 0.01
# define the diffusion coefficient structure matrix
# notice that the perturbations are only applied to the frequencies
# based on the change of variables derivation
# likewise, the diffusion parameter is applied separately as an amplitude
# in the Runge-Kutta scheme
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] = tmp["ω"][1] ./ (2.0 * tmp["H"])
# set the number of discrete integrations steps between each observation time
f_steps = convert(Int64, tanl/h)
# set storage for the ensemble timeseries
obs = Array{Float64}(undef, state_dim, nanl)
# define the integration parameters in the kwargs dict
kwargs = Dict{String, Any}(
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"dx_dt" => dx_dt,
"diff_mat" => diff_mat
)
# load the steady state, generated by long simulation without noise
x = tmp["synchronous_state"]
# spin the model onto the attractor
for j in 1:spin
for k in 1:f_steps
step_model!(x, 0.0, kwargs)
# set phase angles mod 2pi
x[1:10] .= rem2pi.(x[1:10], RoundNearest)
end
end
# save the model state at timesteps of tanl
for j in 1:nanl
for k in 1:f_steps
step_model!(x, 0.0, kwargs)
# set phase angles mod 2pi
x[1:10] .= rem2pi.(x[1:10], RoundNearest)
end
obs[:, j] = x
end
data = Dict{String, Any}(
"seed" => seed,
"h" => h,
"diffusion" => diffusion,
"diff_mat" => diff_mat,
"dx_params" => dx_params,
"tanl" => tanl,
"nanl" => nanl,
"spin" => spin,
"obs" => obs,
"model" => "IEEE39bus"
)
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
name = "IEEE39bus_time_series_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 25239 | ##############################################################################################
module ParallelExperimentDriver
##############################################################################################
# imports and exports
using ..DataAssimilationBenchmarks
export ensemble_filter_adaptive_inflation, ensemble_filter_param, classic_ensemble_state,
classic_ensemble_param, single_iteration_ensemble_state, iterative_ensemble_state,
D3_var_tuned_inflation
##############################################################################################
# Utility methods and definitions
##############################################################################################
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
##############################################################################################
# Filters
##############################################################################################
"""
args, wrap_exp = ensemble_filter_adaptive_inflation()
Constructs a parameter map and experiment wrapper for a sensitivity test of adaptive inflation.
The ensemble size is varied along with the adaptive multiplicative inflation method,
including the dual, primal and primal with linesearch EnKF-N methods.
"""
function ensemble_filter_adaptive_inflation()
exp = DataAssimilationBenchmarks.FilterExps.ensemble_filter_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 6500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["enkf-n-primal", "enkf-n-primal-ls", "enkf-n-dual"]
seed = 1234
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
s_infls = [1.0]
nanl = 4000
γ = 1.0
# load the experiments
args = Vector{Any}()
for method in methods
for N_ens in N_enss
for s_infl in s_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
N_ens = N_ens,
s_infl = s_infl
)
push!(args, tmp)
end
end
end
return args, wrap_exp
end
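# Illustrative sketch (assumes the Distributed standard library and that worker
# processes have DataAssimilationBenchmarks loaded): the returned argument list
# and wrapper are intended for a parallel map, e.g.
#
#   using Distributed
#   args, wrap_exp = ensemble_filter_adaptive_inflation()
#   pmap(wrap_exp, args)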
##############################################################################################
"""
args, wrap_exp = D3_var_tuned_inflation()
Constructs a parameter range for tuning multiplicative inflation of the 3D-VAR background covariance.
The choice of the background covariance is varied between the identity matrix and a
climatological covariance computed from a long time series of the Lorenz-96 system. Both
choices then are scaled by a multiplicative covariance parameter that tunes the variances.
"""
function D3_var_tuned_inflation()
exp = DataAssimilationBenchmarks.FilterExps.D3_var_filter_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 6500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
bkg_covs = ["ID", "clima"]
dims = collect(1:1:40)
s_infls = 0.005:0.005:1.0
# load the experiments
args = Vector{Any}()
for s_infl in s_infls
for dim in dims
for bkg_cov in bkg_covs
tmp = (
time_series = time_series,
bkg_cov = bkg_cov,
seed = 0,
nanl = 3500,
obs_un = 1.0,
obs_dim = dim,
γ = 1.0,
s_infl = s_infl,
)
push!(args, tmp)
end
end
end
return args, wrap_exp
end
##############################################################################################
"""
args, wrap_exp = ensemble_filter_param()
Constructs a parameter map and experiment wrapper for a sensitivity test of parameter estimation.
Ensemble schemes sample the forcing parameter for the Lorenz-96 system and vary the random
walk parameter model for its time evolution / search over parameter space.
"""
function ensemble_filter_param()
exp = DataAssimilationBenchmarks.FilterExps.ensemble_filter_param
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 7500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["etkf", "mlef-transform"]
seed = 1234
obs_un = 1.0
obs_dim = 40
p_err = 0.03
p_wlks = [0.0000, 0.0001, 0.0010, 0.0100]
N_enss = 15:3:42
nanl = 4000
s_infls = LinRange(1.0, 1.10, 11)
p_infls = LinRange(1.0, 1.05, 6)
γ = 1.0
# load the experiments
args = Vector{Any}()
for method in methods
for p_wlk in p_wlks
for N_ens in N_enss
for s_infl in s_infls
for p_infl in p_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
p_err = p_err,
p_wlk = p_wlk,
N_ens = N_ens,
s_infl = s_infl,
p_infl = p_infl
)
push!(args, tmp)
end
end
end
end
end
return args, wrap_exp
end
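# Note (an illustrative addition): the nested loops above form a Cartesian product, so `args`
# holds length(methods) * length(p_wlks) * length(N_enss) * length(s_infls) * length(p_infls)
# = 2 * 4 * 10 * 11 * 6 = 5280 configurations.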
##############################################################################################
# Classic smoothers
##############################################################################################
"""
args, wrap_exp = classic_ensemble_state()
Constructs a parameter map and experiment wrapper for a sensitivity test of nonlinear obs.
The ETKS / MLES estimators vary over different multiplicative inflation parameters, smoother
lag lengths and the nonlinearity of the observation operator.
"""
function classic_ensemble_state()
exp = DataAssimilationBenchmarks.SmootherExps.classic_ensemble_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 7500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["etks", "mles-transform"]
seed = 1234
lags = 1:3:52
shifts = [1]
gammas = Vector{Float64}(1:10)
shift = 1
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
s_infls = LinRange(1.0, 1.10, 11)
nanl = 4000
# load the experiments
args = Vector{Any}()
for method in methods
for γ in gammas
for lag in lags
for shift in shifts
for N_ens in N_enss
for s_infl in s_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
lag = lag,
shift = shift,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
N_ens = N_ens,
s_infl = s_infl
)
push!(args, tmp)
end
end
end
end
end
end
return args, wrap_exp
end
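# Note (an illustrative addition): the grid above contains length(methods) * length(gammas) *
# length(lags) * length(shifts) * length(N_enss) * length(s_infls)
# = 2 * 10 * 18 * 1 * 10 * 11 = 39600 configurations.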
#############################################################################################
"""
args, wrap_exp = classic_ensemble_param()
Constructs a parameter map and experiment wrapper for a sensitivity test of parameter estimation.
Ensemble schemes sample the forcing parameter for the Lorenz-96 system and vary the random
walk parameter model for its time evolution / search over parameter space. Methods vary
the ETKS and MLES analysis, with different lag lengths, multiplicative inflation parameters,
and different parameter models.
"""
function classic_ensemble_param()
exp = DataAssimilationBenchmarks.SmootherExps.classic_ensemble_param
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 7500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["etks", "mles-transform"]
seed = 1234
lags = 1:3:52
shifts = [1]
gammas = [1.0]
shift = 1
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
p_err = 0.03
p_wlks = [0.0000, 0.0001, 0.0010, 0.0100]
s_infls = LinRange(1.0, 1.10, 11)
p_infls = LinRange(1.0, 1.05, 6)
nanl = 4000
# load the experiments
args = Vector{Any}()
for method in methods
for lag in lags
for γ in gammas
for N_ens in N_enss
for p_wlk in p_wlks
for s_infl in s_infls
for p_infl in p_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
lag = lag,
shift = shift,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
p_err = p_err,
p_wlk = p_wlk,
N_ens = N_ens,
s_infl = s_infl,
p_infl = p_infl
)
push!(args, tmp)
end
end
end
end
end
end
end
return args, wrap_exp
end
#############################################################################################
# SIEnKS
#############################################################################################
"""
args, wrap_exp = single_iteration_ensemble_state()
Constructs a parameter map and experiment wrapper for a sensitivity test of multiple DA.
The ensemble size is varied along with the multiplicative inflation coefficient,
and the use of single versus multiple data assimilation in the SIEnKS.
"""
function single_iteration_ensemble_state()
exp = DataAssimilationBenchmarks.SmootherExps.single_iteration_ensemble_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 7500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["etks"]
seed = 1234
lags = 1:3:52
shifts = [1]
gammas = [1.0]
shift = 1
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
s_infls = LinRange(1.0, 1.10, 11)
nanl = 4000
mdas = [false, true]
# load the experiments
args = Vector{Any}()
for mda in mdas
for γ in gammas
for method in methods
for lag in lags
for shift in shifts
for N_ens in N_enss
for s_infl in s_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
lag = lag,
shift = shift,
mda = mda,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
N_ens = N_ens,
s_infl = s_infl
)
push!(args, tmp)
end
end
end
end
end
end
end
return args, wrap_exp
end
#############################################################################################
# IEnKS
#############################################################################################
"""
args, wrap_exp = iterative_ensemble_state()
Constructs a parameter map and experiment wrapper for a sensitivity test of multiple DA.
The ensemble size is varied along with the multiplicative inflation coefficient,
and the use of single versus multiple data assimilation in the IEnKS.
"""
function iterative_ensemble_state()
exp = DataAssimilationBenchmarks.SmootherExps.iterative_ensemble_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 7500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["ienks-transform", "lin-ienks-transform"]
seed = 1234
lags = 1:3:52
gammas = [1.0]
shift = 1
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
s_infls = LinRange(1.0, 1.10, 11)
nanl = 4000
mdas = [false, true]
# load the experiments
args = Vector{Any}()
for mda in mdas
for γ in gammas
for method in methods
for lag in lags
for N_ens in N_enss
for s_infl in s_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
lag = lag,
shift = shift,
mda = mda,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
N_ens = N_ens,
s_infl = s_infl
)
push!(args, tmp)
end
end
end
end
end
end
    return args, wrap_exp
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
["Apache-2.0"] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 11037 |
##############################################################################################
module SingleExperimentDriver
##############################################################################################
# imports and exports
using JLD2, HDF5
using ..DataAssimilationBenchmarks, ..FilterExps, ..SmootherExps, ..GenerateTimeSeries
export time_series_exps, enkf_exps, d3_var_exps, classic_enks_exps, sienks_exps, ienks_exps
##############################################################################################
# Utility methods and definitions
##############################################################################################
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
##############################################################################################
# Time series
##############################################################################################
"""
time_series_exps = Dict{String, Any}
Pre-configured time series experiments for generating test data for twin experiments.
"""
time_series_exps = Dict{String, Any}(
# Generates a short time series of the L96 model for testing
"L96_deterministic_test" => (
seed = 0,
h = 0.05,
state_dim = 40,
tanl = 0.05,
nanl = 5000,
spin = 1500,
diffusion = 0.00,
F = 8.0,
),
# Generates a short time series of the IEEE39bus model for testing
"IEEE39bus_deterministic_test" => (
seed = 0,
h = 0.01,
tanl = 0.01,
nanl = 5000,
spin = 1500,
diffusion = 0.0,
),
)
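# Usage sketch (an illustrative addition): the L96 test series referenced by the experiment
# configurations below can be generated with
#     GenerateTimeSeries.L96_time_series(time_series_exps["L96_deterministic_test"])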
##############################################################################################
# EnKF style experiments
##############################################################################################
"""
enkf_exps = Dict{String, Any}
Pre-configured EnKF style twin experiments for benchmark models.
"""
enkf_exps = Dict{String, Any}(
# Lorenz-96 ETKF state estimation standard configuration
"L96_ETKF_state_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etkf",
seed = 0,
nanl = 3500,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
# Lorenz-96 3D-VAR state estimation standard configuration
"L96_D3_var_state_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
bkg_cov = "ID",
seed = 0,
nanl = 3500,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
s_infl = 0.20,
),
# Lorenz-96 ETKF joint state-parameter estimation standard configuration
"L96_ETKF_param_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etkf",
seed = 0,
nanl = 3500,
obs_un = 1.0,
obs_dim = 40,
γ = 1.0,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
# IEEE39bus ETKF state estimation standard configuration
"IEEE39bus_ETKF_state_test" => (
time_series = path *
"IEEE39bus_time_series_seed_0000_diff_0.000_tanl_0.01_nanl_05000_spin_1500_" *
"h_0.010.jld2",
method = "etkf",
seed = 0,
nanl = 3500,
obs_un = 0.1,
obs_dim = 20,
γ = 1.00,
N_ens = 21,
s_infl = 1.02
),
)
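# Usage sketch (an illustrative addition, assuming the corresponding time series was
# generated as above):
#     FilterExps.ensemble_filter_param(enkf_exps["L96_ETKF_param_test"])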
##############################################################################################
# 3D-VAR style experiments
##############################################################################################
"""
d3_var_exps = Dict{String, Any}
Pre-configured 3D-VAR style twin experiments for benchmark models.
"""
d3_var_exps = Dict{String, Any}(
# Lorenz-96 3D-VAR state estimation configuration
"L96_D3_var_state_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_"*
"spin_1500_h_0.050.jld2",
bkg_cov = "ID",
seed = 0,
nanl = 3500,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
s_infl = 0.23,
)
)
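# Usage sketch (an illustrative addition):
#     FilterExps.D3_var_filter_state(d3_var_exps["L96_D3_var_state_test"])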
##############################################################################################
# Classic smoothers
##############################################################################################
"""
classic_enks_exps = Dict{String, Any}
Pre-configured classic EnKS style twin experiments for benchmark models.
"""
classic_enks_exps = Dict{String, Any}(
# Lorenz-96 ETKS state estimation standard configuration
"L96_ETKS_state_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
# Lorenz-96 ETKS joint state-parameter estimation standard configuration
"L96_ETKS_param_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
obs_un = 1.0,
obs_dim = 40,
γ = 1.0,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
)
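# Usage sketch (an illustrative addition):
#     SmootherExps.classic_ensemble_state(classic_enks_exps["L96_ETKS_state_test"])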
##############################################################################################
# SIEnKS style experiments
##############################################################################################
"""
sienks_exps = Dict{String, Any}
Pre-configured SIEnKS style twin experiments for benchmark models.
"""
sienks_exps = Dict{String, Any}(
# Lorenz-96 SIEnKS sda state estimation standard configuration
"L96_ETKS_state_sda_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = false,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
"L96_ETKS_state_mda_test" => (
# Lorenz-96 SIEnKS mda state estimation standard configuration
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = true,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
# Lorenz-96 SIEnKS sda joint state-parameter estimation standard configuration
"L96_ETKS_param_sda_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = false,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
# Lorenz-96 SIEnKS mda joint state-parameter estimation standard configuration
"L96_ETKS_param_mda_test" => (
time_series = path * "L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_" *
"tanl_0.05_nanl_05000_spin_1500_h_0.050.jld2",
method = "etks",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = true,
obs_un = 1.0,
obs_dim = 40,
γ = 1.0,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
)
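# Usage sketch (an illustrative addition):
#     SmootherExps.single_iteration_ensemble_state(sienks_exps["L96_ETKS_state_sda_test"])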
##############################################################################################
# IEnKS style experiments
##############################################################################################
"""
ienks_exps = Dict{String, Any}
Pre-configured IEnKS style twin experiments for benchmark models.
"""
ienks_exps = Dict{String, Any}(
# Lorenz-96 IEnKS sda state estimation standard configuration
"L96_IEnKS_state_sda_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "ienks-transform",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = false,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
# Lorenz-96 IEnKS mda state estimation standard configuration
"L96_IEnKS_state_mda_test" => (
time_series = path *
"L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05_nanl_05000_" *
"spin_1500_h_0.050.jld2",
method = "ienks-transform",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = true,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
N_ens = 21,
s_infl = 1.02,
),
# Lorenz-96 IEnKS sda joint state-parameter estimation standard configuration
"L96_IEnKS_param_sda_test" => (
time_series = path * "L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0" *
"_tanl_0.05_nanl_05000_spin_1500_h_0.050.jld2",
method = "ienks-transform",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = false,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
# Lorenz-96 IEnKS mda joint state-parameter estimation standard configuration
"L96_IEnKS_param_mda_test" => (
time_series = path * "L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0" *
"_tanl_0.05_nanl_05000_spin_1500_h_0.050.jld2",
method = "ienks-transform",
seed = 0,
nanl = 3500,
lag = 10,
shift = 1,
mda = true,
obs_un = 1.0,
obs_dim = 40,
γ = 1.00,
p_err = 0.10,
p_wlk = 0.0010,
N_ens = 21,
s_infl = 1.02,
p_infl = 1.0,
),
)
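# Usage sketch (an illustrative addition):
#     SmootherExps.iterative_ensemble_state(ienks_exps["L96_IEnKS_state_sda_test"])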
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
["Apache-2.0"] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 109889 |
##############################################################################################
module SmootherExps
##############################################################################################
# imports and exports
using Random, Distributions
using LinearAlgebra
using JLD2, HDF5
using ..DataAssimilationBenchmarks, ..ObsOperators, ..EnsembleKalmanSchemes, ..DeSolvers,
..L96, ..IEEE39bus
export classic_ensemble_state, classic_ensemble_param, single_iteration_ensemble_state,
single_iteration_ensemble_param,
iterative_ensemble_state, iterative_ensemble_param
##############################################################################################
# Main smoothing experiments, debugged and validated for use with schemes in methods directory
##############################################################################################
"""
classic_ensemble_state((time_series::String, method::String, seed::Int64, nanl::Int64,
lag::Int64, shift::Int64, obs_un::Float64, obs_dim::Int64,
γ::Float64, N_ens::Int64, s_infl::Float64)::NamedTuple)
Classic ensemble Kalman smoother state estimation twin experiment.
NOTE: the classic scheme does not use multiple data assimilation and we hard code
`mda=false` in the function for consistency with the API of other methods.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"h" => h,
"N_ens" => N_ens,
"mda" => mda,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-classic/"
where the file name is written dynamically according to the selected parameters as follows:
method * "-classic_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
"""
function classic_ensemble_state((time_series, method, seed, nanl, lag, shift, obs_un, obs_dim,
γ, N_ens, s_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:obs_un,:obs_dim,
:γ,:N_ens,:s_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Float64,Int64,
Float64,Int64,Float64}})
# time the experiment
t1 = time()
# Define experiment parameters
# define static mda parameter, not used for classic smoother
mda = false
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
    # simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
sys_dim = length(init)
ens = rand(MvNormal(init, I), N_ens)
# define the observation range and truth reference solution
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"s_infl" => s_infl,
"γ" => γ,
"shift" => shift,
"mda" => mda
)
# define the observation operator, observation error covariance and observations
# with error, observation covariance operator taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# check if there is a diffusion structure matrix
if haskey(ts, "diff_mat")
kwargs["diff_mat"] = ts["diff_mat"]
end
# create storage for the forecast and analysis statistics, indexed in relative time
# the first index corresponds to time 1
# last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# posterior array of length lag + shift will be loaded with filtered states as they
# arrive in the DAW, with the shifting time index
post = Array{Float64}(undef, sys_dim, N_ens, lag + shift)
# we will run through nanl total analyses, i ranges in the absolute analysis-time index,
# we perform assimilation of the observation window from time 2 to time nanl + 1 + lag
# at increments of shift starting at time 2 because of no observations at time 1
# only the interval 2 : nanl + 1 is stored later for all statistics
for i in 2: shift : nanl + 1 + lag
kwargs["posterior"] = post
# observations indexed in absolute time
analysis = ls_smoother_classic(method, ens, obs[:, i: i + shift - 1],
H_obs, obs_cov, kwargs)
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis statistics storage index starts
# at absolute time 1, truth index starts at absolute time 1
fore_rmse[i + j - 1],
fore_spread[i + j - 1] = analyze_ens(
fore[:, :, j],
truth[:, i + j - 1]
)
filt_rmse[i + j - 1],
filt_spread[i + j - 1] = analyze_ens(
filt[:, :, j],
truth[:, i + j - 1]
)
            # we analyze the posterior states to be discarded in the non-overlapping DAWs
            if shift == lag
                # for shift=lag, all states are analyzed and discarded;
                # no dummy past states are used, truth follows times minus 1
                # from the filter and forecast statistics
post_rmse[i + j - 2],
post_spread[i + j - 2] = analyze_ens(
post[:, :, j],
truth[:, i + j - 2]
)
elseif i > lag
# for lag > shift, we wait for the dummy lag-1-total posterior states to be
# cycled out, the first posterior starts with the first prior at time 1,
# later discarded to align stats
post_rmse[i - lag + j - 1],
post_spread[i - lag + j - 1] = analyze_ens(
post[:, :, j],
truth[:, i - lag + j - 1]
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"h" => h,
"N_ens" => N_ens,
"mda" => mda,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-classic/"
name = method * "-classic_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
"""
classic_ensemble_param((time_series::String, method::String, seed::Int64, nanl::Int64,
lag::Int64, shift::Int64, obs_un::Float64, obs_dim::Int64,
                              γ::Float64, p_err::Float64, p_wlk::Float64, N_ens::Int64,
                              s_infl::Float64, p_infl::Float64)::NamedTuple)
Classic ensemble Kalman smoother joint state-parameter estimation twin experiment.
NOTE: the classic scheme does not use multiple data assimilation and we hard code `mda=false`
in the function for consistency with other methods.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"state_dim" => state_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_err" => p_err,
"p_wlk" => p_wlk,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-classic/"
where the file name is written dynamically according to the selected parameters as follows:
method * "-classic_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
"""
function classic_ensemble_param((time_series, method, seed, nanl, lag, shift, obs_un, obs_dim,
γ, p_err, p_wlk, N_ens, s_infl, p_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:obs_un,:obs_dim,
:γ,:p_err,:p_wlk,:N_ens,:s_infl,:p_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Float64,Int64,
Float64,Float64,Float64,Int64,Float64,Float64}})
# time the experiment
t1 = time()
# define static mda parameter, not used for classic smoother
    mda = false
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
    # simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
if model == "L96"
param_truth = pop!(dx_params, "F")
elseif model == "IEEE39bus"
param_truth = [pop!(dx_params, "H"); pop!(dx_params, "D")]
param_truth = param_truth[:]
end
state_dim = length(init)
sys_dim = state_dim + length(param_truth)
# define the initial ensemble
ens = rand(MvNormal(init, I), N_ens)
# extend this by the parameter ensemble
# note here the covariance is supplied such that the standard deviation is a percent
# of the parameter value
param_ens = rand(MvNormal(param_truth[:],
diagm(param_truth[:] * p_err).^2.0),
N_ens
)
# define the extended state ensemble
ens = [ens; param_ens]
# define the observation sequence where we map the true state into the observation space
# and perturb by white-in-time-and-space noise with standard deviation obs_un
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"γ" => γ,
"state_dim" => state_dim,
"p_wlk" => p_wlk,
"s_infl" => s_infl,
"p_infl" => p_infl,
"shift" => shift,
"mda" => mda
)
    # define the observation operator, observation error covariance and observations
    # with error, observation covariance operator taken as a uniform scaling by default,
    # can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# we define the parameter sample as the key name and index
# of the extended state vector pair, to be loaded in the
# ensemble integration step
if model == "L96"
param_sample = Dict("F" => [41:41])
elseif model == "IEEE39bus"
param_sample = Dict("H" => [21:30], "D" => [31:40])
end
kwargs["param_sample"] = param_sample
# create storage for the forecast and analysis statistics, indexed in relative time
# first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# posterior array of length lag + shift will be loaded with filtered states as they
# arrive in the DAW, with the shifting time index
post = Array{Float64}(undef, sys_dim, N_ens, lag + shift)
# we will run through nanl total analyses, i ranges in the absolute analysis-time index,
# we perform assimilation of the observation window from time 2 to time nanl + 1 + lag
# at increments of shift starting at time 2 because of no observations at time 1
# only the interval 2 : nanl + 1 is stored later for all statistics
for i in 2: shift : nanl + 1 + lag
kwargs["posterior"] = post
# observations indexed in absolute time
analysis = ls_smoother_classic(
method, ens, obs[:, i: i + shift - 1],
H_obs, obs_cov, kwargs
)
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis statistics storage index starts
# at absolute time 1, truth index starts at absolute time 1
fore_rmse[i + j - 1],
fore_spread[i + j - 1] = analyze_ens(
fore[1:state_dim, :, j],
truth[:, i + j - 1]
)
filt_rmse[i + j - 1],
filt_spread[i + j - 1] = analyze_ens(
filt[1:state_dim, :, j],
truth[:, i + j - 1]
)
# analyze the posterior states that will be discarded in the non-overlapping DAWs
if shift == lag
                # for shift=lag, all states are analyzed and discarded;
                # no dummy past states are used, truth follows times minus 1 from the
                # filter and forecast statistics
post_rmse[i + j - 2],
post_spread[i + j - 2] = analyze_ens(
post[1:state_dim, :, j],
truth[:, i + j - 2]
)
para_rmse[i + j - 2],
para_spread[i + j - 2] = analyze_ens_param(
post[state_dim + 1: end,:, j],
param_truth
)
elseif i > lag
                # for lag > shift, we wait for the dummy lag-1-total posterior states
                # to be cycled out; the first posterior starts with the first prior at time 1,
                # later discarded to align stats
post_rmse[i - lag + j - 1],
post_spread[i - lag + j - 1] = analyze_ens(
post[1:state_dim, :, j],
truth[:, i - lag + j - 1]
)
para_rmse[i - lag + j - 1],
para_spread[i - lag + j - 1] =
analyze_ens_param(
post[state_dim + 1: end, :, j],
param_truth
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
para_rmse = para_rmse[2: nanl + 1]
para_spread = para_spread[2: nanl + 1]
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"state_dim" => state_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_err" => p_err,
"p_wlk" => p_wlk,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-classic/"
name = method * "-classic_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
"""
single_iteration_ensemble_state((time_series::String, method::String, seed::Int64,
nanl::Int64, lag::Int64, shift::Int64, mda::Bool,
obs_un::Float64, obs_dim::Int64, γ::Float64,
                                             N_ens::Int64, s_infl::Float64)::NamedTuple)
SIEnKS state estimation twin experiment.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-single-iteration/"
where the file name is written dynamically according to the selected parameters as follows:
method * "-single-iteration_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
"""
function single_iteration_ensemble_state((time_series, method, seed, nanl, lag, shift, mda,
obs_un, obs_dim, γ, N_ens, s_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:mda,
:obs_un,:obs_dim,:γ,:N_ens,:s_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Bool,
Float64,Int64,Float64,Int64,Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
    # simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# number of discrete shift windows within the lag window
n_shifts = convert(Int64, lag / shift)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
sys_dim = length(init)
ens = rand(MvNormal(init, I), N_ens)
    # define the observation sequence where we map the true state into the observation
    # space and perturb by white-in-time-and-space noise with standard deviation obs_un
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
"shift" => shift,
"s_infl" => s_infl,
"mda" => mda
)
    # define the observation operator, observation error covariance and observations
    # with error, observation covariance operator taken as a uniform scaling by default,
    # can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# check if there is a diffusion structure matrix
if haskey(ts, "diff_mat")
kwargs["diff_mat"] = ts["diff_mat"]
end
# create storage for the forecast and analysis statistics, indexed in relative time
# first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# perform an initial spin for the smoothed re-analyzed first prior estimate while
# handling new observations with a filtering step to prevent divergence of the
# forecast for long lags
spin = true
kwargs["spin"] = spin
posterior = Array{Float64}(undef, sys_dim, N_ens, shift)
kwargs["posterior"] = posterior
# we will run through nanl + 2 * lag total observations but discard the last-lag
# forecast values and first-lag posterior values so that the statistics align on
# the same time points after the spin
for i in 2: shift : nanl + lag + 1
# perform assimilation of the DAW
# we use the observation window from current time +1 to current time +lag
if mda
            # NOTE: mda spin weights are only defined for lag equal to an integer
            # multiple of shift
if spin
# for the first rebalancing step, all observations are new
# and get fully assimilated, observation weights are given with
# respect to a special window in terms of the number of times the
# observation will be assimilated
obs_weights = []
for n in 1:n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
kwargs["reb_weights"] = ones(lag)
elseif i <= lag
# if still processing observations from the spin cycle,
# deal with special weights given by the number of times
# the observation is assimilated
n_complete = (i - 2) / shift
n_incomplete = n_shifts - n_complete
# the leading terms have weights that are based upon the number of times
# that the observation will be assimilated < n_shifts total times as in
# the stable algorithm
obs_weights = []
for n in n_shifts - n_incomplete + 1 : n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
for n in 1 : n_complete
obs_weights = [obs_weights; ones(shift) * n_shifts]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
reb_weights = []
# the rebalancing weights are specially constructed as above
for n in 1:n_incomplete
reb_weights = [reb_weights; ones(shift) * n / (n + n_complete)]
end
for n in n_incomplete + 1 : n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
else
# equal weights as all observations are assimilated n_shifts total times
kwargs["obs_weights"] = ones(lag) * n_shifts
# rebalancing weights are constructed in steady state
reb_weights = []
for n in 1:n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
end
end
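        # Worked example (an illustrative addition): with lag = 4 and shift = 2, so that
        # n_shifts = 2, the spin branch above gives obs_weights = [1, 1, 2, 2] and
        # reb_weights = ones(4); the steady state branch gives obs_weights = [2, 2, 2, 2]
        # and reb_weights = 1.0 ./ [0.5, 0.5, 1.0, 1.0] = [2.0, 2.0, 1.0, 1.0].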
        # perform the analysis
analysis = ls_smoother_single_iteration(
method, ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs
)
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
if spin
for j in 1:lag
# compute forecast and filter statistics for spin period
fore_rmse[i - 1 + j],
fore_spread[i - 1 + j] = analyze_ens(
fore[:, :, j],
truth[:, i - 1 + j]
)
filt_rmse[i - 1 + j],
filt_spread[i - 1 + j] = analyze_ens(
filt[:, :, j],
truth[:, i - 1 + j]
)
end
for j in 1:shift
# compute the reanalyzed prior and the shift-forward forecasted reanalysis
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[:, :, j],
truth[:, i - 2 + j]
)
end
# turn off the initial spin period, continue on the normal assimilation cycle
spin = false
kwargs["spin"] = spin
else
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis and truth are in absolute time,
# forecast / filter stats computed beyond the first lag period for the spin
fore_rmse[i + lag - 1 - shift + j],
fore_spread[i + lag - 1 - shift + j] =
analyze_ens(
fore[:, :, j],
truth[:, i + lag - 1 - shift + j]
)
filt_rmse[i + lag - 1 - shift + j],
filt_spread[i + lag - 1 - shift + j] =
analyze_ens(
filt[:, :, j],
truth[:, i + lag - 1 - shift + j]
)
# analysis statistics computed beyond the first shift
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[:, :, j],
truth[:, i - 2 + j]
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-single-iteration/"
name = method * "-single-iteration_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
"""
single_iteration_ensemble_param((time_series::String, method::String, seed::Int64,
nanl::Int64, lag::Int64, shift::Int64, mda::Bool,
obs_un::Float64, obs_dim::Int64, γ::Float64,
p_err::Float64, p_wlk::Float64, N_ens::Int64,
s_infl::Float64, p_infl::Float64)::NamedTuple)
SIEnKS joint state-parameter estimation twin experiment.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_wlk" => p_wlk,
"p_infl" => p_infl,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-single-iteration/"
where the file name is written dynamically according to the selected parameters as follows:
method * "-single-iteration_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
"""
function single_iteration_ensemble_param((time_series, method, seed, nanl, lag, shift, mda,
obs_un, obs_dim, γ, p_err, p_wlk, N_ens, s_infl,
p_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:mda,
:obs_un,:obs_dim,:γ,:p_err,:p_wlk,:N_ens,:s_infl,
:p_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Bool,
Float64,Int64,Float64,Float64,Float64,Int64,
Float64,Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
    # simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# number of discrete shift windows within the lag window
n_shifts = convert(Int64, lag / shift)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
if model == "L96"
param_truth = pop!(dx_params, "F")
elseif model == "IEEE39bus"
param_truth = [pop!(dx_params, "H"); pop!(dx_params, "D")]
param_truth = param_truth[:]
end
state_dim = length(init)
sys_dim = state_dim + length(param_truth)
# define the initial ensemble
ens = rand(MvNormal(init, I), N_ens)
# extend this by the parameter ensemble
# note here the covariance is supplied such that the standard deviation is a percent
# of the parameter value
param_ens = rand(MvNormal(param_truth[:],
diagm(param_truth[:] * p_err).^2.0), N_ens)
# define the extended state ensemble
ens = [ens; param_ens]
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
"state_dim" => state_dim,
"shift" => shift,
"p_wlk" => p_wlk,
"s_infl" => s_infl,
"p_infl" => p_infl,
"mda" => mda
)
    # define the observation operator, observation error covariance and observations
    # with error, observation covariance operator taken as a uniform scaling by default,
    # can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# we define the parameter sample as the key name and index
# of the extended state vector pair, to be loaded in the
# ensemble integration step
if model == "L96"
param_sample = Dict("F" => [41:41])
elseif model == "IEEE39bus"
param_sample = Dict("H" => [21:30], "D" => [31:40])
end
kwargs["param_sample"] = param_sample
# create storage for the forecast and analysis statistics, indexed in relative time
# first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# perform an initial spin for the smoothed re-analyzed first prior estimate while
# handling new observations with a filtering step to prevent divergence of the forecast
# for long lags
spin = true
kwargs["spin"] = spin
posterior = zeros(sys_dim, N_ens, shift)
kwargs["posterior"] = posterior
# we will run through nanl + 2 * lag total analyses but discard the last-lag
# forecast values and first-lag posterior values so that the statistics align on
# the same time points after the spin
for i in 2: shift : nanl + lag + 1
# perform assimilation of the DAW
# we use the observation window from current time +1 to current time +lag
if mda
            # NOTE: mda spin weights are only defined for lag equal to an integer
            # multiple of shift
if spin
# for the first rebalancing step, all observations are new
# and get fully assimilated, observation weights are given with
# respect to a special window in terms of the number of times the
# observation will be assimilated
obs_weights = []
for n in 1:n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
kwargs["reb_weights"] = ones(lag)
elseif i <= lag
# if still processing observations from the spin cycle,
# deal with special weights given by the number of times
# the observation is assimilated
n_complete = (i - 2) / shift
n_incomplete = n_shifts - n_complete
# the leading terms have weights that are based upon the number of times
# that the observation will be assimilated < n_shifts total times as in
# the stable algorithm
obs_weights = []
for n in n_shifts - n_incomplete + 1 : n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
for n in 1 : n_complete
obs_weights = [obs_weights; ones(shift) * n_shifts]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
reb_weights = []
# the rebalancing weights are specially constructed as above
for n in 1:n_incomplete
reb_weights = [reb_weights; ones(shift) * n / (n + n_complete)]
end
for n in n_incomplete + 1 : n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
else
# equal weights as all observations are assimilated n_shifts total times
kwargs["obs_weights"] = ones(lag) * n_shifts
# rebalancing weights are constructed in steady state
reb_weights = []
for n in 1:n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
end
end
        # perform the analysis
analysis = ls_smoother_single_iteration(
method,
ens,
obs[:, i: i + lag - 1],
H_obs,
obs_cov,
kwargs
)
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
if spin
for j in 1:lag
# compute forecast and filter statistics for the spin period
fore_rmse[i - 1 + j],
fore_spread[i - 1 + j] = analyze_ens(
fore[1:state_dim, :, j],
truth[:, i - 1 + j]
)
filt_rmse[i - 1 + j],
filt_spread[i - 1 + j] = analyze_ens(
filt[1:state_dim, :, j],
truth[:, i - 1 + j]
)
end
for j in 1:shift
# compute the reanalyzed prior and the shift-forward forecasted reanalysis
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[1:state_dim, :, j],
truth[:, i - 2 + j]
)
para_rmse[i - 2 + j],
para_spread[i - 2 + j] = analyze_ens_param(
post[state_dim+1:end, :, j],
param_truth
)
end
# turn off the initial spin period, continue on the normal assimilation cycle
spin = false
kwargs["spin"] = spin
else
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis and truth arrays are
# in absolute time, forecast / filter stats computed beyond the
# first lag period for the spin
fore_rmse[i + lag - 1 - shift + j],
fore_spread[i + lag - 1 - shift + j] =
analyze_ens(
fore[1:state_dim, :, j],
truth[:, i + lag - 1 - shift + j]
)
filt_rmse[i + lag - 1 - shift + j],
filt_spread[i + lag - 1 - shift + j] =
analyze_ens(
filt[1:state_dim, :, j],
truth[:, i + lag - 1 - shift + j]
)
# analysis statistics computed beyond the first shift
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[1:state_dim, :, j],
truth[:, i - 2 + j]
)
para_rmse[i - 2 + j],
para_spread[i - 2 + j] = analyze_ens_param(
post[state_dim+1:end, :, j],
param_truth
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
para_rmse = para_rmse[2: nanl + 1]
para_spread = para_spread[2: nanl + 1]
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"param_truth" => param_truth,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_wlk" => p_wlk,
"p_infl" => p_infl,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "-single-iteration/"
name = method * "-single-iteration_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
"""
iterative_ensemble_state((time_series::String, method::String, seed::Int64, nanl::Int64,
lag::Int64, shift::Int64, mda::Bool, obs_un::Float64,
obs_dim::Int64, γ::Float64, N_ens::Int64,
s_infl::Float64)::NamedTuple)
4DEnVAR state estimation twin experiment using the IEnKS formalism.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"iteration_sequence" => iteration_sequence,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
where the file name is written dynamically according to the selected parameters as follows:
method * "_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
"""
function iterative_ensemble_state((time_series, method, seed, nanl, lag, shift, mda, obs_un,
                                   obs_dim, γ, N_ens, s_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:mda,:obs_un,
:obs_dim,:γ,:N_ens,:s_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Bool,Float64,
Int64,Float64,Int64,Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
# simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# define the iterative smoother method HARD-CODED here
ls_smoother_iterative = ls_smoother_gauss_newton
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# number of discrete shift windows within the lag window
n_shifts = convert(Int64, lag / shift)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
sys_dim = length(init)
ens = rand(MvNormal(init, I), N_ens)
# define the observation range and truth reference solution
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
"s_infl" => s_infl,
"shift" => shift,
"mda" => mda
)
# define the observation operator, observation error covariance and observations
# with error observation covariance operator taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# check if there is a diffusion structure matrix
if haskey(ts, "diff_mat")
kwargs["diff_mat"] = ts["diff_mat"]
end
# create storage for the forecast and analysis statistics, indexed in relative time
# first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# create storage for the iteration sequence, where we will append the number
    # of iterations on the fly, due to the mismatch between the number of observations
# and the number of analyses with shift > 1
iteration_sequence = Vector{Float64}[]
# create counter for the analyses
m = 1
# perform an initial spin for the smoothed re-analyzed first prior estimate while
# handling new observations with a filtering step to prevent divergence of the
# forecast for long lags
spin = true
kwargs["spin"] = spin
posterior = zeros(sys_dim, N_ens, shift)
kwargs["posterior"] = posterior
# we will run through nanl + 2 * lag total observations but discard the
# last-lag forecast values and first-lag posterior values so that the statistics
# align on the same observation time points after the spin
for i in 2: shift : nanl + lag + 1
# perform assimilation of the DAW
# we use the observation window from current time +1 to current time +lag
if mda
# NOTE: mda spin weights are only designed for lag equal to an integer
# multiple of shift
if spin
# for the first rebalancing step, all observations are new and get
                # fully assimilated; observation weights are given with respect to a
# special window in terms of the number of times the observation will
# be assimilated
obs_weights = []
for n in 1:n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
kwargs["reb_weights"] = ones(lag)
elseif i <= lag
# if still processing observations from the spin cycle,
# deal with special weights given by the number of times the observation
# is assimilated
                n_complete = div(i - 2, shift)
n_incomplete = n_shifts - n_complete
# the leading terms have weights that are based upon the number of times
# that the observation will be assimilated < n_shifts total times as in
# the stable algorithm
obs_weights = []
for n in n_shifts - n_incomplete + 1 : n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
for n in 1 : n_complete
obs_weights = [obs_weights; ones(shift) * n_shifts]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
reb_weights = []
# the rebalancing weights are specially constructed as above
for n in 1:n_incomplete
reb_weights = [reb_weights; ones(shift) * n / (n + n_complete)]
end
for n in n_incomplete + 1 : n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
else
# otherwise equal weights as all observations are assimilated n_shifts
# total times
kwargs["obs_weights"] = ones(lag) * n_shifts
# rebalancing weights are constructed in steady state
reb_weights = []
for n in 1:n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
end
end
if method[1:4] == "lin-"
if spin
# on the spin cycle, there are the standard number of iterations allowed
# to warm up
analysis = ls_smoother_iterative(method[5:end], ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs)
else
# after this, the number of iterations allowed is set to one
analysis = ls_smoother_iterative(method[5:end], ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs, max_iter=1)
end
else
analysis = ls_smoother_iterative(method, ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs)
end
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
iteration_sequence = [iteration_sequence; analysis["iterations"]]
        m += 1
if spin
for j in 1:lag
# compute filter statistics on the first lag states during spin period
filt_rmse[i - 1 + j],
filt_spread[i - 1 + j] = analyze_ens(
filt[:, :, j],
truth[:, i - 1 + j]
)
end
for j in 1:lag+shift
# compute the forecast statistics on the first lag+shift states during
# the spin period
fore_rmse[i - 1 + j],
fore_spread[i - 1 + j] = analyze_ens(
fore[:, :, j],
truth[:, i - 1 + j]
)
end
for j in 1:shift
# compute only the reanalyzed prior and the shift-forward forecasted
# reanalysis
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[:, :, j],
truth[:, i - 2 + j]
)
end
# turn off the initial spin period, continue hereafter on the normal
# assimilation cycle
spin = false
kwargs["spin"] = spin
else
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis and truth arrays
# are in absolute time, forecast / filter stats computed beyond
# the first lag period for the spin
fore_rmse[i + lag - 1 + j],
            fore_spread[i + lag - 1 + j] = analyze_ens(
fore[:, :, j],
truth[:, i + lag - 1 + j]
)
filt_rmse[i + lag - 1 - shift + j],
filt_spread[i + lag - 1 - shift + j] =
analyze_ens(
filt[:, :, j],
truth[:, i + lag - 1 - shift + j]
)
# analysis statistics computed beyond the first shift
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[:, :, j],
truth[:, i - 2 + j]
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
iteration_sequence = Array{Float64}(iteration_sequence)
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"iteration_sequence" => iteration_sequence,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
name = method * "_" * model *
"_state_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
"""
    iterative_ensemble_param((time_series::String, method::String, seed::Int64, nanl::Int64,
lag::Int64, shift::Int64, mda::Bool, obs_un::Float64,
obs_dim::Int64, γ::Float64, p_err::Float64, p_wlk::Float64,
N_ens::Int64, s_infl::Float64, p_infl::Float64)::NamedTuple)
4DEnVAR joint state-parameter estimation twin experiment using the IEnKS formalism.
Output from the experiment is saved in a dictionary of the form,
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"iteration_sequence" => iteration_sequence,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_wlk" => p_wlk,
"p_infl" => p_infl,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
Experiment output is written to a directory defined by
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
where the file name is written dynamically according to the selected parameters as follows:
method * "_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
"""
function iterative_ensemble_param((time_series, method, seed, nanl, lag, shift, mda, obs_un,
obs_dim, γ, p_err, p_wlk, N_ens, s_infl,
p_infl)::NamedTuple{
(:time_series,:method,:seed,:nanl,:lag,:shift,:mda,:obs_un,
:obs_dim,:γ,:p_err,:p_wlk,:N_ens,:s_infl,:p_infl),
<:Tuple{String,String,Int64,Int64,Int64,Int64,Bool,Float64,
Int64,Float64,Float64,Float64,Int64,Float64,
Float64}})
# time the experiment
t1 = time()
# load the timeseries and associated parameters
ts = load(time_series)::Dict{String,Any}
diffusion = ts["diffusion"]::Float64
dx_params = ts["dx_params"]::ParamDict(Float64)
tanl = ts["tanl"]::Float64
model = ts["model"]::String
# define the observation operator HARD-CODED in this line
H_obs = alternating_obs_operator
    # set the integration step size for the ensemble at 0.01 if an SDE; if deterministic,
# simply use the same step size as the observation model
if diffusion > 0.0
h = 0.01
else
h = ts["h"]
end
# define the dynamical model derivative for this experiment from the name
# supplied in the time series
if model == "L96"
dx_dt = L96.dx_dt
elseif model == "IEEE39bus"
dx_dt = IEEE39bus.dx_dt
end
# define integration method
step_model! = rk4_step!
# define the iterative smoother method
ls_smoother_iterative = ls_smoother_gauss_newton
# number of discrete forecast steps
f_steps = convert(Int64, tanl / h)
# number of discrete shift windows within the lag window
n_shifts = convert(Int64, lag / shift)
# set seed
Random.seed!(seed)
# define the initialization
obs = ts["obs"]::Array{Float64, 2}
init = obs[:, 1]
if model == "L96"
param_truth = pop!(dx_params, "F")
elseif model == "IEEE39bus"
param_truth = [pop!(dx_params, "H"); pop!(dx_params, "D")]
param_truth = param_truth[:]
end
state_dim = length(init)
sys_dim = state_dim + length(param_truth)
# define the initial ensemble
ens = rand(MvNormal(init, I), N_ens)
# extend this by the parameter ensemble
# note here the covariance is supplied such that the standard deviation is a
# percent of the parameter value
param_ens = rand(MvNormal(param_truth[:], diagm(param_truth[:] * p_err).^2.0), N_ens)
# define the extended state ensemble
ens = [ens; param_ens]
# define the observation range and truth reference solution
obs = obs[:, 1:nanl + 3 * lag + 1]
truth = copy(obs)
# define kwargs
kwargs = Dict{String,Any}(
"dx_dt" => dx_dt,
"f_steps" => f_steps,
"step_model" => step_model!,
"dx_params" => dx_params,
"h" => h,
"diffusion" => diffusion,
"γ" => γ,
"state_dim" => state_dim,
"shift" => shift,
"p_wlk" => p_wlk,
"s_infl" => s_infl,
"p_infl" => p_infl,
"mda" => mda
)
# define the observation operator, observation error covariance and observations
# with error observation covariance operator taken as a uniform scaling by default,
# can be changed in the definition below
obs = H_obs(obs, obs_dim, kwargs)
obs += obs_un * rand(Normal(), size(obs))
obs_cov = obs_un^2.0 * I
# we define the parameter sample as the key name and index
# of the extended state vector pair, to be loaded in the
# ensemble integration step
if model == "L96"
param_sample = Dict("F" => [41:41])
elseif model == "IEEE39bus"
param_sample = Dict("H" => [21:30], "D" => [31:40])
end
kwargs["param_sample"] = param_sample
# create storage for the forecast and analysis statistics, indexed in relative time
# first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
para_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# create storage for the iteration sequence, where we will append the number
    # of iterations on the fly, due to the mismatch between the number of observations
# and the number of analyses with shift > 1
iteration_sequence = Vector{Float64}[]
# create counter for the analyses
m = 1
# perform an initial spin for the smoothed re-analyzed first prior estimate while
# handling new observations with a filtering step to prevent divergence of the
# forecast for long lags
spin = true
kwargs["spin"] = spin
posterior = zeros(sys_dim, N_ens, shift)
kwargs["posterior"] = posterior
# we will run through nanl + 2 * lag total observations but discard the
# last-lag forecast values and first-lag posterior values so that the statistics
# align on the same observation time points after the spin
for i in 2: shift : nanl + lag + 1
# perform assimilation of the DAW
# we use the observation window from current time +1 to current time +lag
if mda
            # NOTE: MDA spin weights require lag equal to an integer multiple of shift
if spin
# all observations are new and get fully assimilated
# observation weights are given in terms of the
# number of times the observation will be assimilated
obs_weights = []
for n in 1:n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
kwargs["reb_weights"] = ones(lag)
elseif i <= lag
# still processing observations from the spin cycle, deal with special weights
# given by the number of times the observation is assimilated
                n_complete = div(i - 2, shift)
n_incomplete = n_shifts - n_complete
# the leading terms have weights that are based upon the number of times
# that the observation will be assimilated < n_shifts total times as in
# the stable algorithm
obs_weights = []
for n in n_shifts - n_incomplete + 1 : n_shifts
obs_weights = [obs_weights; ones(shift) * n]
end
for n in 1 : n_complete
obs_weights = [obs_weights; ones(shift) * n_shifts]
end
kwargs["obs_weights"] = Array{Float64}(obs_weights)
reb_weights = []
# the rebalancing weights are specially constructed as above
for n in 1:n_incomplete
reb_weights = [reb_weights; ones(shift) * n / (n + n_complete)]
end
for n in n_incomplete + 1 : n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
else
# equal weights as all observations are assimilated n_shifts total times
kwargs["obs_weights"] = ones(lag) * n_shifts
# rebalancing weights are constructed in steady state
reb_weights = []
for n in 1:n_shifts
reb_weights = [reb_weights; ones(shift) * n / n_shifts]
end
kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
end
end
if method[1:4] == "lin-"
if spin
# on the spin cycle, a standard number of iterations allowed to warm up
analysis = ls_smoother_iterative(method[5:end], ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs)
else
# after this, the number of iterations allowed is set to one
analysis = ls_smoother_iterative(method[5:end], ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs, max_iter=1)
end
else
analysis = ls_smoother_iterative(method, ens, obs[:, i: i + lag - 1],
H_obs, obs_cov, kwargs)
end
ens = analysis["ens"]::Array{Float64}
fore = analysis["fore"]::Array{Float64}
filt = analysis["filt"]::Array{Float64}
post = analysis["post"]::Array{Float64}
iteration_sequence = [iteration_sequence; analysis["iterations"]]
        m += 1
if spin
for j in 1:lag
# compute filter statistics on the first lag states during spin period
filt_rmse[i - 1 + j],
filt_spread[i - 1 + j] = analyze_ens(filt[1:state_dim, :, j],
truth[:, i - 1 + j])
end
for j in 1:lag+shift
# compute the forecast statistics on the first lag+shift states
# during the spin period
fore_rmse[i - 1 + j],
fore_spread[i - 1 + j] = analyze_ens(fore[1:state_dim, :, j],
truth[:, i - 1 + j])
end
for j in 1:shift
# compute the reanalyzed prior and the shift-forward forecasted reanalysis
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(post[1:state_dim, :, j],
truth[:, i - 2 + j])
para_rmse[i - 2 + j],
para_spread[i - 2 + j] = analyze_ens_param(post[state_dim+1:end, :, j],
param_truth)
end
# turn off the initial spin period, continue with the normal assimilation cycle
spin = false
kwargs["spin"] = spin
else
for j in 1:shift
# compute the forecast, filter and analysis statistics
# indices for the forecast, filter, analysis and truth are in absolute time,
# forecast / filter stats computed beyond the first lag period for the spin
fore_rmse[i + lag - 1 + j],
            fore_spread[i + lag - 1 + j] = analyze_ens(
fore[1:state_dim, :, j],
truth[:, i + lag - 1 + j]
)
filt_rmse[i + lag - 1 - shift + j],
filt_spread[i + lag - 1 - shift + j] =
analyze_ens(
filt[1:state_dim, :, j],
truth[:, i + lag - 1 - shift + j]
)
# analysis statistics computed beyond the first shift
post_rmse[i - 2 + j],
post_spread[i - 2 + j] = analyze_ens(
post[1:state_dim, :, j],
truth[:, i - 2 + j]
)
para_rmse[i - 2 + j],
para_spread[i - 2 + j] = analyze_ens_param(
post[state_dim+1:end, :, j],
param_truth
)
end
end
end
# cut the statistics so that they align on the same absolute time points
fore_rmse = fore_rmse[2: nanl + 1]
fore_spread = fore_spread[2: nanl + 1]
filt_rmse = filt_rmse[2: nanl + 1]
filt_spread = filt_spread[2: nanl + 1]
post_rmse = post_rmse[2: nanl + 1]
post_spread = post_spread[2: nanl + 1]
para_rmse = para_rmse[2: nanl + 1]
para_spread = para_spread[2: nanl + 1]
iteration_sequence = Array{Float64}(iteration_sequence)
data = Dict{String,Any}(
"fore_rmse" => fore_rmse,
"filt_rmse" => filt_rmse,
"post_rmse" => post_rmse,
"param_rmse" => para_rmse,
"fore_spread" => fore_spread,
"filt_spread" => filt_spread,
"post_spread" => post_spread,
"param_spread" => para_spread,
"iteration_sequence" => iteration_sequence,
"method" => method,
"seed" => seed,
"diffusion" => diffusion,
"dx_params" => dx_params,
"sys_dim" => sys_dim,
"obs_dim" => obs_dim,
"obs_un" => obs_un,
"γ" => γ,
"p_wlk" => p_wlk,
"p_infl" => p_infl,
"nanl" => nanl,
"tanl" => tanl,
"lag" => lag,
"shift" => shift,
"mda" => mda,
"h" => h,
"N_ens" => N_ens,
"s_infl" => round(s_infl, digits=2),
"p_infl" => round(p_infl, digits=2)
)
if haskey(ts, "diff_mat")
data["diff_mat"] = ts["diff_mat"]
end
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/" * method * "/"
name = method * "_" * model *
"_param_seed_" * lpad(seed, 4, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_sysD_" * lpad(sys_dim, 2, "0") *
"_obsD_" * lpad(obs_dim, 2, "0") *
"_obsU_" * rpad(obs_un, 4, "0") *
"_gamma_" * lpad(γ, 5, "0") *
"_paramE_" * rpad(p_err, 4, "0") *
"_paramW_" * rpad(p_wlk, 6, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_h_" * rpad(h, 4, "0") *
"_lag_" * lpad(lag, 3, "0") *
"_shift_" * lpad(shift, 3, "0") *
"_mda_" * string(mda) *
"_nens_" * lpad(N_ens, 3,"0") *
"_stateInfl_" * rpad(round(s_infl, digits=2), 4, "0") *
"_paramInfl_" * rpad(round(p_infl, digits=2), 4, "0") *
".jld2"
save(path * name, data)
print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
end
##############################################################################################
# end module
end
##############################################################################################
# NOTE STILL DEBUGGING THIS EXPERIMENT
#function single_iteration_adaptive_state(args::Tuple{String,String,Int64,Int64,Int64,Bool,Float64,Int64,
# Float64,Int64,Float64};tail::Int64=3)
#
# # time the experiment
# t1 = time()
#
# # Define experiment parameters
# time_series, method, seed, lag, shift, mda, obs_un, obs_dim, γ, N_ens, s_infl = args
#
# # load the timeseries and associated parameters
# ts = load(time_series)::Dict{String,Any}
# diffusion = ts["diffusion"]::Float64
# f = ts["F"]::Float64
# tanl = ts["tanl"]::Float64
# h = 0.01
# dx_dt = L96.dx_dt
# step_model! = rk4_step!
#
# # number of discrete forecast steps
# f_steps = convert(Int64, tanl / h)
#
# # number of discrete shift windows within the lag window
# n_shifts = convert(Int64, lag / shift)
#
# # number of analyses
# nanl = 2500
#
# # set seed
# Random.seed!(seed)
#
# # define the initial ensembles
# obs = ts["obs"]::Array{Float64, 2}
# init = obs[:, 1]
# sys_dim = length(init)
# ens = rand(MvNormal(init, I), N_ens)
#
# # define the observation sequence where we map the true state into the observation space and
# # perturb by white-in-time-and-space noise with standard deviation obs_un
# obs = obs[:, 1:nanl + 3 * lag + 1]
# truth = copy(obs)
#
# # define kwargs
# kwargs = Dict{String,Any}(
# "dx_dt" => dx_dt,
# "f_steps" => f_steps,
# "step_model" => step_model!,
# "dx_params" => [f],
# "h" => h,
# "diffusion" => diffusion,
# "γ" => γ,
# "shift" => shift,
# "mda" => mda
# )
#
# if method == "etks_adaptive"
# tail_spin = true
# kwargs["tail_spin"] = tail_spin
# kwargs["analysis"] = Array{Float64}(undef, sys_dim, N_ens, lag)
# kwargs["analysis_innovations"] = Array{Float64}(undef, sys_dim, lag)
# end
#
# # define the observation operator, observation error covariance and observations with error
# obs = H_obs(obs, obs_dim, kwargs)
# obs += obs_un * rand(Normal(), size(obs))
# obs_cov = obs_un^2.0 * I
#
# # create storage for the forecast and analysis statistics, indexed in relative time
# # the first index corresponds to time 1, last index corresponds to index nanl + 3 * lag + 1
# fore_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
# filt_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
# post_rmse = Vector{Float64}(undef, nanl + 3 * lag + 1)
#
# fore_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# filt_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
# post_spread = Vector{Float64}(undef, nanl + 3 * lag + 1)
#
# # perform an initial spin for the smoothed re-analyzed first prior estimate while handling
# # new observations with a filtering step to prevent divergence of the forecast for long lags
# spin = true
# kwargs["spin"] = spin
# posterior = Array{Float64}(undef, sys_dim, N_ens, shift)
# kwargs["posterior"] = posterior
#
# # we will run through nanl + 2 * lag total observations but discard the last-lag
# # forecast values and first-lag posterior values so that the statistics align on
# # the same time points after the spin
# for i in 2: shift : nanl + lag + 1
# # perform assimilation of the DAW
# # we use the observation window from current time +1 to current time +lag
# if mda
# # NOTE: mda spin weights are only designed for lag equal to an integer multiple of shift
# if spin
# # for the first rebalancing step, all observations are new and get fully assimilated
# # observation weights are given with respect to a special window in terms of the
# # number of times the observation will be assimilated
# obs_weights = []
# for n in 1:n_shifts
# obs_weights = [obs_weights; ones(shift) * n]
# end
# kwargs["obs_weights"] = Array{Float64}(obs_weights)
# kwargs["reb_weights"] = ones(lag)
#
# elseif i <= lag
# # if still processing observations from the spin cycle, deal with special weights
# # given by the number of times the observation is assimilated
# n_complete = (i - 2) / shift
# n_incomplete = n_shifts - n_complete
#
# # the leading terms have weights that are based upon the number of times
# # that the observation will be assimilated < n_shifts total times as in
# # the stable algorithm
# obs_weights = []
# for n in n_shifts - n_incomplete + 1 : n_shifts
# obs_weights = [obs_weights; ones(shift) * n]
# end
# for n in 1 : n_complete
# obs_weights = [obs_weights; ones(shift) * n_shifts]
# end
# kwargs["obs_weights"] = Array{Float64}(obs_weights)
# reb_weights = []
#
# # the rebalancing weights are specially constructed as above
# for n in 1:n_incomplete
# reb_weights = [reb_weights; ones(shift) * n / (n + n_complete)]
# end
# for n in n_incomplete + 1 : n_shifts
# reb_weights = [reb_weights; ones(shift) * n / n_shifts]
# end
# kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
#
# else
# # otherwise equal weights as all observations are assimilated n_shifts total times
# kwargs["obs_weights"] = ones(lag) * n_shifts
#
# # rebalancing weights are constructed in steady state
# reb_weights = []
# for n in 1:n_shifts
# reb_weights = [reb_weights; ones(shift) * n / n_shifts]
# end
# kwargs["reb_weights"] = 1.0 ./ Array{Float64}(reb_weights)
# end
# end
#
# # perform the analysis
# analysis = ls_smoother_single_iteration(method, ens, obs[:, i: i + lag - 1],
# obs_cov, s_infl, kwargs)
# ens = analysis["ens"]
# fore = analysis["fore"]
# filt = analysis["filt"]
# post = analysis["post"]
#
# if method == "etks_adaptive"
# # cycle the analysis states for the new DAW
# kwargs["analysis"] = analysis["anal"]
# if tail_spin
# # check if we have reached a long enough tail of innovation statistics
# analysis_innovations = analysis["inno"]
# if size(analysis_innovations, 2) / lag >= tail
# # if so, stop the tail spin
# tail_spin = false
# kwargs["tail_spin"] = tail_spin
# end
# end
# # cycle the analysis states for the new DAW
# kwargs["analysis_innovations"] = analysis["inno"]
# end
#
# if spin
# for j in 1:lag
# # compute forecast and filter statistics on the first lag states during spin period
# fore_rmse[i - 1 + j], fore_spread[i - 1 + j] = analyze_ens(fore[:, :, j],
# truth[:, i - 1 + j])
#
# filt_rmse[i - 1 + j], filt_spread[i - 1 + j] = analyze_ens(filt[:, :, j],
# truth[:, i - 1 + j])
# end
#
# for j in 1:shift
# # compute only the reanalyzed prior and the shift-forward forecasted reanalysis
# post_rmse[i - 2 + j], post_spread[i - 2 + j] = analyze_ens(post[:, :, j], truth[:, i - 2 + j])
# end
#
# # turn off the initial spin period, continue hereafter on the normal assimilation cycle
# spin = false
# kwargs["spin"] = spin
#
# else
# for j in 1:shift
# # compute the forecast, filter and analysis statistics
# # indices for the forecast, filter, analysis and truth arrays are in absolute time,
# # forecast / filter stats computed beyond the first lag period for the spin
# fore_rmse[i + lag - 1 - shift + j],
# fore_spread[i + lag - 1 - shift + j] = analyze_ens(fore[:, :, j],
# truth[:, i + lag - 1 - shift + j])
#
# filt_rmse[i + lag - 1 - shift + j],
# filt_spread[i + lag - 1 - shift + j] = analyze_ens(filt[:, :, j],
# truth[:, i + lag - 1 - shift + j])
#
# # analysis statistics computed beyond the first shift
# post_rmse[i - 2 + j], post_spread[i - 2 + j] = analyze_ens(post[:, :, j], truth[:, i - 2 + j])
# end
# end
# end
#
# # cut the statistics so that they align on the same absolute time points
# fore_rmse = fore_rmse[2: nanl + 1]
# fore_spread = fore_spread[2: nanl + 1]
# filt_rmse = filt_rmse[2: nanl + 1]
# filt_spread = filt_spread[2: nanl + 1]
# post_rmse = post_rmse[2: nanl + 1]
# post_spread = post_spread[2: nanl + 1]
#
# data = Dict{String,Any}(
# "fore_rmse" => fore_rmse,
# "filt_rmse" => filt_rmse,
# "post_rmse" => post_rmse,
# "fore_spread" => fore_spread,
# "filt_spread" => filt_spread,
# "post_spread" => post_spread,
# "method" => method,
# "seed" => seed,
# "diffusion" => diffusion,
# "sys_dim" => sys_dim,
# "obs_dim" => obs_dim,
# "obs_un" => obs_un,
# "γ" => γ,
# "nanl" => nanl,
# "tanl" => tanl,
# "lag" => lag,
# "shift" => shift,
# "mda" => mda,
# "h" => h,
# "N_ens" => N_ens,
# "s_infl" => round(s_infl, digits=2)
# )
#
# if method == "etks_adaptive"
# data["tail"] = tail
# end
#
# path = "../data/" * method * "_single_iteration/"
# name = method * "_single_iteration" *
# "_l96_state_benchmark_seed_" * lpad(seed, 4, "0") *
# "_diffusion_" * rpad(diffusion, 4, "0") *
# "_sys_dim_" * lpad(sys_dim, 2, "0") *
# "_obs_dim_" * lpad(obs_dim, 2, "0") *
# "_obs_un_" * rpad(obs_un, 4, "0") *
# "_gamma_" * lpad(γ, 5, "0") *
# "_nanl_" * lpad(nanl, 5, "0") *
# "_tanl_" * rpad(tanl, 4, "0") *
# "_h_" * rpad(h, 4, "0") *
# "_lag_" * lpad(lag, 3, "0") *
# "_shift_" * lpad(shift, 3, "0") *
# "_mda_" * string(mda) *
# "_N_ens_" * lpad(N_ens, 3,"0") *
# "_s_infl_" * rpad(round(s_infl, digits=2), 4, "0") *
# ".jld2"
#
#
# save(path * name, data)
# print("Runtime " * string(round((time() - t1) / 60.0, digits=4)) * " minutes\n")
#
#end
#
#
#########################################################################################################################
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
["Apache-2.0"] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 1081 |
##############################################################################################
module run_sensitivity_test
##############################################################################################
# imports and exports
using Distributed
@everywhere using DataAssimilationBenchmarks
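# NOTE: worker processes must be available before this script runs for pmap to
# distribute the configurations, e.g., by launching Julia with the `-p <n>` flag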
##############################################################################################
config = ParallelExperimentDriver.ensemble_filter_adaptive_inflation
print("Generating experiment configurations from " * string(config) * "\n")
print("Generate truth twin\n")
args, wrap_exp = config()
num_exps = length(args)
print("Configuration ready\n")
print("\n")
print("Running " * string(num_exps) * " configurations on " * string(nworkers()) *
" total workers\n")
print("Begin pmap\n")
pmap(wrap_exp, args)
print("Experiments completed, verify outputs in the appropriate directory under:\n")
print(pkgdir(DataAssimilationBenchmarks) * "/src/data\n")
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
["Apache-2.0"] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 6647 |
##############################################################################################
module DeSolvers
##############################################################################################
# imports and exports
using Random, Distributions
using ..DataAssimilationBenchmarks
export rk4_step!, tay2_step!, em_step!
##############################################################################################
"""
rk4_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
Steps model state with the four-stage Runge-Kutta scheme.
The rule has strong convergence order 1.0 for generic SDEs and order 4.0 for ODEs.
This method overwrites the input in-place and returns the updated
```
return x
```
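A minimal usage sketch, where the vector field and parameter values below are
illustrative assumptions rather than package defaults:
```
dx_dt(x, t, p) = -p["a"][1] * x
kwargs = Dict{String,Any}("h" => 0.01, "diffusion" => 0.0, "dx_dt" => dx_dt,
                          "dx_params" => Dict{String, Array{Float64}}("a" => [1.0]))
x = ones(2)
rk4_step!(x, 0.0, kwargs)
```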
"""
function rk4_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
# unpack the integration scheme arguments and the parameters of the derivative
h = kwargs["h"]::Float64
diffusion = kwargs["diffusion"]::Float64
dx_dt = kwargs["dx_dt"]::Function
if haskey(kwargs, "dx_params")
# get parameters for resolving dx_dt
dx_params = kwargs["dx_params"]::ParamDict(T)
end
# infer the (possibly) extended state dimension
sys_dim = length(x)
# check if extended state vector
if haskey(kwargs, "param_sample")
param_sample = kwargs["param_sample"]::ParamSample
state_dim = kwargs["state_dim"]::Int64
v = @view x[begin: state_dim]
param_est = true
else
# the state dim equals the system dim
state_dim = sys_dim
v = @view x[begin: state_dim]
param_est = false
end
# check if SDE formulation
if diffusion != 0.0
if haskey(kwargs, "ξ")
# pre-computed perturbation is provided for controlled run
ξ = kwargs["ξ"]::Array{Float64,2}
else
            # generate a fresh perturbation for the Brownian motion when it is not
            # necessary to reproduce a controlled run
ξ = rand(Normal(), state_dim)
end
if haskey(kwargs, "diff_mat")
# diffusion is a scalar intensity which is applied to the
# structure matrix for the diffusion coefficients
diff_mat = kwargs["diff_mat"]::Array{Float64}
diffusion = diffusion * diff_mat
end
# rescale the standard normal to variance h for Wiener process
W = ξ * sqrt(h)
end
# load parameter values from the extended state into the derivative
if param_est
if haskey(kwargs, "dx_params")
# extract the parameter sample and append to other derivative parameters
for key in keys(param_sample)
dx_params = merge(dx_params, Dict(key => x[param_sample[key][1]]))
end
else
# set the parameter sample as the only derivative parameters
            dx_params = Dict{String, Array{T}}()
for key in keys(param_sample)
dx_params = merge(dx_params, Dict(key => x[param_sample[key][1]]))
end
end
end
# pre-allocate storage for the Runge-Kutta scheme
κ = Array{T}(undef, state_dim, 4)
# terms of the RK scheme recursively evolve the dynamic state components alone
if diffusion != 0.0
# SDE formulation
κ[:, 1] = dx_dt(v, t, dx_params) * h + diffusion * W
κ[:, 2] = dx_dt(v + 0.5 * κ[:, 1], t + 0.5 * h, dx_params) * h + diffusion * W
κ[:, 3] = dx_dt(v + 0.5 * κ[:, 2], t + 0.5 * h, dx_params) * h + diffusion * W
κ[:, 4] = dx_dt(v + κ[:, 3], t + h, dx_params) * h + diffusion * W
else
# deterministic formulation
κ[:, 1] = dx_dt(v, t, dx_params) * h
κ[:, 2] = dx_dt(v + 0.5 * κ[:, 1], t + 0.5 * h, dx_params) * h
κ[:, 3] = dx_dt(v + 0.5 * κ[:, 2], t + 0.5 * h, dx_params) * h
κ[:, 4] = dx_dt(v + κ[:, 3], t + h, dx_params) * h
end
# compute the update to the dynamic variables
x[begin: state_dim] = v + (1.0 / 6.0) * (κ[:, 1] + 2.0*κ[:, 2] + 2.0*κ[:, 3] + κ[:, 4])
return x
end
##############################################################################################
"""
tay2_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T<: Real
Steps model state with the deterministic second order autonomous Taylor method.
This method has order 2.0 convergence for autonomous ODEs.
The time variable `t` is a dummy argument, as this method is not defined for non-autonomous
dynamics. This overwrites the input in-place and returns the updated
```
return x
```
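A minimal usage sketch with an illustrative autonomous vector field and its Jacobian;
the names and values below are assumptions for demonstration only:
```
using LinearAlgebra
dx_dt(x, t, p) = -p["a"][1] * x
jacobian(x, t, p) = -p["a"][1] * Matrix(1.0I, length(x), length(x))
kwargs = Dict{String,Any}("h" => 0.01, "dx_dt" => dx_dt, "jacobian" => jacobian,
                          "dx_params" => Dict{String, Array{Float64}}("a" => [1.0]))
x = ones(2)
tay2_step!(x, 0.0, kwargs)
```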
"""
function tay2_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
# unpack dx_params
h = kwargs["h"]::Float64
dx_params = kwargs["dx_params"]::ParamDict(T)
dx_dt = kwargs["dx_dt"]::Function
jacobian = kwargs["jacobian"]::Function
# calculate the evolution of x one step forward via the second order Taylor expansion
# first derivative
dx = dx_dt(x, t, dx_params)
# second order taylor expansion
x .= x + dx * h + 0.5 * jacobian(x, t, dx_params) * dx * h^2.0
return x
end
##############################################################################################
"""
em_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
Steps model state with the Euler-Maruyama scheme.
This method has order 1.0 convergence for ODEs and for SDEs with additive noise, though it
has inferior performance to the four-stage Runge-Kutta scheme when the amplitude of the SDE
noise perturbations is small to moderately large.
This overwrites the input in-place and returns the updated
```
return x
```
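A minimal usage sketch with additive noise; the vector field and values below are
illustrative assumptions rather than package defaults:
```
dx_dt(x, t, p) = -p["a"][1] * x
kwargs = Dict{String,Any}("h" => 0.01, "diffusion" => 0.1, "dx_dt" => dx_dt,
                          "dx_params" => Dict{String, Array{Float64}}("a" => [1.0]))
x = ones(2)
em_step!(x, 0.0, kwargs)
```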
"""
function em_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
# unpack the arguments for the integration step
h = kwargs["h"]::Float64
dx_params = kwargs["dx_params"]::ParamDict(T)
diffusion = kwargs["diffusion"]::Float64
dx_dt = kwargs["dx_dt"]::Function
state_dim = length(x)
# check if SDE or deterministic formulation
if diffusion != 0.0
if haskey(kwargs, "ξ")
# pre-computed perturbation is provided for controlled run
ξ = kwargs["ξ"]::Array{Float64,2}
else
            # generate a fresh perturbation for the Brownian motion when it is not
            # necessary to reproduce a controlled run
ξ = rand(Normal(), state_dim)
end
else
# if deterministic Euler, load dummy ξ of zeros
ξ = zeros(state_dim)
end
# rescale the standard normal to variance h for Wiener process
W = ξ * sqrt(h)
# step forward by interval h
x .= x + h * dx_dt(x, t, dx_params) + diffusion * W
return x
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
["Apache-2.0"] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 122010 |
##############################################################################################
module EnsembleKalmanSchemes
##############################################################################################
# imports and exports
using Random, Distributions, Statistics, StatsBase
using LinearAlgebra, SparseArrays
using ..DataAssimilationBenchmarks
using Optim, LineSearches
export analyze_ens, analyze_ens_param, rand_orth, inflate_state!, inflate_param!,
transform_R, ens_gauss_newton,
square_root, square_root_inv,
ensemble_filter, ls_smoother_classic,
ls_smoother_single_iteration, ls_smoother_gauss_newton
##############################################################################################
# Main methods, debugged and validated
##############################################################################################
"""
analyze_ens(ens::ArView(T), truth::VecA(T)) where T <: Float64
Computes the ensemble state RMSE as compared with truth twin, and the ensemble spread.
```
return rmse, spread
```
Note: the ensemble `ens` should only include the state vector components to compare with the
truth twin state vector `truth`, without replicates of the model parameters. These can be
passed as an [`ArView`](@ref) for efficient memory usage.
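A small hand-computed check for illustration:
```
ens = [1.0 2.0 3.0; 1.0 2.0 3.0]       # two states, three ensemble members
rmse, spread = analyze_ens(ens, [2.0, 2.0])
# rmse == 0.0 as the ensemble mean equals the truth, and spread == 1.0
```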
"""
function analyze_ens(ens::ArView(T), truth::VecA(T)) where T <: Float64
# infer the shapes
sys_dim, N_ens = size(ens)
# compute the ensemble mean
x_bar = mean(ens, dims=2)
# compute the RMSE of the ensemble mean
rmse = rmsd(truth, x_bar)
    # compute the spread as in Whitaker & Loughe 1998 by the standard deviation
# of the mean square deviation of the ensemble from its mean
spread = sqrt( ( 1.0 / (N_ens - 1.0) ) * sum(mean((ens .- x_bar).^2.0, dims=1)))
# return the tuple pair
rmse, spread
end
##############################################################################################
"""
analyze_ens_param(ens::ArView(T), truth::VecA(T)) where T <: Float64
Computes the ensemble parameter RMSE as compared with truth twin, and the ensemble spread.
```
return rmse, spread
```
Note: the ensemble `ens` should only include the extended state vector components
consisting of model parameter replicates to compare with the truth twin's governing
model parameters `truth`. These can be passed as an [`ArView`](@ref) for
efficient memory usage.
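A small hand-computed check for illustration:
```
ens = [2.0 2.0 2.0; 4.0 4.0 4.0]       # two parameter replicates, three members
rmse, spread = analyze_ens_param(ens, [2.0, 4.0])
# both are 0.0 as every replicate matches the truth exactly
```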
"""
function analyze_ens_param(ens::ArView(T), truth::VecA(T)) where T <: Float64
# infer the shapes
param_dim, N_ens = size(ens)
# compute the ensemble mean
x_bar = mean(ens, dims=2)
# compute the RMSE of relative to the magnitude of the parameter
rmse = sqrt( mean( (truth - x_bar).^2.0 ./ truth.^2.0 ) )
    # compute the spread as in Whitaker & Loughe 1998 by the standard deviation
# of the mean square deviation of the ensemble from its mean,
# with the weight by the size of the parameter square
spread = sqrt( ( 1.0 / (N_ens - 1.0) ) *
sum(mean( (ens .- x_bar).^2.0 ./
(ones(param_dim, N_ens) .* truth.^2.0), dims=1)))
# return the tuple pair
rmse, spread
end
##############################################################################################
"""
rand_orth(N_ens::Int64)
This generates a random, mean-preserving, orthogonal matrix as in [Sakov et al.
2008](https://journals.ametsoc.org/view/journals/mwre/136/3/2007mwr2021.1.xml), depending on
the ensemble size `N_ens`.
```
return U
```
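An illustrative check of the defining properties:
```
U = rand_orth(5)
U * ones(5) ≈ ones(5)               # mean-preserving
U' * U ≈ Matrix(1.0I, 5, 5)         # orthogonal
```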
"""
function rand_orth(N_ens::Int64)
# generate the random, mean preserving orthogonal transformation within the
# basis given by the B matrix
Q = rand(Normal(), N_ens - 1, N_ens - 1)
Q, R = qr!(Q)
U_p = zeros(N_ens, N_ens)
U_p[1, 1] = 1.0
U_p[2:end, 2:end] = Q
# generate the B basis for which the first basis vector is the vector of 1/sqrt(N)
b_1 = ones(N_ens) / sqrt(N_ens)
B = zeros(N_ens, N_ens)
B[:, 1] = b_1
# note, this uses the "full" QR decomposition so that the singularity is encoded in R
# and B is a full-size orthogonal matrix
B, R = qr!(B)
U = B * U_p * transpose(B)
U
end
##############################################################################################
"""
inflate_state!(ens::ArView(T), inflation::Float64, sys_dim::Int64,
state_dim::Int64) where T <: Float64
Applies multiplicative covariance inflation to the state components of the ensemble matrix.
```
return ens
```
The first index of the ensemble matrix `ens` corresponds to the length `sys_dim` (extended)
state dimension while the second index corresponds to the ensemble dimension. Dynamic state
variables are assumed to be in the leading `state_dim` rows of `ens`, while extended state
parameter replicates are after. Multiplicative inflation is performed only in the leading
components of the ensemble anomalies from the ensemble mean, in-place in memory.
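An illustrative call, assuming a hypothetical extended ensemble with two state
components and one trailing parameter replicate row:
```
ens = randn(3, 10)
inflate_state!(ens, 1.05, 3, 2)     # inflates only the anomalies of rows 1:2
```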
"""
function inflate_state!(ens::ArView(T), inflation::Float64, sys_dim::Int64,
state_dim::Int64) where T <: Float64
    # return the ensemble unchanged when no inflation is applied, mirroring inflate_param!
    if inflation == 1.0
        return ens
    else
        x_mean = mean(ens, dims=2)
        X = ens .- x_mean
        infl = Matrix(1.0I, sys_dim, sys_dim)
        infl[1:state_dim, 1:state_dim] .*= inflation
        ens .= x_mean .+ infl * X
        return ens
    end
end
##############################################################################################
"""
inflate_param!(ens::ArView(T), inflation::Float64, sys_dim::Int64,
state_dim::Int64) where T <: Float64
Applies multiplicative covariance inflation to parameter replicates in the ensemble matrix.
```
return ens
```
The first index of the ensemble matrix `ens` corresponds to the length `sys_dim` (extended)
state dimension while the second index corresponds to the ensemble dimension. Dynamic state
variables are assumed to be in the leading `state_dim` rows of `ens`, while extended state
parameter replicates are after. Multiplicative inflation is performed only in the trailing
`state_dim + 1: state_dim` components of the ensemble anomalies from the ensemble mean,
in-place in memory.
"""
function inflate_param!(ens::ArView(T), inflation::Float64, sys_dim::Int64,
state_dim::Int64) where T <: Float64
if inflation == 1.0
return ens
else
x_mean = mean(ens, dims=2)
X = ens .- x_mean
infl = Matrix(1.0I, sys_dim, sys_dim)
infl[state_dim+1: end, state_dim+1: end] .*= inflation
ens .= x_mean .+ infl * X
return ens
end
end
##############################################################################################
"""
square_root(M::CovM(T)) where T <: Real
Computes the square root of covariance matrices with parametric type.
Multiple dispatches for the method are defined according to the sub-type of [`CovM`](@ref),
where the square roots of `UniformScaling` and `Diagonal` covariance matrices are computed
directly, while the square roots of the more general class of `Symmetric` covariance
matrices are computed via the singular value decomposition, for stability and accuracy
for close-to-singular matrices.
```
return S
```
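Illustrative calls over each covariance sub-type:
```
square_root(2.0I)                            # UniformScaling dispatch
square_root(Diagonal([4.0, 9.0]))            # Diagonal dispatch
square_root(Symmetric([2.0 0.0; 0.0 2.0]))   # Symmetric dispatch, via the SVD
```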
"""
function square_root(M::UniformScaling{T}) where T <: Real
S = M^0.5
end
function square_root(M::Diagonal{T, Vector{T}}) where T <: Real
S = sqrt(M)
end
function square_root(M::Symmetric{T, Matrix{T}}) where T <: Real
F = svd(M)
S = Symmetric(F.U * Diagonal(sqrt.(F.S)) * F.Vt)
end
##############################################################################################
"""
square_root_inv(M::CovM(T); sq_rt::Bool=false, inverse::Bool=false,
full::Bool=false) where T <: Real
Computes the square root inverse of covariance matrices with parametric type.
Multiple dispatches for the method are defined according to the sub-type of [`CovM`](@ref),
where the square root inverses of `UniformScaling` and `Diagonal` covariance matrices
are computed directly, while the square root inverses of the more general class of
`Symmetric` covariance matrices are computed via the singular value decomposition, for
stability and accuracy for close-to-singular matrices. This will optionally return a
computation of the inverse and the square root itself all as a byproduct of the singular
value decomposition for efficient numerical computation of ensemble
analysis / update routines.
Optional keyword arguments are specified as:
* `sq_rt=true` returns the matrix square root in addition to the square root inverse
* `inverse=true` returns the matrix inverse in addition to the square root inverse
* `full=true` returns the square root and the matrix inverse in addition to the square
root inverse
and are evaluated in the above order.
Output follows control flow:
```
if sq_rt
return S_inv, S
elseif inverse
return S_inv, M_inv
elseif full
return S_inv, S, M_inv
else
return S_inv
end
```
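Illustrative calls showing the optional outputs:
```
S_inv = square_root_inv(Diagonal([4.0, 9.0]))
S_inv, S = square_root_inv(Diagonal([4.0, 9.0]), sq_rt=true)
S_inv, S, M_inv = square_root_inv(Diagonal([4.0, 9.0]), full=true)
```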
"""
function square_root_inv(M::UniformScaling{T}; sq_rt::Bool=false, inverse::Bool=false,
full::Bool=false) where T <: Real
if sq_rt
S = M^0.5
S_inv = S^(-1.0)
S_inv, S
elseif inverse
M_inv = M^(-1.0)
S_inv = M_inv^0.5
S_inv, M_inv
elseif full
M_inv = M^(-1.0)
S = M^0.5
S_inv = S^(-1.0)
S_inv, S, M_inv
else
S_inv = M^(-0.5)
S_inv
end
end
function square_root_inv(M::Diagonal{T, Vector{T}}; sq_rt::Bool=false, inverse::Bool=false,
full::Bool=false) where T <: Real
if sq_rt
S = sqrt(M)
S_inv = inv(S)
S_inv, S
elseif inverse
M_inv = inv(M)
S_inv = sqrt(M_inv)
S_inv, M_inv
elseif full
S = sqrt(M)
S_inv = inv(S)
M_inv = inv(M)
        S_inv, S, M_inv
else
        S_inv = inv(sqrt(M))
S_inv
end
end
function square_root_inv(M::Symmetric{T, Matrix{T}}; sq_rt::Bool=false, inverse::Bool=false,
full::Bool=false) where T <: Real
# stable square root inverse for close-to-singular inverse calculations
F = svd(M)
if sq_rt
# take advantage of the SVD calculation to produce both the square root inverse
# and square root simultaneously
S_inv = Symmetric(F.U * Diagonal(1.0 ./ sqrt.(F.S)) * F.Vt)
S = Symmetric(F.U * Diagonal(sqrt.(F.S)) * F.Vt)
S_inv, S
elseif inverse
# take advantage of the SVD calculation to produce the square root inverse
# and inverse calculations all at once
S_inv = Symmetric(F.U * Diagonal(1.0 ./ sqrt.(F.S)) * F.Vt)
M_inv = Symmetric(F.U * Diagonal(1.0 ./ F.S) * F.Vt)
S_inv, M_inv
elseif full
# take advantage of the SVD calculation to produce the square root inverse,
# square root and inverse calculations all at once
S_inv = Symmetric(F.U * Diagonal(1.0 ./ sqrt.(F.S)) * F.Vt)
S = Symmetric(F.U * Diagonal(sqrt.(F.S)) * F.Vt)
M_inv = Symmetric(F.U * Diagonal(1.0 ./ F.S) * F.Vt)
S_inv, S, M_inv
else
# only return the square root inverse, if other calculations are not necessary
S_inv = Symmetric(F.U * Diagonal(1.0 ./ sqrt.(F.S)) * F.Vt)
S_inv
end
end
##############################################################################################
"""
transform_R(analysis::String, ens::ArView(T), obs::VecA(T), H_obs::Function,
                obs_cov::CovM(T), kwargs::StepKwargs; conditioning::ConM(T)=1000.0I,
m_err::ArView(T)=(1.0 ./ zeros(1,1)),
tol::Float64 = 0.0001,
j_max::Int64=40,
Q::CovM(T)=1.0I) where T <: Float64
Computes the ensemble transform and related values for various flavors of ensemble
Kalman schemes. The output type is a tuple containing a right transform of the ensemble
anomalies, the weights for the mean update and a random orthogonal transformation
for stabilization:
```
return trans, w, U
```
where the tuple is of type [`TransM`](@ref).
`m_err`, `tol`, `j_max`, `Q` are optional arguments depending on the `analysis`, with
default values provided.
Serves as an auxiliary function for EnKF, ETKF(-N), EnKS, ETKS(-N), where
"analysis" is a string which determines the type of transform update. The observation
error covariance `obs_cov` is of type [`CovM`](@ref), the conditioning matrix `conditioning`
is of type [`ConM`](@ref), the keyword arguments dictionary `kwargs` is of type
[`StepKwargs`](@ref) and the model error covariance matrix `Q` is of type [`CovM`](@ref).
Currently validated `analysis` options:
* `analysis=="etkf" || analysis=="etks"` computes the deterministic ensemble transform
as in the ETKF described in [Grudzien et al.
2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
* `analysis[1:7]=="mlef-ls" || analysis[1:7]=="mles-ls"` computes the maximum likelihood
ensemble filter transform described in [Grudzien et al.
2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html),
optimizing the nonlinear
cost function with Newton-based
[line searches](https://julianlsolvers.github.io/LineSearches.jl/stable/).
* `analysis[1:4]=="mlef" || analysis[1:4]=="mles"` computes the maximum likelihood
ensemble filter transform described in
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html),
optimizing the nonlinear
cost function with simple Newton-based scheme.
* `analysis=="enkf-n-dual" || analysis=="enks-n-dual"`
computes the dual form of the EnKF-N transform as in [Bocquet et al.
2015](https://npg.copernicus.org/articles/22/645/2015/)
Note: this cannot be used with the nonlinear observation operator.
This uses the Brent method for the argmin problem as this
has been more reliable at finding a global minimum than Newton optimization.
* `analysis=="enkf-n-primal" || analysis=="enks-n-primal"`
computes the primal form of the EnKF-N transform as in [Bocquet et al.
2015](https://npg.copernicus.org/articles/22/645/2015/),
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
This differs from the MLEF/S-N in that there is no approximate linearization of
the observation operator in the EnKF-N, this only handles the approximation error
with respect to the adaptive inflation. This uses a simple Newton-based
minimization of the cost function for the adaptive inflation.
* `analysis=="enkf-n-primal-ls" || analysis=="enks-n-primal-ls"`
computes the primal form of the EnKF-N transform as in [Bocquet et al.
2015](https://npg.copernicus.org/articles/22/645/2015/),
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
This differs from the MLEF/S-N in that there is no approximate linearization of
the observation operator in the EnKF-N; this only handles the approximation error
with respect to the adaptive inflation. This uses a Newton-based
minimization of the cost function for the adaptive inflation with
[line searches](https://julianlsolvers.github.io/LineSearches.jl/stable/).
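A minimal call sketch for the deterministic ETKF option is given below; the
observation operator, dimensions and data are illustrative placeholders rather than
package defaults, with `LinearAlgebra` and `Random` assumed to be loaded and a
`Dict{String,Any}` assumed compatible with [`StepKwargs`](@ref):
```
sys_dim, N_ens, obs_dim = 10, 15, 5
ens = randn(sys_dim, N_ens)
obs = randn(obs_dim)
# hypothetical linear operator matching the H_obs(ens, obs_dim, kwargs) call signature
H_obs(x, d, kwargs) = x[1:d, :]
obs_cov = Symmetric(Matrix(0.25I, obs_dim, obs_dim))
kwargs = Dict{String,Any}()
trans, w, U = transform_R("etkf", ens, obs, H_obs, obs_cov, kwargs)
```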
"""
function transform_R(analysis::String, ens::ArView(T), obs::VecA(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs; conditioning::ConM(T)=1000.0I,
m_err::ArView(T)=(1.0 ./ zeros(1,1)),
tol::Float64 = 0.0001,
j_max::Int64=40,
Q::CovM(T)=1.0I) where T <: Float64
if analysis=="etkf" || analysis=="etks"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: compute the ensemble in observation space
Y = H_obs(ens, obs_dim, kwargs)
# step 2: compute the ensemble mean in observation space
y_mean = mean(Y, dims=2)
# step 3: compute the sensitivity matrix in observation space
obs_sqrt_inv = square_root_inv(obs_cov)
S = obs_sqrt_inv * (Y .- y_mean )
# step 4: compute the weighted innovation
δ = obs_sqrt_inv * ( obs - y_mean )
# step 5: compute the approximate hessian
hessian = Symmetric((N_ens - 1.0)*I + transpose(S) * S)
# step 6: compute the transform matrix, transform matrix inverse and
# hessian inverse simultaneously via the SVD for stability
trans, hessian_inv = square_root_inv(hessian, inverse=true)
# step 7: compute the analysis weights
w = hessian_inv * transpose(S) * δ
# step 8: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis[1:7]=="mlef-ls" || analysis[1:7]=="mles-ls"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: set up inputs for the optimization
# step 1a: initial choice is no change to the mean state
ens_mean_0 = mean(ens, dims=2)
anom_0 = ens .- ens_mean_0
w = zeros(N_ens)
# step 1b: pre-compute the observation error covariance square root
obs_sqrt_inv = square_root_inv(obs_cov)
# step 1c: define the conditioning and parameters for finite size formalism if needed
if analysis[end-5:end] == "bundle"
trans = inv(conditioning)
trans_inv = conditioning
elseif analysis[end-8:end] == "transform"
trans = Symmetric(Matrix(1.0*I, N_ens, N_ens))
trans_inv = Symmetric(Matrix(1.0*I, N_ens, N_ens))
end
if analysis[8:9] == "-n"
# define the epsilon scaling and the effective ensemble size if finite size form
ϵ_N = 1.0 + (1.0 / N_ens)
N_effective = N_ens + 1.0
end
# step 1d: define the storage of the gradient and Hessian as global to the functions
grad_w = Vector{Float64}(undef, N_ens)
hess_w = Array{Float64}(undef, N_ens, N_ens)
cost_w = 0.0
# step 2: define the cost / gradient / hessian function to avoid repeated computations
function fgh!(G, H, C, trans::ConM(T1), trans_inv::ConM(T1),
w::Vector{T1}) where T1 <: Float64
# step 2a: define the linearization of the observation operator
ens_mean_iter = ens_mean_0 + anom_0 * w
ens = ens_mean_iter .+ anom_0 * trans
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2b: compute the weighted anomalies in observation space, conditioned
# with trans inverse
S = obs_sqrt_inv * (Y .- y_mean) * trans_inv
# step 2c: compute the weighted innovation
δ = obs_sqrt_inv * (obs - y_mean)
# step 2d: gradient, hessian and cost function definitions
if G != nothing
if analysis[8:9] == "-n"
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
G[:] = N_effective * ζ * w - transpose(S) * δ
else
G[:] = (N_ens - 1.0) * w - transpose(S) * δ
end
end
if H != nothing
if analysis[8:9] == "-n"
H .= Symmetric((N_effective - 1.0)*I + transpose(S) * S)
else
H .= Symmetric((N_ens - 1.0)*I + transpose(S) * S)
end
end
if C != nothing
if analysis[8:9] == "-n"
y_mean_iter = H_obs(ens_mean_iter, obs_dim, kwargs)
δ = obs_sqrt_inv * (obs - y_mean_iter)
return N_effective * log(ϵ_N + sum(w.^2.0)) + sum(δ.^2.0)
else
y_mean_iter = H_obs(ens_mean_iter, obs_dim, kwargs)
δ = obs_sqrt_inv * (obs - y_mean_iter)
return (N_ens - 1.0) * sum(w.^2.0) + sum(δ.^2.0)
end
end
nothing
end
function newton_ls!(grad_w, hess_w, trans::ConM(T1), trans_inv::ConM(T1),
w::Vector{T1}, linesearch) where T1 <: Float64
# step 2e: find the Newton direction and the transform update if needed
fx = fgh!(grad_w, hess_w, cost_w, trans, trans_inv, w)
p = -hess_w \ grad_w
if analysis[end-8:end] == "transform"
trans_tmp, trans_inv_tmp = square_root_inv(Symmetric(hess_w), sq_rt=true)
trans .= trans_tmp
trans_inv .= trans_inv_tmp
end
# step 2f: univariate line search functions
ϕ(α) = fgh!(nothing, nothing, cost_w, trans, trans_inv, w .+ α.*p)
function dϕ(α)
fgh!(grad_w, nothing, nothing, trans, trans_inv, w .+ α.*p)
return dot(grad_w, p)
end
function ϕdϕ(α)
phi = fgh!(grad_w, nothing, cost_w, trans, trans_inv, w .+ α.*p)
dphi = dot(grad_w, p)
return (phi, dphi)
end
# step 2g: define the linesearch
dϕ_0 = dot(p, grad_w)
α, fx = linesearch(ϕ, dϕ, ϕdϕ, 1.0, fx, dϕ_0)
Δw = α * p
w .= w + Δw
return Δw
end
# step 3: optimize
# step 3a: perform the optimization by Newton with linesearch
# we use StrongWolfe for RMSE performance as the default linesearch
# method, an alternative choice is commented below
#ln_search = HagerZhang()
ln_search = StrongWolfe()
j = 0
Δw = ones(N_ens)
while j < j_max && norm(Δw) > tol
    Δw = newton_ls!(grad_w, hess_w, trans, trans_inv, w, ln_search)
    j += 1
end
if analysis[8:9] == "-n"
# perform a final inflation with the finite size cost function
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
hess_w = ζ * I - 2.0 * ζ^2.0 * w * transpose(w)
hess_w = Symmetric(transpose(S) * S + (N_ens + 1.0) * hess_w)
trans = square_root_inv(hess_w)
elseif analysis[end-5:end] == "bundle"
trans = square_root_inv(hess_w)
end
# step 3b: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis[1:4]=="mlef" || analysis[1:4]=="mles"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: set up the optimization, initial choice is no change to the mean state
ens_mean_0 = mean(ens, dims=2)
anom_0 = ens .- ens_mean_0
w = zeros(N_ens)
# pre-compute the observation error covariance square root
obs_sqrt_inv = square_root_inv(obs_cov)
# pre-allocate these variables outside the scope of the while loop
grad_w = Vector{Float64}(undef, N_ens)
hess_w = Array{Float64}(undef, N_ens, N_ens)
S = Array{Float64}(undef, obs_dim, N_ens)
ens_mean_iter = copy(ens_mean_0)
# define the conditioning
if analysis[end-5:end] == "bundle"
trans = inv(conditioning)
trans_inv = conditioning
elseif analysis[end-8:end] == "transform"
trans = 1.0*I
trans_inv = 1.0*I
end
# step 2: perform the optimization by simple Newton
j = 0
if analysis[5:6] == "-n"
# define the epsilon scaling and the effective ensemble size if finite size form
ϵ_N = 1.0 + (1.0 / N_ens)
N_effective = N_ens + 1.0
end
while j < j_max
# step 2a: compute the observed ensemble and ensemble mean
ens_mean_iter = ens_mean_0 + anom_0 * w
ens = ens_mean_iter .+ anom_0 * trans
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2b: compute the weighted anomalies in observation space, conditioned
# with trans inverse
S = obs_sqrt_inv * (Y .- y_mean) * trans_inv
# step 2c: compute the weighted innovation
δ = obs_sqrt_inv * (obs - y_mean)
# step 2d: compute the gradient and hessian
if analysis[5:6] == "-n"
# for the finite-size formalism, we follow the IEnKS-N convention where
# the gradient is computed with the finite-size cost function but we use the
# usual hessian, with the effective ensemble size
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
grad_w = N_effective * ζ * w - transpose(S) * δ
hess_w = Symmetric((N_effective - 1.0)*I + transpose(S) * S)
else
grad_w = (N_ens - 1.0) * w - transpose(S) * δ
hess_w = Symmetric((N_ens - 1.0)*I + transpose(S) * S)
end
# step 2e: perform Newton approximation, simultaneously computing
# the update transform trans with the SVD based inverse at once
if analysis[end-8:end] == "transform"
trans, trans_inv, hessian_inv = square_root_inv(hess_w, full=true)
Δw = hessian_inv * grad_w
else
Δw = hess_w \ grad_w
end
# 2f: update the weights
w -= Δw
if norm(Δw) < tol
break
else
# step 2g: update the iterative mean state
j+=1
end
end
if analysis[5:6] == "-n"
# perform a final inflation with the finite size cost function
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
hess_w = ζ * I - 2.0 * ζ^2.0 * w * transpose(w)
hess_w = Symmetric(transpose(S) * S + (N_ens + 1.0) * hess_w)
trans = square_root_inv(hess_w)
elseif analysis[end-5:end] == "bundle"
trans = square_root_inv(hess_w)
end
# step 3: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis=="etkf-sqrt-core" || analysis=="etks-sqrt-core"
### NOTE: STILL DEVELOPMENT CODE, NOT DEBUGGED
# needs to be revised for the calculation with unweighted anomalies
# Uses the contribution of the model error covariance matrix Q
# in the square root as in Raanes et al. 2015
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: compute the ensemble mean
x_mean = mean(ens, dims=2)
# step 2a: compute the normalized anomalies
A = (ens .- x_mean) / sqrt(N_ens - 1.0)
# step 2b: compute the SVD for the two-sided projected model error covariance
F = svd(A)
Σ_inv = Diagonal([1.0 ./ F.S[1:N_ens-1]; 0.0])
p_inv = F.V * Σ_inv * transpose(F.U)
# compute the model error covariance contribution, projected with the pseudo-inverse
G = Symmetric(1.0I + (N_ens - 1.0) * p_inv * Q * transpose(p_inv))
# step 2c: compute the model error adjusted anomalies
A = A * square_root(G)
# step 3: compute the ensemble in observation space
Y = H_obs(ens, obs_dim, kwargs)
# step 4: compute the ensemble mean in observation space
y_mean = mean(Y, dims=2)
# step 5: compute the weighted anomalies in observation space
# first we find the observation error covariance inverse
obs_sqrt_inv = square_root_inv(obs_cov)
# then compute the weighted anomalies
S = (Y .- y_mean) / sqrt(N_ens - 1.0)
S = obs_sqrt_inv * S
# step 6: compute the weighted innovation
δ = obs_sqrt_inv * ( obs - y_mean )
# step 7: compute the transform matrix
trans = inv(Symmetric(1.0I + transpose(S) * S))
# step 8: compute the analysis weights
w = trans * transpose(S) * δ
# step 9: compute the square root of the transform
trans = sqrt(trans)
# step 10: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis=="enkf-n-dual" || analysis=="enks-n-dual"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: compute the observed ensemble and ensemble mean
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2: compute the weighted anomalies in observation space
# first we find the observation error covariance inverse
obs_sqrt_inv = square_root_inv(obs_cov)
# then compute the sensitivity matrix in observation space
S = obs_sqrt_inv * (Y .- y_mean)
# step 3: compute the weighted innovation
δ = obs_sqrt_inv * (obs - y_mean)
# step 4: compute the SVD for the simplified cost function, gauge weights and range
F = svd(S)
ϵ_N = 1.0 + (1.0 / N_ens)
ζ_l = 0.000001
ζ_u = (N_ens + 1.0) / ϵ_N
# step 5: define the dual cost function derived in singular value form
function D(ζ::Float64)
cost = I - (F.U * Diagonal( F.S.^2.0 ./ (ζ .+ F.S.^2.0) ) * transpose(F.U) )
cost = transpose(δ) * cost * δ .+ ϵ_N * ζ .+
(N_ens + 1.0) * log((N_ens + 1.0) / ζ) .- (N_ens + 1.0)
cost[1]
end
# The below is defined for possible Hessian-based minimization
# NOTE: standard Brent's method appears to be more reliable at finding a
# global minimizer with some basic tests, may be tested further
#
#function D_v(ζ::Vector{Float64})
# ζ = ζ[1]
# cost = I - (F.U * Diagonal( F.S.^2.0 ./ (ζ .+ F.S.^2.0) ) * transpose(F.U) )
# cost = transpose(δ) * cost * δ .+ ϵ_N * ζ .+
# (N_ens + 1.0) * log((N_ens + 1.0) / ζ) .- (N_ens + 1.0)
# cost[1]
#end
#function D_prime!(storage::Vector{Float64}, ζ::Vector{Float64})
# ζ = ζ[1]
# grad = transpose(δ) * F.U * Diagonal( - F.S.^2.0 .* (ζ .+ F.S.^2.0).^(-2.0) ) *
# transpose(F.U) * δ
# storage[:, :] = grad .+ ϵ_N .- (N_ens + 1.0) / ζ
#end
#function D_hess!(storage::Array{Float64}, ζ::Vector{Float64})
# ζ = ζ[1]
# hess = transpose(δ) * F.U *
# Diagonal( 2.0 * F.S.^2.0 .* (ζ .+ F.S.^2.0).^(-3.0) ) * transpose(F.U) * δ
# storage[:, :] = hess .+ (N_ens + 1.0) * ζ^(-2.0)
#end
#lx = [ζ_l]
#ux = [ζ_u]
#ζ_0 = [(ζ_u + ζ_l)/2.0]
#df = TwiceDifferentiable(D_v, D_prime!, D_hess!, ζ_0)
#dfc = TwiceDifferentiableConstraints(lx, ux)
#ζ_b = optimize(D_v, D_prime!, D_hess!, ζ_0)
# step 6: find the argmin
ζ_a = optimize(D, ζ_l, ζ_u)
diag_vals = ζ_a.minimizer .+ F.S.^2.0
# step 7: compute the update weights
w = F.V * Diagonal( F.S ./ diag_vals ) * transpose(F.U) * δ
# step 8: compute the update transform
trans = Symmetric(Diagonal( F.S ./ diag_vals) * transpose(F.U) * δ *
transpose(δ) * F.U * Diagonal( F.S ./ diag_vals))
trans = Symmetric(Diagonal(diag_vals) -
( (2.0 * ζ_a.minimizer^2.0) / (N_ens + 1.0) ) * trans)
trans = Symmetric(F.V * square_root_inv(trans) * F.Vt)
# step 9: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis=="enkf-n-primal" || analysis=="enks-n-primal"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: compute the observed ensemble and ensemble mean
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2: compute the weighted anomalies in observation space
# first we find the observation error covariance inverse
obs_sqrt_inv = square_root_inv(obs_cov)
# then compute the sensitivity matrix in observation space
S = obs_sqrt_inv * (Y .- y_mean)
# step 3: compute the weighted innovation
δ = obs_sqrt_inv * (obs - y_mean)
# step 4: define the epsilon scaling and the effective ensemble size
ϵ_N = 1.0 + (1.0 / N_ens)
N_effective = N_ens + 1.0
# step 5: set up the optimization
# step 5a: the initial choice is no change to the mean state
w = zeros(N_ens)
# step 5b: define the primal cost function
function P(w::Vector{Float64})
cost = (δ - S * w)
cost = sum(cost.^2.0) + N_effective * log(ϵ_N + sum(w.^2.0))
0.5 * cost
end
# step 5c: define the primal gradient
function ∇P!(grad::Vector{Float64}, w::Vector{Float64})
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
grad[:] = N_effective * ζ * w - transpose(S) * (δ - S * w)
end
# step 5d: define the primal hessian
function H_P!(hess::ArView(T1), w::Vector{Float64}) where T1 <: Float64
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
hess .= ζ * I - 2.0 * ζ^2.0 * w * transpose(w)
hess .= transpose(S) * S + N_effective * hess
end
# step 6: perform the optimization by simple Newton
j = 0
trans = Array{Float64}(undef, N_ens, N_ens)
grad_w = Array{Float64}(undef, N_ens)
hess_w = Array{Float64}(undef, N_ens, N_ens)
while j < j_max
# compute the gradient and hessian
∇P!(grad_w, w)
H_P!(hess_w, w)
# perform Newton approximation, simultaneously computing
# the update transform trans with the SVD based inverse at once
trans, hessian_inv = square_root_inv(Symmetric(hess_w), inverse=true)
Δw = hessian_inv * grad_w
w -= Δw
if norm(Δw) < tol
break
else
j+=1
end
end
# step 7: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
elseif analysis=="enkf-n-primal-ls" || analysis=="enks-n-primal-ls"
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
# step 1: compute the observed ensemble and ensemble mean
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2: compute the weighted anomalies in observation space
# first we find the observation error covariance inverse
obs_sqrt_inv = square_root_inv(obs_cov)
# then compute the sensitivity matrix in observation space
S = obs_sqrt_inv * (Y .- y_mean)
# step 3: compute the weighted innovation
δ = obs_sqrt_inv * (obs - y_mean)
# step 4: define the epsilon scaling and the effective ensemble size
ϵ_N = 1.0 + (1.0 / N_ens)
N_effective = N_ens + 1.0
# step 5: set up the optimization
# step 5a: the initial choice is no change to the mean state
w = zeros(N_ens)
# step 5b: define the primal cost function
function J(w::Vector{Float64})
cost = (δ - S * w)
cost = sum(cost.^2.0) + N_effective * log(ϵ_N + sum(w.^2.0))
0.5 * cost
end
# step 5c: define the primal gradient
function ∇J!(grad::Vector{Float64}, w::Vector{Float64})
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
grad[:] = N_effective * ζ * w - transpose(S) * (δ - S * w)
end
# step 5d: define the primal hessian
function H_J!(hess::ArView(T1), w::Vector{Float64}) where T1 <: Float64
ζ = 1.0 / (ϵ_N + sum(w.^2.0))
hess .= ζ * I - 2.0 * ζ^2.0 * w * transpose(w)
hess .= transpose(S) * S + N_effective * hess
end
# step 6: find the argmin for the update weights
# step 6a: define the line search algorithm with Newton
# we use StrongWolfe for RMSE performance as the default linesearch
# method, see the LineSearches docs, alternative choice is commented below
# ln_search = HagerZhang()
ln_search = StrongWolfe()
opt_alg = Newton(linesearch = ln_search)
# step 6b: perform the optimization
w = Optim.optimize(J, ∇J!, H_J!, w, method=opt_alg, x_tol=tol).minimizer
# step 7: compute the update transform
trans = Symmetric(H_J!(Array{Float64}(undef, N_ens, N_ens), w))
trans = square_root_inv(trans)
# step 8: generate mean preserving random orthogonal matrix as in Sakov & Oke 08
U = rand_orth(N_ens)
end
return trans, w, U
end
##############################################################################################
"""
ens_gauss_newton(analysis::String, ens::ArView(T), obs::VecA(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs;
conditioning::ConM(T)=1000.0I,
m_err::ArView(T)=(1.0 ./ zeros(1,1)),
tol::Float64 = 0.0001,
j_max::Int64=40,
Q::CovM(T)=1.0I) where T <: Float64
Computes the ensemble estimated gradient and Hessian terms for nonlinear least-squares
optimization:
```
return ∇J, hess_J
```
`m_err`, `tol`, `j_max`, `Q` are optional arguments depending on the `analysis`, with
default values provided.
Serves as an auxiliary function for IEnKS(-N), where "analysis" is a string which
determines the method of transform update ensemble Gauss-Newton calculation. The observation
error covariance `obs_cov` is of type [`CovM`](@ref), the conditioning matrix `conditioning`
is of type [`ConM`](@ref), the keyword arguments dictionary `kwargs` is of type
[`StepKwargs`](@ref) and the model error covariance matrix `Q` is of type [`CovM`](@ref).
Currently validated `analysis` options:
* `analysis == "ienks-bundle" || "ienks-n-bundle" || "ienks-transform" || "ienks-n-transform"`
computes the weighted observed anomalies as per the
bundle or transform version of the IEnKS, described in [Bocquet et al.
2013](https://rmets.onlinelibrary.wiley.com/doi/abs/10.1002/qj.2236),
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
Bundle versus transform versions of the scheme are specified by the trailing
`analysis` string as `-bundle` or `-transform`. The bundle version uses a small uniform
scalar `ϵ`, whereas the transform version uses a matrix square root inverse as the
conditioning operator. This form of analysis differs from other schemes by returning a
sequential-in-time value for the cost function gradient and Hessian, which is
utilized within the iterative smoother optimization. A finite-size inflation scheme,
based on the EnKF-N above, can be utilized by appending additionally a `-n` to the
`-bundle` or `-transform` version of the IEnKS scheme specified in `analysis`.
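A minimal sketch of a single gradient / Hessian evaluation with the bundle variant is
given below; inputs mirror the [`transform_R`](@ref) sketch and are illustrative
placeholders, noting that `conditioning` is supplied as the inverse `(1.0 / ϵ)*I` of
the bundle conditioning `ϵ*I`:
```
ens = randn(10, 15)
obs = randn(5)
H_obs(x, d, kwargs) = x[1:d, :]    # hypothetical linear observation operator
obs_cov = Symmetric(Matrix(0.25I, 5, 5))
kwargs = Dict{String,Any}()
ϵ = 0.0001
∇J, hess_J = ens_gauss_newton("ienks-bundle", ens, obs, H_obs, obs_cov, kwargs,
                              conditioning=(1.0 / ϵ)*I)
```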
"""
function ens_gauss_newton(analysis::String, ens::ArView(T), obs::VecA(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs;
conditioning::ConM(T)=1000.0I,
m_err::ArView(T)=(1.0 ./ zeros(1,1)),
tol::Float64 = 0.0001,
j_max::Int64=40,
Q::CovM(T)=1.0I) where T <: Float64
if analysis[1:5]=="ienks"
# step 0: infer observation dimension
obs_dim = length(obs)
# step 1: compute the observed ensemble and ensemble mean
Y = H_obs(ens, obs_dim, kwargs)
y_mean = mean(Y, dims=2)
# step 2: compute the observed anomalies, proportional to the conditioning matrix
# here conditioning should be supplied as trans^(-1)
S = (Y .- y_mean) * conditioning
# step 3: compute the cost function gradient term
inv_obs_cov = inv(obs_cov)
∇J = transpose(S) * inv_obs_cov * (obs - y_mean)
# step 4: compute the cost function Hessian term
hess_J = transpose(S) * inv_obs_cov * S
# return tuple of the gradient and hessian terms
return ∇J, hess_J
end
end
##############################################################################################
"""
ens_update_RT!(ens::ArView(T), update::TransM(T)) where T <: Float64
Updates forecast ensemble to the analysis ensemble by right transform (RT) method.
```
return ens
```
Arguments include the ensemble of type [`ArView`](@ref) and the 3-tuple including the
right transform for the anomalies, the weights for the mean and the random, mean-preserving
orthogonal matrix, type [`TransM`](@ref).
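A sketch of composing the transform with the in-place update is given below, with
inputs as in the [`transform_R`](@ref) sketch:
```
update = transform_R("etkf", ens, obs, H_obs, obs_cov, kwargs)
ens_update_RT!(ens, update)
```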
"""
function ens_update_RT!(ens::ArView(T), update::TransM(T)) where T <: Float64
# step 0: infer dimensions and unpack the transform
sys_dim, N_ens = size(ens)
trans, w, U = update
# step 1: compute the ensemble mean
x_mean = mean(ens, dims=2)
# step 2: compute the non-normalized anomalies
X = ens .- x_mean
# step 3: compute the update
ens_transform = w .+ trans * U * sqrt(N_ens - 1.0)
ens .= x_mean .+ X * ens_transform
return ens
end
##############################################################################################
"""
ensemble_filter(analysis::String, ens::ArView(T), obs::VecA(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
General filter analysis step, wrapping the right transform / update, and inflation steps.
Optional keyword argument includes `state_dim` for an extended state including parameters.
In this case, a value for the parameter covariance inflation should be included
in addition to the state covariance inflation.
```
return Dict{String,Array{Float64,2}}("ens" => ens)
```
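A sketch of a single filter analysis with optional state inflation is given below,
with inputs as in the [`transform_R`](@ref) sketch and an illustrative inflation value:
```
kwargs = Dict{String,Any}("s_infl" => 1.02)
ens = ensemble_filter("etkf", ens, obs, H_obs, obs_cov, kwargs)["ens"]
```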
"""
function ensemble_filter(analysis::String, ens::ArView(T), obs::VecA(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
# step 0: infer the system, observation and ensemble dimensions
sys_dim, N_ens = size(ens)
obs_dim = length(obs)
if haskey(kwargs, "state_dim")
state_dim = kwargs["state_dim"]
else
state_dim = sys_dim
end
# step 1: compute the transform and update ensemble
ens_update_RT!(ens, transform_R(analysis, ens, obs, H_obs, obs_cov, kwargs))
# step 2a: compute multiplicative inflation of state variables
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# step 2b: if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
return Dict{String,Array{Float64,2}}("ens" => ens)
end
##############################################################################################
"""
ls_smoother_classic(analysis::String, ens::ArView(T), obs::ArView(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
Lag-shift ensemble Kalman smoother analysis step, classical version.
Classic EnKS uses the last filtered state for the forecast, unlike the iterative
schemes, which use the once- or multiple-times re-analyzed posterior as the
initial condition for the forecast of the states to the next shift.
Optional argument includes state dimension for an extended state including parameters.
In this case, a value for the parameter covariance inflation should be included
in addition to the state covariance inflation.
```
return Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered
)
```
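A minimal sketch with trivial stand-in dynamics is given below; `step_model!` and
`dx_dt` are hypothetical no-op placeholders for the model integration utilities, and
the dimensions are illustrative, with the observation sequence spanning `shift` times
and the posterior storage spanning `lag + shift` times:
```
sys_dim, N_ens, obs_dim, lag, shift = 10, 15, 5, 4, 2
ens = randn(sys_dim, N_ens)
obs = randn(obs_dim, shift)
H_obs(x, d, kwargs) = x[1:d, :]    # hypothetical linear observation operator
obs_cov = Symmetric(Matrix(0.25I, obs_dim, obs_dim))
step_model!(x, t, kwargs) = x      # hypothetical no-op integrator
dx_dt(x, t, kwargs) = zero(x)      # hypothetical placeholder vector field
kwargs = Dict{String,Any}(
    "f_steps" => 1,
    "step_model" => step_model!,
    "dx_dt" => dx_dt,
    "posterior" => zeros(sys_dim, N_ens, lag + shift),
)
out = ls_smoother_classic("etks", ens, obs, H_obs, obs_cov, kwargs)
```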
"""
function ls_smoother_classic(analysis::String, ens::ArView(T), obs::ArView(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs) where T <: Float64
# step 0: unpack kwargs
f_steps = kwargs["f_steps"]::Int64
step_model! = kwargs["step_model"]::Function
posterior = kwargs["posterior"]::Array{Float64,3}
# infer the ensemble, obs, and system dimensions,
# observation sequence includes shift forward times,
# posterior is size lag + shift
obs_dim, shift = size(obs)
sys_dim, N_ens, lag = size(posterior)
lag = lag - shift
if shift < lag
# posterior contains lag + shift past states; we discard the oldest shift
# states and load the new filtered states in the routine
posterior = cat(posterior[:, :, 1 + shift: end],
Array{Float64}(undef, sys_dim, N_ens, shift), dims=3)
end
# optional parameter estimation
if haskey(kwargs, "state_dim")
state_dim = kwargs["state_dim"]::Int64
param_est = true
else
state_dim = sys_dim
param_est = false
end
# step 1: create storage for the forecast and filter values over the DAW
forecast = Array{Float64}(undef, sys_dim, N_ens, shift)
filtered = Array{Float64}(undef, sys_dim, N_ens, shift)
# step 2: forward propagate the ensemble and analyze the observations
for s in 1:shift
# initialize posterior for the special case lag=shift
if lag==shift
posterior[:, :, s] = ens
end
# step 2a: propagate between observation times
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define the diffusion structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
# step 2b: store forecast to compute ensemble statistics before observations
# become available
forecast[:, :, s] = ens
# step 2c: perform the filtering step
trans = transform_R(analysis, ens, obs[:, s], H_obs, obs_cov, kwargs)
ens_update_RT!(ens, trans)
# optionally compute multiplicative inflation of state variables
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
# step 2d: store the filtered states and posterior states
filtered[:, :, s] = ens
posterior[:, :, end - shift + s] = ens
# step 2e: re-analyze the posterior in the lag window of states,
# not including current time
@views for l in 1:lag + s - 1
ens_update_RT!(posterior[:, :, l], trans)
end
end
# step 3: if performing parameter estimation, apply the parameter model
if haskey(kwargs, "p_wlk")
p_wlk = kwargs["p_wlk"]::Float64
param_ens = ens[state_dim + 1:end , :]
param_mean = mean(param_ens, dims=2)
param_ens .= param_ens +
p_wlk * param_mean .* rand(Normal(), length(param_mean), N_ens)
ens[state_dim + 1:end, :] = param_ens
end
Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered
)
end
##############################################################################################
"""
ls_smoother_single_iteration(analysis::String, ens::ArView(T), obs::ArView(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs) where T <: Float64
Lag-shift, single-iteration ensemble Kalman smoother (SIEnKS) analysis step.
Single-iteration EnKS uses the final re-analyzed posterior initial state for the forecast,
which is pushed forward in time to shift-number of observation times.
Optional argument includes state dimension for an extended state including parameters.
In this case, a value for the parameter covariance inflation should be included in
addition to the state covariance inflation.
```
return Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered
)
```
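A minimal SDA sketch with stand-in dynamics is given below, mirroring the
[`ls_smoother_classic`](@ref) sketch; here the observation sequence spans `lag` times,
the posterior storage spans `shift` times, and the `spin` / `mda` flags are required:
```
sys_dim, N_ens, obs_dim, lag, shift = 10, 15, 5, 4, 2
ens = randn(sys_dim, N_ens)
obs = randn(obs_dim, lag)
H_obs(x, d, kwargs) = x[1:d, :]    # hypothetical linear observation operator
obs_cov = Symmetric(Matrix(0.25I, obs_dim, obs_dim))
step_model!(x, t, kwargs) = x      # hypothetical no-op integrator
dx_dt(x, t, kwargs) = zero(x)      # hypothetical placeholder vector field
kwargs = Dict{String,Any}(
    "f_steps" => 1,
    "step_model" => step_model!,
    "dx_dt" => dx_dt,
    "posterior" => zeros(sys_dim, N_ens, shift),
    "spin" => true,
    "mda" => false,
)
out = ls_smoother_single_iteration("etks", ens, obs, H_obs, obs_cov, kwargs)
```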
"""
function ls_smoother_single_iteration(analysis::String, ens::ArView(T),
obs::ArView(T), H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs) where T <: Float64
# step 0: unpack kwargs, posterior contains length lag past states ending
# with ens as final entry
f_steps = kwargs["f_steps"]::Int64
step_model! = kwargs["step_model"]::Function
posterior = kwargs["posterior"]::Array{Float64,3}
# infer the ensemble, obs, and system dimensions, observation sequence
# includes lag forward times
obs_dim, lag = size(obs)
sys_dim, N_ens, shift = size(posterior)
# optional parameter estimation
if haskey(kwargs, "state_dim")
state_dim = kwargs["state_dim"]::Int64
param_est = true
else
state_dim = sys_dim
param_est = false
end
# make a copy of the initial ens for re-analysis
ens_0 = copy(ens)
# spin to be used on the first lag-assimilations -- this makes the smoothed time-zero
# re-analyzed prior the first initial condition for the future iterations
# regardless of sda or mda settings
spin = kwargs["spin"]::Bool
# step 1: create storage for the posterior, forecast and filter values over the DAW
# only the shift-last and shift-first values are stored as these represent the
# newly forecasted values and last-iterate posterior estimate respectively
if spin
forecast = Array{Float64}(undef, sys_dim, N_ens, lag)
filtered = Array{Float64}(undef, sys_dim, N_ens, lag)
else
forecast = Array{Float64}(undef, sys_dim, N_ens, shift)
filtered = Array{Float64}(undef, sys_dim, N_ens, shift)
end
# multiple data assimilation (mda) is optional, read as boolean variable
mda = kwargs["mda"]::Bool
if mda
# set the observation and re-balancing weights
reb_weights = kwargs["reb_weights"]::Vector{Float64}
obs_weights = kwargs["obs_weights"]::Vector{Float64}
# set iteration count for the initial rebalancing step followed by mda
i = 0
# the posterior statistics are computed in the zeroth pass with rebalancing
posterior[:, :, 1] = ens_0
# make a single iteration with SDA,
# with MDA make a rebalancing step on the zeroth iteration
while i <=1
# step 2: forward propagate the ensemble and analyze the observations
for l in 1:lag
# step 2a: propagate between observation times
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define the structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if i == 0
# step 2b: store forecast to compute ensemble statistics before
# observations become available
# for MDA, this is on the zeroth iteration through the DAW
if spin
# store all new forecast states
forecast[:, :, l] = ens
elseif (l > (lag - shift))
# only store forecast states for previously unobserved
# times beyond previous forecast windows
forecast[:, :, l - (lag - shift)] = ens
end
# step 2c: perform the filtering step with rebalancing weights
trans = transform_R(analysis, ens, obs[:, l], H_obs,
obs_cov * reb_weights[l], kwargs)
ens_update_RT!(ens, trans)
if spin
# optionally compute multiplicative inflation of state variables
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
# store all new filtered states
filtered[:, :, l] = ens
elseif l > (lag - shift)
# store the filtered states for previously unobserved times,
# not mda values
filtered[:, :, l - (lag - shift)] = ens
end
# step 2d: compute re-analyzed posterior statistics within rebalancing
# step, using the MDA rebalancing analysis transform for all available
# times on all states that will be discarded on the next shift
reanalysis_index = min(shift, l)
@views for s in 1:reanalysis_index
ens_update_RT!(posterior[:, :, s], trans)
end
# store most recent filtered state in the posterior statistics, for all
# states to be discarded on the next shift > 1
if l < shift
posterior[:, :, l + 1] = ens
end
else
# step 2c: perform the filtering step with mda weights
trans = transform_R(analysis, ens, obs[:, l], H_obs,
obs_cov * obs_weights[l], kwargs)
ens_update_RT!(ens, trans)
# re-analyzed initial conditions are computed in the mda step
ens_update_RT!(ens_0, trans)
end
end
# reset the ensemble with the prior for mda and step forward the iteration count
ens = copy(ens_0)
i+=1
end
else
# step 2: forward propagate the ensemble and analyze the observations
for l in 1:lag
# step 2a: propagate between observation times
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define the structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if spin
# step 2b: store forecast to compute ensemble statistics before observations
# become available
# if spin, store all new forecast states
forecast[:, :, l] = ens
# step 2c: apply the transformation and update step
trans = transform_R(analysis, ens, obs[:, l], H_obs, obs_cov, kwargs)
ens_update_RT!(ens, trans)
# optionally compute multiplicative inflation of state variables
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
# store all new filtered states
filtered[:, :, l] = ens
# step 2d: compute the re-analyzed initial condition with the assimilation update
ens_update_RT!(ens_0, trans)
elseif l > (lag - shift)
# step 2b: store forecast to compute ensemble statistics before observations
# become available
# if not spin, only store forecast states for previously unobserved times
# beyond previous forecast windows
forecast[:, :, l - (lag - shift)] = ens
# step 2c: apply the transformation and update step
trans = transform_R(analysis, ens, obs[:, l], H_obs, obs_cov, kwargs)
ens_update_RT!(ens, trans)
# store the filtered states for previously unobserved times, not mda values
filtered[:, :, l - (lag - shift)] = ens
# step 2d: compute re-analyzed initial condition with the assimilation update
ens_update_RT!(ens_0, trans)
end
end
# reset the ensemble with the re-analyzed prior
ens = copy(ens_0)
end
# step 3: propagate the posterior initial condition forward to the shift-forward time
# step 3a: optionally inflate the posterior covariance
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
# step 3b: if performing parameter estimation, apply the parameter model
if haskey(kwargs, "p_wlk")
p_wlk = kwargs["p_wlk"]::Float64
param_ens = ens[state_dim + 1:end , :]
param_mean = mean(param_ens, dims=2)
param_ens .= param_ens +
p_wlk * param_mean .* rand(Normal(), length(param_mean), N_ens)
ens[state_dim + 1:end , :] = param_ens
end
# step 3c: propagate the re-analyzed, resampled-in-parameter-space ensemble up by shift
# observation times
for s in 1:shift
if !mda
posterior[:, :, s] = ens
end
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define the diffusion structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
end
Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered,
)
end
##############################################################################################
"""
ls_smoother_gauss_newton(analysis::String, ens::ArView(T), obs::ArView(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs; ϵ::Float64=0.0001,
tol::Float64=0.001, max_iter::Int64=5) where T <: Float64
This implements a lag-shift Gauss-Newton IEnKS analysis step as in algorithm 4 of
[Bocquet et al. 2014](https://rmets.onlinelibrary.wiley.com/doi/10.1002/qj.2236).
The IEnKS uses the final re-analyzed initial state in the data assimilation window to generate
the forecast, which is subsequently pushed forward in time from the initial condition to
shift-number of observation times. Optional argument includes state dimension for an extended
state including parameters. In this case, a value for the parameter covariance inflation
should be included in addition to the state covariance inflation.
```
return Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered
)
```
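A minimal SDA sketch with stand-in dynamics is given below, using the same
hypothetical placeholders and dimensions as the [`ls_smoother_single_iteration`](@ref)
sketch:
```
kwargs = Dict{String,Any}(
    "f_steps" => 1,
    "step_model" => step_model!,
    "dx_dt" => dx_dt,
    "posterior" => zeros(sys_dim, N_ens, shift),
    "spin" => true,
    "mda" => false,
)
out = ls_smoother_gauss_newton("ienks-transform", ens, obs, H_obs, obs_cov, kwargs)
out["iterations"]    # number of Gauss-Newton iterations used over the DAW
```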
"""
function ls_smoother_gauss_newton(analysis::String, ens::ArView(T), obs::ArView(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs; ϵ::Float64=0.0001,
tol::Float64=0.001, max_iter::Int64=5) where T <: Float64
# step 0: unpack kwargs, posterior contains length lag past states ending
# with ens as final entry
f_steps = kwargs["f_steps"]::Int64
step_model! = kwargs["step_model"]::Function
posterior = kwargs["posterior"]::Array{Float64,3}
# infer the ensemble, obs, and system dimensions,
# observation sequence includes lag forward times
obs_dim, lag = size(obs)
sys_dim, N_ens, shift = size(posterior)
# optional parameter estimation
if haskey(kwargs, "state_dim")
state_dim = kwargs["state_dim"]::Int64
param_est = true
else
state_dim = sys_dim
param_est = false
end
# spin to be used on the first lag-assimilations -- this makes the smoothed time-zero
# re-analyzed prior the first initial condition for the future iterations
# regardless of sda or mda settings
spin = kwargs["spin"]::Bool
# step 1: create storage for the posterior filter values over the DAW,
# forecast values in the DAW+shift
if spin
forecast = Array{Float64}(undef, sys_dim, N_ens, lag + shift)
filtered = Array{Float64}(undef, sys_dim, N_ens, lag)
else
forecast = Array{Float64}(undef, sys_dim, N_ens, shift)
filtered = Array{Float64}(undef, sys_dim, N_ens, shift)
end
# step 1a: determine if using finite-size or MDA formalism in the below
if analysis[1:7] == "ienks-n"
# epsilon inflation factor corresponding to unknown forecast distribution mean
ϵ_N = 1.0 + (1.0 / N_ens)
# effective ensemble size
N_effective = N_ens + 1.0
end
# multiple data assimilation (mda) is optional, read as boolean variable
mda = kwargs["mda"]::Bool
# algorithm splits on the use of MDA or not
if mda
# 1b: define the initial parameters for the two stage iterative optimization
# define the rebalancing weights for the first sweep of the algorithm
reb_weights = kwargs["reb_weights"]::Vector{Float64}
# define the mda weights for the second pass of the algorithm
obs_weights = kwargs["obs_weights"]::Vector{Float64}
# m gives the total number of iterations of the algorithm over both the
# rebalancing and the MDA steps; it is accumulated from the iteration count
# i in each stage; the iterator i will give the number of iterations of the
# optimization and does not take into account the forecast / filtered iteration;
# for an optimized routine of the transform version, forecast / filtered statistics
# can be computed within the iteration count i; for the optimized bundle
# version, forecast / filtered statistics need to be computed with an additional
# iteration due to the epsilon scaling of the ensemble
m = 0
# stage gives the algorithm stage, 0 is rebalancing, 1 is MDA
stage = 0
# step 1c: compute the initial ensemble mean and normalized anomalies,
# and storage for the sequentially computed iterated mean, gradient
# and hessian terms
ens_mean_0 = mean(ens, dims=2)
anom_0 = ens .- ens_mean_0
∇J = Array{Float64}(undef, N_ens, lag)
hess_J = Array{Float64}(undef, N_ens, N_ens, lag)
# pre-allocate these variables as global for the loop re-definitions
hessian = Symmetric(Array{Float64}(undef, N_ens, N_ens))
new_ens = Array{Float64}(undef, sys_dim, N_ens)
# step through two stages starting at zero
while stage <=1
# step 1d: (re)-define the conditioning for bundle versus transform variants
if analysis[end-5:end] == "bundle"
trans = ϵ*I
trans_inv = (1.0 / ϵ)*I
elseif analysis[end-8:end] == "transform"
trans = 1.0*I
trans_inv = 1.0*I
end
# step 1e: (re)define the iteration count and the base-point for the optimization
i = 0
ens_mean_iter = copy(ens_mean_0)
w = zeros(N_ens)
# step 2: begin iterative optimization
while i < max_iter
# step 2a: redefine the conditioned ensemble with updated mean, after
# first spin run in stage 0
if !spin || i > 0 || stage > 0
ens = ens_mean_iter .+ anom_0 * trans
end
# step 2b: forward propagate the ensemble and sequentially store the
# forecast or construct cost function
for l in 1:lag
# propagate between observation times
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if spin && i == 0 && stage==0
# if first spin, store the forecast over the entire DAW
forecast[:, :, l] = ens
# otherwise, compute the sequential terms of the gradient and hessian of
# the cost function, weights depend on the stage of the algorithm
elseif stage == 0
# this is the rebalancing step to produce filter and forecast stats
∇J[:,l], hess_J[:, :, l] = ens_gauss_newton(
analysis,
ens, obs[:, l],
H_obs,
obs_cov * reb_weights[l],
kwargs,
conditioning=trans_inv
)
elseif stage == 1
# this is the MDA step to shift the window forward
∇J[:,l], hess_J[:, :, l] = ens_gauss_newton(
analysis,
ens,
obs[:, l],
H_obs,
obs_cov * obs_weights[l],
kwargs,
conditioning=trans_inv
)
end
end
# skip this section in the first spin cycle, return and begin optimization
if !spin || i > 0 || stage > 0
# step 2c: formally compute the gradient and the hessian from the
# sequential components, perform Gauss-Newton after forecast iteration
if analysis[1:7] == "ienks-n"
# use the finite size EnKF cost function for the gradient calculation
ζ = 1.0 / (sum(w.^2.0) + ϵ_N)
gradient = N_effective * ζ * w - sum(∇J, dims=2)
# hessian is computed with the effective ensemble size
hessian = Symmetric((N_effective - 1.0) * I +
dropdims(sum(hess_J, dims=3), dims=3))
else
# compute the usual cost function directly
gradient = (N_ens - 1.0) * w - sum(∇J, dims=2)
# hessian is computed with the ensemble rank
hessian = Symmetric((N_ens - 1.0) * I +
dropdims(sum(hess_J, dims=3), dims=3))
end
if analysis[end-8:end] == "transform"
# transform method requires each of the below, and we make
# all calculations simultaneously via the SVD for stability
trans, trans_inv, hessian_inv = square_root_inv(hessian, full=true)
# compute the weights update
Δw = hessian_inv * gradient
else
# compute the weights update by the standard linear equation solver
Δw = hessian \ gradient
end
# update the weights
w -= Δw
# update the mean via the increment, always with the zeroth
# iterate of the ensemble
ens_mean_iter = ens_mean_0 + anom_0 * w
if norm(Δw) < tol
i+=1
break
end
end
# update the iteration count
i+=1
end
# step 3: compute posterior initial condition and propagate forward in time
# step 3a: perform the analysis of the ensemble
if analysis[1:7] == "ienks-n"
# use finite size EnKF cost function to produce adaptive
# inflation with the hessian
ζ = 1.0 / (sum(w.^2.0) + ϵ_N)
hessian = Symmetric(
N_effective * (ζ * I - 2.0 * ζ^(2.0) * w * transpose(w)) +
dropdims(sum(hess_J, dims=3), dims=3)
)
trans = square_root_inv(hessian)
elseif analysis == "ienks-bundle"
trans = square_root_inv(hessian)
end
# compute analyzed ensemble by the iterated mean and the transformed
# original anomalies
U = rand_orth(N_ens)
ens = ens_mean_iter .+ sqrt(N_ens - 1.0) * anom_0 * trans * U
# step 3b: if performing parameter estimation, apply the parameter model
# for the MDA step and shifted window
if haskey(kwargs, "p_wlk") && stage == 1
p_wlk = kwargs["p_wlk"]::Float64
param_ens = ens[state_dim + 1:end , :]
param_mean = mean(param_ens, dims=2)
param_ens .= param_ens +
p_wlk *
param_mean .* rand(Normal(), length(param_mean), N_ens)
ens[state_dim + 1:end, :] = param_ens
end
# step 3c: propagate the re-analyzed, resampled-in-parameter-space ensemble up
# by shift observation times in stage 1, store the filtered state as the forward
# propagated value at the new observation times within the DAW in stage 0,
# the forecast states as those beyond the DAW in stage 0, and
# store the posterior at the times discarded at the next shift in stage 0
if stage == 0
for l in 1:lag + shift
if l <= shift
# store the posterior ensemble at times that will be discarded
posterior[:, :, l] = ens
end
# shift the ensemble forward Δt
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if spin && l <= lag
# store spin filtered states at all times up to lag
filtered[:, :, l] = ens
elseif spin && l > lag
# store the remaining spin forecast states at shift times
# beyond the DAW
forecast[:, :, l] = ens
elseif l > lag - shift && l <= lag
# store filtered states for newly assimilated observations
filtered[:, :, l - (lag - shift)] = ens
elseif l > lag
# store forecast states at shift times beyond the DAW
forecast[:, :, l - lag] = ens
end
end
else
for l in 1:shift
for j in 1:N_ens
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
end
end
stage += 1
m += i
end
# store and optionally inflate the forward posterior at the new initial condition
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered,
"iterations" => Array{Float64}([m])
)
else
# step 1b: define the initial correction and iteration count, note that i will
# give the number of iterations of the optimization and does not take into
# account the forecast / filtered iteration; for an optimized routine of the
# transform version, forecast / filtered statistics can be computed within
# the iteration count i; for the optimized bundle version, forecast / filtered
# statistics need to be computed with an additional iteration due to the epsilon
# scaling of the ensemble
w = zeros(N_ens)
i = 0
# step 1c: compute the initial ensemble mean and normalized anomalies,
# and storage for the sequentially computed iterated mean, gradient
# and hessian terms
ens_mean_0 = mean(ens, dims=2)
ens_mean_iter = copy(ens_mean_0)
anom_0 = ens .- ens_mean_0
if spin
∇J = Array{Float64}(undef, N_ens, lag)
hess_J = Array{Float64}(undef, N_ens, N_ens, lag)
else
∇J = Array{Float64}(undef, N_ens, shift)
hess_J = Array{Float64}(undef, N_ens, N_ens, shift)
end
# pre-allocate these variables as global for the loop re-definitions
hessian = Symmetric(Array{Float64}(undef, N_ens, N_ens))
new_ens = Array{Float64}(undef, sys_dim, N_ens)
# step 1e: define the conditioning for bundle versus transform variants
if analysis[end-5:end] == "bundle"
trans = ϵ*I
trans_inv = (1.0 / ϵ)*I
elseif analysis[end-8:end] == "transform"
trans = 1.0*I
trans_inv = 1.0*I
end
# step 2: begin iterative optimization
while i < max_iter
# step 2a: redefine the conditioned ensemble with updated mean, after the
# first spin run or for all runs if after the spin cycle
if !spin || i > 0
ens = ens_mean_iter .+ anom_0 * trans
end
# step 2b: forward propagate the ensemble and sequentially store the forecast
# or construct cost function
for l in 1:lag
# propagate between observation times
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if spin
if i == 0
# if first spin, store the forecast over the entire DAW
forecast[:, :, l] = ens
else
# otherwise, compute the sequential terms of the gradient
# and hessian of the cost function over all observations in the DAW
∇J[:,l], hess_J[:, :, l] = ens_gauss_newton(
analysis,
ens,
obs[:, l],
H_obs,
obs_cov,
kwargs,
conditioning=trans_inv
)
end
elseif l > (lag - shift)
# compute sequential terms of the gradient and hessian of the
# cost function only for the shift-length new observations in the DAW
∇J[:,l - (lag - shift)],
hess_J[:, :, l - (lag - shift)] = ens_gauss_newton(
analysis,
ens,
obs[:, l],
H_obs,
obs_cov,
kwargs,
conditioning=trans_inv
)
end
end
# skip this section in the first spin cycle, return and begin optimization
if !spin || i > 0
# step 2c: otherwise, formally compute the gradient and the hessian from the
# sequential components, perform Gauss-Newton step after forecast iteration
if analysis[1:7] == "ienks-n"
# use finite size EnKF cost function to produce the gradient calculation
ζ = 1.0 / (sum(w.^2.0) + ϵ_N)
gradient = N_effective * ζ * w - sum(∇J, dims=2)
# hessian is computed with the effective ensemble size
hessian = Symmetric((N_effective - 1.0) * I +
dropdims(sum(hess_J, dims=3), dims=3))
else
# compute the usual cost function directly
gradient = (N_ens - 1.0) * w - sum(∇J, dims=2)
# hessian is computed with the ensemble rank
hessian = Symmetric((N_ens - 1.0) * I +
dropdims(sum(hess_J, dims=3), dims=3))
end
if analysis[end-8:end] == "transform"
# transform method requires each of the below, and we make
# all calculations simultaneously via the SVD for stability
trans, trans_inv, hessian_inv = square_root_inv(hessian, full=true)
# compute the weights update
Δw = hessian_inv * gradient
else
# compute the weights update by the standard linear equation solver
Δw = hessian \ gradient
end
# update the weights
w -= Δw
# update the mean via the increment, always with the zeroth iterate
# of the ensemble
ens_mean_iter = ens_mean_0 + anom_0 * w
if norm(Δw) < tol
i +=1
break
end
end
# update the iteration count
i+=1
end
# step 3: compute posterior initial condition and propagate forward in time
# step 3a: perform the analysis of the ensemble
if analysis[1:7] == "ienks-n"
# use finite size EnKF cost function to produce adaptive inflation
# with the hessian
ζ = 1.0 / (sum(w.^2.0) + ϵ_N)
hessian = Symmetric(
N_effective * (ζ * I - 2.0 * ζ^(2.0) * w * transpose(w)) +
dropdims(sum(hess_J, dims=3), dims=3)
)
# redefine the ensemble transform for the final update
trans = square_root_inv(hessian)
elseif analysis == "ienks-bundle"
# redefine the ensemble transform for the final update,
# this is already computed in-loop for the ienks-transform
trans = square_root_inv(hessian)
end
# compute analyzed ensemble by the iterated mean and the transformed
# original anomalies
U = rand_orth(N_ens)
ens = ens_mean_iter .+ sqrt(N_ens - 1.0) * anom_0 * trans * U
# step 3b: if performing parameter estimation, apply the parameter model
if haskey(kwargs, "p_wlk")
p_wlk = kwargs["p_wlk"]::Float64
param_ens = ens[state_dim + 1:end , :]
param_ens = param_ens + p_wlk * rand(Normal(), size(param_ens))
ens[state_dim + 1:end, :] = param_ens
end
# step 3c: propagate re-analyzed, resampled-in-parameter-space ensemble up by shift
# observation times, store the filtered state as the forward propagated value at the
# new observation times within the DAW, forecast states as those beyond the DAW, and
# store the posterior at the times discarded at the next shift
for l in 1:lag + shift
if l <= shift
# store the posterior ensemble at times that will be discarded
posterior[:, :, l] = ens
end
# shift the ensemble forward Δt
for j in 1:N_ens
if param_est
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# define structure matrix with respect to the sample value
# of the inertia, as per each ensemble member
diff_mat = zeros(20,20)
diff_mat[LinearAlgebra.diagind(diff_mat)[11:end]] =
kwargs["dx_params"]["ω"][1] ./ (2.0 * ens[21:30, j])
kwargs["diff_mat"] = diff_mat
end
end
@views for k in 1:f_steps
step_model!(ens[:, j], 0.0, kwargs)
if string(parentmodule(kwargs["dx_dt"])) == "IEEE39bus"
# set phase angles mod 2pi
ens[1:10, j] .= rem2pi.(ens[1:10, j], RoundNearest)
end
end
end
if l == shift
# store the shift-forward ensemble for the initial condition in the new DAW
new_ens = copy(ens)
end
if spin && l <= lag
# store spin filtered states at all times up to lag
filtered[:, :, l] = ens
elseif spin && l > lag
# store the remaining spin forecast states at shift times beyond the DAW
forecast[:, :, l] = ens
elseif l > lag - shift && l <= lag
# store filtered states for newly assimilated observations
filtered[:, :, l - (lag - shift)] = ens
elseif l > lag
# store forecast states at shift times beyond the DAW
forecast[:, :, l - lag] = ens
end
end
# store and optionally inflate the forward posterior at the new initial condition
ens = copy(new_ens)
if haskey(kwargs, "s_infl")
s_infl = kwargs["s_infl"]::Float64
inflate_state!(ens, s_infl, sys_dim, state_dim)
end
# if including an extended state of parameter values,
# optionally compute multiplicative inflation of parameter values
if haskey(kwargs, "p_infl")
p_infl = kwargs["p_infl"]::Float64
inflate_param!(ens, p_infl, sys_dim, state_dim)
end
Dict{String,Array{Float64}}(
"ens" => ens,
"post" => posterior,
"fore" => forecast,
"filt" => filtered,
"iterations" => Array{Float64}([i])
)
end
end
##############################################################################################
# end module
end
##############################################################################################
# Methods below are yet to be debugged and benchmarked
##############################################################################################
# single iteration, correlation-based lag_shift_smoother, adaptive inflation STILL DEBUGGING
#
#function ls_smoother_single_iteration_adaptive(analysis::String, ens::ArView, obs::ArView,
# obs_cov::CovM, s_infl::Float64, kwargs::StepKwargs)
#
# """Lag-shift ensemble kalman smoother analysis step, single iteration adaptive version
#
# This version of the lag-shift EnKS uses the final re-analyzed posterior initial state for the forecast,
# which is pushed forward in time from the initial condition to shift-number of observation times.
#
# Optional keyword argument includes state dimension if there is an extended state including parameters. In this
# case, a value for the parameter covariance inflation should be included in addition to the state covariance
# inflation. If the analysis method is 'etks_adaptive', this utilizes the past analysis means to construct an
# innovation-based estimator for the model error covariances. This is formed by the expectation step in the
# expectation maximization algorithm dicussed by Tandeo et al. 2021."""
#
# # step 0: unpack kwargs, posterior contains length lag past states ending with ens as final entry
# f_steps = kwargs["f_steps"]::Int64
# step_model! = kwargs["step_model"]::Function
# posterior = kwargs["posterior"]::Array{Float64,3}
#
# # infer the ensemble, obs, and system dimensions, observation sequence includes lag forward times
# obs_dim, lag = size(obs)
# sys_dim, N_ens, shift = size(posterior)
#
#    # for the adaptive inflation scheme
# # load bool if spinning up tail of innovation statistics
# tail_spin = kwargs["tail_spin"]::Bool
#
# # pre_analysis will contain the sequence of the last cycle's analysis states
# # over the current DAW
# pre_analysis = kwargs["analysis"]::Array{Float64,3}
#
# # analysis innovations contains the innovation statistics over the previous DAW plus a trail of
# # length tail * lag to ensure more robust frequentist estimates
# analysis_innovations = kwargs["analysis_innovations"]::ArView
#
# # optional parameter estimation
# if haskey(kwargs, "state_dim")
# state_dim = kwargs["state_dim"]::Int64
# p_infl = kwargs["p_infl"]::Float64
# p_wlk = kwargs["p_wlk"]::Float64
#
# else
# state_dim = sys_dim
# end
#
#    # make a copy of the initial ens for re-analysis
# ens_0 = copy(ens)
#
#    # spin to be used on the first lag-assimilations -- this makes the smoothed time-zero re-analyzed prior
# # the first initial condition for the future iterations regardless of sda or mda settings
# spin = kwargs["spin"]::Bool
#
# # step 1: create storage for the posterior, forecast and filter values over the DAW
# # only the shift-last and shift-first values are stored as these represent the newly forecasted values and
# # last-iterate posterior estimate respectively
# if spin
# forecast = Array{Float64}(undef, sys_dim, N_ens, lag)
# filtered = Array{Float64}(undef, sys_dim, N_ens, lag)
# else
# forecast = Array{Float64}(undef, sys_dim, N_ens, shift)
# filtered = Array{Float64}(undef, sys_dim, N_ens, shift)
# end
#
# if spin
# ### NOTE: WRITING THIS NOW SO THAT WE WILL HAVE AN ARBITRARY TAIL OF INNOVATION STATISTICS
# # FROM THE PASS BACK THROUGH THE WINDOW, BUT WILL COMPUTE INNOVATIONS ONLY ON THE NEW
# # SHIFT-LENGTH REANALYSIS STATES BY THE SHIFTED DAW
# # create storage for the analysis means computed at each forward step of the current DAW
# post_analysis = Array{Float64}(undef, sys_dim, N_ens, lag)
# else
# # create storage for the analysis means computed at the shift forward states in the DAW
# post_analysis = Array{Float64}(undef, sys_dim, N_ens, shift)
# end
#
# # step 2: forward propagate the ensemble and analyze the observations
# for l in 1:lag
# # step 2a: propagate between observation times
# for j in 1:N_ens
# @views for k in 1:f_steps
# step_model!(ens[:, j], 0.0, kwargs)
# end
# end
# if spin
# # step 2b: store the forecast to compute ensemble statistics before observations become available
# # if spin, store all new forecast states
# forecast[:, :, l] = ens
#
# # step 2c: apply the transformation and update step
# trans = transform_R(analysis, ens, obs[:, l], obs_cov, kwargs)
# ens_update_RT!(ens, trans)
#
# # compute multiplicative inflation of state variables
# inflate_state!(ens, s_infl, sys_dim, state_dim)
#
# # if including an extended state of parameter values,
# # compute multiplicative inflation of parameter values
# if state_dim != sys_dim
# inflate_param!(ens, p_infl, sys_dim, state_dim)
# end
#
# # store all new filtered states
# filtered[:, :, l] = ens
#
# # store the re-analyzed ensembles for future statistics
# post_analysis[:, :, l] = ens
# for j in 1:l-1
# post_analysis[:, :, j] = ens_update_RT!(post_analysis[:, :, j], trans)
# end
#
# # step 2d: compute the re-analyzed initial condition if we have an assimilation update
# ens_update_RT!(ens_0, trans)
#
# elseif l > (lag - shift)
# # step 2b: store the forecast to compute ensemble statistics before observations become available
#            # if not spin, only store forecast states for unobserved times beyond the previous forecast window
# forecast[:, :, l - (lag - shift)] = ens
#
# # step 2c: apply the transformation and update step
# if tail_spin
# trans = transform_R(analysis, ens, obs[:, l], obs_cov, kwargs,
# m_err=analysis_innovations[:, 1:end-shift])
# else
# trans = transform_R(analysis, ens, obs[:, l], obs_cov, kwargs,
# m_err=analysis_innovations)
# end
#
# ens = ens_update_RT!(ens, trans)
#
# # store the filtered states for previously unobserved times, not mda values
# filtered[:, :, l - (lag - shift)] = ens
#
# # store the re-analyzed ensembles for future statistics
# post_analysis[:, :, l] = ens
# for j in 1:l-1
# post_analysis[:, :, j] = ens_update_RT!(post_analysis[:, :, j], trans)
# end
#
# # step 2d: compute the re-analyzed initial condition if we have an assimilation update
# ens_update_RT!(ens_0, trans)
#
# elseif l > (lag - 2 * shift)
# # store the re-analyzed ensembles for future statistics
# post_analysis[:, :, l] = ens
#
# # compute the innovation versus the last cycle's analysis state
# analysis_innovations[:, :, end - lag + l] = pre_analysis[:, :, l + shift] - post_analysis[:, :, l]
# end
# end
# # reset the ensemble with the re-analyzed prior
# ens = copy(ens_0)
#
# # reset the analysis innovations for the next DAW
# pre_analysis = copy(post_analysis)
#
# if !tail_spin
# # add the new shifted DAW innovations to the statistics and discard the oldest
# # shift-innovations
# analysis_innovations = hcat(analysis_innovations[:, shift + 1: end],
# Array{Float64}(undef, sys_dim, shift))
# end
#
# # step 3: propagate the posterior initial condition forward to the shift-forward time
# # step 3a: inflate the posterior covariance
# inflate_state!(ens, s_infl, sys_dim, state_dim)
#
# # if including an extended state of parameter values,
# # compute multiplicative inflation of parameter values
# if state_dim != sys_dim
# inflate_param!(ens, p_infl, sys_dim, state_dim)
# end
#
# # step 3b: if performing parameter estimation, apply the parameter model
# if state_dim != sys_dim
# param_ens = ens[state_dim + 1:end , :]
# param_ens = param_ens + p_wlk * rand(Normal(), size(param_ens))
# ens[state_dim + 1:end, :] = param_ens
# end
#
# # step 3c: propagate the re-analyzed, resampled-in-parameter-space ensemble up by shift
# # observation times
# for s in 1:shift
# if !mda
# posterior[:, :, s] = ens
# end
# for j in 1:N_ens
# @views for k in 1:f_steps
# step_model!(ens[:, j], 0.0, kwargs)
# end
# end
# end
#
# if tail_spin
# # prepare storage for the new innovations concatenated to the oldest lag-innovations
# analysis_innovations = hcat(analysis_innovations,
# Array{Float64}(undef, sys_dim, shift))
# else
# # reset the analysis innovations window to remove the oldest lag-innovations
# analysis_innovations = hcat(analysis_innovations[:, shift + 1: end],
# Array{Float64}(undef, sys_dim, lag))
# end
#
# Dict{String,Array{Float64}}(
# "ens" => ens,
# "post" => posterior,
# "fore" => forecast,
# "filt" => filtered,
# "anal" => pre_analysis,
# "inno" => analysis_innovations,
# )
#end
#
#
#########################################################################################################################
#########################################################################################################################
# Methods below taken from old python code, yet to completely convert, debug and benchmark
#########################################################################################################################
## IEnKF-T-LM
#
#
#def ietlm(X_ext_ens, H, obs, obs_cov, f_steps, f, h, tau=0.001, e1=0,
# inflation=1.0, tol=0.001, l_max=40):
#
# """This produces an analysis ensemble via transform as in algorithm 3, bocquet sakov 2012"""
#
# # step 0: infer the ensemble, obs, and state dimensions
# [sys_dim, N_ens] = np.shape(X_ext_ens)
# obs_dim = len(obs)
#
# # step 1: we compute the ensemble mean and non-normalized anomalies
# X_mean_0 = np.mean(X_ext_ens, axis=1)
# A_t = X_ext_ens.transpose() - X_mean_0
#
# # step 2: we define the initial iterative minimization parameters
# l = 0
# nu = 2
# w = np.zeros(N_ens)
#
# # step 3: update the mean via the w increment
# X_mean_1 = X_mean_0 + A_t.transpose() @ w
# X_mean_tmp = copy.copy(X_mean_1)
#
# # step 4: evolve the ensemble mean forward in time, and transform into observation space
# for k in range(f_steps):
# # propagate ensemble mean one step forward
# X_mean_tmp = l96_rk4_step(X_mean_tmp, h, f)
#
# # define the observed mean by the propagated mean in the observation space
# Y_mean = H @ X_mean_tmp
#
# # step 5: Define the initial transform
# T = np.eye(N_ens)
#
# # step 6: redefine the ensemble with the updated mean and the transform
# X_ext_ens = (X_mean_1 + T @ A_t).transpose()
#
# # step 7: loop over the discretization steps between observations to produce a forecast ensemble
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
# # step 8: compute the forecast anomalies in the observation space, via the observed, evolved mean and the
# # observed, forward ensemble, conditioned by the transform
# Y_ens = H @ X_ext_ens
# Y_ens_t = np.linalg.inv(T).transpose() @ (Y_ens.transpose() - Y_mean)
#
# # step 9: compute the cost function in ensemble space
# J = 0.5 * (obs - Y_mean) @ np.linalg.inv(obs_cov) @ (obs - Y_mean) + 0.5 * (N_ens - 1) * w @ w
#
# # step 10: compute the approximate gradient of the cost function
# grad_J = (N_ens - 1) * w - Y_ens_t @ np.linalg.inv(obs_cov) @ (obs - Y_mean)
#
# # step 11: compute the approximate hessian of the cost function
# hess = (N_ens - 1) * np.eye(N_ens) + Y_ens_t @ np.linalg.inv(obs_cov) @ Y_ens_t.transpose()
#
# # step 12: compute the infinity norm of the jacobian and the max of the hessian diagonal
# flag = np.max(np.abs(grad_J)) > e1
# mu = tau * np.max(np.diag(hess))
#
# # step 13: while loop
# while flag:
# if l > l_max:
# break
#
# # step 14: set the iteration count forward
# l+= 1
#
# # step 15: solve the system for the w increment update
# δ_w = solve(hess + mu * np.eye(N_ens), -1 * grad_J)
#
# # step 16: check if the increment is sufficiently small to terminate
# if np.sqrt(δ_w @ δ_w) < tol:
# # step 17: flag false to terminate
# flag = False
#
# # step 18: begin else
# else:
# # step 19: reset the ensemble adjustment
# w_prime = w + δ_w
#
# # step 20: reset the initial ensemble with the new adjustment term
# X_mean_1 = X_mean_0 + A_t.transpose() @ w_prime
#
# # step 21: forward propagate the new ensemble mean, and transform into observation space
# X_mean_tmp = copy.copy(X_mean_1)
# for k in range(f_steps):
# X_mean_tmp = l96_rk4_step(X_mean_tmp, h, f)
#
# Y_mean = H @ X_mean_tmp
#
# # steps 22 - 24: define the parameters for the confidence region
# L = 0.5 * δ_w @ (mu * δ_w - grad_J)
# J_prime = 0.5 * (obs - Y_mean) @ np.linalg.inv(obs_cov) @ (obs - Y_mean) + 0.5 * (N_ens -1) * w_prime @ w_prime
# theta = (J - J_prime) / L
#
# # step 25: evaluate if new correction needed
# if theta > 0:
#
# # steps 26 - 28: update the cost function, the increment, and the past ensemble, conditioned with the
# # transform
# J = J_prime
# w = w_prime
# X_ext_ens = (X_mean_1 + T.transpose() @ A_t).transpose()
#
# # step 29: integrate the ensemble forward in time
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
#            # step 30: compute the forward anomalies in the observation space, by the forward evolved mean and forward evolved
# # ensemble
# Y_ens = H @ X_ext_ens
# Y_ens_t = np.linalg.inv(T).transpose() @ (Y_ens.transpose() - Y_mean)
#
# # step 31: compute the approximate gradient of the cost function
# grad_J = (N_ens - 1) * w - Y_ens_t @ np.linalg.inv(obs_cov) @ (obs - Y_mean)
#
# # step 32: compute the approximate hessian of the cost function
# hess = (N_ens - 1) * np.eye(N_ens) + Y_ens_t @ np.linalg.inv(obs_cov) @ Y_ens_t.transpose()
#
# # step 33: define the transform as the inverse square root of the hessian
# V, Sigma, V_t = np.linalg.svd(hess)
# T = V @ np.diag( 1 / np.sqrt(Sigma) ) @ V_t
#
# # steps 34 - 35: compute the tolerance and correction parameters
# flag = np.max(np.abs(grad_J)) > e1
# mu = mu * np.max([1/3, 1 - (2 * theta - 1)**3])
# nu = 2
#
# # steps 36 - 37: else statement, update mu and nu
# else:
# mu = mu * nu
# nu = nu * 2
#
# # step 38: end if
# # step 39: end if
# # step 40: end while
#
# # step 41: perform update to the initial mean with the new defined anomaly transform
# X_mean_1 = X_mean_0 + A_t.transpose() @ w
#
# # step 42: define the transform as the inverse square root of the hessian, bundle version only
# #V, Sigma, V_t = np.linalg.svd(hess)
# #T = V @ np.diag( 1 / np.sqrt(Sigma) ) @ V_t
#
# # step 43: compute the updated ensemble by the transform conditioned anomalies and updated mean
# X_ext_ens = (T.transpose() @ A_t + X_mean_1).transpose()
#
# # step 44: forward propagate the ensemble to the observation time
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
# # step 45: compute the ensemble with inflation
# X_mean_2 = np.mean(X_ext_ens, axis=1)
# A_t = X_ext_ens.transpose() - X_mean_2
# infl = np.eye(N_ens) * inflation
# X_ext_ens = (X_mean_2 + infl @ A_t).transpose()
#
# return X_ext_ens
#
#########################################################################################################################
## IEnKF-B-LM
#
#
#def ieblm(X_ext_ens, H, obs, obs_cov, f_steps, f, h, tau=0.001, e1=0, epsilon=0.0001,
# inflation=1.0, tol=0.001, l_max=40):
#
# """This produces an analysis ensemble as in algorithm 3, bocquet sakov 2012"""
#
# # step 0: infer the ensemble, obs, and state dimensions
# [sys_dim, N_ens] = np.shape(X_ext_ens)
# obs_dim = len(obs)
#
# # step 1: we compute the ensemble mean and non-normalized anomalies
# X_mean_0 = np.mean(X_ext_ens, axis=1)
# A_t = X_ext_ens.transpose() - X_mean_0
#
# # step 2: we define the initial iterative minimization parameters
# l = 0
#
# # NOTE: MARC'S VERSION HAS NU SET TO ONE FIRST AND THEN ITERATES ON THIS IN PRODUCTS
# # OF TWO
# #nu = 2
# nu = 1
#
# w = np.zeros(N_ens)
#
# # step 3: update the mean via the w increment
# X_mean_1 = X_mean_0 + A_t.transpose() @ w
# X_mean_tmp = copy.copy(X_mean_1)
#
# # step 4: evolve the ensemble mean forward in time, and transform into observation space
# for k in range(f_steps):
# X_mean_tmp = l96_rk4_step(X_mean_tmp, h, f)
#
# Y_mean = H @ X_mean_tmp
#
# # step 5: Define the initial transform, transform version only
# # T = np.eye(N_ens)
#
# # step 6: redefine the ensemble with the updated mean, rescaling by epsilon
# X_ext_ens = (X_mean_1 + epsilon * A_t).transpose()
#
# # step 7: loop over the discretization steps between observations to produce a forecast ensemble
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
# # step 8: compute the anomalies in the observation space, via the observed, evolved mean and the observed,
# # forward ensemble, rescaling by epsilon
# Y_ens = H @ X_ext_ens
# Y_ens_t = (Y_ens.transpose() - Y_mean) / epsilon
#
# # step 9: compute the cost function in ensemble space
# J = 0.5 * (obs - Y_mean) @ np.linalg.inv(obs_cov) @ (obs - Y_mean) + 0.5 * (N_ens - 1) * w @ w
#
# # step 10: compute the approximate gradient of the cost function
# grad_J = (N_ens - 1) * w - Y_ens_t @ np.linalg.inv(obs_cov) @ (obs - Y_mean)
#
# # step 11: compute the approximate hessian of the cost function
# hess = (N_ens - 1) * np.eye(N_ens) + Y_ens_t @ np.linalg.inv(obs_cov) @ Y_ens_t.transpose()
#
# # step 12: compute the infinity norm of the jacobian and the max of the hessian diagonal
# # NOTE: MARC'S VERSION DOES NOT HAVE A FLAG BASED ON THE INFINITY NORM OF THE GRADIENT
# # THIS IS ALSO PROBABLY A TRIVIAL FLAG
# # flag = np.max(np.abs(grad_J)) > e1
#
# # NOTE: MARC'S FLAG
# flag = True
#
# # NOTE: MARC'S VERSION USES MU=1 IN THE FIRST ITERATION AND NEVER MAKES
# # THIS DECLARATION IN TERMS OF TAU AND HESS
# # mu = tau * np.max(np.diag(hess))
# mu = 1
#
# # step 13: while loop
# while flag:
# if l > l_max:
# print(l)
# break
#
# # step 14: set the iteration count forward
# l+= 1
#
# # NOTE: MARC'S RE-DEFINITION OF MU AND NU
# mu *= nu
# nu *= 2
#
# # step 15: solve the system for the w increment update
# δ_w = solve(hess + mu * np.eye(N_ens), -1 * grad_J)
#
# # step 16: check if the increment is sufficiently small to terminate
# # NOTE: MARC'S VERSION NORMALIZES THE LENGTH RELATIVE TO THE ENSEMBLE SIZE
# if np.sqrt(δ_w @ δ_w) < tol:
# # step 17: flag false to terminate
# flag = False
# print(l)
#
# # step 18: begin else
# else:
# # step 19: reset the ensemble adjustment
# w_prime = w + δ_w
#
# # step 20: reset the initial ensemble with the new adjustment term
# X_mean_1 = X_mean_0 + A_t.transpose() @ w_prime
#
# # step 21: forward propagate the new ensemble mean, and transform into observation space
# X_mean_tmp = copy.copy(X_mean_1)
# for k in range(f_steps):
# X_mean_tmp = l96_rk4_step(X_mean_tmp, h, f)
#
# Y_mean = H @ X_mean_tmp
#
# # steps 22 - 24: define the parameters for the confidence region
# L = 0.5 * δ_w @ (mu * δ_w - grad_J)
# J_prime = 0.5 * (obs - Y_mean) @ np.linalg.inv(obs_cov) @ (obs - Y_mean) + 0.5 * (N_ens -1) * w_prime @ w_prime
# theta = (J - J_prime) / L
#
# # step 25: evaluate if new correction needed
# if theta > 0:
#
# # steps 26 - 28: update the cost function, the increment, and the past ensemble, rescaled with epsilon
# J = J_prime
# w = w_prime
# X_ext_ens = (X_mean_1 + epsilon * A_t).transpose()
#
# # step 29: integrate the ensemble forward in time
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
#            # step 30: compute the forward anomalies in the observation space, by the forward evolved mean and forward evolved
# # ensemble
# Y_ens = H @ X_ext_ens
# Y_ens_t = (Y_ens.transpose() - Y_mean) / epsilon
#
# # step 31: compute the approximate gradient of the cost function
# grad_J = (N_ens - 1) * w - Y_ens_t @ np.linalg.inv(obs_cov) @ (obs - Y_mean)
#
# # step 32: compute the approximate hessian of the cost function
# hess = (N_ens - 1) * np.eye(N_ens) + Y_ens_t @ np.linalg.inv(obs_cov) @ Y_ens_t.transpose()
#
# # step 33: define the transform as the inverse square root of the hessian, transform version only
# #V, Sigma, V_t = np.linalg.svd(hess)
# #T = V @ np.diag( 1 / np.sqrt(Sigma) ) @ V_t
#
# # steps 34 - 35: compute the tolerance and correction parameters
# # NOTE: TRIVIAL FLAG?
# # flag = np.max(np.abs(grad_J)) > e1
#
# mu = mu * np.max([1/3, 1 - (2 * theta - 1)**3])
#
# # NOTE: ADJUSTMENT HERE TO MATCH NU TO MARC'S CODE
# # nu = 2
# nu = 1
#
# # steps 36 - 37: else statement, update mu and nu
# #else:
# # mu = mu * nu
# # nu = nu * 2
#
# # step 38: end if
# # step 39: end if
# # step 40: end while
#
# # step 41: perform update to the initial mean with the new defined anomaly transform
# X_mean_1 = X_mean_0 + A_t.transpose() @ w
#
# # step 42: define the transform as the inverse square root of the hessian
# V, Sigma, V_t = np.linalg.svd(hess)
# T = V @ np.diag( 1 / np.sqrt(Sigma) ) @ V_t
#
# # step 43: compute the updated ensemble by the transform conditioned anomalies and updated mean
# X_ext_ens = (T.transpose() @ A_t + X_mean_1).transpose()
#
# # step 44: forward propagate the ensemble to the observation time
# for k in range(f_steps):
# X_ext_ens = l96_rk4_stepV(X_ext_ens, h, f)
#
# # step 45: compute the ensemble with inflation
# X_mean_2 = np.mean(X_ext_ens, axis=1)
# A_t = X_ext_ens.transpose() - X_mean_2
# infl = np.eye(N_ens) * inflation
# X_ext_ens = (X_mean_2 + infl @ A_t).transpose()
#
# return X_ext_ens
#
# elseif analysis=="etks-adaptive"
# ## NOTE: STILL DEVELOPMENT VERSION, NOT DEBUGGED
# # needs to be revised for unweighted anomalies
# # This computes the transform of the ETKF update as in Asch, Bocquet, Nodet
# # but using a computation of the contribution of the model error covariance matrix Q
# # in the square root as in Raanes et al. 2015 and the adaptive inflation from the
# # frequentist estimator for the model error covariance
# # step 0: infer the system, observation and ensemble dimensions
# sys_dim, N_ens = size(ens)
# obs_dim = length(obs)
#
# # step 1: compute the ensemble mean
# x_mean = mean(ens, dims=2)
#
# # step 2a: compute the normalized anomalies
# A = (ens .- x_mean) / sqrt(N_ens - 1.0)
#
# if !(m_err[1] == Inf)
# # step 2b: compute the SVD for the two-sided projected model error covariance
# F_ens = svd(A)
# mean_err = mean(m_err, dims=2)
#
# # NOTE: may want to consider separate formulations in which we treat
#        # the model error mean as known versus unknown
# # A_err = (m_err .- mean_err) / sqrt(length(mean_err) - 1.0)
# A_err = m_err / sqrt(size(m_err, 2))
# F_err = svd(A_err)
# if N_ens <= sys_dim
# Σ_pinv = Diagonal([1.0 ./ F_ens.S[1:N_ens-1]; 0.0])
# else
# Σ_pinv = Diagonal(1.0 ./ F_ens.S)
# end
#
# # step 2c: compute the square root covariance with model error anomaly
# # contribution in the ensemble space dimension, note the difference in
# # equation due to the normalized anomalies
# G = Symmetric(I + Σ_pinv * transpose(F_ens.U) * F_err.U *
# Diagonal(F_err.S.^2) * transpose(F_err.U) *
# F_ens.U * Σ_pinv)
#
# G = F_ens.V * square_root(G) * F_ens.Vt
#
# # step 2c: compute the model error adjusted anomalies
# A = A * G
# end
#
# # step 3: compute the ensemble in observation space
# Y = alternating_obs_operator(ens, obs_dim, kwargs)
#
# # step 4: compute the ensemble mean in observation space
# y_mean = mean(Y, dims=2)
#
# # step 5: compute the weighted anomalies in observation space
#
# # first we find the observation error covariance inverse
# obs_sqrt_inv = square_root_inv(obs_cov)
#
# # then compute the weighted anomalies
# S = (Y .- y_mean) / sqrt(N_ens - 1.0)
# S = obs_sqrt_inv * S
#
# # step 6: compute the weighted innovation
# δ = obs_sqrt_inv * ( obs - y_mean )
#
# # step 7: compute the transform matrix
# T = inv(Symmetric(1.0I + transpose(S) * S))
#
# # step 8: compute the analysis weights
# w = T * transpose(S) * δ
#
# # step 9: compute the square root of the transform
# T = sqrt(T)
#
#        # step 10: generate mean-preserving random orthogonal matrix as in Sakov and Oke 2008
# U = rand_orth(N_ens)
#
# # step 11: package the transform output tuple
# T, w, U
#
# elseif analysis=="etkf-hybrid" || analysis=="etks-hybrid"
# # NOTE: STILL DEVELOPMENT VERSION, NOT DEBUGGED
# # step 0: infer the system, observation and ensemble dimensions
# sys_dim, N_ens = size(ens)
# obs_dim = length(obs)
#
# # step 1: compute the background in observation space, and the square root hybrid
# # covariance
# Y = H * conditioning
# x_mean = mean(ens, dims=2)
# X = (ens .- x_mean)
# Σ = inv(conditioning) * X
#
# # step 2: compute the ensemble mean in observation space
# Y_ens = H * ens
# y_mean = mean(Y_ens, dims=2)
#
# # step 3: compute the sensitivity matrix in observation space
# obs_sqrt_inv = square_root_inv(obs_cov)
# Γ = obs_sqrt_inv * Y
#
# # step 4: compute the weighted innovation
# δ = obs_sqrt_inv * ( obs - y_mean )
#
# # step 5: run the Gauss-Newton optimization of the cost function
#
# # step 5a: define the gradient of the cost function for the hybridized covariance
# function ∇J!(w_full::Vector{Float64})
# # define the factor to be inverted and compute with the SVD
# w = w_full[1:end-2]
# α_1 = w_full[end-1]
# α_2 = w_full[end]
# K = (N_ens - 1.0) / α_1 * I + transpose(Σ) * Σ
# F = svd(K)
# K_inv = F.U * Diagonal(1.0 ./ F.S) * F.Vt
# grad_w = transpose(Γ) * (δ - Γ * w) + w / α_2 - K_inv * w / α_2
# grad_1 = 1 / α_2 * transpose(w) * K_inv * ( (1.0 - N_ens) / α_1^2.0 * I) *
#                     K_inv * w
# grad_2 = -transpose(w) * w / α_2^2.0 + transpose(w) * K_inv * w / α_2^2.0
# [grad_w; grad_1; grad_2]
# end
#
# # step 5b: run the Gauss-Newton iteration
# w = zeros(N_ens)
# α_1 = 0.5
# α_2 = 0.5
# j = 0
# w_full = [w; α_1; α_2]
#
# while j < j_max
# # compute the gradient and hessian approximation
#            grad_w = ∇J!(w_full)
# hess_w = grad_w * transpose(grad_w)
#
# # perform Newton approximation, simultaneously computing
# # the update transform T with the SVD based inverse at once
# T, hessian_inv = square_root_inv(Symmetric(hess_w), inverse=true)
# Δw = hessian_inv * grad_w
# w_full -= Δw
#
# if norm(Δw) < tol
# break
# else
# j+=1
# end
# end
#
# # step 6: store the ensemble weights
#
#        # step 7: generate mean-preserving random orthogonal matrix as in Sakov and Oke 2008
# U = rand_orth(N_ens)
#
#        # step 8: package the transform output tuple
# T, w, U
#
#
#
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 6987 | ##############################################################################################
module XdVAR
##############################################################################################
# imports and exports
using LinearAlgebra, ForwardDiff
using ..DataAssimilationBenchmarks
export D3_var_cost, D3_var_grad, D3_var_hessian, D3_var_NewtonOp
##############################################################################################
# Main methods
##############################################################################################
"""
D3_var_cost(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs) where T <: Real
Computes the cost of the three-dimensional variational analysis increment from an initial state
proposal with a static background covariance
`x` is a free argument used to evaluate the cost of the given state proposal versus other
proposal states, `obs` is the observation vector, `x_bkg` is the initial state proposal
vector, `state_cov` is the background error covariance matrix, `H_obs` is a model mapping
operator for observations, and `obs_cov` is the observation error covariance matrix.
`kwargs` refers to any additional arguments needed for the operation computation.
```
return 0.5*back_component + 0.5*obs_component
```
"""
function D3_var_cost(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs) where T <: Real
# initializations
obs_dim = length(obs)
# obs operator
H = H_obs(x, obs_dim, kwargs)
# background discrepancy
δ_b = x - x_bkg
# observation discrepancy
δ_o = obs - H
# cost function
back_component = dot(δ_b, inv(state_cov) * δ_b)
obs_component = dot(δ_o, inv(obs_cov) * δ_o)
0.5*back_component + 0.5*obs_component
end
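# A minimal usage sketch of the cost evaluation (hypothetical values, not part of the
# package test suite); identity `Diagonal` covariances and the `alternating_obs_operator`
# from `ObsOperators` are assumed for illustration:
#
#     using LinearAlgebra
#     sys_dim = 40
#     x_bkg = zeros(sys_dim)
#     x = ones(sys_dim)
#     obs = ones(sys_dim)
#     state_cov = Diagonal(ones(sys_dim))
#     obs_cov = Diagonal(ones(sys_dim))
#     kwargs = Dict{String, Any}("γ" => 1.0)
#     J = D3_var_cost(x, obs, x_bkg, state_cov, alternating_obs_operator, obs_cov, kwargs)
#
# Here the observation misfit vanishes and J == 0.5 * sys_dim == 20.0.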
##############################################################################################
"""
D3_var_grad(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
Computes the gradient of the three-dimensional variational analysis increment from an initial
state proposal with a static background covariance using a wrapper function for automatic
differentiation
`x` is a free argument used to evaluate the cost of the given state proposal versus other
proposal states, `obs` is the observation vector, `x_bkg` is the initial state proposal
vector, `state_cov` is the background error covariance matrix, `H_obs` is a model mapping
operator for observations, and `obs_cov` is the observation error covariance matrix.
`kwargs` refers to any additional arguments needed for the operation computation.
`wrap_cost` is a function that allows differentiation with respect to the free argument `x`
while treating all other hyperparameters of the cost function as constant.
```
return ForwardDiff.gradient(wrap_cost, x)
```
"""
function D3_var_grad(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs) where T <: Float64
function wrap_cost(x::VecA(T)) where T <: Real
D3_var_cost(x, obs, x_bkg, state_cov, H_obs, obs_cov, kwargs)
end
ForwardDiff.gradient(wrap_cost, x)
end
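# For a linear observation operator with identity covariances, the gradient reduces to
# (x - x_bkg) - (obs - x), which gives a quick consistency check on the automatic
# differentiation (a sketch continuing the hypothetical example above):
#
#     g = D3_var_grad(x, obs, x_bkg, state_cov, alternating_obs_operator, obs_cov, kwargs)
#     g ≈ (x - x_bkg) - (obs - x)    # holds when γ == 1.0 and obs_dim == sys_dim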
##############################################################################################
"""
D3_var_hessian(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
Computes the Hessian of the three-dimensional variational analysis increment from an initial
state proposal with a static background covariance using a wrapper function for automatic
differentiation
`x` is a free argument used to evaluate the cost of the given state proposal versus other
proposal states, `obs` is the observation vector, `x_bkg` is the initial state proposal
vector, `state_cov` is the background error covariance matrix, `H_obs` is a model mapping
operator for observations, and `obs_cov` is the observation error covariance matrix.
`kwargs` refers to any additional arguments needed for the operation computation.
`wrap_cost` is a function that allows differentiation with respect to the free argument `x`
while treating all other hyperparameters of the cost function as constant.
```
return ForwardDiff.hessian(wrap_cost, x)
```
"""
function D3_var_hessian(x::VecA(T), obs::VecA(T), x_bkg::VecA(T), state_cov::CovM(T),
H_obs::Function, obs_cov::CovM(T),
kwargs::StepKwargs) where T <: Float64
function wrap_cost(x::VecA(T)) where T <: Real
D3_var_cost(x, obs, x_bkg, state_cov, H_obs, obs_cov, kwargs)
end
ForwardDiff.hessian(wrap_cost, x)
end
##############################################################################################
"""
D3_var_NewtonOp(x_bkg::VecA(T), obs::VecA(T), state_cov::CovM(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
Computes a local minimum of the three-dimensional variational cost function with a static
background covariance using a simple Newton optimization method
`x_bkg` is the initial state proposal vector, `obs` is the observation vector,
`state_cov` is the background error covariance matrix, `H_obs` is a model mapping operator
for observations, `obs_cov` is the observation error covariance matrix, and `kwargs` refers
to any additional arguments needed for the operation computation.
```
return x
```
"""
function D3_var_NewtonOp(x_bkg::VecA(T), obs::VecA(T), state_cov::CovM(T), H_obs::Function,
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64
# initializations
j_max = 40
tol = 0.001
j = 1
sys_dim = length(x_bkg)
# first guess is copy of the first background
x = copy(x_bkg)
# gradient preallocation over-write
function grad!(g::VecA(T), x::VecA(T)) where T <: Real
g[:] = D3_var_grad(x, obs, x_bkg, state_cov, H_obs, obs_cov, kwargs)
end
# Hessian preallocation over-write
function hess!(h::ArView(T), x::VecA(T)) where T <: Real
h .= D3_var_hessian(x, obs, x_bkg, state_cov, H_obs, obs_cov, kwargs)
end
# perform the optimization by simple Newton
grad_x = Array{Float64}(undef, sys_dim)
hess_x = Array{Float64}(undef, sys_dim, sys_dim)
while j <= j_max
# compute the gradient and Hessian
grad!(grad_x, x)
hess!(hess_x, x)
# perform Newton approximation
Δx = hess_x \ grad_x    # solve the linear system rather than forming inv(hess_x)
x = x - Δx
if norm(Δx) < tol
break
else
j+=1
end
end
return x
end
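# A sketch of a single 3D-VAR analysis with the hypothetical values above (not part of
# the package test suite); with equal background and observation weights and a linear
# observation operator, the analysis lands midway between `x_bkg` and `obs`:
#
#     x_opt = D3_var_NewtonOp(x_bkg, obs, state_cov, alternating_obs_operator,
#                             obs_cov, kwargs)
#     x_opt ≈ 0.5 * (x_bkg + obs)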
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 2793 | ##############################################################################################
module IEEE39bus
##############################################################################################
# imports and exports
using ..DataAssimilationBenchmarks
export dx_dt
##############################################################################################
"""
dx_dt(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
Time derivative of the phase and frequency of the [effective-network swing equation model](https://iopscience.iop.org/article/10.1088/1367-2630/17/1/015012).
Input `x` is a `2 * n_g` [`VecA`](@ref) of the phase and frequency at each of the `n_g`
generator buses. The input `dx_params` is of type [`ParamDict`](@ref), containing system
parameters to be passed to the integration scheme. The system is currently defined
autonomously, to be run as an SDE about a noise-perturbed steady state.
"""
function dx_dt(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
# unpack the system parameters of the effective network of
# Nishikawa, T., & Motter, A. E. (2015). Comparative analysis of existing
# models for power-grid synchronization.
A = dx_params["A"]::Array{T}
D = dx_params["D"]::Array{T}
H = dx_params["H"]::Array{T}
K = dx_params["K"]::Array{T}
γ = dx_params["γ"]::Array{T}
ω = dx_params["ω"]::Array{T}
# convert the effective bus coupling and passive injection to contain the change
# of variable terms
K = ω[1] * K / 2.0
A = ω[1] * A / 2.0
# unpack the phase and frequency at the n_g buses, with all phases listed first, then all
# frequencies in the order of the bus index
n_g = convert(Int, length(x) / 2)
δ_1 = @view x[1:n_g]
δ_2 = @view x[n_g+1:end]
# define the vector of the derivatives
dx = zeros(2 * n_g)
# derivative of the phase equals frequency
dx[1:n_g] .= δ_2
# compute the derivative of the inertia normalized frequencies
# entry j is defined as
# A_j * ω/2 - D_j /2 * δ_2 - Σ_{i!=j} K * ω/2 * sin(δ_j - δ_i - γ_ij)
for j in 1:n_g
for i in 1:n_g
if j != i
# K is symmetric, we loop over the columns for faster memory access
# with the same variable j as in the row index of the derivative
dx[n_g + j] += -K[i, j] * sin(δ_1[j] - δ_1[i] - γ[i, j])
end
end
# finally apply the remaining terms
dx[n_g + j] += A[j] - δ_2[j] * D[j] / 2.0
end
# to compute the derivative of the frequencies, we finally
# divide back out by the inertia
dx[n_g + 1 : end] = dx[n_g + 1: end] ./ H
return dx
end
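# A schematic call with hypothetical parameter values, not the calibrated IEEE 39-bus
# matrices that ship with the package input data; for `n_g` generator buses the state
# stacks the phases first and then the frequencies:
#
#     n_g = 10
#     dx_params = Dict{String, Array{Float64}}(
#         "A" => zeros(n_g), "D" => ones(n_g), "H" => ones(n_g),
#         "K" => ones(n_g, n_g), "γ" => zeros(n_g, n_g), "ω" => [2.0 * π * 60.0])
#     dx = dx_dt(zeros(2 * n_g), 0.0, dx_params)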
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 8830 | ##############################################################################################
module L96
##############################################################################################
# imports and exports
# Distributions supplies the Normal law sampled in l96s_tay2_step!
using ..DataAssimilationBenchmarks, SparseArrays, Distributions
export dx_dt, jacobian, l96s_tay2_step!, compute_α_ρ
##############################################################################################
"""
mod_indx!(indx::Int64, dim::Int64)
Auxiliary function to return state vector indices for the Lorenz-96 model, where `indx` is
taken mod `dim`. Mod zero is replaced with `dim` for indexing in Julia state vectors.
"""
function mod_indx!(indx::Int64, dim::Int64)
indx = mod(indx, dim)
if indx==0
indx = dim
end
return indx
end
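# For example, with `dim = 40` the cyclic neighbor indices used below satisfy
# mod_indx!(0, 40) == 40, mod_indx!(41, 40) == 1, and mod_indx!(-1, 40) == 39.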
##############################################################################################
"""
dx_dt(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
Time derivative for Lorenz-96 model, `x` is a model state of size `state_dim` and type
[`VecA`](@ref), `t` is a dummy time argument for consistency with integration methods,
`dx_params` is of type [`ParamDict`](@ref) which is called for the forcing parameter.
Returns the time derivative of the state vector
```
return dx
```
"""
function dx_dt(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
# unpack the (only) derivative parameter for l96
F = dx_params["F"][1]
x_dim = length(x)
dx = copy(x)
for j in 1:x_dim
# index j minus 2, modulo the system dimension
j_m_2 = mod_indx!(j - 2, x_dim)
# index j minus 1, modulo the system dimension
j_m_1 = mod_indx!(j - 1, x_dim)
# index j plus 1, modulo the system dimension
j_p_1 = mod_indx!(j + 1, x_dim)
dx[j] = (x[j_p_1] - x[j_m_2])*x[j_m_1] - x[j] + F
end
return dx
end
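# A quick sanity sketch (not part of the test suite): the standard benchmark
# configuration uses a 40-dimensional state with forcing F = 8.0, for which the
# homogeneous state x .== F is a fixed point of the flow:
#
#     dx_params = Dict{String, Array{Float64}}("F" => [8.0])
#     dx_dt(8.0 * ones(40), 0.0, dx_params)    # ≈ zeros(40)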
##############################################################################################
"""
jacobian(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
Computes the Jacobian of Lorenz-96 about the state `x` of type [`VecA`](@ref). The time
variable `t` is a dummy variable for consistency with integration methods,
`dx_params` is of type [`ParamDict`](@ref) which is called for the forcing parameter.
Note that this is designed to load entries in a zeros array and return a sparse array to
make a compromise between memory and computational resources.
```
return sparse(dxF)
```
"""
function jacobian(x::VecA(T), t::Float64, dx_params::ParamDict(T)) where T <: Real
x_dim = length(x)
dxF = zeros(x_dim, x_dim)
# looping columns j of the jacobian, loading the standard matrix
for j in 1:x_dim
# index j minus 2, modulo the system dimension
j_m_2 = mod_indx!(j - 2, x_dim)
# index j minus 1, modulo the system dimension
j_m_1 = mod_indx!(j - 1, x_dim)
# index j plus 1, modulo the system dimension
j_p_1 = mod_indx!(j + 1, x_dim)
# index j plus 2, modulo the system dimension
j_p_2 = mod_indx!(j + 2, x_dim)
# load the jacobian entries in corresponding rows
dxF[j_p_2, j] = -x[j_p_1]
dxF[j_p_1, j] = x[j_p_2] - x[j_m_1]
dxF[j, j] = -1.0
dxF[j_m_1, j] = x[j_m_2]
end
return sparse(dxF)
end
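# The Jacobian can be sanity-checked column-wise against a finite-difference
# approximation of `dx_dt` (a sketch reusing `dx_params` from the sketch above,
# not part of the test suite):
#
#     x = randn(40)
#     J = jacobian(x, 0.0, dx_params)
#     ϵ = 1.0e-6
#     e_1 = [1.0; zeros(39)]
#     fd = (dx_dt(x + ϵ * e_1, 0.0, dx_params) - dx_dt(x, 0.0, dx_params)) / ϵ
#     maximum(abs.(J[:, 1] - fd)) < 1.0e-4    # agrees up to truncation error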
##############################################################################################
"""
compute_α_ρ(p::Int64)
Computes auxiliary functions for the 2nd order Taylor-Stratonovich expansion. The constants
`α` and `ρ` need to be computed once, only as a function of the order of truncation of the
Fourier series, the argument `p`, for the integration method. These constants are then
supplied as arguments to `l96s_tay2_step!` in `kwargs`. See [`l96s_tay2_step!`](@ref) for
the interpretation and usage of these constants.
```
return α(p)::Float64, ρ(p)::Float64
```
"""
function compute_α_ρ(p::Int64)
function α(p::Int64)
(π^2.0) / 180.0 - 0.5 * π^(-2.0) * sum(1.0 ./ Vector{Float64}(1:p).^4.0)
end
function ρ(p::Int64)
1.0/12.0 - 0.5 * π^(-2.0) * sum(1.0 ./ Vector{Float64}(1:p).^2.0)
end
return α(p), ρ(p)
end
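# For the minimal truncation these constants evaluate to (a quick sketch)
#
#     α, ρ = compute_α_ρ(1)    # α ≈ 0.004171, ρ ≈ 0.032673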
##############################################################################################
"""
l96s_tay2_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
One step of the second order Taylor-Stratonovich integration rule for the stochastic L96 model.
The constants `ρ` and `α` are to be computed with `compute_α_ρ`, depending
only on `p`, and supplied for all steps. This is the general formulation which includes,
e.g., dependence on the truncation of terms in the auxiliary function `C` with
respect to the parameter `p`. In general, truncation at `p=1` is all that is
necessary for order 2.0 convergence.
This method is derived in
[Grudzien, C. et al. (2020).](https://gmd.copernicus.org/articles/13/1903/2020/gmd-13-1903-2020.html)
NOTE: this Julia version is still pending validation as in the manuscript
```
return x
```
"""
function l96s_tay2_step!(x::VecA(T), t::Float64, kwargs::StepKwargs) where T <: Real
# Infer model and parameters
sys_dim = length(x)
dx_params = kwargs["dx_params"]::ParamDict(T)
h = kwargs["h"]::Float64
diffusion = kwargs["diffusion"]::Float64
p = kwargs["p"]::Int64
ρ = kwargs["ρ"]::Float64
α = kwargs["α"]::Float64
# Compute the deterministic dxdt and the jacobian equations
dx = dx_dt(x, 0.0, dx_params)
Jac_x = jacobian(x, 0.0, dx_params)
## random variables
# Vectors ξ, μ, ϕ are sys_dim X 1 vectors of iid standard normal variables,
# ζ and η are sys_dim X p matrices of iid standard normal variables.
# Functional relationships describe each variable W_j as the transformation of
# ξ_j to be of variance given by the length of the time step h. Functions of the random
# Fourier coefficients a_i, b_i are given in terms of μ / η and ϕ / ζ respectively.
# draw standard normal samples
rndm = rand(Normal(), sys_dim, 2*p + 3)
ξ = rndm[:, 1]
μ = rndm[:, 2]
ϕ = rndm[:, 3]
ζ = rndm[:, 4: p+3]
η = rndm[:, p+4: end]
### define the auxiliary functions of random fourier coefficients, a and b
# denominators for the a series
denoms = repeat((1.0 ./ Vector{Float64}(1:p)), 1, sys_dim)
# vector of sums defining a terms
a = -2.0 * sqrt(h * ρ) * μ - sqrt(2.0*h) * sum(ζ' .* denoms, dims=1)' / π
# denominators for the b series
denoms = repeat((1.0 ./ Vector{Float64}(1:p).^2.0), 1, sys_dim)
# vector of sums defining b terms
b = sqrt(h * α) * ϕ + sqrt(h / (2.0 * π^2.0) ) * sum(η' .* denoms, dims=1)'
# vector of first order Stratonovich integrals
J_pdelta = (h / 2.0) * (sqrt(h) * ξ + a)
### auxiliary functions for higher order stratonovich integrals ###
# C function is optional for higher precision but does not change order of convergence
function C(l, j)
if p == 1
return 0.0
end
c = zeros(p, p)
# define the coefficient as a sum of matrix entries where r and k do not agree
indx = Set(1:p)
for r in 1:p
# vals are all values not equal to r
vals = setdiff(indx, Set(r))
for k in vals
# and for column r, define all row entries below, with zeros on diagonal
c[k, r] = (r / (r^2 - k^2)) * ((1.0 / k) * ζ[l, r] * ζ[j, k] + (1.0 / r) *
η[l, r] * η[j, k])
end
end
# return the sum of all values scaled by -1/(2pi^2)
-0.5 * π^(-2.0) * sum(c)
end
function Ψ(l, j)
# Ψ - generic function of the indices l and j, define Ψ plus and Ψ minus index-wise
h^2.0 * ξ[l] * ξ[j] / 3.0 + h * a[l] * a[j] / 2.0 +
h^(1.5) * (ξ[l] * a[j] + ξ[j] * a[l]) / 4.0 -
h^(1.5) * (ξ[l] * b[j] + ξ[j] * b[l]) / (2.0 * π) - h^2.0 * (C(l,j) + C(j,l))
end
# define the approximations of the second order Stratonovich integral
Ψ_plus = copy(x)
Ψ_minus = copy(x)
for i in 1:sys_dim
Ψ_plus[i] = Ψ(mod_indx!((i-1), sys_dim), mod_indx!((i+1), sys_dim))
Ψ_minus[i] = Ψ(mod_indx!((i-2), sys_dim), mod_indx!((i-1), sys_dim))
end
# the final vectorized step forward is given as
x .= collect(Iterators.flatten(
x + dx * h + h^2.0 * 0.5 * Jac_x * dx + # deterministic taylor step
diffusion * sqrt(h) * ξ + # stochastic euler step
diffusion * Jac_x * J_pdelta + # stochastic first order taylor step
diffusion^2.0 * (Ψ_plus - Ψ_minus) # stochastic second order taylor step
))
return x
end
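# A sketch of the keyword arguments expected by `l96s_tay2_step!` (hypothetical values,
# not part of the test suite; truncation at p = 1 suffices for order 2.0 convergence):
#
#     p = 1
#     α, ρ = compute_α_ρ(p)
#     kwargs = Dict{String, Any}(
#         "h" => 0.005, "diffusion" => 0.1, "p" => p, "α" => α, "ρ" => ρ,
#         "dx_params" => Dict{String, Array{Float64}}("F" => [8.0]))
#     x = 8.0 * ones(40)
#     l96s_tay2_step!(x, 0.0, kwargs)    # advances x in place by one stochastic step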
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 11071 | ##############################################################################################
module ObsOperators
##############################################################################################
# imports and exports
using LinearAlgebra, SparseArrays
using ..DataAssimilationBenchmarks
export alternating_projector, alternating_obs_operator, alternating_obs_operator_jacobian
##############################################################################################
# Main methods
##############################################################################################
"""
alternating_projector(x::VecA(T), obs_dim::Int64) where T <: Real
alternating_projector(ens::ArView(T), obs_dim::Int64) where T <: Real
Utility method produces a projection of alternating vector or ensemble components via slicing.
```
return x
return ens
```
This operator takes a single model state `x` of type [`VecA`](@ref), a truth twin time series
or an ensemble of states of type [`ArView`](@ref), and maps this data to alternating
row components. If the truth twin is 2D, then the first index corresponds to the state dimension
and the second index corresponds to the time dimension. The ensemble is assumed to be
2D where the first index corresponds to the state dimension and the second index
corresponds to the ensemble dimension.
The operator selects row components of the input to keep based on the `obs_dim`.
States corresponding to even state dimension indices
are removed from the state vector until the observation dimension is appropriate.
If the observation dimension is less than half the state dimension, states corresponding
to odd state dimension indices are subsequently removed until the observation dimension
is appropriate.
"""
function alternating_projector(x::VecA(T), obs_dim::Int64) where T <: Real
sys_dim = length(x)
if obs_dim == sys_dim
elseif (obs_dim / sys_dim) > 0.5
# the observation dimension is greater than half the state dimension, so we
# remove only the trailing odd-index rows equal to the difference
# of the state and observation dimension
R = sys_dim - obs_dim
indx = 1:(sys_dim - 2 * R)
indx = [indx; sys_dim - 2 * R + 2: 2: sys_dim]
x = x[indx]
elseif (obs_dim / sys_dim) == 0.5
# the observation dimension is equal to half the state dimension so we remove exactly
# half the rows, corresponding to those with even-index
x = x[1:2:sys_dim]
else
# the observation dimension is less than half of the state dimension so that we
# remove all even rows and then all but the remaining, leading obs_dim rows
x = x[1:2:sys_dim]
x = x[1:obs_dim]
end
return x
end
function alternating_projector(ens::ArView(T), obs_dim::Int64) where T <: Real
sys_dim, N_ens = size(ens)
if obs_dim == sys_dim
elseif (obs_dim / sys_dim) > 0.5
# the observation dimension is greater than half the state dimension, so we
# remove only the trailing odd-index rows equal to the difference
# of the state and observation dimension
R = sys_dim - obs_dim
indx = 1:(sys_dim - 2 * R)
indx = [indx; sys_dim - 2 * R + 2: 2: sys_dim]
ens = ens[indx, :]
elseif (obs_dim / sys_dim) == 0.5
# the observation dimension is equal to half the state dimension so we remove exactly
# half the rows, corresponding to those with even-index
ens = ens[1:2:sys_dim, :]
else
# the observation dimension is less than half of the state dimension so that we
# remove all even rows and then all but the remaining, leading obs_dim rows
ens = ens[1:2:sys_dim, :]
ens = ens[1:obs_dim, :]
end
return ens
end
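# For example, with a four-dimensional state:
#
#     alternating_projector([1.0, 2.0, 3.0, 4.0], 2)    # returns [1.0, 3.0]
#     alternating_projector([1.0, 2.0, 3.0, 4.0], 3)    # returns [1.0, 2.0, 4.0]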
##############################################################################################
@doc raw"""
alternating_obs_operator(x::VecA(T), obs_dim::Int64, kwargs::StepKwargs) where T <: Real
alternating_obs_operator(ens::ArView(T), obs_dim::Int64,
kwargs::StepKwargs) where T <: Real
This produces observations of alternating state vector components for generating pseudo-data.
```
return obs
```
This operator takes a single model state `x` of type [`VecA`](@ref), a truth twin time series
or an ensemble of states of type [`ArView`](@ref), and maps this data to the observation
space via the method [`alternating_projector`](@ref) and (possibly) a nonlinear transform.
The truth twin in this version is assumed to be 2D, where the first index corresponds to
the state dimension and the second index corresponds to the time dimension. The ensemble
is assumed to be 2D where the first index corresponds to the state dimension and the
second index corresponds to the ensemble dimension.
The `γ` parameter (optional) in `kwargs` of type [`StepKwargs`](@ref) controls the
component-wise transformation of the remaining state vector components mapped to the
observation space. For `γ=1.0`, there is no transformation applied, and the observation
operator acts as a linear projection onto the remaining components of the state vector,
equivalent to not specifying `γ`. For `γ>1.0`, the nonlinear observation operator of
[Asch et al. 2016](https://epubs.siam.org/doi/book/10.1137/1.9781611974546),
pg. 181 is applied,
```math
\begin{align}
\mathcal{H}(\pmb{x}) = \frac{\pmb{x}}{2}\circ\left[\pmb{1} + \left(\frac{\vert\pmb{x}\vert}{10} \right)^{\gamma - 1}\right]
\end{align}
```
where ``\circ`` is the Schur product, and which limits to the identity for `γ=1.0`.
If `γ=0.0`, the quadratic observation operator of
[Hoteit et al. 2012](https://journals.ametsoc.org/view/journals/mwre/140/2/2011mwr3640.1.xml),
```math
\begin{align}
\mathcal{H}(\pmb{x}) =0.05 \pmb{x} \circ \pmb{x}
\end{align}
```
is applied to the remaining state components (note, this is not a continuous limit).
If `γ<0.0`, the exponential observation
operator of [Wu et al. 2014](https://npg.copernicus.org/articles/21/955/2014/)
```math
\begin{align}
\mathcal{H}(\pmb{x}) = \pmb{x} \circ \exp\{- \gamma \pmb{x} \}
\end{align}
```
is applied to the remaining state vector components, where the exponential
is applied componentwise (note, this is also not a continuous limit).
"""
function alternating_obs_operator(x::VecA(T), obs_dim::Int64,
kwargs::StepKwargs) where T <: Real
sys_dim = length(x)
if haskey(kwargs, "state_dim")
# performing parameter estimation, load the dynamic state dimension
state_dim = kwargs["state_dim"]::Int64
# observation operator for extended state, without observing extended state components
obs = copy(x[1:state_dim])
# proceed with alternating observations of the regular state vector
sys_dim = state_dim
else
obs = copy(x)
end
# project the state vector into the correct components
obs = alternating_projector(obs, obs_dim)
if haskey(kwargs, "γ")
γ = kwargs["γ"]::Float64
if γ > 1.0
obs .= (obs ./ 2.0) .* ( 1.0 .+ ( abs.(obs) ./ 10.0 ).^(γ - 1.0) )
elseif γ == 0.0
obs .= 0.05*obs.^2.0
elseif γ < 0.0
obs .= obs .* exp.(-γ * obs)
end
end
return obs
end
function alternating_obs_operator(ens::ArView(T), obs_dim::Int64,
kwargs::StepKwargs) where T <: Real
sys_dim, N_ens = size(ens)
if haskey(kwargs, "state_dim")
# performing parameter estimation, load the dynamic state dimension
state_dim = kwargs["state_dim"]::Int64
# observation operator for extended state, without observing extended state components
obs = copy(ens[1:state_dim, :])
# proceed with alternating observations of the regular state vector
sys_dim = state_dim
else
obs = copy(ens)
end
# project the state vector into the correct components
obs = alternating_projector(obs, obs_dim)
if haskey(kwargs, "γ")
γ = kwargs["γ"]::Float64
if γ > 1.0
for i in 1:N_ens
x = obs[:, i]
obs[:, i] .= (x / 2.0) .* ( 1.0 .+ ( abs.(x) / 10.0 ).^(γ - 1.0) )
end
elseif γ == 0.0
obs = 0.05*obs.^2.0
elseif γ < 0.0
for i in 1:N_ens
x = obs[:, i]
obs[:, i] .= x .* exp.(-γ * x)
end
end
end
return obs
end
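# A sketch of the optional γ transformations on a single state (hypothetical values,
# not part of the test suite):
#
#     kwargs = Dict{String, Any}("γ" => 1.0)
#     alternating_obs_operator(ones(4), 2, kwargs)    # linear projection: [1.0, 1.0]
#     kwargs = Dict{String, Any}("γ" => 0.0)
#     alternating_obs_operator(ones(4), 2, kwargs)    # quadratic operator: [0.05, 0.05]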
##############################################################################################
"""
alternating_obs_operator_jacobian(x::VecA(T), obs_dim::Int64,
kwargs::StepKwargs) where T <: Real
Explicitly computes the Jacobian of the alternating observation operator
given a single model state `x` of type [`VecA`](@ref) and the desired dimension of
observations `obs_dim` for the Jacobian. The `γ` parameter (optional) in `kwargs` of type
[`StepKwargs`](@ref) controls the component-wise transformation of the remaining state
vector components mapped to the observation space. For `γ=1.0`, there is no
transformation applied, and the observation operator acts as a linear projection onto
the remaining components of the state vector, equivalent to not specifying `γ`.
For `γ>1.0`, the nonlinear observation operator of
[Asch, et al. (2016).](https://epubs.siam.org/doi/book/10.1137/1.9781611974546),
pg. 181 is applied, which limits to the identity for `γ=1.0`. If `γ=0.0`, the quadratic
observation operator of [Hoteit, et al. (2012).](https://journals.ametsoc.org/view/journals/mwre/140/2/2011mwr3640.1.xml)
is applied to the remaining state components. If `γ<0.0`, the exponential observation
operator of [Wu, et al. (2014).](https://npg.copernicus.org/articles/21/955/2014/)
is applied to the remaining state vector components.
"""
function alternating_obs_operator_jacobian(x::VecA(T), obs_dim::Int64,
kwargs::StepKwargs) where T <: Real
sys_dim = length(x)
if haskey(kwargs, "state_dim")
# performing parameter estimation, load the dynamic state dimension
state_dim = kwargs["state_dim"]::Int64
# observation operator for extended state, without observing extended state components
jac = copy(x[1:state_dim])
# proceed with alternating observations of the regular state vector
sys_dim = state_dim
else
jac = copy(x)
end
# jacobian calculation
if haskey(kwargs, "γ")
γ = kwargs["γ"]::Float64
if γ > 1.0
jac .= (1.0 / 2.0) .* (((jac .*(γ - 1.0) / 10.0) .* (( abs.(jac) / 10.0 ).^(γ - 2.0))) .+ 1.0 .+ (( abs.(jac) / 10.0 ).^(γ - 1.0)))
elseif γ == 0.0
jac = 0.1.*jac
elseif γ < 0.0
jac .= exp.(-γ * jac) .* (1.0 .- (γ * jac))
end
end
# matrix formation and projection
jacobian_matrix = alternating_projector(diagm(jac), obs_dim)
return jacobian_matrix
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 2953 | ##############################################################################################
module TestClassicSmootherExps
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.SingleExperimentDriver
using DataAssimilationBenchmarks.SmootherExps
using JLD2, Statistics
##############################################################################################
# run and analyze the ETKS for state estimation with the Lorenz-96 model
function run_ensemble_smoother_state_L96()
try
classic_ensemble_state(classic_enks_exps["L96_ETKS_state_test"])
true
catch
false
end
end
function analyze_ensemble_smoother_state_L96()
try
# test if the filter and posterior RMSE for the standard simulation fall below adequate thresholds
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-classic/"
data = load(path * "etks-classic_L96_state_seed_0000_diff_0.000_sysD_40_obsD_40_" *
"obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_lag_010_shift_001_" *
"mda_false_nens_021_stateInfl_1.02.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filt_rmse[501:end]) < 0.2 && mean(post_rmse[501:end]) < 0.15
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the ETKS for joint state-parameter estimation with the Lorenz-96 model
function run_ensemble_smoother_param_L96()
try
classic_ensemble_param(classic_enks_exps["L96_ETKS_param_test"])
true
catch
false
end
end
function analyze_ensemble_smoother_param_L96()
try
# test if the filter, posterior, and parameter RMSE for the standard simulation fall below adequate thresholds
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-classic/"
data = load(path * "etks-classic_L96_param_seed_0000_diff_0.000_sysD_41_obsD_40_" *
"obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500_tanl_0.05_" *
"h_0.05_lag_010_shift_001_mda_false_nens_021_stateInfl_1.02_" *
"paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(post_rmse[501:end]) < 0.15) &&
(mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 587 | ##############################################################################################
module TestDataAssimilationBenchmarks
##############################################################################################
using DataAssimilationBenchmarks
##############################################################################################
function splash()
try
DataAssimilationBenchmarks.Info()
true
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 3970 | ##############################################################################################
module TestDeSolvers
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.DeSolvers, DataAssimilationBenchmarks.L96
using LsqFit
##############################################################################################
"""
exponentialODE(x::VecA(T), t::T, dx_params::ParamDict(T)) where T <: Float64
Wrapper for making a vectorized output of the exponential function for the DE solvers. This
is used to verify the order of convergence for integration methods versus an analytical
solution.
"""
function exponentialODE(x::VecA(T), t::T, dx_params::ParamDict(T)) where T <: Float64
[exp(t)]
end
##############################################################################################
"""
expDiscretizationError(step_model!, h)
Auxiliary function to compute the difference of the numerically simulated integral versus
the analytical value. The error is a function of the time step and the integration method;
varying the step size demonstrates the expected reduction in discretization error.
"""
function expDiscretizationError(step_model!, h::Float64)
# continuous time length of the integration
tanl = 0.1
# discrete integration steps
fore_steps = convert(Int64, tanl/h)
time_steps = LinRange(0, tanl, fore_steps + 1)
# initial data for the exponential function
x = [1.0]
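# with x(0) = 1 and dx/dt = exp(t), the exact solution is x(t) = exp(t)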
# set the kwargs for the integration scheme
# with empty values for the unnecessary parameters
diffusion = 0.0
dx_params = Dict{String, Array{Float64}}()
kwargs = Dict{String, Any}(
"h" => h,
"diffusion" => diffusion,
"dx_params" => dx_params,
"dx_dt" => exponentialODE,
)
for i in 1:fore_steps
step_model!(x, time_steps[i], kwargs)
end
# find the absolute difference of the approximate integral from the built-in exponential
abs(x[1] - exp(tanl))
end
##############################################################################################
"""
calculateOrderConvergence(step_model!)
Auxiliary function to compute the least-squares estimated order of convergence for the
numerical integration schemes. This ranges over step sizes for a given integration method
and calculates the log-10 / log-10 slope and intercept of the error with respect to the
step size.
"""
function calculateOrderConvergence(step_model!)
# set step sizes in increasing order for log-10 log-10 analysis
h_range = [0.005, 0.01, 0.05, 0.1]
error_range = Vector{Float64}(undef, length(h_range))
# loop the discretization and calculate the errors
for i in 1:length(h_range)
error_range[i] = expDiscretizationError(step_model!, h_range[i])
end
# convert the error and the step sizes to log-10
h_range_log10 = log10.(h_range)
error_range_log10 = log10.(error_range)
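# for a method of order p the error scales as error(h) ≈ C * h^p, so
# log10(error) = log10(C) + p * log10(h) and the fitted slope estimates p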
function model_lsq_squares(x,p)
# define a function object to vary parameters p
@. p[1] + p[2]*x
end
# fit the best-fit line and return coefficients
fit = curve_fit(model_lsq_squares, h_range_log10, error_range_log10, [1.0, 1.0])
coef(fit)
end
##############################################################################################
function testEMExponential()
coef = calculateOrderConvergence(em_step!)
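# with zero diffusion the Euler(-Maruyama) scheme reduces to the explicit
# Euler method, which has global order of convergence 1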
if abs(coef[2] - 1.0) > 0.1
false
else
true
end
end
##############################################################################################
function testRKExponential()
coef = calculateOrderConvergence(rk4_step!)
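# the classical four-stage Runge-Kutta scheme has global order of convergence 4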
if abs(coef[2] - 4.0) > 0.1
false
else
true
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 4703 | ##############################################################################################
module TestFilterExps
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.SingleExperimentDriver
using DataAssimilationBenchmarks.FilterExps
using JLD2, Statistics
##############################################################################################
# run and analyze the ETKF for state estimation with the Lorenz-96 model
function run_ensemble_filter_state_L96()
try
ensemble_filter_state(enkf_exps["L96_ETKF_state_test"])
true
catch
false
end
end
function analyze_ensemble_filter_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etkf/"
data = load(path * "etkf_L96_state_seed_0000_diff_0.000_sysD_40_obsD_40" *
"_obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_nens_021" *
"_stateInfl_1.02.jld2")
rmse = data["filt_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(rmse[501:end]) < 0.2
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the 3D-VAR filter for state estimation with the Lorenz-96 model
function run_D3_var_filter_state_L96()
try
D3_var_filter_state(d3_var_exps["L96_D3_var_state_test"])
true
catch
false
end
end
function analyze_D3_var_filter_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/D3-var-bkg-ID/"
data = load(path * "bkg-ID_L96_state_seed_0000_diff_0.000_sysD_40_obsD_40_obsU_"
* "1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_stateInfl_0.230.jld2")
nanl = data["nanl"]
rmse = data["filt_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filter(!isnan,rmse[1000:nanl])) < 0.41
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the ETKF for joint state-parameter estimation with the Lorenz-96 model
function run_ensemble_filter_param_L96()
try
ensemble_filter_param(enkf_exps["L96_ETKF_param_test"])
true
catch
false
end
end
function analyze_ensemble_filter_param_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etkf/"
data = load(path * "etkf_L96_param_seed_0000_diff_0.000_sysD_41_stateD_40_obsD_40_" *
"obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500_tanl_0.05_" *
"h_0.05_nens_021_stateInfl_1.02_paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the ETKF for state estimation with the IEEE39bus model
# static version for test cases
function run_ensemble_filter_state_IEEE39bus()
try
ensemble_filter_state(enkf_exps["IEEE39bus_ETKF_state_test"])
true
catch
false
end
end
function analyze_ensemble_filter_state_IEEE39bus()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etkf/"
data = load(path * "etkf_IEEE39bus_state_seed_0000_diff_0.000_sysD_20_obsD_20_" *
"obsU_0.10_gamma_001.0_nanl_03500_tanl_0.01_h_0.01_nens_021_" *
"stateInfl_1.02.jld2")
rmse = data["filt_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(rmse[501:end]) < 0.02
true
else
false
end
catch
false
end
end
##############################################################################################
# end module
end | DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 1805 | ##############################################################################################
module TestGenerateTimeSeries
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.SingleExperimentDriver
using DataAssimilationBenchmarks.GenerateTimeSeries
using JLD2, Random
##############################################################################################
# Test generation and loading of the L96 model time series in default localtion
function testGenL96()
try
L96_time_series(time_series_exps["L96_deterministic_test"])
true
catch
false
end
end
function testLoadL96()
try
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
load(path * "L96_time_series_seed_0000_dim_40_diff_0.000_F_08.0_tanl_0.05" *
"_nanl_05000_spin_1500_h_0.050.jld2")
true
catch
false
end
end
##############################################################################################
# Test generation and loading of the IEEE39 bus model time series in the default location
function testGenIEEE39bus()
try
IEEE39bus_time_series(time_series_exps["IEEE39bus_deterministic_test"])
true
catch
false
end
end
function testLoadIEEE39bus()
try
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
load(path * "IEEE39bus_time_series_seed_0000_diff_0.000_tanl_0.01" *
"_nanl_05000_spin_1500_h_0.010.jld2")
true
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 1290 | ##############################################################################################
module TestIEEE39bus
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using JLD2, Statistics
##############################################################################################
"""
test_synchrony()
This function tests whether the swing equation model without noise reaches the synchronous
steady state of the system by evaluating the standard deviation of the state components
after the spin-up period.
"""
function test_synchrony()
try
# load the observations
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
obs = load(path * "IEEE39bus_time_series_seed_0000_diff_0.000_tanl_0.01" *
"_nanl_05000_spin_1500_h_0.010.jld2")
obs = obs["obs"][:, 3001:end]
# take the standard deviation of the model state after warm up
sd = std(obs, dims=2)
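# in the synchronous steady state each of the 20 components is constant in
# time, so its standard deviation over the series should be near zero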
if sum(sd .< 0.01) == 20
true
else
false
end
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 5092 | ##############################################################################################
module TestIterativeSmootherExps
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.SmootherExps
using DataAssimilationBenchmarks.SingleExperimentDriver
using JLD2, Statistics
##############################################################################################
# run and analyze the IEnKS for state estimation with the Lorenz-96 model
function run_sda_ensemble_smoother_state_L96()
try
iterative_ensemble_state(ienks_exps["L96_IEnKS_state_sda_test"])
true
catch
false
end
end
function analyze_sda_ensemble_smoother_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/ienks-transform/"
data = load(path * "ienks-transform_L96_state_seed_0000_diff_0.000_sysD_40_obsD_40_" *
"obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_lag_010_shift_001_" *
"mda_false_nens_021_stateInfl_1.02.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filt_rmse[501:end]) < 0.2 && mean(post_rmse[501:end]) < 0.10
true
else
false
end
catch
false
end
end
function run_mda_ensemble_smoother_state_L96()
try
iterative_ensemble_state(ienks_exps["L96_IEnKS_state_mda_test"])
true
catch
false
end
end
function analyze_mda_ensemble_smoother_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/ienks-transform/"
data = load(path * "ienks-transform_L96_state_seed_0000_diff_0.000_sysD_40_obsD_40_" *
"obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_lag_010_shift_001_" *
"mda_true_nens_021_stateInfl_1.02.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filt_rmse[501:end]) < 0.2 && mean(post_rmse[501:end]) < 0.10
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the IEnKS for joint state-parameter estimation with the Lorenz-96 model
function run_sda_ensemble_smoother_param_L96()
try
iterative_ensemble_param(ienks_exps["L96_IEnKS_param_sda_test"])
true
catch
false
end
end
function analyze_sda_ensemble_smoother_param_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/ienks-transform/"
data = load(path * "ienks-transform_L96_param_seed_0000_diff_0.000_sysD_41_obsD_40_" *
"obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500_tanl_0.05_" *
"h_0.05_lag_010_shift_001_mda_false_nens_021_stateInfl_1.02_" *
"paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(post_rmse[501:end]) < 0.10) &&
(mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
function run_mda_ensemble_smoother_param_L96()
try
iterative_ensemble_param(ienks_exps["L96_IEnKS_param_mda_test"])
true
catch
false
end
end
function analyze_mda_ensemble_smoother_param_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/ienks-transform/"
data = load(path * "ienks-transform_L96_param_seed_0000_diff_0.000_sysD_41_obsD_40_" *
"obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500_tanl_0.05_" *
"h_0.05_lag_010_shift_001_mda_true_nens_021_stateInfl_1.02_" *
"paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(post_rmse[501:end]) < 0.10) &&
(mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 3195 | ##############################################################################################
module TestL96
##############################################################################################
using DataAssimilationBenchmarks.DeSolvers, DataAssimilationBenchmarks.L96
using ForwardDiff
##############################################################################################
"""
Jacobian()
Tests the L96 jacobian function for known behavior with automatic differentiation.
Returns whether the difference of the computed jacobians is within error tolerance for
every entry.
"""
function Jacobian()
# dummy time argument
t = 0.0
# forcing parameter
F = 8.0
dx_params = Dict{String, Array{Float64}}("F" => [F])
# wrapper function
function wrap_dx_dt(x)
L96.dx_dt(x, t, dx_params)
end
# model state
x = Vector{Float64}(1:40)
# compute difference between ForwardDiff and L96 calculated jacobians
diff = Matrix(ForwardDiff.jacobian(wrap_dx_dt, x) - Matrix(L96.jacobian(x, t, dx_params)))
# compare within error tolerance for every entry
if sum((abs.(diff)) .<= 0.01) == 40*40
true
else
false
end
end
##############################################################################################
"""
EMZerosStep()
Tests the L96 derivative function for known behavior with the Euler(-Maruyama) method.
The initial condition of zeros returns h * F in all components.
"""
function EMZerosStep()
# step size
h = 0.01
# forcing parameter
F = 8.0
dx_params = Dict{String, Array{Float64}}("F" => [F])
# initial conditions and arguments
x = zeros(40)
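# at the origin the quadratic advection and linear damping terms vanish,
# so dx_i/dt = F and a single EM step of size h yields x_i = h * F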
# parameters to test
kwargs = Dict{String, Any}(
"h" => h,
"diffusion" => 0.0,
"dx_params" => dx_params,
"dx_dt" => L96.dx_dt,
)
# em_step! writes over x in place
em_step!(x, 0.0, kwargs)
# evaluate test pass/fail if the vector x is equal to (F*h) in every instance
if sum(x .== (F*h)) == 40
true
else
false
end
end
##############################################################################################
"""
EMFStep()
Tests the L96 derivative function for known behavior with the Euler(-Maruyama) method.
The vector with all components equal to the forcing parameter F is a fixed point for the
system, and the time derivative should be zero with this initial condition.
"""
function EMFStep()
# step size
h = 0.01
# forcing parameter
F = 8.0
dx_params = Dict{String, Array{Float64}}("F" => [F])
# initial conditions and arguments
x = ones(40)
x = x * F
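# with every component equal to F, the advection term (x_{i+1} - x_{i-2}) * x_{i-1}
# vanishes and -x_i + F = 0, so this state is a fixed point of the system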
# parameters to test
kwargs = Dict{String, Any}(
"h" => h,
"diffusion" => 0.0,
"dx_params" => dx_params,
"dx_dt" => L96.dx_dt,
)
# em_step! writes over x in place
em_step!(x, 0.0, kwargs)
# evaluate test pass/fail if the vector x remains equal to F in every instance
if sum(x .== (F)) == 40
true
else
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 3908 | ##############################################################################################
module TestObsOperators
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks.ObsOperators
using ForwardDiff
##############################################################################################
"""
alternating_obs_jacobian_pos()
Tests the alternating observation operator jacobian function for known behavior with automatic
differentiation using 'γ' > 1.0.
Returns whether the difference of computed jacobian is within error tolerance for every entry
"""
function alternating_obs_jacobian_pos()
# 1-D ensemble argument
x = Vector{Float64}(1:40)
# observation dimension
obs_dim = 20
# test gammas
gam_pos = Dict{String, Any}("γ" => 2.0)
# wrapper function for γ > 1.0
function wrap_pos(x)
ObsOperators.alternating_obs_operator(x, obs_dim, gam_pos)
end
# jacobian computed via automatic differentiation
jacob_auto = ForwardDiff.jacobian(wrap_pos, x)
# compute differences between ForwardDiff and ObsOperators calculated jacobians
diff = jacob_auto - ObsOperators.alternating_obs_operator_jacobian(x, obs_dim, gam_pos)
# compare within error tolerance for every entry across all differences
if sum((abs.(diff)) .<= 0.001) == 20*40
true
else
false
end
end
##############################################################################################
"""
alternating_obs_jacobian_zero()
Tests the alternating observation operator jacobian function for known behavior with automatic
differentiation using 'γ' == 0.0.
Returns whether the difference of the computed jacobians is within error tolerance for
every entry.
"""
function alternating_obs_jacobian_zero()
# 1-D ensemble argument
x = Vector{Float64}(1:40)
# observation dimension
obs_dim = 20
# test gamma (γ == 0.0)
gam_zero = Dict{String, Any}("γ" => 0.0)
# wrapper function for γ == 0.0
function wrap_zero(x)
ObsOperators.alternating_obs_operator(x, obs_dim, gam_zero)
end
# jacobian computed via automatic differentiation
jacob_auto = ForwardDiff.jacobian(wrap_zero, x)
# compute difference between ForwardDiff and ObsOperators calculated jacobian
diff = jacob_auto - ObsOperators.alternating_obs_operator_jacobian(x, obs_dim, gam_zero)
# compare within error tolerance for every entry of difference matrix
if sum((abs.(diff)) .<= 0.01) == 20*40
true
else
false
end
end
##############################################################################################
"""
alternating_obs_jacobian_neg()
Tests the alternating observation operator jacobian function for known behavior with automatic
differentiation using 'γ' < 0.0.
Returns whether the difference of the computed jacobians is within error tolerance for
every entry.
"""
function alternating_obs_jacobian_neg()
# 1-D ensemble argument
x = Vector{Float64}(1:40)
# observation dimension
obs_dim = 20
# test gamma (γ < 0.0)
gam_neg = Dict{String, Any}("γ" => -0.5)
# wrapper function for γ < 0.0
function wrap_neg(x)
ObsOperators.alternating_obs_operator(x, obs_dim, gam_neg)
end
# jacobian computed via automatic differentiation
jacob_auto = ForwardDiff.jacobian(wrap_neg, x)
# compute difference between ForwardDiff and ObsOperators calculated jacobian
diff = jacob_auto - ObsOperators.alternating_obs_operator_jacobian(x, obs_dim, gam_neg)
# compare within error tolerance for every entry of difference matrix
if sum((abs.(diff)) .<= 0.001) == 20*40
true
else
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 3072 | ##############################################################################################
module TestParallelExperimentDriver
##############################################################################################
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.ParallelExperimentDriver
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_ensemble_filter_adaptive_inflation()
try
args, wrap_exp = ensemble_filter_adaptive_inflation()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_D3_var_tuned_inflation()
try
args, wrap_exp = D3_var_tuned_inflation()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_ensemble_filter_param()
try
args, wrap_exp = ensemble_filter_param()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_classic_ensemble_state()
try
args, wrap_exp = classic_ensemble_state()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_classic_ensemble_param()
try
args, wrap_exp = classic_ensemble_param()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_single_iteration_ensemble_state()
try
args, wrap_exp = single_iteration_ensemble_state()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
"""
Tests whether the named tuple constructor will generate the experiment configuration.
"""
function test_iterative_ensemble_state()
try
args, wrap_exp = iterative_ensemble_state()
wrap_exp(args[1])
true
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 5174 | ##############################################################################################
module TestSingleIterationSmootherExps
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.SmootherExps
using DataAssimilationBenchmarks.SingleExperimentDriver
using JLD2, Statistics
##############################################################################################
# run and analyze the SIEnKS for state estimation with the Lorenz-96 model
function run_sda_ensemble_smoother_state_L96()
try
single_iteration_ensemble_state(sienks_exps["L96_ETKS_state_sda_test"])
true
catch
false
end
end
function analyze_sda_ensemble_smoother_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-single-iteration/"
data = load(path * "etks-single-iteration_L96_state_seed_0000_diff_0.000_sysD_40_" *
"obsD_40_obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_lag_010_" *
"shift_001_mda_false_nens_021_stateInfl_1.02.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filt_rmse[501:end]) < 0.2 && mean(post_rmse[501:end]) < 0.10
true
else
false
end
catch
false
end
end
function run_mda_ensemble_smoother_state_L96()
try
single_iteration_ensemble_state(sienks_exps["L96_ETKS_state_mda_test"])
true
catch
false
end
end
function analyze_mda_ensemble_smoother_state_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-single-iteration/"
data = load(path * "etks-single-iteration_L96_state_seed_0000_diff_0.000_sysD_40_" *
"obsD_40_obsU_1.00_gamma_001.0_nanl_03500_tanl_0.05_h_0.05_lag_010_" *
"shift_001_mda_true_nens_021_stateInfl_1.02.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
# note, we use a small burn-in to reach more regular cycles
if mean(filt_rmse[501:end]) < 0.2 && mean(post_rmse[501:end]) < 0.10
true
else
false
end
catch
false
end
end
##############################################################################################
# run and analyze the SIEnKS for joint state-parameter estimation with the Lorenz-96 model
function run_sda_ensemble_smoother_param_L96()
try
single_iteration_ensemble_param(sienks_exps["L96_ETKS_param_sda_test"])
true
catch
false
end
end
function analyze_sda_ensemble_smoother_param_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-single-iteration/"
data = load(path * "etks-single-iteration_L96_param_seed_0000_diff_0.000_sysD_41" *
"_obsD_40_obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500" *
"_tanl_0.05_h_0.05_lag_010_shift_001_mda_false_nens_021_" *
"stateInfl_1.02_paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(post_rmse[501:end]) < 0.10) &&
(mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
function run_mda_ensemble_smoother_param_L96()
try
single_iteration_ensemble_param(sienks_exps["L96_ETKS_param_mda_test"])
true
catch
false
end
end
function analyze_mda_ensemble_smoother_param_L96()
try
# test if the filter RMSE for standard simulation falls below adequate threshold
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/etks-single-iteration/"
data = load(path * "etks-single-iteration_L96_param_seed_0000_diff_0.000_sysD_41_" *
"obsD_40_obsU_1.00_gamma_001.0_paramE_0.10_paramW_0.0010_nanl_03500_" *
"tanl_0.05_h_0.05_lag_010_shift_001_mda_true_nens_021_stateInfl_1.02_" *
"paramInfl_1.00.jld2")
filt_rmse = data["filt_rmse"]
post_rmse = data["post_rmse"]
para_rmse = data["param_rmse"]
# note, we use a small burn-in to reach more regular cycles
if (mean(filt_rmse[501:end]) < 0.2) && (mean(post_rmse[501:end]) < 0.10) &&
(mean(para_rmse[501:end]) < 0.01)
true
else
false
end
catch
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 2940 | ##############################################################################################
module TestVarAD
##############################################################################################
# imports and exports
using DataAssimilationBenchmarks.XdVAR, DataAssimilationBenchmarks.ObsOperators
using ForwardDiff, LinearAlgebra, Random, Distributions
##############################################################################################
"""
testCost()
Tests the 3dVAR cost function for known behavior.
"""
function testCost()
# initialization
x = ones(40) * 0.5
obs = zeros(40)
x_bkg = ones(40)
state_cov = 1.0I
obs_cov = 1.0I
params = Dict{String, Any}()
H_obs = alternating_obs_operator
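# note: with identity covariances, a full 40-dimensional observation, x = 0.5,
# x_bkg = 1, and obs = 0, the quadratic 3D-VAR cost evaluates analytically to
# 0.5 * 40 * (0.5)^2 + 0.5 * 40 * (0.5)^2 = 10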
cost = D3_var_cost(x, obs, x_bkg, state_cov, H_obs, obs_cov, params)
if abs(cost - 10) < 0.001
true
else
false
end
end
##############################################################################################
"""
testGrad()
Tests the gradient of the 3dVAR cost function for known behavior using ForwardDiff.
"""
function testGrad()
# initialization
obs = zeros(40)
x_bkg = ones(40)
state_cov = 1.0I
obs_cov = 1.0I
params = Dict{String, Any}()
H_obs = alternating_obs_operator
# wrapper function
function wrap_cost(x)
D3_var_cost(x, obs, x_bkg, state_cov, H_obs, obs_cov, params)
end
# input
x = ones(40) * 0.5
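# x is the analytic minimizer (the midpoint of x_bkg and obs under identity
# covariances), so the gradient of the cost should vanish here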
grad = ForwardDiff.gradient(wrap_cost, x)
if norm(grad) < 0.001
true
else
false
end
end
##############################################################################################
"""
testNewton()
Tests the Newton optimization of the 3dVAR cost function.
"""
function testNewton()
# initialization
obs = zeros(40)
x_bkg = ones(40)
state_cov = 1.0I
obs_cov = 1.0I
params = Dict{String, Any}()
H_obs = alternating_obs_operator
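# under identity covariances the analytic minimizer is the midpoint
# (x_bkg + obs) / 2 = 0.5 in every component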
# perform Simple Newton optimization
op = D3_var_NewtonOp(x_bkg, obs, state_cov, H_obs, obs_cov, params)
if abs(sum(op - ones(40) * 0.5)) < 0.001
true
else
false
end
end
##############################################################################################
"""
testNewtonNoise()
Tests the Newton optimization of the 3dVAR cost function with noise.
"""
function testNewtonNoise()
# initialization
Random.seed!(123)
obs = rand(Normal(0, 1), 40)
x_bkg = zeros(40)
state_cov = 1.0I
obs_cov = 1.0I
params = Dict{String, Any}()
H_obs = alternating_obs_operator
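# with x_bkg = 0 the analytic minimizer (x_bkg + obs) / 2 reduces to obs / 2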
# perform Simple Newton optimization
op = D3_var_NewtonOp(x_bkg, obs, state_cov, H_obs, obs_cov, params)
if abs(sum(op - obs * 0.5)) < 0.001
true
else
false
end
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | code | 5376 | ##############################################################################################
module runtests
##############################################################################################
# imports and exports
using Test
using JLD2
##############################################################################################
# include test sub-modules
include("TestDataAssimilationBenchmarks.jl")
include("TestObsOperators.jl")
include("TestVarAD.jl")
include("TestDeSolvers.jl")
include("TestL96.jl")
include("TestGenerateTimeSeries.jl")
include("TestIEEE39bus.jl")
include("TestFilterExps.jl")
include("TestClassicSmootherExps.jl")
include("TestIterativeSmootherExps.jl")
include("TestSingleIterationSmootherExps.jl")
include("TestParallelExperimentDriver.jl")
##############################################################################################
# Run tests
@testset "ParentModule" begin
@test TestDataAssimilationBenchmarks.splash()
end
# Test Observation Operators jacobian
@testset "Observation Operators" begin
@test TestObsOperators.alternating_obs_jacobian_pos()
@test TestObsOperators.alternating_obs_jacobian_zero()
@test TestObsOperators.alternating_obs_jacobian_neg()
end
# Calculate the order of convergence for standard integrators
@testset "Calculate Order Convergence" begin
@test TestDeSolvers.testEMExponential()
@test TestDeSolvers.testRKExponential()
end
# Test L96 model equations for known behavior
@testset "Lorenz-96" begin
@test TestL96.Jacobian()
@test TestL96.EMZerosStep()
@test TestL96.EMFStep()
end
# Test time series generation, saving output to default directory and loading
@testset "Generate Time Series" begin
@test TestGenerateTimeSeries.testGenL96()
@test TestGenerateTimeSeries.testLoadL96()
@test TestGenerateTimeSeries.testGenIEEE39bus()
@test TestGenerateTimeSeries.testLoadIEEE39bus()
end
# Test the model equations for known behavior
@testset "IEEE 39 Bus" begin
@test TestIEEE39bus.test_synchrony()
end
# Test 3D-VAR
@testset "VAR-AutoDiff" begin
@test TestVarAD.testCost()
@test TestVarAD.testGrad()
@test TestVarAD.testNewton()
@test TestVarAD.testNewtonNoise()
end
# Test filter state and parameter experiments
@testset "Filter Experiments" begin
@test TestFilterExps.run_ensemble_filter_state_L96()
@test TestFilterExps.analyze_ensemble_filter_state_L96()
@test TestFilterExps.run_D3_var_filter_state_L96()
@test TestFilterExps.analyze_D3_var_filter_state_L96()
@test TestFilterExps.run_ensemble_filter_param_L96()
@test TestFilterExps.analyze_ensemble_filter_param_L96()
@test TestFilterExps.run_ensemble_filter_state_IEEE39bus()
@test TestFilterExps.analyze_ensemble_filter_state_IEEE39bus()
end
# Test classic smoother state and parameter experiments
@testset "Classic Smoother Experiments" begin
@test TestClassicSmootherExps.run_ensemble_smoother_state_L96()
@test TestClassicSmootherExps.analyze_ensemble_smoother_state_L96()
@test TestClassicSmootherExps.run_ensemble_smoother_param_L96()
@test TestClassicSmootherExps.analyze_ensemble_smoother_param_L96()
end
# Test IEnKS smoother state and parameter experiments
@testset "Iterative Smoother Experiments" begin
@test TestIterativeSmootherExps.run_sda_ensemble_smoother_state_L96()
@test TestIterativeSmootherExps.analyze_sda_ensemble_smoother_state_L96()
@test TestIterativeSmootherExps.run_sda_ensemble_smoother_param_L96()
@test TestIterativeSmootherExps.analyze_sda_ensemble_smoother_param_L96()
@test TestIterativeSmootherExps.run_mda_ensemble_smoother_state_L96()
@test TestIterativeSmootherExps.analyze_mda_ensemble_smoother_state_L96()
@test TestIterativeSmootherExps.run_mda_ensemble_smoother_param_L96()
@test TestIterativeSmootherExps.analyze_mda_ensemble_smoother_param_L96()
end
# Test SIEnKS smoother state and parameter experiments
@testset "Single Iteration Smoother Experiments" begin
@test TestSingleIterationSmootherExps.run_sda_ensemble_smoother_state_L96()
@test TestSingleIterationSmootherExps.analyze_sda_ensemble_smoother_state_L96()
@test TestSingleIterationSmootherExps.run_sda_ensemble_smoother_param_L96()
@test TestSingleIterationSmootherExps.analyze_sda_ensemble_smoother_param_L96()
@test TestSingleIterationSmootherExps.run_mda_ensemble_smoother_state_L96()
@test TestSingleIterationSmootherExps.analyze_mda_ensemble_smoother_state_L96()
@test TestSingleIterationSmootherExps.run_mda_ensemble_smoother_param_L96()
@test TestSingleIterationSmootherExps.analyze_mda_ensemble_smoother_param_L96()
end
# Test parallel experiment constructors
@testset "Parallel experiment constructors" begin
@test TestParallelExperimentDriver.test_ensemble_filter_adaptive_inflation()
@test TestParallelExperimentDriver.test_D3_var_tuned_inflation()
@test TestParallelExperimentDriver.test_ensemble_filter_param()
@test TestParallelExperimentDriver.test_classic_ensemble_state()
@test TestParallelExperimentDriver.test_classic_ensemble_param()
@test TestParallelExperimentDriver.test_single_iteration_ensemble_state()
@test TestParallelExperimentDriver.test_iterative_ensemble_state()
end
##############################################################################################
# end module
end
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 5471 | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the project maintainer, Colin Grudzien, [email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 3237 | # DataAssimilationBenchmarks.jl

<table>
<tr>
<td>
<a href="https://cgrudz.github.io/DataAssimilationBenchmarks.jl/dev">
<img src="https://img.shields.io/badge/docs-dev-purple.svg" alt="docs-dev-img">
</a>
</td>
<td>
<a href="https://cgrudz.github.io/DataAssimilationBenchmarks.jl/stable">
<img src="https://img.shields.io/badge/docs-stable-blue.svg" alt="docs-stable-img">
</a>
</td>
<td>
<a href="https://joss.theoj.org/papers/478dcc0b1608d2a4d8c930edebb58736">
<img src="https://joss.theoj.org/papers/478dcc0b1608d2a4d8c930edebb58736/status.svg" alt="status">
</a>
</td>
<td>
<a href="https://github.com/cgrudz/DataAssimilationBenchmarks.jl">
<img src="https://tokei.rs/b1/github/cgrudz/DataAssimilationBenchmarks.jl?category=code" alt="Total lines of code without comments">
</a>
</td>
<td>
<a href="https://app.travis-ci.com/cgrudz/DataAssimilationBenchmarks.jl">
<img src="https://app.travis-ci.com/cgrudz/DataAssimilationBenchmarks.jl.svg?branch=master" alt="Build Status">
</a>
</td>
<td>
<a href="https://codecov.io/gh/cgrudz/DataAssimilationBenchmarks.jl">
<img src="https://codecov.io/gh/cgrudz/DataAssimilationBenchmarks.jl/branch/master/graph/badge.svg?token=3XLYTH8YSZ" alt="codecov">
</a>
</td>
</tr>
</table>
Lines of code counter (without comments or blank lines) courtesy of [Tokei](https://github.com/XAMPPRocky/tokei).
## Welcome to DataAssimilationBenchmarks.jl!
### Description
This is a data assimilation research code base with an emphasis on prototyping, testing and
validating sequential filters and smoothers in toy model twin experiments.
This code is meant to be performant in the sense that large hyper-parameter discretizations
can be explored to determine hyper-parameter sensitivity and reliability of results across
different experimental regimes, with parallel implementations in native Julia distributed
computing.
This package currently includes code for developing and testing data assimilation schemes in
the [L96-s model](https://gmd.copernicus.org/articles/13/1903/2020/) and the IEEE 39 bus test
case in the form of the [effective network
model](https://iopscience.iop.org/article/10.1088/1367-2630/17/1/015012)
model equations. New toy models and data assimilation schemes are in continuous development
in the development branch. Currently validated techniques are available in the master
branch.
This package supported the development of all numerical results and benchmark simulations
in the manuscript
[A fast, single-iteration ensemble Kalman smoother for sequential data
assimilation](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
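### Installation
The package is registered in the Julia General Registries; a minimal sketch of installing
it and running the test suite with the standard package manager:
```julia
using Pkg
Pkg.add("DataAssimilationBenchmarks")
Pkg.test("DataAssimilationBenchmarks")
```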
### Documentation
Please see the [up-to-date documentation](https://cgrudz.github.io/DataAssimilationBenchmarks.jl/dev/)
synchronized with the [master branch](https://github.com/cgrudz/DataAssimilationBenchmarks.jl)
or the [stable documentation](https://cgrudz.github.io/DataAssimilationBenchmarks.jl/stable/)
for the last tagged version in the [Julia General Registries](https://github.com/JuliaRegistries/General).
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 6914 | ---
title: 'DataAssimilationBenchmarks.jl: a data assimilation research framework.'
tags:
- Julia
- Data Assimilation
- Bayesian Inference
- Optimization
- Machine learning
authors:
- name: Colin Grudzien
orcid: 0000-0002-3084-3178
affiliation: "1,2"
- name: Charlotte Merchant
affiliation: "1,3"
- name: Sukhreen Sandhu
affiliation: "4"
affiliations:
- name: CW3E - Scripps Institution of Oceanography, University of California, San Diego, USA
index: 1
- name: Department of Mathematics and Statistics, University of Nevada, Reno, USA
index: 2
- name: Department of Computer Science, Princeton University, USA
index: 3
- name: Department of Computer Science and Engineering, University of Nevada, Reno, USA
index: 4
date: 10 September 2022
bibliography: paper.bib
---
# Summary
Data assimilation (DA) refers to techniques used to combine the data from physics-based,
numerical models and real-world observations to produce an estimate for the state of a
time-evolving random process and the parameters that govern its evolution [@asch2016data].
Owing to their history in numerical weather prediction, full-scale DA systems are designed
to operate in an extremely large dimension of model variables and observations, often with
sequential-in-time observational data [@carrassi2018data]. As a long-studied "big-data"
problem, DA has benefited from the fusion of a variety of techniques, including methods
from Bayesian inference, dynamical systems, numerical analysis, optimization, control
theory, and machine learning. DA techniques are widely used in many
areas of geosciences, neurosciences, biology, autonomous vehicle guidance, and various
engineering applications requiring dynamic state estimation and control.
The purpose of this package is to provide a research framework for the theoretical
development and empirical validation of novel data assimilation techniques.
While analytical proofs can be derived for classical methods, such as the Kalman filter
in linear-Gaussian dynamics [@jazwinski2007stochastic], most currently developed DA
techniques are designed for estimation in nonlinear, non-Gaussian models where no
analytical solution may exist. DA methods,
therefore, must be studied with rigorous numerical simulation in standard test-cases
to demonstrate the effectiveness and computational performance of novel algorithms.
Pursuant to proposing a novel DA method, one should likewise compare the performance
of a proposed scheme with other standard methods within the same class of estimators.
This package implements a variety of standard data assimilation algorithms,
including some of the widely used performance modifications that are used in
practice to tune these estimators. This software framework was written originally
to support the development and intercomparison of methods studied in @grudzien2022fast.
Details of the studied ensemble DA schemes, including pseudo-code detailing
their implementation and DA experiment benchmark configurations, can be found in
the above principal reference. Additional details on numerical integration schemes
utilized in this package can be found in the secondary reference [@grudzien2020numerical].
# Statement of need
Standard libraries exist for full-scale DA system research and development, e.g.,
the Data Assimilation Research Testbed (DART) [@anderson2009data], but
there are fewer standard options for theoretical research and algorithm development in
simple test systems. Many basic research frameworks, furthermore, do not include
standard operational techniques developed from classical variational methods,
due to the difficulty in constructing tangent linear and adjoint codes [@kalnay20074denkf].
DataAssimilationBenchmarks.jl provides one framework for studying sequential filters
and smoothers that are commonly used in online, geoscientific prediction settings,
including both ensemble methods and variational schemes, with hybrid methods planned for
future development.
## Comparison with similar projects
Similar projects to DataAssimilationBenchmarks.jl include the DAPPER Python library
[@dapper], DataAssim.jl used by @vetra2018state, and
EnsembleKalmanProcesses.jl of the Climate Modeling Alliance [@enkprocesses].
These alternatives are differentiated primarily in that:
* DAPPER is a Python-based library which is well-established, and includes many of the same
estimators and models. However, numerical simulations in Python run notably slower than
simulations in Julia when numerical routines cannot be vectorized in Numpy
[@bezanson2017julia]. Particularly, this can make the wide hyper-parameter search
intended above computationally challenging without utilizing additional packages such
as Numba [@lam2015numba] for code acceleration.
* DataAssim.jl is another established Julia library, but notably lacks an implementation
of variational and ensemble-variational techniques.
* EnsembleKalmanProcesses.jl is another established Julia library, but specifically lacks
traditional geoscientific DA approaches such as 3D-VAR and the ETKF/S.
## Future development
The future development of the DataAssimilationBenchmarks.jl package is intended to expand
upon the existing, variational and ensemble-variational filters and sequential smoothers for
robust intercomparison of novel schemes. Additional process models and observation models
for the DA system are also in development.
# Acknowledgements
Colin Grudzien developed the numerical code for the package's Julia type optimizations for
numerical schemes and automatic differentiation of code, the
ensemble-based estimation schemes, the observation models, the Lorenz-96 model, the IEEE 39
Bus test case model, and the numerical integration schemes for ordinary and stochastic
differential equations. Charlotte Merchant developed the numerical code for implementing
variational data assimilation in the Lorenz-96 model and related experiments. Sukhreen
Sandhu supported the development of the package structure and organization.
All authors supported the development of the package by implementing test cases and writing
software documentation.
This work was supported by the University of Nevada, Reno, Office of Undergraduate Research's
Pack Research Experience Program which supported Sukhreen Sandhu as a research assistant.
This work was supported by the Center for Western Weather and Water Extremes internship
program which supported Charlotte Merchant as a research assistant.
This work benefited from the DAPPER library which was referenced at times for the development
of DA schemes. The authors would like to thank the two handling editors Bita Hasheminezhad
and Patrick Diehl, and the two named referees Lukas Riedel and Tangi Migot, for their comments,
suggestions, and valuable advice which strongly improved the quality of the paper and
the software.
# References
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 834 | ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1338 | ---
name: Feature requests and new use-cases
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Please introduce yourself and your use-case for the software**
Are you developing a new method that you want to test against other similar learning / estimation schemes in standard benchmarks, or do you want to examine the behavior of an existing method within the main code base in a novel context? Do you want to implement a new state or observation model to benchmark methods in the code base? Please note that there is no support for operational data assimilation or data assimilation with real data; please see the alternatives that exist for that scope.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen. Provide relevant references for models, methods, benchmarks, etc.
**Describe a plan forward for your use-case and any alternatives you've considered**
A clear and concise description of how you see this new functionality being implemented and any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1069 | # Description
This is a data assimilation research code base with an emphasis on prototyping, testing, and
validating sequential filters and smoothers in toy model twin experiments.
This code is meant to be performant in the sense that large hyper-parameter discretizations
can be explored to determine hyper-parameter sensitivity and reliability of results across
different experimental regimes, with parallel implementations in native Julia distributed
computing.
This package currently includes code for developing and testing data assimilation schemes in
the [L96-s model](https://gmd.copernicus.org/articles/13/1903/2020/) and the IEEE 39 bus test
case in the form of the [effective network
model](https://iopscience.iop.org/article/10.1088/1367-2630/17/1/015012)
model equations. New toy models and data assimilation schemes are in continuous development.
This package supported the development of all numerical results and benchmark simulations
in the manuscript
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 4803 | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the project maintainer, Colin Grudzien, [email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the
[Contributor Covenant](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html)
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 4172 |
# Contributing
## What to expect
This is currently a small project maintained by a small community of researchers and
developers, focused on academic research in data assimilation and machine learning
methodology. This software is not intended for use in an operational data assimilation
environment, or even for use with real data, as there are a variety of existing alternatives
for that scope.
We will gladly work with others who want to join our community by using this software for
their own research, whether that means simply using an existing functionality or contributing
new source code to the main code base. However, please note that support for community
development is primarily a volunteer effort, and it may take some time to get a response.
Anyone participating in this community is expected to follow the
[Contributor Covenant Code of Conduct](@ref).
## How to get support or ask a question
The preferred means to get support is to use the
[Github discussions](https://github.com/cgrudz/DataAssimilationBenchmarks.jl/discussions).
to ask a question or to make an introduction as a user of this software.
If you cannot find what you are looking for in the
[main documentation](https://cgrudz.github.io/DataAssimilationBenchmarks.jl/dev/),
please feel free to post a question in the
[Github discussions](https://github.com/cgrudz/DataAssimilationBenchmarks.jl/discussions).
and this may be migrated to the main documentation as frequently asked questions develop.
## Bug reports with existing code
Bug reports go in the
[Github issues](https://github.com/cgrudz/DataAssimilationBenchmarks.jl/issues)
for the project. Please follow the template and provide any relevant details of the
bug you have encountered that will allow the community to reproduce and solve the issue.
Reproducibility is key, and if there are not sufficient details to reproduce an issue,
the issue will be sent back for more details.
## How to contribute new methods, models or other core code
The best way to contribute new code is to reach out to the community first as this code
base is in an early and active state of development and will occasionally face breaking
changes in order to accommodate more generality and new features. Please start with an
introduction of yourself in the
[Github discussions](https://github.com/cgrudz/DataAssimilationBenchmarks.jl/discussions)
followed by a detailed feature request in the
[Github issues](https://github.com/cgrudz/DataAssimilationBenchmarks.jl/issues),
covering your use-case and what new functionality you are proposing. This will help
the community anticipate your needs and the backend changes that might need to be implemented
in order to accommodate new functionality. There is not currently a general system for
implementing new data assimilation methods or models, and it is therefore
critical to bring up your use-case to the community so that how this new feature is
incorporated can be planned into the development. Once the issue can be evaluated and
discussed by the development community, the strategy is usually to create a fork of the
main code base where new modules and features can be prototyped. Once the new code
development is ready for a review, a pull request can be made where the new functionality
may be merged, and possibly further refactored for general consistency and consolidation
of code.
Ideally, any new data assimilation method incorporated into this code should come with
a hyper-parameter configuration built into the [SingleExperimentDriver](@ref) module,
a selected benchmark model in which the learning scheme is to be utilized and a
corresponding test case that demonstrates and verifies an expected behavior. As much
as possible, conventions with arguments should try to match
existing conventions in, e.g., [EnsembleKalmanSchemes](@ref) and [XdVAR](@ref), though
it is understood that not all data assimilation methods need follow these conventions
or even have analogous arguments and sub-routines. Please discuss your approach with
the community in advance so that the framework can be made as consistent (and
therefore extendable and user-friendly) as possible.
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 524 |
# Global Types
The following type and type constructors are declared for optimizing numerical routines,
for using multiple dispatch of functions with different specified input forms and for
passing arguments between inner / outer loop steps of the DA twin experiment. Type
constructors are designed to be flexible enough to handle multiple dispatch for
automatic code differentiation, while seeking to ensure type consistency within methods
for improved performance.
```@autodocs
Modules = [DataAssimilationBenchmarks]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 2999 |
# Getting Started
## Installation
The main module DataAssimilationBenchmarks.jl declares global types and type constructors.
These conventions are utilized in sub-modules that implement the core numerical solvers
for ordinary and stochastic differential equations, solvers for data assimilation routines,
and the core process model code for running twin experiments with benchmark models, collected
in the `methods` and `models` sub-directories. Experiments define routines for driving
standard benchmark case studies with
[NamedTuples](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
as arguments to these methods defining the associated experimental hyper-parameters.
This parent module only serves to support the overhead of type declarations used throughout
the package; the standalone functionality of its methods is extremely limited.
In order to get the full functionality of this package __you will need to install the dev
version__. This provides access to the source code needed to create new experiments and
to define performance benchmarks for these experiments.
### Install a dev package
There are two ways to install a dev package of the repository.
In either case, the installed version will be included in your
```
~/.julia/dev/
```
on Linux, or in the analogous directory on Windows and Mac systems.
#### Install the tagged stable version
To install the last tagged official release, you can use the following
commands in the REPL
```{julia}
pkg> dev DataAssimilationBenchmarks
```
This version in the Julia General Registries will be the latest official release.
However, this official release tends to lag behind the current version.
#### Install the up-to-date version
You can install the latest version from the main Github branch directly as follows:
```{julia}
pkg> dev https://github.com/cgrudz/DataAssimilationBenchmarks.jl
```
The master branch synchronizes with the up-to-date documentation and commits to the master
branch are considered tested but not necessarily stable. As this package functions as a
__research framework__, the master branch is in continuous development. If your use case is
performing research of DA methods with this package, it is recommended to install and keep
up-to-date with the current version of the master branch.
### Repository structure
The repository is structured as follows:
```@raw html
<ul>
<li><code>src</code> - contains the main parent module</li>
<ul>
<li><code>models</code> - contains code for defining the state and observation model equation for twin
experiments</li>
<li><code>methods</code> - contains DA solvers and general numerical routines for running
twin experiments</li>
<li><code>experiments</code> - contains the outer-loop scripts that set up twin
experiments, and constructors for generating parameter grids</li>
<li><code>data</code> - this is an input / output directory for the inputs to and
outputs from experiments</li>
<li><code>analysis</code> - contains auxiliary scripts for batch processing experiment
results and for plotting (currently in Python, not fully integrated).</li>
</ul>
<li><code>test</code> - contains test cases for the package.</li>
<li><code>docs</code> - contains the documenter files.</li>
</ul>
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 3281 |
# Introduction
## Statement of purpose
The purpose of this package is to provide a research framework for the theoretical
development and empirical validation of novel data assimilation techniques.
While analytical proofs can be derived for classical methods, such as the Kalman filter
in linear-Gaussian dynamics, most currently developed DA
techniques are designed for estimation in nonlinear, non-Gaussian models where no
analytical solution typically exists. Rigorous validation of novel data assimilation
methods, therefore, must be performed with reproducible numerical simulations in
standard test-cases in order to demonstrate the effectiveness and computational
performance of the proposed technique. Pursuant to proposing a novel DA method,
one should likewise compare its performance with other standard methods within
the same class of estimators.
This package implements a variety of standard data assimilation algorithms, including
some of the widely used performance modifications that are used in practice to tune
these estimators. Standard libraries exist for full-scale DA system research and
development, e.g., the
[Data Assimilation Research Testbed (DART)](https://dart.ucar.edu/), but
there are fewer standard options for theoretical research and algorithm development in
simple test systems. Many basic research frameworks, furthermore, do not include
standard operational techniques developed from classical VAR methods, due to the
difficulty in constructing tangent linear and adjoint codes.
DataAssimilationBenchmarks.jl provides one framework for studying
sequential filters and smoothers that are commonly used in online, geoscientific
prediction settings, including ensemble estimators, classical VAR techniques
(currently in-development) and (in-planning) hybrid-EnVAR methods.
## Validated methods
For a discussion of many of the following methods and benchmarks for their
performance validation, please see the manuscript
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
```@raw html
<table>
<tr>
<th>Estimator / enhancement</th>
<th>Tuned inflation</th>
<th>Adaptive inflation</th>
<th>Linesearch</th>
<th>Localization / Hybridization</th>
<th>Multiple data assimilation</th>
</tr>
<tr>
<td>ETKF</td>
<td> X </td>
<td> X </td>
<td> NA </td>
<td> </td>
<td> NA </td>
</tr>
<tr>
<td>3D-VAR</td>
<td> X </td>
<td> </td>
<td> </td>
<td> </td>
<td> NA </td>
</tr>
<tr>
<td>MLEF</td>
<td> X </td>
<td> X </td>
<td> X </td>
<td> </td>
<td> NA </td>
</tr>
<tr>
<td>ETKS</td>
<td> X </td>
<td> X </td>
<td> NA </td>
<td> </td>
<td> NA </td>
</tr>
<tr>
<td>MLES</td>
<td> X </td>
<td> X </td>
<td> X </td>
<td> </td>
<td> NA </td>
</tr>
<tr>
<td>SIEnKS</td>
<td> X </td>
<td> X </td>
<td> X </td>
<td> </td>
<td> X </td>
</tr>
<tr>
<td>IEnKS</td>
<td> X </td>
<td> X </td>
<td> </td>
<td> </td>
<td> X </td>
</tr>
</table>
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 216 |
# PlotExperimentData
This is currently in-development and is only integrated loosely based on Python
codes for plotting. A native Julia implementation is planned, though without any
currently planned release date.
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1840 |
# Analysis
## Processing experiment outputs
The `analysis` directory contains examples for batch processing the outputs from experiments into time-averaged
RMSE and spread and arranging these outputs in an array for plotting. These examples should be modified based
on the local paths to stored data and experiment settings. This will try to load files based on parameter settings
written in the name of the output `.jld2` file and if this is not available, this will store `Inf` values in the
place of missing data. These scripts are not currently integrated or supported, with the expectation that
one will write their own variants based on their needs with specific experiments.
## Validating results
Benchmark configurations for the above filtering and smoothing experiments are available in the open access article
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html),
with details on the algorithm and parameter specifications discussed in the experiments section. Performance of filtering and
smoothing schemes should be validated versus the numerical results for root mean square error and ensemble spread.
Simple versions of these diagnostics are built for automatic testing of the filter and smoother experiments for state and parameter estimation
in the L96-s model. Further test cases are currently in development. The deterministic Runge-Kutta and Euler schemes for ODEs are
validated in the package tests, estimating the order of convergence with the least-squares log-10 line fit between step size
and discretization error. Test cases for the stochastic integration schemes are in development, but numerical results with these
schemes can be validated versus the results in the open-access article
[Grudzien et al. 2020](https://gmd.copernicus.org/articles/13/1903/2020/).
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 2046 |
# FilterExps
The `FilterExps.jl` sub-module contains methods to configure filter twin experiments, using a
stored time series as generated by [GenerateTimeSeries](@ref) as the underlying
observation generating process. The frequency of observations in continuous time is defined
by the frequency of data saved in the time series and is inferred by the experiment
when reading in the data.
Filter experiment configurations are generated by supplying a
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
with the required fields as specified in the experiment method. Conventions for
these arguments are as follows:
* `time_series` - the path and file name of the [.jld2](https://juliaio.github.io/JLD2.jl/dev/) truth twin data set;
* `method` - the sub-method analysis scheme string name;
* `seed` - the pseudo-random seed that will define, e.g., the observation noise sequence;
* `nanl` - the number of observation / analysis times to produce a posterior estimate;
* `obs_un` - the observation error standard deviation, assuming a uniform scaling observation error covariance;
* `obs_dim` - the dimension of the observation vector;
* `γ` - defines nonlinearity in the [`DataAssimilationBenchmarks.ObsOperators.alternating_obs_operator`](@ref);
* `N_ens` - the ensemble size for ensemble-based filters;
* `s_infl` - the multiplicative inflation for the empirical model state covariance;
* `p_infl` - the multiplicative inflation for the empirical parameter sample covariance;
* `p_err` - defines initial parameter sample standard deviation as `p_err` percent of the system parameter value;
* `p_wlk` - defines the standard deviation of a Gaussian random walk as `p_wlk` percent of the estimated parameter value for a random parameter model.
Standard configurations should be defined in the [SingleExperimentDriver](@ref) module, for reproducing results
and generating standard tests of methods.
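As an illustration, a minimal state-estimation configuration might be sketched as below,
mirroring the adaptive-inflation example in [ParallelExperimentDriver](@ref); the
`time_series` value is a placeholder and must point to an existing truth twin generated
by [GenerateTimeSeries](@ref):
```{julia}
using DataAssimilationBenchmarks

config = (
    time_series = "/path/to/L96_time_series.jld2", # placeholder truth twin path
    method = "enkf-n-primal",                      # adaptive-inflation EnKF analysis
    seed = 1234,                                   # pseudo-random seed
    nanl = 4000,                                   # number of analysis times
    obs_un = 1.0,                                  # observation error standard deviation
    obs_dim = 40,                                  # fully observed L96 state
    γ = 1.0,                                       # linear observation operator
    N_ens = 21,                                    # ensemble size
    s_infl = 1.0,                                  # no additional multiplicative inflation
)
FilterExps.ensemble_filter_state(config)
```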
## Filter Experiment Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.FilterExps]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1792 |
# GenerateTimeSeries
GenerateTimeSeries is a sub-module used to generate a time series for a twin experiment based
on tuneable model configuration parameters. Example syntax for the configuration of a time
series is as follows, where arguments are defined in a
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
to be passed to the
specific experiment function:
```{julia}
(seed::Int64, h::Float64, state_dim::Int64, tanl::Float64, nanl::Int64, spin::Int64,
 diffusion::Float64)::NamedTuple
```
Conventions for these arguments are as follows:
* `seed` - specifies the initial condition of the pseudo-random number generator, on which various simulation settings depend; results are reproducible with the same `seed` value;
* `h` - is the numerical integration step size, controlling the discretization error of the model evolution;
* `state_dim` - controls the size of the [Lorenz-96 model](@ref) model though other models such as the [IEEE39bus](@ref) model are of pre-defined size;
* `tanl` - (__time-between-analysis__) defines the length of continuous time units between sequential observations;
* `nanl` - (__number-of-analyses__) defines the number of observations / analyses to be saved;
* `spin` - discrete number of `tanl` intervals to spin-up for the integration of the dynamical system solution to guarantee a stationary observation generating process;
* `diffusion` - determines the intensity of the random perturbations in the integration scheme.
Results are saved in [.jld2 format](https://juliaio.github.io/JLD2.jl/dev/) in the data directory to be called by filter / smoother
experiments cycling over the pseudo-observations.
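For example, the standard Lorenz-96 truth twin used in the examples of
[ParallelExperimentDriver](@ref) is generated with the following call; note that model
parameters such as the forcing `F` are passed in the same
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple) as the
configuration parameters above:
```{julia}
using DataAssimilationBenchmarks

GenerateTimeSeries.L96_time_series(
    (
        seed = 123,       # seeds the pseudo-random number generator
        h = 0.05,         # integration step size
        state_dim = 40,   # standard L96 state dimension
        tanl = 0.05,      # continuous time between observations
        nanl = 6500,      # number of saved analysis times
        spin = 1500,      # spin-up intervals before saving begins
        diffusion = 0.0,  # deterministic dynamics
        F = 8.0,          # chaotic forcing regime
    )
)
```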
## Time series experiments
```@autodocs
Modules = [DataAssimilationBenchmarks.GenerateTimeSeries]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 6183 |
# ParallelExperimentDriver
In order to perform sensitivity testing and estimator tuning, many different parameter
combinations may need to be evaluated for each experiment defined in the submodules
[GenerateTimeSeries](@ref), [FilterExps](@ref) and [SmootherExps](@ref). These experiments
are designed so that these hyper-parameter searches can be implemented with naive parallelism,
using [parallel maps](https://en.wikipedia.org/wiki/MapReduce) and Julia's
native [Distributed](https://docs.julialang.org/en/v1/stdlib/Distributed/) computing module.
This module defines argumentless functions to construct an array with each array entry given
by a [NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple), defining
a particular hyper-parameter configuration. These functions also define a soft-fail method
for evaluating experiments, with example syntax as
```{julia}
args, wrap_exp = method()
```
where the `wrap_exp` follows a convention of
```{julia}
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
```
with `exp` being imported from one of the experiment modules above.
This soft-fail wrapper provides that if a single experiment configuration in the parameter
array fails due to, e.g., numerical overflow, the remaining configurations will continue
their own course unaffected.
## Example usage
An example of how one can use the ParallelExperimentDriver framework to run a sensitivity
test is as follows. We use a sensitivity test on the ensemble size for several
variants of the EnKF using adaptive inflation. The following function, defined in
ParallelExperimentDriver.jl module, will construct all of input data for the truth twin
and a collection of NamedTuples that define individual experiments:
```{julia}
path = pkgdir(DataAssimilationBenchmarks) * "/src/data/time_series/"
function ensemble_filter_adaptive_inflation()
exp = DataAssimilationBenchmarks.FilterExps.ensemble_filter_state
function wrap_exp(arguments)
try
exp(arguments)
catch
print("Error on " * string(arguments) * "\n")
end
end
# set time series parameters
seed = 123
h = 0.05
state_dim = 40
tanl = 0.05
nanl = 6500
spin = 1500
diffusion = 0.00
F = 8.0
# generate truth twin time series
GenerateTimeSeries.L96_time_series(
(
seed = seed,
h = h,
state_dim = state_dim,
tanl = tanl,
nanl = nanl,
spin = spin,
diffusion = diffusion,
F = F,
)
)
# define load path to time series
time_series = path * "L96_time_series_seed_" * lpad(seed, 4, "0") *
"_dim_" * lpad(state_dim, 2, "0") *
"_diff_" * rpad(diffusion, 5, "0") *
"_F_" * lpad(F, 4, "0") *
"_tanl_" * rpad(tanl, 4, "0") *
"_nanl_" * lpad(nanl, 5, "0") *
"_spin_" * lpad(spin, 4, "0") *
"_h_" * rpad(h, 5, "0") *
".jld2"
# define ranges for filter parameters
methods = ["enkf-n-primal", "enkf-n-primal-ls", "enkf-n-dual"]
seed = 1234
obs_un = 1.0
obs_dim = 40
N_enss = 15:3:42
s_infls = [1.0]
nanl = 4000
γ = 1.0
# load the experiments
args = Vector{Any}()
for method in methods
for N_ens in N_enss
for s_infl in s_infls
tmp = (
time_series = time_series,
method = method,
seed = seed,
nanl = nanl,
obs_un = obs_un,
obs_dim = obs_dim,
γ = γ,
N_ens = N_ens,
s_infl = s_infl
)
push!(args, tmp)
end
end
end
return args, wrap_exp
end
```
With a constructor as above, one can define a script as follows to run the sensitivity test:
```{julia}
##############################################################################################
module run_sensitivity_test
##############################################################################################
# imports and exports
using Distributed
@everywhere using DataAssimilationBenchmarks
##############################################################################################
config = ParallelExperimentDriver.ensemble_filter_adaptive_inflation
print("Generating experiment configurations from " * string(config) * "\n")
print("Generate truth twin\n")
args, wrap_exp = config()
num_exps = length(args)
print("Configuration ready\n")
print("\n")
print("Running " * string(num_exps) * " configurations on " * string(nworkers()) *
" total workers\n")
print("Begin pmap\n")
pmap(wrap_exp, args)
print("Experiments completed, verify outputs in the appropriate directory under:\n")
print(pkgdir(DataAssimilationBenchmarks) * "/src/data\n")
##############################################################################################
# end module
end
```
Running the script using
```
julia -p N run_sensitivity_test.jl
```
will map the evaluation of all parameter configurations to parallel workers where `N`
is the number of workers, to be defined based on the available resources on the user system.
User-defined sensitivity tests can be generated by modifying the above script according
to new constructors defined within the ParallelExperimentDriver module.
## Experiment groups
```@autodocs
Modules = [DataAssimilationBenchmarks.ParallelExperimentDriver]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1180 |
# SingleExperimentDriver
Following the convention of [GenerateTimeSeries](@ref), [FilterExps](@ref)
and [SmootherExps](@ref) using
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
arguments to define hyper-parameter configurations,
the SingleExperimentDriver module defines dictionaries of the form
```
experiment_group["parameter_settings"]
```
where keyword arguments return standard parameter configurations for these experiments
with known results for reproducibility. These standard configurations are used in the package
for debugging, testing, benchmarking, and profiling code. Package tests
use these standard configurations to verify a DA method's forecast and analysis RMSE.
User-defined, custom experiments can be modeled from the methods in the above modules with a
corresponding SingleExperimentDriver dictionary entry used to run and debug the experiment,
and to test and document the expected results. Parallel submission scripts are used
for production runs of sensitivity experiments, defined in [ParallelExperimentDriver](@ref).
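A hedged sketch of this access pattern follows; the group and key names here are
hypothetical placeholders, as the actual dictionaries are autodocumented below:
```{julia}
using DataAssimilationBenchmarks

# hypothetical group / key names for illustration only
config = SingleExperimentDriver.filter_exps["L96_ETKF_state_test"]
FilterExps.ensemble_filter_state(config)
```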
## Experiment groups
```@autodocs
Modules = [DataAssimilationBenchmarks.SingleExperimentDriver]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 2261 |
# SmootherExps
The `SmootherExps.jl` sub-module contains methods to configure smoother twin experiments,
using a stored time series as generated by [GenerateTimeSeries](@ref) as the underlying
observation generating process. The frequency of observations in continuous time is defined
by the frequency of data saved in the time series and is inferred by the experiment
when reading in the data.
```@raw html
<div sytle="float:left; width:100%;">
<img style="width:95%" src="https://raw.githubusercontent.com/cgrudz/DataAssimilationBenchmarks.jl/master/assets/cyclingSDA.png" alt="Observation analysis forecast cycle over multiple data assimilation windows">
</div>
```
Smoother experiment configurations are generated by supplying a
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
with the required fields as specified in the experiment method. Conventions for
these arguments are the same as with the [FilterExps](@ref), with the additional options
that configure the data assimilation window (DAW) and how this is shifted in time:
* `lag` - the number of past observation / analysis times to reanalyze in a DAW, corresponding to ``L`` in the figure above;
* `shift`- the number of observation / analysis times to shift the DAW, corresponding to ``S`` in the figure above;
* `mda` - (__Multiple Data Assimilation__), type `Bool`, determines whether the technique of multiple data assimilation is used (only compatible with the `single_iteration` and `iterative` smoothers).
Currently debugged and validated smoother experiment configurations include
* `classic_state` - classic ETKS style state estimation
* `classic_param` - classic ETKS style state-parameter estimation
* `single_iteration_state` - SIEnKS state estimation
* `single_iteration_param` - SIEnKS state-parameter estimation
* `iterative_state` - IEnKS Gauss-Newton style state estimation
* `iterative_param` - IEnKS Gauss-Newton style state-parameter estimation
Note, the single-iteration and fully iterative Gauss-Newton style smoothers are only defined
for MDA compatible values of lag and shift where the lag is an integer multiple of the shift.
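To illustrate the DAW conventions, a hypothetical SIEnKS state-estimation configuration
might be sketched as below; the analysis string, the truth twin path and the entry point
are assumptions to be checked against the method docstrings:
```{julia}
using DataAssimilationBenchmarks

config = (
    time_series = "/path/to/L96_time_series.jld2", # placeholder truth twin path
    method = "etks",  # assumed transform-smoother analysis string
    seed = 1234,
    nanl = 2500,
    lag = 16,         # reanalyze the last 16 observation times in each DAW
    shift = 4,        # shift the DAW by 4 observation times (16 is a multiple of 4)
    mda = false,      # single data assimilation
    obs_un = 1.0,
    obs_dim = 40,
    γ = 1.0,
    N_ens = 21,
    s_infl = 1.02,
)
SmootherExps.single_iteration_state(config) # assumed entry point
```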
## Smoother Experiment Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.SmootherExps]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1662 |
# Workflow
This package is based around file input and output, with experiment configurations defined
as function arguments using the
[NamedTuple](https://docs.julialang.org/en/v1/base/base/#Core.NamedTuple)
data type. A basic workflow to run a data assimilation twin
experiment is to first generate a time series for observations using a choice of
tuneable parameters using the [GenerateTimeSeries](@ref) submodule. Once the time
series data is generated from one of the benchmark models, one can use this data as a
truth twin to generate pseudo-observations. This time series can thus be re-used over
multiple configurations of filters and smoothers, holding the pseudo-data fixed while
varying other hyper-parameters. Test cases in this package model this workflow,
to first generate test data and then to implement a particular experiment based
on a parameter configuration to exhibit known behavior of the estimator, typically in terms
of forecast and analysis root mean square error (RMSE).
Standard configurations of hyper-parameters for the truth twin and the data assimilation
method are included in the [SingleExperimentDriver](@ref) submodule, and constructors for
generating maps of parallel experiments over parameter grids are defined in the
[ParallelExperimentDriver](@ref) submodule. It is assumed that one will
[Install a dev package](@ref) in order to define new parameter tuples
and constructors for parallel experiments that test the behavior of estimators
in new configurations. It is also assumed that one will write new experiments using
the [FilterExps](@ref) and [SmootherExps](@ref) submodules as templates.
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 3128 |
# Differential Equation Solvers
Three general schemes are developed for ordinary and stochastic differential equations,
* the four-stage Runge-Kutta [`DataAssimilationBenchmarks.DeSolvers.rk4_step!`](@ref) scheme,
* the second order autonomous Taylor [`DataAssimilationBenchmarks.DeSolvers.tay2_step!`](@ref) scheme, and
* the Euler-(Maruyama) [`DataAssimilationBenchmarks.DeSolvers.em_step!`](@ref) scheme.
These schemes have arguments with the conventions:
* `x` - model states of type [`VecA`](@ref) possibly including a statistical replicate of model parameter values;
* `t` - time value of type [`Float64`](https://docs.julialang.org/en/v1/base/numbers/#Core.Float64) for present model state (a dummy argument is used for autonomous dynamics);
* `kwargs` - a dictionary of type [`StepKwargs`](@ref).
Details of these schemes are available in the manuscript
[Grudzien et al. 2020](https://gmd.copernicus.org/articles/13/1903/2020/gmd-13-1903-2020.html).
Because the second order Taylor-Stratonovich scheme relies specifically on the structure
of the Lorenz-96 model with additive noise, this is included separately in the
[Lorenz-96 model](@ref) sub-module. These time steppers over-write
the value of the model state `x` in-place for efficient ensemble integration.
The four-stage Runge-Kutta scheme follows the convention in data assimilation of the
extended state formalism for parameter estimation. In particular, the parameter sample
should be included as trailing state variables in the columns of the ensemble array. If
the following conditional is true:
```{julia}
true == haskey(kwargs, "param_sample")
```
the `state_dim` parameter specifies the dimension of the dynamical states and creates a
view of the vector `x` including all entries up to this index. The remaining entries in
the state vector `x` will be passed to the `dx_dt` function in
a dictionary merged with the `dx_params` [`ParamDict`](@ref), according to the `param_sample`
indices and parameter values specified in `param_sample`. The parameter sample values
will remain unchanged by the time stepper when the dynamical state entries in `x` are
over-written in place.
Setting `diffusion > 0.0` introduces additive noise to the dynamical system. The main
[`DataAssimilationBenchmarks.DeSolvers.rk4_step!`](@ref) has convergence on order 4.0
when diffusion is equal to zero, and both strong and weak convergence on order 1.0 when
stochasticity is introduced. This is the recommended out-of-the-box solver for any
generic DA simulation, given its statistically robust performance versus Euler-(Maruyama).
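As a sketch of the calling convention, the following loop integrates a single Lorenz-96
trajectory with the Runge-Kutta scheme. The `StepKwargs` keys used here (`"h"`,
`"diffusion"`, `"dx_params"`, `"dx_dt"`) and the `L96.dx_dt` name are assumptions
inferred from the conventions above; consult the method docstrings for the authoritative
names:
```{julia}
using DataAssimilationBenchmarks
using DataAssimilationBenchmarks.DeSolvers, DataAssimilationBenchmarks.L96

x = rand(40)                  # initial model state
kwargs = Dict{String,Any}(
    "h" => 0.01,              # integration step size
    "diffusion" => 0.0,       # deterministic dynamics
    "dx_params" => Dict{String,Vector{Float64}}("F" => [8.0]), # assumed ParamDict layout
    "dx_dt" => L96.dx_dt,     # assumed name of the L96 vector field
)
for _ in 1:100
    rk4_step!(x, 0.0, kwargs) # dummy time argument for autonomous dynamics
end
```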
When specifically generating the truth-twin for the Lorenz-96 model with additive noise,
this should be performed with the [`DataAssimilationBenchmarks.L96.l96s_tay2_step!`](@ref),
while the ensemble should be generated with the
[`DataAssimilationBenchmarks.DeSolvers.rk4_step!`](@ref). See the benchmarks on the
[L96-s model](https://gmd.copernicus.org/articles/13/1903/2020/) for a full discussion of
statistically robust model configurations.
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.DeSolvers]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 4004 |
# EnsembleKalmanSchemes
There are currently four families of ensemble Kalman estimators available in this package,
which define the outer-loop of the data assimilation cycle. Particularly, these define
how the sequential data assimilation cycle will pass over a time series of observations,
with more details in the [SmootherExps](@ref) documents.
Ensemble filters only produce analyses forward-in-time. The classic lag-shift smoother runs
identically to the filter in its forecast and filter steps, but includes an additional retrospective
analysis of past ensemble states stored in memory. The single iteration smoother follows
the same convention as the classic smoother, except in that new cycles are initiated from
a past, reanalyzed ensemble state. The Gauss-Newton iterative smoothers are 4D smoothers,
which iteratively optimize the initial condition at the beginning of a data assimilation
cycle, and propagate this initial condition to initialize the subsequent cycle. A full
discussion of these methods can be found in
[Grudzien et al. 2022](https://gmd.copernicus.org/articles/15/7641/2022/gmd-15-7641-2022.html).
For each outer-loop method defining the data assimilation cycle, different types of analyses
can be specified within their arguments. Likewise, these outer-loop methods require
arguments such as the ensemble state or the range of ensemble states to analyze, an
observation to assimilate or a range of observations to assimilate, as the observation
operator and observation error covariance and key word arguments for running the
underlying dynamical state model. Examples of the syntax are below:
```{julia}
ensemble_filter(analysis::String, ens::ArView(T), obs::VecA(T), H_obs::Function,
                obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64

ls_smoother_classic(analysis::String, ens::ArView(T), obs::ArView(T), H_obs::Function,
                    obs_cov::CovM(T), kwargs::StepKwargs) where T <: Float64

ls_smoother_single_iteration(analysis::String, ens::ArView(T), obs::ArView(T),
                             H_obs::Function, obs_cov::CovM(T),
                             kwargs::StepKwargs) where T <: Float64

ls_smoother_gauss_newton(analysis::String, ens::ArView(T), obs::ArView(T),
                         H_obs::Function, obs_cov::CovM(T), kwargs::StepKwargs;
                         ϵ::Float64=0.0001, tol::Float64=0.001,
                         max_iter::Int64=10) where T <: Float64
```
with conventions defined as follows:
* `analysis` - string name of the analysis scheme;
* `ens` - ensemble matrix defined by the array with columns given by the replicates of the model state;
* `obs` - observation vector for the current analysis in `ensemble_filter` / array with columns given by the observation vectors for the ordered sequence of analysis times in the current smoothing window;
* `H_obs` - observation model mapping state vectors and ensembles into observed variables;
* `obs_cov` - observation error covariance matrix;
* `kwargs` - keyword arguments for inflation, parameter estimation or other functionality, including integration parameters for the state model in smoothing schemes.
The `analysis` string is passed to the
[`DataAssimilationBenchmarks.EnsembleKalmanSchemes.transform_R`](@ref) or the
[`DataAssimilationBenchmarks.EnsembleKalmanSchemes.ens_gauss_newton`](@ref)
methods below to produce a specialized analysis within the outer-loop controlled by the above
filter and smoother methods. Observations for the filter
schemes correspond to information available at a single analysis time giving an observation
of the state vector of type [`VecA`](@ref). The `ls` (lag-shift) smoothers require an array of
observations of type [`ArView`](@ref) corresponding to all analysis times within the data
assimilation window (DAW). Observation covariances are typed as [`CovM`](@ref) for
efficiency. State covariance multiplicative inflation and extended state parameter
covariance multiplicative inflation can be specified in `kwargs`.
Utility scripts to generate observation operators, analyze ensemble statistics, etc,
are included in the below.
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.EnsembleKalmanSchemes]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 874 |
# XdVAR
This module defines methods for classical variational data assimilation such as
3D- / 4D-VAR. Primal cost functions are defined, and they are differentiated implicitly
using automatic differentiation from the [JuliaDiff](https://github.com/JuliaDiff)
ecosystem. Development of gradient-based optimization schemes using automatic
differentiation is ongoing, with future development planned to integrate variational
benchmark experiments.
The basic 3D-VAR cost function API is defined as follows
```{julia}
D3_var_cost(x::VecA(T), obs::VecA(T), x_background::VecA(T), state_cov::CovM(T),
obs_cov::CovM(T), kwargs::StepKwargs) where T <: Real
```
where the control variable `x` is optimized, with fixed hyper-parameters defined in a
wrapping function passed to auto-differentiation.
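For reference, the primal 3D-VAR cost evaluated by this API takes the standard form

$$\begin{align}
\mathcal{J}(\pmb{x}) = \frac{1}{2}\left(\pmb{x} - \pmb{x}_\mathrm{b}\right)^\top \mathbf{B}^{-1}\left(\pmb{x} - \pmb{x}_\mathrm{b}\right) + \frac{1}{2}\left(\pmb{y} - \mathcal{H}\left(\pmb{x}\right)\right)^\top \mathbf{R}^{-1}\left(\pmb{y} - \mathcal{H}\left(\pmb{x}\right)\right),
\end{align}$$

where `x_background` plays the role of the background mean $\pmb{x}_\mathrm{b}$,
`state_cov` the background covariance $\mathbf{B}$, `obs` the observation $\pmb{y}$,
`obs_cov` the observation error covariance $\mathbf{R}$, and $\mathcal{H}$ is the
observation operator. Differentiating a wrapper that closes over the fixed
hyper-parameters, e.g., with `ForwardDiff.gradient`, then yields the gradient of the
cost with respect to the control variable `x`.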
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.XdVAR]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 5494 |
# IEEE39bus
This is a version of the IEEE-39 bus test case as described by
[Nishikawa et al. 2015](https://iopscience.iop.org/article/10.1088/1367-2630/17/1/015012).
The model, denoted the "effective network", consists of the ten generator buses in the
network with all other buses eliminated by the classical Kron reduction.
The power flow is described in steady state by a fixed point of the nonlinear swing equations
$$\begin{align}
\frac{2H_i}{\omega_\mathrm{R}} \ddot{\delta}_i + \frac{D_i}{\omega_\mathrm{R}} \dot{\delta}_i = A_{i}^\mathrm{EN} - \sum_{j =1, j\neq i}^{n_g} K_{ij}\sin\left(\delta_i - \delta_j -\gamma_{ij}^\mathrm{EN}\right),
\end{align}$$
where we define each of the following:
* the angular reference frequency (in radians) about which the steady state synchronizes is defined as $\omega_\mathrm{R}$;
* the angle of rotation of the generator rotor at bus $i$, relative to the frame rotating at the reference frequency, is defined as $\delta_i(t)$;
* the difference between the reference frequency and the frequency of the rotor at bus $i$ is defined $\dot{\delta}_i(t)$;
* the rate of acceleration of the difference between the angle of the rotor at bus $i$ and the frame rotating at the reference frequency is defined as $\ddot{\delta}_i(t)$;
* the values of the inertia and damping at bus $i$ are defined as $H_i$ and $D_i$ respectively;
* the strength of the dynamical coupling of the buses $i$ and $j$ is defined as $K_{ij}$, while $\gamma_{ij}$ represents the phase shift involved in the coupling of these buses;
* the active power injected into the network by the generator at bus $i$ is represented by $A^\mathrm{EN}_i$; and
* the number of generators in the network is defined as $n_g =10$.
This model assumes constant, passive loads at each bus that draws power.
The actual parameters used in the model are defined by files in the
```
DataAssimilationBenchmarks/src/models/IEEE39bus_inputs/
```
directory, taken from the configuration studied by
[Nishikawa et al. 2015](https://iopscience.iop.org/article/10.1088/1367-2630/17/1/015012),
with details on their interpretation in section 4.1.
The stochastic form in this code loosens the assumption of constant loads in this model by
assuming that, at the time scale of interest, the draw of power fluctuates randomly about
the constant level that defines the steady state. We introduce a Wiener process to
the above equations of the form $s W_i(t)$, where $s$ is a parameter in the model
controlling the relative diffusion level. We assume that the fluctuations in the net
power are uncorrelated across buses and that the diffusion in all buses is proportional to
$s$.
Making a change of variables $\psi_i = \dot{\delta}_i$, we recover the system of nonlinear
SDEs,
$$\begin{align}
\dot{\delta}_i = \psi_i,
\end{align}$$
$$\begin{align}
\dot{\psi}_i = \frac{A^\mathrm{EN}_i \omega_\mathrm{R}}{2H_i} - \frac{D_i}{2H_i} \psi_i -
\sum_{j=1,j\neq i}^{n_g} \frac{K_{ij}^\mathrm{EN}\omega_\mathrm{R}}{2H_i} \sin\left(\delta_i - \delta_j -\gamma_{ij}^\mathrm{EN}\right) + \frac{ s \omega_R}{2 H_i} \mathrm{d}W_i(t).
\end{align}$$
The diffusion level $s$ controls the standard deviation of the Gaussian process
$$\begin{align}
\frac{s \omega_R}{2H_i} W_{i,\Delta_t}\doteq \frac{s \omega_R}{2H_i}\left(W_i(\Delta + t) - W_i(t)\right).
\end{align}$$
By definition the standard deviation of $W_{i,\Delta_t}$ is equal to $\sqrt{\Delta}$ so that
for each time-discretization of the Wiener process of step size $\Delta$,
$\frac{s \omega_R}{2 H_i}W_{i,\Delta_t}$ is a mean zero, Gaussian distributed variable
with standard deviation $\frac{s \omega_\mathrm{R}}{2}\sqrt{\Delta}$. The reference
frequency in North America is 60 Hz, and the tolerable deviation from this frequency under
normal operations is approximately $\pm 0.05$ Hz, or of magnitude
approximately $0.08\%$. In the above model, the
reference frequency is in radians, related to the reference frequency in Hz as
$\omega_\mathrm{R} = 60 \mathrm{Hz} \times 2 \pi \approx 376.99$. This makes the
tolerable limit of perturbations to the frequency approximately $0.3$ radians under normal
operations.
By definition $\psi_i$ is the $i$-th frequency relative to the reference frequency
$\omega_\mathrm{R}$. One should choose $s$ sufficiently small such that the probability
that the size of a perturbation to the frequency
$$\begin{align}
\parallel \frac{s \omega_\mathrm{R}}{2 H_i}\mathbf{W}_{\Delta_t} \parallel\geq 0.3
\end{align}$$
is small. Simulating the model numerically with the four-stage, stochastic Runge-Kutta
algorithm
[`DataAssimilationBenchmarks.DeSolvers.rk4_step!`](@ref)
a step size of $\Delta=0.01$ is recommended, so that the standard deviation of
a perturbation to the $i$-th relative frequency $\psi_i$ at any time step is
$\frac{s \omega_\mathrm{R}}{20 H_i}$. The smallest inertia parameter in the model is
approximately $24.3$, so that three standard deviations of the perturbation
to the frequency is bounded as
$$\begin{align}
\frac{s\omega_\mathrm{R}}{20 \times 24.3} \times 3 \leq 0.03 \Leftrightarrow s \leq\frac{4.86}{\omega_\mathrm{R}} \approx 0.0129.
\end{align}$$
For $s \leq 0.012$, we bound the standard deviation of each component,
$\frac{s \omega_\mathrm{R}}{2H_i}\sqrt{\Delta}$, of the perturbation vector by $0.01$ so
that over $99.7\%$ of perturbations to the $i$-th frequency have size less than $0.03$.
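This bound can be checked directly with a few lines of arithmetic mirroring the
derivation above:
```{julia}
# verify the three-sigma bound on the diffusion level s
ω_R = 60 * 2π                                # reference frequency in radians, ≈ 376.99
H_min = 24.3                                 # smallest inertia parameter in the model
Δ = 0.01                                     # recommended integration step size
σ_step(s) = s * ω_R * sqrt(Δ) / (2 * H_min)  # per-step frequency perturbation deviation
s_max = 0.03 / (3 * σ_step(1.0))             # enforce 3σ ≤ 0.03
# s_max ≈ 0.0129, matching the bound s ≤ 4.86 / ω_R
```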
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.IEEE39bus]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1722 |
# Lorenz-96 model
The classical form for the (single-layer) Lorenz-96 equations are defined as
$$\begin{align}
\frac{\mathrm{d}\pmb{x}}{\mathrm{d} t} = \pmb{f}(\pmb{x}),
\end{align}$$
where for each state component $i\in\{1,\cdots,n\}$,
$$\begin{align}
f^i(\pmb{x}) &=-x^{i-2}x^{i-1} + x^{i-1}x^{i+1} - x^i + F
\end{align}$$
such that the components of the vector $\pmb{x}$ are given by the variables $x^i$ with
periodic boundary conditions, $x^0=x^n$, $x^{-1}=x^{n-1}$ and $x^{n+1}=x^{1}$. The term
$F$ in the Lorenz-96 system is the forcing parameter that injects energy to the model.
With the above definition for the classical Lorenz-96 equations, we define the L96-s model
with additive noise (of scalar covariance) as
$$\begin{align}
\frac{\mathrm{d} \pmb{x}}{\mathrm{d} t} = \pmb{f}(\pmb{x}) + s(t)\mathbf{I}_{n}\pmb{W}(t),
\end{align}$$
where $\pmb{f}$ is defined as in the classical equations, $\mathbf{I}_n$ is the
$n\times n$ identity matrix, $\pmb{W}(t)$ is an $n$-dimensional Wiener process and
$s(t):\mathbb{R}\rightarrow \mathbb{R}$ is a measurable function of (possibly)
time-varying diffusion coefficients. This model is analyzed in-depth for data assimilation
twin experiments in the manuscript
[Grudzien et al. 2020](https://gmd.copernicus.org/articles/13/1903/2020/gmd-13-1903-2020.html)
and further details of using the system for data assimilation benchmarks in stochastic
dynamics are discussed there. The methods in the below define the model equations,
the Jacobian, and the order 2.0 Taylor-Stratonovich scheme derived especially
for statistically robust numerical simulation of the truth twin of the L96-s system.
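For reference, a direct implementation of the classical vector field above takes only a
few lines; this sketch is independent of the package's own autodocumented methods below:
```{julia}
# classical L96 vector field with periodic boundary conditions;
# mod1 wraps the indices i-2, i-1 and i+1 into the range 1:n
function l96_dx_dt(x::Vector{Float64}, F::Float64)
    n = length(x)
    dx = similar(x)
    for i in 1:n
        dx[i] = (x[mod1(i + 1, n)] - x[mod1(i - 2, n)]) * x[mod1(i - 1, n)] - x[i] + F
    end
    return dx
end
```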
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.L96]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"Apache-2.0"
] | 0.3.4 | 633aca1308dd579a622fd8559de7d96e425f0dc4 | docs | 1054 |
# Observation Operators
The methods in this module define observation operators mapping the state model to
the observation space. In current experiments, the observation operator is hard-coded
in the driver script with a statement
```
H_obs = alternating_obs_operator
```
defining the observation operator. The dimension of the observation and the nonlinear
transform applied can be controlled with the parameters of
[`DataAssimilationBenchmarks.ObsOperators.alternating_obs_operator`](@ref).
Additional observation models are pending,
following the convention where observation operators will be defined both for
vector arguments and multi-arrays using multiple dispatch with the conventions:
```
function H_obs(x::VecA(T), obs_dim::Int64, kwargs::StepKwargs) where T <: Real
function H_obs(x::ArView(T), obs_dim::Int64, kwargs::StepKwargs) where T <: Real
```
allowing for the same naming to be used for single states, time series of states, and
ensembles of states.
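A hedged sketch of this planned convention, using a hypothetical operator (not the
package's [`DataAssimilationBenchmarks.ObsOperators.alternating_obs_operator`](@ref))
that observes every other state component:
```{julia}
# hypothetical operator illustrating the vector / multi-array dispatch convention only
obs_every_other(x::Vector{Float64}, obs_dim::Int64) = x[1:2:2obs_dim]
obs_every_other(X::Matrix{Float64}, obs_dim::Int64) =
    X[1:2:2obs_dim, :] # columns are ensemble members or time snapshots
```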
## Methods
```@autodocs
Modules = [DataAssimilationBenchmarks.ObsOperators]
```
| DataAssimilationBenchmarks | https://github.com/cgrudz/DataAssimilationBenchmarks.jl.git |
|
[
"MIT"
] | 1.1.0 | a8d466a4330392cd2c3bfa7a74e170d8dccd6f66 | code | 63 |
module Khepri
using Reexport
@reexport using KhepriAutoCAD
end
| Khepri | https://github.com/aptmcl/Khepri.jl.git |
|
[
"MIT"
] | 1.1.0 | a8d466a4330392cd2c3bfa7a74e170d8dccd6f66 | code | 144 |
using Khepri
#@static if VERSION < v"0.7.0-DEV.2005"
# using Base.Test
#else
# using Test
#end
# write your own tests here
#@test 1 == 2
| Khepri | https://github.com/aptmcl/Khepri.jl.git |
|
[
"MIT"
] | 1.1.0 | a8d466a4330392cd2c3bfa7a74e170d8dccd6f66 | docs | 36 | # Khepri.jl
Khepri for Julia users.
| Khepri | https://github.com/aptmcl/Khepri.jl.git |
|
[
"MIT"
] | 0.3.0 | 658f5aff8f64b475c4b3c3c8da6ab457dd96812b | code | 612 |
using Documenter, TERMIOS, DocumenterMarkdown
cp(joinpath(@__DIR__, "../README.md"), joinpath(@__DIR__, "./src/index.md"), force=true, follow_symlinks=true)
makedocs(
sitename="TERMIOS.jl documentation",
format = Markdown()
)
deploydocs(;
repo="github.com/kdheepak/TERMIOS.jl",
deps = Deps.pip(
"mkdocs==0.17.5",
"mkdocs-material==2.9.4",
"python-markdown-math",
"pygments",
"pymdown-extensions",
),
make = () -> run(`mkdocs build`),
target = "site",
)
| TERMIOS | https://github.com/kdheepak/TERMIOS.jl.git |
|
[
"MIT"
] | 0.3.0 | 658f5aff8f64b475c4b3c3c8da6ab457dd96812b | code | 22627 |
"""
This package provides an interface to the POSIX calls for tty I/O control.
For a complete description of these calls, see the [termios(3)](http://man7.org/linux/man-pages/man3/termios.3.html) manual page.
**Usage**
Example:
using TERMIOS
const T = TERMIOS
term_stdin = T.termios()
T.tcgetattr(stdin, term_stdin)
term_stdin.c_iflag |= T.IGNBRK
T.tcsetattr(stdin, T.TCSANOW, term_stdin)
"""
module TERMIOS
struct TERMIOSError <: Exception
msg::String
end
Base.showerror(io::IO, e::TERMIOSError) = print(io, "TERMIOSError: $(e.msg)")
const cc_t = Cuchar # cc_t is an unsigned char on both Linux and macOS
const tcflag_t = Sys.islinux() ? Cuint : Culong
const char = Cchar # same C char type on Linux and macOS
const speed_t = Sys.islinux() ? Cuint : Culong
"""End-of-file character ICANON"""
const VEOF = Sys.islinux() ? 4 : 0
"""End-of-line character ICANON"""
const VEOL = Sys.islinux() ? 11 : 1
"""Second EOL character ICANON together with IEXTEN"""
const VEOL2 = Sys.islinux() ? 16 : 2
"""Erase character ICANON"""
const VERASE = Sys.islinux() ? 2 : 3
"""Word-erase character ICANON together with IEXTEN"""
const VWERASE = Sys.islinux() ? 14 : 4
"""Kill-line character ICANON"""
const VKILL = Sys.islinux() ? 3 : 5
"""Reprint-line character ICANON together with IEXTEN"""
const VREPRINT = Sys.islinux() ? 12 : 6
"""Interrupt character ISIG"""
const VINTR = Sys.islinux() ? 0 : 8
"""Quit character ISIG"""
const VQUIT = Sys.islinux() ? 1 : 9
"""Suspend character ISIG"""
const VSUSP = Sys.islinux() ? 10 : 10
"""Delayed suspend character ISIG together with IEXTEN"""
const VDSUSP = Sys.islinux() ? nothing : 11
"""Start (X-ON) character IXON, IXOFF"""
const VSTART = Sys.islinux() ? 8 : 12
"""Stop (X-OFF) character IXON, IXOFF"""
const VSTOP = Sys.islinux() ? 9 : 13
"""Literal-next character IEXTEN"""
const VLNEXT = Sys.islinux() ? 15 : 14
"""Discard character IEXTEN"""
const VDISCARD = Sys.islinux() ? 13 : 15
"""Minimum number of bytes read at once !ICANON"""
const VMIN = Sys.islinux() ? 6 : 16
"""Time-out value (tenths of a second) !ICANON"""
const VTIME = Sys.islinux() ? 5 : 17
"""Status character ICANON together with IEXTEN"""
const VSTATUS = Sys.islinux() ? nothing : 18
const NCCS = Sys.islinux() ? 32 : 20
const VSWTC = Sys.islinux() ? 7 : nothing
#
# Input flags - software input processing
#
"""Ignore BREAK condition"""
const IGNBRK = 0x00000001
"""Map BREAK to SIGINTR"""
const BRKINT = 0x00000002
"""Ignore (discard) parity errors"""
const IGNPAR = 0x00000004
"""Mark parity and framing errors"""
const PARMRK = 0x00000008
"""Enable checking of parity errors"""
const INPCK = 0x00000010
"""Strip 8th bit off chars"""
const ISTRIP = 0x00000020
"""Map NL into CR"""
const INLCR = 0x00000040
"""Ignore CR"""
const IGNCR = 0x00000080
"""Map CR to NL (ala CRMOD)"""
const ICRNL = 0x00000100
"""Enable output flow control"""
const IXON = Sys.islinux() ? 0x00000400 : 0x00000200
"""Enable input flow control"""
const IXOFF = Sys.islinux() ? 0x00001000 : 0x00000400
"""Any char will restart after stop"""
const IXANY = 0x00000800
"""Ring bell on input queue full"""
const IMAXBEL = Sys.islinux() ? nothing : 0x00002000
"""(macos) maintain state for UTF-8 VERASE"""
const IUTF8 = Sys.islinux() ? nothing : 0x00004000
"""(glibc) Translate upper case input to lower case."""
const IUCLC = Sys.islinux() ? (1 << 14) : nothing
#
# Output flags - software output processing
#
"""Enable following output processing """
const OPOST = 0x00000001
"""Map NL to CR-NL (ala CRMOD)"""
const ONLCR = Sys.islinux() ? 0x00000004 : 0x00000002
"""Expand tabs to spaces"""
const OXTABS = 0x00000004
"""Discard EOT's (^D) on output)"""
const ONOEOT = 0x00000008
"""Map CR to NL on output"""
const OCRNL = Sys.islinux() ? 0x00000008 : 0x00000010
"""No CR output at column 0"""
const ONOCR = Sys.islinux() ? 0x00000010 : 0x00000020
"""NL performs CR function"""
const ONLRET = Sys.islinux() ? 0x00000020 : 0x00000040
raw"""\n delay"""
const NLDLY = Sys.islinux() ? 0x00000100 : 0x00000300
"""Horizontal tab delay"""
const TABDLY = Sys.islinux() ? 0x00001800 : 0x00000c04
raw"""\r delay"""
const CRDLY = Sys.islinux() ? 0x00000600 : 0x00003000
"""Form feed delay"""
const FFDLY = Sys.islinux() ? 0x00008000 : 0x00004000
raw"""\b delay"""
const BSDLY = Sys.islinux() ? 0x00002000 : 0x00008000
"""Vertical tab delay"""
const VTDLY = Sys.islinux() ? 0x00004000 : 0x00010000
"""NL type 0."""
const NL0 = 0x00000000
"""NL type 1."""
const NL1 = 0x00000100
const NL2 = Sys.islinux() ? nothing : 0x00000200
const NL3 = Sys.islinux() ? nothing : 0x00000300
"""TAB delay type 0."""
const TAB0 = 0x00000000
"""TAB delay type 1."""
const TAB1 = Sys.islinux() ? 0x00000800 : 0x00000400
"""TAB delay type 2."""
const TAB2 = Sys.islinux() ? 0x00001000 : 0x00000800
"""Expand tabs to spaces."""
const TAB3 = Sys.islinux() ? 0x00001800 : 0x00000004
"""CR delay type 0."""
const CR0 = 0x00000000
"""CR delay type 1."""
const CR1 = Sys.islinux() ? 0x00000200 : 0x00001000
"""CR delay type 2."""
const CR2 = Sys.islinux() ? 0x00000400 : 0x00002000
"""CR delay type 3."""
const CR3 = Sys.islinux() ? 0x00000600 : 0x00003000
"""FF delay type 0."""
const FF0 = 0x00000000
"""FF delay type 1."""
const FF1 = Sys.islinux() ? 0x00008000 : 0x00004000
"""BS delay type 0."""
const BS0 = 0x00000000
"""BS delay type 1."""
const BS1 = Sys.islinux() ? 0x00002000 : 0x00008000
"""VT delay type 0."""
const VT0 = 0x00000000
"""VT delay type 1."""
const VT1 = Sys.islinux() ? 0x00004000 : 0x00010000
"""(glibc) Translate lower case output to upper case."""
const OLCUC = Sys.islinux() ? (1 << 17) : nothing
"""Use fill characters for delay"""
const OFILL = Sys.islinux() ? 0x00000040 : 0x00000080
"""Fill is DEL, else NUL"""
const OFDEL = Sys.islinux() ? nothing : 0x00020000
#
# Control flags - hardware control of terminal
#
"""Ignore control flags"""
const CIGNORE = Sys.islinux() ? nothing : 0x00000001
"""5 bits (pseudo)"""
const CS5 = 0x00000000
"""6 bits"""
const CS6 = Sys.islinux() ? 0x00000010 : 0x00000100
"""7 bits"""
const CS7 = Sys.islinux() ? 0x00000020 : 0x00000200
"""8 bits"""
const CS8 = CS6 | CS7
"""Character size mask"""
const CSIZE = CS5 | CS6 | CS7 | CS8
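# Example (sketch): to change the character size, the CSIZE mask bits must be
# cleared before OR-ing in the new width; this assumes `term` is a `termios`
# struct (defined later in this file) that has been populated via `tcgetattr`:
#     term.c_cflag = (term.c_cflag & ~CSIZE) | CS8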
"""Send 2 stop bits"""
const CSTOPB = Sys.islinux() ? 0x00000040 : 0x00000400
"""Enable receiver"""
const CREAD = Sys.islinux() ? 0x00000080 : 0x00000800
"""Parity enable"""
const PARENB = Sys.islinux() ? 0x00000100 : 0x00001000
"""Odd parity, else even"""
const PARODD = Sys.islinux() ? 0x00000200 : 0x00002000
"""Hang up on last close"""
const HUPCL = Sys.islinux() ? 0x00000400 : 0x00004000
"""Ignore modem status lines"""
const CLOCAL = Sys.islinux() ? 0x00000800 : 0x00008000
"""CTS flow control of output"""
const CCTS_OFLOW = Sys.islinux() ? nothing : 0x00010000
"""RTS flow control of input"""
const CRTS_IFLOW = Sys.islinux() ? nothing : 0x00020000
"""DTR flow control of input"""
const CDTR_IFLOW = Sys.islinux() ? nothing : 0x00040000
"""DSR flow control of output"""
const CDSR_OFLOW = Sys.islinux() ? nothing : 0x00080000
"""DCD flow control of output"""
const CCAR_OFLOW = Sys.islinux() ? nothing : 0x00100000
"""Old name for CCAR_OFLOW"""
const MDMBUF = Sys.islinux() ? nothing : 0x00100000
const CRTSCTS = Sys.islinux() ? nothing : (CCTS_OFLOW | CRTS_IFLOW)
#
# "Local" flags - dumping ground for other state
#
# Warning: some flags in this structure begin with
# the letter "I" and look like they belong in the
# input flags section.
#
"""Visual erase for line kill"""
const ECHOKE = Sys.islinux() ? nothing : 0x00000001
"""Visually erase chars"""
const ECHOE = Sys.islinux() ? 0x00000010 : 0x00000002
"""Echo NL after line kill"""
const ECHOK = Sys.islinux() ? 0x00000020 : 0x00000004
"""Enable echoing"""
const ECHO = 0x00000008
"""Echo NL even if ECHO is off"""
const ECHONL = Sys.islinux() ? 0x00000040 : 0x00000010
"""Visual erase mode for hardcopy"""
const ECHOPRT = Sys.islinux() ? nothing : 0x00000020
"""Echo control chars as ^(Char)"""
const ECHOCTL = Sys.islinux() ? nothing : 0x00000040
"""Enable signals INTR, QUIT, [D]SUSP"""
const ISIG = Sys.islinux() ? 0x00000001 : 0x00000080
"""Canonicalize input lines"""
const ICANON = Sys.islinux() ? 0x00000002 : 0x00000100
"""Use alternate WERASE algorithm"""
const ALTWERASE = Sys.islinux() ? nothing : 0x00000200
"""Enable DISCARD and LNEXT"""
const IEXTEN = Sys.islinux() ? 0x00008000 : 0x00000400
"""External processing"""
const EXTPROC = Sys.islinux() ? nothing : 0x00000800
"""Stop background jobs from output"""
const TOSTOP = Sys.islinux() ? 0x00000100 : 0x00400000
"""Output being flushed (state)"""
const FLUSHO = Sys.islinux() ? nothing : 0x00800000
"""No kernel output from VSTATUS"""
const NOKERNINFO = Sys.islinux() ? nothing : 0x02000000
"""XXX retype pending input (state)"""
const PENDIN = Sys.islinux() ? nothing : 0x20000000
"""Don't flush after interrupt"""
const NOFLSH = Sys.islinux() ? 0x00000080 : 0x80000000
#
# Commands passed to tcsetattr() for setting the termios structure.
#
"""Make change immediate"""
const TCSANOW = 0
"""Drain output, then change"""
const TCSADRAIN = 1
"""Drain output, flush input"""
const TCSAFLUSH = 2
"""Flag - don't alter h.w. state"""
const TCSASOFT = Sys.islinux() ? nothing : 0x10
#
# Standard speeds
#
const B0 = 0
const B50 = Sys.islinux() ? 1 : 50
const B75 = Sys.islinux() ? 2 : 75
const B110 = Sys.islinux() ? 3 : 110
const B134 = Sys.islinux() ? 4 : 134
const B150 = Sys.islinux() ? 5 : 150
const B200 = Sys.islinux() ? 6 : 200
const B300 = Sys.islinux() ? 7 : 300
const B600 = Sys.islinux() ? 8 : 600
const B1200 = Sys.islinux() ? 9 : 1200
const B1800 = Sys.islinux() ? 10 : 1800
const B2400 = Sys.islinux() ? 11 : 2400
const B4800 = Sys.islinux() ? 12 : 4800
# note: on Linux the Bxxx constants are bit codes, not literal baud rates
const B7200 = Sys.islinux() ? nothing : 7200
const B9600 = Sys.islinux() ? 13 : 9600
const B14400 = Sys.islinux() ? nothing : 14400
const B19200 = Sys.islinux() ? 14 : 19200
const B28800 = Sys.islinux() ? nothing : 28800
const B38400 = Sys.islinux() ? 15 : 38400
const B57600 = Sys.islinux() ? 0x00001001 : 57600
const B76800 = Sys.islinux() ? nothing : 76800
const B115200 = Sys.islinux() ? 0x00001002 : 115200
const B230400 = Sys.islinux() ? 0x00001003 : 230400
const EXTA = Sys.islinux() ? 14 : 19200
const EXTB = Sys.islinux() ? 15 : 38400
# speeds beyond POSIX.1 are Linux-only (undefined on macOS)
const B460800 = Sys.islinux() ? 0x00001004 : nothing
const B500000 = Sys.islinux() ? 0x00001005 : nothing
const B576000 = Sys.islinux() ? 0x00001006 : nothing
const B921600 = Sys.islinux() ? 0x00001007 : nothing
const B1000000 = Sys.islinux() ? 0x00001008 : nothing
const B1152000 = Sys.islinux() ? 0x00001009 : nothing
const B1500000 = Sys.islinux() ? 0x0000100a : nothing
const B2000000 = Sys.islinux() ? 0x0000100b : nothing
const B2500000 = Sys.islinux() ? 0x0000100c : nothing
const B3000000 = Sys.islinux() ? 0x0000100d : nothing
const B3500000 = Sys.islinux() ? 0x0000100e : nothing
const B4000000 = Sys.islinux() ? 0x0000100f : nothing
#
# Values for the QUEUE_SELECTOR argument to `tcflush'.
#
"""Discard data received but not yet read."""
const TCIFLUSH = Sys.islinux() ? 0 : 1
"""Discard data written but not yet sent."""
const TCOFLUSH = Sys.islinux() ? 1 : 2
"""Discard all pending data."""
const TCIOFLUSH = Sys.islinux() ? 2 : 3
#
# Values for the ACTION argument to `tcflow'.
#
"""Suspend output."""
const TCOOFF = Sys.islinux() ? 16 : 1
"""Restart suspended output."""
const TCOON = Sys.islinux() ? 32 : 2
"""Send a STOP character."""
const TCIOFF = Sys.islinux() ? 4 : 3
"""Send a START character."""
const TCION = Sys.islinux() ? 8 : 4
########################################################################################################################
# The layout of a termios struct in C must be as follows
#
# ```c
# struct termios {
# tcflag_t c_iflag;
# tcflag_t c_oflag;
# tcflag_t c_cflag;
# tcflag_t c_lflag;
# cc_t c_line;
# cc_t c_cc[NCCS];
# speed_t c_ispeed;
# speed_t c_ospeed;
# };
# ```
#
# We need to mirror this struct in Julia with a matching memory layout.
# The termios library requires passing in a termios struct that is used to get or set attributes.
# There are two ways to create a `struct` in Julia that affect the memory layout:
# `struct termios end` and `mutable struct termios end`.
# The first one is immutable and cannot be passed as a reference into a library.
# Additionally, because it is immutable, Julia may make a copy of the struct instead of passing a reference.
# The second one is what we want.
# The typical workflow with the termios library is to initialize a termios struct with `tcgetattr`,
# change values as appropriate, and then set the attributes with `tcsetattr`.
# The termios struct has a field `c_cc[NCCS]`, which is laid out sequentially in memory.
# In Julia, there are two ways to lay out memory sequentially. We can either
# 1) use an `NTuple{NCCS, UInt8}`, or
# 2) lay out the memory manually.
# We take approach (1) here. Tuples are immutable, so changing `termios.c_cc` means building
# a new tuple with the desired values; a `getproperty` wrapper (below) mimics the interface
# presented in C so users don't have to do this by hand.
mutable struct termios
"""Input flags"""
c_iflag::tcflag_t
"""Output flags"""
c_oflag::tcflag_t
"""Control flags"""
c_cflag::tcflag_t
"""Local flags"""
c_lflag::tcflag_t
@static if Sys.islinux()
c_line::cc_t
end
"""Control chars"""
_c_cc::NTuple{NCCS, UInt8}
"""Input speed"""
c_ispeed::speed_t
"""Output speed"""
c_ospeed::speed_t
end
function Base.getproperty(t::termios, name::Symbol)
if name == :c_cc
return _C_CC(t)
else
return getfield(t, name)
end
end
function Base.propertynames(t::termios, private = false)
    # only the Linux termios struct has the extra `c_line` field
    return @static if Sys.islinux()
        (
            :c_iflag,
            :c_oflag,
            :c_cflag,
            :c_lflag,
            :c_line,
            :c_cc,
            :c_ispeed,
            :c_ospeed,
        )
    else
        (
            :c_iflag,
            :c_oflag,
            :c_cflag,
            :c_lflag,
            :c_cc,
            :c_ispeed,
            :c_ospeed,
        )
    end
end
struct _C_CC
ref::termios
end
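# `_C_CC` wraps a termios struct to provide C-style, zero-based indexing into
# c_cc (so the VMIN, VTIME, ... indices defined above can be used directly);
# hence the `index + 1` translation below.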
function Base.getindex(c_cc::_C_CC, index)
return collect(c_cc.ref._c_cc)[index + 1]
end
function Base.setindex!(c_cc::_C_CC, value, index)
_c_cc = collect(c_cc.ref._c_cc)
_c_cc[index + 1] = value
c_cc.ref._c_cc = NTuple{NCCS, UInt8}(_c_cc)
end
function Base.show(io::IO, ::MIME"text/plain", c_cc::_C_CC)
    X = collect(c_cc.ref._c_cc)
summary(io, X)
isempty(X) && return
print(io, ":")
if !haskey(io, :compact) && length(axes(X, 2)) > 1
io = IOContext(io, :compact => true)
end
if get(io, :limit, false) && displaysize(io)[1]-4 <= 0
return print(io, " …")
else
println(io)
end
io = IOContext(io, :typeinfo => eltype(X))
Base.print_array(io, X)
end
function termios()
term = @static if Sys.islinux()
termios(
0,
0,
0,
0,
0,
NTuple{NCCS, UInt8}([0 for _ in 1:NCCS]),
0,
0,
)
else
termios(
0,
0,
0,
0,
NTuple{NCCS, UInt8}([0 for _ in 1:NCCS]),
0,
0,
)
end
return term
end
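# Example (sketch): a typical get/modify/set round trip using the C-style c_cc
# interface (tcgetattr/tcsetattr are defined below). Setting VMIN = 0 and
# VTIME = 1 configures reads to time out after 0.1 s:
#     term = termios()
#     tcgetattr(stdin, term)
#     term.c_cc[VMIN] = 0
#     term.c_cc[VTIME] = 1
#     tcsetattr(stdin, TCSANOW, term)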
# helper function
_file_handle(s::Base.LibuvStream) = Base._fd(s)
"""
tcgetattr(fd::RawFD, term::termios)
tcgetattr(s::Base.LibuvStream, term::termios)
tcgetattr(f::Int, term::termios)
Get the tty attributes for file descriptor fd
"""
function tcgetattr(fd::RawFD, term::termios)
r = ccall(:tcgetattr, Cint, (Cint, Ptr{Cvoid}), fd, Ref(term))
r == -1 ? throw(TERMIOSError("tcgetattr failed: $(Base.Libc.strerror())")) : nothing
end
tcgetattr(s::Base.LibuvStream, term) = tcgetattr(_file_handle(s), term)
tcgetattr(f::Int, term) = tcgetattr(RawFD(f), term)
"""
tcsetattr(s::Base.LibuvStream, when::Integer, term::termios)
Set the tty attributes for file descriptor fd.
The when argument determines when the attributes are changed:
- `TERMIOS.TCSANOW` to change immediately
- `TERMIOS.TCSADRAIN` to change after transmitting all queued output
- `TERMIOS.TCSAFLUSH` to change after transmitting all queued output and discarding all queued input.
"""
function tcsetattr(fd::RawFD, when::Integer, term::termios)
r = ccall(:tcsetattr, Cint, (Cint, Cint, Ptr{Cvoid}), fd, when, Ref(term))
r == -1 ? throw(TERMIOSError("tcsetattr failed: $(Base.Libc.strerror())")) : nothing
end
tcsetattr(s::Base.LibuvStream, when, term) = tcsetattr(_file_handle(s), when, term)
tcsetattr(f::Int, when, term) = tcsetattr(RawFD(f), when, term)
"""
tcdrain(s::Base.LibuvStream)
Wait until all output written to file descriptor fd has been transmitted.
"""
function tcdrain(fd::RawFD)
r = ccall(:tcdrain, Cint, (Cint, ), fd)
r == -1 ? throw(TERMIOSError("tcdrain failed: $(Base.Libc.strerror())")) : nothing
end
tcdrain(s::Base.LibuvStream) = tcdrain(_file_handle(s))
tcdrain(f::Int) = tcdrain(RawFD(f))
"""
tcflow(s::Base.LibuvStream, action::Integer)
Suspend transmission or reception of data on the object referred to by fd, depending on the value of action:
- `TERMIOS.TCOOFF` to suspend output,
- `TERMIOS.TCOON` to restart output
- `TERMIOS.TCIOFF` to suspend input,
- `TERMIOS.TCION` to restart input.
"""
function tcflow(fd::RawFD, action::Integer)
    r = ccall(:tcflow, Cint, (Cint, Cint), fd, action)
r == -1 ? throw(TERMIOSError("tcflow failed: $(Base.Libc.strerror())")) : nothing
end
tcflow(s::Base.LibuvStream, action) = tcflow(_file_handle(s), action)
tcflow(fd::Int, action) = tcflow(RawFD(fd), action)
"""
tcflush(s::Base.LibuvStream, queue::Integer)
Discard data written to the object referred to by fd but not transmitted, or data received but not read, depending on the value of queue_selector:
- `TERMIOS.TCIFLUSH` flushes data received but not read.
- `TERMIOS.TCOFLUSH` flushes data written but not transmitted.
- `TERMIOS.TCIOFLUSH` flushes both data received but not read, and data written but not transmitted.
"""
function tcflush(fd::RawFD, queue::Integer)
r = ccall(:tcflush, Cint, (Cint, Cint), fd, queue)
r == -1 ? throw(TERMIOSError("tcflush failed: $(Base.Libc.strerror())")) : nothing
end
tcflush(s::Base.LibuvStream, queue) = tcflush(_file_handle(s), queue)
tcflush(fd::Int, queue) = tcflush(RawFD(fd), queue)
"""
tcsendbreak(s::Base.LibuvStream, duration::Integer)
Transmit a continuous stream of zero-valued bits for a specific duration, if the terminal is using asynchronous serial data transmission. If duration is zero, it transmits zero-valued bits for at least 0.25 seconds, and not more than 0.5 seconds. If duration is not zero, it sends zero-valued bits for some implementation-defined length of time.
If the terminal is not using asynchronous serial data transmission, tcsendbreak() returns without taking any action.
"""
function tcsendbreak(fd::RawFD, duration::Integer)
r = ccall(:tcsendbreak, Cint, (Cint, Cint), fd, duration)
r == -1 ? throw(TERMIOSError("tcsendbreak failed: $(Base.Libc.strerror())")) : nothing
end
tcsendbreak(s::Base.LibuvStream, duration) = tcsendbreak(_file_handle(s), duration)
tcsendbreak(f::Int, duration) = tcsendbreak(RawFD(f), duration)
"""
cfmakeraw(term::termios)
Set the terminal to something like the "raw" mode of the old Version 7 terminal driver: input is available character by character, echoing is disabled, and all special processing of terminal input and output characters is disabled. The terminal attributes are set as follows:
term.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP
| INLCR | IGNCR | ICRNL | IXON);
term.c_oflag &= ~OPOST;
term.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
term.c_cflag &= ~(CSIZE | PARENB);
term.c_cflag |= CS8;
"""
function cfmakeraw(term::termios)
    # cfmakeraw returns void on both glibc and macOS, so there is no error code to check
    ccall(:cfmakeraw, Cvoid, (Ref{termios},), Ref(term))
    return nothing
end
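# Example (sketch): put the terminal attached to stdin into raw mode, keeping a
# second termios struct with the original attributes so they can be restored:
#     orig = termios(); tcgetattr(stdin, orig)
#     raw = termios(); tcgetattr(stdin, raw)
#     cfmakeraw(raw)
#     tcsetattr(stdin, TCSANOW, raw)
#     # ... raw-mode I/O ...
#     tcsetattr(stdin, TCSAFLUSH, orig)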
"""
cfsetspeed(term::termios, speed::Int)
A 4.4BSD extension that takes the same arguments as cfsetispeed() and sets both the input and output speed.
"""
function cfsetspeed(term::termios, speed::Integer)
r = ccall(:cfsetspeed, Cint, (Ref{termios}, speed_t), Ref(term), speed)
r == -1 ? throw(TERMIOSError("cfsetspeed failed: $(Base.Libc.strerror())")) : nothing
end
"""
cfgetispeed(term::termios) -> Int
Returns the input baud rate stored in the termios structure.
"""
cfgetispeed(term::termios) = ccall(:cfgetispeed, speed_t, (Ptr{termios}, ), Ref(term))
"""
cfgetospeed(term::termios) -> Int
Returns the output baud rate stored in the termios structure.
"""
cfgetospeed(term::termios) = ccall(:cfgetospeed, speed_t, (Ptr{termios}, ), Ref(term))
"""
cfsetispeed(term::termios, speed::Integer)
sets the input baud rate stored in the termios structure to speed, which must be one of these constants:
- `TERMIOS.B0`
- `TERMIOS.B50`
- `TERMIOS.B75`
- `TERMIOS.B110`
- `TERMIOS.B134`
- `TERMIOS.B150`
- `TERMIOS.B200`
- `TERMIOS.B300`
- `TERMIOS.B600`
- `TERMIOS.B1200`
- `TERMIOS.B1800`
- `TERMIOS.B2400`
- `TERMIOS.B4800`
- `TERMIOS.B9600`
- `TERMIOS.B19200`
- `TERMIOS.B38400`
- `TERMIOS.B57600`
- `TERMIOS.B115200`
- `TERMIOS.B230400`
The zero baud rate, B0, is used to terminate the connection. If B0 is specified, the modem control lines shall no longer be asserted. Normally, this will disconnect the line. CBAUDEX is a mask for the speeds beyond those defined in POSIX.1 (57600 and above). Thus, B57600 & CBAUDEX is nonzero.
"""
function cfsetispeed(term::termios, speed::Integer)
r = ccall(:cfsetispeed, Cint, (Ref{termios}, speed_t), Ref(term), speed)
r == -1 ? throw(TERMIOSError("cfsetispeed failed: $(Base.Libc.strerror())")) : nothing
end
"""
cfsetospeed(term::termios, speed::Integer)
sets the output baud rate stored in the termios structure to speed, which must be one of these constants:
- `TERMIOS.B0`
- `TERMIOS.B50`
- `TERMIOS.B75`
- `TERMIOS.B110`
- `TERMIOS.B134`
- `TERMIOS.B150`
- `TERMIOS.B200`
- `TERMIOS.B300`
- `TERMIOS.B600`
- `TERMIOS.B1200`
- `TERMIOS.B1800`
- `TERMIOS.B2400`
- `TERMIOS.B4800`
- `TERMIOS.B9600`
- `TERMIOS.B19200`
- `TERMIOS.B38400`
- `TERMIOS.B57600`
- `TERMIOS.B115200`
- `TERMIOS.B230400`
The zero baud rate, B0, is used to terminate the connection. If B0 is specified, the modem control lines shall no longer be asserted. Normally, this will disconnect the line. CBAUDEX is a mask for the speeds beyond those defined in POSIX.1 (57600 and above). Thus, B57600 & CBAUDEX is nonzero.
"""
function cfsetospeed(term::termios, speed::Integer)
r = ccall(:cfsetospeed, Cint, (Ref{termios}, speed_t), Ref(term), speed)
r == -1 ? throw(TERMIOSError("cfsetospeed failed: $(Base.Libc.strerror())")) : nothing
end
end # module
| TERMIOS | https://github.com/kdheepak/TERMIOS.jl.git |
|
[
"MIT"
] | 0.3.0 | 658f5aff8f64b475c4b3c3c8da6ab457dd96812b | code | 2373 | using TERMIOS
using Test
const c_iflag = Sys.islinux() ? 0x00000500 : 0x0000000000006b02
const c_oflag = Sys.islinux() ? 0x00000005 : 0x0000000000000003
const c_cflag = Sys.islinux() ? 0x000000bf : 0x0000000000004b00
const c_lflag = Sys.islinux() ? 0x00008a3b : 0x00000000000005cf
const c_cc = Sys.islinux() ? (0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00, 0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) : (0x04, 0xff, 0xff, 0x7f, 0x17, 0x15, 0x12, 0x00, 0x03, 0x1c, 0x1a, 0x19, 0x11, 0x13, 0x16, 0x0f, 0x01, 0x00, 0x14, 0x00)
const c_ispeed = Sys.islinux() ? 0x0000000f : 0x0000000000009600
const c_ospeed = Sys.islinux() ? 0x0000000f : 0x0000000000009600
@testset "All" begin
@testset "termios.jl stdout" begin
term = TERMIOS.termios()
TERMIOS.tcgetattr(stdout, term)
@test term.c_iflag == c_iflag
@test term.c_oflag == c_oflag
@test term.c_cflag == c_cflag
@test term.c_lflag == c_lflag
@test term.c_cc.ref._c_cc == c_cc
@test term.c_ispeed == c_ispeed
@test term.c_ospeed == c_ospeed
term = TERMIOS.termios()
TERMIOS.tcgetattr(0, term)
@test term.c_iflag == c_iflag
@test term.c_oflag == c_oflag
@test term.c_cflag == c_cflag
@test term.c_lflag == c_lflag
@test term.c_cc.ref._c_cc == c_cc
@test term.c_ispeed == c_ispeed
@test term.c_ospeed == c_ospeed
term = TERMIOS.termios()
TERMIOS.tcgetattr(0, term)
@test TERMIOS.cfgetispeed(term) == term.c_ispeed
@test TERMIOS.cfgetospeed(term) == term.c_ospeed
TERMIOS.cfsetispeed(term, term.c_ispeed)
@test TERMIOS.cfgetispeed(term) == term.c_ispeed
TERMIOS.cfsetospeed(term, term.c_ospeed)
@test TERMIOS.cfgetospeed(term) == term.c_ospeed
term = TERMIOS.termios()
TERMIOS.tcgetattr(0, term)
TERMIOS.tcsetattr(0, TERMIOS.TCSANOW, term)
@test TERMIOS.cfgetispeed(term) == term.c_ispeed
@test TERMIOS.cfgetospeed(term) == term.c_ospeed
end
@testset "termios.jl stdin" begin
term = TERMIOS.termios()
TERMIOS.tcgetattr(stdin, term)
@test term.c_iflag == c_iflag
@test term.c_oflag == c_oflag
@test term.c_cflag == c_cflag
@test term.c_lflag == c_lflag
@test term.c_cc.ref._c_cc == c_cc
@test term.c_ispeed == c_ispeed
@test term.c_ospeed == c_ospeed
end
end
| TERMIOS | https://github.com/kdheepak/TERMIOS.jl.git |
|
[
"MIT"
] | 0.3.0 | 658f5aff8f64b475c4b3c3c8da6ab457dd96812b | docs | 1347 | # TERMIOS
[](https://kdheepak.github.io/TERMIOS.jl/stable)
[](https://kdheepak.github.io/TERMIOS.jl/dev)
[](https://travis-ci.com/kdheepak/TERMIOS.jl)
[](https://codecov.io/gh/kdheepak/TERMIOS.jl)
[](https://coveralls.io/github/kdheepak/TERMIOS.jl?branch=master)
This package provides an interface to the POSIX calls for tty I/O control.
> The termios functions describe a general terminal interface that is
> provided to control asynchronous communications ports.
For a complete description of these calls, see the [termios(3)](http://man7.org/linux/man-pages/man3/termios.3.html) manual page.
All functions in this package take a `RawFD` file descriptor `fd` as their first argument.
This can also be an integer file descriptor or a concrete instance of `Base.LibuvStream`, such as `stdin` or `stdout`.
This package defines the constants needed to work with the functions provided.
You may need to refer to your system documentation for more information on using this package.
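A minimal sketch (assuming a terminal is attached to `stdin`): save the current attributes, disable echo, and restore the saved attributes when done.

```julia
using TERMIOS
const T = TERMIOS

orig = T.termios()
T.tcgetattr(stdin, orig) # save the current attributes

term = T.termios()
T.tcgetattr(stdin, term)
term.c_lflag &= ~T.ECHO # disable echoing of typed characters
T.tcsetattr(stdin, T.TCSANOW, term)

# ... read input without echo ...

T.tcsetattr(stdin, T.TCSANOW, orig) # restore the saved attributes
```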
| TERMIOS | https://github.com/kdheepak/TERMIOS.jl.git |
|
[
"MIT"
] | 0.3.0 | 658f5aff8f64b475c4b3c3c8da6ab457dd96812b | docs | 761 | # Usage
### Example
```julia
using TERMIOS
const T = TERMIOS
options = T.termios()
T.tcgetattr(stdin, options)
# Disable ctrl-c, disable CR translation, disable stripping 8th bit (unicode), disable parity
options.c_iflag &= ~(T.BRKINT | T.ICRNL | T.INPCK | T.ISTRIP | T.IXON)
# Disable output processing
options.c_oflag &= ~(T.OPOST)
# Disable parity
options.c_cflag &= ~(T.CSIZE | T.PARENB)
# Set character size to 8 bits (unicode)
options.c_cflag |= (T.CS8)
# Disable echo, disable canonical mode (line mode), disable input processing, disable signals
options.c_lflag &= ~(T.ECHO | T.ICANON | T.IEXTEN | T.ISIG)
options.c_cc[T.VMIN] = 0
options.c_cc[T.VTIME] = 1
T.tcsetattr(stdin, T.TCSANOW, options)
```
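To restore the terminal afterwards, capture the original attributes in a second struct before making changes, and apply them again when finished — a sketch:

```julia
original = T.termios()
T.tcgetattr(stdin, original) # capture this before modifying `options`

# ... raw-mode work ...

T.tcsetattr(stdin, T.TCSAFLUSH, original) # restore the saved state
```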
### API
```@autodocs
Modules = [TERMIOS]
```
| TERMIOS | https://github.com/kdheepak/TERMIOS.jl.git |
|
[
"MIT"
] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 5447 |
using CorticalSurfaces
using CorticalParcels
using CIFTI
using JLD
using Pkg.Artifacts
# first we need to set up a surface to use (refer to CorticalSurfaces.jl for details);
data_dir = artifact"CIFTI_test_files"
surf_file = joinpath(data_dir, "MSC01.jld")
temp = load(surf_file)
hems = Dict()
for hem in LR
coords = temp["pointsets"]["midthickness"][hem]
mw = temp["medial wall"][hem]
triangles = temp["triangle"][hem] # required for adjacency calculations below
hems[hem] = Hemisphere(hem, coords, mw; triangles = triangles)
make_adjacency_list!(hems[hem])
make_adjacency_matrix!(hems[hem])
end
c = CorticalSurface(hems[L], hems[R])
# note that the two adjacency items created above, :A and :neighbors, are required
# for many parcelwise ops (particularly ones that require graph traversal such
# as erode!() and dilate!()), though you could still do some things without them
# for now we'll just deal with one hemisphere, the left one:
hem = c[L]
# given the vertex space defined in the left Hemisphere struct above,
# now make a "parcel" within that space consisting of just a single vertex, 17344
p1 = Parcel(hem, [17344])
# make another parcel at vertex 8423 (which happens to be 30mm from the previous one)
p2 = Parcel(hem, [8423])
# grow the first parcel a couple of times, and check the size afterwards ...
dilate!(p1) # 6 vertices are added, so size is now 7
@assert size(p1) == 7
dilate!(p1) # 12 vertices are added, so size is now 19
@assert size(p1) == 19
# make a copy of p1 and do some various resizing
p1′ = Parcel(p1)
dilate!(p1′)
erode!(p1′)
@assert isequal(p1, p1′)
# resize to an arbitrary size, say 500 vertices, by repeated dilation:
resize!(p1′, 500)
# or shrink (erode) it to 100 vertices:
resize!(p1′, 100)
# dilate it once more, but don't add more than 10 new vertices:
dilate!(p1′; limit = 10)
@assert size(p1′) <= 110
# if you want to see which vertices belong to a parcel:
vertices(p1′)
# remove all vertices from the parcel
clear!(p1′)
# grow p2 iteratively until there's only a small margin or interstice
# separating it from p1:
while sum(interstices(p1, p2)) == 0
n_new_verts = dilate!(p2)
println("Added $n_new_verts vertices to p2 (total size: $(size(p2)))")
end
# they still don't quite overlap yet ...
@assert overlap(p1, p2) == 0
@assert complement(p1, p2) == size(p1)
@assert complement(p2, p1) == size(p2)
# but there's only a thin margin or interstice, 3 vertices long, between them:
margin_vertices = findall(interstices(p1, p2))
@assert length(margin_vertices) == 3
# now make an empty parcellation within the space of our left Hemisphere struct,
# using keys (parcel IDs) of type Int:
px = HemisphericParcellation{Int}(hem)
# give it *copies* of the two parcels we were working with above
px[1] = Parcel(p1)
px[2] = Parcel(p2)
@assert size(px) == 2 # two parcels
# now combine parcels 1 and 2 from px; parcel 2 will be incorporated into
# parcel 1, along with the 3 interstitial vertices in between, and then deleted
merge!(px, 1, 2)
@assert size(px) == 1 # just one parcel remains now
@assert size(px[1]) == size(p1) + size(p2) + length(margin_vertices)
# now reverse those operations and go back to the way it was a minute ago
setdiff!(px[1], p2)
setdiff!(px[1], margin_vertices)
@assert isequal(px[1], p1)
# add a copy of parcel #2 back in
px[2] = Parcel(p2)
# add just one vertex from the interstices so that the two parcels become connected
append!(px[1], margin_vertices[1])
# make a new parcel p3 just for demo purposes
p3 = Parcel(px[1])
union!(p3, p2) # combine p3 and p2
@assert size(p3) == 1 + size(p1) + size(p2)
# now p3 has all the vertices from p1, all the vertices from p2,
# plus one vertex linking those two regions; we can cut that vertex
# (an articulation point or cut vertex in graph theory terms) and then
# get back the two resulting connected components, i.e. recover the
# two original parcels p1 and p2 though not necessarily in the same order:
orig_parcels = cut(p3)
@assert isequal(orig_parcels[1], p2)
@assert overlap(orig_parcels[2], p1) == size(p1) - 1
# load in a real parcellation from a CIFTI file:
parcel_file = joinpath(data_dir, "test_parcels.dtseries.nii")
cifti_data = CIFTI.load(parcel_file)
px = BilateralParcellation{Int}(c, cifti_data)
# as long as there are no overlapping parcels, you can use vec() on an
# AbstractParcellation{T} to recover a simple Vector{T} that matches the original vector
# from which it was constructed (except for the fact that the parcellation will
# include medial wall vertices; so for comparison we pad the original cifti data
# to account for that):
@assert vec(px) == pad(vec(cifti_data[LR]), c)
# A BilateralParcellation is composed of a left and a right HemisphericParcellation;
# you can access them like px[L] and px[R]. Every time you show px, it will display
# properties of a few random parcels
px[L]
px[L]
px[L]
# some miscellaneous functions:
keys(px[L]) # get the parcel IDs (of type T) from HemisphericParcellation{T} px
vec(px[L]) # turn a HemisphericParcellation{T} into a Vector{T}
unassigned(px[L]) # get a BitVector representing the unassigned vertices in px
union(px[L]) # collapse all Parcels within px to a single BitVector
nnz(px[L]) # the number of vertices in px that have parcel membership
length(px[L]) # the length of px's vector space representation
density(px[L]) # proportion of assigned vertices: nnz(px) / length(px)
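# the package also exports a distance API (centroid, distance, and the
# DistanceMethod types CentroidToCentroid and ClosestVertices); the calls below
# are a hedged sketch of plausible usage -- the exact signatures are assumptions,
# so check src/distances.jl before relying on them:
# centroid(px[L][1]) # a representative central vertex of a parcel
# distance(p1, p2) # spatial distance between two parcels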
| CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git |
|
[
"MIT"
] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 992 |
module CorticalParcels
using CIFTI
using CorticalSurfaces
using Chain
using NearestNeighbors
using SparseArrays
using StatsBase: sample
using ThreadsX
# import some type-aliasing constants for convenience
import CorticalSurfaces: AdjacencyList, AdjacencyMatrix, DistanceMatrix
include("types.jl")
export Parcel, AbstractParcellation, HemisphericParcellation, BilateralParcellation
include("constructors.jl")
include("accessors.jl")
export vertices, size, length, keys, haskey, values, getindex
export vec, union, unassigned, nnz, density
include("set_ops.jl")
export intersect, union, setdiff, intersect!, union!, setdiff!
export overlap, complement
include("morphology.jl")
export dilate!, erode!, close!, resize!
export dilate, erode, interstices, borders
include("editing.jl")
export setindex!, cut, split, clear!, delete!, append!, merge!, deepcopy
include("distances.jl")
export DistanceMethod, CentroidToCentroid, ClosestVertices, centroid, distance
include("show.jl")
end
| CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git |
|
[
"MIT"
] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 4553 |
import CorticalSurfaces: vertices
# ===== Parcel functions =====
"""
membership(p)
Get a `BitVector` denoting vertexwise parcel membership
"""
membership(p::Parcel) = p.membership
"""
vertices(p)
Get the vertex indices belonging to a `Parcel`. Indices will be numbered
inclusive of medial wall by default.
"""
vertices(p::Parcel) = findall(p.membership)
"""
size(p)
Get the size (number of non-zero vertices) of a `Parcel`.
"""
Base.size(p::Parcel) = sum(p.membership)
"""
length(p)
Get the length of the representational space in which a `Parcel` is located.
"""
Base.length(p::Parcel) = length(p.membership)
"""
density(p)
Get the proportion of member vertices in a `Parcel` relative to the length of its space.
"""
density(p::Parcel) = size(p) / length(p)
Base.getindex(p::Parcel, args...) = getindex(p.membership, args...)
Base.isequal(p1::Parcel, p2::Parcel) =
p1.surface == p2.surface && p1.membership == p2.membership
Base.isequal(p1::Parcel, x::BitVector) =
	p1.membership == x
Base.:(==)(p1::Parcel, p2::Parcel) = isequal(p1, p2)
Base.:(==)(p1::Parcel, x::BitVector) = isequal(p1, x)
CorticalSurfaces.brainstructure(p::Parcel) = brainstructure(p.surface)
# ===== Parcellation functions =====
"""
size(px)
Get the number of Parcels comprising a Parcellation.
"""
Base.size(px::HemisphericParcellation) = length(px.parcels)
Base.size(px::BilateralParcellation) = size(px[L]) + size(px[R])
"""
length(px)
Get the number of vertices comprising the representation space of a `Parcellation`.
"""
Base.length(px::AbstractParcellation) = size(px.surface)
"""
keys(px)
Get the IDs of all `Parcel`s within a `Parcellation`.
"""
Base.keys(px::HemisphericParcellation) = keys(px.parcels)
Base.keys(px::BilateralParcellation) = union(keys(px[L]), keys(px[R]))
"""
haskey(px, k)
Check whether `Parcellation{T} px` contains a parcel with key value `k`.
"""
Base.haskey(px::HemisphericParcellation{T}, k::T) where T = haskey(px.parcels, k)
Base.haskey(px::BilateralParcellation{T}, k::T) where T =
haskey(px[L], k) || haskey(px[R], k)
"""
values(px)
Access the `Parcel`s in a `Parcellation`.
"""
Base.values(px::HemisphericParcellation) = values(px.parcels)
"""
getindex(px, k)
Access a single `Parcel` within a `Parcellation` via its key of type `T`.
"""
Base.getindex(px::HemisphericParcellation{T}, k::T) where T = px.parcels[k]
Base.getindex(px::BilateralParcellation{T}, k::T) where T =
haskey(px[L], k) ? px[L][k] : px[R][k]
Base.getindex(px::BilateralParcellation, b::BrainStructure) = px.parcels[b]
"""
vec(px)
Convert a `HemisphericParcellation` from its internal `Dict`-based representation into
a `Vector{T}`. `T` must have a `zeros(T, ...)` method. Warning: this is not a sensible
representation in the event that any `Parcel`s overlap.
"""
function Base.vec(px::HemisphericParcellation{T}) where T <: Real
out = zeros(T, length(px))
for k in keys(px)
@inbounds out[vertices(px[k])] .= k
end
return out
end
"""
vec(px)
Convert a `BilateralParcellation` from its internal `Dict`-based representation into
a `Vector{T}`. `T` must have a `zeros(T, ...)` method. Warning: this is not a sensible
representation in the event that any `Parcel`s overlap.
"""
function Base.vec(px::BilateralParcellation{T}) where T <: Real
return vcat(vec(px[L]), vec(px[R]))
end
function Base.union(px::HemisphericParcellation)
out = falses(length(px))
for k in keys(px)
out .|= px[k].membership
end
return out
end
"""
unassigned(px)
Get a `BitVector` identifying unassigned vertices (`1`) in a parcellation.
"""
unassigned(px::HemisphericParcellation) = .!union(px)
unassigned(px::BilateralParcellation) = vcat(unassigned(px[L]), unassigned(px[R]))
"""
nnz(px)
Get the number of vertices within a parcellation that are assigned
to at least one `Parcel`.
"""
nnz(px::HemisphericParcellation) = sum(union(px))
nnz(px::BilateralParcellation) = nnz(px[L]) + nnz(px[R])
"""
density(px)
Get the proportion of assigned parcel vertices of a parcellation
relative to the total number of vertices in its surface representation.
"""
density(px::AbstractParcellation) = nnz(px) / length(px)
function Base.:(==)(px1::HemisphericParcellation, px2::HemisphericParcellation)
px1.surface == px2.surface || return false
all([haskey(px2, k) && px1[k] == px2[k] for k in keys(px1)]) || return false
return true
end
function Base.:(==)(px1::BilateralParcellation, px2::BilateralParcellation)
return px1[L] == px2[L] && px1[R] == px2[R]
end
| CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git |
|
[
"MIT"
] | 0.9.0 | 1a6437e64eda050221e0886b7c37e9f3535028bf | code | 802 |
function Base.getindex(
c::CiftiStruct{E, CIFTI.BRAIN_MODELS(), C}, p::Parcel
) where {E, C}
hem = brainstructure(p.surface)
	n_out = length(c.brainstructure[hem])
nverts_excl = size(p.surface, Exclusive())
nverts_excl == n_out || error("Provided parcel doesn't match CIFTI's vertex space")
verts = c.brainstructure[hem][collapse(vertices(p), p.surface)]
return c[verts, :]
end
function Base.getindex(
c::CiftiStruct{E, CIFTI.BRAIN_MODELS(), CIFTI.BRAIN_MODELS()}, p::Parcel
) where E
hem = brainstructure(p.surface)
	n_out = length(c.brainstructure[hem])
nverts_excl = size(p.surface, Exclusive())
nverts_excl == n_out || error("Provided parcel doesn't match CIFTI's vertex space")
verts = c.brainstructure[hem][collapse(vertices(p), p.surface)]
return c[verts, verts]
end
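# Example (sketch) of how these methods are meant to be used -- the file name is
# hypothetical, and `p` must be a Parcel defined on a surface matching the CIFTI
# file's vertex space:
#     c = CIFTI.load("my_data.dtseries.nii")
#     parcel_data = c[p] # vertices-within-parcel × timepoints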
| CorticalParcels | https://github.com/myersm0/CorticalParcels.jl.git |