```@meta
DocTestSetup = quote
using PoreMatMod
parent = Crystal("IRMOF-1.cif")
infer_bonds!(parent, true)
query = moiety("p-phenylene.xyz")
end
```
# Subgraph matching (substructure searches)
`PoreMatMod.jl` conducts subgraph matching, i.e. searches for subgraphs of a `parent` graph isomorphic to a `query` graph, using [Ullmann's algorithm for subgraph isomorphisms](https://doi.org/10.1145/321921.321925).
For subgraph matching, both the `parent` crystal structure and `query` fragment are represented by node-labeled (by the chemical species) graphs (nodes = atoms, edges = bonds). For crystals, bonds across the unit cell boundaries of periodic materials are accounted for, allowing us to find subgraph isomorphisms when the fragment is split across a unit cell boundary.
# Substructure Searches: how to
To learn by example, suppose we wish to search the IRMOF-1 crystal structure for *p*-phenylene fragments.

First, we load the `query` fragment and `parent` crystal structure:
```julia
parent = Crystal("IRMOF-1.cif")
infer_bonds!(parent, true) # true to infer bonds across the periodic boundary
query = moiety("p-phenylene.xyz")
```
then execute a search for subgraphs of the `parent` that "match" (are isomorphic to) the graph of the `query` fragment:
```jldoctest find
search = substructure_search(query, parent)
# output
p-phenylene.xyz ∈ IRMOF-1.cif
96 hits in 24 locations.
```
!!! note "Syntactic sugar for substructure search"
    The `∈` (`\in` then hit `Tab` for this Unicode character) or `in` infix operators will also execute the search:
    ```jldoctest find; output=false
    search = query ∈ parent
    # or
    search = query in parent
    # or
    search = substructure_search(query, parent)
    # output
    p-phenylene.xyz ∈ IRMOF-1.cif
    96 hits in 24 locations.
    ```
Both functions `substructure_search` and `∈` return a `Search` object with attributes:
- `search.query`: the query in the search
- `search.parent`: the parent in the search
- `search.isomorphisms`: the result of the search---a nested vector giving the query-to-parent correspondence dictionaries.
```jldoctest find
search.isomorphisms
# output
24-element Vector{Vector{Dict{Int64, Int64}}}:
[Dict(5 => 185, 4 => 245, 6 => 197, 7 => 414, 2 => 306, 10 => 341, 9 => 402, 8 => 329, 3 => 318, 1 => 233…), Dict(5 => 197, 4 => 233, 6 => 185, 7 => 402, 2 => 318, 10 => 329, 9 => 414, 8 => 341, 3 => 306, 1 => 245…), Dict(5 => 185, 4 => 318, 6 => 197, 7 => 341, 2 => 233, 10 => 414, 9 => 329, 8 => 402, 3 => 245, 1 => 306…), Dict(5 => 197, 4 => 306, 6 => 185, 7 => 329, 2 => 245, 10 => 402, 9 => 341, 8 => 414, 3 => 233, 1 => 318…)]
[Dict(5 => 186, 4 => 246, 6 => 198, 7 => 413, 2 => 305, 10 => 342, 9 => 401, 8 => 330, 3 => 317, 1 => 234…), Dict(5 => 198, 4 => 234, 6 => 186, 7 => 401, 2 => 317, 10 => 330, 9 => 413, 8 => 342, 3 => 305, 1 => 246…), Dict(5 => 186, 4 => 317, 6 => 198, 7 => 342, 2 => 234, 10 => 413, 9 => 330, 8 => 401, 3 => 246, 1 => 305…), Dict(5 => 198, 4 => 305, 6 => 186, 7 => 330, 2 => 246, 10 => 401, 9 => 342, 8 => 413, 3 => 234, 1 => 317…)]
[Dict(5 => 187, 4 => 248, 6 => 200, 7 => 415, 2 => 308, 10 => 344, 9 => 404, 8 => 331, 3 => 319, 1 => 235…), Dict(5 => 200, 4 => 235, 6 => 187, 7 => 404, 2 => 319, 10 => 331, 9 => 415, 8 => 344, 3 => 308, 1 => 248…), Dict(5 => 187, 4 => 319, 6 => 200, 7 => 344, 2 => 235, 10 => 415, 9 => 331, 8 => 404, 3 => 248, 1 => 308…), Dict(5 => 200, 4 => 308, 6 => 187, 7 => 331, 2 => 248, 10 => 404, 9 => 344, 8 => 415, 3 => 235, 1 => 319…)]
[Dict(5 => 188, 4 => 247, 6 => 199, 7 => 416, 2 => 307, 10 => 343, 9 => 403, 8 => 332, 3 => 320, 1 => 236…), Dict(5 => 199, 4 => 236, 6 => 188, 7 => 403, 2 => 320, 10 => 332, 9 => 416, 8 => 343, 3 => 307, 1 => 247…), Dict(5 => 188, 4 => 320, 6 => 199, 7 => 343, 2 => 236, 10 => 416, 9 => 332, 8 => 403, 3 => 247, 1 => 307…), Dict(5 => 199, 4 => 307, 6 => 188, 7 => 332, 2 => 247, 10 => 403, 9 => 343, 8 => 416, 3 => 236, 1 => 320…)]
[Dict(5 => 189, 4 => 255, 6 => 207, 7 => 376, 2 => 262, 10 => 351, 9 => 358, 8 => 333, 3 => 280, 1 => 237…), Dict(5 => 207, 4 => 237, 6 => 189, 7 => 358, 2 => 280, 10 => 333, 9 => 376, 8 => 351, 3 => 262, 1 => 255…), Dict(5 => 189, 4 => 280, 6 => 207, 7 => 351, 2 => 237, 10 => 376, 9 => 333, 8 => 358, 3 => 255, 1 => 262…), Dict(5 => 207, 4 => 262, 6 => 189, 7 => 333, 2 => 255, 10 => 358, 9 => 351, 8 => 376, 3 => 237, 1 => 280…)]
[Dict(5 => 190, 4 => 256, 6 => 208, 7 => 375, 2 => 261, 10 => 352, 9 => 357, 8 => 334, 3 => 279, 1 => 238…), Dict(5 => 208, 4 => 238, 6 => 190, 7 => 357, 2 => 279, 10 => 334, 9 => 375, 8 => 352, 3 => 261, 1 => 256…), Dict(5 => 190, 4 => 279, 6 => 208, 7 => 352, 2 => 238, 10 => 375, 9 => 334, 8 => 357, 3 => 256, 1 => 261…), Dict(5 => 208, 4 => 261, 6 => 190, 7 => 334, 2 => 256, 10 => 357, 9 => 352, 8 => 375, 3 => 238, 1 => 279…)]
[Dict(5 => 191, 4 => 254, 6 => 206, 7 => 373, 2 => 264, 10 => 350, 9 => 360, 8 => 335, 3 => 277, 1 => 239…), Dict(5 => 206, 4 => 239, 6 => 191, 7 => 360, 2 => 277, 10 => 335, 9 => 373, 8 => 350, 3 => 264, 1 => 254…), Dict(5 => 191, 4 => 277, 6 => 206, 7 => 350, 2 => 239, 10 => 373, 9 => 335, 8 => 360, 3 => 254, 1 => 264…), Dict(5 => 206, 4 => 264, 6 => 191, 7 => 335, 2 => 254, 10 => 360, 9 => 350, 8 => 373, 3 => 239, 1 => 277…)]
[Dict(5 => 192, 4 => 253, 6 => 205, 7 => 374, 2 => 263, 10 => 349, 9 => 359, 8 => 336, 3 => 278, 1 => 240…), Dict(5 => 205, 4 => 240, 6 => 192, 7 => 359, 2 => 278, 10 => 336, 9 => 374, 8 => 349, 3 => 263, 1 => 253…), Dict(5 => 192, 4 => 278, 6 => 205, 7 => 349, 2 => 240, 10 => 374, 9 => 336, 8 => 359, 3 => 253, 1 => 263…), Dict(5 => 205, 4 => 263, 6 => 192, 7 => 336, 2 => 253, 10 => 359, 9 => 349, 8 => 374, 3 => 240, 1 => 278…)]
[Dict(5 => 193, 4 => 252, 6 => 204, 7 => 395, 2 => 290, 10 => 348, 9 => 386, 8 => 337, 3 => 299, 1 => 241…), Dict(5 => 204, 4 => 241, 6 => 193, 7 => 386, 2 => 299, 10 => 337, 9 => 395, 8 => 348, 3 => 290, 1 => 252…), Dict(5 => 193, 4 => 299, 6 => 204, 7 => 348, 2 => 241, 10 => 395, 9 => 337, 8 => 386, 3 => 252, 1 => 290…), Dict(5 => 204, 4 => 290, 6 => 193, 7 => 337, 2 => 252, 10 => 386, 9 => 348, 8 => 395, 3 => 241, 1 => 299…)]
[Dict(5 => 194, 4 => 251, 6 => 203, 7 => 396, 2 => 289, 10 => 347, 9 => 385, 8 => 338, 3 => 300, 1 => 242…), Dict(5 => 203, 4 => 242, 6 => 194, 7 => 385, 2 => 300, 10 => 338, 9 => 396, 8 => 347, 3 => 289, 1 => 251…), Dict(5 => 194, 4 => 300, 6 => 203, 7 => 347, 2 => 242, 10 => 396, 9 => 338, 8 => 385, 3 => 251, 1 => 289…), Dict(5 => 203, 4 => 289, 6 => 194, 7 => 338, 2 => 251, 10 => 385, 9 => 347, 8 => 396, 3 => 242, 1 => 300…)]
⋮
[Dict(5 => 212, 4 => 271, 6 => 219, 7 => 392, 2 => 283, 10 => 367, 9 => 379, 8 => 356, 3 => 296, 1 => 260…), Dict(5 => 219, 4 => 260, 6 => 212, 7 => 379, 2 => 296, 10 => 356, 9 => 392, 8 => 367, 3 => 283, 1 => 271…), Dict(5 => 212, 4 => 296, 6 => 219, 7 => 367, 2 => 260, 10 => 392, 9 => 356, 8 => 379, 3 => 271, 1 => 283…), Dict(5 => 219, 4 => 283, 6 => 212, 7 => 356, 2 => 271, 10 => 379, 9 => 367, 8 => 392, 3 => 260, 1 => 296…)]
[Dict(5 => 213, 4 => 276, 6 => 224, 7 => 419, 2 => 314, 10 => 372, 9 => 410, 8 => 361, 3 => 323, 1 => 265…), Dict(5 => 224, 4 => 265, 6 => 213, 7 => 410, 2 => 323, 10 => 361, 9 => 419, 8 => 372, 3 => 314, 1 => 276…), Dict(5 => 213, 4 => 323, 6 => 224, 7 => 372, 2 => 265, 10 => 419, 9 => 361, 8 => 410, 3 => 276, 1 => 314…), Dict(5 => 224, 4 => 314, 6 => 213, 7 => 361, 2 => 276, 10 => 410, 9 => 372, 8 => 419, 3 => 265, 1 => 323…)]
[Dict(5 => 214, 4 => 275, 6 => 223, 7 => 420, 2 => 313, 10 => 371, 9 => 409, 8 => 362, 3 => 324, 1 => 266…), Dict(5 => 223, 4 => 266, 6 => 214, 7 => 409, 2 => 324, 10 => 362, 9 => 420, 8 => 371, 3 => 313, 1 => 275…), Dict(5 => 214, 4 => 324, 6 => 223, 7 => 371, 2 => 266, 10 => 420, 9 => 362, 8 => 409, 3 => 275, 1 => 313…), Dict(5 => 223, 4 => 313, 6 => 214, 7 => 362, 2 => 275, 10 => 409, 9 => 371, 8 => 420, 3 => 266, 1 => 324…)]
[Dict(5 => 215, 4 => 273, 6 => 221, 7 => 418, 2 => 316, 10 => 369, 9 => 412, 8 => 363, 3 => 322, 1 => 267…), Dict(5 => 221, 4 => 267, 6 => 215, 7 => 412, 2 => 322, 10 => 363, 9 => 418, 8 => 369, 3 => 316, 1 => 273…), Dict(5 => 215, 4 => 322, 6 => 221, 7 => 369, 2 => 267, 10 => 418, 9 => 363, 8 => 412, 3 => 273, 1 => 316…), Dict(5 => 221, 4 => 316, 6 => 215, 7 => 363, 2 => 273, 10 => 412, 9 => 369, 8 => 418, 3 => 267, 1 => 322…)]
[Dict(5 => 216, 4 => 274, 6 => 222, 7 => 417, 2 => 315, 10 => 370, 9 => 411, 8 => 364, 3 => 321, 1 => 268…), Dict(5 => 222, 4 => 268, 6 => 216, 7 => 411, 2 => 321, 10 => 364, 9 => 417, 8 => 370, 3 => 315, 1 => 274…), Dict(5 => 216, 4 => 321, 6 => 222, 7 => 370, 2 => 268, 10 => 417, 9 => 364, 8 => 411, 3 => 274, 1 => 315…), Dict(5 => 222, 4 => 315, 6 => 216, 7 => 364, 2 => 274, 10 => 411, 9 => 370, 8 => 417, 3 => 268, 1 => 321…)]
[Dict(5 => 225, 4 => 303, 6 => 231, 7 => 424, 2 => 310, 10 => 399, 9 => 406, 8 => 381, 3 => 328, 1 => 285…), Dict(5 => 231, 4 => 285, 6 => 225, 7 => 406, 2 => 328, 10 => 381, 9 => 424, 8 => 399, 3 => 310, 1 => 303…), Dict(5 => 225, 4 => 328, 6 => 231, 7 => 399, 2 => 285, 10 => 424, 9 => 381, 8 => 406, 3 => 303, 1 => 310…), Dict(5 => 231, 4 => 310, 6 => 225, 7 => 381, 2 => 303, 10 => 406, 9 => 399, 8 => 424, 3 => 285, 1 => 328…)]
[Dict(5 => 226, 4 => 304, 6 => 232, 7 => 423, 2 => 309, 10 => 400, 9 => 405, 8 => 382, 3 => 327, 1 => 286…), Dict(5 => 232, 4 => 286, 6 => 226, 7 => 405, 2 => 327, 10 => 382, 9 => 423, 8 => 400, 3 => 309, 1 => 304…), Dict(5 => 226, 4 => 327, 6 => 232, 7 => 400, 2 => 286, 10 => 423, 9 => 382, 8 => 405, 3 => 304, 1 => 309…), Dict(5 => 232, 4 => 309, 6 => 226, 7 => 382, 2 => 304, 10 => 405, 9 => 400, 8 => 423, 3 => 286, 1 => 327…)]
[Dict(5 => 227, 4 => 302, 6 => 230, 7 => 421, 2 => 312, 10 => 398, 9 => 408, 8 => 383, 3 => 325, 1 => 287…), Dict(5 => 230, 4 => 287, 6 => 227, 7 => 408, 2 => 325, 10 => 383, 9 => 421, 8 => 398, 3 => 312, 1 => 302…), Dict(5 => 227, 4 => 325, 6 => 230, 7 => 398, 2 => 287, 10 => 421, 9 => 383, 8 => 408, 3 => 302, 1 => 312…), Dict(5 => 230, 4 => 312, 6 => 227, 7 => 383, 2 => 302, 10 => 408, 9 => 398, 8 => 421, 3 => 287, 1 => 325…)]
[Dict(5 => 228, 4 => 301, 6 => 229, 7 => 422, 2 => 311, 10 => 397, 9 => 407, 8 => 384, 3 => 326, 1 => 288…), Dict(5 => 229, 4 => 288, 6 => 228, 7 => 407, 2 => 326, 10 => 384, 9 => 422, 8 => 397, 3 => 311, 1 => 301…), Dict(5 => 228, 4 => 326, 6 => 229, 7 => 397, 2 => 288, 10 => 422, 9 => 384, 8 => 407, 3 => 301, 1 => 311…), Dict(5 => 229, 4 => 311, 6 => 228, 7 => 384, 2 => 301, 10 => 407, 9 => 397, 8 => 422, 3 => 288, 1 => 326…)]
```
In this example, the `query` fragment (*p*-phenylene) occurs in 24 different locations in the `parent` crystal structure, with 4 symmetry-equivalent isomorphisms at each location, for a total of 96 subgraph isomorphisms.
The number of locations---the number of unique substructures of the `parent` to which the `query` is isomorphic---is the length of `search.isomorphisms`.
```jldoctest find
nb_locations(search) # = length(search.isomorphisms)
# output
24
```
Element `i_loc` of `search.isomorphisms`, `search.isomorphisms[i_loc]`, is a vector of isomorphisms that share the same subset of atoms in the `parent`, each of which corresponds to a different orientation of the `query` overlaid on that `parent` substructure. The function `nb_ori_at_loc` outputs a vector whose element `i_loc` is the number of overlay orientations at that location.
```jldoctest find; output=false
nb_ori_at_loc(search) # 24-element Vector{Int64}: [4, 4, 4, ..., 4]
# output
24-element Vector{Int64}:
4
4
4
4
4
4
4
4
4
4
⋮
4
4
4
4
4
4
4
4
4
```
Each individual isomorphism `isom = search.isomorphisms[i_loc][i_ori]` for a specific location `i_loc` and orientation `i_ori` indicates the correspondence from the `query` to the `parent` structure: if atom `q` of the `query` maps to atom `p` of the `parent`, then `isom[q] == p`.
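For example, a minimal sketch of reading off one such correspondence (using the `search` from above; `atoms.species` is the species array of the `Crystal` data structure from `Xtals.jl`):
```julia
isom = search.isomorphisms[2][1]  # location i_loc = 2, orientation i_ori = 1
q = 1                             # an atom (node) of the query
p = isom[q]                       # the corresponding atom of the parent
# matched atoms must carry the same node label, i.e. the same chemical species
@assert query.atoms.species[q] == parent.atoms.species[p]
```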
The total number of isomorphisms is given by `nb_isomorphisms(search)`.
```jldoctest find
nb_isomorphisms(search) # = sum(nb_ori_at_loc(search))
# output
96
```
N.b. to generate a `Crystal` containing only the substructures of the `parent` which are isomorphic to the `query`, use:
```jldoctest find
isomorphic_substructures(search)
# output
Crystal(C₁₄₄H₉₆, periodic = TTT):
bounding_box : [ 25.832 0 0;
1.58175e-15 25.832 0;
1.58175e-15 1.58175e-15 25.832]u"Å"
```
## Stereochemistry and Isomorphism
The node-labeled graph representation of a molecule/crystal structure is invariant with respect to stereochemistry.
In other words, every rotational/conformational state and stereoisomer of a structure share the same graph representation.
This means that `PoreMatMod.jl` may find more subgraph matches than you might first expect.
*Example 1*: Suppose we search for a carboxylate with beta hydrogen in acrylate.

There is clearly only one substructure of acrylate that matches the query.
However, there are two subgraph isomorphisms, because swapping the oxygen atoms in the point cloud representation results in the same graph representation.
The above image gives a closer look at how these degenerate representations translate to multiple isomorphisms for a single occurrence of a fragment in a structure.
*Example 2*: Suppose we search the IRMOF-1 `parent` structure for the [BDC.xyz](../../../assets/find/BDC.xyz) linker as the `query` instead of the more minimal *p*-phenylene `query` fragment.
Thanks to the two carboxyl groups, the total number of isomorphisms is multiplied by a factor of 4: rotating each carboxyl group by 180 degrees has no effect on the graph representation.
The number of _locations_ at which the isomorphisms are found, however, is unchanged.
```jldoctest find
query = moiety("BDC.xyz")
search = query ∈ parent
nb_isomorphisms(search)
# output
384
```
```jldoctest find
nb_locations(search)
# output
24
```
**Note**: We advise defining the `query` using the most minimal structure that matches the targeted `parent` substructure.
## Documentation for functions
```@docs
Search
substructure_search
nb_ori_at_loc
nb_isomorphisms
nb_locations
isomorphic_substructures
```
```@meta
DocTestSetup = quote
using PoreMatMod
end
```
# Reading data from crystal structure and chemical fragment files into `PoreMatMod.jl`
This section details how to load data into `PoreMatMod.jl`, including the handling of paths to data and input file formats.
## Crystal structures
Accepted file formats for crystal structures (containing atomic coordinates and unit cell information) are `.cif` (see [here](https://en.wikipedia.org/wiki/Crystallographic_Information_File)) and `.cssr`.
Crystal structure files (`.cif`, `.cssr`) are read from the path `rc[:paths][:crystals]`.
!!! example
    Read in the crystal structure of [IRMOF-1.cif](../../../assets/inputs/IRMOF-1.cif) and infer its bonding graph:
    ```jldoctest; output=false
    parent = Crystal("IRMOF-1.cif")
    infer_bonds!(parent, true) # true b/c we want periodic bonds included
    # output
    true
    ```
The `Crystal` constructor returns a [`Crystal`](@ref) data structure.
The [`infer_bonds!`](@ref) function infers the bonding graph of the crystal structure (nodes: atoms, edges: bonds) based on interatomic distances---necessary for subgraph matching.
Both `Crystal` and `infer_bonds!` are inherited from `Xtals.jl` (see the [`docs`](https://simonensemble.github.io/Xtals.jl/dev/crystal/#Xtals.Crystal)).
## Query and Replacement Fragments
The accepted file format for chemical fragments (a list of atoms and their Cartesian coordinates) is `.xyz` (see [here](https://en.wikipedia.org/wiki/XYZ_file_format)).
Query and replacement fragment files (`.xyz`) are read from the path `rc[:paths][:moieties]`.
N.b. masked atoms of query fragments must be labeled with `!` for [`replace` operations](../../replace). For [substructure searches](../../find) using `substructure_search`, any `!` tags are ignored (the atoms are treated according to their chemical species).
!!! example
    Read in the chemical fragment [`p-phenylene.xyz`](../../../assets/inputs/p-phenylene.xyz):
    ```jldoctest; output=false
    query = moiety("p-phenylene.xyz")
    # output
    Crystal(C₆H₄, periodic = TTT):
    bounding_box : [ 1 0 0;
    6.12323e-17 1 0;
    6.12323e-17 6.12323e-17 1]u"Å"
    ```
The [`moiety`](@ref) reader also returns a `Crystal` data structure, but with an (arbitrary) unit cube as its unit cell.
Note that the order of the atoms as stored in the `Crystal` returned by `moiety` may differ from the order in the file.
This is to speed up structure searches.
If it is important that your `Crystal` have its atoms indexed identically to the source file, one solution is to save a new version of the file using [`write_xyz`](@ref).
!!! example
    Sort the atoms in [`glycine_res.xyz`](../../../assets/inputs/glycine_res.xyz):
    ```jldoctest; output=false
    # read the original data, pre-sorting the atoms
    q1 = moiety("glycine_res.xyz")
    # q1 is now indexed differently than the input data
    # save a new source file
    write_xyz(q1, joinpath(rc[:paths][:moieties], "glycine_res_sorted.xyz"))
    # q2 is ordered the same as the new file
    q2 = moiety("glycine_res_sorted.xyz")
    # q1 and q2 are identical
    @assert isapprox(q1.atoms.coords.xf, q2.atoms.coords.xf; atol=0.01)
    # output
    ```
The pre-sorting can also be disabled for non-!-tagged atoms, but at the risk of degraded search efficiency.
!!! example
    Load [`glycine_res.xyz`](../../../assets/inputs/glycine_res.xyz) without changing the atom order:
    ```jldoctest; output=false
    moiety("glycine_res.xyz"; presort=false)
    # output
    Crystal(C₂H₃NO, periodic = TTT):
    bounding_box : [ 1 0 0;
    6.12323e-17 1 0;
    6.12323e-17 6.12323e-17 1]u"Å"
    Atoms{Frac}(1, [:N], Frac([-2.4029152; -2.23405082; 0.0;;]))
    Atoms{Frac}(1, [:H], Frac([-1.4033551999999998; -2.26371682; 0.0;;]))
    Atoms{Frac}(1, [:C], Frac([-3.0898692; -0.95823882; 0.0;;]))
    Atoms{Frac}(1, [:C], Frac([-2.0853462; 0.18518518; 0.0;;]))
    Atoms{Frac}(1, [:H], Frac([-3.7147022; -0.88143582; -0.889823;;]))
    Atoms{Frac}(1, [:H], Frac([-3.7147022; -0.88143582; 0.889823;;]))
    Atoms{Frac}(1, [:O], Frac([-0.8513402; -0.06139382; 0.0;;]))
    ```
## Changing the Data Directories
`rc[:paths][:crystals]` and `rc[:paths][:moieties]` default to `./data/crystals` and `./data/moieties`, respectively.
To read input files from other locations, simply assign new values to `rc[:paths][:crystals]` and `rc[:paths][:moieties]`.
!!! example
    Suppose we wish to store our `.cif` files in `~/my_xtals` and our `.xyz` files in our present working directory.
    ```julia
    rc[:paths][:crystals] = joinpath(homedir(), "my_xtals")
    rc[:paths][:moieties] = pwd()
    ```
## Other data
`PoreMatMod.jl` draws atomic masses and covalent radii from [`Xtals.jl`](https://github.com/SimonEnsemble/Xtals.jl/).
## Detailed documentation for functions
```@docs
moiety
Crystal
infer_bonds!
BondingRule
strip_numbers_from_atom_labels!
```
```@meta
DocTestSetup = quote
using PoreMatMod
end
```
# Find/Replace Operations
Suppose we wish to conduct the find-and-replace operations illustrated in the figure below, to produce an acetylamido-functionalized IRMOF-1 structure.

#### the `parent` structure
First, we load the `parent` IRMOF-1 structure and infer its bonds.
```jldoctest replace_md; output=false
parent = Crystal("IRMOF-1.cif")
infer_bonds!(parent, true)
# output
true
```
#### the `query` fragment
Next, we define a `query` fragment as a *p*-phenylene moiety.
To guide the replacement, the masked atoms of the `query` fragment must be annotated in the `.xyz` input file by appending a `!` character to their atomic symbols.
The atom property viewer feature in [iRASPA](https://iraspa.org/) is useful for figuring out which atom(s) to mask.
!!! note
    A masked atom (marked with `!`) in the `query` fragment implies that the corresponding atom of the `parent` crystal structure (i) must be removed [e.g., to make room for replacement with a different functionality] but (ii) does not correspond with an atom on the `replacement` fragment and thus cannot be used in the process of aligning the `replacement` fragment onto the `parent` crystal.
In our example, in the `2-!-p-phenylene.xyz` input file describing our *p*-phenylene `query` fragment, one H atom is masked (see the figure above):
```
10
C -1.71069 0.96969 -0.46280
C -0.48337 1.30874 0.11690
C -2.33707 -0.23371 -0.12103
C 0.11757 0.44439 1.03836
C -0.50881 -0.75900 1.38013
C -1.73613 -1.09805 0.80043
H! 1.06706 0.70670 1.48683
H 0.00122 2.23972 -0.14750
H -3.28655 -0.49601 -0.56950
H -2.22071 -2.02904 1.06484
```
We then read the input file for the `query` fragment.
```jldoctest replace_md; output=false
query = moiety("2-!-p-phenylene.xyz")
# output
Crystal(C₆H!H₃, periodic = TTT):
bounding_box : [ 1 0 0;
6.12323e-17 1 0;
6.12323e-17 6.12323e-17 1]u"Å"
```
#### the `replacement` fragment
Next, we read in the acetylamido-functionalized version of the `query` fragment, [2-acetylamido-p-phenylene.xyz](../../../assets/replace/2-acetylamido-p-phenylene.xyz), as the `replacement` fragment:
```jldoctest replace_md; output=false
replacement = moiety("2-acetylamido-p-phenylene.xyz")
# output
Crystal(C₈H₇NO, periodic = TTT):
bounding_box : [ 1 0 0;
6.12323e-17 1 0;
6.12323e-17 6.12323e-17 1]u"Å"
```
#### the find step
We search for subgraphs of the `parent` structure that match the `query` fragment.
Note the `!` tags are ignored during the `substructure_search`.
```jldoctest replace_md; output=false
search = query in parent # equivalent to substructure_search(query, parent)
# output
2-!-p-phenylene.xyz ∈ IRMOF-1.cif
96 hits in 24 locations.
```
#### the replace step
The code below will, at each location in the `parent` where a substructure matched the `query` fragment, choose a random orientation (corresponding to an overlay of the `query` with the substructure), align and install the replacement fragment, then remove the original substructure, giving the `child` structure shown in the figure above.
```jldoctest replace_md; output=false
child = substructure_replace(search, replacement)
# output
Crystal(C₂₄₀H₁₆₈N₂₄O₁₂₈Zn₃₂, periodic = TTT):
bounding_box : [ 25.832 0 0;
1.58175e-15 25.832 0;
1.58175e-15 1.58175e-15 25.832]u"Å"
```
To direct the number, location, and orientation of the replacements, use the keyword arguments for [`substructure_replace`](@ref). Particularly, the location `loc` and orientation `ori` keyword arguments specify a particular isomorphism to use (in reference to `search.isomorphisms`) when conducting a replacement operation. The figure below illustrates.

For more details, see the [search docs](../../find) and the [replacement modes example](../../../examples/replacement_modes.html).
### quick find-and-replace syntax
For one-shot find-and-replace operations, the `replace` function may be used:
```jldoctest replace_md; output=false
child = replace(parent, query => replacement)
# output
Crystal(C₂₄₀H₁₆₈N₂₄O₁₂₈Zn₃₂, periodic = TTT):
bounding_box : [ 25.832 0 0;
1.58175e-15 25.832 0;
1.58175e-15 1.58175e-15 25.832]u"Å"
```
!!! note
    Generally, it is advisable to perform the search with `substructure_search` and then pass the resulting `Search` to `substructure_replace`, as multiple replacement tasks can then be performed on the basis of a single search step, as opposed to repeating the search for each replacement. The search is usually the slowest step, and it is desirable not to perform it repeatedly.
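For instance, a hedged sketch reusing a single search for several replacements (`replacement_A` and `replacement_B` are hypothetical fragments):
```julia
search  = substructure_search(query, parent)           # perform the (slow) search once
child_A = substructure_replace(search, replacement_A)  # reuse it...
child_B = substructure_replace(search, replacement_B)  # ...for several replacements
```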
## Documentation of functions
```@docs
substructure_replace
replace
```
# Getting Started
## Installation
Download and install the [Julia programming language](https://julialang.org/), at version 1.6 or higher.
`PoreMatMod.jl` is not currently stable on version 1.7.
To install `PoreMatMod.jl` (officially registered as a Julia package), enter the package manager in the Julia REPL by typing `]`, then enter:
```
pkg> add PoreMatMod
```
## Loading the `PoreMatMod.jl` package
To load the `PoreMatMod.jl` package, so that its functions are imported into your namespace, type in the Julia REPL:
```julia
julia> using PoreMatMod
```
We recommend writing Julia code and performing find-and-replace tasks with `PoreMatMod.jl` using interactive [Pluto notebooks](https://github.com/fonsp/Pluto.jl).
## Running tests (optional)
Run the unit tests associated with `PoreMatMod.jl` by entering package mode in the Julia REPL (type `]`) and entering:
```
pkg> test PoreMatMod
```
!!! note
    `PoreMatMod.jl` is built on [`Xtals.jl`](https://github.com/SimonEnsemble/Xtals.jl), which provides:
    - the data structure and reader, `Crystal`, for crystal structures
    - the `infer_bonds!` function that assigns bonds between atoms of a `Crystal`
module LeftChildRightSiblingTrees
using AbstractTrees
# See `abstracttrees.jl` for the only dependency of this package
export Node,
addchild,
addsibling,
depth,
graftchildren!,
isroot,
isleaf,
islastsibling,
lastsibling,
prunebranch!
mutable struct Node{T}
data::T
parent::Node{T}
child::Node{T}
sibling::Node{T}
"""
root = Node(data)
Construct a disconnected node, which can serve as the root of a new tree.
`root.data` holds `data`.
"""
function Node{T}(data) where T
n = new{T}(data)
n.parent = n
n.child = n
n.sibling = n
n
end
"""
node = Node(data, parent::Node)
Construct a `node` with `parent` as its parent node. `node.data` stores `data`.
Note that this does *not* update any links in, e.g., `parent`'s other children.
For a higher-level interface, see [`addchild`](@ref).
"""
function Node{T}(data, parent::Node) where T
n = new{T}(data, parent)
n.child = n
n.sibling = n
n
end
end
Node(data::T) where {T} = Node{T}(data)
Node(data, parent::Node{T}) where {T} = Node{T}(data, parent)
"""
node = lastsibling(child)
Return the last sibling of `child`.
"""
function lastsibling(sib::Node)
newsib = sib.sibling
while !islastsibling(sib)
sib = newsib
newsib = sib.sibling
end
sib
end
"""
node = addsibling(oldersib, data)
Append a new "youngest" sibling, storing `data` in `node`. `oldersib` must be
the previously-youngest sibling (see [`lastsibling`](@ref)).
"""
function addsibling(oldersib::Node{T}, data) where T
if oldersib.sibling != oldersib
error("Truncation of sibling list")
end
youngersib = Node(data, oldersib.parent)
oldersib.sibling = youngersib
youngersib
end
"""
node = addchild(parent::Node, data)
Create a new child of `parent`, storing `data` in `node.data`.
This adjusts all links to ensure the integrity of the tree.
"""
function addchild(parent::Node{T}, data) where T
newc = Node(data, parent)
prevc = parent.child
if prevc == parent
parent.child = newc
else
prevc = lastsibling(prevc)
prevc.sibling = newc
end
newc
end
"""
isroot(node)
Returns `true` if `node` is the root of a tree (meaning, it is its own parent).
"""
AbstractTrees.isroot(n::Node) = n == n.parent
"""
islastsibling(node)
Returns `true` if `node` is the last sibling
"""
islastsibling(n::Node) = n === n.sibling
"""
isleaf(node)
Returns `true` if `node` has no children.
"""
isleaf(n::Node) = n == n.child
makeleaf!(n::Node) = n.child = n
makelastsibling!(n::Node) = n.sibling = n
Base.show(io::IO, n::Node) = print(io, "Node(", n.data, ')')
# Iteration over children
# for c in parent
# # do something
# end
Base.IteratorSize(::Type{<:Node}) = Base.SizeUnknown()
Base.eltype(::Type{Node{T}}) where T = Node{T}
function Base.iterate(n::Node, state::Node = n.child)
n === state && return nothing
return state, islastsibling(state) ? n : state.sibling
end
# To support Base.pairs
struct PairIterator{T}
parent::Node{T}
end
Base.pairs(node::Node) = PairIterator(node)
Base.IteratorSize(::Type{<:PairIterator}) = Base.SizeUnknown()
function Base.iterate(iter::PairIterator, state::Node=iter.parent.child)
iter.parent === state && return nothing
return state=>state, islastsibling(state) ? iter.parent : state.sibling
end
function showedges(io::IO, parent::Node, printfunc = identity)
str = printfunc(parent.data)
if str != nothing
if isleaf(parent)
println(io, str, " has no children")
else
print(io, str, " has the following children: ")
for c in parent
print(io, printfunc(c.data), " ")
end
print(io, "\n")
for c in parent
showedges(io, c, printfunc)
end
end
end
end
showedges(parent::Node) = showedges(stdout, parent)
depth(node::Node) = depth(node, 1)
function depth(node::Node, d)
childd = d + 1
for c in node
d = max(d, depth(c, childd))
end
return d
end
"""
graftchildren!(dest, src)
Move the children of `src` to become children of `dest`.
`src` becomes a leaf node.
"""
function graftchildren!(dest, src)
for c in src
c.parent = dest
end
if isleaf(dest)
dest.child = src.child
else
lastsib = lastsibling(dest.child)
lastsib.sibling = src.child
end
makeleaf!(src)
return dest
end
"""
prunebranch!(node)
Eliminate `node` and all its children from the tree.
"""
function prunebranch!(node)
isroot(node) && error("cannot prune the root")
p = node.parent
if p.child == node
# `node` is the first child of p
if islastsibling(node)
makeleaf!(p) # p is now a leaf
else
p.child = node.sibling
end
else
# `node` is a middle or last child of p
child = p.child
sib = child.sibling
while sib != node
@assert sib != child
child = sib
sib = child.sibling
end
if islastsibling(sib)
# node is the last child of p, just truncate
makelastsibling!(child)
else
# skip over node
child.sibling = sib.sibling
end
end
return p
end
function Base.:(==)(a::Node, b::Node)
a.data == b.data || return false
reta, retb = iterate(a), iterate(b)
while true
reta === retb === nothing && return true
((reta === nothing) || (retb === nothing)) && return false
childa, statea = reta
childb, stateb = retb
childa == childb || return false
reta, retb = iterate(a, statea), iterate(b, stateb)
end
end
include("abstracttrees.jl")
end # module
AbstractTrees.nodetype(::Type{<:Node{T}}) where T = Node{T}
AbstractTrees.NodeType(::Type{<:Node{T}}) where T = HasNodeType()
AbstractTrees.parent(node::Node) = node.parent ≡ node ? nothing : node.parent
AbstractTrees.ParentLinks(::Type{<:Node{T}}) where T = StoredParents()
AbstractTrees.SiblingLinks(::Type{<:Node{T}}) where T = StoredSiblings()
AbstractTrees.children(node::Node) = node
function AbstractTrees.nextsibling(node::Node)
ns = node.sibling
return node ≡ ns ? nothing : ns
end
AbstractTrees.nodevalue(node::Node) = node.data
using LeftChildRightSiblingTrees, AbstractTrees
using Test
function mumtree()
# from the README
mum = Node("Mum")
me = addchild(mum, "Me")
son = addchild(me, "Son")
daughter = addchild(me, "Daughter")
brother = addsibling(me, "Brother")
return mum
end
@testset "LeftChildRightSiblingTrees" begin
root = Node(0)
@test isroot(root)
@test isleaf(root)
@test islastsibling(root)
nchildren = 0
for c in root
nchildren += 1
end
@test nchildren == 0
c1 = addchild(root, 1)
@test islastsibling(c1)
c2 = addchild(root, 2)
@test !islastsibling(c1)
@test islastsibling(c2)
c3 = addsibling(c2, 3)
@test lastsibling(c1) == c3
@test islastsibling(c3)
@test !islastsibling(c2)
c21 = addchild(c2, 4)
c22 = addchild(c2, 5)
@test isroot(root)
@test !isleaf(root)
nchildren = 0
for c in root
@test !isroot(c)
nchildren += 1
end
@test nchildren == 3
@test isleaf(c1)
@test !isleaf(c2)
@test isleaf(c3)
for c in c2
@test !isroot(c)
@test isleaf(c)
end
children2 = [c21,c22]
i = 0
for c in c2
@test c == children2[i+=1]
end
io = IOBuffer()
show(io, c2)
str = String(take!(io))
@test str == "Node(2)"
LeftChildRightSiblingTrees.showedges(io, root)
str = String(take!(io))
@test occursin("2 has the following children", str)
@test depth(root) == 3
@test depth(c3) == 1
root1 = deepcopy(root)
node = collect(root1)[2]
graftchildren!(root1, node)
@test isleaf(node)
@test [c.data for c in root1] == [1,2,3,4,5]
for c in root1
@test c.parent == root1
end
prunebranch!(node)
@test [c.data for c in root1] == [1,3,4,5]
root1 = deepcopy(root)
chlds = collect(root1)
p, node = chlds[1], chlds[2]
@test isleaf(p)
graftchildren!(p, node)
@test isleaf(node)
@test [c.data for c in root1] == [1,2,3]
@test [c.data for c in p] == [4,5]
for c in p
@test c.parent == p
end
root1 = deepcopy(root)
chlds = collect(root1)
prunebranch!(chlds[end])
@test [c.data for c in root1] == [1,2]
root1 = deepcopy(root)
chlds = collect(root1)
prunebranch!(chlds[1])
@test [c.data for c in root1] == [2,3]
@test_throws ErrorException("cannot prune the root") prunebranch!(root1)
tree1 = mumtree()
tree2 = mumtree()
@test tree1 == tree2
c = collect(tree1)
addchild(last(c), "Kid")
@test tree1 != tree2
end
@testset "AbstractTrees" begin
root = Node(0)
c1 = addchild(root, 1)
c2 = addchild(root, 2)
c3 = addsibling(c2, 3)
c21 = addchild(c2, 4)
c22 = addchild(c2, 5)
io = IOBuffer()
print_tree(io, root)
@test strip(String(take!(io))) == "0\n├─ 1\n├─ 2\n│ ├─ 4\n│ └─ 5\n└─ 3"
@test map(x->x.data, @inferred(collect(PostOrderDFS(root)))) == [1,4,5,2,3,0]
@test map(x->x.data, @inferred(collect(PreOrderDFS(root)))) == [0,1,2,4,5,3]
@test map(x->x.data, @inferred(collect(Leaves(root)))) == [1,4,5,3]
end
# LeftChildRightSiblingTrees
A [left child, right sibling tree](https://en.wikipedia.org/wiki/Left-child_right-sibling_binary_tree)
(frequently abbreviated as "LCRS")
is a rooted tree data structure that allows a parent node to have multiple child nodes.
Rather than maintaining a list of children (which requires one array per node),
it is represented as a binary tree, where the "left" branch is the first child,
whose "right" branch points to its first sibling.
Concretely, suppose a particular node, `A`, has 3 children, `a`, `b`, and `c`. Then:
- `a`, `b`, and `c` link to `A` as their parent.
- `A` links `a` as its child (via `A`'s left branch); `a` links `b` as its sibling
(via `a`'s right branch), and `b` links `c` as its sibling (via `b`'s right branch).
- `A`'s right branch would link to any of its siblings (e.g., `B`), if they exist.
- Any missing link (e.g., `c` does not have a sibling) points back to the node itself (`c.sibling == c`).
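A minimal sketch of these links, using this package's own API (the names `A`, `a`, `b`, and `c` mirror the description above):
```julia
using LeftChildRightSiblingTrees

A = Node("A")
a = addchild(A, "a"); b = addchild(A, "b"); c = addchild(A, "c")

@assert A.child === a      # left branch: first child
@assert a.sibling === b    # right branch: next sibling
@assert b.sibling === c
@assert c.sibling === c    # a missing link points back to the node itself
@assert a.parent === b.parent === c.parent === A
```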
## Tradeoffs
An LCRS tree is typically more memory efficient than an equivalent multi-way tree
representation that uses an array to store the children of each node.
However, for certain tasks it can be less performant, because some operations that modify
the tree structure require iterating over all the children of a node.
## Demo
### Creating a Tree
Use `addchild` or `addsibling`:
```julia
julia> using LeftChildRightSiblingTrees
julia> mum = Node("Mum");
julia> me = addchild(mum, "Me");
julia> son = addchild(me, "Son");
julia> daughter = addchild(me, "Daughter");
julia> brother = addsibling(me, "Brother"); # equivalent: to `addchild(mum, "Brother")`
```
### Querying about nodes:
```julia
julia> lastsibling(me)
Node(Brother)
julia> isroot(mum)
true
julia> isleaf(me)
false
julia> isleaf(daughter)
true
```
### Iterating the Tree/Nodes
Iteration goes through all (direct) children.
The `.data` field holds the information put in the tree.
We can use this to draw a simple visualization of the tree via recursion.
```julia
julia> for child in mum
           println(child)
       end
Node(Me)
Node(Brother)
julia> function showtree(node, indent=0)
           println("\t"^indent, node.data)
           for child in node
               showtree(child, indent + 1)
           end
       end
showtree (generic function with 2 methods)
julia> showtree(mum)
Mum
	Me
		Son
		Daughter
	Brother
```
LeftChildRightSiblingTrees also has a built-in function for showing this kind of info:
```julia
julia> LeftChildRightSiblingTrees.showedges(mum)
Mum has the following children: Me Brother
Me has the following children: Son Daughter
Son has no children
Daughter has no children
Brother has no children
```
## Manipulating the tree
See the docstrings for `graftchildren!` and `prunebranch!`.
## Credits
This existed as an internal component of
[ProfileView](https://github.com/timholy/ProfileView.jl)
since its inception until it was split out as an independent package.
module HardwareAbstractions
using ControlSystemsBase, LinearAlgebra
export @periodically, chirp, show_measurements
export control, measure, inputrange, outputrange, isstable, isasstable, sampletime, bias, initialize, finalize, ninputs, noutputs, nstates
import Base: finalize
import ControlSystemsBase: sampletime, isstable, ninputs, noutputs, nstates
include("utilities.jl")
include("interface.jl")
include("reference_generators.jl")
include("controllers.jl")
end # module
export run_control_2DOF
"""
y,u,r = run_control_2DOF(process, sysFB[, sysFF]; duration = 10, reference(t) = sign(sin(2π*t)))
Perform control experiemnt on process where the feedback and feedforward controllers are given by
`sysFB` and `sysFF`, both of type `StateSpace`.
`reference` is a reference generating function that accepts a scalar `t` (time in seconds) and outputs a scalar `r`, default is `reference(t) = sign(sin(2π*t))`.
The outputs `y,u,r` are the beam angle, control signal and reference respectively.

"""
function run_control_2DOF(P::AbstractProcess,sysFB, sysFF=nothing; duration = 10, reference = t->sign(sin(2π*t)))
nu = num_inputs(P)
ny = num_outputs(P)
h = sampletime(P)
y = zeros(ny, length(0:h:duration))
u = zeros(nu, length(0:h:duration))
r = zeros(ny, length(0:h:duration))
Gfb = SysFilter(sysFB)
if sysFF != nothing
Gff = SysFilter(sysFF)
end
function calc_control(i)
rf = sysFF == nothing ? r[:,i] : Gff(r[:,i])
e = rf-y[:,i]
ui = Gfb(e)
ui .+ bias(P)
end
simulation = isa(P, SimulatedProcess)
initialize(P)
for (i,t) = enumerate(0:h:duration)
@periodically h simulation begin
y[:,i] .= measure(P)
r[:,i] .= reference(t)
u[:,i] .= calc_control(i) # y,r must be updated before u
control(P, [clamp.(u[j,i], inputrange(P)[j]...) for j=1:nu])
end
end
finalize(P)
y',u',r'
end
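# Hedged usage sketch (assumes a process `p` implementing the interface; the
# controller below is illustrative):
#   sysFB = c2d(ss(tf([1], [1, 1])), sampletime(p))
#   y, u, r = run_control_2DOF(p, sysFB; duration = 5)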
export AbstractProcess, PhysicalProcess, SimulatedProcess, processtype
export outputrange,
inputrange,
ninputs,
noutputs,
nstates,
isstable,
isasstable,
sampletime,
bias,
control,
measure,
initialize,
finalize
# Interface specification ===================================================================
"""
AbstractProcess
Base abstract type for all lab processes. This should not be inherited from directly, see [`PhysicalProcess`](@ref), [`SimulatedProcess`](@ref)
"""
abstract type AbstractProcess end
"""
PhysicalProcess <: AbstractProcess
Physical processes should have this trait when queried with [`processtype`](@ref).
"""
struct PhysicalProcess <: AbstractProcess
end
"""
SimulatedProcess <: AbstractProcess
Simulated processes should have this trait when queried with [`processtype`](@ref).
"""
struct SimulatedProcess <: AbstractProcess
end
"""
processtype(P::AbstractProcess)
Return the type of process `P`, either `PhysicalProcess` or `SimulatedProcess`.
"""
function processtype end
## Function definitions =====================================================================
"""
range = outputrange(P::AbstractProcess)
Return the range of outputs (measurement signals) of the process. `range` is a vector of
tuples, `length(range) = num_outputs(P)`, `eltype(range) = Tuple{Real, Real}`
"""
function outputrange end
"""
range = inputrange(P::AbstractProcess)
Return the range of inputs (control signals) of the process. `range` is a vector of
tuples, `length(range) = num_inputs(P)`, `eltype(range) = Tuple{Real, Real}`
"""
function inputrange end
"""
isstable(P::AbstractProcess)
Return true/false indicating whether or not the process is stable
"""
function isstable end
"""
isasstable(P::AbstractProcess)
Return true/false indicating whether or not the process is asymptotically stable
"""
function isasstable end
"""
h = sampletime(P::AbstractProcess)
Return the sample time of the process in seconds.
"""
function sampletime end
"""
b = bias(P::AbstractProcess)
Return an input bias for the process. This could be, e.g., the constant input u₀ around which
a nonlinear system is linearized, or whatever other bias might exist on the input.
`length(b) = num_inputs(P)`
"""
function bias end
"""
control(P::AbstractProcess, u)
Send a control signal to the process. `u` must have dimension equal to `num_inputs(P)`
"""
function control end
"""
y = measure(P::AbstractProcess)
Return a measurement from the process. `y` has length `num_outputs(P)`
"""
function measure end
"""
initialize(P::AbstractProcess)
This function is called before any control or measurement operations are performed. During a call to `initialize`, one might set up external communications etc. After control is done,
the function [`finalize`](@ref) is called.
"""
function initialize end
"""
finalize(P::AbstractProcess)
This function is called after any control or measurement operations are performed. During a call to `finalize`, one might finalize external communications etc. Before control is done,
the function [`initialize`](@ref) is called.
"""
function finalize end
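# ---------------------------------------------------------------------------
# A hedged sketch (not part of the interface) of a minimal simulated process
# implementing it; `FirstOrderSim` and its fields are hypothetical.
struct FirstOrderSim <: AbstractProcess
    h::Float64                     # sample time [s]
    state::Base.RefValue{Float64}  # scalar process state
end
processtype(::FirstOrderSim)  = SimulatedProcess()
sampletime(p::FirstOrderSim)  = p.h
bias(p::FirstOrderSim)        = 0.0
inputrange(p::FirstOrderSim)  = [(-10.0, 10.0)]
outputrange(p::FirstOrderSim) = [(-10.0, 10.0)]
control(p::FirstOrderSim, u)  = (p.state[] += p.h * (-p.state[] + first(u)); u) # forward-Euler step
measure(p::FirstOrderSim)     = [p.state[]]
initialize(p::FirstOrderSim)  = (p.state[] = 0.0)
finalize(p::FirstOrderSim)    = nothing
# e.g. p = FirstOrderSim(0.01, Ref(0.0)); initialize(p); control(p, [1.0]); measure(p)
# ---------------------------------------------------------------------------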
export PRBSGenerator
#PRBS
"""
r = PRBSGenerator()
Generates a pseudo-random binary sequence. Call like `random_input = r()`.
"""
mutable struct PRBSGenerator
state::Int
end
PRBSGenerator() = PRBSGenerator(Int(1))
function (r::PRBSGenerator)(args...)
state = r.state
bit = ((state >> 0) ⊻ (state >> 2) ⊻ (state >> 3) ⊻ (state >> 5) ) & 1
r.state = (state >> 1) | (bit << 15)
bit
end
"""
chirp(t, f0, f1, Tf; logspace = true)
If `t` is a symbolic variable, a symbolic expression in `t` is returned.
"""
function chirp(t, f0, f1, Tf; logspace=true)
f = logspace ? f0*(f1/f0)^(t/Tf) : f0 + t/Tf*(f1-f0)
sin(2π*f*t)
end
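# Hedged usage sketch of the generators above (lengths and frequencies illustrative):
#   r = PRBSGenerator()
#   excitation = [2r() - 1 for _ in 1:1000]             # ±1 PRBS input sequence
#   sweep = [chirp(t, 0.1, 10, 10) for t in 0:0.01:10]  # logarithmic sweep from 0.1 to 10 Hz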
export @periodically, @periodically_yielding, init_sysfilter, sysfilter!, SysFilter
"""
@periodically(h, body)
Ensures that the body is run with an interval of `h >= 0.001` seconds.
"""
macro periodically(h, body)
quote
local start_time = time()
$(esc(body))
local execution_time = time()-start_time
Libc.systemsleep(max(0,$(esc(h))-execution_time))
end
end
macro periodically_yielding(h, body)
quote
local start_time = time()
$(esc(body))
local execution_time = time()-start_time
sleep(max(0,$(esc(h))-execution_time))
end
end
"""
@periodically(h, simulation::Bool, body)
Ensures that the body is run with an interval of `h >= 0.001` seconds.
If `simulation == false`, no sleep is done
"""
macro periodically(h, simulation, body)
quote
local start_time = time()
$(esc(body))
local execution_time = time()-start_time
$(esc(simulation)) || Libc.systemsleep(max(0,$(esc(h))-execution_time))
end
end
"""
Csf = SysFilter(sys_discrete::StateSpace)
Csf = SysFilter(sys_continuous::StateSpace, sampletime)
Csf = SysFilter(sys::StateSpace, state::AbstractVector)
Returns an object used for filtering signals through LTI systems.
Create a SysFilter object that can be used to implement control loops and simulators
with LTI systems, i.e., `U(z) = C(z)E(z)`. To filter a signal `u` through the filter,
call like `y = Csf(u)`. Calculates the filtered output `y` in `y = Cx+Du, x'=Ax+Bu`
"""
struct SysFilter{T<:StateSpace}
sys::T
state::Vector{Float64}
function SysFilter(sys::StateSpace, state::AbstractVector)
@assert !ControlSystemsBase.iscontinuous(sys) "Can not filter using continuous time model."
@assert length(state) == sys.nx "length(state) != sys.nx"
new{typeof(sys)}(sys, state)
end
function SysFilter(sys::StateSpace)
@assert !ControlSystemsBase.iscontinuous(sys) "Can not filter using continuous time model. Supply sample time."
new{typeof(sys)}(sys, init_sysfilter(sys))
end
function SysFilter(sys::StateSpace, h::Real)
@assert ControlSystemsBase.iscontinuous(sys) "Sample time supplied, but the system model is already in discrete time."
sysd = c2d(sys, h)[1]
new{typeof(sysd)}(sysd, init_sysfilter(sysd))
end
end
(s::SysFilter)(input) = sysfilter!(s.state, s.sys, input)
"""
state = init_sysfilter(sys::StateSpace)
Use together with [`sysfilter!`](@ref)
"""
function init_sysfilter(sys::StateSpace)
zeros(sys.nx)
end
"""
output = sysfilter!(s::SysFilter, input)
output = sysfilter!(state, sys::StateSpace, input)
Returns the filtered output `y` in `y = Cx+Du, x'=Ax+Bu`
This function is used to implement control loops where a signal is filtered through a
dynamical system, i.e., `U(z) = C(z)E(z)`. Initialize `state` using [`init_sysfilter`](@ref).
"""
function sysfilter!(state::AbstractVector, sys::StateSpace, input)
state .= vec(sys.A*state + sys.B*input)
output = vec(sys.C*state + sys.D*input)
end
sysfilter!(s::SysFilter, input) = sysfilter!(s.state, s.sys, input)
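# Hedged usage sketch (system and signal are illustrative):
#   sys = ss(tf(1, [1, 1]))   # continuous-time first-order system
#   Csf = SysFilter(sys, 0.1) # discretized internally with sample time 0.1 s
#   y   = [first(Csf(u)) for u in randn(100)]  # filter a signal sample by sample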
"""
f_discrete = rk4(f, Ts; supersample = 1)
Discretize `f` using RK4 with sample time `Ts`. See also [`MPCIntegrator`](@ref) for more advanced integration possibilities. More details are available at https://help.juliahub.com/juliasimcontrol/stable/mpc_details/#Discretization
"""
function rk4(f::F, Ts0; supersample::Integer = 1) where {F}
supersample ≥ 1 || throw(ArgumentError("supersample must be positive."))
# Runge-Kutta 4 method
Ts = Ts0 / supersample # to preserve type stability in case Ts0 is an integer
let Ts = Ts
function (x, u, p, t)
T = typeof(x)
f1 = f(x, u, p, t)
f2 = f(x + Ts / 2 * f1, u, p, t + Ts / 2)
f3 = f(x + Ts / 2 * f2, u, p, t + Ts / 2)
f4 = f(x + Ts * f3, u, p, t + Ts)
add = Ts / 6 * (f1 + 2 * f2 + 2 * f3 + f4)
# This gymnastics with changing the name to y is to ensure type stability when x + add is not the same type as x. The compiler is smart enough to figure out the type of y
y = x + add
for i in 2:supersample
f1 = f(y, u, p, t)
f2 = f(y + Ts / 2 * f1, u, p, t + Ts / 2)
f3 = f(y + Ts / 2 * f2, u, p, t + Ts / 2)
f4 = f(y + Ts * f3, u, p, t + Ts)
add = Ts / 6 * (f1 + 2 * f2 + 2 * f3 + f4)
y += add
end
return y
end
end
end
function show_measurements(fun, p; Tf = 3600)
data = Vector{Float64}[]
Ts = sampletime(p)
N = round(Int, Tf/Ts)
try
for i = 1:N
@periodically_yielding Ts begin
y = measure(p)
push!(data, y)
fun(data)
end
end
catch e
@info e
finally
@info "Going to the pub"
end
data
end
function collect_data(p; Tf = 10)
data = Vector{Float64}[]
Ts = sampletime(p)
N = round(Int, Tf/Ts)
sizehint!(data, N)
GC.enable(false); GC.gc()
t_start = time()
try
for i = 1:N
@periodically Ts begin
y = measure(p)
t = time() - t_start
push!(data, [t; y])
end
end
catch e
@info e
finally
GC.enable(true); GC.gc()
@info "Going to the pub"
end
data
end
# Interface implementation Ball And Beam ====================================================
# The ball and beam can be used in two modes, either just the Beam, in which case there is a
# single output (measurement signal) or the BallAndBeam, in which case there are two.
# There are a few union types defined for convenience, these are
# AbstractBeam = Union{Beam, BeamSimulator}
# AbstractBallAndBeam = Union{BallAndBeam, BallAndBeamSimulator}
# AbstractBeamOrBallAndBeam = All types
# Although not Abstract per se, the names AbstractBeam etc. were chosen since this reflects
# their usage in dispatch.
export Beam, BeamSimulator, AbstractBeam, BallAndBeam, BallAndBeamSimulator, AbstractBeamOrBallAndBeam
# @with_kw allows specification of default values for fields. If none is given, this value must be supplied by the user. replaces many constructors that would otherwise only supply default values.
# Call constructor like Beam(bias=1.0) if you want a non-default value for bias
"""
Beam(;kwargs...)
Physical beam process
#Arguments (fields)
- `h::Float64 = 0.01`
- `bias::Float64 = 0.0`
- `stream::LabStream = ComediStream()`
- `measure::AnalogInput10V = AnalogInput10V(0)`
- `control::AnalogOutput10V = AnalogOutput10V(1)`
"""
struct Beam <: PhysicalProcess
h::Float64
bias::Float64
stream::LabStream
measure::AnalogInput10V
control::AnalogOutput10V
end
function Beam(;
h::Float64 = 0.01,
bias::Float64 = 0.,
stream::LabStream = ComediStream(),
measure::AnalogInput10V = AnalogInput10V(0),
control::AnalogOutput10V = AnalogOutput10V(1))
p = Beam(Float64(h),Float64(bias),stream,measure,control)
init_devices!(p.stream, p.measure, p.control)
p
end
include("define_beam_system.jl")
const beam_system, nice_beam_controller = define_beam_system()
# nice_beam_controller gives ϕₘ=56°, Aₘ=4, Mₛ = 1.6. Don't forget to discretize it before use
struct BeamSimulator <: SimulatedProcess
h::Float64
s::SysFilter
BeamSimulator(;h::Real = 0.01, bias=0) = new(Float64(h), SysFilter(beam_system, h))
end
struct BallAndBeam <: PhysicalProcess
h::Float64
bias::Float64
stream::LabStream
measure1::AnalogInput10V
measure2::AnalogInput10V
control::AnalogOutput10V
end
function BallAndBeam(;
h = 0.01,
bias = 0.,
stream = ComediStream(),
measure1::AnalogInput10V = AnalogInput10V(0),
measure2::AnalogInput10V = AnalogInput10V(1),
control::AnalogOutput10V = AnalogOutput10V(1))
p = BallAndBeam(h,bias,stream,measure1,measure2,control)
init_devices!(p.stream, p.measure1, p.measure2, p.control)
p
end
struct BallAndBeamSimulator <: SimulatedProcess
h::Float64
s::SysFilter
end
const AbstractBeam = Union{Beam, BeamSimulator}
const AbstractBallAndBeam = Union{BallAndBeam, BallAndBeamSimulator}
const AbstractBeamOrBallAndBeam = Union{AbstractBeam, AbstractBallAndBeam}
num_outputs(p::AbstractBeam) = 1
num_outputs(p::AbstractBallAndBeam) = 2
num_inputs(p::AbstractBeamOrBallAndBeam) = 1
outputrange(p::AbstractBeam) = [(-10,10)]
outputrange(p::AbstractBallAndBeam) = [(-10,10),(-1,1)] # Beam angle, Ball position
inputrange(p::AbstractBeamOrBallAndBeam) = [(-10,10)]
isstable(p::AbstractBeam) = true
isstable(p::AbstractBallAndBeam) = false
isasstable(p::AbstractBeamOrBallAndBeam) = false
sampletime(p::AbstractBeamOrBallAndBeam) = p.h
bias(p::AbstractBeamOrBallAndBeam) = p.bias
bias(p::BeamSimulator) = 0
bias(p::BallAndBeamSimulator) = 0
function control(p::AbstractBeamOrBallAndBeam, u::AbstractArray)
length(u) == 1 || error("Process $(typeof(p)) only accepts one control signal, tried to send u=$u.")
control(p,u[1])
end
control(p::AbstractBeamOrBallAndBeam, u::Number) = send(p.control,u)
control(p::BeamSimulator, u::Number) = p.s(u)
control(p::BallAndBeamSimulator, u::Number) = error("Not yet implemented")
measure(p::Beam) = read(p.measure)
measure(p::BallAndBeam) = [read(p.measure1), read(p.measure2)]
measure(p::BeamSimulator) = dot(p.s.sys.C,p.s.state)
measure(p::BallAndBeamSimulator) = error("Not yet implemented")
initialize(p::Beam) = nothing
initialize(p::BallAndBeam) = nothing
finalize(p::AbstractBeamOrBallAndBeam) = foreach(close, p.stream.devices)
initialize(p::BallAndBeamSimulator) = nothing
finalize(p::BallAndBeamSimulator) = nothing
initialize(p::BeamSimulator) = p.s.state .*= 0
finalize(p::BeamSimulator) = nothing
# Interface implementation ETH Helicopter ===================================================
# There is a union type defined for convenience:
# AbstractETHHelicopter = Union{ETHHelicopter, ETHHelicopterSimulator}
# Although not Abstract per se, the names AbstractETHHelicopter etc. were chosen since this
# reflects the usage in dispatch.
export ETHHelicopter, ETHHelicopterSimulator, AbstractETHHelicopter
# @with_kw allows specification of default values for fields. If none is given, this value must be supplied by the user. replaces many constructors that would otherwise only supply default values.
# Call constructor like ETHHelicopter(bias=1.0) if you want a non-default value for bias
"""
ETHHelicopter(;kwargs...)
Physical ETH helicopter process
# Arguments (fields)
- `h::Float64 = 0.05`
- `bias::Float64 = 0.0`
- `stream::LabStream = ComediStream()`
- `measure1::AnalogInput10V = AnalogInput10V(0)`
- `measure2::AnalogInput10V = AnalogInput10V(1)`
- `control1::AnalogOutput10V = AnalogOutput10V(0)`
- `control2::AnalogOutput10V = AnalogOutput10V(1)`
"""
struct ETHHelicopter <: PhysicalProcess
h::Float64
bias::Float64
stream::LabStream
measure1::AnalogInput10V
measure2::AnalogInput10V
control1::AnalogOutput10V
control2::AnalogOutput10V
end
function ETHHelicopter(;
h = 0.05,
bias = 0.,
stream = ComediStream(),
measure1::AnalogInput10V = AnalogInput10V(0),
measure2::AnalogInput10V = AnalogInput10V(1),
control1::AnalogOutput10V = AnalogOutput10V(0),
control2::AnalogOutput10V = AnalogOutput10V(1))
p = ETHHelicopter(h,bias,stream,measure1,measure2,control1,control2)
init_devices!(p.stream, p.measure1, p.measure2, p.control1, p.control2)
p
end
struct ETHHelicopterSimulator <: SimulatedProcess
h::Float64
bias::Float64
state::Vector{Float64}
end
ETHHelicopterSimulator() = ETHHelicopterSimulator(0.01, 0.0, zeros(4))
const AbstractETHHelicopter = Union{ETHHelicopter, ETHHelicopterSimulator}
num_outputs(p::AbstractETHHelicopter) = 2
num_inputs(p::AbstractETHHelicopter) = 2
outputrange(p::AbstractETHHelicopter) = [(-10,10),(-10,10)]
inputrange(p::AbstractETHHelicopter) = [(-10,10),(-10,10)]
isstable(p::AbstractETHHelicopter) = false
isasstable(p::AbstractETHHelicopter) = false
sampletime(p::AbstractETHHelicopter) = p.h
bias(p::AbstractETHHelicopter) = p.bias
function control(p::ETHHelicopter, u)
send(p.control1,u[1])
send(p.control2,u[2])
end
measure(p::ETHHelicopter) = [read(p.measure1), read(p.measure2)]
control(p::ETHHelicopterSimulator, u) = error("Not yet implemented")
measure(p::ETHHelicopterSimulator) = error("Not yet implemented")
initialize(p::ETHHelicopter) = nothing
finalize(p::ETHHelicopter) = foreach(close, p.stream.devices)
initialize(p::ETHHelicopterSimulator) = nothing
finalize(p::ETHHelicopterSimulator) = nothing
using HardwareAbstractions, ControlSystemsBase, DSP
using Test
# Reference generators
r = PRBSGenerator(Int(4))
seq = [r() for i = 1:10]
@test all(seq .== [1,0,1,0,0,0,0,0,0,0])
foreach(r,1:10_000)
function test_sysfilter()
N = 10
u = randn(N)
b = [1, 1]
a = [1, 0.1, 1]
sys = ss(tf(b,a,1))
state = init_sysfilter(sys)
yf = filt(b,a,u)
yff = similar(yf)
for i in eachindex(u)
yff[i] = sysfilter!(state, sys, u[i])[1]
end
@test sum(abs,yf - yff) < √(eps())
sysfilt = SysFilter(sys)
for i in eachindex(u)
yff[i] = sysfilter!(sysfilt, u[i])[1]
end
@test sum(abs,yf - yff) < √(eps())
sysfilt = SysFilter(sys)
for i in eachindex(u)
yff[i] = sysfilt(u[i])[1]
end
@test sum(abs,yf - yff) < √(eps())
end
test_sysfilter()
# HardwareAbstractions
**Work in progress**
An interface to communicate with hardware devices for the purposes of automatic control. This package does not do much itself other than defining the interface for other packages to implement.
The interface is defined by [`interface.jl`](https://github.com/baggepinnen/HardwareAbstractions.jl/blob/main/src/interface.jl)
## Utilities
The package also contains some utilities for working with hardware devices and control loops, such as
- `@periodically`: Ensures that the body is run with an interval of `h >= 0.001` seconds.
- `SysFilter`: Returns an object used for filtering signals through LTI systems.
- `rk4`: A Runge-Kutta 4 integrator.
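A hedged sketch of how these utilities compose (the controller, sample time, and loop length are illustrative; calls on a real process `p` are commented out):
```julia
using HardwareAbstractions, ControlSystemsBase

h = 0.01                             # sample time in seconds
C = SysFilter(ss(tf(1, [1, 1])), h)  # controller, discretized internally
y = 0.0
for i = 1:100
    @periodically h begin
        # y = measure(p)             # read the process output
        u = C(1.0 - y)               # filter the error through the controller
        # control(p, u)              # send the control signal
    end
end
```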
# Inside make.jl
using Documenter, Proportions
makedocs(
sitename = "Proportions.jl",
modules = [Proportions],
doctest = true,
pages=[
"Home" => "index.md"
])
deploydocs(;
repo="github.com/hillelawaskar/Proportions.jl",
)
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | code | 262 | module Proportions
export get_proportion,get_proportion_round, get_proportion_round_add1251,
get_proportion_2d,get_proportion_round_2d,get_proportion_round_add1251_2d
# Include functions
include("onedproportion.jl")
include("twodproportion.jl")
end # module Proportions
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | code | 2851 | """
get_proportion(for_prop_arr::AbstractVector{Float64})::AbstractVector{Float64}
Compute the proportional values for each element in the 1D array.
Returns an AbstractVector{Float64}.
# Example
```
julia> get_proportion([1.0,2.0,3.0,4.0,5.0,34.0000034,2423,5656.98988])
=[0.00012301651427098885, 0.0002460330285419777, 0.00036904954281296654, 0.0004920660570839554, 0.0006150825713549442, 0.004182561903469769, 0.298069014078606, 0.6959031763038594]
```
"""
function get_proportion(for_prop_arr::AbstractVector{Float64})::AbstractVector{Float64}
return for_prop_arr ./ sum(for_prop_arr)
end
"""
get_proportion_round(for_prop_arr::AbstractVector{Float64};round_digits::Int64)::AbstractVector{Float64}
Compute the proportional values for each element in the array, rounded to the specified number of decimal digits.
If the rounded values do not sum to 1, the difference is added to the largest element.
Returns an AbstractVector{Float64}.
# Example
```
julia> get_proportion_round([1.33,1.33,1.23,1.1111],round_digits = 2)
=[0.26, 0.27, 0.25, 0.22]
```
"""
function get_proportion_round(for_prop_arr::AbstractVector{Float64};round_digits::Int64 = 2)::AbstractVector{Float64}
    temparr = round.(for_prop_arr ./ sum(for_prop_arr), digits=round_digits, base = 10)
    delta = 1 - sum(temparr) # rounding error left to redistribute
    if delta != 0.0
        imax = argmax(temparr)
        temparr[imax] = round(temparr[imax] + delta, digits=round_digits, base = 10)
    end
    return temparr
end
"""
get_proportion_round_add1251(for_prop_arr::AbstractVector{Float64};round_digits::Int64)::AbstractVector{Float64}
Compute the proportional values for each element in the array, rounded to the specified number of decimal digits.
If the rounded values do not sum to 1, then 1251 is added to each element and the proportions are recomputed;
if the sum is still not 1 after 2 iterations, the remaining difference is added to the largest element.
Returns an AbstractVector{Float64}.
# Example
```
julia> get_proportion_round_add1251([1.33,1.33,1.23,1.1111],round_digits = 2)
=[0.26, 0.27, 0.25, 0.22]
```
"""
function get_proportion_round_add1251(for_prop_arr::AbstractVector{Float64};round_digits::Int64 = 2)::AbstractVector{Float64}
    temparr = round.(for_prop_arr ./ sum(for_prop_arr), digits=round_digits, base = 10)
    delta = 1 - sum(temparr)
    if delta != 0.0
        check = 1
        while check < 2
            sqarr = for_prop_arr .+ 1251 # shift all elements, then recompute proportions
            temparr = round.(sqarr ./ sum(sqarr), digits=round_digits, base = 10)
            delta = 1 - sum(temparr)
            if delta != 0.0
                check = check + 1
                if check == 2 # second failure: adjust the largest element
                    imax = argmax(temparr)
                    temparr[imax] = round(temparr[imax] + delta, digits=round_digits, base = 10)
                end
            else
                break
            end
        end
    end
    return temparr
end
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | code | 2851 | """
get_proportion_2d(for_prop_arr::AbstractArray{Float64, 2})::AbstractArray{Float64, 2}
Compute the proportional values for each element in the 2d array.
Returns an AbstractArray{Float64, 2}.
# Example
```
julia> get_proportion_2d([1.0 2.0 ; 4.0 5.0])
=[0.08333333333333333 0.16666666666666666; 0.3333333333333333 0.4166666666666667]
```
"""
function get_proportion_2d(for_prop_arr::AbstractArray{Float64,2})::AbstractArray{Float64,2}
return for_prop_arr ./ sum(for_prop_arr)
end
"""
get_proportion_round_2d(for_prop_arr::AbstractArray{Float64, 2};round_digits::Int64)::AbstractArray{Float64, 2}
Compute the proportional values for each element in the 2d array, rounded to the specified number of decimal digits.
If the rounded values do not sum to 1, the difference is added to the largest element.
Returns an AbstractArray{Float64, 2}.
# Example
```
julia> get_proportion_round_2d([1.1101 2.243 ; 4.9898 5.87],round_digits=5)
=[0.07811 0.15781; 0.35108 0.413]
```
"""
function get_proportion_round_2d(for_prop_arr::AbstractArray{Float64,2};round_digits::Int64 = 2)::AbstractArray{Float64,2}
    temparr = round.(for_prop_arr ./ sum(for_prop_arr), digits=round_digits, base = 10)
    delta = 1 - sum(temparr) # rounding error left to redistribute
    if delta != 0.0
        imax = argmax(temparr)
        temparr[imax] = round(temparr[imax] + delta, digits=round_digits, base = 10)
    end
    return temparr
end
"""
get_proportion_round_add1251_2d(for_prop_arr::AbstractArray{Float64, 2};round_digits::Int64)::AbstractArray{Float64, 2}
Compute the proportional values for each element in the 2d array, rounded to the specified number of decimal digits.
If the rounded values do not sum to 1, then 1251 is added to each element and the proportions are recomputed;
if the sum is still not 1 after 2 iterations, the remaining difference is added to the largest element.
Returns an AbstractArray{Float64, 2}.
# Example
```
julia> get_proportion_round_add1251_2d([1.1101 2.243 ; 4.9898 5.87 ; 34.333 6.78],round_digits=8)
=[0.16559399 0.16574381; 0.16610708 0.16622349; 0.16998779 0.16634384]
```
"""
function get_proportion_round_add1251_2d(for_prop_arr::AbstractArray{Float64,2};round_digits::Int64 = 2)::AbstractArray{Float64,2}
    temparr = round.(for_prop_arr ./ sum(for_prop_arr), digits=round_digits, base = 10)
    delta = 1 - sum(temparr)
    if delta != 0.0
        check = 1
        while check < 2
            sqarr = for_prop_arr .+ 1251 # shift all elements, then recompute proportions
            temparr = round.(sqarr ./ sum(sqarr), digits=round_digits, base = 10)
            delta = 1 - sum(temparr)
            if delta != 0.0
                check = check + 1
                if check == 2 # second failure: adjust the largest element
                    imax = argmax(temparr)
                    temparr[imax] = round(temparr[imax] + delta, digits=round_digits, base = 10)
                end
            else
                break
            end
        end
    end
    return temparr
end
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | code | 1094 | using Proportions
using Test
@testset "Proportions.jl" begin
# Write your tests here.
@test Proportions.get_proportion([1.0,1.0,1.0,1.0]) == [0.25,0.25,0.25,0.25]
@test Proportions.get_proportion([1.0,1.0,1.0,2.5]) != [0.25,0.25,0.25,0.25]
@test Proportions.get_proportion_round([1.33,1.33,1.23,1.1111],round_digits=2) ==[0.26, 0.27, 0.25, 0.22]
@test Proportions.get_proportion_round([33,33,33.23,33.1111],round_digits=6) ==[0.249356, 0.249356, 0.251093, 0.250195]
@test Proportions.get_proportion_round_add1251([33.009, 33.31223, 33.2113, 33.111122],round_digits=6) ==[0.24997, 0.25003, 0.25001, 0.24999]
@test Proportions.get_proportion_2d([1.0 2.0 ; 4.0 5.0]) == [0.08333333333333333 0.16666666666666666; 0.3333333333333333 0.4166666666666667]
@test Proportions.get_proportion_round_2d([1.1101 2.243 ; 4.9898 5.87],round_digits=5) == [0.07811 0.15781; 0.35108 0.413]
@test Proportions.get_proportion_round_add1251_2d([1.1101 2.243 ; 4.9898 5.87 ; 34.333 6.78],round_digits=8) == [0.16559399 0.16574381; 0.16610708 0.16622349; 0.16998779 0.16634384]
end
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | docs | 7813 | # Proportions
[](https://travis-ci.com/hillelawaskar/Proportions.jl)
[](https://ci.appveyor.com/project/hillelawaskar/Proportions-jl)
[](https://codecov.io/gh/hillelawaskar/Proportions.jl)
[](https://coveralls.io/github/hillelawaskar/Proportions.jl?branch=main)
*The best Proportions package.*

## Package Features

- Provides functions to compute proportions for vector/matrix data
- It can be used in various applications to split values in a particular proportion
- Examples:
- 1 Split a number in the proportions of a factor in a vector/matrix
- 2 Split a payment schedule in various proportions
- 3 Split a payment based on the delivery of goods received

## Documentation

The documented functions are `get_proportion`, `get_proportion_round`,
`get_proportion_round_add1251` and their 2d counterparts `get_proportion_2d`,
`get_proportion_round_2d`, `get_proportion_round_add1251_2d`; see their
docstrings for details and doctest examples.
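For example (values taken from the package's doctests):
```julia
using Proportions

get_proportion_round([1.33, 1.33, 1.23, 1.1111], round_digits = 2)
# returns [0.26, 0.27, 0.25, 0.22]: rounded proportions adjusted to sum to 1
```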
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.1.0 | eaa980f5a721579635e085676efe307873bbab55 | docs | 561 | # Proportions
*The best Proportions package.*
## Package Features
- Provides functions to compute proportions for vector/matrix data; see the sketch below
- It can be used in various applications to split values in a particular proportion
- Examples:
- 1 Split a number in the proportions of a factor in a vector/matrix
- 2 Split a payment schedule in various proportions
- 3 Split a payment based on the delivery of goods received
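For instance (a minimal sketch; values taken from the package's test suite):
```julia
using Proportions

get_proportion([1.0, 1.0, 1.0, 1.0]) # [0.25, 0.25, 0.25, 0.25]
get_proportion_2d([1.0 2.0; 4.0 5.0]) # proportions of the whole matrix, summing to 1
```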
# Documentation
```@meta
CurrentModule = Proportions
DocTestSetup = quote
using Proportions
end
```
```@autodocs
Modules = [Proportions]
```
| Proportions | https://github.com/hillelawaskar/Proportions.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 1612 | using PhyloGaussianBeliefProp
using Documenter
using DocumenterCitations
DocMeta.setdocmeta!(PhyloGaussianBeliefProp, :DocTestSetup, :(using PhyloGaussianBeliefProp); recursive=true)
bib = CitationBibliography(joinpath(@__DIR__, "src", "refs.bib"))
makedocs(;
modules=[PhyloGaussianBeliefProp],
authors="Cecile Ane <[email protected]>, Benjamin Teo <[email protected]>, and contributors",
sitename="PhyloGaussianBeliefProp.jl",
format=Documenter.HTML(;
mathengine=Documenter.KaTeX(),
prettyurls=get(ENV, "CI", "false") == "true",
size_threshold = 600 * 2^10, size_threshold_warn = 500 * 2^10, # 600 KiB
canonical="https://cecileane.github.io/PhyloGaussianBeliefProp.jl/stable/",
edit_link="main",
assets=String["assets/citations.css"],
),
pages=[
"Home" => "index.md",
"Manual" => [
"Installation" => "man/installation.md",
"Getting started" => "man/getting_started.md",
"Background" => "man/background.md",
"Evolutionary models" => "man/evolutionary_models.md",
"Cluster graphs" => "man/clustergraphs.md",
"Regularization" => "man/regularization.md",
"Message schedules" => "man/message_schedules.md"
]
],
doctestfilters=[
# Ignore any digit after the 5th digit after a decimal, throughout the docs
r"(?<=\d\.\d{5})\d+",
],
plugins=[bib],
)
deploydocs(;
repo="github.com/cecileane/PhyloGaussianBeliefProp.jl.git",
push_preview=true,
devbranch="main",
)
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 902 | module PhyloGaussianBeliefProp
import Base: show
using Distributions: MvNormalCanon, MvNormal, AbstractMvNormal
using Graphs
import LinearAlgebra as LA
using MetaGraphsNext
using Optim, PreallocationTools, ForwardDiff
using PDMats
using StaticArrays
using StatsFuns
using Tables
using DataStructures: DefaultDict
import PhyloNetworks as PN
using PhyloNetworks: HybridNetwork, getparents, getparent, getparentedge,
getchild, getchildren, getchildedge, hassinglechild
include("utils.jl")
include("clustergraph.jl")
include("evomodels/evomodels.jl") # abstract evomodel must be included before all other models
include("evomodels/homogeneousbrownianmotion.jl")
include("evomodels/homogeneousornsteinuhlenbeck.jl")
include("evomodels/heterogeneousmodels.jl")
include("beliefs.jl")
include("beliefupdates.jl")
include("clustergraphbeliefs.jl")
include("calibration.jl")
include("score.jl")
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 33849 | @enum BeliefType bclustertype=1 bsepsettype=2
abstract type AbstractBelief{T} end
struct FamilyFactor{T<:Real,P<:AbstractMatrix{T},V<:AbstractVector{T}} <: AbstractBelief{T}
h::V
J::P
g::MVector{1,T} # mutable
"metadata, e.g. index of cluster in cluster graph"
metadata::Symbol # because clusters have metadata of type Symbol
end
function Base.show(io::IO, b::FamilyFactor)
print(io, "factor for node family" * " $(b.metadata)")
print(io, "\nexponential quadratic belief, parametrized by\nh: $(b.h)\nJ: $(b.J)\ng: $(b.g[1])\n")
end
"""
FamilyFactor(belief::AbstractBelief{T}) where T
Constructor to allocate memory for one family factor, with canonical parameters
and metadata initialized to be a copy of those in `belief`.
`FamilyFactor`s metadata are supposed to be symbols, so this constructor should
fail if its input is a sepset belief, whose `metadata` is a Tuple of Symbols.
"""
function FamilyFactor(belief::AbstractBelief{T}) where T
h = deepcopy(belief.h)
J = deepcopy(belief.J)
g = deepcopy(belief.g)
FamilyFactor{T,typeof(J),typeof(h)}(h,J,g,belief.metadata)
end
"""
Belief{T<:Real,Vlabel<:AbstractVector,P<:AbstractMatrix{T},V<:AbstractVector{T},M} <: AbstractBelief{T}
A "belief" is an exponential quadratic form, using the canonical parametrization:
C(x | J,h,g) = exp( -(1/2)x'Jx + h'x + g )
It is a *normalized* distribution density if
g = - (1/2) (log(2πΣ) + μ'Jμ)
= - entropy of normalized distribution + (1/2) dim(μ) - (1/2) μ'Jμ.
- μ is the mean, of type V (stored but typically not updated)
- h = inv(Σ)μ is the potential, also of type V,
- Σ is the variance matrix (not stored)
- J = inv(Σ) is the precision matrix, of type P
- g is a scalar to get the unnormalized belief: of type `MVector{1,T}` to be mutable.
See `MvNormalCanon{T,P,V}` in
[Distributions.jl](https://juliastats.org/Distributions.jl/stable/multivariate/#Distributions.MvNormalCanon)
Other fields are used to track which cluster or edge the belief corresponds to,
and which traits of which variables are in scope:
- `nodelabel` of type `Vlabel`
- `ntraits`
- `inscope`
- `type`: cluster or sepset
- `metadata` of type `M`: `Symbol` for clusters, `Tuple{Symbol,Symbol}` for sepsets.
Methods for a belief `b`:
- `nodelabels(b)`: vector of node labels, of type `Vlabel`, e.g. Int8 if nodes
are labelled by their preorder index in the original phylogeny
- `ntraits(b)`: number of traits (dimension of the random variable x above)
- `inscope(b)`: matrix of booleans (trait i in row i and and node j in column j)
- `nodedimensions(b)`: vector of integers, with jth value giving the dimension
(number of traits in scope) of node j.
- `dimension(b)`: total dimension of the belief, that is, total number of traits
in scope. Without any missing data, that would be ntraits × length of nodelabels.
"""
struct Belief{T<:Real,Vlabel<:AbstractVector,P<:AbstractMatrix{T},V<:AbstractVector{T},M} <: AbstractBelief{T}
"Integer label for nodes in the cluster"
nodelabel::Vlabel # StaticVector{N,Tlabel}
"Total number of traits at each node"
ntraits::Int
"""Matrix inscope[i,j] is `false` if trait `i` at node `j` is / will be
removed from scope, to avoid issues from 0 precision or infinite variance; or
when there is no data for trait `i` below node `j` (in which case tracking
this variable is only good for prediction, not for learning parameters).
"""
inscope::BitArray
μ::V
h::V
J::P # PDMat(J) not easily mutable, stores cholesky computed upon construction only
g::MVector{1,T}
"belief type: cluster (node in cluster graph) or sepset (edge in cluster graph)"
type::BeliefType
"metadata, e.g. index in cluster graph,
of type (M) `Symbol` for clusters or Tuple{Symbol,Symbol} for edges."
metadata::M
end
nodelabels(b::Belief) = b.nodelabel
ntraits(b::Belief) = b.ntraits
inscope(b::Belief) = b.inscope
nodedimensions(b::Belief) = map(sum, eachslice(inscope(b), dims=2))
dimension(b::Belief) = sum(inscope(b))
function Base.show(io::IO, b::Belief)
disp = "belief for " * (b.type == bclustertype ? "Cluster" : "SepSet") * " $(b.metadata),"
disp *= " $(ntraits(b)) traits × $(length(nodelabels(b))) nodes, dimension $(dimension(b)).\n"
disp *= "Node labels: "
print(io, disp)
print(io, nodelabels(b))
print(io, "\ntrait × node matrix of non-degenerate beliefs:\n")
show(io, inscope(b))
print(io, "\nexponential quadratic belief, parametrized by\nμ: $(b.μ)\nh: $(b.h)\nJ: $(b.J)\ng: $(b.g[1])\n")
end
"""
Belief(nodelabels, numtraits, inscope, belieftype, metadata, T=Float64)
Constructor to allocate memory for one cluster, and initialize objects with 0s
to initialize the belief with the constant function exp(0)=1.
"""
function Belief(nl::AbstractVector{Tlabel}, numtraits::Integer,
inscope::BitArray, belief, metadata, T::Type=Float64) where Tlabel<:Integer
nnodes = length(nl)
nodelabels = SVector{nnodes}(nl)
size(inscope) == (numtraits,nnodes) || error("inscope of the wrong size")
cldim = sum(inscope)
μ = MVector{cldim,T}(zero(T) for _ in 1:cldim) # zeros(T, cldim)
h = MVector{cldim,T}(zero(T) for _ in 1:cldim)
J = MMatrix{cldim,cldim,T}(zero(T) for _ in 1:(cldim*cldim))
g = MVector{1,T}(0)
Belief{T,typeof(nodelabels),typeof(J),typeof(h),typeof(metadata)}(
nodelabels,numtraits,inscope,μ,h,J,g,belief,metadata)
end
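# Example (illustrative): allocate a cluster belief for 2 traits at nodes 5 and 6,
# with all 4 variables in scope and canonical parameters initialized to 0,
# i.e. the constant belief exp(0)=1:
# b = Belief(Int8[5,6], 2, trues(2,2), bclustertype, :C56)
# dimension(b) # 4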
"""
inscope_onenode(node_label, b:Belief)
AbstractVector: view of the row vector in `b`'s inscope matrix corresponding to
node `node_label`, indicating whether a trait at that node is in scope or not.
"""
function inscope_onenode(node_label, belief::Belief)
node_j = findfirst(isequal(node_label), nodelabels(belief))
return view(inscope(belief), :, node_j)
end
"""
scopeindex(j::Integer, belief::AbstractBelief)
Indices in the belief's μ,h,J vectors and matrices of the traits in scope
for node indexed `j` in `nodelabels(belief)`.
"""
function scopeindex(j::Integer, belief::AbstractBelief)
binscope = inscope(belief)
# subset_inscope = falses(size(belief_inscope))
# subset_inscope[:,j] .= binscope[:,j]
# return findall(subset_inscope[binscope])
node_dims = map(sum, eachslice(binscope, dims=2))
k0 = sum(node_dims[1:(j-1)]) # 0 if j=1
collect(k0 .+ (1:node_dims[j]))
end
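# Example (illustrative): for a belief over nodes [5,6] with 2 traits, all in scope,
# scopeindex(2, b) == [3, 4]: the variables of the second node come after the
# 2 variables of the first node in μ, h and J.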
"""
scopeindex(node_labels::Union{Tuple,AbstractVector}, belief::AbstractBelief)
Indices in the belief's μ,h,J vectors and matrices of the variables
for nodes labeled `node_labels`. The belief's `inscope` matrix of
booleans says which node (column) and trait (row) is in the belief's scope.
These variables are vectorized by stacking up columns, that is,
listing all in-scope traits of the first node, then all in-scope traits of
the second node etc.
"""
function scopeindex(node_labels::Union{Tuple,AbstractVector}, belief::AbstractBelief)
binscope = inscope(belief)
node_j = indexin(node_labels, nodelabels(belief))
any(isnothing.(node_j)) && error("some label is not in the belief's node labels")
node_dims = map(sum, eachslice(binscope, dims=2))
node_cumsum = cumsum(node_dims)
res = Vector{Int}(undef, sum(node_dims[node_j]))
i0=0
for jj in node_j
k0 = (jj==1 ? 0 : node_cumsum[jj-1])
for _ in 1:node_dims[jj]
i0 += 1; k0 += 1
res[i0] = k0
end
end
return res
end
"""
scopeindex(sepset::AbstractBelief, cluster::AbstractBelief)
Indices `ind`, in the cluster in-scope variables (that is, in the cluster's μ,h,J
vectors and matrices) of the sepset in-scope variables, such that
`cluster.μ[ind]` correspond to the same variables as `sepset.μ`, for example.
These sepset in-scope variables can be a subset of traits for each node in the
sepset, as indicated by `inscope(sepset)`.
Warning: the labels in the sepset are assumed to be ordered as in the cluster.
An error is returned if `sepset` contains labels not in the `cluster`,
or if a variable in the `sepset`'s scope is not in scope in the `cluster`.
"""
scopeindex(sep::AbstractBelief, clu::AbstractBelief) =
scopeindex(nodelabels(sep), inscope(sep), nodelabels(clu), inscope(clu))
function scopeindex(subset_labels::AbstractVector, subset_inscope::BitArray,
belief_labels::AbstractVector, belief_inscope::BitArray)
node_index = indexin(subset_labels, belief_labels)
issorted(node_index) || error("subset labels come in a different order in the belief")
any(isnothing.(node_index)) && error("subset_labels not a subset of belief_labels")
any(subset_inscope .&& .!view(belief_inscope,:,node_index)) &&
error("some variable(s) in subset's scope yet not in full belief's scope")
subset_inclusterscope = falses(size(belief_inscope))
subset_inclusterscope[:,node_index] .= subset_inscope
return findall(subset_inclusterscope[belief_inscope])
end
"""
scopeindex(node_label, sepset::AbstractBelief, cluster::AbstractBelief)
Tuple of 2 index vectors `(ind_in_sepset, ind_in_cluster)` in the sepset and in the
cluster in-scope variables (that is, in the cluster's μ,h,J vectors and matrices)
of the *shared* in-scope traits for node `node_label`, such that
`sepset.μ[ind_in_sepset]` correspond to all the node's traits in the sepset scope
and `cluster.μ[ind_in_cluster]` correspond to the same traits in the cluster scope,
which may be a subset of all the node's traits in scope for that cluster.
If not, an error is thrown.
"""
function scopeindex(node_lab, sep::AbstractBelief, clu::AbstractBelief)
s_j = findfirst(isequal(node_lab), nodelabels(sep))
isnothing(s_j) && error("$node_lab not in sepset")
sep_inscope = inscope(sep)
s_insc_node = view(sep_inscope, :,s_j) # column for node in sepset inscope
c_j = findfirst(isequal(node_lab), nodelabels(clu))
isnothing(c_j) && error("$node_lab not in cluster")
clu_inscope = inscope(clu)
c_insc_node = view(clu_inscope, :,c_j) # column for node in cluster inscope
any(s_insc_node .& .!c_insc_node) &&
error("some traits are in sepset's but not in cluster's scope for node $node_lab")
s_insc = falses(size(sep_inscope))
s_insc[:,s_j] .= s_insc_node
ind_sep = findall(s_insc[sep_inscope])
c_insc = falses(size(clu_inscope))
c_insc[:,c_j] .= s_insc_node # !! not c_insc_node: to get *shared* traits
ind_clu = findall(c_insc[clu_inscope])
return (ind_sep, ind_clu)
end
# todo perhaps: add option to turn off checks, and
# add function to run these checks once between all incident sepset-cluster
"""
init_beliefs_allocate(tbl::Tables.ColumnTable, taxa, net, clustergraph,
evolutionarymodel)
Vector of beliefs, initialized to the constant function exp(0)=1,
one for each cluster then one for each sepset in `clustergraph`.
`tbl` is used to know which leaf in `net` has data for which trait,
so as to remove from the scope each variable without data below it.
`taxa` should be a vector with taxon names in the same order as they come in
the table of data `tbl`.
The root is removed from scope if the evolutionary model has a fixed root: so as
to use the model's fixed root value as data if the root as zero prior variance.
Also removed from scope is any hybrid node that is degenerate and who has
a single child edge of positive length.
Warnings: this function might need to be re-run to re-do allocation if
- the data changed: different number of traits, or different pattern of missing
data at the tips
- the model changed: with the root changed from fixed to random, see
[`init_beliefs_allocate_atroot!`](@ref) in that case.
"""
function init_beliefs_allocate(tbl::Tables.ColumnTable, taxa::AbstractVector,
net::HybridNetwork, clustergraph, model::EvolutionaryModel{T}) where T
numtraits = length(tbl)
nnodes = length(net.nodes_changed)
nnodes > 0 ||
error("the network should have been pre-ordered, with indices used in cluster graph")
fixedroot = isrootfixed(model)
#= hasdata: to know, for each node, whether that node has a descendant
with data, for each trait.
If not: that node can be removed from all clusters & sepsets.
If yes and the node is a tip: the evidence should be used later,
then the tip can be removed once the evidence is absorbed.
=#
hasdata = falses(numtraits, nnodes)
for i_node in reverse(eachindex(net.nodes_changed))
node = net.nodes_changed[i_node]
nodelab = node.name
i_row = findfirst(isequal(nodelab), taxa)
if !isnothing(i_row) # the node has data: it should be a tip!
node.leaf || error("A node with data is internal, should be a leaf")
for v in 1:numtraits
hasdata[v,i_node] = !ismissing(tbl[v][i_row])
end
end
if node.leaf
all(!hasdata[v,i_node] for v in 1:numtraits) &&
@error("tip $nodelab in network without any data")
continue
end
for e in node.edge
ch = getchild(e)
ch !== node || continue # skip parent edges
i_child = findfirst( n -> n===ch, net.nodes_changed)
isnothing(i_child) && error("oops, child (number $(ch.number)) not found in nodes_changed")
hasdata[:,i_node] .|= hasdata[:,i_child] # bitwise or
end
all(!hasdata[v,i_node] for v in 1:numtraits) &&
@error("internal node $nodelab without any data below")
end
#= next: create a belief for each cluster and sepset. inscope =
'has partial information and non-degenerate variance or precision?' =
- false at the root if "fixedroot", else:
- 'hasdata?' at internal nodes (assumes non-degenerate transitions)
- false at tips (assumes all data are at tips)
- false at degenerate hybrid node with 1 tree child edge of positive length
=#
function build_inscope(set_nodeindices)
inscope = falses(numtraits, length(set_nodeindices)) # remove from scope by default
for (i,i_node) in enumerate(set_nodeindices)
node = net.nodes_changed[i_node]
(node.leaf || (isdegenerate(node) && unscope(node))) && continue # inscope[:,i] already false
fixedroot && i_node==1 && continue # keep 'false' at the root if fixed
inscope[:,i] .= hasdata[:,i_node]
end
return inscope
end
beliefs = Belief{T}[]
for cllab in labels(clustergraph)
nodeindices = clustergraph[cllab][2]
inscope = build_inscope(nodeindices)
push!(beliefs, Belief(nodeindices, numtraits, inscope, bclustertype, cllab,T))
end
for sslab in edge_labels(clustergraph)
nodeindices = clustergraph[sslab...]
inscope = build_inscope(nodeindices)
push!(beliefs, Belief(nodeindices, numtraits, inscope, bsepsettype, sslab,T))
end
return beliefs
end
"""
init_factors_allocate(beliefs::AbstractVector{<:Belief}, nclusters::Integer)
Vector of `nclusters` factors of type [`FamilyFactor`](@ref), whose canonical
parameters and metadata are initialized to be a copy of those in `beliefs`.
Assumption: `beliefs[1:nclusters]` are cluster beliefs, and
`beliefs[nclusters+1:end]` (if any) are sepset beliefs. This is not checked.
"""
function init_factors_allocate(beliefs::AbstractVector{B}, nclusters::Integer) where B<:Belief{T} where T
factors = FamilyFactor{T}[]
for i in 1:nclusters
push!(factors, FamilyFactor(beliefs[i]))
end
return factors
end
"""
init_beliefs_allocate_atroot!(beliefs, factors, messageresiduals, model)
Update the scope and re-allocate memory for cluster & sepset `beliefs`, `factors`
and `messageresiduals` to include or exclude the root,
depending on whether the root variable is random or fixed in `model`.
To change the dimension of canonical parameters μ,h,J, new memory is allocated
and initialized to 0.
This function can be used to update beliefs when the root model changes from
fixed to non-fixed or vice-versa.
It re-allocates less memory than [`init_beliefs_allocate`](@ref) (which would
need to be followed by [`init_factors_allocate`](@ref))
because clusters and sepsets that do not have the root are not modified.
Assumptions:
- all traits at the root have at least one descendant with non-missing data,
- beliefs were previously initialized with a model that had the same number of
traits as the current `model`.
"""
function init_beliefs_allocate_atroot!(beliefs, factors, messageresidual, model::EvolutionaryModel{T}) where T
numtraits = dimension(model)
fixedroot = isrootfixed(model)
# root *not* in scope if fixed; else *in* scope bc we assume data below
update_inscope!(inscope, root_ind) = inscope[:,root_ind] .= !fixedroot
for (i_b, be) in enumerate(beliefs)
root_ind = findfirst(nl -> 1 == nl, nodelabels(be))
isnothing(root_ind) && continue # skip: root ∉ belief
iscluster = be.type == bclustertype
be_insc = be.inscope
update_inscope!(be_insc, root_ind)
beliefs[i_b] = Belief(be.nodelabel, numtraits, be_insc, be.type, be.metadata, T)
if iscluster # re-allocate the corresponding factor. if sepset: nothing to do
factors[i_b] = FamilyFactor(beliefs[i_b])
end
issepset = beliefs[i_b].type == bsepsettype
if issepset # re-allocate the corresponding messages for sepsets
(clustlab1, clustlab2) = beliefs[i_b].metadata
messageresidual[(clustlab1, clustlab2)] = MessageResidual(beliefs[i_b].J, beliefs[i_b].h)
messageresidual[(clustlab2, clustlab1)] = MessageResidual(beliefs[i_b].J, beliefs[i_b].h)
end
end
end
"""
init_beliefs_reset!(beliefs::Vector{<:Belief})
Reset all beliefs (which can be cluster and/or sepset beliefs) to h=0, J=0, g=0 (μ unchanged).
They can later be re-initialized for different model parameters and
re-calibrated, without re-allocating memory.
"""
function init_beliefs_reset!(beliefs::AbstractVector{B}) where B<:Belief{T} where T
for be in beliefs
be.h .= zero(T)
be.J .= zero(T)
be.g[1] = zero(T)
end
end
"""
init_factors_frombeliefs!(factors, beliefs, checkmetadata::Bool=false)
Reset all `factors` by copying h,J,g from `beliefs`.
Assumption: the cluster beliefs match the factors exactly: for a valid factor
index `i`, `beliefs[i]` is of cluster type and has the same dimension as
`factors[i]`.
Set `checkmetadata` to true to check that `beliefs[i]` and `factors[i]` have
the same metadata.
"""
function init_factors_frombeliefs!(factors, beliefs, checkmetadata::Bool=false)
for (fa,be) in zip(factors,beliefs)
if checkmetadata
fa.metadata == be.metadata ||
error("factor $(fa.metadata) mismatched with belief $(be.metadata)")
end
fa.h .= be.h
fa.J .= be.J
fa.g[1] = be.g[1]
end
end
"""
init_beliefs_assignfactors!(beliefs,
evolutionarymodel, columntable, taxa,
nodevector_preordered)
Initialize cluster beliefs prior to belief propagation, by assigning
each factor to one cluster. Sepset beliefs are reset to 0.
There is one factor for each node v in the vector of nodes:
the density of X\\_v conditional on its parent X\\_pa(v) if v is not the root,
or the prior density for X_root.
- for each leaf, the factor is reduced by absorbing the evidence for that leaf,
that is, the data found in the `columntable`, whose rows should be ordered by
taxa as they appear in `taxa`.
- for each leaf, missing trait values are removed from scope.
- for each internal node, any trait not in scope (e.g. if all descendant leaves
are missing a value for this trait) is marginalized out of the factor.
Assumptions:
- In vector `nodevector_preordered`, nodes are assumed to be preordered.
Typically, this vector is `net.nodes_changed` after the network is preordered.
- Belief node labels correspond to the index of each node in `nodevector_preordered`.
- In `beliefs`, cluster beliefs come first and sepset beliefs come last,
as when created by [`init_beliefs_allocate`](@ref)
Output: vector `node2belief` such that, if `i` is the preorder index of a node
in the network, `node2belief[i]` is the index of the belief that the node family
was assigned to.
The `beliefs` vector is modified in place.
"""
function init_beliefs_assignfactors!(
beliefs::Vector{<:Belief},
model::EvolutionaryModel,
tbl::Tables.ColumnTable, taxa::AbstractVector, prenodes::Vector{PN.Node})
init_beliefs_reset!(beliefs)
numtraits = dimension(model)
visited = falses(length(prenodes))
node2belief = Vector{Int}(undef, length(prenodes)) # node preorder index → belief index
for (i_node,node) in enumerate(prenodes)
visited[i_node] && continue # skip child of unscoped degenerate hybrid
nodelab = node.name
if i_node == 1 # root
isrootfixed(model) && continue # h,J,g all 0: nothing to do
i_b = findfirst(b -> 1 ∈ nodelabels(b), beliefs)
isnothing(i_b) && error("no cluster containing the root, number $(node.number).")
i_inscope = (1,)
h,J,g = factor_root(model)
elseif node.hybrid
pae = PN.Edge[] # collect parent edges, parent nodes, and their
pa = PN.Node[] # preorder indices, sorted in postorder
i_parents = Int[]
for e in node.edge # loop over parent edges
getchild(e) === node || continue
pn = getparent(e) # parent node
pi = findfirst(n -> n===pn, prenodes) # parent index
ii = findfirst(i_parents .< pi) # i_parents is reverse-sorted
if isnothing(ii) ii = length(i_parents) + 1; end
insert!(i_parents, ii, pi)
insert!(pa, ii, pn)
insert!(pae, ii, e)
end
if isdegenerate(node) && unscope(node)
che = getchildedge(node)
ch = getchild(che)
i_child = findfirst(n -> n===ch, prenodes)
visited[i_child] = true
h,J,g = factor_tree_degeneratehybrid(model, pae, che)
if ch.leaf
i_datarow = findfirst(isequal(ch.name), taxa)
h,J,g = absorbleaf!(h,J,g, i_datarow, tbl)
i_inscope = (i_parents...,)
else
i_inscope = (i_child, i_parents...)
end
else
i_inscope = (i_node, i_parents...)
h,J,g = factor_hybridnode(model, pae)
@debug "node $(node.name), lengths $([e.length for e in pae]), gammas $([p.gamma for p in pae])\nh=$h, J=$J, g=$g"
end
i_b = findfirst(b -> issubset(i_inscope, nodelabels(b)), beliefs)
isnothing(i_b) && error("no cluster containing the scope for hybrid $(node.number).")
else
e = getparentedge(node)
pa = getparent(e)
i_parent = findfirst(n -> n===pa, prenodes)
i_b = findfirst(b -> i_parent ∈ nodelabels(b) && i_node ∈ nodelabels(b), beliefs)
isnothing(i_b) && error("no cluster containing nodes $(node.number) and $(pa.number).")
h,J,g = factor_treeedge(model, e)
if node.leaf
i_datarow = findfirst(isequal(nodelab), taxa)
h,J,g = absorbleaf!(h,J,g, i_datarow, tbl)
i_inscope = (i_parent,)
else
i_inscope = (i_node,i_parent)
end
end
node2belief[i_node] = i_b
be = beliefs[i_b]
be.type == bclustertype || error("belief $(be.metadata) is of type $(be.type)")
if isrootfixed(model) && 1 ∈ i_inscope # the node's parents include the root
1 == i_inscope[end] || error("expected the root to be listed last (postorder)")
i_inscope = i_inscope[1:(end-1)] # remove last entry '1'
rootindex = (length(h) - numtraits + 1):length(h)
h,J,g = absorbevidence!(h,J,g, rootindex, rootpriormeanvector(model))
end
factorind = scopeindex(i_inscope, be)
@debug """
factor for node $(node.name), nodes in scope have preorder $i_inscope,
cluster $i_b with labels $(nodelabels(be)), inscope: $(inscope(be)),
their variable belief indices $factorind.
before marginalizing: h=$(round.(h, digits=2)), J=$(round.(J, digits=2)), g=$g
"""
if length(factorind) != numtraits * length(i_inscope)
# then marginalize variables not in scope, e.g. bc no data below
var_inscope = view(inscope(be), :, indexin(i_inscope, nodelabels(be)))
keep_index = LinearIndices(var_inscope)[var_inscope]
@debug """factor for node $(node.name), cluster $i_b with labels $(nodelabels(be)),
need to marginalize, keep index $keep_index.
h=$(round.(h, digits=2)), J=$(round.(J, digits=2)), g=$g"""
h,J,g = marginalizebelief(h,J,g, keep_index, be.metadata)
end
view(be.h, factorind) .+= h
view(be.J, factorind, factorind) .+= J
be.g[1] += g
end
return node2belief
end
#= messages in ReactiveMP.jl have an `addons` field that stores computation history:
https://biaslab.github.io/ReactiveMP.jl/stable/lib/message/#ReactiveMP.Message
Here, similar "in spirit" to track progress towards calibration
or to facilitate adaptive scheduling (e.g. residual BP), we store a
message residual: difference (on log-scale) between a message *received*
by a cluster and the belief that the sepset previously had.
=#
abstract type AbstractResidual{T} end
"""
MessageResidual{T<:Real, P<:AbstractMatrix{T}, V<:AbstractVector{T}} <: AbstractResidual{T}
Structure to store the most recent computation history of a message, in the
form of the ratio: sent\\_message / current\\_sepset\\_belief, when a message is
sent from one cluster to another along a given sepset.
At calibration, this ratio is 1. For Gaussian beliefs, this ratio is an
exponential quadratic form, stored using its canonical parametrization,
excluding the constant.
Fields:
- `Δh`: canonical parameter vector of the message residual
- `ΔJ`: canonical parameter matrix of the message residual
- `kldiv`: kl divergence between the message that was last sent and the
sepset belief before the last update
- `iscalibrated_resid`: true if the last message and prior sepset belief were
approximately equal, false otherwise. see [`iscalibrated_residnorm!`](@ref)
- `iscalibrated_kl`: same, but in terms of the KL divergence,
see [`iscalibrated_kl!`](@ref).
"""
struct MessageResidual{T<:Real, P<:AbstractMatrix{T}, V<:AbstractVector{T}} <: AbstractResidual{T}
Δh::V
ΔJ::P
kldiv::MVector{1,T}
iscalibrated_resid::MVector{1,Bool}
iscalibrated_kl::MVector{1,Bool}
end
"""
MessageResidual(J::AbstractMatrix{T}, h::AbstractVector{T})
Constructor to allocate memory for a `MessageResidual` with canonical parameters
`(ΔJ, Δh)` of the same dimension and type as `J` and `h`, initialized to zeros.
`kldiv` is initialized to `[-1.0]` and the flags `iscalibrated_{resid,kl}`
are initialized to `false` if the message is of positive dimension.
If the message is empty (ΔJ and Δh of dimension 0) then the message is initialized
as being calibrated: `kldiv` is set to 0 and `iscalibrated` flags set to true.
Although `(ΔJ, Δh)` initialized to zero may suggest calibration, the flags
`iscalibrated_{resid,kl}` are initialized to false to indicate that no message has been passed yet.
"""
function MessageResidual(J::AbstractMatrix{T}, h::AbstractVector{T}) where {T <: Real}
Δh = zero(h)
ΔJ = zero(J)
kldiv, iscal_res, iscal_kl = (isempty(h) ?
(MVector(zero(T)), MVector(true), MVector(true) ) :
(MVector(-one(T)), MVector(false), MVector(false))
)
MessageResidual{T,typeof(ΔJ),typeof(Δh)}(Δh, ΔJ, kldiv, iscal_res, iscal_kl)
end
"""
init_messageresidual_allocate(beliefs::Vector{B}, nclusters)
Dictionary of `2k` residuals of type [`MessageResidual`](@ref), whose canonical
parameters (Δh,ΔJ) are initialized using [`MessageResidual`](@ref), to be of
the same size as sepsets in `beliefs`, where `k` is `length(beliefs) - nclusters`.
Assumption: the first `nclusters` beliefs are cluster beliefs, and the next
`k` beliefs are sepset beliefs. This is not checked.
The sepset for edge `(label1,label2)` is associated with 2 messages, for the
2 directions in which beliefs can be propagated along the edge. The keys for
these messages are `(label1,label2)` and `(label2,label1)`.
"""
function init_messageresidual_allocate(beliefs::Vector{B}, nclusters) where B<:Belief{T} where T<:Real
messageresidual = Dict{Tuple{Symbol,Symbol}, MessageResidual{T}}()
for j in (nclusters+1):length(beliefs)
ssbe = beliefs[j] # sepset belief
(clustlab1, clustlab2) = ssbe.metadata
messageresidual[(clustlab1, clustlab2)] = MessageResidual(ssbe.J, ssbe.h)
messageresidual[(clustlab2, clustlab1)] = MessageResidual(ssbe.J, ssbe.h)
end
return messageresidual
end
"""
init_messagecalibrationflags_reset!(mr::AbstractResidual, reset_kl::Bool)
For a non-empty message residual `mr`, reset its `iscalibrated_*` flags to false,
and if `reset_kl` is true, reset its `kldiv` to -1.
Its `ΔJ` and `Δh` fields are *not* reset here, because they are overwritten
during a belief propagation step.
Nothing is done for empty messages.
"""
function init_messagecalibrationflags_reset!(mr::AbstractResidual{T}, resetkl::Bool) where T
if isempty(mr.Δh) return nothing; end
if resetkl mr.kldiv[1] = - one(T); end
mr.iscalibrated_resid[1] = false
mr.iscalibrated_kl[1] = false
return nothing
end
iscalibrated_residnorm(res::AbstractResidual) = res.iscalibrated_resid[1]
iscalibrated_kl(res::AbstractResidual) = res.iscalibrated_kl[1]
"""
iscalibrated_residnorm!(res::AbstractResidual, atol=1e-5, p::Real=Inf)
True if the canonical parameters `res.Δh` and `res.ΔJ` of the message residual
have `p`-norm within `atol` of 0; false otherwise.
`res.iscalibrated_resid` is updated accordingly.
With `p` infinite, the max norm is used by default, meaning that
`res.Δh` and `res.ΔJ` should be close to 0 element-wise.
"""
function iscalibrated_residnorm!(res::AbstractResidual{T}, atol=T(1e-5), p::Real=Inf) where T
res.iscalibrated_resid[1] =
isapprox(LA.norm(res.Δh, p), zero(T), atol=atol) &&
isapprox(LA.norm(res.ΔJ, p), zero(T), atol=atol)
end
"""
iscalibrated_kl!(res::AbstractResidual, atol=1e-5)
True if the KL divergence stored in `res.kldiv` is within `atol` of 0;
false otherwise. `res.iscalibrated_kl` is modified accordingly.
This KL divergence should have been previously calculated: between a sepset
belief, equal to the message that was passed most recently, and its belief just
prior to passing that message.
"""
function iscalibrated_kl!(res::AbstractResidual{T}, atol=T(1e-5)) where T
res.iscalibrated_kl[1] = isapprox(res.kldiv[1], zero(T), atol=atol)
end
"""
residual_kldiv!(residual::AbstractResidual, sepset::AbstractBelief,
canonicalparams::Tuple)
Update `residual.kldiv` with the
[Kullback-Leibler](https://en.wikipedia.org/wiki/Kullback-Leibler_divergence#Multivariate_normal_distributions)
divergence between
a message sent through a sepset (normalized to a probability distribution),
and the sepset belief before the belief update (also normalized).
`sepset` should contain the updated belief, and `residual` the difference
in the `J` and `h` parameters due to the belief update (after - before),
such that the previous belief is: `sepset` belief - `residual`.
As a side product, `sepset.μ` is updated.
Output: `true` if the KL divergence is close to 0, `false` otherwise.
See [`iscalibrated_kl!`](@ref) for the tolerance.
If the current or previous `sepset` belief is degenerate,
in the sense that its precision matrix is not positive definite and the
belief cannot be normalized to a proper distribution, then
`residual` and `sepset` are not updated, and `false` is returned.
No warning and no error is sent, because sepset beliefs are initialized at 0
and this case is expected to be frequent before enough messages are sent.
## Calculation:
sepset after belief-update (i.e. message sent): C(x | Jₘ, hₘ, gₘ) ∝ density for
X ~ 𝒩(μ=Jₘ⁻¹hₘ, Σ=Jₘ⁻¹)
sepset before belief-update: C(x | Jₛ, hₛ, gₛ)
residual: ΔJ = Jₘ - Jₛ, Δh = hₘ - hₛ
p: dimension of X (number of variables: number of nodes * number of traits).
Below, we use the notation Δg for the change in constants to normalize each
message, which is *not* gₘ-gₛ because the stored beliefs are not normalized.
KL(C(Jₘ, hₘ, _) || C(Jₛ, hₛ, _))
= Eₘ[log C(x | Jₘ,hₘ,_)/C(x | Jₛ,hₛ,_)] where x ∼ C(Jₘ,hₘ,_)
= Eₘ[-(1/2) x'ΔJx + Δh'x + Δg]
= ( tr(JₛJₘ⁻¹) - p + (μₛ-μₘ)'Jₛ(μₛ-μₘ) + log(det(Jₘ)/det(Jₛ)) ) /2
See also: [`average_energy!`](@ref), which only requires the sepset belief
to be positive definite.
"""
function residual_kldiv!(res::AbstractResidual{T}, sepset::AbstractBelief{T}) where {T <: Real}
# isposdef returns true for empty matrices e.g. isposdef(Real[;;]) and isposdef(MMatrix{0,0}(Real[;;]))
isempty(sepset.J) && return true
(J0, μ0) = try getcholesky_μ!(sepset) # current (m): message that was passed
catch
return false
end
(J1, μ1) = try getcholesky_μ(sepset.J .- res.ΔJ, sepset.h .- res.Δh) # before (s)
catch
return false
end
res.kldiv[1] = ( - LA.tr(J0 \ res.ΔJ) + # tr(JₛJₘ⁻¹-I) = tr((Jₛ-Jₘ)Jₘ⁻¹) = tr(-ΔJ Jₘ⁻¹)
quad(J1, μ1-μ0) + # (μ1-μ0)' J1 (μ1-μ0)
LA.logdet(J0) - LA.logdet(J1) )/2
iscalibrated_kl!(res)
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 8702 | """
BPPosDefException
Exception thrown when a belief message cannot be computed, that is, when the
submatrix of the precision `J`, subsetted to the variables to be integrated out,
is not [positive definite](https://en.wikipedia.org/wiki/Definiteness_of_a_matrix).
It has a message `msg` field (string), and an `info` field (integer) inherited
from `LinearAlgebra.PosDefException`, to indicate the location of (one of)
the eigenvalue(s) which is (are) less than/equal to 0.
"""
struct BPPosDefException <: Exception
msg::String
info::LA.BlasInt
end
function Base.showerror(io::IO, ex::BPPosDefException)
print(io, "BPPosDefException: $(ex.msg)\nmatrix is not ")
if ex.info == -1
print(io, "Hermitian.")
else
print(io, "positive definite.")
end
end
"""
marginalizebelief(belief::AbstractBelief, keep_index)
marginalizebelief(h,J,g, keep_index, beliefmetadata)
marginalizebelief(h,J,g, keep_index, integrate_index, beliefmetadata)
Canonical form (h,J,g) of the input belief, after all variables except those at
indices `keep_index` have been integrated out. If we use `I` and `S` subscripts
to denote subvectors and submatrices at indices to integrate out
(I: `integrate_index`) and indices to keep (S: save for sepset, `keep_index`)
then the returned belief parameters are:
``h_S - J_{S,I} J_I^{-1} h_I``
``J_S - J_{S,I} J_I^{-1} J_{I,S}``
and
``g + (\\log|2\\pi J_I^{-1}| + h_I^{T} J_I^{-1} h_I)/2 .``
These operations fail if the Cholesky decomposition of ``J_I`` fails.
In that case, an error of type [`BPPosDefException`](@ref) is thrown
with a message about the `beliefmetadata`,
which can be handled by downstream functions.
"""
marginalizebelief(b::AbstractBelief, keepind) =
marginalizebelief(b.h, b.J, b.g[1], keepind, b.metadata)
function marginalizebelief(h,J,g::Real, keep_index, metadata)
integrate_index = setdiff(1:length(h), keep_index)
marginalizebelief(h,J,g, keep_index, integrate_index, metadata)
end
function marginalizebelief(h,J,g::Real, keep_index, integrate_index, metadata)
isempty(integrate_index) && return (h,J,g)
ni = length(integrate_index)
Ji = view(J, integrate_index, integrate_index)
Jk = view(J, keep_index, keep_index)
Jki = view(J, keep_index, integrate_index)
hi = view(h, integrate_index)
hk = view(h, keep_index)
# if Ji, Jki and hi are all ≈ 0 (e.g. traits with missing data), the Cholesky failure below would be spurious: return the kept part unchanged
ϵ = eps(eltype(J))
if all(isapprox.(Ji, 0, atol=ϵ)) && all(isapprox.(hi, 0, atol=ϵ)) && all(isapprox.(Jki, 0, atol=ϵ))
return (hk, Jk, g)
end
Ji = try # re-binds Ji
PDMat(Ji) # fails if non positive definite, e.g. Ji=0
catch pdmat_ex
if isa(pdmat_ex, LA.PosDefException)
ex = BPPosDefException("belief $metadata, integrating $(integrate_index)", pdmat_ex.info)
throw(ex)
else
rethrow(pdmat_ex)
end
end
messageJ = Jk - X_invA_Xt(Ji, Jki) # Jk - Jki Ji^{-1} Jki' without inv(Ji)
μi = Ji \ hi
messageh = hk - Jki * μi
messageg = g + (ni*log2π - LA.logdet(Ji) + LA.dot(hi, μi))/2
return (messageh, messageJ, messageg)
end
"""
integratebelief!(belief::AbstractBelief)
integratebelief(h,J,g)
(μ,g) from fully integrating the belief, that is:
``μ = J^{-1} h`` and
``g + (\\log|2\\pi J^{-1}| + h^{T} J^{-1} h)/2 .``
The first form updates `belief.μ`.
"""
function integratebelief!(b::AbstractBelief)
μ, g = integratebelief(b.h, b.J, b.g[1])
b.μ .= μ
return (μ,g)
end
function integratebelief(h,J,g)
Ji = PDMat(J) # fails if cholesky fails, e.g. if J=0
integratebelief(h,Ji,g)
end
function integratebelief(h,J::Union{LA.Cholesky{T},PDMat{T}},g::T) where T<:Real
n = length(h)
μ = J \ h
messageg = g + (n*T(log2π) - LA.logdet(J) + sum(h .* μ))/2
return (μ, messageg)
end
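# Numeric check (illustrative): with J = [1.0 0.0; 0.0 1.0] and h = [1.0, 2.0],
# integratebelief(h, J, 0.0) returns μ = [1.0, 2.0] and
# g = (2log(2π) - logdet(J) + h'μ)/2 = log(2π) + 2.5, since logdet(J) = 0.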
"""
absorbevidence!(h,J,g, dataindex, datavalues)
Absorb evidence, at indices `dataindex` and using `datavalues`.
Warnings:
- a subset of `h` is modified in place
- traits are assumed to come in the same order in `dataindex` as in `datavalues`.
"""
function absorbevidence!(h,J,g, dataindex, datavalues)
numt = length(datavalues)
length(dataindex) == numt || error("data values and indices have different numbers of traits")
hasdata = .!ismissing.(datavalues)
absorb_ind = dataindex[hasdata]
nvar = length(h)
keep_ind = setdiff(1:nvar, absorb_ind)
# index of variables with missing data, after removing variables with data:
missingdata_indices = indexin(dataindex[.!hasdata], keep_ind)
data_nm = view(datavalues, hasdata) # non-missing data values
length(absorb_ind) + length(keep_ind) == nvar ||
error("data indices go beyond belief size")
Jkk = view(J, keep_ind, keep_ind) # avoid copying
if isempty(absorb_ind)
return h, Jkk, g, missingdata_indices
end
Jk_data = view(J, keep_ind, absorb_ind) * data_nm
Ja_data = view(J, absorb_ind, absorb_ind) * data_nm
g += sum(view(h, absorb_ind) .* data_nm) - sum(Ja_data .* data_nm)/2
hk = view(h, keep_ind) .- Jk_data # modifies h in place for a subset of indices
return hk, Jkk, g, missingdata_indices
end
"""
absorbleaf!(h,J,g, rowindex, columntable)
Absorb evidence from a leaf, given in `col[rowindex]` of each column in the table,
then marginalizes out any variable for a missing trait at that leaf.
See [`absorbevidence!`](@ref) and [`marginalizebelief`](@ref).
Warning:
The leaf traits are assumed to correspond to the first variables in `h` (and `J`),
as is output by [`factor_treeedge`](@ref).
"""
function absorbleaf!(h,J,g, rowindex, tbl)
datavalues = [col[rowindex] for col in tbl]
h,J,g,missingindices = absorbevidence!(h,J,g, 1:length(datavalues), datavalues)
if !isempty(missingindices)
@debug "leaf data $(join(datavalues,',')), J=$(round.(J, digits=2)), will integrate at index $(join(missingindices,','))"
h,J,g = marginalizebelief(h,J,g, setdiff(1:length(h), missingindices), missingindices, "leaf row $rowindex")
end
return h,J,g
end
"""
propagate_belief!(cluster_to, sepset, cluster_from, residual)
Update the canonical parameters of the beliefs in `cluster_to` and in `sepset`,
by marginalizing the belief in `cluster_from` to the sepset's variable and
passing that message.
The change in sepset belief (`Δh` and `ΔJ`: new - old) is stored in `residual`.
## Degeneracy
Propagating a belief requires the `cluster_from` belief to have a
non-degenerate `J_I`: submatrix of `J` for the indices to be integrated out.
Problems arise if this submatrix has one or more 0 eigenvalues, or infinite values
(see [`marginalizebelief`](@ref)).
If so, a [`BPPosDefException`](@ref) is returned **but not thrown**.
Downstream functions should try & catch these failures, and decide how to proceed.
See [`regularizebeliefs_bycluster!`](@ref) to reduce the prevalence of degeneracies.
## Output
- `nothing` if the message was calculated with success
- a [`BPPosDefException`](@ref) object if marginalization failed. In this case,
*the error is not thrown*: downstream functions should check for failure
(and may choose to throw the output error object).
## Warnings
- only the `h`, `J` and `g` parameters are updated, not `μ`.
- Does not check that `cluster_from` and `cluster_to` are of cluster type,
or that `sepset` is of sepset type, but does check that the labels and scope
of `sepset` are included in each cluster.
"""
function propagate_belief!(
cluster_to::AbstractBelief,
sepset::AbstractBelief,
cluster_from::AbstractBelief,
residual::AbstractResidual
)
# 1. compute message: marginalize cluster_from to variables in sepset
# requires cluster_from.J[I,I] to be invertible, I = indices other than `keepind`
# marginalizebelief sends BPPosDefException otherwise.
# `keepind` can be empty (e.g. if `cluster_from` is entirely "clamped")
keepind = scopeindex(sepset, cluster_from)
h,J,g = try marginalizebelief(cluster_from, keepind)
catch ex
isa(ex, BPPosDefException) && return ex # output the exception: not thrown
rethrow(ex) # exception thrown if other than BPPosDefException
end
# calculate residual
residual.Δh .= h .- sepset.h
residual.ΔJ .= J .- sepset.J
# 2. extend message to scope of cluster_to and propagate
upind = scopeindex(sepset, cluster_to) # indices to be updated
view(cluster_to.h, upind) .+= residual.Δh
view(cluster_to.J, upind, upind) .+= residual.ΔJ
cluster_to.g[1] += g - sepset.g[1]
# 3. update sepset belief
sepset.h .= h
sepset.J .= J
sepset.g[1] = g
return nothing
end
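#= Usage sketch (variable names assumed, not from this file): since failures
are returned rather than thrown, callers should check the returned flag:
    flag = propagate_belief!(b[pa_j], b[ss_j], b[ch_j], resid)
    isnothing(flag) || @error flag.msg # BPPosDefException: message not passed
=#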
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
["MIT"] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 20624 | """
calibrate!(beliefs::ClusterGraphBelief, schedule, niterations=1;
auto::Bool=false, info::Bool=false,
verbose::Bool=true,
update_residualnorm::Bool=true,
update_residualkldiv::Bool=false)
Propagate messages in postorder then preorder for each tree in the `schedule`
list, for `niterations`. Each schedule "tree" should be a tuple
of 4 vectors as output by [`spanningtree_clusterlist`](@ref), where each vector
provides the parent/child label/index of an edge along which to pass a message,
and where these edges are listed in preorder. In particular, the parent of the
first edge is taken to be the root of the schedule tree.
Calibration is evaluated after each schedule tree is run,
and said to be reached if all message residuals have a small norm,
based on [`iscalibrated_residnorm`](@ref).
Output: `true` if calibration is reached, `false` otherwise.
Optional keyword arguments:
- `auto`: If true, then belief updates are stopped after calibration is
found to be reached.
Otherwise belief updates continue for the full number of iterations.
- `info`: If true, information is logged with the iteration number and
schedule tree index when calibration is reached.
- `verbose`: log error messages about degenerate messages
- `update_residualnorm`
- `update_residualkldiv`
See also: [`iscalibrated_residnorm`](@ref)
and [`iscalibrated_residnorm!`](@ref) for the tolerance and norm used by default,
to declare calibration for a given sepset message (in 1 direction).
"""
function calibrate!(beliefs::ClusterGraphBelief, schedule::AbstractVector,
niter::Integer=1; auto::Bool=false, info::Bool=false,
verbose::Bool=true,
update_residualnorm::Bool=true,
update_residualkldiv::Bool=false,
)
iscal = false
for i in 1:niter
for (j, spt) in enumerate(schedule)
iscal = calibrate!(beliefs, spt, verbose, update_residualnorm, update_residualkldiv)
if iscal
info && @info "calibration reached: iteration $i, schedule tree $j"
auto && return true
end
end
end
return iscal
end
"""
calibrate!(beliefs::ClusterGraphBelief, scheduletree::Tuple,
verbose::Bool=true, up_resnorm::Bool=true, up_reskldiv::Bool=false)
Propagate messages along the `scheduletree`, in postorder then preorder:
see [`propagate_1traversal_postorder!`](@ref).
Output: `true` if all message residuals have a small norm,
based on [`iscalibrated_residnorm`](@ref), `false` otherwise.
"""
function calibrate!(beliefs::ClusterGraphBelief, spt::Tuple,
verbose::Bool=true,
up_resnorm::Bool=true,
up_reskldiv::Bool=false,
)
propagate_1traversal_postorder!(beliefs, spt..., verbose, up_resnorm, up_reskldiv)
propagate_1traversal_preorder!(beliefs, spt..., verbose, up_resnorm, up_reskldiv)
return iscalibrated_residnorm(beliefs)
end
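#= Sketch, assuming `beliefs` and a clique tree `ct` were already built for `net`:
a clique tree is covered by a single spanning tree, and one postorder + preorder
sweep reaches calibration.
    spt = spanningtree_clusterlist(ct, net.nodes_changed)
    calibrate!(beliefs, [spt]) # 1 iteration over a 1-tree schedule
=#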
"""
propagate_1traversal_postorder!(beliefs::ClusterGraphBelief, spanningtree...)
propagate_1traversal_preorder!(beliefs::ClusterGraphBelief, spanningtree...)
Messages are propagated along the spanning tree, from the tips to the root by
`propagate_1traversal_postorder!` and from the root to the tips by
`propagate_1traversal_preorder!`.
The "spanning tree" should be a tuple of 4 vectors as output by
[`spanningtree_clusterlist`](@ref), meant to list edges in preorder.
Its nodes (resp. edges) should correspond to clusters (resp. sepsets) in
`beliefs`: labels and indices in the spanning tree information
should correspond to indices in `beliefs`.
This condition holds if beliefs are produced on a given cluster graph and if the
tree is produced by [`spanningtree_clusterlist`](@ref) on the same graph.
Optional positional arguments after spanning tree, in this order (default value):
- `verbose` (true): log error messages about degenerate messages that failed
to be passed.
- `update_residualnorm` (true): to update each message residual's `iscalibrated_resid`
- `update_residualkldiv` (false): to update each message residual's field
`kldiv`: KL divergence between the new and old sepset beliefs,
normalized to be considered as (conditional) distributions.
"""
function propagate_1traversal_postorder!(
beliefs::ClusterGraphBelief,
pa_lab, ch_lab, pa_j, ch_j,
verbose::Bool=true,
update_residualnorm::Bool=true,
update_residualkldiv::Bool=false,
)
b = beliefs.belief
mr = beliefs.messageresidual
# (parent <- sepset <- child) in postorder
for i in reverse(1:length(pa_lab))
ss_j = sepsetindex(pa_lab[i], ch_lab[i], beliefs)
sepset = b[ss_j]
mrss = mr[(pa_lab[i], ch_lab[i])]
flag = propagate_belief!(b[pa_j[i]], sepset, b[ch_j[i]], mrss)
if isnothing(flag)
update_residualnorm && iscalibrated_residnorm!(mrss)
update_residualkldiv && residual_kldiv!(mrss, sepset)
elseif verbose
@error flag.msg
end
end
end
function propagate_1traversal_preorder!(
beliefs::ClusterGraphBelief,
pa_lab, ch_lab, pa_j, ch_j,
verbose::Bool=true,
update_residualnorm::Bool=true,
update_residualkldiv::Bool=false,
)
b = beliefs.belief
mr = beliefs.messageresidual
# (child <- sepset <- parent) in preorder
for i in eachindex(pa_lab)
ss_j = sepsetindex(pa_lab[i], ch_lab[i], beliefs)
sepset = b[ss_j]
mrss = mr[(ch_lab[i], pa_lab[i])]
flag = propagate_belief!(b[ch_j[i]], sepset, b[pa_j[i]], mrss)
if isnothing(flag)
update_residualnorm && iscalibrated_residnorm!(mrss)
update_residualkldiv && residual_kldiv!(mrss, sepset)
elseif verbose
@error flag.msg
end
end
end
"""
calibrate_optimize_cliquetree!(beliefs::ClusterGraphBelief, clustergraph,
nodevector_preordered, tbl::Tables.ColumnTable, taxa::AbstractVector,
evolutionarymodel_name, evolutionarymodel_startingparameters)
Optimize model parameters using belief propagation along `clustergraph`,
assumed to be a clique tree for the input network, whose nodes in preorder are
`nodevector_preordered`. Optimization aims to maximize the likelihood
of the data in `tbl` at leaves in the network. The taxon names in `taxa`
should appear in the same order as they come in `tbl`.
The parameters being optimized are the variance rate(s) and prior mean(s)
at the root. The prior variance at the root is fixed.
The calibration does a postorder of the clique tree only, to get the likelihood
at the root *without* the conditional distribution at all nodes, modifying
`beliefs` in place. Therefore, if the distribution of ancestral states is sought,
an extra preorder calibration would be required.
Warning: there is *no* check that the cluster graph is in fact a clique tree.
"""
function calibrate_optimize_cliquetree!(beliefs::ClusterGraphBelief,
cgraph, prenodes::Vector{PN.Node},
tbl::Tables.ColumnTable, taxa::AbstractVector,
evomodelfun, # constructor function
evomodelparams)
spt = spanningtree_clusterlist(cgraph, prenodes)
rootj = spt[3][1] # spt[3] = indices of parents. parent 1 = root
mod = evomodelfun(evomodelparams...) # model with starting values
function score(θ) # θ: unconstrained parameters, e.g. log(σ2)
model = evomodelfun(params_original(mod, θ)...)
# reset beliefs based on factors from new model parameters
init_beliefs_assignfactors!(beliefs.belief, model, tbl, taxa, prenodes)
# no need to reset factors: free_energy not used on a clique tree
init_messagecalibrationflags_reset!(beliefs, false)
propagate_1traversal_postorder!(beliefs, spt...)
_, res = integratebelief!(beliefs, rootj) # drop conditional mean
return -res # score to be minimized (not maximized)
end
# autodiff does not currently work with ForwardDiff, ReverseDiff or Zygote,
# because they cannot differentiate array mutation, as in: view(be.h, factorind) .+= h
# consider solutions suggested here: https://fluxml.ai/Zygote.jl/latest/limitations/
# Could this cache technique be used ?
# https://github.com/JuliaDiff/ForwardDiff.jl/issues/136#issuecomment-237941790
# https://juliadiff.org/ForwardDiff.jl/dev/user/limitations/
# See PreallocationTools.jl package (below)
opt = Optim.optimize(score, params_optimize(mod), Optim.LBFGS())
loglikscore = -Optim.minimum(opt)
bestθ = Optim.minimizer(opt)
bestmodel = evomodelfun(params_original(mod, bestθ)...)
return bestmodel, loglikscore, opt
end
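#= Usage sketch, assuming `net`, `tbl` and `taxa` are available; the starting
parameters (1.0, 0) are assumed to be in (variance rate, root mean) order --
check the `UnivariateBrownianMotion` constructor:
    ct = clustergraph!(net, Cliquetree())
    b = init_beliefs_allocate(tbl, taxa, net, ct, UnivariateBrownianMotion(1.0, 0))
    beliefs = ClusterGraphBelief(b)
    bestmodel, loglik, opt = calibrate_optimize_cliquetree!(beliefs, ct,
        net.nodes_changed, tbl, taxa, UnivariateBrownianMotion, (1.0, 0))
=#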
function calibrate_optimize_cliquetree_autodiff!(bufferbeliefs::GeneralLazyBufferCache,
cgraph, prenodes::Vector{PN.Node},
tbl::Tables.ColumnTable, taxa::AbstractVector,
evomodelfun, # constructor function
evomodelparams)
spt = spanningtree_clusterlist(cgraph, prenodes)
rootj = spt[3][1] # spt[3] = indices of parents. parent 1 = root
mod = evomodelfun(evomodelparams...) # model with starting values
#=
TODO: externalize the cache to avoid re-allocation (as done here)?
Or define the cache inside the function?
Note that the second option needs `net` in the arguments.
lbc = PreallocationTools.GeneralLazyBufferCache(function (paramOriginal)
model = evomodelfun(paramOriginal...)
belief = init_beliefs_allocate(tbl, taxa, net, cgraph, model);
return ClusterGraphBelief(belief)
end)
=#
#=
TODO: GeneralLazyBufferCache is the "laziest" solution from PreallocationTools
there might be more efficient solutions using lower level caches.
=#
# score function using cache
function score(θ) # θ: unconstrained parameters, e.g. log(σ2)
paramOriginal = params_original(mod, θ)
model = evomodelfun(paramOriginal...)
dualBeliefs = bufferbeliefs[paramOriginal]
# reset beliefs based on factors from new model parameters
init_beliefs_assignfactors!(dualBeliefs.belief, model, tbl, taxa, prenodes)
# no need to reset factors: free_energy not used on a clique tree
init_messagecalibrationflags_reset!(dualBeliefs, false)
propagate_1traversal_postorder!(dualBeliefs, spt...)
_, res = integratebelief!(dualBeliefs, rootj) # drop conditional mean
return -res # score to be minimized (not maximized)
end
# optim using autodiff
od = OnceDifferentiable(score, params_optimize(mod); autodiff = :forward);
opt = Optim.optimize(od, params_optimize(mod), Optim.LBFGS())
loglikscore = -Optim.minimum(opt)
bestθ = Optim.minimizer(opt)
bestmodel = evomodelfun(params_original(mod, bestθ)...)
return bestmodel, loglikscore, opt
end
"""
calibrate_optimize_clustergraph!(beliefs::ClusterGraphBelief, clustergraph,
nodevector_preordered, tbl::Tables.ColumnTable, taxa::AbstractVector,
evolutionarymodel_name, evolutionarymodel_startingparameters,
max_iterations=100)
Same as [`calibrate_optimize_cliquetree!`](@ref) above, except that the user can
supply an arbitrary `clustergraph` (including a clique tree) for the input
network. Optimization aims to maximize the [`factored_energy`](@ref) approximation
to the ELBO for the log-likelihood of the data
(which is also the negative Bethe [`free_energy`](@ref)).
When `clustergraph` is a clique tree, the factored energy approximation is exactly
equal to the ELBO and the log-likelihood.
Cluster beliefs are regularized using [`regularizebeliefs_bycluster!`](@ref)
(other options are likely to be available in future versions) before calibration.
The calibration repeatedly loops through a minimal set of spanning trees (see
[`spanningtrees_clusterlist`](@ref)) that covers all edges in the cluster
graph, and does a postorder-preorder traversal for each tree. The loop runs till
calibration is detected or till `max_iterations` have passed, whichever occurs
first.
"""
function calibrate_optimize_clustergraph!(beliefs::ClusterGraphBelief,
cgraph, prenodes::Vector{PN.Node},
tbl::Tables.ColumnTable, taxa::AbstractVector,
evomodelfun, # constructor function
evomodelparams, maxiter::Integer=100)
sch = spanningtrees_clusterlist(cgraph, prenodes)
mod = evomodelfun(evomodelparams...) # model with starting values
function score(θ)
model = evomodelfun(params_original(mod, θ)...)
init_beliefs_assignfactors!(beliefs.belief, model, tbl, taxa, prenodes)
init_factors_frombeliefs!(beliefs.factor, beliefs.belief)
init_messagecalibrationflags_reset!(beliefs)
regularizebeliefs_bycluster!(beliefs, cgraph)
calibrate!(beliefs, sch, maxiter, auto=true)
return free_energy(beliefs)[3] # to be minimized
end
opt = Optim.optimize(score, params_optimize(mod), Optim.LBFGS())
iscalibrated_residnorm(beliefs) ||
@warn "calibration was not reached. increase maxiter ($maxiter) or use a different cluster graph?"
fenergy = -Optim.minimum(opt)
bestθ = Optim.minimizer(opt)
bestmodel = evomodelfun(params_original(mod, bestθ)...)
return bestmodel, fenergy, opt
end
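#= Sketch: same interface on a loopy cluster graph, e.g. Bethe (assuming
`beliefs` was allocated for `cg`); the objective is then the factored energy,
an approximation to the log-likelihood:
    cg = clustergraph!(net, Bethe())
    bestmodel, fenergy, opt = calibrate_optimize_clustergraph!(beliefs, cg,
        net.nodes_changed, tbl, taxa, UnivariateBrownianMotion, (1.0, 0), 50)
=#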
"""
calibrate_exact_cliquetree!(beliefs::ClusterGraphBelief,
schedule,
nodevector_preordered,
tbl::Tables.ColumnTable, taxa::AbstractVector,
evolutionarymodel_name)
For a Brownian Motion with a fixed root, compute the maximum likelihood estimate
of the prior mean at the root and the restricted maximum likelihood (REML)
estimate of the variance/covariance rate matrix
using analytical formulas relying on belief propagation,
using the data in `tbl` at leaves in the network.
These estimates are for the model with a prior variance of 0 at the root,
that is, a root state equal to the prior mean.
output: `(bestmodel, loglikelihood_score)`
where `bestmodel` is an evolutionary model created by `evolutionarymodel_name`,
containing the estimated model parameters.
assumptions:
- `taxa` should list the taxon names in the same order in which they come in the
rows of `tbl`.
- `schedule` should provide a schedule to transmit messages between beliefs
in `beliefs` (containing clusters first then sepsets). This schedule is
assumed to traverse a clique tree for the input phylogeny,
with the root cluster containing the root of the phylogeny in its scope.
- `nodevector_preordered` should list the nodes in this phylogeny, in preorder.
- `beliefs` should be of size and scope consistent with `evolutionarymodel_name`
and data in `tbl`.
- a leaf should either have complete data, or be missing data for all traits.
Steps:
1. Calibrate `beliefs` in place according to the `schedule`, under a model
with an infinite prior variance at the root.
2. Estimate parameters analytically.
3. Re-calibrate `beliefs`, to calculate the maximum log-likelihood of the
fixed-root model at the estimated optimal parameters, again modifying
`beliefs` in place. (Except that beliefs with the root node in scope are
re-assigned to change their scoping dimension.)
Warning: there is *no* check that the beliefs and schedule are consistent
with each other.
"""
function calibrate_exact_cliquetree!(beliefs::ClusterGraphBelief{B},
spt, # node2belief may be needed if pre & post calibrations are moved outside
prenodes::Vector{PN.Node},
tbl::Tables.ColumnTable, taxa::AbstractVector,
evomodelfun # constructor function
) where B<:Belief{T} where T
evomodelfun ∈ (UnivariateBrownianMotion, MvFullBrownianMotion) ||
error("Exact optimization is only implemented for the univariate or full Brownian Motion.")
p = length(tbl)
# check for "clean" data at the tips: data at 0 or all p traits
function clean(v) s = sum(v); s==0 || s==p; end
for ic in 1:nclusters(beliefs)
b = beliefs.belief[ic]
all(map(clean, eachslice(inscope(b), dims=2))) ||
error("some leaf must have partial data: cluster $(b.metadata) has partial traits in scope")
end
## calibrate beliefs using infinite root and identity rate variance
calibrationparams = evomodelfun(LA.diagm(ones(p)), zeros(p), LA.diagm(repeat([Inf], p)))
init_beliefs_allocate_atroot!(beliefs.belief, beliefs.factor, beliefs.messageresidual, calibrationparams) # in case root status changed
node2belief = init_beliefs_assignfactors!(beliefs.belief, calibrationparams, tbl, taxa, prenodes)
# no need to reset factors: free_energy not used on a clique tree
init_messagecalibrationflags_reset!(beliefs, false)
calibrate!(beliefs, [spt])
## Compute μ hat from root belief
rootj = spt[3][1] # spt[3] = indices of parents. parent 1 = root
exp_root, _ = integratebelief!(beliefs, rootj)
mu_hat = exp_root[scopeindex((1,), beliefs.belief[rootj])]
## Compute σ² hat from conditional moments
tmp_num = zeros(T, p, p)
tmp_den = zero(T)
for i in 2:length(prenodes) # loop over non-root nodes (1=root in pre-order)
nodechild = prenodes[i]
clusterindex = node2belief[i] # index of cluster to which its node family factor was assigned
b = beliefs.belief[clusterindex]
dimclus = length(b.nodelabel)
childind = findfirst(lab == i for lab in b.nodelabel) # child index in cluster
# parents should all be in the cluster, if node2belief is valid
all_parent_edges = PN.Edge[]; parind = Int[] # parent(s) indices in cluster
edge_length = zero(T) # parent edge length if 1 parent; sum of γ² t over all parent edges
all_gammas = zeros(T, dimclus)
for ee in nodechild.edge
getchild(ee) === nodechild || continue # skip below if child edge
push!(all_parent_edges, ee)
pn = getparent(ee) # parent node
pi = findfirst(prenodes[j].name == pn.name for j in b.nodelabel)
push!(parind, pi)
all_gammas[pi] = ee.gamma
edge_length += ee.gamma * ee.gamma * ee.length
end
edge_length == 0.0 && continue # 0 length => variance parameter absent from factor
# moments
exp_be, _ = integratebelief!(b)
vv = inv(b.J)
if nodechild.leaf # tip node
# there should be only 1 parent
length(parind) == 1 || error("leaf $(nodechild.name) does not have 1 parent...")
inscope_i_pa = scopeindex(parind[1], b)
isempty(inscope_i_pa) && continue # no data at or below: do nothing
# find tip data
# TODO later: replace tbl,taxa by tipvalue, to avoid re-constructing it over and over
i_row = findfirst(isequal(nodechild.name), taxa)
!isnothing(i_row) || error("leaf $(nodechild.name) is missing from the data's list of taxa")
tipvalue = [tbl[v][i_row] for v in eachindex(tbl)]
diffExp = view(exp_be, inscope_i_pa) - tipvalue
tmp_num += diffExp * transpose(diffExp) ./ edge_length
# assumes that vv is proportional to the (co)variance rate matrix R
tmp_den += 1 - vv[1, 1] / edge_length
else # internal node
inscope_i_ch = scopeindex(childind, b)
isempty(inscope_i_ch) && continue # no data at or below: do nothing
begic = inscope_i_ch[1]
diffExp = view(exp_be, inscope_i_ch) # init with child node
diffVar = vv[begic, begic]
# sum over parent nodes
inscope_i_pa = [scopeindex(j, b) for j in parind]
for (j1, j1_ii) in zip(parind, inscope_i_pa)
# exp and covar with child
diffExp -= all_gammas[j1] .* view(exp_be, j1_ii)
diffVar -= 2 * all_gammas[j1] * vv[begic, j1_ii[1]]
# parents var covar
for (j2, j2_ii) in zip(parind, inscope_i_pa)
diffVar += all_gammas[j1] * all_gammas[j2] * vv[j1_ii[1], j2_ii[1]]
end
end
tmp_num += diffExp * transpose(diffExp) ./ edge_length
tmp_den += 1 - diffVar / edge_length
end
end
sigma2_hat = tmp_num ./ tmp_den
## Get optimal parameters
bestθ = (sigma2_hat, mu_hat, zeros(T, p, p)) # zero variance at the root: fixed
bestmodel = evomodelfun(bestθ...)
## Get associated likelihood
loglikscore = NaN
init_beliefs_allocate_atroot!(beliefs.belief, beliefs.factor, beliefs.messageresidual, bestmodel)
init_beliefs_assignfactors!(beliefs.belief, bestmodel, tbl, taxa, prenodes)
init_messagecalibrationflags_reset!(beliefs, false)
calibrate!(beliefs, [spt])
_, loglikscore = integratebelief!(beliefs, rootj)
return bestmodel, loglikscore
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
["MIT"] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 45932 | @inline function vgraph_eltype(net::HybridNetwork)
nn = 2max(length(net.node), length(net.edge))
(nn < typemax(Int8) ? Int8 : (nn < typemax(Int16) ? Int16 : Int))
end
@enum EdgeType etreetype=1 ehybridtype=2 moralizedtype=3 filltype=4
"""
preprocessnet!(net::HybridNetwork, prefix="I")
Create or update the pre-ordering of nodes in `net` using `PhyloNetworks.preorder!`,
then name unnamed internal nodes, with names starting with `prefix`.
Nodes in phylogenetic networks need to have names to build cluster graphs,
in which a cluster contains network nodes. Pre-ordering is also used to traverse
the network for building cluster graphs.
See [`clustergraph!`](@ref).
"""
function preprocessnet!(net::HybridNetwork, prefix="I")
PN.preorder!(net)
PN.nameinternalnodes!(net, prefix)
end
"""
moralize!(net::HybridNetwork, prefix="I")
moralize(net)
Undirected graph `g` of type [MetaGraph](https://github.com/JuliaGraphs/MetaGraphsNext.jl)
with the same nodes as in `net`, labelled by their names in `net`, with extra
edges to moralize the graph, that is, to connect any nodes with a common child.
Node data, accessed as `g[:nodelabel]`, is their index in the network's preordering.
Edge data, accessed as `g[:label1, :label2]` is a type to indicate if the edge
was an original tree edge or hybrid edge, or added to moralize the graph.
Another type, not used here, is for fill edges that may need to be added to
triangulate `g` (make it chordal).
The first version modifies `net` to name its internal nodes (used as labels in `g`)
and to create or update its node preordering, then calls the second version.
"""
function moralize!(net::HybridNetwork, prefix="I")
preprocessnet!(net, prefix)
moralize(net)
end
@doc (@doc moralize!) moralize
function moralize(net::HybridNetwork)
T = vgraph_eltype(net)
mg = MetaGraph(Graph{T}(0), # simple graph
Symbol, # label type: Symbol(original node name)
T, # vertex data type: store postorder
EdgeType, # edge data type
:moralized, # graph data
edge_data -> one(T), # weight function
zero(T))
# add vertices in preorder, which saves their index to access them in net.
sym2code = Dict{Symbol,T}()
for (code,n) in enumerate(net.nodes_changed)
ns = Symbol(n.name)
vt = T(code)
push!(sym2code, ns => vt)
add_vertex!(mg, ns, vt)
end
for e in net.edge
et = (e.hybrid ? ehybridtype : etreetype)
add_edge!(mg, Symbol(getparent(e).name), Symbol(getchild(e).name), et)
end
# moralize
for n in net.node
n.hybrid || continue
plab = [Symbol(node.name) for node in getparents(n)] # parent labels
npar = length(plab)
for (i1,p1) in enumerate(plab), i2 in (i1+1):npar
p2 = plab[i2]
has_edge(mg.graph, sym2code[p1], sym2code[p2]) && continue
add_edge!(mg, p1, p2, moralizedtype)
end
end
return mg
end
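#= Sketch (hypothetical topology string): moralizing a network with one hybrid
node connects the hybrid's two parents.
    net = PN.readTopology("((A:1,(B:0.5)#H1:0.5::0.6):1,(#H1:0.5::0.4,C:1):1);")
    g = moralize!(net) # also names internal nodes I1, I2, ... and preorders net
    g[:A] # preorder index of node A
=#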
"""
triangulate_minfill!(graph)
Ordering for node elimination, chosen to greedily minimize the number of fill edges
necessary to eliminate the node (to connect all its neighbors with each other).
Ties are broken by favoring the post-ordering of nodes.
`graph` is modified with these extra fill edges, making it chordal.
"""
function triangulate_minfill!(graph::AbstractGraph{T}) where T
ordering = typeof(label_for(graph, one(T)))[]
g2 = deepcopy(graph)
fe = Tuple{T,T}[] # to reduce memory allocation
scorefun = v -> (min_fill!(fe,v,g2), - g2[label_for(g2,v)]) # break ties using post-ordering
while nv(g2) > 1
i = argmin(scorefun, vertices(g2))
# add fill edges in both graph and g2, then delete i from g2
filledges!(fe, T(i), g2)
for (v1,v2) in fe
l1 = label_for(g2,v1); l2 = label_for(g2,v2)
add_edge!(g2, l1, l2, filltype)
add_edge!(graph, l1, l2, filltype)
end
lab = label_for(g2,i)
push!(ordering, lab)
delete!(g2, lab)
end
push!(ordering, label_for(g2,one(T)))
return ordering
end
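#= Sketch: typical pipeline from network to chordal graph + elimination order:
    g = moralize!(net)
    elim_order = triangulate_minfill!(g) # g gains fill edges; labels returned in elimination order
=#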
function min_fill!(fe, vertex_code, graph::AbstractGraph)
filledges!(fe, vertex_code, graph)
return length(fe)
end
function filledges!(fe, vertex_code, graph::AbstractGraph)
empty!(fe)
neighb = inneighbors(graph, vertex_code)
nn = length(neighb)
for (i1,n1) in enumerate(neighb), i2 in (i1+1):nn
n2 = neighb[i2]
has_edge(graph, n1,n2) || push!(fe, (n1,n2))
end
return nothing
end
"""
nodefamilies(net)
Vector `v` with elements of type `Vector{T}`.
`v[i]` first lists `i`, the preorder index of node `net.nodes_changed[i]`,
followed by the preorder index of all of this node's parents in `net`,
sorted in decreasing order. Due to pre-ordering,
all of the parents' indices are listed after the node (their child) index.
A given node and its parents is called a "node family".
**Warning**: `net` is assumed preordered, see [`preprocessnet!`](@ref) and
[`PhyloNetworks.preorder!`](https://crsl4.github.io/PhyloNetworks.jl/latest/lib/public/#PhyloNetworks.preorder!).
"""
function nodefamilies(net::HybridNetwork)
T = vgraph_eltype(net)
prenodes = net.nodes_changed
node2family = Vector{Vector{T}}(undef, length(prenodes))
for (code, n) in enumerate(prenodes)
o = sort!(indexin(getparents(n), prenodes), rev=true)
pushfirst!(o, code)
node2family[code] = o # then node2family[code][1] = code
end
return node2family
end
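#= e.g. on a preordered network: nodefamilies(net)[1] == [1] for the root (no
parent); a tree node of preorder index i gives [i, parent index] with
parent index < i; a hybrid node gives [i, parent indices...] in decreasing order.
=#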
"""
isfamilypreserving(clusters, net)
Tuple `(ispreserving, isfamily_incluster)`:
1. `ispreserving`: true (false) if `clusters` is (is not) family-preserving
with respect to `net`, that is: if each node family (a node and all of its parents)
in `net` is contained in at least 1 cluster in `clusters`.
`clusters` should be a vector, where each element describes one cluster,
given as a vector of preorder indices. Index `i` corresponds to node number `i`
in `net` according to the node pre-ordering: `net.nodes_changed[i]`.
2. `isfamily_incluster`: vector of `BitVector`s. `isfamily_incluster[i][j]` is
true (false) if the family of node `i` is (is not) fully contained in cluster `j`.
`i` is taken as the preorder index of a node in `net`.
`ispreserving` is true if every element (bit vector) in `isfamily_incluster`
contains at least 1 true value.
**Warning**: assumes that `net` is preordered, see
[`PhyloNetworks.preorder!`](https://crsl4.github.io/PhyloNetworks.jl/latest/lib/public/#PhyloNetworks.preorder!).
See also [`nodefamilies`](@ref) to get node families.
"""
function isfamilypreserving(clusters::Vector{Vector{T}},
net::HybridNetwork) where {T <: Integer}
node2family = nodefamilies(net) # vectors of type vgraph_eltype(net)
isfamilyincluster = Vector{BitVector}(undef, length(node2family))
for nf in node2family
ch = nf[1] # preorder index of family's child. also: node2family[ch] = nf
isfamilyincluster[ch] = BitArray(nf ⊆ cl for cl in clusters)
end
ifp = all(any.(isfamilyincluster)) # *every* node family is in *some* cluster
return (ifp, isfamilyincluster)
end
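#= Sketch: node families themselves always form a family-preserving set of clusters:
    clusters = nodefamilies(net)
    ispreserving, _ = isfamilypreserving(clusters, net) # ispreserving: true
=#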
"""
check_runningintersection(clustergraph, net)
Vector of tuples. Each tuple is of the form `(nodelabel, istree)`, where
`nodelabel::Symbol` is the label of a node in `net` and `istree` is true (false)
if the node's cluster subgraph is (is not) a tree.
This "cluster subgraph" for a given node is the subgraph of `clustergraph`
induced by the clusters containing the node and by the edges whose sepset
contain the node: see [`nodesubtree`](@ref).
`clustergraph` satisfies the generalized *running-intersection* property if
`istree` is true for all nodes in `net`.
**Warning**:
- assumes that `net` has been preordered, and
- does *not* check if `clustergraph` has been correctly constructed.
"""
function check_runningintersection(clustergraph::MetaGraph, net::HybridNetwork)
res = Tuple{Symbol, Bool}[]
for (nod_ind, n) in enumerate(net.nodes_changed)
nodelab = Symbol(n.name)
sg, _ = nodesubtree(clustergraph, nodelab, nod_ind)
push!(res, (nodelab, is_tree(sg)))
end
return res
end
"""
nodesubtree(clustergraph::MetaGraph, node_symbol)
nodesubtree(clustergraph::MetaGraph, node_symbol, node_preorderindex)
MetaGraph subgraph of `clustergraph` induced by the clusters and sepsets containing
the node labelled `node_symbol` (of specified preorder index if known, not checked).
If `clustergraph` satisfies the generalized running-intersection property,
then this subgraph should be a tree.
"""
function nodesubtree(cgraph::MetaGraph, ns::Symbol)
# find 1 cluster containing the node of interest
clu = findfirst(ns ∈ cgraph[clab][1] for clab in labels(cgraph))
isnothing(clu) && error("no cluster with node labelled $ns")
# find preorder index of node of interest: stored in cluster data
n1data = cgraph[label_for(cgraph,clu)]
node_ind = n1data[2][findfirst(isequal(ns), n1data[1])]
nodesubtree(cgraph, ns, node_ind)
end
function nodesubtree(cgraph::MetaGraph, ns::Symbol, node_ind)
# indices of clusters containing the node of interest
clusters_i = findall(ns ∈ cgraph[clab][1] for clab in labels(cgraph))
isempty(clusters_i) && error("no cluster with node labelled $ns")
sg, vmap = induced_subgraph(cgraph, clusters_i)
# in subgraph, delete any edge whose sepset lacks node_ind, the node's preorder index
for e in edge_labels(sg)
if node_ind ∉ sg[e...] # edge data = vector of nodes preorder indices
delete!(sg, e...)
end
end
return sg, vmap
end
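#= Sketch (cluster graph `cg` and node label assumed): extract one node's induced
subgraph, e.g. to check the running-intersection property locally:
    sg, vmap = nodesubtree(cg, :I1)
    is_tree(sg) # true if cg satisfies the property for node I1
=#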
"""
AbstractClusterGraphMethod
Abstract type for cluster graph construction algorithms.
"""
abstract type AbstractClusterGraphMethod end
getclusters(obj::AbstractClusterGraphMethod) =
hasfield(typeof(obj), :clusters) ? obj.clusters : nothing
getmaxclustersize(obj::AbstractClusterGraphMethod) =
hasfield(typeof(obj), :maxclustersize) ? obj.maxclustersize : nothing
"""
Bethe
Subtype of [`AbstractClusterGraphMethod`](@ref).
## Algorithm
A Bethe cluster graph (also known as factor graph) has:
- a factor-cluster `{v, parents(v}}` for each node-family in the network, that is,
for each non-root node `v` (a family is a child node and all of its parents)
* with one exception: if `v`'s family is included in another family,
then no factor-cluster is created for `v`.
- a variable-cluster `{v}` for each non-leaf node `v` in the network,
or more specifically, for each node `v` that belongs in more than 1 factor.
Each variable-cluster `{v}` is joined to the factor-clusters that contain `v`,
by an edge labelled with sepset `{v}`.
## References
D. Koller and N. Friedman.
*Probabilistic graphical models: principles and techniques*.
MIT Press, 2009. ISBN 9780262013192.
"""
struct Bethe <: AbstractClusterGraphMethod end
"""
LTRIP{T<:Integer}
Subtype of [`AbstractClusterGraphMethod`](@ref).
A HybridNetwork and a valid LTRIP are passed to [`clustergraph!`](@ref) to
construct a cluster graph from the user-provided clusters based on the *Layered
Trees Running Intersection Property* algorithm of Streicher & du Preez (2017).
## Fieldnames
- clusters: vector of clusters, required to be family-preserving
with respect to some HybridNetwork -- see [`isfamilypreserving`](@ref).
Within each cluster, nodes (identified by their preorder index in the network)
are required to be sorted in decreasing order (for postorder)
## Constructors
- `LTRIP(net)`: uses [`nodefamilies(net)`](@ref) as input clusters,
which are guaranteed to be family-preserving
- `LTRIP(clusters, net)`: checks that the clusters provided are family-preserving,
then sorts each cluster in decreasing order (modifying them in place!)
before creating the LTRIP object.
They assume, *with no check*, that `net` already has a preordering.
## Algorithm
1. An initial graph G is considered, in which each input cluster is a node.
An edge (C1,C2) is added if clusters C1 and C2 share at least 1 node (in `net`).
The weight of edge (C1,C2) is defined as the size of the intersection C1 ∩ C2.
2. For each node `n` in `net`,
the subgraph of G induced by the clusters containing `n`, G_n, has its
weights adjusted as follows:
* the edges of maximum weight (within G_n) are identified, then
* the weight of each edge is increased by the number of max-weight edges
that either of its endpoints is adjacent to.
Then, LTRIP finds a maximum-weight spanning tree of G_n. The edges of this
tree are all labelled with `{n}` (or its label or preorder index).
3. The spanning trees for each node are layered on one another to form a cluster
graph. In other words, an edge (C1,C2) is added if it is present in any
spanning tree. If so, its sepset is the union of its labels across the
different spanning trees.
## References
S. Streicher and J. du Preez. Graph Coloring: Comparing Cluster Graphs to Factor
Graphs. In *Proceedings of the ACM Multimedia 2017 Workshop on South African
Academic Participation*, pages 35-42, 2017.
doi: [10.1145/3132711.3132717](https://doi.org/10.1145/3132711.3132717).
"""
struct LTRIP{T<:Integer} <: AbstractClusterGraphMethod
clusters::Vector{Vector{T}}
end
function LTRIP(net::HybridNetwork)
clusters = nodefamilies(net)
return LTRIP(clusters)
end
function LTRIP(clusters::Vector{Vector{T}}, net::HybridNetwork) where {T <: Integer}
isfamilypreserving(clusters, net)[1] ||
error("`clusters` is not family preserving with respect to `net`")
for cl in clusters
issorted(cl, rev=true) || sort!(cl, rev=true)
end
return LTRIP(clusters)
end
"""
JoinGraphStructuring
Subtype of [`AbstractClusterGraphMethod`](@ref).
## Fieldnames
- `maxclustersize`: upper limit for cluster size.
This value must be at least the size of the largest node family in
the input phylogenetic network, normally 3 if the network is bicombining
(each hybrid node has 2 parents, never more). See [`nodefamilies(net)`](@ref).
## Constructors
- `JoinGraphStructuring(maxclustersize, net)`:
checks that the input `maxclustersize` is valid for `net`
## Algorithm, by Mateescu et al. (2010)
Requires:
- a user-specified maximum cluster size
- an elimination order for the nodes in the HybridNetwork, hopefully yielding
a small induced-width, e.g. from a heuristic such as greedy min-fill
(see [`triangulate_minfill!(graph)`](@ref)).
1. Each node in `net` labels a "bucket", and these buckets are ordered according
to the elimination order, e.g. the highest-priority bucket is labelled by the
first node in the elimination order.
2. Node families are assigned to buckets based on the highest-priority node they
contain. For example, if {1,4,9} forms a node family and if node 4 is
higher in the elimination order than nodes 1 or 9, then {1,4,9} gets assigned
to bucket 4.
3. Node families are clusters (of nodes), and we refer to the clusters within a
bucket as "minibuckets". Minibuckets within a bucket can be merged as long as
the size of their union does not exceed the maximum cluster size allowed.
Sometimes, this can be done in multiple ways.
4. Starting with the highest-priority bucket, we create new minibuckets by
"marginalizing out" the bucket label from each existing minibucket
(these are left unchanged in the process).
5. Each new minibucket is joined to its "originator" minibucket by an edge that
is labeled by their intersection (i.e. the variables in the new minibucket).
Each new minibucket is then reassigned to a new (and necessarily lower-priority)
bucket based on its highest priority node. Merging can take place during
reassignment as long as the maximum cluster size is respected.
The union of 2 minibuckets retains the edges of each minibucket.
6. Steps 4 & 5 are carried out for each bucket in order of priority.
7. The resulting minibuckets in each bucket are then joined in a chain (there is
some degree of freedom for how this can be done),
where each edge is labelled by the bucket label.
## References
R. Mateescu, K. Kask, V.Gogate, and R. Dechter. Join-graph propagation algorithms.
*Journal of Artificial Intelligence Research*, 37:279-328, 2010
doi: [10.1613/jair.2842](https://doi.org/10.1613/jair.2842).
"""
struct JoinGraphStructuring <: AbstractClusterGraphMethod
maxclustersize::Integer
end
function JoinGraphStructuring(maxclustersize::Integer, net::HybridNetwork)
maxindegree = maximum(n -> length(getparents(n)), net.hybrid; init=1) # init=1: net may be a tree, with no hybrid node
maxclustersize ≥ (maxindegree + 1) ||
error("maxclustersize $maxclustersize is smaller than the size of largest node family $(maxindegree+1).")
return JoinGraphStructuring(maxclustersize)
end
"""
Cliquetree
Subtype of [`AbstractClusterGraphMethod`](@ref).
## Algorithm
1. [`moralize`](@ref) the network (connect partners that share a child).
2. triangulate the resulting undirected graph using greedy min-fill,
see [`triangulate_minfill!(graph)`](@ref).
3. extract the maximal cliques of the resulting chordal graph.
4. calculate the edge weight between each pair of maximal cliques as the size of
their intersection
5. find a maximum-weight spanning tree
6. label the retained edges (in the spanning tree) by the intersection of
the two cliques they connect.
## References
D. Koller and N. Friedman.
*Probabilistic graphical models: principles and techniques*.
MIT Press, 2009. ISBN 9780262013192.
"""
struct Cliquetree <: AbstractClusterGraphMethod end
"""
clustergraph!(net, method)
clustergraph(net, method)
Cluster graph `U` for an input network `net` and a `method` of cluster graph
construction. The following methods are supported:
- [`Bethe`](@ref)
- [`LTRIP`](@ref)
- [`JoinGraphStructuring`](@ref)
- [`Cliquetree`](@ref)
The first method pre-processes `net`, which may modify it in place,
see [`preprocessnet!`](@ref).
The second method assumes that `net` is already pre-processed.
"""
function clustergraph!(net::HybridNetwork, method::AbstractClusterGraphMethod)
preprocessnet!(net)
return clustergraph(net, method)
end
@doc (@doc clustergraph!) clustergraph
clustergraph(net::HybridNetwork, ::Bethe) = betheclustergraph(net)
clustergraph(net::HybridNetwork, method::LTRIP) = ltripclustergraph(net, method)
clustergraph(net::HybridNetwork, method::JoinGraphStructuring) =
joingraph(net, JoinGraphStructuring(getmaxclustersize(method), net))
function clustergraph(net::HybridNetwork, ::Cliquetree)
g = moralize(net)
triangulate_minfill!(g)
return cliquetree(g)
end
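#= Usage sketch: the 4 implemented constructions, on the same network:
    ct = clustergraph!(net, Cliquetree()) # also preprocesses net
    bg = clustergraph(net, Bethe()) # net now already preprocessed
    lg = clustergraph(net, LTRIP(net)) # clusters default to node families
    jg = clustergraph(net, JoinGraphStructuring(3, net)) # clusters of ≤ 3 nodes
    all(last.(check_runningintersection(ct, net))) # true for a clique tree
=#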
"""
betheclustergraph(net)
See [`Bethe`](@ref)
"""
function betheclustergraph(net::HybridNetwork)
T = vgraph_eltype(net)
clustergraph = init_clustergraph(T, :Bethe)
node2cluster = Dict{T, Tuple{Symbol, Vector{Symbol}}}() # for joining clusters later
node2code = Dict{T,T}() # node preorder index -> code of family(node) in cluster graph
code = zero(T)
prenodes = net.nodes_changed
prenodes_names = [Symbol(n.name) for n in prenodes]
# add a factor-cluster for each non-root node
for noi in reverse(eachindex(prenodes)) # postorder: see fam(h) before fam(p) in case fam(p) ⊆ fam(h)
n = prenodes[noi]
vt = T(noi)
o = sort!(indexin(getparents(n), prenodes), rev=true) # for postorder
nodeind = pushfirst!(T.(o), vt) # preorder indices of nodes in factor
nodesym = prenodes_names[nodeind] # node symbol of nodes in factor
# (nodesym, nodeind) = factor-cluster data, its nodes listed in postorder
length(nodeind) > 1 || continue # skip the root
# if n's family ⊆ another family: would have in one of its children's
isfamilysubset = false
for ch in getchildren(n)
ch_code = node2code[findfirst(x -> x===ch, prenodes)]
if nodeind ⊆ clustergraph[label_for(clustergraph, ch_code)][2]
isfamilysubset = true
node2code[noi] = ch_code # family(n) assigned to its child's cluster
break # do not check other children's families
end
end
isfamilysubset && continue # to next node: do *not* create a new cluster in graph
factorCname = Symbol(nodesym...) # factor-cluster name: label in metagraph
code += one(T)
node2code[noi] = code
add_vertex!(clustergraph, factorCname, (nodesym, nodeind))
for (nns, nni) in zip(nodesym, nodeind)
if haskey(node2cluster, nni)
push!(node2cluster[nni][2], factorCname)
else node2cluster[nni] = (nns, [factorCname])
end
end
end
# add a variable-cluster for each non-leaf node, and its adjacent edges (sepsets)
for ni in sort!(collect(keys(node2cluster)), rev=true) # add nodes in postorder
ns, clusterlist = node2cluster[ni]
length(clusterlist) > 1 || continue # skip leaves: in only 1 factor-cluster
add_vertex!(clustergraph, ns, ([ns], [ni]))
for lab in clusterlist # lab: factor-cluster name, for each factor that contains the node
add_edge!(clustergraph, ns, lab, [ni]) # sepset: singleton {ni}
end
end
return clustergraph
end
"""
ltripclustergraph(net, method::LTRIP)
See [`LTRIP`](@ref)
"""
function ltripclustergraph(net::HybridNetwork, method::LTRIP)
T = vgraph_eltype(net)
clustg = init_clustergraph(T, :ltrip) # 'clustergraph' is a function already
cg = MetaGraph(Graph{T}(0), # auxiliary graph to hold connection weights
Symbol, # vertex label
Tuple{Vector{Symbol}, Vector{T}}, # vertex data: nodes in cluster
T, # edge data holds edge weight
:connectionweights, # tag for the whole graph
edge_data -> edge_data,
zero(T)) # default weight
node2cluster = Dict{T, Vector{T}}() # for joining clusters later
clusters = getclusters(method)
prenodes_names = [Symbol(n.name) for n in net.nodes_changed]
# build nodes in clustg and in auxiliary cg
for (code, nodeindlist) in enumerate(clusters) # nodeindlist assumed sorted, decreasing
cdat = prenodes_names[nodeindlist]
cname = Symbol(cdat...)
# cluster data: (node labels, node preorder index), sorted in postorder
add_vertex!(clustg, cname, (cdat, nodeindlist))
add_vertex!(cg, cname, (cdat, nodeindlist))
for ni in nodeindlist
if haskey(node2cluster, ni)
push!(node2cluster[ni], T(code))
else node2cluster[ni] = [T(code)]
end
end
# add edges in auxiliary cg: to calculate intersection size only once
for code2 in 1:(code-1)
c2name = label_for(cg, code2)
c2nodeindlist = clusters[code2]
w = length(intersect(nodeindlist, c2nodeindlist))
w > 0 && add_edge!(cg, cname, c2name, w)
end
end
clustweight = Dict(lab => 0 for lab in labels(cg)) # clusters score in cg's subgraph sg
for ni in sort!(collect(keys(node2cluster)), rev=true)
# sepsets will be sorted by nodes' postorder
clusterindlist = node2cluster[ni]
# build subgraph sg of auxiliary cg, then adjust its edge weights
sg, _ = induced_subgraph(cg, clusterindlist)
ne(sg) > 0 || continue # e.g leaves: only 1 cluster, no edge to add
maxw = maximum(sg[e...] for e in edge_labels(sg))
for e in edge_labels(sg)
if sg[e...] == maxw # if a sepset edge has maximum weight, then
clustweight[e[1]] += 1 # add 1 to each adjacent cluster's score
clustweight[e[2]] += 1
end
end # separate loop below: adjust weights *after* testing for max weight
for e in edge_labels(sg)
sg[e...] += clustweight[e[1]] + clustweight[e[2]]
end
# reset clustweight before next cluster
for cl in keys(clustweight) clustweight[cl]=0; end
mst_edges = kruskal_mst(sg, minimize=false) # mst: maximum spanning tree
# augment sepsets in clustg, based on edges in max spanning tree
for e in mst_edges
lab1 = label_for(sg, src(e))
lab2 = label_for(sg, dst(e))
if haskey(clustg, lab1, lab2) # if (lab1, lab2) is an edge
clustg[lab1, lab2] = push!(clustg[lab1, lab2], ni) # augment sepset
else # create the edge, initialize sepset
add_edge!(clustg, lab1, lab2, [ni])
end
end
end
return clustg
end
"""
joingraph(net, method::JoinGraphStructuring)
See [`JoinGraphStructuring`](@ref)
"""
function joingraph(net::HybridNetwork, method::JoinGraphStructuring)
g = moralize(net)
ordering = triangulate_minfill!(g) # node labels in elimination order
T = vgraph_eltype(net)
# preorder indices sorted in elimination order
eliminationorder2preorder = [g[ns] for ns in ordering]
#= steps 1-3: initialize `buckets` and their minibuckets
i: elimination order for the node labeling the bucket
buckets[i][1]: node symbol
buckets[i][2]: dictionary minibucket size => minibucket(s)
=#
buckets = Dict{T, Tuple{Symbol, Dict{T, Vector{Vector{T}}}}}(
i => (ns, Dict()) for (i, ns) in enumerate(ordering)
)
# node families (represented as vectors of preorder indices) = initial factors
node2family = nodefamilies(net)
maxclustersize = T(getmaxclustersize(method)) # size limit for minibuckets
cg = init_clustergraph(T, :auxiliary)
for nf in node2family
# minibucket: node family, represented then sorted by elimination order
mb = Vector{T}(indexin(nf, eliminationorder2preorder))
sort!(mb)
# first node has lowest elimination order: highest priority
bi = mb[1] # assign `mb` to bucket corresponding to this first node
di = buckets[bi][2] # dictionary minibucket size => minibucket(s)
# merge `mb` with existing minibucket or add as new minibucket, depending on max size
assign!(di, mb, maxclustersize)
end
# steps 4-6: marginalize the bucket label from each minibucket
for i in eachindex(ordering)
_, bd = buckets[i] # bucket symbol, bucket dictionary
# @info "node $i, bd has $(sum(length(mb) for mb in values(bd))) minibuckets"
bi = eliminationorder2preorder[i] # preorder index of bucket labeling node
previous_mb_label = nothing
for minibuckets in values(bd), mb in minibuckets
# create cluster in `cg` corresponding to minibucket `mb`
nodeindlist = eliminationorder2preorder[mb]
o = sortperm(nodeindlist, rev=true)
nodeindlist .= nodeindlist[o]
vdat = ordering[mb][o]
lab = Symbol(vdat...)
add_vertex!(cg, lab, (vdat, nodeindlist)) # nothing happens if cg already has lab
# chain minibuckets: sepset = bucket labeling node, whose preorder is bi
isnothing(previous_mb_label) || # connect `mb` to chain of current bucket
# sepset is [bi] even if minibuckets have a larger intersection
add_edge!(cg, previous_mb_label, lab, [bi])
previous_mb_label = lab # `mb` becomes new tail of "chain"
# new minibucket: marginalize bucket labeling node bi
mb_new = copy(mb)
popfirst!(mb_new)
isempty(mb_new) && continue # below: mb_new is not empty
#= Assign to bucket labeled by highest priority element mb_new[1]:
merge mb_new with an existing minibucket (mb2) to get mb1.
- if possible: great. It may be that mb1=mb2, if mb_new is a subset of mb2.
- else: mb1=mb_new and mb2 is empty.
=#
(mb1, mb2) = assign!(buckets[mb_new[1]][2], mb_new, maxclustersize)
# create cluster corresponding to marginalized & merged minibucket
nodeindlist1 = eliminationorder2preorder[mb1]
o1 = sortperm(nodeindlist1, rev=true)
nodeindlist1 .= nodeindlist1[o1]
vdat1 = ordering[mb1][o1]
lab1 = Symbol(vdat1...)
add_vertex!(cg, lab1, (vdat1, nodeindlist1)) # nothing happens if cg already has lab1
# connect mb to mb1 in cluster graph cg
add_edge!(cg, lab, lab1, filter(ni -> ni != bi, nodeindlist))
# if mb2 ⊂ mb1 strictly and if mb2 was already a cluster in cg
# then contract mb1-mb2, leaving mb1 only
if length(mb1) != length(mb2) # mb1 ≠ mb2 bc mb2 ⊆ mb1
o2 = sortperm(eliminationorder2preorder[mb2], rev=true)
vdat2 = ordering[mb2][o2]
lab2 = Symbol(vdat2...)
if haskey(cg, lab2) # then replace mb2 by mb1:
for labn in neighbor_labels(cg, lab2) # connect mb2's neighbors to mb1
add_edge!(cg, lab1, labn, cg[lab2, labn])
end
delete!(cg, lab2) # delete mb2 from cg
end
end
end
end
return cg
end
"""
assign!(bucket, new_minibucket, max_minibucket_size)
Merge `new_minibucket` with one of the minibuckets contained in `bucket`
(in order of decreasing size) subject to the constraint that the resulting
minibucket does not exceed `max_minibucket_size`. If this is not possible,
then `new_minibucket` is added to `bucket` as a new minibucket.
The bucket should be represented as a dictionary:
minibucket_size => vector of minibuckets of that size,
where each minibucket is itself represented as a vector of indices.
Output:
- (`resulting_minibucket`, `minibucket_merged_into`) if a successful merge is found
- (`new_minibucket`, []) otherwise
"""
function assign!(bucket::Dict{T, Vector{Vector{T}}},
new::Vector{T}, maxsize::T) where {T <: Integer}
for sz in sort(collect(keys(bucket)), rev=true) # favor merging with large minibuckets
minibuckets = bucket[sz] # minibuckets of size `sz`
for (i, mb) in enumerate(minibuckets)
merged = sort(union(new, mb))
mergedsz = length(merged)
if mergedsz ≤ maxsize
popat!(minibuckets, i) # remove minibucket being merged with
isempty(minibuckets) && pop!(bucket, sz) # remove any empty size categories
# insert result of merge into appropriate size category
if haskey(bucket, mergedsz)
push!(bucket[mergedsz], merged)
else
bucket[mergedsz] = [merged]
end
return (merged, mb)
end
end
end
# no merging: insert `new` to `bucket`
sz = length(new)
if haskey(bucket, sz)
push!(bucket[sz], new)
else
bucket[sz] = [new]
end
return (new, T[])
end
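#= Toy example: a bucket holding one minibucket [1,2], with maximum size 3:
    bucket = Dict(2 => [[1, 2]])
    assign!(bucket, [2, 3], 3) # merge: returns ([1,2,3], [1,2]); bucket: Dict(3 => [[1,2,3]])
    assign!(bucket, [4, 5], 3) # no merge fits: returns ([4,5], Int[]); [4,5] added under size 2
=#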
"""
cliquetree(chordal_graph)
Clique tree `U` for an input graph `g` assumed to be chordal (triangulated),
e.g. using [`triangulate_minfill!`](@ref).
**Warning**: does *not* check that the graph is already triangulated.
- Each node in `U` is a maximal clique of `g` whose data is the tuple of vectors
(node_labels, node_data) using the labels and data from `g`, with nodes sorted
by decreasing data.
If `g` was originally built from a phylogenetic network using [`moralize`](@ref),
then the nodes' data are their preorder index, making them sorted in postorder
within each clique.
The clique label is the concatenation of the node labels.
- For each edge (clique1, clique2) in `U`, the edge data hold the sepset
(separating set) information as a vector of node data, for nodes shared by
both clique1 and clique2. In this sepset, nodes are sorted by decreasing data.
Uses `maximal_cliques` and `kruskal_mst` (for min/maximum spanning trees) from
[Graphs.jl](https://juliagraphs.org/Graphs.jl/stable/).
"""
function cliquetree(graph::AbstractGraph{T}) where T
mc = maximal_cliques(graph)
mg = init_clustergraph(T, :cliquetree)
node2clique = Dict{T,Vector{T}}() # to connect cliques faster later
for (code, cl) in enumerate(mc)
nodeindlist = [graph[label_for(graph,u)] for u in cl] # preorder index
o = sortperm(nodeindlist, rev=true) # order to list nodes in postorder
vdat = [label_for(graph,cl[i]) for i in o]
nodeindlist .= nodeindlist[o]
# clique data: (node labels, node preorder index), sorted in postorder
add_vertex!(mg, Symbol(vdat...), (vdat, nodeindlist))
for ni in nodeindlist
if haskey(node2clique, ni)
push!(node2clique[ni], T(code))
else node2clique[ni] = [T(code)]
end
end
end
# create edges between pairs of cliques sharing the same node
for node in sort!(collect(keys(node2clique)), rev=true) # sepsets will be sorted by nodes' postorder
cliquelist = node2clique[node]
# add node to the sepset between each pair of cliques that has that node
for (i1, cl1) in enumerate(cliquelist)
lab1 = label_for(mg, cl1)
for i2 in 1:(i1-1)
cl2 = cliquelist[i2]
lab2 = label_for(mg, cl2)
if has_edge(mg, cl1, cl2)
elabs = MetaGraphsNext.arrange(mg, lab1, lab2)
haskey(mg.edge_data, elabs) || error("hmm, mg.graph has the edge, but mg has no edge data")
push!(mg.edge_data[elabs], node)
else
add_edge!(mg, lab1, lab2, [node])
end
end
end
end
#= alternate way to create edges between cliques
# would be faster for a complex graph with many nodes but few (large) cliques.
for cl1 in vertices(mg)
lab1 = label_for(mg, cl1)
ni1 = mg[lab1][2] # node indices: preorder index of nodes in the clique
for cl2 in Base.OneTo(cl1 - one(T))
lab2 = label_for(mg, cl2)
ni2 = mg[lab2][2]
sepset = intersect(ni1, ni2) # would be nice to take advantage of the fact that ni1 and ni2 are both sorted (descending)
isempty(sepset) && continue
add_edge!(mg, lab1, lab2, sepset)
end
end =#
# find maximum spanning tree: with maximum sepset sizes
mst_edges = kruskal_mst(mg, minimize=false)
# delete the other edges to get the clique tree
# complication: edge iterator is invalidated by the first edge deletion
todelete = setdiff!(collect(edges(mg)), mst_edges)
for e in todelete
rem_edge!(mg, src(e), dst(e))
end
return mg
end
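#= Sketch: full clique-tree pipeline from a (preprocessed) network:
    g = moralize!(net)
    triangulate_minfill!(g)
    ct = cliquetree(g) # tree whose vertices are the maximal cliques of g
=#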
"""
init_clustergraph(T::Type{<:Integer}, clustergraph_method::Symbol)
`MetaGraph` with an empty base graph (0 vertices, 0 edges),
meta-graph data `clustergraph_method`,
edge-weight function counting the length of the edge-data vector,
and the following types:
- vertex indices in the base graph: `T`
- vertex labels: `Symbol`
- vertex data: `Tuple{Vector{Symbol}, Vector{T}}`
to hold information about the variables (nodes in phylogenetic network)
in the cluster (vertex in cluster graph):
node names as symbols, and node preorder indices
- edge data: `Vector{T}` to hold information about the sepset:
preorder index of nodes in the sepset.
See packages [MetaGraphsNext](https://juliagraphs.org/MetaGraphsNext.jl/dev/)
and [Graphs](https://juliagraphs.org/Graphs.jl/dev/).
The empty graph above is of type `Graphs.SimpleGraphs.SimpleGraph{T}`:
undirected, with vertex indices of type `T`. After addition of `n` vertices,
the vertex indices range from 1 to `n`, technically in `Base.OneTo{T}(n)`.
"""
function init_clustergraph(T::Type{<:Integer}, method::Symbol)
clustergraph = MetaGraph(
Graph{T}(0),
Symbol, # vertex label
Tuple{Vector{Symbol}, Vector{T}}, # vertex data: nodes in cluster
Vector{T}, # edge data: nodes in sepset
method, # tag for the whole graph
edge_data -> T(length(edge_data)),
zero(T)) # default weight
return clustergraph
end
function get_nodesymbols2index(clustergraph::MetaGraph)
Dict(ns => ni
for l in labels(clustergraph)
for (ns, ni) in zip(clustergraph[l][1], clustergraph[l][2]) )
end
"""
spanningtree_clusterlist(clustergraph, root_index)
spanningtree_clusterlist(clustergraph, nodevector_preordered)
Build the depth-first search spanning tree of the cluster graph, starting from
the cluster indexed `root_index` in the underlying simple graph;
find the associated topological ordering of the clusters (preorder); then
return a tuple of these four vectors:
1. `parent_labels`: labels of the parent clusters. The first one is the root.
2. `child_labels`: labels of the child clusters, listed in preorder, excluding
   the cluster chosen to be the root.
3. `parent_indices`: indices of the parent clusters
4. `child_indices`: indices of the child clusters, listed in preorder as before.
In the second version in which `root_index` is not provided, the root of the
spanning tree is chosen to be a cluster that contains the network's root. If
multiple clusters contain the network's root, then one is chosen containing the
smallest number of taxa: see [`default_rootcluster`](@ref).
"""
function spanningtree_clusterlist(cgraph::MetaGraph, prenodes::Vector{PN.Node})
rootj = default_rootcluster(cgraph, prenodes)
spanningtree_clusterlist(cgraph, rootj)
end
function spanningtree_clusterlist(cgraph::MetaGraph, rootj::Integer)
par = dfs_parents(cgraph.graph, rootj)
spt = Graphs.tree(par) # or directly: spt = dfs_tree(cgraph.graph, rootj)
# spt.fadjlist # forward adjacency list: sepsets, but edges not indexed
childclust_j = topological_sort(spt)[2:end] # clusters in preorder, excluding the root cluster
parentclust_j = par[childclust_j] # parent of each cluster in spanning tree
childclust_lab = [cgraph.vertex_labels[j] for j in childclust_j]
parentclust_lab = [cgraph.vertex_labels[j] for j in parentclust_j]
return parentclust_lab, childclust_lab, parentclust_j, childclust_j
end
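#= Sketch: the 4 output vectors list each spanning-tree edge, in preorder:
    pa_lab, ch_lab, pa_j, ch_j = spanningtree_clusterlist(cg, net.nodes_changed)
    pa_lab[1] # label of the cluster used as the root of the spanning tree
=#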
"""
spanningtrees_clusterlist(clustergraph, nodevector_preordered)
Vector of spanning trees for `clustergraph`, that together cover all edges.
Each spanning tree is specified as a tuple of 4 vectors describing a
depth-first search traversal of the tree, starting from a cluster that contains
the network's root, as in [`spanningtree_clusterlist`](@ref).
The spanning trees are iteratively obtained using Kruskal's minimum-weight
spanning tree algorithm, with edge weights defined as the number of previous
trees covering the edge.
"""
function spanningtrees_clusterlist(cgraph::MetaGraph{T}, prenodes::Vector{PN.Node}) where T
# graph with same clusters and edges, but different edge data & edge weights
cg = MetaGraph(Graph{T}(0), Symbol, Tuple{Vector{Symbol}, Vector{T}},
T, # edge data type: edge data = number of spanning trees containing the edge
:edgeweights, # graph tag: hold edge weights to compute min spanning tree
edge_data -> edge_data, # edge data holds edge weight
typemax(T)) # default weight for absent edges
# copy clusters (same order) and edges from cgraph, but set edge data to 0
for l in labels(cgraph) add_vertex!(cg, l, cgraph[l]); end
for (l1,l2) in edge_labels(cgraph) add_edge!(cg, l1, l2, 0); end
# schedule: initialize vector of spanning trees
sched = Tuple{Vector{Symbol}, Vector{Symbol}, Vector{T}, Vector{T}}[]
# repeat until every edge has data > 0: used in ≥1 spanning tree
while any(cg[l1,l2] == 0 for (l1,l2) in edge_labels(cg))
mst_edges = kruskal_mst(cg) # vector of edges in min spanning tree
sg, vmap = induced_subgraph(cg, mst_edges) # spanning tree as `metagraph`
spt = spanningtree_clusterlist(sg, prenodes)
spt[3] .= vmap[spt[3]] # code i in `sg` maps to code vmap[i] in `cg`
spt[4] .= vmap[spt[4]]
push!(sched, spt)
# update edge data: +1 for each edge in the spanning tree, to prioritize
# edges unused (or rarely used) so far in future spanning trees
for e in mst_edges
parentlab = label_for(cg, src(e))
childlab = label_for(cg, dst(e))
cg[parentlab, childlab] += 1
end
end
return sched
end
"""
nodesubtree_clusterlist(clustergraph::MetaGraph, nodesymbol)
Spanning tree of the subgraph of `clustergraph` induced by the clusters and
sepsets that contain the node labelled `nodesymbol`, see [`nodesubtree`](@ref).
If `clustergraph` satisfies the generalized running-intersection property,
then this subgraph should be a tree anyway, but this is not assumed:
a spanning tree is extracted regardless.
Output: tuple of 4 vectors describing a depth-first search traversal of the tree,
starting from a cluster containing the node of smallest preorder index, as
determined by [`default_rootcluster`](@ref).
Each element in this tuple is a vector: see [`spanningtree_clusterlist`](@ref).
"""
function nodesubtree_clusterlist(cgraph::MetaGraph, ns::Symbol)
sg, vmap = nodesubtree(cgraph, ns)
# to root sg: pick a cluster containing a node with smallest preorder index
rootj = default_rootcluster(sg)
spt = spanningtree_clusterlist(sg, rootj)
# map cluster indices back to those for `cgraph`
spt[3] .= vmap[spt[3]]
spt[4] .= vmap[spt[4]]
return spt
end
# """
# minimal_valid_schedule(clustergraph, clusterswithevidence)
# Generate a minimal valid schedule of messages to be computed on an initialized
# Bethe cluster graph, so that any schedule of messages following is valid.
# Return the schedule as a tuple of four vectors: (`parent_labels`, `child_labels`,
# `parent_indices`, `child_indices`) as in [`spanningtree_clusterlist`](@ref).
# """
# function minimal_valid_schedule(cgraph::MetaGraph, wevidence::Vector{Symbol})
# !isempty(wevidence) || error("`wevidence` cannot be empty")
# #= `received` tracks clusters that have received evidence (through messages
# during initialization). Only clusters that have received evidence can
# transmit this (such clusters get added to `cansend`) to neighbor clusters
# through a message. =#
# received = Set{Symbol}(wevidence)
# cansend = copy(wevidence)
# T = typeof(cgraph[cansend[1]][2][1])
# childclust_j = T[] # child cluster indices
# parentclust_j = T[] # parent cluster indices
# childclust_lab = Symbol[] # child cluster labels
# parentclust_lab = Symbol[] # parent cluster labels
# while !isempty(cansend)
# #= For each cluster in `cansend`, send a message to any neighbors that
# have not received evidence (all such neighbors get added to `cansend`),
# then remove it from `cansend`. Since the cluster graph represented by
# `cgraph` is connected, all clusters will eventually be added to `cansend`
# and processed in order. Hence, this generates a minimal sequence of
# messages that can be computed, so that the updated cluster beliefs will
# be non-degenerate wrt any messages they are allowed to compute (i.e.
# any schedule of messages following is valid). =#
# cl = popfirst!(cansend) # remove node to be processed
# nb = neighbor_labels(cgraph, cl)
# for cl2 in nb
# if cl2 ∉ received
# push!(childclust_j, code_for(cgraph, cl2))
# push!(parentclust_j, code_for(cgraph, cl))
# push!(childclust_lab, cl2)
# push!(parentclust_lab, cl)
# push!(received, cl2) # `cl2` can no longer receive messages
# push!(cansend, cl2) # `cl2` can now send messages
# end
# end
# end
# return parentclust_lab, childclust_lab, parentclust_j, childclust_j
# end
"""
default_rootcluster(clustergraph, nodevector_preordered)
Index of a cluster that contains the network's root, whose label is assumed to
be `1` (preorder index). If multiple clusters contain the network's root,
then one is chosen with the smallest number of taxa (leaves in the network).
For cluster with label `:lab`, its property `clustergraph[:lab][2]`
should list the nodes in the cluster, by the index of each node in
`nodevector_preordered` such that `1` corresponds to the network's root.
Typically, this vector is `net.nodes_changed` after the network is preordered.
"""
function default_rootcluster(cgraph::MetaGraph, prenodes::Vector{PN.Node})
hasroot = lab -> begin # Inf if the cluster does not contain the root 1
nodelabs = cgraph[lab][2] # number of taxa in the cluster otherwise
(1 ∈ nodelabs ? sum(prenodes[i].leaf for i in nodelabs) : Inf)
end
rootj = argmin(hasroot(lab) for lab in labels(cgraph))
return rootj
end
"""
default_rootcluster(clustergraph)
Index of a cluster that contains the node with the smallest preorder index.
If multiple clusters contain that node, then one is chosen that *only* has
that node. If all clusters containing that node have more than 1 node,
then a cluster is chosen containing a node with the second-smallest preorder index.
For cluster with label `:lab`, its property `clustergraph[:lab][2]`
should list the nodes in the cluster by their preorder index,
sorted in decreasing order (so the smallest is at the end).
"""
function default_rootcluster(cgraph::MetaGraph)
i0 = minimum(cgraph[lab][2][end] for lab in labels(cgraph)) # smallest preorder index
rootscore = lab -> begin # Inf if the cluster does not contain i0
nodelabs = cgraph[lab][2] # second smallest index otherwise
(i0 ∈ nodelabs ?
(length(nodelabs)==1 ? 0 : nodelabs[end-1]) :
Inf)
end
rootj = argmin(rootscore(lab) for lab in labels(cgraph))
return rootj
end
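#= Worked example (sketch): suppose 3 clusters store these preorder-index
vectors, each sorted decreasingly: :A => [3,1], :B => [1], :C => [5,4].
Then i0 = 1 (smallest index overall); rootscore is 3 for :A (its second-smallest
index), 0 for :B (single-node cluster containing i0), Inf for :C (no node 1):
cluster :B is chosen as the root cluster. =#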
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 16908 | """
ClusterGraphBelief{B<:Belief, F<:FamilyFactor, M<:MessageResidual}
Structure to hold a vector of beliefs, with cluster beliefs coming first and
sepset beliefs coming last. Fields:
- `belief`: vector of beliefs
- `factor`: vector of initial cluster beliefs after factor assignment
- `nclusters`: number of clusters
- `cdict`: dictionary to get the index of a cluster belief from its node labels
- `sdict`: dictionary to get the index of a sepset belief from the labels of
its two incident clusters
- `messageresidual`: dictionary to log information about sepset messages,
which can be used to track calibration or help adaptive scheduling with
residual BP. See [`MessageResidual`](@ref).
The keys of `messageresidual` are tuples of cluster labels, similar to a
sepset's metadata. Each edge in the cluster graph has 2 messages corresponding
to the 2 directions in which a message can be passed, with keys:
`(label1, label2)` and `(label2, label1)`.
The cluster receiving the message is the first label in the tuple,
and the sending cluster is the second.
Assumptions:
- For a cluster belief, the cluster's nodes are stored in the belief's `metadata`.
- For a sepset belief, its incident clusters' nodes are in the belief's metadata.
"""
struct ClusterGraphBelief{B<:Belief, F<:FamilyFactor, M<:MessageResidual}
"vector of beliefs, cluster beliefs first and sepset beliefs last"
belief::Vector{B}
"""vector of initial factors from the graphical model, one per cluster.
Each node family defines the conditional probability of the node
given its parents, and the root has its prior probability.
Each such density is assigned to exactly 1 cluster.
A cluster belief can be assigned 0, 1 or more such densities.
Cluster beliefs are modified during belief propagation, but factors are not.
They are useful to approximate the likelihood by the factored energy."""
factor::Vector{F}
"number of clusters"
nclusters::Int
"dictionary: cluster label => cluster index"
cdict::Dict{Symbol,Int}
"dictionary: cluster neighbor labels => sepset index"
sdict::Dict{Set{Symbol},Int}
"dictionary: message labels (cluster_to, cluster_from) => residual information"
messageresidual::Dict{Tuple{Symbol,Symbol}, M}
end
nbeliefs(obj::ClusterGraphBelief) = length(obj.belief)
nclusters(obj::ClusterGraphBelief) = obj.nclusters
nsepsets(obj::ClusterGraphBelief) = nbeliefs(obj) - nclusters(obj)
function Base.show(io::IO, b::ClusterGraphBelief)
disp = "beliefs for $(nclusters(b)) clusters and $(nsepsets(b)) sepsets.\nclusters:\n"
for (k, v) in b.cdict
disp *= " $(rpad(k,10)) => $v\n"
end
disp *= "sepsets:\n"
for (k, v) in b.sdict
disp *= " $(rpad(join(k,", "),20)) => $v\n"
end
print(io, disp)
end
clusterindex(c, obj::ClusterGraphBelief) = clusterindex(c, obj.cdict)
function clusterindex(clusterlabel, clusterdict)
clusterdict[clusterlabel]
end
sepsetindex(c1, c2, obj::ClusterGraphBelief) = sepsetindex(c1, c2, obj.sdict)
function sepsetindex(clustlabel1, clustlabel2, sepsetdict)
sepsetdict[Set((clustlabel1, clustlabel2))]
end
"""
ClusterGraphBelief(belief_vector::Vector{B})
Constructor of a `ClusterGraphBelief` with belief `belief_vector` and all other
fields constructed accordingly. New memory is allocated for these other fields,
e.g. for factors (with data copied from cluster beliefs) and message residuals
(with data initialized to 0 but of size matching that from sepset beliefs).
To construct the input vector of beliefs, see [`init_beliefs_allocate`](@ref)
and [`init_beliefs_assignfactors!`](@ref).
"""
function ClusterGraphBelief(beliefs::Vector{B}) where B<:Belief
i = findfirst(b -> b.type == bsepsettype, beliefs)
nc = (isnothing(i) ? length(beliefs) : i - 1)
all(beliefs[i].type == bclustertype for i in 1:nc) ||
error("clusters are not consecutive")
all(beliefs[i].type == bsepsettype for i in (nc+1):length(beliefs)) ||
error("sepsets are not consecutive")
cdict = get_clusterindexdictionary(beliefs, nc)
sdict = get_sepsetindexdictionary(beliefs, nc)
mr = init_messageresidual_allocate(beliefs, nc)
factors = init_factors_allocate(beliefs, nc)
return ClusterGraphBelief{B,eltype(factors),valtype(mr)}(beliefs,factors,nc,cdict,sdict,mr)
end
function get_clusterindexdictionary(beliefs, nclusters)
Dict(beliefs[j].metadata => j for j in 1:nclusters)
end
function get_sepsetindexdictionary(beliefs, nclusters)
Dict(Set(beliefs[j].metadata) => j for j in (nclusters+1):length(beliefs))
end
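#= Construction sketch (hypothetical labels), after beliefs are allocated and
factors assigned (see the constructor's docstring):
    beliefs = ClusterGraphBelief(belief_vector)
    nclusters(beliefs), nsepsets(beliefs)
    ci = clusterindex(:H5i4, beliefs)       # index of one cluster belief
    si = sepsetindex(:H5i4, :I3i4, beliefs) # index of the sepset between 2 clusters
=#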
"""
init_beliefs_reset_fromfactors!(beliefs::ClusterGraphBelief)
Reset cluster beliefs to existing factors, and sepset beliefs to h=0, J=0, g=0.
This is not used so far, as changing model parameters requires a reset of both
factors and beliefs, done by [`init_beliefs_assignfactors!`](@ref).
"""
function init_beliefs_reset_fromfactors!(beliefs::ClusterGraphBelief)
nc, nb = nclusters(beliefs), length(beliefs.belief)
b, f = beliefs.belief, beliefs.factor
for i in 1:nc
b[i].h .= f[i].h
b[i].J .= f[i].J
b[i].g[1] = f[i].g[1]
end
for i in (nc+1):nb
b[i].h .= 0.0
b[i].J .= 0.0
b[i].g[1] = 0.0
end
end
"""
init_messagecalibrationflags_reset!(beliefs::ClusterGraphBelief, reset_kl=true)
Reset all non-empty message residuals in `beliefs`.
"""
function init_messagecalibrationflags_reset!(beliefs::ClusterGraphBelief, reset_kl=true)
for m in values(beliefs.messageresidual)
init_messagecalibrationflags_reset!(m, reset_kl)
end
end
"""
iscalibrated_residnorm(beliefs::ClusterGraphBelief)
iscalibrated_kl(beliefs::ClusterGraphBelief)
True if all edges in the cluster graph have calibrated messages in both directions,
in that their latest message residuals have norm close to 0 (`residnorm`)
or KL divergence close to 0 between the message received and prior sepset belief.
False if not all edges have calibrated messages.
This condition is sufficient but not necessary for calibration.
Calibration was determined for each individual message residual by
[`iscalibrated_residnorm!`](@ref) and [`iscalibrated_kl!`](@ref) using some
tolerance value.
"""
iscalibrated_residnorm(cb::ClusterGraphBelief) =
all(x -> iscalibrated_residnorm(x), values(cb.messageresidual))
iscalibrated_kl(cb::ClusterGraphBelief) =
all(x -> iscalibrated_kl(x), values(cb.messageresidual))
"""
integratebelief!(obj::ClusterGraphBelief, beliefindex)
integratebelief!(obj::ClusterGraphBelief)
integratebelief!(obj::ClusterGraphBelief, clustergraph, nodevector_preordered)
`(μ,g)` from fully integrating the object belief indexed `beliefindex`. This
belief is modified, with its `belief.μ`'s values updated to those in `μ`.
The second method uses the first sepset containing a single node. This is valid
if the beliefs are fully calibrated (including a pre-order traversal), but
invalid otherwise.
The third method uses the default cluster containing the root,
see [`default_rootcluster`](@ref). This is valid if the same cluster was used
as the root of the cluster graph, if this graph is a clique tree, and after
a post-order traversal to start the calibration.
"""
function integratebelief!(obj::ClusterGraphBelief, cgraph::MetaGraph, prenodes)
integratebelief!(obj, default_rootcluster(cgraph, prenodes))
end
integratebelief!(b::ClusterGraphBelief) = integratebelief!(b, default_sepset1(b))
integratebelief!(b::ClusterGraphBelief, j::Integer) = integratebelief!(b.belief[j])
# first sepset containing a single node
default_sepset1(b::ClusterGraphBelief) = default_sepset1(b.belief, nclusters(b) + 1)
function default_sepset1(beliefs::AbstractVector, n::Integer)
j = findnext(b -> length(nodelabels(b)) == 1, beliefs, n)
isnothing(j) && error("no sepset with a single node") # should not occur: degree-1 taxa
return j
end
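#= Usage sketch (hypothetical variables): on a clique tree calibrated with a
post-order traversal rooted at `default_rootcluster(cgraph, net.nodes_changed)`,
    (μ, g) = integratebelief!(beliefs, cgraph, net.nodes_changed)
returns in `g` the log normalization constant, e.g. the log-likelihood of the
data under the model used to assign the factors. =#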
"""
regularizebeliefs_bycluster!(beliefs::ClusterGraphBelief, clustergraph)
regularizebeliefs_bycluster!(beliefs::ClusterGraphBelief, clustergraph, cluster_label)
Modify beliefs of cluster graph by adding positive values to some diagonal
elements of precision matrices `J`, while preserving the full graphical model
(product of cluster beliefs over product of sepset beliefs,
invariant during belief propagation) so that all beliefs are non-degenerate.
This regularization could be done after initialization with
[`init_beliefs_assignfactors!`](@ref) for example.
The goal is that at each later step of belief propagation, the sending cluster
has a non-degenerate (positive definite) precision matrix for the variables to be
integrated, so that the message to be sent is well-defined (i.e. can be computed)
and positive semidefinite.
## Algorithm
For each cluster Ci (or only for 1 cluster, labeled `cluster_label`):
1. Find a regularization parameter adaptively for that cluster:
ϵ = maximum absolute value of all entries in Ci's precision matrix J, and
of the machine epsilon.
Then loop through its incident edges:
2. For each neighbor cluster Cj and associated sepset Sij,
add ϵ > 0 to the diagonal entries of Ci's precision matrix `J`
corresponding to the traits in Sij.
3. To preserve the graphical model's joint distribution for the full set of
variables (invariant during BP), the same ϵ is added to each diagonal entry
of Sij's precision matrix.
"""
function regularizebeliefs_bycluster!(beliefs::ClusterGraphBelief, cgraph::MetaGraph)
for lab in labels(cgraph)
regularizebeliefs_bycluster!(beliefs, cgraph, lab)
end
end
function regularizebeliefs_bycluster!(beliefs::ClusterGraphBelief{B},
cgraph::MetaGraph, clusterlab) where B<:Belief{T} where T
b = beliefs.belief
cluster_to = b[clusterindex(clusterlab, beliefs)] # receiving-cluster
ϵ = max(eps(T), maximum(abs, cluster_to.J)) # regularization constant
for nblab in neighbor_labels(cgraph, clusterlab)
sepset = b[sepsetindex(clusterlab, nblab, beliefs)]
regularizebeliefs_1clustersepset!(cluster_to, sepset, ϵ)
end
end
"""
regularizebeliefs_1clustersepset!(cluster::AbstractBelief, sepset::AbstractBelief, ϵ)
Modify the beliefs of 1 cluster and 1 sepset, assumed to be neighbors in a
cluster graph (such that the sepset's scope is a subset of the cluster's scope),
by adding ϵ to all diagonal elements of the sepset precision matrix `J`
and to the corresponding diagonal elements of the cluster precision,
so as to preserve the full graphical model.
Used by
[`regularizebeliefs_bycluster!`](@ref) and
[`regularizebeliefs_onschedule!`](@ref).
"""
function regularizebeliefs_1clustersepset!(cluster::AbstractBelief, sepset::AbstractBelief, ϵ)
upind = scopeindex(sepset, cluster) # indices to be updated
isempty(upind) && return
ΔJ = ϵ*LA.I(length(upind))
view(cluster.J, upind, upind) .+= ΔJ # regularize cluster precision
sepset.J .+= ΔJ # preserve the graph invariant
return
end
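#= Why the graphical model is preserved (sketch): the joint density is the
product of cluster beliefs divided by the product of sepset beliefs, and
canonical forms multiply by *adding* their (h, J, g) parameters. Adding ϵ to
the sepset's diagonal J entries and the same ϵ to the matching diagonal entries
of the cluster's J adds the same quadratic term to numerator and denominator,
so it cancels and the joint distribution is unchanged. =#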
"""
regularizebeliefs_bynodesubtree!(beliefs::ClusterGraphBelief, clustergraph)
Modify beliefs of cluster graph by adding positive values to some diagonal
elements of precision matrices `J`, while preserving the full graphical model
(product of cluster beliefs over product of sepset beliefs,
invariant during belief propagation) so that all beliefs are non-degenerate.
The goal is the same as [`regularizebeliefs_bycluster!`](@ref) and
[`regularizebeliefs_onschedule!`](@ref), but the algorithm is different.
## Algorithm
For each node (or variable) v:
1. Consider the subgraph T of clusters & edges that have v. If `clustergraph`
has the generalized running-intersection property, this subgraph is a tree.
2. Root T at a cluster containing a node with the largest preorder index.
3. Find a regularization parameter adaptively for that node:
ϵ = maximum of the machine epsilon and of the absolute values of all entries
in the precision matrices J of the clusters Ci in T.
4. For each trait j, find the subtree Tj of clusters and sepsets that
have trait j of node v in their scope.
5. For each cluster and sepset in Tj, except at its cluster root:
add ϵ on the diagonal of their belief precision matrix `J` corresponding to
trait j of node v.
6. Check that graphical model invariant is preserved, that is: for each trait j,
ϵ was added to the same number of clusters as number of sepsets.
"""
function regularizebeliefs_bynodesubtree!(beliefs::ClusterGraphBelief, cgraph::MetaGraph)
for (node_symbol, node_ind) in get_nodesymbols2index(cgraph)
regularizebeliefs_bynodesubtree!(beliefs, cgraph, node_symbol, node_ind)
end
end
function regularizebeliefs_bynodesubtree!(beliefs::ClusterGraphBelief{B},
cgraph::MetaGraph, node_symbol, node_ind) where B<:Belief{T} where T
b = beliefs.belief
sg, _ = nodesubtree(cgraph, node_symbol, node_ind)
nv(sg) <= 1 && return nothing
is_tree(sg) || error("running intersection violated for node / variable $node_symbol")
rootj = argmax(sg[l][2][1] for l in labels(sg)) # cluster with largest preorder index
# degree(sg, rootj) == 1 || @warn "cluster $(label_for(sg, rootj)) not a leaf in subtree for $node_symbol, I had hoped so."
spt = spanningtree_clusterlist(sg, rootj) # ! cluster indices refer to those in sg, not cgraph
ϵ = eps(T)
for l in labels(sg)
ϵ = max(ϵ, maximum(abs, b[clusterindex(l, beliefs)].J))
end
for (par_l, chi_l, _, _) in zip(spt...) # traverse node subtree
childbelief = b[clusterindex(chi_l, beliefs)]
sepsetbelief = b[sepsetindex(par_l, chi_l, beliefs)]
s_ind, c_ind = scopeindex(node_ind, sepsetbelief, childbelief)
d = length(s_ind)
view(childbelief.J, c_ind, c_ind) .+= ϵ*LA.I(d)
view(sepsetbelief.J, s_ind, s_ind) .+= ϵ*LA.I(d)
end
return nothing
end
"""
regularizebeliefs_onschedule!(beliefs::ClusterGraphBelief, clustergraph)
Modify beliefs of the cluster graph so that all beliefs are non-degenerate, by
(1) adding positive values to some diagonal elements of precision matrices `J`
while preserving the full graphical model, and
(2) propagating messages by marginalizing cluster beliefs.
The goal is the same as [`regularizebeliefs_bycluster!`](@ref) and
[`regularizebeliefs_bynodesubtree!`](@ref), but the algorithm is different.
## Algorithm
The outcome depends on a "schedule", that is, on the order in which clusters
are considered. Here, the method simply takes the order given by `labels(clustergraph)`.
Mark all messages Ci → Cj as un-sent.
For each cluster Ci in order:
1. For each neighbor cluster Cj of Ci, if the message Cj → Ci has not been sent,
then:
- add ϵ_i to the diagonal entries of sepset Sij's precision matrix Jij
- add ϵ_i to the diagonal entries of Ci's precision matrix Ji that correspond to
the inscope variables of Sij
- mark Cj → Ci as sent.
Notes:
+ ϵ_i is taken as the maximum of all absolute values of entries in Ji,
and of the square root of machine epsilon.
+ This is equivalent to "sending" a default message, that is not computed
from Cj's belief, with diagonal precision matrix ϵ⋅I and other canonical
parameters h=0, g=0.
2. For each neighbor cluster Cj of Ci, if Ci → Cj has not been sent then
- send a message from Ci to Cj by marginalizing Ci's belief (that is, using belief propagation).
- mark the message Ci → Cj as sent.
"""
function regularizebeliefs_onschedule!(beliefs::ClusterGraphBelief, cgraph::MetaGraph)
# (cluster1, cluster2) ∈ messagesent if cluster1 has sent a message to cluster2
messagesent = Set{NTuple{2,Symbol}}()
tosend = Tuple{Tuple{Symbol,Int},Int}[] # indices for step 2, reset at each cluster
ϵ0 = sqrt(eps(Float64))
b = beliefs.belief
mr = beliefs.messageresidual
for clusterlab in labels(cgraph)
empty!(tosend)
ci = clusterindex(clusterlab, beliefs) # cluster index
ϵ = max(maximum(abs, b[ci].J), ϵ0) # regularization parameter
for nblab in neighbor_labels(cgraph, clusterlab)
nb_i = clusterindex(nblab, beliefs) # neighbor cluster index
ss_i = sepsetindex(clusterlab, nblab, beliefs) # sepset index
if (nblab, clusterlab) ∉ messagesent # step 1: regularize precisions
regularizebeliefs_1clustersepset!(b[ci], b[ss_i], ϵ)
push!(messagesent, (nblab, clusterlab))
end
if (clusterlab, nblab) ∉ messagesent # store indices for step 2 later
push!(tosend, ((nblab, nb_i), ss_i))
push!(messagesent, (clusterlab, nblab))
end
end
for ((nblab, nb_i), ss_i) in tosend # step 2: pass message with BP
propagate_belief!(b[nb_i], b[ss_i], b[ci], mr[(nblab, clusterlab)])
end
end
end
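#= Usage sketch (hypothetical `beliefs` and `cgraph`): regularize right after
factors are assigned with `init_beliefs_assignfactors!` and before the first
calibration sweep:
    regularizebeliefs_onschedule!(beliefs, cgraph)
The 3 strategies (`regularizebeliefs_bycluster!`, `regularizebeliefs_bynodesubtree!`,
`regularizebeliefs_onschedule!`) differ in where and how much ϵ is added, but
all preserve the joint graphical model. =#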
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 6164 | """
getcholesky(J::AbstractMatrix)
Cholesky decomposition of J, assumed to be symmetric *positive* definite,
stored as a `PDMat` object.
Warning: PDMat is not a subtype of Cholesky.
[PDMats.jl](https://github.com/JuliaStats/PDMats.jl) is efficient for
structured matrices (e.g. diagonal or sparse) and has efficient methods for
linear algebra, e.g. `\\`, `invquad`, `X_invA_Xt` etc.
"""
function getcholesky(J::AbstractMatrix)
return PDMat(J) # LA.cholesky(b.J)
end
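#= Example (sketch) of what the PDMat representation provides:
    using PDMats
    Jchol = getcholesky([2.0 1.0; 1.0 2.0]) # PDMat: stores J with its Cholesky factor
    Jchol \ [1.0, 0.0]          # efficient solve, as used to get μ = J⁻¹h
    invquad(Jchol, [1.0, 0.0])  # quadratic form x'J⁻¹x without forming J⁻¹
=#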
"""
getcholesky_μ(J::AbstractMatrix, h)
getcholesky_μ!(belief::Belief)
Tuple `(Jchol, μ)` where `Jchol` is a cholesky representation of `J` or `belief.J`
and `μ` is J⁻¹h, used to update `belief.μ` (by the second method).
"""
function getcholesky_μ(J::AbstractMatrix, h)
Jchol = getcholesky(J)
μ = Jchol \ h
return (Jchol, μ)
end
@doc (@doc getcholesky_μ) getcholesky_μ!
function getcholesky_μ!(b::Belief)
(Jchol, μ) = getcholesky_μ(b.J, b.h)
b.μ .= μ
return (Jchol, μ)
end
"""
entropy(J::Cholesky)
entropy(J::AbstractMatrix)
entropy(belief::AbstractBelief)
Entropy of a multivariate Gaussian distribution with precision matrix `J`,
assumed to be square and symmetric (not checked).
It is 0 if `J` is empty (of size 0×0). It may be `Inf` if `J` is singular
(positive semi-definite but not positive definite).
The second version applies the first to the belief precision `belief.J`.
`entropy` is defined for discrete distributions in
[StatsBase.jl](https://juliastats.org/StatsBase.jl/stable/scalarstats/#StatsBase.entropy)
and extended to Gaussian distributions in Distributions.jl around
[here](https://github.com/JuliaStats/Distributions.jl/blob/master/src/multivariate/mvnormal.jl#L95)
"""
function entropy(J::Union{LA.Cholesky{T},PDMat{T}}) where T<:Real
n = size(J,2)
n == 0 && return zero(T)
(n * (T(log2π) + 1) - LA.logdet(J)) / 2
end
function entropy(J::AbstractMatrix{T}) where T<:Real
n = size(J,2)
n == 0 && return zero(T)
(n * (T(log2π) + 1) - LA.logdet(LA.Symmetric(J))) / 2
end
entropy(cluster::AbstractBelief) = entropy(cluster.J)
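#= Numeric check (sketch): for a standard univariate Gaussian, J = [1.0;;] and
entropy(J) = (log(2π) + 1)/2 ≈ 1.4189, the usual differential entropy
log(σ√(2πe)) with σ = 1. =#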
"""
average_energy!(ref::Belief, target::AbstractBelief)
average_energy!(ref::Belief, Jₜ, hₜ, gₜ)
average_energy(Jᵣ::Union{LA.Cholesky,PDMat}, μᵣ, Jₜ, hₜ, gₜ)
Average energy (i.e. negative expected log) of a `target` canonical form with
parameters `(Jₜ, hₜ, gₜ)` with respect to a normalized non-degenerate reference
canonical form `ref` with parameters `(Jᵣ, hᵣ)`. The reference distribution
is normalized, so specifying `gᵣ` is unnecessary.
When the target canonical form is also normalized and non-degenerate,
this is equal to their cross-entropy:
H(fᵣ, fₜ) = - Eᵣ(log fₜ) = - ∫ fᵣ log fₜ .
`ref` is assumed to be non-degenerate, that is, `Jᵣ` should be positive definite.
`average_energy!` modifies the reference belief by updating `ref.μ` to J⁻¹h.
It calls `average_energy` after a cholesky decomposition of `ref.J`,
stored in `Jᵣ`: see [`getcholesky_μ!`](@ref).
## Calculation:
ref: f(x) = C(x | Jᵣ, hᵣ, _) is the density of 𝒩(μ=Jᵣ⁻¹hᵣ, Σ=Jᵣ⁻¹)
target: C(x | Jₜ, hₜ, gₜ) = exp( - (1/2)x'Jₜx + hₜ'x + gₜ )
E[-log C(X | Jₜ, hₜ, gₜ)] where X ∼ C(Jᵣ, hᵣ, _)
= 0.5 (μᵣ'Jₜ μᵣ + tr(Jᵣ⁻¹Jₜ)) - hₜ'μᵣ - gₜ
With empty vectors and matrices (J's of dimension 0×0 and h's of length 0),
the result is simply: - gₜ.
"""
function average_energy!(ref::Belief, target::AbstractBelief)
average_energy!(ref, target.J, target.h, target.g[1])
end
function average_energy!(ref::Belief, Jₜ, hₜ, gₜ)
(Jᵣ, μᵣ) = getcholesky_μ!(ref)
average_energy(Jᵣ, μᵣ, Jₜ, hₜ, gₜ)
end
@doc (@doc average_energy!) average_energy
function average_energy(Jᵣ::Union{LA.Cholesky,PDMat}, μᵣ, Jₜ, hₜ, gₜ)
isempty(Jₜ) && return -gₜ # dot(x,A,x) fails on empty x & A
(LA.tr(Jᵣ \ Jₜ) + LA.dot(μᵣ, Jₜ, μᵣ)) / 2 - LA.dot(hₜ, μᵣ) - gₜ
end
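#= Worked case (sketch): take both reference and target to be the standard
normal in canonical form: Jᵣ = Jₜ = [1.0;;], μᵣ = [0.0], hₜ = [0.0],
gₜ = -log(2π)/2. Then
    average_energy = (tr(1) + 0)/2 - 0 - gₜ = 1/2 + log(2π)/2 ≈ 1.4189,
i.e. the cross-entropy H(f,f) reduces to the entropy of f, as expected. =#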
"""
factored_energy(beliefs::ClusterGraphBelief)
Factored energy functional for general cluster graphs (Koller & Friedman 2009),
which approximates the evidence lower bound (ELBO), a lower bound for the
log-likelihood. It is
also called the (negative) Bethe free energy in the context of factor graphs.
It is the sum of the cluster average energies and entropies,
minus the sepset entropies.
It is assumed but not checked that `beliefs` are calibrated
(neighbor clusters and sepset beliefs are consistent, used as local marginals).
For a calibrated clique tree, the factored energy is equal to the
log-likelihood. For a calibrated cluster graph, it can serve as an approximation.
output: tuple of 3 values, the 3rd being the factored energy:
(average energy, approximate entropy, factored energy = -energy + entropy).
See also: [`free_energy`](@ref),
[`entropy`](@ref),
[`average_energy!`](@ref)
## References
D. Koller and N. Friedman.
*Probabilistic graphical models: principles and techniques*.
MIT Press, 2009. ISBN 9780262013192.
D. M. Blei, A. Kucukelbir, and J. D. McAuliffe. Variational inference: A Review
for Statisticians, Journal of the American statistical Association, 112:518,
859-877, 2017, doi: [10.1080/01621459.2017.1285773](https://doi.org/10.1080/01621459.2017.1285773).
"""
function factored_energy(b::ClusterGraphBelief)
res = free_energy(b)
return (res[1], res[2], -res[3])
end
"""
free_energy(beliefs::ClusterGraphBelief)
negative [`factored_energy`](@ref) to approximate the negative log-likelihood.
The approximation is exact on a clique tree after calibration.
"""
function free_energy(beliefs::ClusterGraphBelief{B}) where B<:Belief{T} where T<:Real
b = beliefs.belief
init_b = beliefs.factor
nclu = nclusters(beliefs)
ave_energy = zero(T)
approx_entropy = zero(T)
for i in 1:nclu
fac = init_b[i]
if isempty(fac.J) # then b[i].J should be empty too
ave_energy -= fac.g[1]
else # do 1 cholesky of b[i], use it twice
(Jclu, μclu) = getcholesky_μ!(b[i])
ave_energy += average_energy(Jclu, μclu, fac.J, fac.h, fac.g[1])
approx_entropy += entropy(Jclu)
end
end
for i in (nclu+1):length(b)
approx_entropy -= entropy(b[i])
end
return (ave_energy, approx_entropy, ave_energy - approx_entropy)
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 4528 | """
isdegenerate(node)
`true` if *all* parent edges of `node` have length 0, `false` otherwise.
Intended for hybrid nodes, as tree edges of length 0 should be suppressed
before trait evolution analysis.
"""
function isdegenerate(node::PN.Node)
nparents = 0
for e in node.edge # loop over parent edges
getchild(e)===node || continue
nparents += 1
e.length > 0.0 && return false
end
return nparents > 0 # true unless it's the root
end
"""
ishybridsinglepositivechild(node)
`true` if `node` is a hybrid node with a single child edge of positive length.
If it [`isdegenerate`](@ref) (all its parent edges have length 0) and
if its child is a tree node, then it could be removed from scope:
see [`unscope`](@ref).
"""
ishybridsinglepositivechild(v::PN.Node) = v.hybrid && hassinglechild(v) && getchildedge(v).length > 0.0
"""
unscope(node)
`true` if `node` is a hybrid node with a single child edge of positive length,
and if its child node is a tree node.
If it [`isdegenerate`](@ref) (all its parent edges have length 0) then it
could be removed from scope:
see [`addtreenode_belowdegeneratehybrid!`](@ref).
"""
unscope(v::PN.Node) = ishybridsinglepositivechild(v) && !(getchild(v).hybrid)
"""
hasdegenerate(net)
`true` if degenerate nodes remain in scope, that is, if there exists a tree
edge of length 0, or if there exists a hybrid node with all parent edges of
length 0 and with 2 or more children edges, or with 1 child edge of length 0.
"""
hasdegenerate(net::HybridNetwork) = any(isdegenerate(v) && !unscope(v) for v in net.node)
"""
parentinformation(node, net)
Tuple of (edge length, edge γ, index of parent node in `net.nodes_changed`)
for all parent edges of `node`. Assumes that `net` has been preordered before.
"""
function parentinformation(hyb::PN.Node, net::HybridNetwork)
t = Float64[]
γ = Float64[]
i_par = Int[]
for e in hyb.edge # loop over parent edges
getchild(e)===hyb || continue
push!(t, e.length)
push!(γ, e.gamma)
push!(i_par, findfirst(isequal(getparent(e)), net.nodes_changed))
end
return (t, γ, i_par)
end
"""
shrinkdegenerate_treeedges(net::HybridNetwork)
Network obtained from `net` with any non-external tree edge of length 0 suppressed.
Returns an error if any edge length is missing or negative,
or if any γ is missing or non-positive.
It is assumed that γs sum to 1 across partner hybrid edges.
"""
function shrinkdegenerate_treeedges(net::HybridNetwork)
str = "Trait evolution models need the network to have edge lengths and γs."
PN.check_nonmissing_nonnegative_edgelengths(net, str)
    for e in net.edge # loop (not `any`) so that `e` is in scope for the error message
        e.gamma > 0 || error("Branch number $(e.number) has a missing or non-positive γ.\n" * str)
    end
net = deepcopy(net)
    redo = true
    while redo
        redo = false
        for e in net.edge
            e.hybrid && continue
            if e.length == 0.0
                getchild(e).leaf && error("external edge $(e.number) has length 0")
                PN.shrinkedge!(net, e) # changes net.edge
                redo = true # net.edge was modified: rescan for more 0-length edges
                break # of for loop over net.edge
            end
        end
    end
return net
end
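#= Usage sketch (assuming `PhyloNetworks.readTopology`; the newick string is
hypothetical): the internal tree edge of length 0 below is suppressed, creating
a polytomy at the root, while external edges are kept:
    net = PN.readTopology("((A:1,B:1):0,C:1);")
    net2 = shrinkdegenerate_treeedges(net)
=#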
"""
addtreenode_belowdegeneratehybrid!(net::HybridNetwork)
If a degenerate hybrid node h1 has 1 child edge of length t>0 to a hybrid child h2:
break the edge by adding a tree node at distance t from h1 and 0 from h2.
That way, h1 may be removed from scope.
This is done iteratively, as h2 may become degenerate after this operation.
See [`shrinkdegenerate_treeedges`](@ref) to remove degenerate internal tree nodes,
and [`hasdegenerate`](@ref) to check if `net` still has degenerate nodes.
"""
function addtreenode_belowdegeneratehybrid!(net::HybridNetwork)
restart = true
# find prefix for naming new nodes
m = match(r"(^\D+)\d+$", net.node[net.root].name)
prefix = (isnothing(m) ? "I" : m.captures[1])
    while restart
        restart = false
        for hyb in net.hybrid
            (isdegenerate(hyb) && ishybridsinglepositivechild(hyb)) || continue
            che = getchildedge(hyb)
            getchild(che).hybrid || continue
            t = che.length
            _,newe = PN.breakedge!(che, net) # hyb --newe--> newv --che--> hybridchild
            newe.length = t
            che.length = 0.0 # the hybrid child may now be degenerate, so restart
            preprocessnet!(net, prefix) # name new node, update net.nodes_changed
            restart = true # rescan net.hybrid after this change
            break
        end
    end
return net
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 17093 | """
EvolutionaryModel{T}
Evolutionary model type,
with `T` the element type in all parameter vector and matrices.
Implemented models include the [`UnivariateBrownianMotion`](@ref).
An object of this type must contain at least the following elements:
* μ: the mean of the trait at the root.
* v: the variance of the trait at the root. Can be zero (fixed root) or infinite.
New evolutionary models must implement the following interfaces:
```julia
params(obj::EvolutionaryModel)
params_optimize(obj::EvolutionaryModel)
params_original(obj::EvolutionaryModel, transformedparams::AbstractArray)
```
"""
abstract type EvolutionaryModel{T} end
# Trait for univariate / multivariate models
abstract type UnivariateType end
struct IsUnivariate <: UnivariateType end
struct IsMultivariate <: UnivariateType end
# Default to multivariate models
UnivariateType(::Type) = IsMultivariate()
# generic methods
modelname(obj::EvolutionaryModel) = string(typeof(obj))
variancename(obj::EvolutionaryModel) = "variance"
varianceparam(obj::EvolutionaryModel) = error("varianceparam not implemented for type $(typeof(obj))")
nonrootparamnames(obj::EvolutionaryModel) = (variancename(obj), )
paramnames(obj::EvolutionaryModel) = isrootfixed(obj) ? (nonrootparamnames(obj)..., "root mean μ") : (nonrootparamnames(obj)..., "root mean μ", "root variance v")
## Root
# requires all models to have a field named μ
rootpriormeanvector(obj::T) where {T <: EvolutionaryModel} = rootpriormeanvector(UnivariateType(T), obj)
rootpriormeanvector(::IsMultivariate, obj) = obj.μ
rootpriormeanvector(::IsUnivariate, obj) = [obj.μ]
# requires all models to have a field named v
isrootfixed(obj::EvolutionaryModel) = all(obj.v .== 0)
rootpriorvariance(obj::EvolutionaryModel) = obj.v
rootpriorprecision(obj::EvolutionaryModel) = inv(rootpriorvariance(obj))
# default root variance
function getrootvarianceunivariate(T, v=nothing)
if isnothing(v) v = zero(T); end
typeof(v) <: Number || error("root variance v=$v must be a number")
v >= 0 || error("root variance v=$v must be non-negative")
return v
end
function getrootvariancediagonal(T, numt, v=nothing)
SV = SVector{numt, T}
if isnothing(v)
v = SV(zero(T) for _ in 1:numt)
else
length(v) == numt || error("v and μ have different lengths")
all(v .>= 0.0) || error("root variances v=$v must all be non-negative")
end
return v
end
function getrootvariancemultivariate(T, numt, v=nothing)
if isnothing(v)
v = LA.Symmetric(zeros(T, numt, numt))
else
size(v) == (numt,numt) || error("v and μ have conflicting sizes")
LA.issymmetric(v) || error("v should be symmetric")
v = LA.Symmetric(Matrix{T}(v))
# `min(LA.eigvals(v)...) ≥ 0` catches the zero matrix (e.g. fixed root)
# `LA.isposdef(v)` catches symmetric matrices with ≥1 `Inf` on the diagonal
LA.isposdef(v) || min(LA.eigvals(v)...) ≥ 0 || error("v is not positive semi-definite")
# LA.isposdef(v) || error("v is not positive semi-definite")
end
return v
end
"""
dimension(m::EvolutionaryModel)
Number of traits, e.g. 1 for univariate models.
"""
dimension(obj::EvolutionaryModel) = length(rootpriormeanvector(obj))
"""
params(m::EvolutionaryModel)
Tuple of parameters, the same that can be used to construct the evolutionary model.
"""
params(m::EvolutionaryModel) = isrootfixed(m) ? (varianceparam(m), m.μ) : (varianceparam(m), m.μ, m.v)
"""
params_optimize(m::EvolutionaryModel)
Tuple of transformed parameters for model `m`, in an unconstrained space that
can be used for numerical optimization.
"""
params_optimize(obj::EvolutionaryModel) =
error("params_optimize not implemented for type $(typeof(obj))")
"""
params_original(m::EvolutionaryModel, transformedparams::AbstractArray)
Tuple of parameters for model `m` in the original space, corresponding to
back-transformed parameters of `transformedparams`, and that can be used
as input to the model constructor.
"""
params_original(obj::EvolutionaryModel, ::AbstractArray) =
error("params_original' not implemented for type $(typeof(obj))")
function Base.show(io::IO, obj::EvolutionaryModel)
disp = modelname(obj) * "\n"
parnames = paramnames(obj)
par = params(obj)
for (n,p) in zip(parnames, par)
disp *= "\n- " * n * " :\n$(p)"
end
print(io, disp)
end
################################################################
## factor_treeedge
################################################################
"""
branch_actualization(obj::EvolutionaryModel, edge::PN.Edge)
branch_displacement(obj::EvolutionaryModel, edge::PN.Edge)
branch_precision(obj::EvolutionaryModel, edge::PN.Edge)
branch_variance(obj::EvolutionaryModel, edge::PN.Edge)
branch_logdet(obj::EvolutionaryModel, edge::PN.Edge, precision::AbstractMatrix)
branch_transition_qωjg(obj::EvolutionaryModel, edge)
branch_transition_qωv!(q, obj::EvolutionaryModel, edge)
Under the most general linear Gaussian model, X₀ given X₁ is Gaussian with
conditional mean q X₁ + ω and conditional variance Σ independent of X₁.
`branch_actualization`, `branch_displacement` and `branch_variance`
return, respectively, q, ω and Σ.
`branch_precision` and `branch_variance` should return a matrix of symmetric type.
`branch_variance` defaults to the inverse of `branch_precision`.
`branch_logdet` defaults to g = -0.5\\*log(|2πΣ|), the log normalizing constant of the
Gaussian density in the traditional form.
`branch_transition_*` return or modify in place the corresponding
transition matrices.
Under a Brownian motion, we have q=I, ω=0, and conditional variance t*R
where R is the model's variance rate and t the branch length.
"""
function branch_actualization(obj::EvolutionaryModel{T}, edge::PN.Edge) where T
p = dimension(obj)
M = Matrix{T}(undef, p, p)
branch_actualization!(M, obj, edge)
end
function branch_actualization!(::AbstractMatrix, obj::EvolutionaryModel, ::PN.Edge)
error("branch_actualization! not implemented for type $(typeof(obj)).")
end
@doc (@doc branch_actualization) branch_displacement
function branch_displacement(obj::EvolutionaryModel, ::PN.Edge)
error("`branch_displacement` not implemented for type $(typeof(obj)).")
end
@doc (@doc branch_actualization) branch_precision
function branch_precision(obj::EvolutionaryModel, ::PN.Edge)
error("`branch_precision` not implemented for type $(typeof(obj)).")
end
@doc (@doc branch_actualization) branch_variance
function branch_variance(obj::EvolutionaryModel, edge::PN.Edge)
return inv(branch_precision(obj, edge))
end
function branch_logdet(obj::EvolutionaryModel, ::PN.Edge, precision::AbstractMatrix)
return branch_logdet_precision(dimension(obj), precision)
end
function branch_logdet_precision(dim::Int, precision::AbstractMatrix)
return (- dim * log2π + LA.logdet(precision))/2
end
function branch_logdet_variance(dim::Int, variance::AbstractMatrix)
return - (dim * log2π + LA.logdet(variance))/2
end
function branch_transition_qωjg(obj::EvolutionaryModel, edge::PN.Edge)
j = branch_precision(obj, edge)
ω = branch_displacement(obj, edge)
q = branch_actualization(obj, edge)
g = branch_logdet(obj, edge, j)
return (q,ω,j,g)
end
function branch_transition_qωv!(q::AbstractMatrix, obj::EvolutionaryModel, edge::PN.Edge)
v = branch_variance(obj, edge)
ω = branch_displacement(obj, edge)
branch_actualization!(q, obj, edge)
return (ω,v)
end
"""
factor_treeedge(evolutionarymodel, edge)
Canonical parameters `h,J,g` of factor ϕ(X0,X1) from the given evolutionary model
along one edge, where X₀ is the state of the child node and X₁ the state of the
parent node. In `h` and `J`, the first p coordinates are for the child and the
last p for the parent, where p is the number of traits (determined by the model).
Under the most general linear Gaussian model, X₀ given X₁ is Gaussian with
conditional mean q X₁ + ω and conditional variance Σ independent of X₁.
The generic fallback method uses functions
[`branch_actualization`](@ref) for q,
[`branch_displacement`](@ref) for ω,
[`branch_precision`](@ref) for Σ⁻¹.
Under a Brownian motion, we have q=I, ω=0, and Σ=tR
where R is the model's variance rate and t is the length of the branch.
In that case, a specific (more efficient) method is implemented,
and the default fallback is not used.
"""
function factor_treeedge(m::EvolutionaryModel, edge::PN.Edge)
(q,ω,j,g0) = branch_transition_qωjg(m, edge)
factor_treeedge(q, ω, j, 1, dimension(m), g0)
end
# factor from precision, actualization, displacement
function factor_treeedge(q::AbstractMatrix{T}, ω::AbstractVector{T}, j::AbstractMatrix{T},
nparents::Int, ntraits::Int, g0::T) where T
J, ntot, jq = _factor_treeedge_get_J(q, j, nparents, ntraits)
qjomega = transpose(jq) * ω
jomega = j * ω
gen = ((u,tu) for u in 0:nparents for tu in 1:ntraits)
huv = (u,tu) -> (u==0 ? jomega[tu] : qjomega[(u-1)*ntraits+tu])
h = SVector{ntot,T}(huv(x...) for x in gen)
g = g0 - LA.dot(ω, jomega) / 2
return(h,J,g)
end
# frequent case when ω=0
function factor_treeedge(q::AbstractMatrix{T}, j::AbstractMatrix{T},
nparents::Int, ntraits::Int, g::T) where T
J, ntot, _ = _factor_treeedge_get_J(q, j, nparents, ntraits)
h = SVector{ntot,T}(zero(T) for _ in 1:ntot)
return(h,J,g)
end
# computes only J
function _factor_treeedge_get_J(q::AbstractMatrix{T}, j::AbstractMatrix{T},
nparents::Int, ntraits::Int) where T
nn = 1 + nparents; ntot = ntraits * nn
jq = - j * q
qjq = - transpose(q) * jq
# J = [j -jq; -q'j q'jq]
gen = ((u,tu,v,tv) for u in 0:nparents for tu in 1:ntraits for v in 0:nparents for tv in 1:ntraits)
Juv = (u,tu,v,tv) -> (u==0 ? (v==0 ? j[tu,tv] : jq[tu,(v-1)*ntraits+tv]) :
(v==0 ? jq[tv,(u-1)*ntraits+tu] : qjq[(u-1)*ntraits+tu,(v-1)*ntraits+tv]))
J = LA.Symmetric(SMatrix{ntot,ntot,T}(Juv(x...) for x in gen))
return(J, ntot, jq)
end
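#= Block structure under a homogeneous Brownian motion (sketch): with q = I,
ω = 0 and j = (tR)⁻¹ for branch length t and variance rate R, the factor has
    J = [ j -j
         -j  j ],   h = 0,   g = -log(det(2π t R))/2,
so that ϕ(x₀,x₁) = exp(-(x₀-x₁)' j (x₀-x₁)/2 + g): a Gaussian density in the
contrast x₀ - x₁, as expected for a BM increment. =#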
################################################################
## factor_hybridnode
################################################################
"""
hybridnode_displacement(obj::EvolutionaryModel, parentedges::AbstractVector{PN.Edge})
hybridnode_precision(obj::EvolutionaryModel, parentedges::AbstractVector{PN.Edge})
hybridnode_variance(obj::EvolutionaryModel, parentedges::AbstractVector{PN.Edge})
Under the most general weighted average Gaussian model, X₀ given its parents X₁, X₂, ...
is Gaussian with conditional mean the weighted average of the parents
plus a displacement vector ω and conditional variance Σ independent of X₁, X₂, ... .
The weights are given by the inheritance probabilities contained in the `PN.Edge` objects.
`hybridnode_displacement` and `hybridnode_variance` return, respectively,
ω and Σ.
`hybridnode_variance` and `hybridnode_precision` should return a matrix of symmetric type.
`hybridnode_precision` defaults to the inverse of `hybridnode_variance`.
`hybridnode_displacement` and `hybridnode_variance` default to a vector or matrix of zeros.
"""
function hybridnode_displacement(obj::EvolutionaryModel{T}, ::AbstractVector{PN.Edge}) where T
zeros(T, dimension(obj))
end
@doc (@doc hybridnode_displacement) hybridnode_variance
function hybridnode_variance(obj::EvolutionaryModel{T}, ::AbstractVector{PN.Edge}) where T
ntraits = dimension(obj)
zeros(T, ntraits, ntraits)
end
@doc (@doc hybridnode_displacement) hybridnode_precision # TODO: this function is never used ?
function hybridnode_precision(obj::EvolutionaryModel, parentedges::AbstractVector{PN.Edge})
return inv(hybridnode_variance(obj, parentedges))
end
"""
factor_hybridnode(evolutionarymodel, ts::AbstractVector, γs)
factor_tree_degeneratehybrid(model, t0::Real, γs)
Canonical parameters `h,J,g` of factor ϕ(X₀, X₁, X₂, ...) from the evolutionary model
for a hybrid node: where X₀ is the state at the hybrid node and X₁, X₂, ... the
states of the parent nodes.
**Warning:** `γs` is modified in place, changed to `[1 -γs]`.
It is assumed that the conditional mean is a simple weighted average:
``E[X_0 | X_1, X_2, ...] = \\sum_k \\gamma_k X_k = q \\mathrm{vec}(X_1,X_2,...) + \\omega``
where q has one block for each parent, and each block is diagonal scalar:
``\\gamma_k I_p``.
More complex models could consider adding a shift ω to the conditional mean.
If all the parent hybrid edges edges have length 0, then it is assumed that
the model gives a degenerate distribution, with 0 conditional variance.
More complex models could consider adding a hybrid conditional variance Σ.
- The first form assumes that at least 1 parent edge length is positive,
with conditional variance ``\\sum_k \\gamma_k^2 V_k`` where ``V_k`` is
the conditional variance from the kth parent edge.
- The second form can be used in case all parent edges have 0 length,
to integrate out the hybrid node state and the factor ϕ(X₀, X₁, X₂, ...)
when X₀ is its **child** state, along an edge of length `t0` between
the hybrid node and its child. This second form is appropriate when
this hybrid's child is a tree node, and `t0>0`.
In `h` and `J`, the first p coordinates are for the hybrid (or its child) and
the last coordinates for the parents, in the same order in which
the edge lengths and γs are given.
"""
function factor_hybridnode(m::EvolutionaryModel{T}, pae::AbstractVector{PN.Edge}) where T
ntraits = dimension(m)
nparents = length(pae)
v = hybridnode_variance(m, pae) # extra node variance
ω = hybridnode_displacement(m, pae) # extra node displacement
q = Matrix{T}(undef, ntraits, nparents * ntraits) # init actualisation
for (k, edge) in enumerate(pae)
qe = view(q, :, ((k-1) * ntraits + 1):(k*ntraits))
(ωe, ve) = branch_transition_qωv!(qe, m, edge)
qe .*= edge.gamma
v .+= edge.gamma^2 .* ve
ω .+= edge.gamma .* ωe
end
    j = inv(v) # block precision (inverse of the conditional variance)
g0 = branch_logdet_precision(ntraits, j)
factor_treeedge(q, ω, j, nparents, ntraits, g0)
end
# j = Sigma_child^{-1}
# omega = q_child * (sum_k gamma_k omega_k + omega_hybrid)
# q = q_child [gamma_k q_k]
# TODO: is this necessary ?
function factor_tree_degeneratehybrid(m::EvolutionaryModel{T}, pae::AbstractVector{PN.Edge}, che::PN.Edge) where T
ntraits = dimension(m)
nparents = length(pae)
# hybridnode_variance(m, pae) is zero if degenerate, as well as branch_variance(m, edge) for all edge in pae
j = branch_precision(m, che)
# hybrid displacement and actualisation
    ωh = hybridnode_displacement(m, pae)
    qh = Matrix{T}(undef, ntraits, nparents * ntraits)
    for (k, edge) in enumerate(pae)
        ωh .+= edge.gamma .* branch_displacement(m, edge) # q_child applied below, once
        qe = view(qh, :, ((k-1) * ntraits + 1):(k*ntraits))
        branch_actualization!(qe, m, edge)
        qe .*= edge.gamma # γ_k q_k, as in the formula above
    end
    # child displacement and actualization
    # TODO: can we avoid re-allocation here ?
    qche = branch_actualization(m, che)
    ω = branch_displacement(m, che) + qche * ωh
    q = qche * qh
    g0 = branch_logdet_precision(ntraits, j)
    factor_treeedge(q, ω, j, nparents, ntraits, g0)
end
################################################################
## factor_root
################################################################
"""
factor_root(m::EvolutionaryModel)
Canonical parameters `h,J,g` of the prior density at the root, from model `m`.
Assumes that `isrootfixed(m)` returns `false` (in which case the root value
should be absorbed as evidence and the root removed from scope).
More strongly, the root variance is assumed to be invertible, in particular,
traits are all non-fixed at the root.
The prior is improper if the prior variance is infinite. In this case this prior
is not a distribution (total probability ≠ 1) but is taken as the constant
function 1, which corresponds to h,J,g all 0 (and an irrelevant mean).
If the root variance is not invertible (e.g., fixed root),
this function fails and should never be called
(see `isrootfixed`)
"""
factor_root(obj::T) where {T <: EvolutionaryModel} = factor_root(UnivariateType(T), obj)
function factor_root(::IsUnivariate, m::EvolutionaryModel{T}) where T
j = T(1/m.v) # improper prior: j=0, v=Inf, factor ≡ 1: h,J,g all 0
g = (j == 0.0 ? zero(T) : -(log2π + log(m.v) + m.μ^2 * j)/2)
return(m.μ*j, j, g)
end
function factor_root(::IsMultivariate, m::EvolutionaryModel{T}) where T
#= check if improper from m.v since inv(m.v), called by `rootpriorprecision`,
errors if m.v contains Infs or NaNs for m.v == Symmetric =#
improper = any(LA.diag(rootpriorvariance(m)) .== Inf)
j = improper ? zeros(T, size(rootpriorvariance(m))) : rootpriorprecision(m)
# j = rootpriorprecision(m)
μ = rootpriormeanvector(m)
h = j * μ
# improper = any(LA.diag(j) .== 0.0) # then assumes that *all* are 0
g = (improper ? zero(T) : (-dimension(m) * log2π + LA.logdet(j) - LA.dot(m.μ, h))/2)
return(h, j, g)
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 7264 | """
PaintedParameter{T}
Type with 2 fields:
- `parameter`: vector whose elements are of type `T`
- `color`: `DefaultDict` dictionary mapping integers to integers,
with a default value of 1.
`ncolors(pp)` returns the number of colors, that is, the number of distinct
parameter values: the length of `pp.parameter`.
This type is meant to store several values for a given evolutionary parameter
(say, Brownian motion variance rate), each one being used on some edges or
nodes of a phylogenetic network. The default parameter value is the first one.
For an edge number `i`, color `j=pp.color[i]` indexes its parameter value,
that is, evolution along edge number `i` should use `pp.parameter[j]`.
This parameter value is obtained with
`getparameter(pp, j)` for the parameter value of color `j`, or
`getparameter(pp, edge)` for the parameter value of `edge`.
"""
struct PaintedParameter{T}
parameter::Vector{T}
color::DefaultDict{Int,Int}
end
function PaintedParameter(parameter::Vector{T}, d::Dict=Dict{Int,Int}()) where T
PaintedParameter{T}(parameter, DefaultDict(1, d))
end
ncolors(pp::PaintedParameter) = length(pp.parameter)
getparameter(pp::PaintedParameter, e::PN.Edge) = getparameter(pp, e.number)
getparameter(pp::PaintedParameter, number::Int) = pp.parameter[pp.color[number]]
function Base.show(io::IO, obj::PaintedParameter)
disp = "Painted parameter on a network with $(ncolors(obj)) different parameters:"
for (ind, pp) in enumerate(obj.parameter)
disp *= "\n$ind: $pp"
end
disp *= "\nmapping of edge/node number to parameter color:\n$(obj.color)"
print(io, disp)
end
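#= Usage sketch: 2 parameter values ("colors"), with edge number 4 painted with
color 2 and every other edge defaulting to color 1:
    pp = PaintedParameter([0.5, 2.0], Dict(4 => 2))
    getparameter(pp, 4) # 2.0
    getparameter(pp, 7) # 0.5, from the default color 1
=#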
################################################################
## Heterogeneous Model
################################################################
abstract type HeterogeneousEvolutionaryModel{T} <: EvolutionaryModel{T} end
# subtype with a BM along each branch, so q=I along each edge
abstract type HeterogeneousBM{T} <: HeterogeneousEvolutionaryModel{T} end
"""
HeterogeneousBrownianMotion{T,U,V,W} <: HeterogeneousBM{T}
Type for a heterogeneous Brownian motion model, univariate or multivariate.
Along each edge, evolution follows a Brownian motion.
Each edge can have its own variance rate.
This model has no shifts, and no extra hybrid variance.
By default, the root is fixed with prior variance 0.
`T` is the scalar type, `U` is the type for the root mean (vector of length d,
where `d` is the trait dimension, even if univariate), `V` is a matrix type for
the root variance, and `W` the matrix type of each variance rate, one per color.
For a univariate BM, we may have `W=T` and `V=Vector{T}`.
For a multivariate BM, we may have `W=V<:Matrix{T}`.
This is such that each field is mutable, so we can update model parameters
in place within the model object, itself immutable.
"""
struct HeterogeneousBrownianMotion{T<:Real, U<:AbstractVector{T}, V<:AbstractMatrix{T}, W<:Union{T, V, PDMats.PDMat{T}}} <: HeterogeneousBM{T}
"variance rate"
variancerate::PaintedParameter{W}
"inverse variance (precision) rate"
inverserate::PaintedParameter{W}
"prior mean at the root"
μ::U
"prior variance at the root"
v::V
"g0: -log(2π variancerate)/2"
g0::PaintedParameter{T}
end
modelname(m::HeterogeneousBrownianMotion) = "Heterogeneous Brownian motion"
variancename(m::HeterogeneousBM) = "evolutionary variance rates"
varianceparam(m::HeterogeneousBM) = m.variancerate
function HeterogeneousBrownianMotion(R::AbstractMatrix, μ, v=nothing)
HeterogeneousBrownianMotion([R], Dict{Int,Int}(), μ, v)
end
function HeterogeneousBrownianMotion(Rvec, colors::AbstractDict, μ, v=nothing)
if !isa(μ, Array) μ = [μ]; end
numt = length(μ)
    length(Rvec) >= 1 || error("Rvec must have at least one component")
T = promote_type(Float64, eltype(Rvec[1]), eltype(μ))
v = getrootvariancemultivariate(T, numt, v)
all(size(R) == (numt,numt) for R in Rvec) || error("R and μ have conflicting sizes")
all(LA.issymmetric(R) for R in Rvec) || error("R should be symmetric")
Rvec = [PDMat(R) for R in Rvec]
Jvec = inv.(Rvec) # uses cholesky. fails if not symmetric positive definite
gvec = [branch_logdet_variance(numt, R) for R in Rvec]
HeterogeneousBrownianMotion{T, typeof(μ), typeof(v), typeof(Rvec[1])}(
PaintedParameter(Rvec, colors), PaintedParameter(Jvec, colors), μ, v,
PaintedParameter(gvec, colors)
)
end
function HeterogeneousBrownianMotion(paintedrates::PaintedParameter, μ, v=nothing)
HeterogeneousBrownianMotion(paintedrates.parameter, paintedrates.color, μ, v)
end
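#= Construction sketch (hypothetical edge numbers): a 1-trait model with 2 rate
colors, where edges numbered 8 and 9 evolve twice as fast as the others:
    m = HeterogeneousBrownianMotion([[1.0;;], [2.0;;]], Dict(8 => 2, 9 => 2), [0.0])
=#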
# params(m::HeterogeneousBrownianMotion) = isrootfixed(m) ? (m.R, m.μ) : (m.R, m.μ, m.v)
function branch_actualization(obj::HeterogeneousBM{T}, ::PN.Edge) where T
ScalMat(dimension(obj), one(T))
end
# below: would error if `q` were a ScalMat; it is only called on the Matrix allocated by factor_hybridnode
function branch_actualization!(q::AbstractMatrix, ::HeterogeneousBM, ::PN.Edge)
q[LA.diagind(q)] .= 1.0
LA.tril!(q)
LA.triu!(q)
end
function branch_precision(obj::HeterogeneousBM, edge::PN.Edge)
getparameter(obj.inverserate, edge) ./ edge.length
end
function branch_variance(obj::HeterogeneousBM, edge::PN.Edge)
edge.length .* getparameter(obj.variancerate, edge)
end
function branch_logdet(obj::HeterogeneousBM, edge::PN.Edge)
getparameter(obj.g0, edge) - dimension(obj) * log(edge.length)/2
end
function factor_treeedge(m::HeterogeneousBrownianMotion, edge::PN.Edge)
ntraits = dimension(m)
q = branch_actualization(m, edge)
j = branch_precision(m, edge)
g = branch_logdet(m, edge)
factor_treeedge(q,j,1,ntraits,g)
end
function factor_hybridnode(m::HeterogeneousBrownianMotion{T}, pae::AbstractVector{PN.Edge}) where T
ntraits = dimension(m)
nparents = length(pae)
v = zeros(T, ntraits, ntraits) # no extra node variance
q = Matrix{T}(undef, ntraits, nparents * ntraits) # init actualisation
for (k, edge) in enumerate(pae)
qe = view(q, :, ((k-1) * ntraits + 1):(k*ntraits))
ve = branch_variance(m, edge)
branch_actualization!(qe, m, edge)
qe .*= edge.gamma
v .+= edge.gamma^2 .* ve
end
    j = inv(v) # block precision (inverse of the conditional variance)
g0 = branch_logdet_precision(ntraits, j)
factor_treeedge(q,j,nparents,ntraits,g0)
end
"""
HeterogeneousShiftedBrownianMotion{T,U,V,W} <: HeterogeneousBM{T}
Type for a heterogeneous Brownian motion model like
[`HeterogeneousBrownianMotion`](@ref) but with a possible
shift in the mean along each edge.
"""
struct HeterogeneousShiftedBrownianMotion{T<:Real, U<:AbstractVector{T}, V<:AbstractMatrix{T}, W<:Union{T, V, PDMats.PDMat{T}}} <: HeterogeneousBM{T}
"variance rate"
variancerate::PaintedParameter{W}
"inverse variance (precision) rate"
inverserate::PaintedParameter{W}
"shift in the mean along edges"
shiftmean::PaintedParameter{U}
"prior mean at the root"
μ::U
"prior variance at the root"
v::V
"g0: -log(2π variancerate)/2"
g0::PaintedParameter{T}
end
modelname(m::HeterogeneousShiftedBrownianMotion) = "Heterogeneous Brownian motion with mean shifts"
# fixit: write a constructor
function branch_displacement(obj::HeterogeneousShiftedBrownianMotion, edge::PN.Edge)
    getparameter(obj.shiftmean, edge)
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 10768 | ################################################################
## Model Definitions
################################################################
## Abstract BM type
abstract type HomogeneousBrownianMotion{T} <: EvolutionaryModel{T} end
"""
UnivariateBrownianMotion{T} <: HomogeneousBrownianMotion{T}
The univariate Brownian motion, homogeneous across the phylogeny, that is,
with the same variance rate `σ2` across all edges.
`μ` is the prior mean at the root.
`v` the prior variance at the root, 0 by default.
"""
struct UnivariateBrownianMotion{T<:Real} <: HomogeneousBrownianMotion{T}
"variance rate"
σ2::T
"inverse variance (precision) rate"
J::T
"prior mean at the root"
μ::T
"prior variance at the root"
v::T
"g0: -log(2π σ2)/2"
g0::T
end
UnivariateType(::Type{<:UnivariateBrownianMotion}) = IsUnivariate()
modelname(m::UnivariateBrownianMotion) = "Univariate Brownian motion"
variancename(m::UnivariateBrownianMotion) = "evolutionary variance rate σ2"
varianceparam(m::UnivariateBrownianMotion) = m.σ2
function UnivariateBrownianMotion(σ2::Number, μ::Number, v=nothing)
T = promote_type(Float64, typeof(σ2), typeof(μ))
v = getrootvarianceunivariate(T, v)
σ2 > 0 || error("evolutionary variance rate σ2 = $(σ2) must be positive")
UnivariateBrownianMotion{T}(σ2, 1/σ2, μ, v, -(log2π + log(σ2))/2)
end
function UnivariateBrownianMotion(σ2::Union{U1,V1}, μ::Union{U2,V2}, v=nothing) where {U1<:Number, U2<:Number, V1<:AbstractArray{U1}, V2<:AbstractArray{U2}}
if (isnothing(v))
(length(σ2) == 1 && length(μ) == 1) || error("UnivariateBrownianMotion can only take scalars as entries.")
UnivariateBrownianMotion(σ2[1], μ[1])
else
(length(σ2) == 1 && length(μ) == 1 && length(v) == 1) || error("UnivariateBrownianMotion can only take scalars as entries.")
UnivariateBrownianMotion(σ2[1], μ[1], v[1])
end
end
params(m::UnivariateBrownianMotion) = isrootfixed(m) ? (m.σ2, m.μ) : (m.σ2, m.μ, m.v)
params_optimize(m::UnivariateBrownianMotion) = [-2*m.g0 - log2π, m.μ] # log(σ2),μ
params_original(m::UnivariateBrownianMotion, logσ2μ::AbstractArray) = (exp(logσ2μ[1]), logσ2μ[2], m.v)
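#= Usage sketch: a univariate BM with rate σ2 = 0.5, root mean 1, fixed root:
    m = UnivariateBrownianMotion(0.5, 1.0)
    params(m)          # (0.5, 1.0): root variance omitted since fixed to 0
    params_optimize(m) # [log(0.5), 1.0]: unconstrained scale for optimization
=#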
"""
MvDiagBrownianMotion{T,V} <: HomogeneousBrownianMotion{T}
The multivariate Brownian motion with diagonal variance rate matrix, that is,
traits with independent evolution. It is homogeneous across the phylogeny.
`R` is the variance rate (stored as a vector of type `V`),
`μ` is the prior mean at the root and
`v` the prior variance at the root, 0 by default (and both also of type `V`)
"""
struct MvDiagBrownianMotion{T<:Real, V<:AbstractVector{T}} <: HomogeneousBrownianMotion{T}
"diagonal entries of the diagonal variance rate matrix"
R::V
"inverse variance rates (precision) on the diagonal inverse rate matrix"
J::V
"prior mean vector at the root"
μ::V
"prior variance vector at the root"
v::V
"g0: -log(det(2πR))/2"
g0::T
end
modelname(m::MvDiagBrownianMotion) = "Multivariate Diagonal Brownian motion"
variancename(m::MvDiagBrownianMotion) = "evolutionary variance rates (diagonal values in the rate matrix): R"
varianceparam(m::MvDiagBrownianMotion) = m.R
function MvDiagBrownianMotion(R, μ, v=nothing)
numt = length(μ) # number of traits
length(R) == numt || error("R and μ have different lengths")
T = promote_type(Float64, eltype(R), eltype(μ))
v = getrootvariancediagonal(T, numt, v)
all(R .> 0.0) || error("evolutionary variance rates R = $R must all be positive")
SV = SVector{numt, T}
R = SV(R)
J = 1 ./R
MvDiagBrownianMotion{T, SV}(R, J, SV(μ), SV(v), -(numt * log2π + sum(log.(R)))/2)
end
params(m::MvDiagBrownianMotion) = isrootfixed(m) ? (m.R, m.μ) : (m.R, m.μ, m.v)
params_optimize(m::MvDiagBrownianMotion) = [log.(m.R)..., m.μ...]
params_original(m::MvDiagBrownianMotion, logRμ::AbstractArray) = (exp.(logRμ[1:dimension(m)]), logRμ[(dimension(m)+1):end], m.v)
rootpriorvariance(obj::MvDiagBrownianMotion) = LA.Diagonal(obj.v)
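#= usage sketch: two independent traits, as in test_evomodels.jl
m = MvDiagBrownianMotion([1, 0.5], [-1, 1])         # fixed root: v = [0, 0]
varianceparam(m)                                    # SVector [1.0, 0.5]
m = MvDiagBrownianMotion([1, 0.5], [-1, 1], [0, 1]) # trait 2 has a random root
rootpriorvariance(m)                                # Diagonal([0.0, 1.0])
=#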
"""
MvFullBrownianMotion{T,P1,V,P2} <: HomogeneousBrownianMotion{T}
The full multivariate Brownian motion. It is homogeneous across the phylogeny.
`R` is the variance rate (of matrix type `P1`),
`μ` is the prior mean at the root (of vector type `V`) and
`v` the prior variance at the root, 0 by default (of matrix type `P2`).
"""
struct MvFullBrownianMotion{T<:Real, P1<:AbstractMatrix{T}, V<:AbstractVector{T}, P2<:AbstractMatrix{T}} <: HomogeneousBrownianMotion{T}
"variance rate matrix"
R::P1
"inverse variance (precision) rate matrix"
J::P1
"prior mean vector at the root"
μ::V
"prior variance/covariance matrix at the root"
v::P2
"g0: -log(det(2πR))/2"
g0::T
end
modelname(m::MvFullBrownianMotion) = "Multivariate Brownian motion"
variancename(m::MvFullBrownianMotion) = "evolutionary variance rate matrix: R"
varianceparam(m::MvFullBrownianMotion) = m.R
function MvFullBrownianMotion(R::AbstractMatrix, μ, v=nothing)
numt = length(μ)
T = promote_type(Float64, eltype(R), eltype(μ))
v = getrootvariancemultivariate(T, numt, v)
SV = SVector{numt, T}
size(R) == (numt,numt) || error("R and μ have conflicting sizes")
LA.issymmetric(R) || error("R should be symmetric")
R = PDMat(R)
J = inv(R) # uses cholesky. fails if not symmetric positive definite
MvFullBrownianMotion{T, typeof(R), SV, typeof(v)}(R, J, SV(μ), v, branch_logdet_variance(numt, R))
end
"""
MvFullBrownianMotion(R::AbstractMatrix, μ, v=nothing)
MvFullBrownianMotion(U::AbstractVector, μ, v=nothing)
Constructor for a full multivariate Brownian motion (homogeneous) with
variance rate matrix `R` and prior mean vector `μ` at the root.
If not provided, the prior variance matrix at the root `v` is set to 0.
If a vector `U` is provided, it is used as the upper Cholesky factor of R=U'U,
vectorized row by row, so U should be of length p(p+1)/2 where p is the number
of traits (also the length of μ).
"""
function MvFullBrownianMotion(Uvec::AbstractVector{T}, μ, v=nothing) where T
# TODO: tested, but not used anywhere
numt = length(μ)
(numt*(1+numt)) ÷ 2 == length(Uvec) || error("Uvec and μ have conflicting sizes")
R = zeros(T, numt, numt)
for (k, (i,j)) in enumerate(((i,j) for i in 1:numt for j in i:numt))
R[i,j] = Uvec[k]
end
R .= R' * R
MvFullBrownianMotion(R, μ, v)
end
params(m::MvFullBrownianMotion) = isrootfixed(m) ? (m.R, m.μ) : (m.R, m.μ, m.v)
#= TODO: implement params_optimize and params_original for MvFullBrownianMotion
- optimize variances with a log transformation
- optimize the correlation matrix using a good parametrization
=#
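#= usage sketch: the two constructors give the same model, since R = U'U with
   upper-triangular U = [1 0.5; 0 √3/2], vectorized row by row:
m1 = MvFullBrownianMotion([1 0.5; 0.5 1], [-1, 1])
m2 = MvFullBrownianMotion([1.0, 0.5, 0.8660254037844386], [-1, 1])
varianceparam(m1) ≈ varianceparam(m2) # both ≈ [1 0.5; 0.5 1]
=#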
################################################################
## factor_treeedge
################################################################
factor_treeedge(m::HomogeneousBrownianMotion, edge::PN.Edge) = factor_treeedge(m, edge.length)
function factor_treeedge(m::UnivariateBrownianMotion{T}, t::Real) where T
j = T(m.J / t)
J = LA.Symmetric(SMatrix{2,2}(j,-j, -j,j))
h = SVector{2,T}(zero(T), zero(T))
g = m.g0 - dimension(m) * log(t)/2
return(h,J,g)
end
function factor_treeedge(m::MvDiagBrownianMotion{T,V}, t::Real) where {T,V}
numt = dimension(m); ntot = numt * 2
j = m.J ./ T(t) # diagonal elements
# J = [diag(j) -diag(j); -diag(j) diag(j)]
gen = ((u,tu,v,tv) for u in 1:2 for tu in 1:numt for v in 1:2 for tv in 1:numt)
Juv = (u,tu,v,tv) -> (tu==tv ? (u==v ? j[tu] : -j[tu]) : 0)
J = LA.Symmetric(SMatrix{ntot,ntot}(Juv(x...) for x in gen))
h = SVector{ntot,T}(zero(T) for _ in 1:ntot)
g = m.g0 - numt * log(t)/2
return(h,J,g)
end
function factor_treeedge(m::MvFullBrownianMotion{T,P1,V,P2}, t::Real) where {T,P1,V,P2}
numt = dimension(m); ntot = numt * 2
j = m.J ./ T(t)
# J = [j -j; -j j]
gen = ((u,tu,v,tv) for u in 1:2 for tu in 1:numt for v in 1:2 for tv in 1:numt)
Juv = (u,tu,v,tv) -> (u==v ? j[tu,tv] : -j[tu,tv])
J = LA.Symmetric(SMatrix{ntot,ntot}(Juv(x...) for x in gen))
h = SVector{ntot,T}(zero(T) for _ in 1:ntot)
g = m.g0 - numt * log(t)/2
return(h,J,g)
end
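#= worked example (univariate, σ2 = 2, t = 1): the factor along a tree edge is
   the canonical Gaussian exp(g + h'x - x'Jx/2) over x = (child, parent), with
h = [0, 0]
J = [0.5 -0.5; -0.5 0.5]       # = (1/(σ2 t)) [1 -1; -1 1]
g = -log(2π σ2 t)/2 ≈ -1.2655  # matches the factor_treeedge tests in test_evomodels.jl
=#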
################################################################
## factor_hybridnode
################################################################
factor_hybridnode(m::HomogeneousBrownianMotion, pae::AbstractVector{PN.Edge}) =
factor_hybridnode(m, [e.length for e in pae], [p.gamma for p in pae])
factor_tree_degeneratehybrid(m::HomogeneousBrownianMotion, pae::AbstractVector{PN.Edge}, che::PN.Edge) =
factor_tree_degeneratehybrid(m, che.length, [p.gamma for p in pae])
function factor_hybridnode(m::HomogeneousBrownianMotion{T}, t::AbstractVector, γ::AbstractVector) where T
t0 = T(sum(γ.^2 .* t)) # >0 if hybrid node is not degenerate
factor_tree_degeneratehybrid(m, t0, γ)
end
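# Under the BM, a (non-degenerate) hybrid node is Gaussian given its parents:
#   X_h | X_parents ~ N(sum_i γ_i X_i, σ2 t0) with t0 = sum_i γ_i^2 t_i,
# so the hybrid-family factor reduces to a single edge-like factor with
# pooled length t0, computed below.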
function factor_tree_degeneratehybrid(m::UnivariateBrownianMotion{T}, t0::Real, γ::AbstractVector) where T
j = T(m.J / t0)
nparents = length(γ); nn = 1 + nparents
# modifies γ in place below, to get longer vector: [1 -γ]
γ .= -γ; pushfirst!(γ, one(eltype(γ)))
J = LA.Symmetric(SMatrix{nn,nn, T}(j*x*y for x in γ, y in γ))
h = SVector{nn,T}(zero(T) for _ in 1:nn)
g = m.g0 - dimension(m) * log(t0)/2
return(h,J,g)
end
function factor_tree_degeneratehybrid(m::MvDiagBrownianMotion{T,V}, t0::Real, γ::AbstractVector) where {T,V}
j = m.J ./ T(t0) # diagonal elements. Dj = diag(j)
nparents = length(γ); nn = 1 + nparents
numt = dimension(m); ntot = nn * numt
# J = [Dj -γ1Dj -γ2Dj; -γ1Dj γ1γ1Dj γ1γ2Dj; -γ2Dj γ1γ2Dj γ2γ2Dj]
gen = ((u,tu,v,tv) for u in 0:nparents for tu in 1:numt for v in 0:nparents for tv in 1:numt)
Juv = (u,tu,v,tv) -> (tu==tv ?
(u==0 ? (v==0 ? j[tu] : -γ[v] * j[tu]) :
(v==0 ? -γ[u] * j[tu] : γ[u] * γ[v] * j[tu])) : zero(T))
J = LA.Symmetric(SMatrix{ntot,ntot, T}(Juv(x...) for x in gen))
h = SVector{ntot,T}(zero(T) for _ in 1:ntot)
g = m.g0 - numt * log(t0)/2
return(h,J,g)
end
function factor_tree_degeneratehybrid(m::MvFullBrownianMotion{T,P1,V,P2}, t0::Real, γ::AbstractVector) where {T,P1,V,P2}
j = m.J ./ T(t0)
nparents = length(γ); nn = 1 + nparents
numt = dimension(m); ntot = nn * numt
# J = [j -γ1j -γ2j; -γ1j γ1γ1j γ1γ2j; -γ2j γ1γ2j γ2γ2j]
gen = ((u,tu,v,tv) for u in 0:nparents for tu in 1:numt for v in 0:nparents for tv in 1:numt)
Juv = (u,tu,v,tv) -> (u==0 ? (v==0 ? j[tu,tv] : -γ[v] * j[tu,tv]) :
(v==0 ? -γ[u] * j[tu,tv] : γ[u] * γ[v] * j[tu,tv]))
J = LA.Symmetric(SMatrix{ntot,ntot, T}(Juv(x...) for x in gen))
h = SVector{ntot,T}(zero(T) for _ in 1:ntot)
g = m.g0 - numt * log(t0)/2
return(h,J,g)
end | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 2740 | ################################################################
## Homogeneous OU Model
################################################################
abstract type HomogeneousOrnsteinUhlenbeck{T} <: EvolutionaryModel{T} end
"""
UnivariateOrnsteinUhlenbeck{T} <: HomogeneousOrnsteinUhlenbeck{T}
The univariate Ornstein-Uhlenbeck model. It is homogeneous, that is, has the
same parameters across all edges in the phylogeny.
`σ2` is the variance rate,
`α` the selection strength,
`θ` the optimal value,
`μ` the prior mean at the root, and
`v` the prior variance at the root, 0 by default.
"""
struct UnivariateOrnsteinUhlenbeck{T<:Real} <: HomogeneousOrnsteinUhlenbeck{T}
"stationary variance rate"
γ2::T
"inverse stationary variance (precision) rate"
J::T
"selection strength"
α::T
"optimal value"
θ::T
"prior mean at the root"
μ::T
"prior variance at the root"
v::T
"g0: -log(2π γ2)/2"
g0::T
end
UnivariateType(::Type{<:UnivariateOrnsteinUhlenbeck}) = IsUnivariate()
modelname(m::UnivariateOrnsteinUhlenbeck) = "homogeneous univariate Ornstein-Uhlenbeck"
variancename(m::UnivariateOrnsteinUhlenbeck) = "stationary evolutionary variance γ2"
varianceparam(m::UnivariateOrnsteinUhlenbeck) = m.γ2
nonrootparamnames(m::UnivariateOrnsteinUhlenbeck) = (variancename(m), "selection strength α", "optimal value θ")
function UnivariateOrnsteinUhlenbeck(σ2::U1, α::U2, θ::U3, μ::U4, v=nothing) where {U1<:Number, U2<:Number, U3<:Number, U4<:Number}
T = promote_type(Float64, typeof(σ2), typeof(α), typeof(θ), typeof(μ))
v = getrootvarianceunivariate(T, v)
σ2 > 0 || error("evolutionary variance rate σ2 = $(σ2) must be positive")
α > 0 || error("selection strength α = $(α) must be positive")
γ2 = σ2 / (2 * α)
UnivariateOrnsteinUhlenbeck{T}(γ2, 1/γ2, α, θ, μ, v, -(log2π + log(γ2))/2)
end
params(m::UnivariateOrnsteinUhlenbeck) = isrootfixed(m) ? (m.γ2, m.α, m.θ, m.μ) : (m.γ2, m.α, m.θ, m.μ, m.v)
params_optimize(m::UnivariateOrnsteinUhlenbeck) = [-2*m.g0 - log2π, log(m.α), m.θ, m.μ]
params_original(m::UnivariateOrnsteinUhlenbeck, transparams::AbstractArray) = (exp(transparams[1]), exp(transparams[2]), transparams[3], transparams[4], m.v)
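#= usage sketch, matching test_evomodels.jl:
m = UnivariateOrnsteinUhlenbeck(2.0, 3.0, -2.0, 0.0, 0.4)
m.γ2 ≈ 1/3      # stationary variance σ2/(2α) = 2/6
params(m)       # (1/3, 3.0, -2.0, 0.0, 0.4): root is random here
=#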
function branch_transition_qωjg(obj::UnivariateOrnsteinUhlenbeck, edge::PN.Edge)
q = exp(-obj.α * edge.length)
facvar = (1 - q^2)
j = 1 / obj.γ2 / facvar
ω = (1 - q) * obj.θ
g = obj.g0 - log(facvar)/2
return ([q;;],[ω],[j;;],g)
end
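# Both transition functions encode the Gaussian law along an edge of length t:
#   X_child | X_parent ~ N(q X_parent + (1-q) θ, γ2 (1-q^2)) with q = exp(-α t),
# returned either in precision form (j, above) or in variance form (v, below).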
function branch_transition_qωv!(q::AbstractMatrix, obj::UnivariateOrnsteinUhlenbeck, edge::PN.Edge)
actu = exp(-obj.α * edge.length)
facvar = (1 - actu^2)
v = obj.γ2 * facvar
ω = (1 - actu) * obj.θ
q[1,1] = actu
return ([ω],[v;;])
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 562 | using PhyloGaussianBeliefProp
using DataFrames
using Graphs, MetaGraphsNext
using LinearAlgebra
using PhyloNetworks
using PreallocationTools
using Tables
using Test
const PGBP = PhyloGaussianBeliefProp
@testset "PhyloGaussianBeliefProp.jl" begin
include("test_clustergraph.jl")
include("test_evomodels.jl")
include("test_canonicalform.jl")
include("test_calibration.jl")
# include("test_optimization.jl") redundant with test_calibration.jl, but future tests could use networks in there to vary networks used in test
include("test_exactBM.jl")
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 15501 | @testset "calibration" begin
netstr_unnamed = "(A:2.5,((B:1,#H1:0.5::0.1):1,(C:1,(D:0.5)#H1:0.5::0.9):1):0.5);"
netstr_named = "(((A:4.0,((B1:1.0,B2:1.0)i6:0.6)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C:0.1)i2:1.0)i1:3.0);"
@testset "miscellaneous" begin
ex = PGBP.BPPosDefException("belief 1, integrate 3,4", 1)
io = IOBuffer()
showerror(io, ex)
@test String(take!(io)) == """BPPosDefException: belief 1, integrate 3,4
matrix is not positive definite."""
end
@testset "residual_kldiv!" begin
"""
ref distr (new belief): μ=[0, 1], Σ=[1 0; 0 1], J=[1 0; 0 1], h=[0, 1]
approx distr (prev belief): μ=[1, 0], Σ=[2 1; 1 2], J=(1/3)[2 -1; -1 2],
h=(1/3)[2, -1]
resid distr (ref-approx): ΔJ=(1/3)[1 1; 1 1], Δh=(1/3)[-2, 4]
---
Using R to compute KL divergence:
> library(rags2ridges)
> KLdiv(c(1,0),c(0,1),matrix(c(2,1,1,2),nrow=2),matrix(c(1,0,0,1),nrow=2))
[1] 1.215973
"""
# allocate memory for ::MessageResidual object (resid distr)
res = PGBP.MessageResidual(zeros(2,2), zeros(2,))
res.ΔJ .= ones(2,2)/3
res.Δh .= [-2,4]/3
# allocate memory for ::Belief object (ref distr)
sepset = PGBP.Belief([1, 2], 1, BitArray([1 1]), PGBP.bsepsettype, (:A,:B))
sepset.J .= [1 0; 0 1]
sepset.h .= [0, 1]
PGBP.residual_kldiv!(res, sepset)
@test res.kldiv[1] ≈ 1.215973 rtol=1e-6
end
@testset "no optimization" begin
@testset "Level-1 w/ 4 tips. Univariate. Clique tree" begin
net = readTopology(netstr_named)
df = DataFrame(taxon=["A","B1","B2","C"], y=[1.0,.9,1,-1])
tbl_y = columntable(select(df, :y))
#= fitBM = phylolm(@formula(y ~ 1), df, net; tipnames=:taxon)
sigma2_phylo(fitBM) # reml variance-rate: 0.4714735834478196
# reconstructed root mean and variance
coef(fitBM)[1] # reconstructed root mean: -0.26000871507162693
ancestralStateReconstruction(fitBM).variances_nodes[5,5] # 0.33501871740664146
loglikelihood(fitBM) # restricted likelihood: -4.877930583154144
Assume reml variance-rate (0.471474) and compute posterior mean, posterior
variance and likelihood for comparison. =#
m = PGBP.UnivariateBrownianMotion(0.471474, 0, Inf) # 𝒩(0, ∞) prior on root mean
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b)
PGBP.calibrate!(ctb, [spt])
llscore = -4.877930583154144
for i in eachindex(ctb.belief)
_, tmp = PGBP.integratebelief!(ctb, i) # llscore from norm. constant
@test tmp ≈ llscore
end
@test PGBP.factored_energy(ctb)[3] ≈ llscore
root_ind = findfirst(be -> 1 ∈ PGBP.nodelabels(be), b) # 5
@test PGBP.integratebelief!(b[root_ind])[1][end] ≈
-0.26000871507162693 rtol=1e-5 # posterior root mean
@test (b[root_ind].J \ I)[end,end] ≈
0.33501871740664146 rtol=1e-5 # posterior root variance
PGBP.init_beliefs_reset_fromfactors!(ctb)
@testset "regularization by cluster" begin
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
PGBP.regularizebeliefs_bynodesubtree!(ctb, ct);
PGBP.calibrate!(ctb, [spt]);
_, tmp = PGBP.integratebelief!(ctb, 1)
@test tmp ≈ llscore
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
PGBP.regularizebeliefs_bycluster!(ctb, ct);
PGBP.calibrate!(ctb, [spt]);
_, tmp = PGBP.integratebelief!(ctb, 1)
@test tmp ≈ llscore # graph invariant was preserved
end
end
@testset "Level-1 w/ 4 tips. Univariate. Bethe, regularize on a schedule" begin
net = readTopology(netstr_unnamed)
# tip data simulated from ParamsBM(0,1)
df = DataFrame(y=[-1.81358, 0.468158, 0.658486, 0.643821],
taxon=["A","B","C", "D"])
tbl_y = columntable(select(df, :y))
#= Reroot network at I3 for ancestral reconstruction.
net0 = rootatnode!(deepcopy(net), "I3")
PhyloNetworks.setBranchLength!(net0.edge[1], 0.5) # A now represents I4
df = DataFrame(y=[0.0, 0.468158, 0.658486, 0.643821],
taxon=["A","B","C", "D"])
fitBM = phylolm(@formula(y ~ 1), df, net0; tipnames=:taxon)
sigma2_phylo(fitBM) # reml variance-rate: 0.08612487128235946
coef(fitBM)[1] # reconstructed root mean: 0.21511454631828986
Compare with posterior mean for I3. =#
m = PGBP.UnivariateBrownianMotion(0.0861249, 0) # 𝒩(0, 0) prior on root mean
cg = PGBP.clustergraph!(net, PGBP.Bethe())
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, cg, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b)
PGBP.regularizebeliefs_onschedule!(cgb, cg)
sched = PGBP.spanningtrees_clusterlist(cg, net.nodes_changed)
@test PGBP.calibrate!(cgb, sched, 20; auto=true)
# [ Info: Calibration detected: iter 5, sch 1
ind = PGBP.clusterindex(:I3, cgb)
@test PGBP.integratebelief!(b[ind])[1][end] ≈
0.21511454631828986 rtol=1e-5 # posterior root mean
end
@testset "level-3, 2 tips, 2 traits, 1 missing unscoped in 2 nodes. Join-graph, regularize by node subtree" begin
netstr = "((#H1:0.1::0.4,#H2:0.1::0.4)I1:1.0,(((A:1.0)#H1:0.1::0.6,#H3:0.1::0.4)#H2:0.1::0.6,(B:1.0)#H3:0.1::0.6)I2:1.0)I3;"
net = readTopology(netstr)
# y1 simulated from ParamsBM(2,0.1)
df = DataFrame(taxon=["A","B"], y1=[2.11,2.15], y2=[30.0,missing])
tbl_y = columntable(select(df, :y1, :y2))
m = PGBP.MvFullBrownianMotion([1 0.5; 0.5 1], [0,0], [Inf 0; 0 Inf]) # improper root
cg = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(3))
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, cg, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b)
PGBP.regularizebeliefs_bynodesubtree!(cgb, cg)
sch = [] # schedule based on 1 subtree per variable
for n in net.nodes_changed
subtree = PGBP.nodesubtree_clusterlist(cg, Symbol(n.name))
isempty(subtree[1]) && continue
push!(sch, subtree)
end
@test (@test_logs (:info, "calibration reached: iteration 4, schedule tree 1") PGBP.calibrate!(cgb, sch, 10; auto=true, info=true))
#= Compare posterior means against clique tree estimates:
ct = PGBP.clustergraph!(net, PGBP.Cliquetree());
b_ct = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b_ct, m, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_ct);
PGBP.calibrate!(ctb, [PGBP.spanningtree_clusterlist(ct, net.nodes_changed)]);
PGBP.integratebelief!(b_ct[6]) # cluster I1I2I3: PGBP.clusterindex(:I1I2I3, ctb)
# ([2.121105154896223, 30.005552577448075, 2.1360649504455984, 30.013032475222563, 2.128585052670908, 30.00929252633532], -1.39059577242449)
PGBP.integratebelief!(b_ct[2]) # cluster H1H2I1
# ([2.125583120364, 30.007791560181964, 2.129918967774073, 30.009959483886966, 2.121105154896214, 30.00555257744811], -1.390595772423012)
=#
tmp = PGBP.integratebelief!(b[6]) # 6: PGBP.clusterindex(:I1I2I3, cgb)
@test tmp[2] ≈ -1.390595772423
@test all(tmp[1] .≈ [2.121105154896223, 30.005552577448075, 2.1360649504455984, 30.013032475222563, 2.128585052670943, 30.00929252633547])
tmp = PGBP.integratebelief!(b[2]) # cluster 2: H1H2I1
@test tmp[2] ≈ -1.390595772423
@test all(tmp[1] .≈ [2.125583120364, 30.007791560181964, 2.129918967774073, 30.009959483886966, 2.121105154896214, 30.00555257744811])
#= likelihood using PN.vcv and matrix inversion
Σnet = kron(Matrix(vcv(net)[!,[:A,:B]]), [1 0.5; 0.5 1])
Σnet_y2Bmissing = Σnet[1:3,1:3]; invΣ = inv(Σnet_y2Bmissing)
yvec = [df[1,:y1], df[1,:y2], df[2,:y1]]
X = [1 0; 0 1; 1 0]; Xt = transpose(X)
μ = X * inv(Xt * invΣ * X) * Xt * invΣ * yvec
# [2.128585052670943, 30.00929252633547, 2.128585052670943] # great: same as posterior at I3 = root
loglikelihood(MvNormal(μ, Σnet_y2Bmissing), yvec) # -3.3498677834866997 but not same model: fixed root here
=#
m = PGBP.MvFullBrownianMotion([1 0.5; 0.5 1], [2.128585052670943,30.00929252633547]) # fixed root
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, cg, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b)
PGBP.regularizebeliefs_bynodesubtree!(cgb, cg)
@test (@test_logs PGBP.calibrate!(cgb, sch, 10; auto=true, info=false))
tmp = PGBP.integratebelief!(b[6]) # I3 un-scoped bc fixed root, same posterior mean for I1 and I2
@test tmp[2] ≈ -3.3498677834866997
@test all(tmp[1] .≈ [2.121105154896223, 30.005552577448075, 2.1360649504455984, 30.013032475222563])
end
end
@testset "with optimization" begin
@testset "Level-1 w/ 4 tips. Univariate. Bethe + Optim." begin
net = readTopology(netstr_unnamed)
df = DataFrame(y=[11.275034507978296, 10.032494469945764,
11.49586603350308, 11.004447427824012], taxon=["A","B","C", "D"])
tbl_y = columntable(select(df, :y))
cg = PGBP.clustergraph!(net, PGBP.Bethe())
m = PGBP.UnivariateBrownianMotion(1, 0)
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, cg, m);
cgb = PGBP.ClusterGraphBelief(b)
mod, fenergy, opt = PGBP.calibrate_optimize_clustergraph!(cgb, cg,
net.nodes_changed, tbl_y, df.taxon,
PGBP.UnivariateBrownianMotion, (1,0))
# Compare with RxInfer + Optim
@test fenergy ≈ -3.4312133894974126 rtol=1e-4
@test mod.μ ≈ 10.931640613828181 rtol=1e-4
@test mod.σ2 ≈ 0.15239159696122745 rtol=1e-4
end
@testset "Level-1 w/ 4 tips. Univariate. Clique tree + Optim / Autodiff" begin
net = readTopology(netstr_named)
df = DataFrame(taxon=["A","B1","B2","C"], x=[10,10,missing,0], y=[1.0,.9,1,-1])
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
tbl_y = columntable(select(df, :y)) # 1 trait, for univariate models
tbl_x = columntable(select(df, :x))
m = PGBP.UnivariateBrownianMotion(2, 3, 0)
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
# y: 1 trait, no missing values
#= calibrate_optimize_cliquetree! already tested later
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b)
mod, llscore, opt = PGBP.calibrate_optimize_cliquetree!(ctb, ct, net.nodes_changed,
tbl_y, df.taxon, PGBP.UnivariateBrownianMotion, (1,-2))
@test PGBP.integratebelief!(ctb, spt[3][1])[2] ≈ llscore
@test llscore ≈ -5.174720533524127
@test mod.μ ≈ -0.26000871507162693
@test PGBP.varianceparam(mod) ≈ 0.35360518758586457
=#
lbc = GeneralLazyBufferCache(function (paramOriginal)
mo = PGBP.UnivariateBrownianMotion(paramOriginal...)
bel = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, mo)
return PGBP.ClusterGraphBelief(bel)
end)
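# the lazy buffer cache allocates one set of beliefs per parameter element type,
# so ForwardDiff's dual numbers get their own buffers during autodiff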
mod2, llscore2, opt2 = PGBP.calibrate_optimize_cliquetree_autodiff!(lbc, ct, net.nodes_changed,
tbl_y, df.taxon, PGBP.UnivariateBrownianMotion, (1, -2))
index_ctb = findfirst(isassigned(lbc.bufs.vals, i) for i in 1:length(lbc.bufs.vals))
@test PGBP.integratebelief!(lbc.bufs.vals[index_ctb], spt[3][1])[2].value ≈ llscore2
@test llscore2 ≈ -5.174720533524127
@test mod2.μ ≈ -0.26000871507162693
@test PGBP.varianceparam(mod2) ≈ 0.35360518758586457
#=
using BenchmarkTools
@benchmark PGBP.calibrate_optimize_cliquetree_autodiff!(lbc, ct, net.nodes_changed, tbl_y, df.taxon, PGBP.UnivariateBrownianMotion, (1, -2))
@benchmark PGBP.calibrate_optimize_cliquetree!(ctb, ct, net.nodes_changed, tbl_y, df.taxon, PGBP.UnivariateBrownianMotion, (1, -2))
=#
#= ML solution the matrix-way, analytical for BM:
# for y: univariate
Σ = Matrix(vcv(net)[!,Symbol.(df.taxon)])
n=4 # number of data points
i = ones(n) # intercept
μhat = inv(transpose(i) * inv(Σ) * i) * (transpose(i) * inv(Σ) * tbl.y) # -0.26000871507162693
r = tbl.y .- μhat
σ2hat_ML = (transpose(r) * inv(Σ) * r) / n # 0.35360518758586457
llscore = - n/2 - logdet(2π * σ2hat_ML .* Σ)/2 # -5.174720533524127
# for x: third value is missing
xind = [1,2,4]; n = length(xind); i = ones(n) # intercept
Σ = Matrix(vcv(net)[!,Symbol.(df.taxon)])[xind,xind]
μhat = inv(transpose(i) * inv(Σ) * i) * (transpose(i) * inv(Σ) * tbl.x[xind]) # 3.500266520382341
r = tbl.x[xind] .- μhat
σ2hat_ML = (transpose(r) * inv(Σ) * r) / n # 11.257682945973125
llscore = - n/2 - logdet(2π * σ2hat_ML .* Σ)/2 # -9.215574122592923
=#
# x,y: 2 traits, some missing values
m = PGBP.MvDiagBrownianMotion((2,1), (3,-3), (0,0))
b = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b)
mod, llscore, opt = PGBP.calibrate_optimize_cliquetree!(ctb, ct, net.nodes_changed,
tbl, df.taxon, PGBP.MvDiagBrownianMotion, ((2,1), (1,-1)))
@test PGBP.integratebelief!(ctb, spt[3][1])[2] ≈ llscore
@test llscore ≈ -14.39029465611705 # -5.174720533524127 -9.215574122592923
@test mod.μ ≈ [3.500266520382341, -0.26000871507162693]
@test PGBP.varianceparam(mod) ≈ [11.257682945973125,0.35360518758586457]
#= calibrate_optimize_cliquetree_autodiff! already tested earlier
lbc = GeneralLazyBufferCache(function (paramOriginal)
mo = PGBP.MvDiagBrownianMotion(paramOriginal...)
bel = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, mo)
return PGBP.ClusterGraphBelief(bel)
end)
mod2, llscore2, opt2 = PGBP.calibrate_optimize_cliquetree_autodiff!(lbc, ct, net.nodes_changed,
tbl, df.taxon, PGBP.MvDiagBrownianMotion, ((2, 1), (1, -1)))
@test llscore2 ≈ -14.39029465611705 # -5.174720533524127 -9.215574122592923
@test mod2.μ ≈ [3.500266520382341, -0.26000871507162693]
@test PGBP.varianceparam(mod2) ≈ [11.257682945973125, 0.35360518758586457]
=#
#=
using BenchmarkTools
@benchmark PGBP.calibrate_optimize_cliquetree_autodiff!(lbc, ct, net.nodes_changed, tbl, df.taxon, PGBP.MvDiagBrownianMotion, ((2, 1), (1, -1)))
@benchmark PGBP.calibrate_optimize_cliquetree!(ctb, ct, net.nodes_changed, tbl, df.taxon, PGBP.MvDiagBrownianMotion, ((2, 1), (1, -1)))
=#
end
end
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 4914 | @testset "canonical form" begin
netstr = "(((A:4.0,((B1:1.0,B2:1.0)i6:0.6)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C)i2:1.0)i1:3.0);"
df = DataFrame(taxon=["A","B1","B2","C"],
x=[10, 10,missing, 0],
y=[1.0, 0.9,1.0, -1.0])
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
tbl_y = columntable(select(df, :y)) # 1 trait, for univariate models
@testset "basics" begin
net = readTopology(netstr)
@test_throws ErrorException PGBP.shrinkdegenerate_treeedges(net)
net.edge[8].length=0.0 # external edge
@test_throws ErrorException PGBP.shrinkdegenerate_treeedges(net)
net.edge[8].length=0.1
net.edge[4].length=0.0 # tree edge below hybrid
net_polytomy = PGBP.shrinkdegenerate_treeedges(net)
@test writeTopology(net_polytomy) == "((A:4.0,(B1:1.0,B2:1.0)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C:0.1)i2:1.0)i1;"
@test PGBP.isdegenerate.(net.node) == [0,0,0, 1,0,0,0,0,0]
@test PGBP.hasdegenerate(net)
for i in [5,7] net.edge[i].length=0.0; end # hybrid edge: makes the hybrid degenerate
net.edge[4].length=0.6 # back to original
@test PGBP.isdegenerate.(net.node) == [0,0,0,0, 1,0,0,0,0]
@test PGBP.unscope(net.hybrid[1])
nm = trues(3,2); nm[2,1]=false
b1 = PGBP.Belief(Int8[5,6], 3, nm, PGBP.bclustertype, 1)
@test PGBP.nodelabels(b1) == [5,6]
@test size(b1.inscope) == (3,2)
@test length(b1.μ) == 5
@test size(b1.J) == (5,5)
end # of basic testset
@testset "degenerate hybrid: complex case" begin
net = readTopology(netstr)
net.edge[8].length=0.1 # was missing
for i in [5,7] net.edge[i].length=0.0; end # hybrid edge: makes the hybrid degenerate
g = PGBP.moralize!(net)
PGBP.triangulate_minfill!(g)
ct = PGBP.cliquetree(g) # 6 sepsets, 7 cliques
#=
ne(ct), nv(ct)
[ct[lab] for lab in labels(ct)]
[ct[lab...] for lab in edge_labels(ct)]
[(n.name, n.number) for n in net.nodes_changed]
=#
# fixit: one clique should have both the child & parents of the degenerate hybrid
b = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, PGBP.UnivariateBrownianMotion(1,0,1))
beliefnodelabels = [[6,5], [7,6], [8,6], [5,4,2], [4,2,1], [3,2], [9,4], [6], [6], [5], [4,2], [2], [4]]
@test [PGBP.nodelabels(be) for be in b] == beliefnodelabels
@test PGBP.inscope(b[5]) == trues(2,3)
@test isempty(PGBP.scopeindex([5], b[1]))
@test PGBP.scopeindex([6], b[1]) == [1,2]
@test_throws ErrorException PGBP.scopeindex([2], b[1])
b = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, PGBP.UnivariateBrownianMotion(1,0,0))
@test [PGBP.nodelabels(be) for be in b] == beliefnodelabels
@test PGBP.inscope(b[5]) == [true true false; true true false] # root not in scope
end # of degenerate testset
@testset "non-degenerate hybrid: simpler case" begin
net = readTopology(netstr)
net.edge[8].length=0.1 # was missing
g = PGBP.moralize!(net)
PGBP.triangulate_minfill!(g)
ct = PGBP.cliquetree(g)
m = PGBP.UnivariateBrownianMotion(2, 3, 0) # 0 root prior variance: fixed root
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
# ["$(be.type): $(be.nodelabel)" for be in b]
@test b[1].J ≈ m.J/net.edge[4].length .* [1 -1; -1 1]
@test b[1].h == [0,0]
@test b[1].g[1] ≈ -log(2π * net.edge[4].length * m.σ2)/2
bp = m.J/net.edge[3].length # bp for Belief Precision
@test b[2].J ≈ bp .* [1;;] # external edge to B2
@test b[2].h ≈ bp * [tbl_y.y[3]]
@test b[2].g[1] ≈ -(log(2π/bp) + bp*tbl_y.y[3]^2)/2
bp = m.J/net.edge[2].length
@test b[3].J ≈ bp .* [1;;] # external edge to B1
@test b[3].h ≈ bp * [tbl_y.y[2]]
@test b[3].g[1] ≈ -(log(2π/bp) + bp*tbl_y.y[2]^2)/2
bp = m.J/(net.edge[7].gamma^2 * net.edge[7].length + net.edge[5].gamma^2 * net.edge[5].length)
@test b[4].J ≈ bp .* [1 -.9 -.1;-.9 .81 .09;-.1 .09 .01]
@test b[4].h ≈ [0,0,0]
@test b[4].g[1] ≈ -log(2π/bp)/2
# 3 factors in b[5]: 1->4, 1->2, root 1: N(μ, l4 σ2) x N(μ, l2 σ2)
bp = m.J ./[net.edge[6].length, net.edge[9].length]
@test b[5].J ≈ [bp[1] 0; 0 bp[2]] # LinearAlgebra.diagm(bp)
@test b[5].h ≈ bp .* [m.μ, m.μ]
@test b[5].g[1] ≈ - (sum(log.(2π ./ bp) .+ bp .* m.μ^2))/2
# 7 clusters, so b[7+1] = first sepset
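# each call below sends one message: propagate_belief!(to, sepset, from, residual)
# passes cluster `from`'s belief through `sepset` into cluster `to`, here
# directing all messages toward the root cluster b[5]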
s=b[8]; PGBP.propagate_belief!(b[1],s,b[2], PGBP.MessageResidual(s.J,s.h))
s=b[9]; PGBP.propagate_belief!(b[1],s,b[3], PGBP.MessageResidual(s.J,s.h))
s=b[10];PGBP.propagate_belief!(b[4],s,b[1], PGBP.MessageResidual(s.J,s.h))
s=b[12];PGBP.propagate_belief!(b[4],s,b[6], PGBP.MessageResidual(s.J,s.h))
s=b[13];PGBP.propagate_belief!(b[4],s,b[7], PGBP.MessageResidual(s.J,s.h))
s=b[11];PGBP.propagate_belief!(b[5],s,b[4], PGBP.MessageResidual(s.J,s.h))
# tree traversed once to cluster 5 as root
rootstate, dataloglik = PGBP.integratebelief!(b[5])
@test dataloglik ≈ -10.732857817537196
#= likelihood using PN.vcv and matrix inversion
Σnet = m.σ2 .* Matrix(vcv(net)[!,Symbol.(df.taxon)])
loglikelihood(MvNormal(repeat([m.μ],4), Σnet), tbl.y) # -10.732857817537196
r = tbl.y .- m.μ; - transpose(r) * inv(Σnet) * r /2 - logdet(2π .* Σnet)/2
=#
end # of non-degenerate testset
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 6822 | @testset "cluster graphs" begin
netstr = "(((A:4.0,(B:1.0)#H1:1.1::0.9):0.5,((#H1:1.0::0.1,C:0.6):1.0,C2):1.0):3.0,D:5.0);"
# network from Mateescu et al. (2010) with 2 extra leaves
mateescu = "((((g:1)#H4:1)#H2:2.04,(d:1,(#H2:0.01::0.5,#H4:1::0.5)#H3:1)D:1,(#H3:1::0.5)#H1:0.01)B:1,#H1:1.01::0.5)A;"
@testset "Utilities" begin
net = readTopology(netstr)
g = PhyloGaussianBeliefProp.moralize!(net)
@test nv(g) == net.numNodes
@test ne(g) == net.numEdges + 1 # 1 extra: moralized
@test PhyloGaussianBeliefProp.triangulate_minfill!(g) ==
[:A,:B,:H1,:C,:C2,:D,:I5,:I1,:I2,:I3,:I4]
@test ne(g) == 13 # 1 extra fill edge
@test PGBP.parentinformation(net.node[1], net) == ([4.0], [1.0], [8])
@test PGBP.parentinformation(net.hybrid[1], net) == ([1.1,1.], [.9,.1], [8,6])
# hybrid ladder H2 -> H1; and H2 child of root
net6 = readTopology("(#H2:0::0.2,((C:1,((B:1)#H1:100::0.6)#H2:0::0.8),(#H1:0,(A1:0.1,A2:0.1):0.2):0.3):0.1,O:3);")
PGBP.preprocessnet!(net6, "i")
PGBP.addtreenode_belowdegeneratehybrid!(net6)
@test net6.node[13].name == "i6"
@test length(net6.nodes_changed) == 13
end
#=
function metaplot(gr)
elab = [gr[label_for(gr,src(e)),label_for(gr,dst(e))] for e in edges(gr)]
gplothtml(gr, nodelabel=collect(labels(gr)), edgelabel=elab);
end
metaplot(ct)
=#
@testset "Bethe cluster graph" begin
net = readTopology(netstr)
cg = PGBP.clustergraph!(net, PGBP.Bethe())
#= number of clusters:
1. factor clusters: 1 / node family = 1 per non-root node, except when a family is a subset of another
2. variable clusters: 1 / internal node (including the root, excluding leaves)
=#
numfactorclusters = net.numNodes-1
numvarclusters = net.numNodes-net.numTaxa
@test nv(cg) == numfactorclusters + numvarclusters
#= number of edges in the cluster graph, assuming
* leaves are not hybrids
* bicombining: hybrid nodes have 2 parents
1. external edge in net: 1 per leaf → 1 edge in cluster graph
2. internal tree edge in net, e.g. internal tree node → 2 edges in graph
3. hybrid node family, 1 per hybrid node in net → 3 edges in graph
=#
ninternal_tree = sum(!e.hybrid for e in net.edge) - net.numTaxa
@test ne(cg) == (net.numTaxa + 2*ninternal_tree + 3*net.numHybrids)
@test length(connected_components(cg)) == 1 # check for 1 connected component
@test all(t[2] for t in PGBP.check_runningintersection(cg, net))
cluster_properties = cg.vertex_properties
clusters = [v[2][2] for v in values(cluster_properties)]
# variable clusters: [1], [3], [4], [6], [8], [9]
# factor clusters: [2, 1], [3, 1], [4, 3], [5, 4], [6, 4], [7, 6], [8, 3],
# [9, 8, 6], [10, 9], [11, 8]
@test PGBP.isfamilypreserving(clusters, net)[1]
# case with 3-cycle: one node family is a subset of another
deleteleaf!(net, "C"); deleteleaf!(net, "C2"); removedegree2nodes!(net)
preorder!(net)
cg = PGBP.clustergraph!(net, PGBP.Bethe())
@test nv(cg) == 4+3 # not (5-1) node families + 3 internal nodes
@test ne(cg) == 6 # each internal node is 2 clusters
clusters = [v[2][2] for v in values(cg.vertex_properties)]
@test PGBP.isfamilypreserving(clusters, net)[1]
end
@testset "LTRIP cluster graph" begin
net = readTopology(netstr)
clusters = Vector{Int8}[ # node families, nodes specified as preorder indices
[11, 8], [10, 9], [7, 6], [5, 4], [2, 1],
[9, 8, 6], [8, 3], [6, 4], [4, 3], [3, 1]]
# below: would error (test would fail) if `clusters` not family-preserving for net
cg = PGBP.clustergraph!(net, PGBP.LTRIP(clusters, net))
output_clusters = collect(v[2][2] for v in values(cg.vertex_properties))
@test sort(clusters) == sort(output_clusters)
@test is_connected(cg)
@test all(t[2] for t in PGBP.check_runningintersection(cg, net))
cg = PGBP.clustergraph!(net, PGBP.LTRIP(net))
@test all(t[2] for t in PGBP.check_runningintersection(cg, net))
clusters2 = [v[2][2] for v in values(cg.vertex_properties)] # has extra root cluster
@test PGBP.isfamilypreserving(clusters2, net)[1]
clusters3 = Vector{Int8}[
[11, 8], [10, 9], [7, 6], [5, 4], [2, 1],
[9, 8], [8, 3], [6, 4], [4, 3], [3, 1]] # not family-preserving
@test_throws ErrorException PGBP.LTRIP(clusters3, net)
end
@testset "Join-graph structuring" begin
net = readTopology(mateescu)
# Mateescu network: 1 bucket has multiple minibuckets
cg = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(3))
@test all(t[2] for t in PGBP.check_runningintersection(cg, net))
@test !is_tree(cg)
clusters = [[1],[2,1],[3,2,1],[4,3,2],[5,2],[5,4,3],[6,5,2],[7,6,5],[8,7],[9,4]]
# clusters for netstr: [[2,1],[3],[3,1],[4,3],[5,4],[6,4,3],[7,6],[8,6,3],[9,8,6],[10,9],[11,8]]
@test sort([v[2][2] for v in values(cg.vertex_properties)]) == clusters
sepsets = [[1],[2],[2,1],[3,2],[4],[4,3],[5],[5,2],[6,5],[7]]
# sepsets for netstr: [[1],[3],[3],[4],[4,3],[6],[6,3],[8],[8,6],[9]]
@test sort([cg[l1,l2] for (l1,l2) in edge_labels(cg)]) == sepsets
@test PGBP.isfamilypreserving(clusters, net)[1]
# maxclustersize smaller than largest family:
@test_throws ErrorException PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(2))
end
@testset "Clique tree" begin
net = readTopology(netstr)
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
@test ne(ct) == 8
@test sort([ct[lab...] for lab in edge_labels(ct)]) == [[1],[3],[4],[6],
[6,3],[8],[8,6],[9]]
@test is_tree(ct)
@test all(t[2] for t in PGBP.check_runningintersection(ct, net))
cliques = [v[2][2] for v in values(ct.vertex_properties)]
@test PGBP.isfamilypreserving(cliques, net)[1]
net = readTopology(mateescu)
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
@test is_tree(ct)
@test ct[:H3DH1B][2] == [5,4,3,2] # largest clique
end
@testset "Traversal" begin
net = readTopology(netstr)
cg = PGBP.clustergraph!(net, PGBP.Bethe())
clusterlabs = Set(labels(cg))
n = length(clusterlabs) - 1 # number of edges in each spanning tree
c_edges = Set(MetaGraphsNext.arrange(cg, e...) for e in edge_labels(cg))
s_edges = Set{eltype(c_edges)}() # edges covered by schedule
sched = PGBP.spanningtrees_clusterlist(cg, net.nodes_changed)
for spt in sched # check: spt is a tree spanning all clusters
@test length(spt[1]) == n
spt_edgecodes = [Edge(code_for(cg, spt[1][i]), code_for(cg, spt[2][i])) for i in 1:n]
sg, _ = induced_subgraph(cg, spt_edgecodes)
@test Set(labels(sg)) == clusterlabs
@test is_tree(sg)
for i in 1:n push!(s_edges, MetaGraphsNext.arrange(cg, spt[1][i], spt[2][i])); end
end
@test c_edges == s_edges # check: `sched` covers all edges
end
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 14150 | @testset "evolutionary models parameters" begin
m = PGBP.MvDiagBrownianMotion([1,0.5], [-1,1]) # default 0 root variance
m = PGBP.MvDiagBrownianMotion([1,0.5], [-1,1], [0,1])
par = PGBP.params_optimize(m)
oripar = PGBP.params_original(m, par)
@test oripar == PGBP.params(m)
@test PGBP.dimension(m) == 2
m = PGBP.MvFullBrownianMotion([1.0, .5, 0.8660254037844386], [-1,1]) # default 0 root variance
@test PGBP.varianceparam(m) ≈ [1 0.5; 0.5 1]
@test PGBP.rootpriorvariance(m) == [0 0; 0 0]
m = PGBP.MvFullBrownianMotion([1 0.5; 0.5 1], [-1,1], [10^10 0; 0 10^10])
@test PGBP.dimension(m) == 2
m = PGBP.UnivariateBrownianMotion(2, 3)
par = PGBP.params_optimize(m)
oripar = PGBP.params_original(m, par)
@test oripar[1] ≈ PGBP.params(m)[1]
@test oripar[2] ≈ PGBP.params(m)[2]
h,J,g = PGBP.factor_treeedge(m, 1)
@test h == [0.0,0]
@test J == [.5 -.5; -.5 .5]
@test g ≈ -1.2655121234846454
m2 = PGBP.UnivariateBrownianMotion(2, 3, 0)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion(2.0, 3, 0)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], [3])
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], [3.0])
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], 3)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], 3.0)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], 3, 0)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2.0], 3, 0)
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], 3, [0])
@test m == m2
m2 = PGBP.UnivariateBrownianMotion([2], 3.0, [0])
@test m == m2
@test_throws "scalars" PGBP.UnivariateBrownianMotion([2,2], [3], [0])
@test_throws "scalars" PGBP.UnivariateBrownianMotion([2,2], 3, 0)
@test_throws "scalars" PGBP.UnivariateBrownianMotion([2], [3,3])
@test_throws "scalars" PGBP.UnivariateBrownianMotion([2], 3, [0,0])
end
@testset "evolutionary models likelihood" begin
netstr = "(((A:4.0,((B1:1.0,B2:1.0)i6:0.6)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C:0.1)i2:1.0)i1:3.0);"
net = readTopology(netstr)
df = DataFrame(taxon=["A","B1","B2","C"], x=[10,10,missing,0], y=[1.0,.9,1,-1])
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
tbl_y = columntable(select(df, :y)) # 1 trait, for univariate models
tbl_x = columntable(select(df, :x))
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
rootclusterindex = spt[3][1]
# allocate beliefs to avoid re-allocation of same sizes for multiple tests
m_uniBM_fixedroot = PGBP.UnivariateBrownianMotion(2, 3, 0)
b_y_fixedroot = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m_uniBM_fixedroot)
m_uniBM_randroot = PGBP.UnivariateBrownianMotion(2, 3, Inf)
b_y_randroot = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m_uniBM_randroot)
m_biBM_fixedroot = PGBP.MvDiagBrownianMotion((2,1), (3,-3), (0,0))
b_xy_fixedroot = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m_biBM_fixedroot)
m_biBM_randroot = PGBP.MvDiagBrownianMotion((2,1), (3,-3), (0.1,10))
b_xy_randroot = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m_biBM_randroot)
@testset "homogeneous univariate BM" begin
@testset "Fixed Root, no missing" begin
# y no missing, fixed root
show(devnull, m_uniBM_fixedroot)
PGBP.init_beliefs_assignfactors!(b_y_fixedroot, m_uniBM_fixedroot, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_y_fixedroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -10.732857817537196
end
@testset "Infinite Root, no missing" begin
# y no missing, infinite root variance
PGBP.init_beliefs_assignfactors!(b_y_randroot, m_uniBM_randroot, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_y_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -5.899094849099194
end
@testset "Random Root, with missing" begin
# x with missing, random root
m = PGBP.UnivariateBrownianMotion(2, 3, 0.4)
b = (@test_logs (:error,"tip B2 in network without any data") PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, ct, m);)
PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -13.75408386332493
end
end
@testset "homogeneous univariate OU" begin
@testset "Random Root, no missing" begin
m = PGBP.UnivariateOrnsteinUhlenbeck(2, 3, -2, 0.0, 0.4)
show(devnull, m)
PGBP.init_beliefs_assignfactors!(b_y_randroot, m, tbl_y, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_y_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -42.31401134496844
#= code to compute the univariate OU likelihood by hand
# 1. calculate vcv of all nodes in preorder
V(t::Number) = (1-exp(-2m.α * t)) * m.γ2 # variance conditional on parent, one edge
q(t::Number) = exp(-m.α * t) # actualization along tree edge: weight of parent value
q(edge) = q(edge.length) * edge.gamma
V(parentedges) = sum(V(e.length) * e.gamma * e.gamma for e in parentedges)
net_vcv = zeros(9,9) # all 9 nodes
net_vcv[1,1] = PGBP.rootpriorvariance(m)
net_mean = zeros(9,1) # all 9 nodes expectations
net_mean[1] = m.μ
for i in 2:9 # non-root nodes
n = net.nodes_changed[i]
pae = [PhyloNetworks.getparentedge(n)]
if n.hybrid push!(pae, PhyloNetworks.getparentedgeminor(n)); end
nparents = length(pae)
pa = [PhyloNetworks.getparent(e) for e in pae]
pai = indexin(pa, net.nodes_changed)
# var(Xi)
net_vcv[i,i] = V(pae) # initialize
for (j1,e1) in zip(pai, pae) for (j2,e2) in zip(pai, pae)
net_vcv[i,i] += q(e1) * q(e2) * net_vcv[j1,j2]
end; end
# cov(Xi,Xj) for j<i in preorder
for j in 1:(i-1)
for (j1,e1) in zip(pai, pae)
net_vcv[i,j] += q(e1) * net_vcv[j1,j]
end
net_vcv[j,i] = net_vcv[i,j]
end
# E[Xi]
for (j1,e1) in zip(pai, pae)
net_mean[i] += q(e1) * net_mean[j1] + (e1.gamma - q(e1)) * m.θ
end
end
net_vcv
[n.name for n in net.nodes_changed] # i1,i2,C,i4,H5,i6,B2,B1,A
df.taxon # A,B1,B2,C with preorder indices: 9,8,7,3
taxon_ind = [findfirst(isequal(tax), n.name for n in net.nodes_changed) for tax in df.taxon]
print(net_vcv[taxon_ind,taxon_ind]) # copy-pasted below
Σnet = [
0.3333333333334586 5.651295717406613e-10 5.651295717406613e-10 2.0226125393342098e-8;
5.651295717406613e-10 0.33332926986180506 0.000822187254027199 1.4032919760109094e-6;
5.651295717406613e-10 0.000822187254027199 0.33332926986180506 1.4032919760109094e-6;
2.0226125393342098e-8 1.4032919760109094e-6 1.4032919760109094e-6 0.3334240245358365]
print(net_mean[taxon_ind]) # copy-pasted below
Mnet = [-1.9999972580818273, -1.9998778851480223, -1.9998778851480223, -1.92623366519752]
loglikelihood(MvNormal(Mnet, Σnet), tbl.y) # -42.31401134496844
=#
end
end
@testset "Diagonal BM" begin
@testset "homogeneous, fixed root" begin
PGBP.init_beliefs_assignfactors!(b_xy_fixedroot, m_biBM_fixedroot, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_fixedroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -24.8958130127972
end
@testset "homogeneous, random root" begin
PGBP.init_beliefs_assignfactors!(b_xy_randroot, m_biBM_randroot, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -21.347496753649892
end
@testset "homogeneous, improper root" begin
m = PGBP.MvDiagBrownianMotion((2,1), (1,-3), (Inf,Inf))
PGBP.init_beliefs_assignfactors!(b_xy_randroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -17.66791635814575
end
end
@testset "Full BM" begin
@testset "homogeneous, fixed root" begin
m = PGBP.MvFullBrownianMotion([2.0 0.5; 0.5 1.0], [3.0,-3.0])
PGBP.init_beliefs_assignfactors!(b_xy_fixedroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_fixedroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -24.312323855394055
end
@testset "homogeneous, random root" begin
m = PGBP.MvFullBrownianMotion([2.0 0.5; 0.5 1.0], [3.0,-3.0],
[0.1 0.01; 0.01 0.2])
PGBP.init_beliefs_assignfactors!(b_xy_randroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -23.16482738327936
end
@testset "homogeneous, improper root" begin
m = PGBP.MvFullBrownianMotion([2.0 0.5; 0.5 1.0], [3.0,-3.0],
[Inf 0; 0 Inf])
PGBP.init_beliefs_assignfactors!(b_xy_randroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -16.9626044836951
end
end
@testset "heterogeneous BM" begin
@testset "Fixed Root one mv rate" begin
m = PGBP.HeterogeneousBrownianMotion([2.0 0.5; 0.5 1.0], [3.0, -3.0])
show(devnull, m)
PGBP.init_beliefs_assignfactors!(b_xy_fixedroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_fixedroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -24.312323855394055
end
@testset "Random root several mv rates" begin
rates = [[2.0 0.5; 0.5 1.0], [2.0 0.5; 0.5 1.0]]
colors = Dict(9 => 2, 7 => 2, 8 => 2) # includes one hybrid edge
pp = PGBP.PaintedParameter(rates, colors)
show(devnull, pp)
m = PGBP.HeterogeneousBrownianMotion(pp, [3.0, -3.0], [0.1 0.01; 0.01 0.2])
show(devnull, m)
PGBP.init_beliefs_assignfactors!(b_xy_randroot, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b_xy_randroot)
PGBP.propagate_1traversal_postorder!(ctb, spt...)
_, tmp = PGBP.integratebelief!(ctb, rootclusterindex)
@test tmp ≈ -23.16482738327936
end
end
#= likelihood using PN.vcv and matrix inversion
using Distributions
Σnet = Matrix(vcv(net)[!,Symbol.(df.taxon)])
## univariate y
loglikelihood(MvNormal(repeat([3.0],4), Σnet), tbl.y) # -10.732857817537196
## univariate x
xind = [1,2,4]; n = length(xind); i = ones(n) # intercept
Σ = 2.0 * Σnet[xind,xind] .+ 0.4
loglikelihood(MvNormal(repeat([3.0],3), Σ), Vector{Float64}(tbl.x[xind])) # -13.75408386332493
## Univariate y, REML
ll(μ) = loglikelihood(MvNormal(repeat([μ],4), Σnet), tbl.y)
using Integrals
log(solve(IntegralProblem((x,p) -> exp(ll(x)), -Inf, Inf), QuadGKJL()).u) # -5.899094849099194
## Diagonal x, y
loglikelihood(MvNormal(repeat([3],3), 2 .* Σnet[xind,xind]), Vector{Float64}(tbl.x[xind])) +
loglikelihood(MvNormal(repeat([-3],4), 1 .* Σnet), tbl.y)
## Diagonal x y random root
loglikelihood(MvNormal(repeat([3],3), 2 .* Σnet[xind,xind] .+ 0.1), Vector{Float64}(tbl.x[xind])) +
loglikelihood(MvNormal(repeat([-3],4), 1 .* Σnet .+ 10), tbl.y)
# Diagonal x y REML
ll(μ) = loglikelihood(MvNormal(repeat([μ[1]],3), 2 .* Σnet[xind,xind]), Vector{Float64}(tbl.x[xind])) + loglikelihood(MvNormal(repeat([μ[2]],4), 1 .* Σnet), tbl.y)
using Integrals
log(solve(IntegralProblem((x,p) -> exp(ll(x)), [-Inf, -Inf], [Inf, Inf]), HCubatureJL(), reltol = 1e-16, abstol = 1e-16).u) # -17.66791635814575
# Full x y fixed root
R = [2.0 0.5; 0.5 1.0]
varxy = kron(R, Σnet)
xyind = vcat(xind, 4 .+ [1,2,3,4])
varxy = varxy[xyind, xyind]
meanxy = vcat(repeat([3.0],3), repeat([-3.0],4))
datxy = Vector{Float64}(vcat(tbl.x[xind], tbl.y))
loglikelihood(MvNormal(meanxy, varxy), datxy) # -24.312323855394055
# Full x y random root
R = [2.0 0.5; 0.5 1.0]
V = [0.1 0.01; 0.01 0.2]
varxy = kron(R, Σnet) + kron(V, ones(4, 4))
xyind = vcat(xind, 4 .+ [1,2,3,4])
varxy = varxy[xyind, xyind]
meanxy = vcat(repeat([3.0],3), repeat([-3.0],4))
datxy = Vector{Float64}(vcat(tbl.x[xind], tbl.y))
loglikelihood(MvNormal(meanxy, varxy), datxy) # -23.16482738327936
# Full x y improper root
R = [2.0 0.5; 0.5 1.0]
varxy = kron(R, Σnet)
xyind = vcat(xind, 4 .+ [1,2,3,4])
varxy = varxy[xyind, xyind]
meanxy = vcat(repeat([3.0],3), repeat([-3.0],4))
datxy = Vector{Float64}(vcat(tbl.x[xind], tbl.y))
ll(μ) = loglikelihood(MvNormal(vcat(repeat([μ[1]],3),repeat([μ[2]],4)), varxy), datxy)
using Integrals
log(solve(IntegralProblem((x,p) -> exp(ll(x)), [-Inf, -Inf], [Inf, Inf]),
HCubatureJL(), reltol = 1e-16, abstol = 1e-16).u) # -16.9626044836951
=#
end
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 12467 | @testset "exact tree calibration for the BM" begin
netstr = "((A:1.5,B:1.5):1,(C:1,(D:0.5, E:0.5):0.5):1.5);"
df = DataFrame(taxon=["A","B","C","D","E"], x=[10,10,3,0,1], y=[1.0,.9,1,-1,-0.9])
n = 5
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
tbl_y = columntable(select(df, :y)) # 1 trait, for univariate models
tbl_x = columntable(select(df, :x))
net = readTopology(netstr)
g = PGBP.moralize!(net)
PGBP.triangulate_minfill!(g)
ct = PGBP.cliquetree(g)
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
@testset "comparison with PhyloEM" begin # of PhyloEM
m = PGBP.UnivariateBrownianMotion(1, 0, 10000000000) # "infinite" root variance to match phyloEM
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b)
PGBP.calibrate!(cgb, [spt])
# Test conditional expectations and variances
@test PGBP.default_sepset1(cgb) == 9
llscore = -18.83505
condexp = [1,0.9,1,-1,-0.9,0.4436893,0.7330097,0.009708738,-0.6300971]
condexp = condexp[[6,8,9,5,4,3,7,2,1]]
tmp1, tmp = PGBP.integratebelief!(cgb)
@test tmp1 ≈ [condexp[cgb.belief[9].nodelabel[1]]] atol=1e-6
@test tmp ≈ llscore atol=1e-6
for i in eachindex(cgb.belief)
tmp1, tmp = PGBP.integratebelief!(cgb, i)
@test tmp1[end] ≈ condexp[cgb.belief[i].nodelabel[end]] atol=1e-6
@test tmp ≈ llscore atol=1e-6
end
# Test conditional variances
condvar = [0.0000000,0.0000000,0.0000000,0.0000000,0.0000000,0.9174757,0.5970874,0.3786408,0.2087379]
condvar = condvar[[6,8,9,5,4,3,7,2,1]]
for i in eachindex(cgb.belief)
vv = inv(cgb.belief[i].J)[end,end]
@test vv ≈ condvar[cgb.belief[i].nodelabel[end]] atol=1e-6
end
# Test conditional covariances
condcovar = [0.0000000,0.0000000,0.0000000,0.0000000,0.0000000,NaN,0.3932039,0.2038835,0.1262136]
condcovar = condcovar[[6,8,9,5,4,3,7,2,1]]
for i in eachindex(cgb.belief)
vv = inv(cgb.belief[i].J)
size(vv, 1) == 2 && @test vv[1,2] ≈ condcovar[cgb.belief[i].nodelabel[1]] atol=1e-6
end
#= likelihood and moments using PhylogeneticEM from R
library(PhylogeneticEM)
# tree and data
tree <- read.tree(text = "((A:1.5,B:1.5):1,(C:1,(D:0.5, E:0.5):0.5):1.5);")
tree <- reorder(tree, "postorder")
ntips <- length(tree$tip.label)
Y_data <- c(1.0,.9,1,-1,-0.9)
names(Y_data) <- c("A", "B", "C", "D", "E")
Y_data <- t(Y_data)
# tree traversal
theta_var <- 1
params_random <- params_BM(p = 1, variance = diag(theta_var, 1, 1), value.root = rep(0, 1),
random = TRUE, var.root = diag(10000000000, 1, 1))
resE <- PhylogeneticEM:::compute_E.upward_downward(phylo = tree,
Y_data = Y_data,
process = "BM",
params_old = params_random)
# likelihood
log_likelihood(params_random, phylo = tree, Y_data = Y_data) # -18.83505
# conditional expectations
resE$conditional_law_X$expectations # 1 0.9 1 -1 -0.9 0.4436893 0.7330097 0.009708738 -0.6300971
# conditional variances
resE$conditional_law_X$variances[1, 1, ] # 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.9174757 0.5970874 0.3786408 0.2087379
# conditional covariances
resE$conditional_law_X$covariances[1,1,] # 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 NA 0.3932039 0.2038835 0.1262136
# mu hat
resE$conditional_law_X$expectations[ntips + 1] # 0.4436893
# sigma2 hat
num <- 0
den <- 0
for (i in 1:nrow(tree$edge)) {
par <- tree$edge[i, 1]
child <- tree$edge[i, 2]
num <- num + (resE$conditional_law_X$expectations[par] - resE$conditional_law_X$expectations[child])^2 / tree$edge.length[i]
den <- den + 1 - (resE$conditional_law_X$variances[,,par] + resE$conditional_law_X$variances[,,child] - 2 * resE$conditional_law_X$covariances[,,child]) / tree$edge.length[i] / theta_var
}
num / den
=#
end # of PhyloEM
@testset "update root status" begin
# Check that an update from a fixed root to a random root is possible
# y: 1 trait, no missing values
m1 = PGBP.UnivariateBrownianMotion(1, 0, 0.9)
b1 = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m1);
f1 = PGBP.init_factors_allocate(b1, nv(ct))
mess1 = PGBP.init_messageresidual_allocate(b1, nv(ct))
PGBP.init_beliefs_assignfactors!(b1, m1, tbl_y, df.taxon, net.nodes_changed);
m2 = PGBP.UnivariateBrownianMotion(1, 0, 0) # fixed root
b2 = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m2);
f2 = PGBP.init_factors_allocate(b2, nv(ct))
mess2 = PGBP.init_messageresidual_allocate(b2, nv(ct))
PGBP.init_beliefs_assignfactors!(b2, m2, tbl_y, df.taxon, net.nodes_changed);
PGBP.init_beliefs_allocate_atroot!(b1, f1, mess1, m2)
PGBP.init_beliefs_assignfactors!(b1, m2, tbl_y, df.taxon, net.nodes_changed);
for ind in eachindex(b1)
@test b1[ind].nodelabel == b2[ind].nodelabel
@test b1[ind].ntraits == b2[ind].ntraits
@test b1[ind].inscope == b2[ind].inscope
@test b1[ind].μ == b2[ind].μ
@test b1[ind].h == b2[ind].h
@test b1[ind].J == b2[ind].J
@test b1[ind].type == b2[ind].type
@test b1[ind].metadata == b2[ind].metadata
end
PGBP.init_beliefs_allocate_atroot!(b1, f1, mess1, m1)
PGBP.init_beliefs_assignfactors!(b1, m1, tbl_y, df.taxon, net.nodes_changed);
PGBP.init_beliefs_allocate_atroot!(b2, f2, mess2, m1)
PGBP.init_beliefs_assignfactors!(b2, m1, tbl_y, df.taxon, net.nodes_changed);
for ind in eachindex(b1)
@test b1[ind].nodelabel == b2[ind].nodelabel
@test b1[ind].ntraits == b2[ind].ntraits
@test b1[ind].inscope == b2[ind].inscope
@test b1[ind].μ == b2[ind].μ
@test b1[ind].h == b2[ind].h
@test b1[ind].J == b2[ind].J
@test b1[ind].type == b2[ind].type
@test b1[ind].metadata == b2[ind].metadata
end
# x,y: 2 traits, no missing values
m1 = PGBP.MvDiagBrownianMotion((1,1), (0,0), (1.2,3))
b1 = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m1);
f1 = PGBP.init_factors_allocate(b1, nv(ct))
mess1 = PGBP.init_messageresidual_allocate(b1, nv(ct))
PGBP.init_beliefs_assignfactors!(b1, m1, tbl, df.taxon, net.nodes_changed);
m2 = PGBP.MvDiagBrownianMotion((1,1), (0,0), (0,0))
b2 = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m2);
mess2 = PGBP.init_messageresidual_allocate(b2, nv(ct))
PGBP.init_beliefs_assignfactors!(b2, m2, tbl, df.taxon, net.nodes_changed);
PGBP.init_beliefs_allocate_atroot!(b1, f1, mess1, m2)
PGBP.init_beliefs_assignfactors!(b1, m2, tbl, df.taxon, net.nodes_changed);
for ind in eachindex(b1)
@test b1[ind].nodelabel == b2[ind].nodelabel
@test b1[ind].ntraits == b2[ind].ntraits
@test b1[ind].inscope == b2[ind].inscope
@test b1[ind].μ == b2[ind].μ
@test b1[ind].h == b2[ind].h
@test b1[ind].J == b2[ind].J
@test b1[ind].type == b2[ind].type
@test b1[ind].metadata == b2[ind].metadata
end
end # of root update
end # of exact, on a tree
@testset "exact network calibration for the BM" begin
netstr = "(((A:4.0,((B1:1.0,B2:1.0)i6:0.6)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C:0.1)i2:1.0)i1:3.0);"
df = DataFrame(taxon=["A","B1","B2","C"], x=[10,10,2,0], y=[1.0,.9,1,-1])
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
tbl_y = columntable(select(df, :y)) # 1 trait, for univariate models
tbl_x = columntable(select(df, :x))
net = readTopology(netstr)
g = PGBP.moralize!(net)
PGBP.triangulate_minfill!(g)
ct = PGBP.cliquetree(g)
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
@testset "exact formulas (REML)" begin
# y: 1 trait, no missing values
m = PGBP.UnivariateBrownianMotion(1, 0, Inf) # infinite root variance
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
cgb = PGBP.ClusterGraphBelief(b)
mod, llscore = PGBP.calibrate_exact_cliquetree!(cgb, spt,
net.nodes_changed,
tbl_y, df.taxon, PGBP.UnivariateBrownianMotion)
@test PGBP.integratebelief!(cgb, spt[3][1])[2] ≈ llscore
@test llscore ≈ -5.250084678427689
@test mod.μ ≈ -0.260008715071627
@test PGBP.varianceparam(mod) ≈ 0.4714735834478194
#= numerical optim, ML -- redundant with test_calibration
m = PGBP.UnivariateBrownianMotion(0.5,0.5,0)
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl_y, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b)
PGBP.calibrate!(cgb, [spt])
modopt, llscoreopt, opt = PGBP.calibrate_optimize_cliquetree!(cgb, ct, net.nodes_changed,
tbl_y, df.taxon, PGBP.UnivariateBrownianMotion, (0.5,-3))
@test PGBP.integratebelief!(cgb, spt[3][1])[2] ≈ llscoreopt
@test llscoreopt ≈ -5.174720533524127
@test modopt.μ ≈ mod.μ
n=4
@test PGBP.varianceparam(modopt) ≈ PGBP.varianceparam(mod) * (n-1) / n
=#
# x,y: 2 traits, no missing values
m = PGBP.MvDiagBrownianMotion((1,1), (0,0), (Inf,Inf))
b = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m);
cgb = PGBP.ClusterGraphBelief(b)
mod, llscore = PGBP.calibrate_exact_cliquetree!(cgb, spt,
net.nodes_changed,
tbl, df.taxon, PGBP.MvFullBrownianMotion)
#@test PGBP.integratebelief!(cgb, spt[3][1])[2] ≈ llscore
#@test llscore ≈ -6.851098376474686
@test mod.μ ≈ [2.791001688545128 ; -0.260008715071627]
@test PGBP.varianceparam(mod) ≈ [17.93326111121198 1.6089749098736517 ; 1.6089749098736517 0.4714735834478195]
#= ML solution the matrix-way, analytical for BM:
# for y: univariate
Σ = Matrix(vcv(net)[!,Symbol.(df.taxon)])
n=4 # number of data points
i = ones(n) # intercept
μhat = inv(transpose(i) * inv(Σ) * i) * (transpose(i) * inv(Σ) * tbl.y)
r = tbl.y .- μhat
σ2hat_REML = (transpose(r) * inv(Σ) * r) / (n-1)
σ2hat_ML = (transpose(r) * inv(Σ) * r) / n
llscorereml = - (n-1)/2 - logdet(2π * σ2hat_REML .* Σ)/2
llscore = - n/2 - logdet(2π * σ2hat_ML .* Σ)/2
# for x,y: multivariate
Σ = Matrix(vcv(net)[!,Symbol.(df.taxon)])
n=4 # number of data points
datatbl = [tbl.x tbl.y]
i = ones(n) # intercept
μhat = inv(transpose(i) * inv(Σ) * i) * (transpose(i) * inv(Σ) * datatbl)
r = datatbl - i * μhat
σ2hat_REML = (transpose(r) * inv(Σ) * r) / (n-1)
σ2hat_ML = (transpose(r) * inv(Σ) * r) / n
q = 2 # number of traits; Cov(vec(data)) = kron(R, Σ) for the matrix normal
llscore = - n*q/2 - logdet(2π * kron(σ2hat_ML, Σ))/2
=#
end # of exact formulas
# new network: clade of 2 sisters below a tree node, *not* a hybrid node,
# fixit: calibration fails if fully missing data below hybrid node
net = readTopology("((((B1:1.0,B2:1.0)i6:4.0,(A:0.6)#H5:1.1::0.9)i4:0.5,(#H5:2.0::0.1,C:0.1)i2:1.0)i1:3.0);")
g = PGBP.moralize!(net); PGBP.triangulate_minfill!(g); ct = PGBP.cliquetree(g)
spt = PGBP.spanningtree_clusterlist(ct, net.nodes_changed)
tbl = (x=[10,missing,missing,0], y=[1.0,.9,1,-1])
tbl_x = (x=tbl[:x], )
@testset "exact formulas, with missing values" begin
# x: 2 sister taxa with fully-missing values. #tipswithdata=2 > #traits=1
m = PGBP.UnivariateBrownianMotion(1, 0, 0) # wrong starting model
cgb = (@test_logs (:error,r"^tip") (:error,r"^tip") (:error,r"^internal") PGBP.ClusterGraphBelief(PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, ct, m)))
mod, llscore = PGBP.calibrate_exact_cliquetree!(cgb, spt, net.nodes_changed,
tbl_x, df.taxon, PGBP.MvFullBrownianMotion) # mv instead of univariate
@test mod.μ ≈ [3.538570417551306]
@test PGBP.varianceparam(mod) ≈ [35.385704175513084;;]
@test llscore ≈ -6.2771970782154565
# x,y: B1,B2 with partial data
m = PGBP.MvDiagBrownianMotion((1,1), (0,0), (Inf,Inf))
cgb = PGBP.ClusterGraphBelief(PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m))
# error: some leaf must have partial data: cluster i6i4 has partial traits in scope
@test_throws ["partial data", "partial traits in scope"] PGBP.calibrate_exact_cliquetree!(cgb, spt, net.nodes_changed,
tbl, df.taxon, PGBP.MvFullBrownianMotion)
end # of exact formulas
end # of exact, on a network
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | code | 6782 | @testset "calibration w/ optimization" begin
# univariate, multivariate, missing, fixed/proper/improper root
examplenetdir = joinpath(dirname(Base.find_package("PhyloGaussianBeliefProp")),
"..", "test","example_networks")
@testset "univariate, no missing, fixed root" begin
net = readTopology(joinpath(examplenetdir, "mateescu_2010.phy"))
# 9 nodes: 2 tips, 4 hybrids
# level-4, not tree-child, has hybrid ladder, has deg-4 hybrid
df = DataFrame(taxon=["d","g"], y=[1.0,-1.0])
tbl_y = columntable(select(df, :y))
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
m = PGBP.UnivariateBrownianMotion(1.0, 0.0) # σ2 = 1.0, μ = 0.0
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, m);
ctb = PGBP.ClusterGraphBelief(b)
## reference is from a previous call to `calibrate_optimize_cliquetree!`
refμ = -0.07534357691418593
refσ2 = 0.5932930079336234
refll = -3.2763180687070053
mod, llscore, opt = PGBP.calibrate_optimize_cliquetree!(ctb, ct,
net.nodes_changed, tbl_y, df.taxon, PGBP.UnivariateBrownianMotion,
(1.0,0.0))
@test mod.μ ≈ refμ
@test mod.σ2 ≈ refσ2
@test llscore ≈ refll
## test against `calibrate_optimize_cliquetree_autodiff!`
lbc = GeneralLazyBufferCache(function (paramOriginal)
mo = PGBP.UnivariateBrownianMotion(paramOriginal...)
bel = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, ct, mo)
return PGBP.ClusterGraphBelief(bel)
end)
mod, llscore, opt = PGBP.calibrate_optimize_cliquetree_autodiff!(lbc, ct,
net.nodes_changed, tbl_y, df.taxon, PGBP.UnivariateBrownianMotion,
(1.0,0,0))
@test mod.μ ≈ refμ rtol=4e-10
@test mod.σ2 ≈ refσ2 rtol=3e-11
@test llscore ≈ refll rtol=3e-16
## compare with Bethe
cg = PGBP.clustergraph!(net, PGBP.Bethe())
b = PGBP.init_beliefs_allocate(tbl_y, df.taxon, net, cg, m)
cgb = PGBP.ClusterGraphBelief(b)
mod, fenergy, opt = PGBP.calibrate_optimize_clustergraph!(cgb, cg,
net.nodes_changed, tbl_y, df.taxon, PGBP.UnivariateBrownianMotion,
(1.0,0.0))
@test mod.μ ≈ refμ rtol=2e-5
@test mod.σ2 ≈ refσ2 rtol=2e-6
@test fenergy ≈ refll rtol=3e-2
end
# norm(mod.μ-refμ)/max(norm(mod.μ),norm(refμ))
# norm(mod.σ2-refσ2)/max(norm(mod.σ2),norm(refσ2))
# norm(fenergy+refll)/max(norm(fenergy),norm(refll))
# norm(llscore-refll)/max(norm(llscore),norm(refll))
@testset "bivariate, no missing, improper root" begin
net = readTopology(joinpath(examplenetdir, "sun_2023.phy"))
# 42 nodes: 10 tips, 6 hybrids
# level-6, not tree-child, has hybrid ladder, has deg-4 hybrid
#= tip data simulated from ParamsMultiBM():
rootmean = [0.0, 0.0]; rate = [2.0 1.0; 1.0 2.0]
sim = simulate(net, ParamsMultiBM(rootmean, rate))
y1 = sim[:Tips][1,:]; y2 = sim[:Tips][2,:] =#
df = DataFrame(taxon=tipLabels(net),
y1=[-1.001, 0.608, -3.606, -7.866, -5.977, -6.013, -7.774,
-5.511, -6.392, -6.471],
y2=[0.262, 5.124, -5.076, -6.223, -7.033, -6.062, -6.42, -6.34,
-6.516, -6.501])
df_var = select(df, Not(:taxon))
tbl = columntable(df_var)
ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
# min cluster size: 2, max cluster size: 5
m = PGBP.MvFullBrownianMotion([2.0 1.0; 1.0 2.0], [0.0 0.0], [Inf 0.0; 0.0 Inf])
b = PGBP.init_beliefs_allocate(tbl, df.taxon, net, ct, m);
PGBP.init_beliefs_assignfactors!(b, m, tbl, df.taxon, net.nodes_changed);
ctb = PGBP.ClusterGraphBelief(b)
# note: reaches max iterations before converging, so just save results for now
# mod, llscore, opt = PGBP.calibrate_optimize_cliquetree!(ctb, ct,
# net.nodes_changed, tbl, df.taxon, PGBP.MvFullBrownianMotion,
# ([2.0 1.0; 1.0 2.0], [0.0 0.0], [Inf 0.0; 0.0 Inf]))
# Multivariate Brownian motion
# - evolutionary variance rate matrix: R:
# [3.717085841556895 1.7464551312269698; 1.7464551312269698 2.0994767855707854]
# - root mean: μ = [0.0, 0.0]
# - root variance: v = [Inf 0.0; 0.0 Inf], -32.22404541422671,
# * Status: failure (reached maximum number of iterations)
# * Candidate solution
# Final objective value: 3.222405e+01
# * Found with
# Algorithm: L-BFGS
# * Convergence measures
# |x - x'| = 4.05e-09 ≰ 0.0e+00
# |x - x'|/|x'| = 6.16e-09 ≰ 0.0e+00
# |f(x) - f(x')| = 5.68e-13 ≰ 0.0e+00
# |f(x) - f(x')|/|f(x')| = 1.76e-14 ≰ 0.0e+00
# |g(x)| = 9.97e-08 ≰ 1.0e-08
# * Work counters
# Seconds run: 248 (vs limit Inf)
# Iterations: 1000
# f(x) calls: 3180
# ∇f(x) calls: 3180
cg = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(4));
# min cluster size: 1, max cluster size: 4
b_jg = PGBP.init_beliefs_allocate(tbl, df.taxon, net, cg, m);
PGBP.init_beliefs_assignfactors!(b_jg, m, tbl, df.taxon, net.nodes_changed);
cgb = PGBP.ClusterGraphBelief(b_jg)
# note: reaches max iterations before converging, so just save results for now
# mod, fenergy, opt = PGBP.calibrate_optimize_clustergraph!(cgb, cg,
# net.nodes_changed, tbl, df.taxon, PGBP.MvFullBrownianMotion,
# ([2.0 1.0; 1.0 2.0], [0.0 0.0], [Inf 0.0; 0.0 Inf]))
# Multivariate Brownian motion
# - evolutionary variance rate matrix: R:
# [3.7170858696599423 1.7464551640805306; 1.7464551640805306 2.0994768084399875]
# - root mean: μ = [0.0, 0.0]
# - root variance: v = [Inf 0.0; 0.0 Inf], 32.270019493029075,
# * Status: failure (reached maximum number of iterations)
# * Candidate solution
# Final objective value: 3.227002e+01
# * Found with
# Algorithm: L-BFGS
# * Convergence measures
# |x - x'| = 1.16e-10 ≰ 0.0e+00
# |x - x'|/|x'| = 1.76e-10 ≰ 0.0e+00
# |f(x) - f(x')| = 6.39e-14 ≰ 0.0e+00
# |f(x) - f(x')|/|f(x')| = 1.98e-15 ≰ 0.0e+00
# |g(x)| = 2.55e-07 ≰ 1.0e-08
# * Work counters
# Seconds run: 338 (vs limit Inf)
# Iterations: 1000
# f(x) calls: 3270
# ∇f(x) calls: 3270
end
end | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 1479 | # PhyloGaussianBeliefProp
[](https://cecileane.github.io/PhyloGaussianBeliefProp.jl/stable/)
[](https://cecileane.github.io/PhyloGaussianBeliefProp.jl/dev/)
[](https://github.com/cecileane/PhyloGaussianBeliefProp.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/cecileane/PhyloGaussianBeliefProp.jl)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/SciML/ColPrac)
[](https://JuliaCI.github.io/NanosoldierReports/pkgeval_badges/report.html)
[Julia](http://julialang.org) package for the analysis of Gaussian models on
phylogenetic networks and admixture graphs
using belief propagation (aka message passing).
It depends on utilities from [PhyloNetworks](https://github.com/crsl4/PhyloNetworks.jl).
## Citing
See [`CITATION.bib`](CITATION.bib) for the relevant reference(s).
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 657 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
```
# PhyloGaussianBeliefProp
[PhyloGaussianBeliefProp](https://github.com/cecileane/PhyloGaussianBeliefProp.jl)
is a [Julia](http://julialang.org) package for the analysis of Gaussian models on
phylogenetic networks using belief propagation (aka message passing).
---
## Manual
```@contents
Pages = [
"man/installation.md",
"man/getting_started.md",
"man/background.md",
"man/evolutionary_models.md",
"man/clustergraphs.md",
"man/regularization.md",
"man/message_schedules.md"
]
Depth = 3
```
## Library
```@index
```
```@autodocs
Modules = [PhyloGaussianBeliefProp]
```
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 4559 | # Background
## Trait evolution on a phylogeny
The evolution of molecular and phenotypic traits is commonly modeled using
[Markov processes](https://en.wikipedia.org/wiki/Markov_chain) along a rooted
phylogeny.
For example, most models of continuous trait evolution on a phylogenetic tree
are extensions of the
[Brownian motion](https://en.wikipedia.org/wiki/Wiener_process) (BM) to capture
features such as:
- evolutionary trends
- adaptation
- rate variation across lineages
## Factoring the joint model
A Markov process along the phylogeny induces a *joint distribution*
``p_\theta(x_1,\dots,x_m)``, with parameters ``\theta``, over all nodes
(trait vectors) ``X_1,\dots,X_m``.
``p_\theta(x_1,\dots,x_m)`` can also be factored as the product of *conditional
distributions* ``\phi_v`` for each node ``X_v``, where
``\phi_v=p_\theta(x_v\mid x_{\mathrm{pa}(v)})`` is the distribution of ``X_v``
given its parent(s) ``X_{\mathrm{pa}(v)}``, e.g.
``X_{\mathrm{pa}(v)}=\begin{bmatrix} X_{p_1} \\ X_{p_2}\end{bmatrix}``:
```math
p_\theta(x_1,\dots,x_m) = \prod_{v=1}^m \phi_v
```
We focus on the case where all conditional distributions are *linear Gaussian*.
That is, for each node ``X_v``:
```math
\begin{aligned}
X_v\mid X_{\mathrm{pa}(v)} &\sim \mathcal{N}(\omega_v+
\bm{q}_v X_{\mathrm{pa}(v)},\bm{V}_v) \\
\phi_v &= (|2\pi\bm{V}_v|)^{-1/2}\exp(-||x_v-(\omega_v+\bm{q}_v x_{\mathrm{pa}(v)})||^2_{\bm{V}_v^{-1}}/2)
\end{aligned}
```
with trend vector ``\omega_v``, actualization matrix ``\bm{q}_v``, and
covariance matrix ``\bm{V}_v``. For example, the BM on a phylogeny
(and most of its extensions)
satisfies this characterization [mitov2020fast](@cite).
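To make this concrete, here is a minimal sketch (using Distributions.jl, which
is *not* a dependency of this package, and with all numbers hypothetical) that
evaluates such a conditional density for a node with two parents:

```julia
using Distributions # for MvNormal; an illustration only, not a package dependency

ω = [0.1]         # trend vector ω_v
q = [0.4 0.6]     # actualization matrix q_v
V = [0.5;;]       # covariance matrix V_v (1×1 here: a univariate trait)
xpa = [1.0, -1.0] # stacked states of the node's two parents
xv = [0.2]        # state of node v

ϕ_v = pdf(MvNormal(ω + q * xpa, V), xv) # conditional density of X_v given its parents
```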
## Parameter inference
Typically, we observe the tips of the phylogeny
``X_1=\mathrm{x}_1,\dots,X_n=\mathrm{x}_n`` and use these data for parameter
inference by optimizing the log-likelihood
``\mathrm{LL}(\theta)=\log\int p_\theta(\mathrm{x}_1,\dots,\mathrm{x}_n,x_{n+1},
\dots,x_m)\,dx_{n+1}\dots dx_m``:
```math
\widehat{\theta} = \argmax_{\theta} \ \mathrm{LL}(\theta)
```
For simpler models, it is possible to derive a closed-form expression for
``\widehat{\theta}`` by solving:
```math
\nabla_\theta \ [\mathrm{LL}(\theta)]|_{\theta=\widehat{\theta}}=0
```
for a zero of the log-likelihood gradient, and to compute the estimate directly.
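For example (a sketch under the univariate BM, treating the trait values
``\mathrm{y}`` at the ``n`` tips as jointly Gaussian), the ML estimate of the
root mean is the generalized least squares estimator
```math
\widehat{\mu} = (\bm{1}^{\top}\Sigma^{-1}\bm{1})^{-1}\,\bm{1}^{\top}\Sigma^{-1}\mathrm{y}
```
where ``\Sigma`` is the phylogenetic covariance matrix among the tips and
``\bm{1}`` a vector of ones.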
For more complicated models however, ``\widehat{\theta}`` must be obtained by
iterative methods that evaluate ``\mathrm{LL}(\theta)`` over different
parameter values.
In general, evaluating ``\mathrm{LL}(\theta)`` is costly as the size and
complexity of the phylogeny grows.
## BP for exact inference
[Belief propagation](https://en.wikipedia.org/wiki/Belief_propagation) (BP) is a
framework for efficiently computing various
marginals of a joint distribution ``p_\theta`` that can be factored into
conditional distributions ``\phi_v\in\Phi``, where ``\Phi`` denotes the full set
of conditional distributions.
We refer to the reference book by Koller and Friedman (2009) [koller2009probabilistic](@cite)
for more background on BP, and only sketch the main steps involved here:
1. Construct a tree data structure called a
[*clique tree*](https://en.wikipedia.org/wiki/Tree_decomposition)
(also known as a junction tree, join tree, or tree decomposition),
whose nodes ``\mathcal{C}_i``, called *clusters*,
are subsets of ``\{X_1,\dots,X_m\}``.
2. Each conditional distribution is assigned (``\mapsto``) to a cluster of the
clique tree, and the product of all conditional distributions assigned to a
cluster ``\mathcal{C}_i`` initializes its *belief*
``\beta_i = \prod_{\phi_v\mapsto\mathcal{C}_i,\ \phi_v\in\Phi}\phi_v``
3. Each cluster computes messages from its belief, and propagates these to its
neighbor clusters to update their beliefs.
``\mathrm{LL}(\theta)`` can be computed by passing messages according to a
single postorder traversal of the clique tree.
An additional preorder traversal guarantees that every cluster belief is the
corresponding marginal of ``p_\theta``. That is, every cluster belief reflects
the conditional distribution of the cluster given the data.
## Loopy BP for approximate inference
A clique tree is a special case of a graph data structure called a
*cluster graph*. In general, cluster graphs can be non-treelike with cycles.
BP on a *loopy cluster graph* (i.e. a cluster graph with cycles), abbreviated
as *loopy BP*, can approximate the likelihood and conditional distributions of
the unobserved, ancestral nodes, and can be more computationally efficient than
BP on a clique tree.
## References
```@bibliography
``` | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 7579 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
```
# Cluster graphs
A cluster graph groups the nodes ``X_1,\dots,X_m`` of the phylogeny into
several (possibly intersecting) clusters/subsets, such that for any node:
- there is ``\ge 1`` cluster that contains it and its parents (i.e. the clusters are (node) *family-preserving*)
- the clusters that contain it are always joined as a tree, whose edge labels contain that node
Each edge between two clusters ``\mathcal{C}_i,\mathcal{C}_j`` is labeled with
a node subset ``\mathcal{S}_{i,j}\subseteq\mathcal{C}_i\cap\mathcal{C}_j``
based on the second property.
These labels are referred to as sepsets (i.e. "sep(arating) sets").
Intuitively, if the factors of a joint distribution ``p_\theta`` over the nodes
are distributed among the clusters, then the topology of the cluster graph
implies the possible computational pathways in which these factors may be
sequentially "combined" by product or marginalization.
For example, a cluster's belief is the cumulative result of computations that might
follow some walk along the cluster graph, ending at that cluster. This is then
interpreted as an estimate of its conditional distribution given the data.
## Clique tree
A cluster graph whose topology is a tree is known as a clique tree. We provide
the option to construct a clique tree, and 3 further options (below) for
constructing (potentially) loopy cluster graphs.
A clique tree tends to have more clusters of larger size than a loopy cluster
graph. The time-complexity of message passing on a cluster graph is parametrized
by maximum cluster size, and so clique trees allow for exact inference but at a
greater cost than approximate inference on a loopy cluster graph.
Below is an example using a virus recombination network from
[Müller et al. (2022, Fig 1a)](https://doi.org/10.1038/s41467-022-31749-8)
[muller2022bayesian](@cite), with inheritance probabilities estimated from the
inferred recombination breakpoints (see [muller2022_nexus2newick.jl](https://github.com/bstkj/graphicalmodels_for_phylogenetics_code/blob/5f61755c4defe804fd813113e883d49445971ade/real_networks/muller2022_nexus2newick.jl)).
```jldoctest clustergraphs; setup = :(using PhyloNetworks, PhyloGaussianBeliefProp; const PGBP = PhyloGaussianBeliefProp)
julia> net = readTopology(pkgdir(PhyloGaussianBeliefProp, "test/example_networks", "muller_2022.phy")); # 1161 edges, 801 nodes: 40 tips, 361 hybrid nodes, 400 internal tree nodes.
julia> preorder!(net)
julia> ct = PGBP.clustergraph!(net, PGBP.Cliquetree());
julia> PGBP.labels(ct) |> length # no. of vertices/clusters in clique tree
664
julia> PGBP.edge_labels(ct) |> length # no. of edges in clique tree, one less than no. of clusters
663
julia> clusters = PGBP.labels(ct) |> collect; # vector of cluster labels
julia> clusters[1] # cluster label is the concatenation of node labels
:I300I301I302I189
julia> ct[clusters[1]] # access metadata for `clusters[1]`: (node labels, preorder indices), nodes are arranged by decreasing preorder index
([:I300, :I301, :I302, :I189], Int16[722, 719, 717, 487])
julia> using StatsBase # `summarystats`
julia> (length(ct[cl][1]) for cl in PGBP.labels(ct)) |> collect |> summarystats # distribution of cluster sizes
Summary Stats:
Length: 664
Missing Count: 0
Mean: 6.728916
Std. Deviation: 6.120608
Minimum: 2.000000
1st Quartile: 4.000000
Median: 5.000000
3rd Quartile: 7.000000
Maximum: 54.000000
```
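Each edge's sepset can be inspected similarly. Below is a small sketch (not a
doctest; it assumes the MetaGraphsNext-style indexing `ct[label1, label2]` for
edge metadata) to look up the nodes shared by two neighboring clusters:

```julia
e = first(PGBP.edge_labels(ct)) # labels of the two clusters joined by one edge
ct[e...] # sepset metadata: preorder indices of the nodes labeling this edge
```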
## Bethe / Factor graph
The Bethe cluster graph has a cluster for each node family (*factor clusters*)
and for each node (*variable clusters*). Each factor cluster is joined to the
variable cluster for any node it contains.
```jldoctest clustergraphs
julia> fg = PGBP.clustergraph!(net, PGBP.Bethe());
julia> (PGBP.labels(fg) |> length, PGBP.edge_labels(fg) |> length) # (no. of clusters, no. of edges)
(1557, 1914)
julia> (length(fg[cl][1]) for cl in PGBP.labels(fg)) |> collect |> summarystats
Summary Stats:
Length: 1557
Missing Count: 0
Mean: 1.743738
Std. Deviation: 0.809151
Minimum: 1.000000
1st Quartile: 1.000000
Median: 2.000000
3rd Quartile: 2.000000
Maximum: 3.000000
```
If each hybrid node has 2 parents (as for `net`), then the maximum cluster size
is 3.
## Join-graph structuring
[Join-graph structuring](https://doi.org/10.1613/jair.2842) allows the user to
specify the maximum cluster size ``k^*``. See [`JoinGraphStructuring`](@ref)
for more details on the algorithm.
```jldoctest clustergraphs
julia> jg = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(10));
julia> (PGBP.labels(jg) |> length, PGBP.edge_labels(jg) |> length)
(1001, 1200)
julia> (length(jg[cl][1]) for cl in PGBP.labels(jg)) |> collect |> summarystats
Summary Stats:
Length: 1001
Missing Count: 0
Mean: 6.036963
Std. Deviation: 2.177070
Minimum: 1.000000
1st Quartile: 4.000000
Median: 6.000000
3rd Quartile: 8.000000
Maximum: 10.000000
```
Since the set of clusters has to be family-preserving (see above), ``k^*``
cannot be smaller than the largest node family (i.e. a node and its parents).
For example, if the network has hybrid nodes, then ``k^*\ge 3`` necessarily.
```jldoctest clustergraphs
julia> jg = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(2));
ERROR: maxclustersize 2 is smaller than the size of largest node family 3.
```
At the other extreme, suppose we set ``k^*=54``, the maximum cluster size of the
clique tree above:
```jldoctest clustergraphs
julia> jg2 = PGBP.clustergraph!(net, PGBP.JoinGraphStructuring(54));
julia> (PGBP.labels(jg2) |> length, PGBP.edge_labels(jg2) |> length)
(801, 800)
julia> (length(jg2[cl][1]) for cl in PGBP.labels(jg2)) |> collect |> summarystats
Summary Stats:
Length: 801
Missing Count: 0
Mean: 9.539326
Std. Deviation: 9.953078
Minimum: 1.000000
1st Quartile: 4.000000
Median: 5.000000
3rd Quartile: 10.000000
Maximum: 54.000000
```
then it turns out that `jg2` is a clique tree (since the number of clusters and
edges differ by 1), though not the same one as `ct`. Generally, a cluster graph
with larger clusters is less likely to be loopy than one with smaller clusters.
## LTRIP
For [LTRIP](https://doi.org/10.1145/3132711.3132717), the user provides the set
of clusters, which are assumed to be family-preserving (see above).
1. For each node, the clusters that contain it are joined as a tree, prioritizing edges formed with clusters that intersect heavily with others. See [`LTRIP`](@ref) for details.
2. The trees for each node are layered on one another (the sepsets for an edge are merged) to produce the cluster graph.
As an example, we use the clusters from join-graph structuring:
```jldoctest clustergraphs
julia> clusters = (jg[cl][2] for cl in PGBP.labels(jg)) |> collect; # vector of clusters, each given as a vector of preorder indices in decreasing order
julia> lg = PGBP.clustergraph!(net, PGBP.LTRIP(clusters, net));
julia> (PGBP.labels(lg) |> length, PGBP.edge_labels(lg) |> length)
(1001, 1249)
```
The summary statistics would be the same as for `jg`'s clusters, though it
appears that `lg` is more densely connected than `jg`.
If the user does not provide the clusters, then the set of node families
(see [`nodefamilies`](@ref)) is used by default:
```jldoctest clustergraphs
julia> lg = PGBP.clustergraph!(net, PGBP.LTRIP(net));
julia> (PGBP.labels(lg) |> length, PGBP.edge_labels(lg) |> length)
(801, 1158)
```
There are 801 clusters, 1 per node (family), as expected. | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 4140 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
```
# Evolutionary models
## Specifying a process
Each trait evolutionary model is specified as a linear Gaussian process, such as a BM or some extension of it,
that evolves along the phylogeny.
Minimally, the user provides a variance rate ``\Sigma``, and a prior mean
``\mu`` and variance ``\bm{V}_{\!\!\rho}`` for the root state ``X_\rho``.
For example, ``\bm{V}_{\!\!\rho}=0`` treats ``X_\rho=\mu`` as known, while
``\bm{V}_{\!\!\rho}=\infty`` disregards all prior beliefs about ``X_\rho``.
For example, the univariate BM can be specified as:
```jldoctest evolutionary_models; setup = :(using PhyloGaussianBeliefProp; const PGBP = PhyloGaussianBeliefProp)
julia> PGBP.UnivariateBrownianMotion(1, 0) # root variance v = 0 (fixed root)
Univariate Brownian motion
- evolutionary variance rate σ2 :
1.0
- root mean μ :
0.0
julia> PGBP.UnivariateBrownianMotion(1, 0, Inf) # root variance v = Inf (improper prior)
Univariate Brownian motion
- evolutionary variance rate σ2 :
1.0
- root mean μ :
0.0
- root variance v :
Inf
```
The multivariate BM is available to model multivariate traits.
If the components of a multivariate trait evolve in an uncorrelated manner,
then ``\Sigma`` is a diagonal matrix and is specified by its diagonal entries
(e.g. `MvDiagBrownianMotion`). Otherwise, ``\Sigma`` is potentially dense and
is passed in whole (e.g. `MvFullBrownianMotion`).
```jldoctest evolutionary_models
julia> PGBP.MvDiagBrownianMotion([1, 0.5], [-1, 1]) # v = [0, 0]
Multivariate Diagonal Brownian motion
- evolutionary variance rates (diagonal values in the rate matrix): R :
[1.0, 0.5]
- root mean μ :
[-1.0, 1.0]
julia> PGBP.MvFullBrownianMotion([1 0.5; 0.5 1], [-1,1], [10^10 0; 0 10^10])
Multivariate Brownian motion
- evolutionary variance rate matrix: R :
[1.0 0.5; 0.5 1.0]
- root mean μ :
[-1.0, 1.0]
- root variance v :
[1.0e10 0.0; 0.0 1.0e10]
```
``\Sigma`` can vary along the phylogeny. If path length ``t\ge 0`` from the root
represents evolutionary time, then the Early Burst (EB) model and Accelerating
Rate (AC) model respectively allow ``\Sigma`` to decay (``b<0``) and grow
(``b>0``) along a *time-consistent* phylogeny:
```math
\Sigma(t) = \Sigma_0\exp(bt), \text{ where } \Sigma_0 = \Sigma(0)
```
These models are not yet implemented.
Selection can be additionally modeled by the
[Ornstein-Uhlenbeck](https://en.wikipedia.org/wiki/Ornstein–Uhlenbeck_process)
(OU) process, which allows a trait to diffuse with variance rate ``\Sigma`` yet
drift towards some optimal value ``\theta`` (with selection "strength"
``\bm{A}``).
```jldoctest evolutionary_models
julia> PGBP.UnivariateOrnsteinUhlenbeck(2, 3, -2, 0, 0.4) # σ2=2, below showing γ2 = σ2/(2α)
homogeneous univariate Ornstein-Uhlenbeck
- stationary evolutionary variance γ2 :
0.3333333333333333
- selection strength α :
3.0
- optimal value θ :
-2.0
- root mean μ :
0.0
- root variance v :
0.4
```
## Edge factors
After specifying the evolutionary model
(e.g. `m = PGBP.UnivariateBrownianMotion(1, 0)`), it is eventually passed to
[`init_beliefs_assignfactors!`](@ref)
(see [4\. Initialize cluster graph beliefs](@ref)), which infers the conditional
distribution for each node and assigns it to a cluster.
We refer to these conditional distributions as *edge factors* since they relate
the states of a parent node and its child.
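For example (a worked special case, not the general implementation): under the
homogeneous univariate BM with variance rate ``\sigma^2``, the edge factor for
a tree node ``v`` at the end of a parent edge of length ``t_v`` is
```math
X_v \mid X_{\mathrm{pa}(v)} \sim \mathcal{N}\left(x_{\mathrm{pa}(v)},\ \sigma^2 t_v\right).
```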
## Hybrid factors
Each reticulation node ``X_h`` has multiple parents ``X_{p_1},\dots,X_{p_k}``,
and is thus potentially associated with multiple edge factors.
We reconcile these by modeling ``X_h`` as a weighted average of its
"immediate parents", the child states for each of these edge factors.
Imagine that we introduce ``k`` copies ``X_{(p_1,h)},\dots,X_{(p_k,h)}`` of
``X_h``, each of which descends from the corresponding ``X_{p_i}``. Then ``X_h``
is modeled as a weighted average of ``X_{(p_1,h)},\dots,X_{(p_k,h)}``:
```math
X_h = \sum_{i=1}^k \gamma_{(p_i,h)} X_{(p_i,h)}
```
where the inheritance weights ``\gamma_{(p_i,h)}`` are positive and sum to 1.
Thus, each tree node is associated with an edge factor, and each hybrid node
with a hybrid factor. | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
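For example (a sketch under the univariate BM with variance rate ``\sigma^2``,
assuming the copies ``X_{(p_i,h)}`` evolve independently along hybrid edges of
lengths ``t_{(p_i,h)}``), the resulting hybrid factor is
```math
X_h \mid X_{p_1},\dots,X_{p_k} \sim
\mathcal{N}\!\left(\sum_{i=1}^k \gamma_{(p_i,h)}\, x_{p_i},\ \
\sigma^2 \sum_{i=1}^k \gamma_{(p_i,h)}^2\, t_{(p_i,h)}\right).
```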
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 14835 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
```
# Getting started
This version of the package is a proof of concept, and not all methods have been
fully implemented.
The API is still minimal and incomplete, and so we demonstrate the package's various
capabilities as pipelines involving multiple internal functions. A complete API
that wraps these pipelines will be made available later.
In what follows, we use simulated trait data.
In most of this manual, we use as an example the network topology from
[Lazaridis et al. (2014), Figure 3](https://doi.org/10.1038/nature13673)
[lazaridis2014ancient](@cite), displayed below,
with branch lengths arbitrarily set to 1 and inheritance probabilities
(admixture proportions) set to 0.4 and 0.6 at each hybrid node.

## Exact likelihood for fixed parameters
### 1\. Read in the network and the tip data
```jldoctest getting_started
julia> using PhyloGaussianBeliefProp
julia> const PGBP = PhyloGaussianBeliefProp;
julia> using PhyloNetworks # `readTopology`, `tipLabels`, `preorder!`
julia> using DataFrames # `DataFrame`
julia> net = readTopology(pkgdir(PGBP, "test/example_networks", "lazaridis_2014.phy"))
PhyloNetworks.HybridNetwork, Rooted Network
23 edges
20 nodes: 7 tips, 4 hybrid nodes, 9 internal tree nodes.
tip labels: Mbuti, Onge, Karitiana, MA1, ...
(Mbuti:1.0,(((Onge:1.0,#H1:0.01::0.4)EasternNorthAfrican:1.0,(((Karitiana:1.0)#H1:0.01::0.6,(MA1:1.0,#H3:0.01::0.4)ANE:1.0)AncientNorthEurasian:1.0,(((#H2:0.01::0.4)#H3:0.01::0.6,Loschbour:1.0)WHG:1.0,#H4:0.01::0.4)WestEurasian:1.0)I1:1.0)I2:1.0,((European:1.0)#H2:0.01::0.6,Stuttgart:1.0)#H4:0.01::0.6)NonAfrican:1.0)I3;
julia> preorder!(net) # updates net.nodes_changed to contain network nodes listed in preorder
julia> df = DataFrame(taxon=tipLabels(net), # simulated using `simulate(net, ParamsBM(0, 1))` from PhyloNetworks
x=[1.343, 0.841, -0.623, -1.483, 0.456, -0.081, 1.311])
7×2 DataFrame
Row │ taxon x
│ String Float64
─────┼────────────────────
1 │ Mbuti 1.343
2 │ Onge 0.841
3 │ Karitiana -0.623
4 │ MA1 -1.483
5 │ Loschbour 0.456
6 │ European -0.081
7 │ Stuttgart 1.311
```
In this example, the trait `x` is univariate.
We have mapped the observed data to the corresponding species in the dataframe `df`.
The call to `preorder!` updates `net` to contain a list of its nodes arranged in
preorder (or topological ordering).
Many internals in the package assume that this information is available,
and so it is important that this be called immediately after reading in the network!
### 2\. Choose an evolutionary model
At the moment, models available are: [`UnivariateBrownianMotion`](@ref), [`UnivariateOrnsteinUhlenbeck`](@ref),
[`MvDiagBrownianMotion`](@ref), [`MvFullBrownianMotion`](@ref).
Note however that not all methods may be implemented across all models.
See section [Evolutionary models](@ref) for more details on the available models.
```jldoctest getting_started
julia> m = PGBP.UnivariateBrownianMotion(1, 0) # σ2 = 1.0, μ = 0.0
Univariate Brownian motion
- evolutionary variance rate σ2 :
1.0
- root mean μ :
0.0
```
We specify a univariate Brownian motion with mean ``\mu=0`` and variance rate
``\sigma^2=1``. We want to compute the likelihood for these particular values,
though other values may better fit the data.
### 3\. Build a cluster graph from the network
Methods available are: [`Bethe`](@ref), [`LTRIP`](@ref),
[`JoinGraphStructuring`](@ref), [`Cliquetree`](@ref).
We first choose `Cliquetree` to compute the likelihood exactly. Other methods may
return a loopy cluster graph, which gives an approximate likelihood.
See section [Cluster graphs](@ref) for more background on cluster graphs.
```jldoctest getting_started
julia> ct = PGBP.clustergraph!(net, PGBP.Cliquetree())
Meta graph based on a Graphs.SimpleGraphs.SimpleGraph{Int8} with vertex labels of type Symbol, vertex metadata of type Tuple{Vector{Symbol}, Vector{Int8}}, edge metadata of type Vector{Int8}, graph metadata given by :cliquetree, and default weight 0
julia> PGBP.labels(ct) |> collect # cluster labels
17-element Vector{Symbol}:
:H1EasternNorthAfricanAncientNorthEurasian
:EasternNorthAfricanAncientNorthEurasianI2
:OngeEasternNorthAfrican
:StuttgartH4
:MbutiI3
:H2H3H4
:H3ANEWHGH4
:ANEWHGH4WestEurasian
:LoschbourWHG
:KaritianaH1
:EuropeanH2
:AncientNorthEurasianWestEurasianI1NonAfrican
:ANEH4WestEurasianNonAfrican
:ANEAncientNorthEurasianWestEurasianNonAfrican
:AncientNorthEurasianI1I2NonAfrican
:NonAfricanI3
:MA1ANE
```
See that each cluster's label is derived by concatenating the labels of the
nodes it contains.
### 4\. Initialize cluster graph beliefs
`ct` describes the topology of our cluster graph, but does not track the beliefs
for each cluster. Next, we:
- allocate memory for these beliefs
- initialize their values using the evolutionary model
- wrap them within another data structure to facilitate message passing.
```jldoctest getting_started
julia> using Tables # `columntable`
julia> tbl_x = columntable(select(df, :x)) # extract trait `x` from `df` as a column table
(x = [1.343, 0.841, -0.623, -1.483, 0.456, -0.081, 1.311],)
julia> b = PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, ct, m); # allocate memory for beliefs
julia> length(b) # no. of beliefs
33
julia> b[1] # belief for cluster {H1, EasternNorthAfrican, AncientNorthEurasian} before factor assignment
belief for Cluster H1EasternNorthAfricanAncientNorthEurasian, 1 traits × 3 nodes, dimension 3.
Node labels: Int8[17, 16, 10]
trait × node matrix of non-degenerate beliefs:
Bool[1 1 1]
exponential quadratic belief, parametrized by
μ: [0.0, 0.0, 0.0]
h: [0.0, 0.0, 0.0]
J: [0.0 0.0 0.0; 0.0 0.0 0.0; 0.0 0.0 0.0]
g: 0.0
julia> PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed); # initialize beliefs from evolutionary model
julia> b[1] # belief for cluster {H1, EasternNorthAfrican, AncientNorthEurasian} after factor assignment
belief for Cluster H1EasternNorthAfricanAncientNorthEurasian, 1 traits × 3 nodes, dimension 3.
Node labels: Int8[17, 16, 10]
trait × node matrix of non-degenerate beliefs:
Bool[1 1 1]
exponential quadratic belief, parametrized by
μ: [0.0, 0.0, 0.0]
h: [0.0, 0.0, 0.0]
J: [192.30769230769232 -76.92307692307693 -115.38461538461539; -76.92307692307693 30.769230769230774 46.15384615384616; -115.38461538461539 46.15384615384616 69.23076923076923]
g: 1.7106097934927051
julia> ctb = PGBP.ClusterGraphBelief(b); # wrap beliefs to facilitate message passing
julia> PGBP.nclusters(ctb) # no. of cluster beliefs
17
julia> PGBP.nsepsets(ctb) # no. of edge/sepset beliefs
16
```
`b` is a vector of all beliefs, one for each cluster and edge (also known as
*sepset*) in the cluster graph. The edge beliefs store the most recent messages
passed between neighboring clusters.
Recall that each cluster or edge is associated with a set of nodes. The *scope*
``x`` of its belief comes from stacking the trait vectors for these nodes.
A belief with scope ``x`` is parametrized by ``(\bm{J},h,g)`` as follows:
```math
\exp(-x^{\top}\bm{J}x/2 + h^{\top}x + g)
```
We show belief `b[1]` before and after factor (i.e. conditional distribution)
assignment. Note that its `J` and `g` parameters are changed.
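As a quick illustration of this parametrization (a sketch, assuming the `J`,
`h` and `g` fields displayed in the belief printout above), the log of the
unnormalized belief can be evaluated at any point `x` of its scope:

```julia
logbelief(J, h, g, x) = -x' * J * x / 2 + h' * x + g # exponent of the belief
logbelief(b[1].J, b[1].h, b[1].g, zeros(3)) # at x = zeros(3), this reduces to g
```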
`ctb` contains `b` with added information to locate specific beliefs in `b` from
their corresponding cluster/edge labels in `ct`, and added storage to log
information during message passing.
### 5\. Propose a schedule from the cluster graph
A message schedule can be described by a sequence of cluster pairs.
Each pairing tells us to send a message between these clusters (which must be
neighbors), while the order within the pair indicates the sender and the
recipient.
We build a message schedule `sched` from `ct` by finding a minimal set of
spanning trees for the cluster graph that together cover all its edges (i.e.
neighbor cluster pairs). Each spanning tree is represented as a sequence of
edges following some preorder traversal of `ct`.
See section [Message schedules](@ref) for more details on message schedules.
Since `ct` is a clique tree, there is a single spanning tree (`sched[1]`). We
extract and display the preorder sequence of edges from `sched[1]`. In this example,
`NonAfricanI3` is the root cluster of `ct`, and `KaritianaH1` is a leaf cluster.
```jldoctest getting_started
julia> sched = PGBP.spanningtrees_clusterlist(ct, net.nodes_changed);
julia> DataFrame(parent=sched[1][1], child=sched[1][2]) # edges of tree 1 in preorder
16×2 DataFrame
Row │ parent child
│ Symbol Symbol
─────┼──────────────────────────────────────────────────────────────────────
1 │ NonAfricanI3 AncientNorthEurasianWestEurasian…
2 │ AncientNorthEurasianWestEurasian… ANEAncientNorthEurasianWestEuras…
3 │ ANEAncientNorthEurasianWestEuras… ANEH4WestEurasianNonAfrican
4 │ ANEH4WestEurasianNonAfrican ANEWHGH4WestEurasian
5 │ ANEWHGH4WestEurasian H3ANEWHGH4
6 │ H3ANEWHGH4 LoschbourWHG
7 │ H3ANEWHGH4 MA1ANE
8 │ NonAfricanI3 MbutiI3
9 │ H3ANEWHGH4 H2H3H4
10 │ H2H3H4 EuropeanH2
11 │ H2H3H4 StuttgartH4
12 │ AncientNorthEurasianWestEurasian… AncientNorthEurasianI1I2NonAfric…
13 │ AncientNorthEurasianI1I2NonAfric… EasternNorthAfricanAncientNorthE…
14 │ EasternNorthAfricanAncientNorthE… H1EasternNorthAfricanAncientNort…
15 │ H1EasternNorthAfricanAncientNort… OngeEasternNorthAfrican
16 │ H1EasternNorthAfricanAncientNort… KaritianaH1
```
### 6\. Calibrate beliefs with the schedule
We apply one iteration of belief propagation on `ctb` following the schedule
`sched`. Since `ct` is a clique tree, the resulting beliefs are guaranteed to be
*calibrated* (i.e. the beliefs of neighbor clusters agree marginally over the
sepset between them).
```jldoctest getting_started
julia> PGBP.calibrate!(ctb, sched);
```
### 7\. Extract the log-likelihood
On a calibrated clique tree, there are two ways to obtain the log-likelihood:
- integrate any belief over its scope to get its normalization constant (`norm`)
- compute the [`factored_energy`](@ref), which approximates the log-likelihood on loopy cluster graphs but is exact on a clique tree
```jldoctest getting_started
julia> (_, norm) = PGBP.integratebelief!(b[1]); # `norm` is the integral of `b[1]` over its scope
julia> norm
-11.273958980921249
julia> (_, _, fe) = PGBP.factored_energy(ctb); # `fe` is the factored energy from the cluster/edge beliefs
julia> fe
-11.273958980921272
```
The first approach is more efficient (it uses only one belief, rather than all
beliefs), but only works for a clique tree. The normalization constant of a
belief from a calibrated loopy cluster graph cannot be similarly interpreted.
We see that both approaches return the same value, modulo rounding error.
## Exact inference
In the section above, we computed the log-likelihood for ``\mu=0``, ``\sigma^2=1``.
Now we find ``\mu=\widehat{\mu}`` and ``\sigma^2=\widehat{\sigma}^2`` that
maximize the log-likelihood. There are two options:
- iterative optimization
- exact computation using belief propagation
```jldoctest getting_started
julia> mod, ll, _ = PGBP.calibrate_optimize_cliquetree!( # iterative optimization
ctb, # beliefs
ct, # clique tree
net.nodes_changed, # network nodes in preorder
tbl_x, # trait data
df.taxon, # tip labels
PGBP.UnivariateBrownianMotion, # type of evolutionary model
(1.0, 0)); # starting parameters: σ2 = 1.0, μ = 0.0
julia> mod # ML estimates
Univariate Brownian motion
- evolutionary variance rate σ2 :
0.31812948857664464
- root mean μ :
1.1525789703803826
julia> ll # log-likelihood for ML estimates
-8.656529929205773
julia> mod, _ = PGBP.calibrate_exact_cliquetree!( # exact computation
ctb,
sched[1], # schedule the order in which edges (sepsets) are traversed
net.nodes_changed,
tbl_x,
df.taxon,
PGBP.UnivariateBrownianMotion);
julia> mod # REML estimate for σ2, ML estimate for μ
Univariate Brownian motion
- evolutionary variance rate σ2 :
0.37115107002903314
- root mean μ :
1.1525789703844822
```
Both options return the maximum-likelihood (ML) estimate for ``\mu``, though
the latter returns the restricted maximum-likelihood (REML) estimate for
``\sigma^2``.
Strictly speaking, the estimates from the latter option do not jointly maximize
the log-likelihood. However, the REML estimate for ``\sigma^2`` is generally
less biased than its ML counterpart.
In this simple model, the ML estimate of $\sigma^2$ equals the REML estimate
multiplied by a factor $(n-1)/n$, with $n$ the number of tips in the network:
```jldoctest getting_started
julia> PGBP.varianceparam(mod) * (net.numTaxa - 1) / net.numTaxa # sigma2_ML = (n-1)/n * sigma2_REML
0.3181294885963141
```
## Approximate inference
Suppose now that we use a loopy cluster graph instead of a clique tree. We choose `Bethe` to
construct a Bethe cluster graph (also known as factor graph) `fg`.
As before, we set up a data structure `fgb` to track the beliefs of the factor
graph during message passing. Then we call [`calibrate_optimize_clustergraph!`](@ref),
the analog of [`calibrate_optimize_cliquetree!`](@ref) from earlier:
```jldoctest getting_started
julia> fg = PGBP.clustergraph!(net, PGBP.Bethe()) # factor graph
Meta graph based on a Graphs.SimpleGraphs.SimpleGraph{Int8} with vertex labels of type Symbol, vertex metadata of type Tuple{Vector{Symbol}, Vector{Int8}}, edge metadata of type Vector{Int8}, graph metadata given by :Bethe, and default weight 0
julia> b_fg = PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, fg, m); # allocate memory for beliefs
julia> fgb = PGBP.ClusterGraphBelief(b_fg); # wrap beliefs to facilitate message passing
julia> mod, fe, _ = PGBP.calibrate_optimize_clustergraph!(fgb, fg, net.nodes_changed, tbl_x,
df.taxon, PGBP.UnivariateBrownianMotion, (1.0, 0));
julia> mod # parameter estimates
Univariate Brownian motion
- evolutionary variance rate σ2 :
0.3181295330492941
- root mean μ :
1.1525789120595669
julia> fe # factored energy approximation to the log-likelihood
-8.587925093657454
```
We see that both parameter estimates are very close to their maximum-likelihood
counterparts (within 10⁻⁴ percent), and the factored energy slightly
overestimates the log-likelihood for these values (within 1 percent).
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 728 | # Installation
For information on how to install Julia and PhyloNetworks, see
[here](https://crsl4.github.io/PhyloNetworks.jl/dev/man/installation/#Installation).
PhyloGaussianBeliefProp depends on PhyloNetworks.
To install [PhyloGaussianBeliefProp](https://github.com/cecileane/PhyloGaussianBeliefProp.jl)
in the Julia REPL, do:
```julia
julia> using Pkg
julia> Pkg.add("PhyloGaussianBeliefProp")
```
Or enter `]` in the Julia REPL to access the package mode, and do:
```
pkg> add PhyloGaussianBeliefProp
```
In this manual, we will also use
[PhyloNetworks](https://github.com/crsl4/PhyloNetworks.jl) and other packages,
to be installed similarly, here in package mode:
```
pkg> add PhyloNetworks
pkg> add DataFrames
```
| PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 4181 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
DocTestSetup = quote
using DataFrames, Tables, PhyloNetworks, PhyloGaussianBeliefProp;
const PGBP = PhyloGaussianBeliefProp;
end
```
# Message schedules
As described in [5. Propose a schedule from the cluster graph](@ref), we build a
message schedule by calling [`spanningtrees_clusterlist`](@ref) on our given
cluster graph (output by [`clustergraph!`](@ref)).
A schedule of messages can be visualized as a sequence of edge traversals
(from sender to recipient) on the cluster graph.
Since the calibration of a cluster graph requires neighbor clusters to reach
some state of agreement with each other, it is reasonable to expect that
multiple messages may need to be sent back and forth on each edge.
Thus, *proper* message schedules require that each edge is traversed in both directions, infinitely often (until stopping criteria are met).
`spanningtrees_clusterlist` satisfies the requirements of a proper message
schedule by specifying a finite sequence of edge traversals that together
account for all possible messages on the cluster graph. This sequence can then
be repeated as needed. Specifically:
- the sequence of edge traversals is returned as a collection of edge sets for different spanning trees of the cluster graph
- each edge set is ordered as a preorder traversal of a spanning tree
Each time [`calibrate!`](@ref) is called with a particular tree, it
passes messages according to a postorder then preorder traversal of the tree.
Returning to the last few edges of the tree schedule from
[5. Propose a schedule from the cluster graph](@ref):
```jldoctest; setup = :(net = readTopology(pkgdir(PGBP, "test/example_networks", "lazaridis_2014.phy")); preorder!(net); ct = PGBP.clustergraph!(net, PGBP.Cliquetree()); sched = PGBP.spanningtrees_clusterlist(ct, net.nodes_changed);)
julia> DataFrame(parent=sched[1][1], child=sched[1][2])[13:end,:] # last 4 edges of tree 1 in preorder
4×2 DataFrame
Row │ parent child
│ Symbol Symbol
─────┼──────────────────────────────────────────────────────────────────────
1 │ AncientNorthEurasianI1I2NonAfric… EasternNorthAfricanAncientNorthE…
2 │ EasternNorthAfricanAncientNorthE… H1EasternNorthAfricanAncientNort…
3 │ H1EasternNorthAfricanAncientNort… OngeEasternNorthAfrican
4 │ H1EasternNorthAfricanAncientNort… KaritianaH1
```
According to a postorder, the first message to be sent is from `KaritianaH1` to
`H1EasternNorthAfricanAncientNort…`, followed by `OngeEasternNorthAfrican` to
`H1EasternNorthAfricanAncientNort…` and so on.
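This ordering can be made explicit with a small sketch (not package code; it
assumes, as above, that the schedule tree `sched[1]` stores the parent and
child cluster labels of its edges in preorder):

```julia
t = sched[1] # one schedule tree: (parent labels, child labels), edges in preorder
postorder = [(t[2][i], t[1][i]) for i in reverse(eachindex(t[1]))] # child → parent
preorder = [(t[1][i], t[2][i]) for i in eachindex(t[1])] # then parent → child
messages = vcat(postorder, preorder) # (sender, recipient) pairs for one calibrate! call
```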
An *iteration* of calibration refers to `calibrate!` being called once for each
schedule tree in the collection.
Continuing with the code example from [A heuristic](@ref), we:
- increase the number of iterations of calibration to 100 (the default is 1)
- tell `calibrate!` to return once calibration is detected (`auto=true`)
- log information on when calibration was detected (`info=true`)
```jldoctest; setup = :(net = readTopology(pkgdir(PGBP, "test/example_networks", "lipson_2020b.phy")); preorder!(net); df = DataFrame(taxon=tipLabels(net), x=[0.431, 1.606, 0.72, 0.944, 0.647, 1.263, 0.46, 1.079, 0.877, 0.748, 1.529, -0.469]); m = PGBP.UnivariateBrownianMotion(1, 0); fg = PGBP.clustergraph!(net, PGBP.Bethe()); tbl_x = columntable(select(df, :x)); b = PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, fg, m); fgb = PGBP.ClusterGraphBelief(b); sched = PGBP.spanningtrees_clusterlist(fg, net.nodes_changed);)
julia> PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed); # reset to initial beliefs
julia> PGBP.regularizebeliefs_bynodesubtree!(fgb, fg); # regularize by node subtree
julia> PGBP.calibrate!(fgb, sched, 100; auto=true, info=true)
[ Info: calibration reached: iteration 20, schedule tree 1
true
```
Similarly, during iterative optimization
(e.g. [`calibrate_optimize_clustergraph!`](@ref)), multiple iterations of
calibration are run for each set of candidate parameter values (until beliefs
are calibrated) to determine the associated factored energy. | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 8465 | ```@meta
CurrentModule = PhyloGaussianBeliefProp
```
# Regularization
## Ill-defined messages
Propagating a message ``\tilde{\mu}_{i\rightarrow j}`` from a cluster
``\mathcal{C}_i`` to its neighbor ``\mathcal{C}_j`` involves 3 steps:
1\. ``\mathcal{C}_i``'s belief ``\beta_i`` is marginalized over the sepset nodes
``\mathcal{S}_{i,j}``:
```math
\tilde{\mu}_{i\rightarrow j} = \int\beta_i d(\mathcal{C}_i\setminus
\mathcal{S}_{i,j})
```
2\. The message ``\tilde{\mu}_{i\rightarrow j}`` is divided by the current
edge belief ``\mu_{i,j}``, and the result is multiplied into ``\mathcal{C}_j``'s
belief ``\beta_j``:
```math
\beta_j \leftarrow \beta_j\tilde{\mu}_{i\rightarrow j}/\mu_{i,j}
```
3\. The edge belief ``\mu_{i,j}`` is updated to the message just passed:
```math
\mu_{i,j} \leftarrow \tilde{\mu}_{i\rightarrow j}
```
In the linear Gaussian setting, where each belief has the form
``\exp(-x^{\top}\bm{J}x/2 + h^{\top}x + g)``, where ``x`` denotes its scope,
these steps can be concisely expressed in terms of the ``(\bm{J},h,g)``
parameters of the beliefs involved.
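As a minimal, self-contained sketch (this is *not* the package's internal
code), step 1 reads as follows in canonical form, with `keep` indexing the
sepset ``\mathcal{S}_{i,j}`` within ``\beta_i``'s scope and `drop` the nodes
integrated out:

```julia
using LinearAlgebra

function marginalize(J, h, g, keep, drop)
    Jdd = J[drop, drop] # must be full-rank, otherwise the message is ill-defined
    K = J[keep, drop] / Jdd # shorthand for J[keep, drop] * inv(Jdd)
    Jm = J[keep, keep] - K * J[drop, keep] # Schur complement
    hm = h[keep] - K * h[drop]
    gm = g + (length(drop) * log(2π) - logdet(Jdd) + dot(h[drop], Jdd \ h[drop])) / 2
    return Jm, hm, gm # canonical parameters of the message
end
```

Steps 2 and 3 then amount to simple parameter arithmetic: dividing by
``\mu_{i,j}`` subtracts its ``(\bm{J},h,g)`` parameters, and multiplying the
message into ``\beta_j`` adds them over the matching scope.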
Crucially, the precision matrix ``\bm{J}`` for ``\beta_i`` has to be
full-rank / invertible with respect to the nodes to be integrated out.
For example, if ``\mathcal{C}_i=\{X_1,X_2,X_3\}``,
``\mathcal{S}_{i,j}=\{X_1\}`` and:
```math
\beta_i = \exp\left(-\begin{bmatrix}x_1 \\ x_2 \\ x_3\end{bmatrix}^{\top}\bm{J}
\begin{bmatrix}x_1 \\ x_2 \\ x_3\end{bmatrix}/2 +
h^{\top}\begin{bmatrix}x_1 \\ x_2 \\ x_3\end{bmatrix} + g\right), \text{ where }
\bm{J} = \begin{matrix}x_1 \\ x_2 \\ x_3\end{matrix}\!\!
\begin{bmatrix}1 & -1/2 & -1/2 \\ -1/2 & 1/4 & 1/4 \\
-1/2 & 1/4 & 1/4\end{bmatrix}
```
then the ``2\times 2`` submatrix of ``\bm{J}`` for
``\mathcal{C}_i\setminus\mathcal{S}_{i,j}=\{X_2,X_3\}`` (annotated above)
consists only of ``1/4``s, is not full-rank, and thus
``\tilde{\mu}_{i\rightarrow j}`` is ill-defined / cannot be computed.
On a clique tree, a schedule of messages that follows a postorder traversal of
the tree (from the tip clusters to a root cluster) can always be computed.
On a loopy cluster graph however, it may be unclear how to find a schedule such
that each message is well-defined, or if such a schedule even exists.
This is a problem since a loopy cluster graph typically requires multiple
traversals to reach convergence.
## A heuristic
One approach to deal with ill-defined messages is to skip their computation and
proceed on with the schedule.
A more robust, yet simple, alternative is to *regularize* cluster beliefs by
increasing some diagonal elements of their precision matrix so that the relevant
submatrices are full-rank:
```math
\begin{matrix}x_1 \\ x_2 \\ x_3\end{matrix}\!\!
\begin{bmatrix}1 & -1/2 & -1/2 \\ -1/2 & 1/4 & 1/4 \\
-1/2 & 1/4 & 1/4\end{bmatrix} \longrightarrow
\begin{bmatrix}1 & -1/2 & -1/2 \\ -1/2 & 1/4\textcolor{red}{+\epsilon} & 1/4 \\
-1/2 & 1/4 & 1/4\textcolor{red}{+\epsilon}\end{bmatrix}
```
To maintain the probability model, the product of all cluster beliefs divided
by the product of all edge beliefs must remain equal to the joint distribution
``p_\theta`` (this is satisfied after factor assignment, and every time a message
is passed).
Thus, each time a cluster belief is regularized, we "balance" this change by a
similar modification to one or more associated edge beliefs. For example, if
``\mathcal{C}_i`` above was connected to another sepset
``\mathcal{S}_{i,k}=\{X_2,X_3\}`` with ``\bm{J}=\bm{0}`` then we might do:
```math
\begin{matrix}x_2 \\ x_3\end{matrix}\!\!
\begin{bmatrix}0 & 0 \\ 0 & 0\end{bmatrix} \longrightarrow
\begin{bmatrix}0\textcolor{red}{+\epsilon} & 0 \\ 0 &
0\textcolor{red}{+\epsilon}\end{bmatrix}
```
We provide several options for regularization below. A typical usage of these
methods is after the initial assignment of factors.
### By cluster
[`regularizebeliefs_bycluster!`](@ref) performs regularization separately from
message passing.
The algorithm loops over each cluster. For each incident sepset, ``\epsilon`` is
added to all the diagonal entries of its precision, and to the corresponding
diagonal entries of the cluster's precision.
Currently, [`calibrate_optimize_clustergraph!`](@ref) calls
`regularizebeliefs_bycluster!` for each set of candidate parameter values, before calibration is run. Other options are likely to be available in future versions.
### Along node subtrees
[`regularizebeliefs_bynodesubtree!`](@ref) performs regularization separately
from message passing.
A *node subtree* of a cluster graph is the subtree induced by all clusters that
contain that node.
The algorithm loops over each node subtree. For all edges and all but one
cluster in a given node subtree, it adds ``\epsilon`` to the diagonal entries of
the precision matrix that correspond to that node.
### On a schedule
[`regularizebeliefs_onschedule!`](@ref) interleaves regularization with message
passing.
The algorithm loops over each cluster and tracks which messages have been sent:
- Each cluster ``\mathcal{C}_i`` is regularized only if it has not received a message from ``\ge 1`` of its neighbors.
- Regularization proceeds by adding ``\epsilon`` to the diagonal entries of ``\mathcal{C}_i``'s precision that correspond to the nodes in ``\mathcal{S}_{i,j}``, and to all diagonal entries of ``\mathcal{S}_{i,j}``'s precision, if neighbor ``\mathcal{C}_j`` has not sent a message to ``\mathcal{C}_i``.
- After being regularized, ``\mathcal{C}_i`` sends a message to each neighbor for which it has not already done so.
The example below shows how regularization methods can help to minimize
ill-defined messages. We use here a network from
[Lipson et al. (2020, Extended Data Fig. 4)](https://doi.org/10.1038/s41586-020-1929-1)
[lipson2020ancient](@cite), with degree-2 nodes suppressed and any resulting
length 0 edges assigned length 1 (see [lipson2020b_notes.jl](https://github.com/bstkj/graphicalmodels_for_phylogenetics_code/blob/5f61755c4defe804fd813113e883d49445971ade/real_networks/lipson2020b_notes.jl)),
and follow the steps in [Exact likelihood for fixed parameters](@ref):
```jldoctest regularization
julia> using DataFrames, Tables, PhyloNetworks, PhyloGaussianBeliefProp
julia> const PGBP = PhyloGaussianBeliefProp;
julia> net = readTopology(pkgdir(PGBP, "test/example_networks", "lipson_2020b.phy")); # 54 edges; 44 nodes: 12 tips, 11 hybrid nodes, 21 internal tree nodes.
julia> preorder!(net)
julia> df = DataFrame(taxon=tipLabels(net), # simulated using `simulate(net, ParamsBM(0, 1))` from PhyloNetworks
x=[0.431, 1.606, 0.72, 0.944, 0.647, 1.263, 0.46, 1.079, 0.877, 0.748, 1.529, -0.469]);
julia> m = PGBP.UnivariateBrownianMotion(1, 0); # choose model: σ2 = 1.0, μ = 0.0
julia> fg = PGBP.clustergraph!(net, PGBP.Bethe()); # build factor graph
julia> tbl_x = columntable(select(df, :x)); # trait data as column table
julia> b = PGBP.init_beliefs_allocate(tbl_x, df.taxon, net, fg, m); # allocate memory for beliefs
julia> PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed); # assign factors based on model
julia> fgb = PGBP.ClusterGraphBelief(b); # wrap beliefs for message passing
julia> sched = PGBP.spanningtrees_clusterlist(fg, net.nodes_changed); # generate schedule
```
Without regularization, errors indicating ill-defined messages (which are skipped)
are returned when we run a single iteration of calibration:
```jldoctest regularization; filter = r"└ @ PhyloGaussianBeliefProp.*" => s""
julia> PGBP.calibrate!(fgb, sched); # there are ill-defined messages (which are skipped)
┌ Error: belief H5I5I16, integrating [2, 3]
└ @ PhyloGaussianBeliefProp ...
┌ Error: belief H6I10I15, integrating [2, 3]
└ @ PhyloGaussianBeliefProp ...
```
However, with regularization, there are no ill-defined messages for a single
iteration of calibration:
```jldoctest regularization
julia> PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed); # reset to initial beliefs
julia> PGBP.regularizebeliefs_bynodesubtree!(fgb, fg); # regularize by node subtree
julia> PGBP.calibrate!(fgb, sched); # no ill-defined messages
julia> PGBP.init_beliefs_assignfactors!(b, m, tbl_x, df.taxon, net.nodes_changed); # reset to initial beliefs
julia> PGBP.regularizebeliefs_onschedule!(fgb, fg); # regularize on schedule
julia> PGBP.calibrate!(fgb, sched); # no ill-defined messages
```
Note that this does not necessarily guarantee that subsequent iterations avoid
ill-defined messages. | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.0.1 | e785a3d1d3bd6dd66ffd313f06d03b00d1a0a9b4 | docs | 1567 | # Example networks
The `Source` column indicates which figure of which study the network was coded from.
| Network | Source |
| --- | --- |
| `lazaridis_2014.phy` | [Fig 3](https://doi.org/10.1038/nature13673) |
| `lipson_2020b.phy` | [Extended Data Fig 4](https://doi.org/10.1038/s41586-020-1929-1) |
| `mateescu_2010.phy` | [Fig 2a](https://doi.org/10.1613/jair.2842) |
| `muller_2022.phy` | [Fig 1a](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9297283/) |
| `sun_2023.phy` | [Fig 4c](https://doi.org/10.1038/s41559-023-02185-8) |
| `maier_2023.phy` | [Fig 1e](https://doi.org/10.7554/eLife.85492)|
Networks (coded in extended newick format) were preprocessed as follows:
- `mateescu_2010.phy`, `lazaridis_2014.phy`: edge lengths and inheritance
probabilities were set arbitrarily.
- `muller_2022.phy`: the inheritance probabilities were estimated from the
inferred recombination breakpoints (see [muller2022_nexus2newick.jl](https://github.com/bstkj/graphicalmodels_for_phylogenetics_code/blob/5f61755c4defe804fd813113e883d49445971ade/real_networks/muller2022_nexus2newick.jl)).
- `lipson_2020b.phy`: degree-2 nodes were suppressed and any resulting
length 0 edges assigned length 1 (see [lipson2020b_notes.jl](https://github.com/bstkj/graphicalmodels_for_phylogenetics_code/blob/5f61755c4defe804fd813113e883d49445971ade/real_networks/lipson2020b_notes.jl)).
- `sun_2023.phy`: degree-2 nodes were not suppressed. All admixture edges
were assigned a length of 1.0 (the minimum length among all drift edges).
- `maier_2023.phy`: Edge lengths were set arbitrarily. | PhyloGaussianBeliefProp | https://github.com/JuliaPhylo/PhyloGaussianBeliefProp.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | code | 1061 | using Documenter
using AnyMOD
makedocs(sitename="AnyMOD.jl",
authors = "Leonard Goeke",
pages = [
"Introduction" => "index.md",
"Model Elements" => Any["Model object" => "model_object.md",
"Sets and Mappings" => "sets.md",
"Parts" => "parts.md",
"Parameter" => Any["Parameter overview" => "parameter_overview.md","Parameter list" => "parameter_list.md"],
"Variables" => "variables.md",
"Constraints" => "constraints.md"],
"Tools" => Any["Reporting" => Any["Error handling" => "error.md","Data files" => "data.md","Plots" => "plots.md",],
"Performance and stability" => "performance.md"],
"Annex" => Any["Tips" => "tips.md",
"API" => "api.md",
"Related material" => "related.md"]
],
)
deploydocs(
repo = "github.com/leonardgoeke/AnyMOD.jl.git",
devbranch = "master"
)
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | code | 1708 | module AnyMOD
# XXX enforce use of Julia's own python distribution to avoid interference with local python installations
using Pkg
# save current value of environment variable
if "PYTHON" in keys(ENV)
envPy = ENV["PYTHON"]
else
envPy = ""
end
# build PyCall against Julia's own python distribution
ENV["PYTHON"]=""
Pkg.build("PyCall")
# restore the environment variable to its former value
if envPy == ""
delete!(ENV,"PYTHON")
else
ENV["PYTHON"] = envPy
end
using Base.Threads, CSV, Dates, LinearAlgebra, Requires, DataFrames, JuMP
using MathOptInterface, Reexport, Statistics, PyCall, SparseArrays, CategoricalArrays
@reexport using JuMP
pyimport_conda("networkx","networkx")
pyimport_conda("matplotlib.pyplot","matplotlib")
pyimport_conda("plotly","plotly")
include("objects.jl")
include("tools.jl")
include("modelCreation.jl")
include("optModel/exchange.jl")
include("optModel/objective.jl")
include("optModel/other.jl")
include("optModel/tech.jl")
include("dataHandling/mapping.jl")
include("dataHandling/parameter.jl")
include("dataHandling/readIn.jl")
include("dataHandling/tree.jl")
include("dataHandling/util.jl")
export anyModel, initializeModel, createOptModel!, setObjective!
export reportResults, reportTimeSeries, printObject, printDuals
export plotTree, plotEnergyFlow, moveNode!
# XXX define function to print a subset of infeasible constraints, if Gurobi can be used (has to be installed separately)
function __init__()
@require Gurobi="2e9cd046-0924-5485-92f1-d5272153d98b" include("dataHandling/gurobiTools.jl")
end
end
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | code | 4641 |
# XXX create optimization model after anyModel has been initialized
"""
```julia
createOptModel!(model_object::anyModel)
```
Create all elements of the model's underlying optimization problem except for the objective function.
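A brief usage sketch (the directory names are placeholders, not part of the package):
```julia
anyM = anyModel("examples/demo","results", objName = "demo")
createOptModel!(anyM)
setObjective!(:costs,anyM) # add the objective function afterwards
```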
"""
function createOptModel!(anyM::anyModel)
# <editor-fold desc="create technology related variables and constraints"
techSym_arr = collect(keys(anyM.parts.tech))
parDef_dic = defineParameter(anyM.options,anyM.report)
# XXX get dimension of expansion and capacity variables and mapping of capacity constraints
tsYear_dic = Dict(zip(anyM.supTs.step,collect(0:anyM.options.shortExp:(length(anyM.supTs.step)-1)*anyM.options.shortExp)))
prepVar_dic = Dict{Symbol,Dict{Symbol,NamedTuple}}()
prepareTechs!(techSym_arr,prepVar_dic,tsYear_dic,anyM)
if any(getindex.(anyM.report,1) .== 3) print(getElapsed(anyM.options.startTime)); errorTest(anyM.report,anyM.options) end
# remove technologies without any potential capacity variables
techSym_arr = collect(keys(prepVar_dic))
foreach(x -> delete!(anyM.parts.tech, x),setdiff(collect(keys(anyM.parts.tech)),techSym_arr))
# XXX create all technology related elements
# creates dictionary that assigns combination of superordinate dispatch timestep and dispatch level to dispatch timesteps
allLvlTsDis_arr = unique(getfield.(values(anyM.cInfo),:tsDis))
ts_dic = Dict((x[1], x[2]) => anyM.sets[:Ts].nodes[x[1]].lvl == x[2] ? [x[1]] : getDescendants(x[1],anyM.sets[:Ts],false,x[2]) for x in Iterators.product(anyM.supTs.step,allLvlTsDis_arr))
# creates dictionary that assigns combination of expansion region and dispatch level to dispatch region
allLvlR_arr = union(getindex.(getfield.(getfield.(values(anyM.parts.tech),:balLvl),:exp),2),map(x -> x.rDis,values(anyM.cInfo)))
allRExp_arr = union([getfield.(getNodesLvl(anyM.sets[:R],x),:idx) for x in allLvlR_arr]...)
r_dic = Dict((x[1], x[2]) => (anyM.sets[:R].nodes[x[1]].lvl <= x[2] ? getDescendants(x[1], anyM.sets[:R],false,x[2]) : getAncestors(x[1],anyM.sets[:R],:int,x[2])[end]) |> (z -> typeof(z) <: Array ? z : [z]) for x in Iterators.product(allRExp_arr,allLvlR_arr))
produceMessage(anyM.options,anyM.report, 3," - Determined dimension of expansion and capacity variables for technologies")
# constraints for technologies are prepared in a threaded loop and stored in an array of dictionaries
techCnsDic_arr = Array{Dict{Symbol,cnsCont}}(undef,length(techSym_arr))
tech_itr = collect(enumerate(techSym_arr))
@threads for (idx,tSym) in tech_itr
techCnsDic_arr[idx] = createTech!(techInt(tSym,anyM.sets[:Te]),anyM.parts.tech[tSym],prepVar_dic[tSym],copy(parDef_dic),ts_dic,r_dic,anyM)
end
# loops over the array of constraint-container dictionaries for each technology to create the actual JuMP constraints
for (idx,cnsDic) in enumerate(techCnsDic_arr), cnsSym in keys(cnsDic)
anyM.parts.tech[techSym_arr[idx]].cns[cnsSym] = createCns(cnsDic[cnsSym],anyM.optModel)
end
produceMessage(anyM.options,anyM.report, 1," - Created variables and constraints for all technologies")
# </editor-fold>
# <editor-fold desc="create exchange related variables and constraints"
prepExc_dic = Dict{Symbol,NamedTuple}()
partExc = anyM.parts.exc
partLim = anyM.parts.lim
# obtain dimensions of expansion variables for exchange
potExc_df = prepareExcExpansion!(partExc,partLim,prepExc_dic,tsYear_dic,anyM)
# obtain capacity dimensions solely based on expansion variables
prepareCapacity!(partExc,prepExc_dic,prepExc_dic[:expExc].var,:capaExc,anyM)
addResidualCapaExc!(partExc,prepExc_dic,potExc_df,anyM)
if !all(map(x -> isempty(x),values(prepExc_dic[:capaExc])))
# create expansion and capacity variables
createExpCap!(partExc,prepExc_dic,anyM)
# create capacity constraint
createCapaExcCns!(partExc,anyM)
produceMessage(anyM.options,anyM.report, 2," - Created all variables and constraints related to expansion and capacity for exchange")
# create dispatch related variables
createExcVar!(partExc,ts_dic,anyM)
produceMessage(anyM.options,anyM.report, 2," - Created all dispatch variables for exchange")
# create capacity restrictions
createRestrExc!(ts_dic,partExc,anyM)
produceMessage(anyM.options,anyM.report, 2," - Created all capacity restrictions for exchange")
produceMessage(anyM.options,anyM.report, 1," - Created variables and constraints for exchange")
end
# </editor-fold>
createTradeVarCns!(anyM.parts.trd,anyM)
createEnergyBal!(techSym_arr,anyM)
createLimitCns!(anyM.parts.lim,anyM)
produceMessage(anyM.options,anyM.report, 1," - Completed model creation")
end
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | code | 21341 |
# <editor-fold desc="definition and handling of parameters"
# XXX defines struct for handling parameter data
"""
```julia
mutable struct ParElement
name::Symbol
dim::Tuple
defVal::Union{Nothing,Float64}
herit::Tuple
data::DataFrame
techPre::NamedTuple{(:preset,:mode),Tuple{Symbol,Tuple{Vararg{Symbol,N} where N}}}
end
```
Type to store parameter data. Includes data and additional information specified in [Parameter list](@ref).
**Fields**
- `name::Symbol`: name of the parameter
- `dim::Tuple`: potential dimensions of parameter data
- `defVal::Union{Nothing,Float64}`: default value
- `herit::Tuple`: inheritance rules for parameter, see [Parameter overview](@ref)
- `data::DataFrame`: parameter data
- `techPre::NamedTuple`: for dispatch parameters, the level the parameter is preset on and the capacity restrictions affected by modes
"""
mutable struct ParElement
name::Symbol
dim::Tuple
defVal::Union{Nothing,Float64}
herit::Tuple
data::DataFrame
techPre::NamedTuple{(:preset,:mode),Tuple{Symbol,Tuple{Vararg{Symbol,N} where N}}}
function ParElement(paraData_df::DataFrame,paraDef_ntup::NamedTuple,name::Symbol,report::Array{Tuple,1})
setLongShort_dic = Dict(:Ts => :timestep, :R => :region, :C => :carrier, :Te => :technology, :M => :mode)
if isempty(paraData_df) return new(name,paraDef_ntup.dim,paraDef_ntup.defVal,paraDef_ntup.herit,DataFrame()) end
# XXX check consistency of rows in input dataframe and definition of set and rename columns according to set definition
# assigns array of used suffixes according to parameter definition to each set
splitDim_arr = map(x -> map(y -> Symbol(y), split(String(x),"_")),collect(paraDef_ntup.dim))
setSuf_dic = Dict(x => map(y -> length(y) == 1 ? Symbol() : y[end],filter(z -> z[1] == x,splitDim_arr)) for x in unique(map(x -> x[1],splitDim_arr)))
# loops over set columns in input dataframe and assigns them to the sets defined for the parameter
newCol_dic = Dict(:val => :val)
sufNum_dic = Dict(:b => 2, :c => 3, :d => 4, :e => 5, :f => 6, :g => 7)
for colNam in setdiff(namesSym(paraData_df),[:val])
colNam_arr = split(String(colNam),"_")
setNam = Symbol(colNam_arr[1])
if !haskey(setSuf_dic,setNam) # parameter provided for a set not appearing in definition (e.g. demand depending on the technology)
push!(report,(2, "parameter assignment", string(name), "parameter data was specified for $(setLongShort_dic[setNam]) set, but it is not defined to depend on this set"))
continue
elseif length(setSuf_dic[setNam]) == 1 && length(colNam_arr) > 1 # there are several instances of the set provided, but it only depends on one instance (e.g. two region sets for efficiency)
push!(report,(2, "parameter assignment", string(name), "parameter data was specified for several instances of $(setLongShort_dic[setNam]) set, but it is defined to depend only on one instance, additional instances were ignored"))
continue
elseif setSuf_dic[setNam][1] == Symbol() # set has only one instance and no suffix => no change when converting from read-in dataframe to parameter element
newCol_dic[colNam] = colNam
elseif length(setSuf_dic[setNam]) == 1 || length(colNam_arr) == 1 # column name in dataframe has no underscore, but definition of parameter element has one
newCol_dic[colNam] = Symbol(setNam, "_", setSuf_dic[setNam][1])
else
cntRep_int = sufNum_dic[Symbol(colNam_arr[2])] # set defined for several instances
newCol_dic[colNam] = Symbol(setNam, "_", setSuf_dic[setNam][cntRep_int])
end
end
# keeps only used columns and renames them accordingly
writeData_df = paraData_df[:,collect(keys(newCol_dic))]
DataFrames.rename!(writeData_df,newCol_dic)
new_obj = new(name,paraDef_ntup.dim,paraDef_ntup.defVal,paraDef_ntup.herit,writeData_df)
# defines on which level the parameter is preset and which capacity restrictions are affected by different modes, for all dispatch parameters where this is specified
if haskey(paraDef_ntup,:techPre) new_obj.techPre = paraDef_ntup.techPre end
return new_obj
end
ParElement() = new()
end
# XXX specific struct for read in process of parameter data
mutable struct parEntry
colSet::Symbol
entry::Array{String,1}
lvl::Array{Int,1}
startLvl::Int
end
# </editor-fold>
# <editor-fold desc="import and extensions of base functions"
# XXX functions to copy parameter structs of parameter data
import Base.copy
function copy(par_obj::ParElement)
out = ParElement()
out.name = par_obj.name
out.dim = par_obj.dim
out.defVal = par_obj.defVal
out.herit = par_obj.herit
out.data = copy(par_obj.data)
if isdefined(par_obj,:techPre) out.techPre = par_obj.techPre end
return out
end
function copy(par_obj::ParElement,data_df::DataFrame)
out = ParElement()
out.name = par_obj.name
out.dim = par_obj.dim
out.defVal = par_obj.defVal
out.herit = par_obj.herit
out.data = data_df
if isdefined(par_obj,:techPre) out.techPre = par_obj.techPre end
return out
end
# XXX the usual collect sometimes throws a mysterious error when used on dictionary keys; this command avoids that
import Base._collect
import Base.SizeUnknown
collectKeys(itr) = _collect(Symbol, itr, SizeUnknown())
# </editor-fold>
# <editor-fold desc="struct for individual parts of the model"
# XXX defines parts of the model
abstract type AbstractModelPart end
"""
```julia
mutable struct TechPart <: AbstractModelPart
name::Tuple{Vararg{String,N} where N}
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
carrier::NamedTuple
balLvl::NamedTuple{(:exp,:ref),Tuple{Tuple{Int,Int},Union{Nothing,Tuple{Int,Int}}}}
capaRestr::DataFrame
actSt::Tuple
type::Symbol
disAgg::Bool
modes::Tuple{Vararg{Int,N} where N}
TechPart(name::Tuple{Vararg{String,N} where N}) = new(name,Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
TechPart() = new()
end
```
Type used for technology model parts.
**General fields**
- `par`: dictionary of parameters with names as keys (see [Parameter list](@ref))
- `var`: dictionary of variables with names as keys (see [Variables](@ref))
- `cns`: dictionary of constraints with names as key (see [Constraints](@ref))
**Technology specific fields**
See [Technologies](@ref) for details.
- `name`: full name of technology as a series of nodes from the technology tree
- `carrier`: ids of energy carriers assigned to technology by groups (e.g. generation, use, ...)
- `balLvl`: temporal and spatial resolution for expansion and conversion balance of the technology
- `capaRestr`: specification of capacity restrictions required for technology
- `actSt`: ids of carriers actively stored although they are not leafs
- `type`: type of technology (`stock`, `mature`, or `evolving`)
- `disAgg`: if `true`, dispatch is modelled at expansion resolution instead of dispatch resolution
- `modes`: different operational modes of technology
"""
mutable struct TechPart <: AbstractModelPart
name::Tuple{Vararg{String,N} where N}
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
carrier::NamedTuple
balLvl::NamedTuple{(:exp,:ref),Tuple{Tuple{Int,Int},Union{Nothing,Tuple{Int,Int}}}}
capaRestr::DataFrame
actSt::Tuple
type::Symbol
disAgg::Bool
modes::Tuple{Vararg{Int,N} where N}
TechPart(name::Tuple{Vararg{String,N} where N}) = new(name,Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
TechPart() = new()
end
"""
```julia
mutable struct OthPart <: AbstractModelPart
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
OthPart() = new(Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
end
```
Type used for 'exchange', 'trade', 'balance', 'limits', and 'objective' model parts.
**Fields**
- `par`: dictionary of parameters with names as keys (see [Parameter list](@ref))
- `var`: dictionary of variables with names as keys (see [Variables](@ref))
- `cns`: dictionary of constraints with names as keys (see [Constraints](@ref))
"""
mutable struct OthPart <: AbstractModelPart
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
OthPart() = new(Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
end
# XXX container to store data defining a constraint (used to separate definition and actual JuMP creation of constraints)
struct cnsCont
data::DataFrame
sign::Symbol
end
# </editor-fold>
# <editor-fold desc="structs for nodes that then make up the trees to save set data"
# XXX define nodes for set tree and tree itself
"""
```julia
mutable struct Node
idx::Int
val::String
lvl::Int
subIdx::Int
down::Array{Int,1}
end
```
Type to store nodes of hierarchical trees.
**Fields**
- `idx`: internal node id
- `val`: name originally assigned
- `lvl`: level of node within hierarchical tree
- `subIdx`: numbered position among all nodes sharing the same direct ancestor
- `down`: array of children
"""
mutable struct Node
idx::Int
val::String
lvl::Int
subIdx::Int
down::Array{Int,1}
end
"""
```julia
mutable struct Tree
nodes::Dict{Int,Node}
srcTup::Dict{Tuple,Array{Int,1}}
srcStr::Dict{Tuple{String,Int},Array{Int,1}}
up::Dict{Int,Int}
height::Int
Tree() = new(Dict{Int,Node}(),Dict{Tuple,Array{Int,1}}(),Dict{Tuple{String,Int},Array{Int,1}}(),Dict{Int,Int}(),1)
end
```
Type to store hierarchical trees.
**Fields**
- `nodes`: dictionary of nodes with node ids as keys
- `srcTup`: assigns a tuple of consecutive node names to the corresponding id
- `srcStr`: assigns a tuple with a node name and a level to the corresponding id
- `up`: assigns the id of each node to the id of its ancestor
- `height`: maximum level of tree
"""
mutable struct Tree
nodes::Dict{Int,Node}
srcTup::Dict{Tuple,Array{Int,1}}
srcStr::Dict{Tuple{String,Int},Array{Int,1}}
up::Dict{Int,Int}
height::Int
Tree() = new(Dict{Int,Node}(),Dict{Tuple,Array{Int,1}}(),Dict{Tuple{String,Int},Array{Int,1}}(),Dict{Int,Int}(),1)
end
# </editor-fold>
# <editor-fold desc="options for model and model itself"
# create abstract model object to reference before creation (avoid circular type definition)
abstract type AbstractModel end
# XXX defines final model object and its options
struct modOptions
# data in- and output
inDir::Array{String,1}
outDir::String
objName::String
csvDelim::String
outStamp::String
# model generation
decomm::Symbol
interCapa::Symbol
supTsLvl::Int
shortExp::Int
redStep::Float64
# managing numerical issues
emissionLoss::Bool
coefRng::NamedTuple{(:mat,:rhs),Tuple{Tuple{Float64,Float64},Tuple{Vararg{Float64,2}}}}
scaFac::NamedTuple{(:capa,:oprCapa,:dispConv,:dispSt,:dispExc, :dispTrd, :costDisp,:costCapa,:obj),Tuple{Vararg{Float64,9}}}
bound::NamedTuple{(:capa,:disp,:obj),Tuple{Vararg{Float64,3}}}
avaMin::Float64
checkRng::Float64
# reporting related options
reportLvl::Int
errCheckLvl::Int
errWrtLvl::Int
startTime::DateTime
end
# XXX flow graph object that defines relations between technologies and carriers (and among carriers)
mutable struct flowGraph
nodeC::Dict{Int64,Int64}
nodeTe::Dict{Int64,Int64}
edgeC::Array{Pair{Int,Int},1}
edgeTe::Array{Pair{Int,Int},1}
nodePos::Dict{Int,Array{Float64,1}}
function flowGraph(anyM::AbstractModel)
# creates dictionary mapping carrier id to node id
nodeC_dic = Dict(x[2] => x[1] for x in enumerate(sort(filter(x -> x != 0,getfield.(collect(values(anyM.sets[:C].nodes)),:idx)))))
# get all relevant technologies; a technology is not relevant if all children of its parent have the same carriers (in that case only the parent is relevant)
t_tree = anyM.sets[:Te]
allTech_arr = getfield.(collect(values(t_tree.nodes)),:idx)
tleaf_dic = Dict(x => unique(filter(y -> techSym(y,anyM.sets[:Te]) in keys(anyM.parts.tech), [x,getDescendants(x,t_tree,true)...])) for x in allTech_arr)
relTech_arr = Array{Int,1}()
for t in keys(tleaf_dic)
subCar_arr = map(y -> anyM.parts.tech[techSym(y,anyM.sets[:Te])].carrier,tleaf_dic[t])
if length(unique(subCar_arr)) == 1
push!(relTech_arr,t)
else
append!(relTech_arr,collect(tleaf_dic[t]))
end
end
# creates dictionary mapping each relevant id to node id
nodeTe_dic = Dict(x[2] => x[1] + length(nodeC_dic) for x in enumerate(filter(x -> isempty(intersect(getAncestors(x,t_tree,:int),relTech_arr)),unique(relTech_arr))))
# creates edges between technologies
edgeTe_arr = Array{Pair{Int,Int},1}()
for t in keys(nodeTe_dic)
gotTech_boo = false; tItr = t
while !gotTech_boo
if techSym(tItr,anyM.sets[:Te]) in keys(anyM.parts.tech)
gotTech_boo = true
else
tItr = intersect(getDescendants(t,anyM.sets[:Te],true),map(x -> techInt(x,anyM.sets[:Te]),collectKeys(keys(anyM.parts.tech))))[1]
end
end
car_ntup = anyM.parts.tech[techSym(tItr,anyM.sets[:Te])].carrier
for cIn in map(x -> getfield(car_ntup,x),intersect(keys(car_ntup),(:use,:stExtIn))) |> (y -> isempty(y) ? y : union(y...))
push!(edgeTe_arr, nodeC_dic[cIn] => nodeTe_dic[t])
end
for cOut in map(x -> getfield(car_ntup,x),intersect(keys(car_ntup),(:gen,:stExtOut))) |> (y -> isempty(y) ? y : union(y...))
push!(edgeTe_arr, nodeTe_dic[t] => nodeC_dic[cOut])
end
end
# creates edges between carriers
edgeC_arr = Array{Pair{Int,Int},1}()
for c in keys(nodeC_dic)
for cChild in anyM.sets[:C].nodes[c].down
push!(edgeC_arr, nodeC_dic[cChild] => nodeC_dic[c])
end
end
return new(nodeC_dic,nodeTe_dic,edgeC_arr,edgeTe_arr)
end
end
# XXX specific information for graphical evaluation
"""
```julia
mutable struct graInfo
graph::flowGraph
names::Dict{String,String}
colors::Dict{String,Tuple{Float64,Float64,Float64}}
end
```
Type to store information on styling of graphs. See [Styling](@ref).
**Fields**
- `graph`: saved layout for the qualitative energy flow graph
- `names`: assigns names of nodes to labels used in plots
- `colors`: assigns names or labels of nodes to an RGB color specified as a tuple of three numbers between 0 and 1
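For example, labels and colors can be adjusted after model creation (the node names `gasPlant` and `heat` are hypothetical and depend on the model at hand):
```julia
anyM.graInfo.names["gasPlant"] = "gas turbine" # label used in plots
anyM.graInfo.colors["heat"] = (0.75, 0.18, 0.25) # RGB values between 0 and 1
```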
"""
mutable struct graInfo
graph::flowGraph
names::Dict{String,String}
colors::Dict{String,Tuple{Float64,Float64,Float64}}
function graInfo(anyM::AbstractModel)
# create default options for names and colors
graph_obj = flowGraph(anyM)
# specify some default names and colors used in visualisations
namesDef_arr = ["coalPlant" => "coal plant", "gasPlant" => "gas plant", "districtHeat" => "district heat", "naturalGas" => "natural gas", "synthGas" => "synthetic gas", "fossilGas" => "fossil gas",
"demand" => "final demand", "export" => "export", "import" => "import", "crt" => "curtailment", "lss" => "loss of load", "trdSell" => "trade sell", "trdBuy" => "trade buy"]
# create dictionary assigning internal model names to names used within visualisations
allVal_arr = unique(vcat(map(x -> getfield.(values(anyM.sets[x].nodes),:val) ,collect(keys(anyM.sets)))...))
names_dic = setdiff(allVal_arr,getindex.(namesDef_arr,1)) |> (z -> Dict(vcat(namesDef_arr,Pair.(z,z))))
# define default colors for default energy carriers
colorsCar_arr = ["electricity" => (1.0, 0.9215, 0.2313),"heat" => (0.769,0.176,0.290),"districtHeat" => (0.6,0.0,0.169), "gas" => (1.0,0.416,0.212),
"naturalGas" => (1.0,0.506,0.294),"fossilGas" => (0.898,0.259,0.075), "synthGas" => (0.235,0.506,0.325), "hydrogen" => (0.329,0.447,0.827),
"coal" => (0.459,0.286,0.216),"biomass" => (0.682,0.898,0.443),"bioGas" => (0.682,0.898,0.443)]
colors_dic = setdiff(getfield.(values(anyM.sets[:C].nodes),:val),getindex.(colorsCar_arr,1)) |> (z -> Dict(vcat(colorsCar_arr,Pair.(z,fill((0.85,0.85,0.85),length(z))))))
return new(graph_obj,names_dic,colors_dic)
end
end
# XXX finally, the model object itself
"""
```julia
mutable struct anyModel <: AbstractModel
options::modOptions
report::Array{Tuple,1}
optModel::Model
lock::ReentrantLock
supTs::NamedTuple{(:lvl,:step,:sca),Tuple{Int,Tuple{Vararg{Int,N} where N},Dict{Tuple{Int,Int},Float64}}}
cInfo::Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}}
sets::Dict{Symbol,Tree}
parts::NamedTuple{(:tech,:trd,:exc,:bal,:lim,:obj),Tuple{Dict{Symbol,TechPart},OthPart,OthPart,OthPart,OthPart,OthPart}}
graInfo::graInfo
end
```
The core model object containing all related data and subordinate objects.
**Fields**
- `options`: model options provided as keyword arguments to constructor
- `report`: entries for writing to the reporting file [Error handling](@ref)
- `optModel::Model`: the actual [JuMP](https://github.com/JuliaOpt/JuMP.jl) object of the model's underlying optimization problem
- `lock`: lock used for multi-threading
- `supTs`: information and mappings for superordinate time-steps
- `cInfo`: information on resolution of energy carriers
- `sets`: sets organized as [Tree](@ref) objects (see [Sets and Mappings](@ref))
- `parts::NamedTuple`: all part objects of the model (see [Parts](@ref))
- `graInfo::graInfo`: properties for creation of plots and graphics, can be used to adjust colors and labels (see [Styling](@ref))
**Constructor**
```julia
anyModel(inDir::Union{String,Array{String,1}},outDir::String; kwargs)
```
See [Model object](@ref) for a detailed list of arguments.
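A brief usage sketch (the directory names are placeholders):
```julia
anyM = anyModel(["sets","timeSeries"],"results", objName = "demo", shortExp = 5)
```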
"""
mutable struct anyModel <: AbstractModel
options::modOptions
report::Array{Tuple,1}
optModel::Model
lock::ReentrantLock
supTs::NamedTuple{(:lvl,:step,:sca),Tuple{Int,Tuple{Vararg{Int,N} where N},Dict{Tuple{Int,Int},Float64}}}
cInfo::Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}}
sets::Dict{Symbol,Tree}
parts::NamedTuple{(:tech,:trd,:exc,:bal,:lim,:obj),Tuple{Dict{Symbol,TechPart},OthPart,OthPart,OthPart,OthPart,OthPart}}
graInfo::graInfo
function anyModel(inDir::Union{String,Array{String,1}},outDir::String; objName = "", csvDelim = ",", decomm = :decomm, interCapa = :linear, supTsLvl = 0, shortExp = 10, redStep = 1.0, emissionLoss = true,
reportLvl = 2, errCheckLvl = 1, errWrtLvl = 1, coefRng = (mat = (1e-2,1e5), rhs = (1e-2,1e2)),
scaFac = (capa = 1e1, oprCapa = 1e2, dispConv = 1e3, dispSt = 1e4, dispExc = 1e3, dispTrd = 1e3, costDisp = 1e1, costCapa = 1e2, obj = 1e0),
bound = (capa = NaN, disp = NaN, obj = NaN), avaMin = 0.01, checkRng = NaN)
anyM = new()
# <editor-fold desc="initialize report and options"
# XXX creates dataframe to which reporting is written
anyM.report = Array{Tuple,1}()
anyM.optModel = Model()
anyM.lock = ReentrantLock()
# XXX sets whole options object from specified arguments (TODO: work with kwargs later)
outStamp_str = string(objName,"_",Dates.format(now(),"yyyymmddHHMM"))
defOpt_ntup = (inDir = typeof(inDir) == String ? [inDir] : inDir, outDir = outDir, objName = objName, csvDelim = csvDelim, outStamp = outStamp_str, decomm = decomm, interCapa = interCapa,
supTsLvl = supTsLvl, shortExp = shortExp, redStep = redStep, emissionLoss = emissionLoss, coefRng = coefRng, scaFac = scaFac, bound = bound,
avaMin = avaMin, checkRng = checkRng, reportLvl = reportLvl, errCheckLvl = errCheckLvl, errWrtLvl = errWrtLvl, startTime = now())
anyM.options = modOptions(defOpt_ntup...)
# </editor-fold>
# <editor-fold desc= read in set and parameter data>
files_dic = readInputFolder(anyM.options.inDir)
# XXX read-in sets and parameters
setData_dic = readSets!(files_dic,anyM)
paraTemp_dic = readParameters!(files_dic,setData_dic,anyM)
produceMessage(anyM.options,anyM.report, 1," - Read-in all set and parameter files")
# </editor-fold>
# <editor-fold desc="create part objects and general mappings"
# assign actual tech to parents
relTech_df = setData_dic[:Te][!,Symbol.(filter(x -> occursin("technology",x) && !isnothing(tryparse(Int16,string(x[end]))), string.(namesSym(setData_dic[:Te]))))]
relTech_df = DataFrame(filter(x -> any(collect(x) .!= ""), eachrow(relTech_df)))
techIdx_arr = filter(z -> isempty(anyM.sets[:Te].nodes[z].down), map(x -> lookupTupleTree(tuple(collect(x)...),anyM.sets[:Te],1)[1], eachrow(relTech_df)))
anyM.parts = (tech = Dict(techSym(x,anyM.sets[:Te]) => TechPart(getUniName(x,anyM.sets[:Te])) for x in techIdx_arr), trd = OthPart(), exc = OthPart(), bal = OthPart(), lim = OthPart(), obj = OthPart())
createCarrierMapping!(setData_dic,anyM)
createTimestepMapping!(anyM)
# XXX write general info about technologies
for t in techIdx_arr createTechInfo!(techSym(t,anyM.sets[:Te]), setData_dic, anyM) end
produceMessage(anyM.options,anyM.report, 2," - Created all mappings among sets")
# XXX assign parameters to model parts
parDef_dic = parameterToParts!(paraTemp_dic, techIdx_arr, anyM)
produceMessage(anyM.options,anyM.report, 2," - Assigned parameter data to model parts")
# XXX create object for data visualization
anyM.graInfo = graInfo(anyM)
produceMessage(anyM.options,anyM.report, 1," - Prepared creation of optimization model")
# </editor-fold>
return anyM
end
anyModel() = new()
end
# </editor-fold>
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | code | 50629 |
# XXX prints dataframe to csv file
"""
```julia
printObject(print_df::DataFrame, model_object::anyModel)
```
Writes a DataFrame of parameters, constraints, or variables to a `.csv` file in readable format (strings instead of ids). See [Individual elements](@ref).
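For example, a sketch writing the conversion capacities of a technology (the technology name `gasPlant` is hypothetical):
```julia
printObject(anyM.parts.tech[:gasPlant].var[:capaConv], anyM; fileName = "capaConv_gasPlant")
```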
"""
function printObject(print_df::DataFrame,anyM::anyModel; fileName::String = "", rtnDf::Tuple{Vararg{Symbol,N} where N} = (:csv,), filterFunc::Function = x -> true)
sets = anyM.sets
options = anyM.options
colNam_arr = namesSym(print_df)
cntCol_int = size(colNam_arr,1)
# filters values according to filter function
print_df = copy(filter(filterFunc,print_df))
# converts variable column to value of variable
if :var in colNam_arr
print_df[!,:var] = value.(print_df[!,:var])
end
for i = 1:cntCol_int
lookUp_sym = Symbol(split(String(colNam_arr[i]),"_")[1])
if !(lookUp_sym in keys(sets)) && lookUp_sym == :eqn
print_df[!,i] = string.(print_df[!,i])
elseif lookUp_sym in keys(sets)
print_df[!,i] = map(x -> createFullString(x,sets[lookUp_sym]),print_df[!,i])
end
end
# rename columns
colName_dic = Dict(:Ts_dis => :timestep_dispatch, :Ts_exp => :timestep_expansion, :Ts_expSup => :timestep_superordinate_expansion, :Ts_disSup => :timestep_superordinate_dispatch,
:R => :region, :R_dis => :region_dispatch, :R_exp => :region_expansion, :R_to => :region_to, :R_from => :region_from, :C => :carrier, :Te => :technology,
:cns => :constraint, :var => :variable)
rename!(print_df,map(x -> x in keys(colName_dic) ? colName_dic[x] : x, namesSym(print_df)) )
if :csv in rtnDf
CSV.write("$(options.outDir)/$(fileName)_$(options.outStamp).csv", print_df)
end
if :csvDf in rtnDf return print_df end
end
# <editor-fold desc="report results to csv files"
"""
```julia
reportResults(reportType::Symbol, model_object::anyModel; rtnOpt::Tuple = (:csv,))
```
Writes results to `.csv` file with content depending on `reportType`. Available types are `:summary`, `:exchange`, and `:costs`. See [Analysed results](@ref).
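For example, passing `rtnOpt = (:csvDf,)` returns the formatted results as a DataFrame instead of only writing a file:
```julia
summary_df = reportResults(:summary, anyM; rtnOpt = (:csvDf,))
```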
"""
reportResults(reportType::Symbol,anyM::anyModel; kwargs...) = reportResults(Val{reportType}(),anyM::anyModel; kwargs...)
# XXX summary of all capacity and dispatch results
function reportResults(objGrp::Val{:summary},anyM::anyModel; wrtSgn::Bool = true, rtnOpt::Tuple{Vararg{Symbol,N} where N} = (:csv,))
techSym_arr = collect(keys(anyM.parts.tech))
allData_df = DataFrame(Ts_disSup = Int[], R_dis = Int[], Te = Int[], C = Int[], variable = Symbol[], value = Float64[])
# XXX get demand values
dem_df = copy(anyM.parts.bal.par[:dem].data)
if !isempty(dem_df)
dem_df[!,:lvlR] = map(x -> anyM.cInfo[x].rDis, :C in namesSym(dem_df) ? dem_df[!,:C] : filter(x -> x != 0,getfield.(values(anyM.sets[:C].nodes),:idx)))
# aggregates demand values
# artificially add dispatch dimensions, if none exist
if :Ts_dis in namesSym(dem_df)
ts_dic = Dict(x => anyM.sets[:Ts].nodes[x].lvl == anyM.supTs.lvl ? x : getAncestors(x,anyM.sets[:Ts],:int,anyM.supTs.lvl)[end] for x in unique(dem_df[!,:Ts_dis]))
dem_df[!,:Ts_disSup] = map(x -> ts_dic[x],dem_df[!,:Ts_dis])
else
dem_df[!,:Ts_disSup] .= anyM.supTs.step
dem_df = flatten(dem_df,:Ts_disSup)
end
dem_df[!,:val] = dem_df[!,:val] .* getResize(dem_df,anyM.sets[:Ts],anyM.supTs) ./ anyM.options.redStep
allR_arr = :R_dis in namesSym(dem_df) ? unique(dem_df[!,:R_dis]) : getfield.(getNodesLvl(anyM.sets[:R],1),:idx)
allLvlR_arr = unique(dem_df[!,:lvlR])
r_dic = Dict((x[1], x[2]) => (anyM.sets[:R].nodes[x[1]].lvl < x[2] ? getDescendants(x[1], anyM.sets[:R],false,x[2]) : getAncestors(x[1],anyM.sets[:R],:int,x[2])[end]) for x in Iterators.product(allR_arr,allLvlR_arr))
if :R_dis in namesSym(dem_df)
dem_df[!,:R_dis] = map(x -> r_dic[x.R_dis,x.lvlR],eachrow(dem_df[!,[:R_dis,:lvlR]]))
else
dem_df[!,:R_dis] .= 0
end
dem_df = combine(groupby(dem_df,[:Ts_disSup,:R_dis,:C]),:val => ( x -> sum(x) / 1000) => :value)
dem_df[!,:Te] .= 0
dem_df[!,:variable] .= :demand
if wrtSgn dem_df[!,:value] = dem_df[!,:value] .* -1 end
allData_df = vcat(allData_df,dem_df)
end
# XXX get expansion and capacity variables
for t in techSym_arr
part = anyM.parts.tech[t]
tech_df = DataFrame(Ts_disSup = Int[], R_dis = Int[], Te = Int[], C = Int[], variable = Symbol[], value = Float64[])
# get installed capacity values
for va in intersect(keys(part.var),(:expConv, :expStIn, :expStOut, :expStSize, :expExc, :capaConv, :capaStIn, :capaStOut, :capaStSize, :oprCapaConv, :oprCapaStIn, :oprCapaStOut, :oprCapaStSize))
capa_df = copy(part.var[va])
if va in (:expConv, :expStIn, :expStOut, :expStSize)
capa_df = flatten(capa_df,:Ts_expSup)
select!(capa_df,Not(:Ts_disSup))
rename!(capa_df,:Ts_expSup => :Ts_disSup)
end
# set carrier column to zero for conversion capacities and add a spatial dispatch column
if va in (:expConv,:capaConv,:oprCapaConv)
capa_df[!,:C] .= 0
capa_df[!,:R_dis] = map(x -> getAncestors(x,anyM.sets[:R],:int,part.balLvl.ref[2])[end],capa_df[!,:R_exp])
else
capa_df[!,:R_dis] = map(x -> getAncestors(x.R_exp,anyM.sets[:R],:int,anyM.cInfo[x.C].rDis)[end],eachrow(capa_df))
end
select!(capa_df,Not(:R_exp))
# aggregate values and add to tech data frame
capa_df = combine(groupby(capa_df,[:Ts_disSup,:R_dis,:C,:Te]),:var => ( x -> value.(sum(x))) => :value)
capa_df[!,:variable] .= va
tech_df = vcat(tech_df,capa_df)
end
# add tech dataframe to overall data frame
allData_df = vcat(allData_df,tech_df)
end
# XXX get dispatch variables
for va in (:use, :gen, :stIn, :stOut, :stExtIn, :stExtOut, :stIntIn, :stIntOut, :emission, :crt, :lss, :trdBuy, :trdSell)
# get all variables, group them and get respective values
allVar_df = getAllVariables(va,anyM)
if isempty(allVar_df) continue end
disp_df = combine(groupby(allVar_df,intersect(intCol(allVar_df),[:Ts_disSup,:R_dis,:C,:Te])),:var => (x -> value(sum(x))) => :value)
# scales values to TWh (except for emissions)
if va != :emission disp_df[!,:value] = disp_df[!,:value] ./ 1000 end
disp_df[!,:variable] .= va
# add empty values for non-existing columns
for dim in (:Te,:C)
if !(dim in namesSym(disp_df))
disp_df[:,dim] .= 0
end
end
# adjust sign, if enabled
if wrtSgn && va in (:use,:stIn,:stIntIn,:stExtIn,:crt,:trdSell) disp_df[!,:value] = disp_df[!,:value] .* -1 end
allData_df = vcat(allData_df,disp_df)
end
# XXX get exchange variables aggregated by import and export
allExc_df = getAllVariables(:exc,anyM)
if !isempty(allExc_df)
# add losses to all exchange variables
allExc_df = getExcLosses(convertExcCol(allExc_df),anyM.parts.exc.par,anyM.sets)
# compute export and import of each region, losses are considered at import
excFrom_df = rename(combine(groupby(allExc_df,[:Ts_disSup,:R_a,:C]),:var => ( x -> value(sum(x))/1000) => :value),:R_a => :R_dis)
excFrom_df[!,:variable] .= :export; excFrom_df[!,:Te] .= 0
if wrtSgn excFrom_df[!,:value] = excFrom_df[!,:value] .* -1 end
excTo_df = rename(combine(x -> (value = value(dot(x.var,(1 .- x.loss)))/1000,),groupby(allExc_df,[:Ts_disSup,:R_b,:C])),:R_b => :R_dis)
excTo_df[!,:variable] .= :import; excTo_df[!,:Te] .= 0
allData_df = vcat(allData_df,vcat(excFrom_df,excTo_df))
end
# XXX get full load hours for conversion, storage input and storage output
if anyM.options.decomm == :none
flh_dic = Dict(:capaConv => :flhConv, :capaStIn => :flhStIn, :capaStOut => :flhStOut)
else
flh_dic = Dict(:oprCapaConv => :flhConv, :oprCapaStIn => :flhStIn, :oprCapaStOut => :flhStOut)
end
flhAss_dic = Dict(:capaConv => [:use,:stIntOut,:gen,:stIntIn],:oprCapaConv => [:use,:stIntOut,:gen,:stIntIn], :capaStIn => [:stIntIn,:stExtIn],:oprCapaStIn => [:stIntIn,:stExtIn], :capaStOut => [:stIntOut,:stExtOut],:oprCapaStOut => [:stIntOut,:stExtOut])
for flhCapa in collect(keys(flh_dic))
# gets relevant capacity variables
capaFlh_df = rename(filter(x -> x.variable == flhCapa && x.value > 0.0, allData_df),:variable => :varCapa, :value => :valCapa)
if isempty(capaFlh_df) continue end
# expand with relevant dispatch variables
capaFlh_df[!,:varDisp] = map(x -> flhAss_dic[x.varCapa],eachrow(capaFlh_df))
capaFlh_df = flatten(capaFlh_df,:varDisp)
intCol(capaFlh_df,:varDisp)
# omit carrier in case of conversion capacity
if flhCapa in (:capaConv,:oprCapaConv) select!(capaFlh_df,Not([:C])) end
# joins capacity and with relevant dispatch variables
allFLH_df = innerjoin(capaFlh_df,rename(allData_df,:variable => :varDisp), on = intCol(capaFlh_df,:varDisp))
# remove rows for gen variables in case use exists as well and compute full load hours for conversion
allFLH_df = combine(combine(y -> filter(x -> :use in y[!,:varDisp] ? x.varDisp != :gen : true,y), groupby(allFLH_df, intCol(capaFlh_df,:varCapa)), ungroup = false), AsTable([:value,:valCapa]) => (x -> 1000*abs(sum(x.value)/x.valCapa[1])) => :value)
allFLH_df[!,:variable] = map(x -> flh_dic[x], allFLH_df[!,:varCapa])
# adds carrier again in case of conversion capacity
allFLH_df[!,:C] .= 0
allData_df = vcat(allData_df, select(allFLH_df,Not([:varCapa])))
end
# XXX compute storage cycles
if anyM.options.decomm == :none
cyc_dic = Dict(:capaStIn => :cycStIn, :capaStOut => :cycStOut)
else
cyc_dic = Dict(:oprCapaStIn => :cycStIn, :oprCapaStOut => :cycStOut)
end
cycAss_dic = Dict(:oprCapaStIn => [:stIntIn,:stExtIn], :capaStIn => [:stIntIn,:stExtIn], :oprCapaStOut => [:stIntOut,:stExtOut], :capaStOut => [:stIntOut,:stExtOut])
for cycCapa in collect(keys(cyc_dic))
capaCyc_df = rename(filter(x -> x.variable == :capaStSize && x.value > 0.0, allData_df),:variable => :varCapa, :value => :valCapa)
if isempty(capaCyc_df) continue end
# expand with relevant dispatch variables
capaCyc_df[!,:varDisp] = map(x -> cycAss_dic[cycCapa],eachrow(capaCyc_df))
capaCyc_df = flatten(capaCyc_df,:varDisp)
# joins capacity and with relevant dispatch variables
capaCyc_df = innerjoin(capaCyc_df,rename(allData_df,:variable => :varDisp), on = intCol(capaCyc_df,:varDisp))
# compute cycling value and add to overall
capaCyc_df = combine(groupby(capaCyc_df, intCol(capaCyc_df,:varCapa)), AsTable([:value,:valCapa]) => (x -> 1000*abs(sum(x.value)/x.valCapa[1])) => :value)
capaCyc_df[!,:variable] = map(x -> cyc_dic[cycCapa], capaCyc_df[!,:varCapa])
allData_df = vcat(allData_df,select(capaCyc_df,Not([:varCapa])))
end
# return dataframes and write csv files based on specified inputs
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(allData_df,anyM, fileName = "results_summary",rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/results_summary_$(anyM.options.outStamp).csv", allData_df)
end
if :rawDf in rtnOpt && :csvDf in rtnOpt
return allData_df, csvData_df
else
if :rawDf in rtnOpt return allData_df end
if :csvDf in rtnOpt return csvData_df end
end
end
# XXX results for costs
function reportResults(objGrp::Val{:costs},anyM::anyModel; rtnOpt::Tuple{Vararg{Symbol,N} where N} = (:csv,))
# prepare empty dataframe
allData_df = DataFrame(Ts_disSup = Int[], R = Int[], Te = Int[], C = Int[], variable = Symbol[], value = Float64[])
# loops over all objective variables with keyword "cost" in it
for cst in filter(x -> occursin("cost",string(x)),keys(anyM.parts.obj.var))
cost_df = copy(anyM.parts.obj.var[cst])
# rename all dispatch and expansion regions simply to region
if !isempty(intersect([:R_dis,:R_exp],namesSym(cost_df)))
rename!(cost_df,:R_dis in namesSym(cost_df) ? :R_dis : :R_exp => :R)
end
# add empty column for non-existing dimensions
for dim in (:Te,:C,:R)
if !(dim in namesSym(cost_df))
cost_df[:,dim] .= 0
end
end
# obtain values and write to dataframe
cost_df[:,:variable] .= string(cst)
cost_df[:,:value] = value.(cost_df[:,:var])
if :Ts_exp in namesSym(cost_df) cost_df = rename(cost_df,:Ts_exp => :Ts_disSup) end
allData_df = vcat(allData_df,cost_df[:,Not(:var)])
end
# return dataframes and write csv files based on specified inputs
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(allData_df,anyM, fileName = "results_costs", rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/results_costs_$(anyM.options.outStamp).csv", allData_df)
end
if :rawDf in rtnOpt && :csvDf in rtnOpt
return allData_df, csvData_df
else
if :rawDf in rtnOpt return allData_df end
if :csvDf in rtnOpt return csvData_df end
end
end
# XXX results for exchange
function reportResults(objGrp::Val{:exchange},anyM::anyModel; rtnOpt::Tuple{Vararg{Symbol,N} where N} = (:csv,))
allData_df = DataFrame(Ts_disSup = Int[], R_from = Int[], R_to = Int[], C = Int[], variable = Symbol[], value = Float64[])
if isempty(anyM.parts.exc.var) error("No exchange data found") end
# XXX expansion variables
if :expExc in keys(anyM.parts.exc.var)
exp_df = copy(anyM.parts.exc.var[:expExc]) |> (x -> vcat(x,rename(x,:R_from => :R_to, :R_to => :R_from)))
exp_df = flatten(exp_df,:Ts_expSup)
select!(exp_df,Not(:Ts_disSup))
rename!(exp_df,:Ts_expSup => :Ts_disSup)
exp_df = combine(groupby(exp_df,[:Ts_disSup,:R_from,:R_to,:C]), :var => (x -> value.(sum(x))) => :value)
exp_df[!,:variable] .= :expExc
else
exp_df = DataFrame(Ts_disSup = Int[], R_from = Int[], R_to = Int[], C = Int[], variable = Symbol[], value = Float64[])
end
# XXX capacity variables
capa_df = copy(anyM.parts.exc.var[:capaExc])
capa_df = vcat(capa_df,rename(filter(x -> x.dir == 0, capa_df),:R_from => :R_to, :R_to => :R_from))
capa_df = combine(groupby(capa_df,[:Ts_disSup,:R_from,:R_to,:C]), :var => (x -> value.(sum(x))) => :value)
capa_df[!,:variable] .= :capaExc
if anyM.options.decomm != :none
oprCapa_df = copy(anyM.parts.exc.var[:oprCapaExc])
oprCapa_df = vcat(oprCapa_df,rename(filter(x -> x.dir == 0, oprCapa_df),:R_from => :R_to, :R_to => :R_from))
oprCapa_df = combine(groupby(oprCapa_df,[:Ts_disSup,:R_from,:R_to,:C]), :var => (x -> value.(sum(x))) => :value)
oprCapa_df[!,:variable] .= :oprCapaExc
capa_df = vcat(capa_df,oprCapa_df)
end
# XXX dispatch variables
disp_df = getAllVariables(:exc,anyM)
disp_df = combine(groupby(disp_df,[:Ts_disSup,:R_from,:R_to,:C]), :var => (x -> value.(sum(x)) ./ 1000) => :value)
disp_df[!,:variable] .= :exc
# XXX get full load hours
capaExt_df = replCarLeafs(copy(capa_df),anyM.sets[:C])
flh_df = innerjoin(rename(select(capaExt_df,Not(:variable)),:value => :capa),rename(select(disp_df,Not(:variable)),:value => :disp),on = [:Ts_disSup,:R_from,:R_to,:C])
flh_df[!,:value] = flh_df[!,:disp] ./ flh_df[!,:capa] .* 1000
flh_df[!,:variable] .= :flhExc
# XXX merge and print all data
allData_df = vcat(exp_df,capa_df,disp_df,select(flh_df,Not([:capa,:disp])))
# return dataframes and write csv files based on specified inputs
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(allData_df,anyM, fileName = "results_exchange", rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/results_exchange_$(anyM.options.outStamp).csv", allData_df)
end
if :rawDf in rtnOpt && :csvDf in rtnOpt
return allData_df, csvData_df
else
if :rawDf in rtnOpt return allData_df end
if :csvDf in rtnOpt return csvData_df end
end
end
# XXX print time series for in and out into separate tables
"""
```julia
reportTimeSeries(car_sym::Symbol, model_object::anyModel)
```
Writes elements of energy balance for carrier specified by `car_sym` to `.csv` file. See [Time-series](@ref).
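For example (assuming the model defines a carrier named `electricity`):
```julia
reportTimeSeries(:electricity, anyM)
```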
"""
function reportTimeSeries(car_sym::Symbol, anyM::anyModel; filterFunc::Function = x -> true, unstck::Bool = true, signVar::Tuple = (:in,:out), minVal::Number = 1e-3, mergeVar::Bool = true, rtnOpt::Tuple{Vararg{Symbol,N} where N} = (:csv,))
# XXX converts carrier named provided to index
node_arr = filter(x -> x.val == string(car_sym),collect(values(anyM.sets[:C].nodes)))
if length(node_arr) != 1
error("no carrier named $car_sym defined")
return
end
c_int = node_arr[1].idx
# XXX initialize dictionary to save data
allData_dic = Dict{Symbol,DataFrame}()
for signItr in signVar
allData_dic[signItr] = DataFrame(Ts_disSup = Int[], Ts_dis = Int[], R_dis = Int[], variable = String[], value = Float64[])
end
# XXX initialize relevant dimensions and carriers
relDim_df = filter(filterFunc,createPotDisp([c_int],anyM))
sort!(relDim_df,sort(intCol(relDim_df)))
relC_arr = unique([c_int,getDescendants(c_int,anyM.sets[:C])...])
cRes_tup = anyM.cInfo[c_int] |> (x -> (Ts_dis = x.tsDis, R_dis = x.rDis, C = anyM.sets[:C].nodes[c_int].lvl))
# XXX add demand and size it
if :out in signVar
dem_df = matchSetParameter(relDim_df,anyM.parts.bal.par[:dem],anyM.sets,newCol = :value)
dem_df[!,:value] = dem_df[!,:value] .* getResize(dem_df,anyM.sets[:Ts],anyM.supTs) .* -1
dem_df[!,:variable] .= "demand"
filter!(x -> abs(x.value) > minVal, dem_df)
allData_dic[:out] = vcat(allData_dic[:out],select!(dem_df,Not(:C)))
end
# XXX adds all technology related variables
cBalRes_tup = anyM.cInfo[c_int] |> (x -> (x.tsDis, x.rDis))
relType_tup = map(x -> x in signVar ? (x == :in ? (:use, :stExtIn) : (:gen,:stExtOut)) : tuple(),(:in,:out)) |> (x -> tuple(vcat(collect.(x)...)...))
for c in relC_arr
# gets technologies relevant for respective filterCarrier
relTech_arr = getRelTech(c,anyM.parts.tech,anyM.sets[:C])
if isempty(relTech_arr) continue end
for x in relTech_arr
# gets resolution and adjusts add_df in case of an aggregated technology
add_df = select(filter(r -> r.C == c,anyM.parts.tech[x[1]].var[x[2]]),[:Ts_disSup,:Ts_dis,:R_dis,:var])
tRes_tup = anyM.parts.tech[x[1]].disAgg ? (cRes_tup[1], anyM.parts.tech[x[1]].balLvl.exp[2]) : (cRes_tup[1], cRes_tup[2])
checkTechReso!(tRes_tup,cBalRes_tup,add_df,anyM.sets)
# filter values based on filter function and minimum value reported
add_df = combine(groupby(add_df,[:Ts_disSup,:Ts_dis,:R_dis]), :var => (x -> sum(x)) => :var)
filter!(filterFunc,add_df)
if isempty(add_df) continue end
add_df[!,:value] = value.(add_df[!,:var]) .* (x[2] in (:use,:stExtIn) ? -1.0 : 1.0)
add_df[!,:variable] .= string(x[2],"; ", x[1])
filter!(x -> abs(x.value) > minVal, add_df)
# add to dictionary of dataframe for in or out
sign_sym = x[2] in (:use,:stExtIn) ? :out : :in
allData_dic[sign_sym] = vcat(allData_dic[sign_sym] ,select(add_df,Not(:var)))
end
end
# XXX add import and export variables
if :exc in keys(anyM.parts.exc.var)
exc_df = filterCarrier(anyM.parts.exc.var[:exc],relC_arr)
if :out in signVar
excFrom_df = combine(groupby(filter(filterFunc,rename(copy(exc_df),:R_from => :R_dis)), [:Ts_disSup,:Ts_dis,:R_dis]), :var => (x -> value(sum(x)) * -1) => :value)
excFrom_df[!,:variable] .= :export
filter!(x -> abs(x.value) > minVal, excFrom_df)
if !isempty(excFrom_df)
allData_dic[:out] = vcat(allData_dic[:out],excFrom_df)
end
end
if :in in signVar
addLoss_df = rename(getExcLosses(convertExcCol(exc_df),anyM.parts.exc.par,anyM.sets),:R_b => :R_dis)
excTo_df = combine(x -> (value = value(dot(x.var,(1 .- x.loss))),),groupby(filter(filterFunc,addLoss_df), [:Ts_disSup,:Ts_dis,:R_dis]))
excTo_df[!,:variable] .= :import
filter!(x -> abs(x.value) > minVal, excTo_df)
if !isempty(excTo_df)
allData_dic[:in] = vcat(allData_dic[:in],excTo_df)
end
end
end
# XXX add trade
agg_arr = [:Ts_dis, :R_dis, :C]
if !isempty(anyM.parts.trd.var)
for trd in intersect(keys(anyM.parts.trd.var),(:trdBuy,:trdSell))
trdVar_df = copy(relDim_df)
trdVar_df[!,:value] = value.(filterCarrier(anyM.parts.trd.var[trd],relC_arr) |> (x -> aggUniVar(x,relDim_df,agg_arr,cRes_tup,anyM.sets))) .* (trd == :trdBuy ? 1.0 : -1.0)
trdVar_df[!,:variable] .= trd
filter!(x -> abs(x.value) > minVal, trdVar_df)
sign_sym = :trdBuy == trd ? :in : :out
allData_dic[sign_sym] = vcat(allData_dic[sign_sym],select(trdVar_df,Not(:C)))
end
end
# XXX add curtailment
if :crt in keys(anyM.parts.bal.var)
crt_df = copy(relDim_df)
crt_df[!,:value] = value.(filterCarrier(anyM.parts.bal.var[:crt],relC_arr) |> (x -> aggUniVar(x,crt_df,agg_arr, cRes_tup,anyM.sets))) .* -1.0
crt_df[!,:variable] .= :crt
filter!(x -> abs(x.value) > minVal, crt_df)
allData_dic[:out] = vcat(allData_dic[:out],select(crt_df,Not(:C)))
end
# XXX add lost load
if :lss in keys(anyM.parts.bal.var)
lss_df = copy(relDim_df)
lss_df[!,:value] = value.(filterCarrier(anyM.parts.bal.var[:lss],relC_arr) |> (x -> aggUniVar(x,lss_df,agg_arr, cRes_tup,anyM.sets)))
lss_df[!,:variable] .= :lss
filter!(x -> abs(x.value) > minVal, lss_df)
allData_dic[:in] = vcat(allData_dic[:in],select(lss_df,Not(:C)))
end
# XXX unstack data and write to csv
if mergeVar
# merges in and out files and writes to same csv file
data_df = vcat(values(allData_dic)...)
if unstck && !isempty(data_df)
data_df[!,:variable] = CategoricalArray(string.(data_df[!,:variable]))
data_df = unstack(data_df,:variable,:value)
end
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(data_df,anyM, fileName = string("timeSeries_",car_sym,), rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/$(string("timeSeries_",car_sym,))_$(anyM.options.outStamp).csv", data_df)
end
else
# loops over different signs and writes to different csv files
for signItr in signVar
data_df = allData_dic[signItr]
if unstck && !isempty(data_df)
data_df[!,:variable] = CategoricalArray(string.(data_df[!,:variable]))
data_df = unstack(data_df,:variable,:value)
end
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(data_df,anyM, fileName = string("timeSeries_",car_sym,"_",signItr), rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/$(string("timeSeries_",car_sym,"_",signItr))_$(anyM.options.outStamp).csv", data_df)
end
end
end
# return dataframes based on specified inputs
if :rawDf in rtnOpt && :csvDf in rtnOpt
return data_df, csvData_df
else
if :rawDf in rtnOpt return data_df end
if :csvDf in rtnOpt return csvData_df end
end
end
# XXX write dual values for constraint dataframe
"""
```julia
printDuals(print_df::DataFrame, model_object::anyModel)
```
Writes duals of a constraint DataFrame to a `.csv` file in readable format (strings instead of ids). See [Individual elements](@ref).
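For example, a sketch writing the duals of an energy balance (the constraint name `enBal` is hypothetical and depends on the model at hand):
```julia
printDuals(anyM.parts.bal.cns[:enBal], anyM; fileName = "enBal")
```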
"""
function printDuals(cns_df::DataFrame,anyM::anyModel;filterFunc::Function = x -> true, fileName::String = "", rtnOpt::Tuple{Vararg{Symbol,N} where N} = (:csv,))
if !(:cns in namesSym(cns_df)) error("No constraint column found!") end
cns_df = copy(filter(filterFunc,cns_df))
cns_df[!,:dual] = dual.(cns_df[!,:cns])
if :csv in rtnOpt || :csvDf in rtnOpt
csvData_df = printObject(select(cns_df,Not(:cns)),anyM;fileName = string("dual",fileName != "" ? "_" : "",fileName), rtnDf = rtnOpt)
end
if :raw in rtnOpt
CSV.write("$(anyM.options.outDir)/$(string("dual",fileName != "" ? "_" : "",fileName))_$(anyM.options.outStamp).csv", select(cns_df,Not(:cns)))
end
# return dataframes based on specified inputs
if :rawDf in rtnOpt && :csvDf in rtnOpt
return select(cns_df,Not(:cns)), csvData_df
else
if :rawDf in rtnOpt return select(cns_df,Not(:cns)) end
if :csvDf in rtnOpt return csvData_df end
end
end
# </editor-fold>
# <editor-fold desc="plotting tools"
# XXX plots tree graph for input set
"""
```julia
plotTree(tree_sym::Symbol, model_object::anyModel)
```
Plots the hierarchical tree of nodes for the set specified by `tree_sym`. See [Node trees](@ref).
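For example, to plot the carrier tree with a larger font:
```julia
plotTree(:carrier, anyM; fontSize = 14)
```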
"""
function plotTree(tree_sym::Symbol, anyM::anyModel; plotSize::Tuple{Float64,Float64} = (8.0,4.5), fontSize::Int = 12, useColor::Bool = true, wide::Array{Float64,1} = fill(1.0,30))
netw = pyimport("networkx")
plt = pyimport("matplotlib.pyplot")
PyCall.fixqtpath()
# <editor-fold desc="initialize variables"
treeName_dic = Dict(:region => :R,:timestep => :Ts,:carrier => :C,:technology => :Te)
# convert tree object into a data frame
tree_obj = anyM.sets[treeName_dic[tree_sym]]
data_arr = filter(x -> x.idx != 0,collect(values(tree_obj.nodes))) |> (y -> map(x -> getfield.(y,x),(:idx,:val,:lvl,:down,:subIdx)))
tree_df = DataFrame(idx = data_arr[1], val = data_arr[2], lvl = data_arr[3], down = data_arr[4], subIdx = data_arr[5], up =map(x -> tree_obj.up[x],data_arr[1]))
# sets options
col_dic = Dict(:region => (0.133, 0.545, 0.133),:timestep => (0.251,0.388,0.847),:carrier => (0.584, 0.345, 0.698),:technology => (0.796,0.235,0.2))
# </editor-fold>
# <editor-fold desc="computes positon of nodes"
# adds a new dummy top node
push!(tree_df,(0,"",0,tree_obj.nodes[0].down ,0,1))
nodes_int = nrow(tree_df)
idxPos_dic = Dict(zip(tree_df[:,:idx], 1:(nodes_int)))
# create vertical position and labels from input tree
locY_arr = float(tree_df[!,:lvl]) .+ 1.2
# horizontal position is computed in a two step process
locX_arr = zeros(Float64, nodes_int)
# first step, filter all nodes at end of a respective branch and sort them correctly
lowLvl_df = tree_df[isempty.(tree_df[!,:down]),:]
lowLvl_df = lowLvl_df[map(y -> findall(x -> x == y, lowLvl_df[:,:idx])[1],sortSiblings(convert(Array{Int64,1},lowLvl_df[:,:idx]),tree_obj)),:]
# sets distance from next node on the left depending on whether they are part of the same subtree
for (idx2, lowNode) in Iterators.drop(enumerate(eachrow(lowLvl_df)),1)
if lowNode[:up] == lowLvl_df[idx2-1,:up] distance_fl = wide[lowNode[:lvl]] else distance_fl = 1 end
locX_arr[idxPos_dic[lowNode[:idx]]] = locX_arr[idxPos_dic[lowLvl_df[idx2-1,:idx]]] + distance_fl
end
# second step, remaining horizontal nodes are placed in the middle of their children
highLvl_df = tree_df[false .== isempty.(tree_df[!,:down]),:]
highLvl_df = highLvl_df[map(y -> findall(x -> x == y, highLvl_df[:,:idx])[1],sortSiblings(convert(Array{Int64,1},highLvl_df[:,:idx]),tree_obj)),:]
for highNode in reverse(eachrow(highLvl_df))
locX_arr[idxPos_dic[highNode[:idx]]] = Statistics.mean(locX_arr[map(x -> idxPos_dic[x],highNode.down)])
end
locX_arr[end] = Statistics.mean(locX_arr[map(x -> idxPos_dic[x],tree_df[findall(tree_df[:,:lvl] .== 1),:idx])])
locY_arr = abs.(locY_arr .- maximum(locY_arr))
# compute dictionary of final node positions
pos_dic = Dict(x => (locX_arr[x]/maximum(locX_arr),locY_arr[x]/maximum(locY_arr)) for x in 1:nodes_int)
posIdx_dic = collect(idxPos_dic) |> (z -> Dict(Pair.(getindex.(z,2),getindex.(z,1))))
# </editor-fold>
# <editor-fold desc="determine node colors and labels"
name_dic = anyM.graInfo.names
label_dic = Dict(x[1] => x[2] == "" ? "" : name_dic[x[2]] for x in enumerate(tree_df[!,:val]))
if useColor
col_arr = [col_dic[tree_sym]]
else
col_arr = getNodeColors(collect(1:nodes_int),label_dic,anyM)
end
# </editor-fold>
# <editor-fold desc="draw final tree"
# collect edges between nodes
edges_arr = Array{Tuple{Int,Int},1}()
for rowTree in eachrow(tree_df)[1:end-1]
# the 0 node in tree_df becomes the last node in the graph, because there is no node 0 within the plot
if rowTree[:up] == 0 pare_int = nodes_int else pare_int = idxPos_dic[rowTree[:up]] end
push!(edges_arr, (idxPos_dic[rowTree[:idx]], pare_int))
end
# draw graph object
plt.clf()
graph_obj = netw.Graph()
netw.draw_networkx_nodes(graph_obj, pos_dic; nodelist = collect(1:nodes_int), node_color = col_arr)
netw.draw_networkx_edges(graph_obj, pos_dic; edgelist = edges_arr)
posLabOff_dic = netw.draw_networkx_labels(graph_obj, pos_dic, font_family = "arial", font_size = fontSize, labels = label_dic)
figure = plt.gcf()
figure.set_size_inches(plotSize[1],plotSize[2])
r = figure.canvas.get_renderer()
trans = plt.gca().transData.inverted()
for x in collect(posLabOff_dic)
down_boo = isempty(tree_obj.nodes[posIdx_dic[x[1]]].down)
bb = x[2].get_window_extent(renderer=r)
bbdata = bb.transformed(trans)
# computes offset of label for leaves and non-leaves by first moving according to the size of the letters themselves (bbdata) and then by the size of the node
# (node-size in pixels is divided by dpi and plot size to get the relative offset)
offset_arr = [down_boo ? 0.0 : (bbdata.width/2.0 + (150/plotSize[1]/600)), down_boo ? (-bbdata.height/2.0 - 150/plotSize[2]/600) : 0.0]
x[2].set_position([x[2]."_x" + offset_arr[1],x[2]."_y" + offset_arr[2]])
x[2].set_clip_on(false)
end
# size plot and save
plt.axis("off")
plt.savefig("$(anyM.options.outDir)/$(tree_sym)_$(anyM.options.outStamp)", dpi = 600, bbox_inches="tight")
# </editor-fold>
end
"""
```julia
plotEnergyFlow(plotType::Symbol, model_object::anyModel)
```
Plots the energy flow in a model. Set `plotType` to `:graph` for a qualitative node graph or to `:sankey` for a quantitative Sankey diagram. See [Energy flow](@ref).
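For example:
```julia
plotEnergyFlow(:graph, anyM) # qualitative node graph
plotEnergyFlow(:sankey, anyM) # quantitative Sankey diagram
```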
"""
plotEnergyFlow(plotType::Symbol,anyM::anyModel; kwargs...) = plotEnergyFlow(Val{plotType}(),anyM::anyModel; kwargs...)
# XXX plot qualitative energy flow graph (applies python modules networkx and matplotlib via PyCall package)
function plotEnergyFlow(objGrp::Val{:graph},anyM::anyModel; plotSize::Tuple{Number,Number} = (16.0,9.0), fontSize::Int = 12, replot::Bool = true, scaDist::Number = 0.5, maxIter::Int = 5000, initTemp::Number = 2.0, useTeColor::Bool = false)
# XXX import python function
netw = pyimport("networkx")
plt = pyimport("matplotlib.pyplot")
PyCall.fixqtpath()
# <editor-fold desc="create graph and map edges"
graph_obj = netw.DiGraph()
flowGrap_obj = anyM.graInfo.graph
edges_arr = vcat(collect.(flowGrap_obj.edgeC),collect.(flowGrap_obj.edgeTe))
for x in edges_arr
graph_obj.add_edge(x[1],x[2])
end
# </editor-fold>
# <editor-fold desc="obtain and order graph properties (colors, names, etc.)"
# get carriers that should be plotted, because they are connected with a technology
relNodeC1_arr = filter(x -> x[2] in vcat(getindex.(flowGrap_obj.edgeTe,1),getindex.(flowGrap_obj.edgeTe,2)), collect(flowGrap_obj.nodeC))
# get carriers that should be plotted, because they are connected with another carrier that should be plotted
relNodeC2_arr = filter(x -> any(map(y -> x[2] in y && !isempty(intersect(getindex.(relNodeC1_arr,2),y)) , collect.(flowGrap_obj.edgeC))), collect(flowGrap_obj.nodeC))
# maps node id to node names
idToC_arr = map(x -> x[2] => anyM.sets[:C].nodes[x[1]].val, filter(y -> y[2] in union(edges_arr...), intersect(flowGrap_obj.nodeC, union(relNodeC1_arr,relNodeC2_arr))))
idToTe_arr = map(x -> x[2] => anyM.sets[:Te].nodes[x[1]].val, filter(y -> y[2] in union(edges_arr...), collect(flowGrap_obj.nodeTe)))
idToName_dic = Dict(vcat(idToC_arr,idToTe_arr))
# obtain colors of nodes
ordC_arr = intersect(unique(vcat(edges_arr...)), getindex.(idToC_arr,1))
ordTe_arr = intersect(unique(vcat(edges_arr...)), getindex.(idToTe_arr,1))
nodeC_arr = getNodeColors(ordC_arr,idToName_dic,anyM)
nodeTe_arr = useTeColor ? getNodeColors(ordTe_arr,idToName_dic,anyM) : [(0.85,0.85,0.85)]
nodesCnt_int = length(idToName_dic)
# converts edges to sparse matrix for flowLayout function
id_arr = vcat(getindex.(idToC_arr,1), getindex.(idToTe_arr,1))
edges_mat = convert(Array{Int64,2},zeros(nodesCnt_int,nodesCnt_int))
foreach(x -> edges_mat[findall(id_arr .== x[1])[1],findall(id_arr .== x[2])[1]] = 1, filter(x -> x[1] in id_arr && x[2] in id_arr,edges_arr))
edges_smat = SparseArrays.sparse(edges_mat)
# compute position of nodes
if replot || !(isdefined(flowGrap_obj,:nodePos))
pos_dic = flowLayout(nodesCnt_int,edges_smat; scaDist = scaDist, maxIter = maxIter, initTemp = initTemp)
flowGrap_obj.nodePos = Dict(id_arr[x] => pos_dic[x] for x in keys(pos_dic))
end
# separate into edges between technologies and carriers and between carriers, then get respective colors
cEdges_arr = filter(x -> x[1] in ordC_arr && x[2] in ordC_arr, collect(graph_obj.edges))
edgeColC_arr = map(x -> anyM.graInfo.colors[idToName_dic[x[1]]], cEdges_arr)
teEdges_arr = filter(x -> x[1] in ordTe_arr || x[2] in ordTe_arr, collect(graph_obj.edges))
edgeColTe_arr = map(x -> x[1] in ordC_arr ? anyM.graInfo.colors[idToName_dic[x[1]]] : anyM.graInfo.colors[idToName_dic[x[2]]], teEdges_arr)
# </editor-fold>
# <editor-fold desc="draw and save graph with python">
# plot final graph object
plt.clf()
netw.draw_networkx_nodes(graph_obj, flowGrap_obj.nodePos, nodelist = ordC_arr, node_shape="s", node_size = 300, node_color = nodeC_arr)
netw.draw_networkx_nodes(graph_obj, flowGrap_obj.nodePos, nodelist = ordTe_arr, node_shape="o", node_size = 185,node_color = nodeTe_arr)
netw.draw_networkx_edges(graph_obj, flowGrap_obj.nodePos, edgelist = cEdges_arr, edge_color = edgeColC_arr, arrowsize = 16.2, width = 1.62)
netw.draw_networkx_edges(graph_obj, flowGrap_obj.nodePos, edgelist = teEdges_arr, edge_color = edgeColTe_arr)
posLabC_dic = netw.draw_networkx_labels(graph_obj, flowGrap_obj.nodePos, font_size = fontSize, labels = Dict(y[1] => anyM.graInfo.names[y[2]] for y in filter(x -> x[1] in ordC_arr,idToName_dic)), font_weight = "bold", font_family = "arial")
posLabTe_dic = netw.draw_networkx_labels(graph_obj, flowGrap_obj.nodePos, font_size = fontSize, font_family = "arial", labels = Dict(y[1] => anyM.graInfo.names[y[2]] for y in filter(x -> !(x[1] in ordC_arr),idToName_dic)))
# adjusts position of carrier labels so that they are to the right of the node, uses code provided by ImportanceOfBeingErnest from https://stackoverflow.com/questions/43894987/networkx-node-labels-relative-position
figure = plt.gcf()
figure.set_size_inches(plotSize[1],plotSize[2])
r = figure.canvas.get_renderer()
trans = plt.gca().transData.inverted()
for x in vcat(collect(posLabC_dic),collect(posLabTe_dic))
cNode_boo = x[1] in ordC_arr
bb = x[2].get_window_extent(renderer=r)
bbdata = bb.transformed(trans)
# computes offset of label for carrier and technology nodes by first moving according to the size of the letters themselves (bbdata) and then by the size of the node
# (node size in pixels is divided by dpi and plot size to get the relative offset)
offset_arr = [cNode_boo ? (bbdata.width/2.0 + (500/plotSize[1]/600)) : 0.0, cNode_boo ? 0.0 : (bbdata.height/2.0 + 200/plotSize[2]/600)]
x[2].set_position([x[2]."_x" + offset_arr[1],x[2]."_y" + offset_arr[2]])
x[2].set_clip_on(false)
end
plt.axis("off")
# size plot and save
plt.savefig("$(anyM.options.outDir)/energyFlowGraph_$(anyM.options.outStamp)", dpi = 600)
# </editor-fold>
end
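# hypothetical call of the graph plot with customized layout parameters (keyword values shown are just the defaults of the method above):
# plotEnergyFlow(:graph, anyM; plotSize = (16.0,9.0), fontSize = 12, replot = true, scaDist = 0.5, maxIter = 5000, initTemp = 2.0, useTeColor = false)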
# XXX plot quantitative energy flow sankey diagram (applies python module plotly via PyCall package)
function plotEnergyFlow(objGrp::Val{:sankey},anyM::anyModel; plotSize::Tuple{Number,Number} = (16.0,9.0), minVal::Float64 = 0.1, filterFunc::Function = x -> true, dropDown::Tuple{Vararg{Symbol,N} where N} = (:region,:timestep), rmvNode::Tuple{Vararg{String,N} where N} = tuple(), useTeColor = true)
plt = pyimport("plotly")
flowGrap_obj = anyM.graInfo.graph
# <editor-fold desc="initialize data">
if !isempty(setdiff(dropDown,[:region,:timestep]))
error("dropDown only accepts :region and :timestep as content")
end
# get mappings to create buttons of dropdown menu
drop_dic = Dict(:region => :R_dis, :timestep => :Ts_disSup)
dropDim_arr = collect(map(x -> drop_dic[x], dropDown))
# get summarised data and filter dispatch variables
data_df = reportResults(:summary,anyM,rtnOpt = (:rawDf,))
filter!(x -> x.variable in (:demand,:gen,:use,:stIn,:stOut,:trdBuy,:trdSell,:import,:export,:lss,:crt),data_df)
# filter non-relevant entries
filter!(x -> abs(x.value) > minVal, data_df)
filter!(filterFunc, data_df)
# create dictionaries for nodes that are neither technology nor carrier
othNode_dic = maximum(values(flowGrap_obj.nodeTe)) |> (z -> Dict((x[2].C,x[2].variable) => x[1] + z for x in enumerate(eachrow(unique(filter(x -> x.Te == 0,data_df)[!,[:variable,:C]])))))
othNodeId_dic = collect(othNode_dic) |> (z -> Dict(Pair.(getindex.(z,2),getindex.(z,1))))
# </editor-fold>
# <editor-fold desc="prepare labels and colors">
# prepare name and color assignment
names_dic = anyM.graInfo.names
revNames_dic = collect(names_dic) |> (z -> Dict(Pair.(getindex.(z,2),getindex.(z,1))))
col_dic = anyM.graInfo.colors
sortTe_arr = getindex.(sort(collect(flowGrap_obj.nodeTe),by = x -> x[2]),1)
cColor_dic = Dict(x => anyM.sets[:C].nodes[x].val |> (z -> z in keys(col_dic) ? col_dic[z] : (names_dic[z] in keys(col_dic) ? col_dic[names_dic[z]] : (0.85,0.85,0.85))) for x in sort(collect(keys(flowGrap_obj.nodeC))))
# create array of node labels
cLabel_arr = map(x -> names_dic[anyM.sets[:C].nodes[x].val],sort(collect(keys(flowGrap_obj.nodeC))))
teLabel_arr = map(x -> names_dic[anyM.sets[:Te].nodes[x].val],sortTe_arr)
othLabel_arr = map(x -> names_dic[String(othNodeId_dic[x][2])],sort(collect(keys(othNodeId_dic))))
nodeLabel_arr = vcat(cLabel_arr, teLabel_arr, othLabel_arr)
revNodeLabel_arr = map(x -> revNames_dic[x],nodeLabel_arr)
# create array of node colors
cColor_arr = map(x -> anyM.sets[:C].nodes[x].val |> (z -> z in keys(col_dic) ? col_dic[z] : (names_dic[z] in keys(col_dic) ? col_dic[names_dic[z]] : (0.85,0.85,0.85))),sort(collect(keys(flowGrap_obj.nodeC))))
teColor_arr = map(x -> anyM.sets[:Te].nodes[x].val |> (z -> useTeColor && z in keys(col_dic) ? col_dic[z] : (useTeColor && names_dic[z] in keys(col_dic) ? col_dic[names_dic[z]] : (0.85,0.85,0.85))),sortTe_arr)
othColor_arr = map(x -> anyM.sets[:C].nodes[othNodeId_dic[x][1]].val |> (z -> z in keys(col_dic) ? col_dic[z] : (names_dic[z] in keys(col_dic) ? col_dic[names_dic[z]] : (0.85,0.85,0.85))),sort(collect(keys(othNodeId_dic))))
nodeColor_arr = vcat(map(x -> replace.(string.("rgb",string.(map(z -> z .* 255.0,x)))," " => ""),[cColor_arr, teColor_arr, othColor_arr])...)
dropData_arr = Array{Dict{Symbol,Any},1}()
# </editor-fold>
# XXX loop over potential buttons in dropdown menu
for drop in eachrow(unique(data_df[!,dropDim_arr]))
# <editor-fold desc="filter data and create flow array">
dropData_df = copy(data_df)
if :region in dropDown subR_arr = [drop.R_dis, getDescendants(drop.R_dis,anyM.sets[:R],true)...] end
for d in dropDown
filter!(x -> d == :region ? x.R_dis in subR_arr : x.Ts_disSup == drop.Ts_disSup, dropData_df)
end
flow_arr = Array{Tuple,1}()
# write flows reported in data summary
for x in eachrow(dropData_df)
a = Array{Any,1}(undef,3)
# assign source and target nodes depending on the type of variable
if x.variable in (:demand,:export,:trdSell,:crt)
a[1] = flowGrap_obj.nodeC[x.C]
a[2] = othNode_dic[(x.C,x.variable)]
elseif x.variable in (:import,:trdBuy,:lss)
a[1] = othNode_dic[(x.C,x.variable)]
a[2] = flowGrap_obj.nodeC[x.C]
elseif x.variable in (:gen,:stOut)
if x.Te in keys(flowGrap_obj.nodeTe) # if technology is not directly part of the graph, use its smallest ancestor that is
a[1] = flowGrap_obj.nodeTe[x.Te]
else
a[1] = flowGrap_obj.nodeTe[minimum(intersect(keys(flowGrap_obj.nodeTe),getAncestors(x.Te,anyM.sets[:Te],:int)))]
end
a[2] = flowGrap_obj.nodeC[x.C]
else
a[1] = flowGrap_obj.nodeC[x.C]
if x.Te in keys(flowGrap_obj.nodeTe)
a[2] = flowGrap_obj.nodeTe[x.Te]
else
a[2] = flowGrap_obj.nodeTe[minimum(intersect(keys(flowGrap_obj.nodeTe),getAncestors(x.Te,anyM.sets[:Te],:int)))]
end
end
a[3] = abs(x.value)
push!(flow_arr,tuple(a...))
end
# create flows connecting different carriers
idToC_dic = Dict(map(x -> x[2] => x[1], collect(flowGrap_obj.nodeC)))
for x in filter(x -> anyM.sets[:C].up[x] != 0,intersect(union(getindex.(flow_arr,1),getindex.(flow_arr,2)),values(flowGrap_obj.nodeC)))
a = Array{Any,1}(undef,3)
a[1] = flowGrap_obj.nodeC[x]
a[2] = flowGrap_obj.nodeC[anyM.sets[:C].up[x]]
a[3] = (getindex.(filter(y -> y[2] == x,flow_arr),3) |> (z -> isempty(z) ? 0.0 : sum(z))) - (getindex.(filter(y -> y[1] == x,flow_arr),3) |> (z -> isempty(z) ? 0.0 : sum(z)))
push!(flow_arr,tuple(a...))
end
# merges flows for different regions that connect the same nodes
flow_arr = map(unique(map(x -> x[1:2],flow_arr))) do fl
allFl = filter(y -> y[1:2] == fl[1:2],flow_arr)
return (allFl[1][1],allFl[1][2],sum(getindex.(allFl,3)))
end
# removes nodes according to the function input provided
for rmv in rmvNode
# splits remove expression by semicolon and searches for first part
rmvStr_arr = split(rmv,"; ")
relNodes_arr = findall(nodeLabel_arr .== rmvStr_arr[1])
if isempty(relNodes_arr) relNodes_arr = findall(revNodeLabel_arr .== rmvStr_arr[1]) end
if isempty(relNodes_arr) continue end
if length(rmvStr_arr) == 2 # if rmv contains two strings separated by a semicolon, the second one should relate to a carrier, carrier is searched for and all related flows are removed
relC_arr = findall(nodeLabel_arr .== rmvStr_arr[2])
if isempty(relC_arr) relC_arr = findall(revNodeLabel_arr .== rmvStr_arr[2]) end
if isempty(relC_arr)
produceMessage(anyM.options,anyM.report, 1," - Remove string contained a carrier not found in graph, check for typos: "*rmv)
continue
else
c_int = relC_arr[1]
end
filter!(x -> !((x[1] in relNodes_arr || x[2] in relNodes_arr) && (x[1] == c_int || x[2] == c_int)),flow_arr)
elseif length(rmvStr_arr) > 2
error("one remove string contained more than one semicolon, this is not supported")
else # if rmv only contains one string, the node is only removed where in- and outgoing flows are equal or where only one of both exists
out_tup = filter(x -> x[1] == relNodes_arr[1],flow_arr)
in_tup = filter(x -> x[2] == relNodes_arr[1],flow_arr)
if length(out_tup) == 1 && length(in_tup) == 1 && out_tup[1][3] == in_tup[1][3] # in- and outgoing are the same
filter!(x -> !(x in (out_tup[1],in_tup[1])),flow_arr)
push!(flow_arr,(in_tup[1][1],out_tup[1][2],in_tup[1][3]))
elseif length(out_tup) == 0 # only ingoing flows
filter!(x -> !(x in in_tup),flow_arr)
elseif length(in_tup) == 0 # only outgoing flows
filter!(x -> !(x in out_tup),flow_arr)
end
end
end
# </editor-fold>
# <editor-fold desc="create dictionaries for later plotting">
# collect data for drop in a dictionary
linkColor_arr = map(x -> collect(x[1] in keys(cColor_dic) ? cColor_dic[x[1]] : cColor_dic[x[2]]) |>
(z -> replace(string("rgba",string(tuple([255.0 .*z..., (x[1] in keys(cColor_dic) && x[2] in keys(cColor_dic) ? 0.8 : 0.5)]...)))," " => "")), flow_arr)
link_dic = Dict(:source => getindex.(flow_arr,1) .- 1, :target => getindex.(flow_arr,2) .- 1, :value => getindex.(flow_arr,3), :color => linkColor_arr)
fullData_arr = [Dict(:link => link_dic, :node => Dict(:label => nodeLabel_arr, :color => nodeColor_arr))]
# pushes dictionary to overall array
label_str = string("<b>",join(map(y -> anyM.sets[Symbol(split(String(y),"_")[1])].nodes[drop[y]].val,dropDim_arr),", "),"</b>")
push!(dropData_arr,Dict(:args => fullData_arr, :label => label_str, :method => "restyle"))
# </editor-fold>
end
# <editor-fold desc="create various dictionaries to define format and create plot">
menus_dic = [Dict(:buttons => dropData_arr, :direction => "down", :pad => Dict(:l => 10, :t => 10), :font => Dict(:size => 16, :family => "Arial"), :showactive => true, :x => 0.01, :xanchor => "center", :y => 1.1, :yanchor => "middle")]
data_dic = Dict(:type => "sankey", :orientation => "h", :valueformat => ".0f", :textfont => Dict(:family => "Arial"), :node => Dict(:pad => 8, :thickness => 36, :line => Dict(:color => "white",:width => 0.01), :hoverinfo => "skip"))
layout_dic = Dict(:width => 125*plotSize[1], :height => 125*plotSize[2], :updatemenus => menus_dic, :font => Dict(:size => 32, :family => "Arial"))
fig = Dict(:data => [data_dic], :layout => layout_dic)
plt.offline.plot(fig, filename="$(anyM.options.outDir)/energyFlowSankey_$(join(string.(dropDown),"_"))_$(anyM.options.outStamp).html")
# </editor-fold>
end
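# hypothetical call of the sankey plot (node and carrier labels are assumptions): flows below 0.5 are dropped and the node labeled "import" is only removed where it connects to the carrier "electricity", following the "node; carrier" format parsed above
# plotEnergyFlow(:sankey, anyM; minVal = 0.5, rmvNode = ("import; electricity",))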
# XXX define positions of nodes in energy flow graph
# function is mostly taken from [GraphPlot.jl](https://github.com/JuliaGraphs/GraphPlot.jl), which in turn references [IainNZ](https://github.com/IainNZ)'s [GraphLayout.jl](https://github.com/IainNZ/GraphLayout.jl)
function flowLayout(nodesCnt_int::Int,edges_smat::SparseMatrixCSC{Int64,Int64}, locsX_arr::Array{Float64,1} = 2*rand(nodesCnt_int).-1.0, locsY_arr::Array{Float64,1} = 2*rand(nodesCnt_int).-1.0; scaDist::Number = 0.5, maxIter::Int=5000, initTemp::Number=2.0)
# optimal distance between vertices
k = scaDist * sqrt(4.0 / nodesCnt_int)
k² = k * k
# store forces and apply at end of iteration all at once
force_x = zeros(nodesCnt_int)
force_y = zeros(nodesCnt_int)
# iterate maxIter times
@inbounds for iter = 1:maxIter
# Calculate forces
for i = 1:nodesCnt_int
force_vec_x = 0.0
force_vec_y = 0.0
for j = 1:nodesCnt_int
i == j && continue
d_x = locsX_arr[j] - locsX_arr[i]
d_y = locsY_arr[j] - locsY_arr[i]
dist² = (d_x * d_x) + (d_y * d_y)
dist = sqrt(dist²)
if !( iszero(edges_smat[i,j]) && iszero(edges_smat[j,i]) )
# Attractive + repulsive force
# F_d = dist² / k - k² / dist # original FR algorithm
F_d = dist / k - k² / dist²
else
# Just repulsive
# F_d = -k² / dist # original FR algorithm
F_d = -k² / dist²
end
force_vec_x += F_d*d_x
force_vec_y += F_d*d_y
end
force_x[i] = force_vec_x
force_y[i] = force_vec_y
end
# Cool down
temp = initTemp / iter
# Now apply them, but limit to temperature
for i = 1:nodesCnt_int
fx = force_x[i]
fy = force_y[i]
force_mag = sqrt((fx * fx) + (fy * fy))
scale = min(force_mag, temp) / force_mag
locsX_arr[i] += force_x[i] * scale
locsY_arr[i] += force_y[i] * scale
end
end
# Scale to unit square
min_x, max_x = minimum(locsX_arr), maximum(locsX_arr)
min_y, max_y = minimum(locsY_arr), maximum(locsY_arr)
function scaler(z, a, b)
2.0*((z - a)/(b - a)) - 1.0
end
map!(z -> scaler(z, min_x, max_x), locsX_arr, locsX_arr)
map!(z -> scaler(z, min_y, max_y), locsY_arr, locsY_arr)
# converts positions into a dictionary (the x-coordinate is stretched by 16/9, matching the default 16:9 plot aspect ratio)
pos_dic = Dict(z => [locsX_arr[z]*16/9,locsY_arr[z]] for z in 1:nodesCnt_int)
return pos_dic
end
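# a minimal sketch of calling the layout directly (hypothetical 3-node chain 1 -> 2 -> 3):
# using SparseArrays
# pos_dic = flowLayout(3, sparse([1,2],[2,3],[1,1],3,3); maxIter = 500)
# returns a Dict mapping each node id to its [x,y] position, with x stretched by 16/9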
# XXX returns array of colors for input nodes, whose labels can be found in label_dic
function getNodeColors(node_arr::Array{Int,1}, label_dic::Dict{Int64,String},anyM::anyModel)
revName_dic = collect(anyM.graInfo.names) |> (z -> Dict(Pair.(getindex.(z,2),getindex.(z,1))))
col_dic = anyM.graInfo.colors
color_arr = map(node_arr) do x
str = label_dic[x]
if str in keys(col_dic) # label is key in color dictionary
return col_dic[str]
elseif str in keys(anyM.graInfo.names) && anyM.graInfo.names[str] in keys(col_dic) # internal name is key in dictionary, but label was external
return col_dic[anyM.graInfo.names[str]]
elseif str in keys(revName_dic) && revName_dic[str] in keys(col_dic) # external name is key in dictionary, but label was internal
return col_dic[revName_dic[str]]
else # default color
return (0.85,0.85,0.85)
end
end
return color_arr
end
# XXX move a node after positions were created within energy flow graph
"""
```julia
moveNode!(model_object::anyModel, newPos_arr::Union{Array{Tuple{String,Array{Float64,1}},1},Tuple{String,Array{Float64,1}}})
```
Moves a node within the current layout of the node graph created with `plotEnergyFlow`. See [Energy flow](@ref).
"""
function moveNode!(anyM::anyModel,newPos_arr::Union{Array{Tuple{String,Array{Float64,1}},1},Tuple{String,Array{Float64,1}}})
flowGrap_obj = anyM.graInfo.graph
if !isdefined(flowGrap_obj,:nodePos)
error("Initial positions are not yet defined. Run 'plotEnergyFlow' first.")
end
# gets assignment between node ids and names
edges_arr = vcat(collect.(flowGrap_obj.edgeC),collect.(flowGrap_obj.edgeTe))
cToId_arr = map(x -> anyM.sets[:C].nodes[x[1]].val => x[2], filter(y -> y[2] in union(edges_arr...), collect(flowGrap_obj.nodeC)))
teToId_arr = map(x -> anyM.sets[:Te].nodes[x[1]].val => x[2], filter(y -> y[2] in union(edges_arr...), collect(flowGrap_obj.nodeTe)))
nameToId_dic = Dict(vcat(teToId_arr,cToId_arr))
# if input is just a single tuple and not an array, convert to array
if typeof(newPos_arr) == Tuple{String,Array{Float64,1}}
newPos_arr = [newPos_arr]
end
switchNames_dic = Dict(map(x -> x[2] => x[1],collect(anyM.graInfo.names)))
# loops over array of moved nodes
for newPos in newPos_arr
# get id of node depending on whether it is an original name or a name just used in the plot
if newPos[1] in keys(nameToId_dic)
x = nameToId_dic[newPos[1]]
elseif newPos[1] in values(anyM.graInfo.names)
x = nameToId_dic[switchNames_dic[newPos[1]]]
else
error("Node name not recognized!")
end
# actually adjust node position
flowGrap_obj.nodePos[x] = [flowGrap_obj.nodePos[x][1] + newPos[2][1]*2, flowGrap_obj.nodePos[x][2] + newPos[2][2]*2]
end
end
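# hypothetical usage (node names are assumptions): shifts the node "electricity" to the right by 0.1 and the node "gas" down by 0.05
# moveNode!(anyM, [("electricity", [0.1, 0.0]), ("gas", [0.0, -0.05])])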
# </editor-fold>
# XXX dummy function just to provide a docstring for printIIS (a docstring in printIIS won't work, because the read-in is conditional)
"""
```julia
printIIS(model_object::anyModel)
```
Uses Gurobi's computeIIS function to determine the constraints of the optimization problem that cause infeasibility.
"""
function printIIS(anyM::anyModel,d::Int)
end
using Gurobi
function printIIS(anyM::anyModel)
# computes iis
compute_conflict!(anyM.optModel)
if MOI.get(anyM.optModel, MOI.ConflictStatus()) != MOI.ConflictStatusCode(3) return end
# loops over constraint tables to find constraints within iis
allCns_pair = vcat(collect.(vcat(anyM.parts.bal.cns, anyM.parts.trd.cns, anyM.parts.lim.cns, map(x -> x.cns,values(anyM.parts.tech))...))...)
for cns in allCns_pair
if cns[1] == :objEqn continue end
allConstr_arr = findall(map(x -> MOI.ConflictParticipationStatusCode(0) != MOI.get(anyM.optModel.moi_backend, MOI.ConstraintConflictStatus(), x.index),cns[2][!,:cns]))
# prints constraints within iis
if !isempty(allConstr_arr)
println("$(length(allConstr_arr)) of IIS in $(cns[1]) constraints.")
colSet_dic = Dict(x => Symbol(split(string(x),"_")[1]) for x in intCol(cns[2]))
for iisConstr in allConstr_arr
row = cns[2][iisConstr,:]
dimStr_arr = map(x -> row[x] == 0 ? "" : string(x,": ",join(getUniName(row[x], anyM.sets[colSet_dic[x]])," < ")),collect(keys(colSet_dic)))
println("$(join(filter(x -> x != "",dimStr_arr),", ")), constraint: $(row[:cns])")
end
end
end
end
export printIIS
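# a sketch of the intended workflow (assumes Gurobi was set as the optimizer of the model):
# set_optimizer(anyM.optModel, Gurobi.Optimizer)
# optimize!(anyM.optModel) # solve turns out to be infeasible
# printIIS(anyM) # prints the constraints constituting the irreducible infeasible subset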
# XXX maps carriers to their different levels
function createCarrierMapping!(setData_dic::Dict,anyM::anyModel)
# creates column symbols for all specified carrier levels
lvlCar_arr = [Symbol("carrier_",i) for i in 1:anyM.sets[:C].height]
# tuple of columns with dispatch and expansion resolutions
resCol_tup = (:timestep_dispatch, :timestep_expansion, :region_dispatch, :region_expansion)
resLongShort_tup = Dict(:timestep_dispatch => :lvlTsDis, :timestep_expansion => :lvlTsExp, :region_dispatch => :lvlRDis, :region_expansion => :lvlRExp)
anyM.cInfo = Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}}()
# loops over rows in carrier file and writes specific resolutions
for row in eachrow(setData_dic[:C])
# gets tuple of strings for respective carrier
car_tup = tuple(map(x -> row[x],lvlCar_arr)...)
car_int = lookupTupleTree(car_tup,anyM.sets[:C])[1]
# gets resolution values and writes them if they can be parsed to numbers
resVal_dic = Dict(resLongShort_tup[x] => row[x] for x in resCol_tup)
# checks if the carrier has an equality constraint or not
if :carrier_equality in namesSym(row)
if !(row[:carrier_equality] in ("no","yes"))
push!(anyM.report,(2,"carrier mapping","","column carrier_equality can only contain keywords 'yes' or 'no'"))
continue
else
eq_boo = row[:carrier_equality] == "yes"
end
else
eq_boo = false
end
# check if level values can be converted to integers
if any(map(x -> tryparse(Int,x), values(resVal_dic)) .== nothing)
push!(anyM.report,(2,"carrier mapping","","no resolutions written for $(createFullString(car_int,anyM.sets[:C])), provide as integer, carrier was skipped"))
continue
end
res_dic = Dict(resLongShort_tup[x] => parse(Int,row[x]) for x in resCol_tup)
# writes levels after consistency check
if res_dic[:lvlTsDis] < res_dic[:lvlTsExp]
push!(anyM.report,(3,"carrier mapping","","temporal resolution of expansion can not be more detailed than for dispatch for $(createFullString(car_int,anyM.sets[:C]))"))
continue
elseif res_dic[:lvlRDis] > res_dic[:lvlRExp]
push!(anyM.report,(3,"carrier mapping","","spatial resolution of expansion must be at least as detailed as dispatch for $(createFullString(car_int,anyM.sets[:C]))"))
continue
else
anyM.cInfo[car_int] = (tsDis = res_dic[:lvlTsDis],tsExp = res_dic[:lvlTsExp],rDis = res_dic[:lvlRDis],rExp = res_dic[:lvlRExp], eq = eq_boo)
end
end
if any(getindex.(anyM.report,1) .== 3) print(getElapsed(anyM.options.startTime)); errorTest(anyM.report,anyM.options) end
# loops over all carriers to check consistency of resolutions and to inherit a resolution where none was defined; this cannot be carried out if errors were already detected above
for c in filter(x -> x != 0, keys(anyM.sets[:C].nodes))
anyM.cInfo = evaluateReso(c,anyM.sets[:C],anyM.cInfo,anyM.report)
end
if minimum(map(x -> getfield(x,:tsDis),values(anyM.cInfo))) < maximum(map(x -> getfield(x,:tsExp),values(anyM.cInfo)))
push!(anyM.report,(3,"carrier mapping","","one temporal dispatch resolution is more detailed than one of the temporal expansion resolutions"))
end
produceMessage(anyM.options,anyM.report, 3," - Created mapping for carriers")
end
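# illustrative carrier row (values are assumptions): timestep_dispatch = 4, timestep_expansion = 2, region_dispatch = 1 and region_expansion = 2 passes the checks above, since dispatch is temporally at least as detailed as expansion (4 >= 2) and spatially not more detailed than expansion (1 <= 2)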
# XXX checks carrier for errors in resolution or derives resolution from child carriers
function evaluateReso(startIdx_int::Int,car_tree::Tree,cInfo_dic::Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}},report::Array{Tuple,1})
# extracts the full carrier name and all descendants
carName_str = createFullString(startIdx_int,car_tree)
allChildIdx_arr = getDescendants(startIdx_int,car_tree)
# all entries need to have a resolution => otherwise run evaluateReso on them
for noResoIdx = setdiff(allChildIdx_arr,collect(keys(cInfo_dic)))
cInfo_dic = evaluateReso(noResoIdx,car_tree,cInfo_dic,report)
end
# tries to inherit resolutions from children, if no data exists yet
if !haskey(cInfo_dic,startIdx_int)
if isempty(allChildIdx_arr)
push!(report,(3,"carrier mapping","","carrier $(carName_str) got no resolution and could not inherit from children either"))
return cInfo_dic
else
newReso_dic = Dict(y => minimum([getfield(cInfo_dic[x],y) for x in allChildIdx_arr]) for y in (:tsDis,:tsExp,:rDis,:rExp))
cInfo_dic[startIdx_int] = (tsDis = newReso_dic[:tsDis],tsExp = newReso_dic[:tsExp],rDis = newReso_dic[:rDis],rExp = newReso_dic[:rExp], eq = false)
push!(report,(1,"carrier mapping","","carrier $(carName_str) inherited resolution from children"))
return cInfo_dic
end
# checks if existing resolution is flawed
else
for childIdx in allChildIdx_arr
# check if any children got a smaller resolution value
if any(map(x -> getfield(cInfo_dic[startIdx_int],x) > getfield(cInfo_dic[childIdx],x),(:tsDis,:tsExp,:rDis,:rExp)))
push!(report,(3,"carrier mapping","","carrier $(carName_str) got a resolution more detailed than its children's"))
end
end
end
return cInfo_dic
end
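# worked example of the inheritance above (levels are illustrative): a parent carrier without own resolutions and children at (tsDis = 4, tsExp = 2, rDis = 1, rExp = 2) and (tsDis = 3, tsExp = 2, rDis = 2, rExp = 2) inherits the field-wise minimum (tsDis = 3, tsExp = 2, rDis = 1, rExp = 2)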
# XXX maps information about timesteps used
function createTimestepMapping!(anyM::anyModel)
# XXX writes the superordinate dispatch level, the timesteps on this level and scaling factor for timesteps depending on the respective superordinate dispatch timestep and the level
supTsLvl_int = maximum(map(x -> getfield(x,:tsExp),values(anyM.cInfo)))
if anyM.options.supTsLvl != 0
if minimum(map(x -> getfield(x,:tsDis),values(anyM.cInfo))) >= anyM.options.supTsLvl
supTsLvl_int = anyM.options.supTsLvl
push!(anyM.report,(1,"timestep mapping","","superordinate dispatch level provided via options was used"))
else
push!(anyM.report,(2,"timestep mapping","","superordinate dispatch level provided via options could not be used, because it was more detailed than at least one dispatch level provided"))
end
end
supTs_tup = tuple(sort(getfield.(filter(x -> x.lvl == supTsLvl_int,collect(values(anyM.sets[:Ts].nodes))),:idx))...)
scaSupTs_dic = Dict((x[1],x[2]) => (1/anyM.options.redStep)*8760/length(getDescendants(x[1],anyM.sets[:Ts],false,x[2])) for x in Iterators.product(supTs_tup,filter(x -> x >= supTsLvl_int,1:anyM.sets[:Ts].height)))
anyM.supTs = (lvl = supTsLvl_int, step = supTs_tup, sca = scaSupTs_dic)
if length(anyM.supTs.step) > 50
push!(anyM.report,(2,"timestep mapping","","problem specification resulted in more than 50 superordinate dispatch timesteps, this looks faulty"))
end
produceMessage(anyM.options,anyM.report, 3," - Created mapping for time steps")
end
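# worked example of the scaling factor above (assumes redStep = 1.0): a superordinate timestep with 8760 hourly descendants gives (1/1.0)*8760/8760 = 1.0 on the hourly level, while 4380 two-hour steps would give a factor of 2.0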
# XXX writes basic information for each technology
function createTechInfo!(tSym::Symbol, setData_dic::Dict,anyM::anyModel)
part = anyM.parts.tech[tSym]
t_int = techInt(tSym,anyM.sets[:Te])
lvlTech_arr = Symbol.(:technology_,1:anyM.sets[:Te].height)
# tuple of columns with input, output and stored carriers
typeStr_dic = Dict(:carrier_conversion_in => "conversion input", :carrier_conversion_out => "conversion output", :carrier_stored_in => "storage", :carrier_stored_out => "storage")
carCol_tup = (:carrier_conversion_in, :carrier_conversion_out, :carrier_stored_in, :carrier_stored_out)
# maps carrier strings to their id
nameC_dic = Dict(collect(values(anyM.sets[:C].nodes)) |> (y -> Pair.(getfield.(y,:val),getfield.(y,:idx))))
# maps selected strings of tech types to integers
typeStringInt_dic = Dict("stock" => 0, "mature" => 1,"emerging" => 2)
# gets datarow for respective technology
row_df = anyM.sets[:Te].nodes[t_int].val |> (z -> filter(x -> any(map(y -> z == x[y],lvlTech_arr)) ,setData_dic[:Te])[1,:])
# XXX writes carrier info
# gets string array of carriers for input, output and stored, looks up respective ids afterwards and writes to mapping file
carStrArr_dic = Dict(y => y in namesSym(row_df) ? split(replace(row_df[y]," " => ""),";") |> (z -> filter(x -> !isempty(x),z)) : String[] for y in carCol_tup)
carId_dic = Dict(z => tuple(map(x -> getDicEmpty(nameC_dic,x),carStrArr_dic[z])...) for z in keys(carStrArr_dic))
for x in filter(x -> Int[] in carId_dic[x], collectKeys(keys(carId_dic)))
push!(anyM.report,(3,"technology mapping","carrier","$(typeStr_dic[x]) carrier of technology $(string(tSym)) not entered correctly"))
carId_dic[x] = tuple(filter(y -> y != Int[],collect(carId_dic[x]))...)
end
# avoid storage of carriers that are balanced on superordinate dispatch level (e.g. if gas is balanced yearly, there is no need for gas storage)
for type in (:carrier_stored_out, :carrier_stored_in)
for c in carId_dic[type]
if anyM.supTs.lvl == anyM.cInfo[c].tsDis
carId_dic[type] = tuple(filter(x -> x != c,collect(carId_dic[type]))...)
push!(anyM.report,(2,"technology mapping","carrier","carrier $(createFullString(c,anyM.sets[:C])) of technology $(string(tSym)) cannot be stored, because carrier is balanced on superordinate dispatch level"))
end
end
end
# writes all relevant types of dispatch variables and respective carriers
carGrp_ntup = (use = carId_dic[:carrier_conversion_in], gen = carId_dic[:carrier_conversion_out], stExtIn = carId_dic[:carrier_stored_in], stExtOut = carId_dic[:carrier_stored_out],
stIntIn = tuple(intersect(carId_dic[:carrier_conversion_out],carId_dic[:carrier_stored_out])...), stIntOut = tuple(intersect(carId_dic[:carrier_conversion_in],carId_dic[:carrier_stored_in])...))
if :carrier_stored_active in namesSym(row_df)
actStStr_arr = split(replace(row_df[:carrier_stored_active]," " => ""),";") |> (z -> filter(x -> !isempty(x),z))
actSt_tup = tuple(map(x -> getDicEmpty(nameC_dic,x),actStStr_arr)...)
else
actSt_tup = tuple()
end
part.actSt = actSt_tup
# report on suspicious looking carrier constellations
if isempty(union(carId_dic[:carrier_conversion_out],carId_dic[:carrier_stored_out])) push!(anyM.report,(2,"technology mapping","carrier","technology $(string(tSym)) has no output")) end
if !isempty(setdiff(carId_dic[:carrier_stored_in],union(carGrp_ntup.stIntOut,carGrp_ntup.stExtOut))) && !isempty(carId_dic[:carrier_stored_in])
push!(anyM.report,(2,"technology mapping","carrier","some carrier of technology $(string(tSym)) can be charged but not discharged"))
end
if !isempty(setdiff(carId_dic[:carrier_stored_out],union(carGrp_ntup.stIntIn,carGrp_ntup.stExtIn))) && !isempty(carId_dic[:carrier_stored_out])
push!(anyM.report,(2,"technology mapping","carrier","some carrier of technology $(string(tSym)) can be discharged but not charged"))
end
for c in part.actSt
if !(c in vcat(map(x -> vcat(getDescendants(x,anyM.sets[:C],true)...,x), union(carGrp_ntup.stExtIn,carGrp_ntup.stExtOut))...))
push!(anyM.report,(3,"technology mapping","carrier","$(createFullString(c,anyM.sets[:C])) for active storage of technology $(string(tSym)) is not stored or a descendant of a stored carrier"))
end
end
part.carrier = filter(x -> getfield(carGrp_ntup,x) != tuple(),collectKeys(keys(carGrp_ntup))) |> (y -> NamedTuple{Tuple(y)}(map(x -> getfield(carGrp_ntup,x), y)) )
# detects if any in or out carrier is a parent of another in or out carrier and reports an error in these cases
for type in (:carrier_conversion_in, :carrier_conversion_out)
relCar_tup = carId_dic[type]
inherCar_tup = relCar_tup[findall(map(x -> !(isempty(filter(z -> z != x,intersect(getDescendants(x,anyM.sets[:C],true),relCar_tup)))),relCar_tup))]
if !isempty(inherCar_tup)
for inher in inherCar_tup
push!(anyM.report,(3,"technology mapping","carrier","for technology $(string(tSym)) the $(typeStr_dic[type]) carrier $(createFullString(inher,anyM.sets[:C])) is a parent of another $(typeStr_dic[type]) carrier, this is not supported"))
end
end
end
# XXX writes technology type
# finds technology type and checks it against the allowed types
if :technology_type in namesSym(row_df)
type_str = row_df[:technology_type]
else
type_str = "mature"
end
if !haskey(typeStringInt_dic,String(type_str))
push!(anyM.report,(3,"technology mapping","type","unknown technology type $type_str used, allowed are: $(join(keys(typeStringInt_dic),", "))"))
return
end
part.type = Symbol(type_str)
# XXX writes modes of technology
if :mode in namesSym(row_df) && length(anyM.sets[:M].nodes) > 1
part.modes = tuple(collect(lookupTupleTree(tuple(string(x),),anyM.sets[:M],1)[1] for x in filter(x -> x != "",split(replace(row_df[:mode]," " => ""),";")))...)
else
part.modes = tuple()
end
# XXX determines resolution of expansion
# determines carrier based expansion resolutions
cEx_boo = true
if isempty(vcat(collect.(values(carGrp_ntup))...))
push!(anyM.report,(2,"technology mapping","carrier","for technology $(string(tSym)) no carriers were provided"))
cEx_boo = false
end
tsExp_int = cEx_boo ? maximum(map(y -> getfield(anyM.cInfo[y],:tsExp), vcat(collect.(values(carGrp_ntup))...))) : 0
rExp_int = cEx_boo ? maximum(map(y -> getfield(anyM.cInfo[y],:rExp), vcat(collect.(values(carGrp_ntup))...))) : 0
# check if carrier based temporal resolution is overwritten by a technology specific value
if cEx_boo && :timestep_expansion in namesSym(row_df)
tsExpSpc_int = tryparse(Int,row_df[:timestep_expansion])
if !isnothing(tsExpSpc_int)
if tsExpSpc_int > anyM.supTs.lvl
push!(anyM.report,(2,"technology mapping","expansion level","specific temporal expansion level provided for $(string(tSym)) is below superordinate dispatch level and therefore could not be used"))
else
push!(anyM.report,(1,"technology mapping","expansion level","specific temporal expansion level provided for $(string(tSym)) was used instead of a carrier based value"))
tsExp_int = tsExpSpc_int
end
end
end
# check if carrier based spatial resolution is overwritten by a technology specific value
if cEx_boo && :region_expansion in namesSym(row_df)
rExpSpc_int = tryparse(Int,row_df[:region_expansion])
if !isnothing(rExpSpc_int)
if rExpSpc_int < rExp_int
push!(anyM.report,(2,"technology mapping","expansion level","specific spatial expansion level provided for $(string(tSym)) is less detailed than default value obtained from carriers and therefore could not be used"))
elseif rExpSpc_int == rExp_int
push!(anyM.report,(1,"technology mapping","expansion level","specific spatial expansion level provided for $(string(tSym)) is equal to default value obtained from carriers"))
else
push!(anyM.report,(1,"technology mapping","expansion level","specific spatial expansion level provided for $(string(tSym)) was used instead of a carrier based value"))
rExp_int = rExpSpc_int
end
end
end
expLvl_tup = (tsExp_int,rExp_int)
# XXX checks if dispatch variables should be disaggregated by expansion regions
rExpOrg_int = cEx_boo ? maximum(map(y -> getfield(anyM.cInfo[y],:rDis), vcat(collect.(values(carGrp_ntup))...))) : 0
if :region_disaggregate in namesSym(row_df) && rExp_int > rExpOrg_int # relies on information in explicit column, if disaggregation is possible and column exists
daggR_str = row_df[:region_disaggregate]
if daggR_str == "yes"
disAgg_boo = true
elseif daggR_str == "no"
disAgg_boo = false
else
push!(anyM.report,(3,"technology mapping","spatial aggregation","unknown keyword $daggR_str used to control spatial aggregation, please use 'yes' or 'no'"))
return
end
elseif rExp_int > rExpOrg_int # disaggregate by default, if it makes sense
disAgg_boo = true
else
disAgg_boo = false
end
part.disAgg = disAgg_boo
# XXX determines reference resolution for conversion (takes into account "region_disaggregate" by using spatial expansion instead of dispatch level if set to yes)
if !isempty(union(carGrp_ntup.use,carGrp_ntup.gen))
refTs_int = minimum([minimum([getproperty(anyM.cInfo[x],:tsDis) for x in getproperty(carGrp_ntup,z)]) for z in intersect(keys(part.carrier),(:gen, :use))])
refR_int = disAgg_boo ? rExp_int : minimum([minimum([getproperty(anyM.cInfo[x],:rDis) for x in getproperty(carGrp_ntup,z)]) for z in intersect(keys(part.carrier),(:gen, :use))])
refLvl_tup = (refTs_int, refR_int)
else
refLvl_tup = nothing
end
part.balLvl = (exp = expLvl_tup, ref = refLvl_tup)
produceMessage(anyM.options,anyM.report, 3," - Created mapping for technology $(string(tSym))")
end
# XXX maps capacity constraints to technology
function createCapaRestrMap!(tSym::Symbol,anyM::anyModel)
part = anyM.parts.tech[tSym]
capaDispRestr_arr = Array{Tuple{String,Array{Int,1},Int,Int},1}()
# extract tech info
carGrp_ntup = part.carrier
balLvl_ntup = part.balLvl
disAgg_boo = part.disAgg
# XXX writes dimension of capacity restrictions for conversion part (even if there are no inputs)
for side in intersect((:use,:gen),keys(carGrp_ntup))
# get respective carrier and their reference level
carDis_tup = map(getfield(carGrp_ntup,side)) do x
carRow_ntup = anyM.cInfo[x]
return x, carRow_ntup.tsDis, disAgg_boo ? balLvl_ntup.exp[2] : carRow_ntup.rDis
end
carConstr_arr = Tuple{Array{Int,1},Int,Int}[]
# writes all relevant combinations by going from finest resolution up, separately for temporal and spatial (2 and 3)
for j = [2,3]
# sorts descending by the j-th column
carDisSort_arr = sort(collect(carDis_tup), by = x -> x[j], rev=true)
carIt_arr = map(1:length(carDis_tup)) do x
if j == 2 (sort([carDisSort_arr[y][1] for y in 1:x]), carDisSort_arr[x][2], minimum([carDisSort_arr[y][3] for y in 1:x]))
else (sort([carDisSort_arr[y][1] for y in 1:x]), minimum([carDisSort_arr[y][2] for y in 1:x]), carDisSort_arr[x][3]) end
end
# keeps only entries at least as detailed as the reference level; on the input side, entries exactly at the reference level are dropped if outputs exist as well, since the restriction on the reference level is then created for the output side
if side == :use && isempty(setdiff((:use,:gen),keys(carGrp_ntup)))
carIt_arr = carIt_arr[findall(x -> j == 2 ? x[2] > balLvl_ntup.ref[1] : x[3] > balLvl_ntup.ref[2],carIt_arr)]
else
carIt_arr = carIt_arr[findall(x -> j == 2 ? x[2] >= balLvl_ntup.ref[1] : x[3] >= balLvl_ntup.ref[2],carIt_arr)]
end
push!(carConstr_arr, carIt_arr...)
end
# identifies and addresses "crossings" (one carrier is temporally more but spatially less detailed than another one) by converting them into new restriction mappings (see the worked example after this loop)
carConstrUni_arr = unique(carConstr_arr)
cross_arr = filter(x -> (any(map(z -> (x[2] > z[2] && x[3] < z[3]) || (x[3] > z[3] && x[2] < z[2]), carConstrUni_arr))),carConstrUni_arr)
if !isempty(cross_arr)
newEntry_tup = (union(getindex.(cross_arr,1)...), minimum(getindex.(cross_arr,2)),minimum(getindex.(cross_arr,3)))
carConstrUni_arr = unique(vcat(newEntry_tup,carConstrUni_arr))
end
# filter redundant and "dominated" combinations (fewer or the same carriers, but not more temporal or spatial detail)
carConstrUni_arr2 = map(i -> map(x -> x[i],carConstrUni_arr),1:3)
carConFilt_arr = filter(carConstrUni_arr) do x
!(any((BitArray(issubset(x[1],y) for y in carConstrUni_arr2[1]) .&
(((x[2] .<= carConstrUni_arr2[2]) .& (x[3] .< carConstrUni_arr2[3])) .|
((x[2] .< carConstrUni_arr2[2]) .& (x[3] .<= carConstrUni_arr2[3])) .|
((x[2] .<= carConstrUni_arr2[2]) .& (x[3] .<= carConstrUni_arr2[3])))) .& BitArray(map(y -> y != x,carConstrUni_arr)))) end
carConFilt_arr2 = map(i -> map(x -> x[i],carConFilt_arr),1:3)
typeCapa_sym = side == :use ? "in" : "out"
# adds necessary capacity restrictions below reference level
map(x -> push!(capaDispRestr_arr,(typeCapa_sym, carConFilt_arr2[1][x], carConFilt_arr2[2][x], carConFilt_arr2[3][x])),1:length(carConFilt_arr))
end
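# illustrative "crossing" from the loop above (levels are assumptions): carriers a at (ts = 4, r = 1) and b at (ts = 2, r = 3) are each more detailed in one dimension only, so a joint restriction ([a,b], ts = 2, r = 1) at the coarser level of both is added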
# XXX writes dimension of capacity restrictions for storage
stInVar_arr, stOutVar_arr = [intersect(x,keys(carGrp_ntup)) for x in ((:stExtIn,:stIntIn),(:stExtOut,:stIntOut))]
if !isempty(stInVar_arr) || !isempty(stOutVar_arr)
allCar_arr = unique(vcat(collect.([getproperty(carGrp_ntup,y) for y in union(stInVar_arr,stOutVar_arr)])...))
for x in allCar_arr
carRow_ntup = anyM.cInfo[x]
# storage is balanced on the carrier level, but at least on the reference level; if regions are disaggregated, the spatial balance is on the expansion level (which is at least as detailed)
tsLvl_int = balLvl_ntup.ref != nothing ? max(balLvl_ntup.ref[1], carRow_ntup.tsDis) : carRow_ntup.tsDis
rLvl_int = disAgg_boo ? balLvl_ntup.exp[2] : carRow_ntup.rDis |> (z -> balLvl_ntup.ref != nothing ? max(balLvl_ntup.ref[2], z) : z)
if !isempty(stInVar_arr) push!(capaDispRestr_arr,("stIn", [x], tsLvl_int, rLvl_int)) end
if !isempty(stOutVar_arr) push!(capaDispRestr_arr,("stOut", [x], tsLvl_int, rLvl_int)) end
push!(capaDispRestr_arr,("stSize", [x], tsLvl_int, rLvl_int))
end
end
part.capaRestr = isempty(capaDispRestr_arr) ? DataFrame() : rename(DataFrame(capaDispRestr_arr), :1 => :cnstrType, :2 => :car, :3 => :lvlTs, :4 => :lvlR)
end
# <editor-fold desc="definition and handling of parameters">
# XXX defines all existing parameters
function defineParameter(options::modOptions,report::Array{Tuple,1})
parDef_dic = Dict{Symbol, NamedTuple}()
# <editor-fold desc="XXX expansion parameters">
# XXX general expansion
parDef_dic[:rateDisc] = (dim = (:Ts_disSup, :R_exp), defVal = 0.02, herit = (:Ts_disSup => :up, :R_exp => :up, :R_exp => :avg_any, :Ts_disSup => :avg_any), part = :obj)
# XXX technology and exchange expansion
parDef_dic[:stInToConv] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_exp => :up, :R_exp => :up), part = :techSt)
parDef_dic[:stOutToStIn] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_exp => :up, :R_exp => :up), part = :techSt)
parDef_dic[:sizeToStIn] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_exp => :up, :R_exp => :up), part = :techSt)
parDef_dic[:delConv] = (dim = (:Ts_expSup, :R_exp, :Te), defVal = 0, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techConv)
parDef_dic[:delStIn] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 0, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:delStOut] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 0, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:delStSize] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 0, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:delExc] = (dim = (:Ts_expSup, :R_a, :R_b, :C), defVal = 0, herit = (:Ts_expSup => :up, :R_a => :avg_any, :R_b => :avg_any, :C => :up), part = :exc)
parDef_dic[:lifeConv] = (dim = (:Ts_expSup, :R_exp, :Te), defVal = 20, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techConv)
parDef_dic[:lifeStIn] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 20, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:lifeStOut] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 20, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:lifeStSize] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = 20, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :techSt)
parDef_dic[:lifeExc] = (dim = (:Ts_expSup, :R_a, :R_b, :C), defVal = 50, herit = (:Ts_expSup => :up, :R_a => :avg_any, :R_b => :avg_any, :C => :up), part = :exc)
parDef_dic[:lifeEcoConv] = (dim = (:Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:lifeEcoStIn] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:lifeEcoStOut] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:lifeEcoStSize] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:lifeEcoExc] = (dim = (:Ts_expSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_expSup => :up, :R_a => :avg_any, :R_b => :avg_any, :C => :up), part = :obj)
parDef_dic[:costExpConv] = (dim = (:Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costExpStIn] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costExpStOut] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costExpStSize] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costExpExc] = (dim = (:Ts_expSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_expSup => :up, :R_a => :avg_any, :R_b => :avg_any, :C => :up), part = :obj)
parDef_dic[:rateExpConv] = (dim = (:Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:rateExpStIn] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:rateExpStOut] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:rateExpStSize] = (dim = (:Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:rateExpExc] = (dim = (:Ts_expSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_expSup => :up, :R_a => :avg_any, :R_b => :avg_any, :C => :up), part = :obj)
parDef_dic[:costOprConv] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:Ts_disSup => :up, :Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costOprStIn] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_disSup => :up, :Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costOprStOut] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_disSup => :up, :Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costOprStSize] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_disSup => :up, :Te => :up, :Ts_expSup => :up, :R_exp => :up), part = :obj)
parDef_dic[:costOprExc] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :up, :R_a => :avg_any, :R_b => :avg_any, :R_a => :up, :R_b => :up, :C => :up), part = :obj)
# XXX parameters regarding limits on technology and exchange expansion and capacity
# expansion limits on conversion, storage and exchange
parDef_dic[:expConvUp] = (dim = (:Ts_exp, :R_exp, :Te), defVal = nothing, herit = (:Ts_exp => :sum_full, :R_exp => :sum_full, :Te => :sum_full), part = :lim)
parDef_dic[:expConvLow] = (dim = (:Ts_exp, :R_exp, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any), part = :lim)
parDef_dic[:expConvFix] = (dim = (:Ts_exp, :R_exp, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any), part = :lim)
parDef_dic[:expStInUp] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_full, :R_exp => :sum_full, :Te => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:expStInLow] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expStInFix] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expStOutUp] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_full, :R_exp => :sum_full, :Te => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:expStOutLow] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expStOutFix] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expStSizeUp] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_full, :R_exp => :sum_full, :Te => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:expStSizeLow] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expStSizeFix] = (dim = (:Ts_exp, :R_exp, :C, :Te), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_exp => :sum_any, :Te => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expExcUp] = (dim = (:Ts_exp, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_exp => :sum_full, :R_a => :sum_full, :R_b => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:expExcLow] = (dim = (:Ts_exp, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:expExcFix] = (dim = (:Ts_exp, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_exp => :sum_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
# installed capacity limits and residual capacities on conversion, storage and exchange
parDef_dic[:capaConvUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:capaConvLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaConvFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStInUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:capaStInLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStInFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStOutUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:capaStOutLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStOutFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStSizeUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:capaStSizeLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaStSizeFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:capaExcUp] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_full), part = :lim)
parDef_dic[:capaExcLow] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:capaExcFix] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:capaConvResi] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :Ts_expSup => :sum_any, :Ts_disSup => :up), part = :techConv)
parDef_dic[:capaStInResi] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any, :Ts_disSup => :up), part = :techSt)
parDef_dic[:capaStOutResi] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any, :Ts_disSup => :up), part = :techSt)
parDef_dic[:capaStSizeResi] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any, :Ts_disSup => :up), part = :techSt)
parDef_dic[:capaExcResi] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :Ts_disSup => :up), part = :exc)
parDef_dic[:capaExcResiDir] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :Ts_disSup => :up), part = :exc)
# commissioned capacity limits on conversion, storage and exchange
if options.decomm != :none
parDef_dic[:oprCapaConvUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:oprCapaConvLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaConvFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStInUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:oprCapaStInLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStInFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStOutUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:oprCapaStOutLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStOutFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStSizeUp] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_full, :Te => :sum_full, :Ts_disSup => :avg_any, :C => :sum_full, :Ts_expSup => :sum_full), part = :lim)
parDef_dic[:oprCapaStSizeLow] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaStSizeFix] = (dim = (:Ts_disSup, :Ts_expSup, :R_exp, :C, :Te), defVal = nothing, herit = (:R_exp => :sum_any, :Te => :sum_any, :Ts_disSup => :avg_any, :C => :sum_any, :Ts_expSup => :sum_any), part = :lim)
parDef_dic[:oprCapaExcUp] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_full), part = :lim)
parDef_dic[:oprCapaExcLow] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:oprCapaExcFix] = (dim = (:Ts_disSup, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_disSup => :avg_any, :R_a => :sum_any, :R_b => :sum_any, :C => :sum_any), part = :lim)
end
# XXX limits on quantities (including emissions and emission factors)
upHerit_tup = (:Ts_dis => :sum_full, :Ts_expSup => :sum_full, :R_dis => :sum_full, :C => :sum_full, :Te => :sum_full, :M => :sum_full)
ofHerit_tup = (:Ts_dis => :sum_any, :Ts_expSup => :sum_any, :R_dis => :sum_any, :C => :sum_any, :Te => :sum_any, :M => :sum_any)
# actual energy limits on use, generation and storage
parDef_dic[:useUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = upHerit_tup, part = :lim)
parDef_dic[:useLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:useFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:genUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = upHerit_tup, part = :lim)
parDef_dic[:genLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:genFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:stOutUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = upHerit_tup, part = :lim)
parDef_dic[:stOutLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:stOutFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:stInUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = upHerit_tup, part = :lim)
parDef_dic[:stInLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:stInFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = ofHerit_tup, part = :lim)
parDef_dic[:excUp] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_from => :sum_full, :R_to => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:excLow] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_from => :sum_any, :R_to => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:excFix] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_from => :sum_any, :R_to => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:excDirUp] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_from => :sum_full, :R_to => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:excDirLow] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_from => :sum_any, :R_to => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:excDirFix] = (dim = (:Ts_dis, :R_from, :R_to, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_from => :sum_any, :R_to => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:crtUp] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:crtLow] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:crtFix] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:lssUp] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:lssLow] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:lssFix] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:trdBuyUp] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:trdBuyLow] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_dis => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:trdBuyFix] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_dis => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:trdSellUp] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_full, :R_dis => :sum_full, :C => :sum_full), part = :lim)
parDef_dic[:trdSellLow] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_dis => :sum_any, :C => :sum_any), part = :lim)
parDef_dic[:trdSellFix] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :sum_any, :R_dis => :sum_any, :C => :sum_any), part = :lim)
# emission limits and factors (computed as net values of trade and exchange)
parDef_dic[:emissionUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = upHerit_tup, part = :lim)
parDef_dic[:emissionFac] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = tuple(:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :C => :up, :Te => :up, :M => :up), part = :lim)
parDef_dic[:emissionPrc] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = tuple(:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :C => :up, :Te => :up, :M => :up), part = :obj)
# </editor-fold>
# <editor-fold desc="XXX dispatch parameters">
# XXX technology dispatch properties
# availability parameters
parDef_dic[:avaConv] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techConv, techPre = (preset = :lowest, mode = (:in, :out)))
parDef_dic[:avaStIn] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
parDef_dic[:avaStOut] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
parDef_dic[:avaStSize] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
# efficiency parameters
parDef_dic[:effConv] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :R_dis => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techConv, techPre = (preset = :reference, mode = (:in, :out)))
parDef_dic[:effStIn] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :C => :up, :R_dis => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
parDef_dic[:effStOut] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = 1.0, herit = (:Ts_expSup => :up, :Ts_dis => :up, :C => :up, :R_dis => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
# specific storage parameters
parDef_dic[:stDis] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :up, :C => :up, :R_dis => :up, :Te => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :techSt, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
parDef_dic[:stInflow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te), defVal = nothing, herit = (:Ts_expSup => :up, :C => :up, :Ts_dis => :sum_any, :R_dis => :sum_any, :Te => :up), part = :techSt, techPre = (preset = :carrierSt, mode = tuple()))
# variable costs
parDef_dic[:costVarUse] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :obj, techPre = (preset = :carrierIn, mode = (:in,)))
parDef_dic[:costVarGen] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :obj, techPre = (preset = :carrierOut, mode = (:out,)))
parDef_dic[:costVarStIn] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :obj, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
parDef_dic[:costVarStOut] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :C => :up, :Te => :up, :Ts_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :obj, techPre = (preset = :carrierSt, mode = (:stIn,:stOut,:stLvl)))
# energy related ratios (x% of energy from/to technology has to be carrier y)
parDef_dic[:ratioEnerUseUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minUse, mode = (:in,)))
parDef_dic[:ratioEnerUseLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minUse, mode = (:in,)))
parDef_dic[:ratioEnerUseFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minUse, mode = (:in,)))
parDef_dic[:ratioEnerGenUp] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minGen, mode = (:out,)))
parDef_dic[:ratioEnerGenLow] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minGen, mode = (:out,)))
parDef_dic[:ratioEnerGenFix] = (dim = (:Ts_dis, :Ts_expSup, :R_dis, :C, :Te, :M), defVal = nothing, herit = (:Ts_expSup => :up, :Ts_dis => :avg_any, :R_dis => :up, :Te => :up, :Ts_dis => :up), part = :techConv, techPre = (preset = :minGen, mode = (:out,)))
# XXX further dispatch properties
parDef_dic[:dem] = (dim = (:Ts_dis, :R_dis, :C), defVal = 0.0, herit = (:Ts_dis => :avg_any, :R_dis => :sum_any), part = :bal)
parDef_dic[:costCrt] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :bal)
parDef_dic[:costLss] = (dim = (:Ts_dis, :R_dis, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :Ts_dis => :avg_any, :R_dis => :avg_any), part = :bal)
# trade (=sell or buy to an external market) parameters
parDef_dic[:trdBuyPrc] = (dim = (:Ts_dis, :R_dis, :C, :id), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :R_dis => :avg_any, :Ts_dis => :avg_any), part = :trd)
parDef_dic[:trdSellPrc] = (dim = (:Ts_dis, :R_dis, :C, :id), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :R_dis => :avg_any, :Ts_dis => :avg_any), part = :trd)
parDef_dic[:trdBuyCap] = (dim = (:Ts_dis, :R_dis, :C, :id), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :R_dis => :avg_any, :Ts_dis => :avg_any), part = :trd)
parDef_dic[:trdSellCap] = (dim = (:Ts_dis, :R_dis, :C, :id), defVal = nothing, herit = (:Ts_dis => :up, :R_dis => :up, :R_dis => :avg_any, :Ts_dis => :avg_any), part = :trd)
# exchange (=exchange between explicit regions) parameters
parDef_dic[:avaExc] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = 1.0, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :exc)
parDef_dic[:lossExc] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = 0.0, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :exc)
parDef_dic[:costVarExc] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :obj)
parDef_dic[:avaExcDir] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :exc)
parDef_dic[:lossExcDir] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :exc)
parDef_dic[:costVarExcDir] = (dim = (:Ts_dis, :R_a, :R_b, :C), defVal = nothing, herit = (:Ts_dis => :up, :R_a => :up, :R_b => :up, :R_a => :avg_any, :R_b => :avg_any, :Ts_dis => :avg_any, :C => :up), part = :obj)
# </editor-fold>
# check if sets are ill-defined with respect to inheritance
heritRules_tup = (:sum_full, :sum_any, :avg_full, :avg_any, :uni_full, :uni_any, :up)
wrongSetHerit_arr = filter(x -> !all(map(y -> y[1] in parDef_dic[x].dim,parDef_dic[x].herit)),collectKeys(keys(parDef_dic)))
wrongRulesHerit_arr = filter(x -> !all(map(y -> y[2] in heritRules_tup,parDef_dic[x].herit)),collectKeys(keys(parDef_dic)))
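# hypothetical example of what these filters catch: an entry like
# parDef_dic[:foo] = (dim = (:Ts_dis, :C), defVal = nothing, herit = (:R_dis => :sum_any,), part = :lim)
# would land in wrongSetHerit_arr, since :R_dis is not part of dim; a rule value like :sum_most
# would land in wrongRulesHerit_arr, since it is not contained in heritRules_tup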
if !isempty(wrongSetHerit_arr)
push!(report,(3, "parameter definition", "", "inheritance rule for a set not defined as a possible dimension: $(join(wrongSetHerit_arr,", "))"))
end
if !isempty(wrongRulesHerit_arr)
push!(report,(3, "parameter read-in", "definition", "invalid inheritance rule: $(join(wrongRulesHerit_arr,", "))"))
end
return parDef_dic
end
# XXX assign parameter to model parts
function parameterToParts!(paraTemp_dic::Dict{String,Dict{Symbol,DataFrame}}, techIdx_arr::Array{Int,1}, anyM::anyModel)
# parameter defined within input data
allPar_arr = unique(vcat(collectKeys.(keys.(values(paraTemp_dic)))...))
# parameters actually used in the model (the difference are the exchange related parameters, which can be provided both directed and symmetric, but within the model only directed values are used)
parToFile_dic = Dict(x => collectKeys(keys(paraTemp_dic[x])) for x in keys(paraTemp_dic))
# gets definition of parameters and checks if all input parameters are defined
parDef_dic = defineParameter(anyM.options,anyM.report)
undefinedPar_arr = setdiff(unique(vcat(values(parToFile_dic)...)),keys(parDef_dic))
if !isempty(undefinedPar_arr)
for undefined in undefinedPar_arr push!(anyM.report,(3,"parameter read-in","definition","parameter with the name $(string(undefined)) does not exist")) end
print(getElapsed(anyM.options.startTime)); errorTest(anyM.report,anyM.options)
end
# maps potential nodes for inheritance from technology tree to "actual" technologies
techToPar_dic = Dict{Symbol,Dict{Int32,Array{Int32,1}}}()
techToPar_dic[:up] = Dict(x => vcat(x,getAncestors(x,anyM.sets[:Te],:int)...) for x in techIdx_arr)
techToPar_dic[:down] = Dict(x => vcat(x,getDescendants(x,anyM.sets[:Te],true)...) for x in techIdx_arr)
techToPar_dic[:both] = Dict(x => union(techToPar_dic[:up][x],techToPar_dic[:down][x]) for x in techIdx_arr)
techToPar_dic[:none] = Dict(x => [x] for x in techIdx_arr)
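# minimal sketch with hypothetical node ids: for a technology node 5 with ancestor 2 and descendant 9
# techToPar_dic[:up][5] == [5, 2] # inherit from the node itself and its ancestors
# techToPar_dic[:down][5] == [5, 9] # inherit from the node itself and its descendants
# techToPar_dic[:both][5] == [5, 2, 9]
# techToPar_dic[:none][5] == [5]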
convTechIdx_arr = filter(r -> !isempty(intersect((:gen,:use),keys(anyM.parts.tech[techSym(r,anyM.sets[:Te])].carrier))),techIdx_arr)
stTechIdx_arr = filter(r -> !isempty(intersect((:stExtIn,:stExtOut,:stIntIn,:stIntOut),keys(anyM.parts.tech[techSym(r,anyM.sets[:Te])].carrier))),techIdx_arr)
# XXX loop over all actual parameters to assign them to parts of the model
@threads for parIt in allPar_arr
# ensures all dataframes with data from single files have the same columns so they can be merged
relFiles_arr = collect(filter(y -> parIt in parToFile_dic[y],keys(paraTemp_dic)))
allCol_arr = unique(vcat(map(x -> namesSym(paraTemp_dic[x][parIt]), relFiles_arr)...))
for parFile in relFiles_arr
misCol_arr = setdiff(allCol_arr,namesSym(paraTemp_dic[parFile][parIt]))
for mis in misCol_arr
paraTemp_dic[parFile][parIt][!,mis] = fill(convert(Int32,0),nrow(paraTemp_dic[parFile][parIt]))
end
end
# actually merge data frames
allParData_df = vcat(map(x -> paraTemp_dic[x][parIt],relFiles_arr)...)
# orders regions in ascending order so symmetric entries are no longer ambiguous and duplicates can be identified
if :R_b in namesSym(allParData_df) && !(occursin("Dir",string(parIt)))
sortR_mat = sort(hcat([allParData_df[!,x] for x in (:R,:R_b)]...);dims = 2)
for (index,col) in enumerate((:R,:R_b)) allParData_df[!,col] = sortR_mat[:,index] end
end
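# worked example with hypothetical ids: the rows (R = 2, R_b = 1) and (R = 1, R_b = 2) describe the same
# undirected exchange; sort(hcat([2, 1], [1, 2]); dims = 2) turns both rows into [1 2], so the duplicate
# check below can identify them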
# XXX checks for duplicates and removes them if necessary
nonUnique_bool = nonunique(allParData_df)
if any(nonUnique_bool)
push!(anyM.report,(1,"parameter read-in","validity check","non-unique entries discovered for $(string(parIt))"))
delete!(allParData_df,nonUnique_bool)
end
# XXX checks for contradicting values
rmvVal_df = removeVal(allParData_df)
if !isempty(rmvVal_df)
contradic_bool = nonunique(allParData_df[:,rmvVal_df])
if any(contradic_bool)
push!(anyM.report,(3,"parameter read-in","validity check","contradicting entries discovered for $(string(parIt))"))
end
end
# XXX assign parameters to parts
parDef_tup = parDef_dic[parIt]
parPart_sym = parDef_tup.part
if parPart_sym != :techSt && parPart_sym != :techConv
# adds parameter to non-technology parts
getfield(anyM.parts,parPart_sym).par[parIt] = ParElement(allParData_df,parDef_tup,parIt,anyM.report)
else
allParTech_arr = :Te in namesSym(allParData_df) ? unique(allParData_df[!,:Te]) : [0]
# determines how technology might inherit from other technology nodes (not at all, by going up, by going down or both)
heritRules_arr = map(x -> x[2],filter(x -> x[1] == :Te, collect(parDef_tup.herit)))
if isempty(heritRules_arr)
herit_sym = :none
else
if unique(heritRules_arr) == [:up]
herit_sym = :up
elseif :up in heritRules_arr
herit_sym = :both
else
herit_sym = :down
end
end
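# sketch of the mapping above with hypothetical rules: (:Te => :up,) yields :up,
# (:Te => :up, :Te => :sum_any) yields :both, (:Te => :sum_any,) yields :down, and no :Te rule yields :none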
for relTech in filter(x -> !isempty(intersect(allParTech_arr,techToPar_dic[herit_sym][x])), parPart_sym == :techSt ? stTechIdx_arr : convTechIdx_arr)
# filters all entries of possible inheritance for each technology
filtParData_df = :Te in namesSym(allParData_df) ? filter(row -> row.Te in techToPar_dic[herit_sym][relTech], allParData_df) : allParData_df
# removes potential zero columns from data being actually written to part
rmvZeroParData_df = filtParData_df[!,filter(x -> unique(filtParData_df[!,x]) != [0] || x == :val,namesSym(filtParData_df))]
anyM.parts.tech[techSym(relTech,anyM.sets[:Te])].par[parIt] = ParElement(rmvZeroParData_df,parDef_tup,parIt,anyM.report)
end
end
end
# XXX adds parameter object for parameters where no explicit values were provided, but a default value exists
for parUndef in keys(filter(r -> r[2].defVal != nothing,parDef_dic))
parPart_sym = parDef_dic[parUndef].part
if parPart_sym != :techSt && parPart_sym != :techConv
if !haskey(getfield(anyM.parts,parPart_sym).par,parUndef)
getfield(anyM.parts,parPart_sym).par[parUndef] = ParElement(DataFrame(),parDef_dic[parUndef],parUndef,anyM.report)
end
else
for relTech in filter(x -> !haskey(anyM.parts.tech[techSym(x,anyM.sets[:Te])].par,parUndef),parPart_sym == :techSt ? stTechIdx_arr : convTechIdx_arr)
anyM.parts.tech[techSym(relTech,anyM.sets[:Te])].par[parUndef] = ParElement(DataFrame(),parDef_dic[parUndef],parUndef,anyM.report)
end
end
end
return parDef_dic
end
# XXX perform pre-setting of dispatch parameters for all technologies
function presetDispatchParameter!(part::TechPart,prepTech_dic::Dict{Symbol,NamedTuple},parDef_dic::Dict{Symbol,NamedTuple},newHerit_dic::Dict{Symbol,Tuple{Pair{Symbol,Symbol},Pair{Symbol,Symbol}}},
ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},anyM::anyModel)
relPar_arr = filter(x -> :techPre in keys(parDef_dic[x]) && (!(isempty(part.par[x].data)) || occursin("eff",string(x))), collectKeys(keys(part.par)))
parPre_dic = Dict(x => parDef_dic[x].techPre.preset for x in relPar_arr)
preType_arr = union(values(parPre_dic))
typeVar_dic = Dict(:out => [:gen, :stIntIn], :in => [:use,:stIntOut], :stIn => [:stExtIn, :stOut], :stOut => [:stExtOut, :stIntOut], :stLvl => [:stLvl])
modeDep_dic = Dict(x => DataFrame(Ts_expSup = Int[], Ts_dis = Int[], R_dis = Int[], C = Int[], Te = Int[]) for x in union(values(typeVar_dic)...))
for preType in preType_arr
# get all relevant carriers
specMode_boo = !isempty(part.modes) && !isempty(filter(y -> :M in namesSym(part.par[y].data), keys(filter(x -> x[2] == preType,parPre_dic))))
# creates table of relevant capacity resolutions and the level of pre-setting
capaLvl_df = unique(vcat(map(x -> select(x,intCol(x)),values(prepTech_dic[preType != :carrierSt ? :capaConv : :capaStSize]))...)) |> (x -> select(copy(x),intCol(x)))
# creates dataframe depending on the respective pre-set mode
if preType == :lowest
carConv_arr = union(map(x -> getfield(part.carrier,x), intersect((:gen,:use),keys(part.carrier)))...)
lowest_tup = map(x -> anyM.cInfo[x],carConv_arr) |> (y -> [maximum(getfield.(y,:tsDis)), part.disAgg ? part.balLvl.exp[2] : maximum(getfield.(y,:rDis))])
capaLvl_df[!,:lvlTs] .= lowest_tup[1]; capaLvl_df[!,:lvlR] .= lowest_tup[2]
elseif preType == :reference
ref_tup = part.balLvl.ref
if isempty(ref_tup) continue end
capaLvl_df[!,:lvlTs] .= ref_tup[1]; capaLvl_df[!,:lvlR] .= ref_tup[2];
elseif preType == :carrierIn || preType == :carrierOut || preType == :carrierSt
if preType == :carrierIn || preType == :carrierOut
car_arr = (preType == :carrierIn ? :use : :gen) |> (y -> haskey(part.carrier,y) ? collect(getfield(part.carrier,y)) : Int[])
if isempty(car_arr) continue end
capaLvl_df[!,:C] .= car_arr
capaLvl_df = flatten(capaLvl_df,:C)
else
# filter carriers that can be actively stored, although they have descendants
intC_arr = union(collect(part.actSt),map(y -> part.carrier[y],filter(x -> x in keys(part.carrier),[:stIntIn,:stIntOut])) |> (y -> isempty(y) ? Int[] : union(y...)))
capaLvl_df = replCarLeafs(capaLvl_df,anyM.sets[:C],noLeaf = intC_arr)
# filter entries that are already descendants of carrier being actively stored
unique(vcat(map(x -> filter(y -> x != y,getDescendants(x,anyM.sets[:C],true)),unique(capaLvl_df[!,:C]))...)) |> (z -> filter!(x -> !(x.C in z) || x.C in intC_arr,capaLvl_df))
car_arr = unique(capaLvl_df[!,:C])
end
resC_dic = Dict(x => anyM.cInfo[x] |> (y -> [getfield(y,:tsDis), part.disAgg ? part.balLvl.exp[2] : getfield(y,:rDis)]) for x in car_arr)
capaLvl_df = combine(x -> resC_dic[x.C[1]] |> (y -> (lvlTs = y[1], lvlR = y[2])), groupby(capaLvl_df, namesSym(capaLvl_df)))
elseif preType == :minUse || preType == :minGen
car_arr = (preType == :minUse ? :use : :gen) |> (y -> haskey(part.carrier,y) ? collect(getfield(part.carrier,y)) : Int[])
if isempty(car_arr) continue end
insertcols!(capaLvl_df,1,:C => fill(car_arr,size(capaLvl_df,1)))
capaLvl_df = flatten(capaLvl_df,:C)
reso_tup = map(x -> anyM.cInfo[x],car_arr) |> (y -> [minimum(getfield.(y,:tsDis)), part.disAgg ? part.balLvl.exp[2] : minimum(getfield.(y,:rDis))])
capaLvl_df[!,:lvlTs] .= reso_tup[1]; capaLvl_df[!,:lvlR] .= reso_tup[2];
end
# expand based on code above to full table for pre-setting of dispatch parameters
dispReso_df = expandExpToDisp(capaLvl_df,ts_dic,r_dic)
# additionally creates mode specific table in case different modes exist for the technology
if specMode_boo
dispResoM_df = copy(dispReso_df)
insertcols!(dispResoM_df,1, :M => fill(part.modes,size(dispReso_df,1)))
dispResoM_df = flatten(dispResoM_df,:M)
end
# loops over all parameters of specific pre-setting type
for parItr in keys(filter(x -> x[2] == preType,parPre_dic))
parPef_ntup = parDef_dic[parItr]
modRel_boo = specMode_boo && :M in namesSym(part.par[parItr].data)
# drops mode related parameter data that does not match the modes of the technology
if modRel_boo
filter!(x -> x.M in part.modes,part.par[parItr].data)
if isempty(part.par[parItr].data) select!(part.par[parItr].data, Not(:M)) end
end
newPar_obj, report = resetParameter(modRel_boo ? dispResoM_df : dispReso_df, part.par[parItr], part.name[end], anyM.sets, anyM.options, anyM.report, length(part.modes), haskey(newHerit_dic,preType) ? newHerit_dic[preType] : tuple())
# saves mode dependent cases
if modRel_boo && !isempty(newPar_obj.data)
mode_df = unique(filter(x -> x.M != 0, newPar_obj.data)[!,Not([:val,:M])])
# loops over all types of relevant variables (:gen, :use etc.) that have to be mode specific
for va in intersect(union(map(x -> typeVar_dic[x], parPef_ntup.techPre.mode)...),keys(part.carrier) |> (y -> isempty(intersect(y,(:stExtIn,:stIntIn))) ? y : [:stLvl,y...]))
modeItr_df = copy(mode_df)
# determines relevant carriers of variables
if :C in parPef_ntup.dim # carrier is already specified within parameter data
car_arr = unique(modeItr_df[!,:C])
else # carrier is not in parameter data, all possible carriers of respective variable need to be obtained
car_arr = collect(getfield(part.carrier,va))
modeItr_df[!,:C] .= [car_arr]
modeItr_df = flatten(modeItr_df,:C)
end
# adds temporal and spatial level to dataframe
cToLvl_dic = Dict(x => (anyM.cInfo[x].tsDis, part.disAgg ? part.balLvl.exp[2] : anyM.cInfo[x].rDis) for x in car_arr)
modeItr_df[!,:lvlTs] = map(x -> cToLvl_dic[x][1],modeItr_df[!,:C])
modeItr_df[!,:lvlR] = map(x -> cToLvl_dic[x][2],modeItr_df[!,:C])
# expands dataframe along spatial and temporal level according to resolution of respective carriers
for dim in (:R,:Ts)
dimCol = Symbol(dim,:_dis); lvl = Symbol(:lvl,dim)
dim_dic = Dict((x[dimCol],x[lvl]) => getDescendants(x[dimCol], anyM.sets[dim],false,x[lvl]) |> (y -> isempty(y) ? getAncestors(x[dimCol],anyM.sets[dim],:int,x[lvl])[end] : y)
for x in eachrow(unique(modeItr_df[!,[dimCol,lvl]])))
modeItr_df[!,dimCol] = map(x -> dim_dic[(x[dimCol],x[lvl])],eachrow(modeItr_df[!,[dimCol,lvl]]))
modeItr_df = flatten(modeItr_df[!,Not(lvl)],dimCol)
end
modeDep_dic[va] = unique(vcat(modeDep_dic[va],modeItr_df))
end
end
# set lower limit for availabilities to avoid really small but non-zero values
if parItr in (:avaConv, :avaStIn, :avaStOut, :avaStSize)
lowVal_arr = newPar_obj.data[!,:val] .< anyM.options.avaMin
newPar_obj.data[lowVal_arr,:val] .= 0.0
end
if isempty(newPar_obj.data)
delete!(part.par,parItr)
else
part.par[parItr] = newPar_obj
end
end
end
return modeDep_dic
end
# XXX pre-sets specific dispatch parameter
function resetParameter(newData_df::DataFrame, par_obj::ParElement, tStr::String, sets::Dict{Symbol,Tree}, options::modOptions, report::Array{Tuple,1},cntM_int::Int = 0, newHerit_tup::Tuple = ())
# gets dimension of search tables and parameter without mode
newData_df = select(newData_df,intersect(namesSym(newData_df),par_obj.dim))
# creates empty report that entries are written to within subprocess
if !(:M in namesSym(newData_df))
# in case modes are not being searched for, just directly set data
par_obj.data = matchSetParameter(newData_df,par_obj,sets) |> (x -> select(x,orderDim(namesSym(x))))
else
# looks up original table without applying default values
matchData1_df = matchSetParameter(newData_df,par_obj,sets,newCol = :val, useDef = false)
# filter returned table by whether a mode was specified
noMode_df = filter(r -> r.M == 0,matchData1_df)
mode_df = filter(r -> r.M != 0,matchData1_df)
# groups mode related data for further analysis
resDim_arr = filter(x -> x != :M ,intersect(par_obj.dim,namesSym(matchData1_df)))
if !isempty(mode_df)
# filters entries where there is not one value for each mode
modeGrp_gdf = groupby(mode_df, resDim_arr)
modeGrpDef_arr = filter(r -> cntM_int == size(r,1), collect(modeGrp_gdf))
if length(modeGrp_gdf.ends) > length(modeGrpDef_arr)
push!(report,(2, "parameter pre-setting", string(par_obj.name), "parameter data was not specified for all modes in some cases for $tStr, existing values were ignored"))
end
# filters entries where mode values are not distinct, reports on it and uses these entries as non-mode specific data
disMode_arr = filter(r -> length(unique(r[!,:val])) != 1, modeGrpDef_arr)
if length(modeGrpDef_arr) > length(disMode_arr)
push!(report,(2, "parameter pre-setting", string(par_obj.name), "parameter data was the same for all modes in some cases for $tStr, no differentiation between modes was applied in these cases"))
noMode_df = vcat(noMode_df, vcat(filter(r -> length(unique(r[!,:val])) == 1, modeGrpDef_arr)...) )
end
# filters data where distinct mode data is provided for all modes and expands resulting table again
finalMode_df = isempty(disMode_arr) ? filter(x -> false ,collect(modeGrp_gdf)[1]) : vcat(disMode_arr...)
else
finalMode_df = mode_df
end
# gets all data where no values were obtained successfully yet and looks them up again, applying the default value and not specifying the mode anymore
# (hence now non mode-specific parameter values for technologies with modes are taken into account => mode-specific parameter values generally overwrite non-mode specific parameter values)
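# hypothetical illustration: if a parameter is provided once without a mode and once for mode 'flex' at the
# same entry, the mode-specific value is kept for 'flex', while the mode-less value only fills entries that
# are still unmatched after the mode-specific look-up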
newSearch_df = unique(antijoin(newData_df[!,resDim_arr], vcat(finalMode_df, noMode_df)[!,Not(:val)], on = resDim_arr))
if !isempty(newSearch_df)
newSearch_df[!,:M] .= 0
matchData2_df = matchSetParameter(newSearch_df,par_obj,sets)
noMode_df = vcat(matchData2_df,noMode_df)
end
# returns tables with and without mode data to parameter object
par_obj.data = vcat(noMode_df,finalMode_df) |> (x -> select(x,orderDim(namesSym(x))))
end
# sets new inherit rules and default value
par_obj.herit = newHerit_tup
return par_obj, report
end
# XXX creates new parameter objects for discount factors from discount rates provided
function computeDisFac!(partObj::OthPart,anyM::anyModel)
# XXX discount factor for technologies
rExp_arr = union(map(x -> getfield.(getNodesLvl(anyM.sets[:R],x),:idx), unique(getfield.(values(anyM.cInfo),:rExp)))...)
discR_df = matchSetParameter(flatten(flatten(DataFrame(Ts_disSup = anyM.supTs.step, R_exp = rExp_arr),:Ts_disSup),:R_exp),partObj.par[:rateDisc],anyM.sets)
discR_df[!,:disFac] = 1 ./ (1 .+ discR_df[!,:val]).^anyM.options.shortExp
discR_df[!,:disFac] = map(x -> filter(y -> y < x.Ts_disSup ,collect(anyM.supTs.step)) |> (z -> prod(filter(y -> y.R_exp == x.R_exp && y.Ts_disSup in z, discR_df)[!,:disFac])*x.disFac),eachrow(discR_df))
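# worked numeric sketch with a hypothetical rate: for rateDisc = 0.02 and shortExp = 5 each superordinate
# timestep contributes 1/(1 + 0.02)^5 ≈ 0.9057; the map above multiplies the factors of all earlier steps
# with a step's own factor, so consecutive steps get ≈ 0.9057, then 0.9057^2 ≈ 0.8203, then ≈ 0.7430, ...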
select!(discR_df,Not(:val))
discPar_obj = copy(partObj.par[:rateDisc],rename(discR_df,:disFac => :val))
discPar_obj.name = :discFac
discPar_obj.defVal = nothing
partObj.par[:disFac] = discPar_obj
# XXX discount factor for exchange (average of from and to region)
discRExc_df = rename(copy(discR_df),:R_exp => :R_from,:disFac => :disFacFrom)
discRExc_df[!,:R_to] .= [unique(discRExc_df[!,:R_from])]
discRExc_df = flatten(discRExc_df,:R_to)
discRExc_df = innerjoin(discRExc_df,discR_df, on = [:Ts_disSup,:R_to] .=> [:Ts_disSup,:R_exp])
discRExc_df[!,:disFac] = (discRExc_df[!,:disFac] + discRExc_df[!,:disFacFrom]) * 0.5
select!(discRExc_df,Not(:disFacFrom))
discPar_obj = copy(partObj.par[:rateDisc],rename(discRExc_df,:disFac => :val))
discPar_obj.name = :disFacExc
discPar_obj.defVal = nothing
discPar_obj.dim = (:Ts_dis, :R_from, :R_to)
discPar_obj.herit = (:Ts_dis => :up, :R_from => :up, :R_to => :up, :Ts_dis => :avg_any, :R_from => :avg_any, :R_to => :avg_any)
partObj.par[:disFacExc] = discPar_obj
end
# XXX extract specified limit parameter from the limit part of the model
function getLimPar(partLim::OthPart,par_sym::Symbol, tech_tr::Tree; tech::Int = 0)
if par_sym in keys(partLim.par)
parLim_obj = copy(partLim.par[par_sym])
if :Te in namesSym(parLim_obj.data) # case for technology limits with values differentiated by tech
parLim_obj.data = filter(x -> x.Te in [[tech];getAncestors(tech,tech_tr,:int,0)], parLim_obj.data)
if isempty(parLim_obj.data)
parLim_obj = ParElement()
end
end
else
parLim_obj = ParElement()
end
return parLim_obj
end
# </editor-fold>
# <editor-fold desc="perform match between dimension tables and parameter data">
# XXX matches set with input parameters, uses inheritance rules for unmatched cases
function matchSetParameter(srcSetIn_df::DataFrame, par_obj::ParElement, sets::Dict{Symbol,Tree}; newCol::Symbol =:val, useDef::Bool = true, useNew::Bool = true)
# directly returns the search dataframe with an added empty column if it is empty itself
if isempty(srcSetIn_df)
paraMatch_df = copy(srcSetIn_df)
paraMatch_df[!,newCol] = Float64[]
return paraMatch_df
end
# directly returns default values if no data was provided for the parameter
if isempty(par_obj.data) || length(namesSym(par_obj.data)) == 1
paraMatch_df = copy(srcSetIn_df)
paraMatch_df[!,newCol] = fill(isempty(par_obj.data) ? par_obj.defVal : par_obj.data[1,:val],size(paraMatch_df,1))
filter!(x -> x[newCol] != nothing, paraMatch_df)
return paraMatch_df
end
searchCol_arr = namesSym(srcSetIn_df)
paraData_df = par_obj.data
# removes sets the parameter is not specified for from search table and condenses search table accordingly
redunSrc_arr = setdiff(searchCol_arr,namesSym(paraData_df))
searchSet_df = isempty(redunSrc_arr) ? srcSetIn_df : unique(srcSetIn_df[!,Not(redunSrc_arr)])
srcCol_arr = namesSym(searchSet_df)
# searches for matches in original data
paraMatch_df = innerjoin(searchSet_df, paraData_df; on = srcCol_arr)
# boolean that switches to true if all values were matched via inheritance
allMatch_boo = false
# checks if there are actually unmatched values before starting the inheritance process
if size(searchSet_df,1) != size(paraMatch_df,1)
noMatch_df = antijoin(searchSet_df, paraData_df; on = srcCol_arr)
if !isempty(noMatch_df)
for herit in filter(x -> x[1] in srcCol_arr, collect(par_obj.herit))
# inherit new values and check for additional matches
unmatch_arr = unique(noMatch_df[!,herit[1]])
if herit[2] == :up
newData_df = heritParameter_up(herit,unmatch_arr,paraData_df,sets)
else
newData_df = heritParameter_rest(herit,unmatch_arr,paraData_df,sets)
end
if isempty(newData_df) continue end
newMatch_df = innerjoin(noMatch_df, newData_df; on = srcCol_arr)
# report on inheritance
cntNewData_int = size(newData_df,1)
cntMatch_int = size(newMatch_df,1)
# add new rows to both table with matches and parameter data
paraData_df = vcat(paraData_df, useNew ? newData_df : antijoin(newData_df,newMatch_df, on = srcCol_arr))
paraMatch_df = vcat(paraMatch_df,newMatch_df)
# removes newly matched values from search and leaves loop if everything is matched now
if cntMatch_int == size(noMatch_df,1)
allMatch_boo = true
break
else
noMatch_df = antijoin(noMatch_df, newMatch_df; on = srcCol_arr )
end
end
# writes default values for remaining unmatched values
cntNoMatch_int = size(noMatch_df,1)
if !allMatch_boo && par_obj.defVal != nothing && useDef
defaultMatch_df = noMatch_df
defaultMatch_df[!,:val] = fill(par_obj.defVal,cntNoMatch_int)
paraMatch_df = isempty(paraMatch_df) ? defaultMatch_df : vcat(paraMatch_df,defaultMatch_df)
end
end
end
# expands table again by rows
if !isempty(redunSrc_arr)
paraMatch_df = innerjoin(paraMatch_df, srcSetIn_df; on = srcCol_arr)
end
rename!(paraMatch_df,:val => newCol)
return paraMatch_df
end
# XXX covers direct inheritance from upper nodes
function heritParameter_up(herit_par::Pair{Symbol,Symbol},unmatch_arr::Array{Int,1},paraData_df::DataFrame,sets::Dict{Symbol,Tree})
heritSetShort_sym = Symbol(split(String(herit_par[1]),"_")[1])
unmatch_set = BitSet(unmatch_arr)
unmatchChild_dic = Dict(x => intersect(unmatch_set,BitSet(getDescendants(x,sets[heritSetShort_sym],true))) for x in unique(paraData_df[!, herit_par[1]]))
# adds children where their data is missing to provided parameter data
paraDataIn_df = filter(r -> !isempty(unmatchChild_dic[getproperty(r,herit_par[1])]),paraData_df)
if isempty(paraDataIn_df) return paraDataIn_df end
paraDataIn_df[!,:child] = map(x -> unmatchChild_dic[x],paraDataIn_df[!,herit_par[1]])
paraDataIn_df = flatten(paraDataIn_df,:child)
# determines all columns for groupby statement
grpBy_arr = filter(x -> !(x in [:val,herit_par[1]]),namesSym(paraDataIn_df))
# uses value of the closest upper node with data as new data for each child
newData_df = combine(x -> maximum(x[!,herit_par[1]]) |> (z -> NamedTuple{(herit_par[1],:val)}(tuple(z,x.val[findall(x[!,herit_par[1]] .== z)][1]))), groupby(paraDataIn_df, grpBy_arr))
select!(newData_df, Not(herit_par[1]))
rename!(newData_df,:child => herit_par[1])
return newData_df
end
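# hedged usage sketch with a hypothetical tree: if a value exists for node 'europe' and its child 'germany'
# is unmatched, a call like heritParameter_up(:R_dis => :up, unmatchedIds_arr, paraData_df, sets) returns a
# new row for 'germany' carrying the value of its closest ancestor that provides data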
# XXX covers all inheritance from nodes below unmatched nodes
function heritParameter_rest(herit_par::Pair{Symbol,Symbol},unmatch_arr::Array{Int,1},paraData_df::DataFrame,sets::Dict{Symbol,Tree})
# XXX reads out specific inheritance options
heritSet_sym = herit_par[1]
heritSetShort_sym = Symbol(split(String(heritSet_sym),"_")[1])
splHerit_arr = split(String(herit_par[2]),"_")
heritAgg_sym = Symbol(splHerit_arr[1])
heritFull_boo = Symbol(splHerit_arr[2]) == :full
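# sketch of the decomposition above: a rule value like :sum_full splits into the aggregation :sum with
# full = true (all children must carry a value), while :avg_any splits into :avg with full = false
# (any subset of children suffices)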
# XXX initialize values for loop (removes and adds val again to control its position)
# dimensions not involved in inheritance process
noHeritSet_tup = tuple(setdiff(namesSym(paraData_df),[:val,herit_par[1]])...)
colName_tup = tuple(herit_par[1],noHeritSet_tup...)
newData_df = vcat(colName_tup...,:val) |> (y -> select(DataFrame(Dict(x => x != :val ? Int[] : Float64[] for x in y)),y))
# gets all children of unmatched ids to filter relevant part of tree
childrenUnmatch_arr = union(map(x -> BitSet(getDescendants(x,sets[heritSetShort_sym],true)),unmatch_arr)...)
paraDataFilt_df = filter(r -> getproperty(r,heritSet_sym) in childrenUnmatch_arr,paraData_df)
if isempty(paraDataFilt_df) return newData_df end
paraDataSrc_df = paraDataFilt_df
# loops going upwards within tree trying to obtain new values
newVal_boo = true
while newVal_boo
paraDataSrc_df[!,:pare] = map(x -> sets[heritSetShort_sym].up[x],paraDataSrc_df[!,heritSet_sym])
# saves all children of parents currently used in the grouped table within a dictionary to use below within loop over rows
if heritFull_boo
childPar_dic = Dict(x => getDescendants(x,sets[heritSetShort_sym], false, x != 0 ? sets[heritSetShort_sym].nodes[x].lvl+1 : 1) for x in unique(paraDataSrc_df[!,:pare]))
end
# groups table by parents and unused dimensions according to aggregation rule (sum, average, or unique)
if heritAgg_sym == :sum
paraDataGrp_df = combine(groupby(paraDataSrc_df, vcat(noHeritSet_tup...,:pare)), :val => (x -> sum(x)) => :valAgg)
elseif heritAgg_sym == :avg
paraDataGrp_df = combine(groupby(paraDataSrc_df, vcat(noHeritSet_tup...,:pare)), :val => (x -> Statistics.mean(x)) => :valAgg)
else
paraDataGrp_df = dropmissing(combine(groupby(paraDataSrc_df, vcat(noHeritSet_tup...,:pare)), :val => (x -> length(unique(x)) == 1 ? x[1] : missing) => :valAgg))
end
existKey_arr = Tuple.(eachrow(newData_df[!,Not(:val)]))
if heritFull_boo
full_dic = groupby(paraDataSrc_df,vcat(noHeritSet_tup...,:pare)) |> (y -> Dict(x.pare[1] => collect(x[!,heritSet_sym]) for x in y))
end
# loops through rows of grouped table to see if any row can be converted into new data
newVal_boo = false
for row in eachrow(paraDataGrp_df)
# either "full" is not used or for all children an initial value was provided
if !heritFull_boo || isempty(setdiff(childPar_dic[row.pare],full_dic[row.pare]))
#checkNew_tup = NamedTuple{Tuple(vcat(heritSet_sym,noHeritSet_tup...))}((row.pare,map(x -> getproperty(row,x),noHeritSet_tup)...))
checkNew_tup = (row.pare,map(x -> getproperty(row,x),noHeritSet_tup)...)
# writes values if non-existing in table so far
if !(checkNew_tup in existKey_arr)
newEntry_tup = (row.pare,map(x -> getproperty(row,x),noHeritSet_tup)...,row.valAgg)
newVal_boo = true
push!(newData_df,newEntry_tup)
end
end
end
# adds parent column to new data and filters values to consider for further inheritance (= only children of unmatched values)
paraDataSrc_df = filter(r -> getproperty(r,heritSet_sym) in childrenUnmatch_arr,newData_df)
end
return newData_df
end
# </editor-fold>
# <editor-fold desc="top-level and general read-in functions for sets and parameters">
# XXX read-in all set files
function readSets!(files_dic::Dict{String,Array{String,1}},anyM::anyModel)
# creates relevant sets, manually adds :mode and assigns short names
set_arr = append!(map(x -> Symbol(x[findfirst("set_",x)[1]+4:end-4]),files_dic["set"]), [:mode, :id])
setLngShrt_dic = Dict(:timestep => :Ts, :region => :R, :carrier => :C, :technology => :Te, :mode => :M, :id => :id)
setData_dic = Dict{Symbol,DataFrame}()
anyM.sets = Dict{Symbol,Tree}()
# loop over sets read-in data and create tree objects
for setFile in files_dic["set"]
setLong_sym = getindex(setFile[findfirst("set_",setFile)[1]+4:end-4] |> (y -> filter(x -> occursin(string(x),y),collectKeys(keys(setLngShrt_dic)))),1)
setShort_sym = get!(setLngShrt_dic,setLong_sym,setLong_sym)
if setShort_sym in keys(anyM.sets)
push!(anyM.report,(3,"set read-in",string(setLong_sym),"multiple input files provided for set"))
end
setData_dic[setShort_sym] = convertReadIn(CSV.read(setFile, DataFrame ;delim = anyM.options.csvDelim[1]),setFile,set_arr,setLngShrt_dic,anyM.report,anyM.lock)
anyM.sets[setShort_sym] = createTree(setData_dic[setShort_sym],setLong_sym,anyM.report)
produceMessage(anyM.options,anyM.report, 3," - Read-in set file: ",setFile)
end
# manually adds mode set, creates a top node for all modes to allow for aggregation later
if :mode in namesSym(setData_dic[:Te])
modes_df = DataFrame(mode_1 = vcat(map(y -> split(replace(y," " => ""),";"),filter(x -> x != "",setData_dic[:Te][!,:mode]))...))
else
modes_df = DataFrame(mode_1 = String[])
end
anyM.sets[:M] = createTree(modes_df,:mode,anyM.report)
# reports, if a required set was not defined or if non-unique carrier names were defined
for set in filter(x -> !(x in (:mode, :id)), collectKeys(keys(setLngShrt_dic)))
if !(setLngShrt_dic[set] in keys(anyM.sets))
push!(anyM.report,(3,"set read-in",string(set),"no file provided to define set"))
elseif set == :carrier || set == :technology
# reports error if carrier names are non-unique
strSet_arr = getfield.(values(anyM.sets[setLngShrt_dic[set]].nodes),:val)
if length(strSet_arr) != length(unique(strSet_arr))
push!(anyM.report,(3,"set read-in",set,"non-unique $set names detected"))
end
end
end
produceMessage(anyM.options,anyM.report, 2," - Read-in all set files")
return setData_dic
end
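# hedged sketch of an expected set file (hypothetical content): a file 'set_region.csv' such as
# region_1,region_2
# europe,germany
# europe,france
# is read into the tree anyM.sets[:R] with 'europe' on level 1 and its two children on level 2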
# XXX read-in all parameter files, create model parts and assign parameter
function readParameters!(files_dic::Dict{String,Array{String,1}},setData_dic::Dict{Symbol,DataFrame},anyM::anyModel)
# XXX read-in parameters (do not add to parts yet)
paraTemp_dic = Dict{String, Dict{Symbol, DataFrame}}()
# creates relevant sets, manually adds :mode and assigns short names
set_arr = append!(map(x -> Symbol(x[findfirst("set_",x)[1]+4:end-4]),files_dic["set"]), [:mode, :id])
setLngShrt_dic = Dict(:timestep => :Ts, :region => :R, :carrier => :C, :technology => :Te, :mode => :M, :id => :id)
# read-in parameter files and convert their content
for parFile in files_dic["par"]
parData_df = convertReadIn(CSV.read(parFile, DataFrame;delim = anyM.options.csvDelim[1]),parFile,set_arr,setLngShrt_dic,anyM.report,anyM.lock,anyM.sets)
if isempty(parData_df) || any(getindex.(anyM.report,1) .== 3) continue end
para_obj = writeParameter(parData_df, anyM.sets, setLngShrt_dic, parFile, anyM.report, anyM.lock)
lock(anyM.lock)
paraTemp_dic[parFile] = para_obj
unlock(anyM.lock)
produceMessage(anyM.options,anyM.report, 3," - Read-in parameter file: ",parFile)
end
produceMessage(anyM.options,anyM.report, 2," - Read-in all parameter files")
return paraTemp_dic
end
# XXX reads input folders for all 'csv' or 'jl' files starting with 'set', 'par', 'var' and 'eqn'
function readInputFolder(inputFolders::Array{String,1},files_dic::Dict{String,Array{String,1}} = Dict(b => String[] for b in ("set","par","var","eqn")))
# loops over main folders provided in constructor
for folder in inputFolders
for file in readdir(folder)
if occursin(".",file) fileType_str = file[findfirst(".",file)[1]+1:end] else fileType_str = "" end
fileGrp_str = file[1:3]
fileDir_str = string(folder,"/",file)
# loops over subfolders, if any exist
if (fileType_str == "csv" && fileGrp_str in ("set","par","var","eqn"))
files_dic[fileGrp_str] = push!(files_dic[fileGrp_str],fileDir_str)
elseif !isfile(fileDir_str)
files_dic = readInputFolder([fileDir_str],files_dic)
end
end
end
return files_dic
end
# XXX filters missing values and adjusts data according to "all" statements
function convertReadIn(readIn_df::DataFrame,fileName_str::String,set_arr::Array{Symbol},setLngShrt_dic::Dict{Symbol,Symbol},report::Array{Tuple,1},lock_::ReentrantLock,sets::Dict{Symbol,Tree} = Dict{Symbol,Tree}())
strTypes_arr = [String, String1, String3, String7, String15, String31, String63, String127, String255]
setNames_arr = filterSetColumns(readIn_df,set_arr)
oprNames_arr = filterSetColumns(readIn_df,[:parameter,:variable,:value, :id])
readInColAll_tup = tuple(namesSym(readIn_df)...)
# drops irrelevant columns that do not relate to a set or an operator or are completely empty
select!(readIn_df, Not(setdiff(readInColAll_tup,vcat(setNames_arr[1],setNames_arr[2],oprNames_arr[1]))))
emptyCol_arr = filter(x -> eltype(readIn_df[!,x]) == Missing,namesSym(readIn_df))
setNames_arr[1] = setdiff(setNames_arr[1],emptyCol_arr)
select!(readIn_df, Not(emptyCol_arr))
# filter value columns
readInCol_arr = namesSym(readIn_df)
valCol_arr = filter(x -> occursin("value",string(x)),readInCol_arr)
# XXX convert missing values and change array container type for editing later
for j in 1:size(readIn_df,2)
col = collect(readIn_df[!,j])
if eltype(col) >: Int
col = replace(string.(col),"missing" => "")
if readInCol_arr[j] in valCol_arr
replace!(col,"" => "NaN")
col = parse.(Float64,col)
end
readIn_df[!,j] = col
elseif eltype(col) >: Missing
str_type = typeintersect(eltype(col), Union{String, String1, String3, String7, String15, String31, String63, String127, String255})
act_type = any(eltype(col) .>: strTypes_arr) ? str_type : Float64
# convert column at least to String3, because String1 cannot take empty string
if act_type == String1
col = convert(Array{Union{Missing, String3},1},col)
readIn_df[!,j] = convert(Array{Union{Missing, String3},1},readIn_df[!,j])
end
# convert remaining columns to strings and replace 'missing' with empty string
col[findall(ismissing.(col))] .= act_type == str_type ? "" : NaN
readIn_df[!,j] = convert(Array{act_type == String1 ? String3 : act_type,1},col)
else
readIn_df[!,j] = col
end
end
# XXX check types of columns
if !isempty(valCol_arr) && any(map(x -> x in strTypes_arr, map(x -> eltype(readIn_df[!,x]), findall(map(x -> x in valCol_arr, readInCol_arr)))))
lock(lock_)
push!(report,(3,"parameter read-in",fileName_str,"detected strings in value column, file was not read-in"))
unlock(lock_)
return DataFrame()
end
for supCol in findall(.!map(x -> x in strTypes_arr, eltype.(eachcol(readIn_df))))
if !occursin("value",string(readInCol_arr[supCol]))
lock(lock_)
push!(report,(3,"parameter read-in",fileName_str,"entries in $(readInCol_arr[supCol]) could not be converted to strings (probably provided as floats), file was not read-in"))
unlock(lock_)
return DataFrame()
end
end
# XXX rewrites rows with all commands into full format
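# hedged sketch of the supported syntax (hypothetical node names): a cell containing 'all' expands the row
# to every node on the column's level, 'all(east:west)' expands to the range between the two named nodes,
# and 'all(east,west)' expands to exactly the listed nodes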
for col in setNames_arr[1]
colVal_arr = readIn_df[!, col]
# check column for keywords
rowsAll_arr = map(x -> length(x) >= 3 && lowercase(x[1:3]) == "all",colVal_arr)
if all(!,rowsAll_arr) continue end
# determine relevant reference for "all", if parameters are read in
if !isempty(sets) # take reference from readin sets
specSet_arr = split(String(col),"_")
relSet_obj = sets[setLngShrt_dic[Symbol(specSet_arr[1])]]
colValUni_arr = unique(map(x -> x.val,getNodesLvl(relSet_obj, parse(Int,specSet_arr[2]))))
else # take reference from other column values, relevant when sets are currently read in
colValUni_arr = sort(unique(filter(x -> !isempty(x),colVal_arr[(!).(rowsAll_arr)])))
end
readIn_df[!,col] = convert.(String,readIn_df[!,col])
# loop over rows with all
for row in eachrow(readIn_df[rowsAll_arr,:])
# append new rows to dataframe
addRow_df = row
allInfo_str = reduce(replace, ["all"=>"", "("=>"", ")"=>""], init=addRow_df[col])
if occursin(":",allInfo_str)
allVal_arr = split(allInfo_str,":")
rplVal_arr = colValUni_arr[findall(x->x==allVal_arr[1], colValUni_arr)[1]:findall(x->x==allVal_arr[2], colValUni_arr)[1]]
# reports if values within all expression could not be matched to sets
if length(rplVal_arr) != length(allVal_arr)
lock(lock_)
push!(report,(2,"parameter read-in",fileName_str,"at least one value within all expression $(allInfo_str) could not be matched to an existing set"))
unlock(lock_)
end
elseif occursin(",",allInfo_str)
allVal_arr = split(allInfo_str,",")
rplVal_arr = colValUni_arr[map(x -> in(x,allVal_arr),colValUni_arr)]
# reports if values within all expression could not be matched to sets
if length(rplVal_arr) != length(allVal_arr)
lock(lock_)
push!(report,(2,"parameter read-in",fileName_str,"at least one value within all expression $(allInfo_str) could not be matched to an existing set"))
unlock(lock_)
end
else
rplVal_arr = colValUni_arr
end
for addVal in rplVal_arr
addRow_df[col] = addVal
push!(readIn_df, [addRow_df[col] for col in readInCol_arr])
end
end
# remove initial rows with all
deleteat!(readIn_df,findall(rowsAll_arr))
end
# XXX convert column names if sets are defined for multiple instances (e.g. two regions in case of trade related parameters)
if split(fileName_str,"/")[end][1:3] == "par"
# creates a dictionary that assigns everything after the set name separated with a "_" to the respective set
splitCol_arr = map(x -> split(String(x),"_"),setdiff(namesSym(readIn_df),oprNames_arr[1]))
setCol_arr = unique(map(x -> Symbol(x[1]),splitCol_arr))
grpCol_dic = Dict(x => map(z -> z[2:end],filter(y -> String(x) == y[1],splitCol_arr)) for x in setCol_arr)
# loop over dictionary to check for irregular names and two sets having two columns assigned
letters_arr = ("b","c","d","e","f","g","h")
for set in keys(grpCol_dic)
newCol_dic = Dict{Symbol,Symbol}()
if any(map(x -> tryparse(Int,x),vcat(grpCol_dic[set]...)) .== nothing)
lock(lock_)
push!(report,(3,"parameter read-in",fileName_str,"column for set $(set) does not contain a number after _"))
unlock(lock_)
continue
end
# get the unique number of "_" that appear in columns assigned to the respective set
# if the csv file has several columns of the same name, Julia adds a "_1" to the second column when reading it in as a dataframe => so this checks if the set is defined for multiple instances
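# hypothetical illustration: a file providing two region instances with columns
# 'region_1,region_2,region_1,region_2' is read in as 'region_1,region_2,region_1_1,region_2_1'; the loop
# below then renames the second group to 'region_b_1' and 'region_b_2'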
uniLen_arr = unique(map(x -> length(x),grpCol_dic[set]))
if length(uniLen_arr) == 2
setNumbers_arr = map(x -> parse(Int,x[1]),grpCol_dic[set])
switchBool_arr = fill(false,length(setNumbers_arr))
for i in 2:length(setNumbers_arr)
if setNumbers_arr[i-1] >= setNumbers_arr[i]
switchBool_arr[i] = true
end
end
switchInt_arr = findall(switchBool_arr)
z = 0
for (idx,k) in enumerate(grpCol_dic[set])
if idx in switchInt_arr
z = z+1
end
if z == 0
newCol_dic[Symbol(join([set,k...],"_"))] = Symbol(set,"_",k[1])
else
newCol_dic[Symbol(join([set,k...],"_"))] = Symbol(set,"_",letters_arr[z],"_",k[1])
end
end
DataFrames.rename!(readIn_df,newCol_dic)
end
end
end
return readIn_df
end
# </editor-fold>
# <editor-fold desc="creation of tree objects for sets">
# XXX creates tree object for set
function createTree(readIn_df::DataFrame, setLoad_sym::Symbol, report::Array{Tuple,1})
setLoad_str = string(setLoad_sym)
height_int = maximum((map(x -> parse(Int,x[end]), filter(x-> (tryparse(Int,string(x[end])) != nothing) && x[1:minimum([length(x),length(setLoad_str)])] .== setLoad_str,[String(namesSym(readIn_df)[i]) for i = 1:size(readIn_df,2)]))))
# checks if set definitions are unique or if one set is defined multiple times
if setLoad_sym != :mode
relSet_df = select(readIn_df, map(x -> setLoad_str * "_" * string(x), 1:height_int))
if size(relSet_df,1) != size(unique(relSet_df),1)
push!(report,(2,"set read-in",setLoad_str,"non-unique set definition provided, only the first set will be considered"))
end
end
# create tree object and add the top node
tree_obj = Tree()
tree_obj.nodes[0] = Node(0,"none",0,1,Int[])
# writes values of first column
firstCol_sym = Symbol(setLoad_str,"_1")
topNodes_arr = filter(x -> !isempty(x),unique(hcat(map(x -> readIn_df[!,namesSym(readIn_df) .== firstCol_sym][!,x],1:sum(namesSym(readIn_df) .== firstCol_sym))...)))
for (idx, node) in enumerate(sort(topNodes_arr))
tree_obj.nodes[idx] = Node(idx,node,1,idx,Int[])
tree_obj.srcTup[(node,)] = [idx]
tree_obj.up[idx] = 0
end
tree_obj.nodes[0].down = collect(keys(tree_obj.up))
# adds dictionary for occurrence of single strings
for v in getNodesLvl(tree_obj, 1)
a = v.val
if haskey(tree_obj.srcStr,(a,1))
push!(tree_obj.srcStr[(a,1)],v.idx)
else
tree_obj.srcStr[(a,1)] = [v.idx]
end
end
# loop over subsequent columns and add respective tree levels
for i in 2:height_int
createTreeLevel!(readIn_df, tree_obj, setLoad_str, i, report)
end
# adds max level
tree_obj.height = height_int
return tree_obj
end
# XXX adds nodes on level i to tree object
function createTreeLevel!(readIn_df::DataFrame, tree_obj::Tree, setLoad_str::String, i::Int, report::Array{Tuple,1})
colNames_arr = filter(x -> occursin(setLoad_str,string(x)), namesSym(readIn_df))
loLvl_Sym = Symbol(setLoad_str,"_",i)
# removes upper columns with empty values only
grpCol_arr = filter(x -> x in colNames_arr[1:(i-1)], colNames_arr)
grpIn_df = unique(readIn_df[readIn_df[!,loLvl_Sym] .!= "",filter(x -> x in colNames_arr[1:i], colNames_arr)])
grpRel_arr = setdiff(grpCol_arr,filter(x -> [""] == unique(grpIn_df[!,x]), grpCol_arr))
select!(grpIn_df,Not(setdiff(grpCol_arr,grpRel_arr)))
# provides the nodes of the lower level grouped by the upper nodes
lowerNodes_gdf = groupby(grpIn_df,grpRel_arr)
# checks for nodes without any upper node assigned
noUp_arr = findall(map(x -> all(x .== ""), [collect(parent(lowerNodes_gdf)[i, groupcols(lowerNodes_gdf)]) for i in lowerNodes_gdf.starts]))
up_arr = setdiff(1:length(lowerNodes_gdf),noUp_arr)
if !isempty(noUp_arr)
noUpVal_arr = setdiff(union(map(x -> collect(x[!,i]),collect(lowerNodes_gdf[noUp_arr]))...),union(map(x -> collect(x[!,i]),collect(lowerNodes_gdf[up_arr]))...))
for i in noUpVal_arr
push!(report,(3,"set read-in",setLoad_str,"node named $(i) could not be assigned to an upper node"))
end
end
# XXX assigns the upper nodes by id to strings of corresponding lower nodes
startLvl_int = parse(Int,string(grpRel_arr[1])[end])
upToLow_dic = Dict(lookupTupleTree(tuple(collect(lowerNode[1,grpRel_arr])...), tree_obj,startLvl_int)[1] => lowerNode[!,loLvl_Sym] for lowerNode in lowerNodes_gdf[up_arr])
# XXX iterates over dict to write new nodes into tree
createNodes!(upToLow_dic,tree_obj,i)
# adds dictionary for occurrence of single strings
for v in getNodesLvl(tree_obj, i)
a = v.val
if haskey(tree_obj.srcStr,(a,i))
push!(tree_obj.srcStr[(a,i)],v.idx)
else
tree_obj.srcStr[(a,i)] = [v.idx]
end
end
end
# XXX create specific node on branch
function createNodes!(upToLow_dic::Dict,tree_obj::Tree,i::Int)
upToLowSort_dic = Dict(map(x -> x => upToLow_dic[x] ,sort(collect(keys(upToLow_dic)))))
up_arr = sort(collect(keys(upToLowSort_dic)))
for upperNodeId in (i == 2 ? up_arr : sortSiblings(up_arr,tree_obj))
numRow_int = length(tree_obj.nodes) -1
exUp_int = length(tree_obj.nodes[upperNodeId].down)
for (idx, lowerNode) in enumerate(sort(upToLowSort_dic[upperNodeId]))
newIdx_int = numRow_int + idx
tree_obj.nodes[newIdx_int] = Node(newIdx_int,lowerNode,i,idx+exUp_int,Int[])
tree_obj.up[newIdx_int] = upperNodeId
keyStr_tup = fill("",i)
keyStr_tup[i] = lowerNode
foreach(x -> keyStr_tup[x[2]] = tree_obj.nodes[x[1]].val, getAncestors(newIdx_int,tree_obj,:tup,1))
tree_obj.srcTup[tuple(keyStr_tup...)] = [newIdx_int]
end
tree_obj.nodes[upperNodeId].down = union(tree_obj.nodes[upperNodeId].down,collect((numRow_int+1):(numRow_int+length(upToLowSort_dic[upperNodeId]))))
end
end
# </editor-fold>
# <editor-fold desc="read-in of parameter data">
# XXX reads-in parameter data for respective sheet
function writeParameter(parData_df::DataFrame, sets::Dict{Symbol,Tree}, setLngShrt_dic::Dict{Symbol,Symbol}, fileName_str::String, report::Array{Tuple,1},lock_::ReentrantLock)
setShrtLng_dic = Dict(value => key for (key, value) in setLngShrt_dic)
set_arr = vcat(collect(setShrtLng_dic[key] for key in keys(sets))...,:id)
setNames_arr = filterSetColumns(parData_df,set_arr)[1]
para_dic = Dict{Symbol, DataFrame}()
# creates array of all levels provided per set grouped by set
setIndex_arr = map(setdiff(setNames_arr,set_arr)) do x
splitSet = split(String(x),"_")
return length(splitSet) == 1 ? (Symbol(splitSet[1]),1) : (Symbol(join(splitSet[1:end-1],"_")),parse(Int,splitSet[end]))
end
setIndex_arr = map(y -> (y,map(w -> w[2],filter(z -> z[1] == y,setIndex_arr))),unique(map(x -> x[1],setIndex_arr)))
# creates array that is later edited to lookup and save set values, entries: set, blank for set values, levels, start level
setIni_arr = [parEntry(z[1],initializeLookup(maximum(z[2])-minimum(z[2])+1),z[2],minimum(z[2])) for z in setIndex_arr]
# creates special entry for sets with only one level, because they do not need to have a number at the end
for i in intersect(set_arr,setNames_arr) push!(setIni_arr,parEntry(i,String[""],Int[],1)) end
# throws error, if column level exceeds level of the respective set used
for ele in setIndex_arr
set = Symbol(split(string(ele[1]),"_")[1]) # extracts just the actual set name, if it has a letter at the end, because the set is used multiple times
if set != :id && sets[setLngShrt_dic[set]].height < maximum(ele[2])
lock(lock_)
push!(report,(2,"parameter read-in",fileName_str,"columns provided for $(ele[1]) exceed level of definition, parameter input ignored"))
unlock(lock_)
return para_dic
end
end
# assigns relevant columns to sets
setCol_dic = Dict{Symbol,Array}()
for i in setIni_arr
if isempty(i.lvl)
setCol_dic[i.colSet] = [i.colSet]; i.lvl = [1]
else
setCol_dic[i.colSet] = [Symbol(i.colSet,"_",j) for j in i.lvl]
end
end
# determines relevant parameter/value columns
oprNames_arr = filterSetColumns(parData_df,[:parameter,:variable,:value],true)[1]
oprLvl_arr = filter(x -> x != nothing,map(x -> tryparse(Int,split(x,"_")[end]),oprNames_arr))
parVal_arr = isempty(oprLvl_arr) ? [[:parameter,:value]] : [[Symbol("parameter_",j),Symbol("value_",j)] for j in unique(oprLvl_arr)]
# converts parameter columns to symbols
for i in parVal_arr parData_df[!,i[1]] = map(x -> Symbol(x),parData_df[!,i[1]]) end
# loop over rows to read respective parameter values
convertParameter!(parData_df,sets,setIni_arr,parVal_arr,para_dic,setCol_dic,setLngShrt_dic,fileName_str,report,lock_)
return para_dic
end
# XXX gets idx from set names and orders all data in dataframe for respective parameter
function convertParameter!(parData_df::DataFrame,sets::Dict{Symbol,Tree},setIni_arr::Array{parEntry,1},parVal_arr::Array{Array{Symbol,1},1},para_dic::Dict{Symbol,DataFrame},setCol_dic::Dict{Symbol,Array},setLngShrt_dic::Dict{Symbol,Symbol},fileName_str::String,report::Array{Tuple,1},lock_::ReentrantLock)
setShrtLng_dic = Dict(value => key for (key, value) in setLngShrt_dic)
for row in eachrow(parData_df)
setId_dic = Dict{Symbol,Union{Int,Array{Int,1}}}()
# XXX obtains node ids for row
# overwrites default values for specific row in setIni_arr
for i in setIni_arr, (index,j) in enumerate(i.lvl)
i.entry[j+1-i.startLvl] = row[setCol_dic[i.colSet][index]]
end
# extracts specific set values and looks them up to obtain the respective node ids, in case of an id column just directly writes value
relSets_arr = filter(x ->!all(("" .== x.entry) .| (false .== x.entry)),setIni_arr)
for ele in relSets_arr
if ele.colSet == :id
setId_dic[:id] = parse(Int,ele.entry[1])
else
split_arr = split(string(ele.colSet),"_")
setShort_sym = setLngShrt_dic[Symbol(split_arr[1])]
saveDic_sym = length(split_arr) == 1 ? setShort_sym : Symbol(setShort_sym,"_",split_arr[2])
setId_dic[saveDic_sym] = lookupTupleTree(tuple(ele.entry...),sets[setShort_sym],ele.startLvl)
end
end
# goes to next iteration and writes report, if any set used is undefined
if Int[] in values(setId_dic)
undefinedDim_arr = map(filter(x -> setId_dic[x] == Int[],collect(keys(setId_dic)))) do x
split_arr = split(String(x),"_")
setName = setShrtLng_dic[Symbol(split_arr[1])]
return length(split_arr) == 1 ? setName : Symbol(setName,"_",split_arr[2])
end
undefinedSets_arr = map(y -> join(map(z -> string(y.entry[z]," (lvl ",y.lvl[z],")") ,1:length(y.lvl))," > "),filter(x -> x.colSet in undefinedDim_arr,relSets_arr))
lock(lock_)
for undef in undefinedSets_arr
push!(report,(2,"parameter read-in",fileName_str,"values provided for undefined set $(undef...)"))
end
unlock(lock_)
continue
end
# creates all possible combinations of found values
agnNodes_mat = []
for i in Iterators.product(collect(values(setId_dic))...) push!(agnNodes_mat,collect(i)) end
ordAgnNodes_ord = hcat(agnNodes_mat...)
addEntry_df = DataFrame()
for (index,y) in enumerate(keys(setId_dic)) addEntry_df[!,y] = ordAgnNodes_ord[index,:] end
# XXX loops over parameter/value columns, prepares and writes values
for i in parVal_arr
# extract parameter type and value
par_sym = row[i[1]]
if par_sym == Symbol()
continue
end
# adds values to dataframe
if isempty(addEntry_df)
if isnan(row[i[2]]) continue end
addEntry_df = DataFrame(val = row[i[2]])
else
if isnan(row[i[2]]) continue end
addEntry_df[!,:val] .= row[i[2]]
end
# creates empty dataframe for parameter, if non-existent so far
if !in(par_sym,keys(para_dic)) para_dic[par_sym] = DataFrame(val = Float64[]) end
# adds 0 to dataframe for sets the parameter depends on, but that don't appear in the current file/row
for missKey in setdiff(namesSym(para_dic[par_sym]),namesSym(addEntry_df)) addEntry_df[!,missKey] .= 0 end
# adds new column to dataframe for respective parameter if required
rows_int = nrow(para_dic[par_sym])
for key in setdiff(namesSym(addEntry_df),namesSym(para_dic[par_sym]))
para_dic[par_sym][!,key] = zeros(Int, rows_int)
end
select!(addEntry_df, namesSym(para_dic[par_sym]))
append!(para_dic[par_sym],addEntry_df)
end
end
end
# XXX filters all columns of dataframe that are related to the sets
function filterSetColumns(input_df::DataFrame,input_arr::Array{Symbol},outStr_boo::Bool = false)
colNames_arr = [String(namesSym(input_df)[i]) for i = 1:size(input_df,2)]
# filters columns that relate to input array and further splits them based on "_" separators
inRelColNames_arr = collect(Iterators.flatten(map(y -> filter(x -> x[1:minimum([length(y),length(x)])] == y, colNames_arr),map(x-> string(x),input_arr))))
# columns that relate to input because of numbering or identity
ctrColNames_arr = vcat(filter(x -> isa(tryparse(Int,x[end:end]),Int),inRelColNames_arr),intersect(colNames_arr,map(x -> String(x),input_arr)))
# remaining columns, solely used to filter mapping set files
mapColNames_arr = setdiff(inRelColNames_arr,ctrColNames_arr)
return_arr = [ctrColNames_arr, mapColNames_arr]
# convert to symbol unless optional input is set to true
if !outStr_boo return_arr = map(y -> map(x -> Symbol(x),y),return_arr) end
return return_arr
end
# XXX initializes array used to look up set values in tree
function initializeLookup(size_int::Int)
Ini_arr = Array{String}(undef,size_int)
Ini_arr .= ""
return Ini_arr
end
# </editor-fold>
# XXX finds provided string tuple in tree structure and returns node id (or false); the tuple does not need to start at the top level of the tree, in that case the function can return an array instead of a number
function lookupTupleTree(input_uni::Tuple{Vararg{Union{InlineString,String},N} where N},tree_obj::Tree, startLvl_int::Int= 1)
if isempty(tree_obj.nodes) return false end
# find leading and trailing empty entries
firstVal_int = findfirst(x -> x != "",input_uni)
lastVal_int = findlast(x -> x != "",input_uni)
# adjust input uni and start level according to removed values
startLvl_int = plus(firstVal_int,startLvl_int) - 1
input_uni = input_uni[firstVal_int:lastVal_int]
gap_arr = findall(input_uni .== "")
if startLvl_int == 1 && isempty(gap_arr)
return getDicEmpty(tree_obj.srcTup,input_uni)
else
noGap_arr = reverse(setdiff(1:length(input_uni),gap_arr))
# initialize by searching for last entry in input tuple
crtLvl_int = noGap_arr[1] + startLvl_int - 1
found_arr = getDicEmpty(tree_obj.srcStr,(input_uni[noGap_arr[1]],crtLvl_int))
# checks which nodes found initially actually comply with rest of input_uni
for i in noGap_arr[2:end]
crtLvlItr_int = i + startLvl_int - 1
found_arr = getDicEmpty(tree_obj.srcStr,(input_uni[i],crtLvlItr_int)) |> (y -> filter(x -> goUp(x,tree_obj.up,crtLvl_int-crtLvlItr_int,tree_obj.nodes) in y,found_arr))
end
end
return found_arr
end
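# illustrative usage sketch (names and ids assumed, not part of the package): for a tree holding the
# branch "Europe" > "Germany" on levels 1 and 2, a complete tuple starting at the top level is resolved
# via the srcTup dictionary, while a partial tuple falls back on srcStr plus filtering of ancestors:
#   lookupTupleTree(("Europe","Germany"), tree_obj)   # -> e.g. [2], id of the "Germany" node
#   lookupTupleTree(("Germany",), tree_obj, 2)        # -> ids of all level-2 nodes named "Germany"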
# XXX sorts input nodes according to their tree position
function sortSiblings(nodesIndex_arr::Array{Int,1},tree_obj::Tree)
hertiLine_mat = map(x -> getAncestors(x, tree_obj,:tup),nodesIndex_arr)
rowNum_int = length(nodesIndex_arr)
colNum_int = maximum([hertiLine_mat[i][1][2] .+ 1 for i = 1:rowNum_int])
herti_mat = zeros(Int64, rowNum_int, colNum_int)
for (row, row_arr) in enumerate(hertiLine_mat)
for ele in row_arr
herti_mat[row,tree_obj.nodes[ele[1]].lvl+1] = ele[1]
end
end
order_mat = sortslices(hcat(nodesIndex_arr,herti_mat), dims=1, by = x-> x[2:end,1])
return order_mat[:,1]
end
# XXX goes up the tree from x for the number of steps defined by steps_int
function goUp(x::Int,up::Dict{Int,Int},steps_int::Int,nodes_dic::Dict{Int,Node})
startLvl_int = nodes_dic[x].lvl
steps_ctr = 0
while steps_ctr < steps_int
x = up[x]
steps_ctr = startLvl_int - nodes_dic[x].lvl
end
return x
end
# XXX gets all (id, level) combinations of a node's ancestors; if the node is already on the top level it returns itself, and if limitLvl_int is set parents are only provided up to that level
getAncestors(startNode_int::Int,tree_obj::Tree,retType::Symbol,limitLvl_int::Int=0) = getAncestors(startNode_int::Int,tree_obj::Tree,Val{retType}(),limitLvl_int::Int)
# XXX returns an array of tuples with ancestors (idx,level)
function getAncestors(startNode_int::Int,tree_obj::Tree,retType::Val{:tup},limitLvl_int::Int=0)
# gets level of start node
currLvl_int = tree_obj.nodes[startNode_int].lvl
# return array, if no further going up the tree is required
if currLvl_int == 0 || limitLvl_int == currLvl_int return [(startNode_int, currLvl_int)] end
# initialize move up the tree
heri_arr = Array{Tuple{Int,Int},1}()
next = startNode_int
# loops up the tree and obtains (id, level) combinations
while limitLvl_int < currLvl_int
next = tree_obj.up[next]
currLvl_int = tree_obj.nodes[next].lvl
push!(heri_arr, (next, currLvl_int))
end
return heri_arr
end
# XXX returns an array of integers with ancestors
function getAncestors(startNode_int::Int,tree_obj::Tree,retType::Val{:int},limitLvl_int::Int=0)
# gets level of start node
currLvl_int = tree_obj.nodes[startNode_int].lvl
# return array, if no further going up the tree is required
if currLvl_int == 0 || limitLvl_int == currLvl_int return [startNode_int] end
# initialize move up the tree
heri_arr = Array{Int,1}()
next = startNode_int
# loops up the tree and obtains (id, level) combinations
while limitLvl_int < currLvl_int
next = tree_obj.up[next]
currLvl_int = tree_obj.nodes[next].lvl
push!(heri_arr, next)
end
return heri_arr
end
# XXX gets all children of node
function getDescendants(startNode_int::Int,tree_obj::Tree,getAll::Bool = false, limitLvl_int::Int=0)
# determines starting point
startLvl_int = tree_obj.nodes[startNode_int].lvl
# sets limits to maximum value if none provided
if limitLvl_int == 0 limitLvl_int = tree_obj.height end
if startLvl_int == limitLvl_int || (getAll && isempty(tree_obj.nodes[startNode_int].down)) return [startNode_int] end
startIdx_arr = tree_obj.nodes[startNode_int].down
curLvl_int = startLvl_int
# initialize array of all children
if getAll allIdx_arr = startIdx_arr end
while curLvl_int < (limitLvl_int-1)
lookUp_arr = vcat(map(x -> tree_obj.nodes[x].down,startIdx_arr)...)
if !isempty(lookUp_arr)
startIdx_arr = lookUp_arr
if getAll allIdx_arr = vcat(allIdx_arr,startIdx_arr) end
end
curLvl_int = curLvl_int + 1
end
return getAll ? allIdx_arr : startIdx_arr
end
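# illustrative sketch (toy node index assumed):
#   getAncestors(idx, tree_obj, :tup)     # -> [(parentIdx, parentLvl), ...] walking up towards the root
#   getAncestors(idx, tree_obj, :int, 1)  # -> plain ancestor ids, stopping at level 1
#   getDescendants(idx, tree_obj, true)   # -> ids of all children, grandchildren etc. below the node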
# XXX returns all nodes of tree on the level provided
getNodesLvl(tree_obj::Tree, level_int::Int) = filter(r -> r.lvl == level_int, sort(collect(values(tree_obj.nodes)), by = x -> x.idx))
# XXX returns (unique) tuple with strings of node itself and its parents
function getUniName(nodeIdx_int::Int, tree_obj::Tree)
if nodeIdx_int == 0 return ("none",) end
relNodes_arr = tree_obj.nodes[nodeIdx_int].lvl == 1 ? [nodeIdx_int] : vcat(reverse(getAncestors(nodeIdx_int,tree_obj,:tup,1))..., nodeIdx_int)
return tuple(map(x -> tree_obj.nodes[x[1]].val, relNodes_arr)...)
end
createFullString(nodeIdx_int::Int,tree_obj::Tree) = join(getUniName(nodeIdx_int,tree_obj)," < ")
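# sketch (assumed branch "Europe" > "Germany" with node id idx):
#   getUniName(idx, tree_obj)         # -> ("Europe", "Germany")
#   createFullString(idx, tree_obj)   # -> "Europe < Germany"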
# <editor-fold desc="reporting of calculation progress and error handling"
# XXX returns elapsed time since start
function getElapsed(start::DateTime)
elapSec_per = Dates.value(floor(now() - start,Dates.Second(1)))
if elapSec_per < 3600*24
elap_str = Dates.format(DateTime(2015,01,01,Int(floor(elapSec_per / 3600)),Int(floor(elapSec_per % 3600/ 60)),elapSec_per % 60), "HH:MM:SS")
else
elap_str = Dates.format(DateTime(2015,01,Int(floor(elapSec_per / (3600*24))),Int(floor(elapSec_per % (3600*24) / 3600)),Int(floor(elapSec_per % 3600/ 60)),elapSec_per % 60), "dd:HH:MM:SS")
end
return elap_str
end
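# sketch of the formatting behavior (start value assumed):
#   getElapsed(now() - Dates.Hour(1))   # -> "01:00:00", switches to "dd:HH:MM:SS" beyond one day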
# XXX tests for errors so far and optionally writes a report file, even if no serious errors occurred yet
function errorTest(report::Array{Tuple,1},options::modOptions;write::Bool = false, inCode::Bool = false)
errStatus_dic = Dict(1 => :green, 2 => :yellow,3 => :red)
if any(getindex.(report,1) .== 3)
output_df = DataFrame(map(idx -> getindex.(report, idx), eachindex(first(report))), [:type, :section, :location, :message])
CSV.write("$(options.outDir)/reporting_$(options.outStamp).csv", insertcols!(output_df[!,2:end], 1, :errStatus => map(x -> errStatus_dic[x],output_df[!,:type])))
printstyled("$(inCode ? "" : " - " )Errors encountered! Wrote reporting_$(options.outStamp).csv for details!"; color = :light_red)
error()
else
numWarn = length(findall(getindex.(report,1) .== 2))
if write && length(report) > 0
output_df = DataFrame(map(idx -> getindex.(report, idx), eachindex(first(report))), [:type, :section, :location, :message])
CSV.write("$(options.outDir)/reporting_$(options.outStamp).csv", insertcols!(output_df[!,2:end], 1, :errStatus => map(x -> errStatus_dic[x],output_df[!,:type])))
printstyled("$(inCode ? "" : " - " )No errors and $numWarn warning(s) encountered. Wrote reporting_$(options.outStamp).csv for details! \n"; color = numWarn > 0 ? :light_yellow : :light_green)
else
printstyled("$(inCode ? "" : " - " )No errors and $numWarn warning(s) encountered. \n"; color = numWarn > 0 ? :light_yellow : :light_green)
end
end
end
# XXX produces an output message and tests for errors according to globally set reporting values
function produceMessage(options::modOptions,report::Array{Tuple,1},currentLvl::Int64,fixedString::String,dynamicString::Any="")
sty_dic = Dict(1 => :bold, 2 => :normal, 3 => :light_black)
if options.reportLvl >= currentLvl
if options.errCheckLvl >= currentLvl
printstyled(options.objName; color = :underline); printstyled(" ", getElapsed(options.startTime), fixedString, dynamicString; color = sty_dic[currentLvl])
else
printstyled(options.objName; color = :underline); printstyled(" ",getElapsed(options.startTime), fixedString, dynamicString, "\n"; color = sty_dic[currentLvl])
end
end
if options.errCheckLvl >= currentLvl errorTest(report,options,write = options.errWrtLvl >= currentLvl) end
end
# </editor-fold>
# <editor-fold desc="miscellaneous data processing"
# XXX new plus function to avoid an error when one of the elements being added is nothing
plus(a::Int,b::Int) = a + b
plus(a::Int,b::Nothing) = a
plus(a::Nothing,b::Int) = b
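# minimal sketch: keeps index arithmetic safe where functions like findfirst may return nothing
#   plus(3, nothing)   # -> 3
#   plus(nothing, 3)   # -> 3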
# XXX provides names of columns as array of symbols (the 'names' function itself was changed to return strings instead of symbols)
namesSym(df::DataFrame) = map(x -> Symbol(x),names(df))
namesSym(df::DataFrameRow) = map(x -> Symbol(x),names(df))
# XXX returns dataframe columns without value column
removeVal(input_df::DataFrame) = filter(x -> !(x in (:val,:ratio)),namesSym(input_df))
removeVal(col_arr::Array{Symbol,1}) = filter(x -> !(x in (:val,:ratio)),col_arr)
# XXX return an empty integer array instead of an error, if a key is not in a dictionary
getDicEmpty(dic::Dict,key::Any) = key in keys(dic) ? dic[key] : Int[]
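# minimal sketch (toy dictionary):
#   getDicEmpty(Dict(1 => [2,3]), 1)   # -> [2, 3]
#   getDicEmpty(Dict(1 => [2,3]), 9)   # -> Int[] instead of a KeyError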
# XXX get names of column of type integer
intCol(in_df::DataFrame) = getindex.(filter(x -> eltype(x[2]) <: Int, collect(pairs(eachcol(in_df)))),1)
intCol(in_df::DataFrame,add_sym::Symbol) = union(intCol(in_df),intersect(namesSym(in_df),[add_sym]))
# XXX puts relevant dimensions in consistent order and adds remaining entries at the end
orderDim(inDim_arr::Array{Symbol,1},intCol_arr::Array{Symbol,1}) = intersect([:Ts_exp, :Ts_expSup, :Ts_disSup, :Ts_dis, :R_exp, :R_dis, :R_from, :R_to, :C, :Te], intersect(inDim_arr,intCol_arr)) |> (x -> [x...,setdiff(inDim_arr,x)...])
orderDim(inDim_arr::Array{Symbol,1}) = intersect([:Ts_exp, :Ts_expSup, :Ts_disSup, :Ts_dis, :R_exp, :R_dis, :R_from, :R_to, :R_a, :R_b, :C, :Te], inDim_arr) |> (x -> [x...,setdiff(inDim_arr,x)...])
# XXX puts dataframes columns in consistent order
orderDf(in_df::DataFrame) = select(in_df,orderDim(namesSym(in_df),intCol(in_df) |> (z -> isempty(z) ? Symbol[] : z)))
# XXX writes all tuples occurring in a tuple of pairs and tuples
mixedTupToTup(x) = typeof(x) <: Pair ? map(y -> mixedTupToTup(y),collect(x)) : x
# XXX check if dataframe should be considered, if energy balance is created for carriers in array
filterCarrier(var_df::DataFrame,c_arr::Array{Int,1}) = :C in namesSym(var_df) ? filter(r -> r.C in c_arr,var_df) : var_df
# XXX create dataframe with all potential dimensions for carrier provided
function createPotDisp(c_arr::Array{Int,1},anyM::anyModel)
lvl_arr = map(x -> anyM.cInfo[x], c_arr) |> (y -> map(z -> getfield.(y,z),[:tsDis, :rDis]))
allLvl_df = DataFrame(C = c_arr, lvlTs = lvl_arr[1], lvlR = lvl_arr[2])
tsDis_dic, rDis_dic = [Dict(x => getfield.(getNodesLvl(anyM.sets[z[2]],x),:idx) for x in unique(lvl_arr[z[1]])) for z in enumerate([:Ts,:R])]
allLvl_df[!,:Ts_dis] = map(x -> tsDis_dic[x],allLvl_df[!,:lvlTs])
allLvl_df[!,:R_dis] = map(x -> rDis_dic[x],allLvl_df[!,:lvlR])
var_df = flatten(flatten(select(allLvl_df,Not([:lvlTs,:lvlR])),:Ts_dis),:R_dis)
# add column for superordinate dispatch timestep
supTs_dic = Dict(x => getAncestors(x,anyM.sets[:Ts],:int,anyM.supTs.lvl)[end] for x in unique(var_df[!,:Ts_dis]))
var_df[!,:Ts_disSup] = map(x -> supTs_dic[x], var_df[!,:Ts_dis])
return var_df
end
# XXX gets technology name as symbol from id and the other way around
techSym(tInt::Int,tech_tree::Tree) = Symbol(getUniName(tInt,tech_tree)[end])
techInt(tSym::Symbol,tech_tree::Tree) = filter(x -> x.val == string(tSym),collect(values(tech_tree.nodes)))[1].idx
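# sketch (assumed set tree): techSym(1, anyM.sets[:Te]) might return :wind, while
# techInt(:wind, anyM.sets[:Te]) recovers the id 1 again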
# </editor-fold>
# <editor-fold desc="data frame based manipulations"
# XXX finds entries where expansion or capacity would be fixed to zero
function filterZero(src_df::DataFrame,par_obj::ParElement,anyM::anyModel)
if isdefined(par_obj,:name)
# copies parameter obj and adds ":up" to inheritance for any dimensions, otherwise variables would be created, but fixed to zero due to a zero limit on a higher level in the tree
modPar_obj = par_obj
modPar_obj.herit = modPar_obj.herit |> (y -> tuple(vcat(y..., map(x -> x => :up,getindex.(y,1))...)...))
# filter zero cases
zero_df = select!(filter(r -> r.val == 0, matchSetParameter(src_df, modPar_obj, anyM.sets)),Not(:val))
else
zero_df = src_df[[],:]
end
return zero_df
end
# XXX removes all entries occurring in remove array from input table
function removeEntries(remove_arr::Array{DataFrame,1},input_df::DataFrame)
if !isempty(remove_arr)
remove_df = length(remove_arr) == 1 ? remove_arr[1] : vcat(remove_arr...)
colRemove_arr = namesSym(remove_df)
out_df = antijoin(input_df,remove_df; on = colRemove_arr)
return out_df
else
return input_df
end
end
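# minimal sketch (toy data): rows of the input matching any remove row on its columns are dropped
#   removeEntries([DataFrame(R = [1])], DataFrame(R = [1,2], val = [0.5,0.7]))   # -> keeps only the R = 2 row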
# XXX merge provided dataframe into prep_dic
function mergePrepDic!(key_sym::Symbol,prep_dic::Dict{Symbol,NamedTuple},capaResi_df::DataFrame,capaRatio_df::DataFrame = DataFrame())
if key_sym in keys(prep_dic)
prep_dic[key_sym]= (var = prep_dic[key_sym].var, ratio = capaRatio_df, resi = capaResi_df)
else
prep_dic[key_sym] = (var = intCol(capaResi_df) |> (x -> DataFrame(Pair.(x,fill(Int[],length(x))))), ratio = capaRatio_df, resi = capaResi_df)
end
end
# XXX performs a left or outer join operation and replaces any missing values
function joinMissing(leftData_df::DataFrame, rightData_df::DataFrame, key_arr::Union{Array{Symbol,1},Array{Pair{Symbol,Symbol},1}}, how_sym::Symbol, missVal_dic::Dict, uni_boo::Bool = false)
# perform join operation
if how_sym == :left
joinData_df = leftjoin(leftData_df,rightData_df; on = key_arr, makeunique = uni_boo)
elseif how_sym == :outer
joinData_df = outerjoin(leftData_df,rightData_df; on = key_arr, makeunique = uni_boo)
end
miss_col = filter(x -> any(ismissing.(x[2])), collect(pairs(eachcol(joinData_df))))
# check, if any column contains missing values
if isempty(miss_col) return dropmissing(joinData_df) end
# replace missing value, cases differ depending if data type needs to be adjusted
for col in miss_col
joinData_df[!,col[1]] = map(x -> coalesce(x,missVal_dic[col[1]]),col[2])
end
return dropmissing(joinData_df)
end
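# minimal sketch (toy data): missing values introduced by the join are replaced column-wise
#   joinMissing(DataFrame(id = [1,2]), DataFrame(id = [1], val = [0.5]), [:id], :left, Dict(:val => 0.0))
#   # -> two rows, the unmatched one with val = 0.0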
# XXX get array of scaling factors for add_df
function getResize(add_df::DataFrame,time_obj::Tree,supDis::NamedTuple{(:lvl,:step,:sca),Tuple{Int,Tuple{Vararg{Int,N} where N},Dict{Tuple{Int,Int},Float64}}})
tsDisLvl_dic = Dict(x => x == 0 ? 1 : getfield(time_obj.nodes[x],:lvl) for x in unique(add_df[!,:Ts_dis]))
lvl_arr = map(x -> tsDisLvl_dic[x],add_df[!,:Ts_dis])
aboveSupResize_fl = maximum(values(supDis.sca)) * length(supDis.step) # scaling value used for variables above the superordinate dispatch level
sca_arr = map(x -> supDis.lvl > x[1] ? aboveSupResize_fl : supDis.sca[(x[2],x[1])] ,zip(lvl_arr,add_df[!,:Ts_disSup]))
return sca_arr
end
# XXX gets the upper bound used for dispatch variables
function getUpBound(in_df::DataFrame,dispBound_fl::Float64,supTs::NamedTuple{(:lvl,:step,:sca),Tuple{Int,Tuple{Vararg{Int,N} where N},Dict{Tuple{Int,Int},Float64}}},treeTs::Tree)
if !isnan(dispBound_fl)
upBound_arr = dispBound_fl * getResize(in_df,treeTs,supTs)
else
upBound_arr = fill(NaN,size(in_df,1))
end
return upBound_arr
end
# </editor-fold>
# <editor-fold desc="functions and sub-functions to aggregate variables"
# XXX aggregates variables in aggEtr_df to rows in srcEtr_df, function used, if all entries of search have the same resolution (all entries in a relevant column are on the same level)
function aggUniVar(aggEtr_df::DataFrame, srcEtr_df::DataFrame, agg_arr::Array{Symbol,1},srcRes_tup::NamedTuple,sets_dic::Dict{Symbol,Tree})
if isempty(aggEtr_df) return fill(AffExpr(),size(srcEtr_df,1)) end
# only selects relevant columns
aggEtr_df = select(aggEtr_df,vcat(:var,agg_arr...))
# adjusts entries in aggregation dataframe to comply with resolution of search dataframe
for dim in intersect(keys(srcRes_tup),agg_arr)
set_sym = Symbol(split(string(dim),"_")[1])
dim_dic = Dict(x => getAncestors(x,sets_dic[set_sym],:int,getfield(srcRes_tup,dim))[end] for x in unique(aggEtr_df[!,dim]))
aggEtr_df[!,dim] .= map(x -> dim_dic[x],aggEtr_df[!,dim])
end
aggEtrGrp_df = combine(groupby(aggEtr_df,agg_arr), :var => (x -> sum(x)) => :var)
joined_df = joinMissing(srcEtr_df,aggEtrGrp_df,agg_arr,:left,Dict(:var => AffExpr()))
sort!(joined_df,sort(intCol(joined_df)))
return joined_df[!,:var]
end
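# illustrative sketch (column names assumed): aggregating hourly dispatch variables into daily rows
#   aggUniVar(hourly_df, daily_df, [:Ts_dis,:R_dis], (Ts_dis = dayLvl, R_dis = regLvl), anyM.sets)
# maps each Ts_dis entry onto its ancestor at dayLvl, sums variables per group and joins them onto daily_df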
# XXX aggregates variables in aggEtr_df to rows in srcEtr_df, function used, if entries of search can have different resolutions (not all entries in a relevant column are on the same level)
function aggDivVar(aggEtr_df::DataFrame, srcEtr_df::DataFrame, agg_tup::Tuple, sets_dic::Dict{Symbol,Tree}; aggFilt::Tuple = ())
# XXX sanity checks regarding columns
if all(namesSym(aggEtr_df) |> (y -> map(x -> !(x in y),agg_tup))) error("tried to perform aggregation on column not existing in dataframe to be aggregated") end
if all(namesSym(srcEtr_df) |> (y -> map(x -> !(x in y),agg_tup))) error("tried to perform aggregation on column not existing in dataframe to aggregate") end
select!(aggEtr_df,intCol(aggEtr_df,:var))
# XXX filter entries from aggEtr_df that, based on isolated analysis of columns, will not be aggregated
for dim in intersect(aggFilt,agg_tup)
set_sym = Symbol(split(string(dim),"_")[1])
allSrc_set = unique(srcEtr_df[!,dim]) |> (z -> union(BitSet(z),map(x -> BitSet(getDescendants(x,sets_dic[set_sym],true)),z)...))
aggEtr_df = aggEtr_df[findall(map(x -> (x in allSrc_set),aggEtr_df[!,dim])),:]
end
if isempty(aggEtr_df) return fill(AffExpr(),size(srcEtr_df,1)) end
# XXX filter entries from srcEtr_df that, based on isolated analysis of columns, will not have any values aggregated to them
idxRel_set = BitSet(1:size(srcEtr_df,1))
for dim in agg_tup
set_sym = Symbol(split(string(dim),"_")[1])
allAgg_set = unique(aggEtr_df[!,dim]) |> (z -> union(BitSet(z),map(y -> BitSet(getAncestors(y,sets_dic[set_sym],:int,0)), z)...))
idxRel_set = intersect(idxRel_set,BitSet(findall(map(x -> x in allAgg_set, srcEtr_df[!,dim]))))
end
srcEtrAct_df = srcEtr_df[collect(idxRel_set),:]
# group aggregation dataframe to relevant columns and removes unrequired columns
aggEtrGrp_df = combine(groupby(aggEtr_df,collect(agg_tup)), :var => (x -> sum(x)) => :var)
# XXX create dictionaries in each dimension that assign rows suited for aggregation for each value
chldRows = Dict{Symbol,Dict{Int,BitSet}}()
for col in agg_tup
# rows that are potentially aggregated
findCol_arr = aggEtrGrp_df[!,col]
findCol_set = BitSet(findCol_arr)
# entries that other entries can be aggregated to
searchVal_set = BitSet(unique(srcEtrAct_df[!,col]))
# to every unique value in column the value itself and its children are assigned
set_sym = Symbol(split(string(col),"_")[1])
idxChild_dic = Dict(x => intersect(findCol_set,[x,getDescendants(x,sets_dic[set_sym],true)...]) for x in searchVal_set)
# for each unique value in column the rows with children are assigned
grp_df = groupby(DataFrame(val = findCol_arr, id = 1:length(findCol_arr)),:val)
dicVal_dic = Dict(x.val[1] => BitSet(sort(x[!,:id])) for x in grp_df) |> (dic -> Dict(x => union(map(y -> dic[y],collect(idxChild_dic[x]))...) for x in keys(idxChild_dic)))
# excludes column from search if, based on it, every entry in find could be aggregated to every row in search
# (if this holds true for all columns, an exception is made for the last one and it is not removed too, because otherwise correct aggregation cannot happen)
if all(length.(values(dicVal_dic)) .== length(findCol_arr)) && !(col == agg_tup[end] && length(chldRows) < 1)
select!(srcEtrAct_df,Not(col)); continue
else
chldRows[col] = dicVal_dic
end
end
# XXX finds aggregation by intersecting suited rows in each dimension
if isempty(chldRows)
aggRow_arr = fill(BitSet(),size(srcEtrAct_df,1))
else
aggRow_arr = collectKeys(keys(chldRows)) |> (y -> map(x -> intersect(map(y -> chldRows[y][x[y]],y)...) ,eachrow(srcEtrAct_df)))
end
# XXX aggregates values according to lookup
out_arr = Array{AffExpr}(undef,size(srcEtr_df,1))
out_arr[collect(idxRel_set)] = map(x -> sum(aggEtrGrp_df[x,:var]), collect.(aggRow_arr))
out_arr[setdiff(1:size(srcEtr_df,1),idxRel_set)] .= AffExpr()
return out_arr
end
# </editor-fold>
# <editor-fold desc="manipulate model related data frames"
# XXX add superordinate dispatch timestep to expansion dataframe
function addSupTsToExp(expMap_df::DataFrame,para_obj::Dict{Symbol,ParElement},type_sym::Symbol,tsYear_dic::Dict{Int,Int},anyM::anyModel)
if !isempty(expMap_df)
lftm_df = matchSetParameter(flatten(expMap_df,:Ts_expSup),para_obj[Symbol(:life,type_sym)],anyM.sets,newCol = :life)
lftmDel_df = matchSetParameter(lftm_df,para_obj[Symbol(:del,type_sym)],anyM.sets,newCol = :del)
lftmDel_df[!,:Ts_disSup] = map(x -> filter(y -> (tsYear_dic[y] >= tsYear_dic[x.Ts_expSup] + x.del) && (tsYear_dic[y] <= tsYear_dic[x.Ts_expSup] + x.life + x.del),collect(anyM.supTs.step)), eachrow(lftmDel_df))
select!(lftmDel_df,Not([:life,:del]))
grpCol_arr = intCol(expMap_df) |> (x -> :ratio in namesSym(expMap_df) ? vcat(:ratio,x...) : x)
expMap_df = combine(groupby(lftmDel_df,grpCol_arr), [:Ts_expSup,:Ts_disSup] .=> (x -> [x]) .=> [:Ts_expSup,:Ts_disSup])
else
expMap_df[!,:Ts_disSup] = Array{Array{Int,1},1}()
end
return expMap_df
end
# XXX expand expansion dataframe to capacity dataframe
function expandExpToCapa(in_df::DataFrame)
noExpCol_arr = intCol(in_df)
allDf_arr = map(eachrow(in_df)) do x
l_arr = length.(x.Ts_disSup)
rem_df = repeat(DataFrame(x[noExpCol_arr]), inner = sum(l_arr), outer = 1)
ext_df = DataFrame(Ts_expSup = vcat(map(y -> fill(x.Ts_expSup[y],l_arr[y]),1:length(l_arr))...), Ts_disSup = vcat(x.Ts_disSup...))
return hcat(rem_df,ext_df)
end
if !isempty(allDf_arr)
capa_df = select(vcat(allDf_arr...),orderDim(namesSym(allDf_arr[1])))[!,Not(:Ts_exp)]
else
capa_df = select(in_df,Not(:Ts_exp)); capa_df[!,:Ts_disSup] = Int[];
end
return orderDf(capa_df)
end
# XXX expands any table that includes columns with temporal and spatial dispatch levels, the corresponding expansion regions and superordinate dispatch steps into a full dispatch table
function expandExpToDisp(inData_df::DataFrame,ts_dic::Dict{Tuple{Int,Int},Array{Int,1}},r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},preserveTsSupTs::Bool = false)
# adds regional timesteps and checks if this causes non-unique values (because spatial expansion level can be below dispatch level)
expR_df = unique(combine(x -> (R_dis = r_dic[(x.R_exp[1],x.lvlR[1])],), groupby(inData_df,namesSym(inData_df)))[!,Not([:R_exp,:lvlR])])
expTs_df = combine(x -> (Ts_dis = ts_dic[(x.Ts_disSup[1],x.lvlTs[1])],), groupby(expR_df,namesSym(expR_df)))[!,Not(:lvlTs)]
# adds dispatch timesteps to table and returns
if !preserveTsSupTs select!(expTs_df,Not(:Ts_disSup)) end
return expTs_df
end
# XXX obtains residual capacities for technologies
function checkResiCapa(var_sym::Symbol, stockCapa_df::DataFrame, part::AbstractModelPart, anyM::anyModel, addSym::Symbol = Symbol())
resiPar_sym = Symbol(var_sym,:Resi,addSym)
if resiPar_sym in tuple(keys(part.par)...)
# search for defined residual values
stock_df = filter(r -> r.val != 0.0, matchSetParameter(stockCapa_df, part.par[resiPar_sym], anyM.sets))
else
stock_df = filter(x -> false,stockCapa_df)
stock_df[!,:val] = Float64[]
end
# converts returned values to affine expressions
stock_df[!,:var] = AffExpr.(stock_df[!,:val])
select!(stock_df,Not(:val))
return stock_df
end
# XXX get a dataframe with all variables of the specified type
function getAllVariables(va::Symbol,anyM::anyModel; reflectRed::Bool = true, filterFunc::Function = x -> true)
varToPart_dic = Dict(:exc => :exc, :capaExc => :exc, :oprCapaExc => :exc, :expExc => :exc, :crt => :bal, :lss => :bal, :trdSell => :trd, :trdBuy => :trd, :emission => Symbol())
techSym_arr = collect(keys(anyM.parts.tech))
if va == :excDir
va = :exc
end
if !(va in keys(varToPart_dic)) # get all variables for technologies
va_dic = Dict(:stIn => (:stExtIn, :stIntIn), :stOut => (:stExtOut, :stIntOut))
techType_arr = filter(x -> !isempty(x[2]),[(vaSpec,filter(y -> vaSpec in keys(anyM.parts.tech[y].var), techSym_arr)) for vaSpec in (va in keys(va_dic) ? va_dic[va] : (va,))])
if !isempty(techType_arr)
allVar_df = vcat(map(x -> anyM.parts.tech[x[2]].var[x[1]], vcat(map(x -> collect(zip(fill(x[1],length(x[2])),x[2])),techType_arr)...))...)
else
allVar_df = DataFrame()
end
elseif va != :emission # get variables from other parts
if va in keys(getfield(anyM.parts,varToPart_dic[va]).var)
allVar_df = getfield(anyM.parts,varToPart_dic[va]).var[va]
else
allVar_df = DataFrame()
end
else # va == :emission: all use variables are obtained and then already matched with emission factors
if !(:emissionFac in keys(anyM.parts.lim.par))
lock(anyM.lock)
push!(anyM.report,(2,"limits","emissionUp","upper emission limits but no emission factors provided"))
unlock(anyM.lock)
allVar_df = DataFrame()
else
# get all carriers and technologies that might be relevant to compute emissions
if :Te in namesSym(anyM.parts.lim.par[:emissionFac].data)
emC_arr = unique(vcat(map(x -> [x,getDescendants(x,anyM.sets[:C],true)...],unique(filter(x -> x.Te == 0, anyM.parts.lim.par[:emissionFac].data)[!,:C]))...))
emTe_arr = unique(vcat(map(x -> [x,getDescendants(x,anyM.sets[:Te],true)...],unique(filter(x -> x.Te != 0, anyM.parts.lim.par[:emissionFac].data)[!,:Te]))...))
else
emC_arr = unique(vcat(map(x -> [x,getDescendants(x,anyM.sets[:C],true)...],unique(anyM.parts.lim.par[:emissionFac].data[!,:C]))...))
emTe_arr = Array{Int64,1}()
end
# get use variables
allVar_df = getAllVariables(:use,anyM, filterFunc = x -> x.C in emC_arr || x.Te in emTe_arr)
# get expressions for storage and exchange losses, if this is enabled
if anyM.options.emissionLoss
# get all carriers being stored
allSt_arr = unique(vcat(vcat(map(x -> map(y -> collect(x.carrier[y]),intersect(keys(x.carrier),(:stExtIn,:stExtOut,:stIntIn,:stIntOut))),values(anyM.parts.tech))...)...))
if !isempty(intersect(emC_arr,vcat(map(x -> [x,getDescendants(x,anyM.sets[:C],true)...],allSt_arr)...)))
# get all storage variables where storage losses can lead to emissions
stVar_dic = Dict((string(st) |> (y -> Symbol(uppercase(y[1]),y[2:end]))) => getAllVariables(st,anyM, filterFunc = x -> x.C in emC_arr || x.Te in emTe_arr) for st in (:stIn,:stOut))
stLvl_df = getAllVariables(:stLvl,anyM, filterFunc = x -> x.C in emC_arr)
# loop over relevant storage technologies to obtain loss values
tSt_arr = unique(stLvl_df[!,:Te])
for tInt in tSt_arr
part = anyM.parts.tech[techSym(tInt,anyM.sets[:Te])]
# add expression quantifying storage losses for storage in- and output
for st in keys(stVar_dic)
stVar_df = stVar_dic[st]
stVar_df = matchSetParameter(filter(x -> x.Te == tInt,stVar_df),part.par[Symbol(:eff,st)],anyM.sets)
stVar_df[!,:var] = stVar_df[!,:var] .* (1 .- stVar_df[!,:val])
select!(stVar_df,Not(:val))
allVar_df = vcat(allVar_df,stVar_df)
end
# add expression quantifying storage losses for storage discharge
if :stDis in keys(part.par)
sca_arr = getResize(stLvl_df,anyM.sets[:Ts],anyM.supTs)
stLvl_df = matchSetParameter(filter(x -> x.Te == tInt,stLvl_df),part.par[:stDis],anyM.sets)
stLvl_df[!,:var] = stLvl_df[!,:var] .* (1 .- (1 .- stLvl_df[!,:val]) .^ sca_arr)
select!(stLvl_df,Not(:val))
allVar_df = vcat(allVar_df,stLvl_df)
end
end
end
# add expressions for exchange losses
if :exc in keys(anyM.parts.exc.var)
exc_df = getAllVariables(:exc,anyM, filterFunc = x -> x.C in emC_arr)
exc_df = getExcLosses(convertExcCol(exc_df),anyM.parts.exc.par,anyM.sets)
# exchange losses are equally split between import and export region
filter!(x -> x.loss != 0.0,exc_df)
if !isempty(exc_df)
exc_df[!,:var] = exc_df[!,:var] .* exc_df[!,:loss] .* 0.5
exc_df = rename(combine(groupby(vcat(exc_df,rename(exc_df,:R_a => :R_b,:R_b => :R_a)),filter(x -> x != :R_b,intCol(exc_df))),:var => (x -> sum(x)) => :var),:R_a => :R_dis)
# dimensions not relevant for exchange are set to 0
exc_df[!,:Te] .= 0; exc_df[!,:Ts_expSup] .= 0; exc_df[!,:M] .= 0
allVar_df = vcat(allVar_df,exc_df)
end
end
end
allVar_df = matchSetParameter(allVar_df,anyM.parts.lim.par[:emissionFac],anyM.sets)
end
if !isempty(allVar_df)
allVar_df[!,:var] = allVar_df[!,:val] ./ 1e6 .* allVar_df[!,:var]
select!(allVar_df,Not(:val))
end
end
if !(va in (:capaConv,:capaStIn,:capaStOut,:capaStSize,:oprCapaConv,:oprCapaStIn,:oprCapaStOut,:oprCapaStSize,:expConv,:expStIn,:expStOut,:expStSize)) && !isempty(allVar_df) && reflectRed
allVar_df[!,:var] .= allVar_df[!,:var] .* anyM.options.redStep
end
return filter(filterFunc,allVar_df)
end
# XXX replaces original carriers in var_df with all leaves connected to the respective carrier (and itself) and flattens it
function replCarLeafs(var_df::DataFrame,c_tree::Tree;cCol::Symbol=:C,noLeaf::Array{Int,1} = Int[])
cToLeafs_dic = Dict(x => filter(y -> isempty(c_tree.nodes[y].down) || y in noLeaf,[x,getDescendants(x,c_tree,true)...]) for x in unique(var_df[!,cCol]))
var_df[!,:C] = map(x -> cToLeafs_dic[x],var_df[!,cCol])
var_df = flatten(var_df,:C)
return var_df
end
# XXX returns array of technologies and respective dispatch variables relevant for input carrier
function getRelTech(c::Int,tech_dic::Dict{Symbol,TechPart},c_tree::Tree)
techSym_arr = collect(keys(tech_dic))
relTech_arr = Array{Tuple{Symbol,Symbol},1}()
for tSym in techSym_arr
addConvTech_arr = intersect((:use,:gen),filter(y -> c in tech_dic[tSym].carrier[y], collect(keys(tech_dic[tSym].carrier))))
if isempty(c_tree.nodes[c].down) # actual dispatch variables for storage only exist for carriers that are leaves
addStTech_arr = intersect((:stExtIn,:stExtOut),filter(y -> c in union(map(z -> union([z],getDescendants(z,c_tree,true)),tech_dic[tSym].carrier[y])...), collect(keys(tech_dic[tSym].carrier))))
else
addStTech_arr = Array{Tuple{Int,Symbol},1}()
end
union(addConvTech_arr,addStTech_arr) |> (y -> append!(relTech_arr,collect(zip(fill(tSym,length(y)),y))))
end
return relTech_arr
end
# </editor-fold>
# <editor-fold desc="prepare and create exchange related variables"
# XXX prepare expansion and capacity variables for exchange
function prepareExcExpansion!(partExc::OthPart,partLim::OthPart,prepExc_dic::Dict{Symbol,NamedTuple},tsYear_dic::Dict{Int,Int},anyM::anyModel)
# XXX determine dimensions of expansion variables (expansion for exchange capacities is NOT directed!)
# get all possible dimensions of exchange
potDim_df = DataFrame(map(x -> (lvlTs = x[2].tsExp, lvlR = x[2].rDis, C = x[1]), collect(anyM.cInfo)))
tsLvl_dic = Dict(x => getfield.(getNodesLvl(anyM.sets[:Ts],x),:idx) for x in unique(potDim_df[!,:lvlTs]))
rLvl_dic = Dict(x => getfield.(getNodesLvl(anyM.sets[:R],x),:idx) for x in unique(potDim_df[!,:lvlR]))
potExc_df = flatten(flatten(flatten(combine(x -> (Ts_exp = map(y -> tsLvl_dic[y],x.lvlTs), R_a = map(y -> rLvl_dic[y],x.lvlR), R_b = map(y -> rLvl_dic[y],x.lvlR)), groupby(potDim_df,:C)),:Ts_exp),:R_a),:R_b)
# get dimensions where exchange should actually be defined
exExp_df = DataFrame(R_a = Int[], R_b = Int[], C = Int[])
for excPar in intersect((:capaExcResi,:capaExcResiDir),keys(partExc.par))
append!(exExp_df,matchSetParameter(potExc_df,partExc.par[excPar],anyM.sets)[!,[:R_a,:R_b,:C]])
end
# ensure expansion entries are not directed
exExp_df = unique(exExp_df) |> (x -> filter(y -> y.R_a < y.R_b,vcat(x,rename(x,replace(namesSym(x),:R_a => :R_b, :R_b => :R_a))))) |> (z -> unique(z))
# add superordinate timesteps of expansion
allExExp_df = innerjoin(potExc_df,exExp_df, on = namesSym(exExp_df))
allExExp_df[!,:Ts_expSup] = map(x -> getDescendants(x,anyM.sets[:Ts],false,anyM.supTs.lvl) |> (y -> typeof(y) == Array{Int,1} ? y : [y] ), allExExp_df[!,:Ts_exp])
# filter cases where expansion is fixed to zero
if :expExcFix in keys(anyM.parts.lim.par)
allExExp_df = removeEntries([filterZero(allExExp_df,getLimPar(anyM.parts.lim,:expExcFix,anyM.sets[:Te]),anyM)],allExExp_df)
end
# filter cases where in and out region are the same
filter!(x -> x.R_a != x.R_b, allExExp_df)
# save result to dictionary for variable creation
exp_df = addSupTsToExp(allExExp_df,partExc.par,:Exc,tsYear_dic,anyM)
prepExc_dic[:expExc] = (var = convertExcCol(exp_df),ratio = DataFrame(), resi = DataFrame())
return potExc_df
end
# XXX create exchange variables
function createExcVar!(partExc::OthPart,ts_dic::Dict{Tuple{Int,Int},Array{Int,1}},anyM::anyModel)
# XXX extend capacity variables to dispatch variables
capa_df = partExc.var[:capaExc][!,Not([:var,:dir])] |> (x -> unique(vcat(x,rename(x,replace(namesSym(x),:R_from => :R_to, :R_to => :R_from)))))
# replace orginal carrier with leafs
capa_df = replCarLeafs(capa_df,anyM.sets[:C])
cToLvl_dic = Dict(x => (anyM.cInfo[x].tsDis, anyM.cInfo[x].rDis) for x in unique(capa_df[!,:C]))
capa_df[!,:lvlTs] = map(x -> cToLvl_dic[x][1],capa_df[!,:C])
capa_df[!,:lvlR] = map(x -> cToLvl_dic[x][2],capa_df[!,:C])
rExc_dic = Dict(x => anyM.sets[:R].nodes[x[1]].lvl != x[2] ? getDescendants(x[1],anyM.sets[:R],false,x[2]) : [x[1]]
for x in union([map(x -> (x[y],x.lvlR), eachrow(unique(capa_df[!,[y,:lvlR]]))) for y in (:R_from,:R_to)]...))
capa_df[!,:R_from] = map(x -> rExc_dic[x.R_from,x.lvlR],eachrow(capa_df[!,[:R_from,:lvlR]]))
capa_df[!,:R_to] = map(x -> rExc_dic[x.R_to,x.lvlR],eachrow(capa_df[!,[:R_to,:lvlR]]))
capa_df = flatten(select(capa_df,Not(:lvlR)),:R_from); capa_df = unique(flatten(capa_df,:R_to))
disp_df = combine(x -> (Ts_dis = ts_dic[(x.Ts_disSup[1],x.lvlTs[1])],),groupby(capa_df,namesSym(capa_df)))[!,Not(:lvlTs)]
# filter entries where availability is zero
if !isempty(partExc.par[:avaExc].data) && 0.0 in partExc.par[:avaExc].data[!,:val]
disp_df = filter(x -> x.val != 0.0, matchSetParameter(disp_df,partExc.par[:avaExc],anyM.sets))[!,Not(:val)]
end
# computes value to scale up the global limit on dispatch variables that is provided per hour, and creates variables
partExc.var[:exc] = orderDf(createVar(disp_df,"exc",getUpBound(disp_df,anyM.options.bound.disp / anyM.options.scaFac.dispExc,anyM.supTs,anyM.sets[:Ts]),anyM.optModel,anyM.lock,anyM.sets, scaFac = anyM.options.scaFac.dispExc))
end
# XXX add residual capacities for exchange (both symmetric and directed)
function addResidualCapaExc!(partExc::OthPart,prepExc_dic::Dict{Symbol,NamedTuple},potExc_df::DataFrame,anyM::anyModel)
expSup_dic = Dict(x => getDescendants(x,anyM.sets[:Ts],true,anyM.supTs.lvl) for x in unique(potExc_df[!,:Ts_exp]))
potExc_df[!,:Ts_disSup] = map(x -> expSup_dic[x],potExc_df[!,:Ts_exp])
potExc_df = flatten(potExc_df[!,Not(:Ts_exp)],:Ts_disSup)
# obtain symmetric residual capacities
capaResi_df = filter(x -> x.R_a != x.R_b, checkResiCapa(:capaExc,potExc_df, partExc, anyM))
sortR_mat = sort(hcat([capaResi_df[!,x] for x in (:R_a,:R_b)]...);dims = 2)
for (index,col) in enumerate((:R_a,:R_b)) capaResi_df[!,col] = sortR_mat[:,index] end
# manipulate entries in case directed residual capacities are defined
if :capaExcResiDir in keys(partExc.par)
directExc_df = matchSetParameter(potExc_df,partExc.par[:capaExcResiDir],anyM.sets)
directExc_df[!,:var] = map(x -> AffExpr(x), directExc_df[!,:val]); select!(directExc_df,Not(:val))
excDim_arr = [:C, :R_a, :R_b, :Ts_disSup]
excDimP_arr = replace(excDim_arr,:R_a => :R_b, :R_b => :R_a)
# entries, where a directed capacity is provided and a symmetric one already exists
bothExc_df = vcat(innerjoin(directExc_df, capaResi_df; on = excDim_arr, makeunique = true), innerjoin(directExc_df, capaResi_df; on = Pair.(excDim_arr,excDimP_arr), makeunique = true))
bothExc_df = combine(x -> (var = x.var + x.var_1,), groupby(bothExc_df,excDim_arr))
if !(:var in namesSym(bothExc_df)) bothExc_df[!,:var] = AffExpr[] end
# entries, where only a directed capacity was provided
onlyDirExc_df = antijoin(directExc_df, bothExc_df; on = excDim_arr )
# entries originally symmetric that now become directed, because a directed counterpart was introduced
flipSym_df = antijoin(innerjoin(capaResi_df, bothExc_df[!,Not(:var)]; on = excDim_arr),bothExc_df[!,Not(:var)]; on = excDim_arr .=> excDimP_arr)
swtExc_df = vcat(bothExc_df,flipSym_df)
# solely directed entries
dirExc_df = vcat(onlyDirExc_df,swtExc_df)
dirExc_df[!,:dir] .= true
# entries that become directed because their counterpart became directed
becomDirExc_df = innerjoin(rename(dirExc_df[!,Not([:var,:dir])],:R_a => :R_b, :R_b => :R_a),vcat(capaResi_df,rename(capaResi_df,:R_a => :R_b, :R_b => :R_a)); on = excDim_arr)
becomDirExc_df[!,:dir] .= true
# entries originally symmetric that remain symmetric
unDirExc_df = antijoin(capaResi_df, vcat(dirExc_df, rename(dirExc_df,:R_a => :R_b, :R_b => :R_a)); on = excDim_arr )
unDirExc_df[!,:dir] .= false
# adjust dataframe of residual capacities according to directed values
capaResi_df = vcat(dirExc_df,vcat(unDirExc_df,becomDirExc_df))
# adjust dataframe of capacities determining where variables will be created to reflect which of these correspond to directed cases now
allVar_df = prepExc_dic[:capaExc].var
if !isempty(prepExc_dic[:capaExc].var)
undirBoth_df = vcat(dirExc_df,rename(dirExc_df,replace(namesSym(dirExc_df),:R_a => :R_b, :R_b => :R_a)))[!,Not(:dir)]
dirVar_df = convertExcCol(innerjoin(convertExcCol(allVar_df[!,Not(:dir)]), vcat(undirBoth_df,swtExc_df)[!,Not(:var)],on = excDim_arr))
dirVar_df[!,:dir] .= true
adjVar_df = vcat(dirVar_df,antijoin(allVar_df,dirVar_df,on = [:C, :R_from, :R_to, :Ts_disSup] ))
else
adjVar_df = allVar_df
end
else
capaResi_df[!,:dir] .= false
adjVar_df = prepExc_dic[:capaExc].var
end
# filter cases where in and out region are the same
filter!(x -> x.R_a != x.R_b, capaResi_df)
# adjust dictionary accordingly
capaResi_df[!,:Ts_expSup] .= 0
prepExc_dic[:capaExc] = (var = unique(adjVar_df), ratio = DataFrame(), resi = convertExcCol(capaResi_df))
end
# XXX converts table where exchange regions are given as "R_a" and "R_b" to "R_to" and "R_from" and the other way around
convertExcCol(in_df::DataFrame) = rename(in_df, namesSym(in_df) |> (x -> :R_a in x ? replace(x,:R_a => :R_from, :R_b => :R_to) : replace(x,:R_from => :R_a, :R_to => :R_b)))
# converts dataframe where exchange regions are given as "a -> b" or "from -> to" to the other way round
switchExcCol(in_df::DataFrame) = rename(in_df, replace(namesSym(in_df),:R_from => :R_to, :R_to => :R_from))
# appends input dataframe to version of itself with from and to columns exchanged
function flipExc(in_df::DataFrame)
sw_df = switchExcCol(in_df)
# if input dataframe had only a to or from column respective other column is added
if !(:R_to in namesSym(in_df))
in_df[!,:R_to] .= 0
sw_df[!,:R_from] .= 0
elseif !(:R_from in namesSym(in_df))
in_df[!,:R_from] .= 0
sw_df[!,:R_to] .= 0
end
return orderDf(vcat(in_df,sw_df))
end
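# minimal sketch (toy data): both orientations of each connection appear afterwards
#   flipExc(DataFrame(R_from = [1], R_to = [2]))   # -> rows (R_from,R_to) = (1,2) and (2,1)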
# </editor-fold>
# <editor-fold desc="create exchange related constraints"
# XXX connect capacity and expansion constraints for exchange
function createCapaExcCns!(partExc::OthPart,anyM::anyModel)
capaVar_df = rename(partExc.var[:capaExc],:var => :capaVar)
if :expExc in keys(partExc.var)
expVar_df = flatten(partExc.var[:expExc],:Ts_disSup)[!,Not(:Ts_exp)]
cns_df = innerjoin(capaVar_df, combine(groupby(expVar_df,[:Ts_disSup, :R_from, :R_to, :C]), :var => (x -> sum(x)) => :expVar); on = [:Ts_disSup, :R_from, :R_to, :C])
# prepare, scale and create constraints
cns_df[!,:cnsExpr] = map(x -> x.capaVar - x.capaVar.constant - x.expVar, eachrow(cns_df))
cns_df = intCol(cns_df,:dir) |> (x -> orderDf(cns_df[!,[x...,:cnsExpr]]))
scaleCnsExpr!(cns_df,anyM.options.coefRng,anyM.options.checkRng)
partExc.cns[:excCapa] = createCns(cnsCont(cns_df,:equal),anyM.optModel)
end
# create and control operated capacity variables
if anyM.options.decomm != :none && :capaExc in keys(partExc.var)
# constraints for operated capacities are saved into a dictionary of containers and then actually created
cns_dic = Dict{Symbol,cnsCont}()
createOprVarCns!(partExc,cns_dic,anyM)
for cnsSym in keys(cns_dic)
scaleCnsExpr!(cns_dic[cnsSym].data,anyM.options.coefRng,anyM.options.checkRng)
partExc.cns[cnsSym] = createCns(cns_dic[cnsSym],anyM.optModel)
end
end
end
# XXX create capacity restriction for exchange
function createRestrExc!(ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},partExc::OthPart,anyM::anyModel)
# group exchange capacities by carrier
grpCapa_df = groupby(rename(partExc.var[anyM.options.decomm != :none ? :oprCapaExc : :capaExc],:var => :capa),:C)
# pre-allocate array of dataframes for restrictions
restr_arr = Array{DataFrame}(undef,length(grpCapa_df))
itrRestr = collect(enumerate(grpCapa_df))
# create restrictions
@threads for x in itrRestr
restr_arr[x[1]] = prepareRestrExc(copy(x[2]),ts_dic,partExc,anyM)
end
anyM.parts.exc.cns[:excRestr] = createCns(cnsCont(vcat(restr_arr...),:smaller),anyM.optModel)
end
# XXX prepare capacity restriction for specific carrier
function prepareRestrExc(cns_df::DataFrame,ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},partExc::OthPart,anyM::anyModel)
c_int = cns_df[1,:C]
leafes_arr = filter(y -> isempty(anyM.sets[:C].nodes[y].down), [c_int,getDescendants(c_int,anyM.sets[:C],true)...])
cRes_tup = anyM.cInfo[c_int] |> (x -> (Ts_dis = x.tsDis, R_from = x.rDis, R_to = x.rDis))
# extend constraint dataframe to dispatch levels
cns_df[!,:Ts_dis] .= map(x -> ts_dic[x,cRes_tup.Ts_dis],cns_df[!,:Ts_disSup])
cns_df = flatten(cns_df,:Ts_dis)
sort!(cns_df,sort(intCol(cns_df,:dir)))
# resize capacity variables
cns_df[!,:capa] = cns_df[!,:capa] .* map(x -> anyM.supTs.sca[(x,cRes_tup.Ts_dis)], cns_df[!,:Ts_disSup])
# filter relevant dispatch variables
relDisp_df = filter(x -> x.C in leafes_arr, partExc.var[:exc])
# first aggregate symmetric and directed entries in one direction, then directed entries in the other direction
cns_df[!,:disp] = aggUniVar(relDisp_df,cns_df,[:Ts_dis,:R_from,:R_to],cRes_tup,anyM.sets)
dir_arr = findall(.!cns_df[!,:dir])
cns_df[dir_arr,:disp] = cns_df[dir_arr,:disp] .+ aggUniVar(rename(relDisp_df,:R_from => :R_to, :R_to => :R_from),cns_df[dir_arr,:],[:Ts_dis,:R_from,:R_to],cRes_tup,anyM.sets)
# add availabilities to dataframe
cns_df = matchSetParameter(convertExcCol(cns_df),partExc.par[:avaExc],anyM.sets, newCol = :avaSym)
if :avaExcDir in keys(partExc.par)
dirAva_df = matchSetParameter(cns_df[!,intCol(cns_df,:dir)],partExc.par[:avaExcDir],anyM.sets, newCol = :avaDir)
cns_df = joinMissing(cns_df,dirAva_df,[:Ts_disSup,:Ts_dis,:R_a,:R_b,:C,:dir],:left, Dict(:avaDir => nothing))
else
cns_df[!,:avaDir] .= nothing
end
# prepare, scale and create constraints
cns_df[!,:cnsExpr] = map(x -> x.disp - x.capa * (isnothing(x.avaDir) ? x.avaSym : x.avaDir), eachrow(cns_df))
scaleCnsExpr!(cns_df,anyM.options.coefRng,anyM.options.checkRng)
return convertExcCol(cns_df) |> (x -> select(x,intCol(x,:cnsExpr)))
end
# </editor-fold>
# <editor-fold desc="utility functions for exchange"
# XXX obtain values for exchange losses
function getExcLosses(exc_df::DataFrame,excPar_dic::Dict{Symbol,ParElement},sets_dic::Dict{Symbol,Tree})
lossPar_obj = copy(excPar_dic[:lossExc])
if :R_a in namesSym(lossPar_obj.data) && :R_b in namesSym(lossPar_obj.data)
lossPar_obj.data = lossPar_obj.data |> (x -> vcat(x,rename(x,:R_a => :R_b, :R_b => :R_a)))
end
excLoss_df = matchSetParameter(exc_df,lossPar_obj,sets_dic,newCol = :loss)
# overwrite symmetric losses with any directed losses provided
if :lossExcDir in keys(excPar_dic)
oprCol_arr = intCol(excLoss_df)
dirLoss_df = matchSetParameter(excLoss_df[!,oprCol_arr],excPar_dic[:lossExcDir],sets_dic,newCol = :lossDir)
excLoss_df = joinMissing(excLoss_df,dirLoss_df,oprCol_arr,:left,Dict(:lossDir => nothing))
excLoss_df[!,:loss] = map(x -> isnothing(x.lossDir) ? x.loss : x.lossDir,eachrow(excLoss_df[!,[:loss,:lossDir]]))
select!(excLoss_df,Not(:lossDir))
end
return excLoss_df
end
# </editor-fold>
# XXX sets the objective function according to the arguments provided in obj_dic
"""
Set the objective of the model's underlying optimization problem.
```julia
setObjective!(obj_dic::Union{Dict{Symbol,Float64},Symbol}, model_object::anyModel)
```
`obj_dic` specifies the respective objective. To perform a multi-criteria optimization, it can also be a dictionary with the keywords as keys and weights as values. So far, the only supported keyword is `:costs`.
"""
function setObjective!(obj_dic::Union{Dict{Symbol,Float64},Symbol},anyM::anyModel,minimize::Bool=true)
# XXX converts input into a dictionary if only a symbol was provided; if the :none keyword was provided, sets a dummy objective function and returns
if typeof(obj_dic) == Symbol
if obj_dic == :none @objective(anyM.optModel, Min, 1); return produceMessage(anyM.options,anyM.report, 1," - Set an empty objective function") end
obj_dic = Dict(obj_dic => 1.0)
end
# XXX create empty variables table for objective variables; if another objective was already defined, these variables and equations are removed from the model
partObj = anyM.parts.obj
if !(:objVar in keys(partObj.var))
partObj.var[:objVar] = DataFrame(name = Symbol[], group = Symbol[], var = AffExpr[])
partObj.cns[:objEqn] = DataFrame(name = Symbol[], group = Symbol[], cns = ConstraintRef[])
end
# XXX create variables and equations required for specified objectives
for objGrp in setdiff(keys(obj_dic),unique(partObj.var[:objVar][!,:group]))
createObjective!(objGrp,partObj,anyM)
end
# XXX sets overall objective variable with upper limits and according to weights provided in dictionary
objBd_flt = anyM.options.bound.obj |> (x -> isnan(x) ? NaN : x / anyM.options.scaFac.obj)
obj_var = JuMP.add_variable(anyM.optModel, JuMP.build_variable(error, VariableInfo(false, NaN, !isnan(objBd_flt), objBd_flt, false, NaN, false, NaN, false, false)),"obj") * anyM.options.scaFac.obj
obj_eqn = @constraint(anyM.optModel, obj_var == sum(map(x -> sum(filter(r -> r.group == x,partObj.var[:objVar])[!,:var])*obj_dic[x], collectKeys(keys(obj_dic)))))
if minimize
@objective(anyM.optModel, Min, obj_var / anyM.options.scaFac.obj)
else
@objective(anyM.optModel, Max, obj_var / anyM.options.scaFac.obj)
end
produceMessage(anyM.options,anyM.report, 1," - Set objective function according to inputs")
end
createObjective!(objGrp::Symbol, partObj::OthPart,anyM::anyModel) = createObjective!(Val{objGrp}(), partObj::OthPart,anyM::anyModel)
# XXX create variables and equations for cost objective
function createObjective!(objGrp::Val{:costs},partObj::OthPart,anyM::anyModel)
parObj_arr = collectKeys(keys(partObj.par))
techSym_arr = collect(keys(anyM.parts.tech))
varToPart_dic = Dict(:exc => :exc, :crt => :bal,:trdSell => :trd, :trdBuy => :trd)
# computes discount factors from discount rate provided and saves them as new parameter elements
computeDisFac!(partObj,anyM)
# XXX add elements for expansion costs of technologies
for va in (:Conv, :StIn, :StOut, :StSize, :Exc)
# XXX compute expansion costs
var_sym = Symbol(:exp,va)
costPar_sym = Symbol(:costExp,va)
if !(costPar_sym in parObj_arr) continue end
# get all variables
allExp_df = getAllVariables(var_sym,anyM)
if isempty(allExp_df)
continue
else
allExp_df = rename(allExp_df,:var => :exp)
end
# add economic lifetime to table where it is defined
if Symbol(:lifeEco,va) in parObj_arr
ecoLife_df = matchSetParameter(allExp_df,partObj.par[Symbol(:lifeEco,va)],anyM.sets,newCol = :life)
noEcoLife_df = antijoin(allExp_df,ecoLife_df, on = intCol(allExp_df))
noEcoLife_df[!,:life] .= nothing
allExp_df = vcat(ecoLife_df,noEcoLife_df)
else
allExp_df[!,:life] .= nothing
end
techFilt_arr = filter(y -> var_sym in keys(anyM.parts.tech[y].var), techSym_arr)
# use technical lifetime where no economic lifetime could be obtained
if va != :Exc
allPar_arr = map(w -> isempty(w) ? DataFrame(val = [20.0]) : w,map(x -> anyM.parts.tech[x].par[Symbol(:life,va)].data,filter(y -> var_sym in keys(anyM.parts.tech[y].var), techFilt_arr)))
union(intCol.(allPar_arr)...) |> (z -> map(x -> map(y -> insertcols!(x,1,(y => fill(0,size(x,1)))) , setdiff(z,intCol(x)) ) ,allPar_arr))
lifePar_obj = copy(anyM.parts.tech[techFilt_arr[1]].par[Symbol(:life,va)],unique(vcat(allPar_arr...)))
else
lifePar_obj = anyM.parts.exc.par[:lifeExc]
end
techLife_df = matchSetParameter(convertExcCol(filter(x -> isnothing(x.life),allExp_df))[!,Not(:life)],lifePar_obj,anyM.sets,newCol = :life)
allExp_df = vcat(techLife_df,filter(x -> !isnothing(x.life),convertExcCol(allExp_df)))
# gets expansion costs and interest rate to compute annuity
allExp_df = matchSetParameter(allExp_df,partObj.par[costPar_sym],anyM.sets,newCol = :costExp)
if isempty(allExp_df) continue end
# uses tech specific discount rate and fall back on general discount rate as default
if Symbol(:rateExp,va) in keys(partObj.par)
techRate_df = matchSetParameter(allExp_df,partObj.par[Symbol(:rateExp,va)],anyM.sets,newCol = :rate)
else
techRate_df = filter(x -> false,allExp_df); techRate_df[!,:rate] .= Float64[]
end
# obtains general discount rate
generRate_df = rename(antijoin(allExp_df,techRate_df,on = intCol(techRate_df)),:Ts_expSup => :Ts_disSup, :Ts_disSup => :Ts_expSup)
if va != :Exc
generRate_df = matchSetParameter(generRate_df, partObj.par[:rateDisc],anyM.sets,newCol = :rate)
else
rateB_arr = matchSetParameter(rename(generRate_df,:R_a => :R_exp), partObj.par[:rateDisc],anyM.sets,newCol = :rateA)[!,:rateA]
rateA_arr = matchSetParameter(rename(generRate_df,:R_b => :R_exp), partObj.par[:rateDisc],anyM.sets,newCol = :rateB)[!,:rateB]
generRate_df[!,:rate] = 0.5 .* (rateA_arr .+ rateB_arr)
end
allExp_df = vcat(techRate_df,rename(generRate_df, :Ts_expSup => :Ts_disSup, :Ts_disSup => :Ts_expSup))
# compute annuity costs
allExp_df[!,:costAnn] = map(x -> x.costExp * (x.rate == 0.0 ? 1/x.life : (x.rate * (1 + x.rate)^x.life) / ((1 + x.rate)^x.life-1)), eachrow(allExp_df))
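# worked example: with rate = 0.05 and life = 20 the annuity factor is 0.05*1.05^20/(1.05^20-1) ≈ 0.0802,
# i.e. yearly costs of roughly 8% of the investment; for rate = 0.0 it degenerates to 1/life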
select!(allExp_df,Not([:costExp,:life,:rate]))
allExp_df = flatten(allExp_df,:Ts_disSup)
# adds discount factor and computes cost expression
allExp_df = matchSetParameter(convertExcCol(allExp_df),partObj.par[va != :Exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
# XXX groups cost expressions by technology, scales the grouped expressions and creates a variable for each grouped entry
allExp_df = rename(combine(x -> (expr = sum(x.disFac .* x.exp .* x.costAnn),) ,groupby(allExp_df,va != :Exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C])),:Ts_disSup => :Ts_exp)
transferCostEle!(allExp_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costCapa,anyM.options.checkRng)
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for expansion costs")
# XXX add elements for operational costs of technologies
# if decommissioning is enabled, capacity costs depend on commissioned and not on installed capacities
capaTyp_sym = anyM.options.decomm != :none ? :oprCapa : :capa
for va in (:Conv, :StIn, :StOut, :StSize, :Exc)
var_sym = Symbol(capaTyp_sym,va)
costPar_sym = Symbol(:costOpr,va)
if !(costPar_sym in parObj_arr) continue end
# get all variables
allCapa_df = getAllVariables(var_sym,anyM)
if isempty(allCapa_df)
continue
else
allCapa_df = rename(allCapa_df,:var => :capa)
end
# joins costs and discount factors to create cost expression
allCapa_df = matchSetParameter(convertExcCol(allCapa_df),partObj.par[costPar_sym],anyM.sets,newCol = :costOpr)
allCapa_df = matchSetParameter(convertExcCol(allCapa_df),partObj.par[va != :Exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
if isempty(allCapa_df) continue end
# XXX groups cost expressions by technology, scales grouped expressions and creates a variable for each grouped entry
allCapa_df = combine(x -> (expr = sum(x.disFac .* x.capa .* x.costOpr),), groupby(allCapa_df,va != :Exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C]))
transferCostEle!(allCapa_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costCapa,anyM.options.checkRng)
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for capacity costs")
# XXX add elements for variable costs of technologies
for va in (:use,:gen,:stIn,:stOut,:exc)
costPar_sym = string(va) |> (x -> Symbol(:costVar,uppercase(x[1]),x[2:end]))
if !(costPar_sym in parObj_arr || (va == :use && :emissionPrc in parObj_arr && :emissionFac in keys(anyM.parts.lim.par))) continue end
# obtain all variables
allDisp_df = getAllVariables(va,anyM)
if isempty(allDisp_df)
continue
else
allDisp_df = rename(allDisp_df,:var => :disp)
end
# special case for variable costs of exchange (both directed and symmetric values need to be considered) and of use (emission price needs to be considered)
if va == :exc
if :costVarExcDir in parObj_arr
dirCost_df = matchSetParameter(convertExcCol(allDisp_df),anyM.parts.obj.par[:costVarExcDir],anyM.sets,newCol = :costVar)
else
dirCost_df = convertExcCol(allDisp_df[[],:])
dirCost_df[!,:costVar] .= 0.0
end
noDirCost_df = matchSetParameter(antijoin(convertExcCol(allDisp_df),dirCost_df, on = intCol(dirCost_df)),anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
allDisp_df = convertExcCol(vcat(dirCost_df,noDirCost_df))
elseif va == :use && :emissionPrc in parObj_arr && :emissionFac in keys(anyM.parts.lim.par)
# get emission prices as a costs entry
emPrc_df = matchSetParameter(select(allDisp_df,Not(:disp)),partObj.par[:emissionPrc],anyM.sets, newCol = :prc)
emPrc_df = matchSetParameter(emPrc_df,anyM.parts.lim.par[:emissionFac],anyM.sets, newCol = :fac)
emPrc_df[!,:costEms] = emPrc_df[!,:prc] .* emPrc_df[!,:fac] ./ 1000
select!(emPrc_df,Not([:prc,:fac]))
# merge emission costs with other variable costs, or just use emission costs if there are no others
if costPar_sym in parObj_arr
otherVar_df = matchSetParameter(select(allDisp_df,Not(:disp)),anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
allCost_df = joinMissing(otherVar_df,emPrc_df,intCol(emPrc_df),:outer,merge(Dict{Symbol,Any}(:costVar => 0.0, :costEms => 0.0),Dict{Symbol,Any}(x => 0 for x in intCol(emPrc_df))) )
allCost_df[!,:costVar] = allCost_df[!,:costVar] .+ allCost_df[!,:costEms]
select!(allCost_df,Not(:costEms))
else
allCost_df = emPrc_df
rename!(allCost_df,:costEms => :costVar)
end
allDisp_df = innerjoin(allCost_df,allDisp_df, on = intCol(allDisp_df))
else
allDisp_df = matchSetParameter(allDisp_df,anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
end
if isempty(allDisp_df) continue end
# renames dispatch regions to enable join with discount factors
if va != :exc rename!(allDisp_df,:R_dis => :R_exp) end
allDisp_df = matchSetParameter(allDisp_df,partObj.par[va != :exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
# XXX groups cost expressions by technology, scales grouped expressions and creates a variable for each grouped entry
allDisp_df = combine(x -> (expr = sum(x.disFac .* x.disp .* x.costVar) ./ 1000.0 .* anyM.options.redStep,) ,groupby(allDisp_df,va != :exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C]))
transferCostEle!(allDisp_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng,(va == :gen ? NaN : 0.0))
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for variables costs")
# XXX add elements for curtailment and loss of load costs of energy carriers
for varType in [:crt,:lss]
if varType in keys(anyM.parts.bal.var)
cost_sym = varType == :crt ? :costCrt : :costLss
# compute discounted curtailment costs
allVar_df = rename(matchSetParameter(anyM.parts.bal.var[varType],anyM.parts.bal.par[cost_sym],anyM.sets,newCol = :cost),:var => varType)
allVar_df = matchSetParameter(rename(allVar_df,:R_dis => :R_exp),partObj.par[:disFac],anyM.sets,newCol = :disFac)
# groups cost expressions by carrier, scales grouped expressions and creates a variable for each grouped entry
allVar_df = combine(x -> (expr = sum(x.disFac .* x[!,varType] .* x.cost) ./ 1000.0 .* anyM.options.redStep,) ,groupby(allVar_df, [:Ts_disSup,:R_exp,:C]))
transferCostEle!(allVar_df, partObj,cost_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng, NaN)
end
end
# XXX add elements for trade costs of energy carriers (buy and sell)
for va in (:trdBuy, :trdSell)
if va in keys(anyM.parts.trd.var)
# compute discounted trade costs
allTrd_df = rename(matchSetParameter(anyM.parts.trd.var[va],anyM.parts.trd.par[Symbol(va,:Prc)],anyM.sets,newCol = :costTrd),:var => :trd)
allTrd_df = matchSetParameter(rename(allTrd_df,:R_dis => :R_exp),partObj.par[:disFac],anyM.sets,newCol = :disFac)
# groups cost expressions by carrier, scales grouped expressions and creates a variable for each grouped entry
allTrd_df = combine(x -> (expr = sum(x.disFac .* x.trd .* x.costTrd) ./ (va == :trdSell ? -1000.0 : 1000.0) .* anyM.options.redStep,), groupby(allTrd_df, [:Ts_disSup,:R_exp,:C]))
transferCostEle!(allTrd_df, partObj,Symbol(:cost,uppercase(string(va)[1]),string(va)[2:end]),anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng,(va == :trdSell ? NaN : 0.0))
end
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for curtailment and trade costs")
# XXX creates overall costs variable considering scaling parameters
relName_arr = filter(x -> x != :objVar, collectKeys(keys(partObj.var)))
objVar_arr = map(relName_arr) do varName
# sets lower limit of zero, except for curtailment and revenues from selling, because these can incur "negative" costs
lowBd_tup = !(varName in (:costCrt,:costTrdSell)) |> (x -> (x,x ? 0.0 : NaN))
info = VariableInfo(lowBd_tup[1], lowBd_tup[2], false, NaN, false, NaN, false, NaN, false, false)
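# JuMP's VariableInfo takes the positional arguments (has_lb, lb, has_ub, ub, has_fix, fix, has_start, start,
# binary, integer); here only a lower bound of zero is set when lowBd_tup[1] is true, everything else is left
# unbounded and continuous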
return JuMP.add_variable(anyM.optModel, JuMP.build_variable(error, info),string(varName))
end
# create dataframe with overall cost equations and scale it
objExpr_arr = [objVar_arr[idx] - sum(partObj.var[name][!,:var]) for (idx, name) in enumerate(relName_arr)]
cns_df = DataFrame(group = fill(:costs,length(objExpr_arr)), name = relName_arr, cnsExpr = objExpr_arr)
# add variables and equations to overall objective dataframes
partObj.cns[:objEqn] = vcat(partObj.cns[:objEqn],createCns(cnsCont(cns_df,:equal),anyM.optModel))
partObj.var[:objVar] = vcat(partObj.var[:objVar],DataFrame(group = fill(:costs,length(objVar_arr)), name = relName_arr, var = objVar_arr))
end
# XXX transfers provided cost dataframe into dataframe of overall objective variables and equations (and scales them)
function transferCostEle!(cost_df::DataFrame, partObj::OthPart,costPar_sym::Symbol,optModel::Model,lock_::ReentrantLock,sets_dic::Dict{Symbol,Tree},
coefRng_tup::NamedTuple{(:mat,:rhs),Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}}}, scaCost_fl::Float64, checkRng_fl::Float64, lowBd::Float64 = 0.0)
# creates variables for cost entries and builds the corresponding expressions for the equations controlling them
cost_df = createVar(cost_df,string(costPar_sym),NaN,optModel,lock_,sets_dic, scaFac = scaCost_fl, lowBd = lowBd)
cost_df[!,:cnsExpr] = map(x -> x.expr - x.var, eachrow(cost_df))
select!(cost_df,Not(:expr))
# scales cost expression
scaleCnsExpr!(cost_df,coefRng_tup,checkRng_fl)
# writes equations and variables
partObj.cns[costPar_sym] = createCns(cnsCont(select(cost_df,Not(:var)),:equal),optModel)
partObj.var[costPar_sym] = select(cost_df,Not(:cnsExpr))
end
# <editor-fold desc= create other elements of model"
# XXX create variables and capacity constraints for trade variables
function createTradeVarCns!(partTrd::OthPart,anyM::anyModel)
for type in (:Buy, :Sell)
trdPrc_sym = Symbol(:trd,type,:Prc)
trd_sym = Symbol(:trd,type)
if trdPrc_sym in keys(partTrd.par) && :C in namesSym(partTrd.par[trdPrc_sym].data)
# <editor-fold desc="create trade variables"
c_arr = unique(partTrd.par[trdPrc_sym].data[!,:C])
# create dataframe with all potential entries for trade buy/sell variables
var_df = createPotDisp(c_arr,anyM)
# match all potential variables with defined prices
var_df = matchSetParameter(var_df,partTrd.par[trdPrc_sym],anyM.sets)[!,Not(:val)]
var_df = createVar(var_df,string(:trd,type),getUpBound(var_df,anyM.options.bound.disp / anyM.options.scaFac.dispTrd,anyM.supTs,anyM.sets[:Ts]),anyM.optModel,anyM.lock,anyM.sets, scaFac = anyM.options.scaFac.dispTrd)
partTrd.var[trd_sym] = orderDf(var_df)
produceMessage(anyM.options,anyM.report, 3," - Created variables for $(type == :Buy ? "buying" : "selling") carriers")
# </editor-fold>
# <editor-fold desc="create capacity constraint on variable"
trdCap_sym = Symbol(trd_sym,:Cap)
if trdCap_sym in keys(partTrd.par)
cns_df = matchSetParameter(var_df,partTrd.par[trdCap_sym],anyM.sets,newCol = :cap)
sca_arr = getResize(cns_df,anyM.sets[:Ts],anyM.supTs)
cns_df[!,:cap] = cns_df[!,:cap] .* sca_arr
# prepare, scale and create constraints
cns_df[!,:cnsExpr] = map(x -> x.var - x.cap, eachrow(cns_df))
scaleCnsExpr!(cns_df,anyM.options.coefRng,anyM.options.checkRng)
partTrd.cns[trdCap_sym] = createCns(cnsCont(cns_df,:smaller),anyM.optModel)
produceMessage(anyM.options,anyM.report, 3," - Created capacity restrictions for $(type == :Buy ? "buying" : "selling") carriers")
end
# </editor-fold>
end
produceMessage(anyM.options,anyM.report, 2," - Created variables and constraints for $(type == :Buy ? "buying" : "selling") carriers")
end
produceMessage(anyM.options,anyM.report, 1," - Created variables and constraints for trade")
end
# XXX create all energy balances (and curtailment variables if required)
function createEnergyBal!(techSym_arr::Array{Symbol,1},anyM::anyModel)
partBal = anyM.parts.bal
c_arr = filter(x -> x != 0,getfield.(values(anyM.sets[:C].nodes),:idx))
allDim_df = createPotDisp(c_arr,anyM)
bal_tup = (:C,:Ts_dis)
agg_arr = [:Ts_dis, :R_dis, :C]
# <editor-fold desc="create potential curtailment and loss loss load variables
for varType in (:crt,:lss)
# get defined entries
var_df = DataFrame()
for par in intersect(keys(partBal.par),vcat(varType == :crt ? :costCrt : :costLss, Symbol.(varType,[:Up,:Low,:Fix])...))
append!(var_df,matchSetParameter(allDim_df,partBal.par[par],anyM.sets)[!,Not(:val)])
end
# obtain upper bound for variables and create them
if !isempty(var_df)
partBal.var[varType] = orderDf(createVar(var_df,string(varType),getUpBound(var_df,anyM.options.bound.disp / anyM.options.scaFac.dispTrd,anyM.supTs,anyM.sets[:Ts]),anyM.optModel,anyM.lock,anyM.sets, scaFac = anyM.options.scaFac.dispTrd))
end
end
# </editor-fold>
# <editor-fold desc="create actual balance"
# finds all carriers that require an energy balance (not required for carriers that can only be shifted temporally or spatially, e.g. carriers that only have storage or exchange defined for them)
relC_arr = Array{Int,1}()
# if demand does not specify a carrier, it is assumed all carriers are relevant
if !isempty(anyM.parts.bal.par[:dem].data)
if :C in namesSym(anyM.parts.bal.par[:dem].data)
append!(relC_arr,unique(anyM.parts.bal.par[:dem].data[!,:C]))
else
append!(relC_arr,c_arr)
end
end
if :crt in keys(anyM.parts.bal.var) append!(relC_arr,unique(anyM.parts.bal.var[:crt][!,:C])) end
if :lss in keys(anyM.parts.bal.var) append!(relC_arr,unique(anyM.parts.bal.var[:lss][!,:C])) end
if :trdSell in keys(anyM.parts.trd.var) append!(relC_arr,unique(anyM.parts.trd.var[:trdSell][!,:C])) end
if :trdBuy in keys(anyM.parts.trd.var) append!(relC_arr,unique(anyM.parts.trd.var[:trdBuy][!,:C])) end
# add carriers beings generated or used
append!(relC_arr,union(union(map(x -> anyM.parts.tech[x].carrier |> (y -> map(z -> getfield(y,z),intersect(keys(y),(:gen,:use)))),techSym_arr)...)...))
relC_arr = unique(relC_arr)
# create object to write constraint data to
cns_arr = Array{Pair{Symbol,cnsCont}}(undef,length(relC_arr))
itrC_arr = collect(enumerate(relC_arr))
@threads for (idx,c) in itrC_arr
subC_arr = unique([c,getDescendants(c,anyM.sets[:C],true)...])
cRes_tup = anyM.cInfo[c] |> (x -> (Ts_dis = x.tsDis, R_dis = x.rDis, C = anyM.sets[:C].nodes[c].lvl))
# XXX add demand and scale it
cns_df = matchSetParameter(filter(x -> x.C == c,allDim_df),partBal.par[:dem],anyM.sets)
cns_df[!,:dem] = cns_df[!,:val] .* getResize(cns_df,anyM.sets[:Ts],anyM.supTs)
select!(cns_df,Not(:val))
# XXX get relevant variables
sort!(cns_df,sort(intCol(cns_df)))
src_df = cns_df[!,Not([:Ts_disSup,:dem])]
# add tech variables
cns_df[!,:techVar] = getTechEnerBal(c,subC_arr,src_df,anyM.parts.tech,anyM.cInfo,anyM.sets)
# add curtailment and loss of load variables
for varType in (:crt,:lss)
if varType in keys(partBal.var)
cns_df[!,Symbol(varType,:Var)] = filterCarrier(partBal.var[varType],subC_arr) |> (x -> aggUniVar(x,src_df,agg_arr, cRes_tup,anyM.sets))
else
cns_df[!,Symbol(varType,:Var)] .= AffExpr()
end
end
# add trade variables
if !isempty(anyM.parts.trd.var)
cns_df[!,:trdVar] = sum([filterCarrier(anyM.parts.trd.var[trd],subC_arr) |> (x -> aggUniVar(x,src_df,agg_arr,cRes_tup,anyM.sets) |> (y -> trd != :trdSell ? y : -1.0 * y)) for trd in keys(anyM.parts.trd.var)])
else
cns_df[!,:trdVar] .= AffExpr()
end
# add exchange variables
if !isempty(anyM.parts.exc.var)
excVarTo_df = filterCarrier(anyM.parts.exc.var[:exc],subC_arr)
excVarFrom_df = convertExcCol(copy(excVarTo_df))
# get loss values and apply them to variables
excVarFrom_df = getExcLosses(excVarFrom_df,anyM.parts.exc.par,anyM.sets)
excVarFrom_df[!,:var] = excVarFrom_df[!,:var] .* (1.0 .- excVarFrom_df[!,:loss])
select!(excVarFrom_df,Not(:loss))
balTo_tup, balFrom_tup = [tuple(replace(collect(bal_tup),:R_dis => x)...) for x in [:R_to, :R_from]]
excFrom_arr = aggUniVar(convertExcCol(excVarFrom_df),rename(src_df,:R_dis => :R_to),[:Ts_dis,:R_to,:C],(Ts_dis = cRes_tup[1], R_to = cRes_tup[2], C = cRes_tup[3]),anyM.sets)
excTo_arr = aggUniVar(excVarTo_df,rename(src_df,:R_dis => :R_from),[:Ts_dis,:R_from,:C],(Ts_dis = cRes_tup[1], R_from = cRes_tup[2], C = cRes_tup[3]),anyM.sets)
cns_df[!,:excVar] = excFrom_arr .- excTo_arr
else
cns_df[!,:excVar] .= AffExpr()
end
# prepare, scale and save constraints to dictionary
c_str = Symbol(anyM.sets[:C].nodes[c].val)
cns_df[!,:cnsExpr] = map(x -> x.techVar + x.excVar + x.trdVar + x.lssVar - x.dem - x.crtVar, eachrow(cns_df))
cns_df = orderDf(cns_df[!,[intCol(cns_df)...,:cnsExpr]])
filter!(x -> x.cnsExpr != AffExpr(),cns_df)
scaleCnsExpr!(cns_df,anyM.options.coefRng,anyM.options.checkRng)
cns_arr[idx] = Symbol(c_str) => cnsCont(cns_df,anyM.cInfo[c].eq ? :equal : :greater)
produceMessage(anyM.options,anyM.report, 2," - Prepared energy balance for $(c_str)")
end
# loops over stored constraints outside of threaded loop to create actual jump constraints
for cns in cns_arr
partBal.cns[cns[1]] = createCns(cns[2],anyM.optModel)
end
produceMessage(anyM.options,anyM.report, 1," - Created energy balances for all carriers")
# </editor-fold>
end
# XXX aggregate all technology variables for energy balance
function getTechEnerBal(cBal_int::Int,subC_arr::Array{Int,1},src_df::DataFrame,tech_dic::Dict{Symbol,TechPart},
cInfo_dic::Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}},sets_dic::Dict{Symbol,Tree})
techVar_arr = Array{Array{AffExpr,1}}(undef,length(subC_arr))
# get temporal and spatial resolution for carrier being balanced
cBalRes_tup = cInfo_dic[cBal_int] |> (x -> (x.tsDis, x.rDis))
# loops over all carriers relevant for respective energy balance
for (idx,c) in enumerate(subC_arr)
# gets technologies relevant for the respective carrier
relTech_arr = getRelTech(c,tech_dic,sets_dic[:C])
# skips the carrier, if no relevant technologies could be obtained
if isempty(relTech_arr)
techVar_arr[idx] = fill(AffExpr(),size(src_df,1))
continue
end
# prepares loop over techs for carrier c by creating an empty dataframe and gets temporal and spatial resolution of the carrier being balanced
allVar_df = DataFrame(Ts_dis = Int[], R_dis = Int[], var = AffExpr[])
cRes_tup = cInfo_dic[c] |> (x -> (x.tsDis, x.rDis))
for x in relTech_arr
# gets resolution and adjusts add_df in case of an aggregated technology
add_df = select(filter(r -> r.C == c,tech_dic[x[1]].var[x[2]]),[:Ts_dis,:R_dis,:var])
if isempty(add_df) continue end
tRes_tup = tech_dic[x[1]].disAgg ? (cRes_tup[1], tech_dic[x[1]].balLvl.exp[2]) : cRes_tup
checkTechReso!(tRes_tup,cBalRes_tup,add_df,sets_dic)
# adds sign to variables and adds them to overall dataframe
add_df[!,:var] = add_df[!,:var] .* (x[2] in (:use,:stExtIn) ? -1.0 : 1.0)
append!(allVar_df, add_df)
end
# returns empty expression if no variables could be obtained
if isempty(allVar_df)
techVar_arr[idx] = fill(AffExpr(),size(src_df,1))
else
grpVar_df = combine(groupby(allVar_df, [:Ts_dis, :R_dis]), :var => (x -> sum(x)) => :var)
joined_df = joinMissing(src_df,grpVar_df, [:Ts_dis, :R_dis], :left, Dict(:var => AffExpr()))
sort!(joined_df,sort(intCol(joined_df)))
techVar_arr[idx] = joined_df[!,:var]
end
end
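# hcat forms a matrix with one column per carrier in subC_arr; summing across each row then yields
# one aggregated expression per row of src_df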
return map(x -> sum(x),eachrow(hcat(techVar_arr...)))
end
# XXX create constraints that enforce any type of limit (Up/Low/Fix) on any type of variable
function createLimitCns!(partLim::OthPart,anyM::anyModel)
parLim_arr = String.(collectKeys(keys(partLim.par)))
techLim_arr = filter(x -> any(map(y -> occursin(y,x),["Up","Low","Fix"])),parLim_arr)
limVar_arr = map(x -> map(k -> Symbol(k[1]) => Symbol(k[2][1]), filter(z -> length(z[2]) == 2,map(y -> y => split(x,y),["Up","Low","Fix"])))[1], techLim_arr)
varToPar_dic = Dict(y => getindex.(filter(z -> z[2] == y,limVar_arr),1) for y in unique(getindex.(limVar_arr,2)))
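# e.g. the parameter name "capaConvUp" is parsed into the pair :Up => :capaConv above, so varToPar_dic
# maps each limited variable (here :capaConv) to all limit types (:Up/:Low/:Fix) provided for it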
# loop over all variables that are subject to any type of limit (except emissions)
allKeys_arr = collect(keys(varToPar_dic))
cns_dic = Dict{Symbol,cnsCont}()
signLim_dic= Dict(:Up => :smaller, :Low => :greater, :Fix => :equal)
@threads for va in allKeys_arr
varToPart_dic = Dict(:exc => :exc, :crt => :bal,:trdSell => :trd, :trdBuy => :trd)
# obtain all variables relevant for limits
allVar_df = getAllVariables(va,anyM)
# aggregates exchange variables on the same line
if va == :exc
allVar_df[!,:var] = aggDivVar(flipExc(allVar_df),select(allVar_df,Not([:var])),(:Ts_disSup, :R_from, :R_to, :Ts_dis, :C), anyM.sets)
end
# check if actually any variables were obtained
if isempty(allVar_df)
lock(anyM.lock)
push!(anyM.report,(2,"limit",string(va),"limits for variable provided, but none of these variables are actually created"))
unlock(anyM.lock)
continue
end
allLimit_df = DataFrame(var = AffExpr[])
# XXX loop over respective type of limits to obtain data
for lim in varToPar_dic[va]
par_obj = copy(partLim.par[Symbol(va,lim)])
if va in (:capaExc,:oprCapaExc) && :R_a in namesSym(par_obj.data) && :R_b in namesSym(par_obj.data)
par_obj.data = vcat(par_obj.data,rename(par_obj.data,:R_a => :R_b,:R_b => :R_a))
end
agg_tup = tuple(intCol(par_obj.data)...)
if :R_a in agg_tup && !(:R_a in namesSym(allVar_df))
allVar_df = convertExcCol(allVar_df)
end
# aggregate variables according to the dimensions of the limit parameter
if isempty(agg_tup)
grpVar_df = allVar_df
else
grpVar_df = combine(groupby(allVar_df,collect(agg_tup)), :var => (x -> sum(x)) => :var)
end
# try to aggregate variables to limits directly provided via inputs
limit_df = copy(par_obj.data)
if size(limit_df,2) != 1
limit_df[!,:var] = aggDivVar(grpVar_df, limit_df[!,Not(:val)], agg_tup, anyM.sets, aggFilt = agg_tup)
else
limit_df[!,:var] .= sum(grpVar_df[!,:var])
end
# gets provided limit parameters that no variables could be assigned to so far and tests if any could be assigned via inheritance
mtcPar_arr, noMtcPar_arr = findall(map(x -> x != AffExpr(),limit_df[!,:var])) |> (x -> [x, setdiff(1:size(par_obj.data,1),x)])
# removes entries with no parameter assigned from limits
limit_df = limit_df[mtcPar_arr,:]
if !isempty(noMtcPar_arr)
# tries to inherit values to existing variables only for parameters without variables aggregated so far
aggPar_obj = copy(par_obj,par_obj.data[noMtcPar_arr,:])
aggPar_obj.data = matchSetParameter(grpVar_df[!,Not(:var)],aggPar_obj,anyM.sets, useNew = false)
# again performs aggregation for inherited parameter data and merges it with the original limits
aggLimit_df = copy(aggPar_obj.data)
if !isempty(aggLimit_df)
aggLimit_df[!,:var] = aggDivVar(grpVar_df, aggLimit_df, agg_tup, anyM.sets, aggFilt = agg_tup)
limit_df = vcat(limit_df,aggLimit_df)
end
end
# merge limit constraints with other limits for the same variables
limit_df = convertExcCol(rename(limit_df,:val => lim))
join_arr = [intersect(intCol(allLimit_df),intCol(limit_df))...,:var]
miss_arr = [intCol(allLimit_df),intCol(limit_df)] |> (y -> union(setdiff(y[1],y[2]), setdiff(y[2],y[1])))
allLimit_df = joinMissing(allLimit_df, limit_df, join_arr, :outer, merge(Dict(z => 0 for z in miss_arr),Dict(:Up => nothing, :Low => nothing, :Fix => nothing)))
end
# XXX check for contradicting values
colSet_dic = Dict(x => Symbol(split(string(x),"_")[1]) for x in intCol(allLimit_df))
limitCol_arr = intersect(namesSym(allLimit_df),(:Fix,:Up,:Low))
entr_int = size(allLimit_df,1)
if :Low in limitCol_arr || :Up in limitCol_arr
# ! errors
# upper and lower limit contradicting each other
if :Low in limitCol_arr && :Up in limitCol_arr
for x in findall(replace(allLimit_df[!,:Low],nothing => 0.0) .> replace(allLimit_df[!,:Up],nothing => Inf))
dim_str = join(map(y -> allLimit_df[x,y] == 0 ? "" : string(y,": ",join(getUniName(allLimit_df[x,y], anyM.sets[colSet_dic[y]])," < ")),intCol(allLimit_df)),"; ")
lock(anyM.lock)
push!(anyM.report,(3,"limit",string(va),"contradicting values for upper and lower limit detected for: " * dim_str))
unlock(anyM.lock)
end
end
# fix and upper limit contradicting each other
if :Fix in limitCol_arr && :Up in limitCol_arr
for x in findall(replace(allLimit_df[!,:Fix],nothing => 0.0) .> (replace(allLimit_df[!,:Up],nothing => Inf) .+ 0.0001))
dim_str = join(map(y -> allLimit_df[x,y] == 0 ? "" : string(y,": ",join(getUniName(allLimit_df[x,y], anyM.sets[colSet_dic[y]])," < ")),intCol(allLimit_df)),"; ")
lock(anyM.lock)
push!(anyM.report,(3,"limit",string(va),"fixed limit exceeds upper limit for: " * dim_str))
unlock(anyM.lock)
end
end
# fix and lower limit contradicting each other
if :Fix in limitCol_arr && :Low in limitCol_arr
for x in findall(replace(allLimit_df[!,:Fix],nothing => Inf) .< replace(allLimit_df[!,:Low],nothing => 0.0) .- 0.0001)
dim_str = join(map(y -> allLimit_df[x,y] == 0 ? "" : string(y,": ",join(getUniName(allLimit_df[x,y], anyM.sets[colSet_dic[y]])," < ")),intCol(allLimit_df)),"; ")
lock(anyM.lock)
push!(anyM.report,(3,"limit",string(va),"fixed limit is smaller than lower limit for: " * dim_str))
unlock(anyM.lock)
end
end
# residual values already violate limits
resiVal_arr = getfield.(allLimit_df[!,:var],:constant)
if :Up in limitCol_arr
for x in findall(resiVal_arr .> replace(allLimit_df[!,:Up],nothing => Inf))
dim_str = join(map(y -> allLimit_df[x,y] == 0 ? "" : string(y,": ",join(getUniName(allLimit_df[x,y], anyM.sets[colSet_dic[y]])," < ")),intCol(allLimit_df)),"; ")
lock(anyM.lock)
push!(anyM.report,(3,"limit",string(va),"residual values already exceed the upper limit for: " * dim_str))
unlock(anyM.lock)
end
end
# ! warnings
# upper or lower limit of zero
if !isempty(limitCol_arr |> (y -> filter(x -> collect(x[y]) |> (z -> any(isnothing.(z)) ? false : any(z .== 0)),allLimit_df))) && va != :emission
lock(anyM.lock)
push!(anyM.report,(2,"limit",string(va),"upper or lower limit of zero detected, please consider to use fix or omit limit instead"))
unlock(anyM.lock)
entr_int = size(allLimit_df,1)
end
# value is fixed, but an upper or lower limit is still provided
if :Fix in limitCol_arr
if !isempty(filter(x -> x != :Fix, limitCol_arr) |> (z -> filter(x -> all([!isnothing(x.Fix),any(.!isnothing.(collect(x[z])))]) ,eachrow(allLimit_df))))
lock(anyM.lock)
push!(anyM.report,(2,"limit",string(va),"upper and/or lower limit detected, although variable is already fixed"))
unlock(anyM.lock)
end
end
end
# if installed capacities differ by direction, because residual values were defined, while fixed limits on the
# installed capacity were provided at the same time, an error will occur, because a value cannot be fixed and
# at the same time differ by direction; this is detected here
if :Fix in limitCol_arr && va == :capaExc
# filters fixed exchange capacities and extracts residual values
fix_df = select(filter(x -> x.Fix != nothing, allLimit_df) ,intCol(allLimit_df,:var))
fix_df[!,:resi] .= getfield.(fix_df[!,:var],:constant)
# joins together capacities in both directions
joinA_df = rename(select(fix_df,Not([:var])),:resi => :resiA)
joinB_df = rename(joinA_df,:resiA => :resiB)
comp_df = innerjoin(joinA_df,joinB_df, on = intCol(joinA_df) .=> replace(intCol(joinB_df),:R_from => :R_to,:R_to => :R_from))
# finds cases that lead to contradiction and reports on them
contraExc_df = filter(x -> x.resiA != x.resiB && x.R_from > x.R_to,comp_df)
for x in eachrow(contraExc_df)
dim_str = join(map(y -> x[y] == 0 ? "" : string(y,": ",join(getUniName(x[y], anyM.sets[colSet_dic[y]])," < ")),intCol(contraExc_df)),"; ")
lock(anyM.lock)
push!(anyM.report,(3,"limit",string(va),"for the exchange capacity '" * dim_str * "' residual capacites differ by direction but at the same time the installed capacity in both directions is fixed to the same value by capaExcFix, this is a contradiction and would lead to an infeasible model"))
unlock(anyM.lock)
end
end
# XXX check for suspicious entries for capacity where limits are provided for the sum of capacity over several years
if occursin("capa",string(va))
if !(:Ts_disSup in namesSym(allLimit_df))
lock(anyM.lock)
push!(anyM.report,(2,"limit","capacity","capacity limits were provided without specificing the superordinate dispatch timestep, this means the sum of capacity over all years was limited instead of enforcing the same limit for each year
(see https://leonardgoeke.github.io/AnyMOD.jl/stable/parameter_list/#Limits-on-quantities-dispatched)"))
unlock(anyM.lock)
elseif 0 in unique(allLimit_df[!,:Ts_disSup])
relEntr_df = filter(x -> x.Ts_disSup == 0, allLimit_df)
if :Te in namesSym(relEntr_df)
allTe_arr = unique(relEntr_df[!,:Te])
for tInt in allTe_arr
lock(anyM.lock)
push!(anyM.report,(2,"limit","capacity","capacity limits were provided for $(string(techSym(tInt,anyM.sets[:Te]))) without specifying the superordinate dispatch timestep, this means the sum of capacity over all years was limited instead of enforcing the same limit for each year
(see https://leonardgoeke.github.io/AnyMOD.jl/stable/parameter_list/#Limits-on-quantities-dispatched)"))
unlock(anyM.lock)
end
else
lock(anyM.lock)
push!(anyM.report,(2,"limit","capacity","capacity limits were provided without specificing the superordinate dispatch timestep, this means the sum of capacity over all years was limited instead of enforcing the same limit for each year
(see https://leonardgoeke.github.io/AnyMOD.jl/stable/parameter_list/#Limits-on-quantities-dispatched)"))
unlock(anyM.lock)
end
end
end
# XXX write constraint containers
for lim in limitCol_arr
# filter respective limits (low, fix or up) out of the entire dataframe
relLim_df = filter(x -> !isnothing(x[lim]),allLimit_df[!,Not(filter(x -> x != lim,limitCol_arr))])
relLim_df = filter(x -> x.var != AffExpr() || (lim == :Fix && x.Fix != 0.0), relLim_df) # guard so the :Fix column is only referenced when it exists (it is dropped for :Up/:Low limits)
if isempty(relLim_df) continue end
rename!(relLim_df,lim => :Lim)
# prepare, scale and save constraints to dictionary
relLim_df[!,:cnsExpr] = map(x -> x.var - x.Lim, eachrow(relLim_df))
relLim_df = orderDf(relLim_df[!,[intCol(relLim_df)...,:cnsExpr]])
scaleCnsExpr!(relLim_df,anyM.options.coefRng,anyM.options.checkRng)
cns_dic[Symbol(va,lim)] = cnsCont(relLim_df,signLim_dic[lim])
produceMessage(anyM.options,anyM.report, 3," - Created constraints for $(lim == :Up ? "upper" : (lim == :Low ? "lower" : "fixed")) limit of variable $va")
end
typeLim_sym = va in (:emission,) ? "term" : "variable"
produceMessage(anyM.options,anyM.report, 2," - Prepared constraints to limit $typeLim_sym $va")
end
# loops over stored constraints outside of threaded loop to create actual jump constraints
for cnsSym in keys(cns_dic)
partLim.cns[cnsSym] = createCns(cns_dic[cnsSym],anyM.optModel)
end
produceMessage(anyM.options,anyM.report, 1," - Created all limiting constraints")
end
# </editor-fold>
# <editor-fold desc= utility functions"
# XXX connect capacity and expansion variables
function createCapaCns!(part::TechPart,prepTech_dic::Dict{Symbol,NamedTuple},cns_dic::Dict{Symbol,cnsCont})
for capaVar in filter(x -> occursin("capa",string(x)),keys(prepTech_dic))
index_arr = intCol(part.var[capaVar])
join_arr = part.type != :mature ? index_arr : filter(x -> x != :Ts_expSup,collect(index_arr))
# joins corresponding capacity and expansion variables together
expVar_sym = Symbol(replace(string(capaVar),"capa" => "exp"))
if !(expVar_sym in keys(part.var)) continue end
expVar_df = flatten(part.var[expVar_sym],:Ts_disSup)
cns_df = rename(innerjoin(part.var[capaVar],combine(groupby(expVar_df,join_arr), :var => (x -> sum(x)) => :exp); on = join_arr),:var => :capa)
# creates final constraint object
cns_df[!,:cnsExpr] = map(x -> x.capa - x.capa.constant - x.exp,eachrow(cns_df))
cns_dic[Symbol(capaVar)] = cnsCont(select(cns_df,Not([:capa,:exp])),:equal)
end
end
# XXX adds column with JuMP variable to dataframe
function createVar(setData_df::DataFrame,name_str::String,upBd_fl::Union{Float64,Array{Float64,1}},optModel::Model,lock_::ReentrantLock,sets::Dict{Symbol,Tree}; scaFac::Float64 = 1.0, lowBd::Float64 = 0.0)
# adds an upper bound to all variables if provided within the options
#if isempty(setData_df) return DataFrame(var = AffExpr[]) end
arr_boo = typeof(upBd_fl) <: Array
if arr_boo
info = VariableInfo.(!isnan(lowBd), lowBd, .!isnan.(upBd_fl), upBd_fl, false, NaN, false, NaN, false, false)
var_obj = JuMP.build_variable.(error, info)
else
info = VariableInfo(!isnan(lowBd), lowBd, !isnan(upBd_fl), upBd_fl, false, NaN, false, NaN, false, false)
var_obj = JuMP.build_variable(error, info)
end
# writes full name for each variable
setData_df = orderDf(setData_df)
dim_arr = map(x -> Symbol(split(String(x),"_")[1]), filter(r -> r != :id,intCol(setData_df)))
dim_int = length(dim_arr)
setData_df[!,:name] = string.(name_str,"[",map(x -> join(map(y -> sets[dim_arr[y]].nodes[x[y]].val,1:dim_int),", "),eachrow(setData_df)),"]")
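# illustration (assumed set names): the generated names follow the pattern "capaConv[2020, region1, tech1]",
# which makes variables identifiable in LP files and solver logs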
lock(lock_)
if arr_boo
setData_df[!,:var] = [AffExpr(0,JuMP.add_variable(optModel, nameItr[1], nameItr[2]) => scaFac) for nameItr in zip(var_obj,setData_df[!,:name])]
else
setData_df[!,:var] = [AffExpr(0,JuMP.add_variable(optModel, var_obj, nameItr) => scaFac) for nameItr in setData_df[!,:name]]
end
unlock(lock_)
return setData_df[!,Not(:name)]
end
# XXX scales expressions in the dataframe to be within the range defined within options
function scaleCnsExpr!(cnsExpr_df::DataFrame,coefRng::NamedTuple{(:mat,:rhs),Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}}},checkRng_fl::Float64)
if isempty(cnsExpr_df) return end
# filters variables with factor zero, because they will lead to scaling errors
foreach(z -> foreach(y -> delete!(z.terms,y), filter(x -> z.terms[x] == 0.0 ,collect(keys(z.terms)))), cnsExpr_df[!,:cnsExpr])
if !all(isnan.(coefRng.rhs))
# scale expression defining constraint so rhs coefficients are within desired range
rhs_arr = abs.(getfield.(cnsExpr_df[!,:cnsExpr],:constant))
findall(rhs_arr .!= 0.0) |> (x -> cnsExpr_df[x,:cnsExpr] = scaleRng(cnsExpr_df[x,:cnsExpr],rhs_arr[x],coefRng.rhs, true))
end
if !all(isnan.(coefRng.mat))
# scale expression defining constraint so matrix coefficients are within desired range
matRng_arr = map(x -> abs.(values(x.terms)) |> (y -> isempty(y) ? (0.0,0.0) : (minimum(y),maximum(y))), cnsExpr_df[!,:cnsExpr])
findall(map(x -> x != (0.0,0.0),matRng_arr)) |> (x -> cnsExpr_df[x,:cnsExpr] = scaleRng(cnsExpr_df[x,:cnsExpr],matRng_arr[x],coefRng.mat,false))
end
if !isnan(checkRng_fl)
checkExprRng(cnsExpr_df[:,:cnsExpr],checkRng_fl)
end
end
# XXX used to perform scaling of expression array based on range of coefficients provided
function scaleRng(expr_arr::Array{AffExpr,1},rng_arr::Array,rng_tup::Tuple{Float64,Float64}, rhs_boo::Bool)
scaRel_arr = rhs_boo ? union(findall(rng_arr .< rng_tup[1]), findall(rng_arr .> rng_tup[2])) : union(findall(getindex.(rng_arr,1) .< rng_tup[1]), findall(getindex.(rng_arr,2) .> rng_tup[2]))
if !isempty(scaRel_arr)
expr_arr[scaRel_arr] = map(x -> x[1] < rng_tup[1] ? rng_tup[1]/x[1] : rng_tup[2]/x[rhs_boo ? 1 : 2], rng_arr[scaRel_arr]) .* expr_arr[scaRel_arr]
end
return expr_arr
end
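# worked example with assumed values: for a rhs coefficient of 5e3 and an allowed rhs range of (1e-2, 1e2),
# the corresponding expression is multiplied by 1e2/5e3 = 0.02, which moves the rhs to 1e2, the upper bound
# of the allowed range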
# XXX check range of coefficients in expressions within input array
function checkExprRng(expr_arr::Array{AffExpr,1},rngThres_fl::Float64)
# obtains range of coefficients for matrix and rhs
matRng_arr = map(x -> abs.(values(x.terms)) |> (y -> isempty(y) ? (0.0,0.0) : (minimum(y),maximum(y))), expr_arr)
rhs_arr = abs.(getfield.(expr_arr,:constant))
both_arr = max.(getindex.(matRng_arr,2),replace(rhs_arr,0.0 => -Inf)) ./ min.(getindex.(matRng_arr,1),replace(rhs_arr,0.0 => Inf))
# filters rows where range of coefficients is above threshold
aboveThres_arr = findall(both_arr .> rngThres_fl)
for expr in expr_arr[aboveThres_arr]
println(expr)
end
end
# XXX creates an actual jump constraint based on the constraint container provided
function createCns(cnsCont_obj::cnsCont,optModel::Model)
cns_df = cnsCont_obj.data
if cnsCont_obj.sign == :equal
cns_df[!,:cns] = map(x -> @constraint(optModel, x.cnsExpr == 0),eachrow(cns_df))
elseif cnsCont_obj.sign == :greater
cns_df[!,:cns] = map(x -> @constraint(optModel, x.cnsExpr >= 0),eachrow(cns_df))
elseif cnsCont_obj.sign == :smaller
cns_df[!,:cns] = map(x -> @constraint(optModel, x.cnsExpr <= 0),eachrow(cns_df))
end
return select!(cns_df,Not(:cnsExpr))
end
# XXX adjusts resolution of var_df according to information in first two tuples
function checkTechReso!(tRes_tup::Tuple{Int,Int},cBalRes_tup::Tuple{Int,Int},var_df::DataFrame,sets_dic::Dict{Symbol,Tree})
# if dispatch regions for a technology were disaggregated, replace the disaggregated regions with the ones relevant for the carrier
for (idx,dim) in enumerate([:Ts_dis,:R_dis])
if cBalRes_tup[idx] != tRes_tup[idx]
set_sym = Symbol(split(string(dim),"_")[1])
dim_dic = Dict(x => getAncestors(x,sets_dic[set_sym],:int,cBalRes_tup[idx])[end] for x in unique(var_df[!,dim]))
var_df[!,dim] = map(x -> dim_dic[x],var_df[!,dim])
end
end
end
# </editor-fold>
# XXX iteration over all technologies to create variables and constraints
function createTech!(tInt::Int,part::TechPart,prepTech_dic::Dict{Symbol,NamedTuple},parDef_dic::Dict{Symbol,NamedTuple},ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},anyM::anyModel)
cns_dic = Dict{Symbol,cnsCont}()
newHerit_dic = Dict(:lowest => (:Ts_dis => :avg_any, :R_dis => :avg_any),:reference => (:Ts_dis => :up, :R_dis => :up)) # inheritance rules after presetting
ratioVar_dic = Dict(:StIn => ("StIn" => "Conv"), :StOut => ("StOut" => "StIn"), :StSize => ("StSize" => "StIn")) # maps each storage capacity type to the capacity its ratio refers to (e.g. storage input relative to conversion)
tech_str = createFullString(tInt,anyM.sets[:Te])
# presets all dispatch parameters and obtains mode-dependent variables
modeDep_dic = presetDispatchParameter!(part,prepTech_dic,parDef_dic,newHerit_dic,ts_dic,r_dic,anyM)
# creates expansion and capacity variables
createExpCap!(part,prepTech_dic,anyM,ratioVar_dic)
# connect capacity and expansion variables
if part.type != :stock createCapaCns!(part,prepTech_dic,cns_dic) end
# create and control operated capacity variables
if anyM.options.decomm != :none createOprVarCns!(part,cns_dic,anyM) end
produceMessage(anyM.options,anyM.report, 3," - Created all variables and prepared all constraints related to expansion and capacity for technology $(tech_str)")
# create dispatch variables
createDispVar!(part,modeDep_dic,ts_dic,r_dic,anyM)
produceMessage(anyM.options,anyM.report, 3," - Created all dispatch variables for technology $(tech_str)")
# create conversion balance for conversion technologies
if keys(part.carrier) |> (x -> any(map(y -> y in x,(:use,:stIntOut))) && any(map(y -> y in x,(:gen,:stIntIn))))
cns_dic[:convBal] = createConvBal(part,anyM)
produceMessage(anyM.options,anyM.report, 3," - Prepared conversion balance for technology $(tech_str)")
end
# create storage balance for storage technologies
if :stLvl in keys(part.var)
cns_dic[:stBal] = createStBal(part,anyM)
produceMessage(anyM.options,anyM.report, 3," - Prepared storage balance for technology $(tech_str)")
end
# create capacity restrictions
createCapaRestr!(part,ts_dic,r_dic,cns_dic,anyM)
produceMessage(anyM.options,anyM.report, 3," - Prepared capacity restrictions for technology $(tech_str)")
# create ratio constraints
if any(map(x -> occursin("ratioEner",string(x)), collectKeys(keys(part.par))))
createRatioCns!(part,cns_dic,anyM)
produceMessage(anyM.options,anyM.report, 3," - Prepared constraints controlling energy ratios for technology $(tech_str)")
end
# all constraints are scaled and then written into their respective array position
foreach(x -> scaleCnsExpr!(x[2].data,anyM.options.coefRng,anyM.options.checkRng), collect(cns_dic))
produceMessage(anyM.options,anyM.report, 2," - Created all variables and prepared constraints for technology $(tech_str)")
return cns_dic
end
# <editor-fold desc= prepare to create expansion and capacity variables"
# XXX sets dimensions for expansion and capacity variables
function prepareTechs!(techSym_arr::Array{Symbol,1},prepVar_dic::Dict{Symbol,Dict{Symbol,NamedTuple}},tsYear_dic::Dict{Int,Int},anyM::anyModel)
for tSym in techSym_arr
prepTech_dic = Dict{Symbol,NamedTuple}()
part = anyM.parts.tech[tSym]
tInt = techInt(tSym,anyM.sets[:Te])
# dimension of expansion and corresponding capacity variables
if part.type != :stock
prepareExpansion!(prepTech_dic, tsYear_dic, part, tInt, anyM)
for expan in collectKeys(keys(prepTech_dic))
prepareCapacity!(part,prepTech_dic,vcat(map(x -> x[!,removeVal(x)],prepTech_dic[expan])...),Symbol(replace(string(expan),"exp" => "capa")),anyM, tech = tInt)
end
end
# check for capacity variables that have to be created, because residual capacities were provided
addResidualCapa!(prepTech_dic, part, tInt, anyM)
# map required capacity constraints
createCapaRestrMap!(tSym, anyM)
# if any capacity variables or residuals were prepared, add these to overall dictionary
if collect(values(prepTech_dic)) |> (z -> any(map(x -> any(.!isempty.(getfield.(z,x))), (:var,:resi))))
prepVar_dic[tSym] = prepTech_dic
# write report, if not all 3 kinds of storage capacities will be created
if map(x -> x in keys(prepTech_dic),(:capaStIn,:capaStOut,:capaStSize)) |> (y -> any(y) && !all(y))
push!(anyM.report,(3,"technology dimensions","storage","in case of $(string(tSym)) information for one storage capacity is missing (capaStIn, capaStOut or capaStSize)"))
end
end
end
end
# XXX dimensions for expansion variables
function prepareExpansion!(prepTech_dic::Dict{Symbol,NamedTuple},tsYear_dic::Dict{Int,Int},part::AbstractModelPart,tInt::Int,anyM::anyModel)
# extract tech info
carGrp_ntup = part.carrier
balLvl_ntup = part.balLvl
defPar_tup = tuple(keys(part.par)...)
tsExp_arr, rExp_arr = [getfield.(getNodesLvl(anyM.sets[x[2]], balLvl_ntup.exp[x[1]]),:idx) for x in enumerate([:Ts,:R])]
tsExpSup_arr = map(x -> getDescendants(x,anyM.sets[:Ts],false,anyM.supTs.lvl) |> (y -> typeof(y) == Array{Int,1} ? y : [y] ), tsExp_arr)
if anyM.options.interCapa != :linear tsExp_arr = map(x -> [minimum(x)],tsExp_arr) end
expDim_arr = vcat(collect(Iterators.product(Iterators.zip(tsExp_arr,tsExpSup_arr),rExp_arr))...)
allMap_df = getindex.(expDim_arr,1) |> (x -> DataFrame(Ts_exp = getindex.(x,1), Ts_expSup = getindex.(x,2), R_exp = getindex.(expDim_arr,2), Te = fill(tInt,length(expDim_arr))))
# prepares expansion dimensions for conversion capacity
if !isempty(intersect((:gen,:use),keys(carGrp_ntup)))
# filters cases where expansion is fixed to zero
convMap_df = removeEntries([filterZero(allMap_df,getLimPar(anyM.parts.lim,:expConvFix, anyM.sets[:Te], tech = tInt),anyM)],allMap_df)
if !isempty(convMap_df) prepTech_dic[:expConv] = (var = addSupTsToExp(convMap_df,part.par,:Conv,tsYear_dic,anyM), ratio = DataFrame(), resi = DataFrame()) end
end
stCar_arr::Array{Int,1} = unique(vcat(collect.(map(x -> getproperty(carGrp_ntup,x),intersect(keys(carGrp_ntup),(:stExtIn,:stExtOut,:stIntIn,:stIntOut))))...))
# prepares expansion dimensions for storage capacity
if !isempty(stCar_arr)
stMap_df = combine(groupby(allMap_df,namesSym(allMap_df)), :Te => (x -> stCar_arr) => :C)
for st in (:StIn, :StOut, :StSize)
remove_arr = Array{DataFrame,1}()
# filters cases where expansion is fixed to zero
if Symbol(:exp,st,:Fix) in defPar_tup
push!(remove_arr,filterZero(stMap_df,getLimPar(anyM.parts.lim,Symbol(:exp,st,:Fix), anyM.sets[:Te], tech = tInt),anyM))
end
remove_arr, ratioTab_df = findStorageRatio(tInt,stMap_df,st, remove_arr, part, :exp, anyM)
specStMap_df = removeEntries(remove_arr,stMap_df)
if !(isempty(specStMap_df) && isempty(ratioTab_df))
prepTech_dic[Symbol(:exp,st)] = (var = addSupTsToExp(specStMap_df,part.par,st,tsYear_dic,anyM), ratio = addSupTsToExp(ratioTab_df,part.par,st,tsYear_dic,anyM), resi = DataFrame())
end
end
end
end
# XXX dimensions for capacity variables
function prepareCapacity!(part::AbstractModelPart,prep_dic::Dict{Symbol,NamedTuple},exp_df::DataFrame,capaVar::Symbol,anyM::anyModel; tech::Int = 0)
# XXX initialize assignments and data
defPar_tup = tuple(keys(part.par)...)
techType_sym = :type in fieldnames(typeof(part)) ? part.type : :mature
capaVar_df = expandExpToCapa(exp_df)
# for mature technologies the expansion period is irrelevant, so entries are collapsed and Ts_expSup is set to zero
if techType_sym == :mature
select!(capaVar_df,Not(:Ts_expSup))
capaVar_df = unique(capaVar_df)
capaVar_df[!,:Ts_expSup] .= 0
end
# filters cases where capacity is fixed to zero
varFix_sym = Symbol(capaVar,:Fix)
if varFix_sym in defPar_tup
capaVar_df = removeEntries([filterZero(capaVar_df,getLimPar(anyM.parts.lim,Symbol(capaVar,:Fix),anyM.sets[:Te], tech = tech),anyM)],capaVar_df)
end
# for exchange capacities add column to indicate these values are symmetric
if capaVar == :capaExc capaVar_df[!,:dir] .= false; select!(capaVar_df,Not(:Ts_expSup)) end
prep_dic[capaVar] = (var = orderDf(capaVar_df), ratio = DataFrame(), resi = DataFrame())
end
# XXX checks if a storage capacity is defined via a ratio somewhere
function findStorageRatio(t_int::Int,find_df::DataFrame,st_sym::Symbol,remove_arr::Array{DataFrame,1},part::AbstractModelPart,kind_sym::Symbol,anyM::anyModel)
expTypeRatio_dic = Dict(:StIn => :stInToConv, :StOut => :stOutToStIn, :StSize => :sizeToStIn)
strRatio_dic = Dict(:StIn => "storage input", :StOut => "storage output", :StSize => "storage size")
strLim_dic = Dict(:Up => "upper limits", :Low => "lower limits", :Fix => "fixed limits",:Resi => "residual capacities")
ratio_sym = expTypeRatio_dic[st_sym]
if ratio_sym in tuple(keys(part.par)...)
ratioTab_df = filter(r -> r.ratio != 0, matchSetParameter(find_df,part.par[ratio_sym],anyM.sets, newCol = :ratio))
push!(remove_arr, ratioTab_df[!,Not(:ratio)])
else
ratioTab_df = filter(x -> false,find_df)
end
# writes a report, if limits (upper/lower/fixed) on the storage expansion were ignored due to the ratios provided
if !isempty(ratioTab_df) && (kind_sym == :exp || (kind_sym == :capa && part.type == :stock) )
for limPar in intersect(keys(part.par),map(x -> Symbol(kind_sym,st_sym,x),kind_sym == :exp ? [:Up,:Low,:Fix] : [:Up,:Low,:Fix,:Resi]))
lim_obj = getLimPar(anyM.parts.lim,limPar,anyM.sets[:Te], tech = t_int)
if !(isdefined(lim_obj,:name)) continue end
both_df = innerjoin(ratioTab_df,lim_obj.data, on = intersect(intCol(ratioTab_df),intCol(lim_obj.data)))
if !isempty(both_df)
push!(anyM.report,(1,:variable,:expansion,"for $(join(part.name, " < ")) $(strLim_dic[Symbol(split(string(limPar),string(st_sym))[end])]) for $(strRatio_dic[st_sym]) were ignored since a conversion/storage input ratio was provided"))
end
end
end
return remove_arr, ratioTab_df
end
# </editor-fold>
# <editor-fold desc= create technology related variables"
# XXX create expansion and capacity variables
function createExpCap!(part::AbstractModelPart,prep_dic::Dict{Symbol,NamedTuple},anyM::anyModel,ratioVar_dic::Dict{Symbol,Pair{String,String}} = Dict{Symbol,Pair{String,String}}())
for expVar in sort(collectKeys(keys(prep_dic)))
varMap_tup = prep_dic[expVar]
# create dataframe of capacity or expansion variables by creating the required capacity variables and joining them with pure residual values
var_df = createVar(varMap_tup.var,string(expVar),anyM.options.bound.capa,anyM.optModel,anyM.lock,anyM.sets, scaFac = anyM.options.scaFac.capa)
if !isempty(varMap_tup.resi)
if expVar == :capaExc # flips and repeats entries for directed exchange variables before moving on
var_df = filter(r -> r.dir,var_df) |> (x -> vcat(filter(r -> !r.dir,var_df),vcat(x,rename(x,replace(namesSym(x),:R_to => :R_from, :R_from => :R_to)))))
end
join_arr = intCol(var_df,:dir)
var_df = combine(x -> (var = x.var + x.var_1,), groupby(joinMissing(var_df,varMap_tup.resi[!,vcat(:var,join_arr...)], join_arr, :outer, Dict(:var => AffExpr(),:var_1 => AffExpr()),true),intCol(var_df,:dir)))
end
# expands table of expansion variables to superordinate timesteps and modifies expansion variable accordingly
if occursin("exp",string(expVar)) && !isempty(var_df)
noExpCol_arr = intCol(var_df)
allDf_arr = map(eachrow(var_df)) do x
l_int = length(x.Ts_disSup)
rem_df = repeat(DataFrame(x[noExpCol_arr]), inner = l_int, outer = 1)
ext_df = DataFrame(Ts_expSup = x.Ts_expSup, Ts_disSup = x.Ts_disSup, var = x.var ./ fill(l_int,l_int) )
return hcat(rem_df,ext_df)
end
var_df = vcat(allDf_arr...)
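# e.g. a row whose Ts_disSup covers three superordinate timesteps is expanded into three rows above,
# each carrying one third of the original expansion variable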
end
# check for ratios for expansion or (in case of stock technologies) capacities
if !isempty(varMap_tup.ratio)
ratioVar_sym = Symbol(replace(string(expVar),ratioVar_dic[Symbol(replace(replace(string(expVar),"exp" => ""),"capa" => ""))]))
if occursin("exp",string(expVar)) # ratios controlling expansion
noExpCol_arr = vcat(:ratio,intCol(var_df)...)
ratio_arr = map(eachrow(varMap_tup.ratio)) do x
l_int = length(x.Ts_expSup)
rem_df = repeat(DataFrame(x[noExpCol_arr]), inner = l_int, outer = 1)
ext_df = DataFrame(Ts_expSup = x.Ts_expSup)
return hcat(rem_df,ext_df)
end
preRatio_df = vcat(ratio_arr...)
else # ratios controlling stock capacities
preRatio_df = varMap_tup.ratio
end
join_arr = intCol(part.var[ratioVar_sym])
# join ratios and corresponding variables
select_arr = expVar in [:capaStIn,:expStIn] ? [:var,:ratio,:Ts_disSup,:C] : [:var,:ratio,:Ts_disSup]
ratio_df = select(innerjoin(preRatio_df,part.var[ratioVar_sym]; on = join_arr),unique(vcat(join_arr,select_arr)))
ratio_df[!,:var] = ratio_df[!,:var] .* ratio_df[!,:ratio]
var_df = ratio_df[!,Not(:ratio)] |> (x -> isempty(var_df) ? x : vcat(x,antijoin(select(var_df,names(x)),x, on = join_arr)))
end
if !isempty(var_df) part.var[expVar] = orderDf(var_df) end
end
end
# XXX create all dispatch variables
function createDispVar!(part::TechPart,modeDep_dic::Dict{Symbol,DataFrame},ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},anyM::anyModel)
# assign relevant availability parameters to each type of variable
relAva_dic = Dict(:gen => (:avaConv,), :use => (:avaConv,), :stIntIn => (:avaConv, :avaStIn), :stIntOut => (:avaConv, :avaStOut), :stExtIn => (:avaStIn,), :stExtOut => (:avaStOut,), :stLvl => (:avaStSize,))
for va in collectKeys(keys(part.carrier)) |> (x -> :capaStIn in keys(part.var) ? [:stLvl,x...] : x) # loop over all relevant kind of variables
conv_boo = va in (:gen,:use)
# obtains relevant capacity variable
if conv_boo
basis_df = copy(part.var[:capaConv])[!,Not(:var)]
basis_df[!,:C] .= [collect(getfield(part.carrier,va))]
basis_df = orderDf(flatten(basis_df,:C))
else
lock(anyM.lock)
basis_df = orderDf(copy(part.var[:capaStIn])[!,Not(:var)])
unlock(anyM.lock)
# filter carriers that can be actively stored, although they have descendants
intC_arr = union(collect(part.actSt),map(y -> part.carrier[y],filter(x -> x in keys(part.carrier),[:stIntIn,:stIntOut])) |> (y -> isempty(y) ? Int[] : union(y...)))
basis_df = replCarLeafs(basis_df,anyM.sets[:C],noLeaf = intC_arr)
# filter entries that are already descendants of carrier being actively stored
unique(vcat(map(x -> filter(y -> x != y,getDescendants(x,anyM.sets[:C],true)),unique(basis_df[!,:C]))...)) |> (z -> filter!(x -> !(x.C in z) || x.C in intC_arr,basis_df))
end
# adds temporal and spatial level to dataframe
cToLvl_dic = Dict(x => (anyM.cInfo[x].tsDis, part.disAgg ? part.balLvl.exp[2] : anyM.cInfo[x].rDis) for x in unique(basis_df[!,:C]))
basis_df[!,:lvlTs] = map(x -> cToLvl_dic[x][1],basis_df[!,:C])
basis_df[!,:lvlR] = map(x -> cToLvl_dic[x][2],basis_df[!,:C])
allVar_df = orderDf(expandExpToDisp(basis_df,ts_dic,r_dic,true))
# add mode dependencies
modeDep_df = copy(modeDep_dic[va])
modeDep_df[!,:M] .= isempty(modeDep_df) ? [0] : [collect(part.modes)]
modeDep_df = flatten(modeDep_df,:M)
allVar_df = joinMissing(allVar_df,modeDep_df,namesSym(modeDep_dic[va]),:left,Dict(:M => 0))
# filter entries where availability is zero
for avaPar in relAva_dic[va]
if !isempty(part.par[avaPar].data) && 0.0 in part.par[avaPar].data[!,:val]
allVar_df = filter(x -> x.val != 0.0, matchSetParameter(allVar_df,part.par[avaPar],anyM.sets))[!,Not(:val)]
end
end
# computes value to scale up the global limit on dispatch variables, which is provided per hour, and creates the variables
if conv_boo
scaFac_fl = anyM.options.scaFac.dispConv
else
scaFac_fl = anyM.options.scaFac.dispSt
end
part.var[va] = orderDf(createVar(allVar_df,string(va), getUpBound(allVar_df,anyM.options.bound.disp / scaFac_fl,anyM.supTs,anyM.sets[:Ts]),anyM.optModel,anyM.lock,anyM.sets, scaFac = scaFac_fl))
end
end
# XXX create variables and constraints regarding operated variables
function createOprVarCns!(part::AbstractModelPart,cns_dic::Dict{Symbol,cnsCont},anyM::anyModel)
for capaVar in filter(x -> occursin("capa",string(x)),keys(part.var))
oprVar_sym = string(capaVar) |> (x -> Symbol(:opr,uppercase(x[1]),x[2:end]))
# XXX create operated variable
var_df = copy(part.var[capaVar])[!,Not(:var)]
var_df = createVar(var_df,string(oprVar_sym),NaN,anyM.optModel,anyM.lock,anyM.sets, scaFac = anyM.options.scaFac.oprCapa)
part.var[oprVar_sym] = orderDf(var_df)
# XXX create constraint to connect operated and installed capacity
var_df[!,:cnsExpr] = map(x -> x[1] - x[2],zip(var_df[!,:var],part.var[capaVar][!,:var]))
cns_dic[oprVar_sym] = cnsCont(select(var_df,Not(:var)),:smaller)
# XXX create constraint to prevent re-commissioning of capacity once decommissioned
if anyM.options.decomm == :decomm
# add previous period and its capacity variable to table
prevTs_dic = Dict(x => anyM.supTs.step[findall(x .== anyM.supTs.step)[1]]-1 for x in anyM.supTs.step[2:end])
select!(var_df, Not(:cnsExpr))
cns_df = rename(filter(r -> r.Ts_disSup != anyM.supTs.step[1],var_df),:var => :oprNow)
cns_df[!,:Ts_disSupPrev] = map(x -> prevTs_dic[x] ,cns_df[!,:Ts_disSup])
cns_df = rename(innerjoin(cns_df,var_df; on = intCol(var_df,:dir) |> (x -> Pair.(replace(x,:Ts_disSup => :Ts_disSupPrev),x))),:var => :oprPrev)
# add expansion variable to dataframe
if (!(:type in fieldnames(typeof(part))) || part.type != :stock) && Symbol(replace(string(capaVar),"capa" => "exp")) in keys(part.var)
exp_df = part.var[Symbol(replace(string(capaVar),"capa" => "exp"))][!,Not(:Ts_disSup)]
join_arr = filter(x -> x != :Ts_expSup,intCol(var_df))
cns_df = joinMissing(cns_df,exp_df, Pair.(join_arr,replace(join_arr,:Ts_disSup => :Ts_expSup)),:left,Dict(:var => AffExpr(),:Ts_exp => 0))
cns_df = rename(cns_df[!,Not(:Ts_exp)],:var => :expNow)
else
cns_df[!,:expNow] .= AffExpr()
end
# add residual capacities of current and previous period
joinResi_arr = filter(x -> x != :Ts_disSupPrev, intCol(cns_df,:dir))
cns_df = rename(innerjoin(cns_df,part.var[capaVar],on = joinResi_arr),:var => :resiNow)
cns_df[!,:resiNow] = getfield.(cns_df[!,:resiNow],:constant)
cns_df = rename(joinMissing(cns_df, part.var[capaVar], Pair.(replace(joinResi_arr,:Ts_disSup => :Ts_disSupPrev),joinResi_arr),:left, Dict(:resiNow => AffExpr(),:var => AffExpr())),:var => :resiPrev)
cns_df[!,:resiPrev] = getfield.(cns_df[!,:resiPrev],:constant)
# create actual constraint information
cns_df[!,:cnsExpr] = map(x -> - x.oprNow + x.oprPrev + x.expNow + (x.resiNow - x.resiPrev |> (l -> l > 0.0 ? l : 0.0)),eachrow(cns_df))
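# the resulting constraint reads oprNow <= oprPrev + expNow + max(resiNow - resiPrev, 0): operated capacity
# can only exceed the previous period's level by new expansion or added residual capacity, which prevents
# re-commissioning of capacity that was decommissioned before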
select!(cns_df,Not([:Ts_disSupPrev,:oprNow,:oprPrev,:expNow,:resiNow,:resiPrev]))
cns_dic[string(oprVar_sym) |> (x -> Symbol(:re,uppercase(x[1]),x[2:end]))] = cnsCont(orderDf(cns_df),:greater)
end
end
end
# XXX capacity values for stock technologies
function addResidualCapa!(prepTech_dic::Dict{Symbol,NamedTuple},part::TechPart,tInt::Int,anyM::anyModel)
carGrp_ntup = part.carrier
stCar_arr = unique(vcat(collect.(map(x -> getproperty(carGrp_ntup,x),intersect(keys(carGrp_ntup),(:stExtIn,:stExtOut,:stIntIn,:stIntOut))))...))
defPar_tup = tuple(keys(part.par)...)
# checks conversion capacities for stock data
if !isempty(intersect((:gen,:use),keys(carGrp_ntup)))
permutDim_arr = [getindex.(vcat(collect(Iterators.product(getfield.(getNodesLvl(anyM.sets[:R], part.balLvl.exp[2]),:idx), anyM.supTs.step))...),i) for i in (1,2)]
potCapa_df = DataFrame(Ts_expSup = fill(0,length(permutDim_arr[1])), Ts_disSup = permutDim_arr[2], R_exp = permutDim_arr[1], Te = fill(tInt,length(permutDim_arr[1])))
# filters stock technologies with residual values provided and saves these in dictionary
capaResi_df = checkResiCapa(:capaConv,potCapa_df, part, anyM)
if !isempty(capaResi_df)
mergePrepDic!(:capaConv,prepTech_dic,capaResi_df)
end
end
# checks storage capacities for stock data
if !isempty(stCar_arr)
permutDim_arr = [getindex.(vcat(collect(Iterators.product(getfield.(getNodesLvl(anyM.sets[:R], part.balLvl.exp[2]),:idx), anyM.supTs.step,stCar_arr))...),i) for i in (1,2,3)]
potCapa_df = DataFrame(Ts_expSup = fill(0,length(permutDim_arr[1])), Ts_disSup = permutDim_arr[2], R_exp = permutDim_arr[1], C = permutDim_arr[3], Te = fill(tInt,length(permutDim_arr[1])))
# loops over different type of storage
for st in (:StIn, :StOut, :StSize)
capaResi_df = checkResiCapa(Symbol(:capa,st),potCapa_df, part, anyM)
capa_sym = Symbol(:capa,st)
remove_arr, ratioTab_df = findStorageRatio(tInt,potCapa_df,st, Array{DataFrame,1}(), part, :capa, anyM)
if !isempty(capaResi_df) capaResi_df = removeEntries(remove_arr,capaResi_df) end
if !(isempty(capaResi_df) && isempty(ratioTab_df))
mergePrepDic!(capa_sym,prepTech_dic,capaResi_df,ratioTab_df)
end
end
end
end
# </editor-fold>
# <editor-fold desc= create technology related constraints"
# XXX create conversion balance
function createConvBal(part::TechPart,anyM::anyModel)
cns_df = rename(part.par[:effConv].data,:val => :eff)
sort!(cns_df,sort(intCol(cns_df)))
agg_arr = filter(x -> !(x in (:M, :Te)) && (part.type == :emerging || x != :Ts_expSup), intCol(cns_df))
# defines tuple specifying the dimensions of aggregation later
if part.type == :emerging
srcRes_ntup = part.balLvl |> (x -> (Ts_expSup = anyM.supTs.lvl, Ts_dis = x.ref[1], R_dis = x.ref[2]))
else
srcRes_ntup = part.balLvl |> (x -> (Ts_dis = x.ref[1], R_dis = x.ref[2]))
end
# if modes are specified, gets rows of conversion dataframe where they are relevant and creates different tuples to define grouping dimensions
if :M in namesSym(cns_df)
srcResM_ntup = (; zip(tuple(:M,keys(srcRes_ntup)...),tuple(1,values(srcRes_ntup)...))...)
srcResNoM_ntup = (; zip(tuple(:M,keys(srcRes_ntup)...),tuple(0,values(srcRes_ntup)...))...)
m_arr = findall(0 .!= cns_df[!,:M])
noM_arr = setdiff(1:size(cns_df,1),m_arr)
end
# add variables via aggregation
in_arr = intersect(keys(part.carrier),(:use,:stIntOut))
out_arr = intersect(keys(part.carrier),(:gen,:stIntIn))
for va in union(in_arr,out_arr)
if :M in namesSym(cns_df) # aggregates dispatch variables; if a mode is specified somewhere, mode-dependent and non-mode-dependent balances have to be aggregated separately
cns_df[!,va] .= AffExpr()
cns_df[m_arr,va] = aggUniVar(part.var[va], select(cns_df[m_arr,:],intCol(cns_df)), [:M,agg_arr...], srcResM_ntup, anyM.sets)
cns_df[noM_arr,va] = aggUniVar(part.var[va], select(cns_df[noM_arr,:],intCol(cns_df)), [:M,agg_arr...], srcResNoM_ntup, anyM.sets)
else
cns_df[!,va] = aggUniVar(part.var[va], select(cns_df,intCol(cns_df)), agg_arr, srcRes_ntup, anyM.sets)
end
end
# aggregate in and out variables respectively
cns_df[!,:in] = map(x -> sum(x),eachrow(cns_df[!,in_arr]))
cns_df[!,:out] = map(x -> sum(x),eachrow(cns_df[!,out_arr]))
select!(cns_df,Not(vcat(in_arr,out_arr))) # use select! so the intermediate in/out columns are actually dropped
# create actual constraint
cns_df[!,:cnsExpr] = map(x -> x.in*x.eff - x.out,eachrow(cns_df))
return cnsCont(orderDf(cns_df[!,[intCol(cns_df)...,:cnsExpr]]),:equal)
end
# XXX create storage balance
function createStBal(part::TechPart,anyM::anyModel)
# XXX get variables for current storage level
cns_df = rename(part.var[:stLvl],:var => :stLvl)
cnsDim_arr = filter(x -> x != :Ts_disSup, intCol(cns_df))
# join variables for previous storage level
tsChildren_dic = Dict((x,y) => getDescendants(x,anyM.sets[:Ts],false,y) for x in anyM.supTs.step, y in unique(map(x -> getfield(anyM.sets[:Ts].nodes[x],:lvl), cns_df[!,:Ts_dis])))
firstLastTs_dic = Dict(minimum(tsChildren_dic[z]) => maximum(tsChildren_dic[z]) for z in keys(tsChildren_dic))
firstTs_arr = collect(keys(firstLastTs_dic))
cns_df[!,:Ts_disPrev] = map(x -> x in firstTs_arr ? firstLastTs_dic[x] : x - 1, cns_df[!,:Ts_dis])
cns_df = rename(joinMissing(cns_df,part.var[:stLvl], intCol(part.var[:stLvl]) |> (x -> Pair.(replace(x,:Ts_dis => :Ts_disPrev),x)), :left, Dict(:var => AffExpr())),:var => :stLvlPrev)
# determines dimensions for aggregating dispatch variables
agg_arr = filter(x -> !(x in (:M, :Te)) && (part.type == :emerging || x != :Ts_expSup), cnsDim_arr)
# obtain all different carriers of level variable and create array to store the respective level constraint data
uniC_arr = unique(cns_df[!,:C])
cCns_arr = Array{DataFrame}(undef,length(uniC_arr))
for (idx,c) in enumerate(uniC_arr)
# get constraints relevant for carrier and find rows where mode is specified
cnsC_df = filter(x -> x.C == c,cns_df)
sort!(cnsC_df,sort(intCol(cnsC_df)))
m_arr = findall(0 .!= cnsC_df[!,:M])
noM_arr = setdiff(1:size(cnsC_df,1),m_arr)
if part.type == :emerging
srcRes_ntup = anyM.cInfo[c] |> (x -> (Ts_expSup = anyM.supTs.lvl, Ts_dis = x.tsDis, R_dis = x.rDis, C = anyM.sets[:C].nodes[c].lvl, M = 1))
else
srcRes_ntup = anyM.cInfo[c] |> (x -> (Ts_dis = x.tsDis, R_dis = x.rDis, C = anyM.sets[:C].nodes[c].lvl, M = 1))
end
# XXX joins in and out dispatch variables and adds efficiency to them (hence efficiency can be specific to the different carriers being stored in and out)
for typ in (:in,:out)
typVar_df = copy(cns_df[!,cnsDim_arr])
# create array of all dispatch variables
allType_arr = intersect(keys(part.carrier),typ == :in ? (:stExtIn,:stIntIn) : (:stExtOut,:stIntOut))
effPar_sym = typ == :in ? :effStIn : :effStOut
# adds dispatch variables
typExpr_arr = map(allType_arr) do va
typVar_df = filter(x -> x.C == c,part.par[effPar_sym].data) |> (x -> innerjoin(part.var[va],x; on = intCol(x)))
if typ == :in
typVar_df[!,:var] = typVar_df[!,:var] .* typVar_df[!,:val]
else
typVar_df[!,:var] = typVar_df[!,:var] ./ typVar_df[!,:val]
end
return typVar_df[!,Not(:val)]
end
# adds dispatch variable to constraint dataframe, mode dependent and non-mode dependent balances have to be aggregated separately
dispVar_df = vcat(typExpr_arr...)
cnsC_df[!,typ] .= AffExpr()
if isempty(dispVar_df) continue end
cnsC_df[m_arr,typ] = aggUniVar(dispVar_df, select(cnsC_df[m_arr,:],intCol(cnsC_df)), [:M,agg_arr...], (M = 1,), anyM.sets)
cnsC_df[noM_arr,typ] = aggUniVar(dispVar_df, select(cnsC_df[noM_arr,:],intCol(cnsC_df)), [:M,agg_arr...], (M = 0,), anyM.sets)
end
# XXX adds further parameters that depend on the carrier specified in storage level (superordinate or the same as dispatch carriers)
sca_arr = getResize(cnsC_df,anyM.sets[:Ts],anyM.supTs)
# add discharge parameter, if defined
if :stDis in keys(part.par)
part.par[:stDis].defVal = 0.0
cnsC_df = matchSetParameter(cnsC_df,part.par[:stDis],anyM.sets)
cnsC_df[!,:stDis] = (1 .- cnsC_df[!,:val]) .^ sca_arr
select!(cnsC_df,Not(:val))
else
cnsC_df[!,:stDis] .= 1.0
end
# add inflow parameter, if defined
if :stInflow in keys(part.par)
part.par[:stInflow].defVal = 0.0
cnsC_df = matchSetParameter(cnsC_df,part.par[:stInflow],anyM.sets, newCol = :stInflow)
if !isempty(part.modes)
cnsC_df[!,:stInflow] = cnsC_df[!,:stInflow] ./ length(part.modes) .* sca_arr
end
else
cnsC_df[!,:stInflow] .= 0.0
end
# XXX create final equation
cnsC_df[!,:cnsExpr] = map(x -> x.stLvlPrev * x.stDis + x.stInflow + x.in - x.out - x.stLvl,eachrow(cnsC_df))
cCns_arr[idx] = cnsC_df
end
cns_df = vcat(cCns_arr...)
return cnsCont(orderDf(cns_df[!,[cnsDim_arr...,:cnsExpr]]),:equal)
end
# XXX create all capacity restrictions for technology
function createCapaRestr!(part::TechPart,ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}},cns_dic::Dict{Symbol,cnsCont},anyM::anyModel)
cnstrType_dic = Dict(:out => (dis = (:gen, :stIntIn), capa = :Conv), :in => (dis = (:use,:stIntOut), capa = :Conv),
:stIn => (dis = (:stExtIn, :stIntIn), capa = :StIn), :stOut => (dis = (:stExtOut, :stIntOut), capa = :StOut), :stSize => (dis = (:stLvl,), capa = :StSize))
capa_sym = anyM.options.decomm != :none ? :oprCapa : :capa
capaRestr_gdf = groupby(part.capaRestr,:cnstrType)
# loop over groups of capacity restrictions (like out, stIn, ...)
for restrGrp in capaRestr_gdf
# relevant capacity variables
type_sym = Symbol(restrGrp.cnstrType[1])
info_ntup = cnstrType_dic[type_sym]
allCns_arr = Array{DataFrame}(undef,size(restrGrp,1))
# loop over individual constraints
for (idx,restr) in enumerate(eachrow(restrGrp))
allCns_arr[idx] = createRestr(part,copy(part.var[Symbol(capa_sym,info_ntup.capa)]),restr,type_sym,info_ntup,ts_dic,r_dic,anyM.sets,anyM.supTs)
end
allCns_df = vcat(allCns_arr...)
# add all constraints to part
allCns_df[!,:cnsExpr] = map(x -> x.disp - x.capa,eachrow(allCns_df))
cns_dic[Symbol(type_sym,:Restr)] = cnsCont(orderDf(allCns_df[!,[intCol(allCns_df)...,:cnsExpr]]),:smaller)
end
end
# XXX sub-function to create restriction
function createRestr(part::TechPart, capaVar_df::DataFrame, restr::DataFrameRow, type_sym::Symbol, info_ntup::NamedTuple,
ts_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}}, r_dic::Dict{Tuple{Int64,Int64},Array{Int64,1}}, sets_dic::Dict{Symbol,Tree}, supTs_ntup::NamedTuple)
conv_boo = type_sym in (:out,:in)
dim_arr = conv_boo ? [:Ts_expSup,:Ts_dis,:R_dis,:Te] : [:Ts_expSup,:Ts_dis,:R_dis,:C,:Te]
agg_arr = [:Ts_expSup,:Ts_dis,:R_dis] |> (x -> filter(x -> part.type == :emerging || x != :Ts_expSup,x))
# get relevant carriers for conversion and storage variables
relConv_arr = restr.car
intC_arr = union(collect(part.actSt),map(y -> part.carrier[y],filter(x -> x in keys(part.carrier),[:stIntIn,:stIntOut])) |> (y -> isempty(y) ? Int[] : union(y...)))
relSt_arr = filter(y -> isempty(sets_dic[:C].nodes[y].down) || y in intC_arr, [restr.car[1],getDescendants(restr.car[1],sets_dic[:C],true)...])
# determines dimensions for aggregating dispatch variables
capaVar_df[!,:lvlTs] .= restr.lvlTs
capaVar_df[!,:lvlR] .= restr.lvlR
# rescale capacity variables (except for stSize, since these are already provided in energy units)
if type_sym != :stSize
capaVar_df[!,:var] = capaVar_df[!,:var] .* map(x -> supTs_ntup.sca[(x.Ts_disSup,x.lvlTs)], eachrow(capaVar_df[!,[:Ts_disSup,:lvlTs]]))
end
# replaces expansion with dispatch regions and aggregates capacity variables accordingly if required
grpCapaVar_df = copy(select(capaVar_df,Not(:var))) |> (y -> unique(combine(x -> (R_dis = r_dic[(x.R_exp[1],x.lvlR[1])],),groupby(y,namesSym(y)))[!,Not([:R_exp,:lvlR])]))
resExp_ntup = :Ts_expSup in agg_arr ? (Ts_expSup = part.balLvl.exp[1], Ts_disSup = supTs_ntup.lvl, R_dis = restr.lvlR, scr = 1) : (Ts_disSup = supTs_ntup.lvl, R_dis = restr.lvlR, scr = 1)
sort!(grpCapaVar_df,sort(intCol(grpCapaVar_df)))
grpCapaVar_df[!,:var] = aggUniVar(rename(capaVar_df,:R_exp => :R_dis),grpCapaVar_df,replace(agg_arr,:Ts_dis => :Ts_disSup),resExp_ntup,sets_dic)
# expand capacity to dimension of dispatch
capaDim_df = combine(x -> (Ts_dis = ts_dic[(x.Ts_disSup[1],x.lvlTs[1])],), groupby(grpCapaVar_df[!,Not(:var)],namesSym(grpCapaVar_df[!,Not(:var)])))[!,Not(:lvlTs)]
sort!(capaDim_df,sort(intCol(capaDim_df)))
select!(grpCapaVar_df,Not(:lvlTs))
# obtain all relevant dispatch variables
dispVar_arr = type_sym != :stSize ? intersect(keys(part.carrier),info_ntup.dis) : collect(info_ntup.dis)
resDis_ntup = :Ts_expSup in agg_arr ? (Ts_expSup = part.balLvl.exp[1], Ts_dis = restr.lvlTs, R_dis = restr.lvlR) : (Ts_dis = restr.lvlTs, R_dis = restr.lvlR)
for va in dispVar_arr
# filter dispatch variables not belonging to relevant carrier
if va in (:gen,:use)
relC_arr = relConv_arr
else
relC_arr = relSt_arr
end
allVar_df = filter(r -> r.C in relC_arr, part.var[va])[!,Not(:Ts_disSup)]
# get availability parameter (and, for constraints of type out, also efficiency, since capacities refer to input capacity) and add to dispatch variable
ava_arr = matchSetParameter(allVar_df,part.par[Symbol(:ava,info_ntup.capa)],sets_dic, newCol = :ava)[!,:ava]
if type_sym in (:out,:stOut)
ava_arr = matchSetParameter(allVar_df,part.par[type_sym == :out ? :effConv : :effStOut],sets_dic,newCol = :eff)[!,:eff] .* ava_arr
end
allVar_df[!,:var] = allVar_df[!,:var] ./ ava_arr
# aggregate dispatch variables
capaDim_df[!,va] = aggUniVar(allVar_df, select(capaDim_df,intCol(capaDim_df)), agg_arr, resDis_ntup, sets_dic)
end
# sum dispatch variables and filter cases without any
capaDim_df[!,:disp] = map(x -> sum(x),eachrow(capaDim_df[!,dispVar_arr]))
select!(capaDim_df,Not(dispVar_arr))
capaDim_df = filter(x -> !(x.disp == AffExpr()),capaDim_df)
# join capacity and dispatch variables to create final constraint
grpCapaVar_df = combine(groupby(grpCapaVar_df,replace(dim_arr,:Ts_dis => :Ts_disSup)), :var => (x -> sum(x)) => :capa)
cns_df = innerjoin(capaDim_df,grpCapaVar_df,on = intCol(grpCapaVar_df))
return cns_df
end
# XXX create ratio constraints (Fix, Low and Up for use and gen)
function createRatioCns!(part::TechPart,cns_dic::Dict{Symbol,cnsCont},anyM::anyModel)
# collects all tables for equations
for type in intersect(keys(part.carrier),(:use, :gen)), limit in (:Up, :Low, :Fix)
typeUp_sym = string(type) |> (x -> Symbol(uppercase(x[1]),x[2:end]))
ratioName_sym = Symbol(:ratioEner,typeUp_sym,limit)
if !(ratioName_sym in keys(part.par)) continue end
# obtain variable name and parameter data
cns_df = rename(copy(part.par[ratioName_sym].data),:val => :ratio)
sort!(cns_df,sort(intCol(cns_df)))
# joins parameter data with ratio controlled variable and all variables
agg_arr = filter(r -> r != :Te && (part.type == :emerging || r != :Ts_expSup), intCol(cns_df))
if part.type == :emerging
srcRes_ntup = (anyM.sets[:Ts].nodes[cns_df[1,:Ts_dis]].lvl, anyM.sets[:R].nodes[cns_df[1,:R_dis]].lvl) |> (x -> (Ts_expSup = anyM.supTs.lvl, Ts_dis = x[1], R_dis = x[2]))
else
srcRes_ntup = (anyM.sets[:Ts].nodes[cns_df[1,:Ts_dis]].lvl, anyM.sets[:R].nodes[cns_df[1,:R_dis]].lvl) |> (x -> (Ts_dis = x[1], R_dis = x[2]))
end
if :M in namesSym(cns_df) # if a mode is specified somewhere, mode dependent and non-mode dependent balances have to be aggregated separately
# find cases where ratio constraint is mode dependant
srcResM_ntup = (; zip(tuple(:M,keys(srcRes_ntup)...),tuple(1,values(srcRes_ntup)...))...)
srcResNoM_ntup = (; zip(tuple(:M,keys(srcRes_ntup)...),tuple(0,values(srcRes_ntup)...))...)
m_arr = findall(0 .!= cns_df[!,:M])
noM_arr = setdiff(1:size(cns_df,1),m_arr)
# aggregate variables with defined ratio
cns_df[!,:ratioVar] .= AffExpr()
cns_df[m_arr,:ratioVar] = aggUniVar(part.var[type], select(cns_df[m_arr,:],intCol(cns_df)), agg_arr, srcResM_ntup, anyM.sets)
cns_df[noM_arr,:ratioVar] = aggUniVar(part.var[type], select(cns_df[noM_arr,:],intCol(cns_df)), agg_arr, srcResNoM_ntup, anyM.sets)
# aggregate all variables
cns_df[!,:allVar] .= AffExpr()
cns_df[m_arr,:allVar] = aggUniVar(part.var[type], select(cns_df[m_arr,:],intCol(cns_df)), filter(x -> x != :C,agg_arr), srcResM_ntup, anyM.sets)
cns_df[noM_arr,:allVar] = aggUniVar(part.var[type], select(cns_df[noM_arr,:],intCol(cns_df)), filter(x -> x != :C,agg_arr), srcResNoM_ntup, anyM.sets)
else
cns_df[!,:ratioVar] = aggUniVar(part.var[type], select(cns_df,intCol(cns_df)), agg_arr, srcRes_ntup, anyM.sets)
cns_df[!,:allVar] = aggUniVar(part.var[type], select(cns_df,intCol(cns_df)), filter(x -> x != :C,agg_arr), srcRes_ntup, anyM.sets)
end
# create corresponding constraint
if occursin("Fix",string(limit))
sign_sym = :equal
elseif occursin("Low",string(limit))
sign_sym = :greater
else
sign_sym = :smaller
end
cns_df[!,:cnsExpr] = map(x -> x.ratioVar - x.allVar * x.ratio, eachrow(cns_df))
cns_dic[ratioName_sym] = cnsCont(orderDf(cns_df[!,[intCol(cns_df)...,:cnsExpr]]),sign_sym)
end
end
# </editor-fold>
using AnyMOD, Cbc, Test
@testset "run_rest" begin
# create model
anyM = anyModel("testModel","testModel", objName = "test", shortExp = 10, checkRng = 1e8)
createOptModel!(anyM)
setObjective!(:costs,anyM)
# solve model
set_optimizer(anyM.optModel,Cbc.Optimizer)
optimize!(anyM.optModel)
# write data reports
reportResults(:summary,anyM, rtnOpt = (:raw,:rawDf,:csv,:csvDf))
reportResults(:exchange,anyM, rtnOpt = (:raw,:rawDf,:csv,:csvDf))
reportResults(:costs,anyM, rtnOpt = (:raw,:rawDf,:csv,:csvDf))
reportTimeSeries(:electricity, anyM, rtnOpt = (:raw,:rawDf,:csv,:csvDf))
reportTimeSeries(:electricity, anyM, rtnOpt = (:raw,:rawDf,:csv,:csvDf), mergeVar = false)
# create plots
plotEnergyFlow(:sankey,anyM, rmvNode = ("electricity; export","import"))
plotTree(:region,anyM)
plotEnergyFlow(:graph,anyM)
moveNode!(anyM,("coal",[0.1,0.1]))
@test length(anyM.report) == 33
@test round(objective_value(anyM.optModel),digits = 1) == 137566.6
# create an additional model containing several errors
anyM = anyModel(["testModel","errorTest"],"testModel", objName = "test", shortExp = 10, checkRng = 1e8)
err = false
try
createOptModel!(anyM)
catch
err = true
end
@test err
@test length(anyM.report) == 46
end
[](https://travis-ci.org/leonardgoeke/AnyMOD.jl)
[](https://codecov.io/gh/leonardgoeke/AnyMOD.jl)
[](https://gitter.im/AnyMOD-jl/community)
[](https://opensource.org/licenses/MIT)
<img src="docs/src/assets/schriftzug_plus_logo.png" alt="logo" width="950px"/>
[AnyMOD.jl](https://github.com/leonardgoeke/AnyMOD.jl) is a [Julia](https://julialang.org/) framework for creating large-scale energy system models with multiple periods of capacity expansion formulated as linear optimization problems. It was developed to address the challenges of modelling high levels of intermittent generation and sectoral integration. A comprehensive description of the framework's graph-based methodology can be found in the working paper [Göke (2020), AnyMOD - A graph-based framework for energy system modelling with high levels of renewables and sector integration](https://arxiv.org/abs/2004.10184). The software itself is separately introduced in [Göke (2020), AnyMOD.jl: A Julia package for creating energy system models](https://arxiv.org/abs/2011.00895).
Any questions, suggestions, feature requests, or contributions are welcome. To get in touch, open an [issue](https://github.com/leonardgoeke/AnyMOD.jl/issues) or use the [chat](https://gitter.im/AnyMOD-jl/community).
## Documentation
- [**STABLE**](https://leonardgoeke.github.io/AnyMOD.jl/stable/) — **last thoroughly tested and fully documented version**
- [**DEV**](https://leonardgoeke.github.io/AnyMOD.jl/dev/) — *in-development version of the tool*
## Acknowledgement
The development of AnyMOD is receiving funding from the European Union’s Horizon 2020 research and innovation programme within the [OSMOSE project](https://www.osmose-h2020.eu/) under grant agreement No 773406.
```@meta
CurrentModule = AnyMOD
```
# API
```@index
Pages = ["api.md"]
Modules = [AnyMOD]
```
## Functions
```@docs
createOptModel!
setObjective!
reportResults
reportTimeSeries
printObject
printDuals
printIIS
plotTree
plotEnergyFlow
moveNode!
```
## Types
```@docs
anyModel
TechPart
OthPart
Node
Tree
ParElement
graInfo
```
```@raw html
<style>
table.tabelle td {
border-left: 1px solid;
border-color: #dbdbdb;
}
table.tabelle td:first-child {
border-right: 2.5px solid;
border-color: #dbdbdb;
border-left: none;
}
ul.liste {
list-style-position: inside;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Constraints
```@raw html
<p class="norm">
In the following, all constraints used in AnyMOD are listed. Information includes the name used throughout the model, its dimensions, and a stylized formulation of the constraint itself (see <a href="https://arxiv.org/abs/2004.10184">Göke (2020)</a> for details). Within these constraints, variables are printed in bold. In addition, the parameters, variables, and the model part associated with the constraints are listed.
</p>
<p class="norm">
To increase performance, AnyMOD stores constraints within DataFrames instead of using JuMPs native containers. Each dimension is represented by a column and integers in these columns relate to nodes within the hierarchical trees of sets (see <a href="../data/#printObject"><code>printObject</code></a> on how to export these in a readable format). An additional <code>cns</code> column stores the corresponding constraint. Note that final constraints will look unintuitive, because they are <a href="../performance/#Scaling">scaled</a> to increase performance and converted to standard form by JuMP.
</p>
<p class="norm">
New constraints beyond those listed here can freely be added to a model by using standard JuMP commands.
</p>
```
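For example, a custom constraint can be built from the DataFrames holding the variables using plain JuMP syntax. The following sketch assumes a technology named `wind` exists in the model; both the technology name and the capacity limit are illustrative:

```julia
using AnyMOD, JuMP

# create the model object and build all default variables and constraints
anyM = anyModel("examples/demo","results", objName = "customCns")
createOptModel!(anyM)

# access the conversion capacity variables of the (assumed) technology 'wind'
# and limit their sum via a standard JuMP constraint
capaConv_df = anyM.parts.tech[:wind].var[:capaConv]
@constraint(anyM.optModel, sum(capaConv_df[!,:var]) <= 100.0)
```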
# Balances
### Energy balance
Constrains supply to at least cover demand.
The energy balance can alternatively be enforced as an equality constraint (see [Carriers](@ref) for details). Instead of having a fixed name, the constraint is always assigned the name of the carrier being balanced.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td><em>carrier name</em></td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>dispatch resolution of carrier</li>
</ul></td>
</tr>
<tr>
<td rowspan = "2"><strong>formulation</strong></td>
<td style="text-align:left;border-bottom:none;padding-bottom:0px">$ \scriptstyle dem \, + \, \sum\bm{use} \, + \, \sum\bm{stExt_{in}} \, + \, \sum \bm{exc_{exp}}\, + \, \sum\bm{trd_{sell}} \, + \, \bm{lss} \, \leq \, $</td>
</tr>
<tr>
<td style="text-align:right;border-right:none;border-top:none;padding-top:0px">$ \scriptstyle \; \; \; \; \sum\bm{gen} \, + \, \sum\bm{stExt_{out}} \, + \, \sum (1-lossExc) \bm{exc_{imp}} \, + \, \sum\bm{trd_{buy}} \, + \, \bm{crt} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td><ul class="liste">
<li><a href="../parameter_list/#Demand-1">demand</a></li>
<li><a href="../parameter_list/#Exchange-losses-1">exchange losses</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td><ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
<li><a href="../variables/#Exchange-1">exchange</a></li>
<li><a href="../variables/#Buy-and-sell-1">buy and sell</a></li>
<li><a href="../variables/#Curtailment-and-loss-of-load-1">curtailment and loss-of-load</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Balance-1">balance</a></td>
</tr>
</tbody>
</table>
```
### Conversion balance
Controls the ratio between used and generated quantities.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>convBal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $Te$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>reference resolution of technology</li>
<li>expansion time-steps of technology</li>
<li>relevant modes of technology</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle eff_{conv} (\sum\bm{use} \, + \, \sum\bm{stInt_{out}}) \, = \, \sum\bm{gen} \, + \, \sum\bm{stInt_{in}} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td><ul class="liste">
<li><a href="../parameter_list/#Efficiency-1">conversion efficiency</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td><ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Storage Balance
Ensures storage levels comply with charged and discharged quantities.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stBal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $Te$, $C$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>dispatch resolution of carrier</li>
<li>expansion time-steps of technology</li>
<li>relevant modes of technology</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \bm{stLvl_{t}} \, = \, \bm{stLvl_{t-1}} \, \cdot \, (1\,-\,stDis) \, + \, stInflow \, + \, (\bm{stExt_{in}} \, + \, \bm{stInt_{in}}) \, \cdot \, effSt_{in} \, - \, \frac{\bm{stExt_{out}} \, + \, \bm{stInt_{out}}}{effSt_{out}} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td><ul class="liste">
<li><a href="../parameter_list/#Efficiency-1">storage efficiencies</a></li>
<li><a href="../parameter_list/#Storage-self-discharge-1">storage self-discharge</a></li>
<li><a href="../parameter_list/#Storage-inflow-1">storage inflow</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td><ul class="liste">
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
<li><a href="../variables/#Storage-level-1">storage level</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
# Dispatch restrictions
### Conversion capacity restriction
Ensures quantities converted comply with the operated conversion capacity.
```@raw html
<p class="norm">
The graph-based approach, which allows the resolution to vary by energy carrier within the same model, complicates the formulation of capacity constraints for conversion technologies. Which constraints are necessary is specified in the <code>capaRestr</code> field of the technology <a href="../api/#AnyMOD.TechPart"><code>part object</code></a> (see <a href="https://arxiv.org/abs/2004.10184">Göke (2020)</a> on how this is derived). Capacity constraints are either enforced on used or generated quantities. In the latter case, quantities have to be corrected for the respective efficiency, since AnyMOD always denotes capacities <u>after efficiency</u>.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>inRestr</td>
<td>outRestr</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $Te$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li>required capacity restrictions according to <code>capaRestr</code> field of part</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \frac{\bm{use} \, + \, \bm{stInt_{out}}}{ava_{conv}} \, \leq \, \bm{oprCapa_{conv}} $</td>
<td>$ \scriptstyle \frac{\bm{gen} \, + \, \bm{stInt_{in}}}{eff_{conv} \, ava_{conv}} \, \leq \, \bm{oprCapa_{conv}} $</td>
</tr>
<tr>
<td rowspan="2"><strong>parameter</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ul class="liste">
<li><a href="../parameter_list/#Availability-1">conversion availability</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ul class="liste">
<li><a href="../parameter_list/#Efficiency-1">conversion efficiency</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:10.25em"><ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
<li><a href="../variables/#Operated-capacity-1">operated conversion capacity</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Storage capacity restriction
Ensures quantities stored comply with the operated storage-input, storage-output, storage-size capacity.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stInRestr</td>
<td>stOutRestr</td>
<td>stSizeRestr</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="3"; style="text-align:center">$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $Te$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="3"; style="text-align:center"><ul class="liste">
<li>dispatch resolution of carrier</li>
<li>expansion time-steps of technology</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \frac{\bm{stExt_{in}} \, + \, \bm{stInt_{in}}}{ava_{stIn}} \, \leq \, \bm{oprCapa_{stIn}} $</td>
<td>$ \scriptstyle \frac{\bm{stExt_{out}} \, + \, \bm{stInt_{out}}}{ava_{stOut}} \, \leq \, \bm{oprCapa_{stOut}} $</td>
<td>$ \scriptstyle \frac{\bm{stLvl}}{ava_{stSize}} \, \leq \, \bm{oprCapa_{stSize}} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="3"; style="text-align:center"><ul class="liste">
<li><a href="../parameter_list/#Availability-1">storage availability</a></li>
</ul></td>
</tr>
<tr>
<td rowspan="2"><strong>variables</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated storage capacity</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
<ul class="liste">
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px">
<ul class="liste">
<li><a href="../variables/#Storage-level-1">storage level</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Energy ratio restriction
Ensures used and generated quantities comply with the specified ratios.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>ratioEnerUse{Fix/Low/Up}</td>
<td>ratioEnerGen{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $Te$, $C$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li><a href="../parameter_list/#Ratios-of-carrier-use-and-generation-1">ratios</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \bm{use} \; \begin{smallmatrix} = \\[0.5pt] \geq \\[2pt] \leq \end{smallmatrix} \; ratioEner^{use}_{fix/low/up} \sum \bm{use} $</td>
<td>$ \scriptstyle \bm{gen} \; \begin{smallmatrix} = \\[0.5pt] \geq \\[2pt] \leq \end{smallmatrix} \; ratioEner^{gen}_{fix/low/up} \sum \bm{gen} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../variables/#Ratios-of-carrier-use-and-generation">generation and use ratios</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:8.25em">
<ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Exchange capacity restriction
Ensures exchanged quantities comply with the operated exchange capacity.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>excRestr</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>dispatch resolution of carrier and descendant carriers</li>
<li>regions can exchange carrier</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \sum \frac{\bm{exc}}{ava_{exc}} \, \leq \, \bm{oprCapa_{exc}} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td><ul class="liste">
<li><a href="../parameter_list/#Exchange-availability-1">exchange availablity</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td><ul class="liste">
<li><a href="../variables/#Exchange-1">exchange</a></li>
<li><a href="../variables/#Operated-capacity-1">operated exchange capacity</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Trade capacity restriction
Ensures traded quantities comply with the specified trade capacity.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>trdBuyCap</td>
<td>trdSellCap</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$, $id$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li><a href="../parameter_list/#Trade-price-1">trade price</a> defined</li>
<li><a href="../parameter_list/#Trade-capacity-1">trade capacity</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td>$ \scriptstyle \bm{trdBuy} \, \leq \, trdBuyCap $</td>
<td>$ \scriptstyle \bm{trdSell} \, \leq \, trdSellCap $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li><a href="../parameter_list/#Trade-capacity-1">trade capacity</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:6.4em"><ul class="liste">
<li><a href="../variables/#Buy-and-sell-1">buy and sell</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"><a href="../parts/#Trade-1">trade</a></td>
</tr>
</tbody>
</table>
```
# Capacity expansion
### Definition of installed capacity
```@raw html
<p class="norm">
Connects installed capacities to expansion variables and residual capacities.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>capaConv</td>
<td><nobr>capaSt{In/Out/Size}</nobr></td>
<td>capaExc</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td rowspan="3"><strong>instance</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li>superordinate dispatch resolution (usually years)</li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ul class="liste">
<li>spatial expansion resolution of technology</li>
</ul>
</td>
<td colspan="1"; rowspan="2"; style="text-align:center;border-right:none;padding-top:0px;padding-right:0.0px">
<ul class="liste">
<li>spatial expansion resolution of carrier</li>
<li>regions can exchange carrier</li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;padding-top:0px">
<ul class="liste"; start="4">
<li>if uses or generates carriers</li>
</ul>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ul class="liste"; start="4">
<li>each stored carrier</li>
</ul>
</td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td colspan="3"; style="text-align:center">$ \scriptstyle \bm{capa_{t}} \, = \, resiCapa_{t} \, + \, \sum\limits_{t' \in (t \, + \, del, t \, +\,del \, + \, life]} \bm{exp_{t'}} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="3"; style="text-align:left;padding-left:18.5em"><ul class="liste">
<li><a href="../parameter_list/#Residual-capacities-1">residual capacity</a></li>
<li><a href="../parameter_list/#Technical-lifetime-1">technical lifetime</a></li>
<li><a href="../parameter_list/#Construction-time-1">construction time</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="3"; style="text-align:left;padding-left:18.5em"><ul class="liste">
<li><a href="../variables/#Installed-capacity-1">installed capacity</a></li>
<li><a href="../variables/#Expansion-1">capacity expansion</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Decommissioning of operated capacity
Ensures operated capacities comply with installed capacities.
```@raw html
<p class="norm">
Decommissioning behaviour is determined by the <code>decomm</code> argument of the <a href="../model_object">model constructor</a>. For <code>none</code>, the equations listed here are not enforced. Instead, operated and installed capacities are identical. When the argument is set to <code>recomm</code>, only the first equation, which limits operated capacities to installed capacities, is enforced.
</p>
<p class="norm">
Lastly, for <code>decomm</code> both equations apply. The second equation then ensures that capacities, once decommissioned, cannot be re-commissioned again. The expression $\displaystyle \Delta Resi_{+}$ in the equation denotes any increase of residual capacities from $\displaystyle t-1$ to $\displaystyle t$.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>oprCapaConv</td>
<td style="padding-right:0.52em"><nobr>oprCapaSt{In/Out/Size}</nobr></td>
<td>oprCapaExc</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td rowspan="3"><strong>instance</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li>superordinate dispatch resolution (usually years)</li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ul class="liste">
<li>spatial expansion resolution of technology</li>
</ul>
</td>
<td colspan="1"; rowspan="2"; style="text-align:center;border-right:none;padding-top:0px;padding-right:0px">
<ul class="liste">
<li>spatial expansion resolution of carrier</li>
<li>regions can exchange carrier</li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;padding-top:0px;padding-right:0.52em">
<ul class="liste"; start="4">
<li>if uses or generates carriers</li>
</ul>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ul class="liste"; start="4">
<li>each stored carrier</li>
</ul>
</td>
</tr>
<tr>
<td rowspan = "2"><strong>formulation</strong></td>
<td colspan = "3"; style="text-align:center;border-bottom:none">$ \scriptstyle \bm{oprCapa} \, \leq \, \bm{capa}$</td>
</tr>
<tr>
<td colspan = "3"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">$ \scriptstyle \bm{oprCapa_{t}} \, \leq \, \bm{oprCapa_{t\,-\,1}} \, + \, \bm{exp_{t}} \, + \, \Delta Resi_{+} $</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="3"; style="text-align:left;padding-left:18.1em"><ul class="liste">
<li><a href="../parameter_list/#Residual-capacities-1">residual capacity</a></li>
</ul></td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="3"; style="text-align:left;padding-left:18.1em"><ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated capacity</a></li>
<li><a href="../variables/#Installed-capacity-1">installed capacity</a></li>
<li><a href="../variables/#Expansion-1">capacity expansion</a></li>
</ul></td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
# Cost equations
```@raw html
<p class="norm">
Within the cost equations the discount factor is used to discount costs to the present. The discount factor for a year $t$ is computed from the <a href="../parameter_list/#Discount-rate-1">discount rates</a> of the current and the previous years as follows:
</p>
<p class="norm">
$\scriptstyle discFac_{t} = \displaystyle \prod_{t' = t_{0}}^{t}(1 \, + \, rateDisc_{t'})^{-1}$
</p>
```
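As an illustration, the discount factor can be reproduced outside of the model from yearly discount rates; this is only a sketch with assumed rates, not AnyMOD's internal implementation:

```julia
# cumulative discount factor from yearly discount rates (illustrative values)
rateDisc = Dict(2020 => 0.02, 2025 => 0.02, 2030 => 0.015)

function computeDiscFac(rate_dic::Dict{Int,Float64})
    discFac_dic = Dict{Int,Float64}()
    fac = 1.0
    for t in sort(collect(keys(rate_dic)))
        fac /= 1 + rate_dic[t] # multiply by (1 + rateDisc)^-1 for each year up to t
        discFac_dic[t] = fac
    end
    return discFac_dic
end

discFac = computeDiscFac(rateDisc)
```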
### Expansion cost equation
Determines costs of capacity expansion.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costExp{Conv,StIn,StOut,StSize}</td>
<td>costExp{Exc}</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $C$</td>
</tr>
<tr>
<td rowspan="2"><strong>formulation</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none">
$ \scriptstyle \bm{costExp\{...\}} \, = \, \sum discFac \, \cdot \, ann\{...\} \, \cdot \, \bm{exp\{...\}} $
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
$ \scriptstyle ann\{...\} \, = \, costExp\{...\} \, \cdot \, \frac{rateExp\{...\} \, (1\,+\,rateExp\{...\})^{lifeEco\{...\}}}{(1\,+\,rateExp\{...\})^{lifeEco\{...\}}\,-\,1}$
</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:left;padding-left:8em">
<ul class="liste">
<li><a href="../parameter_list/#Discount-rate-1">discount rate</a></li>
<li><a href="../parameter_list/#Interest-rate-1">interest rate</a></li>
<li><a href="../parameter_list/#Expansion-cost-1">expansion cost</a></li>
<li><a href="../parameter_list/#Economic-lifetime-1">economic lifetime</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:8em">
<ul class="liste">
<li><a href="../variables/#Expansion-1">expansion</a></li>
<li><a href="../variables/#Expansion-cost-1">expansion cost</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Operating cost equation
Determines costs of operating capacity.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costOpr{Conv,StIn,StOut,StSize}</td>
<td>costOpr{Exc}</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $C$</td>
</tr>
<tr>
<td><strong>formulation</strong></td>
<td colspan="2"; style="text-align:center">
$ \scriptstyle \bm{costOpr\{...\}} \, = \, \sum discFac \, \cdot \, costOpr\{...\} \, \cdot \, \bm{oprCapa\{...\}} $
</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:left;padding-left:8em">
<ul class="liste">
<li><a href="../parameter_list/#Discount-rate-1">discount rate</a></li>
<li><a href="../parameter_list/#Operating-cost-1">operating cost</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:8em">
<ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated capacity</a></li>
<li><a href="../variables/#Operating-cost-1">operating cost</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Variable cost equation
```@raw html
<p class="norm">
Determines costs associated with quantities dispatched. Costs incurred by <a href="../parameter_list/#Emission-price-1">emission prices</a> are included in <code>costVarUse</code>.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costVar{Use,Gen}</td>
<td>costVar{StIn,StOut}</td>
<td>costVar{Exc}</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $C$</td>
</tr>
<tr>
<td rowspan="2"><strong>formulation</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0px">
$ \scriptstyle \bm{costVar\{...\}} \, = \, \sum discFac \, \cdot \, costVar\{...\} \, \cdot \, \{use,gen,stIn,stOut,exc\} \, \cdot \, 10^3 $
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
$ \scriptstyle + \, \sum emFac \, \cdot \, emPrc \, \cdot \, \bm{use} \, \cdot \, 10^3$
</td>
<td colspan="2"; style="text-align:left;padding-top:0px">
</td>
</tr>
<tr>
<td rowspan="3"><strong>parameter</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ul class="liste">
<li><a href="../parameter_list/#Discount-rate-1">discount rate</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0px;border-bottom:none;padding-bottom:0.1875em">
<ul class="liste">
<li><a href="../parameter_list/#Variable-cost-1">variable cost of technologies</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px;border-bottom:none">
<ul class="liste">
<li><a href="../parameter_list/#Exchange-cost-1">exchange cost</a></li>
</ul>
</td>
</tr>
<tr>
<td style="text-align:center;border-right:none;border-top:none;padding-top:0px">
<ul class="liste">
<li><a href="../parameter_list/#Emission-factor-1">emission factor</a></li>
<li><a href="../parameter_list/#Emission-price-1">emission price</a></li>
</ul>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td rowspan="2"><strong>variables</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ul class="liste">
<li><a href="../variables/#Variable-cost-1">variable cost</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;border-top:none;padding-top:0px">
<ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;border-right:none;border-top:none;padding-top:0px">
<ul class="liste">
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px">
<ul class="liste">
<li><a href="../variables/#Exchange-quantities-1">exchange quantities</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Trade cost equation
Determines costs and revenues from buying or selling carriers on an external market.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costTrdBuy</td>
<td>costTrdSell</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{sup}$, $R_{exp}$, $C$</td>
</tr>
<tr>
<td rowspan="2"><strong>formulation</strong></td>
<td style="text-align:left;border-bottom:none;padding-bottom:0px">
$ \scriptstyle \bm{costTrdBuy} \, = $
</td>
<td style="text-align:left;border-bottom:none;padding-bottom:0px">
$ \scriptstyle \bm{costTrdSell} \, = $
</td>
</tr>
<tr>
<td style="text-align:right;border-right:none;border-top:none;padding-top:0px">
$ \scriptstyle \sum discFac \, \cdot \, trdBuyPrc \, \cdot \, \bm{trdBuy} \, \cdot \, 10^3 $
</td>
<td style="text-align:right;border-top:none;padding-top:0px">
$ \scriptstyle \sum discFac \, \cdot \, trdSellPrc \, \cdot \, \bm{trdSell} \, \cdot \, 10^3 $
</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:left;padding-left:13.2em">
<ul class="liste">
<li><a href="../parameter_list/#Discount-rate-1">discount rate</a></li>
<li><a href="../parameter_list/#Trade-price-1">trade price</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:13.2em">
<ul class="liste">
<li><a href="../variables/#Buy-and-sell-1">buy and sell</a></li>
<li><a href="../variables/#Trade-cost-1">trade cost</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Curtailment and loss-of-load cost equation
Determines costs of curtailment and unmet demand.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costCrt</td>
<td>costLss</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{sup}$, $R_{exp}$, $C$</td>
</tr>
<tr>
<td rowspan="2"><strong>formulation</strong></td>
<td style="text-align:left;border-bottom:none;padding-bottom:0px">
$ \scriptstyle \bm{costCrt} \, = $
</td>
<td style="text-align:left;border-bottom:none;padding-bottom:0px">
$ \scriptstyle \bm{costLss} \, = $
</td>
</tr>
<tr>
<td style="text-align:right;border-right:none;border-top:none;padding-top:0px">
$ \scriptstyle \sum discFac \, \cdot \, costCrt \, \cdot \, \bm{crt} \, \cdot \, 10^3 $
</td>
<td style="text-align:right;border-top:none;padding-top:0px">
$ \scriptstyle \sum discFac \, \cdot \, costLss \, \cdot \, \bm{lss} \, \cdot \, 10^3 $
</td>
</tr>
<tr>
<td><strong>parameter</strong></td>
<td colspan="2"; style="text-align:left;padding-left:5.3em">
<ul class="liste">
<li><a href="../parameter_list/#Discount-rate-1">discount rate</a></li>
<li><a href="../parameter_list/#Cost-of-curtailment-and-loss-of-load-1">cost of curtailment and loss of load</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>variables</strong></td>
<td colspan="2"; style="text-align:left;padding-left:5.3em">
<ul class="liste">
<li><a href="../variables/#Curtailment-and-loss-of-load-1">curtailment and loss-of-load</a></li>
<li><a href="../variables/#Curtailment-and-loss-of-load cost-1">curtailment and loss-of-load cost</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
# Limiting constraints
```@raw html
<p class="norm">
Limiting constraints are largely shaped by the corresponding limit parameters on <a href="../parameter_list/#Limits-on-quantities-dispatched">quantities dispatched</a>, <a href="../parameter_list/#Limits-on-expansion-and-capacity">expansion and capacity</a>, and by the <a href="../parameter_list/#Emission-limit">emission limit</a>.
</p>
<p class="norm">
The name of the constraint will correspond to the name of the respective parameter. Dimension and formulation depend on the way parameter data was provided, as explained <a href="../parameter_list/#Limits-on-quantities-dispatched">here</a>. Lastly, all constraints are stored in the <a href="../parts/#Limit">limit part</a>.
</p>
```
```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
}
ul.liste {
list-style-position: outside;
padding-left: 1em;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Data files
AnyMOD includes several functions to obtain the results of a solved model.
# Analysed results
Different analyses can be written with the `reportResults` function, depending on the `reportType` keyword.
```julia
reportResults(reportType::Symbol, model_object::anyModel; rtnOpt::Tuple = (:csv,))
```
The keyword argument `rtnOpt` controls the output format. Available options are:
- `:csv`: writes a "readable" `.csv` file
- `:csvDf`: returns the same "readable" data as a DataFrame
- `:raw`: writes a `.csv` file with "raw" data, this means sets are not indicated by their name, but their internal id
- `:rawDf`: returns the same "raw" data as a DataFrame
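For example, the following call writes the summary to a `.csv` file and additionally returns it as a DataFrame, assuming `anyM` is a solved model object:

```julia
summary_df = reportResults(:summary, anyM, rtnOpt = (:csv, :csvDf))
```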
### Summary
Using the `reportType` keyword `:summary` will provide a general overview of results. If the optional argument `wrtSgn` is set to `true`, output quantities (e.g. use or storage input) are given a negative sign. The table below lists all variables included.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>definition</strong></td>
</tr>
<tr>
<td><code>exp{Conv/StIn/StOut/StSize}</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Expansion-1">expansion variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>capa{Conv/StIn/StOut/StSize}</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Installed-capacity-1">installed capacity variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>oprCapa{Conv/StIn/StOut/StSize}</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated capacity variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>demand</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../parameter_list/#Demand-1">demand parameter</a></li>
</ul></td>
</tr>
<tr>
<td><code>use</code> & <code>gen</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../variables/#Generation-and-use-1">generation and use variables</a></li>
</ul></td>
</tr>
<tr>
<td><code>stIn</code> & <code>stOut</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../variables/#Charging-and-discharging-1">charging and discharging variables</a> (both in- and external)</li>
</ul></td>
</tr>
<tr>
<td><code>stExt{In/Out}</code> & <code>stInt{In/Out}</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../variables/#Charging-and-discharging-1">charging and discharging variables</a></li>
</ul></td>
</tr>
<tr>
<td><code>import</code> & <code>export</code></td>
<td style="border-right:none"><ul class="liste">
<li>net aggregation of <a href="../variables/#Exchange-1">exchange variables</a></li>
</ul></td>
</tr>
<tr>
<td><code>trdBuy</code> & <code>trdSell</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../variables/#Buy-and-sell-1">buy and sell variables</a></li>
</ul></td>
</tr>
<tr>
<td><code>crt</code> & <code>lss</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated <a href="../variables/#Curtailment-and-loss-of-load-1">curtailment and loss-of-load variables</a></li>
</ul></td>
</tr>
<tr>
<td><code>emission</code></td>
<td style="border-right:none"><ul class="liste">
<li>aggregated emissions</li>
<li>defined as $emFac \cdot \bm{use} \cdot 10^3$</li>
</ul></td>
</tr>
<tr>
<td><code>flhConv</code></td>
<td style="border-right:none">
<ul class="liste">
<li>measure of conversion capacity utilization</li>
<li>defined as $\frac{\sum use + stIntOut}{oprCapaConv}$ or $\frac{\sum gen + stIntIn}{oprCapaConv}$ if no carriers are used</li>
</ul>
</td>
</tr>
<tr>
<td><code>flhStIn</code></td>
<td style="border-right:none">
<ul class="liste">
<li>measure of capacity utilization regarding storage input</li>
<li>defined as $\frac{\sum stExtIn + stIntIn}{oprCapaStIn}$</li>
</ul>
</td>
</tr>
<tr>
<td><code>flhStOut</code></td>
<td style="border-right:none">
<ul class="liste">
<li>measure of capacity utilization regarding storage output</li>
<li>defined as $\frac{\sum stExtOut + stIntOut}{oprCapaStOut}$</li>
</ul>
</td>
</tr>
<tr>
<td><code>cycStIn</code></td>
<td style="border-right:none">
<ul class="liste">
<li>measure to characterize utilization of storage size based on charged quantities</li>
<li>small values indicate long- and large values short-term storage</li>
<li>defined as $\frac{\sum stExtIn + stIntIn}{oprCapaStSize}$</li>
</ul>
</td>
</tr>
<tr>
<td><code>cycStOut</code></td>
<td style="border-right:none">
<ul class="liste">
<li>measure to characterize utilization of storage size based on discharged quantities</li>
<li>small values indicate long- and large values short-term storage</li>
<li>defined as $\frac{\sum stExtOut + stIntOut}{oprCapaStSize}$</li>
</ul>
</td>
</tr>
</tbody>
</table><h3 id="Exchange"><a class="docs-heading-anchor" href="#Exchange">Exchange</a><a id="Exchange-1"></a><a class="docs-heading-anchor-permalink" href="#Exchange" title="Permalink"></a></h3><p>The keyword <code>:exchange</code> gives detailed results on exchange capacities and quantities. Again, reported variables are listed below.</p><table class="tabelle2">
<tbody>
<tr>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>definition</strong></td>
</tr>
<tr>
<td><code>expExc</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Expansion-1">exchange expansion variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>capaExc</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Installed-capacity-1">installed exchange capacity variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>oprCapaExc</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated exchange capacity variable</a></li>
</ul></td>
</tr>
<tr>
<td><code>flhExc</code></td>
<td style="border-right:none"><ul class="liste">
<li>utilization of exchange capacities</li>
<li>defined as $\frac{exc}{oprCapaExc}$</li>
</ul></td>
</tr>
<tr>
<td><code>exc</code></td>
<td style="border-right:none"><ul class="liste">
<li><a href="../variables/#Exchange-1">exchange variables</a></li>
</ul></td>
</tr>
</tbody>
</table>
```
### Costs
```@raw html
The keyword <code>:costs</code> provides the values of all <a href="../variables/#Costs">cost variables</a>. All costs are provided in million Euros.
```
# Time-series
The `reportTimeSeries` function writes a table with the values of all elements occurring in the energy balance of a respective `carrier`.
```julia
reportTimeSeries(carrier::Symbol, model_object::anyModel)
```
Optional arguments include:
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>argument</strong></td>
<td><strong>explanation</strong></td>
<td><strong>default</strong></td>
</tr>
<tr>
<td><code>filterFunc</code></td>
<td>
<ul class="liste">
<li>function to filter certain time-series data</li>
<li>for example <code>x -> x.R_dis == 1</code> will only provide time-series data for the region with id 1 (see <br> documentation of <a href="../api/#AnyMOD.Tree"><code>tree objects</code></a> on how to obtain ids)</li>
</ul>
</td>
<td><code>x -> true</code></td>
</tr>
<tr>
<td><code>unstck</code></td>
<td>
<ul class="liste">
<li>controls if data is provided as an unstacked table or in pivot format</li>
</ul>
</td>
<td><code>true</code></td>
</tr>
<tr>
<td><code>signVar</code></td>
<td>
<ul class="liste">
<li>specifies groups of variables to write to output table</li>
<li><code>:in</code> refers to energy inputs (e.g. <a href="../variables/#Generation-and-use-1">generated</a> or <a href="../variables/#Buy-and-sell-1">bought</a> quantities) and <code>:out</code> refers to energy out- <br> puts (e.g. <a href="../variables/#Generation-and-use-1">used</a>, <a href="../parameter_list/#Demand-1">demanded</a>, or <a href="../variables/#Buy-and-sell-1">sold</a> quantities)</li>
</ul>
</td>
<td><code>(:in,:out)</code></td>
</tr>
<tr>
<td><code>mergeVar</code></td>
<td>
<ul class="liste">
<li>if set to <code>false</code> results for energy in- and output are written to separate files</li>
</ul>
</td>
<td><code>true</code></td>
</tr>
<tr>
<td><code>minVal</code></td>
<td>
<ul class="liste">
<li>threshold for values to be included</li>
<li>useful to filter out really small but non-zero values that result from using Barrier without Crossover</li>
</ul>
</td>
<td><code>1e-3</code></td>
</tr>
<tr>
<td><code>rtnOpt</code></td>
<td>
<ul class="liste">
<li>controls the output format as documented for <a href="#Analysed-results">Analysed results</a></li>
</ul>
</td>
<td><code>(:csv,)</code></td>
</tr>
</tbody>
</table>
```
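As a sketch of how these arguments combine, the call below would write the electricity balance for a single region only and drop negligible values; the region id used is illustrative:
```julia
# write the electricity balance for the region with id 1 only,
# filtering out absolute values below 0.1 GWh
reportTimeSeries(:electricity, model_object; filterFunc = x -> x.R_dis == 1, minVal = 0.1)
```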
# Individual elements
```@raw html
<p class="norm">
In addition to the <code>reportResults</code> and <code>reportTimeSeries</code> functions that aggregate various model elements and report on them, individual variables or constraints can also be printed directly. In this case, the DataFrames used to store variables and constraints within the <a href="../api/#AnyMOD.TechPart"><code>model part</code></a> objects serve as inputs.
</p>
```
The `printObject` function writes a copy of the DataFrame provided, but replaces the internal node ids with their written names.
```julia
printObject(element::DataFrame, model_object::anyModel)
```
For variables, the table provides their values; for constraints, the corresponding constraint expressions.
The `printDuals` function works analogously, but returns the duals or shadow prices for the respective elements.
```julia
printDuals(element::DataFrame, model_object::anyModel)
```
```@raw html
<p class="norm">
For both functions the optional arguments <code>filterFunc</code> and <code>rtnOpt</code> as introduced for <a href="#Analysed-results">Analysed results</a> and <a href="#Time-series">Time series</a> are available. In addition, the argument <code>fileName</code> can be used to specify the name of the output file.
</p>
```
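A minimal sketch of how both functions are applied; the technology name `gasPlant` and the dictionary keys `capaConv` and `convCapaRestr` are illustrative assumptions:
```julia
# obtain the part object of a technology (assumption: a technology named gasPlant exists)
tech_part = model_object.parts.tech[:gasPlant]

# write its conversion capacity variables with readable node names instead of internal ids
printObject(tech_part.var[:capaConv], model_object; fileName = "capaConv_gasPlant")

# analogously, write the shadow prices of a constraint DataFrame (hypothetical constraint key)
printDuals(tech_part.cns[:convCapaRestr], model_object)
```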
```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
}
ul.liste {
list-style-position: outside;
padding-left: 1em;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Error handling
To report on errors during model initialization and construction, status reports are printed to the console and a detailed report is written to the output directory. The report contains three different types of entries:
```@raw html
<ul class="liste">
<li><span style="color:rgb(0,188,0);font-weight:bold">green</span>: information on some automated modelling decision</li>
<li><span style="color:rgb(240,237,27);font-weight:bold">yellow</span>: some aspects of the model setup look suspicious</li>
<li><span style="color:rgb(205,49,49);font-weight:bold">red</span>: current model setup contains logical inconsistencies and will lead to an infeasible model</li>
</ul>
<p class="norm">
Entries of the third kind will throw an error and cause termination. If, for example, a wrong name is provided for the <a href="../parameter_list/#Emission-factor-1">emission factor</a> parameter in <a href="https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/par_emissions.csv"><code>par_emissions.csv</code></a>, the following reporting file results:
</p>
<table class="tabelle2">
<tbody>
<tr>
<td><strong>errStatus</strong></td>
<td><strong>section</strong></td>
<td><strong>location</strong></td>
<td style="border-right:none"><strong>message</strong></td>
</tr>
<tr>
<td><p style="color:rgb(0,188,0)">green</p></td>
<td>carrier mapping</td>
<td></td>
<td style="border-right:none">carrier heat inherited resolution from children</td>
</tr>
<tr>
<td><p style="color:rgb(0,188,0)">green</p></td>
<td>carrier mapping</td>
<td></td>
<td style="border-right:none">carrier gas inherited resolution from children</td>
</tr>
<tr>
<td><p style="color:rgb(205,49,49)">red</p></td>
<td>parameter read-in</td>
<td>definition</td>
<td style="border-right:none">parameter with the name emissionFactor does not exist</td>
</tr>
</tbody>
</table>
<p class="norm">
Optional arguments of the <a href="../model_object">model constructor</a> can be set to values between 1 and 3 to adjust the frequency of reporting:
</p>
```
- `reportLvl`: frequency of writing updates to the console
- `errWrtLvl`: frequency of writing to the report file
- `errCheckLvl`: frequency of checking for errors
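These arguments are passed directly to the model constructor; a minimal sketch with illustrative level values:
```julia
# adjust the verbosity of console output, error checking, and the report file;
# the chosen levels are illustrative, see the list above for their meaning
model_object = anyModel("../demo", "results"; objName = "demo", reportLvl = 3, errCheckLvl = 3, errWrtLvl = 1)
```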
# AnyMOD
[AnyMOD.jl](https://github.com/leonardgoeke/AnyMOD.jl) is a [Julia](https://julialang.org/) framework for creating large-scale energy system models with multiple periods of capacity expansion formulated as linear optimization problems. It was developed to address the challenges of modelling high levels of intermittent generation and sectoral integration. A comprehensive description of the framework's graph-based methodology can be found in the working paper [Göke (2020), A graph-based formulation for modeling macro-energy systems](https://arxiv.org/abs/2004.10184). The software itself is separately introduced in [Göke (2020), AnyMOD.jl: A Julia package for creating energy system models](https://arxiv.org/abs/2011.00895).
AnyMOD's key characteristic is that all sets (time-steps, regions, energy carriers, and technologies) are organized within a hierarchical tree structure. This enables two unique features:
* The level of temporal and spatial granularity can be varied by energy carrier. For instance, electricity can be modeled with hourly resolution, while supply and demand of gas is balanced daily. As a result, a substantial decrease of computational effort can be achieved. In addition, flexibility inherent to the system, for example in the gas network, can be accounted for.
* The degree to which energy carriers are substitutable when converted, stored, transported, or consumed can be modeled to achieve a detailed but flexible representation of sector integration. As an example, residential and district heat can both equally satisfy heat demand, but technologies to produce these carriers are different.
The framework uses [DataFrames](https://juliadata.github.io/DataFrames.jl/stable/) to store model elements and relies on [JuMP](https://github.com/JuliaOpt/JuMP.jl) as a back-end. In addition, Julia's multi-threading capabilities are heavily deployed to increase performance. Since models entirely consist of .csv files, they can be developed open and collaboratively using version control (see [Model repositories](@ref) for examples).
# Installation
The current version of [AnyMOD](https://github.com/leonardgoeke/AnyMOD.jl) was developed for [Julia 1.3.1](https://julialang.org/downloads/oldreleases/). AnyMOD is installed by switching into Julia's package mode (type `]` into the console) and then executing `add AnyMOD`.
# Getting started
To introduce the package's workflow and core functions, a small-scale example model is created, solved, and analyzed. The files of this model can either be found in the installation directory of the package (`user/.julia/packages/AnyMOD/.../examples`) or manually loaded from the [GitHub repository](https://github.com/leonardgoeke/AnyMOD.jl/tree/master/examples/demo).
Before we can start working with AnyMOD, it needs to be imported via the `using` command. Afterwards, the function `anyModel` constructs an AnyMOD model object by reading in the `.csv` files found within the directory specified by the first argument. The second argument specifies the directory all model outputs are written to. Furthermore, default model options can be overwritten via optional arguments. In this case, the optional argument `objName` is used to name the model "demo". This name will appear during reporting and is added to each output file. The optional argument `shortExp` defines the interval in years between different time-steps of capacity expansion.
```julia
using AnyMOD
model_object = anyModel("../demo","results", objName = "demo", shortExp = 10)
```
During the construction process, all input files are read in and checked for errors. Afterwards, sets are mapped to each other and parameter data is assigned to the different model parts. Throughout the whole process, status updates are printed to the console and comprehensive reports are written to a dedicated `.csv` file. Since all qualitative model information, meaning all sets and their interrelations, is available after construction, several graphs describing a model's structure can be plotted.
```julia
plotTree(:region, model_object)
plotTree(:carrier, model_object)
plotTree(:technology, model_object, plotSize = (28.0,5.0))
plotEnergyFlow(:graph, model_object)
```
All of these plots will be written to the specified results folder. The first three graphs plotted by `plotTree` show the rooted tree defining the sets of regions, carriers, and technologies, respectively. As an example, the rooted tree for carriers is displayed below.

The fourth graph created by using `plotEnergyFlow` with keyword `:graph` gives a qualitative overview of all energy flows within a model. Nodes either correspond to technologies (grey dots) or energy carriers (colored squares). Edges between technology and energy carrier nodes indicate the carrier is either an input (entering edge) or an output (leaving edge) of the respective technology. Edges between carriers indicate the same relationships as displayed in the tree above.

To create the variables and constraints of the model's underlying optimization problem, the model object is passed to the `createOptModel!` function. Afterwards, the `setObjective!` function is used to set the objective function of the optimization. The function requires a keyword input to indicate what is optimized, but so far only `:costs` has been implemented. Again, updates and reports are written to the console and to a dedicated reporting file.
```julia
createOptModel!(model_object)
setObjective!(:costs, model_object)
```
To actually solve the created optimization problem, the field of the model object containing the corresponding JuMP object is passed to the respective functions of the [JuMP](https://github.com/JuliaOpt/JuMP.jl) package. The JuMP package itself is part of AnyMOD's dependencies and therefore does not have to be added separately, but the solver does. In this case we use Gurobi, but CPLEX or a non-commercial solver could be used as well.
```julia
using Gurobi
set_optimizer(model_object.optModel, Gurobi.Optimizer)
optimize!(model_object.optModel)
```
Once a model is solved, results can be obtained and analyzed by the following functions:
```julia
reportResults(:summary, model_object)
reportTimeSeries(:electricity, model_object)
plotEnergyFlow(:sankey, model_object)
```
Depending on the keyword provided, `reportResults` writes aggregated results to a csv file. `:summary` gives an overview of installed capacities and yearly use and generation of energy carriers. Other keywords available are `:costs` and `:exchange`. `reportTimeSeries` will write the energy balance and the value of each term within the energy balance of the carrier provided as a keyword. Finally, `plotEnergyFlow` used with the keyword `:sankey` creates a sankey diagram that visualizes the quantitative energy flows in the solved model.

```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
}
ul.liste {
list-style-position: outside;
padding-left: 1em;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
pre.inline {
display: inline;
}
</style>
```
# Model object
```@raw html
<p class="norm">
The <a href="../api/#AnyMOD.anyModel"><code>anyModel</code></a> object is the overarching structure that contains all data and objects of a particular energy system model created with AnyMOD.
</p>
```
```julia
anyModel(inDir::Union{String,Array{String,1}}, outDir::String; kwargs)
```
## In- and output files
```@raw html
<p class="norm">
The constructor function above has two mandatory arguments that specify the directories for in- and output files. <code class="language-julia">inDir::Union{String,Array{String,1}}</code> specifies the directory (or directories) of input files. This can either be a string or an array of strings if input files are spread across different directories.
</p>
<p class="norm">
All <code>.csv</code> files within the provided directories (and their sub-directories) starting with <code>set_</code> or <code>par_</code> will be read in as input files. Other files are ignored and can be used for documentation. Within the specific files, only columns whose name contains one of the following keywords are actually read in: <code>parameter</code>, <code>variable</code>, <code>value</code>, <code>id</code>, <code>region</code>, <code>timestep</code>, <code>carrier</code>, <code>technology</code>, and <code>mode</code>. Other columns can be used freely for documentation.
</p>
```
!!! warning "Reserved keywords"
Within the input files `all` is a reserved keyword. For an explanation on how it is used, see [Time-steps](@ref).
```@raw html
<p class="norm">
<code class="language-julia">outDir::String</code>: defines the directory of output files. All reporting files including status reports, results, or graphs are written to this directory.
</p>
```
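As a brief sketch, input files spread across two folders can be combined as follows (directory names are illustrative):
```julia
# read set and parameter files from two separate input directories
# and write all reports and results to "results"
model_object = anyModel(["setFiles", "timeSeriesData"], "results")
```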
## Optional arguments
Additionally, the constructor accepts a list of optional arguments listed in the table below.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>argument</strong></td>
<td><strong>explanation</strong></td>
<td><strong>default</strong></td>
<td style="border-right:none"><strong>group</strong></td>
</tr>
<tr>
<td><code>objName</code></td>
<td>
<ul class="liste">
<li>internal name of the model object</li>
<li>added to the name of output files and printed during reporting</li>
</ul>
</td>
<td><code>""</code></td>
<td rowspan="2"; style="border-right:none">data handling</td>
</tr>
<tr>
<td><code>csvDelim</code></td>
<td>
<ul class="liste">
<li>specifies the delimiter used within the read-in csv files</li>
</ul>
</td>
<td><code>","</code></td>
</tr>
<tr>
<td><code>shortExp</code></td>
<td>
<ul class="liste">
<li>interval in years between steps of capacity expansion</li>
</ul>
</td>
<td><code>10</code></td>
<td rowspan="5"; style="border-right:none">model generation</td>
</tr>
<tr>
<td><code>supTsLvl</code></td>
<td>
<ul class="liste">
<li>depth in the tree of time-steps that defines the superordinate dispatch level (usually years)</li>
</ul>
</td>
<td><code>0</code></td>
</tr>
<tr>
<td><code>redStep</code></td>
<td>
<ul class="liste">
<li>scales down energy quantities within the model, relevant when working <br> with reduced time-series</li>
</ul>
</td>
<td><code>1.0</code></td>
</tr>
<tr>
<td><code>decomm</code></td>
<td>
<ul class="liste">
<li>controls if the model performs endogenous decommissioning, see <a href="#Decommissioning-of-operated-capacitiy">decommissioning <br> constraints</a> for implementation</li>
<li>available options are:
<ul style="margin-top:0px">
<li style="margin-top:0px"><code>decomm</code>: capacities are decommissioned endogenously, once decommissioned <br> capacities cannot be put into operation again</li>
<li><code>recomm</code>: capacities are decommissioned endogenously and can be put back into operation</li>
<li><code>none</code>: no endogenous decommissioning, operated capacities equal installed capacities</li>
</ul>
</ul>
</td>
<td><code>:decomm</code></td>
</tr>
<tr>
<td><code>interCapa</code></td>
<td>
<ul class="liste">
<li>capacity expansion can be modeled at a resolution less detailed than yearly, this option <br> determines how capacities are distributed among the subsequent years in this case</li>
<li>available options are:
<ul style="margin-top:0px">
<li style="margin-top:0px"><code>linear</code>: expansion is equally distributed among years resulting in a linear increase</li>
<li><code>none</code>: all expansion occurs in the first year</li>
</ul>
</ul>
</td>
<td><code>:linear</code></td>
</tr>
<tr>
<td><code>reportLvl</code></td>
<td>
<ul class="liste">
<li>controls the frequency of writing updates to the console</li>
</ul>
</td>
<td><code>2</code></td>
<td rowspan="3"; style="border-right:none"> <a href="../error/#Error-handling">reporting</a> </td>
</tr>
<tr>
<td><code>errCheckLvl</code></td>
<td>
<ul class="liste">
<li>controls the frequency of checking for errors</li>
</ul>
</td>
<td><code>2</code></td>
</tr>
<tr>
<td><code>errWrtLvl</code></td>
<td>
<ul class="liste">
<li>controls the frequency of writing to the report file</li>
</ul>
</td>
<td><code>1</code></td>
</tr>
<tr>
<td><code>avaMin</code></td>
<td>
<ul class="liste">
<li>availabilities smaller than this value are set to zero</li>
<li>avoids high coefficients in <a href="../constraints/#Conversion-capacity-restriction">conversion</a> and <a href="../constraints/#Storage-capacity-restriction">storage capacity restriction</a>, because availabilities <br> are inverted (see <a href="../performance/#Range-of-factors">Range of factors</a>)</li>
</ul>
</td>
<td><code>0.01</code></td>
<td rowspan="6"; style="border-right:none;border-bottom:none"> <a href="../performance/#Performance-and-stability">performance and stability</a> </td>
</tr>
<tr>
<td><code>emissionLoss</code></td>
<td>
<ul class="liste">
<li>determines if losses from exchange and self-discharge of storage are subject to emissions <br> (see <a href="../performance/#Range-of-factors">Range of factors</a>)</li>
</ul>
</td>
<td><code>true</code></td>
</tr>
<tr>
<td><code>checkRng</code></td>
<td>
<ul class="liste">
<li>if set, reports all constraints whose range exceeds the specified value</li>
<li>type is <code>Float64</code></li>
</ul>
</td>
<td><code>NaN</code></td>
</tr>
<tr>
<td><code>scaFac</code></td>
<td colspan = "2">
<ul class="liste">
<li>scales different groups of variables within the model</li>
<li>format for argument is <code>(capa = 1e1, oprCapa = 1e2, dispConv = 1e3, ...)</code> </li>
<li>see <a href="../performance/#Scaling">Column scaling</a> for details and defaults</li>
</ul>
</td>
</tr>
<tr>
<td><code>coefRng</code></td>
<td colspan = "2">
<ul class="liste">
<li>specifies the maximum range of coefficients in the matrix and right-hand side of the model's underlying <br> optimization problem</li>
<li>format for argument is <code>(mat = (1e-2,1e5), rhs = (1e-2,1e2))</code></li>
<li>see <a href="../performance/#Scaling">Row scaling</a> for details and defaults</li>
</ul>
</td>
</tr>
<tr>
<td><code>bound</code></td>
<td colspan = "2">
<ul class="liste">
<li>sets external bounds for all capacities and dispatch variables (in GW) and for the objective value (in Mil. €)</li>
<li>see <a href="../performance/#Variable-limits">Variable limits</a> for details and defaults</li>
<li>format for argument is <code>(capa = NaN, disp = NaN, obj = NaN)</code></li>
</ul>
</td>
</tr>
</tbody>
</table>
```
## Fields
```@raw html
Relevant fields of the model object include:
<ul>
<li><code class="language-julia">sets::Dict{Symbol,Tree}</code>: sets defined within the model and their tree structure each saved as <a href="../api/#AnyMOD.Tree"><code>Tree</code></a> object (see <a href="../sets_and_mappings">Sets and mappings</a> for details)</li>
<li><code class="language-julia">parts::NamedTuple</code>: all parts of the model; these contain the specific parameters, variables, and constraints (see <a href="../parts">Parts</a> for details)</li>
<li><code class="language-julia">report::Array{Tuple,1}</code>: entries for writing to the reporting file (see <a href="../error/#Error-handling">Error handling</a> for details)</li>
<li><code class="language-julia">graInfo::graInfo</code>: properties for creation of plots and graphics, can be used to adjust colors and labels (see <a href="../plots/#Styling">Styling</a> for details)</li>
<li><code class="language-julia">optModel::Model</code>: the actual <a href="https://github.com/JuliaOpt/JuMP.jl">JuMP</a> object of the model's underlying optimization problem</li>
</ul>
```
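A short sketch of accessing these fields after construction; the dictionary key `:C` and the field `tech` within `parts` are assumptions based on the set symbols and part types used throughout this documentation:
```julia
model_object.sets[:C]    # tree object of energy carriers (assumed key)
model_object.parts.tech  # dictionary of technology parts (assumed field)
model_object.optModel    # underlying JuMP model, e.g. for passing to a solver
```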
```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-bottom: none;
border-color: #dbdbdb;
font-size: small;
font-weight: normal;
}
table.tabelle td {
border-left: 1px solid;
border-color: #dbdbdb;
}
table.tabelle td:first-child {
border-right: 2.5px solid;
border-color: #dbdbdb;
border-left: none;
}
ol.liste {
list-style-position: inside;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
ul.liste {
list-style-position: inside;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Parameter list
In the following, all parameters available in AnyMOD are listed. The information provided includes the name used in the input files and throughout the model, the parameter's unit, its dimensions according to the symbols introduced in [Sets and Mappings](@ref), its default value, and its inheritance rules. In addition, related model elements and the part a parameter is assigned to are documented.
# Dispatch of technologies
The parameters listed here describe the conversion and storage of energy carriers by technologies. As a result, each of these parameters can vary by operational mode. If any mode-specific values are provided, they replace mode-unspecific data.
The following two diagrams serve as a reminder of how conversion and storage are generally modeled in AnyMOD.
```@raw html
<p style="text-align:center;"><img src="../assets/convTech.svg" width="64%"/>
<p style="text-align:center;"><img src="../assets/stTech.svg" width="80%"/>
```
### Availability
Technical availability of the operated capacity.
Since operated capacity is split into conversion, storage-input, storage-output, and storage-size, the same distinction applies to availabilities.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>avaConv</td>
<td>avaSt{In/Out/Size}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $Te$, $M$</td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">1.0</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
<ol class="liste"; start="4">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ol class="liste"; start="4">
<li>$C$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Conversion-capacity-restriction-1">conversion capacity restriction</a></li>
<li><a href="../variables/#Generation-and-use-1">conversion variables</a> only created <br> where availability is not zero</li>
</ul>
</td>
<td>
<ul class="liste">
<li><a href="../constraints/#Storage-capacity-restriction-1">storage capacity restriction</a></li>
<li><a href="../variables/#Charging-and-discharging-1">storage variables</a> only created <br> where availability is not zero</li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Efficiency
Efficiency of converting or storing energy carriers.
For conversion, the parameter controls the ratio between in- and output carriers. For storage, it determines the losses that charging to and discharging from the storage system are subject to.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>effConv</td>
<td>effSt{In/Out}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $Te$, $M$</td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">1.0</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
<ol class="liste"; start="4">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ol class="liste"; start="4">
<li>$C$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Conversion-balance-1">conversion balance</a></li>
<li><a href="../constraints/#Conversion-capacity-restriction-1">conversion capacity restriction</a></li>
</ul>
</td>
<td>
<ul class="liste">
<li><a href="../constraints/#Storage-balance-1">storage balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Variable cost
Costs imposed on different types of quantities dispatched.
Note that for storage, costs are incurred on quantities as specified in the diagram above. This means `stIn` quantities still include charging losses, while `stOut` quantities are already corrected for losses from discharging.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costVar{Use/Gen/StIn/StOut}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>€/MWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>0.0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related constraints</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Ratios of carrier use and generation
Restricting the share of a single carrier in total use or generation. The share can either be fixed or imposed as a lower or upper limit.
One practical example for the application of this parameter is modelling the power-to-heat ratio of cogeneration plants (see [`par_techDispatch.csv`](https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/par_techDispatch.csv)).
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>ratioEnerUse{Fix/Low/Up}</td>
<td>ratioEnerGen{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center">
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Energy-ratio-restriction-1">energy ratio restriction</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Storage self-discharge
```@raw html
<p class="norm">
Automatic reduction of stored energy within a storage system.
</p>
<p class="norm">
If the stored carrier is assigned an <a href="../parameter_list/#Emission-factor-1">emission factor</a> and <a href="../model_object/#Optional-arguments-1"><code>emissionLoss</code></a> is set to <code>true</code>, these losses are subject to emissions.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stDis</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>percent as decimal per hour</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>0.0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Storage-Balance-1">storage balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Storage inflow
```@raw html
<p class="norm">
External charging of the storage system. Inflows can also be negative and are not subject to charging losses.
</p>
<p class="norm">
Flows have to be provided in power units and are converted into energy quantities according to the temporal resolution of the respective carrier (e.g. at a daily resolution 2 GW translate into 48 GWh). This approach ensures parameters do not need to be adjusted when the temporal resolution is changed. The most important application of this parameter is natural inflows into hydro storages.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stInflow</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>0.0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>sum</em></li>
<li>$R_{dis}$ → <em>sum</em></li>
<li>$Te$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Storage-Balance-1">storage balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
# Dispatch of exchange and trade
### Exchange availability
Technical availability of exchange capacities. The parameter `avaExc` applies in both directions and is overwritten by the directed `avaExcDir`.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>avaExc</td>
<td>avaExcDir</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td style="text-align:center">$Ts_{dis}$, $R_{a}$, $R_{b}$, $C$</td>
<td style="text-align:center">$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">1.0</td>
</tr>
<tr>
<td rowspan="3"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{a}$ → <em>upwards</em></li>
<li>$R_{b}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{from}$ → <em>upwards</em></li>
<li>$R_{to}$ → <em>upwards</em></li>
<li>$R_{from}$ → <em>average</em></li>
<li>$R_{to}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0.1875em">
<ol class="liste"; start="6">
<li>$Ts_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Exchange-capacity-restriction-1">exchange capacity restriction</a></li>
<li><a href="../variables/#Exchange-quantities-1">exchange variables</a> only created where availability is not zero</li>
</ul>
</td>
</tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Exchange losses
```@raw html
<p class="norm">
Losses occurring when energy is exchanged between two regions. The parameter <code>lossExc</code> applies in both directions and is overwritten by the directed <code>lossExcDir</code>.
</p>
<p class="norm">
If the exchanged carrier is assigned an <a href="../parameter_list/#Emission-factor-1">emission factor</a> and <a href="../model_object/#Optional-arguments-1"><code>emissionLoss</code></a> is set to <code>true</code>, these losses are subject to emissions.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>lossExc</td>
<td>lossExcDir</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td style="text-align:center">$Ts_{dis}$, $R_{a}$, $R_{b}$, $C$</td>
<td style="text-align:center">$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">0.0</td>
</tr>
<tr>
<td rowspan="3"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{a}$ → <em>upwards</em></li>
<li>$R_{b}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{from}$ → <em>upwards</em></li>
<li>$R_{to}$ → <em>upwards</em></li>
<li>$R_{from}$ → <em>average</em></li>
<li>$R_{to}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0.1875em">
<ol class="liste"; start="6">
<li>$Ts_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Conversion-balance-1">conversion balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Exchange cost
Costs imposed on exchanged quantities. Costs are split equally between the exporting and the importing region.
The parameter `costVarExc` applies in both directions and is overwritten by the directed `costVarExcDir`.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costVarExc</td>
<td>costVarExcDir</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">€/MWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td style="text-align:center">$Ts_{dis}$, $R_{a}$, $R_{b}$, $C$</td>
<td style="text-align:center">$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="3"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{a}$ → <em>upwards</em></li>
<li>$R_{b}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{from}$ → <em>upwards</em></li>
<li>$R_{to}$ → <em>upwards</em></li>
<li>$R_{from}$ → <em>average</em></li>
<li>$R_{to}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0.1875em">
<ol class="liste"; start="6">
<li>$Ts_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Trade price
```@raw html
<p class="norm">
Price for buying or selling an energy carrier on an external market.
</p>
<p class="norm">
Can be combined with the parameter <a href="../parameter_list/#Trade-capacity-1">trade capacity</a> to create stepped demand and supply curves (see following documentation for details).
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>trdBuyPrc</td>
<td>trdSellPrc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">€/MWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$, $id$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Trade-cost-equation-1">trade cost equation</a></li>
<li><a href="../variables/#Buy-and-sell-1">trade variables</a> only created where prices are defined</li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Trade-1">trade</a></td>
</tr>
</tbody>
</table>
```
### Trade capacity
```@raw html
<p class="norm">
Capacity available for buying or selling an energy carrier on an external market.
</p>
<p class="norm">
Capacity has to be provided in power units and is converted into energy quantities according to the temporal resolution of the respective carrier (e.g. at a daily resolution 2 GW translate into 48 GWh). This approach ensures parameters do not need to be adjusted when the temporal resolution is changed.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>trdBuyCap</td>
<td>trdSellCap</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$, $id$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Trade-capacity-restriction-1">trade capacity restriction</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Trade-1">trade</a></td>
</tr>
</tbody>
</table>
<p class="norm">
By assigning the same <code>id</code> to a <a href="../parameter_list/#Trade-price-1">trade price</a> and capacity, the amount of energy that can be bought or sold at the given price can be limited. As a result, stepped supply and demand curves for energy carriers can be created.
</p>
<p class="norm">
For example, the table below enables the import of <code>hydrogen</code> to the region <code>West</code> at 100 €/MWh, but limits the import capacity to 20 GW. When imposing this limit, the capacity is scaled according to the temporal resolution hydrogen is modeled at. So, at a yearly resolution 20 GW would translate to 175.2 TWh (= 20 GW × 8760 h).
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>region_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>id</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">West</td>
<td style="border-right:none">hydrogen</td>
<td>1</td>
<td style="border-right:none">trdBuyPrc</td>
<td style="border-right:none;text-align:center">100.0</td>
</tr>
<tr>
<td style="border-right:none">West</td>
<td style="border-right:none">hydrogen</td>
<td>1</td>
<td style="border-right:none">trdBuyCap</td>
<td style="border-right:none;text-align:center">20.0</td>
</tr>
</tbody>
</table>
<p class="norm">
Alternatively, the definition below creates an additional electricity demand of 2.0 and 1.0 GW with a willingness-to-pay of 60 and 90 €/MWh, respectively. By adding more columns, values could be further differentiated by time-step and region.
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>id</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">electricity</td>
<td>1</td>
<td style="border-right:none">trdSellPrc</td>
<td style="border-right:none;text-align:center">60.0</td>
</tr>
<tr>
<td style="border-right:none">electricity</td>
<td>2</td>
<td style="border-right:none">trdSellPrc</td>
<td style="border-right:none;text-align:center">90.0</td>
</tr>
<tr>
<td style="border-right:none">electricity</td>
<td>1</td>
<td style="border-right:none">trdSellCap</td>
<td style="border-right:none;text-align:center">2.0</td>
</tr>
<tr>
<td style="border-right:none">electricity</td>
<td>2</td>
<td style="border-right:none">trdSellCap</td>
<td style="border-right:none;text-align:center">1.0</td>
</tr>
</tbody>
</table>
</p>
```
# Other dispatch
### Demand
Inelastic demand for an energy carrier.
Demand has to be provided in power units and is converted into energy quantities according to the temporal resolution of the respective carrier (e.g. at a daily resolution 20 GW translate into 480 GWh). This approach ensures parameters do not need to be adjusted when the temporal resolution is changed.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>dem</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>0.0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>sum</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Balance-1">balance</a></td>
</tr>
</tbody>
</table>
```
### Cost of curtailment and loss of load
Variable costs imposed on excess generation (curtailment) or unmet demand (loss of load). Costs can also be negative (i.e. revenues).
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costCrt</td>
<td>costLss</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">€/MWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>average</em></li>
<li>$R_{dis}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Curtailment-and-loss-of-load-cost-equation-1">curtailment and loss-of-load cost equation</a></li>
<li><a href="../variables/#Curtailment-and-loss-of-load-1">respective variables</a> only created where costs are defined</li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Balance-1">balance</a></td>
</tr>
</tbody>
</table>
```
# Capacity expansion
```@raw html
<p class="norm">
Here, all parameters relevant to the expansion of conversion, storage, and exchange capacity are listed.
</p>
<p class="norm">
At this point it is important to stress that, as displayed in the <a href="#Dispatch-of-technologies">technology diagrams</a>, <strong>AnyMOD always indicates capacities before efficiency losses!</strong> For instance, the capacity of a gas power plant does not denote its maximum electricity output, but the maximum gas input. This approach is pursued because <a href="../parameter_list/#Efficiency-1">efficiency</a> is not a constant and can differ by time-step, region, and mode. As a result, the maximum output varies within the dispatch too and is not suited to universally describe installed capacities.
</p>
```
### Discount rate
Overall rate to discount all costs to the present. See [Cost equations](@ref) for details on use.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>rateDisc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>0.02</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{sup}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
<li>$Ts_{sup}$ → <em>average</em></li>
<li>$R_{exp}$ → <em>average</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li>all <a href="../constraints/#Cost-equations-1">cost equations</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Interest rate
Interest rate to compute annuity costs of investments. See [Cost equations](@ref) for details on use.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>rateExpConv</td>
<td>rateExpSt{In/Out/Size}</td>
<td>rateExpExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">percent as decimal</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">respective discount rate is used as a default</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Expansion-cost-equation-1">expansion cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Expansion cost
Costs of capacity expansion (or investment).
!!! warning "Cost data before efficiency"
    Ensure the cost data provided relates to capacity **before efficiency** (see the beginning of this section)! Costs before efficiency can be obtained by multiplying costs after efficiency by a nominal efficiency: ``K_{before} = K_{after} \cdot \eta``.
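    For example, a plant with costs of 400 Mil.€ per GW of electrical output and a nominal efficiency of 0.5 corresponds to 400 · 0.5 = 200 Mil.€ per GW of fuel input.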
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costExpConv</td>
<td>costExpSt{In/Out/Size}</td>
<td>costExpExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>Mil.€/GW</td>
<td>Mil.€/GWh</td>
<td>Mil.€/GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Expansion-cost-equation-1">expansion cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Operating cost
Costs of operating installed capacities.
!!! warning "Cost data before efficiency"
    Ensure the cost data provided relates to capacity **before efficiency** (see the beginning of this section)! Costs before efficiency can be obtained by multiplying costs after efficiency by a nominal efficiency: ``K_{before} = K_{after} \cdot \eta``.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costOprConv</td>
<td>costOprSt{In/Out/Size}</td>
<td>costOprExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>Mil.€/GW/a</td>
<td>Mil.€/GWh/a</td>
<td>Mil.€/GW/a</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Ts_{sup}$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{sup}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$R_{a}$ → <em>upwards</em></li>
<li>$R_{b}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Operating-cost-equation-1">operating cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Technical lifetime
Time in years a capacity can be operated after construction.
To avoid distortions, lifetimes are advised to be divisible by the step-size of capacity modelling (e.g. rather using 20 or 25 instead of 23 when using 5-year steps).
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>lifeConv</td>
<td>lifeSt{In/Out/Size}</td>
<td>lifeExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">years</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">20</td>
<td>50</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Economic lifetime
Time in years to compute annuity costs of investment. Also determines the time-frame annuity costs are incurred over.
To avoid distortions, lifetimes are advised to be divisible by the step-size of capacity modelling (e.g. rather using 20 or 25 instead of 23 when using 5-year steps).
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>lifeEcoConv</td>
<td>lifeEcoSt{In/Out/Size}</td>
<td>lifeEcoExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">years</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">respective technical lifetime is used as a default</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Expansion-cost-equation-1">expansion cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Construction time
Time in years for construction of capacity. This parameter introduces an offset between the start of the economic and technical lifetime.
To avoid distortions, construction times are advised to be divisible by the step-size of capacity modelling (e.g., rather use 0 or 5 instead of 3 when using 5-year steps).
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>delConv</td>
<td>delSt{In/Out/Size}</td>
<td>delExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">years</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">0</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center" >
<div style = "width: 50%; margin: 0 auto;">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</div>
</td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{a}$ → <em>average</em></li>
<li>$R_{b}$ → <em>average</em></li>
<li>$C$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
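To make the offset concrete, consider a capacity whose expansion is decided for 2020, with a construction time of 5 years and a technical lifetime of 20 years. The sketch below computes the resulting operation window; treating the last year as inclusive is an interpretation for illustration, not prescribed by AnyMOD.

```julia
exp_year = 2020  # expansion time-step
del      = 5     # construction time in years
life     = 20    # technical lifetime in years

first_operational = exp_year + del             # 2025
last_operational  = exp_year + del + life - 1  # 2044
```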
# Limits on quantities dispatched
```@raw html
<p class="norm">
Limits on variables also utilize the <a href="../parameter_overview/#Inheritance">inheritance algorithm</a>. Therefore, the way parameter data is provided determines how limits are enforced. For example, in the table below the upper limit of 100 GWh on the use of <code>biomass</code> will be imposed on the sum of use across <u>all years</u>, because the time-step dimension is undefined.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">biomass</td>
<td></td>
<td style="border-right:none">useUp</td>
<td style="border-right:none;text-align:center">100.0</td>
</tr>
</tbody>
</table>
<p class="norm">
If instead the limit should apply to <u>each year</u> separately, each of these years needs to be specified.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none;border-right:none">biomass</td>
<td>2020</td>
<td style="border-right:none">useUp</td>
<td style="border-right:none;text-align:center">100.0</td>
</tr>
<tr>
<td style="border-right:none;border-right:none">biomass</td>
<td>2030</td>
<td style="border-right:none">useUp</td>
<td style="border-right:none;text-align:center">100.0</td>
</tr>
</tbody>
</table>
<p class="norm">
As an abbreviation, the keyword <code>all</code> can also be applied (see <a href="../sets/#Time-steps">Time-steps</a> for details) to reduce the number of required rows.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">biomass</td>
<td>all</td>
<td style="border-right:none">useUp</td>
<td style="border-right:none;text-align:center">100.0</td>
</tr>
</tbody>
</table>
<p class="norm">
So far, the limit for each year still applies to the summed use of biomass across all regions. This could again be altered by adding a respective column.
</p>
<p class="norm">
Applying limits to the sum of variables across different years can be intended in some cases (for example, an emission budget from now until 2050). But it is also a likely and severe mistake if one is unfamiliar with AnyMOD's specific mechanics. For this reason, defining a limit that sums up variables from different years will cause a warning within the <a href="../error/#Error-handling">reporting file</a>.
</p>
```
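For illustration, such a parameter file can also be generated programmatically. The sketch below writes the last table above using the DataFrames and CSV packages; the file name and input directory are placeholders, only the `par_` prefix and the column conventions follow AnyMOD's rules.

```julia
using DataFrames, CSV

# upper limit of 100 GWh on biomass use, applied to each year separately
par_df = DataFrame(
    carrier_1  = ["biomass"],
    timestep_1 = ["all"],      # 'all' expands to every node on the first level
    parameter  = ["useUp"],
    value      = [100.0],
)

CSV.write("inputDir/par_biomassLimit.csv", par_df)  # directory is an assumption
```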
### Limits on technology dispatch
```@raw html
<p class="norm">
Limits on technology dispatch. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>use{Fix/Low/Up}</td>
<td>gen{Fix/Low/Up}</td>
<td>stOut{Fix/Low/Up}</td>
<td>stIn{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="4"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="4"; style="text-align:center">$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="4"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="4"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>sum</em>/<em>sum*</em></li>
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{dis}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
<li>$Te$ → <em>sum</em>/<em>sum*</em></li>
<li>$M$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="4"; style="text-align:center">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="4"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Limits on exchange
```@raw html
<p class="norm">
Limits on exchange quantities. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>exc{Fix/Low/Up}</td>
<td>excDir{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{a}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{b}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Limits on trade, curtailment and loss of load
```@raw html
<p class="norm">
Limits on traded and curtailed quantities as well as on unmet demand. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>trdBuy{Fix/Low/Up}</td>
<td>trdSell{Fix/Low/Up}</td>
<td>crt{Fix/Low/Up}</td>
<td>lss{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="4"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="4"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="4"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="4"; style="text-align:center">
<ol class="liste">
<li>$Ts_{dis}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{dis}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="4"; style="text-align:center">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="4"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
# Limits on expansion and capacity
```@raw html
<p class="norm">
Limits on expansion and capacity are enforced analogously to <a href="#Limits-on-quantities-dispatched">limits on dispatch quantities</a>. Therefore, the same caution with regard to how limits are defined should be exercised. As explained for dispatched quantities in greater detail, the table below will impose an upper limit of 80 GW on the installed capacity of <code>wind</code> summed across <u>all years</u>.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>technology_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">wind</td>
<td></td>
<td style="border-right:none">capaConvUp</td>
<td style="border-right:none;text-align:center">80.0</td>
</tr>
</tbody>
</table>
<p class="norm">
While this table will actually enforce separate limits of 80 GW on the installed capacity of <code>wind</code> in <u>each year</u>.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>technology_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">wind</td>
<td>all</td>
<td style="border-right:none">capaConvUp</td>
<td style="border-right:none;text-align:center">80.0</td>
</tr>
</tbody>
</table>
```
### Storage ratios
```@raw html
<p class="norm">One technology can have four different kinds of capacity variables (see <a href="../sets/#Technologies">Technologies</a> for details): conversion, storage-input, storage-output, and storage-size. The ratios between these capacities can be fixed by the following parameters:</p>
```
- `stInToConv`: ratio between conversion and storage-input capacity
- `stOutToStIn`: ratio between storage-output and storage-input capacity
- `sizeToStIn`: ratio between storage-size and storage-input capacity, commonly referred to as the energy-to-power ratio
```@raw html
<p class="norm">
Ratios are not directly applied to <a href="../variables/#Installed-capacity-1">installed capacities</a>, but to <a href="../variables/#Expansion-1">expansion variables</a> instead. Consequently, actually installed capacities can deviate from the specified ratios if any <a href="../parameter_list/#Residual-capacities-1">residual capacities</a> are provided. In the case of <code>stock</code> technologies, which cannot be expanded, ratios are directly enforced on capacities. In this case, any deviating <a href="../parameter_list/#Residual-capacities-1">residual capacities</a> are ignored.
</p>
```
!!! note "Upper and lower limits on ratios"
    So far, AnyMOD only supports fixing these ratios and does not support imposing upper or lower limits on them. As a workaround, the code below shows how an upper limit of 10 on the energy-to-power ratio can be manually added to a model.
```julia
using JuMP

# capacity variables of the battery technology
var = model_object.parts.tech[:battery].var
for x in 1:size(var[:capaStIn], 1)
    stIn, stSize = [var[y][x, :var] for y in [:capaStIn, :capaStSize]]
    # storage size may be at most 10 times the storage-input capacity
    @constraint(model_object.optModel, 10 * stIn >= stSize)
end
```
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stInToConv</td>
<td>stOutToStIn</td>
<td>sizeToStIn</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">dimensionless</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="3"; style="text-align:center">$Ts_{exp}$, $R_{exp}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td colspan="3"; style="text-align:center">
<ol class="liste">
<li>$Te$ → <em>upwards</em></li>
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$R_{exp}$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../variables/#Installed-capacity-1">capacity variables</a> substituted by product of ratio and connected variable</li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Residual capacities
Installed capacities for technologies that already exist without any expansion.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>capaConvResi</td>
<td>capa{StIn/StOut/StSize}Resi</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$R_{exp}$ → <em>sum</em></li>
<li>$Te$ → <em>sum</em></li>
<li>$Ts_{sup}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;padding-top:0px">
<ol class="liste"; start="4">
<li>$Ts_{exp}$ → <em>sum</em></li>
<li>$Ts_{sup}$ → <em>upwards</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ol class="liste"; start="4">
<li>$C$ → <em>sum</em></li>
<li>$Ts_{exp}$ → <em>sum</em></li>
<li>$Ts_{sup}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
<li><a href="../constraints/#Decommissioning-of-operated-capacitiy-1">decommissioning of operated capacitiy</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
Installed exchange capacities that already exist without any expansion.
Defining a residual capacity between two regions generally enables exchange of a specific carrier between these regions. If exchange should be enabled although no pre-existing capacity exists, a residual capacity of zero can be provided.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>capaExcResi</td>
<td>capaExcResiDir</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td style="text-align:center">$Ts_{dis}$, $R_{a}$, $R_{b}$, $C$</td>
<td style="text-align:center">$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="2"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="3"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{dis}$ → <em>upwards</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{a}$ → <em>sum</em></li>
<li>$R_{b}$ → <em>sum</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ol class="liste"; start="2">
<li>$R_{from}$ → <em>sum</em></li>
<li>$R_{to}$ → <em>sum</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;padding-top:0.1875em">
<ol class="liste"; start="4">
<li>$Ts_{dis}$ → <em>average</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
<li><a href="../constraints/#Decommissioning-of-operated-capacitiy-1">decommissioning of operated capacitiy</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
`capaExcResi` refers to capacity in both directions, while `capaExcResiDir` refers to directed capacities and is added on top of any undirected values. Consequently, the table below results in a residual capacity of 4 GW from `East` to `West` and 3 GW from `West` to `East`.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>region_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>region_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>carrier_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">East</td>
<td style="border-right:none">West</td>
<td>electricity</td>
<td style="border-right:none">capaExcResi</td>
<td style="border-right:none;text-align:center">3.0</td>
</tr>
<tr>
<td style="border-right:none">East</td>
<td style="border-right:none">West</td>
<td>electricity</td>
<td style="border-right:none">capaExcResiDir</td>
<td style="border-right:none;text-align:center">1.0</td>
</tr>
</tbody>
</table>
```
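The resulting directed capacities can be reproduced with a few lines of arithmetic; the sketch simply restates the addition rule from above.

```julia
capaExcResi    = 3.0  # undirected, applies in both directions
capaExcResiDir = 1.0  # directed, from East to West only

east_to_west = capaExcResi + capaExcResiDir  # 4.0 GW
west_to_east = capaExcResi                   # 3.0 GW
```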
### Limits on expansion
```@raw html
<p class="norm">
Limits on capacity expansion. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>expConv{Fix/Low/Up}</td>
<td>exp{StIn/StOut/StSize}{Fix/Low/Up}</td>
<td>expExc{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0.1875em">
<ol class="liste">
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$Te$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
<td colspan="1"; rowspan="2">
<ol class="liste">
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{a}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{b}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;padding-top:0px">
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ol class="liste"; start="4">
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Limits on capacity
```@raw html
<p class="norm">
Limits on installed capacity. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>capaConv{Fix/Low/Up}</td>
<td>capa{StIn/StOut/StSize}{Fix/Low/Up}</td>
<td>capaExc{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$,$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{sup}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ol class="liste">
<li>$R_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$Te$ → <em>sum</em>/<em>sum*</em></li>
<li>$Ts_{sup}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; rowspan="2">
<ol class="liste">
<li>$Ts_{sup}$ → <em>average</em></li>
<li>$R_{a}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{b}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none;padding-top:0px">
<ol class="liste"; start="4">
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center;padding-top:0px">
<ol class="liste"; start="4">
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Limits on operated capacity
```@raw html
<p class="norm">
Limits on operated capacity. In the inheritance rules, <em>sum*</em> only applies to upper limits.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>oprCapaConv{Fix/Low/Up}</td>
<td>oprCapa{StIn/StOut/StSize}{Fix/Low/Up}</td>
<td>oprCapaExc{Fix/Low/Up}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$,$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{sup}$, $R_{a}$, $R_{b}$, $C$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td colspan="3"; style="text-align:center">none</td>
</tr>
<tr>
<td rowspan="2"><strong>inheritance rules</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ol class="liste">
<li>$R_{exp}$ → <em>sum</em>/<em>sum*</em></li>
<li>$Te$ → <em>sum</em>/<em>sum*</em></li>
<li>$Ts_{sup}$ → <em>average</em></li>
</ol>
</td>
<td colspan="1"; rowspan="2">
<ol class="liste">
<li>$Ts_{sup}$ → <em>average</em></li>
<li>$R_{a}$ → <em>sum</em>/<em>sum*</em></li>
<li>$R_{b}$ → <em>sum</em>/<em>sum*</em></li>
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:center;border-right:none">
<ol class="liste"; start="4">
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
<td colspan="1"; style="text-align:center">
<ol class="liste"; start="4">
<li>$C$ → <em>sum</em>/<em>sum*</em></li>
<li>$Ts_{exp}$ → <em>sum</em>/<em>sum*</em></li>
</ol>
</td>
</tr>
<tr>
<td><strong>related elements</strong></td>
<td colspan="3"; style="text-align:center;vertical-align:middle">
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="3"; style="text-align:center"><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
# Emissions
### Emission limit
Upper limit on carbon emissions.
```@raw html
<p class="norm">
Upper limits on emissions are enforced analogously to <a href="#Limits-on-quantities-dispatched">limits on dispatch quantities</a>. Therefore, the same caution with regard to how limits are defined should be exercised. As explained for dispatched quantities in greater detail, the table below will impose a carbon budget, meaning an upper limit on the sum of carbon emitted across <u>all years</u>.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td></td>
<td style="border-right:none">emissionUp</td>
<td style="border-right:none;text-align:center">80.0</td>
</tr>
</tbody>
</table>
<p class="norm">
While this table will enforce separate limits for <u>each year</u>.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td>all</td>
<td style="border-right:none">emissionUp</td>
<td style="border-right:none;text-align:center">80.0</td>
</tr>
</tbody>
</table>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>emissionUp</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>Mil. tCO<sub>2</sub></td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{dis}$ → <em>sum*</em></li>
<li>$Ts_{exp}$ → <em>sum*</em></li>
<li>$R_{dis}$ → <em>sum*</em></li>
<li>$C$ → <em>sum*</em></li>
<li>$Te$ → <em>sum*</em></li>
<li>$M$ → <em>sum*</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Emission factor
Relative emissions associated with the use of a carrier.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>emissionFac</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>tCO<sub>2</sub>/GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$M$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Limit-1">limit</a></td>
</tr>
</tbody>
</table>
```
### Emission price
Costs imposed on emitting carbon.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>emissionPrc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>€/tCO<sub>2</sub></td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $Ts_{exp}$, $R_{dis}$, $C$, $Te$, $M$</td>
</tr>
<tr>
<td><strong>default value</strong></td>
<td>none</td>
</tr>
<tr>
<td><strong>inheritance rules</strong></td>
<td>
<ol class="liste">
<li>$Ts_{exp}$ → <em>upwards</em></li>
<li>$Ts_{dis}$ → <em>upwards</em></li>
<li>$R_{dis}$ → <em>upwards</em></li>
<li>$C$ → <em>upwards</em></li>
<li>$Te$ → <em>upwards</em></li>
<li>$M$ → <em>upwards</em></li>
</ol>
</td></tr>
<tr>
<td><strong>related elements</strong></td>
<td>
<ul class="liste">
<li>see <a href="../constraints/#Limiting-constraints-1">limiting constraints</a></li>
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-bottom: none;
border-color: #dbdbdb;
font-size: small;
font-weight: normal;
}
table.tabelle3 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
font-weight: normal;
}
ol.nest {
list-style-position: inside;
}
ol.nest2 {
counter-reset: item;
list-style-position: inside;
}
li.nest {
display: block;
}
li.nest li:before {
content: "2." counter(item, decimal);
counter-increment: item;
}
li.nest2 li:before {
content: "2." counter(item, decimal);
counter-increment: item;
}
p.norm {
font-weight: normal;
font-size: medium;
}
</style>
```
# Parameter overview
```@raw html
<p class="norm">
Parameter data is provided in <code>.csv</code> files starting with <code>par_</code> and transferred into <a href="../api/#AnyMOD.ParElement"><code>ParElement</code></a> objects. The read-in data is used by the inheritance algorithm to compile the parameter data used in the model's underlying optimization problem. This avoids huge but largely redundant input files and allows for lean and flexible models.
</p>
```
# Read-in
```@raw html
<p class="norm">
In contrast to sets, the naming of parameter files and what data is provided in which file is entirely up to the user. For instance, data for the same parameter can be spread across different files, and multiple parameters can be defined within one file.
</p>
```
### Pivot format
```@raw html
<p class="norm">
Parameter data is provided in a pivot format, where the column <code>parameter</code> determines the respective parameter and the column <code>value</code> its respective value. Additional columns specify the nodes within the hierarchical trees of sets a value applies to. Therefore, the names of these columns are composed of the set's name and, if the set is organized in a tree with multiple levels, the number of the respective level.
</p>
<p class="norm">
The <code>all</code> keyword can be used analogously to sets (see <a href="../sets/#Time-steps">Time-steps</a>). Columns that do not contain a set name (i.e. <code>region</code>, <code>timestep</code>, <code>carrier</code>, <code>technology</code>, <code>mode</code>, or <code>id</code>) or keyword (i.e. <code>parameter</code> and <code>value</code>) are not read in and can be used for documentation. To define multiple parameters within the same row, several numbered <code>parameter</code>/<code>value</code> columns can be provided (see <a href="https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/par_techCost.csv"><code>par_techCost.csv</code></a>).
</p>
<p class="norm">
As an example, the table below shows the definition of the <a href="../parameter_list/#Discount-rate-1">discount rate</a> parameter in the demo model found in the <a href="https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/par_techInvest.csv"><code>par_techInvest.csv</code></a> file.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>region_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>region_2</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none"></td>
<td style="border-right:none">West</td>
<td></td>
<td style="border-right:none">rateDisc</td>
<td style="border-right:none;text-align:center">0.0</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">West</td>
<td>WestSouth</td>
<td style="border-right:none">rateDisc</td>
<td style="border-right:none;text-align:center">0.015</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">East</td>
<td></td>
<td style="border-right:none">rateDisc</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
</tbody>
</table>
```
In case a set is included in the dimensions of a parameter but no node of that set is assigned for a respective value, the very top node of the tree is assigned instead.
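As a sketch, the discount-rate table above could equally be created programmatically; empty strings correspond to the empty cells, while the file name follows the demo problem and the directory is a placeholder.

```julia
using DataFrames, CSV

par_df = DataFrame(
    timestep_1 = ["",     "2030",      "2030"],
    region_1   = ["West", "West",      "East"],
    region_2   = ["",     "WestSouth", ""],
    parameter  = ["rateDisc", "rateDisc", "rateDisc"],
    value      = [0.0, 0.015, 0.03],
)

CSV.write("inputDir/par_techInvest.csv", par_df)
```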
### Multiple dependencies on same set
```@raw html
<p class="norm">
Parameters can depend on multiple instances of the same set. In the case of an <code>emerging</code> technology (see the section on optional mappings for <a href="../sets/#Technologies">technologies</a>),
<a href="../parameter_list/#Efficiency-1">efficiency</a> for instance can depend on two different kinds of time-steps: $Ts_{dis}$, the time-step a technology is being used, and $Ts_{exp}$, the time-step a technology was built. Following the order of sets in the definition, the first time-step specified in the input data will always relate to $Ts_{dis}$ and the second to $Ts_{exp}$.
</p>
<p class="norm">
Accordingly, in the table below the first column relates to $Ts_{dis}$ and the second to $Ts_{exp}$. Consequently, the specified efficiency applies to the first hour of every year for all <code>heatpump</code> capacities constructed in <code>2020</code>.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_4</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>technology_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">h0001</td>
<td style="border-right:none">2020</td>
<td>heatpump</td>
<td style="border-right:none">effConv</td>
<td style="border-right:none;text-align:center">5.0</td>
</tr>
</tbody>
</table>
<p class="norm">
This concept equally applies if one of the sets is defined by more than one column. In the table below, the first and the second column jointly specify $Ts_{dis}$. Since the third column goes back up to the first level, AnyMOD realizes it refers to a different dimension and attributes it to $Ts_{exp}$. As a result, both efficiencies apply to <code>heatpumps</code> constructed in <code>2020</code>, but one row relates to the first hour of <code>2020</code> and the other to the first hour of <code>2030</code>. So, this example also shows how the aging process of technologies can be modeled.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_4</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>timestep_1</strong></td>
<td style="border-bottom: solid 1px;border-color: #dbdbdb"><strong>technology_1</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>parameter</strong></td>
<td style="border-right:none;border-bottom: solid 1px;border-color: #dbdbdb"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">h0001</td>
<td style="border-right:none">2020</td>
<td>heatpump</td>
<td style="border-right:none">effConv</td>
<td style="border-right:none;text-align:center">5.0</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">h0001</td>
<td style="border-right:none">2020</td>
<td>heatpump</td>
<td style="border-right:none">effConv</td>
<td style="border-right:none;text-align:center">4.5</td>
</tr>
</tbody>
</table>
```
# Inheritance
```@raw html
<p class="norm">
The combinations of sets for which parameter values are provided in the input data do not need to match the instances required within the model. The required values are automatically compiled from the data provided. This facilitates the provision of input data and allows for a high level of flexibility in modelling. </p>
<p class="norm">
If, for example, the <a href="../parameter_list/#Efficiency-1">efficiency</a> of one technology should depend on the time-step while for another it should be independent of the time-step, this can simply be achieved by providing efficiencies at an hourly granularity in the first case and without specifying the temporal domain in the second. In the demo problem for instance, heat-pump efficiencies are provided hourly in <a href="https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/timeseries/par_heatpumps.csv"><code>par_heatpumps.csv</code></a>, while for all other technologies <a href="https://github.com/leonardgoeke/AnyMOD.jl/blob/master/examples/demo/par_techDispatch.csv"><code>par_techDispatch.csv</code></a> defines efficiencies without specifying a dispatch time-step. </p>
```
!!! warning "Do not overuse inheritance"
    Poorly provided input data, especially time-series data, can massively increase the run-time of the inheritance algorithm. For example, if your raw data is provided quarter-hourly, but the most detailed resolution you actually want to model is hourly, you should aggregate the data to hours yourself before providing it to the model. Feeding in the unaltered quarter-hourly data and letting the inheritance algorithm aggregate it instead is possible but highly inefficient.
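A minimal sketch of such a pre-aggregation, assuming the raw data is a plain vector of quarter-hourly values: the mean of every four consecutive entries yields one hourly value (use a sum instead if the series represents energy per time-step rather than average power).

```julia
quarter_hourly = rand(4 * 8760)  # one year of quarter-hourly values (dummy data)

# average each block of four quarter-hours into one hourly value
hourly = [sum(quarter_hourly[(4i - 3):(4i)]) / 4 for i in 1:(length(quarter_hourly) ÷ 4)]
```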
### Modes of inheritance
When data is required but not defined for a specific combination of nodes, the algorithm moves along the vertices of the set trees to derive it. There are four different modes for doing this, which will be explained based on the graph below. It shows the hierarchical tree of time-steps in the demo problem with data specified for some nodes (green), but not for the required one (red circle).
```@raw html
<img src="../assets/inheritance.png" width="80%"/>
<p class="norm">
<ul>
<li><em><strong>upwards</strong></em><br>Moving upwards the tree until a node with data is reached. In the example this means assigning <code>d001</code> the value <code>8.3</code>. If data were already provided for <code>2020</code>, the direct ancestor of <code>d001</code>, this value would be used instead. </li>
<li><em><strong>average</strong></em><br>Moving downwards until nodes with some data are reached and taking the average of those values. In the example this means assigning <code>d001</code> the value <code>2.9</code> ($=\frac{2.7 + 3.1}{2}$).
</li>
<li><em><strong>sum</strong></em><br> Moving downwards until nodes with some data are reached and taking the sum of those values. In the example this means assigning <code>d001</code> the value <code>5.8</code> ($=2.7 + 3.1$). </li>
<li><em><strong>sum*</strong></em><br> Moving downwards until nodes that <u>all</u> have data assigned are reached and taking the sum of those values. In the example this means assigning <code>d001</code> the value <code>16.6</code> ($=2.1+6.8+4.5+3.2$).</li>
</ul>
</p>
```
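The three downward modes can also be expressed as a small, runnable sketch. The dictionaries below are purely illustrative and do not reflect AnyMOD's internal data structures; the node names are generic, but the values reproduce the numbers from the example graph (the <em>upwards</em> mode simply walks toward the root and is omitted).

```julia
# toy tree below the required node d001: nodes a and b carry data, c does not;
# on the lowest level all four nodes carry data
children = Dict("d001" => ["a", "b", "c"], "a" => ["h1"], "b" => ["h2"], "c" => ["h3", "h4"])
data = Dict("a" => 2.7, "b" => 3.1, "h1" => 2.1, "h2" => 6.8, "h3" => 4.5, "h4" => 3.2)

# 'average'/'sum': descend until a level where at least one node carries data
function someData(node)
    front = children[node]
    while !any(haskey(data, x) for x in front)
        front = reduce(vcat, [get(children, x, String[]) for x in front])
    end
    return [data[x] for x in front if haskey(data, x)]
end

# 'sum*': descend until a level where all nodes carry data
function allData(node)
    front = children[node]
    while !all(haskey(data, x) for x in front)
        front = reduce(vcat, [get(children, x, [x]) for x in front])
    end
    return [data[x] for x in front]
end

vals = someData("d001")
sum(vals) / length(vals)  # average -> 2.9
sum(vals)                 # sum     -> 5.8
sum(allData("d001"))      # sum*    -> 16.6
```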
### Inheritance algorithm
```@raw html
<p class="norm">
The actual algorithm is outlined for the example of the <a href="../parameter_list/#Discount-rate-1">discount rate</a> parameter from the demo problem. The instances of the parameter required within the model are given below; the input data provided was shown above.
</p>
<table class="tabelle3">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_superordinate_dispatch</strong></td>
<td style="border-right:none"><strong>region_expansion</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">West < WestNorth</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">West < WestNorth</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">West < WestSouth</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">West < WestSouth</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">East < EastNorth</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">East < EastNorth</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">East < EastSouth</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">East < EastSouth</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">East</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">East</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">West</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">West</td>
</tr>
</tbody>
</table>
```
Three steps are taken to populate the table with the required data:
```@raw html
<ol class="nest"; >
<li class="nest2"; style="font-size:large;font-weight:bold">&nbsp;&nbsp;Direct matches<br>
<p class="norm">
First, values are assigned where instances of the required data exactly match the provided input data. For example, data for <code>2030</code> and <code>WestSouth</code> is specifically provided and can be used directly.
<table class="tabelle3">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_superordinate_dispatch</strong></td>
<td><strong>region_expansion</strong></td>
<td style="border-right:none"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.015</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.03</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West</td>
<td style="border-right:none;text-align:center"></td>
</tr>
</tbody>
</table>
</p>
</li>
<li class="nest2"; style="font-size:large;font-weight:bold">  Go along inheritance rules<br>
<p class="norm"; style = "text-indent:0em">
Next, the algorithm consecutively applies the inheritance rules of the respective parameter. For each parameter these rules are documented in the <a href="../parameter_list">parameter list</a>. These rules assign sets to the modes of inheritance introduced above.
<ol class="nest2">
<li class="nest"; style="font-size:medium;font-weight:bold"> $Ts_{sup}$ → <em>upwards</em><br>
<p class="norm">
For discount rates, the first rule is to try to obtain additional values by moving upwards in the hierarchical tree of time-steps. In the first row of the input data, a value was provided irrespective of the time-step, but only for the region <code>West</code>. This value is now assigned to the missing entries of that region.
<table class="tabelle3">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_superordinate_dispatch</strong></td>
<td><strong>region_expansion</strong></td>
<td style="border-right:none"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center">0.015</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.00</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.00</strong></td>
</tr>
</tbody>
</table>
</li>
</p>
<li class="nest"; style="font-size:medium;font-weight:bold"> $R_{exp}$ → <em>upwards</em><br>
<p class="norm">
Next, the concept is applied analogously to regions. By moving up the tree, the value provided for <code>East</code> is now assigned to the descendant regions <code>EastNorth</code> and <code>EastSouth</code> as well.
<table class="tabelle3">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_superordinate_dispatch</strong></td>
<td><strong>region_expansion</strong></td>
<td style="border-right:none"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.00</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.00</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.00</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center">0.015</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.03</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.03</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East</td>
<td style="border-right:none;text-align:center"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
</tbody>
</table>
</p>
</li>
<li class="nest"; style="font-size:medium;font-weight:bold"> $Ts_{sup}$ → <em>average</em> and $R_{exp}$ → <em>average</em><br>
<p class="norm">
The <em>average</em> mode tries to inherit values from descendant nodes. In this case, no data is defined for any of these, and consequently no additional data can be compiled.
</p>
</li>
</ol></li>
<li class="nest2"; style="font-size:large;font-weight:bold">  Use default value<br>
<p class="norm"; style = "text-indent:0em">
Finally, for all cases where no data was assigned, the default value is used instead. If a parameter does not have a default value, the respective cases are dropped.
<table class="tabelle3">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_superordinate_dispatch</strong></td>
<td><strong>region_expansion</strong></td>
<td style="border-right:none"><strong>value</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestNorth</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West < WestSouth</td>
<td style="border-right:none;text-align:center">0.015</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.02</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastNorth</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.02</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East < EastSouth</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>East</td>
<td style="border-right:none;text-align:center"><strong style="color:#60ad51">0.02</strong></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>East</td>
<td style="border-right:none;text-align:center">0.03</td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td>West</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td>West</td>
<td style="border-right:none;text-align:center">0.00</td>
</tr>
</tbody>
</table>
</p>
</li>
</ol>
```
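Once a model object has been created, the parameter data compiled by this algorithm can be inspected directly. The sketch below assumes the discount rate is stored in the objective part, as indicated in the parameter list; the directory names are placeholders.

```julia
using AnyMOD

model_object = anyModel("inputDir", "outputDir")
# DataFrame with the compiled discount-rate values after inheritance
model_object.parts.obj.par[:rateDisc].data
```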
# Parts
```@raw html
<p class="norm">
The <code>parts</code> field of the <a href="../api/#AnyMOD.anyModel"><code>anyModel</code></a> object structures the elements of a model's underlying optimization problem. Each of these parts again has three fields:
<ul>
<li> <code>par::Dict{Symbol,ParElement}</code> → <a href="../parameter_overview">parameter</a></li>
<li> <code>var::Dict{Symbol,DataFrame}</code> → <a href="../variables">variables</a></li>
<li> <code>cns::Dict{Symbol,DataFrame}</code> → <a href="../constraints">constraints</a></li>
</ul>
</p>
```
### Technology
```@raw html
<p class="norm">
The <a href="../api/#AnyMOD.TechPart"><code>part</code></a> for technologies is accessed via <code>modelObject.parts.tech[:techName]</code>. These objects include all expansion and dispatch related elements for the respective technology. Technology parts have additional fields to store information specific to technologies.
</p>
```
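For example, elements of a technology part could be accessed as sketched below; the technology name and the specific dictionary keys are assumptions for illustration.

```julia
part = model_object.parts.tech[:heatpump]

part.par[:effConv].data  # compiled parameter data as a DataFrame
part.var[:gen]           # DataFrame of generation variables
part.cns[:convBal]       # DataFrame of conversion balance constraints
```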
### Exchange
```@raw html
<p class="norm">
The <a href="../api/#AnyMOD.OthPart"><code>part</code></a> object for exchange is accessed via <code>modelObject.parts.exc</code>. It includes all model elements relating to the exchange of energy carriers between regions. Exchange between two regions is enabled if a value for the <a href="../parameter_list/#Residual-capacities-1">residual exchange capacity</a> parameter can be obtained for these two regions.
</p>
```
### Trade
```@raw html
<p class="norm">
For trade, the <a href="../api/#AnyMOD.OthPart"><code>part</code></a> object is accessed via <code>modelObject.parts.trd</code>. It includes all model elements relating to buying energy carriers from and selling them to parties "outside" the model. Most importantly, these are trade prices and variables for traded quantities.
</p>
```
### Balance
```@raw html
<p class="norm">
The <a href="../api/#AnyMOD.OthPart"><code>part</code></a> object for energy balances is accessed via <code>modelObject.parts.bal</code>. It is used to store all model elements relevant for the energy balance. For example, this includes the demand parameter, curtailment variables or the energy balance constraint itself.
</p>
```
### Limit
```@raw html
<p class="norm">
Model elements used to impose certain limits on model variables are stored in <code>modelObject.parts.lim</code>. These include <a href="../parameter_list/#Limits-on-expansion-and-capacity">limiting parameters</a> and the corresponding constraints enforcing these limits.
</p>
```
### Objective
```@raw html
<p class="norm">
The field <code>modelObject.parts.obj</code> gathers elements relating to the objective function of a model's underlying optimization problem. So far, the only objective available in AnyMOD is cost minimization, which is set by the <a href="../api/#AnyMOD.setObjective!"><code>setObjective!</code></a> function.
</p>
```
```julia
setObjective!(:costs, model_object)
```
The objective function has to be set after the optimization problem itself has been created.
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 9757 | ```@raw html
<style>
p.norm {
font-weight: normal;
font-size: medium;
}
table.tabelle3 td {
padding-left: 0.57em;
padding-right: 0.57em;
font-size: small;
border-right: solid 1px;
border-color: #dbdbdb;
font-weight: normal;
}
ul.liste {
list-style-position: inside;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
Performance and stability
=================
By far the fastest way to solve large linear problems is to use a commercial solver with the Barrier algorithm. However, Barrier is very sensitive to the numerical properties of the problem, and numerical issues often slow down or abort the algorithm. For this reason, AnyMOD provides a range of tools that adjust the problem in order to improve solver performance. These measures are based on recommendations from the [Gurobi documentation](https://www.gurobi.com/documentation/9.0/refman/num_grb_guidelines_for_num.html).
# Scaling
```@raw html
<p class="norm">
The coefficients within the matrix of an optimization problem should be kept between $10^{-3}$ and $10^6$, which implies a maximum range of coefficients of $10^9$ within each row. To achieve this, AnyMOD automatically applies a two-step scaling process to each model that is created. This process is outlined based on the simplified optimization problem (excluding an objective) given below.
</p>
<p class="norm">
<img src="../assets/matrix1.svg" width="70%"/>
</p>
<p class="norm">
In this example, the first and second row do not yet comply with the targeted range. Also, the maximum range of coefficients in the second row is $10^{11}$ (= $\frac{10^{2}}{10^{-9}}$), which exceeds $10^9$.
</p>
```
### 1. Column substitution
```@raw html
<p class="norm">
The first step substitutes columns (= variables) of the optimization problem. In the example, the variable $x_1$ is substituted with $10^3 \, x'_1$:
</p>
<p>
<img src="../assets/matrix2.svg" width="70%"/>
</p>
<p class="norm">
After substitution, the range of coefficients still does not lie between $10^{-3}$ and $10^6$, but the range of coefficients within each row no longer exceeds $10^9$. This is a prerequisite to move all coefficients into the desired range in the next step.
</p>
<p class="norm">
In AnyMOD substitution is done directly within the <code>var</code> column of a variable's dataframe. As a result, only the value of the variable within the optimization problem is affected, but the value accessed by the user is already corrected and always complies with the units provided in the <a href="../variables">Variables section</a>.
The optional argument <code>scaFac</code> of the <a href="../model_object">model constructor</a> overwrites the default factors used for column scaling. As long as no numerical problems occur, it is not advised to change the defaults. The table below lists the fields of the optional argument, the variables they apply to, and their default values; a usage sketch follows the table.
</p>
<p>
<table class="tabelle3">
<tbody>
<tr>
<td><strong>field</strong></td>
<td><strong>scaled variables</strong></td>
<td style="border-right:none"><strong>default factor</strong></td>
</tr>
<tr>
<td><code>capa</code></td>
<td><ul class="liste">
<li><a href="../variables/#Installed-capacity-1">installed capacity</a></li>
<li><a href="../variables/#Expansion-1">capacity expansion</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{1}$</td>
</tr>
<tr>
<td><code>oprCapa</code></td>
<td><ul class="liste">
<li><a href="../variables/#Operated-capacity-1">operated capacity</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{2}$</td>
</tr>
<tr>
<td><code>dispConv</code></td>
<td><ul class="liste">
<li><a href="../variables/#Generation-and-use-1">generation and use</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{3}$</td>
</tr>
<tr>
<td><code>dispSt</code></td>
<td><ul class="liste">
<li><a href="../variables/#Charging-and-discharging-1">charging and discharging</a></li>
<li><a href="../variables/#Storage-level-1">storage level</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{4}$</td>
</tr>
<tr>
<td><code>dispExc</code></td>
<td><ul class="liste">
<li><a href="../variables/#Exchange-1">exchange</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{3}$</td>
</tr>
<tr>
<td><code>dispTrd</code></td>
<td><ul class="liste">
<li><a href="../variables/#Buy-and-sell-1">buy and sell</a></li>
<li><a href="../variables/#Curtailment-and-loss-of-load-1">curtailment and loss-of-load</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{3}$</td>
</tr>
<tr>
<td><code>costDisp</code></td>
<td><ul class="liste">
<li><a href="../variables/#Variable-costs-1">variable costs</a></li>
<li><a href="../variables/#Trade-costs-1">trade costs</a></li>
<li><a href="../variables/#Curtailment-and-loss-of-load-costs-1">curtailment and loss of load costs</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{3}$</td>
</tr>
<tr>
<td><code>costCapa</code></td>
<td><ul class="liste">
<li><a href="../variables/#Operating-costs-1">operating costs</a></li>
<li><a href="../variables/#Expansion-costs-1">expansion costs</a></li>
</ul></td>
<td style="text-align:center;border-right:none">$10^{3}$</td>
</tr>
<tr>
<td><code>obj</code></td>
<td>objective variable</td>
<td style="text-align:center;border-right:none">$10^{0}$</td>
</tr>
</tbody>
</table>
</p>
<p class="norm">
The optional argument <code>checkRng</code> can be used to specify a maximum range of coefficients for each row. Rows (= constraints) that violate this range after column scaling will be printed to the REPL. This helps to test and adjust the factors used for substitution.
</p>
```
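For illustration, the sketch below overwrites the column-scaling factors and activates the range check; the paths are placeholders and the factor values simply restate the defaults from the table above:
```julia
# sketch: "demo_input" and "results" are placeholder directories
model_object = anyModel(
    "demo_input", "results"; objName = "demo",
    scaFac = (capa = 1e1, oprCapa = 1e2, dispConv = 1e3, dispSt = 1e4,
              dispExc = 1e3, dispTrd = 1e3, costDisp = 1e3, costCapa = 1e3, obj = 1e0),
    checkRng = 1e9 # report rows whose coefficient range exceeds 10^9
)
```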
### 2. Row scaling
```@raw html
<p class="norm">
The second step scales the rows (= constraints) of the optimization problem by multiplying them with a constant factor. If the scaling of columns successfully decreased the range of coefficients within each row to $10^9$, this makes it possible to move all coefficients into a range from $10^{-3}$ to $10^6$. In the example, the factors used are $10^{2}$ and $10^4$ for the first and second row, respectively, which results in the following optimization problem:
</p>
<p>
<img src="../assets/matrix4.svg" width="70%"/>
</p>
<p class="norm" >
By default, the target ranges in AnyMOD are narrower than in the example: matrix coefficients range from $10^{-2}$ to $10^{5}$ and values on the right-hand side from $10^{-2}$ to $10^{2}$. Again, these defaults can be overwritten by the <code>coefRng</code> argument of the <a href="../model_object">model constructor</a>, as sketched below.
</p>
```
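Both ranges are passed as tuples of a lower and an upper value; the field names `mat` and `rhs` are assumptions based on the defaults just stated:
```julia
# sketch: restate the default target ranges for matrix coefficients (mat)
# and right-hand side values (rhs)
model_object = anyModel("demo_input", "results"; objName = "demo",
                        coefRng = (mat = (1e-2, 1e5), rhs = (1e-2, 1e2)))
```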
# Variable limits
```@raw html
<p class="norm">
Numerical stability of the Barrier algorithm can be increased by imposing additional upper limits on model variables. For this purpose, general variable limits can be added to a model by using the <code>bound</code> argument of the <a href="../model_object">model constructor</a>. Since the sole purpose of these limits is to increase solver performance, they are not intended to have any real-world equivalents. Consequently, they should be set to high values that prevent them from becoming binding constraints.
</p>
<p class="norm" >
Limits are provided as a NamedTuple with fields for dispatch variables, capacity variables, and the objective itself: <code>(disp = NaN, capa = NaN, obj = NaN)</code>.
For capacity and dispatch, values are provided in GW and limits on dispatch are scaled to energy units to comply with the temporal resolution of the respective carrier. The limit on the objective function is provided in million Euros.
</p>
<p class="norm" >
In general, it is strongly advised to provide a limit for the objective function. Doing so achieves a noticeable increase in performance without risking distorted model results. Instead, the model will simply turn infeasible if the set limit is below the actual objective value.
</p>
<p class="norm" >
General limits on dispatch and capacity variables should only be set with great caution and used as a measure of last resort against numerical instability. Improper limits could create binding constraints that impact final results but remain undetected by the user. In addition, their positive impact on performance is not as clear, because they also cause a substantial increase in model size.
</p>
```
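In line with this advice, a minimal sketch that only bounds the objective value (the value is a deliberately high placeholder in million Euros) while leaving dispatch and capacity unbounded:
```julia
# sketch: NaN leaves dispatch and capacity unbounded; 1e5 is a placeholder
model_object = anyModel("demo_input", "results"; objName = "demo",
                        bound = (disp = NaN, capa = NaN, obj = 1e5))
```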
# Range of factors
```@raw html
<p class="norm">
Since numerical stability is closely linked to the range of factors in the optimization problem, two more options are available to limit that range. Again, both these options are set as optional arguments of the <a href="../model_object">model constructor</a>.
</p>
<p class="norm">
<code>avaMin</code> sets a lower limit for the <a href="../parameter_list/#Availability-1">availability parameter</a>, meaning all availabilities below this limit are replaced by zero. Since the parameter is inverted within the <a href="../constraints/#Conversion-capacity-restriction-1">capacity restrictions</a>, small availabilities can lead to large factors and cause numerical instability. The default value is 0.01 (= 1%).
</p>
<p class="norm">
The argument <code>emissionLoss</code> controls whether losses incurred by <a href="../parameter_list/#Exchange-losses-1">exchange</a> and <a href="../parameter_list/#Storage-self-discharge-1">self-discharge of storage</a> are taken into account when emissions are computed. Due to the high range of factors, emission constraints are already critical points when it comes to numerical stability. Adding even smaller factors to account for the named losses adds to this problem. The default is <code>true</code>, meaning self-discharge and exchange losses are accounted for.
</p>
```
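Both options are plain arguments of the model constructor; the values below are illustrative:
```julia
# sketch: replace availabilities below 2% with zero and exclude exchange and
# self-discharge losses from the emission computation
model_object = anyModel("demo_input", "results"; objName = "demo",
                        avaMin = 0.02, emissionLoss = false)
```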
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 10089 | ```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
}
ul.liste {
list-style-position: outside;
padding-left: 1em;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
pre.inline {
display: inline;
}
</style>
```
# Plots
The graphs that build the conceptual basis for the modelling approach pursued by AnyMOD can be plotted. All plots are created with [plotly](https://github.com/plotly/plotly.py) which is accessed from Julia using [PyCall](https://github.com/JuliaPy/PyCall.jl).
# Node trees
The `plotTree` function is used to plot the hierarchical trees of sets as introduced in [Sets and Mappings](@ref).
```julia
plotTree(tree_sym::Symbol, model_object::anyModel)
```
The input `tree_sym` indicates which set should be plotted (`:region`, `:timestep`, `:carrier`, or `:technology`). As an example, the tree for `:carrier` from the [demo problem](https://github.com/leonardgoeke/AnyMOD.jl/tree/master/examples/demo) is plotted below.
```@raw html
<p style="text-align:center;"><img src="../assets/carrier.png" width="80%"/>
```
Optional arguments include the following; an example call is given after the table:
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>argument</strong></td>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>default</strong></td>
</tr>
<tr>
<td><code>plotSize</code></td>
<td>
<ul class="liste">
<li>length and width of the plot in inches</li>
</ul>
</td>
<td style="border-right:none"><code>(8.0,4.5)</code></td>
</tr>
<tr>
<td><code>fontSize</code></td>
<td>
<ul class="liste">
<li>font size of labels in plot</li>
</ul>
</td>
<td style="border-right:none"><code>12</code></td>
</tr>
<tr>
<td><code>useColor</code></td>
<td>
<ul class="liste">
<li>if <code>true</code>, tries to obtain <a href="#Styling">node specific colors</a>, otherwise the same color is used for all nodes</li>
</ul>
</td>
<td style="border-right:none"><code>true</code></td>
</tr>
<tr>
<td><code>wide</code></td>
<td>
<ul class="liste">
<li>controls ratio of horizontal distances between nodes that are on the same level, but have <br> different ancestors (e.g. distance between 'synthetic gas' and 'natural gas' relative to <br> distance between 'natural gas' and 'district heat')</li>
<li>first element of input array refers to ratio on the first level and so on</li>
</ul>
</td>
<td style="border-right:none"><code>fill(1.0,30)</code></td>
</tr>
</tbody>
</table>
```
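The values below are illustrative:
```julia
# larger plot, bigger labels, and a uniform color for all nodes
plotTree(:carrier, model_object; plotSize = (10.0, 6.0), fontSize = 14, useColor = false)
```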
# Energy flow
The `plotEnergyFlow` function provides two ways to visualize the flow of energy within a model: Either as a qualitative node graph or as a quantitative Sankey diagram.
### Node graph
To plot a qualitative node graph use the `plotEnergyFlow` command with the `:graph` argument.
```julia
plotEnergyFlow(:graph, model_object::anyModel)
```
Nodes either correspond to technologies (grey dots) or energy carriers (colored squares). Edges between technology and energy carrier nodes indicate the carrier is either an input (entering edge) or an output (leaving edge) of the respective technology. Edges between carriers result from inheritance relationships between carriers. These are included, because, according to AnyMOD's graph-based approach, descendants can satisfy the demand for an ancestral carrier (see [Göke (2020)](https://arxiv.org/abs/2004.10184) for details).
```@raw html
<p style="text-align:center;"><img src="../assets/energyFlowGraph.png"/>
```
The layout of the graph is created using a [force-directed drawing algorithm](https://en.wikipedia.org/wiki/Force-directed_graph_drawing) originally implemented in [GraphLayout.jl](https://github.com/IainNZ/GraphLayout.jl).
In many cases the resulting layout will be sufficient to get an overview of energy flows for debugging, but inadequate for publication. For this reason, the `moveNode!` function can be used to adjust the layout.
```julia
moveNode!(model_object::anyModel, newPos_arr::Array{Tuple{String,Array{Float64,1}},1})
```
`moveNode!` requires an initial layout within which specific nodes are then moved. In the example below, an initial layout is created by calling `plotEnergyFlow`. Afterwards, the node for 'ocgt' is moved 0.2 units to the right and 0.1 units up, and the node for 'coal' is moved analogously. Finally, the graph is plotted again with the new layout.
```julia
plotEnergyFlow(:graph, model_object)
moveNode!(model_object, [("ocgt",[0.2,0.1]),("coal",[0.15,0.0])])
plotEnergyFlow(:graph, model_object, replot = false)
```
When plotting again, it is important to set the optional `replot` argument to `false`. Otherwise, the original layout algorithm will be run again and overwrite any changes made manually.
Optional arguments for plotting the qualitative node graph are listed below; an example call follows the table:
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>argument</strong></td>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>default</strong></td>
</tr>
<tr>
<td><code>plotSize</code></td>
<td>
<ul class="liste">
<li>length and width of the plot in inches</li>
</ul>
</td>
<td style="border-right:none"><code>(16.0,9.0)</code></td>
</tr>
<tr>
<td><code>fontSize</code></td>
<td>
<ul class="liste">
<li>font size of labels in plot</li>
</ul>
</td>
<td style="border-right:none"><code>12</code></td>
</tr>
<tr>
<td><code>useTeColor</code></td>
<td>
<ul class="liste">
<li>if <code>true</code>, tries to obtain <a href="#Styling">node specific colors</a> for technologies, otherwise <br> technology nodes are displayed in grey</li>
</ul>
</td>
<td style="border-right:none"><code>false</code></td>
</tr>
<tr>
<td><code>replot</code></td>
<td>
<ul class="liste">
<li>if <code>false</code>, the current layout of the plot is used instead of computing a new one</li>
</ul>
</td>
<td style="border-right:none"><code>true</code></td>
</tr>
<tr>
<td><code>scaDist</code></td>
<td rowspan="3"; style="border-bottom:none">
<ul class="liste">
<li>control parameters for the graph's layout algorithm (see <a href="https://github.com/IainNZ/GraphLayout.jl/blob/master/src/spring.jl">spring.jl</a> for original <br> implementation)</li>
</ul>
</td>
<td style="border-right:none"><code>0.5</code></td>
</tr>
<tr>
<td><code>maxIter</code></td>
<td style="border-right:none"><code>5000</code></td>
</tr>
<tr>
<td><code>initTemp</code></td>
<td style="border-right:none"><code>2.0</code></td>
</tr>
</tbody>
</table>
```
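The values below are illustrative:
```julia
# colored technology nodes and a longer run of the layout algorithm
plotEnergyFlow(:graph, model_object; useTeColor = true, maxIter = 10000, initTemp = 2.0)
```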
### Sankey diagram
To plot quantitative energy flows for a solved model, use the `plotEnergyFlow` command with the `:sankey` argument:
```julia
plotEnergyFlow(:sankey, model_object::anyModel)
```
```@raw html
<p class="norm">
The command will create an entire <a href="../assets/sankey_example.html">html application</a> including a dropdown menu and drag-and-drop capabilities. Below is a screenshot of one graph from the solved <a href="https://github.com/leonardgoeke/AnyMOD.jl/tree/master/examples/demo">demo problem</a>.
</p>
```

Optional arguments for plotting a Sankey diagram are listed below; an example call follows the table:
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td><strong>argument</strong></td>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>default</strong></td>
</tr>
<tr>
<td><code>plotSize</code></td>
<td>
<ul class="liste">
<li>length and width of the plot in inches</li>
</ul>
</td>
<td style="border-right:none"><code>(16.0,9.0)</code></td>
</tr>
<tr>
<td><code>minVal</code></td>
<td>
<ul class="liste">
<li>threshold for energy flows being plotted</li>
</ul>
</td>
<td style="border-right:none"><code>0.1</code></td>
</tr>
<tr>
<td><code>filterFunc</code></td>
<td>
<ul class="liste">
<li>function to filter certain carriers and technologies from graph</li>
<li>for example <code>x -> x.C == 1</code> will only include flows associated to the energy carrier with <br> id 1 (see documentation of <a href="../api/#AnyMOD.Tree"><code>tree objects</code></a> on how to obtain ids)</li>
</ul>
</td>
<td style="border-right:none"><code>x -> true</code></td>
</tr>
<tr>
<td><code>dropDown</code></td>
<td>
<ul class="liste">
<li>for each relevant set of the dimensions specified here separate diagrams are created, sets <br> then appear in the dropdown menu within the html output</li>
<li>for example <code>dropDown = (:timestep,)</code> will create diagrams aggregating values <br> across all regions, instead of creating a separate diagram for each region</li>
</ul>
</td>
<td style="border-right:none"><code>(:region,:timestep)</code></td>
</tr>
<tr>
<td><code>rmvNode</code></td>
<td>
<ul class="liste">
<li>removes specified nodes from the Sankey diagram</li>
<li>removal is only possible for nodes that either only have an in- or outgoing flow or exactly <br> one in- and exactly one outgoing flow of equal size</li>
<li>in the diagram above for instance, the nodes for 'district heat' or 'curtailment' could be <br> removed, but not for 'ocgt'</li>
</ul>
</td>
<td style="border-right:none"><code>tuple()</code></td>
</tr>
<tr>
<td><code>useTeColor</code></td>
<td>
<ul class="liste">
<li>if <code>true</code>, tries to obtain <a href="#Styling">node specific colors</a> for technologies, otherwise technology nodes <br> are displayed in grey</li>
</ul>
</td>
<td style="border-right:none"><code>true</code></td>
</tr>
</tbody>
</table>
```
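For instance, the illustrative call below creates one diagram per time-step aggregated over all regions, hides flows below 0.5, and removes the 'curtailment' node:
```julia
plotEnergyFlow(:sankey, model_object; dropDown = (:timestep,), minVal = 0.5,
               rmvNode = ("curtailment",))
```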
# Styling
The colors and labels of nodes within plots can be adjusted using the `graInfo` field of the model object.
The sub-field `graInfo.names` provides a dictionary that maps node names as specified in the input files to node labels used for plotting. By default, some names occurring in the [demo problem](https://github.com/leonardgoeke/AnyMOD.jl/tree/master/examples/demo) are already assigned labels within the dictionary.
Analogously, `graInfo.colors` assigns colors used for plotting to nodes. Either the actual node name or an assigned label can serve as a key. The assigned value is a tuple of three numbers between 0 and 1 corresponding to an RGB color code.
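A minimal sketch, assuming the demo problem's node names:
```julia
# map an input-file node name to a plotting label and assign that label an
# RGB color given as three values between 0 and 1
model_object.graInfo.names["districtHeat"] = "district heat"
model_object.graInfo.colors["district heat"] = (0.53, 0.81, 0.92)
```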
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 886 | # Related material
# Publications
### Method
* [Göke (2020), AnyMOD - A graph-based framework for energy system modelling with high levels of renewables and sector integration, Working Paper.](https://arxiv.org/abs/2004.10184)
* [Göke (2020), AnyMOD.jl: A Julia package for creating energy system models](https://arxiv.org/abs/2011.00895)
### Applications
* [Hainsch et al. (2020), European Green Deal: Using Ambitious Climate Targets and Renewable Energy to Climb out of the Economic Crisis, DIW Weekly Report.](https://www.diw.de/de/diw_01.c.793359.de/publikationen/weekly_reports/2020_28_1/european_green_deal__using_ambitious_climate_targets_and_renewable_energy_to_climb_out_of_the_economic_crisis.html)
# Model repositories
* [AnyMOD\_example\_model](https://github.com/leonardgoeke/AnyMOD_example_model): example model used in [Göke (2020)](https://arxiv.org/abs/2004.10184)
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 23698 | ```@raw html
<style>
table.tabelle2 td {
padding-left: 0.57em;
padding-right: 0.57em;
border-right: solid 1px;
border-color: #dbdbdb;
font-size: small;
}
ul.liste {
list-style-position: outside;
padding-left: 1em;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Sets and Mappings
```@raw html
<p class="norm">
In AnyMOD sets like regions, time-steps, carriers, and technologies are organized as <a href="../api/#AnyMOD.Node"><code>Nodes</code></a> within hierarchical <a href="../api/#AnyMOD.Tree"><code>Trees</code></a>. They are defined by the mandatory input files named <code>set_region.csv</code>, <code>set_timestep.csv</code>, <code>set_carrier.csv</code>, and <code>set_technology.csv</code>, respectively. Relations between different sets (e.g. between carriers and technologies) are represented by mapping nodes within these trees to each other. This graph-based modelling approach is explained in detail in <a href="https://arxiv.org/abs/2004.10184">Göke (2020)</a>.</p>
```
Next, these key sets are introduced based on the [demo problem](https://github.com/leonardgoeke/AnyMOD.jl/tree/master/examples/demo), along with how their tree structures and mappings are obtained from the input files. Finally, additional sets not organized as hierarchical trees are briefly listed.
## Regions
The `set_region.csv` file defines all modelling regions. The file consists of consecutive columns named `region_1`, `region_2`, etc., with each column relating to a level in the resulting tree. A region on a specific level is defined by writing its name into the respective column. To connect regions from different levels, they need to be written into the same row. Names of regions are not required to be unique, not even on the same level.
The set file for regions from the demo problem is provided below.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none !important"><strong>region_1</strong></td>
<td style="border-right:none"><strong>region_2</strong></td>
</tr>
<tr>
<td style="border-right:none">East</td>
<td style="border-right:none">EastNorth</td>
</tr>
<tr>
<td style="border-right:none">East</td>
<td style="border-right:none">EastSouth</td>
</tr>
<tr>
<td style="border-right:none">West</td>
<td style="border-right:none">WestNorth</td>
</tr>
<tr>
<td style="border-right:none">West</td>
<td style="border-right:none">WestSouth</td>
</tr>
</tbody>
</table>
```
Consequently, this file defines six regions on two different levels. `East` and `West` are on level 1, and their descendants `EastNorth` and `EastSouth` as well as `WestNorth` and `WestSouth` are on level 2. Since names of regions are not required to be unique, the four descendant regions could alternatively have been named just `North` and `South`. By using the `plotTree` function the corresponding hierarchical tree can be visualized and exported to the output directory.
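For instance, assuming `model_object` holds the created model:
```julia
plotTree(:region, model_object)
```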
```@raw html
<img src="../assets/region.png" width="80%"/>
```
### Application context
Throughout the model regions are used in different contexts and depending on the context different symbols are used:
- ``R_{exp}``: Region of capacity expansion, used in all investment related model elements
- ``R_{disp}``: Region of dispatch, used in all dispatch related model elements
- ``R_a``, ``R_b``: Regions of exchange, element is not differentiated by direction of exchange (e.g. exchange capacities from ``R_a`` to ``R_b`` also apply from ``R_b`` to ``R_a``)
- ``R_{from}``, ``R_{to}``: Regions of exchange, element is differentiated by direction of exchange (e.g. the exchange loses from ``R_{from}`` to ``R_{to}`` do not apply from ``R_{to}`` to ``R_{from}``)
## Time-steps
The `set_timestep.csv` file defines all modelling time-steps, for both capacity expansion (usually years) and dispatch (for example hours). The file is structured analogously to the file for regions. Just as with regions, names of time-steps are not required to be unique.
The first lines of the corresponding file in the demo problem are provided below.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_1</strong></td>
<td style="border-right:none"><strong>timestep_2</strong></td>
<td style="border-right:none"><strong>timestep_3</strong></td>
<td style="border-right:none"><strong>timestep_4</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">all</td>
<td style="border-right:none">d001</td>
<td style="border-right:none">hh0001</td>
<td style="border-right:none">h0001</td>
</tr>
<tr>
<td style="border-right:none">all</td>
<td style="border-right:none">d001</td>
<td style="border-right:none">hh0001</td>
<td style="border-right:none">h0002</td>
</tr>
<tr>
<td colspan="4"; style="text-align:center;border-right:none">⋮</td>
</tr>
</tbody>
</table>
```
This file makes use of the `all` keyword to reduce the number of rows required in the input file. This keyword serves as a placeholder for all nodes defined on the respective level. Consequently, in this case the following row
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_1</strong></td>
<td style="border-right:none"><strong>timestep_2</strong></td>
<td style="border-right:none"><strong>timestep_3</strong></td>
<td style="border-right:none"><strong>timestep_4</strong></td>
</tr>
<tr>
<td style="border-right:none">all</td>
<td style="border-right:none">d001</td>
<td style="border-right:none">hh0001</td>
<td style="border-right:none">h0001</td>
</tr>
</tbody>
</table>
```
is equivalent to these two rows:
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none"><strong>timestep_1</strong></td>
<td style="border-right:none"><strong>timestep_2</strong></td>
<td style="border-right:none"><strong>timestep_3</strong></td>
<td style="border-right:none"><strong>timestep_4</strong></td>
</tr>
<tr>
<td style="border-right:none">2020</td>
<td style="border-right:none">d001</td>
<td style="border-right:none">hh0001</td>
<td style="border-right:none">h0001</td>
</tr>
<tr>
<td style="border-right:none">2030</td>
<td style="border-right:none">d001</td>
<td style="border-right:none">hh0001</td>
<td style="border-right:none">h0001</td>
</tr>
</tbody>
</table>
```
The keyword `all` can be extended to assign specific nodes:
* `all(node1,node2,node3)` assigns all listed nodes. Accordingly, in the example above `all(2020,2030)` would have achieved the same result as using `all`.
* `all(node1:node2)` assigns not only `node1` and `node2`, but also all nodes in between according to alphabetical order.
In the example above, the use of `all` results in each year having descendant nodes that represent days (level 2), 4-hour steps (level 3) and hours (level 4). Since names of time-steps are not required to be unique, these different nodes for each year can share the same names. A reduced version of the corresponding tree is plotted below:
```@raw html
<img src="../assets/timestep.png" width="80%"/>
```
### Application context
The following symbols are used to refer to time-steps depending on the context:
- ``Ts_{exp}``: Time-steps of capacity expansion
- ``Ts_{dis}``: Time-steps of dispatch
- ``Ts_{sup}``: Superordinate dispatch time-steps (usually years)
## Carriers
The hierarchical tree of energy carriers is defined analogously to regions and time-steps. The respective `.csv` table from the demo problem is given below. Unlike regions and time-steps, carrier names are required to be unique. Carriers are always represented by the symbol ``C``.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none"><strong>carrier_1</strong></td>
<td><strong>carrier_2</strong></td>
<td style="border-right:none"><strong>timestep_dispatch</strong></td>
<td style="border-right:none"><strong>timestep_expansion</strong></td>
<td style="border-right:none"><strong>region_dispatch</strong></td>
<td style="border-right:none"><strong>region_expansion</strong></td>
</tr>
<tr>
<td style="border-right:none">electricity</td>
<td></td>
<td style="text-align:center;border-right:none">4</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">2</td>
</tr>
<tr>
<td style="border-right:none">heat</td>
<td>districtHeat</td>
<td style="text-align:center;border-right:none">3</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">2</td>
<td style="text-align:center;border-right:none">2</td>
</tr>
<tr>
<td style="border-right:none">gas</td>
<td>naturalGas</td>
<td style="text-align:center;border-right:none">2</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">2</td>
<td style="text-align:center;border-right:none">2</td>
</tr>
<tr>
<td style="border-right:none">gas</td>
<td>synthGas</td>
<td style="text-align:center;border-right:none">2</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
</tr>
<tr>
<td style="border-right:none">coal</td>
<td></td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
</tr>
<tr>
<td style="border-right:none">hydrogen</td>
<td></td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
<td style="text-align:center;border-right:none">1</td>
</tr>
</tbody>
</table>
```
The table above corresponds to the following tree:
```@raw html
<img src="../assets/carrier2.png" width="80%"/>
```
### Mapping
**Carrier resolution**
In addition to defining carriers, the `set_carrier.csv` file also maps them to regions and time-steps to specify the resolution they are modeled at. This is done separately for dispatch and expansion in the columns `timestep_dispatch`, `timestep_expansion`, `region_dispatch`, and `region_expansion`. The numbers in these columns correspond to levels of the respective trees. In the example displayed above, `4` in the `timestep_dispatch` column for `electricity` means that dispatch for electricity is modeled for each time-step on level 4. Going back to the definition of time-steps above, this corresponds to an hourly resolution.
!!! tip "Less detailed resolution for debugging"
Creating and especially solving a model is much faster if the temporal resolution of dispatch is decreased. Therefore, it is advisable to first test new models at a less detailed temporal resolution. In the example this would be achieved by replacing the `4` for electricity with `2` to switch to a daily resolution. This will help you to spot and fix mistakes or unintended effects more efficiently.
AnyMOD checks the specified resolutions and will throw an error if any logical inconsistencies are detected. Resolutions provided in a specific row only apply to the last carrier in that row. However, carriers on higher levels without a specified resolution, like `gas` in the example, automatically inherit a resolution from their descendants.
**Optional mappings**
For reasons elaborated in [Göke (2020)](https://arxiv.org/abs/2004.10184), by default energy balances in AnyMOD are not formulated as equality constraints, meaning supply can exceed demand. To overwrite this behaviour, an optional column named `carrier_equality` using the keywords `yes` and `no` can be added to the file, where `yes` will enforce an equality constraint.
## Technologies
The hierarchical tree of technologies is defined analogously to regions and time-steps. The respective `.csv` table from the demo problem is given below. Unlike regions and time-steps, technology names are required to be unique. Technologies are represented by the symbol ``Te``.
```@raw html
<table class="tabelle2">
<tbody>
<tr>
<td style="border-right:none"><strong>technology_1</strong></td>
<td style="border-right:none"><strong>technology_2</strong></td>
<td><strong>technology_3</strong></td>
<td style="border-right:none"><strong>carrier_conversion_in</strong></td>
<td style="border-right:none"><strong>carrier_conversion_out</strong></td>
<td style="border-right:none"><strong>carrier_stored_in</strong></td>
<td style="border-right:none"><strong>carrier_stored_out</strong></td>
</tr>
<tr>
<td style="border-right:none">wind</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none"></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">solar</td>
<td style="border-right:none">openspace</td>
<td></td>
<td style="border-right:none"></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">solar</td>
<td style="border-right:none">rooftop</td>
<td>photovoltaic</td>
<td style="border-right:none"></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none"></td>
<td style="border-right:none">electricity</td>
</tr>
<tr>
<td style="border-right:none">solar</td>
<td style="border-right:none">rooftop</td>
<td>solarThermal</td>
<td style="border-right:none"></td>
<td style="border-right:none">heat</td>
<td style="border-right:none"></td>
<td style="border-right:none">heat</td>
</tr>
<tr>
<td style="border-right:none">gasPlant</td>
<td style="border-right:none">ccgt</td>
<td>ccgtNoCHP</td>
<td style="border-right:none">gas</td>
<td style="border-right:none">electricity</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">gasPlant</td>
<td style="border-right:none">ccgt</td>
<td>ccgtCHP</td>
<td style="border-right:none">gas</td>
<td style="border-right:none">electricity; districtHeat</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">gasPlant</td>
<td style="border-right:none">ocgt</td>
<td></td>
<td style="border-right:none">gas</td>
<td style="border-right:none">electricity</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">coalPlant</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">coal</td>
<td style="border-right:none">electricity; districtHeat</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">heatpump</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none">heat</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">gasStorage</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
<td style="border-right:none">gas</td>
<td style="border-right:none">gas</td>
</tr>
<tr>
<td style="border-right:none">gasBoiler</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">gas</td>
<td style="border-right:none">heat</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">hydro</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none">electricity</td>
</tr>
<tr>
<td style="border-right:none">electrolysis</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">electricity</td>
<td style="border-right:none">hydrogen</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">methanation</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">hydrogen</td>
<td style="border-right:none">synthGas</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
<tr>
<td style="border-right:none">fuelCell</td>
<td style="border-right:none"></td>
<td></td>
<td style="border-right:none">hydrogen</td>
<td style="border-right:none">electricity; districtHeat</td>
<td style="border-right:none"></td>
<td style="border-right:none"></td>
</tr>
</tbody>
</table>
```
Within a model only nodes without any descendants are actual technologies. The remaining nodes have the sole purpose of organizing them. This facilitates the read-in of parameter data and formulation of certain constraints (e.g. the available rooftop area limiting the summed capacity of `photovoltaic` and `solarThermal`). The resulting hierarchical tree from the table above is displayed below.
```@raw html
<img src="../assets/tech.png" width="100%"/>
```
### Mappings
**Converted and stored carriers**
The `.csv` table above does not only define the hierarchical tree of technologies, but also maps carriers to these technologies. This assignment differentiates between in- and output carriers and between conversion and storage.
```@raw html
<p class="norm">
In AnyMOD conversion generally refers to one or multiple carrier being used or generated as displayed in the diagram below. In the diagram expressions in italic refer to <a href="../variables">model variables</a> and expressions in boxes to <a href="../parameter_list">parameters</a>.
</p>
<p style="text-align:center;"><img src="../assets/convTech.svg" width="80%"/>
```
The technology `ccgtCHP` from the example is a pure conversion technology converting `gas` to `electricity` and `districtHeat`, because `carrier_conversion_in` and `carrier_conversion_out` are assigned accordingly. It is not necessary for a conversion technology to have both an input and an output carrier. For instance, in the example renewables like `wind` only have an output.
Storage is analogously described by the diagram below. In the example, `hydro` is a pure storage technology, because `electricity` is assigned to both `carrier_stored_in` and `carrier_stored_out`. Storage is not limited to a single carrier and a technology can store an arbitrary number of different carriers.
```@raw html
<p style="text-align:center;"><img src="../assets/stTech.svg" width="100%"/>
```
Assigning carriers as a storage output (or input) means they can be discharged to (or charged from) the general energy balance. Technologies that can only discharge but not charge a carrier from the energy balance are conceivable as well. However, this only makes sense if the carrier can be generated internally to charge the storage in the first place. The interplay of conversion and storage in such a case is illustrated by the following diagram:
```@raw html
<p style="text-align:center;"><img src="../assets/mixTech1.svg" width="100%"/>
```
Such a technology is created by assigning the same carrier to `carrier_conversion_out` and `carrier_stored_out`, but not to `carrier_stored_in`. In the example, this is the case for the technology `photovoltaic`. It is intended to represent a residential photovoltaic panel combined with a home battery that cannot be charged from the grid, but only from the panel.
Also the opposite case can be modeled: a carrier charged from outside, but only discharged within the technology. This only makes sense if the carrier is being used within the technology's conversion process. The corresponding diagram is given below and could represent a gas power plant with an on-site gas storage.
```@raw html
<p style="text-align:center;"><img src="../assets/mixTech2.svg" width="100%"/>
```
**Optional mappings**
The following table lists all optional columns that can be specified within the `set_technology.csv` file to overwrite a default behaviour.
```@raw html
<p class="norm">
An important option to point out is <code>technology_type</code>. Three different technology types exist: <code>stock</code>, <code>mature</code> and <code>emerging</code>. Stock technologies cannot be expanded and are limited to <a href="../parameter_list/#Residual-capacities-1">pre-existing capacities</a>. Emerging technologies differ from mature technologies in the sense that their capacities are differentiated by time-step of construction $Ts_{exp}$. This makes it possible to account for technological improvement over time, but increases the required number of variables and constraints.
</p>
<table class="tabelle2">
<tbody>
<tr>
<td><strong>column name</strong></td>
<td><strong>explanation</strong></td>
<td style="border-right:none"><strong>default</strong></td>
</tr>
<tr>
<td><code>mode</code></td>
<td>
<ul class="liste">
<li>different operational modes separated by a semicolon and a space</li>
<li>e.g. <code>moreHeat; moreElec</code> for CHP plant in the example</li>
</ul>
</td>
<td style="border-right:none">none</td>
</tr>
<tr>
<td><code>technology_type</code></td>
<td>
<ul class="liste">
<li>types control expansion behaviour of technology, see <a href="https://arxiv.org/abs/2004.10184">Göke (2020)</a> for details</li>
<li>available options are:
<ul style="margin-top:0px">
<li style="margin-top:0px"><code>stock</code>: no expansion</li>
<li><code>mature</code>: expansion without differentiation by time-step of construction</li>
<li><code>emerging</code>: expansion with differentiation by time-step of construction</li>
</ul>
</ul>
</li>
</ul>
</td>
<td style="border-right:none"><code>mature</code></td>
</tr>
<tr>
<td><code>region_disaggregate</code></td>
<td>
<ul class="liste">
<li>if expansion is spatially more detailed than dispatch, by default the resolution of dispatch <br> is increased to the expansion level</li>
<li>using the keyword <code>no</code> prevents this behaviour and enforces the original dispatch resolution</li>
</ul>
</td>
<td style="border-right:none"><code>yes</code></td>
</tr>
<tr>
<td><code>timestep_expansion</code></td>
<td>
<ul class="liste">
<li>sets the resolution of expansion time-steps</li>
<li>cannot be more detailed than the superordinate dispatch level (usually years)</li>
</ul>
</td>
<td style="border-right:none">most detailed resolution <br> of carriers</td>
</tr>
<tr>
<td><code>region_expansion</code></td>
<td><ul class="liste">
<li>sets the resolution of expansion regions</li>
<li>default corresponds to the smallest value feasible</li>
</ul></td>
<td style="border-right:none">most detailed resolution <br> of carriers</td>
</tr>
<tr>
<td><code>carrier_stored_active</code></td>
<td><ul class="liste">
<li>by default, only leaves (nodes without any descendants) of stored carriers are actively <br> stored (see <a href="https://arxiv.org/abs/2004.10184">Göke (2020)</a> for details)</li>
<li>non-leaf carriers to be stored actively can be added here</li>
<li>carriers are separated by a semicolon and a space just like modes</li>
</ul></td>
<td style="border-right:none">none</td>
</tr>
</tbody>
</table>
```
## Other Sets
Other sets in AnyMOD are not organized in hierarchical trees and might even be empty in a specific model. These are listed here.
### Modes
The set of modes includes operational modes defined for technologies. Modes are represented by the symbol ``M``.
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 3724 | # Tips
In addition to the tips listed here, it is always strongly advised to check the reporting files as introduced in the [Error handling](@ref) section.
### Performance
See the [Performance and stability](@ref) section for greater detail on this issue.
```@raw html
<p class="norm">
<ul>
<li>Use a commercial solver and the Barrier algorithm. Also, consider skipping Crossover and setting a greater convergence tolerance (the 'BarConvTol' option in Gurobi).</li>
<li>Set <a href="../performance/#Variable-limits">general upper limits</a>, ideally only for the objective value.</li>
<li>Define <a href="../parameter_list/#Cost-of-curtailment-and-loss-of-load-1">loss-of-load costs</a> in order to create <a href="../variables/#Curtailment-and-loss-of-load-1">loss-of-load variables</a> for each energy carrier.</li>
<li>Reduce the temporal resolution for selected <a href="../sets/#Carriers">carriers</a> and set the temporal resolution of expansion to years.</li>
<li>Disable endogenous decommissioning by setting the <code>decomm</code> argument of the <a href="../model_object">model constructor</a> to <code>:none</code>.</li>
</ul>
</p>
```
### Debugging
```@raw html
<p class="norm">
<ul>
<li>Check, if you provided your parameter data according to the units given in the <a href="../parameter_list">parameter list</a>.</li>
<li>To save time, do not read in your time-series data until the rest of your model is fully functioning.</li>
<li>Set a less detailed temporal resolution for debugging to speed up the process.</li>
<li>If your model is infeasible and you use Gurobi, try AnyMOD's <a href="../api/#AnyMOD.printIIS"><code>printIIS(model_object)</code></a> function. It uses Gurobi's computeIIS function to obtain a set of constraints causing the infeasibility. Generally, this works better the more obvious a contradiction is (e.g. an upper limit on expansion contradicting a lower limit on installed capacity).</li>
<li>Define <a href="../parameter_list/#Cost-of-curtailment-and-loss-of-load-1">loss-of-load costs</a> in order to create <a href="../variables/#Curtailment-and-loss-of-load-1">loss-of-load variables</a> for each energy carrier.</li>
</ul>
</p>
```
### Workflow
```@raw html
<p class="norm">
<ul>
<li>Input files can be taken from different directories. For example, if you have different models with different regional scope, you could have a shared directory for technology data that each of these models uses.</li>
<li>Additional columns within the input files and additional files within the input directories can be used for documentation.</li>
<li>The read-in parameter data can be manipulated between the construction of the <a href="../model_object">model object</a> and the creation of the optimization problem using <code>createOptModel!(model_object)</code>. This is particularly useful for sensitivity analyses.</li>
<li>Models can be extended using standard JuMP commands.</li>
<li>Version control can be used for model development.</li>
</ul>
</p>
```
### Common mistakes
(or at least mistakes that I believe to be common, because I made them when working with AnyMOD)
```@raw html
<p class="norm">
<ul>
<li><a href="../parameter_list/#Expansion-cost-1">Expansion</a> or <a href="../parameter_list/#Operation-cost-1">operating costs</a> not relating to capacity <strong>before efficiency</strong>.</li>
<li>Capacity limits of zero for certain technologies in certain regions (e.g. for wind offshore in Austria) are not explicitly set and as a result capacity is unlimited.</li>
<li>Limiting parameters are not defined correctly and limit the sum of variables across all years (see <a href="../parameter_list/#Limits-on-quantities-dispatched">Limits on quantities dispatched</a>).</li>
</ul>
</p>
```
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.1.12 | 46a00c75bb6eb7fdaf5b15d75e9a544c0f91484d | docs | 23772 | ```@raw html
<style>
table.tabelle td {
border-left: 1px solid;
border-color: #dbdbdb;
}
table.tabelle td:first-child {
border-right: 2.5px solid;
border-color: #dbdbdb;
border-left: none;
}
ul.liste {
list-style-position: inside;
margin-left: 0em;
margin-top: 0em;
white-space: nowrap;
display:inline-block;
text-align: left;
}
</style>
```
# Variables
In the following, all variables used in AnyMOD are listed. Information includes each variable's name as used throughout the model, its unit, and its dimensions according to the symbols introduced in [Sets and Mappings](@ref). Also specified are what determines the instances a variable is actually created for, in which constraints it appears, and which model part it is assigned to.
```@raw html
<p class="norm">
To increase performance, AnyMOD stores variables within DataFrames instead of using JuMP's native containers. Each variable dimension is represented by a column and integers in these columns relate to nodes within the hierarchical trees of sets (see <a href="../data/#printObject"><code>printObject</code></a> on how to export these in a readable format). An additional <code>var</code> column stores the corresponding variables. These variables are not JuMP variable objects, but JuMP expressions that already include <a href="../performance/#Scaling">scaling factors</a>.
</p>
<p class="norm">
All variables are defined as positive. New variables beyond those listed here can freely be added to a model by using standard JuMP commands.
</p>
```
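As a minimal sketch (assuming `model_object` is a created `anyModel` and using technology names from the demo problem), a variable DataFrame can be accessed via the corresponding model part and exported in readable form:
```julia
# DataFrame of generation variables for the technology 'wind'
gen_df = model_object.parts.tech[:wind].var[:gen]
# write a readable version with set names instead of internal node ids
printObject(gen_df, model_object)
```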
# Dispatch of technologies
### Generation and use
Quantities generated and used by technology.
```@raw html
<p class="norm">
To reduce model size, variables are not created if the <a href="../parameter_list/#Availability-1">availability</a> is zero. Also, mode-dependent variables are only created where required. For example, for a technology with mode-specific <a href="../parameter_list/#Ratios-of-carrier-use-and-generation-1">generation ratios</a>, different variables for each mode are only created for <code>gen</code>, but not for <code>use</code>.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>gen</td>
<td>use</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $C$, $Te$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li>dispatch resolution of carrier</li>
<li>expansion time-steps of technology</li>
<li>relevant modes of technology</li>
<li><a href="../parameter_list/#Availability-1">respective availability</a> exceeds zero</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
<li><a href="../constraints/#Conversion-balance-1">conversion balance</a></li>
<li><a href="../constraints/#Conversion-capacity-restriction-1">conversion capacity restriction</a></li>
<li><a href="../constraints/#Energy-ratio-restriction-1">energy ratio restriction</a></li>
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Charging and discharging
Externally and internally charged and discharged quantities (see [Technologies](@ref) for explanation).
```@raw html
<p class="norm">
To reduce model size, variables are not created if the <a href="../parameter_list/#Availability-1">availability</a> is zero. Also, mode-dependent variables are only created where required.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stExt{In/Out}</td>
<td>stInt{In/Out}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $C$, $Te$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:center"><ul class="liste">
<li>dispatch resolution of carrier</li>
<li>expansion time-steps of technology</li>
<li>relevant modes of technology</li>
<li><a href="../parameter_list/#Availability-1">respective availability</a> exceeds zero</li>
</ul></td>
</tr>
<tr>
<td rowspan="2"><strong>related constraints</strong></td>
<td colspan="2"; style= "text-align:left;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
<li><a href="../constraints/#Storage-balance-1">storage balance</a></li>
<li><a href="../constraints/#Storage-capacity-restriction-1">storage capacity restriction</a></li>
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste">
</ul>
</td>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste">
<li><a href="../constraints/#Conversion-balance-1">conversion balance</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
### Storage level
Quantity stored by storage system.
```@raw html
<p class="norm">
To reduce model size, variables are not created if the <a href="../parameter_list/#Availability-1">availability</a> is zero. Also, mode-dependent variables are only created where required.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>stLvl</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $Ts_{dis}$, $R_{dis}$, $C$, $Te$, ($M$)</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>dispatch resolution of carrier</li>
<li>expansion time-steps of technology</li>
<li>relevant modes of technology</li>
<li><a href="../parameter_list/#Availability-1">availability</a> exceeds zero</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Storage-balance-1">storage balance</a></li>
<li><a href="../constraints/#Storage-capacity-restriction-1">storage capacity restriction</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Technology-1">technology</a></td>
</tr>
</tbody>
</table>
```
# Non-technology dispatch
### Exchange quantities
Quantity exchanged from one region to the other. The variable is directed, meaning it only denotes exchange in one direction.
```@raw html
<p class="norm">
Variables are only created between regions that can actually exchange energy, which depends on the definition of <a href="../parameter_list/#Residual-capacities-1">residual capacity</a>.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>exc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td>GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{dis}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td><ul class="liste">
<li>dispatch resolution of carrier</li>
<li><a href="../parameter_list/#Residual-capacities-1">residual capacity</a> between regions defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
<li><a href="../constraints/#Exchange-capacity-restriction-1">exchange capacity restriction</a></li>
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Buy and sell
Quantities bought or sold on an external market.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>trdBuy</td>
<td>trdSell</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li>dispatch resolution of carrier</li>
<li><a href="../parameter_list/#Trade-price-1">trade price</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
<li><a href="../constraints/#Trade-capacity-restriction-1">trade capacity restriction</a></li>
<li><a href="../constraints/#Trade-cost-equation-1">trade cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"><a href="../parts/#Trade-1">trade</a></td>
</tr>
</tbody>
</table>
```
### Curtailment and loss-of-load
Curtailed quantities and unmet demand.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>lss</td>
<td>crt</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">GWh</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:left">$Ts_{dis}$, $R_{dis}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li>dispatch resolution of carrier</li>
<li><a href="../parameter_list/#Cost-of-curtailment-and-loss-of-load-1">respective costs</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Energy-balance-1">energy balance</a></li>
<li><a href="../constraints/#Curtailment-and-loss-of-load-cost-equation-1">curtailment and loss-of-load cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"><a href="../parts/#Balance-1">balance</a></td>
</tr>
</tbody>
</table>
```
# Capacity expansion
### Expansion
Expansion of conversion, storage-input, storage-output, storage-size, and exchange capacity.
```@raw html
<p class="norm">
As explained <a href="../parameter_list/#Capacity-expansion">here</a>, capacity refers to <strong>capacity before efficiency</strong>!
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>expConv</td>
<td>expSt{In/Out/Size}</td>
<td>expExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td rowspan="2"><strong>instances</strong></td>
<td colspan="2"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li>expansion resolution of technology</li>
</ul>
</td>
<td colspan="1"; rowspan="2">
<ul class="liste">
<li>regions can exchange carrier</li>
<li>expansion resolution of carrier</li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste"; start="4">
<li>technology uses or generates carriers</li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px">
<ul class="liste"; start="4">
<li>each stored carrier</li>
</ul>
</td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
<li><a href="../constraints/#Expansion-cost-equation-1">expansion cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Installed capacity
Installed capacity of conversion, storage-input, storage-output, storage-size, and exchange capacity.
```@raw html
<p class="norm">
As explained <a href="../parameter_list/#Capacity-expansion">here</a>, capacity refers to <strong>capacity before efficiency</strong>!
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>capaConv</td>
<td>capaSt{In/Out/Size}</td>
<td>capaExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td rowspan="3"><strong>instances</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li>superordinate dispatch resolution (usually years)</li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ul class="liste">
<li>spatial expansion resolution of technology</li>
</ul>
</td>
<td colspan="1"; rowspan="2"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste">
<li>spatial expansion resolution of carrier</li>
<li>regions can exchange carrier</li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste"; start="4">
<li>technology uses or generates carriers</li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px">
<ul class="liste"; start="4">
<li>each stored carrier</li>
</ul>
</td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="3"; style="text-align:center">
<ul class="liste">
<li><a href="../constraints/#Definition-of-installed-capacity-1">definition of installed capacity</a></li>
<li><a href="../constraints/#Decommissioning-of-operated-capacitiy-1">decommissioning of operated capacitiy</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
### Operated capacity
Operated capacity of conversion, storage-input, storage-output, storage-size, and exchange capacity.
```@raw html
<p class="norm">
As explained <a href="../parameter_list/#Capacity-expansion">here</a>, capacity refers to <strong>capacity before efficiency</strong>!
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>oprCapaConv</td>
<td style="padding-right:0.5em"><nobr>oprCapaSt{In/Out/Size}</nobr></td>
<td>oprCapaExc</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="3"; style="text-align:center">GW</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $R_{exp}$, $C$, $Te$</td>
<td>$Ts_{exp}$, $R_{from}$, $R_{to}$, $C$</td>
</tr>
<tr>
<td rowspan="3"><strong>instances</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0px">
<ul class="liste">
<li>superordinate dispatch resolution (usually years)</li>
</ul>
</td>
</tr>
<tr>
<td colspan="2"; style="text-align:center;border-right:none;border-top:none;border-bottom:none;padding-top:0px;padding-bottom:0px">
<ul class="liste">
<li>spatial expansion resolution of technology</li>
</ul>
</td>
<td colspan="1"; rowspan="2"; style="text-align:left;border-right:none;padding-top:0px;padding-right:0.0px">
<ul class="liste">
<li>spatial expansion resolution of carrier</li>
<li>regions can exchange carrier</li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste"; start="4">
<li>technology uses or generates carriers</li>
</ul>
</td>
<td colspan="1"; style="text-align:left;padding-top:0px">
<ul class="liste"; start="4">
<li>each stored carrier</li>
</ul>
</td>
</tr>
<tr>
<td rowspan="2"><strong>related constraints</strong></td>
<td colspan="3"; style="text-align:center;border-bottom:none;padding-bottom:0.2em">
<ul class="liste">
<li><a href="../constraints/#Decommissioning-of-operated-capacitiy-1">decommissioning of operated capacitiy</a></li>
<li><a href="../constraints/#Operating-cost-equation-1">operating cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste">
<li><a href="../constraints/#Conversion-capacity-restriction-1">conversion capacity <br> restriction</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px">
<ul class="liste">
<li><a href="../constraints/#Storage-capacity-restriction-1">storage capacity <br> restriction</a></li>
</ul>
</td>
<td colspan="1"; style="text-align:left;border-right:none;padding-top:0px";padding-right:0.0px>
<ul class="liste">
<li><a href="../constraints/#Exchange-capacity-restriction-1">exchange capacity <br> restriction</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Technology-1">technology</a></td>
<td style="text-align:center"><a href="../parts/#Exchange-1">exchange</a></td>
</tr>
</tbody>
</table>
```
# Costs
The variables listed here serve solely to aggregate different kinds of costs and have no other function within the model. Therefore, their structure is somewhat arbitrary and was chosen to facilitate reporting without adversely affecting performance.
### Expansion cost
Costs of capacity expansion.
```@raw html
<p class="norm">
As explained <a href="../parameter_list/#Capacity-expansion">here</a>, capacity, and thus also the cost of capacity expansion, refers to <strong>capacity before efficiency</strong>!
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costExp{Conv,StIn,StOut,StSize}</td>
<td>costExp{Exc}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">Mil.€</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{exp}$, $R_{exp}$, $Te$</td>
<td>$Ts_{exp}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li><a href="../parameter_list/#Expansion-cost-1">expansion cost</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Expansion-cost-equation-1">expansion cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Operating cost
Costs of operating capacity.
```@raw html
<p class="norm">
As explained <a href="../parameter_list/#Capacity-expansion">here</a>, capacity, and thus also operating costs, refer to <strong>capacity before efficiency</strong>!
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costOpr{Conv,StIn,StOut,StSize}</td>
<td>costOpr{Exc}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">Mil.€</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li><a href="../parameter_list/#Operation-cost-1">operating cost</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Operating-cost-equation-1">operating cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Variable cost
```@raw html
<p class="norm">
Variable costs associated with quantities dispatched. Costs incurred by <a href="../parameter_list/#Emission-price-1">emission prices</a> are included in <code>costVarUse</code>.
</p>
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costVar{Use,Gen,StIn,StOut}</td>
<td>costVar{Exc}</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">Mil.€</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td>$Ts_{sup}$, $R_{exp}$, $Te$</td>
<td>$Ts_{sup}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li><a href="../parameter_list/#Variable-cost-1">variable cost</a> defined</li>
<li><a href="../parameter_list/#Emission-factor-1">emission factor</a> and <a href="../parameter_list/#Emission-price-1">emission price</a> defined in case of use</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Variable-cost-equation-1">variable cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Trade cost
Costs and revenues from buying or selling carriers on an external market.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costTrdBuy</td>
<td>costTrdSell</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">Mil.€</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{sup}$, $R_{exp}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li><a href="../parameter_list/#Trade-price-1">trade price</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Trade-cost-equation-1">trade cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
### Curtailment and loss-of-load cost
Cost of curtailment and unmet demand.
To allow for revenues and costs from curtailment, `costCrt` is the only model variable that can also take negative values.
```@raw html
<table class="tabelle">
<tbody>
<tr>
<td><strong>name</strong></td>
<td>costCrt</td>
<td>costLss</td>
</tr>
<tr>
<td><strong>unit</strong></td>
<td colspan="2"; style="text-align:center">Mil.€</td>
</tr>
<tr>
<td><strong>dimension</strong></td>
<td colspan="2"; style="text-align:center">$Ts_{sup}$, $R_{exp}$, $C$</td>
</tr>
<tr>
<td><strong>instances</strong></td>
<td colspan="2"; style="text-align:left"><ul class="liste">
<li><a href="../parameter_list/#Cost-of-curtailment-and-loss-of-load-1">respective costs</a> defined</li>
</ul></td>
</tr>
<tr>
<td><strong>related constraints</strong></td>
<td colspan="2"; style="text-align:left">
<ul class="liste">
<li><a href="../constraints/#Curtailment-and-loss-of-load-cost-equation-1">curtailment and loss-of-load cost equation</a></li>
</ul>
</td>
</tr>
<tr>
<td><strong>part</strong></td>
<td colspan="2"; style="text-align:center"><a href="../parts/#Objective-1">objective</a></td>
</tr>
</tbody>
</table>
```
| AnyMOD | https://github.com/leonardgoeke/AnyMOD.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 4245 | module Embeddings
using LinearAlgebra: norm
using DataDeps
using AutoHashEquals
using GoogleDrive
using PrecompileTools
export load_embeddings, language_files
export Word2Vec, GloVe, FastText_Text, Paragram
abstract type EmbeddingSystem{LANG} end
include("fasttext.jl")
include("glove.jl")
include("word2vec.jl")
include("Paragram.jl")
include("common.jl")
@auto_hash_equals struct EmbeddingTable{M<:AbstractMatrix, A<:AbstractVector}
embeddings::M
vocab::A
end
const supported_languages_files = Dict{DataType, Vector{String}}()
load_datadep(datadep_name) = @datadep_str datadep_name # makes sure to get __FILE__ right
default_file(::Type{T}, file_number) where T = language_files(T)[file_number] |> load_datadep
function language_files(::Type{T}) where {T<:EmbeddingSystem{LANG}} where LANG
get!(supported_languages_files,T) do
String[]
end
end
# Catch the unionall where no type-param given
language_files(::Type{T}) where T = language_files(T{:en})
"""
load_embeddings(EmbeddingSystem, [embedding_file|default_file_number])
load_embeddings(EmbeddingSystem{:lang}, [embedding_file|default_file_number])
Load the embeddings from an embedding file.
The embeddings should be of the type given by the Embedding system.
If the `embedding file` is not provided, a default embedding file will be used.
(It will be automatically installed if required).
EmbeddingSystems have a language type parameter.
For example `FastText_Text{:fr}` or `Word2Vec{:en}`, if that language parameter is not given it defaults to English.
(I am sorry for the poor state of the NLP field that many embedding formats are only available pretrained in English.)
Using this the correct default embedding file will be installed for that language.
For some languages and embedding systems there are multiple possible files.
You can check the list of them using, for example, `language_files(FastText_Text{:de})`.
The first is nominally the most popular, but if you want to default to another you can do so by setting the `default_file_number`.
### Keyword Arguments:
- `max_vocab_size` an integer specifying the maximum number of words to load (most formats are sorted by frequency, so this keeps the most common words). Default is to keep all of them
- `keep_words=Set()` if a non-empty set of words is provided, then only word embeddings for words from that list are loaded. Otherwise (default) all words are loaded.
### Returns an `EmbeddingTable` object.
This has 2 fields.
- `embeddings` is a matrix, each column is the embedding for a word.
- `vocab` is a vector of strings, ordered as per the columns of `embeddings`, such that the first string in vocab is the first column of `embeddings` etc
We do not include a method for getting the index of a column from a word.
This is trivial to define in code (`vocab2ind(vocab)=Dict(word=>ii for (ii,word) in enumerate(vocab))`),
and you might like to be doing this in a more consistent way, e.g. using [MLLabelUtils.jl](https://github.com/JuliaML/MLLabelUtils.jl),
or you might like to build a much faster Dict solution on top of [InternedStrings.jl](https://github.com/JuliaString/InternedStrings.jl)
"""
function load_embeddings(::Type{T},
default_file_number::Int=1;
max_vocab_size=typemax(Int),
keep_words=Set()) where T<:EmbeddingSystem
embedding_file = default_file(T, default_file_number)
load_embeddings(T, embedding_file; max_vocab_size=max_vocab_size, keep_words=keep_words)
end
function load_embeddings(::Type{T},
fh::IO;
max_vocab_size=typemax(Int),
keep_words=Set()) where T<:EmbeddingSystem
EmbeddingTable(_load_embeddings(T, fh, max_vocab_size, Set(keep_words))...)
end
function load_embeddings(::Type{T},
embedding_file::AbstractString;
max_vocab_size=typemax(Int),
keep_words=Set()) where T<:EmbeddingSystem
open(embedding_file, "r") do fh
load_embeddings(T, fh; max_vocab_size=max_vocab_size, keep_words=keep_words)
end
end
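# Usage sketch (the embedding set, words, and sizes below are illustrative
# assumptions, not defaults):
#
#   embs = load_embeddings(GloVe{:en}; max_vocab_size=10_000)
#   word2ind = Dict(word => i for (i, word) in enumerate(embs.vocab))
#   embs.embeddings[:, word2ind["the"]]   # embedding vector for "the"
#
# `embs.embeddings` is a (dimension × vocabulary) Float32 matrix and
# `embs.vocab` is the matching vector of words.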
function init_systems()
for T in [Word2Vec, GloVe, FastText, Paragram]
init(T)
end
end
@setup_workload begin
@compile_workload init_systems()
end
function __init__()
init_systems()
end
end
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 2058 | abstract type Paragram{LANG} <: EmbeddingSystem{LANG} end
function init(::Type{Paragram})
vectors = [("paragram_300_ws353",
" Paragram-WS353 300 dimensional Paragram embeddings tuned on WordSim353 dataset. 1.7 GB download.",
"8ed9a19f8bc400cdbca5dae7f024c0310a3e5711a46ba48036a7542614440721",
"https://drive.google.com/uc?id=0B9w48e1rj-MOLVdZRzFfTlNsem8&export=download"),
# "https://drive.google.com/file/d/0B9w48e1rj-MOLVdZRzFfTlNsem8/view"),
("paragram_300_sl999",
"Paragram-SL999 300 dimensional Paragram embeddings tuned on SimLex999 dataset. 1.7 GB download.",
"9a16adc7d620642f863278451db4c03a2646016440ccea7e30a37ba17868781d",
"https://drive.google.com/uc?id=0B9w48e1rj-MOck1fRGxaZW1LU2M&export=download"),
]
#https://drive.google.com/file/d/0B9w48e1rj-MOLVdZRzFfTlNsem8/view?usp=sharing
for (depname, description, sha, link) in vectors
register(DataDep(depname,
"""
Pretrained Paragram word embeddings.
Website: https://www.cs.cmu.edu/~jwieting/
Author: John Wieting
Year: 2015
Licence: Open Data Commons Public Domain Dedication and License (PDDL)
Paper:
John Wieting, Mohit Bansal, Kevin Gimpel, Karen Livescu, Dan Roth. From Paraphrase Database to Compositional Paraphrase Model and Back.
$description
""",
link,
sha,
fetch_method = google_download,
post_fetch_method = unpack))
append!(language_files(Paragram{:en}), ["$(depname)/$(depname).txt"])
end
end
_load_embeddings(::Type{<:Paragram}, embedding_file::IO, max_vocab_size, keep_words) = _load_embeddings_csv(embedding_file, max_vocab_size, keep_words , ' ')
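# Paragram releases are whitespace-delimited text files, so loading reuses the
# generic delimited-text reader from common.jl with ' ' as the delimiter.
# A sketch of typical use (the first call downloads ~1.7 GB via DataDeps):
#   embs = load_embeddings(Paragram{:en}; max_vocab_size=1_000)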
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 852 |
function _load_embeddings_csv(fh::IO, max_vocab_size, keep_words, delim::AbstractChar=',')
local LL, indexed_words, index
if length(keep_words) > 0
max_vocab_size = length(keep_words)
end
indexed_words = Vector{String}()
LL = Vector{Vector{Float32}}()
index = 1
    for line in eachline(fh)
        xs = split(line, delim)
        word = xs[1]
        if length(keep_words) == 0 || (word in keep_words)
            index > max_vocab_size && break
            try
                push!(LL, parse.(Float32, @view(xs[2:end])))
            catch err
                err isa ArgumentError || rethrow()
                @warn "Could not parse word vector" index word exception=err
                continue # skip the word as well, so vocab stays aligned with the embeddings
            end
            push!(indexed_words, word)
            index += 1
        end
end
return reduce(hcat, LL), indexed_words
end
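# The expected file layout is one word per line followed by its vector
# components, separated by `delim`; e.g. for GloVe (delim = ' ') a file
# might look like
#
#   the 0.418 0.24968 -0.41242 ...
#   , 0.013441 0.23682 -0.16899 ...
#
# so column j of the returned matrix is the embedding of indexed_words[j].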
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 15230 | abstract type FastText{LANG} <: EmbeddingSystem{LANG} end
abstract type FastText_Text{LANG} <: FastText{LANG} end
abstract type FastText_Bin{LANG} <: FastText{LANG} end
function _load_embeddings(::Type{<:FastText_Bin}, embedding_file, max_vocab_size, keep_words)
error("FastText Binary Format not supported. If anyone knows how to parse it please feel encouraged to make a PR.")
end
function _load_embeddings(::Type{<:FastText_Text}, fh::IO, max_vocab_size, keep_words)
    # If there are any words in keep_words, then only those are kept; otherwise all are kept
    local LL, indexed_words, index
    vocab_size, vector_size = parse.(Int64, split(readline(fh)))
    max_stored_vocab_size = min(max_vocab_size, vocab_size)
    if length(keep_words) > 0
        # set this after reading the header so it is not overwritten
        max_stored_vocab_size = length(keep_words)
    end
indexed_words = Vector{String}(undef, max_stored_vocab_size)
LL = Array{Float32}(undef, vector_size, max_stored_vocab_size)
index = 1
@inbounds for _ in 1:vocab_size
line = readline(fh)
toks = split(line)
word = first(toks)
if length(keep_words)==0 || word in keep_words
indexed_words[index]=word
LL[:,index] .= parse.(Float32, @view toks[2:end])
index+=1
if index>max_stored_vocab_size
break
end
end
end
LL = LL[:,1:index-1] #throw away unused columns
indexed_words = indexed_words[1:index-1] #throw away unused columns
LL, indexed_words
end
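# A sketch of typical use (the first call downloads the vectors via DataDeps):
#   embs_fr = load_embeddings(FastText_Text{:fr}; max_vocab_size=100)
#   size(embs_fr.embeddings)  # (300, 100)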
function init(::Type{FastText})
#########
# English
for (source, name, hashstring) in [
("Common Crawl", "crawl-300d-2M.vec", "5bfffffbabdab299d4c9165c47275e8f982807a6eaca37ee1f71d3a79ddb544d"),
("Wiki News", "wiki-news-300d-1M.vec","bdeb85f44892c505953e3654183e9cb0d792ee51be0992460593e27198d746f8")
]
push!(language_files(FastText_Text{:en}), "FastText $(source)/$(name)")
register(DataDep("FastText $(source)",
"""
Dataset: FastText Word Embeddings for English (original release)
Author: Bojanowski et. al. (Facebook)
License: CC-SA 3.0
Website: https://fasttext.cc/docs/en/english-vectors.html
1 million 300-dimensional word vectors trained on Wikipedia 2017, the UMBC webbase corpus and the statmt.org news dataset (16B tokens)
Citation: P. Bojanowski*, E. Grave*, A. Joulin, T. Mikolov, Enriching Word Vectors with Subword Information
Notice: this file is ~ 1GB
""",
"https://dl.fbaipublicfiles.com/fasttext/vectors-english/$(name).zip",
hashstring,
post_fetch_method=DataDeps.unpack
));
end
#########################
# Common Crawl
for (lang, text_hashstring, bin_hashstring) in fast_commoncrawl_languages_and_hashes
push!(language_files(FastText_Bin{lang}), "FastText $lang CommonCrawl Binary/cc.$(lang).300.bin")
push!(language_files(FastText_Text{lang}), "FastText $lang CommonCrawl Text/cc.$(lang).300.vec")
for (mode, hashstring, ext) in [("Text", text_hashstring, "vec"), ("Binary", bin_hashstring, "bin")]
register(DataDep("FastText $lang CommonCrawl $mode",
"""
Dataset: 300-dimensional FastText word embeddings for $lang, trained on Wikipedia and the CommonCrawl
Website: https://fasttext.cc/docs/en/crawl-vectors.html
Author: Grave et. al. (Facebook)
License: CC-SA 3.0
Citation: E. Grave*, P. Bojanowski*, P. Gupta, A. Joulin, T. Mikolov, Learning Word Vectors for 157 Languages
""",
"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.$(lang).300.$(ext).gz",
hashstring;
post_fetch_method=DataDeps.unpack
));
end
end
for (lang, hashstring) in fast_text_wiki_languages_and_hashes
# TODO Add Binary files as well
push!(language_files(FastText_Text{lang}), "FastText $lang Wiki Text/wiki.$(lang).vec")
register(DataDep("FastText $lang Wiki Text",
"""
Dataset: 300-dimensional FastText word embeddings for $lang, trained on Wikipedia
Website: https://fasttext.cc/docs/en/pretrained-vectors.html
Author: Bojanowski et. al. (Facebook)
License: CC-SA 3.0
Citation: P. Bojanowski*, E. Grave*, A. Joulin, T. Mikolov, Enriching Word Vectors with Subword Information
""",
"https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.$(lang).vec",
hashstring;
));
end
end
# Lang, TextHash, BinHash
const fast_commoncrawl_languages_and_hashes = [
(:af, nothing, nothing),
(:als, nothing, nothing),
(:am, nothing, nothing),
(:an, nothing, nothing),
(:ar, nothing, nothing),
(:arz, nothing, nothing),
(:as, nothing, nothing),
(:ast, nothing, nothing),
(:az, nothing, nothing),
(:azb, nothing, nothing),
(:ba, nothing, nothing),
(:bar, nothing, nothing),
(:bcl, nothing, nothing),
(:be, nothing, nothing),
(:bg, nothing, nothing),
(:bh, nothing, nothing),
(:bn, nothing, nothing),
(:bo, nothing, nothing),
(:bpy, nothing, nothing),
(:br, nothing, nothing),
(:bs, nothing, nothing),
(:ca, nothing, nothing),
(:ce, nothing, nothing),
(:ceb, nothing, nothing),
(:ckb, nothing, nothing),
(:co, nothing, nothing),
(:cs, nothing, nothing),
(:cv, nothing, nothing),
(:cy, nothing, nothing),
(:da, nothing, nothing),
(:de, nothing, nothing),
(:diq, nothing, nothing),
(:dv, nothing, nothing),
(:el, nothing, nothing),
(:eml, nothing, nothing),
(:eo, nothing, nothing),
(:es, "116915c965346a0e3670b0e703e15e3526f31f1d216088eb3b30fc8f94982b82", nothing),
(:et, nothing, nothing),
(:eu, nothing, nothing),
(:fa, nothing, nothing),
(:fi, nothing, nothing),
(:fr, "ab0dca4ef2a8a38d97ca119b491c701c57e17178cfc2f032f3de973e86fe87aa", nothing),
(:frr, nothing, nothing),
(:fy, nothing, nothing),
(:ga, nothing, nothing),
(:gd, nothing, nothing),
(:gl, nothing, nothing),
(:gom, nothing, nothing),
(:gu, nothing, nothing),
(:gv, nothing, nothing),
(:he, nothing, nothing),
(:hi, nothing, nothing),
(:hif, nothing, nothing),
(:hr, nothing, nothing),
(:hsb, nothing, nothing),
(:ht, nothing, nothing),
(:hu, nothing, nothing),
(:hy, nothing, nothing),
(:ia, nothing, nothing),
(:id, nothing, nothing),
(:ilo, nothing, nothing),
(:io, nothing, nothing),
(:is, nothing, nothing),
(:it, nothing, nothing),
(:ja, nothing, nothing),
(:jv, nothing, nothing),
(:ka, nothing, nothing),
(:kk, nothing, nothing),
(:km, nothing, nothing),
(:kn, nothing, nothing),
(:ko, nothing, nothing),
(:ku, nothing, nothing),
(:ky, nothing, nothing),
(:la, nothing, nothing),
(:lb, nothing, nothing),
(:li, nothing, nothing),
(:lmo, nothing, nothing),
(:lt, nothing, nothing),
(:lv, nothing, nothing),
(:mai, nothing, nothing),
(:mg, nothing, nothing),
(:mhr, nothing, nothing),
(:min, nothing, nothing),
(:mk, nothing, nothing),
(:ml, nothing, nothing),
(:mn, nothing, nothing),
(:mr, nothing, nothing),
(:mrj, nothing, nothing),
(:ms, nothing, nothing),
(:mt, nothing, nothing),
(:mwl, nothing, nothing),
(:my, nothing, nothing),
(:myv, nothing, nothing),
(:mzn, nothing, nothing),
(:nah, nothing, nothing),
(:nap, nothing, nothing),
(:nds, nothing, nothing),
(:ne, nothing, nothing),
(:new, nothing, nothing),
(:nl, nothing, nothing),
(:nn, nothing, nothing),
(:no, nothing, nothing),
(:nso, nothing, nothing),
(:oc, nothing, nothing),
(:or, nothing, nothing),
(:os, nothing, nothing),
(:pa, nothing, nothing),
(:pam, nothing, nothing),
(:pfl, nothing, nothing),
(:pl, nothing, nothing),
(:pms, nothing, nothing),
(:pnb, nothing, nothing),
(:ps, nothing, nothing),
(:pt, nothing, nothing),
(:qu, nothing, nothing),
(:rm, nothing, nothing),
(:ro, nothing, nothing),
(:ru, nothing, nothing),
(:sa, nothing, nothing),
(:sah, nothing, nothing),
(:sc, nothing, nothing),
(:scn, nothing, nothing),
(:sco, nothing, nothing),
(:sd, nothing, nothing),
(:sh, nothing, nothing),
(:si, nothing, nothing),
(:sk, nothing, nothing),
(:sl, nothing, nothing),
(:so, nothing, nothing),
(:sq, nothing, nothing),
(:sr, nothing, nothing),
(:su, nothing, nothing),
(:sv, nothing, nothing),
(:sw, nothing, nothing),
(:ta, nothing, nothing),
(:te, nothing, nothing),
(:tg, nothing, nothing),
(:th, nothing, nothing),
(:tk, nothing, nothing),
(:tl, nothing, nothing),
(:tr, nothing, nothing),
(:tt, nothing, nothing),
(:ug, nothing, nothing),
(:uk, nothing, nothing),
(:ur, nothing, nothing),
(:uz, nothing, nothing),
(:vec, nothing, nothing),
(:vi, nothing, nothing),
(:vls, nothing, nothing),
(:vo, nothing, nothing),
(:wa, nothing, nothing),
(:war, nothing, nothing),
(:xmf, nothing, nothing),
(:yi, nothing, nothing),
(:yo, nothing, nothing),
(:zea, nothing, nothing),
(:zh, nothing, nothing),
]
const fast_text_wiki_languages_and_hashes = [
(:en, "f4d87723baad28804f89c0ecf74fd0f52ac2ae194c270cb4c89b0a84f0bcf53b"),
# PR Welcome to add Hashes for the ones below
(:ab, nothing),
(:ace, nothing),
(:ady, nothing),
(:aa, nothing),
(:af, nothing),
(:ak, nothing),
(:sq, nothing),
(:als, nothing),
(:am, nothing),
(:ang, nothing),
(:ar, nothing),
(:an, nothing),
(:arc, nothing),
(:hy, nothing),
(:roa_rup, nothing),
(:as, nothing),
(:ast, nothing),
(:av, nothing),
(:ay, nothing),
(:az, nothing),
(:bm, nothing),
(:bjn, nothing),
(:map_bms, nothing),
(:ba, nothing),
(:eu, nothing),
(:bar, nothing),
(:be, nothing),
(:bn, nothing),
(:bh, nothing),
(:bpy, nothing),
(:bi, nothing),
(:bs, nothing),
(:br, nothing),
(:bug, nothing),
(:bg, nothing),
(:my, nothing),
(:bxr, nothing),
(:zh_yue, nothing),
(:ca, nothing),
(:ceb, nothing),
(:bcl, nothing),
(:ch, nothing),
(:cbk_zam, nothing),
(:ce, nothing),
(:chr, nothing),
(:chy, nothing),
(:ny, nothing),
(:zh, nothing),
(:cho, nothing),
(:cv, nothing),
(:zh_classical, nothing),
(:kw, nothing),
(:co, nothing),
(:cr, nothing),
(:crh, nothing),
(:hr, nothing),
(:cs, nothing),
(:da, nothing),
(:dv, nothing),
(:nl, nothing),
(:nds_nl, nothing),
(:dz, nothing),
(:pa, nothing),
(:arz, nothing),
(:eml, nothing),
(:myv, nothing),
(:eo, nothing),
(:et, nothing),
(:ee, nothing),
(:ext, nothing),
(:fo, nothing),
(:hif, nothing),
(:fj, nothing),
(:fi, nothing),
(:frp, nothing),
(:fr, nothing),
(:fur, nothing),
(:ff, nothing),
(:gag, nothing),
(:gl, nothing),
(:gan, nothing),
(:ka, nothing),
(:de, nothing),
(:glk, nothing),
(:gom, nothing),
(:got, nothing),
(:el, nothing),
(:kl, nothing),
(:gn, nothing),
(:gu, nothing),
(:ht, nothing),
(:hak, nothing),
(:ha, nothing),
(:haw, nothing),
(:he, nothing),
(:hz, nothing),
(:mrj, nothing),
(:hi, nothing),
(:ho, nothing),
(:hu, nothing),
(:is, nothing),
(:io, nothing),
(:ig, nothing),
(:ilo, nothing),
(:id, nothing),
(:ia, nothing),
(:ie, nothing),
(:iu, nothing),
(:ik, nothing),
(:ga, nothing),
(:it, nothing),
(:jam, nothing),
(:ja, nothing),
(:jv, nothing),
(:kbd, nothing),
(:kab, nothing),
(:xal, nothing),
(:kn, nothing),
(:kr, nothing),
(:pam, nothing),
(:krc, nothing),
(:kaa, nothing),
(:ks, nothing),
(:csb, nothing),
(:kk, nothing),
(:km, nothing),
(:ki, nothing),
(:rw, nothing),
(:ky, nothing),
(:rn, nothing),
(:kv, nothing),
(:koi, nothing),
(:kg, nothing),
(:ko, nothing),
(:kj, nothing),
(:ku, nothing),
(:ckb, nothing),
(:lad, nothing),
(:lbe, nothing),
(:lo, nothing),
(:ltg, nothing),
(:la, nothing),
(:lv, nothing),
(:lez, nothing),
(:lij, nothing),
(:li, nothing),
(:ln, nothing),
(:lt, nothing),
(:olo, nothing),
(:jbo, nothing),
(:lmo, nothing),
(:nds, nothing),
(:dsb, nothing),
(:lg, nothing),
(:lb, nothing),
(:mk, nothing),
(:mai, nothing),
(:mg, nothing),
(:ms, nothing),
(:ml, nothing),
(:mt, nothing),
(:gv, nothing),
(:mi, nothing),
(:mr, nothing),
(:mh, nothing),
(:mzn, nothing),
(:mhr, nothing),
(:cdo, nothing),
(:zh_min_nan, nothing),
(:min, nothing),
(:xmf, nothing),
(:mwl, nothing),
(:mdf, nothing),
(:mo, nothing),
(:mn, nothing),
(:mus, nothing),
(:nah, nothing),
(:na, nothing),
(:nv, nothing),
(:ng, nothing),
(:nap, nothing),
(:ne, nothing),
(:new, nothing),
(:pih, nothing),
(:nrm, nothing),
(:frr, nothing),
(:lrc, nothing),
(:se, nothing),
(:nso, nothing),
(:no, nothing),
(:nn, nothing),
(:nov, nothing),
(:ii, nothing),
(:oc, nothing),
(:cu, nothing),
(:or, nothing),
(:om, nothing),
(:os, nothing),
(:pfl, nothing),
(:pi, nothing),
(:pag, nothing),
(:pap, nothing),
(:ps, nothing),
(:pdc, nothing),
(:fa, nothing),
(:pcd, nothing),
(:pms, nothing),
(:pl, nothing),
(:pnt, nothing),
(:pt, nothing),
(:qu, nothing),
(:ksh, nothing),
(:rmy, nothing),
(:ro, nothing),
(:rm, nothing),
(:ru, nothing),
(:rue, nothing),
(:sah, nothing),
(:sm, nothing),
(:bat_smg, nothing),
(:sg, nothing),
(:sa, nothing),
(:sc, nothing),
(:stq, nothing),
(:sco, nothing),
(:gd, nothing),
(:sr, nothing),
(:sh, nothing),
(:st, nothing),
(:sn, nothing),
(:scn, nothing),
(:szl, nothing),
(:simple, nothing),
(:sd, nothing),
(:si, nothing),
(:sk, nothing),
(:sl, nothing),
(:so, nothing),
(:azb, nothing),
(:es, nothing),
(:srn, nothing),
(:su, nothing),
(:sw, nothing),
(:ss, nothing),
(:sv, nothing),
(:tl, nothing),
(:ty, nothing),
(:tg, nothing),
(:ta, nothing),
(:roa_tara, nothing),
(:tt, nothing),
(:te, nothing),
(:tet, nothing),
(:th, nothing),
(:bo, nothing),
(:ti, nothing),
(:tpi, nothing),
(:to, nothing),
(:ts, nothing),
(:tn, nothing),
(:tcy, nothing),
(:tum, nothing),
(:tr, nothing),
(:tk, nothing),
(:tyv, nothing),
(:tw, nothing),
(:udm, nothing),
(:uk, nothing),
(:hsb, nothing),
(:ur, nothing),
(:ug, nothing),
(:uz, nothing),
(:ve, nothing),
(:vec, nothing),
(:vep, nothing),
(:vi, nothing),
(:vo, nothing),
(:fiu_vro, nothing),
(:wa, nothing),
(:war, nothing),
(:cy, nothing),
(:vls, nothing),
(:fy, nothing),
(:pnb, nothing),
(:wo, nothing),
(:wuu, nothing),
(:xh, nothing),
(:yi, nothing),
(:yo, nothing),
(:diq, nothing),
(:zea, nothing),
(:za, nothing),
(:zu, nothing),
]
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 2536 | abstract type GloVe{LANG} <: EmbeddingSystem{LANG} end
const glove_max_size = 4_000_000
function init(::Type{GloVe})
vectors = [("glove.6B",
"Trained on 6B tokens (Wikipedia 2014, Gigaword 5), 400K vocab, uncased. Includes 50d, 100d, 200d, & 300d vectors. 822 MB download.",
"617afb2fe6cbd085c235baf7a465b96f4112bd7f7ccb2b2cbd649fed9cbcf2fb",
["50d", "100d", "200d", "300d"]),
("glove.42B.300d",
"Trained on 42B tokens (Common Crawl), 1.9M vocab, uncased, Includes 300d vectors. 1.75 GB download.",
"03d5d7fa28e58762ace4b85fb71fe86a345ef0b5ff39f5390c14869da0fc1970",
[]),
("glove.840B.300d",
"Trained on 840B tokens (Common Crawl), 2.2M vocab, cased. Includes 300d vectors. 2.03 GB download.",
"c06db255e65095393609f19a4cfca20bf3a71e20cc53e892aafa490347e3849f",
[]),
("glove.twitter.27B",
"Trained on 2B tweets, 27B tokens, 1.2M vocab, uncased, 25d, 50d, 100d, & 200d vectors, 1.42 GB download.",
"792af52f795d1a32c9842a3240f5f3fe5e941a8ff6df5eb0f9d668092ebc019c",
["25d", "50d", "100d", "200d"])]
for (depname, description, sha, dims) in vectors
register(DataDep(depname,
"""
Pretrained GloVe word embeddings.
Website: https://nlp.stanford.edu/projects/glove/
Author: Jeffrey Pennington, Richard Socher, Christopher D. Manning
Year: 2014
Licence: Open Data Commons Public Domain Dedication and License (PDDL)
Paper:
Jeffrey Pennington, Richard Socher, Christopher D. Manning. GloVe: Global Vectors for Word Representation. In Proceedings of EMNLP, 2014.
$description
""",
"https://nlp.stanford.edu/data/$(depname).zip",
sha,
post_fetch_method = unpack))
if length(dims) >= 1
append!(language_files(GloVe{:en}), ["$(depname)/$(depname).$(dim).txt" for dim in dims])
else
push!(language_files(GloVe{:en}), "$depname/$depname.txt")
end
end
end
_load_embeddings(::Type{<:GloVe}, embedding_file::IO, max_vocab_size, keep_words) = _load_embeddings_csv(embedding_file, max_vocab_size, keep_words, ' ')
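# A sketch of typical use; the file number indexes into language_files(GloVe{:en}),
# so 1 selects glove.6B.50d here:
#   glove = load_embeddings(GloVe{:en}, 1; max_vocab_size=1_000)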
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 2051 | abstract type Word2Vec{LANG} <: EmbeddingSystem{LANG} end
function init(::Type{Word2Vec})
register(DataDep("word2vec 300d",
"""
Pretrained Word2Vec word embeddings
Website: https://code.google.com/archive/p/word2vec/
Author: Mikolov et al.
Year: 2013
Pre-trained vectors trained on part of Google News dataset (about 100 billion words). The model contains 300-dimensional vectors for 3 million words and phrases.
Paper:
Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
""",
"https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz",
"21c05ae916a67a4da59b1d006903355cced7de7da1e42bff9f0504198c748da8";
post_fetch_method=DataDeps.unpack
))
push!(language_files(Word2Vec{:en}), "word2vec 300d/GoogleNews-vectors-negative300.bin")
end
function _load_embeddings(::Type{<:Word2Vec}, fh::IO, max_vocab_size, keep_words)
local LL, indexed_words, index
vocab_size, vector_size = parse.(Int64, split(readline(fh)))
max_stored_vocab_size = min(max_vocab_size, vocab_size)
indexed_words = Vector{String}(undef, max_stored_vocab_size)
LL = Array{Float32}(undef, vector_size, max_stored_vocab_size)
index = 1
@inbounds for _ in 1:vocab_size
word = readuntil(fh, ' ', keep=false)
vector = Vector{Float32}(undef, vector_size)
@inbounds for i = 1:vector_size
vector[i] = read(fh, Float32)
end
if !occursin("_", word) && (length(keep_words)==0 || word in keep_words ) #If it isn't a phrase
LL[:,index]=vector./norm(vector)
indexed_words[index] = word
index+=1
if index>max_stored_vocab_size
break
end
end
end
LL = LL[:,1:index-1] #throw away unused columns
indexed_words = indexed_words[1:index-1] #throw away unused columns
LL, indexed_words
end
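# Format note: a word2vec binary file begins with an ASCII header
# "<vocab_size> <vector_size>\n"; each record is then the word, a space, and
# vector_size little-endian Float32s. Vectors are L2-normalised on load (each
# stored column has unit norm), and multi-word phrases (containing '_') are skipped.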
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | code | 5261 | using Embeddings
using Test
using DataDeps
"""
tempdatadeps(fun)
Run the function and delete all created datadeps afterwards
"""
function tempdatadeps(fun)
tempdir = mktempdir()
try
@info "sending all datadeps to $tempdir"
withenv("DATADEPS_LOAD_PATH"=>tempdir) do
fun()
end
finally
try
@info "removing $tempdir"
rm(tempdir, recursive=true, force=true)
catch err
@warn "Something went wrong with removing tempdir" tempdir exception=err
end
end
end
# uncomment the below to not use tempdatadeps (i.e. if not debugging)
# tempdatadeps(fun) = fun()
"""
@testset_nokeep_data
Use just like @testset,
but know that it deletes any downloaded data dependencies when it is done.
"""
macro testset_nokeep_data(name, expr)
quote
tempdatadeps() do
println("Testing ", $name)
@testset $name $expr
end
end |> esc
end
@testset_nokeep_data "Word2Vec" begin
embs_full = load_embeddings(Word2Vec)
@test size(embs_full.embeddings) == (300, length(embs_full.vocab))
embs_mini = load_embeddings(Word2Vec; max_vocab_size=100)
@test length(embs_mini.vocab)==100
@test embs_mini.embeddings == embs_full.embeddings[:, 1:100]
@test embs_mini.vocab == embs_full.vocab[1:100]
@test "for" ∈ embs_mini.vocab
embs_specific = load_embeddings(Word2Vec; keep_words=Set(["red", "green", "blue"]))
@test size(embs_specific.embeddings) == (300, 3)
@test Set(embs_specific.vocab) == Set(["red", "green", "blue"])
end
@testset "GloVe" begin
# just test one file from each of provided sets
tests = ["glove.6B/glove.6B.50d.txt",
#"glove.42B.300d/glove.42B.300d.txt", # These files are too slow to download
#"glove.840B.300d/glove.840B.300d.txt", # They are not that big but are on a slow server
#"glove.twitter.27B/glove.twitter.27B.25d.txt"
]
# read dimensionality from name (e.g. glove.6B.300d.txt -> 300)
dim(x) = parse(Int, match(r"\.([0-9]+)d\.", x).captures[1])
for file in tests
filename = split(file, Base.Filesystem.path_separator)[end]
@testset_nokeep_data "$filename" begin
@testset "Basic" begin
glove = load_embeddings(GloVe{:en}, @datadep_str(file), max_vocab_size=1000)
@test length(glove.vocab) == 1000
@test size(glove.embeddings) == (dim(file), 1000)
@test "for" ∈ glove.vocab
end
@testset "Specific" begin
colors = ["red", "green", "blue"]
glove_colors = load_embeddings(GloVe, @datadep_str(file), keep_words=colors)
@test length(glove_colors.vocab) == 3
@test size(glove_colors.embeddings) == (dim(file), 3)
@test Set(glove_colors.vocab) == Set(colors)
end
end
end
@testset "Custom" begin
# first 100 lines of official glove.6B.50d.txt
custom_glove_file = joinpath(@__DIR__, "data", "custom.glove.txt")
@testset "Basic" begin
glove = load_embeddings(GloVe, custom_glove_file)
@test length(glove.vocab) == 100
@test size(glove.embeddings) == (50, 100)
@test "the" ∈ glove.vocab
end
@testset "Specific" begin
punct = [".", ","]
glove_punct = load_embeddings(GloVe, custom_glove_file, keep_words=punct)
@test length(glove_punct.vocab) == 2
@test size(glove_punct.embeddings) == (50, 2)
@test Set(glove_punct.vocab) == Set(punct)
end
end
@testset "non-breaking-space" begin
# https://github.com/JuliaText/Embeddings.jl/issues/24
# This includes the text with nonbreaking space in the work
custom_glove_file = joinpath(@__DIR__, "data", "nbsp.glove.txt")
@testset "Basic" begin
glove = load_embeddings(GloVe, custom_glove_file)
@test ".\U00A0.\U00A0." ∈ glove.vocab
@test size(glove.embeddings) == (300, 3)
end
end
end
@testset "FastText" begin
@testset_nokeep_data "English 1" begin
@testset "Basic" begin
embs1 = load_embeddings(FastText_Text; max_vocab_size=100)
@test length(embs1.vocab)==100
@test size(embs1.embeddings) == (300, 100)
end
@testset "Specific" begin
embs_specific = load_embeddings(FastText_Text; keep_words=Set(["red", "green", "blue"]))
@test size(embs_specific.embeddings) == (300, 3)
@test Set(embs_specific.vocab) == Set(["red", "green", "blue"])
end
end
@testset_nokeep_data "French" begin
embs_fr = load_embeddings(FastText_Text{:fr}; max_vocab_size=100)
@test length(embs_fr.vocab)==100
@test size(embs_fr.embeddings) == (300, 100)
end
@testset_nokeep_data "English file number 2" begin
embs_specific = load_embeddings(FastText_Text, 2; keep_words=Set(["red", "green", "blue"]))
@test size(embs_specific.embeddings) == (300, 3)
@test Set(embs_specific.vocab) == Set(["red", "green", "blue"])
end
end
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | docs | 10224 | # Embeddings
[](https://travis-ci.org/JuliaText/Embeddings.jl)
[](https://codecov.io/gh/JuliaText/Embeddings.jl)
## Introduction
Word embeddings represent words as high-dimensional vectors, where every dimension corresponds to some latent feature [1]. This makes it possible to apply mathematical operations to words and thereby discover semantic relationships between them. E.g. when using [Word2Vec](https://code.google.com/archive/p/word2vec/) embeddings and cosine similarity between vectors, the calculation `vector(“Madrid”) - vector(“Spain”) + vector(“France”)` gives as an answer the vector for the word “Paris” [2].
Pretrained word embeddings are commonly used to initialize the bottom layer of a more advanced NLP method, such as an LSTM [3].
Simply summing the embeddings in a sentence or phrase can in and of itself be a surprisingly powerful way to represent the sentence/phrase, and can be used as an input to simple ML models like an SVM [4]; a short sketch of both ideas appears after the basic example below.
This package gives access to pretrained embeddings. In its current state, it includes the following word embeddings: [Word2Vec](https://code.google.com/archive/p/word2vec/) (English), [GloVe](https://nlp.stanford.edu/projects/glove/) (English), and [FastText](https://fasttext.cc/) (hundreds of languages).
### Installation
The package can be installed using the [julia package manager in the normal way](https://julialang.github.io/Pkg.jl/v1/managing-packages/#Adding-packages-1).
Open the REPL, press <kbd>]</kbd> to enter package mode, and then:
```julia
pkg> add Embeddings
```
There are no further steps.
Pretrained embeddings will be downloaded the first time you use them.
## Details
### `load_embeddings`
load_embeddings(EmbeddingSystem, [embedding_file|default_file_number])
load_embeddings(EmbeddingSystem{:lang}, [embedding_file|default_file_number])
Load the embeddings from an embedding file.
The embeddings should be of the type given by the Embedding system.
If the `embedding file` is not provided, a default embedding file will be used.
(It will be automatically installed if required).
EmbeddingSystems have a language type parameter.
For example `FastText_Text{:fr}` or `Word2Vec{:en}`, if that language parameter is not given it defaults to English.
(I am sorry for the poor state of the NLP field that many embedding formats are only available pretrained in English.)
Using this the correct default embedding file will be installed for that language.
For some languages and embedding systems there are multiple possible files.
You can check the list of them using, for example, `language_files(FastText_Text{:de})`.
The first is nominally the most popular, but if you want to default to another you can do so by setting the `default_file_number`.
### This returns an `EmbeddingTable` object.
This has 2 fields.
- `embeddings` is a matrix, each column is the embedding for a word.
- `vocab` is a vector of strings, ordered as per the columns of `embeddings`, such that the first string in vocab is the first column of `embeddings` etc
We do not include a method for getting the index of a column from a word.
This is trivial to define in code (`vocab2ind(vocab)=Dict(word=>ii for (ii,word) in enumerate(vocab))`),
and you might like to be doing this in a more consistent way, e.g. using [MLLabelUtils.jl](https://github.com/JuliaML/MLLabelUtils.jl),
or you might like to build a much faster Dict solution on top of [InternedStrings.jl](https://github.com/JuliaString/InternedStrings.jl)
## Configuration
This package is build on top of [DataDeps.jl](https://github.com/oxinabox/DataDeps.jl).
To configure, e.g., where downloaded files are saved to and read from (and to understand how that works),
see the DataDeps.jl readme.
## Examples
Load the package with
```
julia> using Embeddings
```
### Basic example
The following script loads up the embeddings
and defines a `Dict` to map from vocabulary word to index in the embedding matrix,
as well as a function that uses it to get an embedding vector.
This is a basic way to access the embedding for a word.
```
using Embeddings
const embtable = load_embeddings(Word2Vec) # or load_embeddings(FastText_Text) or ...
const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
function get_embedding(word)
ind = get_word_index[word]
emb = embtable.embeddings[:,ind]
return emb
end
```
This can be used like so:
```
julia> get_embedding("blue")
300-element Array{Float32,1}:
0.01540828
0.03409082
0.0882124
0.04680265
-0.03409082
...
```
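Building on this, summing embeddings gives the bag-of-words representation mentioned in the introduction, and cosine similarity recovers the Madrid/Spain/France analogy. The following is a minimal sketch (assuming these words are present in the loaded vocabulary); the exact similarity scores depend on which embedding set you loaded:
```
using LinearAlgebra: dot, norm

cossim(x, y) = dot(x, y) / (norm(x) * norm(y))

# bag-of-words representation of a phrase
phrase_emb = sum(get_embedding, ["the", "red", "house"])

# vector("Madrid") - vector("Spain") + vector("France") should land near vector("Paris")
target = get_embedding("Madrid") - get_embedding("Spain") + get_embedding("France")
cossim(target, get_embedding("Paris"))
```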
### Loading different Embeddings
load up the default word2vec embeddings:
```
julia> load_embeddings(Word2Vec)
Embeddings.EmbeddingTable{Array{Float32,2},Array{String,1}}(Float32[0.0673199 0.0529562 … -0.21143 0.0136373; -0.0534466 0.0654598 … -0.0087888 -0.0742876; … ; -0.00733469 0.0108946 … -0.00405157 0.0156112; -0.00514565 -0.0470722 … -0.0341579 0.0396559], String["</s>", "in", "for", "that", "is", "on", "##", "The", "with", "said" … "#-###-PA-PARKS", "Lackmeyer", "PERVEZ", "KUNDI", "Budhadeb", "Nautsch", "Antuane", "tricorne", "VISIONPAD", "RAFFAELE"])
```
Load up the first 100 embeddings from the default French FastText embeddings:
```
julia> load_embeddings(FastText_Text{:fr}; max_vocab_size=100)
Embeddings.EmbeddingTable{Array{Float32,2},Array{String,1}}(Float32[0.0058 -0.0842 … -0.062 -0.0687; 0.0478 -0.0388 … 0.0064 -0.339; … ; 0.023 -0.0106 … -0.022 -0.1581; 0.0378 0.0579 … 0.0417 0.0714], String[",", "de", ".", "</s>", "la", "et", ":", "à", "le", "\"" … "faire", "c'", "aussi", ">", "leur", "%", "si", "entre", "qu", "€"])
```
List all the default files for FastText in English:
```
julia> language_files(FastText_Text{:en}) # List all the possible default files for FastText in English
3-element Array{String,1}:
"FastText Common Crawl/crawl-300d-2M.vec"
"FastText Wiki News/wiki-news-300d-1M.vec"
"FastText en Wiki Text/wiki.en.vec"
```
From the second of those default files, load the embeddings just for "red", "green", and "blue":
```
julia> load_embeddings(FastText_Text{:en}, 2; keep_words=Set(["red", "green", "blue"]))
Embeddings.EmbeddingTable{Array{Float32,2},Array{String,1}}(Float32[-0.0054 0.0404 -0.0293; 0.0406 0.0621 0.0224; … ; 0.218 0.1542 0.2256; 0.1315 0.1528 0.1051], String["red", "green", "blue"])
```
List all the default files for GloVe in English:
```
julia> language_files(GloVe{:en})
10-element Array{String,1}:
"glove.6B/glove.6B.50d.txt"
"glove.6B/glove.6B.100d.txt"
"glove.6B/glove.6B.200d.txt"
"glove.6B/glove.6B.300d.txt"
"glove.42B.300d/glove.42B.300d.txt"
"glove.840B.300d/glove.840B.300d.txt"
"glove.twitter.27B/glove.twitter.27B.25d.txt"
"glove.twitter.27B/glove.twitter.27B.50d.txt"
"glove.twitter.27B/glove.twitter.27B.100d.txt"
"glove.twitter.27B/glove.twitter.27B.200d.txt"
```
Load the 200d GloVe embedding matrix for the top 10000 words trained on 6B words:
```
julia> glove = load_embeddings(GloVe{:en}, 3, max_vocab_size=10000)
Embeddings.EmbeddingTable{Array{Float32,2},Array{String,1}}(Float32[-0.071549 0.17651 … 0.19765 -0.22419; 0.093459 0.29208 … -0.31357 0.039311; … ; 0.030591 -0.23189 … -0.72917 0.49645; 0.25577 -0.10814 … 0.07403 0.41581], ["the", ",", ".", "of", "to", "and", "in", "a", "\"", "'s" … "slashed", "23-year", "communique", "hawk", "necessity", "petty", "stretching", "taxpayer", "resistant", "quinn"])
julia> size(glove)
(200, 10000)
```
## Contributing
Contributions, in the form of bug reports, pull requests, and additional documentation, are encouraged.
They can be made to the Github repository.
**All contributions and communications should abide by the [Julia Community Standards](https://julialang.org/community/standards/).**
The following software contributions would particularly be appreciated:
- Provide Hashstrings: I have only filled in the checksums for the FastText embeddings that I have downloaded, which is only a small fraction. If you are using an embedding file for a language that doesn't have its hashstring set, then DataDeps.jl will tell you the hashstring that needs to be added to the file. It is a quick and easy PR.
- Provide Implementations of other loaders: If you have implementations of code to load another format (e.g. Binary FastText) it would be great if you could contribute them. I know I have a few others kicking around somewhere.
Software contributions should follow the prevailing style within the code-base.
If your pull request (or issues) are not getting responses within a few days do not hesitate to "bump" them,
by posting a comment such as "Any update on the status of this?".
Sometimes Github notifications get lost.
## Support
Feel free to ask for help on the [Julia Discourse forum](https://discourse.julialang.org/),
or in the `#natural-language` channel on julia-slack. (Which you can [join here](https://slackinvite.julialang.org/)).
You can also raise issues in this repository to request improvements to the documentation.
## Sources
[1]: [Turian, Joseph, Lev Ratinov, and Yoshua Bengio. "Word representations: a simple and general method for semi-supervised learning." Proceedings of the 48th annual meeting of the association for computational linguistics. Association for Computational Linguistics, 2010.](https://www.aclweb.org/anthology/P10-1040/)
[2]: [Mikolov, Tomas, et al. "Distributed representations of words and phrases and their compositionality." Advances in neural information processing systems. 2013.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf)
[3]: [White, Lyndon et al. Neural Representations of Natural Language. Springer: Studies in Computational Intelligence. 2018.](https://www.springer.com/us/book/9789811300615)
[4]: [White, Lyndon. On the surprising capacity of linear combinations of embeddings for natural language processing.
Doctorial Thesis, The University of Western Australia. 2019](https://research-repository.uwa.edu.au/en/publications/on-the-surprising-capacity-of-linear-combinations-of-embeddings-f)
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
|
[
"MIT"
] | 0.4.6 | eb8ab2e2d972100fc42a86845076e01fdc0d55b4 | docs | 2652 | ---
title: 'Embeddings.jl: easy access to pretrained word embeddings from Julia'
tags:
- julialang
- opendata
- natural language processing
- NLP
- word embeddings
- machine learning
authors:
- name: Lyndon White
orcid: 0000-0003-1386-1646
affiliation: 1
- name: David Ellison
orcid: 0000-0002-2431-3963
affiliation: 2
affiliations:
- name: The University of Western Australia
index: 1
- name: None
index: 2
date: 30 Aug 2018
bibliography: paper.bib
---
# Summary
Embeddings.jl is a tool to help users of the Julia programming language [@Julia] make use of pretrained word embeddings for natural language processing.
Word embeddings are a very important feature representation in natural language processing.
The use of embeddings pretrained on very large corpora can be seen as a form of transfer learning.
It allows knowledge of lexical semantics derived from the distributional hypothesis-- that words occurring in similar contexts have similar meaning--
to be injected into models which may have only limited amounts of supervised, task oriented training data.
Many creators of word embedding methods have generously made sets of pretrained word representations publicly available.
Embeddings.jl exposes these as a standard matrix of numbers and a corresponding array of strings.
This lets Julia programs use word embeddings easily, either on their own or alongside machine learning packages such as Flux [@flux].
In such deep learning packages, it is common to use word embeddings as the input layer of an LSTM (long short-term memory) network or other machine learning model,
where they may be kept invariant or used as initialization for fine-tuning on the supervised task.
They can be summed to represent a bag of words, concatenated to form a matrix representation of a sentence or document, or used otherwise in a wide variety of natural language processing tasks.
Embeddings.jl makes use of DataDeps.jl [@DataDeps],
to allow for convenient automatic downloading of the data when and if required.
It also uses the DataDeps.jl prompt to ensure the user of the embeddings has full knowledge of the original source of the data, and which papers to cite etc.
It currently provides access to:
- multiple sets of word2vec embeddings [@word2vec] for English
- multiple sets of GLoVE embeddings [@glove] for English
- multiple sets of FastText embeddings [@fasttext157lang; @fasttext] for several hundred languages

It is anticipated that, as more pretrained embeddings are made available for more languages and with newer methods,
the Embeddings.jl package will be updated to support them.
# References
| Embeddings | https://github.com/JuliaText/Embeddings.jl.git |
["MIT"] | 0.4.0 | fc6c301b221ae65a75ac85cc7fe1fc52bfd0a338 | code | 102 |
using Documenter, RandomMatrixDistributions
makedocs(
    sitename = "Random Matrix Distributions"
)
| RandomMatrixDistributions | https://github.com/damian-t-p/RandomMatrixDistributions.jl.git |
["MIT"] | 0.4.0 | fc6c301b221ae65a75ac85cc7fe1fc52bfd0a338 | code | 142 |
export eigmax
# Compute the largest eigenvalue of a real symmetric banded matrix with
# KrylovKit's iterative eigensolver, avoiding a full dense diagonalization.
function eigmax(A::Symmetric{T, <:BandedMatrix{T}}) where T <: Real
    maximum(KrylovKit.eigsolve(A, issymmetric=true)[1])
end
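
# Usage sketch (not part of the original file): this method handles banded
# reductions such as those produced for multiple spikes, e.g.
#   eigmax(randreduced(SpikedWigner(1, 500, [1.5, 2.0])))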
| RandomMatrixDistributions | https://github.com/damian-t-p/RandomMatrixDistributions.jl.git |
["MIT"] | 0.4.0 | fc6c301b221ae65a75ac85cc7fe1fc52bfd0a338 | code | 1425 |
export Jacobi
"""
Jacobi Distribution
If E ~ Wishart<sub>p</sub>(I, n<sub>1</sub>) and H ~ Wishart<sub>p</sub>(I, n<sub>2</sub>) are independent,
then E(E + H)<sup>-1</sup> has Jacobi(n<sub>1</sub>, n<sub>2</sub>, p) distribution.
If λ<sub>i</sub> are the eigenvalues of EH<sup>-1</sup> and μ<sub>i</sub> are the Jacobi eigenvalues,
then μ<sub>i</sub> = λ<sub>i</sub>/(1 + λ<sub>i</sub>) and λ<sub>i</sub> = μ<sub>i</sub>/(1 - μ<sub>i</sub>)
"""
struct Jacobi <: ContinuousMatrixDistribution
beta::Integer
n1::Integer
n2::Integer
p::Integer
end
function randreduced(d::Jacobi)
    randtridiagonal(d)
end

function randtridiagonal(d::Jacobi)
    # Tridiagonal model for the beta-Jacobi ensemble: the matrix entries are
    # built from independent Beta-distributed random variables.
    a = d.beta/2 * (d.n1 - d.p + 1) - 1
    b = d.beta/2 * (d.n2 - d.p + 1) - 1

    alphaeven = [2*rand(Beta((2*d.p - k - 2)*d.beta/4 + a + 1, (2*d.p - k - 2)*d.beta/4 + b + 1)) - 1
                 for k in 0:(2*d.p - 2) if k % 2 == 0]
    alphaodd = [2*rand(Beta((2*d.p - k - 1)*d.beta/4, (2*d.p - k - 3)*d.beta/4 + a + b + 2)) - 1
                for k in 0:(2*d.p - 2) if k % 2 == 1]

    alphaevenleft = [alphaeven; 0]
    alphaevenright = [0; alphaeven]
    alphaodd = [-1; alphaodd; -1]

    dv = (1 .- alphaodd) .* alphaevenleft - (1 .+ alphaodd) .* alphaevenright
    dv = dv[1:(end - 1)]

    ev = sqrt.((1 .- alphaodd[1:(end - 1)]) .* (1 .- alphaeven.^2) .* (1 .+ alphaodd[2:end]))
    ev = ev[1:(end - 1)]

    # Shift and rescale so the eigenvalues lie in [0, 1]
    (SymTridiagonal(dv, ev) + 2I)/4
end
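
# Usage sketch (not part of the original file):
#   d = Jacobi(1, 50, 50, 20)  # beta = 1, n1 = n2 = 50, p = 20
#   randeigvals(d)             # 20 eigenvalues, each in [0, 1]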
| RandomMatrixDistributions | https://github.com/damian-t-p/RandomMatrixDistributions.jl.git |
["MIT"] | 0.4.0 | fc6c301b221ae65a75ac85cc7fe1fc52bfd0a338 | code | 1080 |
module RandomMatrixDistributions
#import RandomMatrices

using Random, Distributions
using LinearAlgebra
using BandedMatrices
using KrylovKit

import LinearAlgebra: eigmax
import Distributions: minimum, maximum, quantile
import Distributions: ContinuousUnivariateDistribution,
    ContinuousMatrixDistribution,
    cdf, pdf, entropy, insupport, mean, median, modes, kurtosis, skewness, std, var, moment

# Note: the original export list was missing a comma after `quantile`,
# so the names on the following lines were never actually exported.
export randeigvals, randeigstat,
    minimum, maximum, quantile,
    cdf, pdf, entropy, insupport, mean, median, modes, kurtosis, skewness, std, var, moment,
    eigmax
# Generic eigenvalue sampling functions

# Sample the eigenvalues of a single draw from d, using its reduced
# (tridiagonal or banded) representation for efficiency.
function randeigvals(d)
    eigvals(randreduced(d))
end

# Sample nsims independent values of the eigenvalue statistic eigstat
# (e.g. eigmax) from draws of d.
function randeigstat(d, eigstat, nsims::Int)
    statvals = Array{Float64, 1}(undef, nsims)
    for i in 1:nsims
        statvals[i] = eigstat(randreduced(d))
    end
    statvals
end
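
# Usage sketch (not part of the original file):
#   randeigstat(Jacobi(1, 50, 50, 20), eigmax, 100)  # 100 draws of the largest eigenvalue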
include("BandedHelpers.jl")
include("SpikedWigner.jl")
include("SpikedWishart.jl")
include("Jacobi.jl")
include("densities/MarchenkoPastur.jl")
include("densities/TracyWidom.jl")
include("densities/Wachter.jl")
end
| RandomMatrixDistributions | https://github.com/damian-t-p/RandomMatrixDistributions.jl.git |
["MIT"] | 0.4.0 | fc6c301b221ae65a75ac85cc7fe1fc52bfd0a338 | code | 1812 |
export SpikedWigner
struct SpikedWigner <: ContinuousMatrixDistribution
    beta::Integer
    n::Integer
    spikes::Array{Float64, 1}
    scaled::Bool
end

SpikedWigner(beta, n, spikes; scaled=false) = SpikedWigner(beta, n, spikes, scaled)
SpikedWigner(beta, n; scaled=false) = SpikedWigner(beta, n, [], scaled)
# SAMPLERS

function scaling(d::SpikedWigner)
    d.scaled ? 1/sqrt(d.n) : 1
end

function randreduced(d::SpikedWigner)
    # With at most one spike the tridiagonal model applies;
    # with more spikes, fall back to a banded model.
    if length(d.spikes) <= 1
        return randtridiagonal(d)
    else
        return randbanded(d)
    end
end

function randtridiagonal(d::SpikedWigner)
    # diagonal and superdiagonal of B
    dv = rand(Normal(0, sqrt(2)), d.n)/sqrt(d.beta)
    ev = [rand(Chi(d.beta*(d.n - k))) for k in 1:(d.n - 1)]/sqrt(d.beta)

    # a single spike only perturbs the top-left diagonal entry
    if length(d.spikes) == 1
        dv[1] += d.spikes[1] * sqrt(d.n)
    end

    SymTridiagonal(dv, ev) * scaling(d)
end
function randbanded(d::SpikedWigner)
    @assert d.beta in [1, 2]
    r = length(d.spikes)

    if d.beta == 1
        U = BandedMatrix{Float64}(undef, (d.n, d.n), (r, r))
    elseif d.beta == 2
        U = BandedMatrix{Complex{Float64}}(undef, (d.n, d.n), (r, r))
    end

    dv = randn(d.n) * sqrt(2/d.beta)
    @. dv[1:r] += d.spikes * sqrt(d.n)
    U[band(0)] .= dv

    for k = 1:(r - 1)
        if d.beta == 1
            ev = randn(d.n - k)
        elseif d.beta == 2
            ev = (randn(d.n - k) + im * randn(d.n - k))/sqrt(2)
        end
        U[band(k)] .= ev
    end

    U[band(r)] .= [rand(Chi(d.beta*(d.n - k)))/sqrt(d.beta) for k in r:(d.n - 1)]

    if d.beta == 1
        Symmetric(U) * scaling(d)
    elseif d.beta == 2
        # The conjugate transpose is done like this rather than with ' because
        # U'U is not automatically a banded matrix
        Hermitian(U) * scaling(d)
    end
end
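
# Usage sketch (not part of the original file):
#   d = SpikedWigner(1, 500, [2.0], scaled=true)
#   randeigvals(d)  # with a spike above 1, the top eigenvalue separates from the bulk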
| RandomMatrixDistributions | https://github.com/damian-t-p/RandomMatrixDistributions.jl.git |