licenses (sequence, length 1 to 3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 value) | type (string, 2 values) | size (string, length 2 to 8) | text (string, length 25 to 67.1M) | package_name (string, length 2 to 41) | repo (string, length 33 to 86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 7766 | module sphere_modes_serial
using FinEtools
using FinEtools.AlgoBaseModule: matrix_blocked
using FinEtoolsAcoustics
using FinEtoolsMultithreading.Exports
using FinEtools.MeshExportModule
using LinearAlgebra
using Arpack: eigs
using DataDrop
# For the data
# rho = 1.2*phun("kg/m^3");# mass density
# c = 340.0*phun("m/s");# sound speed
# bulk = c^2*rho;
# R = 1000.0*phun("mm");# radius of the sphere
# the reference
# @article{GAO2013914,
# title = {Eigenvalue analysis for acoustic problem in 3D by boundary element method with the block Sakurai–Sugiura method},
# journal = {Engineering Analysis with Boundary Elements},
# volume = {37},
# number = {6},
# pages = {914-923},
# year = {2013},
# issn = {0955-7997},
# doi = {https://doi.org/10.1016/j.enganabound.2013.03.015},
# url = {https://www.sciencedirect.com/science/article/pii/S0955799713000714},
# author = {Haifeng Gao and Toshiro Matsumoto and Toru Takahashi and Hiroshi Isakari},
# keywords = {Eigenvalues, Acoustic, The block SS method, Boundary element method, Burton–Miller's method},
# abstract = {This paper presents accurate numerical solutions for nonlinear eigenvalue analysis of three-dimensional acoustic cavities by boundary element method (BEM). To solve the nonlinear eigenvalue problem (NEP) formulated by BEM, we employ a contour integral method, called block Sakurai–Sugiura (SS) method, by which the NEP is converted to a standard linear eigenvalue problem and the dimension of eigenspace is reduced. The block version adopted in present work can also extract eigenvalues whose multiplicity is larger than one, but for the complex connected region which includes a internal closed boundary, the methodology yields fictitious eigenvalues. The application of the technique is demonstrated through the eigenvalue calculation of sphere with unique homogenous boundary conditions, cube with mixed boundary conditions and a complex connected region formed by cubic boundary and spherical boundary, however, the fictitious eigenvalues can be identified by Burton–Miller's method. These numerical results are supported by appropriate convergence study and comparisons with close form.}
# }
# shows the wave numbers in Table 1.
#=
The multiplicity of the Dirichlet eigenvalues.
Wavenumber*R Multiplicity
3.14159, 6.28319, 9.42478 1
4.49340, 7.72525, 10.90412 3
5.76346, 9.09501, 12.32294 5
6.98793, 10.41711, 13.69802 7
8.18256, 11.70491, 15.03966 9
9.35581, 12.96653, 16.35471 11
=#
# Sphere with Dirichlet boundary conditions: modal analysis.
# Sphere of radius $(R), in WATER.
# Tetrahedral T4 mesh.
# Exact fundamental frequency: $(c/2/R)
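# A minimal sketch (not part of the original file) of how the reference
# frequencies follow from the table above: f = (k*R) * c / (2*pi*R), with the
# water properties and the radius assumed to match those in `run` below.
function reference_frequencies_sketch()
c = 1500.0 # sound speed [m/s] (assumed, as in `run`)
R = 0.5 # sphere radius [m] (assumed, as in `run`)
kR = [3.14159, 4.49340, 5.76346] # first column of the table above
return kR .* c ./ (2 * pi * R) # frequencies [Hz]; the first is approximately 1500 Hz
end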
function run(N=2, assembly_only=false)
rho = 1000 * phun("kg/m^3")# mass density
c = 1500.0 * phun("m/s")# sound speed
bulk = c^2 * rho
R = 500.0 * phun("mm")# radius of the sphere
tolerance = R / 1e3
neigvs = 7
wn_table = [
([3.14159, 6.28319, 9.42478], 1),
([4.49340, 7.72525, 10.90412], 3),
([5.76346, 9.09501, 12.32294], 5),
([6.98793, 10.41711, 13.69802], 7),
([8.18256, 11.70491, 15.03966], 9),
([9.35581, 12.96653, 16.35471], 11),
]
# @info "Reference frequencies"
# for i in axes(wn_table, 1)
# fq = wn_table[i][1] ./ R .* c / (2 * pi)
# @info "$(fq), multiplicity $(wn_table[i][2])"
# end
fens, fes = H8sphere(R, N)
renumb(c) = c[[1, 4, 3, 2, 5, 8, 7, 6]]
fens1, fes1 = mirrormesh(
fens,
fes,
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, -1.0, 0.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
@info "$(count(fens)) nodes"
@info "$(count(fes)) elements"
geom = NodalField(fens.xyz)
P = NodalField(zeros(size(fens.xyz, 1), 1))
bfes = meshboundary(fes)
setebc!(P, connectednodes(bfes))
numberdofs!(P)
material = MatAcoustFluid(bulk, rho)
femm = FEMMAcoust(IntegDomain(fes, GaussRule(3, 2)), material)
mass_times = Dict{String,Vector{Float64}}()
t0 = time()
t1 = time()
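# Two-phase timing: with `setnomatrixresult` set to true, the call below only
# accumulates the element contributions in COO (triplet) buffers; the flag is
# later flipped back so that the conversion to the CSC format performed by
# `makematrix!` can be timed separately.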
assmblr = SysmatAssemblerSparse(0.0)
setnomatrixresult(assmblr, true)
acousticmass(femm, assmblr, geom, P)
mass_times["AssembleCOOMass"] = [time() - t1]
println("Assemble mass = $(mass_times["AssembleCOOMass"]) [s]")
t1 = time()
setnomatrixresult(assmblr, false)
Ma = makematrix!(assmblr)
mass_times["ConvertToCSCMass"] = [time() - t1]
println("Convert to CSC = $(mass_times["ConvertToCSCMass"]) [s]")
mass_times["TotalAssemblyMass"] = [time() - t0]
println("Assembly MASS total = $(mass_times["TotalAssemblyMass"]) [s]")
stiffness_times = Dict{String,Vector{Float64}}()
t0 = time()
t1 = time()
assmblr = SysmatAssemblerSparse(0.0)
setnomatrixresult(assmblr, true)
acousticstiffness(femm, assmblr, geom, P)
stiffness_times["AssembleCOOStiffness"] = [time() - t1]
println("Assemble stiffness = $(stiffness_times["AssembleCOOStiffness"]) [s]")
t1 = time()
setnomatrixresult(assmblr, false)
Ka = makematrix!(assmblr)
stiffness_times["ConvertToCSCStiffness"] = [time() - t1]
println("Convert to CSC = $(stiffness_times["ConvertToCSCStiffness"]) [s]")
stiffness_times["TotalAssemblyStiffness"] = [time() - t0]
println("Assembly STIFFNESS total = $(stiffness_times["TotalAssemblyStiffness"]) [s]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "sphere_modes_serial-timing-stiffness"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
stiffness_times[k] = cat(stiffness_times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, stiffness_times)
n = DataDrop.with_extension(joinpath("$(N)", "sphere_modes_serial-timing-mass"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
mass_times[k] = cat(mass_times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, mass_times)
return
end
Ma_ff = matrix_blocked(Ma, nfreedofs(P), nfreedofs(P))[:ff]
Ka_ff = matrix_blocked(Ka, nfreedofs(P), nfreedofs(P))[:ff]
d, v, nconv = eigs(Ka_ff, Ma_ff; nev=neigvs, which=:SM, explicittransform=:none)
v = real.(v)
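# The generalized eigenvalues d are the squared angular frequencies, so the
# frequencies in hertz follow as sqrt(d)/(2*pi).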
fs = real(sqrt.(complex(d))) ./ (2 * pi)
@info("Frequencies (1:5): $(fs[1:5]) [Hz]")
@info "Reference frequencies"
for i in axes(wn_table, 1)
fq = wn_table[i][1] ./ R .* c / (2 * pi)
@info "$(fq), multiplicity $(wn_table[i][2])"
end
ks = (2 * pi) .* fs ./ c ./ phun("m")
# @info("Wavenumbers: $(ks) [m]")
File = "sphere_modes_serial.vtk"
scalarllist = Any[]
for n = [2, 5, 7]
scattersysvec!(P, v[:, n])
push!(scalarllist, ("Pressure_mode_$n", deepcopy(P.values)))
end
vtkexportmesh(
File,
connasarray(fes),
geom.values,
FinEtools.MeshExportModule.VTK.H8;
scalars=scalarllist,
)
# @async run(`"paraview.exe" $File`)
true
end # run
end # module sphere_modes_serial
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 4933 | module Poisson_parallel
using FinEtools
using FinEtools.AlgoBaseModule: solve_blocked!, matrix_blocked, vector_blocked
using FinEtools.AssemblyModule
using FinEtools.MeshExportModule
using FinEtoolsHeatDiff
using ChunkSplitters
using LinearAlgebra
using DataDrop
using SparseArrays
using SymRCM
using FinEtoolsMultithreading
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose,
parallel_matrix_assembly!, SysmatAssemblerSparsePatt, SysmatAssemblerSparsePattwLookup
using ECLGraphColor
const A = 1.0 # dimension of the domain (length of the side of the square)
include("make_model.jl")
function _run(make_model, N, ntasks, assembly_only)
times = Dict{String,Vector{Float64}}()
thermal_conductivity = [i == j ? one(Float64) : zero(Float64) for i = 1:3, j = 1:3] # conductivity matrix
Q = -6.0 # internal heat generation rate
function getsource!(forceout, XYZ, tangents, feid, qpid)
forceout[1] = Q #heat source
end
tempf(x) = (1.0 .+ x[:, 1] .^ 2 + 2.0 .* x[:, 2] .^ 2) # the exact distribution of temperature; note that Q = -6 is consistent with it, since the Laplacian of tempf is 2 + 4 = 6
fens, fes, ir = make_model(N)
geom = NodalField(fens.xyz)
Temp = NodalField(zeros(size(fens.xyz, 1), 1))
Tolerance = 1.0 / count(fes) / 100
l1 = selectnode(fens; box = [0.0 0.0 0.0 A 0.0 A], inflate = Tolerance)
l2 = selectnode(fens; box = [A A 0.0 A 0.0 A], inflate = Tolerance)
l3 = selectnode(fens; box = [0.0 A 0.0 0.0 0.0 A], inflate = Tolerance)
l4 = selectnode(fens; box = [0.0 A A A 0.0 A], inflate = Tolerance)
l5 = selectnode(fens; box = [0.0 A 0.0 A 0.0 0.0], inflate = Tolerance)
l6 = selectnode(fens; box = [0.0 A 0.0 A A A], inflate = Tolerance)
List = vcat(l1, l2, l3, l4, l5, l6)
setebc!(Temp, List, true, 1, tempf(geom.values[List, :])[:])
numberdofs!(Temp)
@info "$(count(fens)) nodes"
@info "$(count(fes)) elements"
material = MatHeatDiff(thermal_conductivity)
println("Conductivity")
t1 = time()
n2e = FENodeToFEMapThr(fes, nnodes(Temp))
times["FENodeToFEMap"] = [time() - t1]
println("Make node to element map = $(times["FENodeToFEMap"]) [s]")
GC.enable(false)
AT = SysmatAssemblerSparsePatt
# AT = SysmatAssemblerSparsePattwLookup
t0 = time()
t1 = time()
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
times["FElemToNeighborsMap"] = [time() - t1]
println(" Make element to neighbor map = $(times["FElemToNeighborsMap"]) [s]")
t1 = time()
coloring = FinEtoolsMultithreading.element_coloring(fes, e2e, ntasks)
times["ElementColors"] = [time() - t1]
println(" Compute element colors = $(times["ElementColors"]) [s]")
t1 = time()
n2n = FENodeToNeighborsMap(n2e, fes)
times["FENodeToNeighborsMap"] = [time() - t1]
println(" Make node to neighbor map = $(times["FENodeToNeighborsMap"]) [s]")
t1 = time()
K_pattern = csc_symmetric_pattern(Temp.dofnums, nalldofs(Temp), n2n, eltype(Temp.values))
times["SparsityPattern"] = [time() - t1]
println(" Sparsity pattern = $(times["SparsityPattern"]) [s]")
t1 = time()
decomposition = decompose(fes, coloring,
(fessubset) -> FEMMHeatDiff(IntegDomain(fessubset, ir), material),
ntasks)
times["DomainDecomposition"] = [time() - t1]
println(" Domain decomposition = $(times["DomainDecomposition"]) [s]")
t1 = time()
K = parallel_matrix_assembly!(
AT(K_pattern),
decomposition,
(femm, assmblr) -> conductivity(femm, assmblr, geom, Temp),
)
times["AssemblyOfValues"] = [time() - t1]
println(" Add to matrix = $(times["AssemblyOfValues"]) [s]")
times["TotalAssemblyStiffness"] = [time() - t0]
println("Assembly total = $(times["TotalAssemblyStiffness"]) [s]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "Poisson_parallel-timing-stiffness"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, times)
return
end
femm = FEMMHeatDiff(IntegDomain(fes, ir), material)
println("Internal heat generation")
fi = ForceIntensity(Float64[Q])
F1 = distribloads(femm, geom, Temp, fi, 3)
println("Solution of the system")
t1 = time()
solve_blocked!(Temp, K, F1)
times["Solution"] = [time() - t0]
println("Solution = $(times["Solution"]) [s]")
Error = 0.0
for k in axes(fens.xyz, 1)
Error = Error + abs.(Temp.values[k, 1] - tempf(reshape(fens.xyz[k, :], (1, 3)))[1])
end
println("Error =$Error")
true
end # _run
function run(N = 25, ntasks=Threads.nthreads(), assembly_only = false)
return _run(make_model, N, ntasks, assembly_only)
end
end # module Poisson_parallel
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 3409 | module Poisson_serial
using FinEtools
using FinEtools.AlgoBaseModule: solve_blocked!, matrix_blocked, vector_blocked
using FinEtools.AssemblyModule
using FinEtools.MeshExportModule
using FinEtoolsHeatDiff
using ChunkSplitters
using LinearAlgebra
using DataDrop
using SparseArrays
using SymRCM
const A = 1.0 # dimension of the domain (length of the side of the square)
include("make_model.jl")
function _run(make_model, N, assembly_only)
times = Dict{String,Vector{Float64}}()
thermal_conductivity = [i == j ? one(Float64) : zero(Float64) for i = 1:3, j = 1:3] # conductivity matrix
Q = -6.0 # internal heat generation rate
function getsource!(forceout, XYZ, tangents, feid, qpid)
forceout[1] = Q #heat source
end
tempf(x) = (1.0 .+ x[:, 1] .^ 2 + 2.0 .* x[:, 2] .^ 2) # the exact distribution of temperature; note that Q = -6 is consistent with it, since the Laplacian of tempf is 2 + 4 = 6
fens, fes, ir = make_model(N)
geom = NodalField(fens.xyz)
Temp = NodalField(zeros(size(fens.xyz, 1), 1))
Tolerance = 1.0 / count(fes) / 100
l1 = selectnode(fens; box = [0.0 0.0 0.0 A 0.0 A], inflate = Tolerance)
l2 = selectnode(fens; box = [A A 0.0 A 0.0 A], inflate = Tolerance)
l3 = selectnode(fens; box = [0.0 A 0.0 0.0 0.0 A], inflate = Tolerance)
l4 = selectnode(fens; box = [0.0 A A A 0.0 A], inflate = Tolerance)
l5 = selectnode(fens; box = [0.0 A 0.0 A 0.0 0.0], inflate = Tolerance)
l6 = selectnode(fens; box = [0.0 A 0.0 A A A], inflate = Tolerance)
List = vcat(l1, l2, l3, l4, l5, l6)
setebc!(Temp, List, true, 1, tempf(geom.values[List, :])[:])
numberdofs!(Temp)
@info "$(count(fens)) nodes"
@info "$(count(fes)) elements"
material = MatHeatDiff(thermal_conductivity)
femm = FEMMHeatDiff(IntegDomain(fes, ir), material)
println("Conductivity")
t0 = time()
t1 = time()
assmblr = SysmatAssemblerSparse(0.0)
setnomatrixresult(assmblr, true)
conductivity(femm, assmblr, geom, Temp)
times["AssembleCOOStiffness"] = [time() - t1]
println("Assemble stiffness = $(times["AssembleCOOStiffness"]) [s]")
t1 = time()
setnomatrixresult(assmblr, false)
K = makematrix!(assmblr)
times["ConvertToCSCStiffness"] = [time() - t1]
println("Convert to CSC = $(times["ConvertToCSCStiffness"]) [s]")
times["TotalAssemblyStiffness"] = [time() - t0]
println("Assembly total = $(times["TotalAssemblyStiffness"]) [s]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "Poisson_serial-timing-stiffness"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, times)
return
end
println("Internal heat generation")
fi = ForceIntensity(Float64[Q])
F1 = distribloads(femm, geom, Temp, fi, 3)
println("Solution of the system")
t1 = time()
solve_blocked!(Temp, K, F1)
times["Solution"] = [time() - t0]
println("Solution = $(times["Solution"]) [s]")
Error = 0.0
for k in axes(fens.xyz, 1)
Error = Error + abs.(Temp.values[k, 1] - tempf(reshape(fens.xyz[k, :], (1, 3)))[1])
end
println("Error =$Error")
true
end # _run
function run(N = 25, assembly_only = false)
return _run(make_model, N, assembly_only)
end
end # module Poisson_serial
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 312 |
function make_model_H20(N = 25)
fens, fes = H20block(A, A, A, N, N, N)
ir = GaussRule(3, 3)
return fens, fes, ir
end
function make_model_T10(N = 25)
fens, fes = T10block(A, A, A, N, N, N)
ir = TetRule(5)
return fens, fes, ir
end
# Select the element type by keeping the desired assignment last:
# make_model = make_model_H20
make_model = make_model_T10
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 976 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need at least one arguments: N (mesh subdivision)")
end
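# Example invocation (assumed; substitute the actual script name):
# julia -t 8 <this script> 25 8 true
# i.e. N = 25, ntasks = 8, assembly_only = true.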
using Pkg
# Pkg.add("ThreadPinning")
Pkg.activate(".")
Pkg.instantiate()
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(1)
# Turn off thread pinning because it seems to interfere with the graph coloring library.
# using ThreadPinning
# ThreadPinning.Prefs.set_os_warning(false)
# pinthreads(:cores)
N = parse(Int, ARGS[1])
ntasks = Threads.nthreads()
if length(ARGS) > 1
ntasks = parse(Int, ARGS[2])
end
assembly_only = true
if length(ARGS) > 2
assembly_only = parse(Bool, ARGS[3])
end
include(raw"Poisson_parallel.jl")
using .Poisson_parallel;
using FinEtoolsMultithreading
Pkg.status("FinEtoolsMultithreading")
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): nthreads=$(Threads.nthreads()), ntasks=$(ntasks), N=$(N)"
Poisson_parallel.run(N, ntasks, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 483 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need one argument: N (mesh subdivision)")
end
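# Example invocation (assumed; substitute the actual script name):
# julia <this script> 25 true
# i.e. N = 25, assembly_only = true.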
using Pkg
Pkg.activate(".")
Pkg.instantiate()
N = parse(Int, ARGS[1])
assembly_only = true
if length(ARGS) > 1
assembly_only = parse(Bool, ARGS[2])
end
include(raw"Poisson_serial.jl")
using .Poisson_serial;
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): N=$(N)"
Poisson_serial.run(N, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 994 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need at least one arguments: N (mesh subdivision)")
end
using Pkg
# Pkg.add("ThreadPinning")
Pkg.activate(".")
Pkg.instantiate()
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(1)
# Turn off thread pinning because it seems to interfere with the graph coloring library.
# using ThreadPinning
# ThreadPinning.Prefs.set_os_warning(false)
# pinthreads(:cores)
N = parse(Int, ARGS[1])
ntasks = Threads.nthreads()
if length(ARGS) > 1
ntasks = parse(Int, ARGS[2])
end
assembly_only = true
if length(ARGS) > 2
assembly_only = parse(Bool, ARGS[3])
end
include(raw"stubby_corbel_parallel.jl")
using .stubby_corbel_parallel;
using FinEtoolsMultithreading
Pkg.status("FinEtoolsMultithreading")
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): nthreads=$(Threads.nthreads()), ntasks=$(ntasks), N=$(N)"
stubby_corbel_parallel.run(N, ntasks, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 501 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need one argument: N (mesh subdivision)")
end
using Pkg
Pkg.activate(".")
Pkg.instantiate()
N = parse(Int, ARGS[1])
assembly_only = true
if length(ARGS) > 1
assembly_only = parse(Bool, ARGS[2])
end
include(raw"stubby_corbel_serial.jl")
using .stubby_corbel_serial;
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): N=$(N)"
stubby_corbel_serial.run(N, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 5889 | module stubby_corbel_parallel
using FinEtools
using FinEtools.AlgoBaseModule: evalconvergencestudy, solve_blocked!
using FinEtoolsDeforLinear
using FinEtoolsDeforLinear.AlgoDeforLinearModule:
linearstatics, exportstresselementwise, exportstress
using Statistics: mean
using LinearAlgebra
using SparseArrays
using SuiteSparse
using Printf
using SymRCM
using Random
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose,
parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using FinEtoolsMultithreading
using ECLGraphColor
using DataDrop
# Isotropic material
E = 1000.0
nu = 0.3 # Compressible material
W = 25.0
H = 50.0
L = 50.0
htol = minimum([L, H, W]) / 1000
uzex = -2.16e-01
magn = 0.2 * (-12.6) / 4
Force = magn * W * H * 2
CTE = 0.0
function getfrcL!(forceout, XYZ, tangents, feid, qpid)
copyto!(forceout, [0.0; 0.0; magn])
end
function run(N = 10, ntasks = Threads.nthreads(), assembly_only = false)
times = Dict{String, Vector{Float64}}()
t1 = time()
fens, fes = H8block(W, L, H, N, 2 * N, 2 * N)
times["MeshGeneration"] = [time() - t1]
println("Number of elements: $(count(fes))")
bfes = meshboundary(fes)
# end cross-section surface for the shear loading
sectionL = selectelem(fens, bfes; facing = true, direction = [0.0 +1.0 0.0])
# 0 cross-section surface for the reactions
section0 = selectelem(fens, bfes; facing = true, direction = [0.0 -1.0 0.0])
# lateral surface for the symmetry boundary condition
sectionlateral = selectelem(fens, bfes; facing = true, direction = [1.0 0.0 0.0])
MR = DeforModelRed3D
material = MatDeforElastIso(MR, 0.0, E, nu, CTE)
# Material orientation matrix
csmat = [i == j ? one(Float64) : zero(Float64) for i = 1:3, j = 1:3]
function updatecs!(csmatout, XYZ, tangents, feid, qpid)
copyto!(csmatout, csmat)
end
geom = NodalField(fens.xyz)
u = NodalField(zeros(size(fens.xyz, 1), 3)) # displacement field
lx0 = connectednodes(subset(bfes, section0))
setebc!(u, lx0, true, 1, 0.0)
setebc!(u, lx0, true, 2, 0.0)
setebc!(u, lx0, true, 3, 0.0)
lx1 = connectednodes(subset(bfes, sectionlateral))
setebc!(u, lx1, true, 1, 0.0)
applyebc!(u)
numberdofs!(u)
# numberdofs!(u)
println("nfreedofs(u) = $(nfreedofs(u))")
t1 = time()
n2e = FENodeToFEMapThr(fes, nnodes(u))
times["FENodeToFEMap"] = [time() - t1]
println("Make node to element map = $(times["FENodeToFEMap"]) [s]")
fi = ForceIntensity(Float64, 3, getfrcL!)
el2femm = FEMMBase(IntegDomain(subset(bfes, sectionL), GaussRule(2, 2)))
F = distribloads(el2femm, geom, u, fi, 2)
F_f = vector_blocked_f(F, nfreedofs(u))
function createsubdomain(fessubset)
FEMMDeforLinearMSH8(MR, IntegDomain(fessubset, GaussRule(3, 2)), material)
end
function matrixcomputation!(femm, assembler)
associategeometry!(femm, geom)
stiffness(femm, assembler, geom, u)
end
println("Stiffness =============================================================")
GC.enable(false)
t0 = time();
t1 = time()
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
times["FElemToNeighborsMap"] = [time() - t1]
println(" Make element to neighbor map = $(times["FElemToNeighborsMap"]) [s]")
t1 = time()
coloring = FinEtoolsMultithreading.element_coloring(fes, e2e, ntasks)
times["ElementColors"] = [time() - t1]
println(" Compute element colors = $(times["ElementColors"]) [s]")
t1 = time()
n2n = FENodeToNeighborsMap(n2e, fes)
times["FENodeToNeighborsMap"] = [time() - t1]
println(" Make node to neighbor map = $(times["FENodeToNeighborsMap"]) [s]")
t1 = time()
K_pattern = csc_symmetric_pattern(u.dofnums, nalldofs(u), n2n, eltype(u.values))
times["SparsityPattern"] = [time() - t1]
println(" Sparsity pattern = $(times["SparsityPattern"]) [s]")
t1 = time()
decomposition = decompose(fes, coloring, createsubdomain, ntasks)
times["DomainDecomposition"] = [time() - t1]
println(" Domain decomposition = $(times["DomainDecomposition"]) [s]")
t1 = time()
K = parallel_matrix_assembly!(
SysmatAssemblerSparsePatt(K_pattern),
decomposition,
matrixcomputation!
)
times["AssemblyOfValues"] = [time() - t1]
println(" Add to matrix = $(times["AssemblyOfValues"]) [s]")
times["TotalAssembly"] = [time() - t0]
println("Assembly total = $(times["TotalAssembly"]) [s]")
GC.enable(true)
K_ff = matrix_blocked_ff(K, nfreedofs(u))
F_f = vector_blocked_f(F, nfreedofs(u))
println("Stiffness: number of non zeros = $(nnz(K_ff)) [ND]")
println("Sparsity = $(nnz(K_ff)/size(K_ff, 1)/size(K_ff, 2)) [ND]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "stubby_corbel_H8_ms-timing-parallel-nth=$(ntasks)"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims = 1)
end
end
DataDrop.store_json(n, times)
return
end
Tipl = selectnode(fens, box = [0 W L L 0 H], inflate = htol)
@show norm(K_ff - K_ff') / norm(K_ff)
@time K_ff_factors = SuiteSparse.CHOLMOD.cholesky(Symmetric(K_ff))
@show nnz(K_ff_factors)
# @time K = SparseArrays.ldlt(K)
# @time K = cholesky(K)
@time U_f = K_ff_factors \ (F_f)
scattersysvec!(u, U_f[:])
utip = mean(u.values[Tipl, 3], dims = 1)
println("Deflection: $(utip), compared to $(uzex)")
File = "stubby_corbel_H8_ms_parallel.vtk"
vtkexportmesh(File, fens, fes; vectors = [("u", u.values)])
# @async run(`"paraview.exe" $File`)
true
end # run
end # module
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 4238 | module stubby_corbel_serial
using FinEtools
using FinEtools.AlgoBaseModule: evalconvergencestudy, solve_blocked!
using FinEtoolsDeforLinear
using FinEtoolsDeforLinear.AlgoDeforLinearModule:
linearstatics, exportstresselementwise, exportstress
using Statistics: mean
using LinearAlgebra
using SparseArrays
using SuiteSparse
using SymRCM
using Random
using DataDrop
# Isotropic material
E = 1000.0
nu = 0.3 # Compressible material
W = 25.0
H = 50.0
L = 50.0
htol = minimum([L, H, W]) / 1000
uzex = -2.16e-01
magn = 0.2 * (-12.6) / 4
Force = magn * W * H * 2
CTE = 0.0
function getfrcL!(forceout, XYZ, tangents, feid, qpid)
copyto!(forceout, [0.0; 0.0; magn])
end
function run(N = 10, assembly_only = false)
times = Dict{String,Vector{Float64}}()
t1 = time()
fens, fes = H8block(W, L, H, N, 2 * N, 2 * N)
times["MeshGeneration"] = [time() - t1]
println("Number of elements: $(count(fes))")
bfes = meshboundary(fes)
# end cross-section surface for the shear loading
sectionL = selectelem(fens, bfes; facing = true, direction = [0.0 +1.0 0.0])
# 0 cross-section surface for the reactions
section0 = selectelem(fens, bfes; facing = true, direction = [0.0 -1.0 0.0])
# lateral surface for the symmetry boundary condition
sectionlateral = selectelem(fens, bfes; facing = true, direction = [1.0 0.0 0.0])
MR = DeforModelRed3D
material = MatDeforElastIso(MR, 0.0, E, nu, CTE)
# Material orientation matrix
csmat = [i == j ? one(Float64) : zero(Float64) for i = 1:3, j = 1:3]
function updatecs!(csmatout, XYZ, tangents, feid, qpid)
copyto!(csmatout, csmat)
end
geom = NodalField(fens.xyz)
u = NodalField(zeros(size(fens.xyz, 1), 3)) # displacement field
lx0 = connectednodes(subset(bfes, section0))
setebc!(u, lx0, true, 1, 0.0)
setebc!(u, lx0, true, 2, 0.0)
setebc!(u, lx0, true, 3, 0.0)
lx1 = connectednodes(subset(bfes, sectionlateral))
setebc!(u, lx1, true, 1, 0.0)
applyebc!(u)
numberdofs!(u)
# numberdofs!(u)
println("nfreedofs(u) = $(nfreedofs(u))")
fi = ForceIntensity(Float64, 3, getfrcL!)
el2femm = FEMMBase(IntegDomain(subset(bfes, sectionL), GaussRule(2, 2)))
F = distribloads(el2femm, geom, u, fi, 2)
F_f = vector_blocked_f(F, nfreedofs(u))
# femm = FEMMDeforLinear(MR, IntegDomain(fes, GaussRule(3, 2)), material)
femm = FEMMDeforLinearMSH8(MR, IntegDomain(fes, GaussRule(3, 2)), material)
t0 = time()
t1 = time()
associategeometry!(femm, geom)
assembler = SysmatAssemblerSparse(1.0)
setnomatrixresult(assembler, true)
stiffness(femm, assembler, geom, u)
times["ComputeCOO"] = [time() - t1]
println("Compute COO = $(times["ComputeCOO"]) [s]")
t1 = time()
setnomatrixresult(assembler, false)
K = makematrix!(assembler)
times["BuildCSC"] = [time() - t1]
println("Build CSC = $(times["BuildCSC"]) [s]")
times["TotalAssembly"] = [time() - t0]
println("Assembly total = $(times["TotalAssembly"]) [s]")
K_ff = matrix_blocked_ff(K, nfreedofs(u))
F_f = vector_blocked_f(F, nfreedofs(u))
println("Stiffness: number of non zeros = $(nnz(K_ff)) [ND]")
println("Sparsity = $(nnz(K_ff)/size(K_ff, 1)/size(K_ff, 2)) [ND]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "stubby_corbel_H8_ms-timing-serial"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims = 1)
end
end
DataDrop.store_json(n, times)
return
end
Tipl = selectnode(fens, box = [0 W L L 0 H], inflate = htol)
@show norm(K_ff - K_ff') / norm(K_ff)
@time K_ff_factors = SuiteSparse.CHOLMOD.cholesky(Symmetric(K_ff))
@show nnz(K_ff_factors)
@time U_f = K_ff_factors \ (F_f)
scattersysvec!(u, U_f[:])
utip = mean(u.values[Tipl, 3], dims = 1)
println("Deflection: $(utip), compared to $(uzex)")
File = "stubby_corbel_H8_ms_serial.vtk"
vtkexportmesh(File, fens, fes; vectors = [("u", u.values)])
true
end # run
end # module
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 5939 | module ops_parallel
using FinEtools
using FinEtools.AlgoBaseModule: evalconvergencestudy, solve_blocked!
using FinEtoolsDeforLinear
using FinEtoolsDeforLinear.AlgoDeforLinearModule:
linearstatics, exportstresselementwise, exportstress
using Statistics: mean
using LinearAlgebra
using SparseArrays
using SuiteSparse
using Printf
using SymRCM
using Random
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose,
parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using FinEtoolsMultithreading
using ECLGraphColor
using DataDrop
# Isotropic material
E = 1000.0
nu = 0.3 # Compressible material
W = 25.0
H = 50.0
L = 50.0
htol = minimum([L, H, W]) / 1000
uzex = -2.16e-01
magn = 0.2 * (-12.6) / 4
Force = magn * W * H * 2
CTE = 0.0
function getfrcL!(forceout, XYZ, tangents, feid, qpid)
copyto!(forceout, [0.0; 0.0; magn])
end
function run(N = 10, ntasks = Threads.nthreads(), assembly_only = false)
times = Dict{String, Vector{Float64}}()
t1 = time()
fens, fes = H8block(W, L, H, N, 2 * N, 2 * N)
times["MeshGeneration"] = [time() - t1]
println("Number of elements: $(count(fes))")
bfes = meshboundary(fes)
# end cross-section surface for the shear loading
sectionL = selectelem(fens, bfes; facing = true, direction = [0.0 +1.0 0.0])
# 0 cross-section surface for the reactions
section0 = selectelem(fens, bfes; facing = true, direction = [0.0 -1.0 0.0])
# lateral surface for the symmetry boundary condition
sectionlateral = selectelem(fens, bfes; facing = true, direction = [1.0 0.0 0.0])
MR = DeforModelRed3D
material = MatDeforElastIso(MR, 0.0, E, nu, CTE)
# Material orientation matrix
csmat = [i == j ? one(Float64) : zero(Float64) for i = 1:3, j = 1:3]
function updatecs!(csmatout, XYZ, tangents, feid, qpid)
copyto!(csmatout, csmat)
end
geom = NodalField(fens.xyz)
u = NodalField(zeros(size(fens.xyz, 1), 3)) # displacement field
lx0 = connectednodes(subset(bfes, section0))
setebc!(u, lx0, true, 1, 0.0)
setebc!(u, lx0, true, 2, 0.0)
setebc!(u, lx0, true, 3, 0.0)
lx1 = connectednodes(subset(bfes, sectionlateral))
setebc!(u, lx1, true, 1, 0.0)
applyebc!(u)
numberdofs!(u)
# numberdofs!(u)
println("nfreedofs(u) = $(nfreedofs(u))")
t1 = time()
n2e = FENodeToFEMapThr(fes, nnodes(u))
times["FENodeToFEMap"] = [time() - t1]
println("Make node to element map = $(times["FENodeToFEMap"]) [s]")
fi = ForceIntensity(Float64, 3, getfrcL!)
el2femm = FEMMBase(IntegDomain(subset(bfes, sectionL), GaussRule(2, 2)))
F = distribloads(el2femm, geom, u, fi, 2)
F_f = vector_blocked_f(F, nfreedofs(u))
function createsubdomain(fessubset)
FEMMDeforLinearMSH8(MR, IntegDomain(fessubset, GaussRule(3, 2)), material)
end
function matrixcomputation!(femm, assembler)
associategeometry!(femm, geom)
stiffness(femm, assembler, geom, u)
end
println("Stiffness =============================================================")
GC.enable(false)
t0 = time();
t1 = time()
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
times["FElemToNeighborsMap"] = [time() - t1]
println(" Make element to neighbor map = $(times["FElemToNeighborsMap"]) [s]")
t1 = time()
coloring = FinEtoolsMultithreading.element_coloring(fes, e2e, ntasks)
times["ElementColors"] = [time() - t1]
println(" Compute element colors = $(times["ElementColors"]) [s]")
t1 = time()
n2n = FENodeToNeighborsMap(n2e, fes)
times["FENodeToNeighborsMap"] = [time() - t1]
println(" Make node to neighbor map = $(times["FENodeToNeighborsMap"]) [s]")
t1 = time()
K_pattern = csc_symmetric_pattern(u.dofnums, nalldofs(u), n2n, eltype(u.values))
times["SparsityPattern"] = [time() - t1]
println(" Sparsity pattern = $(times["SparsityPattern"]) [s]")
# t1 = time()
# decomposition = decompose(fes, coloring, createsubdomain, ntasks)
# times["DomainDecomposition"] = [time() - t1]
# println(" Domain decomposition = $(times["DomainDecomposition"]) [s]")
# t1 = time()
# K = parallel_matrix_assembly!(
# SysmatAssemblerSparsePatt(K_pattern),
# decomposition,
# matrixcomputation!
# )
# times["AssemblyOfValues"] = [time() - t1]
# println(" Add to matrix = $(times["AssemblyOfValues"]) [s]")
# times["TotalAssembly"] = [time() - t0]
# println("Assembly total = $(times["TotalAssembly"]) [s]")
GC.enable(true)
# K_ff = matrix_blocked_ff(K, nfreedofs(u))
# F_f = vector_blocked_f(F, nfreedofs(u))
# println("Stiffness: number of non zeros = $(nnz(K_ff)) [ND]")
# println("Sparsity = $(nnz(K_ff)/size(K_ff, 1)/size(K_ff, 2)) [ND]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "stubby_corbel_H8_ms-timing-parallel-nth=$(ntasks)"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims = 1)
end
end
DataDrop.store_json(n, times)
return
end
# Tipl = selectnode(fens, box = [0 W L L 0 H], inflate = htol)
# @show norm(K_ff - K_ff') / norm(K_ff)
# @time K_ff_factors = SuiteSparse.CHOLMOD.cholesky(Symmetric(K_ff))
# @show nnz(K_ff_factors)
# # @time K = SparseArrays.ldlt(K)
# # @time K = cholesky(K)
# @time U_f = K_ff_factors \ (F_f)
# scattersysvec!(u, U_f[:])
# utip = mean(u.values[Tipl, 3], dims = 1)
# println("Deflection: $(utip), compared to $(uzex)")
# File = "stubby_corbel_H8_ms_parallel.vtk"
# vtkexportmesh(File, fens, fes; vectors = [("u", u.values)])
# @async run(`"paraview.exe" $File`)
true
end # run
end # module
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 964 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need at least one arguments: N (mesh subdivision)")
end
using Pkg
# Pkg.add("ThreadPinning")
Pkg.activate(".")
Pkg.instantiate()
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(1)
# Turn off thread pinning because it seems to interfere with the graph coloring library.
# using ThreadPinning
# ThreadPinning.Prefs.set_os_warning(false)
# pinthreads(:cores)
N = parse(Int, ARGS[1])
ntasks = Threads.nthreads()
if length(ARGS) > 1
ntasks = parse(Int, ARGS[2])
end
assembly_only = true
if length(ARGS) > 2
assembly_only = parse(Bool, ARGS[3])
end
include(raw"ops_parallel.jl")
using .ops_parallel;
using FinEtoolsMultithreading
Pkg.status("FinEtoolsMultithreading")
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): nthreads=$(Threads.nthreads()), ntasks=$(ntasks), N=$(N)"
ops_parallel.run(N, ntasks, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1007 | """
FENodeToFEMapModule
Module to construct a map from finite element nodes to elements connected to them.
This map is constructed in parallel.
"""
module FENodeToFEMapModule
__precompile__(true)
using FinEtools.FESetModule: AbstractFESet, subset, count
using FinEtools
using ChunkSplitters
function _merge_maps!(maps, rnge)
for m in 2:length(maps)
for i in rnge
append!(maps[1][i], maps[m][i])
end
end
return maps
end
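# The map is built in two parallel phases: each task first builds a partial
# node-to-element map over its chunk of the elements; the per-node lists are
# then merged into the first partial map, chunking over the nodes.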
function FENodeToFEMapThr(fes::FE, nmax::IT, ntasks = Threads.nthreads()) where {FE<:AbstractFESet,IT<:Integer}
chks = chunks(1:count(fes), ntasks)
maps = Vector{Vector{Vector{IT}}}(undef, length(chks))
Base.Threads.@threads for c in chks
maps[c[2]] = FinEtools.FENodeToFEMapModule._makemap(fes.conn, c[1], nmax)
end
chks = chunks(1:nmax, ntasks)
Base.Threads.@threads for c in chks
_merge_maps!(maps, c[1])
end
return FinEtools.FENodeToFEMapModule.FENodeToFEMap(maps[1])
end
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 2203 | """
FENodeToNeighborsMapModule
Module to construct a map from finite element nodes to nodes connected to them.
"""
module FENodeToNeighborsMapModule
__precompile__(true)
using FinEtools.FESetModule: AbstractFESet
using FinEtools.FENodeToFEMapModule: FENodeToFEMap
function _nneighbors(self, ellist, conn, npe)
totn = length(ellist) * (npe - 1) # we are not adding self-reference
nodes = fill(zero(eltype(conn[1])), totn)
p = 1
@inbounds for i in ellist
for k in conn[i]
if k != self # we are not adding self-reference
nodes[p] = k; p += 1
end
end
end
return unique!(sort!(nodes))
end
function _make_map(n2e, conn)
npe = length(conn[1])
map = Vector{eltype(n2e.map)}(undef, length(n2e.map))
Base.Threads.@threads for i in eachindex(n2e.map) # run this in PARALLEL
map[i] = _nneighbors(i, n2e.map[i], conn, npe)
end
return map
end
"""
FENodeToNeighborsMap
Map from finite element nodes to the nodes that are connected to them by finite
elements.
!!! note
Self references are excluded (a node is its own neighbor,
but it is not included in this data structure as such).
"""
struct FENodeToNeighborsMap{IT}
# Map as a vector of vectors.
map::Vector{Vector{IT}}
end
"""
FENodeToNeighborsMap(
n2e::N2EMAP,
conn::Vector{NTuple{N,IT}},
) where {N2EMAP<:FENodeToFEMap,N,IT<:Integer}
Map from finite element nodes to the nodes connected to them by elements.
- `n2e` = map from finite element nodes to the elements connected to them
- `conn` = connectivities as a vector of tuples
"""
function FENodeToNeighborsMap(
n2e::N2EMAP,
conn::Vector{NTuple{N,IT}},
) where {N2EMAP<:FENodeToFEMap,N,IT<:Integer}
return FENodeToNeighborsMap(_make_map(n2e, conn))
end
"""
FENodeToNeighborsMap(
n2e::N2EMAP,
fes::FE,
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet}
Map from finite element nodes to the nodes connected to them by elements.
Convenience constructor.
"""
function FENodeToNeighborsMap(
n2e::N2EMAP,
fes::FE,
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet}
return FENodeToNeighborsMap(_make_map(n2e, fes.conn))
end
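# A minimal self-contained sketch (assumed data, not part of the original
# file): two triangles sharing the edge 2-3.
function _nneighbors_sketch()
conn = [(1, 2, 3), (2, 4, 3)]
ellist = [1, 2] # both elements are connected to node 2
return _nneighbors(2, ellist, conn, 3) # -> [1, 3, 4]
end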
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 2738 | """
FElemToNeighborsMapModule
Module to construct a map from finite elements to elements that share nodes with
them (i.e. that are connected to them through the nodes).
"""
module FElemToNeighborsMapModule
__precompile__(true)
using FinEtools.FESetModule: AbstractFESet
using FinEtools.FENodeToFEMapModule: FENodeToFEMap
using ECLGraphColor
function _unique_elem_neighbors(self, n2emap, conn1, IntType)
len = 0
@inbounds for k in conn1
len += (length(n2emap[k]) - 1) # we are not adding self-references
end
neighbors = fill(zero(IntType), len)
p = 1
@inbounds for k in conn1
for j in n2emap[k]
if j != self # we are not adding self-reference
neighbors[p] = j; p += 1
end
end
end
return unique!(sort!(neighbors))
end
function _e2e_map(n2e, conn, IntType)
map = Vector{Vector{IntType}}(undef, length(conn))
Base.Threads.@threads for i in eachindex(map) # run this in PARALLEL
map[i] = _unique_elem_neighbors(i, n2e.map, conn[i], IntType)
end
return map
end
"""
FElemToNeighborsMap
Map from finite elements to the elements that share nodes with them (i.e. are
connected to them through the nodes).
!!! note
Self references are excluded (an element is not listed among its own neighbors).
"""
struct FElemToNeighborsMap{IT}
# Map as a vector of vectors.
map::Vector{Vector{IT}}
end
"""
FElemToNeighborsMap(
n2e::N2EMAP,
conn::Vector{NTuple{N,IT}},
) where {N2EMAP<:FENodeToFEMap,N,IT<:Integer}
Map from finite elements to the elements that share nodes with them.
- `n2e` = map from finite element nodes to the elements connected to them
- `conn` = connectivities as a vector of tuples
"""
function FElemToNeighborsMap(
n2e::N2EMAP,
conn::Vector{NTuple{N,IT}},
) where {N2EMAP<:FENodeToFEMap,N,IT<:Integer}
return FElemToNeighborsMap(_e2e_map(n2e, conn, IT))
end
"""
FElemToNeighborsMap(
n2e::N2EMAP,
fes::FE,
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet}
Map from finite elements to the elements that are connected by finite nodes.
Convenience constructor.
"""
function FElemToNeighborsMap(
n2e::N2EMAP,
fes::FE,
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet}
return FElemToNeighborsMap(_e2e_map(n2e, fes.conn, ECLGraphColor.int_type()))
end
"""
FElemToNeighborsMap(
n2e::N2EMAP,
fes::FE,
IntType::IT
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet,IT}
Map from finite elements to the elements that share nodes with them, with the
integer type used to store element numbers given by `IntType`.
Convenience constructor.
"""
function FElemToNeighborsMap(
n2e::N2EMAP,
fes::FE,
IntType::IT
) where {N2EMAP<:FENodeToFEMap,FE<:AbstractFESet,IT}
return FElemToNeighborsMap(_e2e_map(n2e, fes.conn, IntType))
end
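# A minimal self-contained sketch (assumed data, not part of the original
# file): two triangles sharing the edge 2-3; the only neighbor of element 1
# is element 2.
function _unique_elem_neighbors_sketch()
conn = [(1, 2, 3), (2, 4, 3)]
n2emap = [[1], [1, 2], [1, 2], [2]] # node -> elements containing it
return _unique_elem_neighbors(1, n2emap, conn[1], Int) # -> [2]
end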
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1707 | """
FinEtoolsMultithreading
Package for parallel finite element computing.
"""
module FinEtoolsMultithreading
using SparseArrays
using ChunkSplitters
using SparseMatricesCSR
using LinearAlgebra
using FinEtools
using ThreadedScans
using ECLGraphColor
include("utilities.jl")
include("prefixsum.jl")
include("FENodeToNeighborsMapModule.jl")
using .FENodeToNeighborsMapModule: FENodeToNeighborsMap
include("FElemToNeighborsMapModule.jl")
using .FElemToNeighborsMapModule: FElemToNeighborsMap
include("FENodeToFEMapModule.jl")
include("element_coloring.jl")
include("parallel_element_coloring.jl")
include("sparsity_pattern.jl")
include("parallel_assembly.jl")
include("domain_decomposition.jl")
using .FENodeToFEMapModule: FENodeToFEMapThr
include("high_level.jl")
module Exports
# The high level interface
using ..FinEtoolsMultithreading: parallel_make_matrix
export parallel_make_matrix
# These three routines give access to intermediate steps
using ..FinEtoolsMultithreading: parallel_matrix_assembly!
export parallel_matrix_assembly!
using ..FinEtoolsMultithreading: csc_symmetric_pattern
export csc_symmetric_pattern
using ..FENodeToNeighborsMapModule: FENodeToNeighborsMap
# Exported: type for maps from nodes to connected nodes
export FENodeToNeighborsMap
using ..FElemToNeighborsMapModule: FElemToNeighborsMap
# Exported: type for maps from nodes to connected nodes
export FElemToNeighborsMap
using ..FENodeToFEMapModule: FENodeToFEMapThr
# Exported: type for maps from nodes to elements computed on multiple threads
export FENodeToFEMapThr
export element_coloring
end
# Enable LSP look up in test modules
if false
include("../test/runtests.jl")
end
end # module FinEtoolsMultithreading
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 4350 | using FinEtools
using SparseArrays
import FinEtools.AssemblyModule.eltype
import FinEtools.AssemblyModule.startassembly!
import FinEtools.AssemblyModule.assemble!
import FinEtools.AssemblyModule.makematrix!
"""
SysmatAssemblerSparsePatt{TPATT<:SparseMatrixCSC} <: AbstractSysmatAssembler
Type for assembling a sparse global matrix from elementwise matrices using a
sparsity pattern (sparse matrix with all potential non-zeroes represented, but
all those numbers are actually zero).
!!! note
All fields of the datatype are private. The type is manipulated by the
functions `startassembly!`, `assemble!`, and `makematrix!`.
"""
mutable struct SysmatAssemblerSparsePatt{TPATT<:SparseMatrixCSC} <: AbstractSysmatAssembler
_pattern::TPATT
_nomatrixresult::Bool
_force_init::Bool
end
function eltype(self::TPATT) where {TPATT<:SysmatAssemblerSparsePatt}
eltype(self._pattern.nzval)
end
function SysmatAssemblerSparsePatt(pattern::TPATT) where {TPATT<:SparseMatrixCSC}
return SysmatAssemblerSparsePatt(pattern, false, false)
end
"""
startassembly!(self::SysmatAssemblerSparsePatt{T},
elem_mat_nrows::IT,
elem_mat_ncols::IT,
n_elem_mats::IT,
row_nalldofs::IT,
col_nalldofs::IT;
force_init = false
) where {T, IT<:Integer}
Start the assembly of a global matrix.
For this assembler the sparsity pattern is supplied at construction, so no
buffers need to be created; the method is still called before the first call
to `assemble!` for interface compatibility.
# Arguments
- `elem_mat_nrows` = row dimension of the element matrix;
- `elem_mat_ncols` = column dimension of the element matrix;
- `n_elem_mats` = number of element matrices;
- `row_nalldofs`= The total number of rows as a tuple;
- `col_nalldofs`= The total number of columns as a tuple.
This is a no-op unless `force_init` is true, in which case the stored values
are zeroed; the pattern itself has already been built, per our assumption.
# Returns
- `self`: the modified assembler.
"""
function startassembly!(
self::TPATT,
elem_mat_nrows::IT,
elem_mat_ncols::IT,
n_elem_mats::IT,
row_nalldofs::IT,
col_nalldofs::IT;
force_init = false,
) where {TPATT<:SysmatAssemblerSparsePatt,IT<:Integer}
if force_init
fill!(self._pattern.nzval, zero(eltype(self._pattern.nzval)))
end
return self
end
@inline function _binary_search(array::Array{IT,1}, target::IT, left::IT, right::IT) where {IT}
@inbounds while left <= right
mid = fld((left + right), 2) # the middle element position
if array[mid] < target # the target can only be in the right subarray
left = mid + 1
elseif array[mid] > target # the target can only be in the left subarray
right = mid - 1
else # the target is at the middle position itself
return mid
end
end
return 0
end
"""
assemble!(
self::SysmatAssemblerSparsePatt,
mat::MT,
dofnums_row::IT,
dofnums_col::IT,
) where {MT, IT}
Assemble a rectangular matrix.
The method adds an element matrix into the sparsity pattern, using the two
vectors of equation numbers for the rows and columns; each entry is located
by a binary search among the stored row indices of its column.
"""
function assemble!(
self::TPATT,
mat::MBT,
dofnums_row::CIT,
dofnums_col::CIT,
) where {TPATT<:SysmatAssemblerSparsePatt,MBT,CIT}
nrows = length(dofnums_row)
ncolumns = length(dofnums_col)
size(mat) == (nrows, ncolumns) || error("Wrong size of matrix")
row_nalldofs, col_nalldofs = size(self._pattern)
@inbounds for j = 1:ncolumns
dj = dofnums_col[j]
# dj < 1 && error("Column degree of freedom < 1")
# dj > col_nalldofs && error("Column degree of freedom > size")
for i = 1:nrows
di = dofnums_row[i]
# di < 1 && error("Row degree of freedom < 1")
# di > row_nalldofs && error("Row degree of freedom > size")
k = _binary_search(self._pattern.rowval, di,
self._pattern.colptr[dj], self._pattern.colptr[dj+1] - 1)
self._pattern.nzval[k] += mat[i, j]
end
end
return self
end
"""
makematrix!(self::SysmatAssemblerSparsePatt)
Make a sparse matrix.
The sparse matrix (i.e. a sparsity pattern with the nonzero values filled in) is returned.
"""
function makematrix!(self::TPATT) where {TPATT<:SysmatAssemblerSparsePatt}
return self._pattern
end
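# A minimal usage sketch (assumed data, not part of the original file): a
# 3x3 pattern with structural zeros, into which a 2x2 element matrix is added
# for the degrees of freedom (1, 2) x (1, 2).
function _sysmat_assembler_patt_sketch()
pattern = sparse([1, 2, 1, 2, 3], [1, 1, 2, 2, 3], zeros(5), 3, 3)
a = SysmatAssemblerSparsePatt(pattern)
assemble!(a, [1.0 2.0; 3.0 4.0], [1, 2], [1, 2])
return makematrix!(a) # the pattern with the values filled in
end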
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 4046 | using FinEtools
using SparseArrays
import FinEtools.AssemblyModule.eltype
import FinEtools.AssemblyModule.startassembly!
import FinEtools.AssemblyModule.assemble!
import FinEtools.AssemblyModule.makematrix!
"""
SysmatAssemblerSparsePattwLookup{TPATT<:SparseMatrixCSC} <: AbstractSysmatAssembler
Type for assembling a sparse global matrix from elementwise matrices using a
sparsity pattern (sparse matrix with all potential non-zeroes represented, but
all those numbers are actually zero).
!!! note
All fields of the datatype are private. The type is manipulated by the
functions `startassembly!`, `assemble!`, and `makematrix!`.
"""
mutable struct SysmatAssemblerSparsePattwLookup{TPATT<:SparseMatrixCSC} <: AbstractSysmatAssembler
_pattern::TPATT
_rows::Vector{Dict{Int, Int}}
_nomatrixresult::Bool
_force_init::Bool
end
function eltype(self::TPATT) where {TPATT<:SysmatAssemblerSparsePattwLookup}
eltype(self._pattern.nzval)
end
function SysmatAssemblerSparsePattwLookup(pattern::TPATT) where {TPATT<:SparseMatrixCSC}
empty = Dict{Int,Int}()
rows = fill(empty, size(pattern, 2))
Threads.@threads for c in axes(pattern, 2)
cr = pattern.colptr[c]:(pattern.colptr[c+1]-1)
rows[c] = Dict{Int,Int}(zip(view(pattern.rowval, cr), cr))
end
return SysmatAssemblerSparsePattwLookup(pattern, rows, false, false)
end
"""
startassembly!(self::SysmatAssemblerSparsePattwLookup{T},
elem_mat_nrows::IT,
elem_mat_ncols::IT,
n_elem_mats::IT,
row_nalldofs::IT,
col_nalldofs::IT;
force_init = false
) where {T, IT<:Integer}
Start the assembly of a global matrix.
For this assembler the sparsity pattern is supplied at construction, so no
buffers need to be created; the method is still called before the first call
to `assemble!` for interface compatibility.
# Arguments
- `elem_mat_nrows` = row dimension of the element matrix;
- `elem_mat_ncols` = column dimension of the element matrix;
- `n_elem_mats` = number of element matrices;
- `row_nalldofs`= The total number of rows as a tuple;
- `col_nalldofs`= The total number of columns as a tuple.
This is a no-op unless `force_init` is true, in which case the stored values
are zeroed; the pattern itself has already been built, per our assumption.
# Returns
- `self`: the modified assembler.
"""
function startassembly!(
self::TPATT,
elem_mat_nrows::IT,
elem_mat_ncols::IT,
n_elem_mats::IT,
row_nalldofs::IT,
col_nalldofs::IT;
force_init = false,
) where {TPATT<:SysmatAssemblerSparsePattwLookup,IT<:Integer}
if force_init
fill!(self._pattern.nzval, zero(eltype(self._pattern.nzval)))
end
return self
end
"""
assemble!(
self::SysmatAssemblerSparsePattwLookup,
mat::MT,
dofnums_row::IT,
dofnums_col::IT,
) where {MT, IT}
Assemble a rectangular matrix.
"""
function assemble!(
self::TPATT,
mat::MBT,
dofnums_row::CIT,
dofnums_col::CIT,
) where {TPATT<:SysmatAssemblerSparsePattwLookup,MBT,CIT}
# Assembly of a rectangular matrix.
# The method assembles a rectangular matrix using the two vectors of
# equation numbers for the rows and columns.
nrows = length(dofnums_row)
ncolumns = length(dofnums_col)
size(mat) == (nrows, ncolumns) || error("Wrong size of matrix")
row_nalldofs, col_nalldofs = size(self._pattern)
@inbounds for j = 1:ncolumns
dj = dofnums_col[j]
# dj < 1 && error("Column degree of freedom < 1")
# dj > col_nalldofs && error("Column degree of freedom > size")
for i = 1:nrows
di = dofnums_row[i]
# di < 1 && error("Row degree of freedom < 1")
# di > row_nalldofs && error("Row degree of freedom > size")
k = self._rows[dj][di]
self._pattern.nzval[k] += mat[i, j]
end
end
return self
end
"""
makematrix!(self::SysmatAssemblerSparsePattwLookup)
Make a sparse matrix.
The sparse matrix (i.e. a sparsity pattern with the nonzero values filled in) is returned.
"""
function makematrix!(self::TPATT) where {TPATT<:SysmatAssemblerSparsePattwLookup}
return self._pattern
end
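# Design note: compared with `SysmatAssemblerSparsePatt`, which locates each
# entry by a binary search within its column (logarithmic cost, no extra
# storage), this assembler trades memory for speed: it keeps one Dict per
# column for constant-time average lookups.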
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1676 | """
decompose(fes, coloring, createsubd, ntasks = Threads.nthreads())
Create finite element machines for the subdomains.
# Arguments
- `fes` = finite element set,
- `coloring` = tuple of the vector of element colors (one per element) and
the vector of unique colors, as returned by `element_coloring`,
- `createsubd` = function to create one finite element machine,
- `ntasks` = number of tasks (subdomains).
Example:
```
function createsubd(fessubset)
FEMMDeforLinear(MR, IntegDomain(fessubset, GaussRule(3, 2)), material)
end
```
Create a vector of vectors: for each unique color in the element coloring, the
vector of the finite element machines is stored. The finite element machines are
created using the function `createsubd`.
The matrix is then assembled by going sequentially through the unique colors
and, for each color, executing all of its finite element machines in parallel.
"""
function decompose(fes, coloring, createsubd,
ntasks=Threads.nthreads())
el_colors, uniq_colors = coloring
decomp = fill([], length(uniq_colors))
Threads.@threads for i in eachindex(uniq_colors)
c = uniq_colors[i]
ellist = findall(_c -> _c == c, el_colors)
_fes = subset(fes, ellist)
decomp[i] = _make_femms(_fes, ntasks, createsubd)
end
return decomp
end
function _make_femms(fesofacolor, ntasks, createsubd)
chks = chunks(1:count(fesofacolor), ntasks)
return [createsubd(subset(fesofacolor, ch)) for (ch, j) in chks]
end
# using LazyArrays
# i = 1000
# is = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# function f(i, is)
# for n in ApplyArray(vcat, i, is)
# n
# end
# end
# @time f(i, is) | FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 2691 | import FinEtools: element_coloring
"""
element_coloring(fes, e2e::E2EM) where {E2EM<:FElemToNeighborsMap{IT} where {IT}}
Determine element coloring such that no elements of the same color share a node.
# Arguments
- `fes`: The finite element set.
- `e2e`: The element-to-element map.
# Returns
Vector of element colors, vector of unique colors, and vector of counts of each
color.
"""
function element_coloring(fes, e2e::E2EM) where {E2EM<:FElemToNeighborsMap{IT} where {IT}}
element_colors = fill(zero(Int16), count(fes))
unique_colors = eltype(element_colors)[1]
color_counts = eltype(element_colors)[0]
return element_coloring!(element_colors, unique_colors, color_counts, e2e.map)
end
function __find_of_min_count(color_used, first, color_counts)
c = 0
mincount = typemax(eltype(color_counts))
@inbounds for k in first:length(color_used)
if color_used[k] == 0 && mincount > color_counts[k]
mincount = color_counts[k]
c = k
end
end
return c
end
function __find_first_avail(color_used)
@inbounds for k in eachindex(color_used)
if color_used[k] == 0
return k
end
end
return 0
end
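# A minimal self-contained sketch (assumed data, not part of the original
# file): four elements in a chain, so that neighbors must alternate colors.
function _element_coloring_sketch()
e2emap = [[2], [1, 3], [2, 4], [3]]
element_colors = fill(zero(Int16), 4)
return element_coloring!(element_colors, Int16[1], Int16[0], e2emap)
# -> ([1, 2, 1, 2], [1, 2], [2, 2])
end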
function element_coloring!(element_colors, unique_colors, color_counts, e2emap)
color_used = fill(zero(eltype(element_colors)), length(unique_colors))
for e in eachindex(element_colors)
if element_colors[e] == 0
color_used .= 0; ncolorsatneighbors = 0
for oe in e2emap[e]
c = element_colors[oe]
if c > 0
color_used[c] += 1
ncolorsatneighbors += 1
end
end
if ncolorsatneighbors == 0
c = __find_of_min_count(color_used, 1, color_counts)
element_colors[e] = unique_colors[c]
color_counts[c] += 1
else
first = __find_first_avail(color_used)
if first == 0
added = maximum(unique_colors) + 1
push!(unique_colors, added)
push!(color_counts, 1)
push!(color_used, 0)
element_colors[e] = added
else
c = __find_of_min_count(color_used, first, color_counts)
element_colors[e] = unique_colors[c]
color_counts[c] += 1
end
end
end
end
return element_colors, unique_colors, color_counts
end | FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 2515 | """
parallel_make_matrix(
fes,
u,
createsubd,
matrixupdt!;
ntasks = Threads.nthreads(),
kind = :CSC,
)
Assemble a sparse matrix.
Either a `:CSC` matrix or a `:CSR` matrix is created. We shall refer to this
matrix as a CSX matrix. The process is:
1. Construct the incidence relation node-to-elements.
2. Construct the incidence relation node-to-neighbors.
3. Make the sparse pattern and create a sparse CSX matrix with all values zero.
4. Construct the incidence relation element-to-neighbors.
5. Compute element coloring.
6. Set up domain decomposition.
7. Compute and assemble the matrix entries.
Here, `createsubd` could be
```
(fessubset) -> FEMMAcoust(IntegDomain(fessubset, GaussRule(3, 2)), material)
```
and `matrixupdt!` could be
```
(femm, assmblr) -> acousticstiffness(femm, assmblr, geom, P)
```
"""
function parallel_make_matrix(
fes,
u,
createsubd,
matrixupdt!;
ntasks = Threads.nthreads(),
kind = :CSC,
)
n2e = FENodeToFEMapThr(fes, nnodes(u))
parallel_make_matrix(
fes,
u.dofnums,
nalldofs(u),
eltype(u.values),
n2e,
createsubd,
matrixupdt!,
ntasks,
kind,
)
end
"""
parallel_make_matrix(
fes,
dofnums,
ndofs,
FT,
n2e,
createsubd,
matrixupdt!,
ntasks,
kind,
)
Assemble a sparse matrix.
1. Construct the incidence relation node-to-neighbors.
2. Make the sparse pattern and create a sparse CSX matrix with all values zero.
3. Construct the incidence relation element-to-neighbors.
4. Compute element coloring.
5. Set up domain decomposition.
6. Compute and assemble the matrix entries.
"""
function parallel_make_matrix(
fes, dofnums, ndofs, FT, n2e, # data
createsubd, matrixupdt!, # functions
ntasks, # how many threads?
kind
)
@assert kind in [:CSC, :CSR]
n2n = FENodeToNeighborsMap(n2e, fes) # ALG 1
matrix = csc_symmetric_pattern(dofnums, # ALG 2
ndofs, n2n, FT)
e2e = FElemToNeighborsMap(n2e, fes) # ALG 3
coloring = element_coloring(fes, e2e, ntasks) # ALG 4
decomposition = decompose(fes, coloring, # ALG 5
createsubd, ntasks)
return parallel_matrix_assembly!( # ALG 6
SysmatAssemblerSparsePatt(matrix),
decomposition,
matrixupdt!,
)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1459 | using FinEtools
using SparseArrays
import FinEtools.AssemblyModule.eltype
import FinEtools.AssemblyModule.startassembly!
import FinEtools.AssemblyModule.assemble!
import FinEtools.AssemblyModule.makematrix!
include("assembler_patt.jl")
include("assembler_patt_w_lookup.jl")
function _check_femm_compatibility(femms::AbstractArray{FEMM,1}) where {FEMM<:AbstractFEMM}
for j in eachindex(femms)
iselementbased(femms[j]) || error("FEMM is not element-based")
(nameof(typeof(femms[j])) === nameof(typeof(femms[1]))) ||
error("All FEMMs must be of the same type")
(
nameof(typeof(finite_elements(femms[j]))) ===
nameof(typeof(finite_elements(femms[1])))
) || error("All finite elements must be of the same type")
end
return true
end
"""
parallel_matrix_assembly!(
assmblr,
decomposition,
matrixupdt!::F
) where {F<:Function}
Execute the assembly in parallel.
The decomposition is a vector of vectors of FEMMs.
At each level, as many tasks as there are FEMMs are spawned; the levels are processed in sequence.
The function `matrixupdt!` updates the assembler.
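A sketch of typical use, with `K_pattern`, `decomposition`, and
`matrixcomputation!` built beforehand (for instance via `csc_symmetric_pattern`,
`decompose`, and a bilinear-form update closure):
```
K = parallel_matrix_assembly!(
    SysmatAssemblerSparsePatt(K_pattern),
    decomposition,
    matrixcomputation!,
)
```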
"""
function parallel_matrix_assembly!(
assmblr,
decomposition,
matrixupdt!::F,
) where {F<:Function}
for femms in decomposition
Threads.@sync begin
for femm in femms
Threads.@spawn matrixupdt!(femm, assmblr)
end
end
end
return assmblr._pattern
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1435 | using ECLGraphColor: PECLgraph, make_graph, add_nlist_all_row, add_nindex
using ECLGraphColor: get_color, run_graph_coloring, free_graph, print_graph, write_graph
"""
element_coloring(fes, e2e::E2EM, ntasks::IT1) where {E2EM<:FElemToNeighborsMap{IT} where {IT},IT1<:Integer}
Determine element coloring such that no elements of the same color share a node.
# Arguments
- `fes`: The finite element set.
- `e2e`: The element-to-element map.
- `ntasks`: Number of tasks handed to the ECL graph-coloring routine.
# Returns
Vector of element colors, vector of unique colors.
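A sketch of typical use (as in the package tests), assuming `fes` and the
node-to-element map `n2e` exist:
```
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
element_colors, unique_colors = element_coloring(fes, e2e, ntasks)
```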
"""
function element_coloring(fes, e2e::E2EM, ntasks::IT1) where {E2EM<:FElemToNeighborsMap{IT} where {IT},IT1<:Integer}
    if !(Base.Sys.islinux() || Base.Sys.isapple())
        # The ECL graph-coloring path is only used on Linux and macOS; fall back to the serial coloring.
        return element_coloring(fes, e2e)
    end
    element_colors = fill(zero(Int16), count(fes))
map = e2e.map
g = make_graph(length(map), sum([length(c) for c in map]))
idx = 1
for i in eachindex(map)
add_nindex(g, i, idx)
idx += length(map[i])
end
add_nindex(g, length(map)+1, idx)
Threads.@threads for i in eachindex(map)
neighbors = map[i]
add_nlist_all_row(g, i, length(neighbors), neighbors);
end
# print_graph(g)
run_graph_coloring(g, ntasks, 0, 0)
Threads.@threads for i in 1:length(map)
element_colors[i] = get_color(g, i)
end
# write_graph(g, "testgraph.egr")
free_graph(g)
return element_colors, sort(unique(element_colors))
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 837 | using ThreadedScans
using ChunkSplitters
# Serial in-place prefix sum (cumulative sum); used per chunk by `psp_scan!`.
function _scan!(arr)
    n = length(arr)
    @inbounds for i in 2:n
        arr[i] += arr[i-1]
    end
    return arr
end
"""
psp_scan!(arr)
Perform prefix sum scan on the input array `arr` in place.
# Arguments
- `arr`: The input array to perform prefix sum scan on. The array is modified
in-place.
The computation is multithreaded, and uses all available threads.
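A small usage sketch:
```
a = collect(1:8)
psp_scan!(a)   # a is now cumsum(1:8) == [1, 3, 6, 10, 15, 21, 28, 36]
```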
"""
function psp_scan!(arr)
    n = length(arr)
    ntasks = Threads.nthreads()
    chks = chunks(1:n, ntasks)
    # Phase 1: scan each chunk independently.
    Threads.@threads for s in 1:length(chks)
        _scan!(@view arr[chks[s][1]])
    end
    # Phase 2: serial scan of the per-chunk totals.
    segment_ends = [arr[chks[s][1][end]] for s in 1:length(chks)]
    _scan!(segment_ends)
    # Phase 3: offset every chunk after the first by the preceding chunk totals.
    Threads.@threads for s in 2:length(chks)
        @view(arr[chks[s][1]]) .+= segment_ends[s-1]
    end
    return arr
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 2361 | using LazyArrays
# Fill the row-value block for all dofs of node `n`: collect the dof numbers of
# the node and its neighbors, sort them once, then copy the sorted block to the
# columns of the node's remaining dofs.
function _store_dofs!(rowval, n, nghbrs, dofnums, colptr)
    s1 = colptr[dofnums[n, 1]]
    p = 0
    # The lazy vcat iterates over the node and its neighbors without allocating a temporary.
    @inbounds for k in ApplyArray(vcat, n, nghbrs)
        for d in axes(dofnums, 2)
            rowval[s1+p] = dofnums[k, d]; p += 1
        end
    end
    bl = p # block length
    sort!(@view(rowval[s1:s1+bl-1]))
    @inbounds for d in 2:size(dofnums, 2)
        s = colptr[dofnums[n, d]]
        copy!(@view(rowval[s:s+bl-1]), @view(rowval[s1:s1+bl-1]))
    end
    return nothing
end
function _row_blocks(IT, map, dofnums)
nd = size(dofnums, 2)
total_dofs = length(map) * nd
lengths = Vector{IT}(undef, total_dofs + 1)
lengths[1] = 1
@inbounds Threads.@threads for k in eachindex(map)
kl = (length(map[k]) + 1) * nd # +1 for the node itself
for d in axes(dofnums, 2)
j = dofnums[k, d]
lengths[j+1] = kl
end
end
return lengths
end
function _acc_start_ptr!(s)
len = length(s)
@inbounds for k = 1:len-1
s[k+1] += s[k]
end
s
end
function _csc_arrays(IT, FT, map, dofnums)
# First we create an array of the lengths of the dof blocks
colptr = _row_blocks(IT, map, dofnums)
# Now we start overwriting the "lengths" array colptr with the starts
# ThreadedScans.scan!(+, colptr) # replaced because of some weird results on the Grace system
psp_scan!(colptr)
sumlen = colptr[end] - 1
rowval = Vector{IT}(undef, sumlen) # This will get filled in later
nzval = _zeros_via_calloc(FT, sumlen) # This needs to be initialized for future accumulation
return colptr, rowval, nzval
end
"""
csc_symmetric_pattern(
dofnums::Array{IT,2},
nrowscols,
n2n,
FT = Float64,
) where {IT<:Integer}
Create symmetric sparse zero matrix (sparsity pattern).
Uses the following data structures:
```
n2n = FENodeToNeighborsMap(n2e, fes.conn)
```
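A sketch of building the pattern for a nodal field `psi`, assuming `fes` and
the node-to-element map `n2e` exist (as in the package tests):
```
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
```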
"""
function csc_symmetric_pattern(
dofnums::Array{IT,2},
nrowscols,
n2n,
FT = Float64,
) where {IT<:Integer}
@assert length(n2n.map) == size(dofnums, 1)
colptr, rowval, nzval = _csc_arrays(IT, FT, n2n.map, dofnums)
Base.Threads.@threads for n in axes(dofnums, 1)
_store_dofs!(rowval, n, n2n.map[n], dofnums, colptr)
end
return SparseMatrixCSC(nrowscols, nrowscols, colptr, rowval, nzval)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 180 |
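# Allocate a zero-initialized array via `calloc`: the OS supplies zeroed pages
# lazily, which is cheaper than `zeros` for large buffers that are filled later.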
function _zeros_via_calloc(::Type{T}, dims::Integer...) where {T}
ptr = Ptr{T}(Libc.calloc(prod(dims), sizeof(T)))
return unsafe_wrap(Array{T}, ptr, dims; own = true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 69 | using Test
include("test_coloring.jl")
include("test_parallel.jl")
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1931 | module mparallelcoloring_2
using FinEtools
using FinEtoolsMultithreading.Exports
using ECLGraphColor
using LinearAlgebra
using Test
function test_coloring(coloring, n2e)
element_colors, unique_colors = coloring
    @assert isempty(findall(c -> c <= 0, element_colors)) # every element must carry a valid color
    @assert isempty(findall(c -> c > maximum(unique_colors), element_colors))
# @show sort(unique(element_colors)), sort(unique_colors)
@assert norm(sort(unique(element_colors)) - sort(unique_colors)) == 0
for k in eachindex(n2e.map)
nc = element_colors[n2e.map[k]]
@assert length(nc) == length(unique(nc))
@assert norm(sort(nc) - sort(unique(nc))) == 0
end
true
end
function test()
W = 11.0
L = 12.0
t = 10.0
nl, nt, nw = 72, 83, 24
nl, nt, nw = 2, 3, 4
# nl, nt, nw = 7, 13, 9
# nl, nt, nw = 17, 13, 19
# nl, nt, nw = 27, 23, 29
ntasks = 2
# @show methods(element_coloring)
fens, fes = T4block(Float64.((nl, nw, nt))..., nl, nw, nt)
n2e = FENodeToFEMap(fes, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
e2e = FElemToNeighborsMap(n2e, fes.conn)
coloring = element_coloring(fes, n2e)
@time coloring = element_coloring(fes, n2e)
# @show unique(coloring[1])
test_coloring(coloring, n2e)
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
coloring = element_coloring(fes, e2e, ntasks)
@time coloring = element_coloring(fes, e2e, ntasks)
# @show unique(coloring[1])
# element_colors, unique_colors = coloring
# @show unique_colors = sort(unique_colors)
# @show partitions = unique_colors
# for j in 1:length(partitions)
# sfes = subset(fes, findall(v -> v == partitions[j], element_colors))
# @show count(sfes)
# vtkexportmesh("mesh_test_coloring_1-c=$(partitions[j]).vtk", fens, sfes)
# end
test_coloring(coloring, n2e)
true
end
test()
nothing
end | FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 1416 | module m
function _binary_search(array::Array{IT,1}, target::IT, left::IT, right::IT) where {IT}
    @inbounds while left <= right
        mid = fld(left + right, 2) # middle element position
        if array[mid] < target
            left = mid + 1 # the target can only be in the right subarray
        elseif array[mid] > target
            right = mid - 1 # the target can only be in the left subarray
        else # the target is at the middle itself
            return mid
        end
    end
    return 0 # not found
end
N = 10000
r = collect(1:N)
g = collect(N:-1:1)
NLOOP = 1000
d = Dict{Int, Int}(zip(r, r))
function test_bsearch(N, r, g)
for l in 1:NLOOP
for R in Int.(round.([0.13, 0.39, 0.77, 0.98] * N))
v = do_bsearch(N, r, g, R)
@assert v == g[R]
end
end
nothing
end
function do_bsearch(N, r, g, R)
k = _binary_search(r, R, 1, N)
return g[k]
end
function test_dict(N, d, g)
for l in 1:NLOOP
for R in Int.(round.([0.13, 0.39, 0.77, 0.98] * N))
v = do_dict(N, d, g, R)
@assert v == g[R]
end
end
nothing
end
function do_dict(N, d, g, R)
k = d[R]
return g[k]
end
using BenchmarkTools
# Time with the globals as-is and with `$`-interpolated arguments.
@btime test_bsearch(N, r, g)
@btime test_bsearch($N, $r, $g)
@btime test_dict(N, d, g)
@btime test_dict($N, $d, $g)
end # module
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 17861 | module mscan001
using Test
using ThreadedScans
using FinEtoolsMultithreading: _scan!, psp_scan!
function test()
a = [1, 3, 4, 2, 6, 3, 2, 1, 3, 4, 7, 8, 3]
for i in 1:12
a = vcat(a, a)
end
@show length(a)
@info "ThreadedScans.scan!"
    b = deepcopy(a)
    ThreadedScans.scan!(+, b[1:5]) # warm-up on a throwaway copy (slicing allocates)
    ThreadedScans.scan!(+, b)
@info "_scan!"
c = deepcopy(a)
_scan!(c)
@test maximum(abs.(b - c)) == 0
@info "psp_scan!"
    d = deepcopy(a)
    psp_scan!(d[1:5]) # warm-up on a throwaway copy (slicing allocates)
    d = deepcopy(a)
    psp_scan!(d)
@test maximum(abs.(b - d)) == 0
true
end
test()
nothing
end
module mparallelfenodetoelemmap
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: SysmatAssemblerSparsePatt, SysmatAssemblerSparsePattwLookup, startassembly!, assemble!, makematrix!
using FinEtoolsMultithreading.FENodeToFEMapModule: FENodeToFEMapThr
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 15, 13, 12
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
n2e = FinEtools.FENodeToFEMap(fes.conn, count(fens))
n2ethr = FENodeToFEMapThr(fes, count(fens))
for i in eachindex(n2e.map)
@test n2e.map[i] == n2ethr.map[i]
end
# @info "On $(Threads.nthreads()) threads"
true
end
test()
nothing
end
module mparallelassembly_assembler_w_lup_2
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: SysmatAssemblerSparsePatt, SysmatAssemblerSparsePattwLookup, startassembly!, assemble!, makematrix!
using FinEtoolsMultithreading.FENodeToFEMapModule: FENodeToFEMapThr
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
nl, nt, nw = 5, 3, 4
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
femm = FEMMBase(IntegDomain(fes, GaussRule(3, 2)))
n2e = FENodeToFEMapThr(fes, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
K = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n)
c = [i for i in fes.conn[end]]
z = zeros(8, 8); r = psi.dofnums[c, 1]
assembler = SysmatAssemblerSparsePattwLookup(K)
startassembly!(assembler, 8, 8, 1000, nalldofs(psi), nalldofs(psi))
assemble!(assembler, z, r, r)
assembler = SysmatAssemblerSparsePatt(K)
startassembly!(assembler, 8, 8, 1000, nalldofs(psi), nalldofs(psi))
assemble!(assembler, z, r, r)
# @code_warntype assemble!(assembler, zeros(8, 8), psi.dofnums[c, 1], psi.dofnums[c, 1])
true
end
test()
nothing
end
module mparallelassembly_assembler_w_lup_1
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: SysmatAssemblerSparsePattwLookup, startassembly!, assemble!, makematrix!
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
nl, nt, nw = 2, 3, 2
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
femm = FEMMBase(IntegDomain(fes, GaussRule(3, 2)))
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
assmblr = SysmatAssemblerSparsePattwLookup(K)
K = bilform_diffusion(femm, assmblr, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mmmmaps1
using FinEtools
using FinEtoolsMultithreading.Exports
using Test
function test(n = 2)
W, L, H = 3.5, 7.1, 9.3
fens, fes = H8block(W, L, H, n, n, n)
n2e = FENodeToFEMap(fes.conn, count(fens))
e2e = FElemToNeighborsMap(n2e, fes.conn)
found = true
for i in eachindex(n2e.map)
for k in n2e.map[i]
for m in n2e.map[i]
(k != m) && ( # self-references are excluded
found = found && ((k in e2e.map[m]) && (m in e2e.map[k]))
)
end
end
end
@test found
e2e = FElemToNeighborsMap(n2e, fes)
found = true
for i in eachindex(n2e.map)
for k in n2e.map[i]
for m in n2e.map[i]
(k != m) && ( # self-references are excluded
found = found && ((k in e2e.map[m]) && (m in e2e.map[k]))
)
end
end
end
@test found
end
test(2)
test(17)
test(18)
nothing
end
module mmmmaps2
using FinEtools
using FinEtoolsMultithreading.Exports
using Test
function test(n = 20)
W, L, H = 3.5, 7.1, 9.3
fens, fes = H8block(W, L, H, n, n, n)
# println("nalldofs(u) = $(nalldofs(u))").#
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
found = true
for i in eachindex(fes.conn)
for k in fes.conn[i]
for m in fes.conn[i]
(k != m) && ( # exclude self reference
found = found && ((k in n2n.map[m]) && (m in n2n.map[k]))
)
end
end
end
@test found
n2n = FENodeToNeighborsMap(n2e, fes)
found = true
for i in eachindex(fes.conn)
for k in fes.conn[i]
for m in fes.conn[i]
(k != m) && ( # exclude self reference
found = found && ((k in n2n.map[m]) && (m in n2n.map[k]))
)
end
end
end
@test found
end
test(13)
test(17)
test(18)
nothing
end
module mparallelassembly_assembler_1
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: SysmatAssemblerSparsePatt, startassembly!, assemble!, makematrix!
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
nl, nt, nw = 2, 3, 2
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
femm = FEMMBase(IntegDomain(fes, GaussRule(3, 2)))
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
assmblr = SysmatAssemblerSparsePatt(K)
K = bilform_diffusion(femm, assmblr, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_1
using FinEtools
using FinEtoolsMultithreading.Exports
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
nl, nt, nw = 2, 3, 2
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
K = parallel_make_matrix(
fes,
psi,
createsubdomain,
matrixcomputation!;
ntasks=Threads.nthreads(),
kind=:CSC,
)
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_2
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose, parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
nl, nt, nw = 2, 3, 2
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K_pattern = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
coloring = element_coloring(fes, n2e)
decomposition = decompose(fes, coloring, createsubdomain, ntasks)
K = parallel_matrix_assembly!(
SysmatAssemblerSparsePatt(K_pattern),
decomposition,
matrixcomputation!
)
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_3
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose, parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using LinearAlgebra
using Test
function test_coloring(coloring, n2e)
element_colors, unique_colors = coloring
@assert norm(sort(unique(element_colors)) - sort(unique_colors)) == 0
for k in eachindex(n2e.map)
nc = element_colors[n2e.map[k]]
@assert length(nc) == length(unique(nc))
@assert norm(sort(nc) - sort(unique(nc))) == 0
end
end
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K_pattern = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, eltype(psi.values))
coloring = element_coloring(fes, n2e)
test_coloring(coloring, n2e)
decomposition = decompose(fes, coloring, createsubdomain, ntasks)
K = parallel_matrix_assembly!(
SysmatAssemblerSparsePatt(K_pattern),
decomposition,
matrixcomputation!
)
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
if !(norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5)
@show K_ff
@show K_ff2
end
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_4
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose, parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = parallel_make_matrix(
fes,
psi,
createsubdomain,
matrixcomputation!;
ntasks,
kind=:CSC,
)
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_5
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose, parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
ntasks = 2
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
n2e = FENodeToFEMap(fes.conn, count(fens))
n2n = FENodeToNeighborsMap(n2e, fes.conn)
K = parallel_make_matrix(
fes,
psi,
createsubdomain,
matrixcomputation!;
ntasks,
kind=:CSC,
)
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
module mparallelassembly_high_level_6
using FinEtools
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose, parallel_matrix_assembly!, SysmatAssemblerSparsePatt
using LinearAlgebra
using Test
function test()
W = 1.1
L = 12.0
t = 0.32
nl, nt, nw = 12, 33, 24
ntasks = 1
fens, fes = H8block(L, W, t, nl, nw, nt)
geom = NodalField(fens.xyz)
psi = NodalField(fill(1.0, count(fens), 1))
nl = collect(1:3)
setebc!(psi, nl, true, ones(Int, length(nl)), 0.0)
numberdofs!(psi)
v_f = gathersysvec(psi)
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assembler)
bilform_diffusion(femm, assembler, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
K = parallel_make_matrix(
fes,
psi,
createsubdomain,
matrixcomputation!
)
# n2e = FENodeToFEMap(fes, nnodes(psi))
# n2n = FENodeToNeighborsMap(n2e, fes)
# K_pattern = csc_symmetric_pattern(psi.dofnums, nalldofs(psi), n2n, zero(Float64))
# coloring = element_coloring(fes, n2e)
# decomposition = decompose(fes, coloring, createsubdomain, ntasks)
# K = parallel_matrix_assembly!(
# SysmatAssemblerSparsePatt(K_pattern),
# decomposition,
# matrixcomputation!
# )
K_ff = matrix_blocked_ff(K, nfreedofs(psi))
result = abs(v_f' * K_ff * v_f)
ass = SysmatAssemblerFFBlock(nfreedofs(psi))
K_ff2 = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))), ass, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@test norm(K_ff - K_ff2) / norm(K_ff2) <= 1.0e-5
@test abs(v_f' * K_ff2 * v_f - (result)) / (result) <= 1.0e-5
true
end
test()
nothing
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 1372 | [](http://www.repostatus.org/#active)
[](https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl/actions)
[](https://app.codecov.io/gh/PetrKryslUCSD/FinEtoolsMultithreading.jl)
[](https://petrkryslucsd.github.io/FinEtoolsMultithreading.jl/dev)
# FinEtoolsMultithreading.jl
This package is an overlay for FinEtools-based application packages. It can be
used to parallelize certain finite element operations with multithreading.
Currently, sparse matrix operators can be assembled in parallel on multiple threads. Please refer to the documentation.
## Examples
Check out the [`examples/README.md`](examples/README.md) documentation.
## References
A [paper](https://dx.doi.org/10.2139/ssrn.4775111) has been submitted and is under review.
The original submission is consistent with v0.1.13.
A revision of the paper is now under review; a draft has been placed in the `docs` folder. The revision is consistent with v0.5.0.
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 385 |
Issues and ideas:
-- Documenter:
using Pkg; Pkg.add("DocumenterTools");
using DocumenterTools
DocumenterTools.genkeys(user="PetrKryslUCSD", repo="[email protected]:PetrKryslUCSD/FinEtoolsMultithreading.jl.git")
using Pkg; Pkg.rm("DocumenterTools");
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 531 | # FinEtoolsMultithreading Documentation
```@contents
```
## Conceptual guide
The construction of the toolkit is described: the composition of modules, the basic data structures, the methodology of computing quantities required in the finite element methodology, and more.
```@contents
Pages = [
"guide/guide.md",
]
Depth = 1
```
## Manual
The description of the types and the functions, organized by module and/or other logical principle.
```@contents
Pages = [
"man/man.md",
]
Depth = 2
```
## Index
```@index
``` | FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 994 | # Guide
The [`FinEtools`](https://petrkryslucsd.github.io/FinEtools.jl/latest/index.html)
package is used here to solve a variety of finite element problems.
This package can provide an overlay to parallelize sparse matrix assembly.
There is one high-level function that can be used to parallelize the assembly of any sparse matrix -- acoustic mass or stiffness, conductivity matrix, stiffness or mass matrix, etc.
This bit of code will assemble the diffusion bilinear form sparse matrix stored in the CSR format:
```
function createsubdomain(fessubset)
FEMMBase(IntegDomain(fessubset, GaussRule(3, 2)))
end
function matrixcomputation!(femm, assblr)
bilform_diffusion(femm, assblr, geom, psi, DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
end
K1 = parallel_make_matrix(fes, psi, createsubdomain, matrixcomputation!;
ntasks=ntasks, kind=:CSR)
```
The user can ask for any number of tasks to be used (even though it would be best to match it to the number of available threads). | FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
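For verification, the parallel result can be compared with a serial assembly of the same bilinear form. A sketch following the package tests (which use `kind=:CSC`; `norm` is from `LinearAlgebra`):
```
K1_ff = matrix_blocked_ff(K1, nfreedofs(psi))
K2_ff = bilform_diffusion(FEMMBase(IntegDomain(fes, GaussRule(3, 2))),
    SysmatAssemblerFFBlock(nfreedofs(psi)), geom, psi,
    DataCache(Matrix(1.0 * LinearAlgebra.I(3))))
@assert norm(K1_ff - K2_ff) / norm(K2_ff) <= 1.0e-5
```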
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 119 | # Manual
```@meta
CurrentModule = FinEtoolsMultithreading
```
## High-level API
```@docs
parallel_make_matrix
```
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | docs | 1828 | # FinEtoolsMultithreading.jl examples
The assumption is that the user is working in the `FinEtoolsMultithreading/examples` folder.
Furthermore, the use of a shell is presumed (for instance `bash`).
Then, we could use the shell commands:
```
user:~$ mkdir try
user:~$ cd try
user:~/try$ git clone https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git
user:~/try$ cd FinEtoolsMultithreading.jl/examples/
```
Then, we pick a test case and proceed as described below. It is assumed that the `julia` executable is on the path.
## Linear elasticity test case
The mesh with 50 mesh edges amounts to 500,000 elements.
To run the parallel and serial assembly for a mesh with 40 mesh edges along one
of the dimensions, type the following into the shell prompt:
The parallel assembly of the sparse matrix on three computing threads
```
julia -t 3 ./testlindef/parsim.jl 40
```
The serial assembly of the sparse matrix
```
julia ./testlindef/sersim.jl 40
```
## Steady-state heat analysis test case
The mesh with 50 mesh edges amounts to 750,000 elements.
To run the parallel and serial assembly for a mesh with 40 mesh edges along one
of the dimensions, type the following into the shell prompt:
The parallel assembly of the sparse matrix on three computing threads
```
julia -t 3 ./testheat/parsim.jl 40
```
The serial assembly of the sparse matrix
```
julia ./testheat/sersim.jl 40
```
## Modal acoustic analysis test case
The mesh with number of refinements 6 amounts to 8.4 million elements.
To run the parallel and serial assembly for a mesh refinement 4, type the following into the shell prompt:
The parallel assembly of the sparse matrix on three computing threads
```
julia -t 3 ./testacoust/parsim.jl 4
```
The serial assembly of the sparse matrix
```
julia ./testacoust/sersim.jl 4
```
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 2591 | __precompile__() # this module is safe to precompile
module Sleipnir
# ##############################################
# ########### PACKAGES ##############
# ##############################################
using Base: @kwdef
using Infiltrator
import Pkg
using PyCall
using JLD2
using Distributed
using Statistics
using CairoMakie
using Downloads
using HDF5
# ##############################################
# ############ PARAMETERS ###############
# ##############################################
cd(@__DIR__)
const global root_dir::String = dirname(Base.current_project())
# ##############################################
# ############ PYTHON LIBRARIES ##############
# ##############################################
# We either retrieve the reexported Python libraries from Sleipnir or we start from scratch
const netCDF4::PyObject = isdefined(Sleipnir, :netCDF4) ? Sleipnir.netCDF4 : PyNULL()
const cfg::PyObject = isdefined(Sleipnir, :cfg) ? Sleipnir.cfg : PyNULL()
const utils::PyObject = isdefined(Sleipnir, :utils) ? Sleipnir.utils : PyNULL()
const workflow::PyObject = isdefined(Sleipnir, :workflow) ? Sleipnir.workflow : PyNULL()
const tasks::PyObject = isdefined(Sleipnir, :tasks) ? Sleipnir.tasks : PyNULL()
const global_tasks::PyObject = isdefined(Sleipnir, :global_tasks) ? Sleipnir.global_tasks : PyNULL()
const graphics::PyObject = isdefined(Sleipnir, :graphics) ? Sleipnir.graphics : PyNULL()
const bedtopo::PyObject = isdefined(Sleipnir, :bedtopo) ? Sleipnir.bedtopo : PyNULL()
const millan22::PyObject = isdefined(Sleipnir, :millan22) ? Sleipnir.millan22 : PyNULL()
const MBsandbox::PyObject = isdefined(Sleipnir, :MBsandbox) ? Sleipnir.MBsandbox : PyNULL()
const salem::PyObject = isdefined(Sleipnir, :salem) ? Sleipnir.salem : PyNULL()
# Essential Python libraries
const xr::PyObject = isdefined(Sleipnir, :xr) ? Sleipnir.xr : PyNULL()
const rioxarray::PyObject = isdefined(Sleipnir, :rioxarray) ? Sleipnir.rioxarray : PyNULL()
const pd::PyObject = isdefined(Sleipnir, :pd) ? Sleipnir.pd : PyNULL()
# ##############################################
# ########## SLEIPNIR LIBRARIES ##############
# ##############################################
include("setup/config.jl")
# All parameters needed for the models
include("parameters/Parameters.jl")
# Anything related to managing glacier topographical and climate data
include("glaciers/glacier/Glacier.jl")
# All structures and functions related to ODINN models
include("models/Model.jl")
# Everything related to running simulations in ODINN
include("simulations/Simulation.jl")
end # module
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 907 |
@kwdef mutable struct Climate1Dstep{F <: AbstractFloat}
temp::Vector{F}
PDD::Vector{F}
snow::Vector{F}
rain::Vector{F}
gradient::Ref{F}
avg_gradient::Ref{F}
x::Vector{F}
y::Vector{F}
ref_hgt::Ref{F}
end
@kwdef mutable struct Climate1D{F <: AbstractFloat}
raw_climate::PyObject # Raw climate dataset for the whole simulation
# Buffers to avoid memory allocations
climate_raw_step::Ref{PyObject} # Raw climate trimmed for the current step
climate_step::Ref{PyObject} # Climate data for the current step
climate_2D_step::Climate2Dstep # 2D climate data for the current step to feed to the MB model
longterm_temps::Vector{F} # Longterm temperatures for the ice rheology
avg_temps::Ref{PyObject} # Intermediate buffer for computing average temperatures
avg_gradients::Ref{PyObject} # Intermediate buffer for computing average gradients
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 940 |
export Climate2Dstep, Climate2D
@kwdef mutable struct Climate2Dstep{F <: AbstractFloat}
temp::Matrix{F}
PDD::Matrix{F}
snow::Matrix{F}
rain::Matrix{F}
gradient::Ref{F}
avg_gradient::Ref{F}
x::Vector{F}
y::Vector{F}
ref_hgt::Ref{F}
end
@kwdef mutable struct Climate2D{F <: AbstractFloat}
raw_climate::PyObject # Raw climate dataset for the whole simulation
# Buffers to avoid memory allocations
climate_raw_step::Ref{PyObject} # Raw climate trimmed for the current step
climate_step::Ref{PyObject} # Climate data for the current step
climate_2D_step::Climate2Dstep # 2D climate data for the current step to feed to the MB model
longterm_temps::Vector{F} # Longterm temperatures for the ice rheology
avg_temps::Ref{PyObject} # Intermediate buffer for computing average temperatures
avg_gradients::Ref{PyObject} # Intermediate buffer for computing average gradients
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 10426 |
###############################################
############ FUNCTIONS #####################
###############################################
export initialize_glacier_climate!, downscale_2D_climate!, downscale_2D_climate,
get_cumulative_climate!, get_cumulative_climate, apply_t_cumul_grad!,
apply_t_grad!, trim_period, partial_year, get_longterm_temps
using Dates # to provide correct Julian time slices
"""
    initialize_glacier_climate!(glacier::AbstractGlacier, params::Parameters)
Initializes the `Climate` data structure for a given `Glacier`.
"""
function initialize_glacier_climate!(glacier::AbstractGlacier, params::Parameters)
dummy_period = partial_year(Day, params.simulation.tspan[1]):Day(1):partial_year(Day, params.simulation.tspan[1] + params.simulation.step)
raw_climate::PyObject = xr.open_dataset(joinpath(glacier.gdir.dir, "raw_climate_$(params.simulation.tspan).nc"))
climate_step = Ref{PyObject}(get_cumulative_climate(raw_climate.sel(time=dummy_period)))
climate_2D_step = downscale_2D_climate(climate_step[], glacier)
longterm_temps = get_longterm_temps(glacier.gdir, raw_climate)
glacier.climate = Climate2D(raw_climate = raw_climate,
climate_raw_step = Ref{PyObject}(raw_climate.sel(time=dummy_period)),
#climate_cum_step = raw_climate.sel(time=dummy_period).sum(),
climate_step = climate_step,
climate_2D_step = climate_2D_step,
longterm_temps = longterm_temps,
avg_temps = Ref{PyObject}(raw_climate.sel(time=dummy_period).temp.mean()),
avg_gradients = Ref{PyObject}(raw_climate.sel(time=dummy_period).gradient.mean()))
end
function generate_raw_climate_files(gdir::PyObject, tspan::Tuple{F, F}) where {F <: AbstractFloat}
if !ispath(joinpath(gdir.dir, "raw_climate_$tspan.nc"))
println("Getting raw climate data for: ", gdir.rgi_id)
# Get raw climate data for gdir
tspan_date = partial_year(Day, tspan[1]):Day(1):partial_year(Day, tspan[2])
climate = get_raw_climate_data(gdir)
# Make sure the desired period is covered by the climate data
period = trim_period(tspan_date, climate)
if any((climate.time[1].dt.date.data[1] <= period[1]) & any(climate.time[end].dt.date.data[1] >= period[end]))
climate = climate.sel(time=period) # Crop desired time period
else
@warn "No overlapping period available between climate tspan!"
end
# Save raw gdir climate on disk
climate.to_netcdf(joinpath(gdir.dir, "raw_climate_$tspan.nc"))
climate.close()
GC.gc()
end
end
"""
get_cumulative_climate(climate, gradient_bounds=[-0.009, -0.003], default_grad=-0.0065)
Computes Positive Degree Days (PDDs) and cumulative rainfall and snowfall from climate data.
"""
function get_cumulative_climate!(climate, period, gradient_bounds=[-0.009, -0.003], default_grad=-0.0065)
climate.climate_raw_step[] = climate.raw_climate.sel(time=period)
climate.avg_temps[] = climate.climate_raw_step[].temp.mean()
climate.avg_gradients[] = climate.climate_raw_step[].gradient.mean()
climate.climate_raw_step[].temp.data = climate.climate_raw_step[].temp.where(climate.climate_raw_step[].temp > 0.0, 0.0).data # get PDDs
climate.climate_raw_step[].gradient.data = utils.clip_array(climate.climate_raw_step[].gradient.data, gradient_bounds[1], gradient_bounds[2]) # Clip gradients within plausible values
climate.climate_step[] = climate.climate_raw_step[].sum() # get monthly cumulative values
climate.climate_step[] = climate.climate_step[].assign(Dict("avg_temp"=>climate.avg_temps[]))
climate.climate_step[] = climate.climate_step[].assign(Dict("avg_gradient"=>climate.avg_gradients[]))
climate.climate_step[].attrs = climate.climate_raw_step[].attrs
end
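"""
    get_cumulative_climate(climate, gradient_bounds=[-0.009, -0.003], default_grad=-0.0065)
Non-mutating variant: computes PDDs, clips the gradients within plausible bounds, and returns
the cumulative climate dataset with the average temperature and gradient attached.
"""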
function get_cumulative_climate(climate, gradient_bounds=[-0.009, -0.003], default_grad=-0.0065)
avg_temp = climate.temp.mean()
avg_gradients = climate.gradient.mean()
climate.temp.data = climate.temp.where(climate.temp > 0, 0).data # get PDDs
climate.gradient.data = utils.clip_array(climate.gradient.data, gradient_bounds[1], gradient_bounds[2]) # Clip gradients within plausible values
attributes = climate.attrs
climate_sum = climate.sum() # get monthly cumulative values
climate_sum = climate_sum.assign(Dict("avg_temp"=>avg_temp))
climate_sum = climate_sum.assign(Dict("avg_gradient"=>avg_gradients))
climate_sum.attrs = attributes
return climate_sum
end
"""
    get_raw_climate_data(gdir; temp_resolution="daily", climate="W5E5")
Downloads the raw W5E5 climate data with a given resolution (daily by default). Returns an xarray Dataset.
"""
function get_raw_climate_data(gdir; temp_resolution="daily", climate="W5E5")
MBsandbox.process_w5e5_data(gdir, climate_type=climate, temporal_resol=temp_resolution)
fpath = gdir.get_filepath("climate_historical", filesuffix="_daily_W5E5")
climate = xr.open_dataset(fpath)
return climate
end
# TODO: make snow/rain thresholds customizable
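"""
    apply_t_cumul_grad!(climate_2D_step::Climate2Dstep, S::Matrix{F})

Applies the temperature gradients to the 2D climate step based on the surface elevation `S`,
crops negative PDD values, and updates the snow/rain partition: snow is kept where the
corrected temperature is at or below 0 °C, rain where it is at or above 0 °C.
"""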
function apply_t_cumul_grad!(climate_2D_step::Climate2Dstep, S::Matrix{F}) where {F <: AbstractFloat}
# We apply the gradients to the temperature
climate_2D_step.temp .= climate_2D_step.temp .+ climate_2D_step.avg_gradient .* (S .- climate_2D_step.ref_hgt)
climate_2D_step.PDD .= climate_2D_step.PDD .+ climate_2D_step.gradient .* (S .- climate_2D_step.ref_hgt)
climate_2D_step.PDD .= ifelse.(climate_2D_step.PDD .< 0.0, 0.0, climate_2D_step.PDD) # Crop negative PDD values
# We adjust the rain/snow fractions with the updated temperature
climate_2D_step.snow .= ifelse.(climate_2D_step.temp .> 0.0, 0.0, climate_2D_step.snow)
climate_2D_step.rain .= ifelse.(climate_2D_step.temp .< 0.0, 0.0, climate_2D_step.rain)
end
"""
    apply_t_grad!(climate::PyObject, dem)
Applies temperature gradients to the glacier 2D climate data based on a DEM.
"""
function apply_t_grad!(climate::PyObject, dem)
# We apply the gradients to the temperature
# /!\ AVOID USING `.=` IN JULIA TO ASSIGN. IT'S NOT HANDLED BY XARRAY. USE `=` INSTEAD
climate.temp.data = climate.temp.data .+ climate.gradient.data .* (mean(dem.data) .- climate.ref_hgt)
end
"""
downscale_2D_climate(climate, g_dem)
Projects climate data to the glacier matrix by simply copying the closest gridpoint to all matrix gridpoints.
Generates a new xarray Dataset which is returned.
"""
function downscale_2D_climate!(glacier::Glacier2D)
# Update 2D climate structure
climate = glacier.climate
climate.climate_2D_step.temp .= climate.climate_step[].avg_temp.data[1]
climate.climate_2D_step.PDD .= climate.climate_step[].temp.data[1]
climate.climate_2D_step.snow .= climate.climate_step[].prcp.data[1]
climate.climate_2D_step.rain .= climate.climate_step[].prcp.data[1]
# Update gradients
climate.climate_2D_step.gradient[] = climate.climate_step[].gradient.data[1]
climate.climate_2D_step.avg_gradient[] = climate.climate_step[].avg_gradient.data[1]
# Apply temperature gradients and compute snow/rain fraction for the selected period
apply_t_cumul_grad!(climate.climate_2D_step, reshape(glacier.S, size(glacier.S))) # Reproject current S with xarray structure
end
function downscale_2D_climate(climate_step::PyObject, glacier::Glacier2D)
# Create dummy 2D arrays to have a base to apply gradients afterwards
MFT = typeof(glacier.S)
FT = typeof(glacier.S[1])
dummy_grid::MFT = ones(size(glacier.S))
temp_2D::MFT = climate_step.avg_temp.data .* dummy_grid
PDD_2D::MFT = climate_step.temp.data .* dummy_grid
snow_2D::MFT = climate_step.prcp.data .* dummy_grid
rain_2D::MFT = climate_step.prcp.data .* dummy_grid
# We generate a new dataset with the scaled data
climate_2D_step = Climate2Dstep(temp=temp_2D,
PDD=PDD_2D,
snow=snow_2D,
rain=rain_2D,
gradient=Ref{FT}(climate_step.gradient.data[1]),
avg_gradient=Ref{FT}(climate_step.avg_gradient.data[1]),
x=glacier.S_coords.x.data,
y= glacier.S_coords.y.data,
ref_hgt=Ref{FT}(climate_step.ref_hgt))
# Apply temperature gradients and compute snow/rain fraction for the selected period
apply_t_cumul_grad!(climate_2D_step, reshape(glacier.S, size(glacier.S))) # Reproject current S with xarray structure
return climate_2D_step
end
function downscale_2D_climate(glacier::Glacier2D)
climate_2D_step = downscale_2D_climate(glacier.climate.climate_step[], glacier)
return climate_2D_step
end
"""
trim_period(period, climate)
Trims a time period based on the time range of a climate series.
"""
function trim_period(period, climate)
if any(climate.time[1].dt.date.data[1] > period[1])
head = jldate(climate.time[1])
period = Date(year(head), 10, 1):Day(1):period[end] # make it a hydrological year
end
if any(climate.time[end].dt.date.data[1] < period[end])
tail = jldate(climate.time[end])
period = period[1]:Day(1):Date(year(tail), 9, 30) # make it a hydrological year
end
return period
end
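"""
    partial_year(period::Type{<:Period}, float)
Converts a fractional year (e.g. `2015.7`) into a `Date`, resolving the fractional part
at the resolution of `period` (`Day` by default).
"""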
function partial_year(period::Type{<:Period}, float)
_year, Δ = divrem(float, 1)
year_start = Date(_year)
year = period((year_start + Year(1)) - year_start)
partial = period(round(Dates.value(year) * Δ))
year_start + partial
end
partial_year(float) = partial_year(Day, float)
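"""
    get_longterm_temps(gdir::PyObject, tspan)
    get_longterm_temps(gdir::PyObject, climate::PyObject)
Computes the yearly mean air temperatures, corrected to the glacier's mean surface elevation,
to be used as long-term temperatures for the ice rheology.
"""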
function get_longterm_temps(gdir::PyObject, tspan)
climate = xr.open_dataset(joinpath(gdir.dir, "raw_climate_$tspan.nc")) # load only once at the beginning
dem = rioxarray.open_rasterio(gdir.get_filepath("dem"))
apply_t_grad!(climate, dem)
longterm_temps = climate.groupby("time.year").mean().temp.data
return longterm_temps
end
function get_longterm_temps(gdir::PyObject, climate::PyObject)
dem = rioxarray.open_rasterio(gdir.get_filepath("dem"))
apply_t_grad!(climate, dem)
longterm_temps = climate.groupby("time.year").mean().temp.data
return longterm_temps
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 49 |
include("Glacier2D.jl")
include("Glacier1D.jl") | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 3448 |
export Glacier1D, Climate1D, AbstractGlacier
abstract type AbstractGlacier end
include("../climate/Climate1D.jl")
mutable struct Glacier1D{F <: AbstractFloat, I <: Integer} <: AbstractGlacier
rgi_id::Union{String, Nothing}
gdir::Union{PyObject, Nothing}
climate::Union{Climate1D, Nothing}
H₀::Union{Vector{F}, Nothing}
S::Union{Vector{F}, Nothing}
B::Union{Vector{F}, Nothing}
V::Union{Vector{F}, Nothing}
A::Union{F, Nothing}
C::Union{F, Nothing}
n::Union{F, Nothing}
w₀::Union{Vector{F}, Nothing}
λ::Union{Vector{F}, Nothing}
slope::Union{Vector{F}, Nothing}
dist_border::Union{Vector{F}, Nothing}
S_coords::Union{PyObject, Nothing}
Δx::Union{F, Nothing}
Δy::Union{F, Nothing}
nx::Union{I, Nothing}
ny::Union{I, Nothing}
end
"""
function Glacier1D(;
rgi_id::Union{String, Nothing} = nothing,
gdir::Union{PyObject, Nothing} = nothing,
climate::Union{Climate1D, Nothing} = nothing,
H₀::Union{Vector{F}, Nothing} = nothing,
S::Union{Vector{F}, Nothing} = nothing,
B::Union{Vector{F}, Nothing} = nothing,
V::Union{Vector{F}, Nothing}= nothing,
slope::Union{Vector{F}, Nothing} = nothing,
dist_border::Union{Vector{F}, Nothing} = nothing,
S_coords::Union{PyObject, Nothing} = nothing,
Δx::Union{F, Nothing} = nothing,
Δy::Union{F, Nothing} = nothing,
nx::Union{I, Nothing} = nothing,
ny::Union{I, Nothing} = nothing
) where {F <: AbstractFloat, I <: Integer}
Constructor for empty 2D Glacier object.
"""
function Glacier1D(;
rgi_id::Union{String, Nothing} = nothing,
gdir::Union{PyObject, Nothing} = nothing,
climate::Union{Climate1D, Nothing} = nothing,
H₀::Union{Vector{F}, Nothing} = nothing,
S::Union{Vector{F}, Nothing} = nothing,
B::Union{Vector{F}, Nothing} = nothing,
V::Union{Vector{F}, Nothing}= nothing,
A::Union{F, Nothing} = nothing,
C::Union{F, Nothing} = nothing,
n::Union{F, Nothing} = nothing,
w₀::Union{Vector{F}, Nothing} = nothing,
λ::Union{Vector{F}, Nothing} = nothing,
slope::Union{Vector{F}, Nothing} = nothing,
dist_border::Union{Vector{F}, Nothing} = nothing,
S_coords::Union{PyObject, Nothing} = nothing,
Δx::Union{F, Nothing} = nothing,
Δy::Union{F, Nothing} = nothing,
nx::Union{I, Nothing} = nothing,
ny::Union{I, Nothing} = nothing
) where {F <: AbstractFloat, I <: Integer}
# Define default float and integer type for constructor
ft = Float64
it = Int64
return Glacier1D{ft,it}(rgi_id, gdir, climate, H₀, S, B, V, A, C, n, w₀, λ, slope, dist_border, S_coords, Δx, Δy, nx, ny)
end
###############################################
################### UTILS #####################
###############################################
Base.:(==)(a::Glacier1D, b::Glacier1D) = a.rgi_id == b.rgi_id && a.gdir == b.gdir && a.climate == b.climate &&
                                      a.H₀ == b.H₀ && a.S == b.S && a.B == b.B && a.V == b.V &&
                                      a.A == b.A && a.C == b.C && a.n == b.n && a.w₀ == b.w₀ && a.λ == b.λ &&
                                      a.slope == b.slope && a.dist_border == b.dist_border &&
                                      a.S_coords == b.S_coords && a.Δx == b.Δx && a.Δy == b.Δy && a.nx == b.nx && a.ny == b.ny
include("glacier1D_utils.jl")
include("../climate/climate1D_utils.jl") | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 4705 |
export Glacier2D, Climate2D
abstract type AbstractGlacier end
include("../climate/Climate2D.jl")
mutable struct Glacier2D{F <: AbstractFloat, I <: Integer} <: AbstractGlacier
rgi_id::Union{String, Nothing}
gdir::Union{PyObject, Nothing}
climate::Union{Climate2D, Nothing}
H₀::Union{Matrix{F}, Nothing}
H_glathida::Union{Matrix{F}, Nothing}
S::Union{Matrix{F}, Nothing}
B::Union{Matrix{F}, Nothing}
V::Union{Matrix{F}, Nothing}
Vx::Union{Matrix{F}, Nothing}
Vy::Union{Matrix{F}, Nothing}
A::Union{F, Nothing}
C::Union{F, Nothing}
n::Union{F, Nothing}
slope::Union{Matrix{F}, Nothing}
dist_border::Union{Matrix{F}, Nothing}
S_coords::Union{PyObject, Nothing}
Δx::Union{F, Nothing}
Δy::Union{F, Nothing}
nx::Union{I, Nothing}
ny::Union{I, Nothing}
cenlon::Union{F, Nothing}
cenlat::Union{F, Nothing}
end
"""
function Glacier2D(;
rgi_id::Union{String, Nothing} = nothing,
gdir::Union{PyObject, Nothing} = nothing,
climate::Union{Climate2D, Nothing} = nothing,
H₀::Union{Matrix{F}, Nothing} = nothing,
H_glathida::Union{Matrix{F}, Nothing},
S::Union{Matrix{F}, Nothing} = nothing,
B::Union{Matrix{F}, Nothing} = nothing,
V::Union{Matrix{F}, Nothing}= nothing,
slope::Union{Matrix{F}, Nothing} = nothing,
dist_border::Union{Matrix{F}, Nothing} = nothing,
S_coords::Union{PyObject, Nothing} = nothing,
Δx::Union{F, Nothing} = nothing,
Δy::Union{F, Nothing} = nothing,
nx::Union{I, Nothing} = nothing,
ny::Union{I, Nothing} = nothing
) where {F <: AbstractFloat, I <: Integer}
Constructor for empty 2D Glacier object.
"""
function Glacier2D(;
rgi_id::Union{String, Nothing} = nothing,
gdir::Union{PyObject, Nothing} = nothing,
climate::Union{Climate2D, Nothing} = nothing,
H₀::Union{Matrix{F}, Nothing} = nothing,
H_glathida::Union{Matrix{F}, Nothing} = nothing,
S::Union{Matrix{F}, Nothing} = nothing,
B::Union{Matrix{F}, Nothing} = nothing,
V::Union{Matrix{F}, Nothing}= nothing,
Vx::Union{Matrix{F}, Nothing}= nothing,
Vy::Union{Matrix{F}, Nothing}= nothing,
A::Union{F, Nothing} = nothing,
C::Union{F, Nothing} = nothing,
n::Union{F, Nothing} = nothing,
slope::Union{Matrix{F}, Nothing} = nothing,
dist_border::Union{Matrix{F}, Nothing} = nothing,
S_coords::Union{PyObject, Nothing} = nothing,
Δx::Union{F, Nothing} = nothing,
Δy::Union{F, Nothing} = nothing,
nx::Union{I, Nothing} = nothing,
ny::Union{I, Nothing} = nothing,
cenlon::Union{F, Nothing} = nothing,
cenlat::Union{F, Nothing} = nothing
) where {F <: AbstractFloat, I <: Integer}
# Define default float and integer type for constructor
ft = typeof(Δx)
it = typeof(nx)
return Glacier2D{ft,it}(rgi_id, gdir, climate, H₀, H_glathida, S, B, V, Vx, Vy, A, C, n, slope, dist_border, S_coords, Δx, Δy, nx, ny, cenlon, cenlat)
end
###############################################
################### UTILS #####################
###############################################
Base.:(==)(a::Glacier2D, b::Glacier2D) = a.rgi_id == b.rgi_id && a.gdir == b.gdir && a.climate == b.climate &&
a.H₀ == b.H₀ && a.H_glathida == b.H_glathida && a.S == b.S && a.B == b.B && a.V == b.V &&
a.A == b.A && a.C == b.C && a.n == b.n &&
a.slope == b.slope && a.dist_border == b.dist_border &&
a.S_coords == b.S_coords && a.Δx == b.Δx && a.Δy == b.Δy && a.nx == b.nx && a.ny == b.ny &&
a.cenlon == b.cenlon && a.cenlat == b.cenlat
Base.:(≈)(a::Glacier2D, b::Glacier2D) = a.rgi_id == b.rgi_id && a.gdir == b.gdir && a.climate == b.climate &&
safe_approx(a.H₀, b.H₀) && safe_approx(a.H_glathida, b.H_glathida) &&
safe_approx(a.S, b.S) && safe_approx(a.B, b.B) && safe_approx(a.V, b.V) &&
safe_approx(a.A, b.A) && safe_approx(a.C, b.C) && safe_approx(a.n, b.n) &&
isapprox(a.slope, b.slope; rtol=1e-3) && safe_approx(a.dist_border, b.dist_border) &&
a.S_coords == b.S_coords && safe_approx(a.Δx, b.Δx) && safe_approx(a.Δy, b.Δy) &&
safe_approx(a.nx, b.nx) && safe_approx(a.ny, b.ny) &&
safe_approx(a.cenlon, b.cenlon) && safe_approx(a.cenlat, b.cenlat)
include("glacier2D_utils.jl")
include("../climate/climate2D_utils.jl") | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 17076 |
export initialize_glaciers
###############################################
############ FUNCTIONS #####################
###############################################
"""
initialize_glaciers(rgi_ids::Vector{String}, params::Parameters; velocities=true)
Initialize multiple `Glacier`s based on a list of RGI IDs, a º span for a simulation and step.
Keyword arguments
=================
- `rgi_ids`: List of RGI IDs of glaciers
- `tspan`: Tuple specifying the initial and final year of the simulation
- `params`: `Parameters` object to be passed
"""
function initialize_glaciers(rgi_ids::Vector{String}, params::Parameters; test=false)
# Generate missing glaciers file
missing_glaciers_path = joinpath(params.simulation.working_dir, "data")
if !isdir(missing_glaciers_path)
mkdir(missing_glaciers_path)
end
if !isfile(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"))
missing_glaciers = Vector([])
jldsave(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"); missing_glaciers)
end
filter_missing_glaciers!(rgi_ids, params)
# Initialize glacier directories
gdirs::Vector{PyObject} = init_gdirs(rgi_ids, params; velocities=params.simulation.velocities)
# Generate raw climate data if necessary
if params.simulation.test_mode
map((gdir) -> generate_raw_climate_files(gdir, params.simulation.tspan), gdirs) # avoid GitHub CI issue
else
pmap((gdir) -> generate_raw_climate_files(gdir, params.simulation.tspan), gdirs)
end
glaciers::Vector{Glacier2D} = pmap((gdir) -> initialize_glacier(gdir, params; smoothing=false, test=test), gdirs)
if params.simulation.use_glathida_data == true
data_glathida, glathida_rgi_ids = get_glathida_path_and_IDs()
# Obtain H_glathida values for the valid RGI IDs
H_glathida_values, valid_gdirs = get_glathida!(data_glathida, gdirs, params)
valid_rgi_ids = [gdir.rgi_id for gdir in valid_gdirs]
if isempty(valid_rgi_ids)
error("None of the provided RGI IDs have GlaThiDa.")
end
if length(valid_rgi_ids) < length(rgi_ids)
@warn "Not all glaciers have GlaThiDa data available."
end
# Create a mapping from RGI ID to H_glathida value
rgi_to_H_glathida = Dict(zip(valid_rgi_ids, H_glathida_values))
# Assign H_glathida to glaciers with valid RGI IDs
for glacier in glaciers
if glacier.rgi_id in valid_rgi_ids
glacier.H_glathida = rgi_to_H_glathida[glacier.rgi_id]
end
end
end
return glaciers
end
"""
initialize_glacier(gdir::PyObject, tspan, step; smoothing=false, velocities=true)
Initialize a single `Glacier`s, including its `Climate`, based on a `gdir` and timestepping arguments.
Keyword arguments
=================
- `gdir`: Glacier directory
- `tspan`: Tuple specifying the initial and final year of the simulation
- `step`: Step in years for the surface mass balance processing
- `smoothing` Flag determining if smoothing needs to be applied to the surface elevation and ice thickness.
- `velocities` Flag determining if the ice surface velocities need to be retrieved.
"""
function initialize_glacier(gdir::PyObject, parameters; smoothing=false, test=false)
# Initialize glacier initial topography
glacier = initialize_glacier_data(gdir, parameters; smoothing=smoothing, test=test)
# Initialize glacier climate
initialize_glacier_climate!(glacier, parameters)
if test
glacier.gdir = nothing
glacier.S_coords = nothing
end
return glacier
end
"""
initialize_glacier(gdir::PyObject; smoothing=false, velocities=true)
Retrieves the initial glacier geometry (bedrock + ice thickness) for a glacier with other necessary data (e.g. grid size and ice surface velocities).
"""
function initialize_glacier_data(gdir::PyObject, params::Parameters; smoothing=false, test=false)
# Load glacier gridded data
F = params.simulation.float_type
I = params.simulation.int_type
glacier_gd = xr.open_dataset(gdir.get_filepath("gridded_data"))
# println("Using $ice_thickness_source for initial state")
# Retrieve initial conditions from OGGM
# initial ice thickness conditions for forward model
if params.OGGM.ice_thickness_source == "Millan22" && params.simulation.velocities
H₀ = F.(ifelse.(glacier_gd.glacier_mask.data .== 1, glacier_gd.millan_ice_thickness.data, 0.0))
elseif params.OGGM.ice_thickness_source == "Farinotti19"
H₀ = F.(ifelse.(glacier_gd.glacier_mask.data .== 1, glacier_gd.consensus_ice_thickness.data, 0.0))
end
fillNaN!(H₀) # Fill NaNs with 0s to have real boundary conditions
if smoothing
println("Smoothing is being applied to initial condition.")
smooth!(H₀) # Smooth initial ice thickness to help the solver
end
# Create path for simulation results
gdir_path = dirname(gdir.get_filepath("dem"))
if !isdir(gdir_path)
mkdir(gdir_path)
end
try
# We filter glacier borders in high elevations to avoid overflow problems
dist_border::Matrix{F} = F.(glacier_gd.dis_from_border.data)
# H_mask = (dist_border .< 20.0) .&& (S .> maximum(S)*0.7)
# H₀[H_mask] .= 0.0
B::Matrix{F} = F.(glacier_gd.topo.data) .- H₀ # bedrock
S_coords::PyObject = rioxarray.open_rasterio(gdir.get_filepath("dem"))
#S::Matrix{F} = F.(glacier_gd.topo.data)
S::Matrix{F} = F.(S_coords[1].values) # surface elevation
#smooth!(S)
if params.simulation.velocities
V::Matrix{F} = F.(ifelse.(glacier_gd.glacier_mask.data .== 1, glacier_gd.millan_v.data, 0.0))
Vx::Matrix{F} = F.(ifelse.(glacier_gd.glacier_mask.data .== 1, glacier_gd.millan_vx.data, 0.0))
Vy::Matrix{F} = F.(ifelse.(glacier_gd.glacier_mask.data .== 1, glacier_gd.millan_vy.data, 0.0))
fillNaN!(V)
fillNaN!(Vx)
fillNaN!(Vy)
else
V = zeros(F, size(H₀))
Vx = zeros(F, size(H₀))
Vy = zeros(F, size(H₀))
end
        nx = I(glacier_gd.y.size) # glacier extent
        ny = I(glacier_gd.x.size) # note: OGGM swaps x and y with respect to the matrix layout used here
        Δx = abs(F(gdir.grid.dx))
        Δy = abs(F(gdir.grid.dy))
slope = F.(glacier_gd.slope.data)
glacier_gd.close() # Release any resources linked to this object
# We initialize the Glacier with all the initial topographical conditions
glacier = Glacier2D(rgi_id = gdir.rgi_id, gdir = gdir,
climate=nothing,
H₀ = H₀, S = S, B = B, V = V, Vx = Vx, Vy = Vy,
A = 4e-17, C = 0.0, n = 3.0,
slope = slope, dist_border = dist_border,
S_coords = S_coords, Δx=Δx, Δy=Δy, nx=nx, ny=ny,
cenlon = gdir.cenlon, cenlat = gdir.cenlat)
return glacier
catch error
@show error
missing_glaciers = load(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"))["missing_glaciers"]
push!(missing_glaciers, gdir.rgi_id)
jldsave(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"); missing_glaciers)
glacier_gd.close() # Release any resources linked to this object
@warn "Glacier without data: $(gdir.rgi_id). Updating list of missing glaciers. Please try again."
end
end
"""
init_gdirs(rgi_ids; force=false)
Initializes Glacier Directories using OGGM. Wrapper function calling `init_gdirs_scratch(rgi_ids)`.
"""
function init_gdirs(rgi_ids::Vector{String}, params::Parameters; velocities=true)
# Try to retrieve glacier gdirs if they are available
filter_missing_glaciers!(rgi_ids, params)
try
gdirs::Vector{PyObject} = workflow.init_glacier_directories(rgi_ids)
filter_missing_glaciers!(gdirs, params)
# Set different surface topography source if specified
if params.OGGM.DEM_source != "Default"
for gdir in gdirs
tasks.define_glacier_region(gdir, source = params.OGGM.DEM_source)
end
end
return gdirs
catch
@warn "Cannot retrieve gdirs from disk!"
println("Generating gdirs from scratch...")
# Generate all gdirs if needed
gdirs::Vector{PyObject} = init_gdirs_scratch(rgi_ids, params; velocities = velocities)
# Check which gdirs errored in the tasks (useful to filter those gdirs)
filter_missing_glaciers!(gdirs, params)
return gdirs
end
end
"""
init_gdirs_scratch(rgi_ids)
Initializes Glacier Directories from scratch using OGGM.
"""
function init_gdirs_scratch(rgi_ids::Vector{String}, params::Parameters; velocities=true)::Vector{PyObject}
# Check if some of the gdirs is missing files
gdirs::Vector{PyObject} = workflow.init_glacier_directories(rgi_ids, prepro_base_url=params.OGGM.base_url,
from_prepro_level=2, prepro_border=10,
reset=true, force=true)
# Set different surface topography source if specified
if params.OGGM.DEM_source != "Default"
for gdir in gdirs
tasks.define_glacier_region(gdir, source = params.OGGM.DEM_source)
end
end
if velocities
        list_tasks = [
# tasks.compute_centerlines,
# tasks.initialize_flowlines,
# tasks.compute_downstream_line,
# tasks.catchment_area,
# tasks.process_dem,
tasks.gridded_attributes,
tasks.glacier_masks,
# tasks.gridded_mb_attributes,
# tasks.prepare_for_inversion, # This is a preprocessing task
            # tasks.mass_conservation_inversion, # This does the actual inversion job
# tasks.filter_inversion_output, # This smoothes the thicknesses at the tongue a little
# tasks.distribute_thickness_per_altitude,
bedtopo.add_consensus_thickness, # Use consensus ice thicknesses from Farinotti et al. (2019)
# tasks.get_topo_predictors,
millan22.thickness_to_gdir,
millan22.velocity_to_gdir
]
else
        list_tasks = [
tasks.gridded_attributes,
tasks.glacier_masks,
bedtopo.add_consensus_thickness # Use consensus ice thicknesses from Farinotti et al. (2019)
]
end
    for task in list_tasks
# The order matters!
workflow.execute_entity_task(task, gdirs)
end
GC.gc()
return gdirs
end
# [Begin] Glathida Utilities
function get_glathida!(gtd_file, gdirs, params; force=false)
glathida = pd.HDFStore(gtd_file)
gtd_grids = map(gdir -> get_glathida_glacier(gdir, glathida, force), gdirs)
# Update missing_glaciers list before removing them
missing_glaciers = load(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"))["missing_glaciers"]
for (gtd_grid, gdir) in zip(gtd_grids, gdirs)
if (length(gtd_grid[gtd_grid .!= 0.0]) == 0) && all(gdir.rgi_id .!= missing_glaciers)
push!(missing_glaciers, gdir.rgi_id)
@info "Glacier with all data at 0: $(gdir.rgi_id). Updating list of missing glaciers..."
end
end
jldsave(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"); missing_glaciers)
# Apply deletion to both gtd_grids and gdirs using the same set of indices
indices_to_remove = findall(x -> length(x[x .!= 0.0]) == 0, gtd_grids)
deleteat!(gtd_grids, indices_to_remove)
deleteat!(gdirs, indices_to_remove)
return gtd_grids, gdirs
end
function get_glathida_glacier(gdir, glathida, force)
gtd_path = joinpath(gdir.dir, "glathida.h5")
if isfile(gtd_path) && !force
gtd_grid = h5read(gtd_path, "gtd_grid")
else
df_gtd = glathida[gdir.rgi_id]
jj, ii = gdir.grid.transform(df_gtd["POINT_LON"], df_gtd["POINT_LAT"], crs=salem.wgs84, nearest=true)
gtd_grid = zeros((gdir.grid.ny,gdir.grid.nx))
for (thick, i, j) in zip(df_gtd["THICKNESS"], ii, jj)
if gtd_grid[i,j] != 0.0
gtd_grid[i,j] = (gtd_grid[i,j] + thick)/2.0 # average
else
gtd_grid[i,j] = thick
end
end
# Save file
h5open(joinpath(gdir.dir, "glathida.h5"), "w") do file
write(file, "gtd_grid", gtd_grid)
end
end
return gtd_grid
end
function get_glathida_path_and_IDs()
gtd_file = Downloads.download("https://cluster.klima.uni-bremen.de/~oggm/glathida/glathida-v3.1.0/data/TTT_per_rgi_id.h5")
glathida = pd.HDFStore(gtd_file)
rgi_ids = glathida.keys()
rgi_ids = String[id[2:end] for id in rgi_ids]
return gtd_file, rgi_ids
end
# [End] Glathida Utilities
function filter_missing_glaciers!(gdirs::Vector{PyObject}, params::Parameters)
task_log::PyObject = global_tasks.compile_task_log(gdirs,
task_names=["gridded_attributes", "velocity_to_gdir", "thickness_to_gdir"])
task_log.to_csv(joinpath(params.simulation.working_dir, "task_log.csv"))
glacier_filter = ((task_log.velocity_to_gdir != "SUCCESS").values .&& (task_log.gridded_attributes != "SUCCESS").values
.&& (task_log.thickness_to_gdir != "SUCCESS").values)
glacier_ids = String[]
for id in task_log.index
push!(glacier_ids, id)
end
missing_glaciers::Vector{String} = glacier_ids[glacier_filter]
try
missing_glaciers_old::Vector{String} = load(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"))["missing_glaciers"]
for missing_glacier in missing_glaciers_old
            if all(missing_glacier .!= missing_glaciers) # add the glacier if it is not yet in the list
push!(missing_glaciers, missing_glacier)
end
end
catch error
@warn "$error: No missing_glaciers.jld file available. Skipping..."
end
for id in missing_glaciers
deleteat!(gdirs, findall(x->x.rgi_id==id, gdirs))
end
# Save missing glaciers in a file
jldsave(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"); missing_glaciers)
# @warn "Filtering out these glaciers from gdir list: $missing_glaciers"
return missing_glaciers
end
function filter_missing_glaciers!(rgi_ids::Vector{String}, params::Parameters)
# Check which glaciers we can actually process
rgi_stats::PyObject = pd.read_csv(utils.file_downloader("https://cluster.klima.uni-bremen.de/~oggm/rgi/rgi62_stats.csv"), index_col=0)
# rgi_stats = rgi_stats.loc[rgi_ids]
# if any(rgi_stats.Connect .== 2)
# @warn "You have some level 2 glaciers... Removing..."
# rgi_ids = [rgi_stats.loc[rgi_stats.Connect .!= 2].index]
# end
indices = [rgi_stats.index...]
for rgi_id in rgi_ids
        if any(rgi_stats.Connect.values[indices .== rgi_id] .== 2)
@warn "Filtering glacier $rgi_id..."
deleteat!(rgi_ids, rgi_ids .== rgi_id)
end
end
try
missing_glaciers::Vector{String} = load(joinpath(params.simulation.working_dir, "data/missing_glaciers.jld2"))["missing_glaciers"]
for missing_glacier in missing_glaciers
deleteat!(rgi_ids, findall(x->x == missing_glacier,rgi_ids))
end
@info "Filtering out these glaciers from RGI ID list: $missing_glaciers"
catch error
@warn "$error: No missing_glaciers.jld file available. Skipping..."
end
end
"""
fillNaN!(x, fill)
Convert empty matrix grid cells into fill value
"""
function fillNaN!(A, fill=zero(eltype(A)))
for i in eachindex(A)
@inbounds A[i] = ifelse(isnan(A[i]), fill, A[i])
end
end
function fillNaN(A, fill=zero(eltype(A)))
return @. ifelse(isnan(A), fill, A)
end
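# Example (sketch): `fillNaN!` replaces NaNs in place, `fillNaN` returns a copy.
#
#   A = [1.0 NaN; NaN 4.0]
#   fillNaN!(A)                    # A is now [1.0 0.0; 0.0 4.0]
#   B = fillNaN([NaN 2.0], -1.0)   # returns [-1.0 2.0]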
function fillZeros!(A, fill=NaN)
for i in eachindex(A)
@inbounds A[i] = ifelse(iszero(A[i]), fill, A[i])
end
end
function fillZeros(A, fill=NaN)
return @. ifelse(iszero(A), fill, A)
end
"""
smooth!(A)
Smooth data contained in a matrix with one time step (CFL) of diffusion.
"""
@views function smooth!(A)
A[2:end-1,2:end-1] .= A[2:end-1,2:end-1] .+ 1.0./4.1.*(diff(diff(A[:,2:end-1], dims=1), dims=1) .+ diff(diff(A[2:end-1,:], dims=2), dims=2))
A[1,:]=A[2,:]; A[end,:]=A[end-1,:]; A[:,1]=A[:,2]; A[:,end]=A[:,end-1]
end
# function smooth(A)
# A_smooth = A[2:end-1,2:end-1] .+ 1.0./4.1.*(diff(diff(A[:,2:end-1], dims=1), dims=1) .+ diff(diff(A[2:end-1,:], dims=2), dims=2))
# @tullio A_smooth_pad[i,j] := A_smooth[pad(i-1,1,1),pad(j-1,1,1)] # Fill borders
# return A_smooth_pad
# end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 386 |
export Model, AbstractModel
abstract type AbstractModel end
const AbstractEmptyModel = Union{AbstractModel,Nothing}
# Composite type as a representation of ODINN models
mutable struct Model{IFM <: AbstractEmptyModel, MBM <: AbstractEmptyModel, MLM <: AbstractEmptyModel}
iceflow::Union{IFM, Vector{IFM}}
mass_balance::Union{MBM, Vector{MBM}}
machine_learning::MLM
end
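# Example (hypothetical sketch): a `Model` is assembled from an iceflow model, a
# mass balance model and an optional machine learning model, whose concrete types
# are defined in downstream packages. `iceflow_model` and `mb_model` below are
# placeholders for such `AbstractModel` subtypes:
#
#   model = Model(iceflow_model, mb_model, nothing)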
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 3750 |
export oggm_config
struct OGGMparameters <: AbstractParameters
working_dir::String
paths::Union{PyDict, Nothing}
params::Union{PyDict, Nothing}
multiprocessing::Bool
workers::Int64
ice_thickness_source::String
DEM_source::String
base_url::String
end
"""
OGGMparameters(;
working_dir::String = joinpath(homedir(), "OGGM/OGGM_data"),
paths::Union{PyDict, Nothing} = nothing,
paths::Union{PyDict, Nothing} = nothing,
multiprocessing::Bool = false,
workers::Int64 = 1,
base_url::String = "https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.6/L1-L2_files/elev_bands/"
)
Initializes OGGM and it configures its parameters.
Keyword arguments
=================
- `working_dir`: Working directory were all the files will be stored.
- `paths`: Dictionary for OGGM-related paths.
- `params`: Dictionary for OGGM-related parameters.
- `multiprocessing`: Determines if multiprocessing is used for OGGM.
- `workers`: How many workers are to be used for OGGM multiprocessing.
- `ice_thickness_source`: Source for the ice thickness dataset. Either `Millan22` of `Farinotti19`.
- `base_url`: Base URL to download all OGGM data.
"""
function OGGMparameters(;
working_dir::String = joinpath(homedir(), "OGGM/OGGM_data"),
paths::Union{PyDict, Nothing} = nothing,
params::Union{PyDict, Nothing} = nothing,
multiprocessing::Bool = false,
workers::Int64 = 1,
ice_thickness_source::String = "Farinotti19",
DEM_source::String = "Default",
base_url::String = "https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.6/L1-L2_files/elev_bands/",
test = false
)
@assert ((ice_thickness_source == "Millan22") || (ice_thickness_source == "Farinotti19")) "Wrong ice thickness source! Should be either `Millan22` or `Farinotti19`."
# Build the OGGM parameters and configuration
OGGM_parameters = OGGMparameters(working_dir, paths, params,
multiprocessing, workers,
ice_thickness_source, DEM_source,
base_url)
return OGGM_parameters
end
Base.:(==)(a::OGGMparameters, b::OGGMparameters) = a.working_dir == b.working_dir && a.paths == b.paths && a.params == b.params &&
a.multiprocessing == b.multiprocessing && a.workers == b.workers && a.ice_thickness_source == b.ice_thickness_source &&
a.DEM_source == b.DEM_source && a.base_url == b.base_url
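# Example usage (sketch, mirroring the defaults above):
#
#   oggm_params = OGGMparameters(working_dir = joinpath(homedir(), "OGGM/OGGM_data"),
#                                ice_thickness_source = "Millan22")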
"""
oggm_config()
Configures the basic paths and parameters for OGGM.
"""
function oggm_config(working_dir=joinpath(homedir(), "OGGM/OGGM_data"); oggm_processes=1)
scope = @__MODULE__ # Capture current module to allow use from external packages (e.g. Huginn, Muninn and ODINN)
@eval begin
@everywhere begin
@eval $scope begin
cfg.initialize() # initialize OGGM configuration
PATHS = PyDict(cfg."PATHS") # OGGM PATHS
PATHS["working_dir"] = $working_dir # Choose own custom path for the OGGM data
PARAMS = PyDict(cfg."PARAMS")
PARAMS["hydro_month_nh"]=1
PARAMS["dl_verify"] = false
PARAMS["continue_on_error"] = true # avoid stopping when a task fails for a glacier (e.g. lack of data)
PARAMS["border"] = 10
# Multiprocessing
multiprocessing = $oggm_processes > 1 ? true : false
PARAMS["use_multiprocessing"] = multiprocessing # Let's use multiprocessing for OGGM
if multiprocessing
PARAMS["mp_processes"] = $oggm_processes
end
end # @eval Sleipnir
end # @everywhere
end # @eval
end | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 1883 | export Parameters, AbstractParameters, PhysicalParameters, SimulationParameters, OGGMparameters, AbstractEmptyParams
abstract type AbstractParameters end
const AbstractEmptyParams = Union{AbstractParameters,Nothing}
include("PhysicalParameters.jl")
include("SimulationParameters.jl")
include("OGGMparameters.jl")
struct Parameters{PPHY <: AbstractEmptyParams, PSIM <: AbstractEmptyParams, PHY <: AbstractEmptyParams,
PSOL <: AbstractEmptyParams, PUDE <: AbstractEmptyParams, POGGM <: AbstractEmptyParams, PINV <: AbstractEmptyParams}
physical::PPHY
simulation::PSIM
OGGM::POGGM
hyper::PHY
solver::PSOL
UDE::PUDE
inversion::PINV
end
"""
Parameters(;
simulation::SimulationParameters = SimulationParameters()
physical::PhysicalParameters = PhysicalParameters()
)
Initialize ODINN parameters
Keyword arguments
=================
"""
function Parameters(;
physical::PhysicalParameters = PhysicalParameters(),
simulation::SimulationParameters = SimulationParameters(),
OGGM::OGGMparameters = OGGMparameters(),
)
# Build the parameters based on all the subtypes of parameters
parameters = Parameters(physical, simulation, OGGM,
nothing, nothing, nothing, nothing)
if parameters.simulation.multiprocessing
enable_multiprocessing(parameters.simulation.workers)
end
oggm_config(parameters.OGGM.working_dir; oggm_processes=parameters.OGGM.workers)
return parameters
end
Base.:(==)(a::Parameters, b::Parameters) = a.physical == b.physical && a.simulation == b.simulation &&
a.OGGM == b.OGGM && a.solver == b.solver && a.hyper == b.hyper &&
a.UDE == b.UDE && a.inversion == b.inversion
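# Example usage (sketch): composing the parameter sub-structs. Note that the
# constructor may spawn workers and configure OGGM as a side effect:
#
#   params = Parameters(physical   = PhysicalParameters(),
#                       simulation = SimulationParameters(tspan = (2010.0, 2015.0)),
#                       OGGM       = OGGMparameters())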
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 1912 |
struct PhysicalParameters{F <: AbstractFloat} <: AbstractParameters
ρ::F
g::F
ϵ::F
η₀::F
maxA::F
minA::F
maxTlaw::F
minTlaw::F
noise_A_magnitude::F
end
"""
PhysicalParameters(;
ρ::Float64 = 900.0,
g::Float64 = 9.81,
ϵ::Float64 = 1e-3,
η₀::F = 1.0,
maxA::Float64 = 8e-17,
minA::Float64 = 8.5e-20,
maxTlaw::Float64 = 1.0,
minTlaw::Float64 = -25.0,
noise_A_magnitude::Float64 = 5e-18
)
Initialize the physical parameters of a model.
Keyword arguments
=================
- `ρ`: Ice density
- `g`: Gravitational constant
- `n`: Glen's exponent
- `A`: Glen's coefficient
- `ϵ`: Small number
- `C`: Sliding coefficient
- `η₀`:
- `maxA`: Maximum value for `A` (Glen's coefficient)
- `minA`: Minimum value for `A` (Glen's coefficient)
"""
function PhysicalParameters(;
ρ::F = 900.0,
g::F = 9.81,
ϵ::F = 1e-3,
η₀::F = 1.0,
maxA::F = 8e-17,
minA::F = 8.5e-20,
maxTlaw::F = 1.0,
minTlaw::F = -25.0,
noise_A_magnitude::F = 5e-18
) where {F <: AbstractFloat}
# Build PhysicalParameters based on values
ft = typeof(g)
physical_parameters = PhysicalParameters{ft}(ρ, g, ϵ, η₀,
maxA, minA,
maxTlaw, minTlaw,
noise_A_magnitude)
return physical_parameters
end
Base.:(==)(a::PhysicalParameters, b::PhysicalParameters) = a.ρ == b.ρ && a.g == b.g &&
                                      a.ϵ == b.ϵ && a.η₀ == b.η₀ &&
                                      a.maxA == b.maxA && a.minA == b.minA &&
                                      a.maxTlaw == b.maxTlaw && a.minTlaw == b.minTlaw &&
                                      a.noise_A_magnitude == b.noise_A_magnitude
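# Example usage (sketch): override only the fields of interest, the rest keep
# their defaults:
#
#   physical_params = PhysicalParameters(ρ = 900.0, maxA = 8e-17)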
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 3121 |
struct SimulationParameters{I <: Integer, F <: AbstractFloat} <: AbstractParameters
use_MB::Bool
use_iceflow::Bool
plots::Bool
velocities::Bool
overwrite_climate::Bool
use_glathida_data::Bool
float_type::DataType
int_type::DataType
tspan::Tuple{F, F}
step::F
multiprocessing::Bool
workers::I
working_dir::String
test_mode::Bool
end
"""
SimulationParameters(;
use_MB::Bool = true,
use_iceflow::Bool = true,
plots::Bool = true,
velocities::Bool = true,
overwrite_climate::Bool = false,
use_glathida_data::Bool = false,
float_type::DataType = Float64,
int_type::DataType = Int64,
tspan::Tuple{F, F} = (2010.0,2015.0),
multiprocessing::Bool = true,
workers::I = 4
)
Initialize the parameters for a simulation.
Keyword arguments
=================
- `use_MB`: Determines if surface mass balance should be used.
- `plots`: Determines if plots should be made.
- `overwrite_climate`: Determines if climate data should be overwritten
- 'use_glathida_data': Determines if data from the Glathida data set should be used
"""
function SimulationParameters(;
use_MB::Bool = true,
use_iceflow::Bool = true,
plots::Bool = true,
velocities::Bool = true,
overwrite_climate::Bool = false,
use_glathida_data::Bool = false,
float_type::DataType = Float64,
int_type::DataType = Int64,
tspan::Tuple{F, F} = (2010.0,2015.0),
step::F = 1/12,
multiprocessing::Bool = true,
workers::I = 4,
working_dir::String = "",
test_mode::Bool = false
) where {I <: Integer, F <: AbstractFloat}
simulation_parameters = SimulationParameters(use_MB, use_iceflow, plots, velocities,
overwrite_climate, use_glathida_data,
float_type, int_type,
tspan, step, multiprocessing, workers, working_dir, test_mode)
if !ispath(working_dir)
mkpath(joinpath(working_dir, "data"))
end
return simulation_parameters
end
Base.:(==)(a::SimulationParameters, b::SimulationParameters) = a.use_MB == b.use_MB && a.use_iceflow == b.use_iceflow && a.plots == b.plots &&
a.velocities == b.velocities && a.overwrite_climate == b.overwrite_climate && a.use_glathida_data == b.use_glathida_data &&
a.float_type == b.float_type && a.int_type == b.int_type &&
a.tspan == b.tspan && a.step == b.step && a.multiprocessing == b.multiprocessing &&
a.workers == b.workers && a.working_dir == b.working_dir && a.test_mode == b.test_mode
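# Example usage (sketch):
#
#   simulation_params = SimulationParameters(tspan = (2010.0, 2015.0),
#                                             multiprocessing = false,
#                                             working_dir = homedir())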
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 2344 | export netCDF4, cfg, utils, workflow, tasks, global_tasks, graphics, bedtopo, millan22, MBsandbox, salem, pd, xr, rioxarray
function __init__()
# Create structural folders if needed
OGGM_path = joinpath(homedir(), "Python/OGGM_data")
if !isdir(OGGM_path)
mkpath(OGGM_path)
end
# Load Python packages
try
# Only load Python packages if not previously loaded by Sleipnir
if cfg == PyNULL() && workflow == PyNULL() && utils == PyNULL() && MBsandbox == PyNULL()
println("Initializing Python libraries...")
copy!(netCDF4, pyimport("netCDF4"))
copy!(cfg, pyimport("oggm.cfg"))
copy!(utils, pyimport("oggm.utils"))
copy!(workflow, pyimport("oggm.workflow"))
copy!(tasks, pyimport("oggm.tasks"))
copy!(global_tasks, pyimport("oggm.global_tasks"))
copy!(graphics, pyimport("oggm.graphics"))
copy!(bedtopo, pyimport("oggm.shop.bedtopo"))
copy!(millan22, pyimport("oggm.shop.millan22"))
copy!(MBsandbox, pyimport("MBsandbox.mbmod_daily_oneflowline"))
copy!(salem, pyimport("salem"))
copy!(pd, pyimport("pandas"))
copy!(xr, pyimport("xarray"))
copy!(rioxarray, pyimport("rioxarray"))
end
catch e
@warn "It looks like you have not installed and/or activated the virtual Python environment. \n
Please follow the guidelines in: https://github.com/ODINN-SciML/ODINN.jl#readme"
@warn exception=(e, catch_backtrace())
end
end
function clean()
atexit() do
run(`$(Base.julia_cmd())`)
end
exit()
end
function enable_multiprocessing(procs::Int)
if procs > 0
if nprocs() < procs
@eval begin
addprocs($procs - nprocs(); exeflags="--project")
println("Number of cores: ", nprocs())
println("Number of workers: ", nworkers())
@everywhere using Sleipnir
end # @eval
elseif nprocs() != procs && procs == 1
@eval begin
rmprocs(workers(), waitfor=0)
println("Number of cores: ", nprocs())
println("Number of workers: ", nworkers())
end # @eval
end
end
return nworkers()
end
include("helper_utilities.jl") | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 685 | export safe_approx
# Function to override "≈" to handle nothing values
function safe_approx(a, b)
if isnothing(a) && isnothing(b)
return true
elseif isnothing(a) || isnothing(b)
return false
else
return a ≈ b
end
end
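# Example (sketch): `safe_approx` extends ≈-style comparison to `nothing` values.
#
#   safe_approx(nothing, nothing)   # true
#   safe_approx(1.0, nothing)       # false
#   safe_approx(1.0, 1.0 + 1e-12)   # true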
# Function for Python objects
function safe_getproperty(obj::PyObject, prop_name::Symbol)
if PyCall.hasproperty(obj, prop_name)
return PyCall.getproperty(obj, prop_name)
else
return 0.0
end
end
# Function for Julia objects
function safe_getproperty(obj, prop_name::Symbol)
if hasproperty(obj, prop_name)
return getproperty(obj, prop_name)
else
return 0.0
end
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 397 |
export Simulation, Results
# Abstract type as a parent type for simulations
abstract type Simulation end
include("results/Results.jl")
###############################################
################### UTILS #####################
###############################################
include("simulation_utils.jl")
include("results/results_utils.jl")
include("results/results_plotting_utils.jl")
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 216 |
"""
stop_condition_tstops(u,t,integrator, tstops)
Function that iterates through the tstops, with a closure including `tstops`
"""
function stop_condition_tstops(u,t,integrator, tstops)
t in tstops
end
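# Example usage (hypothetical sketch): as the condition of a `DiscreteCallback`
# from the SciML ecosystem, closing over a vector of stop times. `affect!` is a
# placeholder for the user-provided callback action:
#
#   tstops = collect(2010.0:1/12:2015.0)
#   condition(u, t, integrator) = stop_condition_tstops(u, t, integrator, tstops)
#   # cb = DiscreteCallback(condition, affect!)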
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 1761 |
mutable struct Results{F <: AbstractFloat}
rgi_id::String
H::Vector{Matrix{F}}
H_glathida::Union{Nothing, Vector{Matrix{F}}}
S::Matrix{F}
B::Matrix{F}
V::Matrix{F}
Vx::Matrix{F}
Vy::Matrix{F}
V_ref::Union{Nothing, Matrix{F}}
Vx_ref::Union{Nothing, Matrix{F}}
Vy_ref::Union{Nothing, Matrix{F}}
Δx::F
Δy::F
lon::Union{Nothing, F}
lat::Union{Nothing, F}
θ::Union{Nothing, Vector{F}}
loss::Union{Nothing, Vector{F}}
end
function Results(glacier::G, ifm::IF;
rgi_id::String = glacier.rgi_id,
H::Vector{Matrix{F}} = Vector{Matrix{F}}([]),
H_glathida::Union{Nothing, Vector{Matrix{F}}} = glacier.H_glathida,
S::Matrix{F} = zeros(F, size(ifm.S)),
B::Matrix{F} = zeros(F, size(ifm.B)),
V::Matrix{F} = zeros(F, size(ifm.V)),
Vx::Matrix{F} = zeros(F, size(ifm.Vx)),
Vy::Matrix{F} = zeros(F, size(ifm.Vy)),
V_ref::Union{Nothing, Matrix{F}} = glacier.V,
Vx_ref::Union{Nothing, Matrix{F}} = glacier.Vx,
Vy_ref::Union{Nothing, Matrix{F}} = glacier.Vy,
Δx::F = glacier.Δx,
Δy::F = glacier.Δy,
lon::Union{Nothing, F} = glacier.cenlon,
lat::Union{Nothing, F} = glacier.cenlat,
θ::Union{Nothing,Vector{F}} = nothing,
loss::Union{Nothing,Vector{F}} = nothing
) where {G <: AbstractGlacier, F <: AbstractFloat, IF <: AbstractModel}
# Build the results struct based on input values
results = Results(rgi_id, H, H_glathida, S, B,
V, Vx, Vy, V_ref, Vx_ref, Vy_ref,
Δx, Δy,lon,lat,
θ, loss)
return results
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 18375 | export plot_glacier
function plot_glacier_heatmaps(results, variables, title_mapping; scale_text_size::Union{Nothing,Float64}=nothing)
# Dictionary of variable-specific colormaps
colormap_mapping = Dict(key => value[3] for (key, value) in title_mapping)
# Extract the rgi_id
rgi_id = :rgi_id in fieldnames(typeof(results)) ? results.rgi_id : "none"
# Extract longitude and latitude
lon = if hasproperty(results, :lon)
results.lon
elseif hasproperty(results.gdir, :cenlon)
results.gdir.cenlon
else
nothing
end
lat = if hasproperty(results, :lat)
results.lat
elseif hasproperty(results.gdir, :cenlat)
results.gdir.cenlat
else
nothing
end
Δx = results.Δx
ice_thickness_vars = [:H, :H₀, :H_glathida, :H_pred, :H_obs] # Ice thickness variables
velocity_vars = [:V, :Vx, :Vy, :V_pred, :V_obs] # Velocity variables, excluding V_diff
# Initialize max_values for ice thickness and velocity separately, considering only given variables
max_values_ice = []
max_values_velocity = []
for var in intersect(union(ice_thickness_vars, velocity_vars), variables) # Check only given vars for maximum
if hasproperty(results, var)
current_matrix = getfield(results, var)
if !isnothing(current_matrix) && !isempty(current_matrix)
if typeof(current_matrix) <: Vector
current_matrix = current_matrix[end]
end
if var in ice_thickness_vars
push!(max_values_ice, maximum(current_matrix))
elseif var in velocity_vars
push!(max_values_velocity, maximum(current_matrix))
end
end
end
end
# Determine global maximum for ice and velocity separately
global_max_ice = isempty(max_values_ice) ? nothing : maximum(max_values_ice)
global_max_velocity = isempty(max_values_velocity) ? nothing : maximum(max_values_velocity)
num_vars = length(variables)
rows, cols = if num_vars == 1
2, 2
elseif num_vars == 2
3, 2
elseif num_vars in [3, 4]
3, 4
else
error("Unsupported number of variables.")
end
fig = Figure(layout=GridLayout(rows, cols))
for (i, var) in enumerate(variables)
ax_row = div(i - 1, 2) + 1
ax_col = 2 * (rem(i - 1, 2)) + 1
ax = Axis(fig[ax_row, ax_col], aspect=DataAspect())
data = getfield(results, var)
if typeof(data) <: Vector
data = data[end]
end
ny, nx = size(data)
data = reverse(data', dims=2)
colormap = get(colormap_mapping, string(var), :cool) # Default colormap
# Apply global_max_ice to ice thickness variables and global_max_velocity to velocity variables
if var in ice_thickness_vars
hm = heatmap!(ax, data, colormap=colormap, colorrange=(0, global_max_ice))
Colorbar(fig[ax_row, ax_col + 1], hm)
elseif var in velocity_vars
hm = heatmap!(ax, data, colormap=colormap, colorrange=(0, global_max_velocity))
Colorbar(fig[ax_row, ax_col + 1], hm)
else
hm = heatmap!(ax, data, colormap=colormap)
Colorbar(fig[ax_row, ax_col + 1], hm)
end
title, unit = get(title_mapping, string(var), (string(var), ""))
ax.title = "$title ($unit)"
ax.xlabel = "Longitude"
ax.ylabel = "Latitude"
ax.xticks=([round(nx/2)], ["$lon °"])
ax.yticks=([round(ny/2)], ["$lat °"])
ax.yticklabelrotation = π/2
ax.ylabelpadding = 15
ax.yticklabelalign = (:center, :bottom)
scale_width = 0.10*nx
scale_number = round(Δx * scale_width / 1000; digits=1) # Convert to km
if scale_text_size === nothing
textsize = if num_vars == 1
1.2*scale_width
elseif num_vars == 2
0.9*scale_width
else
0.5*scale_width
end
else
textsize = scale_text_size
end
poly!(ax, Rect(nx - round(0.15*nx), round(0.075*ny), scale_width, scale_width/10), color=:black)
text!(ax, "$scale_number km", position=(nx - round(0.15*nx) + scale_width/16, round(0.075*ny) + scale_width/10), fontsize=textsize)
end
fig[0, :] = Label(fig, "$rgi_id")
resize_to_layout!(fig)
return fig
end
function plot_glacier_difference_evolution(results, variables, title_mapping; tspan, metrics)
# Check if more than one variable is passed
if length(variables) > 1
error("Only one variable can be passed to this function.")
end
# Check for valid metrics
valid_metrics = ["hist", "difference"]
for metric in metrics
if !(metric in valid_metrics)
error("Invalid metric: $metric. Valid metrics are: $valid_metrics")
return
end
end
# Extract data for the variable
data = getfield(results, variables[1])
#Extract longitude and latitude
lon = hasproperty(results, :lon) ? results.lon : "none"
lat = hasproperty(results, :lat) ? results.lat : "none"
#pixel width
Δx = results.Δx
# Check the shape of the extracted data
if typeof(data) ≠ Vector{Matrix{Float64}}
error("Only temporal quantities can be used in this function.")
end
# Extract the rgi_id
rgi_id = :rgi_id in fieldnames(typeof(results)) ? results.rgi_id : "none"
# Print plot information
variable_title = get(title_mapping, variables[1], variables[1])
# Create a time vector
t = range(tspan[1], stop=tspan[2], length=length(getfield(results, variables[1])))
matrix_size = size(data[1])
diff_width = 1.0 * matrix_size[2]
diff_height = 1.0 * matrix_size[1]
data_diff=data[end] - data[1]
# Determine whether to create a single plot or a subplot
if metrics == ["hist"]
fig = Figure()
ax = Axis(fig[1, 1], xlabel="Δ$variable_title ($(title_mapping[string(variables[1])][2]))", ylabel="Frequency", title="Histogram of $(title_mapping[string(variables[1])][1]) Evolution")
elseif metrics == ["difference"]
fig = Figure()
ax_diff = Axis(fig[1, 1], title="$(title_mapping[string(variables[1])][1]) Evolution",aspect=DataAspect())
else
fig = Figure(layout=GridLayout(1, 4))
ax = Axis(fig[1, 3:4], xlabel="Δ$variable_title ($(title_mapping[string(variables[1])][2]))", ylabel="Frequency", title="Histogram of $(title_mapping[string(variables[1])][1]) Evolution",width=diff_width,height=diff_height)
ax_diff = Axis(fig[1, 1], title="$(title_mapping[string(variables[1])][1]) Evolution",aspect=DataAspect())
end
# Plot based on the metric
for metric in metrics
if metric == "hist"
hist!(ax, vec(data[end]-data[1]), bins=50)
ax.limits[] = (minimum(data_diff), maximum(data_diff), nothing, nothing)
elseif metric == "difference"
ny, nx = size(data_diff)
data_diff = reverse(data_diff',dims=2) # Fix alignment
# Calculate the symmetric color range
max_abs_value = max(abs(minimum(data_diff)), abs(maximum(data_diff)))
hm_diff = heatmap!(ax_diff, data_diff, colormap=:redsblues, halign=:right, colorrange=(-max_abs_value, max_abs_value))
ax_diff.xlabel = "Longitude"
ax_diff.ylabel = "Latitude"
ax_diff.xticks=([round(nx/2)], ["$lon °"])
ax_diff.yticks=([round(ny/2)], ["$lat °"])
ax_diff.yticklabelrotation = π/2
ax_diff.ylabelpadding = 15.0
ax_diff.yticklabelalign = (:center, :bottom)
# Width of the scale division in heatmap data units
scale_width = 0.10*nx
scale_number = round(Δx * scale_width / 1000; digits=1)#to km
if metrics == ["difference"]
textsize=1.2*scale_width
else
textsize=0.5*scale_width
end
# Position and draw the scale division rectangle
poly!(ax_diff, Rect(nx -round(0.15*nx) , round(0.075*ny), scale_width, scale_width/10), color=:black)
text!(ax_diff, "$scale_number km",
position = (nx - round(0.15*nx)+scale_width/16, round(0.075*ny)+scale_width/10),
fontsize=textsize)
Colorbar(fig[1, 2], hm_diff)
end
end
fig[0, :] = Label(fig, "$rgi_id")
resize_to_layout!(fig)
fig # Return the main figure
end
function plot_glacier_statistics_evolution(results, variables, title_mapping; tspan, metrics, threshold=0.5)
# Check if more than one variable is passed
if length(variables) > 1
error("Only one variable can be passed to this function.")
end
# Extract data for the variable
data = getfield(results, variables[1])
# Check the shape of the extracted data
if typeof(data) ≠ Vector{Matrix{Float64}}
error("Only temporal quantities can be used in this function.")
end
# Check for valid metrics
valid_metrics = ["average", "median", "max", "std","min"]
for metric in metrics
if !(metric in valid_metrics)
error("Invalid metric: $metric. Valid metrics are: $valid_metrics")
return
end
end
# Extract the rgi_id
rgi_id = :rgi_id in fieldnames(typeof(results)) ? results.rgi_id : "none"
# Create a time vector
t = range(tspan[1], stop=tspan[2], length=length(getfield(results, variables[1])))
# Create a single plot for all other metrics
fig = Figure()
ax = Axis(fig[1, 1], xlabel="Time (years)", ylabel="$(title_mapping[string(variables[1])][1]) ($(title_mapping[string(variables[1])][2]))", title="Metrics for $(title_mapping[string(variables[1])][1]) through Time ($rgi_id)")
# If "average" or "std" is in metrics, calculate them
if "average" in metrics || "std" in metrics
avg_vals = [mean(filter(x -> !(isnan(x)) && x >= threshold, matrix[:])) for matrix in data]
std_vals = [std(filter(x -> !(isnan(x)) && x >= threshold, matrix[:])) for matrix in data]
end
# Calculate and plot metrics
for metric in metrics
if metric == "average"
if "std" in metrics
band!(ax, t, avg_vals .- std_vals, avg_vals .+ std_vals, fillalpha=0.1, label="Std Dev", color=:lightgray)
end
lines!(ax, t, avg_vals, label="Average")
elseif metric == "median"
median_vals = [median(filter(x -> !isnan(x) && x >= threshold, matrix[:])) for matrix in data]
lines!(ax, t, median_vals, label="Median")
elseif metric == "min"
min_vals = [minimum(filter(x -> !isnan(x) && x >= threshold, matrix[:])) for matrix in data]
lines!(ax, t, min_vals, linestyle=:dot, label="Min")
elseif metric == "max"
max_vals = [maximum(filter(x -> !isnan(x) && x >= threshold, matrix[:])) for matrix in data]
lines!(ax, t, max_vals, linestyle=:dot, label="Max")
end
end
leg = Legend(fig, ax)
fig[1, 2] = leg
resize_to_layout!(fig)
fig # Return the main figure
end
function plot_glacier_integrated_volume(results, variables, title_mapping; tspan)
# Determine pixel area
area=results.Δx*results.Δy
# Check if more than one variable is passed
if length(variables) > 1
error("Only one variable can be passed to this function.")
end
# Extract the rgi_id
rgi_id = :rgi_id in fieldnames(typeof(results)) ? results.rgi_id : "none"
# Print plot information
variable_title = get(title_mapping, variables[1], variables[1])
# Create a time vector
t = range(tspan[1], stop=tspan[2], length=length(getfield(results, variables[1])))
# Extract data for the variable
data = getfield(results, variables[1])
# Check the shape of the extracted data
if typeof(data) ≠ Vector{Matrix{Float64}}
error("Only temporal quantities can be used in this function.")
end
# Calculate integrated ice volume for each time step
integrated_ice_volume = [sum(matrix) * area for matrix in data] # Multiply by area
# Plot the integrated ice volume as a function of time
fig = Figure()
ax = Axis(fig[1, 1])
lines!(ax, t, integrated_ice_volume, color=:blue)
ax.xlabel = "Time (years)"
ax.ylabel = "Integrated Ice Volume (m³) "
ax.title = "Evolution of Integrated Ice Volume ($rgi_id)"
resize_to_layout!(fig)
return fig # Return the main figure with the plot
end
function plot_bias(data, keys; threshold = [0, 0])
# Check for exactly two keys
if length(keys) != 2
error("Exactly two keys are required for the scatter plot.")
end
    # Ensure threshold is an array of length 2
    if length(threshold) == 1
        threshold = [threshold[1], threshold[1]]
    end
# Extract data
rgi_id = data.rgi_id
x_values = getfield(data,keys[1])
y_values = getfield(data,keys[2])
# Filter non-zero observations if necessary
if :H_obs in keys || :V_obs in keys
obs_key = :H_obs in keys ? :H_obs : :V_obs
#non_zero_indices = findall(getfield(data,obs_key) .!= 0)
        mask_H = getfield(data,obs_key) .> threshold[1] * maximum(getfield(data,obs_key))
        mask_H_2 = getfield(data,obs_key) .< (1-threshold[2]) * maximum(getfield(data,obs_key))
x_values = x_values[mask_H .& mask_H_2]
y_values = y_values[mask_H .& mask_H_2]
end
# Calculate metrics
differences = x_values .- y_values
rmse = sqrt(mean(differences .^ 2)) # Root Mean Square Error
bias = mean(differences) # Bias
ss_res = sum(differences .^ 2) # Sum of squares of residuals
ss_tot = sum((x_values .- mean(x_values)) .^ 2) # Total sum of squares
r_squared = 1 - (ss_res / ss_tot) # R-squared
# Plotting
fig = Figure(size = (600, 400))
ax = Axis(fig[1, 1], xlabel = string(keys[1]), ylabel = string(keys[2]), title = "Scatter Plot for RGI ID: " * rgi_id)
scatter!(ax, vec(x_values), vec(y_values), markersize = 5, color = :blue, label = "Data")
xmin, xmax = minimum(x_values), maximum(x_values)
ymin, ymax = minimum(y_values), maximum(y_values)
lines!(ax, [xmin, xmax], [xmin, xmax], linestyle = :dash, color = :red, label = "y = x")
# Display metrics on the plot
metrics_text = "RMSE: $(round(rmse, digits=2))\nR²: $(round(r_squared, digits=2))\nBias: $(round(bias, digits=2))"
text!(ax, metrics_text, position = (xmax, ymax), align = (:right, :top), color = :black)
fig
end
"""
plot_glacier(results::T, plot_type::String, variables::Vector{Symbol}; kwargs...) -> Figure
Generate various types of plots for glacier data.
# Arguments
- `results`: A custom type containing the results of a glacier simulation.
- `plot_type`: Type of plot to generate. Options are:
* "heatmaps": Heatmaps for glacier variables like `:H`, `:S`, `:B`, `:V`, `:Vx`, and `:Vy`.
* "difference": Temporal difference metrics (between start and end) for a variable, with optional metrics like "hist" (histogram) and "difference".
* "statistics": Temporal statistical metrics for a variable, with optional metrics like "average", "median", "min", "max", and "std".
* "integrated_volume": Temporal evolution of the integrated ice volume for a variable.
- `variables`: Variables to be plotted, e.g., `:H`.
# Optional Keyword Arguments
- `tspan`: A tuple representing the start and end time for the simulation.
- `metrics`: Metrics to visualize, e.g., `["average"]` for statistics, `["difference"]` for difference.
# Returns
- A `Figure` object containing the desired visualization.
# Notes
- Ensure the `variables` and `kwargs` match the requirements of the specified `plot_type`.
- The function routes requests to specific plotting functions based on `plot_type`.
"""
function plot_glacier(results::T, plot_type::String, variables::Vector{Symbol}; kwargs...) where T
title_mapping = Dict(
"H" => ("Ice Thickness", "m", :YlGnBu),
"H₀" => ("Ice Thickness", "m", :YlGnBu),
"H_glathida" => ("Ice Thickness (GlaThiDa)", "m", :YlGnBu),
"S" => ("Surface Topography", "m", :terrain),
"B" => ("Bed Topography", "m", :terrain),
"V" => ("Ice Surface Velocity", "m/y", :viridis),
"Vx" => ("Ice Surface Velocity (X-direction)", "m/y", :viridis),
"Vy" => ("Ice Surface Velocity (Y-direction)", "m/y", :viridis),
"H_pred" => ("Predicted Ice Thickness", "m", :YlGnBu),
"H_obs" => ("Observed Ice Thickness", "m", :YlGnBu),
"H_diff" => ("Ice Thickness Difference", "m", :RdBu),
"V_pred" => ("Predicted Ice Surface Velocity", "m/y", :viridis),
"V_obs" => ("Observed Ice Surface Velocity", "m/y", :viridis),
"V_diff" => ("Ice Surface Velocity Difference", "m/y", :RdBu)
)
if plot_type == "heatmaps"
return plot_glacier_heatmaps(results, variables, title_mapping; kwargs...)
elseif plot_type == "evolution_difference"
return plot_glacier_difference_evolution(results, variables, title_mapping; kwargs...)
elseif plot_type == "evolution_statistics"
return plot_glacier_statistics_evolution(results, variables, title_mapping; kwargs...)
elseif plot_type == "integrated_volume"
return plot_glacier_integrated_volume(results, variables, title_mapping; kwargs...)
elseif plot_type == "bias"
return plot_bias(results, variables; kwargs...)
else
error("Invalid plot_type: $plot_type")
end
end
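# Example usage (sketch, mirroring the test suite):
#
#   plot_glacier(results, "heatmaps", [:H, :B])
#   plot_glacier(results, "evolution_statistics", [:H]; tspan = (2010.0, 2015.0),
#                metrics = ["average", "std"])
#   plot_glacier(results, "integrated_volume", [:H]; tspan = (2010.0, 2015.0))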
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 2312 |
"""
store_results!(simulation::SIM, glacier_idx::I, solution) where {SIM <: Simulation, I <: Int}
Store the results of a simulation of a single glacier into a `Results`.
"""
function create_results(simulation::SIM, glacier_idx::I, solution, loss=nothing; light=false, batch_id::Union{Nothing, I}=nothing) where {SIM <: Simulation, I <: Integer}
H = light ? [solution.u[begin],solution.u[end]] : solution.u
# Simulations using Reverse Diff require an iceflow model per glacier
if isnothing(batch_id)
iceflow_model = simulation.model.iceflow
else
iceflow_model = simulation.model.iceflow[batch_id]
end
if !isnothing(simulation.model.machine_learning)
θ = simulation.model.machine_learning.θ
else
θ = nothing
end
results = Results(simulation.glaciers[glacier_idx], iceflow_model;
H = H,
S = iceflow_model.S,
B = simulation.glaciers[glacier_idx].B,
V = iceflow_model.V,
Vx = iceflow_model.Vx,
Vy = iceflow_model.Vy,
Δx = simulation.glaciers[glacier_idx].Δx,
Δy = simulation.glaciers[glacier_idx].Δy,
lon = simulation.glaciers[glacier_idx].cenlon,
lat = simulation.glaciers[glacier_idx].cenlat,
θ = θ,
loss = loss
)
return results
end
"""
save_results_file(simulation::Prediction)
Save simulation `Results` into a `.jld2` file.
"""
function save_results_file!(results_list::Vector{Results{F}}, simulation::SIM; path::Union{String,Nothing}=nothing) where {F <: AbstractFloat, SIM <: Simulation}
# Create path for simulation results
predictions_path = joinpath(dirname(Base.current_project()), "data/results/predictions")
if !ispath(predictions_path)
mkpath(predictions_path)
end
simulation.results = results_list
if isnothing(path)
tspan = simulation.parameters.simulation.tspan
nglaciers = length(simulation.glaciers)
jldsave(joinpath(predictions_path, "prediction_$(nglaciers)glaciers_$tspan.jld2"); simulation.results)
end
end | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 1026 |
function glaciers2D_constructor(; save_refs::Bool = false)
rgi_ids = ["RGI60-11.03638", "RGI60-11.01450"]
params = Parameters(simulation=SimulationParameters(velocities=false,
use_glathida_data=false,
working_dir=Sleipnir.root_dir,
test_mode=true),
OGGM=OGGMparameters(ice_thickness_source="Farinotti19"))
glaciers = initialize_glaciers(rgi_ids, params; test=true)
# Empty all PyCall stuff to avoid issues
for glacier in glaciers
glacier.gdir = nothing
glacier.climate = nothing
glacier.S_coords = nothing
end
if save_refs
jldsave(joinpath(Sleipnir.root_dir, "test/data/glaciers/glaciers2D.jld2"); glaciers)
end
glaciers_ref = load(joinpath(Sleipnir.root_dir,"test/data/glaciers/glaciers2D.jld2"))["glaciers"]
@test all(glaciers .≈ glaciers_ref)
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 4673 |
function params_constructor_specified(; save_refs::Bool = false)
physical_params = PhysicalParameters(ρ = 900.0,
g = 9.81,
ϵ = 1e-3,
η₀ = 1.0,
maxA = 8e-17,
minA = 8.5e-20,
maxTlaw = 1.0,
minTlaw = -25.0,
noise_A_magnitude = 5e-18)
simulation_params = SimulationParameters(use_MB = true,
use_iceflow = true,
plots = false,
velocities = false,
overwrite_climate = false,
use_glathida_data = false,
float_type = Float64,
int_type = Int64,
tspan = (2010.0,2015.0),
multiprocessing = false,
workers = 10,
working_dir = "")
oggm_params = OGGMparameters(working_dir = "",
paths = nothing,
params = nothing,
multiprocessing = false,
workers = 1,
ice_thickness_source = "Millan22",
DEM_source = "Default",
base_url = "https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.6/L1-L2_files/elev_bands/",
test = true)
params = Parameters(physical=physical_params,
simulation=simulation_params,
OGGM=oggm_params)
if save_refs
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/simulation_params_specified.jld2"); simulation_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/physical_params_specified.jld2"); physical_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/oggm_params_specified.jld2"); oggm_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/params_specified.jld2"); params)
end
simulation_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/simulation_params_specified.jld2"))["simulation_params"]
physical_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/physical_params_specified.jld2"))["physical_params"]
oggm_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/oggm_params_specified.jld2"))["oggm_params"]
params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/params_specified.jld2"))["params"]
@test physical_params == physical_params_ref
@test simulation_params == simulation_params_ref
@test oggm_params == oggm_params_ref
@test params == params_ref
end
function params_constructor_default(; save_refs::Bool = false)
physical_params = PhysicalParameters()
simulation_params = SimulationParameters()
oggm_params = OGGMparameters(test=true, working_dir="")
params = Parameters(simulation=simulation_params,
physical=physical_params,
OGGM=oggm_params
)
if save_refs
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/simulation_params_default.jld2"); simulation_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/physical_params_default.jld2"); physical_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/oggm_params_default.jld2"); oggm_params)
jldsave(joinpath(Sleipnir.root_dir, "test/data/params/params_default.jld2"); params)
end
simulation_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/simulation_params_default.jld2"))["simulation_params"]
physical_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/physical_params_default.jld2"))["physical_params"]
oggm_params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/oggm_params_default.jld2"))["oggm_params"]
params_ref = load(joinpath(Sleipnir.root_dir, "test/data/params/params_default.jld2"))["params"]
@test physical_params == physical_params_ref
@test simulation_params == simulation_params_ref
@test oggm_params == oggm_params_ref
@test params == params_ref
end | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 1484 | function glaciers2D_plots()
# Load glacier data
@load (joinpath(@__DIR__,"data/glaciers/glaciers2D_test_temporal.jld2")) results
# Test execution
@testset "plot_glacier tests" begin
@testset "Heatmaps" begin
try
plot_glacier(results[2], "heatmaps", [:H,:B])
@test true
catch
@test false
end
end
@testset "Statistics Evolution" begin
try
plot_glacier(results[2], "evolution_statistics", [:H], tspan=(2010.0,2015.0), metrics=["average","std","max","median","min"])
@test true
catch
@test false
end
end
@testset "Difference Evolution" begin
try
plot_glacier(results[2], "evolution_difference", [:H], tspan=(2010.0,2015.0), metrics=["difference","hist"])
@test true
catch
@test false
end
end
@testset "Integrated Volume" begin
try
plot_glacier(results[2], "integrated_volume", [:H], tspan=(2010.0,2015.0))
@test true
catch
@test false
end
end
@testset "Bias" begin
try
plot_glacier(results[2], "bias", [:B,:S])
@test true
catch
@test false
end
end
end
end
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | code | 617 | import Pkg
Pkg.activate(dirname(Base.current_project()))
using Revise
using Sleipnir
using PyCall
using Test
using JLD2
using Infiltrator
using CairoMakie
include("params_construction.jl")
include("glaciers_construction.jl")
include("plot_utils.jl")
# Activate to avoid GKS backend Plot issues in the JupyterHub
ENV["GKSwstype"]="nul"
@testset "Parameters constructors with specified values" params_constructor_specified()
@testset "Parameters constructors by default" params_constructor_default()
@testset "Glaciers 2D constructors" glaciers2D_constructor()
#@testset "Glaciers 2D plots" glaciers2D_plots() | Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.6.1 | ee6a6a5ae18d30c495cf860269b67fe66c044635 | docs | 862 | [](https://github.com/ODINN-SciML/Sleipnir.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://app.codecov.io/gh/ODINN-SciML/Sleipnir.jl)
[](https://github.com/ODINN-SciML/Sleipnir.jl/actions/workflows/CompatHelper.yml)
<img src="https://github.com/JordiBolibar/Sleipnir.jl/blob/main/data/Sleipnir_logo-19.png" width="250">
Sleipnir.jl is the core package of [ODINN.jl](https://github.com/ODINN-SciML/ODINN.jl), containing all the basic data structures to manage glacier and climate data, as well as multiple types of numerical simulations and parameters.
| Sleipnir | https://github.com/ODINN-SciML/Sleipnir.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 231 | using Documenter, DDR2import
makedocs(
sitename = "DDR2import.jl",
modules = [DDR2import],
pages = Any[
"Home" => "index.md",
],
)
deploydocs(
repo = "github.com/rjdverbeek-tud/DDR2import.jl.git",
)
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 15126 | """
ALL_FT+ fileformat
Defines a list of flight trajectories (planned/actual/etc) for a given day
See EUROCONTROL NEST Manual Section 9.7.5 for ALL_FT+ fileformat description
The headers of the DataFrame follow the naming convention from the EUROCONTROL
NEST Manual + '_' + index_number from manual.
"""
module Allftplus
export read
# include("utility.jl")
using ..util
using Format
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>String, 5=>String,
6=>String, 7=>String, 8=>String, 9=>String, 10=>String, 11=>String, 12=>String,
13=>String, 14=>String, 15=>String, 16=>String, 17=>String, 18=>String,
19=>String, 20=>String, 21=>String, 22=>String, 23=>Int64, 24=>String,
25=>String, 26=>String, 27=>String, 28=>String, 29=>String, 30=>Int64,
31=>Int64, 32=>String, 33=>String, 34=>String, 35=>String, 36=>String,
37=>String, 38=>String, 39=>Int64, 40=>Int64, 41=>Int64, 42=>String,
43=>String, 44=>String, 45=>String, 46=>String, 47=>String, 48=>String,
49=>Int64, 50=>Int64, 51=>String, 52=>String, 53=>String, 54=>Int64,
55=>Int64, 56=>String, 57=>String, 58=>String, 59=>String, 60=>String,
61=>String, 62=>String, 63=>String, 64=>String, 65=>String, 66=>String,
67=>String, 68=>String, 69=>String, 70=>String, 71=>String, 72=>String,
73=>Int64, 74=>String, 75=>Int64, 76=>String, 77=>Int64, 78=>Int64,
79=>String, 80=>String, 81=>Int64, 82=>String, 83=>Float64, 84=>Float64,
85=>Int64, 86=>String, 87=>Int64, 88=>String, 89=>Int64, 90=>String,
91=>Int64, 92=>Int64, 93=>String, 94=>String, 95=>Int64, 96=>String,
97=>Float64, 98=>Float64, 99=>Int64, 100=>String, 101=>Int64, 102=>String,
103=>Int64, 104=>String, 105=>Int64, 106=>Int64, 107=>String, 108=>String,
109=>Int64, 110=>String, 111=>Float64, 112=>Float64, 113=>Int64, 114=>String,
115=>Int64, 116=>String, 117=>Int64, 118=>String, 119=>String, 120=>String,
121=>Float64, 122=>Float64, 123=>Int64, 124=>String, 125=>Int64, 126=>String,
127=>Int64, 128=>String, 129=>String, 130=>Float64, 131=>Float64, 132=>Int64,
133=>String, 134=>Int64, 135=>String, 136=>Int64, 137=>String, 138=>String,
139=>Float64, 140=>Float64, 141=>Int64, 142=>String, 143=>Int64, 144=>String,
145=>Int64, 146=>String, 147=>String, 148=>Float64, 149=>Float64, 150=>Int64,
151=>String, 152=>Int64, 153=>String, 154=>Int64, 155=>String, 156=>String,
157=>Float64, 158=>Float64, 159=>Int64, 160=>String, 161=>Int64, 162=>String,
163=>Int64, 164=>String, 165=>String, 166=>String, 167=>String, 168=>Int64,
169=>String, 170=>String, 171=>String, 172=>String, 173=>String, 174=>String,
175=>Int64, 176=>Int64, 177=>String, 178=>Int64, 179=>String, 180=>Int64,
181=>String)
const header_format = ["departureAerodromeIcaoId_0", "arrivalAerodromeIcaoId_1",
"aircraftId_2", "aircraftOperatorIcaoId_3", "aircraftTypeIcaoId_4", "aobt_5",
"ifpsId_6", "iobt_7", "originalFlightDataQuality_8", "flightDataQuality_9",
"source_10", "exemptionReasonType_11", "exemptionReasonDistance_12", "lateFiler_13",
"lateUpdater_14", "northAtlanticFlight_15", "cobt_16", "eobt_17", "lobt_18",
"flightState_19", "previousToActivationFlightState_20", "suspensionStatus_21",
"tactId_22", "samCtot_23", "samSent_24", "sipCtot_25", "sipSent_26", "slotForced_27",
"mostPenalizingRegulationId_28", "regulationsAffectedByNrOfInstances_29",
"excludedFromNrOfInstances_30", "lastReceivedAtfmMessageTitle_31",
"lastReceivedMessageTitle_32", "lastSentAtfmMessageTitle_33",
"manualExemptionReason_34", "sensitiveFlight_35", "readyForImprovement_36",
"readyToDepart_37", "revisedTaxiTime_38", "tis_39", "trs_40",
"toBeSentSlotMessageTitle_41", "toBeSentProposalMessageTitle_42",
"lastSentSlotMessageTitle_43", "lastSentProposalMessageTitle_44",
"lastSentSlotMessage_45", "lastSentProposalMessage_46", "flightCountOption_47",
"normalFlightTactId_48", "proposalFlightTactId_49",
"operatingAircraftOperatorIcaoId_50", "reroutingWhy_51", "reroutedFlightState_52",
"runwayVisualRange_53", "numberIgnoredErrors_54", "arcAddrSource_55", "arcAddr_56",
"ifpsRegistrationMark_57", "flightType_58", "aircraftEquipment_59", "cdmStatus_60",
"cdmEarlyTtot_61", "cdmAoTtot_62", "cdmAtcTtot_63", "cdmSequencedTtot_64",
"cdmTaxiTime_65", "cdmOffBlockTimeDiscrepancy_66", "cdmDepartureProcedureId_67",
"cdmAircraftTypeId_68", "cdmRegistrationMark_69", "cdmNoSlotBefore_70",
"cdmDepartureStatus_71", "ftfmEetFirNrOfInstances_72", "ftfmEetFirList_73",
"ftfmEetPtNrOfInstances_74", "ftfmEetPtList_75", "ftfmAiracCycleReleaseNumber_76",
"ftfmEnvBaselineNumber_77", "ftfmDepartureRunway_78", "ftfmArrivalRunway_79",
"ftfmReqFlightlevelSpeedNrOfInstances_80", "ftfmReqFlightlevelSpeedList_81",
"ftfmConsumedFuel_82", "ftfmRouteCharges_83", "ftfmAllFtPointNrOfInstances_84",
"ftfmAllFtPointProfile_85", "ftfmAllFtAirspaceNrOfInstances_86",
"ftfmAllFtAirspaceProfile_87", "ftfmAllFtCircleIntersectionsNrOfInstances_88",
"ftfmAllFtCircleIntersections_89", "rtfmAiracCycleReleaseNumber_90",
"rtfmEnvBaselineNumber_91", "rtfmDepartureRunway_92", "rtfmArrivalRunway_93",
"rtfmReqFlightlevelSpeedNrOfInstances_94", "rtfmReqFlightlevelSpeedList_95",
"rtfmConsumedFuel_96", "rtfmRouteCharges_97", "rtfmAllFtPointNrOfInstances_98",
"rtfmAllFtPointProfile_99", "rtfmAllFtAirspaceNrOfInstances_100",
"rtfmAllFtAirspaceProfile_101", "rtfmAllFtCircleIntersectionsNrOfInstances_102",
"rtfmAllFtCircleIntersections_103", "ctfmAiracCycleReleaseNumber_104",
"ctfmEnvBaselineNumber_105", "ctfmDepartureRunway_106", "ctfmArrivalRunway_107",
"ctfmReqFlightlevelSpeedNrOfInstances_108", "ctfmReqFlightlevelSpeedList_109",
"ctfmConsumedFuel_110", "ctfmRouteCharges_111", "ctfmAllFtPointNrOfInstances_112",
"ctfmAllFtPointProfile_113", "ctfmAllFtAirspaceNrOfInstances_114",
"ctfmAllFtAirspaceProfile_115", "ctfmAllFtCircleIntersectionsNrOfInstances_116",
"ctfmAllFtCircleIntersections_117", "noCPGCPFReason_118", "scrObt_119",
"scrConsumedFuel_120", "scrRouteCharges_121", "scrAllFtPointNrOfInstances_122",
"scrAllFtPointProfile_123", "scrAllFtAirspaceNrOfInstances_124",
"scrAllFtAirspaceProfile_125", "scrAllFtCircleIntersectionsNrOfInstances_126",
"scrAllFtCircleIntersections_127", "srrObt_128", "srrConsumedFuel_129",
"srrRouteCharges_130", "srrAllFtPointNrOfInstances_131", "srrAllFtPointProfile_132",
"srrAllFtAirspaceNrOfInstances_133", "srrAllFtAirspaceProfile_134",
"srrAllFtCircleIntersectionsNrOfInstances_135", "srrAllFtCircleIntersections_136",
"surObt_137", "surConsumedFuel_138", "surRouteCharges_139",
"surAllFtPointNrOfInstances_140", "surAllFtPointProfile_141",
"surAllFtAirspaceNrOfInstances_142", "surAllFtAirspaceProfile_143",
"surAllFtCircleIntersectionsNrOfInstances_144", "surAllFtCircleIntersections_145",
"dctObt_146", "dctConsumedFuel_147", "dctRouteCharges_148",
"dctAllFtPointNrOfInstances_149", "dctAllFtPointProfile_150",
"dctAllFtAirspaceNrOfInstances_151", "dctAllFtAirspaceProfile_152",
"dctAllFtCircleIntersectionsNrOfInstances_153", "dctAllFtCircleIntersections_154",
"cpfObt_155", "cpfConsumedFuel_156", "cpfRouteCharges_157",
"cpfAllFtPointNrOfInstances_158", "cpfAllFtPointProfile_159",
"cpfAllFtAirspaceNrOfInstances_160", "cpfAllFtAirspaceProfile_161",
"cpfAllFtCircleIntersectionsNrOfInstances_162", "cpfAllFtCircleIntersections_163",
"aircraftidIATA_164", "intentionFlight_165",
"intentionRelatedRouteAssignmentMethod_166", "intentionUID_167",
"intentionEditionDate_168", "intentionSource_169", "associatedIntentions_170",
"enrichmentOutput_171", "eventID_172", "eventTime_173", "flightVersionNr_174",
"ftfmNrTvProfiles_175", "ftfmTvProfile_176", "rtfmNrTvProfiles_177",
"rtfmTvProfile_178", "ctfmNrTvProfiles_179", "ctfmTvProfile_180"]
const yymmdd = DateFormat("YYmmdd")
const hhmmss = DateFormat("HHMMSS")
const mmmmss = DateFormat("MMMMSS")
const yyyymmddhhmmss = DateFormat("YYYYmmddHHMMSS")
const year2000 = Year(2000)
function read(file)
df = CSV.read(file, delim=";", types=fileformat, header=header_format,
copycols=true, datarow=2)
# remove_unused!(df)
reformat!(df)
return df
end
struct FlightlevelSpeed
FL::Int64
Spd::String
Value::Int64
function FlightlevelSpeed(x::AbstractString)
flspdvalue = split(x, ':')
FL = parse(Int64, flspdvalue[1][2:end])
Spd = flspdvalue[2]
Value = parse(Int64, flspdvalue[3])
new(FL, Spd, Value)
end
end
function reqFlightlevelSpeedList(items::Union{AbstractString, Missing})
if items === missing
return missing
else
return [FlightlevelSpeed(item) for item in split(items)]
end
end
struct AllFtPointProfile
datetime::Union{DateTime, Missing}
point::AbstractString
route::AbstractString
FL::Union{Int64, Missing}
pointDistance::Union{Int64, Missing}
pointType::AbstractString
geoPointId::Union{Point_deg, AbstractString}
ratio::Union{Int64, Missing}
isVisible::Bool # Y indicates IFR/GAT/IFPSTART, N indicates VFR/OAT/IFPSTOP/STAY
function AllFtPointProfile(x::AbstractString)
items = split(x, ':')
datetime = items[1] == "" ? missing : format_datetime(items[1], yyyymmddhhmmss)
point = items[2]
route = items[3]
FL = items[4] == "" ? missing : parse(Int64, items[4])
pointDistance = items[5] == "" ? missing : parse(Int64, items[5])
pointType = items[6]
geoPointId = occursin(r"\d{6}[NS]\d{7}[EW]", items[7]) ? latlon(items[7]) : items[7]
ratio = items[8] == "" ? missing : parse(Int64, items[8])
isVisible = items[9] == "Y"
new(datetime, point, route, FL, pointDistance, pointType, geoPointId,
ratio, isVisible)
end
end
function AllFtPointProfileList(items::Union{AbstractString, Missing})
if items === missing
return missing
else
return [AllFtPointProfile(item) for item in split(items)]
end
end
struct AllFtAirspaceProfile
entry_datetime::Union{DateTime, Missing}
sector::AbstractString
exit_datetime::Union{DateTime, Missing}
fir::AbstractString
entry_geoPointId::Union{Point_deg, AbstractString}
exit_geoPointId::Union{Point_deg, AbstractString}
# entry_geoPointId::Union{Point, AbstractString}
# exit_geoPointId::Union{Point, AbstractString}
entry_FL::Union{Int64, Missing}
exit_FL::Union{Int64, Missing}
entry_pointDistance::Union{Int64, Missing}
exit_pointDistance::Union{Int64, Missing}
function AllFtAirspaceProfile(x::AbstractString)
items = split(x, ':')
entry_datetime = items[1] == "" ? missing : format_datetime(items[1], yyyymmddhhmmss)
sector = items[2]
exit_datetime = items[3] == "" ? missing : format_datetime(items[3], yyyymmddhhmmss)
fir = items[4]
entry_geoPointId = occursin(r"\d{6}[NS]\d{7}[EW]", items[5]) ? latlon(items[5]) : items[5]
exit_geoPointId = occursin(r"\d{6}[NS]\d{7}[EW]", items[6]) ? latlon(items[6]) : items[6]
entry_FL = items[7] == "" ? missing : parse(Int64, items[7])
exit_FL = items[8] == "" ? missing : parse(Int64, items[8])
entry_pointDistance = items[9] == "" ? missing : parse(Int64, items[9])
exit_pointDistance = items[10] == "" ? missing : parse(Int64, items[10])
new(entry_datetime, sector, exit_datetime, fir, entry_geoPointId,
exit_geoPointId, entry_FL, exit_FL, entry_pointDistance,
exit_pointDistance)
end
end
function AllFtAirspaceProfileList(items::Union{AbstractString, Missing})
if items === missing
return missing
else
return [AllFtAirspaceProfile(item) for item in split(items)]
end
end
function replace_column!(df, col, values)
    # Swap in the converted values via a TEMP column; this lets the element
    # type change and, exactly as in the expanded original, moves the
    # converted column to the end of the DataFrame
    df[:,:TEMP] = values
    select!(df, Not(col))
    df[:,col] = df[:,:TEMP]
    select!(df, Not(:TEMP))
end
function reformat!(df)
    #Date conversion
    for col in (:aobt_5, :iobt_7, :cobt_16, :eobt_17, :lobt_18, :samCtot_23,
                :sipCtot_25, :lastSentSlotMessage_45, :lastSentProposalMessage_46,
                :cdmEarlyTtot_61, :cdmAoTtot_62, :cdmAtcTtot_63,
                :cdmSequencedTtot_64)
        replace_column!(df, col, format_datetime.(df[:,col], yyyymmddhhmmss))
    end
    # cdmTaxiTime_65 is a duration in MMMMSS, not a timestamp
    replace_column!(df, :cdmTaxiTime_65, format_time.(df[:,:cdmTaxiTime_65], mmmmss))
    for col in (:cdmNoSlotBefore_70, :scrObt_119, :srrObt_128, :surObt_137,
                :dctObt_146, :eventTime_173)
        replace_column!(df, col, format_datetime.(df[:,col], yyyymmddhhmmss))
    end
end
end # module
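# Usage sketch, not part of the original source (the file name is hypothetical):
# read an ALL_FT+ export and inspect a converted column plus a parsed profile.
#
#   using DDR2import
#   df = DDR2import.Allftplus.read("20180101.ALL_FT+")
#   df[1, :aobt_5]   # actual off-block time, now a DateTime (or missing)
#   DDR2import.Allftplus.AllFtPointProfileList(df[1, :ftfmAllFtPointProfile_85])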
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 4127 | """
ARE fileformat
Newmaxo ASCII Region file
Example:
14 2799 925 0 0 660 0 0 0 0 0 0 0 0 0 LJ
2799 925
2792 932
2797 943
2785.91 954.62
2780 942
2774 947
2781 957
2784 964
2784 975
2784 977
2790 974
2794 982
2790 992
2799 925
Returns dictionary with airspace description
Key Name of Volume
Airspace struct
nb_point Contains the number of vertices of the volume
bottom_fl Low level of the volume in flight level (FL). Can be 0.
top_fl High level of the volume in flight level (FL). Can be 0.
surface Value 2 (can be negative). Can be 0.
sector_num Value 3 (can be negative). Can be 0.
points Matrix with all lat/lon [deg] vertices of the volume (first/last the same)
See EUROCONTROL NEST Manual section 9.7.6 for Are fileformat description
"""
module Are
export read
# include("utility.jl")
using ..util
using Format
using CSV
using Dates
using Navigation
struct Airspace
nb_point::Int64
bottom_fl::Float64
top_fl::Float64
surface::Float64
sector_num::Float64
points::Matrix{Float64}
box::Array{Float64, 2}
end
#TODO Remove open. Just use eachline
function read(filename)
dict = Dict{String, Airspace}()
open(filename) do file
j = 1
nb_point = 3
name = ""
points = Matrix{Float64}(undef, 0, 2)
bottom_fl = 0.0
top_fl = 0.0
surface = 0.0
sector_num = 0.0
for (i, ln) in enumerate(eachline(file))
splitline = split(ln, " ")
if length(splitline) == 2
lat = parse(Float64, splitline[1]) / 60.0
lon = parse(Float64, splitline[2]) / 60.0
points = vcat(points, [lat lon])
if j == nb_point
box = box_spherical_polygon(points)
dict[name] = Airspace(nb_point, bottom_fl, top_fl, surface,
sector_num, points, box)
end
j += 1
elseif length(splitline) == 15
nb_point = parse(Int64, splitline[1])
bottom_fl = parse(Float64, splitline[5])
top_fl = parse(Float64, splitline[6])
surface = parse(Float64, splitline[7])
sector_num = parse(Float64, splitline[8])
name = splitline[15]
points = Matrix{Float64}(undef, 0, 2)
j = 1
else
end
end
end
return dict
end
# function isinside(are::Airspace, point::Point)
# # test if inside box
# if isinsidebox(are, point)
# #test if inside polygon
# return NaN
# else
# return false
# end
# end
function box_spherical_polygon(points::Array{Float64,2})
box = Array{Float64, 2}(undef, 2, 2)
lats_max = Array{Float64,1}()
lats_min = Array{Float64,1}()
previous_point = Array{Float64,1}()
for point in eachrow(points)
if isempty(previous_point)
previous_point = point
else
# pos₁ = Point(previous_point[1], previous_point[2])
# pos₂ = Point(point[1], point[2])
pos₁ = Point_deg(previous_point[1], previous_point[2])
pos₂ = Point_deg(point[1], point[2])
append!(lats_max, maximum(latitude_options(pos₁, pos₂)))
append!(lats_min, minimum(latitude_options(pos₁, pos₂)))
previous_point = point
end
end
box[1,1] = maximum(lats_max)
box[2,1] = minimum(lats_min)
# Limiting longitudes
box[1,2] = maximum(points[:,2])
box[2,2] = minimum(points[:,2])
return box
end
function latitude_options(pos₁::Point_deg, pos₂::Point_deg)
bearing = Navigation.bearing(pos₁, pos₂)
closest_point_deg = Navigation.closest_point_to_pole(pos₁, bearing)
if Navigation.distance(pos₁, pos₂) > Navigation.distance(pos₁,
closest_point_deg)
return pos₁.ϕ, pos₂.ϕ, closest_point_deg.ϕ
else
return pos₁.ϕ, pos₂.ϕ
end
end
# function isinsidebox(are::Airspace, point::Point)
# return are.box[2,1] < point.ϕ < are.box[1,1] && are.box[2,2] < point.λ < are.box[1,2]
# end
end # module
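# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   airspaces = DDR2import.Are.read("sectors.are")
#   lj = airspaces["LJ"]   # Airspace struct for volume "LJ"
#   lj.points              # nb_point x 2 matrix of lat/lon vertices in degrees
#   lj.box                 # bounding box: row 1 = maxima, row 2 = minima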
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 777 | """
ARP fileformat
Defines the geographical location of airports used in the assignment process
Example:
EGSX 3103.267 9.350 EGTT_FIR
EGSY 3203.650 -83.300 EGTT_FIR
EGSZ 3620.000 -102.000 EGPX_FIR
EGTA 3106.883 -56.550 EGTT_FIR
EGTB 3096.700 -48.483 EGTT_FIR
See EUROCONTROL NEST Manual section 9.7.7 for Arp fileformat description
"""
module Arp
export read
using CSV
const fileformat = Dict(1=>String, 2=>Float64, 3=>Float64, 4=>String)
const header_format = ["AIRPORT", "LAT_DEG", "LON_DEG", "FIR"]
function read(file)
df = CSV.read(file, types=fileformat, header=header_format,
copycols=true)
reformat!(df)
return df
end
function reformat!(df)
df[:,:LAT_DEG] = df[:,:LAT_DEG] / 60.0
df[:,:LON_DEG] = df[:,:LON_DEG] / 60.0
end
end # module
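# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Arp.read("airports.arp")
#   df[df.AIRPORT .== "EGTB", [:LAT_DEG, :LON_DEG]]   # position in decimal degrees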
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 2185 | """
Ase fileformat
ASCII Segment File
Describes the route network; each line represents one route segment
See EUROCONTROL NEST Manual for Ase fileformat description
Ase DataFrame Column Names
# Field Type Size Comment
1 :FLIGHTCOUNT Float64 normally number of flights using this route segment, could be a load
2 :SEGMENTPARITY Int64 0=NO, 1=ODD, 2=EVEN, 3=ODD_LOW, 4=EVEN_LOW, 5=ODD_HIGH, 6=EVEN_HIGH
3 :SEGMENTTYPE Int64 0=NO, 1=NORMAL, 2=ARRIVAL, 3=DEPARTURE (permanent rte segment)
20=NO, 21=NORMAL, 22=ARRIVAL, 23=DEPARTURE (CDR Generic)
40=NO, 41=NORMAL, 42=ARRIVAL, 43=DEPARTURE (CDR 1 )
60=NO, 61=NORMAL, 62=ARRIVAL, 63=DEPARTURE (CDR 2)
80=NO, 81=NORMAL, 82=ARRIVAL, 83=DEPARTURE (CDR 3)
100=NO, 101=NORMAL, 102=ARRIVAL, 103=DEPARTURE (CDR 1+2)
120=NO, 121=NORMAL, 122=ARRIVAL, 123=DEPARTURE (CDR 1+3)
4 :LATBEGINSEGMENT_DEG Float64
5 :LONBEGINSEGMENT_DEG Float64
6 :LATENDSEGMENT_DEG Float64
7 :LONENDSEGMENT_DEG Float64
8 :SEGMENTNAME String 11 routePointNameBegin_routePointNameEnd (separator '_')
%% = TBD
% = TBD
* = TBD
"""
module Ase
export read
using Format
using CSV
using Dates
const fileformat = Dict(1=>Float64, 2=>Int64, 3=>Int64, 4=>Float64,
5=>Float64, 6=>Float64, 7=>Float64, 8=>String)
const header_format = ["FLIGHTCOUNT", "SEGMENTPARITY", "SEGMENTTYPE",
"LATBEGINSEGMENT_DEG", "LONBEGINSEGMENT_DEG", "LATENDSEGMENT_DEG", "LONENDSEGMENT_DEG",
"SEGMENTNAME"]
function read(file)
df = CSV.read(file, delim=" ", types=fileformat, header=header_format,
copycols=true)
reformat!(df)
return df
end
function reformat!(df)
df[:,:LATBEGINSEGMENT_DEG] = df[:,:LATBEGINSEGMENT_DEG] / 60.0
df[:,:LONBEGINSEGMENT_DEG] = df[:,:LONBEGINSEGMENT_DEG] / 60.0
df[:,:LATENDSEGMENT_DEG] = df[:,:LATENDSEGMENT_DEG] / 60.0
df[:,:LONENDSEGMENT_DEG] = df[:,:LONENDSEGMENT_DEG] / 60.0
end
end # module
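# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Ase.read("network.ase")
#   df[df.FLIGHTCOUNT .== maximum(df.FLIGHTCOUNT), :SEGMENTNAME]   # busiest segment(s)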
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 543 | """
Cost fileformat
See EUROCONTROL NEST Manual for Cost fileformat description
Example cost fileformat
139729486 ED 356.159
139729486 LB 230.25
139729486 LC 86.7351
139729486 LH 148.107
139729486 LO 234.767
139729486 LT 365.319
139729486 LY 190.189
"""
module Cost
export read
using CSV
const fileformat = Dict(1=>Int64, 2=>String, 3=>Float64)
const header_format = ["FLIGHTID", "COUNTRYCODE", "COST"]
function read(file)
return CSV.read(file, delim=" ", types=fileformat, header=header_format,
copycols=true)
end
end # module
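# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Cost.read("charges.cost")
#   sum(df.COST)   # total route charges over all flight/country rows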
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 775 | """
Country fileformat
Create the link between the country code and the country name
Example:
#;COUNTRY;2;446;20181206;20190102;307;EAR_P
AG;Solomon Islands;N
AN;Nauru;N
AY;Papua New Guinea;N
BG;Greenland (Denmark);N
BI;Iceland;Y
"""
module Country
export read
struct CountryDc
name::String
member::Bool
CountryDc(name, member) = new(name, member=="Y")
# Country(name::AbstractString, member::Bool) = new(name, member)
end
function read(file)
countries = Dict{String, CountryDc}()
for line in eachline(file)
line_elements = split(line, ';')
if length(line_elements) == 3
countries[line_elements[1]] = CountryDc(line_elements[2],
line_elements[3])
end
end
return countries
end
end # module
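# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   countries = DDR2import.Country.read("country.dat")   # Dict{String, CountryDc}
#   countries["BI"].name     # "Iceland"
#   countries["BI"].member   # true, because the third field is "Y"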
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1989 | """
Crco fileformat
See EUROCONTROL NEST Manual for Crco fileformat description
Example CRCO fileformat
ED N182QS B737 1523 3217.8167 599.3000 3018.8009 726.39836 396.03131430 090500 020322 0.000 095918 020322 270.000
LK N182QS B737 1523 3018.8009 726.3983 3019.0000 726.9864 0.78728520 095918 020322 270.000 095922 020322 270.000
LK N182QS B737 1523 3018.8009 726.3983 3019.0000 726.9864 0.78728520 111918 020322 270.000 111922 020322 270.000
ED N182QS B737 1523 3019.0000 726.9864 3217.8167 599.3000 395.93214016 111922 020322 270.000 121700 020322 0.000
"""
module Crco
export read
# include("utility.jl")
using ..util # new
using Format
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>Int64, 5=>Float64,
6=>Float64, 7=>Float64, 8=>Float64, 9=>Float64, 10=>String, 11=>String,
12=>Float64, 13=>String, 14=>String, 15=>Float64)
const header_format = ["COUNTRYNAME", "CALLSIGN", "ACTYPE", "FLIGHTID",
"LATENTRY_DEG", "LONENTRY_DEG", "LATEXIT_DEG", "LONEXIT_DEG", "DISTANCE_M",
"TIMEENTRY", "DATEENTRY", "ENTRYFL", "TIMEEXIT", "DATEEXIT", "EXITFL"]
const yymmddhhmmss = DateFormat("YYmmddHHMMSS")
const year2000 = Year(2000)
function read(file)
df = CSV.read(file, types=fileformat, header=header_format,
copycols=true)
reformat!(df);
return df
end
function reformat!(df)
df[:,:LATENTRY_DEG] = df[:,:LATENTRY_DEG] / 60.0
df[:,:LONENTRY_DEG] = df[:,:LONENTRY_DEG] / 60.0
df[:,:LATEXIT_DEG] = df[:,:LATEXIT_DEG] / 60.0
df[:,:LONEXIT_DEG] = df[:,:LONEXIT_DEG] / 60.0
df[:,:DISTANCE_M] = df[:,:DISTANCE_M] * 1000.0
df[:,:DATETIMEENTRY] = format_datetime.(df[:,:DATEENTRY] .* df[:,:TIMEENTRY],
yymmddhhmmss, addyear=year2000)
df[:,:DATETIMEEXIT] = format_datetime.(df[:,:DATEEXIT] .* df[:,:TIMEEXIT],
yymmddhhmmss, addyear=year2000)
select!(df, Not(:TIMEENTRY))
select!(df, Not(:TIMEEXIT))
select!(df, Not(:DATEENTRY))
select!(df, Not(:DATEEXIT))
end
end #module
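# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Crco.read("20020322.crco")
#   df[1, :DATETIMEENTRY]   # merged date + time of the boundary crossing
#   df[1, :DISTANCE_M]      # distance converted from km to metres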
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 3678 | """
The EUROCONTROL Demand Data Repository 'DDR2' provides air traffic management
(ATM) actors with the most accurate picture of past and future pan-European air
traffic demand (from several years ahead until the day before operation), as
well as environment data, analysis reports and tools that can read and process
the data.
All this information is managed by the DDR service, a EUROCONTROL cross-unit
activity, and can be accessed from the Demand Data Repository 2 'DDR2' web
portal. Access to the DDR2 web portal is restricted. Access conditions apply.
DDR future traffic can be forecast thanks to the knowledge of past traffic and
several thousands of flight intentions provided by airlines and airports that
are collected, stored, analysed and treated on a daily basis.
DDR traffic forecast supports strategic, seasonal and pre-tactical planning,
and also special events or major ATM evolution projects.
Finally, DDR provides a refined analysis of past demand to facilitate
post-operations analysis and to identify best practices for future operations.
Functionality
DDR2 gives access to:
Past traffic data - from August 2012 till now, traffic demand, last filed
flight plan traffic trajectories as well as actual trajectories are provided
for past analysis;
Past and Pre-OPS (one AIRAC in advance) environment data - they can be
downloaded and are used internally for processing future traffic trajectories.
They contain all information necessary to analyse and process sector loads,
capacity bottlenecks, re-routing options, etc;
Strategic traffic forecast - this covers the planning phase, from 18 months to
7 days before operations. It is used for medium- to-short-term capacity
planning and seasonal planning. Users can themselves generate, with several 4D
trajectory processing options, and download this type of forecast directly via
the DDR2 web portal;
Pre-tactical traffic forecast - it focusses on the planning phase, from 6 days
to 1 day before operations. Network pre-tactical planning is supported by the
NM PREDICT system and can be accessed via the DDR2 portal;
NEST and SAAM tools - they can be downloaded from DDR2 Web portal and are
compatible with DDR data. These tools analyse and process a great deal of
information for the purpose of facilitating airspace design and capacity
planning in Europe.
Users
The DDR addresses the needs of a wide range of users such as:
air navigation service providers (ANSPs), who use it to prepare and optimise
their capacity plans;
airlines, who rely on it to detect flight efficiency improvement opportunities,
by visualising and comparing flight plan trajectories for any period of time in
the past;
airspace management actors, for airspace management and coordination of the
available airspace;
airports, with the aim of integrating their local plans with the Network
Operations Plan;
the NM at central/FAB/local level.
See EUROCONTROL NEST Manual Section 9.7 for fileformat descriptions
"""
module DDR2import
include("util.jl")
include("T5.jl")
include("SO6.jl")
include("Exp2.jl")
include("Allftplus.jl")
include("Ase.jl")
include("Are.jl")
include("Gar.jl")
include("Gsl.jl")
include("Frp.jl")
include("Sls.jl")
include("Sid.jl")
include("Star.jl")
include("Spc.jl")
include("Mot.jl")
include("Narp.jl")
include("Nnpt.jl")
include("Ntfv.jl")
include("Cost.jl")
include("Crco.jl")
include("Ur.jl")
include("Arp.jl")
include("Country.jl")
include("Operator.jl")
include("Routes.jl")
include("Runway.jl")
include("For.jl")
end # module
#TODO Add tests for last element in test files.
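# Usage sketch for the package as a whole (hypothetical file names):
#
#   using DDR2import
#   so6 = DDR2import.SO6.read("20171214.so6")    # trajectory segments as a DataFrame
#   arp = DDR2import.Arp.read("airports.arp")    # airport locations in degrees
#   gsl = DDR2import.Gsl.read("sectors.gsl")     # Dict of sector definitions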
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 13554 | """
EXP2 fileformat
See EUROCONTROL NEST Manual for EXP2 fileformat description
Exp2 DataFrame Column Names
# CellName Field Type Comment
1 :ADEP origin String ICAO code (airport)
2 :ADES destination String ICAO code (airport)
4 :ACTYPE String
5 :RFL RFL Int64 Requested Flight Level
6 :ZONE_ORIG zone origin String ICAO code (could be the same as airport)
7 :ZONE_DEST zone destin String ICAO code (could be the same as airport)
8 :FLIGHT_ID flight ID Int64 SAMAD ID or SAAM ID or ... (must be unique)
9 :ETD_DATETIME DateTime Estimated Time and Date of Departure
11 :ETA_DATETIME DateTime Estimated Time and Date of Arrival (Not used = !!!!)
12 :CALLSIGN String Char <= 7
13 :COMPANY String Generally the 3 first letters of the callsign
15 :UUID Universal Unique ID String CALLSIGN-ADEP-ADES-EOBD-EOBT
16 :FIPS_CLONED Fips cloned String Y/N
18 :FLIGHT_SAAM_ID Flight SAAM ID Int64 Unique for the day
19 :FLIGHT_SAMAD_ID Flight SAMAD ID Int64 Unique for ever
20 :TACT_ID Int64 CFMU ID: Check with TACTID on ALL-FT
21 :SSR_CODE String plan identifiant (12bits mode A), Secondary Surveillance Rader
22 :REGISTRATION String
23 :PTD_DATETIME DateTime Planned Date and Time Departure
25 :ATFM_DELAY Air Traffic Flow Management Delay: Difference between calculated by CASA take off time (CTOT) and estimated take off time (ETOT)
26 :REROUTING_STATE String
27 :MOST_PEN_REG String Most penalizing regulation; 'X' if no regulation is present. Check with mostPenalizingRegulationID
28 :TYPE_OF_FLIGHT String Letters indicating type of flight (military ?)
29 :EQUIPMENT String Letters indicating equipment of flight
30 :ICAO_EQUIP String
31 :COM_EQUIP String
32 :NAV_EQUIP String
33 :SSR_EQUIP String Secondary Surveillance Radar equipment
34 :SURVIVAL_EQUIP String
35 :PERSONS_ON_BOARD Int64 0 means no information
36 :TOP_FL Int64
37 :MAX_RFL Int64 Requested Flight Level
38 :FLT_PLN_SOURCE String
40 :AOBT DateTime Actual Of Block in Time
41 :IFPSID String Prisme (Integrated Initial Flight Plan Processing System)
42 :IOBT DateTime Initial Of Block in Time | FLF (planned time departure)
43 :ORIGFLIGHTDATAQUALITY String NON, PFD, RPL, FPL
44 :FLIGHTDATAQUALITY String FLF (flt plan source), NON, PFD, RPL, FPL
45 :SOURCE String UNK, FPL, RPL, AFI, MFS, FNM, AFP, DIV
46 :EXEMPTREASON String NEXE, EMER, SERE, HEAD, AEAP, empty
47 :EXEMPTREASONDIST String NEXE, LONG, empty. Note: in NEST the flight exemption is
limited to exempted or not exempted states. Not exempted
corresponds to NEXE in fields 46 and 47. All other values
correspond to exempted. During export, NEST writes empty
fields in 46 and 47 for exempted flights.
48 :LATEFILER String Y/N
49 :LATEUPDATER String Y/N
50 :NORTHATLANTIC String Y/N
51 :COBT DateTime Computed Of Block in Time
52 :EOBT DateTime Estimate Of Block in Time
53 :FLIGHTSTATE String NE, PL, PS, PR, SR, FI, FS, SI, TA, AA, CA, TE
54 :PREV2ACTIVATIONFLIGHTSTATE String NE, PL, PS, PR, SR, FI, FS, SI, TA, AA, CA, TE
55 :SUSPENSIONSTATUS String NS, ST, SM, RC, TV, NR, RV
57 :SAMCTOT DateTime
58 :SAMSENT String Y/N
59 :SIPCTOT DateTime
60 :SIPSENT String Y/N
61 :SLOTFORCED String Y/N
62 :MOSTPENALIZINGREGID String Is it the same as most pen reg on FLF
63 :REGAFFECTEDBYNROFINST Int64
64 :EXCLFROMNROFINST Int64
65 :LASTRECEIVEDATFMMESSAGETITLE String DES, ERR, FCM, FUM, FLS, REA, RFI, RJT, RRN, RRP, SAM, SIP, SLC, SMM, SPA, SRJ, SRM, SWM, UNK
66 :LASTRECEIVEDMESSAGETITLE String ABI, ACH, ACT, APL, ARR, CAN, CHG, CNL, DEP, DLA, ERR, EST, FPL, FSA, MFS, PAC, PFD, RPL, UNK
67 :LASTSENTATFMMESSAGETITLE String DES, ERR, FCM, FUM, FLS, REA, RFI, RJT, RRN, RRP, SAM, SIP, SLC, SMM, SPA, SRJ, SRM, SWM, UNK
68 :MANUALEXEMPREASON String N/S/R
69 :SENSITIVEFLIGHT String Y/N
70 :READYFORIMPROVEMENT String Y/N
71 :READYFORDEP String Y/N
72 :REVISEDTAXITIME Int64 0..999999
73 :TIS Int64 Time to insert into the sequence, 0..999999
74 :TRS Int64 Time to remove from the sequence, 0..999999
75 :TOBESENTSLOTMESSAGE String Related to flight progress, ABI, ACH, ACT, APL, ARR, CAN, CHG, CNL, DEP, DLA, ERR, EST, FPL, FSA, MFS, PAC, PFD, RPL, UNK
76 :TOBESENTPROPMESSAGETITLE String Related to flight progress, ABI, ACH, ACT, APL, ARR, CAN, CHG, CNL, DEP, DLA, ERR, EST, FPL, FSA, MFS, PAC, PFD, RPL, UNK
77 :LASTSENTSLOTMESSAGETITLE String Related to flight progress, ABI, ACH, ACT, APL, ARR, CAN, CHG, CNL, DEP, DLA, ERR, EST, FPL, FSA, MFS, PAC, PFD, RPL, UNK
78 :LASTSENTPROPMESSAGETITLE String Related to flight progress, ABI, ACH, ACT, APL, ARR, CAN, CHG, CNL, DEP, DLA, ERR, EST, FPL, FSA, MFS, PAC, PFD, RPL, UNK
79 :LASTSENTSLOTMESSAGE DateTime Related to flight progress
80 :LASTSENTPROPMESSAGE DateTime Related to flight progress
81 :FLIGHTCOUNTOPTION String Indicates which flight plan should be / has been used when doing flight/count related operations. Used in TACT queries and replies P,N
82 :NORMALFLIGHTTACT_ID Int64
83 :PROPFLIGHTTACT_ID Int64
84 :OPERATINGACOPERICAOID String
85 :REROUTINGWHY String N/M/C/A/O
86 :REROUTINGLFIGHTSTATE String P/E/T/R/V/N
87 :RVR Int64
88 :FTFMAIRAC Int64 Filed traffic flight model (TFM=Profile) airac..
89 :FTFMENVBASELINENUM Int64 Filed traffic flight model (TFM=Profile) env..
90 :RTFMAIRAC Int64 Regulated traffic flight model (TFM=Profile) airac..
91 :RTFMENVBASELINENUM Int64 Regulated traffic flight model (TFM=Profile) env..
92 :CTFMAIRAC Int64 Computer traffic flight model (TFM=Profile) airac..
93 :CTFMENVBASELINENUM Int64 Computer traffic flight model (TFM=Profile) env..
94 :LASTRECEIVEDPROGRESSMESSAGE String DPI, EMPTY, SIZE
"""
module Exp2
export read
#TODO Keep unused fields?
using ..util # new
using Format
using CSV
using Dates
using DataFrames
const exp2_fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>String,
5=>Int64, 6=>String, 7=>String, 8=>Int64, 9=>String, 10=>String, 11=>String,
12=>String, 13=>String, 14=>Int64, 15=>String, 16=>String, 17=>Int64,
18=>Int64, 19=>Int64, 20=>Int64, 21=>String, 22=>String, 23=>String,
24=>String, 25=>Int64, 26=>String, 27=>String, 28=>String, 29=>String,
30=>String, 31=>String, 32=>String, 33=>String, 34=>String, 35=>Int64,
36=>Int64, 37=>Int64, 38=>String, 39=>Int64, 40=>String, 41=>String, 42=>String,
43=>String, 44=>String, 45=>String, 46=>String, 47=>String, 48=>String,
49=>String, 50=>String, 51=>String, 52=>String, 53=>String, 54=>String, 55=>String,
56=>Int64, 57=>String, 58=>String, 59=>String, 60=>String, 61=>String, 62=>String,
63=>Int64, 64=>Int64, 65=>String, 66=>String, 67=>String, 68=>String, 69=>String,
70=>String, 71=>String, 72=>Int64, 73=>Int64, 74=>Int64, 75=>String, 76=>String,
77=>String, 78=>String, 79=>String, 80=>String, 81=>String, 82=>Int64, 83=>Int64,
84=>String, 85=>String, 86=>String, 87=>Int64, 88=>Int64, 89=>Int64, 90=>Int64,
91=>Int64, 92=>Int64, 93=>Int64, 94=>String, 95=>Int64)
const header_format = ["ADEP", "ADES", "NOT_USED1", "ACTYPE", "RFL",
"ZONE_ORIG", "ZONE_DEST", "FLIGHT_ID", "DEP_DATE", "DEP_TIME",
"ARR_TIME", "CALLSIGN", "COMPANY", "SEPARATOR1", "UUID",
"FIPS_CLONED", "SEPARATOR2", "FLIGHT_SAAM_ID", "FLIGHT_SAMAD_ID", "TACT_ID",
"SSR_CODE", "REGISTRATION", "PLAN_DEP_DATE", "PLAN_DEP_TIME", "ATFM_DELAY",
"REROUTING_STATE", "MOST_PEN_REG", "TYPE_OF_FLIGHT", "EQUIPMENT", "ICAO_EQUIP",
"COM_EQUIP", "NAV_EQUIP", "SSR_EQUIP", "SURVIVAL_EQUIP", "PERSONS_ON_BOARD",
"TOP_FL", "MAX_RFL", "FLT_PLN_SOURCE", "SEPARATOR3", "AOBT",
"IFPSID", "IOBT", "ORIGFLIGHTDATAQUALITY", "FLIGHTDATAQUALITY", "SOURCE",
"EXEMPTREASON", "EXEMPTREASONDIST", "LATEFILER", "LATEUPDATER", "NORTHATLANTIC",
"COBT", "EOBT", "FLIGHTSTATE", "PREV2ACTIVATIONFLIGHTSTATE", "SUSPENSIONSTATUS",
"TACT_ID_DUPLICATE", "SAMCTOT", "SAMSENT", "SIPCTOT", "SIPSENT",
"SLOTFORCED", "MOSTPENALIZINGREGID", "REGAFFECTEDBYNROFINST", "EXCLFROMNROFINST",
"LASTRECEIVEDATFMMESSAGETITLE",
"LASTRECEIVEDMESSAGETITLE", "LASTSENTATFMMESSAGETITLE", "MANUALEXEMPREASON",
"SENSITIVEFLIGHT", "READYFORIMPROVEMENT",
"READYFORDEP", "REVISEDTAXITIME", "TIS", "TRS", "TOBESENTSLOTMESSAGE",
"TOBESENTPROPMESSAGETITLE", "LASTSENTSLOTMESSAGETITLE",
"LASTSENTPROPMESSAGETITLE", "LASTSENTSLOTMESSAGE", "LASTSENTPROPMESSAGE",
"FLIGHTCOUNTOPTION", "NORMALFLIGHTTACT_ID", "PROPFLIGHTTACT_ID",
"OPERATINGACOPERICAOID", "REROUTINGWHY",
"REROUTINGLFIGHTSTATE", "RVR", "FTFMAIRAC", "FTFMENVBASELINENUM", "RTFMAIRAC",
"RTFMENVBASELINENUM", "CTFMAIRAC", "CTFMENVBASELINENUM",
"LASTRECEIVEDPROGRESSMESSAGE", "SEPARATOR4"]
const yymmdd = DateFormat("YYmmdd")
const hhmm = DateFormat("HHMM")
const yyyymmddhhmmss = DateFormat("YYYYmmddHHMMSS")
const yymmddhhmm = DateFormat("YYmmddHHMM")
const year2000 = Year(2000)
function read(file)
df = CSV.read(file, delim=";", types=exp2_fileformat, header=header_format,
copycols=true)
reformat!(df);
return df
end
function eta_datetime(eta_time::String, etd_datetime::DateTime)
    eta_time_type = format_time(eta_time, hhmm)
    if eta_time_type === missing
        return missing
    end
    eta_dt = Date(etd_datetime) + eta_time_type
    # An arrival clock time earlier than departure means arrival on the next day
    if eta_dt < etd_datetime
        return eta_dt + Dates.Day(1)
    else
        return eta_dt
    end
end
function reformat_exp!(df)
df[:,:ETD_DATETIME] = format_datetime.(df[:,:DEP_DATE] .*
df[:,:DEP_TIME], yymmddhhmm, addyear=year2000)
df[:,:ETA_DATETIME] = eta_datetime.(df[:,:ARR_TIME], df[:,:ETD_DATETIME])
end
function reformat_flf!(df)
df[:,:PTD_DATETIME] = format_datetime.(df[:,:PLAN_DEP_DATE] .*
df[:,:PLAN_DEP_TIME], yymmddhhmm, addyear=year2000)
end
function reformat_allft!(df)
    # Date/time conversion; the TEMP swap lets the element type change
    for col in (:AOBT, :IOBT, :COBT, :EOBT)
        df[:,:TEMP] = format_datetime.(df[:,col], yyyymmddhhmmss)
        select!(df, Not(col))
        df[:,col] = df[:,:TEMP]
        select!(df, Not(:TEMP))
    end
end
function remove_unused!(df)
select!(df,[:ADEP, :ADES, :ACTYPE, :RFL, :ZONE_ORIG, :ZONE_DEST,
:FLIGHT_ID, :ETD_DATETIME, :ETA_DATETIME, :CALLSIGN, :COMPANY, :UUID,
:FIPS_CLONED, :FLIGHT_SAAM_ID, :FLIGHT_SAMAD_ID, :TACT_ID, :SSR_CODE,
:REGISTRATION, :PTD_DATETIME, :ATFM_DELAY, :REROUTING_STATE, :MOST_PEN_REG,
:TYPE_OF_FLIGHT, :EQUIPMENT, :ICAO_EQUIP, :COM_EQUIP, :NAV_EQUIP,
:SSR_EQUIP, :SURVIVAL_EQUIP, :PERSONS_ON_BOARD, :TOP_FL, :MAX_RFL,
:FLT_PLN_SOURCE, :AOBT, :IFPSID, :IOBT, :ORIGFLIGHTDATAQUALITY,
:FLIGHTDATAQUALITY, :SOURCE, :EXEMPTREASON, :EXEMPTREASONDIST, :LATEFILER,
:LATEUPDATER, :NORTHATLANTIC, :COBT, :EOBT, :FLIGHTSTATE,
:PREV2ACTIVATIONFLIGHTSTATE, :SUSPENSIONSTATUS, :SAMCTOT, :SAMSENT,
:SIPCTOT, :SIPSENT, :SLOTFORCED, :MOSTPENALIZINGREGID,
:REGAFFECTEDBYNROFINST, :EXCLFROMNROFINST, :LASTRECEIVEDATFMMESSAGETITLE,
:LASTRECEIVEDMESSAGETITLE, :LASTSENTATFMMESSAGETITLE, :MANUALEXEMPREASON,
:SENSITIVEFLIGHT, :READYFORIMPROVEMENT, :READYFORDEP, :REVISEDTAXITIME,
:TIS, :TRS, :TOBESENTSLOTMESSAGE, :TOBESENTPROPMESSAGETITLE,
:LASTSENTSLOTMESSAGETITLE, :LASTSENTPROPMESSAGETITLE, :LASTSENTSLOTMESSAGE,
:LASTSENTPROPMESSAGE, :FLIGHTCOUNTOPTION, :NORMALFLIGHTTACT_ID,
:PROPFLIGHTTACT_ID, :OPERATINGACOPERICAOID, :REROUTINGWHY,
:REROUTINGLFIGHTSTATE, :RVR, :FTFMAIRAC, :FTFMENVBASELINENUM, :RTFMAIRAC,
:RTFMENVBASELINENUM, :CTFMAIRAC, :CTFMENVBASELINENUM,
:LASTRECEIVEDPROGRESSMESSAGE])
end
function reformat!(df)
reformat_exp!(df)
reformat_flf!(df)
reformat_allft!(df)
remove_unused!(df)
end
end # module
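# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Exp2.read("20171214.exp2")
#   df[1, :ETD_DATETIME]   # departure date and time merged into one DateTime
#   df[1, :ETA_DATETIME]   # arrival; rolls over to the next day when before ETD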
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1016 | """
For fileformat
MTF (Medium-Term Forecast) file
Example:
RANK;Dep;Des;2017;2018;2019;2020;2021;2022;2023;2024
Base;ALANDISLANDS;BELARUS;0.002739726;0.000362248;0.000781042;0.000861343;0.000887779;0.00103794;0.001057792;0.000948328
Base;ALANDISLANDS;DENMARK;0.008219178;0.007356467;0.01588544;0.015864169;0.01588544;0.01588544;0.01588544;0.015864169
Base;ALANDISLANDS;ESTONIA;0.010958904;0.003430455;0.004855271;0.004871514;0.004855271;0.004855271;0.004855271;0.004871514
Base;ALANDISLANDS;FINLAND;2.536986301;2.813270647;2.762885585;2.822041934;2.882595408;2.943180367;3.004312251;3.065653134
"""
module For
export read
using CSV
# using DataFrames
# const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>String,
# 5=>String, 6=>String, 7=>Int64, 8=>Int64, 9=>Int64, 10=>String)
# const header_format = ["DATEACTIVE", "AIRPORT", "RWY", "TIMEOPEN",
# "TIMECLOSED", "YN", "NUM1", "NUM2", "NUM3", "T"]
function read(file)
df = CSV.read(file, delim=';', header=1, copycols=true)
return df
end
end # Module
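# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.For.read("traffic.for")
#   df[df.Dep .== "ALANDISLANDS", :]   # forecast rows for one departure region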
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 4045 | """
FRP fileformat
Used for generating Free Route segments
See EUROCONTROL NEST Manual for Frp fileformat description
Output:
TBD
Frp fileformat example
ALBANIA EX DIMIS N400421 E0203541
ALBANIA EX DOBAR N411958 E0202941
ALBANIA EX GOKEL N403554 E0190000
ALBANIA EX MAVAR N414012 E0203148
ALBANIA EX PAPIZ N405330 E0185706
ALBANIA EX PINDO N402851 E0205721
ALBANIA EX RETRA N421342 E0192006
ALBANIA EX RODON N412730 E0190600
ALBANIA E PETAK N414631 E0191850
ALBANIA A PETAK LATI
ALBANIA E AKIKA N423203 E0193614
ALBANIA E NIKRO N393957 E0200712
ALBANIA E TUMBO N400402 E0202822
ALBANIA E VJOSA N395855 E0202329
ALBANIA X ALELU N422845 E0195102
ALBANIA X EBELA N411136 E0185432
ALBANIA X PITAS N395400 E0195040
ALBANIA I DIRES N410328 E0191133
ALBANIA AD DIRES LATI
ALBANIA I DITAN N405644 E0191813
ALBANIA AD DITAN LATI
ALBANIA I ELBAK N405151 E0200452
ALBANIA AD ELBAK LATI
ALBANIA I INLOT N415847 E0192711
ALBANIA AD INLOT LATI
ALBANIA I ODRAS N413335 E0201027
ALBANIA AD ODRAS LATI
ALBANIA I RINAV N415901 E0194718
ALBANIA D RINAV LATI
ALBANIA I ADDER N403126 E0201817
ALBANIA I GRIBA N402830 E0193712
ALBANIA I MAGGI N411103 E0192236
ALBANIA I NURSO N403956 E0203217
ALBANIA I OVVER N403230 E0200046
ALBANIA I TR N412748 E0194251
ALBANIA I UMRES N412610 E0192553
ALBANIA I UNASA N412642 E0191801
ALBANIA I UNDER N410405 E0195651
ALBANIA I VALIN N411104 E0194600
ALBANIA I VOLBI N413855 E0194122
"""
module Frp
export read
using ..util
struct FreeRoutePoint
type::AbstractString
name::AbstractString
# point::Union{Point, AbstractString, Missing}
point::Union{Point_deg, AbstractString, Missing}
end
struct FreeRouteAirports
type::AbstractString
name::AbstractString
airports::Vector{String}
end
struct FreeRouteArea
freeroutepoints::Vector{FreeRoutePoint}
freerouteairports::Vector{FreeRouteAirports}
end
function read(file)
freerouteareas = Dict{String, FreeRouteArea}()
freerouteareaname = ""
freeroutepoints = Vector{FreeRoutePoint}()
freerouteairports = Vector{FreeRouteAirports}()
lines = sort(readlines(file))
for line in lines
line_elements = split(line, ' ')
if line_elements[1] != freerouteareaname # new freeroutearea
if freerouteareaname != ""
freerouteareas[freerouteareaname] = FreeRouteArea(
freeroutepoints, freerouteairports)
end
freerouteareaname = line_elements[1]
freeroutepoints = Vector{FreeRoutePoint}()
freerouteairports = Vector{FreeRouteAirports}()
end
if line_elements[2] in ["E", "X", "EX", "I", "EI", "XI", "EXI"]
point_type = convert(String, line_elements[2])
point_name = convert(String, line_elements[3])
if length(line_elements) > 4
point = latlon(line_elements[4], line_elements[5])
elseif length(line_elements) > 3
# println(line_elements)
point = line_elements[4]
else
# println(line_elements)
point = missing
end
freeroutepoints = vcat(freeroutepoints, FreeRoutePoint(point_type,
point_name, point))
elseif line_elements[2] in ["D", "A", "AD"]
point_type = convert(String, line_elements[2])
point_name = convert(String, line_elements[3])
airports = Vector{String}()
if length(line_elements) ≤ 3
elseif length(line_elements) == 4
airports = [convert(String, line_elements[4])]
else
for airport in line_elements[4:end]
airports = vcat(airports, convert(String, airport))
end
end
freerouteairports = vcat(freerouteairports, FreeRouteAirports(point_type,
point_name, airports))
else
end
end
freerouteareas[freerouteareaname] = FreeRouteArea(
freeroutepoints, freerouteairports)
return freerouteareas
end
end # module
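# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   areas = DDR2import.Frp.read("freeroute.frp")   # Dict{String, FreeRouteArea}
#   albania = areas["ALBANIA"]
#   albania.freeroutepoints[1].type         # e.g. "EX" for an entry/exit point
#   albania.freerouteairports[1].airports   # airports tied to an A/D/AD point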
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1180 | """
GAR fileformat
See EUROCONTROL NEST Manual for Gar fileformat description
Dictionary with point list (latitude + longitude [deg]) for each airblock ID (key)
"""
module Gar
export read
using Format
using CSV
const fileformat = Dict(1=>String, 2=>String, 3=>String)
const header_format = ["AorP", "LAT_DEG", "LON_DEG"]
function read(file)
csviterator = CSV.File(file, delim=";", types=fileformat, header=header_format,
datarow=2)
return splitintodict(csviterator)
end
function splitintodict(iterator)
dict = Dict{String, Matrix{Float64}}()
points = Matrix{Float64}(undef, 0, 2)
j = 1
n_points = 1000
airblockname = ""
for (i, row) in enumerate(iterator)
if row.AorP == "P"
points = vcat(points,[parse(Float64, row.LAT_DEG) parse(Float64, row.LON_DEG)])
if j == n_points
dict[airblockname] = points
end
j += 1
elseif row.AorP == "A"
airblockname = row.LAT_DEG
n_points = parse(Int64, row.LON_DEG)
points = Matrix{Float64}(undef, 0, 2)
j = 1
else
end
end
return dict
end
end # module
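# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   airblocks = DDR2import.Gar.read("airblocks.gar")   # Dict{String, Matrix{Float64}}
#   airblocks["002BG"]   # n x 2 matrix of lat/lon vertices in degrees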
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 2394 | """
GSL fileformat
Describes how elementary sectors are built from airblocks (GAR)
See EUROCONTROL NEST Manual for Gsl fileformat description
Output:
Sectors dictionary with the following elements:
* ID Sector ID String
* Name Sector name String
* Category Airspace category String Single character
* Type Sector type String FIR=Flight Information Region,
ERSA=Elementary Restricted Airspace,
ES=Elementary Sector
ERAS=Elementary Regulated Airspace
UNK=Unknown type
* Airblocks Vector{Airblock}
Airblocks struct with the following elements:
* Name Airblock name
* LowerFL Lower flight level Int64
* UpperFL Upper flight level Int64
GSL fileformat example
4
S;BALTIC;;1;_;ES
A;BALTIC;+;0;999
S;BGGLFIR;SONDRESTROM FIR;1;_;FIR
A;002BG;+;0;999
S;BIFAROER;FAEROER CTA;1;_;FIR
A;011BI;+;0;200
S;BIRD;_;9;_;FIR
A;001BI;+;0;999
A;002BI;+;0;999
A;003BI;+;0;999
A;004BI;+;0;999
A;011BI;+;200;999
A;100BI;+;0;999
A;104BI;+;0;999
A;107BI;+;0;999
A;108BI;+;0;999
"""
module Gsl
export read
struct Airblock
Name::String
LowerFL::Int64
UpperFL::Int64
end
struct Sector
ID::String
Name::String
Category::String
Type::String
Airblocks::Vector{Airblock}
end
function read(file)
sectors = Dict{String, Sector}()
id = ""
sname = ""
category = ""
type = ""
airblocks = Vector{Airblock}()
for line in eachline(file)
line_elements = split(line, ';')
if line_elements[1] == "S"
if id != ""
sectors[id] = Sector(id, sname, category, type, airblocks)
end
id = line_elements[2]
sname = line_elements[3]
category = line_elements[5]
type = line_elements[6]
airblocks = Vector{Airblock}()
elseif line_elements[1] == "A"
aname = line_elements[2]
lowerfl = parse(Int64, line_elements[4])
upperfl = parse(Int64, line_elements[5])
airblock = Airblock(aname, lowerfl, upperfl)
airblocks = vcat(airblocks, airblock)
end
end
sectors[id] = Sector(id, sname, category, type, airblocks)
return sectors
end
end # module
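# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   sectors = DDR2import.Gsl.read("sectors.gsl")
#   bird = sectors["BIRD"]             # Sector of type "FIR"
#   [a.Name for a in bird.Airblocks]   # its constituent airblock names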
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 2518 | """
Mot fileformat
Military opening times for usage with route choices
See EUROCONTROL NEST Manual for Mot fileformat description
Output:
Dictionary keyed on military zone name, each value an OpeningTimes struct
holding the type-1 (onlytimes), type-2 (timelevels) and type-3 (timetypes)
records
Mot fileformat example
1 MIL1 1000 1200
1 MIL1 1400 1700
1 MIL2 0900 1300
1 MIL3 1100 1300
1 MIL4 0900 1800
2 MIL1 1000 1200 055 245
2 MIL1 1400 1700 085 285
2 MIL2 0900 1300 000 600
2 MIL3 1100 1300 125 245
2 MIL4 0900 1800 000 285
3 ED 2200 0500 EX
3 LF 2300 0600 EX
3 LI 2200 0500 E
"""
module Mot
using ..util
using Dates
export read
const hhmm = DateFormat("HHMM")
struct OnlyTime
begintime::Union{Time, Missing}
endtime::Union{Time, Missing}
end
struct TimeLevel
begintime::Union{Time, Missing}
endtime::Union{Time, Missing}
lowerFL::Int64
upperFL::Int64
end
struct TimeType
begintime::Union{Time, Missing}
endtime::Union{Time, Missing}
type::String
end
struct OpeningTimes
onlytimes::Vector{OnlyTime}
timelevels::Vector{TimeLevel}
timetypes::Vector{TimeType}
end
function read(file)
militairyzones = Dict{String, OpeningTimes}()
onlytimes = Vector{OnlyTime}()
timelevels = Vector{TimeLevel}()
timetypes = Vector{TimeType}()
for line in eachline(file)
line_elements = split(line)
if length(line_elements) > 3
begintime = format_time.(line_elements[3], hhmm)
endtime = format_time.(line_elements[4], hhmm)
if haskey(militairyzones, line_elements[2])
openingtimes = militairyzones[line_elements[2]]
onlytimes = openingtimes.onlytimes
timelevels = openingtimes.timelevels
timetypes = openingtimes.timetypes
else
onlytimes = Vector{OnlyTime}()
timelevels = Vector{TimeLevel}()
timetypes = Vector{TimeType}()
end
if line_elements[1] == "1"
onlytimes = vcat(onlytimes, OnlyTime(begintime, endtime))
elseif line_elements[1] == "2"
lowerFL = parse(Int64, line_elements[5])
upperFL = parse(Int64, line_elements[6])
timelevels = vcat(timelevels, TimeLevel(begintime, endtime, lowerFL, upperFL))
elseif line_elements[1] == "3"
timetypes = vcat(timetypes, TimeType(begintime, endtime, line_elements[5]))
else
end
militairyzones[line_elements[2]] = OpeningTimes(onlytimes, timelevels, timetypes)
end
end
return militairyzones
end
end # module
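# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   zones = DDR2import.Mot.read("opening.mot")   # Dict{String, OpeningTimes}
#   zones["MIL1"].onlytimes   # type-1 records: plain opening intervals
#   zones["ED"].timetypes     # type-3 records: intervals with an E/EX flag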
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1867 | """
NARP fileformat
See EUROCONTROL NEST Manual for NARP fileformat description
Example Narp file format
8214
AGGA;AUKI/GWAUNARU'U;-8.6983333333;160.6783333333;0;0;0;0
AGGE;BALALAE;-6.9833333333;155.8850000000;0;0;0;0
AGGH;HONIARA/HENDERSON;-9.4316666667;160.0533333333;0;0;0;0
AGGJ;AVU AVU;-9.8683333333;160.4100000000;0;0;0;0
AGGK;KIRA KIRA;-10.4500000000;161.8966666667;0;0;0;0
AGGL;GRACIOSA BAY;-10.7233333333;165.7800000000;0;0;0;0
AGGM;MUNDA;-8.3250000000;157.2633333333;0;0;0;0
AGGN;GIZO/NUSATAPE;-8.1016666667;156.8350000000;0;0;0;0
AGGP;PARASI;-9.6416666667;161.4252777778;0;0;0;82
"""
module Narp
export read
using Format
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>Float64, 4=>Float64,
5=>String, 6=>String, 7=>String, 8=>String)
const header_format = ["AIRPORT_ID", "AIRPORT_NAME", "LAT_DEG", "LON_DEG",
"TIS", "TRS", "TAXITIME", "ALTITUDE_FL"]
function read(file)
df = CSV.read(file, delim=";", types=fileformat, header=header_format,
copycols=true, datarow=2)
reformat!(df)
return df
end
function reformat!(df)
    # Convert the numeric string columns; the TEMP swap lets the element
    # type change (Int64, with missing where the field was blank)
    for col in (:TIS, :TRS, :TAXITIME, :ALTITUDE_FL)
        df[:,:TEMP] = string2int.(df[:,col])
        select!(df, Not(col))
        df[:,col] = df[:,:TEMP]
        select!(df, Not(:TEMP))
    end
end
function string2int(str::String)
    # tryparse returns nothing for "" and any other non-integer content
    maybeint = tryparse(Int, str)
    return maybeint === nothing ? missing : maybeint
end
end # module
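# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Narp.read("airports.narp")
#   df[df.AIRPORT_ID .== "AGGP", :ALTITUDE_FL]   # Int64, or missing where blank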
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1345 | """
NNPT fileformat
See EUROCONTROL NEST Manual for NNPT fileformat description
Example NNPT file format
17189
*01BP;DB;47.6513888888889;19.1944444444444;_
*01DC;DB;47.57;21.7191666666667;_
*01RD;DB;52.1555555555556;4.78694444444444;_
*02BP;DB;47.3927777777778;19.5241666666667;_
*02DC;DB;47.3327777777778;21.5022222222222;_
*03BP;DB;47.1725;19.6636111111111;_
*03DC;DB;47.3191666666667;21.6580555555556;_
*03RD;DB;52.0022222222222;4.57416666666667;_
*04BP;DB;47.2441666666667;19.7613888888889;_
*04DC;DB;47.3402777777778;21.7294444444444;_
*04RD;DB;51.8711111111111;4.55694444444444;_
*05BP;DB;47.7327777777778;18.7886111111111;_
"""
module Nnpt
export read
using Format
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>Float64, 4=>Float64, 5=>String)
const header_format = ["NAV_ID", "NAV_TYPE", "LAT_DEG", "LON_DEG", "NAV_NAME"]
function read(file)
df = CSV.read(file, delim=";", types=fileformat, header=header_format,
copycols=true, datarow=2)
reformat!(df)
return df
end
function reformat!(df)
df[:,:TEMP] = stringreformat.(df[:,:NAV_NAME])
select!(df, Not(:NAV_NAME))
df[:,:NAV_NAME] = df[:,:TEMP]
select!(df, Not(:TEMP))
end
function stringreformat(str::String)
if str == "_"
return missing
else
return str
end
end
end # module
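# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Nnpt.read("points.nnpt")
#   df[df.NAV_ID .== "*01BP", [:LAT_DEG, :LON_DEG]]   # "_" names become missing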
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 3191 | """
NTFV fileformat
See EUROCONTROL NEST Manual for Ntfv fileformat description
Example Ntfv fileformat
#;TRAFFIC_VOLUME;2;383;20140206;20140305;7799;EAR_P
T;ABSON;ABSON FLOWS;_;UKLVCTA;AS;G;5
F;BUK>AB;IN
F;BUK>AG;IN
F;DIB>AB;IN
F;INR>AB;IN
F;INR>AG;IN
T;AERODS1;EDNY ARRS VIA EDGGADS;_;EDGGADS;AS;G;1
F;>EDNY;IN
T;AERODS2;EDNY DEPS VIA EDGGADS;_;EDGGADS;AS;G;1
F;EDNY>;IN
;EX
"""
module Ntfv
export read
struct Airblock
name::String
upperFL::String # holds the third field of an "F" line, e.g. "IN"/"EX", rather than a flight level
end
struct Trafficvolume
name::String
category::String
reflocname::String
refloctype::String
reflocrole::String
airblocks::Vector{Airblock}
end
function read(file)
trafficvolumes = Dict{String, Trafficvolume}()
airblocks = Vector{Airblock}()
tv_id = ""
tv_name = ""
category = ""
reflocname = ""
refloctype = ""
reflocrole = ""
for line in eachline(file)
line_elements = split(line, ';')
if line_elements[1] == "T"
if tv_id != ""
trafficvolumes[tv_id] = Trafficvolume(tv_name, category,
reflocname, refloctype, reflocrole, airblocks)
airblocks = Vector{Airblock}()
end
tv_id = line_elements[2]
tv_name = line_elements[3]
category = line_elements[4]
reflocname = line_elements[5]
refloctype = line_elements[6]
reflocrole = line_elements[7]
elseif line_elements[1] == "F"
airblocks = vcat(airblocks, Airblock(line_elements[2],
line_elements[3]))
end
end
trafficvolumes[tv_id] = Trafficvolume(tv_name, category,
reflocname, refloctype, reflocrole, airblocks)
return trafficvolumes
end
#
# function splitintodict(iterator)
# dict = Dict{String, Trafficvolume}()
# airblocks = Vector{Airblock}(undef, 0)
# j = 1
# n_flowelements = j+2
# trafficvolumeid = ""
# trafficvolumename = ""
# trafficvolumecategory = ""
# reflocname = ""
# refloctype = ""
# reflocrole = ""
# for (i, row) in enumerate(iterator)
# if row.TorF == "F"
# airblocks = vcat(airblocks, Airblock(row.ID, row.NAME))
# if n_flowelements == j
# dict[trafficvolumeid] = Trafficvolume(trafficvolumeid,
# trafficvolumename, trafficvolumecategory, reflocname,
# refloctype, reflocrole, n_flowelements, airblocks)
# end
# j += 1
# elseif row.TorF == "T"
# trafficvolumeid = row.ID === missing ? "" : row.ID
# trafficvolumename = row.NAME === missing ? "" : row.NAME
# trafficvolumecategory = row.CATEGORY === missing ? "" : row.CATEGORY
# reflocname = row.REFLOCNAME === missing ? "" : row.REFLOCNAME
# refloctype = row.REFLOCTYPE === missing ? "" : row.REFLOCTYPE
# reflocrole = row.REFLOCROLE === missing ? "" : row.REFLOCROLE
# n_flowelements = parse(Int64, row.N_FLOWELEMENTS)
# airblocks = Vector{Airblock}(undef, 0)
# j = 1
# else
# end
# end
# return dict
# end
end # module
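# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   tvs = DDR2import.Ntfv.read("volumes.ntfv")   # Dict{String, Trafficvolume}
#   tvs["ABSON"].reflocname                      # "UKLVCTA" in the example above
#   [f.name for f in tvs["ABSON"].airblocks]     # flow element names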
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1463 | """
Operator fileformat
Create the link between the ICAO operator code, the operator name, and
the country of origin
Example:
#;OPERATOR;1;446;20181206;20190102;6478;EAR_P
AAA;ANSETT AUSTRALIA HOLDINGS LTD;ANSETT;AU;AUSTRALIA;19000101;21001231
AAB;ABELAG AVIATION;ABG;BE;BELGIUM;19000101;21001231
AAC;ARMY AIR CORPS;ARMYAIR;GB;UNITED KINGDOM;19000101;21001231
AAD;AERO AVIATION CENTRE LTD.;SUNRISE;CA;CANADA;19000101;21001231
AAE;EXPRESS AIR, INC. (PHOENIX, AZ);ARIZONA;US;UNITED STATES;19000101;21001231
AAF;AIGLE AZUR;AIGLE AZUR;FR;FRANCE;19000101;21001231
AAG;AIR ATLANTIQUE;ATLANTIC;GB;UNITED KINGDOM;20150101;21001231
"""
module Operator
export read
using Dates
struct OperatorICAO
name::String
callsign::String
countryid::String
country::String
startdate::Date
enddate::Date
function OperatorICAO(name, callsign, countryid, country, startdate, enddate)
sd = Date(startdate, "YYYYmmdd")
ed = Date(enddate, "YYYYmmdd")
new(name, callsign, countryid, country, sd, ed)
end
end
function read(file)
operators = Dict{String, OperatorICAO}()
for line in eachline(file)
line_elements = split(line, ';')
if length(line_elements) == 7
operators[line_elements[1]] = OperatorICAO(line_elements[2],
line_elements[3], line_elements[4], line_elements[5],
line_elements[6], line_elements[7])
end
end
return operators
end
end # module
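# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   ops = DDR2import.Operator.read("operators.dat")   # Dict{String, OperatorICAO}
#   ops["AAF"].callsign    # "AIGLE AZUR" in the example above
#   ops["AAF"].startdate   # Date parsed from "19000101"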
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1450 | """
Routes fileformat
Example:
#;ROUTES;3;446;20181206;20190102;8;EAR_P
L;ABIRO1AGMTT;AP;999999999999;000000000000;ABIRO;SP;1
L;ABIRO1AGMTT;AP;999999999999;000000000000;*2TNG;DBP;2
L;ABIRO1DGMTT;DP;999999999999;000000000000;*TAN3;DBP;1
L;ABIRO1DGMTT;DP;999999999999;000000000000;ABIRO;SP;2
L;ABIRO2AGMTT;AP;999999999999;000000000000;ABIRO;SP;1
L;ABIRO2AGMTT;AP;999999999999;000000000000;*2TNG;DBP;2
L;ABIRO2DGMTT;DP;999999999999;000000000000;*1TNG;DBP;1
L;ABIRO2DGMTT;DP;999999999999;000000000000;ABIRO;SP;2
"""
module Routes
export read
struct RoutePoint
wp::String
location_type::String
end
struct Route
type::String
route::Vector{RoutePoint}
end
function read(file)
routes = Dict{String, Route}()
name = ""
type = ""
route = Vector{RoutePoint}()
for route_line in eachline(file)
split_route = split(route_line, ';')
if split_route[1] == "L"
if split_route[2] != name
# Store the completed previous route before starting a new one
if name != ""
routes[name] = Route(type, route)
end
name = split_route[2]
type = split_route[3]
route = Vector{RoutePoint}()
end
route = vcat(route, RoutePoint(split_route[6], split_route[7]))
else
# Initial line
end
end
# Create route
routes[name] = Route(type, route)
return routes
end
end # module
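# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   routes = DDR2import.Routes.read("routes.dat")   # Dict{String, Route}
#   r = routes["ABIRO1AGMTT"]
#   r.type                    # "AP" in the example above
#   [p.wp for p in r.route]   # ["ABIRO", "*2TNG"]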
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1644 | """
Runway fileformat
Example:
#;RUNWAY;2;446;20181206;20190102;241483;EAR_P
06/12/2018;BIAR;01;0000;2359;Y;5;1;1;T
06/12/2018;BIAR;19;0000;2359;N;5;1;1;T
06/12/2018;BIBA;03;0000;2359;N;10;10;5;T
06/12/2018;BIBA;12;0000;2359;Y;10;10;5;T
06/12/2018;BIBA;21;0000;2359;N;10;10;5;T
06/12/2018;BIBA;30;0000;2359;N;10;10;5;T
06/12/2018;BIBD;04;0000;2359;Y;10;10;5;T
06/12/2018;BIBD;22;0000;2359;N;10;10;5;T
06/12/2018;BIBF;18;0000;2359;N;10;10;5;T
06/12/2018;BIBF;36;0000;2359;Y;10;10;5;T
06/12/2018;BIBL;03;0000;2359;Y;10;10;5;T
06/12/2018;BIBL;21;0000;2359;N;10;10;5;T
"""
module Runway
export read
using ..util
using CSV
using Dates
using DataFrames
const ddmmYYYY = DateFormat("dd/mm/YYYY")
const hhmm = DateFormat("HHMM")
const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>String,
5=>String, 6=>String, 7=>Int64, 8=>Int64, 9=>Int64, 10=>String)
const header_format = ["DATEACTIVE", "AIRPORT", "RWY", "TIMEOPEN",
"TIMECLOSED", "YN", "NUM1", "NUM2", "NUM3", "T"]
function read(file)
df = CSV.read(file, delim=';', types=fileformat, header=header_format,
copycols=true, datarow=2)
reformat!(df)
return df
end
function reformat!(df)
    # Parse the date/time string columns; the TEMP swap lets the type change
    for (formatter, col, fmt) in ((format_date, :DATEACTIVE, ddmmYYYY),
                                  (format_time, :TIMEOPEN, hhmm),
                                  (format_time, :TIMECLOSED, hhmm))
        df[:,:TEMP] = formatter.(df[:,col], fmt)
        select!(df, Not(col))
        df[:,col] = df[:,:TEMP]
        select!(df, Not(:TEMP))
    end
end
end # module
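# Usage sketch (hypothetical file name):
#
#   using DDR2import
#   df = DDR2import.Runway.read("runways.dat")
#   df[df.YN .== "Y", [:AIRPORT, :RWY, :TIMEOPEN, :TIMECLOSED]]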
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 4712 | """
Import SO6 fileformat as a DataFrame
See EUROCONTROL NEST Manual for original SO6 fileformat description
SO6 DataFrame Column Names
# Field Type Size Comment
:SEGMENT_ID segment identifier String first point name "_" last point name (see note)
:ADEP origin of flight String 4 ICAO code
:ADES destination of flight String 4 ICAO code
:ACTYPE aircraft type String 4
:DATETIMEBEGINSEGMENT DateTime
:DATETIMEENDSEGMENT DateTime
:FLBEGINSEGMENT FL begin segment Int64 1-3
:FLENDSEGMENT FL end segment Int64 1-3
:STATUS flight status Int64 1 0=climb, 1=descent, 2=cruise
:CALLSIGN String
:LATBEGINSEGMENT_DEG Latitude of begin of segment in degrees Float64
:LONBEGINSEGMENT_DEG Longitude of begin of segment in degrees Float64
:LATENDSEGMENT_DEG Latitude of begin of segment in degrees Float64
:LONENDSEGMENT_DEG Longitude of begin of segment in degrees Float64
:FLIGHT_ID Flight identifier Int64 Unique ID
:SEQUENCE Sequence number Int64 Start at 1 for new flight
:SEGMENT_LENGTH_M Segment length in m Float64 In meters
:SEGMENT_PARITY Segment parity Int64 * see below
* Segment parity
0=NO
1=ODD
2=EVEN
3=ODD_LOW
4=EVEN_LOW
5=ODD_HIGH
6=EVEN_HIGH
7=General Purpose
8=General Purpose
9=General Purpose
Example SO6 file format
EHAM_*AM80 EHAM UUEE A321 214900 214926 0 10 0 AFL2193 171214 171214 3138.483333 285.850000 3137.483333 284.183333 213765625 1 1.427867 0
*AM80_!AAEW EHAM UUEE A321 214926 215004 10 25 0 AFL2193 171214 171214 3137.483333 284.183333 3135.833333 284.033333 213765625 2 1.652550 0
!AAEW_!AAEX EHAM UUEE A321 215004 215026 25 35 0 AFL2193 171214 171214 3135.833333 284.033333 3134.166667 283.883333 213765625 3 1.669195 0
!AAEX_*AM46 EHAM UUEE A321 215026 215047 35 45 0 AFL2193 171214 171214 3134.166667 283.883333 3133.066667 283.783333 213765625 4 1.101704 0
"""
module SO6
export read
# include("utility.jl")
using ..util
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>String, 5=>String,
6=>String, 7=>Int64, 8=>Int64, 9=>String, 10=>String, 11=>String, 12=>String,
13=>Float64, 14=>Float64, 15=>Float64, 16=>Float64, 17=>Int64, 18=>Int64,
19=>Float64, 20=>Int64)
const header_format = ["SEGMENT_ID", "ADEP", "ADES", "ACTYPE",
"TIMEBEGINSEGMENT", "TIMEENDSEGMENT", "FLBEGINSEGMENT", "FLENDSEGMENT",
"STATUS", "CALLSIGN", "DATEBEGINSEGMENT", "DATEENDSEGMENT", "LATBEGINSEGMENT_DEG",
"LONBEGINSEGMENT_DEG", "LATENDSEGMENT_DEG", "LONENDSEGMENT_DEG", "FLIGHT_ID",
"SEQUENCE", "SEGMENT_LENGTH_M", "SEGMENT_PARITY"]
const yymmdd = DateFormat("YYmmdd")
const hhmmss = DateFormat("HHMMSS")
const yymmddhhmmss = DateFormat("YYmmddHHMMSS")
const year2000 = Year(2000)
function read(file)
df = CSV.read(file, delim=" ", types=fileformat, header=header_format,
copycols=true)
reformat!(df);
return df
end
function format_status(str::Union{String, Missing})
    if str === missing
        return missing
    end
    # tryparse returns nothing when the field is not a valid integer
    maybeint = tryparse(Int64, str)
    return maybeint === nothing ? missing : maybeint
end
function reformat!(df)
#Date/Time conversion
df[:,:DATETIMEBEGINSEGMENT] = format_datetime.(df[:,:DATEBEGINSEGMENT] .*
df[:,:TIMEBEGINSEGMENT], yymmddhhmmss, addyear=year2000)
df[:,:DATETIMEENDSEGMENT] = format_datetime.(df[:,:DATEENDSEGMENT] .*
df[:,:TIMEENDSEGMENT], yymmddhhmmss, addyear=year2000)
select!(df, Not(:TIMEBEGINSEGMENT))
select!(df, Not(:TIMEENDSEGMENT))
select!(df, Not(:DATEBEGINSEGMENT))
select!(df, Not(:DATEENDSEGMENT))
df[!,:STATUS] = format_status.(df[:,:STATUS])
df[:,:LATBEGINSEGMENT_DEG] = df[:,:LATBEGINSEGMENT_DEG] / 60.0
df[:,:LONBEGINSEGMENT_DEG] = df[:,:LONBEGINSEGMENT_DEG] / 60.0
df[:,:LATENDSEGMENT_DEG] = df[:,:LATENDSEGMENT_DEG] / 60.0
df[:,:LONENDSEGMENT_DEG] = df[:,:LONENDSEGMENT_DEG] / 60.0
df[:,:SEGMENT_LENGTH_M] = df[:,:SEGMENT_LENGTH_M] * 1852.0
select!(df,[:SEGMENT_ID, :ADEP, :ADES, :ACTYPE, :DATETIMEBEGINSEGMENT,
:DATETIMEENDSEGMENT, :FLBEGINSEGMENT, :FLENDSEGMENT, :STATUS, :CALLSIGN,
:LATBEGINSEGMENT_DEG, :LONBEGINSEGMENT_DEG, :LATENDSEGMENT_DEG,
:LONENDSEGMENT_DEG, :FLIGHT_ID, :SEQUENCE, :SEGMENT_LENGTH_M,
:SEGMENT_PARITY])
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 562 | """
Sid fileformat
Output: dictionary mapping each airport to its list of SID names.
See EUROCONTROL NEST Manual for Sid fileformat description
Sid fileformat example
BIKF THORI ALDAN
BIRK THORI ALDAN
DTMB MON.D
DTNH NBA.D
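Usage sketch (the file name is hypothetical; values assume a file like the
example above):

    sids = DDR2import.Sid.read("nest.sid")
    sids["BIKF"]   # => ["THORI", "ALDAN"]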
"""
module Sid
export read
function read(file)
airportsids = Dict{String, Vector{AbstractString}}()
for line in eachline(file)
line_elements = split(line, ' ')
airport_name = line_elements[1]
airportsids[airport_name] = [sid for sid in line_elements[2:end]]
end
return airportsids
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1138 | """
Sls fileformat
Sector list file
The link between this file and the region (ARE) file is made by the volume name
See EUROCONTROL NEST Manual for Sls fileformat description
Sls DataFrame Column Names
# Field Type Size Comment
1 :SECTORNAME String Max 19
2 :VOLUMENAME String Refers to the volume name in the ARE file (max 24 characters)
3 :VOLUMEBOTTOMLEVEL Int64 In FL; overwrites the bottom level given in the ARE file for that volume
4 :VOLUMETOPLEVEL Int64 In FL; overwrites the top level given in the ARE file for that volume
Example SLS fileformat
CASTOR_UP + AX 000 295
CASTOR_UP + GU 245 265
TMA + SLP 000 095
TMA + SLX 000 125
ABRON + TOP 45 660
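Usage sketch (the file name is hypothetical; values assume a file like the
example above):

    df = DDR2import.Sls.read("nest.sls")
    df[df.SECTORNAME .== "CASTOR_UP", :]   # the volumes collapsed into that sector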
"""
module Sls
export read
using Format
using CSV
using DataFrames
const fileformat = Dict(1=>String, 2=>String, 3=>String, 4=>Int64,
5=>Int64)
const header_format = ["SECTORNAME", "VOLUMESIGN", "VOLUMENAME",
"VOLUMEBOTTOMLEVEL", "VOLUMETOPLEVEL"]
function read(file)
df = CSV.read(file, delim=" ", types=fileformat, header=header_format,
copycols=true)
select!(df, Not(:VOLUMESIGN))
return df
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1764 | """
Spc fileformat
Describes how elementary sectors can be collapsed. Can also contain
descriptions of other, bigger airspaces (such as ACCs).
See EUROCONTROL NEST Manual for Spc fileformat description
Output:
* name Airspace name/description String
* type Airspace type String CS, CRSA, AUA, CLUS, NAS, AREA, AUAG, CRAS, REG, UNK
* sectors Vector of Sector structs, each with the following elements:
  * name Sector name
  * type Sector type
Spc fileformat example
#;AIRSPACE;2;383;20140206;20140305;4082;EAR_P
A;BENELUX;EB/EH;AREA;2
S;EB;NAS
S;EH;NAS
A;BG;GREENLAND (DENMARK);NAS;1
S;BGGLFIR;FIR
A;BI;ICELAND;NAS;3
S;BIFAROER;FIR
S;BIRD;FIR
S;ENJA;FIR
A;BICC;ICELAND AUAG;AUAG;6
S;BIRDCTA;AUA
S;BIRDICTA;AUA
S;BIRDTMA;AUA
S;BIRDTOCA;AUA
S;EKVGCTR;AUA
S;ENJACTR;AUA
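Usage sketch (the file name is hypothetical; values assume a file like the
example above):

    airspaces = DDR2import.Spc.read("nest.spc")
    airspaces["BICC"].type                       # => "AUAG"
    [s.name for s in airspaces["BICC"].sectors]  # 6 names, "BIRDCTA" … "ENJACTR"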
"""
module Spc
export read
struct Sector
name::String
type::String
end
struct Airspace
name::String
type::String
sectors::Vector{Sector}
end
function read(file)
airspaces = Dict{String, Airspace}()
id = ""
aname = ""
type = ""
sectors = Vector{Sector}()
for line in eachline(file)
line_elements = split(line, ';')
if line_elements[1] == "A"
if id != ""
airspaces[id] = Airspace(aname, type, sectors)
end
id = line_elements[2]
aname = line_elements[3]
type = line_elements[4]
sectors = Vector{Sector}()
elseif line_elements[1] == "S"
sname = line_elements[2]
stype = line_elements[3]
sector = Sector(sname, stype)
push!(sectors, sector)
end
end
if id != ""
airspaces[id] = Airspace(aname, type, sectors)
end
return airspaces
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 604 | """
Star fileformat
Output: dictionary mapping each airport to its list of STAR names.
See EUROCONTROL NEST Manual for Star fileformat description
Star fileformat example
BIKF THORI METIL ASTAM KEF CELLO
BIRK THORI METIL ASTAM KEF CELLO
DTMB MON.A
DTNH NBA.A
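Usage sketch (the file name is hypothetical; values assume a file like the
example above):

    stars = DDR2import.Star.read("nest.star")
    stars["DTMB"]   # => ["MON.A"]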
"""
module Star
export read
function read(file)
airportstars = Dict{String, Vector{AbstractString}}()
for line in eachline(file)
line_elements = split(line, ' ')
airport_name = line_elements[1]
airportstars[airport_name] = [star for star in line_elements[2:end]]
end
return airportstars
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 3302 | """
T5 fileformat
See EUROCONTROL NEST Manual for T5 fileformat description
T5 DataFrame Column Names
Field Type Size Comment
:FLIGHT_ID Int64 Flight ID
:SECTOR_NAME String
:ENTRY_DATETIME DateTime
:EXIT_DATETIME DateTime
:ENTRY_FL Float64 Flight level in decimal
:EXIT_FL Float64 Flight level in decimal
:ENTRY_SEGMENT_NAME String Separator between point names is '_'
'##' as a prefix means that the flight was
already present in the sector before it was
opened
:EXIT_SEGMENT_NAME String Separator between point names is '_', might
be different from ENTRY_SEGMENT_NAME
'##' as a prefix means that the flight was
still present in the sector after it was
closed.
:TOT_DISTANCE_IN_SEGMENT_M Float64 For that FLIGHT_ID in that SECTOR_NAME,
expressed in meters.
:TOT_TIME_IN_SEGMENT_S Float64 For that FLIGHT_ID in that SECTOR_NAME,
expressed in seconds.
Example T5 file format
213765625 EHAMCR1 1513288140.000000 1513288226.000000 0.000000 35.000000 0.000000 1.669195 EHAM_*AM80 !AAEW_!AAEX 4.749612 86.000000
213765625 EHAAFIR 1513288140.000000 1513288980.000000 0.000000 255.000000 0.000000 14.478077 EHAM_*AM80 !AAEb_SONEB 84.060844 840.000000
213765625 EHALL 1513288140.000000 1513288980.000000 0.000000 255.000000 0.000000 14.478077 EHAM_*AM80 !AAEb_SONEB 84.060844 840.000000
213765625 EH_DN 1513288140.000000 1513288982.000000 0.000000 255.361450 0.000000 0.247570 EHAM_*AM80 SONEB_OLDOD 84.308411 842.000000
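Usage sketch (the file name is hypothetical; values assume a file like the
example above):

    df = DDR2import.T5.read("20171214.t5")
    sum(df.TOT_TIME_IN_SEGMENT_S)   # total time flown across all sectors, in seconds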
"""
module T5
export read
using Format
using CSV
using Dates
using DataFrames
const fileformat = Dict(1=>Int64, 2=>String, 3=>Float64, 4=>Float64, 5=>Float64,
6=>Float64, 7=>Float64, 8=>Float64, 9=>String, 10=>String, 11=>Float64,
12=>Float64)
const header_format = ["FLIGHT_ID", "SECTOR_NAME", "ENTRY_DATETIME",
"EXIT_DATETIME", "ENTRY_FL", "EXIT_FL", "ENTRY_DISTANCE_M", "EXIT_DISTANCE_M",
"ENTRY_SEGMENT_NAME", "EXIT_SEGMENT_NAME", "TOT_DISTANCE_IN_SEGMENT_M",
"TOT_TIME_IN_SEGMENT_S"]
function read(file)
df = CSV.read(file, delim=" ", types=fileformat, header=header_format,
copycols=true)
reformat!(df);
return df
end
function reformat!(df)
#Date conversion
df[!,:ENTRY_DATETIME] = unix2datetime.(df[!,:ENTRY_DATETIME])
df[!,:EXIT_DATETIME] = unix2datetime.(df[!,:EXIT_DATETIME])
df[:,:ENTRY_DISTANCE_M] = df[:,:ENTRY_DISTANCE_M] * 1852.0
df[:,:EXIT_DISTANCE_M] = df[:,:EXIT_DISTANCE_M] * 1852.0
df[:,:TOT_DISTANCE_IN_SEGMENT_M] = df[:,:TOT_DISTANCE_IN_SEGMENT_M] * 1852.0
select!(df,[:FLIGHT_ID, :SECTOR_NAME, :ENTRY_DATETIME, :EXIT_DATETIME,
:ENTRY_FL, :EXIT_FL, :ENTRY_SEGMENT_NAME, :EXIT_SEGMENT_NAME,
:TOT_DISTANCE_IN_SEGMENT_M, :TOT_TIME_IN_SEGMENT_S])
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1853 | """
Ur fileformat
See EUROCONTROL NEST Manual for Ur fileformat description
Example Ur fileformat
AZ 2020/02/01 2020/02/29 791 Portugal S M
EB 2020/02/01 2020/02/29 9114 Belg.-Luxembourg
ED 2020/02/01 2020/02/29 6374 Germany
EE 2020/02/01 2020/02/29 3151 Estonia
EF 2020/02/01 2020/02/29 4366 Finland
EG 2020/02/01 2020/02/29 6512 0.848504 GBP United Kingdom
EH 2020/02/01 2020/02/29 6739 Netherlands
EI 2020/02/01 2020/02/29 2461 Ireland
EK 2020/02/01 2020/02/29 5759 7.47133 DKK Denmark
Example Ur fileformat (older files without the currency conversion and country columns)
AZ 2017/12/01 2017/12/31 1089
EB 2017/12/01 2017/12/31 6550
ED 2017/12/01 2017/12/31 8268
EF 2017/12/01 2017/12/31 5632
EG 2017/12/01 2017/12/31 9381
EH 2017/12/01 2017/12/31 6709
EI 2017/12/01 2017/12/31 2976
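Usage sketch (the file name is hypothetical; values assume a file like the
first example above):

    rates = DDR2import.Ur.read("202002.ur")
    rates["EG"].unitrate           # unit rate in the local currency
    rates["EG"].valutaconversion   # currency conversion factor (1.0 when already in EUR)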
"""
module Ur
export read
# include("utility.jl")
using ..util
using Dates
const yyyymmdd = DateFormat("yyyy/mm/dd")
struct UnitRate
start_date::Date
end_date::Date
unitrate::Float64
valutaconversion::Float64
valuta::String
country::String
function UnitRate(line::String)
line_elements = split(line, '\t')
start_date = format_date(line_elements[2], yyyymmdd)
end_date = format_date(line_elements[3], yyyymmdd)
unitrate = parse(Float64, line_elements[4]) / 100.0
if length(line_elements) >= 7 && length(line_elements[5]) > 0
valutaconversion = parse(Float64, line_elements[5])
valuta = strip(line_elements[6])
country = strip(line_elements[7])
else
valutaconversion = 1.0
valuta = "EUR"
country = length(line_elements) >= 7 ? strip(line_elements[7]) : ""
end
new(start_date, end_date, unitrate, valutaconversion, valuta, country)
end
end
function read(file)
ur = Dict{String, UnitRate}()
for line in eachline(file)
ur[line[1:2]] = UnitRate(line)
end
return ur
end
end # module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 4088 | module util
export Point, Point3D, extract_lat, extract_lon, latlon, format_date,
format_datetime, format_time, Point_deg
import Base:(convert)
using Dates
using Navigation: Point_deg, Point_rad
"Point type with latitude `lat` [deg] and longitude `lon` [deg]"
struct Point{T<:Float64}
lat::T
lon::T
end
"Point type with latitude `lat` [deg], longitude `lon` [deg] and altitude [m]"
struct Point3D{T<:Float64}
lat::T
lon::T
alt::T
end
Base.:convert(type::Type{Point_deg}, x::Point) = Point_deg(x.lat, x.lon)
Base.:convert(type::Type{Point_rad}, x::Point) = Point_rad(deg2rad(x.lat), deg2rad(x.lon))
Base.:convert(type::Type{Point}, x::Point_deg) = Point(x.ϕ, x.λ)
Base.:convert(type::Type{Point}, x::Point_rad) = Point(rad2deg(x.ϕ), rad2deg(x.λ))
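"""
    extract_lat(str)

Extract a latitude in decimal degrees from a DDMMSS-style string with the
hemisphere letter ('N'/'S') as prefix or suffix, e.g. "N123456" or "1234S".
Minutes and seconds are optional. Returns NaN if the string cannot be parsed.

    extract_lat("S123456")   # ≈ -12.5822
"""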
function extract_lat(str::AbstractString)
if length(str) > 2
if str[1] in ['N', 'S']
sign_lat = str[1] == 'N' ? 1 : -1
relative = 1
elseif str[end] in ['N', 'S']
sign_lat = str[end] == 'N' ? 1 : -1
relative = 0
else
return NaN
end
else
return NaN
end
lat_h = parse(Float64, str[relative+1:relative+2])
lat_m = 0.0
lat_s = 0.0
if length(str) > 3
lat_m = parse(Float64, str[relative+3:relative+4])
end
if length(str) > 5
lat_s = parse(Float64, str[relative+5:end+relative-1])
end
return sign_lat*(lat_h + lat_m/60.0 + lat_s/3600.0)
end
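"""
    extract_lon(str)

Extract a longitude in decimal degrees from a DDDMMSS-style string with the
hemisphere letter ('E'/'W') as prefix or suffix, e.g. "W1123456" or "012E".
Minutes and seconds are optional. Returns NaN if the string cannot be parsed.

    extract_lon("11234W")   # ≈ -112.5667
"""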
function extract_lon(str::AbstractString)
if length(str) > 3
if str[1] in ['E', 'W']
sign_lon = str[1] == 'E' ? 1 : -1
relative = 1
elseif str[end] in ['E', 'W']
sign_lon = str[end] == 'E' ? 1 : -1
relative = 0
else
return NaN
end
else
return NaN
end
lon_h = parse(Float64, str[relative+1:relative+3])
lon_m = 0.0
lon_s = 0.0
if length(str) > 4
lon_m = parse(Float64, str[relative+4:relative+5])
end
if length(str) > 6
lon_s = parse(Float64, str[relative+6:end+relative-1])
end
return sign_lon*(lon_h + lon_m/60.0 + lon_s/3600.0)
end
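"""
    latlon(str_lat, str_lon)
    latlon(str)

Combine latitude and longitude strings into a Navigation.Point_deg, either
from two separate strings or from one combined string such as
"123456S1123456.0E". Returns NaN if either coordinate cannot be parsed.

    latlon("S123456", "012E")   # Point_deg(≈ -12.5822, 12.0)
"""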
function latlon(str_lat::AbstractString, str_lon::AbstractString)
lat = extract_lat(str_lat)
lon = extract_lon(str_lon)
if !isnan(lat*lon)
# return Point(lat, lon)
return Point_deg(lat, lon)
else
return NaN
end
end
function latlon(str::AbstractString)
nors = "S"
if occursin("S", str)
nors = "S"
else
nors = "N"
end
split_str = split(str, nors)
str_lat = split_str[1]*nors
str_lon = split_str[2]
return latlon(str_lat, str_lon)
end
function format_date(str::Union{AbstractString, Missing}, format; addyear = Year(0))
if str === missing
return missing
elseif str == "!!!!"
return missing
end
maybedate = tryparse(Date, str, format)
if maybedate === nothing
return missing
elseif maybedate > Date(1, 1, 1)
return maybedate + addyear
else
return missing
end
end
function format_datetime(str::Union{AbstractString, Missing}, format; addyear = Year(0))
if str === missing
return missing
elseif occursin("!!!!", str)
return missing
end
maybedatetime = tryparse(DateTime, str, format)
if maybedatetime === nothing
return missing
elseif maybedatetime > DateTime(1, 1, 1, 0, 0, 0)
return maybedatetime + addyear
else
return missing
end
end
function format_time(str::Union{AbstractString, Missing}, format)
if str === missing
return missing
end
# handling special case of 2400 hours used in ATM to indicate 0000 24 hours later
if str == "2400"
str = "235959"
format = DateFormat("HHMMSS")
end
maybetime = tryparse(Time, str, format)
if maybetime === nothing
return missing
else
return maybetime
end
end
end #Module
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 903 | """
NACT fileformat
See EUROCONTROL NEST Manual for Nact fileformat description
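Output: Dict mapping each element NAME to an Activation struct, where
AirspaceOrTv is true for an airspace ("AS") and Activated is true when the
ACTIVATION field is "A".

Usage sketch (the file name and key are hypothetical):

    activations = DDR2import.Nact.read("scenario.nact")
    activations["EHAMCR1"].Activated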
"""
module Nact
export read
using Format
using CSV
using Dates
# using StaticArrays
const fileformat = Dict(1=>String, 2=>String, 3=>String)
const header_format = ["NAME", "TYPE", "ACTIVATION"]
struct Activation
AirspaceOrTv::Bool # Airspace = true / Tv = false
Activated::Bool # Activated = true / Non activated = false
end
function read(file)
csviterator = CSV.File(file, delim=";", types=fileformat,
header=header_format, silencewarnings=false, ignoreemptylines=true)
return splitintovector(csviterator)
end
function splitintovector(iterator)
dict = Dict{String, Activation}()
for row in iterator
airspaceortv = row.TYPE == "AS"
activation = row.ACTIVATION == "A"
dict[row.NAME] = Activation(airspaceortv, activation)
end
return dict
end
end # module"
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 753 | using DDR2import
using Test
@testset "DDR2import.jl" begin
tests = ["Implemented\\SO6", "Implemented\\T5", "Implemented\\Exp2",
"Implemented\\Allftplus", "Implemented\\Ase", "Implemented\\Are",
"Implemented\\Gar", "Implemented\\Gsl", "Implemented\\Frp",
"Implemented\\Sls", "Implemented\\Sid", "Implemented\\Star",
"Implemented\\Spc", "Implemented\\Mot", "Implemented\\Narp",
"Implemented\\Nnpt", "Implemented\\Cost", "Implemented\\Crco",
"Implemented\\Ur", "Implemented\\Arp", "Implemented\\Ntfv",
"util", "Implemented\\Country", "Implemented\\Operator",
"Implemented\\Routes", "Implemented\\Runway", "Implemented\\For"]
# tests = ["Implemented\\For"]
for t in tests
include("$(t).jl")
end
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 3290 | using Dates
@testset "util.jl" begin
p1 = DDR2import.util.Point(1.0, 2.0)
@test p1.lat == 1.0
@test p1.lon == 2.0
@test DDR2import.util.extract_lat("S123456") ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("N123456.0") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("123456N") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("S1234") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lat("N12") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lat("1234S") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lat("12N") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lat("") === NaN
@test DDR2import.util.extract_lon("W1123456") ≈ -112.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("E1123456.0") ≈ 112.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("0123456E") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("W01234") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lon("E112") ≈ 112.0 atol = 0.0001
@test DDR2import.util.extract_lon("11234W") ≈ -112.56666666 atol = 0.0001
@test DDR2import.util.extract_lon("012E") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lon("E1") === NaN
@test DDR2import.util.latlon("S123456", "012E").ϕ ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.latlon("S123456", "012E").λ ≈ 12.0 atol = 0.0001
@test DDR2import.util.latlon("S123456", "E1") === NaN
@test DDR2import.util.latlon("123456S1123456.0E").ϕ ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.latlon("123456S1123456.0E").λ ≈ 112.58222222 atol = 0.0001
@test DDR2import.util.latlon("123456S1123456.0W").λ ≈ -112.58222222 atol = 0.0001
yymmdd = DateFormat("YYmmdd")
testdate = DDR2import.util.format_date("200110", yymmdd, addyear=Year(2000))
@test Dates.year(testdate) == 2020
@test Dates.month(testdate) == 1
@test Dates.day(testdate) == 10
hhmmss = DateFormat("HHMMSS")
testtime = DDR2import.util.format_time("102030", hhmmss)
@test Dates.hour(testtime) == 10
@test Dates.minute(testtime) == 20
@test Dates.second(testtime) == 30
hhmm = DateFormat("HHMM")
testtime2 = DDR2import.util.format_time("2400", hhmm)
@test Dates.hour(testtime2) == 23
@test Dates.minute(testtime2) == 59
@test Dates.second(testtime2) == 59
yyyymmddhhmmss = DateFormat("YYYYmmddHHMMSS")
testdatetime = DDR2import.util.format_datetime("20200110102030", yyyymmddhhmmss)
@test Dates.year(testdatetime) == 2020
@test Dates.month(testdatetime) == 1
@test Dates.day(testdatetime) == 10
@test Dates.hour(testdatetime) == 10
@test Dates.minute(testdatetime) == 20
@test Dates.second(testdatetime) == 30
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 11140 | using Dates
@testset "Allftplus.jl" begin
filename = "data\\test1.ALL_FT+"
filename2 = "data\\test2.ALL_FT+"
df = DDR2import.Allftplus.read(filename)
df2 = DDR2import.Allftplus.read(filename2)
#EXP
@test df[2,:departureAerodromeIcaoId_0] == "LFST"
@test df[2,:arrivalAerodromeIcaoId_1] == "EHAM"
@test df[2,:aircraftId_2] == "AFR27FK"
@test df[2,:aircraftOperatorIcaoId_3] == "AFR"
@test df[2,:aircraftTypeIcaoId_4] == "E145"
@test Dates.year(df[2,:aobt_5]) == 2017
@test Dates.minute(df[2,:aobt_5]) == 46
@test df[2,:ifpsId_6] == "AA70843154"
@test df[2,:aircraftTypeIcaoId_4] == "E145"
@test Dates.hour(df2[2,:iobt_7]) == 14
@test df2[2,:originalFlightDataQuality_8] == "FPL"
@test df2[2,:flightDataQuality_9] == "FPL"
@test df[2,:source_10] == "UNK"
@test df[2,:exemptionReasonType_11] == "NEXE"
@test df[2,:exemptionReasonDistance_12] == "NEXE"
@test df2[2,:lateFiler_13] == "N"
@test df2[2,:lateUpdater_14] == "N"
@test df2[2,:northAtlanticFlight_15] == "N"
@test Dates.month(df[2,:cobt_16]) == 12
@test Dates.hour(df[2,:cobt_16]) == 5
@test Dates.day(df[2,:eobt_17]) == 15
@test Dates.minute(df[2,:eobt_17]) == 45
@test Dates.month(df2[2,:lobt_18]) == 1
@test df[2,:flightState_19] == "NE"
@test df[2,:previousToActivationFlightState_20] == "NE"
@test df[2,:suspensionStatus_21] == "NS"
@test df[2,:tactId_22] == 512256
@test Dates.year(df[2,:samCtot_23]) == 2017
@test df2[2,:samSent_24] == "N"
@test Dates.day(df[2,:sipCtot_25]) == 15
@test df2[2,:sipSent_26] == "N"
@test df2[2,:slotForced_27] == "N"
@test df[2,:mostPenalizingRegulationId_28] == "EHAMA15M"
@test df[2,:regulationsAffectedByNrOfInstances_29] == 0
@test df[2,:excludedFromNrOfInstances_30] == 0
@test df2[6,:lastReceivedAtfmMessageTitle_31] == "REA"
@test df2[2,:lastReceivedMessageTitle_32] == "FPL"
@test df2[6,:lastSentAtfmMessageTitle_33] == "SRM"
@test df[2,:manualExemptionReason_34] == "N"
@test df2[2,:sensitiveFlight_35] == "N"
@test df2[2,:readyForImprovement_36] == "Y"
@test df2[2,:readyToDepart_37] == "N"
@test df[2,:revisedTaxiTime_38] == 0
@test df[2,:tis_39] == 0
@test df[2,:trs_40] == 0
@test df[2,:toBeSentSlotMessageTitle_41] === missing
@test df[2,:toBeSentProposalMessageTitle_42] === missing
@test df2[3,:lastSentSlotMessageTitle_43] === "SRM"
@test df[2,:lastSentProposalMessageTitle_44] === missing
@test Dates.year(df[2,:lastSentSlotMessage_45]) == 2017
@test Dates.day(df[2,:lastSentSlotMessage_45]) == 15
@test Dates.month(df[2,:lastSentProposalMessage_46]) == 12
@test df[2,:flightCountOption_47] == "N"
@test df2[2,:normalFlightTactId_48] == 0
@test df2[2,:proposalFlightTactId_49] == 0
@test df2[2,:operatingAircraftOperatorIcaoId_50] == "RSC"
@test df2[2,:reroutingWhy_51] == "N"
@test df[2,:reroutedFlightState_52] === missing
@test df[2,:runwayVisualRange_53] == 0
@test df2[2,:numberIgnoredErrors_54] == 0
@test df2[2,:arcAddrSource_55] == "N"
@test df2[1,:arcAddr_56] == "A9F2C6"
@test df2[2,:ifpsRegistrationMark_57] == "ECMIF"
@test df2[2,:flightType_58] == "S"
@test df2[2,:aircraftEquipment_59] == "DFGRSY:B2B3B4:EB1"
@test df2[2,:cdmStatus_60] == "N"
@test Dates.minute(df2[1,:cdmEarlyTtot_61]) == 8
@test Dates.hour(df2[1,:cdmAoTtot_62]) == 21
@test Dates.day(df2[1,:cdmAtcTtot_63]) == 2
@test Dates.month(df2[1,:cdmSequencedTtot_64]) == 1
@test Dates.minute(df2[1,:cdmTaxiTime_65]) == 21
@test df2[2,:cdmOffBlockTimeDiscrepancy_66] == "N"
@test df2[1,:cdmDepartureProcedureId_67] == "DENUT6C"
@test df2[1,:cdmAircraftTypeId_68] == "B744"
@test df2[1,:cdmRegistrationMark_69] == "N740CK"
@test Dates.minute(df2[1,:cdmNoSlotBefore_70]) == 9
@test df2[2,:cdmDepartureStatus_71] == "K"
@test df2[2,:ftfmEetFirNrOfInstances_72] == 0
@test df2[1,:ftfmEetFirList_73] == "EHAAFIR:10"
@test df2[2,:ftfmEetPtNrOfInstances_74] == 0
#TODO Setup function to extract EetPtList
@test df2[1,:ftfmEetPtList_75] == "55N020W:125 57N030W:170 58N040W:211 58N050W:251 CUDDY:281"
@test df2[2,:ftfmAiracCycleReleaseNumber_76] == 447
@test df2[2,:ftfmEnvBaselineNumber_77] == 842
@test df2[2,:ftfmDepartureRunway_78] == "GCXO30"
@test df2[2,:ftfmArrivalRunway_79] == "GCHI34"
@test df[2,:ftfmReqFlightlevelSpeedNrOfInstances_80] == 1
@test df2[1,:ftfmReqFlightlevelSpeedList_81] == "F320:N0487:0 F340:M084:1383 F360:M084:4022"
fll = DDR2import.Allftplus.reqFlightlevelSpeedList(df2[1,:ftfmReqFlightlevelSpeedList_81])
@test fll[1].FL == 320
@test fll[2].Spd == "M084"
@test fll[3].Value == 4022 # TODO What is Value? Time in seconds? Travel distance in NM?
@test df2[2,:ftfmConsumedFuel_82] == 322.0
@test df2[2,:ftfmRouteCharges_83] == 49.0
@test df2[2,:ftfmAllFtPointNrOfInstances_84] == 16
pp = DDR2import.Allftplus.AllFtPointProfileList(df2[2,:ftfmAllFtPointProfile_85])
@test Dates.minute(pp[1].datetime) == 10
@test pp[5].point == "TESEL"
@test pp[1].route == "HIE1J"
@test pp[2].FL == 25
@test pp[3].pointDistance == 11
@test pp[4].pointType == "V"
@test pp[6].geoPointId.ϕ ≈ 28.461666 atol = 0.0001
@test pp[6].geoPointId.λ ≈ -16.875555555555557 atol = 0.0001
@test pp[8].ratio == 54
@test pp[9].isVisible == true
#@test df[2,:ftfmAllFtPointProfile_85]
@test df2[2,:ftfmAllFtAirspaceNrOfInstances_86] == 10
#@test df[2,:ftfmAllFtAirspaceProfile_87]
ap = DDR2import.Allftplus.AllFtAirspaceProfileList(df2[2,:ftfmAllFtAirspaceProfile_87])
@test Dates.minute(ap[1].entry_datetime) == 10
@test ap[2].sector == "GCXOAXO"
@test Dates.day(ap[3].exit_datetime) == 3
@test ap[4].fir == "NAS"
@test ap[5].entry_geoPointId.ϕ ≈ 28.47222222222222 atol = 0.00001
@test ap[6].exit_geoPointId.λ ≈ -17.3275 atol = 0.00001
@test ap[7].entry_FL == 80
@test ap[8].exit_FL == 80
@test ap[9].entry_pointDistance == 119
@test ap[10].exit_pointDistance == 185
@test df2[2,:ftfmAllFtCircleIntersectionsNrOfInstances_88] == 2
#@test df[2,:ftfmAllFtCircleIntersections_89]
@test df2[3,:rtfmAiracCycleReleaseNumber_90] == 447
@test df2[3,:rtfmEnvBaselineNumber_91] == 1498
@test df2[3,:rtfmDepartureRunway_92] == "EDDL23R"
@test df2[3,:rtfmArrivalRunway_93] == "LTAC03R"
@test df2[3,:rtfmReqFlightlevelSpeedNrOfInstances_94] == 3
@test df2[3,:rtfmReqFlightlevelSpeedList_95] == "F350:N0425:0 F370:N0439:410 F390:N0453:1497"
@test df2[3,:rtfmConsumedFuel_96] == 7772.0
@test df2[3,:rtfmRouteCharges_97] == 1153.0
@test df2[3,:rtfmAllFtPointNrOfInstances_98] == 68
#@test df[2,:rtfmAllFtPointProfile_99]
@test df2[3,:rtfmAllFtAirspaceNrOfInstances_100] == 97
#@test df[2,:rtfmAllFtAirspaceProfile_101]
@test df2[3,:rtfmAllFtCircleIntersectionsNrOfInstances_102] == 4
#@test df[2,:rtfmAllFtCircleIntersections_103]
@test df2[2,:ctfmAiracCycleReleaseNumber_104] == 447
@test df2[2,:ctfmEnvBaselineNumber_105] == 1270
@test df2[2,:ctfmDepartureRunway_106] == "GCXO30"
@test df2[2,:ctfmArrivalRunway_107] == "GCHI34"
@test df[2,:ctfmReqFlightlevelSpeedNrOfInstances_108] == 1
@test df2[1,:ctfmReqFlightlevelSpeedList_109] == "F300:N0487:0 F320:N0487:492 F320:M084:1367 F360:M084:4021"
@test df2[2,:ctfmConsumedFuel_110] == 343.0
@test df2[2,:ctfmRouteCharges_111] == 49.0
@test df2[2,:ctfmAllFtPointNrOfInstances_112] == 28
#@test df[2,:ctfmAllFtPointProfile_113]
@test df2[2,:ctfmAllFtAirspaceNrOfInstances_114] == 8
#@test df[2,:ctfmAllFtAirspaceProfile_115]
@test df2[2,:ctfmAllFtCircleIntersectionsNrOfInstances_116] == 2
#@test df[2,:ctfmAllFtCircleIntersections_117]
@test df2[5,:noCPGCPFReason_118] == "X"
@test Dates.day(df2[2,:scrObt_119]) == 3
@test df2[2,:scrConsumedFuel_120] == 322.0
@test df2[2,:scrRouteCharges_121] === 49.0
@test df2[2,:scrAllFtPointNrOfInstances_122] == 16
#@test df[2,:scrAllFtPointProfile_123]
@test df2[2,:scrAllFtAirspaceNrOfInstances_124] == 10
#@test df[2,:scrAllFtAirspaceProfile_125]
@test df2[2,:scrAllFtCircleIntersectionsNrOfInstances_126] == 0
#@test df[2,:scrAllFtCircleIntersections_127]
@test Dates.minute(df2[3,:srrObt_128]) == 15
@test df2[3,:srrConsumedFuel_129] == 7813.0
@test df2[3,:srrRouteCharges_130] == 1113.0
@test df2[3,:srrAllFtPointNrOfInstances_131] == 74
#@test df[2,:srrAllFtPointProfile_132]
@test df2[3,:srrAllFtAirspaceNrOfInstances_133] == 80
#@test df[2,:srrAllFtAirspaceProfile_134]
@test df2[3,:srrAllFtCircleIntersectionsNrOfInstances_135] == 4
#@test df[2,:srrAllFtCircleIntersections_136]
@test Dates.minute(df2[2,:surObt_137]) == 54
@test df2[2,:surConsumedFuel_138] == 343.0
@test df2[2,:surRouteCharges_139] == 49.0
@test df2[2,:surAllFtPointNrOfInstances_140] == 28
#@test df[2,:surAllFtPointProfile_141]
@test df2[2,:surAllFtAirspaceNrOfInstances_142] == 8
#@test df[2,:surAllFtAirspaceProfile_143]
@test df2[3,:surAllFtCircleIntersectionsNrOfInstances_144] == 4
#@test df[2,:surAllFtCircleIntersections_145]
@test Dates.minute(df2[1,:dctObt_146]) == 50
@test df2[1,:dctConsumedFuel_147] == 73966.0
@test df2[1,:dctRouteCharges_148] == 1864.0
@test df2[1,:dctAllFtPointNrOfInstances_149] == 40
#@test df[2,:dctAllFtPointProfile_150]
@test df2[1,:dctAllFtAirspaceNrOfInstances_151] == 79
#@test df[2,:dctAllFtAirspaceProfile_152]
@test df2[1,:dctAllFtCircleIntersectionsNrOfInstances_153] == 4
#@test df[2,:dctAllFtCircleIntersections_154]
@test df[2,:cpfObt_155] === missing
@test df[2,:cpfConsumedFuel_156] === missing
@test df[2,:cpfRouteCharges_157] === missing
@test df2[2,:cpfAllFtPointNrOfInstances_158] == 0
#@test df[2,:cpfAllFtPointProfile_159]
@test df2[2,:cpfAllFtAirspaceNrOfInstances_160] == 0
#@test df[2,:cpfAllFtAirspaceProfile_161]
@test df2[2,:cpfAllFtCircleIntersectionsNrOfInstances_162] == 0
#@test df[2,:cpfAllFtCircleIntersections_163]
@test df[2,:aircraftidIATA_164] === missing
@test df[2,:intentionFlight_165] === missing
@test df[2,:intentionRelatedRouteAssignmentMethod_166] === missing
@test df[2,:intentionUID_167] === missing
@test df[2,:intentionEditionDate_168] === missing
@test df[2,:intentionSource_169] === missing
@test df[2,:associatedIntentions_170] === missing
@test df[2,:enrichmentOutput_171] === missing
@test df2[2,:eventID_172] == "TTE"
@test Dates.second(df2[1,:eventTime_173]) == 44
@test df2[2,:flightVersionNr_174] == 44
@test df[2,:ftfmNrTvProfiles_175] === missing
# @test df[2,:ftfmTvProfile_176] === missing
@test df[2,:rtfmNrTvProfiles_177] === missing
# @test df[2,:rtfmTvProfile_178] === missing
@test df[2,:ctfmNrTvProfiles_179] === missing
# @test df[2,:ctfmTvProfile_180] === missing
#LAST TEST
@test df[5,:departureAerodromeIcaoId_0] == "WSSS"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 521 | # using Dates
@testset "Are.jl" begin
filename = "data\\test.are"
dc = DDR2import.Are.read(filename)
@test dc["AZ"].nb_point == 48
@test dc["AZ"].bottom_fl == 0.0
@test dc["AZ"].top_fl == 0.0
@test dc["AZ"].surface == 0.0
@test dc["AZ"].sector_num == 0.0
@test dc["AZ"].points[5,1] == 45.0
@test dc["AZ"].points[4,2] == -40.0
@test dc["AZ"].box[1,1] ≈ 45.00775771902942 atol = 0.00001
#LAST TEST
@test dc["EB"].nb_point == 550
@test dc["EB"].points[550,1] == 51.5
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 320 | @testset "Arp.jl" begin
filename = "data\\test.arp"
df = DDR2import.Arp.read(filename)
@test df[1, :AIRPORT] == "EGSX"
@test df[2, :LAT_DEG] ≈ 53.3941667 atol = 0.001
@test df[3, :LON_DEG] ≈ -1.7 atol = 0.001
@test df[4, :FIR] == "EGTT_FIR"
#LAST TEST
@test df[5, :AIRPORT] == "EGTB"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 566 | # using Dates
@testset "Ase.jl" begin
filename = "data\\test.ase"
df = DDR2import.Ase.read(filename)
@test df[1,:FLIGHTCOUNT] == 0.0
@test df[1,:SEGMENTPARITY] == 1
@test df[1,:SEGMENTTYPE] == 41
@test df[1,:LATBEGINSEGMENT_DEG] ≈ 48.7580 atol = 0.0001
@test df[1,:LONBEGINSEGMENT_DEG] ≈ 10.8534 atol = 0.0001
@test df[1,:LATENDSEGMENT_DEG] ≈ 48.6956 atol = 0.0001
@test df[1,:LONENDSEGMENT_DEG] ≈ 10.9489 atol = 0.0001
@test df[1,:SEGMENTNAME] == "%%BRU_BURAM"
#LAST TEST
@test df[3,:SEGMENTNAME] == "%%EDH_WSN"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 267 | @testset "Cost.jl" begin
filename = "data\\test.cost"
df = DDR2import.Cost.read(filename)
@test df[1, :FLIGHTID] == 139729486
@test df[2, :COUNTRYCODE] == "LB"
@test df[3, :COST] == 86.7351
#LAST TEST
@test df[7, :COUNTRYCODE] == "LY"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 209 | @testset "Country.jl" begin
filename = "data\\test.country"
dc = DDR2import.Country.read(filename)
@test dc["AG"].name == "Solomon Islands"
@test dc["EB"].member
@test !dc["ZY"].member
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 740 | using Dates
@testset "Crco.jl" begin
filename = "data\\test.crco"
df = DDR2import.Crco.read(filename)
@test df[1, :COUNTRYNAME] == "ED"
@test df[2, :CALLSIGN] == "N182QS"
@test df[3, :ACTYPE] == "B737"
@test df[4, :FLIGHTID] == 1523
@test df[1, :LATENTRY_DEG] ≈ 53.630278 atol = 0.001
@test df[2, :LONENTRY_DEG] ≈ 12.1066383 atol = 0.001
@test df[3, :LATEXIT_DEG] ≈ 50.3166667 atol = 0.001
@test df[4, :LONEXIT_DEG] ≈ 9.98833333 atol = 0.001
@test df[1, :DISTANCE_M] ≈ 396031.3143 atol = 0.1
@test df[4, :ENTRYFL] ≈ 270.0 atol = 0.1
@test df[1, :EXITFL] ≈ 270.0 atol = 0.1
@test Dates.minute(df[2, :DATETIMEENTRY]) == 59
@test Dates.second(df[3, :DATETIMEEXIT]) == 22
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.