Dataset columns:
  licenses: sequence (lengths 1-3)
  version: string (677 classes)
  tree_hash: string (length 40)
  path: string (1 class)
  type: string (2 classes)
  size: string (lengths 2-8)
  text: string (lengths 25-67.1M)
  package_name: string (lengths 2-41)
  repo: string (lengths 33-86)
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
1154
module GalacticPotentials using DocStringExtensions @template (FUNCTIONS, METHODS, MACROS) = """ $(SIGNATURES) $(DOCSTRING) """ @template (TYPES, CONSTANTS) = """ $(TYPEDEF) $(DOCSTRING) """ using Symbolics, SymbolicUtils using LaTeXStrings using LinearAlgebra using ForwardDiff using ModelingToolkit using SciMLBase using Memoize export ScalarField, HarmonicOscillatorPotential, HenonHeilesPotential, HernquistPotential, IsochronePotential, JaffePotential, KeplerPotential, KuzminPotential, LogarithmicPotential, LongMuraliBarPotential, MiyamotoNagaiPotential, NFWPotential, PlummerPotential, # PowerLawCutoffPotential, # SatohPotential, # StonePotential, states, parameters, ODESystem, ODEProblem include(joinpath(@__DIR__, "gen", "expressions.jl")) include("generic.jl") include("potentials.jl") include("odes.jl") end # module GalacticPotentials
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
14613
# # Interfaces for dynamical expressions, like scalar potential fields # abstract type AbstractField <: ModelingToolkit.AbstractTimeDependentSystem end abstract type AbstractScalarField <: AbstractField end # # The code below is copied and modified from ModelingToolkit.jl. # See the ModelingToolkit LICENSE below for more information. # # # The ModelingToolkit.jl package is licensed under the MIT "Expat" License: # # > Copyright (c) 2018-22: Yingbo Ma, Christopher Rackauckas, Julia Computing, and # > contributors # > # > Permission is hereby granted, free of charge, to any person obtaining a copy # > # > of this software and associated documentation files (the "Software"), to deal # > # > in the Software without restriction, including without limitation the rights # > # > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # > # > copies of the Software, and to permit persons to whom the Software is # > # > furnished to do so, subject to the following conditions: # > # > The above copyright notice and this permission notice shall be included in all # > # > copies or substantial portions of the Software. # > # > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # > # > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # > # > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # > # > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # > # > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # > # > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # > # > SOFTWARE. # The code in `src/structural_transformation/bipartite_tearing/modia_tearing.jl`, # which is from the [Modia.jl](https://github.com/ModiaSim/Modia.jl) project, is # licensed as follows: # MIT License # Copyright (c) 2017-2018 ModiaSim developers # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Base.@kwdef struct ScalarField <: AbstractScalarField """ A tag for the system. If two systems have the same tag, then they are structurally identical. """ tag::UInt """Scalar value defining the field.""" value::Num """Independent variables.""" iv::SymbolicUtils.BasicSymbolic{Real} """Unknown variables.""" unknowns::Vector """Parameters.""" ps::Vector """Array variables.""" var_to_name::Any """Observed states.""" observed::Vector{Equation} """ Time gradient. Note: this field will not be defined until [`calculate_tgrad`](@ref) is called on the system. 
""" tgrad::Base.RefValue{Any} """ Jacobian matrix. Note: this field will not be defined until [`calculate_jacobian`](@ref) or [`calculate_gradient`](@ref) is called on the system. """ jac::Base.RefValue{Any} """ The name of the system. """ name::Symbol """ The internal systems. These are required to have unique names. """ systems::Vector{ScalarField} """ The default values to use when initial conditions and/or parameters are not supplied in `ODEProblem`. """ defaults::Dict """ Type of the system. """ connector_type::Any """ Metadata for the system, to be used by downstream packages. """ metadata::Any """ Metadata for MTK GUI. """ gui_metadata::Union{Nothing,ModelingToolkit.GUIMetadata} """ Cache for intermediate tearing state. """ tearing_state::Any """ Substitutions generated by tearing. """ substitutions::Any """ If a model `sys` is complete, then `sys.x` no longer performs namespacing. """ complete::Bool """ The hierarchical parent system before simplification. """ parent::Any function ScalarField(tag, value, iv, unknowns, ps, var_to_name, observed, tgrad, jac, name, systems, defaults, connector_type, metadata=nothing, gui_metadata=nothing, tearing_state=nothing, substitutions=nothing, complete=false, parent=nothing; checks::Union{Bool,Int}=true) if checks == true || (checks & ModelingToolkit.CheckComponents) > 0 ModelingToolkit.check_variables(unknowns, iv) ModelingToolkit.check_parameters(ps, iv) end if checks == true || (checks & ModelingToolkit.CheckUnits) > 0 u = ModelingToolkit.__get_unit_type(unknowns, ps, iv) ModelingToolkit.check_units(u, value) end new(tag, value, iv, unknowns, ps, var_to_name, observed, tgrad, jac, name, systems, defaults, connector_type, metadata, gui_metadata, tearing_state, substitutions, complete, parent) end end get_value(sys::ScalarField) = sys.value get_values(sys::ScalarField) = [get_value(sys.value)] ModelingToolkit.get_eqs(::ScalarField) = ModelingToolkit.Equation[] function ScalarField( value, iv, unknowns, ps; observed=Num[], name=nothing, defaults=Dict(), systems=ScalarField[], connector_type=nothing, checks=true, metadata=nothing, gui_metadata=nothing) if isnothing(name) throw(ArgumentError("The `name` keyword must be provided. 
Please consider using the `@named` macro")) end # Move things over, but do not touch array expressions # # # we cannot scalarize in the loop because `eqs` itself might require # scalarization value = ModelingToolkit.scalarize(value) sysnames = nameof.(systems) if length(unique(sysnames)) != length(sysnames) throw(ArgumentError("System names must be unique.")) end tgrad = Base.RefValue{Any}(ModelingToolkit.EMPTY_TGRAD) jac = Base.RefValue{Any}(ModelingToolkit.EMPTY_TGRAD) defaults = ModelingToolkit.todict(defaults) defaults = Dict{Any,Any}(value(k) => value(v) for (k, v) in pairs(defaults)) iv = ModelingToolkit.scalarize(iv) unknowns = ModelingToolkit.scalarize(unknowns) ps = ModelingToolkit.scalarize(ps) iv = ModelingToolkit.value(iv) unknowns = ModelingToolkit.value.(unknowns) ps = ModelingToolkit.value.(ps) var_to_name = Dict() ModelingToolkit.process_variables!(var_to_name, defaults, unknowns) ModelingToolkit.process_variables!(var_to_name, defaults, ps) isempty(observed) || ModelingToolkit.collect_var_to_name!(var_to_name, (eq.lhs for eq in observed)) ScalarField(Threads.atomic_add!(ModelingToolkit.SYSTEM_COUNT, UInt(1)), value, iv, unknowns, ps, var_to_name, observed, tgrad, jac, name, systems, defaults, connector_type, metadata, gui_metadata, checks=checks) end function ModelingToolkit.calculate_tgrad(sys::AbstractField; simplify=false) isempty(ModelingToolkit.get_tgrad(sys)[]) || return ModelingToolkit.get_tgrad(sys)[] # use cached tgrad, if possible # We need to remove explicit time dependence on the state because when we # have `u(t) * t` we want to have the tgrad to be `u(t)` instead of `u'(t) * # t + u(t)`. vs = ModelingToolkit.detime_dvs.(get_value(sys)) iv = ModelingToolkit.get_iv(sys) xs = ModelingToolkit.unknowns(sys) rule = Dict(map((x, xt) -> xt => x, ModelingToolkit.detime_dvs.(xs), xs)) vs = substitute.(vs, Ref(rule)) tgrad = expand_derivatives.(map(Differential(iv), vs), simplify) reverse_rule = Dict(map((x, xt) -> x => xt, ModelingToolkit.detime_dvs.(xs), xs)) tgrad = Num.(substitute.(tgrad, Ref(reverse_rule))) ModelingToolkit.get_tgrad(sys)[] = tgrad return tgrad end function ModelingToolkit.calculate_jacobian(sys::AbstractField; simplify=false, dvs=ModelingToolkit.unknowns(sys)) if isequal(dvs, ModelingToolkit.unknowns(sys)) cache = ModelingToolkit.get_jac(sys)[] if cache isa Tuple && cache[2] == simplify return cache[1] end end vs = get_value(sys) jac = ModelingToolkit.gradient(vs, dvs, simplify=simplify) if isequal(dvs, ModelingToolkit.unknowns(sys)) ModelingToolkit.get_jac(sys)[] = jac, simplify # cache Jacobian end return jac end ModelingToolkit.calculate_gradient(sys::AbstractField; simplify=false, dvs=ModelingToolkit.unknowns(sys)) = ModelingToolkit.calculate_jacobian(sys; simplify=simplify, dvs=dvs) function ModelingToolkit.generate_jacobian(sys::AbstractField, vs=ModelingToolkit.unknowns(sys), ps=ModelingToolkit.parameters(sys); simplify=false, kwargs...) jac = ModelingToolkit.calculate_jacobian(sys, simplify=simplify) pre = ModelingToolkit.get_preprocess_constants(jac) return Symbolics.build_function(jac, vs, ps; postprocess_fbody=pre, kwargs...) end ModelingToolkit.generate_gradient(sys::AbstractField, vs=ModelingToolkit.unknowns(sys), ps=ModelingToolkit.parameters(sys); simplify=false, kwargs...) = ModelingToolkit.generate_jacobian(sys, vs, ps; simplify=simplify, kwargs...) 
function ModelingToolkit.calculate_hessian(sys::AbstractScalarField; simplify=false, dvs=ModelingToolkit.unknowns(sys)) vs = ModelingToolkit.calculate_jacobian(sys) hess = ModelingToolkit.jacobian(vs, dvs; simplify=simplify) return hess end function ModelingToolkit.generate_hessian(sys::AbstractScalarField, vs=ModelingToolkit.unknowns(sys), ps=ModelingToolkit.parameters(sys); simplify=false, kwargs...) hess = ModelingToolkit.calculate_hessian(sys, simplify=simplify) pre = ModelingToolkit.get_preprocess_constants(hess) return Symbolics.build_function(hess, vs, ps; postprocess_fbody=pre, kwargs...) end function ModelingToolkit.generate_function(sys::AbstractField, dvs=ModelingToolkit.unknowns(sys), ps=ModelingToolkit.parameters(sys); simplify=false, kwargs...) vs = simplify ? ModelingToolkit.simplify(get_value(sys)) : get_value(sys) return Symbolics.build_function(vs, ModelingToolkit.value.(dvs), ModelingToolkit.value.(ps); kwargs...) end function ModelingToolkit.jacobian_sparsity(sys::AbstractField) ModelingToolkit.jacobian_sparsity([get_value(sys)], ModelingToolkit.unknowns(sys)) end function ModelingToolkit.hessian_sparsity(sys::AbstractScalarField) [ModelingToolkit.hessian_sparsity([get_value(sys)], ModelingToolkit.unknowns(sys)) for eq in equations(sys)] # TODO: this does not look right end function Base.show(io::IO, mime::MIME"text/plain", sys::AbstractField) val = get_value(sys) vars = ModelingToolkit.unknowns(sys) nvars = length(vars) if val isa AbstractArray && eltype(val) <: Num nvs = count(v -> !(v isa Connection), val) Base.printstyled(io, "Model $(nameof(sys)) with $nvs"; bold=true) # nextras = n_extra_equations(sys) # if nextras > 0 # Base.printstyled(io, "("; bold=true) # Base.printstyled(io, neqs + nextras; bold=true, color=:magenta) # Base.printstyled(io, ") "; bold=true) # end Base.printstyled(io, "values\n"; bold=true) else Base.printstyled(io, "Model $(nameof(sys))\n"; bold=true) end # The reduced equations are usually very long. It's not that useful to print # them. #Base.print_matrix(io, eqs) #println(io) rows = first(displaysize(io)) ÷ 5 limit = get(io, :limit, false) Base.printstyled(io, "States ($nvars):"; bold=true) nrows = min(nvars, limit ? rows : nvars) limited = nrows < length(vars) defs = ModelingToolkit.has_defaults(sys) ? ModelingToolkit.defaults(sys) : nothing for i in 1:nrows s = vars[i] print(io, "\n ", s) if defs !== nothing val = get(defs, s, nothing) if val !== nothing print(io, " [defaults to ") show(IOContext(io, :compact => true, :limit => true, :displaysize => (1, displaysize(io)[2])), val) print(io, "]") end description = ModelingToolkit.getdescription(s) if description !== nothing && description != "" print(io, ": ", description) end end end limited && print(io, "\n⋮") println(io) vars = ModelingToolkit.parameters(sys) nvars = length(vars) Base.printstyled(io, "Parameters ($nvars):"; bold=true) nrows = min(nvars, limit ? rows : nvars) limited = nrows < length(vars) for i in 1:nrows s = vars[i] print(io, "\n ", s) if defs !== nothing val = get(defs, s, nothing) if val !== nothing print(io, " [defaults to ") show(IOContext(io, :compact => true, :limit => true, :displaysize => (1, displaysize(io)[2])), val) print(io, "]") end description = getdescription(s) if description !== nothing && description != "" print(io, ": ", description) end end end limited && print(io, "\n⋮") if ModelingToolkit.has_torn_matching(sys) && ModelingToolkit.has_tearing_state(sys) # If the system can take a torn matching, then we can initialize a tearing # state on it. 
Do so and show the structure. state = ModelingToolkit.get_tearing_state(sys) if state !== nothing Base.printstyled(io, "\nIncidence matrix:"; color=:magenta) show(io, mime, ModelingToolkit.incidence_matrix(state.structure.graph, Num(Sym{Real}(:×)))) end end return nothing end Base.show(io::IO, mime::MIME"text/html", sys::AbstractField) = show(io, mime, get_value(sys)) Base.show(io::IO, mime::MIME"text/latex", sys::AbstractField) = show(io, mime, get_value(sys))
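A minimal usage sketch of the `ScalarField` interface defined above, mirroring the package's own unit tests; the toy field and the parameter name `b` are illustrative:

```julia
using ModelingToolkit, GalacticPotentials

# A toy field Φ = (b/2)(x² + y² + z²) with one parameter b.
@variables t
q = @variables x(t) y(t) z(t)
p = @parameters b

field = ScalarField((b // 2) * sum(q .^ 2), t, q, p; name=:SomeField)

∇Φ = calculate_gradient(field)  # 3-element Vector{Num}: [b*x(t), b*y(t), b*z(t)]
H = calculate_hessian(field)    # 3×3 symbolic Hessian matrix
```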
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
1757
""" Construct an `ODESystem` from an `AbstractScalarField` by taking the gradient of the field's value with respect to the state variables. Symbols which describe the state derivatives can be provided via the `var_map_to_dvs` keyword argument. Alternatively, default state derivative symbols are used: :ẋ, :ẏ, and :ż for :x, :y, :z states, and :Δ\$(state) otherwise. ## Example ODESystem(field; var_map_to_dvs = Dict(:x => :ẋ, :y => :ẏ, :z => :ż)) """ function ModelingToolkit.ODESystem(field::AbstractScalarField; var_map_to_dvs::Union{<:AbstractDict,<:Nothing}=nothing) t = ModelingToolkit.get_iv(field) u = unknowns(field) if isnothing(var_map_to_dvs) if string.(collect(u)) == ["x($t)", "y($t)", "z($t)"][CartesianIndices(u)] var_map_to_dvs = Dict(:x => :ẋ, :y => :ẏ, :z => :ż) du = map(x -> Symbol(var_map_to_dvs[Symbol(first(split(string(x), "($(Symbolics.value(t)))")))]), u) else du = map(x -> Symbol(:Δ, Symbol(first(split(string(x), "($(Symbolics.value(t)))")))), u) end else du = map(x -> Symbol(var_map_to_dvs[Symbol(first(split(string(x), "($(Symbolics.value(t)))")))]), u) end u̇ = vcat( (@variables($(δ)(t)) for δ in du)... ) p = parameters(field) Δ = Differential(t) name = field.name # TODO: when ModelingToolkit.jl updates, change to get_name eqs = vcat( Δ.(u) .~ u̇, Δ.(u̇) .~ -calculate_gradient(field), ) return ODESystem( eqs, t, vcat(u, u̇), p; name=name, ) end function SciMLBase.ODEProblem(field::AbstractScalarField, args...; kwargs...) return ODEProblem( complete(ODESystem(field); split=false), args...; kwargs..., ) end
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
6621
""" The potential due to a harmonic oscillator. \$$(LATEX_EXPRESSIONS["HarmonicOscillatorPotential"])\$ """ @memoize function HarmonicOscillatorPotential(N::Integer=1; name=:HarmonicOscillator) if N > 1 @variables t (x(t))[1:N] @parameters ω[1:N] x = collect(x) ω = collect(ω) value = (1 // 2) * ω ⋅ ω * x ⋅ x else @variables t x(t) @parameters ω value = (1 // 2) * ω^2 * x^2 x = [x] ω = [ω] end return ScalarField(value, t, x, ω; name=name) end """ The Henon-Heiles potential. \$$(LATEX_EXPRESSIONS["HenonHeilesPotential"])\$ """ @memoize function HenonHeilesPotential(; name=:HenonHeilesPotential) @variables t x(t) y(t) value = x^2 * y + (1 // 2) * x^2 - (1 // 3) * y^3 + (1 // 2)y^2 return ScalarField(value, t, [x, y], Num[]; name=name) end """ The Hernquist potential. \$$(LATEX_EXPRESSIONS["HernquistPotential"])\$ """ @memoize function HernquistPotential(; name=:HernquistPotential) @variables t x(t) y(t) z(t) @parameters G m c value = -(G * m) / (c + sqrt(x^2 + y^2 + z^2)) return ScalarField(value, t, [x, y, z], [G, m, c]; name=name) end """ The Isochrone potential. \$$(LATEX_EXPRESSIONS["IsochronePotential"])\$ """ @memoize function IsochronePotential(; name=:IsochronePotential) @variables t x(t) y(t) z(t) @parameters G m b value = -(G * m) / (b + sqrt(b^2 + x^2 + y^2 + z^2)) return ScalarField(value, t, [x, y, z], [G, m, b]; name=name) end """ The Jaffe potential. \$$(LATEX_EXPRESSIONS["JaffePotential"])\$ """ @memoize function JaffePotential(; name=:JaffePotential) @variables t x(t) y(t) z(t) @parameters G m c value = G * m * log10( sqrt(x^2 + y^2 + z^2) / (c + sqrt(x^2 + y^2 + z^2)) / c ) return ScalarField(value, t, [x, y, z], [G, m, c]; name=name) end """ The Kepler potential. \$$(LATEX_EXPRESSIONS["KeplerPotential"])\$ """ @memoize function KeplerPotential(; name=:KeplerPotential) @variables t x(t) y(t) z(t) @parameters G m value = -G * m / sqrt(x^2 + y^2 + z^2) return ScalarField(value, t, [x, y, z], [G, m]; name=name) end """ The Kuzmin potential. \$$(LATEX_EXPRESSIONS["KuzminPotential"])\$ """ @memoize function KuzminPotential(; name=:KuzminPotential) @variables t x(t) y(t) z(t) @parameters G m a value = -(G * m) / sqrt(x^2 + y^2 + (a + abs(z))^2) return ScalarField(value, t, [x, y, z], [G, m, a]; name=name) end """ The logarithmic potential. \$$(LATEX_EXPRESSIONS["LogarithmicPotential"])\$ """ @memoize function LogarithmicPotential(; name=:LogarithmicPotential) @variables t x(t) y(t) z(t) @parameters v r q[1:3] q = collect(q) value = (1 // 2) * v^2 * log10(r^2 + z^2 / q[3]^2 + y^2 / q[2]^2 + x^2 / q[1]^2) return ScalarField(value, t, [x, y, z], vcat(v, r, q); name=name) end """ The long Murali-bar potential. \$$(LATEX_EXPRESSIONS["LongMuraliBarPotential"])\$ """ @memoize function LongMuraliBarPotential(; name=:LongMuraliBarPotential) @variables t u = @variables x(t) y(t) z(t) p = @parameters G m a b c α value = G * m * log10( (-a + x * cos(α) + y * sin(α) + sqrt((b + sqrt(c^2 + z^2))^2 + (-x * sin(α) + y * cos(α))^2 + (a - x * cos(α) - y * sin(α))^2)) / ( a + x * cos(α) + y * sin(α) + sqrt((b + sqrt(c^2 + z^2))^2 + (-x * sin(α) + y * cos(α))^2 + (a + x * cos(α) + y * sin(α))^2) ) ) / 2a return ScalarField(value, t, u, p; name=name) end """ The Miyamoto-Nagai potential. 
\$$(LATEX_EXPRESSIONS["MiyamotoNagaiPotential"])\$ """ @memoize function MiyamotoNagaiPotential(; name=:MiyamotoNagaiPotential) @variables t u = @variables x(t) y(t) z(t) p = @parameters G m a b value = -G * m / sqrt(x^2 + y^2 + (a + sqrt(b^2 + z^2))^2) return ScalarField(value, t, u, p; name=name) end """ The NFW potential. \$$(LATEX_EXPRESSIONS["NFWPotential"])\$ """ @memoize function NFWPotential(; name=:NFWPotential) @variables t u = @variables x(t) y(t) z(t) p = @parameters G m a b c r value = -G * m * log10(1 + sqrt(z^2 / c^2 + y^2 / b^2 + x^2 / a^2) / r) / sqrt(z^2 / c^2 + y^2 / b^2 + x^2 / a^2) return ScalarField(value, t, u, p; name=name) end """ The Plummer potential. \$$(LATEX_EXPRESSIONS["PlummerPotential"])\$ """ @memoize function PlummerPotential(; name=:PlummerPotential) @variables t u = @variables x(t) y(t) z(t) p = @parameters G m b value = -G * m / sqrt(b^2 + x^2 + y^2 + z^2) return ScalarField(value, t, u, p; name=name) end """ The power-law cutoff potential. !!! warning Not yet implemented! \$$(LATEX_EXPRESSIONS["PowerLawCutoffPotential"])\$ """ @memoize function PowerLawCutoffPotential(; name=:PowerLawCutoffPotential) @variables t u = @variables x(t) y(t) z(t) p = @parameters G m a α γ throw( ErrorException( """ PowerLawCutoffPotential is not yet implemented. This potential requires special math functions, namely the gamma and lowergamma functions. These functions are provided by `SpecialFunctions.jl`, but some work is necessary to register these functions with `Symbolics.jl`. If you'd like to help, please submit a PR! """ ) ) value = G * α * m * γ * (3 // 2 - α / 2) # TODO: finish this value! return ScalarField(value, t, u, p; name=name) end """ The Satoh potential. !!! warning Not yet implemented! \$$(LATEX_EXPRESSIONS["SatohPotential"])\$ """ @memoize function SatohPotential(; name=:SatohPotential) throw( ErrorException( """ The SatohPotential is not yet implemented. This potential requires special math functions, namely the gamma and lowergamma functions. These functions are provided by `SpecialFunctions.jl`, but some work is necessary to register these functions with `Symbolics.jl`. If you'd like to help, please submit a PR! """ ) ) end """ The StonePotential potential. !!! warning Not yet implemented! \$$(LATEX_EXPRESSIONS["StonePotential"])\$ """ @memoize function StonePotential(; name=:StonePotential) throw( ErrorException( """ The StonePotential is not yet implemented. This potential requires special math functions, namely the gamma and lowergamma functions. These functions are provided by `SpecialFunctions.jl`, but some work is necessary to register these functions with `Symbolics.jl`. If you'd like to help, please submit a PR! """ ) ) end
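A quick check of one of the constructors above; the printed states and parameters mirror the package documentation:

```julia
using ModelingToolkit, GalacticPotentials

field = PlummerPotential()

unknowns(field)    # x(t), y(t), z(t)
parameters(field)  # G, m, b
```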
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
3118
# # This is an autogenerated file! It was created on 2023-12-26. # const LATEX_EXPRESSIONS = Base.ImmutableDict( "HarmonicOscillatorPotential" => L"\Phi = 0.5 \omega^{2} x^{2}", "HenonHeilesPotential" => L"\Phi = 1.0 x^{2} y + 0.5 x^{2} - 0.333333333333333 y^{3} + 0.5 y^{2}", "HernquistPotential" => L"\Phi = - \frac{G m}{c + \sqrt{x^{2} + y^{2} + z^{2}}}", "IsochronePotential" => L"\Phi = - \frac{G m}{b + \sqrt{b^{2} + x^{2} + y^{2} + z^{2}}}", "JaffePotential" => L"\Phi = \frac{G m \log{\left(\frac{\sqrt{x^{2} + y^{2} + z^{2}}}{c + \sqrt{x^{2} + y^{2} + z^{2}}} \right)}}{c}", "KeplerPotential" => L"\Phi = - \frac{G m}{\sqrt{x^{2} + y^{2} + z^{2}}}", "KuzminPotential" => L"\Phi = - \frac{G m}{\sqrt{x^{2} + y^{2} + \left(a + \left|{z}\right|\right)^{2}}}", "LogarithmicPotential" => L"\Phi = 0.5 v_{c}^{2} \log{\left(r_{h}^{2} + \frac{z^{2}}{q_{3}^{2}} + \frac{y^{2}}{q_{2}^{2}} + \frac{x^{2}}{q_{1}^{2}} \right)}", "LongMuraliBarPotential" => L"\Phi = \frac{G m \log{\left(\frac{- a + x \cos{\left(\alpha \right)} + y \sin{\left(\alpha \right)} + \sqrt{\left(b + \sqrt{c^{2} + z^{2}}\right)^{2} + \left(- x \sin{\left(\alpha \right)} + y \cos{\left(\alpha \right)}\right)^{2} + \left(a - x \cos{\left(\alpha \right)} - y \sin{\left(\alpha \right)}\right)^{2}}}{a + x \cos{\left(\alpha \right)} + y \sin{\left(\alpha \right)} + \sqrt{\left(b + \sqrt{c^{2} + z^{2}}\right)^{2} + \left(- x \sin{\left(\alpha \right)} + y \cos{\left(\alpha \right)}\right)^{2} + \left(a + x \cos{\left(\alpha \right)} + y \sin{\left(\alpha \right)}\right)^{2}}} \right)}}{2 a}", "MiyamotoNagaiPotential" => L"\Phi = - \frac{G m}{\sqrt{x^{2} + y^{2} + \left(a + \sqrt{b^{2} + z^{2}}\right)^{2}}}", "NFWPotential" => L"\Phi = - \frac{G m \log{\left(1 + \frac{\sqrt{\frac{z^{2}}{c^{2}} + \frac{y^{2}}{b^{2}} + \frac{x^{2}}{a^{2}}}}{r_{s}} \right)}}{\sqrt{\frac{z^{2}}{c^{2}} + \frac{y^{2}}{b^{2}} + \frac{x^{2}}{a^{2}}}}", "PlummerPotential" => L"\Phi = - \frac{G m}{\sqrt{b^{2} + x^{2} + y^{2} + z^{2}}}", "PowerLawCutoffPotential" => L"\Phi = \frac{G \alpha m \gamma\left(1.5 - \frac{\alpha}{2}, \frac{x^{2} + y^{2} + z^{2}}{r_{c}^{2}}\right)}{2 \sqrt{x^{2} + y^{2} + z^{2}} \Gamma\left(2.5 - \frac{\alpha}{2}\right)} - \frac{3 G m \gamma\left(1.5 - \frac{\alpha}{2}, \frac{x^{2} + y^{2} + z^{2}}{r_{c}^{2}}\right)}{2 \sqrt{x^{2} + y^{2} + z^{2}} \Gamma\left(2.5 - \frac{\alpha}{2}\right)} + \frac{G m \gamma\left(1 - \frac{\alpha}{2}, \frac{x^{2} + y^{2} + z^{2}}{r_{c}^{2}}\right)}{r_{c} \Gamma\left(1.5 - \frac{\alpha}{2}\right)}", "SatohPotential" => L"\Phi = - \frac{G m}{\sqrt{a \left(a + 2 \sqrt{b^{2} + z^{2}}\right) + x^{2} + y^{2} + z^{2}}}", "StonePotential" => L"\Phi = - \frac{2 G m \left(- \frac{r_{c} \operatorname{atan}{\left(\frac{\sqrt{x^{2} + y^{2} + z^{2}}}{r_{c}} \right)}}{\sqrt{x^{2} + y^{2} + z^{2}}} + \frac{r_{h} \operatorname{atan}{\left(\frac{\sqrt{x^{2} + y^{2} + z^{2}}}{r_{h}} \right)}}{\sqrt{x^{2} + y^{2} + z^{2}}} + 0.5 \log{\left(\frac{r_{h}^{2} + x^{2} + y^{2} + z^{2}}{r_{c}^{2} + x^{2} + y^{2} + z^{2}} \right)}\right)}{- 3.14159265358979 r_{c} + 3.14159265358979 r_{h}}", )
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
code
1420
# # Unit tests for GalacticPotentials.jl # using GalacticPotentials, Test using ModelingToolkit, Symbolics using GalacticPotentials: AbstractField, AbstractScalarField, ScalarField @testset verbose = true "Scalar Fields" begin @variables t p = @parameters b q = @variables x(t) y(t) z(t) field = ScalarField( (b // 2) * sum(q .^ 2), t, q, p; name=:SomeField ) @testset showtiming = true "Constructors" begin @test field isa ModelingToolkit.AbstractSystem end @testset showtiming = true "Calculations" begin @test all(calculate_jacobian(field) - calculate_gradient(field) .== 0) @test calculate_hessian(field) isa AbstractMatrix end end @testset verbose = true "Galactic Potentials" begin for name in names(GalacticPotentials) !occursin("Potential", "$name") && continue occursin("Potentials", "$name") && continue @eval field = $name() @testset showtiming = true "$name" begin N = length(unknowns(field)) M = length(parameters(field)) @test field isa AbstractField @test calculate_gradient(complete(field; split=false)) isa AbstractVector @test ODESystem(field) isa ODESystem @test ODEProblem(complete(field; split=false), randn(2N), (rand(), rand()), randn(M)) isa ODEProblem end end end
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
docs
2222
[![Tests](https://github.com/cadojo/GalacticPotentials.jl/workflows/Tests/badge.svg)](https://github.com/cadojo/GalacticPotentials.jl/actions?query=workflow%3ATests) [![Docs](https://github.com/cadojo/GalacticPotentials.jl/workflows/Documentation/badge.svg)](https://cadojo.github.io/GalacticPotentials.jl) # 🌌 `GalacticPotentials.jl` _An extension of [ModelingToolkit.jl](https://github.com/SciML/ModelingToolkit.jl) which provides common galactic potentials._ ## Installation Choose one of the two lines below! ```julia Pkg.add("GalacticPotentials") # in Julia code ``` ```julia pkg> add GalacticPotentials # in Julia's REPL ``` ## Usage Potentials are defined as subtypes of `ModelingToolkit.jl`'s abstract system types. They can be converted to `ODESystem` and `ODEProblem` types to interact with the rest of the `SciML` ecosystem. ```julia using Plots using ModelingToolkit using DifferentialEquations using GalacticPotentials let model = ODESystem(PlummerPotential()) p = @nonamespace Dict( model.G => 6.6743e-20, # field strength (km³ kg⁻¹ s⁻²) model.m => 6e31, # mass (kg) model.b => 1e-6 # softening parameter (unitless) ) u0 = @nonamespace Dict( model.x => 11e5, model.y => 5e5, model.z => 0, model.Δx => 1e3, model.Δy => 1e3, model.Δz => 0 ) ts = (0.0, 1e6) problem = ODEProblem(model, u0, ts, p) solution = solve(problem; reltol=1e-14, abstol=1e-14) plot(solution; idxs=(:x,:y), label=:none, dpi = 400, aspect_ratio=:equal) end ``` ![](/docs/src/img/plummer-orbit.png) ## Credits This package is [bootstrapped](/gen/gala.jl) off of [`gala`](http://gala.adrian.pw) and [`galpy`](https://docs.galpy.org), two rich Python packages for galactic dynamics. I aim to learn about galactic dynamics by integrating the models within these two popular Python packages into the [SciML](https://sciml.ai) ecosystem. The scalar field symbolic-numerics are copied and modified versions of `AbstractSystem` interfaces within [`ModelingToolkit.jl`](https://github.com/sciml/ModelingToolkit.jl). The field implementations in this package are highly unstable; they may change in the near future.
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
docs
4758
# Example Usage First, let's use everyone's favorite toy potential: the Plummer potential. ```julia julia> using GalacticPotentials julia> field = PlummerPotential() Model PlummerPotential States (3): x(t) y(t) z(t) Parameters (3): G m b ``` The Plummer potential field equation is shown below. $$\Phi = - \frac{G m}{\sqrt{b^{2} + x^{2} + y^{2} + z^{2}}}$$ Let's assume some massless particle which exists within this field. How will the particle move? As [previously](potentials.md) described, we can take the gradient of the scalar field $\Phi$ with respect to the state variables $x$, $y$, and $z$ to find the force (per unit mass) applied to the particle at all points in space. ```julia julia> system = ODESystem(field) Model PlummerPotential with 6 equations States (6): x(t) y(t) z(t) ẋ(t) ẏ(t) ż(t) Parameters (3): G m b ``` The differential equations which define the `system` variable are shown below. Note that the gradient of the scalar potential field has been integrated into a system of first-order differential equations: the expressions for the gradient are shown in the state equations for $\frac{d \dot{x}}{d t}$, $\frac{d \dot{y}}{d t}$, and $\frac{d \dot{z}}{d t}$. $\begin{aligned} \frac{\mathrm{d} x\left( t \right)}{\mathrm{d}t} =& \textnormal{\.{x}}\left( t \right) \\ \frac{\mathrm{d} y\left( t \right)}{\mathrm{d}t} =& \textnormal{\.{y}}\left( t \right) \\ \frac{\mathrm{d} z\left( t \right)}{\mathrm{d}t} =& \textnormal{\.{z}}\left( t \right) \\ \frac{\mathrm{d} \textnormal{\.{x}}\left( t \right)}{\mathrm{d}t} =& \frac{ - G m x\left( t \right)}{\left( \sqrt{b^{2} + \left( x\left( t \right) \right)^{2} + \left( y\left( t \right) \right)^{2} + \left( z\left( t \right) \right)^{2}} \right)^{3}} \\ \frac{\mathrm{d} \textnormal{\.{y}}\left( t \right)}{\mathrm{d}t} =& \frac{ - G m y\left( t \right)}{\left( \sqrt{b^{2} + \left( x\left( t \right) \right)^{2} + \left( y\left( t \right) \right)^{2} + \left( z\left( t \right) \right)^{2}} \right)^{3}} \\ \frac{\mathrm{d} \textnormal{\.{z}}\left( t \right)}{\mathrm{d}t} =& \frac{ - G m z\left( t \right)}{\left( \sqrt{b^{2} + \left( x\left( t \right) \right)^{2} + \left( y\left( t \right) \right)^{2} + \left( z\left( t \right) \right)^{2}} \right)^{3}} \end{aligned}$ The `ModelingToolkit.jl` `AbstractSystem` interface methods are defined for all potential fields within `GalacticPotentials.jl`. Specifically, all fields in `GalacticPotentials.jl` are subtypes of `AbstractTimeDependentSystem`. Special subtype and method implementations have been added to `GalacticPotentials.jl` as needed. ```julia julia> using ModelingToolkit julia> G = calculate_gradient(field) 3-element Vector{Num}: (-((-G*m) / (sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2)^2))*x(t)) / sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2) (-((-G*m) / (sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2)^2))*y(t)) / sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2) (-((-G*m) / (sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2)^2))*z(t)) / sqrt(b^2 + x(t)^2 + y(t)^2 + z(t)^2) julia> J = calculate_jacobian(system) 6×6 Matrix{Num}: … # expression left out of documentation for brevity julia> f = generate_function(field) :(function (ˍ₋arg1, ˍ₋arg2) begin (/)((*)((*)(-1, ˍ₋arg2[1]), ˍ₋arg2[2]), (sqrt)((+)((+)((+)((^)(ˍ₋arg2[3], 2), (^)(ˍ₋arg1[1], 2)), (^)(ˍ₋arg1[2], 2)), (^)(ˍ₋arg1[3], 2)))) end end) ``` Special constructors for `ODESystem` and `ODEProblem` -- two `SciML` types -- are defined for all potential fields within `GalacticPotentials.jl`. The `ODESystem` constructor was already illustrated above. Let's look at the `ODEProblem` constructor now. 
```julia julia> problem = let p = randn(3) u0 = randn(6) ts = randn(2) ODEProblem(field, u0, ts, p) end ``` It's generally safer to use _variable maps_ to provide initial conditions for your `ODEProblem`. Variable maps allow for an arbitrary state vector order; the `ODEProblem` call above assumes the parameter and state vector orders! ```julia julia> problem = let model = system p = @nonamespace Dict( model.G => 6.6743e-20, # field strength (km³ kg⁻¹ s⁻²) model.m => 6e31, # mass (kg) model.b => 1e-6 # softening parameter (unitless) ) u0 = @nonamespace Dict( model.x => 11e5, model.y => 5e5, model.z => 0, model.Δx => 1e3, model.Δy => 1e3, model.Δz => 0 ) ts = (0.0, 1e6) problem = ODEProblem(model, u0, ts, p) end ``` With the `ODEProblem` defined, you can use `OrdinaryDiffEq.jl` or `DifferentialEquations.jl` to numerically integrate the orbit, and `Plots.jl` to plot the result! For more information, consult the `SciML` [documentation](https://docs.sciml.ai), or the `GalacticPotentials.jl` [Getting Started](index.md) page.
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
docs
1627
# `GalacticPotentials.jl` _Common models within galactic dynamics!_ ## Overview This package extends `ModelingToolkit` to represent common galactic potentials. All available potentials are shown on the [Reference](reference.md) page. ## Installation Choose one of the two lines below! ```julia pkg> add GalacticPotentials # in Julia's REPL ``` ```julia Pkg.add("GalacticPotentials") # in Julia code ``` ## Usage This package is intended to be used alongside `ModelingToolkit.jl` and the rest of the [`SciML`](https://sciml.ai) ecosystem. Scalar potential fields within `gala` and `galpy` -- two popular Python packages for galactic dynamics -- were used to bootstrap this package. See the code block below for a one-click example of `GalacticPotentials.jl` usage. ```julia using Plots using ModelingToolkit using DifferentialEquations using GalacticPotentials let model = ODESystem(PlummerPotential()) p = @nonamespace Dict( model.G => 6.6743e-20, # field strength (km³ kg⁻¹ s⁻²) model.m => 6e31, # mass (kg) model.b => 1e-6 # softening parameter (unitless) ) u0 = @nonamespace Dict( model.x => 11e5, model.y => 5e5, model.z => 0, model.Δx => 1e3, model.Δy => 1e3, model.Δz => 0 ) ts = (0.0, 1e6) problem = ODEProblem(model, u0, ts, p) solution = solve(problem; reltol=1e-14, abstol=1e-14) plot(solution; idxs=(:x,:y), label=:none, dpi = 400, aspect_ratio=:equal) end ``` ![](img/plummer-orbit.png)
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
docs
1603
# Gravitational Potentials Gravitational potential fields provide an alternative (approximate) approach to large n-body systems. Rather than tracking the orbits of _all_ particles in an n-body system, potential fields allow you to integrate _one orbit at a time_. The Plummer potential is a common potential function used in galactic dynamics; its expression is shown below, where $G$ is the gravitational field strength, $m$ is the mass of the central body, and $b$ is a softening parameter that avoids infinities when the position of the particle ($x$, $y$, $z$) is near the origin. $$\Phi = - \frac{G m}{\sqrt{b^{2} + x^{2} + y^{2} + z^{2}}}$$ Given any scalar potential field, the field's gradient provides the force per unit mass (the acceleration) applied to a body at any position in the field. If we treat the orbiting body as a point mass, then we can use the general ordinary differential equation below to numerically integrate any orbit along the potential with position vector $u$, parameter vector $p$, and scalar time $t$. $$\ddot{u} = -\nabla \Phi(u,p,t)$$ Note the generality! This kind of _recipe_ is well suited to tools like `ModelingToolkit.jl`: you may write your expressions _mathematically_ and let the `SciML` ecosystem generate fast and non-allocating code, and efficiently integrate the dynamics forward (or backward) in time. `GalacticPotentials.jl` provides mathematical descriptions of common scalar potential fields used in galactic dynamics, and hooks these descriptions into `ModelingToolkit.jl` types for ease of use.
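As a concrete illustration of this recipe, the gradient of the Plummer potential can be computed symbolically with `Symbolics.jl` alone; this sketch is independent of the package internals:

```julia
using Symbolics

@variables x y z G m b

# The Plummer potential; the acceleration of a test particle is -∇Φ.
Φ = -G * m / sqrt(b^2 + x^2 + y^2 + z^2)
∇Φ = Symbolics.gradient(Φ, [x, y, z])
```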
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.2.1
177e19ca8dbce81ba0c8f1c3468b72a8f78f9412
docs
124
# Documentation _All docstrings!_ ```@autodocs Modules = [ GalacticPotentials ] Order = [:module, :type, :function] ```
GalacticPotentials
https://github.com/cadojo/GalacticPotentials.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
1034
using SliceSampling using Documenter DocMeta.setdocmeta!(SliceSampling, :DocTestSetup, :(using SliceSampling); recursive=true) makedocs(; modules=[SliceSampling], authors="Kyurae Kim <[email protected]> and contributors", repo="https://github.com/TuringLang/SliceSampling.jl/blob/{commit}{path}#{line}", sitename="SliceSampling.jl", format=Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", canonical="https://TuringLang.org/SliceSampling.jl", edit_link="main", assets=String[], ), pages=[ "Home" => "index.md", "General Usage" => "general.md", "Univariate Slice Sampling" => "univariate_slice.md", "Meta Multivariate Samplers" => "meta_multivariate.md", "Latent Slice Sampling" => "latent_slice.md", "Gibbsian Polar Slice Sampling" => "gibbs_polar.md" ], ) deploydocs(; repo="github.com/TuringLang/SliceSampling.jl", push_preview=true )
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2066
module SliceSamplingTuringExt if isdefined(Base, :get_extension) using LogDensityProblemsAD using Random using SliceSampling using Turing # using Turing: Turing, Experimental else using ..LogDensityProblemsAD using ..Random using ..SliceSampling using ..Turing #using ..Turing: Turing, Experimental end # Required for using the slice samplers as `externalsampler`s in Turing # begin Turing.Inference.getparams( ::Turing.DynamicPPL.Model, sample::SliceSampling.Transition ) = sample.params # end # Required for using the slice samplers as `Experimental.Gibbs` samplers in Turing # begin Turing.Inference.getparams( ::Turing.DynamicPPL.Model, state::SliceSampling.UnivariateSliceState ) = state.transition.params Turing.Inference.getparams( ::Turing.DynamicPPL.Model, state::SliceSampling.GibbsState ) = state.transition.params Turing.Inference.getparams( ::Turing.DynamicPPL.Model, state::SliceSampling.HitAndRunState ) = state.transition.params Turing.Experimental.gibbs_requires_recompute_logprob( model_dst, ::Turing.DynamicPPL.Sampler{ <: Turing.Inference.ExternalSampler{ <: SliceSampling.AbstractSliceSampling, A, U } }, sampler_src, state_dst, state_src ) where {A,U} = false # end function SliceSampling.initial_sample( rng::Random.AbstractRNG, ℓ ::Turing.LogDensityFunction ) model = ℓ.model spl = Turing.SampleFromUniform() vi = Turing.VarInfo(rng, model, spl) θ = vi[spl] init_attempt_count = 1 while !isfinite(θ) if init_attempt_count == 10 @warn "failed to find valid initial parameters in $(init_attempt_count) tries; consider providing explicit initial parameters using the `initial_params` keyword" end # NOTE: This will sample in the unconstrained space. vi = last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromUniform())) θ = vi[spl] init_attempt_count += 1 end θ end end
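The glue code above enables usage like the following sketch; the model is a hypothetical toy, and `externalsampler` is assumed to be Turing's exported wrapper for outside samplers:

```julia
using Turing, SliceSampling

# A hypothetical toy model.
@model function demo()
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
end

# Run a slice sampler as an external sampler within Turing.
chain = sample(demo(), externalsampler(LatentSlice(2.0)), 1_000)
```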
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
3348
module SliceSampling using AbstractMCMC using Accessors using Distributions using FillArrays using LinearAlgebra using LogDensityProblems using SimpleUnPack using Random # The following is necessary because Turing wraps all models with # LogDensityProblemsAD by default. So we need access to these types. using LogDensityProblemsAD # reexports using AbstractMCMC: sample, MCMCThreads, MCMCDistributed, MCMCSerial export sample, MCMCThreads, MCMCDistributed, MCMCSerial # Interfaces abstract type AbstractSliceSampling <: AbstractMCMC.AbstractSampler end """ struct Transition Struct containing the results of the transition. # Fields - `params`: Samples generated by the transition. - `lp::Real`: Log-target density of the samples. - `info::NamedTuple`: Named tuple containing information about the transition. """ struct Transition{P, L <: Real, I <: NamedTuple} "current state of the slice sampling chain" params::P "log density of the current state" lp::L "information generated from the sampler" info::I end """ initial_sample(rng, model) Return the initial sample for the `model` using the random number generator `rng`. # Arguments - `rng::Random.AbstractRNG`: Random number generator. - `model`: The target `LogDensityProblem`. """ function initial_sample(::Random.AbstractRNG, ::Any) error( "`initial_sample` is not implemented but an initialization wasn't provided. " * "Consider supplying an initialization via the `initial_params` keyword." ) end # If target is from `LogDensityProblemsAD`, unwrap target before calling `initial_sample`. # This is necessary since Turing wraps `DynamicPPL.Model`s when passed to an `externalsampler`. initial_sample( rng::Random.AbstractRNG, wrap::LogDensityProblemsAD.ADGradientWrapper ) = initial_sample(rng, parent(wrap)) function exceeded_max_prop(max_prop::Int) error("Exceeded maximum number of proposals $(max_prop).\n", "Here are possible causes:\n", "- The model might be broken or pathological.\n", "- There might be a bug in the sampler.") end ## Univariate Slice Sampling Algorithms export Slice, SliceSteppingOut, SliceDoublingOut abstract type AbstractUnivariateSliceSampling <: AbstractSliceSampling end accept_slice_proposal( ::AbstractSliceSampling, ::Any, ::Real, ::Real, ::Real, ::Real, ::Real, ::Real, ) = true function find_interval end include("univariate/univariate.jl") include("univariate/fixedinterval.jl") include("univariate/steppingout.jl") include("univariate/doublingout.jl") ## Multivariate slice sampling algorithms abstract type AbstractMultivariateSliceSampling <: AbstractSliceSampling end # Meta Multivariate Samplers export RandPermGibbs, HitAndRun include("multivariate/randpermgibbs.jl") include("multivariate/hitandrun.jl") # Latent Slice Sampling export LatentSlice include("multivariate/latent.jl") # Gibbsian Polar Slice Sampling export GibbsPolarSlice include("multivariate/gibbspolar.jl") # Turing Compatibility if !isdefined(Base, :get_extension) using Requires end @static if !isdefined(Base, :get_extension) function __init__() @require Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" include( "../ext/SliceSamplingTuringExt.jl" ) end end end
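A minimal end-to-end sketch of the interfaces above; the 2-D Gaussian target and the sampler configuration are assumptions for illustration:

```julia
using AbstractMCMC, LogDensityProblems, Random
using SliceSampling

# A hypothetical 2-D standard Gaussian target implementing the
# LogDensityProblems.jl interface.
struct Gauss2D end

LogDensityProblems.logdensity(::Gauss2D, x) = -sum(abs2, x) / 2
LogDensityProblems.dimension(::Gauss2D) = 2
LogDensityProblems.capabilities(::Type{Gauss2D}) =
    LogDensityProblems.LogDensityOrder{0}()

model = AbstractMCMC.LogDensityModel(Gauss2D())
sampler = RandPermGibbs(SliceSteppingOut(1.0))

# Passing `initial_params` sidesteps the `initial_sample` fallback error above;
# the result is a vector of `Transition`s.
samples = sample(Random.default_rng(), model, sampler, 10_000; initial_params=zeros(2))
```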
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
5353
""" GibbsPolarSlice(w; max_proposals) Gibbsian polar slice sampling algorithm by P. Schär, M. Habeck, and D. Rudolf [^SHR2023]. # Arguments - `w::Real`: Initial window size for the radius shrinkage procedure. # Keyword Arguments - `w::Real`: Initial window size for the radius shrinkage procedure - `max_proposals::Int`: Maximum number of proposals allowed until throwing an error (default: `typemax(Int)`). """ struct GibbsPolarSlice{W <: Real} <: AbstractMultivariateSliceSampling w::W max_proposals::Int end GibbsPolarSlice(w::Real; max_proposals::Int = typemax(Int)) = GibbsPolarSlice(w, max_proposals) struct GibbsPolarSliceState{T <: Transition, R <: Real, D <: AbstractVector} "Current [`Transition`](@ref)." transition ::T "direction (\$\\theta\$ in the original paper[^SHR2023])" direction::D "radius (\$r\$ in the original paper[^SHR2023])" radius::R end struct GibbsPolarSliceTarget{M} model::M end function logdensity(target::GibbsPolarSliceTarget, x) d = length(x) (d-1)*log(norm(x)) + LogDensityProblems.logdensity(target.model, x) end function AbstractMCMC.step(rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::GibbsPolarSlice; initial_params = nothing, kwargs...) logdensitymodel = model.logdensity x = initial_params === nothing ? initial_sample(rng, logdensitymodel) : initial_params d = length(x) @assert d ≥ 2 "Gibbsian polar slice sampling works reliably only in dimension ≥2" r = norm(x) if r < 1e-5 @warn "The norm of initial_params is smaller than 1e-5, which might be result in unstable behavior and the sampler might even get stuck indefinitely. If you are using Turing, this might be due to change of support through Bijectors." end θ = x / r ℓp = LogDensityProblems.logdensity(logdensitymodel, x) t = Transition(x, ℓp, NamedTuple()) return t, GibbsPolarSliceState(t, θ, r) end function rand_subsphere(rng::Random.AbstractRNG, θ::AbstractVector) d = length(θ) V1 = randn(rng, eltype(θ), d) V2 = V1 - dot(θ, V1)*θ V2 / max(norm(V2), eps(eltype(θ))) end function geodesic_shrinkage( rng ::Random.AbstractRNG, ϱ1 ::GibbsPolarSliceTarget, ℓT ::F, θ ::AbstractVector{F}, r ::F, max_prop::Int ) where {F <: Real} y = rand_subsphere(rng, θ) ω_max = convert(F, 2π)*rand(rng, F) ω_min = ω_max - convert(F, 2π) for n_props in 1:max_prop # `Uniform` had a type instability issue: # https://github.com/JuliaStats/Distributions.jl/pull/1860 # ω = rand(rng, Uniform(ω_min, ω_max)) ω = ω_min + (ω_max - ω_min)*rand(rng, F) θ′ = θ*cos(ω) + y*sin(ω) if logdensity(ϱ1, r*θ′) > ℓT return θ′, n_props end if ω < 0 ω_min = ω else ω_max = ω end end exceeded_max_prop(max_prop) end function radius_shrinkage( rng ::Random.AbstractRNG, ϱ1 ::GibbsPolarSliceTarget, ℓT ::F, θ ::AbstractVector{F}, r ::F, w ::Real, max_prop::Int ) where {F <: Real} u = rand(rng, F) w = convert(F, w) r_min = max(r - u*w, 0) r_max = r + (1 - u)*w n_props_total = 0 n_props = 0 while (r_min > 0) && logdensity(ϱ1, r_min*θ) > ℓT r_min = max(r_min - w, 0) n_props += 1 if n_props > max_prop exceeded_max_prop(max_prop) end end n_props_total += n_props n_props = 0 while logdensity(ϱ1, r_max*θ) > ℓT r_max = r_max + w n_props += 1 if n_props > max_prop exceeded_max_prop(max_prop) end end n_props_total += n_props for n_props in 1:max_prop # `Uniform` had a type instability issue: # https://github.com/JuliaStats/Distributions.jl/pull/1860 #r′ = rand(rng, Uniform{F}(r_min, r_max)) r′ = r_min + (r_max - r_min)*rand(rng, F) if logdensity(ϱ1, r′*θ) > ℓT n_props_total += n_props return r′, n_props_total end if r′ < r r_min = r′ else r_max = r′ end 
end exceeded_max_prop(max_prop) end function AbstractMCMC.step( rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::GibbsPolarSlice, state ::GibbsPolarSliceState; kwargs..., ) logdensitymodel = model.logdensity max_prop = sampler.max_proposals x = state.transition.params ℓp = state.transition.lp w = sampler.w r = state.radius θ = state.direction ϱ1 = GibbsPolarSliceTarget(logdensitymodel) d = length(x) ℓT = ((d-1)*log(norm(x)) + ℓp) - Random.randexp(rng, eltype(ℓp)) θ, n_props_θ = geodesic_shrinkage(rng, ϱ1, ℓT, θ, r, max_prop) r, n_props_r = radius_shrinkage( rng, ϱ1, ℓT, θ, r, w, max_prop) x = θ*r ℓp = LogDensityProblems.logdensity(logdensitymodel, x) t = Transition(x, ℓp, ( num_radius_proposals = n_props_r, num_direction_proposals = n_props_θ, )) t, GibbsPolarSliceState(t, θ, r) end
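A construction sketch for the sampler above; the window size and proposal cap are assumed values:

```julia
using SliceSampling

# Gibbsian polar slice sampling; per the check above it is reliable only in
# dimension ≥ 2, and the initial point should not sit near the origin.
sampler = GibbsPolarSlice(10.0; max_proposals=10_000)
```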
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2258
""" HitAndRun(unislice) Hit-and-run sampling strategy[^BRS1993]. This applies `unislice` along a random direction uniform sampled from the sphere. # Arguments - `unislice::AbstractUnivariateSliceSampling`: Univariate slice sampling algorithm. """ struct HitAndRun{ S <: AbstractUnivariateSliceSampling } <: AbstractMultivariateSliceSampling unislice::S end struct HitAndRunState{T <: Transition} "Current [`Transition`](@ref)." transition::T end struct HitAndRunTarget{Model, Vec <: AbstractVector} model ::Model direction::Vec reference::Vec end function LogDensityProblems.logdensity(target::HitAndRunTarget, λ) @unpack model, reference, direction = target LogDensityProblems.logdensity(model, reference + λ*direction) end function AbstractMCMC.step(rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::HitAndRun; initial_params = nothing, kwargs...) logdensitymodel = model.logdensity θ = isnothing(initial_params) ? initial_sample(rng, logdensitymodel) : initial_params d = length(θ) @assert d ≥ 2 "Hit-and-Run works reliably only in dimension ≥2" lp = LogDensityProblems.logdensity(logdensitymodel, θ) t = Transition(θ, lp, NamedTuple()) return t, HitAndRunState(t) end function rand_uniform_unit_sphere(rng::Random.AbstractRNG, type::Type, d::Int) x = randn(rng, type, d) x / norm(x) end function AbstractMCMC.step( rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::HitAndRun, state ::HitAndRunState; kwargs..., ) logdensitymodel = model.logdensity ℓp = state.transition.lp θ = copy(state.transition.params) d = length(θ) unislice = sampler.unislice direction = rand_uniform_unit_sphere(rng, eltype(θ), d) hnrtarget = HitAndRunTarget(logdensitymodel, direction, θ) λ = zero(eltype(θ)) λ, ℓp, props = slice_sampling_univariate( rng, unislice, hnrtarget, ℓp, λ ) θ′ = θ + direction*λ t = Transition(θ′, ℓp, (num_proposals=props,)) t, HitAndRunState(t) end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2724
""" LatentSlice(beta) Latent slice sampling algorithm by Li and Walker[^LW2023]. # Arguments - `beta::Real`: Beta parameter of the Gamma distribution of the auxiliary variables. # Keyword Arguments - `max_proposals::Int`: Maximum number of proposals allowed until throwing an error (default: `typemax(Int)`). """ struct LatentSlice{B <: Real} <: AbstractMultivariateSliceSampling beta ::B max_proposals::Int end function LatentSlice(beta::Real; max_proposals::Int = typemax(Int)) @assert beta > 0 "Beta must be strictly positive" LatentSlice(beta, max_proposals) end struct LatentSliceState{T <: Transition, S <: AbstractVector} "Current [`Transition`](@ref)." transition ::T "Auxiliary variables for adapting the slice window (\$s\$ in the original paper[^LW2023])" sliceparams::S end function AbstractMCMC.step(rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::LatentSlice; initial_params = nothing, kwargs...) logdensitymodel = model.logdensity y = initial_params === nothing ? initial_sample(rng, logdensitymodel) : initial_params β = sampler.beta d = length(y) lp = LogDensityProblems.logdensity(logdensitymodel, y) s = convert(Vector{eltype(y)}, rand(rng, Gamma(2, 1/β), d)) t = Transition(y, lp, NamedTuple()) return t, LatentSliceState(t, s) end function AbstractMCMC.step( rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::LatentSlice, state ::LatentSliceState; kwargs..., ) logdensitymodel = model.logdensity max_proposals = sampler.max_proposals β = sampler.beta ℓp = state.transition.lp y = state.transition.params s = state.sliceparams d = length(y) ℓw = ℓp - Random.randexp(rng, eltype(y)) u_l = rand(rng, eltype(y), d) l = (y - s/2) + u_l.*s a = l - s/2 b = l + s/2 props = 0 while true props += 1 u_y = rand(rng, eltype(y), d) ystar = a + u_y.*(b - a) ℓpstar = LogDensityProblems.logdensity(logdensitymodel, ystar) if ℓw < ℓpstar ℓp = ℓpstar y = ystar break end if props > max_proposals exceeded_max_prop(max_proposals) end @inbounds for i = 1:d if ystar[i] < y[i] a[i] = ystar[i] else b[i] = ystar[i] end end end s = β*randexp(rng, eltype(y), d) + 2*abs.(l - y) t = Transition(y, ℓp, (num_proposals = props,)) t, LatentSliceState(t, s) end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2760
""" RandPermGibbs(unislice) Random permutation coordinate-wise Gibbs sampling strategy. This applies `unislice` coordinate-wise in a random order. # Arguments - `unislice::Union{<:AbstractUnivariateSliceSampling,<:AbstractVector{<:AbstractUnivariateSliceSampling}}`: a single or a vector of univariate slice sampling algorithms. When `unislice` is a vector of samplers, each slice sampler is applied to the corresponding coordinate of the target posterior. In that case, the `length(unislice)` must match the dimensionality of the posterior. """ struct RandPermGibbs{ S <: Union{ <: AbstractUnivariateSliceSampling, <: AbstractVector{<: AbstractUnivariateSliceSampling} } } <: AbstractMultivariateSliceSampling unislice::S end struct GibbsState{T <: Transition} "Current [`Transition`](@ref)." transition::T end struct GibbsTarget{Model, Idx <: Integer, Vec <: AbstractVector} model::Model idx ::Idx θ ::Vec end function LogDensityProblems.logdensity(gibbs::GibbsTarget, θi) @unpack model, idx, θ = gibbs LogDensityProblems.logdensity(model, (@set θ[idx] = θi)) end function AbstractMCMC.step(rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::RandPermGibbs; initial_params = nothing, kwargs...) logdensitymodel = model.logdensity θ = initial_params === nothing ? initial_sample(rng, logdensitymodel) : initial_params d = length(θ) if sampler.unislice isa AbstractVector @assert length(sampler.unislice) == d "Number of slice samplers does not match the dimensionality of the initial parameter." end lp = LogDensityProblems.logdensity(logdensitymodel, θ) t = Transition(θ, lp, NamedTuple()) return t, GibbsState(t) end function AbstractMCMC.step( rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::RandPermGibbs, state ::GibbsState; kwargs..., ) logdensitymodel = model.logdensity ℓp = state.transition.lp θ = copy(state.transition.params) d = length(θ) unislices = if sampler.unislice isa AbstractVector sampler.unislice else Fill(sampler.unislice, d) end props = zeros(Int, d) for i in shuffle(rng, 1:d) model_gibbs = GibbsTarget(logdensitymodel, i, θ) unislice = unislices[i] θ′_coord, ℓp, props_coord = slice_sampling_univariate( rng, unislice, model_gibbs, ℓp, θ[i] ) props[i] = props_coord θ[i] = θ′_coord end t = Transition(θ, ℓp, (num_proposals=props,)) t, GibbsState(t) end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2297
""" SliceDoublingOut(window; max_doubling_out, max_proposals) Univariate slice sampling by automatically adapting the initial interval through the "doubling-out" procedure (Scheme 4 by Neal[^N2003]) # Arguments - `window::Real`: Proposal window. # Keyword Arguments - `max_doubling_out`: Maximum number of "doubling outs" (default: 8). - `max_proposals::Int`: Maximum number of proposals allowed until throwing an error (default: `typemax(Int)`). """ struct SliceDoublingOut{W <: Real} <: AbstractUnivariateSliceSampling window ::W max_doubling_out::Int max_proposals ::Int end function SliceDoublingOut( window ::Real; max_doubling_out::Int = 8, max_proposals ::Int = typemax(Int), ) @assert window > 0 SliceDoublingOut(window, max_doubling_out, max_proposals) end function find_interval( rng ::Random.AbstractRNG, alg ::SliceDoublingOut, model, w ::Real, ℓy ::Real, θ₀ ::F, ) where {F <: Real} p = alg.max_doubling_out u = rand(rng, F) L = θ₀ - w*u R = L + w ℓπ_L = LogDensityProblems.logdensity(model, L) ℓπ_R = LogDensityProblems.logdensity(model, R) K = 2 for _ = 1:p if ((ℓy ≥ ℓπ_L) && (ℓy ≥ ℓπ_R)) break end v = rand(rng, F) if v < 0.5 L = L - (R - L) ℓπ_L = LogDensityProblems.logdensity(model, L) else R = R + (R - L) ℓπ_R = LogDensityProblems.logdensity(model, R) end K += 1 end L, R, K end function accept_slice_proposal( ::SliceDoublingOut, model, w ::Real, ℓy ::Real, θ₀ ::Real, θ₁ ::Real, L ::Real, R ::Real, ) D = false ℓπ_L = LogDensityProblems.logdensity(model, L) ℓπ_R = LogDensityProblems.logdensity(model, R) while R - L > 1.1*w M = (L + R)/2 if (θ₀ < M && θ₁ ≥ M) || (θ₀ ≥ M && θ₁ < M) D = true end if θ₁ < M R = M ℓπ_R = LogDensityProblems.logdensity(model, R) else L = M ℓπ_L = LogDensityProblems.logdensity(model, L) end if D && ℓy ≥ ℓπ_L && ℓy ≥ ℓπ_R return false end end true end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
772
""" Slice(window; max_proposals) Univariate slice sampling with a fixed initial interval (Scheme 2 by Neal[^N2003]) # Arguments - `window::Real`: Proposal window. # Keyword Arguments - `max_proposals::Int`: Maximum number of proposals allowed until throwing an error (default: `typemax(Int)`). """ struct Slice{W <: Real} <: AbstractUnivariateSliceSampling window ::W max_proposals::Int end function Slice( window ::Real; max_proposals::Int = typemax(Int), ) @assert window > 0 Slice(window, max_proposals) end function find_interval( rng::Random.AbstractRNG, ::Slice, ::Any, w ::Real, ::Real, θ₀ ::F, ) where {F <: Real} u = rand(rng, F) L = θ₀ - w*u R = L + w L, R, 0 end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
1475
""" SliceSteppingOut(window; max_stepping_out, max_proposals) Univariate slice sampling by automatically adapting the initial interval through the "stepping-out" procedure (Scheme 3 by Neal[^N2003]) # Arguments - `window::Real`: Proposal window. # Keyword Arguments - `max_stepping_out::Int`: Maximum number of "stepping outs" (default: 32). - `max_proposals::Int`: Maximum number of proposals allowed until throwing an error (default: `typemax(Int)`). """ struct SliceSteppingOut{W <: Real} <: AbstractUnivariateSliceSampling window ::W max_stepping_out::Int max_proposals ::Int end function SliceSteppingOut( window ::Real; max_stepping_out::Int = 32, max_proposals ::Int = typemax(Int), ) @assert window > 0 SliceSteppingOut(window, max_stepping_out, max_proposals) end function find_interval( rng ::Random.AbstractRNG, alg ::SliceSteppingOut, model, w ::Real, ℓy ::Real, θ₀ ::F, ) where {F <: Real} m = alg.max_stepping_out u = rand(rng, F) L = θ₀ - w*u R = L + w V = rand(rng, F) J = floor(Int, m*V) K = (m - 1) - J n_eval = 0 while J > 0 && ℓy < LogDensityProblems.logdensity(model, L) L = L - w J = J - 1 n_eval += 1 end while K > 0 && ℓy < LogDensityProblems.logdensity(model, R) R = R + w K = K - 1 n_eval += 1 end L, R, n_eval end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
2156
function slice_sampling_univariate( rng ::Random.AbstractRNG, alg ::AbstractSliceSampling, model, ℓπ ::Real, θ ::F, ) where {F <: Real} w, max_prop = alg.window, alg.max_proposals ℓy = ℓπ - Random.randexp(rng, F) L, R, props = find_interval(rng, alg, model, w, ℓy, θ) for _ in 1:max_prop U = rand(rng, F) θ′ = L + U*(R - L) ℓπ′ = LogDensityProblems.logdensity(model, θ′) props += 1 if (ℓy < ℓπ′) && accept_slice_proposal(alg, model, w, ℓy, θ, θ′, L, R) return θ′, ℓπ′, props end if θ′ < θ L = θ′ else R = θ′ end end exceeded_max_prop(max_prop) end struct UnivariateSliceState{T <: Transition} "Current [`Transition`](@ref)." transition::T end function AbstractMCMC.step(rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::AbstractUnivariateSliceSampling; initial_params = nothing, kwargs...) logdensitymodel = model.logdensity θ = isnothing(initial_params) ? initial_sample(rng, logdensitymodel) : initial_params @assert length(θ) == 1 "The dimensionality of the parameter should be 1." lp = LogDensityProblems.logdensity(logdensitymodel, θ) t = Transition(θ, lp, NamedTuple()) return t, UnivariateSliceState(t) end struct UnivariateTarget{Model} model::Model end function LogDensityProblems.logdensity(uni::UnivariateTarget, θi) LogDensityProblems.logdensity(uni.model, [θi]) end function AbstractMCMC.step( rng ::Random.AbstractRNG, model ::AbstractMCMC.LogDensityModel, sampler::AbstractUnivariateSliceSampling, state ::UnivariateSliceState; kwargs..., ) logdensitymodel = model.logdensity θ, ℓp = only(state.transition.params), state.transition.lp θ, ℓp, props = slice_sampling_univariate( rng, sampler, UnivariateTarget(logdensitymodel), ℓp, θ ) t = Transition([θ], ℓp, (num_proposals=props,)) t, UnivariateSliceState(t) end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
1273
struct WrongModel end LogDensityProblems.logdensity(::WrongModel, θ) = -Inf function LogDensityProblems.capabilities(::Type{<:WrongModel}) LogDensityProblems.LogDensityOrder{0}() end function LogDensityProblems.dimension(::WrongModel) 2 end @testset "error handling" begin model = AbstractMCMC.LogDensityModel(WrongModel()) @testset for sampler in [ # Univariate slice samplers RandPermGibbs(Slice(1; max_proposals=32)), RandPermGibbs(SliceSteppingOut(1; max_proposals=32)), RandPermGibbs(SliceDoublingOut(1; max_proposals=32)), HitAndRun(Slice(1; max_proposals=32)), HitAndRun(SliceSteppingOut(1; max_proposals=32)), HitAndRun(SliceDoublingOut(1; max_proposals=32)), # Latent slice sampling LatentSlice(5; max_proposals=32), # Gibbs polar slice sampling GibbsPolarSlice(5; max_proposals=32), ] @testset "max proposal error" begin rng = Random.default_rng() θ = [1., 1.] _, init_state = AbstractMCMC.step(rng, model, sampler; initial_params=copy(θ)) @test_throws ErrorException begin _, _ = AbstractMCMC.step(rng, model, sampler, init_state) end end end end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
5029
struct MultiModel{F <: Real, V <: AbstractVector} α::F β::F y::V end function MCMCTesting.sample_joint( rng::AbstractRNG, model::MultiModel{F, V} ) where {F <: Real, V <: AbstractVector} α, β = model.α, model.β μ = rand(rng, Normal(zero(F), one(F))) σ = rand(rng, InverseGamma(α, β)) |> F # InverseGamma is not type stable y = rand(rng, Normal(μ, σ), 10) θ = [μ, σ] θ, y end function MCMCTesting.markovchain_transition( rng ::Random.AbstractRNG, model ::MultiModel, sampler::SliceSampling.AbstractSliceSampling, θ, y ) model′ = AbstractMCMC.LogDensityModel(@set model.y = y) _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ)) transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state) transition.params end function LogDensityProblems.logdensity(model::MultiModel{F, V}, θ) where {F <: Real, V} α, β, y = model.α, model.β, model.y μ = θ[1] σ = θ[2] if σ ≤ 0 return typemin(F) end logpdf(Normal(zero(F), one(F)), μ) + logpdf(InverseGamma(α, β), σ) + sum(Base.Fix1(logpdf, Normal(μ, σ)), y) end function SliceSampling.initial_sample(rng::Random.AbstractRNG, model::MultiModel) randn(rng, LogDensityProblems.dimension(model)) end function LogDensityProblems.capabilities(::Type{<:MultiModel}) LogDensityProblems.LogDensityOrder{0}() end function LogDensityProblems.dimension(model::MultiModel) 2 end @testset "multivariate samplers" begin model = MultiModel(1., 1., [0.]) @testset for sampler in [ # Vector-valued windows RandPermGibbs(Slice.(fill(1, LogDensityProblems.dimension(model)))), RandPermGibbs(SliceSteppingOut.(fill(1, LogDensityProblems.dimension(model)))), RandPermGibbs(SliceDoublingOut.(fill(1, LogDensityProblems.dimension(model)))), # Scalar-valued windows RandPermGibbs(Slice(1)), RandPermGibbs(SliceSteppingOut(1)), RandPermGibbs(SliceDoublingOut(1)), HitAndRun(Slice(1)), HitAndRun(SliceSteppingOut(1)), HitAndRun(SliceDoublingOut(1)), # Latent slice sampling LatentSlice(5), # Gibbsian polar slice sampling GibbsPolarSlice(10), ] @testset "initial_params" begin model = MultiModel(1.0, 1.0, [0.0]) θ, y = MCMCTesting.sample_joint(Random.default_rng(), model) model′ = AbstractMCMC.LogDensityModel(@set model.y = y) θ0 = [1.0, 0.1] chain = sample( model, sampler, 10; initial_params=θ0, progress=false, ) @test first(chain).params == θ0 end @testset "initial_sample" begin rng = StableRNG(1) model = MultiModel(1.0, 1.0, [0.0]) θ0 = SliceSampling.initial_sample(rng, model) rng = StableRNG(1) chain = sample(rng, model, sampler, 10; progress=false) @test first(chain).params == θ0 end @testset "determinism" begin model = MultiModel(1.0, 1.0, [0.0]) θ, y = MCMCTesting.sample_joint(Random.default_rng(), model) model′ = AbstractMCMC.LogDensityModel(@set model.y = y) rng = StableRNG(1) _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ)) transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state) θ′ = transition.params rng = StableRNG(1) _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ)) transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state) θ′′ = transition.params @test θ′ == θ′′ end @testset "type stability $(type)" for type in [Float32, Float64] rng = Random.default_rng() model = MultiModel(one(type), one(type), [zero(type)]) θ, y = MCMCTesting.sample_joint(Random.default_rng(), model) model′ = AbstractMCMC.LogDensityModel(@set model.y = y) @test eltype(θ) == type @test eltype(y) == type _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ)) transition, _ = AbstractMCMC.step(rng, 
model′, sampler, init_state) θ′ = transition.params @test eltype(θ′) == type end @testset "inference" begin n_pvalue_samples = 64 n_samples = 100 n_mcmc_steps = 10 n_mcmc_thin = 10 test = ExactRankTest(n_samples, n_mcmc_steps, n_mcmc_thin) model = MultiModel(1., 1., [0.]) subject = TestSubject(model, sampler) @test seqmcmctest(test, subject, 0.001, n_pvalue_samples; show_progress=false) end end end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
272
using AbstractMCMC
using Accessors
using Distributions
using LogDensityProblems
using MCMCTesting
using Random
using Test
using Turing
using StableRNGs

using SliceSampling

include("univariate.jl")
include("multivariate.jl")
include("maxprops.jl")
include("turing.jl")
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
1449
@testset "turing compatibility" begin @model function demo() s ~ InverseGamma(2, 3) m ~ Normal(0, sqrt(s)) 1.5 ~ Normal(m, sqrt(s)) 2.0 ~ Normal(m, sqrt(s)) end n_samples = 1000 model = demo() @testset for sampler in [ RandPermGibbs(Slice(1)), RandPermGibbs(SliceSteppingOut(1)), RandPermGibbs(SliceDoublingOut(1)), HitAndRun(Slice(1)), HitAndRun(SliceSteppingOut(1)), HitAndRun(SliceDoublingOut(1)), LatentSlice(5), GibbsPolarSlice(5), ] chain = sample( model, externalsampler(sampler), n_samples; initial_params=[1.0, 0.1], progress=false, ) chain = sample( model, externalsampler(sampler), n_samples; progress=false, ) end @testset "gibbs($sampler)" for sampler in [ RandPermGibbs(Slice(1)), RandPermGibbs(SliceSteppingOut(1)), RandPermGibbs(SliceDoublingOut(1)), Slice(1), SliceSteppingOut(1), SliceDoublingOut(1), ] sample( model, Turing.Experimental.Gibbs( ( s = externalsampler(sampler), m = externalsampler(sampler), ), ), n_samples, progress=false, ) end end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
code
4069
struct UniModel{V <: AbstractVector}
    y::V
end

function MCMCTesting.sample_joint(
    rng::AbstractRNG, ::UniModel{<:AbstractVector{F}}
) where {F <: Real}
    μ = rand(rng, Normal(zero(F), one(F)))
    y = rand(rng, Normal(μ, one(F)), 10)
    [μ], y
end

function MCMCTesting.markovchain_transition(
    rng    ::Random.AbstractRNG,
    model  ::UniModel,
    sampler::SliceSampling.AbstractSliceSampling,
    θ, y
)
    model′ = AbstractMCMC.LogDensityModel(@set model.y = y)
    _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ))
    transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state)
    transition.params
end

function LogDensityProblems.logdensity(
    model::UniModel{<:AbstractVector{F}}, θ
) where {F <: Real}
    y = model.y
    μ = only(θ)
    logpdf(Normal(zero(F), one(F)), μ) +
        sum(Base.Fix1(logpdf, Normal(μ, one(F))), y)
end

function SliceSampling.initial_sample(rng::Random.AbstractRNG, model::UniModel)
    randn(rng, LogDensityProblems.dimension(model))
end

function LogDensityProblems.capabilities(::Type{<:UniModel})
    LogDensityProblems.LogDensityOrder{0}()
end

function LogDensityProblems.dimension(model::UniModel)
    1
end

@testset "univariate samplers" begin
    model = UniModel([0.])

    @testset for sampler in [
        Slice(1),
        SliceSteppingOut(1),
        SliceDoublingOut(1),
    ]
        @testset "initialization" begin
            model  = UniModel([0.0])
            θ, y   = MCMCTesting.sample_joint(Random.default_rng(), model)
            model′ = AbstractMCMC.LogDensityModel(@set model.y = y)

            θ0    = [1.0]
            chain = sample(
                model, sampler, 10;
                initial_params=θ0,
                progress=false,
            )
            @test first(chain).params == θ0
        end

        @testset "initial_sample" begin
            rng   = StableRNG(1)
            model = UniModel([0.0])
            θ0    = SliceSampling.initial_sample(rng, model)

            rng   = StableRNG(1)
            chain = sample(rng, model, sampler, 10; progress=false)
            @test first(chain).params == θ0
        end

        @testset "determinism" begin
            model  = UniModel([0.0])
            θ, y   = MCMCTesting.sample_joint(Random.default_rng(), model)
            model′ = AbstractMCMC.LogDensityModel(@set model.y = y)

            rng = StableRNG(1)
            _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ))
            transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state)
            θ′ = transition.params

            rng = StableRNG(1)
            _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ))
            transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state)
            θ′′ = transition.params

            @test θ′ == θ′′
        end

        @testset "type stability $(type)" for type in [Float32, Float64]
            rng    = Random.default_rng()
            model  = UniModel([zero(type)])
            θ, y   = MCMCTesting.sample_joint(Random.default_rng(), model)
            model′ = AbstractMCMC.LogDensityModel(@set model.y = y)

            @test eltype(θ) == type
            @test eltype(y) == type

            _, init_state = AbstractMCMC.step(rng, model′, sampler; initial_params=copy(θ))
            transition, _ = AbstractMCMC.step(rng, model′, sampler, init_state)
            θ′ = transition.params
            @test eltype(θ′) == type
        end

        @testset "inference" begin
            n_pvalue_samples = 64
            n_samples        = 100
            n_mcmc_steps     = 10
            n_mcmc_thin      = 10
            test             = ExactRankTest(n_samples, n_mcmc_steps, n_mcmc_thin)

            model   = UniModel([0.])
            subject = TestSubject(model, sampler)
            @test seqmcmctest(test, subject, 0.001, n_pvalue_samples; show_progress=false)
        end
    end
end
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
3712
# Slice Sampling Algorithms in Julia

[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://TuringLang.org/SliceSampling.jl/stable/)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://TuringLang.org/SliceSampling.jl/dev/)
[![Build Status](https://github.com/TuringLang/SliceSampling.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/Red-Portal/SliceSampling.jl/actions/workflows/CI.yml?query=branch%3Amain)
[![Coverage](https://codecov.io/gh/TuringLang/SliceSampling.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/Red-Portal/SliceSampling.jl)

This package implements slice sampling algorithms accessible through the `AbstractMCMC` [interface](https://github.com/TuringLang/AbstractMCMC.jl).
For general usage, please refer to the [general usage documentation](https://turinglang.org/SliceSampling.jl/dev/general/).

## Implemented Algorithms

### Univariate Slice Sampling Algorithms

- Univariate slice sampling ([Slice](https://turinglang.org/SliceSampling.jl/dev/univariate_slice/)) algorithms by R. Neal[^N2003]:
  - Fixed window (`Slice`)
  - Stepping-out window adaptation (`SliceSteppingOut`)
  - Doubling-out window adaptation (`SliceDoublingOut`)

### Meta Multivariate Samplers for Augmenting Univariate Samplers

- Random-permutation coordinate-wise Gibbs sampling[^GG1984] (`RandPermGibbs`)
- Hit-and-run sampling[^BRS1993] (`HitAndRun`)

### Multivariate Slice Sampling Algorithms

- Latent slice sampling ([LSS](https://turinglang.org/SliceSampling.jl/dev/latent_slice/)) by Li and Walker[^LW2023] (`LatentSlice`)
- Gibbsian polar slice sampling ([GPSS](https://turinglang.org/SliceSampling.jl/dev/gibbs_polar/)) by P. Schär, M. Habeck, and D. Rudolf[^SHR2023] (`GibbsPolarSlice`)

## Example with Turing Models

This package supports the [Turing](https://github.com/TuringLang/Turing.jl) probabilistic programming framework:

```julia
using Distributions
using Turing
using SliceSampling

@model function demo()
    s ~ InverseGamma(3, 3)
    m ~ Normal(0, sqrt(s))
end

sampler   = RandPermGibbs(SliceSteppingOut(2.))
n_samples = 10000
model     = demo()
sample(model, externalsampler(sampler), n_samples)
```

The following slice samplers can also be used as a conditional sampler in the `Turing.Experimental.Gibbs` sampler:

* For multidimensional variables:
  * `RandPermGibbs`
  * `HitAndRun`
* For unidimensional variables:
  * `Slice`
  * `SliceSteppingOut`
  * `SliceDoublingOut`

See the following example:

```julia
using Distributions
using Turing
using SliceSampling

@model function simple_choice(xs)
    p ~ Beta(2, 2)
    z ~ Bernoulli(p)
    for i in 1:length(xs)
        if z == 1
            xs[i] ~ Normal(0, 1)
        else
            xs[i] ~ Normal(2, 1)
        end
    end
end

sampler = Turing.Experimental.Gibbs(
    (
        p = externalsampler(SliceSteppingOut(2.0)),
        z = PG(20, :z)
    )
)

n_samples = 1000
model     = simple_choice([1.5, 2.0, 0.3])
sample(model, sampler, n_samples)
```

[^N2003]: Neal, R. M. (2003). Slice sampling. The Annals of Statistics, 31(3), 705-767.
[^LW2023]: Li, Y., & Walker, S. G. (2023). A latent slice sampling algorithm. Computational Statistics & Data Analysis, 179, 107652.
[^SHR2023]: Schär, P., Habeck, M., & Rudolf, D. (2023, July). Gibbsian polar slice sampling. In International Conference on Machine Learning.
[^GG1984]: Geman, S., & Geman, D. (1984). Stochastic relaxation, Gibbs distributions, and the Bayesian restoration of images. IEEE Transactions on Pattern Analysis and Machine Intelligence, (6).
[^BRS1993]: Bélisle, C. J., Romeijn, H. E., & Smith, R. L. (1993). Hit-and-run algorithms for generating multivariate distributions. Mathematics of Operations Research, 18(2), 255-266.
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
3894
# General Usage

This package implements the `AbstractMCMC` [interface](https://github.com/TuringLang/AbstractMCMC.jl).
`AbstractMCMC` provides a unifying interface for MCMC algorithms applied to [LogDensityProblems](https://github.com/tpapp/LogDensityProblems.jl).

## Examples

### Drawing Samples From a `LogDensityProblems` Through `AbstractMCMC`

`SliceSampling.jl` implements the [`AbstractMCMC`](https://github.com/TuringLang/AbstractMCMC.jl) interface through [`LogDensityProblems`](https://github.com/tpapp/LogDensityProblems.jl).
That is, one simply needs to define a `LogDensityProblems` problem and pass it to `AbstractMCMC`:

```@example logdensityproblems
using AbstractMCMC
using Distributions
using LinearAlgebra
using LogDensityProblems
using Plots

using SliceSampling

struct Target{D}
    dist::D
end

LogDensityProblems.logdensity(target::Target, x) = logpdf(target.dist, x)

LogDensityProblems.dimension(target::Target) = length(target.dist)

LogDensityProblems.capabilities(::Type{<:Target}) = LogDensityProblems.LogDensityOrder{0}()

sampler         = GibbsPolarSlice(2.0)
n_samples       = 10000
model           = Target(MvTDist(5, zeros(10), Matrix(I, 10, 10)))
logdensitymodel = AbstractMCMC.LogDensityModel(model)

chain   = sample(logdensitymodel, sampler, n_samples; initial_params=randn(10))
samples = hcat([transition.params for transition in chain]...)

plot(samples[1,:], xlabel="Iteration", ylabel="Trace")
savefig("abstractmcmc_demo.svg")
```
![](abstractmcmc_demo.svg)

### Drawing Samples From `Turing` Models

`SliceSampling.jl` can also be used to sample from [Turing](https://github.com/TuringLang/Turing.jl) models through `Turing`'s `externalsampler` interface:

```@example turing
using Distributions
using Turing
using SliceSampling

@model function demo()
    s ~ InverseGamma(3, 3)
    m ~ Normal(0, sqrt(s))
end

sampler   = RandPermGibbs(SliceSteppingOut(2.))
n_samples = 10000
model     = demo()
sample(model, externalsampler(sampler), n_samples)
```

### Conditional sampling in a `Turing.Experimental.Gibbs` sampler

`SliceSampling.jl` can be used as a conditional sampler in `Turing.Experimental.Gibbs`.

```@example turinggibbs
using Distributions
using Turing
using SliceSampling

@model function simple_choice(xs)
    p ~ Beta(2, 2)
    z ~ Bernoulli(p)
    for i in 1:length(xs)
        if z == 1
            xs[i] ~ Normal(0, 1)
        else
            xs[i] ~ Normal(2, 1)
        end
    end
end

sampler = Turing.Experimental.Gibbs(
    (
        p = externalsampler(SliceSteppingOut(2.0)),
        z = PG(20, :z)
    )
)

n_samples = 1000
model     = simple_choice([1.5, 2.0, 0.3])
sample(model, sampler, n_samples)
```

## Drawing Samples

For drawing samples using the algorithms provided by `SliceSampling`, the user only needs to call:
```julia
sample([rng,] model, slice, N; initial_params)
```
- `slice::AbstractSliceSampling`: Any slice sampling algorithm provided by `SliceSampling`.
- `model`: A model implementing the `LogDensityProblems` interface.
- `N`: The number of samples.

The output is a `SliceSampling.Transition` object, which contains the following:
```@docs
SliceSampling.Transition
```

For the keyword arguments, `SliceSampling` allows:
- `initial_params`: The initial state of the Markov chain (default: `nothing`).

If `initial_params` is `nothing`, the following function can be implemented to provide an initialization:
```@docs
SliceSampling.initial_sample
```

## Performing a Single Transition

For more fine-grained control, the user can call `AbstractMCMC.step`.
That is, the chain can be initialized by calling:
```julia
transition, state = AbstractMCMC.step([rng,] model, slice; initial_params)
```
and then each MCMC transition on `state` can be performed by calling:
```julia
transition, state = AbstractMCMC.step([rng,] model, slice, state)
```
For more details, refer to the documentation of `AbstractMCMC`.
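For instance, a complete hand-rolled sampling loop looks as follows (a minimal sketch reusing `logdensitymodel` and `sampler` from the first example on this page, hence the hard-coded dimension of 10; the chain length is arbitrary):

```julia
using Random

# Initialize once, then repeatedly step the chain, collecting the parameters.
function draw_samples(rng, model, sampler, n)
    transition, state = AbstractMCMC.step(rng, model, sampler;
                                          initial_params=randn(10))
    samples = [transition.params]
    for _ in 2:n
        transition, state = AbstractMCMC.step(rng, model, sampler, state)
        push!(samples, transition.params)
    end
    return samples
end

samples = draw_samples(Random.default_rng(), logdensitymodel, sampler, 1000)
```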
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
4715
# [Gibbsian Polar Slice Sampling](@id polar)

## Introduction

Gibbsian polar slice sampling (GPSS) is a recent vector-valued slice sampling algorithm proposed by P. Schär, M. Habeck, and D. Rudolf[^SHR2023].
It is a computationally efficient variant of the polar slice sampler previously proposed by Roberts and Rosenthal[^RR2002].
Unlike other slice sampling algorithms, it operates a Gibbs sampler over polar coordinates, reminiscent of the elliptical slice sampler (ESS).
Due to the involvement of polar coordinates, GPSS only works reliably in more than one dimension.
However, unlike ESS, GPSS is applicable to any target distribution.

## Description

For a $$d$$-dimensional target distribution $$\pi$$, GPSS utilizes the following augmented target distribution:
```math
\begin{aligned}
    p(x, T) &= \varrho_{\pi}^{(0)}(x) \varrho_{\pi}^{(1)}(x) \, \operatorname{Uniform}\left(T; 0, \varrho_{\pi}^{(1)}(x)\right) \\
    \varrho_{\pi}^{(0)}(x) &= {\lVert x \rVert}^{1 - d} \\
    \varrho_{\pi}^{(1)}(x) &= {\lVert x \rVert}^{d-1} \pi\left(x\right)
\end{aligned}
```
As described in Appendix A of the GPSS paper, sampling from $$\varrho^{(1)}(x)$$ in polar coordinates magically targets the augmented target distribution.

Viewed at a high level, GPSS operates a Gibbs sampler in the following fashion:
```math
\begin{aligned}
T_n      &\sim \operatorname{Uniform}\left(0, \varrho^{(1)}\left(x_{n-1}\right)\right) \\
\theta_n &\sim \operatorname{Uniform}\left\{ \theta \in \mathbb{S}^{d-1} \mid \varrho^{(1)}\left(r_{n-1} \theta\right) > T_n \right\} \\
r_n      &\sim \operatorname{Uniform}\left\{ r \in \mathbb{R}_{\geq 0} \mid \varrho^{(1)}\left(r \theta_n\right) > T_n \right\} \\
x_n      &= \theta_n r_n,
\end{aligned}
```
where $$T_n$$ is the usual acceptance threshold auxiliary variable, while $$\theta$$ and $$r$$ are the sampler states in polar coordinates.
The Gibbs steps on $$\theta$$ and $$r$$ are implemented through specialized shrinkage procedures.

The only tunable parameter of the algorithm is the size of the search interval (window) of the shrinkage sampler for the radius variable $$r$$.

!!! info
    The kernel corresponding to this sampler is defined on an **augmented state space** and cannot directly perform a transition on $$x$$.
    This also means that the corresponding kernel is not reversible with respect to $$x$$.

## Interface

!!! info
    By the nature of polar coordinates, GPSS only works reliably for targets with dimension at least $$d \geq 2$$.

```@docs
GibbsPolarSlice
```

!!! warning
    When initializing the chain (*e.g.*, through the `initial_params` keyword argument of `AbstractMCMC.sample`), it is necessary to initialize from a point $$x_0$$ with a sensible norm $$\lVert x_0 \rVert > 0$$; otherwise, the chain will start from a pathological point in polar coordinates.
    This might even result in the sampler getting stuck in an infinite loop.
    (This can be prevented by setting `max_proposals`.)
    If $$\lVert x_0 \rVert \leq 10^{-5}$$, the current implementation will display a warning.

!!! info
    For Turing users: `Turing` might change `initial_params` to match the support of the posterior.
    This might lead to $$\lVert x_0 \rVert$$ being small, even though the vector you passed to `initial_params` has a sufficiently large norm.
    If this is suspected, simply try a different initialization value.

## Demonstration

As illustrated in the original paper, GPSS shows good performance on heavy-tailed targets despite being a multivariate slice sampler.
Consider a 10-dimensional Student-$$t$$ target with 1 degree of freedom (this corresponds to a multivariate Cauchy):

```@example gpss
using Distributions
using Turing
using SliceSampling
using LinearAlgebra
using Plots

@model function demo()
    x ~ MvTDist(1, zeros(10), Matrix(I, 10, 10))
end
model = demo()

n_samples = 1000
latent_chain = sample(model, externalsampler(LatentSlice(10)), n_samples; initial_params=ones(10))
polar_chain  = sample(model, externalsampler(GibbsPolarSlice(10)), n_samples; initial_params=ones(10))

l = @layout [a; b]
p1 = Plots.plot(1:n_samples, latent_chain[:, 1, :], ylims=[-10, 10], label="LSS")
p2 = Plots.plot(1:n_samples, polar_chain[:, 1, :], ylims=[-10, 10], label="GPSS")
plot(p1, p2, layout = l)
savefig("student_latent_gpss.svg")
```
![](student_latent_gpss.svg)

Clearly, GPSS is better at exploring the deep tails compared to the [latent slice sampler](@ref latent) (LSS), despite having a similar per-iteration cost.

[^SHR2023]: Schär, P., Habeck, M., & Rudolf, D. (2023, July). Gibbsian polar slice sampling. In International Conference on Machine Learning.
[^RR2002]: Roberts, G. O., & Rosenthal, J. S. (2002). The polar slice sampler. Stochastic Models, 18(2), 257-280.
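Following the initialization warning above, a random draw provides a safe starting point (a minimal sketch; the dimension and window size are arbitrary, and `model` is the Turing model from the demonstration):

```julia
x0 = randn(10)  # nonzero norm almost surely, avoiding the pathological origin
chain = sample(model, externalsampler(GibbsPolarSlice(10)), 1000; initial_params=x0)
```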
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
1127
```@meta
CurrentModule = SliceSampling
```

# SliceSampling

This package implements slice sampling algorithms.
Slice sampling finds its roots in the Swendsen–Wang algorithm for Ising models[^SW1987][^ES1988].
It later came to the attention of the statistical community through Besag and Green[^BG1993], and was popularized by Neal[^N2003].
Furthermore, Neal introduced various ways to efficiently implement slice samplers.
This package provides the original slice sampling algorithms by Neal and their later extensions.

[^SW1987]: Swendsen, R. H., & Wang, J. S. (1987). Nonuniversal critical dynamics in Monte Carlo simulations. Physical Review Letters, 58(2), 86.
[^ES1988]: Edwards, R. G., & Sokal, A. D. (1988). Generalization of the Fortuin-Kasteleyn-Swendsen-Wang representation and Monte Carlo algorithm. Physical Review D, 38(6), 2009.
[^BG1993]: Besag, J., & Green, P. J. (1993). Spatial statistics and Bayesian computation. Journal of the Royal Statistical Society Series B: Statistical Methodology, 55(1), 25-37.
[^N2003]: Neal, R. M. (2003). Slice sampling. The Annals of Statistics, 31(3), 705-767.

```@index
```
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
2527
# [Latent Slice Sampling](@id latent)

## Introduction

Latent slice sampling is a recent vector-valued slice sampling algorithm proposed by Li and Walker[^LW2023].
Unlike other slice sampling algorithms, it treats the "search intervals" as auxiliary variables and adapts them along the samples from the log-target in a Gibbs-type scheme.

## Description

Specifically, the extended joint density of the latent slice sampler is as follows:
```math
    p(x, t, s, l) = \pi(x) \, p(s) \, \operatorname{Uniform}\left(t; 0, \pi\left(x\right)\right) \, \operatorname{Uniform}\left(l; \; x - s/2,\, x + s/2\right),
```
where $$x$$ is the parameter vector of the log-target $$\pi$$, $$s$$ is the width of the search interval, and $$l$$ is the centering of the search interval relative to $$x$$.
Naturally, the sampler operates as a blocked-Gibbs sampler:
```math
\begin{aligned}
l_n &\sim \operatorname{Uniform}\left(l; \; x_{n-1} - s_{n-1}/2,\, x_{n-1} + s_{n-1}/2\right) \\
s_n &\sim p(s \mid x_{n-1}, l_{n}) \\
t_n &\sim \operatorname{Uniform}\left(0, \pi\left(x_{n-1}\right)\right) \\
x_n &\sim \operatorname{Uniform}\left\{x \mid \pi\left(x\right) > t_n\right\},
\end{aligned}
```
When $$x_n$$ is updated using the usual shrinkage procedure of Neal[^N2003], $$s_n$$ and $$l_n$$ are used to form the initial search window.
($$s_n$$ is the width of the window and $$l_n$$ is its center point.)
Therefore, the latent slice sampler can be regarded as an automatic tuning mechanism for the "initial interval" of slice samplers.

The only tunable parameter of the algorithm is then the distribution of the width $$p(s)$$.
For this, Li and Walker recommend
```math
    p(s; \beta) = \operatorname{Gamma}(s; 2, \beta),
```
where $$\beta$$ is a tunable parameter.
The use of the gamma distribution is somewhat important, since the complete conditional $$p(s \mid x, l)$$ needs to be available in closed form for efficiency.
(It is a truncated exponential distribution in the case of the gamma.)
Therefore, we only provide control over $$\beta$$.

!!! info
    The kernel corresponding to this sampler is defined on an **augmented state space** and cannot directly perform a transition on $$x$$.
    This also means that the corresponding kernel is not reversible with respect to $$x$$.

## Interface

```@docs
LatentSlice
```

[^LW2023]: Li, Y., & Walker, S. G. (2023). A latent slice sampling algorithm. Computational Statistics & Data Analysis, 179, 107652.
[^N2003]: Neal, R. M. (2003). Slice sampling. The Annals of Statistics, 31(3), 705-767.
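A minimal usage sketch through `AbstractMCMC` (assuming `model` implements the `LogDensityProblems` interface for a 10-dimensional target; the choice $$\beta = 5$$ is arbitrary):

```julia
using AbstractMCMC, SliceSampling

sampler = LatentSlice(5.0)  # β, the scale of the Gamma(2, β) width distribution
chain   = sample(AbstractMCMC.LogDensityModel(model), sampler, 10000;
                 initial_params=randn(10))
```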
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
3276
# [Meta Multivariate Samplers](@id meta)

To use univariate slice sampling strategies on targets with more than one dimension, one has to embed them into a "meta" multivariate sampling scheme that relies on univariate sampling elements.
The two most popular approaches for this are Gibbs sampling[^GG1984] and hit-and-run[^BRS1993].

## Random Permutation Gibbs

Gibbs sampling[^GG1984] is a strategy where we sample from the posterior one coordinate at a time, conditioned on the values of all other coordinates.
In practice, one can pick the coordinates in any order as long as it does not depend on the state of the chain.
It is generally hard to know a priori which "scan order" is best, but randomly picking coordinates tends to work well in general.
Currently, we only provide the random permutation scan, which guarantees that all coordinates are updated at least once after $$d$$ transitions.
At the same time, reversibility is maintained by randomly permuting the order we go through each coordinate:

```@docs
RandPermGibbs
```

Each call to `AbstractMCMC.step` internally performs $$d$$ Gibbs transitions so that all coordinates are updated.

For example:
```julia
RandPermGibbs(SliceSteppingOut(2.))
```

If one wants to use a different slice sampler configuration for each coordinate, one can mix and match by passing a `Vector` of slice samplers, one for each coordinate.
For instance, for a 2-dimensional target:
```julia
RandPermGibbs([SliceSteppingOut(2.; max_proposals=32), SliceDoublingOut(2.),])
```

## Hit-and-Run

Hit-and-run is a simple meta algorithm where we sample over a random 1-dimensional projection of the space.
That is, at each iteration, we sample a random direction
```math
    \theta_n \sim \operatorname{Uniform}(\mathbb{S}^{d-1}),
```
and perform a Markov transition along the 1-dimensional subspace
```math
\begin{aligned}
    \lambda_n &\sim p\left(\lambda \mid x_{n-1}, \theta_n \right) \propto \pi\left( x_{n-1} + \lambda \theta_n \right) \\
    x_{n} &= x_{n-1} + \lambda_n \theta_n,
\end{aligned}
```
where $$\pi$$ is the target unnormalized density.
Applying slice sampling for the 1-dimensional subproblem has been popularized by David MacKay[^M2003], and is, technically, also a Gibbs sampler.
(Or is it that Gibbs samplers are hit-and-run samplers?)
Unlike `RandPermGibbs`, which only makes axis-aligned moves, `HitAndRun` can choose arbitrary directions, which could be helpful in some cases.

```@docs
HitAndRun
```

This can be used, for example, as follows:
```julia
HitAndRun(SliceSteppingOut(2.))
```

Unlike `RandPermGibbs`, `HitAndRun` does not provide the option of using a unique `unislice` object for each coordinate.
This is a natural limitation of the hit-and-run sampler: it does not operate on individual coordinates.

[^GG1984]: Geman, S., & Geman, D. (1984). Stochastic relaxation, Gibbs distributions, and the Bayesian restoration of images. IEEE Transactions on Pattern Analysis and Machine Intelligence, (6).
[^BRS1993]: Bélisle, C. J., Romeijn, H. E., & Smith, R. L. (1993). Hit-and-run algorithms for generating multivariate distributions. Mathematics of Operations Research, 18(2), 255-266.
[^M2003]: MacKay, D. J. (2003). Information theory, inference and learning algorithms. Cambridge University Press.
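As an end-to-end sketch, both meta samplers accept the same univariate base sampler (the 2-dimensional "banana" log-density below is a hypothetical example, not part of the package):

```julia
using AbstractMCMC, LogDensityProblems, SliceSampling

# Hypothetical 2-D target with strongly correlated coordinates.
struct Banana end
LogDensityProblems.logdensity(::Banana, x) =
    -x[1]^2 / 20 - (x[2] - x[1]^2)^2 / 2
LogDensityProblems.dimension(::Banana) = 2
LogDensityProblems.capabilities(::Type{<:Banana}) =
    LogDensityProblems.LogDensityOrder{0}()

model = AbstractMCMC.LogDensityModel(Banana())

# Axis-aligned coordinate-wise moves versus random-direction moves.
gibbs_chain = sample(model, RandPermGibbs(SliceSteppingOut(2.0)), 5000;
                     initial_params=zeros(2), progress=false)
har_chain   = sample(model, HitAndRun(SliceSteppingOut(2.0)), 5000;
                     initial_params=zeros(2), progress=false)
```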
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
0.6.1
9f472a2b96a8ff899635d6eb19dd9afeb8bde82b
docs
1353
# Univariate Slice Sampling Algorithms

## Introduction

These algorithms are the "single-variable" slice sampling algorithms originally described by Neal[^N2003].
Since these algorithms are univariate, one has to incorporate them into a "meta" multivariate sampler; these are discussed in [this section](@ref meta).

## Fixed Initial Interval Slice Sampling

This is the most basic form of univariate slice sampling, where the proposals are generated within a fixed interval formed by the `window`.

```@docs
Slice
```

## Adaptive Initial Interval Slice Sampling

These algorithms try to adaptively set the initial interval through a simple search procedure.
The "stepping-out" procedure grows the initial window on a linear scale, while the "doubling-out" procedure grows it geometrically.
`window` controls the scale of the increase.

### What Should I Use?

This highly depends on the problem at hand.
In general, the doubling-out procedure tends to be more expensive, as it requires additional log-target evaluations to decide whether to accept a proposal.
However, if the scale of the posterior varies drastically, doubling out might work better.
In general, it is recommended to use the stepping-out procedure.

```@docs
SliceSteppingOut
SliceDoublingOut
```

[^N2003]: Neal, R. M. (2003). Slice sampling. The Annals of Statistics, 31(3), 705-767.
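For example, each univariate scheme can also be applied directly to a one-dimensional target (a minimal sketch; the unnormalized standard-normal log-density and the window size below are arbitrary choices):

```julia
using AbstractMCMC, LogDensityProblems, SliceSampling

# Hypothetical 1-D target: an unnormalized standard normal log-density.
struct StdNormal end
LogDensityProblems.logdensity(::StdNormal, θ) = -only(θ)^2 / 2
LogDensityProblems.dimension(::StdNormal) = 1
LogDensityProblems.capabilities(::Type{<:StdNormal}) =
    LogDensityProblems.LogDensityOrder{0}()

model = AbstractMCMC.LogDensityModel(StdNormal())
for sampler in (Slice(2.0), SliceSteppingOut(2.0), SliceDoublingOut(2.0))
    chain = sample(model, sampler, 1000; initial_params=[0.1], progress=false)
end
```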
SliceSampling
https://github.com/TuringLang/SliceSampling.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
639
#!/usr/bin/env julia # Root of the repository const repo_root = dirname(@__DIR__) # Make sure docs environment is active and instantiated import Pkg Pkg.activate(@__DIR__) Pkg.instantiate() # Communicate with docs/make.jl that we are running in live mode push!(ARGS, "liveserver") # Run LiveServer.servedocs(...) import LiveServer LiveServer.servedocs(; # Documentation root where make.jl and src/ are located foldername = joinpath(repo_root, "docs"), # Extra source folder to watch for changes include_dirs = [ # Watch the src folder so docstrings can be Revise'd joinpath(repo_root, "src"), ], )
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
1707
const liveserver = "liveserver" in ARGS if liveserver using Revise Revise.revise() end using Documenter using Prometheus # Build it! Documenter.makedocs( sitename = "Prometheus.jl", format = Documenter.HTML( canonical = "https://fredrikekre.github.io/Prometheus.jl/v1", ), modules = [Prometheus], warnonly = true, ) # Rewrite to "single page" mode index = joinpath(@__DIR__, "build/index.html") str = read(index, String) # Remove the sidebar str = replace(str, r"<nav class=\"docs-sidebar\">.*?</nav>" => "") # Remove the breadcrumb str = replace(str, r"<nav class=\"breadcrumb\">.*?</nav>" => "") # Remove the hamburger in narrow mode str = replace(str, r"<a class=\"docs-sidebar-button.*?</a>" => "") # Move the buttons to the right str = replace(str, r"<div class=\"docs-right\">" => "<div class=\"docs-right\" style=\"margin-left: auto;\">") # Center the content str = replace(str, r"<div class=\"docs-main\">" => "<div class=\"docs-main\" style=\"margin: auto; padding-right: 0;\">") # Remove the global docstring folding str = replace(str, r"<a class=\"docs-article-toggle-button.*?</a>" => "") # Write it back write(index, str) # Nuke a function in documenter.js... documenterjs = joinpath(@__DIR__, "build/assets/documenter.js") str = read(documenterjs, String) str = replace( str, """ document.querySelector(".docs-search-query").addEventListener("click", () => { openModal(); }); """ => "" ) write(documenterjs, str) # Deploy it! if !liveserver Documenter.deploydocs( repo = "github.com/fredrikekre/Prometheus.jl.git", push_preview = true, versions = ["v1" => "v^", "v#.#", "dev" => "dev"], ) end
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
36855
# SPDX-License-Identifier: MIT module Prometheus using CodecZlib: GzipCompressorStream using HTTP: HTTP using SimpleBufferStream: BufferStream if VERSION >= v"1.11.0-DEV.469" eval(Meta.parse(""" public CollectorRegistry, register, unregister, Counter, Gauge, Histogram, Summary, GCCollector, ProcessCollector, inc, dec, set, set_to_current_time, observe, @inprogress, @time, Family, labels, remove, clear, expose """ )) end abstract type Collector end ######### # Utils # ######### abstract type PrometheusException <: Exception end struct ArgumentError <: PrometheusException msg::String end function Base.showerror(io::IO, err::ArgumentError) print(io, "Prometheus.", nameof(typeof(err)), ": ", err.msg) end struct AssertionError <: PrometheusException msg::String end macro assert(cond) msg = string(cond) return :($(esc(cond)) || throw(AssertionError($msg))) end function Base.showerror(io::IO, err::AssertionError) print( io, "Prometheus.AssertionError: `", err.msg, "`. This is unexpected, please file an " * "issue at https://github.com/fredrikekre/Prometheus.jl/issues/new.", ) end # https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels # Metric names may contain ASCII letters, digits, underscores, and colons. # It must match the regex [a-zA-Z_:][a-zA-Z0-9_:]*. # Note: The colons are reserved for user defined recording rules. They should # not be used by exporters or direct instrumentation. function verify_metric_name(metric_name::String) metric_name_regex = r"^[a-zA-Z_:][a-zA-Z0-9_:]*$" if !occursin(metric_name_regex, metric_name) throw(ArgumentError("metric name \"$(metric_name)\" is invalid")) end return metric_name end ########################################### # Compat for const fields, @lock, @atomic # ########################################### @eval macro $(Symbol("const"))(field) if VERSION >= v"1.8.0-DEV.1148" Expr(:const, esc(field)) else return esc(field) end end if VERSION < v"1.7.0" # Defined but not exported using Base: @lock end if !isdefined(Base, Symbol("@atomic")) # v1.7.0 const ATOMIC_COMPAT_LOCK = ReentrantLock() macro atomic(expr) if Meta.isexpr(expr, :(::)) return esc(expr) else return quote lock(ATOMIC_COMPAT_LOCK) tmp = $(esc(expr)) unlock(ATOMIC_COMPAT_LOCK) tmp end end end end if !isdefined(Base, :eachsplit) # v1.8.0 const eachsplit = split end ##################### # CollectorRegistry # ##################### struct CollectorRegistry lock::ReentrantLock collectors::Base.IdSet{Collector} function CollectorRegistry() return new(ReentrantLock(), Base.IdSet{Collector}()) end end function register(reg::CollectorRegistry, collector::Collector) existing_names = Set{String}() # TODO: Cache existing_names in the registry? 
@lock reg.lock begin for c in reg.collectors union!(existing_names, metric_names(c)) end for metric_name in metric_names(collector) if metric_name in existing_names throw(ArgumentError( "collector already contains a metric with the name \"$(metric_name)\"" )) end end push!(reg.collectors, collector) end return end function unregister(reg::CollectorRegistry, collector::Collector) @lock reg.lock delete!(reg.collectors, collector) return end ############## # Collectors # ############## # abstract type Collector end function collect(collector::Collector) return collect!(Metric[], collector) end ######################## # Counter <: Collector # ######################## # https://prometheus.io/docs/instrumenting/writing_clientlibs/#counter # TODO: A counter is ENCOURAGED to have: # - A way to count exceptions throw/raised in a given piece of code, and optionally only # certain types of exceptions. This is count_exceptions in Python. mutable struct Counter <: Collector @const metric_name::String @const help::String @atomic value::Float64 function Counter( metric_name::String, help::String; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, ) initial_value = 0.0 counter = new(verify_metric_name(metric_name), help, initial_value) if registry !== nothing register(registry, counter) end return counter end end """ Prometheus.Counter(name, help; registry=DEFAULT_REGISTRY) Construct a Counter collector. **Arguments** - `name :: String`: the name of the counter metric. - `help :: String`: the documentation for the counter metric. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. If not specified the default registry is used. Pass `registry = nothing` to skip registration. **Methods** - [`Prometheus.inc`](@ref): increment the counter. """ Counter(::String, ::String; kwargs...) function metric_names(counter::Counter) return (counter.metric_name, ) end """ Prometheus.inc(counter::Counter, v::Real = 1) Increment the value of the counter with `v`. The value defaults to `v = 1`. Throw a `Prometheus.ArgumentError` if `v < 0` (a counter must not decrease). """ function inc(counter::Counter, v::Real = 1.0) if v < 0 throw(ArgumentError( "invalid value $v: a counter must not decrease" )) end @atomic counter.value += convert(Float64, v) return nothing end function collect!(metrics::Vector, counter::Counter) push!(metrics, Metric( "counter", counter.metric_name, counter.help, Sample(nothing, nothing, nothing, @atomic(counter.value)), ), ) return metrics end ###################### # Gauge <: Collector # ###################### # https://prometheus.io/docs/instrumenting/writing_clientlibs/#gauge mutable struct Gauge <: Collector @const metric_name::String @const help::String @atomic value::Float64 function Gauge( metric_name::String, help::String; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, ) initial_value = 0.0 gauge = new(verify_metric_name(metric_name), help, initial_value) if registry !== nothing register(registry, gauge) end return gauge end end """ Prometheus.Gauge(name, help; registry=DEFAULT_REGISTRY) Construct a Gauge collector. **Arguments** - `name :: String`: the name of the gauge metric. - `help :: String`: the documentation for the gauge metric. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. If not specified the default registry is used. Pass `registry = nothing` to skip registration. 
**Methods** - [`Prometheus.inc`](@ref inc(::Gauge, ::Real)): increment the value of the gauge. - [`Prometheus.dec`](@ref): decrement the value of the gauge. - [`Prometheus.set`](@ref): set the value of the gauge. - [`Prometheus.set_to_current_time`](@ref): set the value of the gauge to the current unixtime. - [`Prometheus.@time`](@ref): time a section and set the value of the the gauge to the elapsed time. - [`Prometheus.@inprogress`](@ref): Track number of inprogress operations; increment the gauge when entering the section, decrement it when leaving. """ Gauge(::String, ::String; kwargs...) function metric_names(gauge::Gauge) return (gauge.metric_name, ) end """ Prometheus.inc(gauge::Gauge, v::Real = 1) Increment the value of the gauge with `v`. `v` defaults to `v = 1`. """ function inc(gauge::Gauge, v::Real = 1.0) @atomic gauge.value += convert(Float64, v) return nothing end """ Prometheus.dec(gauge::Gauge, v::Real = 1) Decrement the value of the gauge with `v`. `v` defaults to `v = 1`. """ function dec(gauge::Gauge, v::Real = 1.0) @atomic gauge.value -= convert(Float64, v) return nothing end """ Prometheus.set(gauge::Gauge, v::Real) Set the value of the gauge to `v`. """ function set(gauge::Gauge, v::Real) @atomic gauge.value = convert(Float64, v) return nothing end """ Prometheus.set_to_current_time(gauge::Gauge) Set the value of the gauge to the current unixtime in seconds. """ function set_to_current_time(gauge::Gauge) @atomic gauge.value = time() return nothing end function collect!(metrics::Vector, gauge::Gauge) push!(metrics, Metric( "gauge", gauge.metric_name, gauge.help, Sample(nothing, nothing, nothing, @atomic(gauge.value)), ), ) return metrics end ########################## # Histogram <: Collector # ########################## # https://prometheus.io/docs/instrumenting/writing_clientlibs/#histogram # A histogram SHOULD have the same default buckets as other client libraries. # https://github.com/prometheus/client_python/blob/d8306b7b39ed814f3ec667a7901df249cee8a956/prometheus_client/metrics.py#L565 const DEFAULT_BUCKETS = [ .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, Inf, ] mutable struct Histogram <: Collector @const metric_name::String @const help::String @const buckets::Vector{Float64} @atomic _count::Int @atomic _sum::Float64 @const bucket_counters::Vector{Threads.Atomic{Int}} function Histogram( metric_name::String, help::String; buckets::Vector{Float64}=DEFAULT_BUCKETS, registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, ) # Make a copy of and verify buckets buckets = copy(buckets) issorted(buckets) || throw(ArgumentError("buckets must be sorted")) length(buckets) > 0 && buckets[end] != Inf && push!(buckets, Inf) length(buckets) < 2 && throw(ArgumentError("must have at least two buckets")) initial_sum = 0.0 initial_count = 0 bucket_counters = [Threads.Atomic{Int}(0) for _ in 1:length(buckets)] histogram = new( verify_metric_name(metric_name), help, buckets, initial_count, initial_sum, bucket_counters, ) if registry !== nothing register(registry, histogram) end return histogram end end """ Prometheus.Histogram(name, help; buckets=DEFAULT_BUCKETS, registry=DEFAULT_REGISTRY) Construct a Histogram collector. **Arguments** - `name :: String`: the name of the histogram metric. - `help :: String`: the documentation for the histogram metric. **Keyword arguments** - `buckets :: Vector{Float64}`: the upper bounds for the histogram buckets. The buckets must be sorted. `Inf` will be added as a last bucket if not already included. 
The default buckets are `DEFAULT_BUCKETS = $(DEFAULT_BUCKETS)`. - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. If not specified the default registry is used. Pass `registry = nothing` to skip registration. **Methods** - [`Prometheus.observe`](@ref): add an observation to the histogram. - [`Prometheus.@time`](@ref): time a section and add the elapsed time as an observation. """ Histogram(::String, ::String; kwargs...) function metric_names(histogram::Histogram) return ( histogram.metric_name * "_count", histogram.metric_name * "_sum", histogram.metric_name, ) end """ Prometheus.observe(histogram::Histogram, v::Real) Add the observed value `v` to the histogram. This increases the sum and count of the histogram with `v` and `1`, respectively, and increments the counter for all buckets containing `v`. """ function observe(histogram::Histogram, v::Real) v = convert(Float64, v) @atomic histogram._count += 1 @atomic histogram._sum += v for (bucket, bucket_counter) in zip(histogram.buckets, histogram.bucket_counters) # TODO: Iterate in reverse and break early if v <= bucket Threads.atomic_add!(bucket_counter, 1) end end return nothing end function collect!(metrics::Vector, histogram::Histogram) label_names = LabelNames(("le",)) push!(metrics, Metric( "histogram", histogram.metric_name, histogram.help, [ Sample("_count", nothing, nothing, @atomic(histogram._count)), Sample("_sum", nothing, nothing, @atomic(histogram._sum)), ( Sample( nothing, label_names, make_label_values(label_names, (histogram.buckets[i],)), histogram.bucket_counters[i][], ) for i in 1:length(histogram.buckets) )..., ] ), ) return metrics end ######################## # Summary <: Collector # ######################## # https://prometheus.io/docs/instrumenting/writing_clientlibs/#summary mutable struct Summary <: Collector @const metric_name::String @const help::String @atomic _count::Int @atomic _sum::Float64 function Summary( metric_name::String, help::String; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, ) initial_count = 0 initial_sum = 0.0 summary = new(verify_metric_name(metric_name), help, initial_count, initial_sum) if registry !== nothing register(registry, summary) end return summary end end """ Prometheus.Summary(name, help; registry=DEFAULT_REGISTRY) Construct a Summary collector. **Arguments** - `name :: String`: the name of the summary metric. - `help :: String`: the documentation for the summary metric. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. If not specified the default registry is used. Pass `registry = nothing` to skip registration. **Methods** - [`Prometheus.observe`](@ref observe(::Summary, ::Real)): add an observation to the summary. - [`Prometheus.@time`](@ref): time a section and add the elapsed time as an observation. """ Summary(::String, ::String; kwargs...) function metric_names(summary::Summary) return (summary.metric_name * "_count", summary.metric_name * "_sum") end """ Prometheus.observe(summary::Summary, v::Real) Add the observed value `v` to the summary. This increases the sum and count of the summary with `v` and `1`, respectively. 
""" function observe(summary::Summary, v::Real) @atomic summary._count += 1 @atomic summary._sum += convert(Float64, v) return nothing end function collect!(metrics::Vector, summary::Summary) push!(metrics, Metric( "summary", summary.metric_name, summary.help, [ Sample("_count", nothing, nothing, @atomic(summary._count)), Sample("_sum", nothing, nothing, @atomic(summary._sum)), ] ), ) return metrics end ################ # "Decorators" # ################ """ Prometheus.@time collector expr Time the evaluation of `expr` and send the elapsed time in seconds to `collector`. The specific action depends on the type of collector: - `collector :: Gauge`: set the value of the gauge to the elapsed time ([`Prometheus.set`](@ref)) - `collector :: Histogram` and `collector :: Summary`: add the elapsed time as an observation ([`Prometheus.observe`](@ref)) The expression to time, `expr`, can be a single expression (for example a function call), or a code block (`begin`, `let`, etc), e.g. ```julia Prometheus.@time collector <expr> Prometheus.@time collector begin <expr> end ``` It is also possible to apply the macro to a function *definition*, i.e. ```julia Prometheus.@time collector function time_this(args...) # function body end ``` to time every call to this function (covering all call sites). """ macro time(collector, expr) return expr_gen(:time, collector, expr) end at_time(gauge::Gauge, v) = set(gauge, v) at_time(summary::Summary, v) = observe(summary, v) at_time(histogram::Histogram, v) = observe(histogram, v) """ Prometheus.@inprogress collector expr Track the number of concurrent in-progress evaluations of `expr`. From the builtin collectors this is only applicable to the [`Gauge`](@ref) -- the value of the gauge is incremented with 1 when entering the section, and decremented with 1 when exiting the section. The expression, `expr`, can be a single expression (for example a function call), or a code block (`begin`, `let`, etc), e.g. ```julia Prometheus.@inprogress collector <expr> Prometheus.@inprogress collector begin <expr> end ``` It is also possible to apply the macro to a function *definition*, i.e. ```julia Prometheus.@inprogress collector function track_this(args...) # function body end ``` to track number of concurrent in-progress calls (covering all call sites). 
""" macro inprogress(collector, expr) return expr_gen(:inprogress, collector, expr) end at_inprogress_enter(gauge::Gauge) = inc(gauge) at_inprogress_exit(gauge::Gauge) = dec(gauge) function expr_gen(macroname, collector, code) if macroname === :time local cllctr, t0, val @gensym cllctr t0 val preamble = Expr[ Expr(:(=), cllctr, esc(collector)), Expr(:(=), t0, Expr(:call, time)), ] postamble = Expr[ Expr(:(=), val, Expr(:call, max, Expr(:call, -, Expr(:call, time), t0), 0.0)), Expr(:call, at_time, cllctr, val) ] elseif macroname === :inprogress local cllctr @gensym cllctr preamble = Expr[ Expr(:(=), cllctr, esc(collector)), Expr(:call, at_inprogress_enter, cllctr), ] postamble = Expr[ Expr(:call, at_inprogress_exit, cllctr) ] else throw(ArgumentError("unknown macro name $(repr(macroname))")) end local ret @gensym ret if Meta.isexpr(code, :function) || Base.is_short_function_def(code) @assert length(code.args) == 2 fsig = esc(code.args[1]) fbody = esc(code.args[2]) return Expr( code.head, # might as well preserve :function or :(=) fsig, Expr( :block, preamble..., Expr( :tryfinally, Expr(:(=), ret, fbody), Expr(:block, postamble...,), ), ret, ), ) else return Expr( :block, preamble..., Expr( :tryfinally, Expr(:(=), ret, esc(code)), Expr(:block, postamble...,), ), ret, ) end end #################################### # Family{<:Collector} <: Collector # #################################### # https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels # - Labels may contain ASCII letters, numbers, as well as underscores. # They must match the regex [a-zA-Z_][a-zA-Z0-9_]*. # - Label names beginning with __ (two "_") are reserved for internal use. function verify_label_name(label_name::String) label_name_regex = r"^[a-zA-Z_][a-zA-Z0-9_]*$" if !occursin(label_name_regex, label_name) || startswith(label_name, "__") throw(ArgumentError("label name \"$(label_name)\" is invalid")) end return label_name end struct LabelNames{N} label_names::NTuple{N, Symbol} function LabelNames(label_names::NTuple{N, Symbol}) where N for label_name in label_names verify_label_name(String(label_name)) end return new{N}(label_names) end end # Tuple of strings function LabelNames(label_names::NTuple{N, String}) where N return LabelNames(map(Symbol, label_names)) end # NamedTuple-type or a (user defined) struct function LabelNames(::Type{T}) where T return LabelNames(fieldnames(T)) end struct LabelValues{N} label_values::NTuple{N, String} end function make_label_values(::LabelNames{N}, label_values::NTuple{N, String}) where N return LabelValues(label_values) end stringify(str::String) = str stringify(str) = String(string(str))::String # Heterogeneous tuple function make_label_values(::LabelNames{N}, label_values::Tuple{Vararg{Any, N}}) where N return LabelValues(map(stringify, label_values)::NTuple{N, String}) end # NamedTuple or a (user defined) struct function make_label_values(label_names::LabelNames{N}, label_values) where N t::NTuple{N, String} = ntuple(N) do i stringify(getfield(label_values, label_names.label_names[i]))::String end return LabelValues{N}(t) end function Base.hash(l::LabelValues, h::UInt) h = hash(0x94a2d04ee9e5a55b, h) # hash("Prometheus.LabelValues") on Julia 1.9.3 for v in l.label_values h = hash(v, h) end return h end function Base.:(==)(l1::LabelValues, l2::LabelValues) return l1.label_values == l2.label_values end struct Family{C, N, F} <: Collector metric_name::String help::String label_names::LabelNames{N} children::Dict{LabelValues{N}, C} lock::ReentrantLock constructor::F function 
Family{C}( metric_name::String, help::String, args_first, args_tail...; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, kwargs..., ) where {C} # Support ... on non-final argument args_all = (args_first, args_tail...,) label_names = last(args_all) args = Base.front(args_all) @assert(isempty(args)) # TODO: Perhaps extract this into # make_constructor(::Type{Collector}, metric_name, help, args...; kwargs...) # so that some Collectors (like Counter) can skip the closure over args and kwargs. function constructor() return C(metric_name, help, args...; kwargs..., registry=nothing)::C end labels = LabelNames(label_names) N = length(labels.label_names) children = Dict{LabelValues{N}, C}() lock = ReentrantLock() family = new{C, N, typeof(constructor)}( verify_metric_name(metric_name), help, labels, children, lock, constructor, ) if registry !== nothing register(registry, family) end return family end end """ Prometheus.Family{C}(name, help, args..., label_names; registry=DEFAULT_REGISTRY, kwargs...) Create a labeled collector family with labels given by `label_names`. For every new set of label values encountered a new collector of type `C <: Collector` will be created, see [`Prometheus.labels`](@ref). **Arguments** - `name :: String`: the name of the family metric. - `help :: String`: the documentation for the family metric. - `args...`: any extra positional arguments required for `C`s constructor, see [`Prometheus.labels`](@ref). - `label_names`: the label names for the family. Label names can be given as either of the following (typically matching the methods label values will be given later, see [`Prometheus.labels`](@ref)): - a tuple of symbols or strings, e.g. `(:target, :status_code)` or `("target", "status_code")` - a named tuple type, e.g. `@NamedTuple{target::String, status_code::Int}` where the names are used as the label names - a custom struct type, e.g. `RequestLabels` defined as ```julia struct RequestLabels target::String status_code::Int end ``` where the field names are used for the label names. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. If not specified the default registry is used. Pass `registry = nothing` to skip registration. - `kwargs...`: any extra keyword arguments required for `C`s constructor, see [`Prometheus.labels`](@ref). **Methods** - [`Prometheus.labels`](@ref): get or create the collector for a specific set of labels. - [`Prometheus.remove`](@ref): remove the collector for a specific set of labels. - [`Prometheus.clear`](@ref): remove all collectors in the family. # Examples ```julia # Construct a family of Counters counter_family = Prometheus.Family{Counter}( "http_requests", "Number of HTTP requests", (:target, :status_code), ) # Increment the counter for the labels `target="/api"` and `status_code=200` Prometheus.inc(Prometheus.labels(counter_family, (target="/api", status_code=200))) ``` """ Family{C}(::String, ::String, ::Any; kwargs...) where C function metric_names(family::Family) return (family.metric_name, ) end """ Prometheus.labels(family::Family{C}, label_values) where C Get or create the collector of type `C` from the family corresponding to the labels given by `label_values`. If no collector exist for the input labels a new one is created by invoking the `C` constructor as `C(name, help, args...; kwargs..., registry=nothing)`, where `name`, `help`, `args...`, and `kwargs...` are the arguments from the family constructor, see [`Family`](@ref). 
Similarly to when creating the [`Family`](@ref), `label_values` can be given as either of the following: - a tuple, e.g. `("/api", 200)` - a named tuple with names matching the label names, e.g.`(target="/api", status_code=200)` - a struct instance with field names matching the label names , e.g. `RequestLabels("/api", 200)` All non-string values (e.g. `200` in the examples above) are stringified using `string`. !!! tip `Base.getindex` is overloaded to have the meaning of `Prometheus.labels` for the family collector: `family[label_values]` is equivalent to `Prometheus.labels(family, label_values)`. !!! note This method does an acquire/release of a lock, and a dictionary lookup, to find the collector matching the label names. For typical applications this overhead does not matter (below 100ns for some basic benchmarks) but it is safe to cache the returned collector if required. """ function labels(family::Family{C, N}, label_values) where {C, N} labels = make_label_values(family.label_names, label_values)::LabelValues{N} collector = @lock family.lock get!(family.children, labels) do family.constructor()::C end return collector end # Support family[labels] as a cute way of extracting the collector function Base.getindex(family::Family, label_values) return labels(family, label_values) end """ Prometheus.remove(family::Family, label_values) Remove the collector corresponding to `label_values`. Effectively this resets the collector since [`Prometheus.labels`](@ref) will recreate the collector when called with the same label names. Refer to [`Prometheus.labels`](@ref) for how to specify `label_values`. !!! note This method invalidates cached collectors for the label names. """ function remove(family::Family{<:Any, N}, label_values) where N labels = make_label_values(family.label_names, label_values)::LabelValues{N} @lock family.lock delete!(family.children, labels) return end """ Prometheus.clear(family::Family) Remove all collectors in the family. Effectively this resets the collectors since [`Prometheus.labels`](@ref) will recreate them when needed. !!! note This method invalidates all cached collectors. """ function clear(family::Family) @lock family.lock empty!(family.children) return end prometheus_type(::Type{Counter}) = "counter" prometheus_type(::Type{Gauge}) = "gauge" prometheus_type(::Type{Histogram}) = "histogram" prometheus_type(::Type{Summary}) = "summary" function collect!(metrics::Vector, family::Family{C}) where C type = prometheus_type(C) samples = Sample[] buf = Metric[] label_names = family.label_names @lock family.lock begin for (label_values, child) in family.children # collect!(...) the child, throw away the metric, but keep the samples child_metrics = collect!(resize!(buf, 0), child) @assert length(child_metrics) == 1 # TODO: maybe this should be supported? child_metric = child_metrics[1] @assert(child_metric.type == type) # Unwrap and rewrap samples with the labels child_samples = child_metric.samples if child_samples isa Sample push!(samples, Sample(child_samples.suffix, label_names, label_values, child_samples.value)) else @assert(child_samples isa Vector{Sample}) for child_sample in child_samples if C === Histogram && (child_sample.label_names !== nothing) && (child_sample.label_values !== nothing) # TODO: Only allow child samples to be labeled for Histogram # collectors for now. 
@assert( length(child_sample.label_names.label_names) == length(child_sample.label_values.label_values) ) # TODO: Bypass constructor verifications merged_names = LabelNames(( label_names.label_names..., child_sample.label_names.label_names..., )) merged_values = LabelValues(( label_values.label_values..., child_sample.label_values.label_values..., )) push!(samples, Sample(child_sample.suffix, merged_names, merged_values, child_sample.value)) else @assert( (child_sample.label_names === nothing) === (child_sample.label_values === nothing) ) push!(samples, Sample(child_sample.suffix, label_names, label_values, child_sample.value)) end end end end end # Sort samples lexicographically by the labels sort!(samples; by = function(x) labels = x.label_values @assert(labels !== nothing) return labels.label_values end) push!( metrics, Metric(type, family.metric_name, family.help, samples), ) return metrics end ############## # Exposition # ############## struct Sample suffix::Union{String, Nothing} # e.g. _count or _sum label_names::Union{LabelNames, Nothing} label_values::Union{LabelValues, Nothing} value::Float64 function Sample( suffix::Union{String, Nothing}, label_names::Union{Nothing, LabelNames{N}}, label_values::Union{Nothing, LabelValues{N}}, value::Real, ) where N @assert((label_names === nothing) === (label_values === nothing)) return new(suffix, label_names, label_values, value) end end struct Metric type::String metric_name::String help::String # TODO: Union{Tuple{Sample}, Vector{Sample}} would always make this iterable. samples::Union{Sample, Vector{Sample}} end function print_escaped(io::IO, help::String, esc) for c in help if c in esc c == '\n' ? print(io, "\\n") : print(io, '\\', c) else print(io, c) end end return end function expose_metric(io::IO, metric::Metric) print(io, "# HELP ", metric.metric_name, " ") print_escaped(io, metric.help, ('\\', '\n')) println(io) println(io, "# TYPE ", metric.metric_name, " ", metric.type) samples = metric.samples if samples isa Sample # Single sample, no labels @assert(samples.label_names === nothing) @assert(samples.label_values === nothing) @assert(samples.suffix === nothing) val = samples.value println(io, metric.metric_name, " ", isinteger(val) ? Int(val) : val) else # Multiple samples, might have labels @assert(samples isa Vector{Sample}) for sample in samples # Print metric name print(io, metric.metric_name) # Print potential suffix if sample.suffix !== nothing print(io, sample.suffix) end # Print potential labels label_names = sample.label_names label_values = sample.label_values @assert((label_names === nothing) === (label_values === nothing)) if label_names !== nothing && label_values !== nothing first = true print(io, "{") for (name, value) in zip(label_names.label_names, label_values.label_values) first || print(io, ",") print(io, name, "=\"") print_escaped(io, value, ('\\', '\"', '\n')) print(io, "\"") first = false end print(io, "}") end # Print the value println(io, " ", isinteger(sample.value) ? Int(sample.value) : sample.value) end end end """ Prometheus.expose(file::String, reg::CollectorRegistry = DEFAULT_REGISTRY) Export all metrics in `reg` by writing them to the file `file`. 
""" function expose(path::String, reg::CollectorRegistry = DEFAULT_REGISTRY) dir = dirname(path) mkpath(dir) mktemp(dirname(path)) do tmp_path, tmp_io expose_io(tmp_io, reg) close(tmp_io) mv(tmp_path, path; force=true) end return end """ expose(io::IO, reg::CollectorRegistry = DEFAULT_REGISTRY) Export all metrics in `reg` by writing them to the I/O stream `io`. """ function expose(io::IO, reg::CollectorRegistry = DEFAULT_REGISTRY) return expose_io(io, reg) end function expose_io(io::IO, reg::CollectorRegistry) # Collect all metrics metrics = Metric[] @lock reg.lock begin for collector in reg.collectors collect!(metrics, collector) end end sort!(metrics; by = metric -> metric.metric_name) # Write to IO buf = IOBuffer(; maxsize=1024^2) # 1 MB for metric in metrics truncate(buf, 0) expose_metric(buf, metric) seekstart(buf) write(io, buf) end return end ####################### # HTTP.jl integration # ####################### const CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" function gzip_accepted(http::HTTP.Stream) accept_encoding = HTTP.header(http.message, "Accept-Encoding") for enc in eachsplit(accept_encoding, ',') if lowercase(strip(first(eachsplit(enc, ';')))) == "gzip" return true end end return false end """ expose(http::HTTP.Stream, reg::CollectorRegistry = DEFAULT_REGISTRY; kwargs...) Export all metrics in `reg` by writing them to the HTTP stream `http`. The caller is responsible for checking e.g. the HTTP method and URI target. For HEAD requests this method do not write a body, however. """ function expose(http::HTTP.Stream, reg::CollectorRegistry = DEFAULT_REGISTRY; compress::Bool=true) # TODO: Handle Accept request header for different formats? # Compress by default if client supports it and user haven't disabled it if compress compress = gzip_accepted(http) end # Create the response HTTP.setstatus(http, 200) HTTP.setheader(http, "Content-Type" => CONTENT_TYPE_LATEST) if compress HTTP.setheader(http, "Content-Encoding" => "gzip") end HTTP.startwrite(http) # The user is responsible for making sure that e.g. target and method is # correct, but at least we skip writing the body for HEAD requests. if http.message.method != "HEAD" if compress buf = BufferStream() gzstream = GzipCompressorStream(buf) tsk = @async try expose_io(gzstream, reg) finally # Close the compressor stream to free resources in zlib and # to let the write(http, buf) below finish. close(gzstream) end write(http, buf) wait(tsk) else expose_io(http, reg) end end return end include("gc_collector.jl") include("process_collector.jl") # Default registry and collectors const DEFAULT_REGISTRY = CollectorRegistry() const GC_COLLECTOR = GCCollector(; registry=DEFAULT_REGISTRY) const PROCESS_COLLECTOR = ProcessCollector(; registry=DEFAULT_REGISTRY) end # module Prometheus
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
3244
# SPDX-License-Identifier: MIT ############################ # GCCollector <: Collector # ############################ mutable struct GCCollector <: Collector function GCCollector(; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY) gcc = new() if registry !== nothing register(registry, gcc) end return gcc end end """ Prometheus.GCCollector(; registry=DEFAULT_REGISTRY) Create a collector that exports metrics about allocations and garbage collection. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. The default registry is used by default. Pass `registry = nothing` to skip registration. !!! note A `GCCollector` is registered automatically with the default registry. If necessary it can be removed by calling ```julia Prometheus.unregister(Prometheus.DEFAULT_REGISTRY, Prometheus.GC_COLLECTOR) ``` """ GCCollector(; kwargs...) function metric_names(::GCCollector) return ( "julia_gc_alloc_total", "julia_gc_free_total", "julia_gc_alloc_bytes_total", "julia_gc_live_bytes", "julia_gc_seconds_total", "julia_gc_collections_total", ) end function collect!(metrics::Vector, ::GCCollector) # See base/timing.jl gc_num = Base.gc_num() gc_live_bytes = Base.gc_live_bytes() # Push all the metrics push!(metrics, let label_names = LabelNames(("type",)) Metric( "counter", "julia_gc_alloc_total", "Total number of allocations (calls to malloc, realloc, etc)", [ Sample(nothing, label_names, LabelValues(("bigalloc",)), gc_num.bigalloc), Sample(nothing, label_names, LabelValues(("malloc",)), gc_num.malloc), Sample(nothing, label_names, LabelValues(("poolalloc",)), gc_num.poolalloc), Sample(nothing, label_names, LabelValues(("realloc",)), gc_num.realloc), ], ) end, Metric( "counter", "julia_gc_free_total", "Total number of calls to free()", Sample(nothing, nothing, nothing, gc_num.freecall), ), Metric( "counter", "julia_gc_alloc_bytes_total", "Total number of allocated bytes", Sample(nothing, nothing, nothing, Base.gc_total_bytes(gc_num)), ), Metric( "gauge", "julia_gc_live_bytes", "Current number of live bytes", Sample(nothing, nothing, nothing, gc_live_bytes), ), Metric( "counter", "julia_gc_seconds_total", "Total time spent in garbage collection", Sample(nothing, nothing, nothing, gc_num.total_time / 10^9), # [ns] to [s] ), let label_names = LabelNames(("type",)) Metric( "counter", "julia_gc_collections_total", "Total number of calls to garbage collection", [ Sample(nothing, label_names, LabelValues(("full",)), gc_num.full_sweep), Sample(nothing, label_names, LabelValues(("minor",)), gc_num.pause - gc_num.full_sweep), ], ) end, ) return metrics end
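# Usage sketch (illustrative, not part of the library source): collecting the
# GC metrics defined above via a dedicated registry instead of the default one.
using Prometheus

reg = Prometheus.CollectorRegistry()
gcc = Prometheus.GCCollector(; registry = reg)
# Prometheus.collect returns a snapshot of the collector's current metrics
metrics = Prometheus.collect(gcc)
@assert any(m -> m.metric_name == "julia_gc_live_bytes", metrics)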
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
11057
# SPDX-License-Identifier: MIT ################################# # ProcessCollector <: Collector # ################################# mutable struct ProcessCollector <: Collector @const pid::Function @atomic initialized::Ptr{Nothing} @atomic system_boot_time::Int @atomic clock_ticks_per_second::Int @atomic pagesize::Int function ProcessCollector( pid::Function = () -> "self"; registry::Union{CollectorRegistry, Nothing}=DEFAULT_REGISTRY, ) procc = new(pid, C_NULL, 0, 0, 0) if registry !== nothing register(registry, procc) end return procc end end # Initialize the ProcessCollector on first use in a given process. This is necessary because # typically collectors are defined as global variables which may have been cached during # precompilation. The struct field initialized::Ptr is used to detect this: if it is NULL, # then either the collector was constructed in this session (since it is set to null in the # inner constructor), or it was deserialized from a cache file (since pointers are zeroed in # the precompilation serialize/deserialize process). Important to note is that this property # holds even if the collector was initialized in the process that output the serialized # file. This would not hold for e.g. an initialized::Bool field. function initialize_process_collector(procc::ProcessCollector) if procc.initialized !== C_NULL return end system_boot_time = 0 try proc_stat = read("/proc/stat", String) m = match(r"^btime\s+(\d+)"m, proc_stat)::RegexMatch system_boot_time = parse(Int, m.captures[1]::AbstractString) catch e @debug "ProcessCollector: /proc is not available or not readable, disabling." e end # Fetch clock ticks per second clock_ticks_per_second = 0 try cmd = pipeline(`getconf CLK_TCK`, stderr=devnull) str = read(cmd, String) clock_ticks_per_second = parse(Int, strip(str)) catch e if system_boot_time > 0 @debug "ProcessCollector: /proc is available but could not read " * "CLK_TCK from getconf, partially disabling." e end end # Fetch pagesize pagesize = 0 try cmd = pipeline(`getconf PAGESIZE`, stderr=devnull) str = read(cmd, String) pagesize = parse(Int, strip(str)) catch e if system_boot_time > 0 @debug "ProcessCollector: /proc is available but could not read " * "PAGESIZE from getconf, partially disabling." e end end # Set the values and return @atomic procc.system_boot_time = system_boot_time @atomic procc.clock_ticks_per_second = clock_ticks_per_second @atomic procc.pagesize = pagesize @atomic procc.initialized = Ptr{Nothing}(0xdeadbeef % UInt) return end """ Prometheus.ProcessCollector(pid; registry=DEFAULT_REGISTRY) Create a process collector for the process id given by the `pid` function. The collector exposes metrics about the process' CPU time, start time, memory usage, file usage, and I/O operations. **Arguments** - `pid :: Function`: a function returning a process id as a string or integer for which to collect metrics. By default the `"self"` pid is used, i.e. the current process. **Keyword arguments** - `registry :: Prometheus.CollectorRegistry`: the registry in which to register the collector. The default registry is used by default. Pass `registry = nothing` to skip registration. !!! note A `ProcessCollector` for the current process is registered automatically with the default registry. If necessary it can be removed by calling ```julia Prometheus.unregister(Prometheus.DEFAULT_REGISTRY, Prometheus.PROCESS_COLLECTOR) ``` !!! note The process collector is currently only available on Linux since it requires the `/proc` file system. 
On Windows and macOS this collector will not expose any metrics. """ ProcessCollector(::Function; kwargs...) function metric_names(::ProcessCollector) return ( "process_cpu_seconds_total", "process_start_time_seconds", "process_virtual_memory_bytes", "process_resident_memory_bytes", "process_open_fds", "process_io_rchar_bytes_total", "process_io_wchar_bytes_total", "process_io_syscr_total", "process_io_syscw_total", "process_io_read_bytes_total", "process_io_write_bytes_total" ) end function collect!(metrics::Vector, procc::ProcessCollector) initialize_process_collector(procc) @assert procc.initialized !== C_NULL # Unpack variables system_boot_time = procc.system_boot_time clock_ticks_per_second = procc.clock_ticks_per_second pagesize = procc.pagesize # If reading the system boot time from /proc/stat failed then that is used as an # indicator for a missing or unreadable /proc fs, so return early procc.system_boot_time == 0 && return metrics # Fetch the pid pid = try String(strip(string(procc.pid()::Union{AbstractString,Integer})))::String catch e @error "ProcessCollector: could not look up the pid from the lambda" e return metrics end if isempty(pid) || !isdir("/proc/$(pid)") @error "ProcessCollector: invalid pid '$(pid)' from lambda: /proc/$(pid)/ does not exist" return metrics end # Read /proc/$(pid)/stat proc_stat = nothing try proc_stat = read("/proc/$(pid)/stat", String) catch e @error "ProcessCollector: could not read /proc/$(pid)/stat" e end if proc_stat !== nothing fields = split(split(proc_stat, ')')[end]) # This strips off the first two fields # CPU time and start time require clock_ticks_per_second if clock_ticks_per_second > 0 utime = parse(Int, fields[14 - 2]) / clock_ticks_per_second stime = parse(Int, fields[15 - 2]) / clock_ticks_per_second label_names = LabelNames(("mode",)) proc_cpu_seconds = Metric( "counter", "process_cpu_seconds_total", "Total CPU time (user and system mode) in seconds.", [ Sample(nothing, label_names, LabelValues(("system",)), stime), Sample(nothing, label_names, LabelValues(("user",)), utime), ], ) push!(metrics, proc_cpu_seconds) # Process start time starttime = parse(Int, fields[22 - 2]) / clock_ticks_per_second proc_start_time = Metric( "gauge", "process_start_time_seconds", "Start time since unix epoch in seconds.", Sample(nothing, nothing, nothing, starttime + system_boot_time), ) push!(metrics, proc_start_time) end # Virtual memory vsize = parse(Int, fields[23 - 2]) proc_virtual_memory = Metric( "gauge", "process_virtual_memory_bytes", "Virtual memory size in bytes.", Sample(nothing, nothing, nothing, vsize), ) push!(metrics, proc_virtual_memory) if pagesize > 0 # Resident memory rss = parse(Int, fields[24 - 2]) proc_resident_memory = Metric( "gauge", "process_resident_memory_bytes", "Resident memory size (RSS) in bytes.", Sample(nothing, nothing, nothing, rss * pagesize), ) push!(metrics, proc_resident_memory) end end # Read /proc/$(pid)/fd proc_fd = nothing try proc_fd = length(readdir("/proc/$(pid)/fd")) catch e @error "ProcessCollector: could not read /proc/$(pid)/fd" e end if proc_fd !== nothing # Open file descriptors proc_open_fds = Metric( "gauge", "process_open_fds", "Number of open file descriptors.", Sample(nothing, nothing, nothing, proc_fd), ) push!(metrics, proc_open_fds) # TODO: Maybe add maximum open fds from /proc/$(pid)/limits like the Python client end # Read /proc/$(pid)/io proc_io = nothing try proc_io = read("/proc/$(pid)/io", String) catch e @error "ProcessCollector: could not read /proc/$(pid)/io" e end if proc_io 
!== nothing rchar = match(r"rchar:\s+(\d+)", proc_io) if rchar !== nothing proc_io_rchar = Metric( "counter", "process_io_rchar_bytes_total", "Total number of bytes read in bytes (rchar from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, rchar.captures[1]::AbstractString)), ) push!(metrics, proc_io_rchar) end wchar = match(r"wchar:\s+(\d+)", proc_io) if wchar !== nothing proc_io_wchar = Metric( "counter", "process_io_wchar_bytes_total", "Total number of bytes written in bytes (wchar from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, wchar.captures[1]::AbstractString)), ) push!(metrics, proc_io_wchar) end syscr = match(r"syscr:\s+(\d+)", proc_io) if syscr !== nothing proc_io_syscr = Metric( "counter", "process_io_syscr_total", "Total number of read I/O operations (syscalls) (syscr from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, syscr.captures[1]::AbstractString)), ) push!(metrics, proc_io_syscr) end syscw = match(r"syscw:\s+(\d+)", proc_io) if syscw !== nothing proc_io_syscw = Metric( "counter", "process_io_syscw_total", "Total number of write I/O operations (syscalls) (syscw from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, syscw.captures[1]::AbstractString)), ) push!(metrics, proc_io_syscw) end read_bytes = match(r"read_bytes:\s+(\d+)", proc_io) if read_bytes !== nothing proc_io_read_bytes = Metric( "counter", "process_io_read_bytes_total", "Total number of bytes read from the file system (read_bytes from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, read_bytes.captures[1]::AbstractString)), ) push!(metrics, proc_io_read_bytes) end write_bytes = match(r"write_bytes:\s+(\d+)", proc_io) if write_bytes !== nothing proc_io_write_bytes = Metric( "counter", "process_io_write_bytes_total", "Total number of bytes written to the file system (write_bytes from /proc/[pid]/io).", Sample(nothing, nothing, nothing, parse(Int, write_bytes.captures[1]::AbstractString)), ) push!(metrics, proc_io_write_bytes) end end return metrics end
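# Usage sketch (illustrative, not part of the library source): a process
# collector with an explicit pid function. On platforms without /proc (e.g.
# Windows and macOS) the collector produces no samples, as documented above.
using Prometheus

procc = Prometheus.ProcessCollector(() -> getpid(); registry = nothing)
metrics = Prometheus.collect(procc)  # empty unless /proc is available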
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
code
31990
# SPDX-License-Identifier: MIT using HTTP: HTTP using Prometheus: Prometheus using Test: @test, @test_logs, @test_throws, @testset @testset "Prometheus.CollectorRegistry" begin empty!(Prometheus.DEFAULT_REGISTRY.collectors) # Default registry c = Prometheus.Counter("metric_name_counter", "A counter.") @test c in Prometheus.DEFAULT_REGISTRY.collectors @test_throws( Prometheus.ArgumentError("collector already contains a metric with the name \"metric_name_counter\""), Prometheus.Counter("metric_name_counter", "A counter."), ) Prometheus.unregister(Prometheus.DEFAULT_REGISTRY, c) @test !(c in Prometheus.DEFAULT_REGISTRY.collectors) c2 = Prometheus.Counter("metric_name_counter", "A counter.") @test c2 in Prometheus.DEFAULT_REGISTRY.collectors # Provided registry r = Prometheus.CollectorRegistry() c = Prometheus.Counter("metric_name_counter", "A counter."; registry=r) @test c in r.collectors @test !(c in Prometheus.DEFAULT_REGISTRY.collectors) # No registry on construction, register after c = Prometheus.Counter("metric_name_counter", "A counter."; registry=nothing) @test !(c in Prometheus.DEFAULT_REGISTRY.collectors) r = Prometheus.CollectorRegistry() Prometheus.register(r, c) @test c in r.collectors @test_throws( Prometheus.ArgumentError("collector already contains a metric with the name \"metric_name_counter\""), Prometheus.register(r, c), ) end @testset "Prometheus.Counter" begin # Constructors and implicit registration empty!(Prometheus.DEFAULT_REGISTRY.collectors) c = Prometheus.Counter("metric_name_counter", "A counter.") @test c in Prometheus.DEFAULT_REGISTRY.collectors r = Prometheus.CollectorRegistry() c = Prometheus.Counter("metric_name_counter", "A counter."; registry=r) @test c in r.collectors @test c.value == 0 @test_throws( Prometheus.ArgumentError("metric name \"invalid-name\" is invalid"), Prometheus.Counter("invalid-name", "help"), ) # Prometheus.inc(...) Prometheus.inc(c) @test c.value == 1 Prometheus.inc(c, 0) @test c.value == 1 Prometheus.inc(c, 2) @test c.value == 3 @test_throws Prometheus.ArgumentError Prometheus.inc(c, -1) # Prometheus.collect(...) metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help @test metric.samples.value == c.value # Prometheus.expose_metric(...) @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP metric_name_counter A counter. # TYPE metric_name_counter counter metric_name_counter 3 """ end @testset "Prometheus.Gauge" begin # Constructors and implicit registration empty!(Prometheus.DEFAULT_REGISTRY.collectors) c = Prometheus.Gauge("metric_name_gauge", "A gauge.") @test c in Prometheus.DEFAULT_REGISTRY.collectors r = Prometheus.CollectorRegistry() c = Prometheus.Gauge("metric_name_gauge", "A gauge."; registry=r) @test c in r.collectors @test c.value == 0 @test_throws( Prometheus.ArgumentError("metric name \"invalid-name\" is invalid"), Prometheus.Gauge("invalid-name", "help"), ) # Prometheus.inc(...) Prometheus.inc(c) @test c.value == 1 Prometheus.inc(c, 0) @test c.value == 1 Prometheus.inc(c, 2) @test c.value == 3 # Prometheus.dec(...) Prometheus.dec(c) @test c.value == 2 Prometheus.dec(c, 1) @test c.value == 1 # Prometheus.set_to_current_time(...) t0 = time() sleep(0.1) Prometheus.set_to_current_time(c) sleep(0.1) @test t0 < c.value < time() # Prometheus.set(...) Prometheus.set(c, 42) @test c.value == 42 # Prometheus.collect(...) 
metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help @test metric.samples.value == c.value # Prometheus.expose_metric(...) @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP metric_name_gauge A gauge. # TYPE metric_name_gauge gauge metric_name_gauge 42 """ end @testset "Prometheus.Summary" begin # Constructors and implicit registration empty!(Prometheus.DEFAULT_REGISTRY.collectors) c = Prometheus.Summary("metric_name_summary", "A summary.") @test c in Prometheus.DEFAULT_REGISTRY.collectors r = Prometheus.CollectorRegistry() c = Prometheus.Summary("metric_name_summary", "A summary."; registry=r) @test c in r.collectors @test c._count == 0 @test c._sum == 0 @test_throws( Prometheus.ArgumentError("metric name \"invalid-name\" is invalid"), Prometheus.Summary("invalid-name", "help"), ) # Prometheus.observe(...) Prometheus.observe(c, 1) @test c._count == 1 @test c._sum == 1 Prometheus.observe(c, 10) @test c._count == 2 @test c._sum == 11 # Prometheus.collect(...) metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help @test length(metric.samples) == 2 s1, s2 = metric.samples[1], metric.samples[2] @test s1.suffix == "_count" @test s2.suffix == "_sum" @test s1.label_values === nothing @test s2.label_values === nothing @test s1.value == 2 @test s2.value == 11 # Prometheus.expose_metric(...) @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP metric_name_summary A summary. # TYPE metric_name_summary summary metric_name_summary_count 2 metric_name_summary_sum 11 """ end @testset "Prometheus.Histogram" begin # Constructors and implicit registration empty!(Prometheus.DEFAULT_REGISTRY.collectors) c = Prometheus.Histogram("metric_name_histogram", "A histogram.") @test c in Prometheus.DEFAULT_REGISTRY.collectors r = Prometheus.CollectorRegistry() c = Prometheus.Histogram("metric_name_histogram", "A histogram."; registry=r) @test c in r.collectors @test c.buckets == Prometheus.DEFAULT_BUCKETS @test c._count == 0 @test c._sum == 0 @test all(x -> x[] == 0, c.bucket_counters) @test_throws( Prometheus.ArgumentError("metric name \"invalid-name\" is invalid"), Prometheus.Histogram("invalid-name", "help"), ) # Prometheus.observe(...) v1 = 0.9 Prometheus.observe(c, v1) @test c._count == 1 @test c._sum == v1 for (ub, counter, known_count) in zip(c.buckets, c.bucket_counters, [zeros(Int, 9); ones(Int, 6)]) @test counter[] == (v1 > ub ? 0 : 1) == known_count end v2 = 10v1 Prometheus.observe(c, v2) @test c._count == 2 @test c._sum == v1 + v2 for (ub, counter, known_count) in zip(c.buckets, c.bucket_counters, [zeros(Int, 9); [1, 1, 1, 1]; [2, 2]]) @test counter[] == ((v2 > ub && v1 > ub) ? 0 : v2 > ub ? 1 : 2) == known_count end # Prometheus.collect(...) 
r = Prometheus.CollectorRegistry() buckets = [1.0, 2.0, Inf] c = Prometheus.Histogram("metric_name_histogram", "A histogram."; buckets=buckets, registry=r) Prometheus.observe(c, 0.5) Prometheus.observe(c, 1.6) metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help @test length(metric.samples) == length(buckets) + 2 s1, s2 = metric.samples[1], metric.samples[2] @test s1.suffix == "_count" @test s2.suffix == "_sum" @test s1.label_values === nothing @test s2.label_values === nothing @test s1.value == 2 @test s2.value == 0.5 + 1.6 for (ub, counter, sample, known_count) in zip(c.buckets, c.bucket_counters, metric.samples[3:end], [1, 2, 2]) @test sample.suffix === nothing @test (sample.label_names::Prometheus.LabelNames{1}).label_names === (:le,) @test (sample.label_values::Prometheus.LabelValues{1}).label_values == (string(ub),) @test sample.value == counter[] == known_count end # Prometheus.expose_metric(...) @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP metric_name_histogram A histogram. # TYPE metric_name_histogram histogram metric_name_histogram_count 2 metric_name_histogram_sum 2.1 metric_name_histogram{le="1.0"} 1 metric_name_histogram{le="2.0"} 2 metric_name_histogram{le="Inf"} 2 """ end @testset "Prometheus.LabelNames and Prometheus.LabelValues" begin @test_throws( Prometheus.ArgumentError("label name \"invalid-label\" is invalid"), Prometheus.LabelNames(("invalid-label",)), ) # Custom hashing of values v1 = Prometheus.LabelValues(("foo", "bar")) v2 = Prometheus.LabelValues(("foo", "bar")) v3 = Prometheus.LabelValues(("foo", "baz")) @test hash(v1) == hash(v2) @test hash(v1) != hash(v3) @test v1 == v2 @test v1 != v3 @test isequal(v1, v2) @test !isequal(v1, v3) end @testset "Prometheus.Family{$(Collector)}" for Collector in (Prometheus.Counter, Prometheus.Gauge) # Constructors and implicit registration empty!(Prometheus.DEFAULT_REGISTRY.collectors) c = Prometheus.Family{Collector}( "http_requests", "Number of HTTP requests.", ("endpoint", "status_code"), ) @test c in Prometheus.DEFAULT_REGISTRY.collectors r = Prometheus.CollectorRegistry() c = Prometheus.Family{Collector}( "http_requests", "Number of HTTP requests.", ("endpoint", "status_code"); registry = r, ) @test c in r.collectors @test length(c.children) == 0 @test_throws( Prometheus.ArgumentError("metric name \"invalid-name\" is invalid"), Prometheus.Family{Collector}("invalid-name", "help", ("label",)), ) @test_throws( Prometheus.ArgumentError("label name \"invalid-label\" is invalid"), Prometheus.Family{Collector}("valid_name", "help", ("invalid-label",)), ) # Prometheus.labels(...), Prometheus.remove(...), Prometheus.clear() l1 = ("/foo/", "200") l2 = ("/bar/", "404") @test Prometheus.labels(c, l1) === Prometheus.labels(c, l1) === c[l1] @test Prometheus.labels(c, l2) === Prometheus.labels(c, l2) === c[l2] @test length(c.children) == 2 @test Prometheus.labels(c, l1).value == 0 @test Prometheus.labels(c, l2).value == 0 Prometheus.remove(c, l1) @test length(c.children) == 1 Prometheus.clear(c) @test length(c.children) == 0 # Prometheus.inc(...) 
Prometheus.inc(Prometheus.labels(c, l1)) Prometheus.inc(Prometheus.labels(c, l2)) @test Prometheus.labels(c, l1).value == 1 @test Prometheus.labels(c, l2).value == 1 Prometheus.inc(Prometheus.labels(c, l1), 2) Prometheus.inc(Prometheus.labels(c, l2), 2) @test Prometheus.labels(c, l1).value == 3 @test Prometheus.labels(c, l2).value == 3 # Prometheus.collect(...) metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help @test length(metric.samples) == 2 s1, s2 = metric.samples[1], metric.samples[2] @test s1.label_values.label_values == ("/bar/", "404") @test s2.label_values.label_values == ("/foo/", "200") @test s1.value == 3 @test s2.value == 3 # Prometheus.expose_metric(...) type = Collector === Prometheus.Counter ? "counter" : "gauge" @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP http_requests Number of HTTP requests. # TYPE http_requests $(type) http_requests{endpoint="/bar/",status_code="404"} 3 http_requests{endpoint="/foo/",status_code="200"} 3 """ end @testset "Prometheus.@time gauge::Gauge" begin gauge = Prometheus.Gauge("call_time_last", "Time of last call"; registry=nothing) Prometheus.@time gauge sleep(0.1) @test 0.3 > gauge.value > 0.1 Prometheus.@time gauge let sleep(0.1) end @test 0.3 > gauge.value > 0.1 Prometheus.@time gauge f() = sleep(0.1) @sync begin @async f() @async f() end @test 0.3 > gauge.value > 0.1 Prometheus.@time gauge function g() sleep(0.1) end @sync begin @async g() @async g() end @test 0.3 > gauge.value > 0.1 end @testset "Prometheus.@time collector::$(Collector)" for Collector in (Prometheus.Histogram, Prometheus.Summary) ishist = Collector === Prometheus.Histogram buckets = [1.0, Inf] collector = Collector( "call_time", "Time of calls"; (ishist ? 
(; buckets=buckets) : (;))..., registry=nothing, ) Prometheus.@time collector sleep(0.1) @test 0.3 > collector._sum > 0.1 @test collector._count == 1 ishist && @test (x->x[]).(collector.bucket_counters) == [1, 1] Prometheus.@time collector let sleep(0.1) end @test 0.4 > collector._sum > 0.2 @test collector._count == 2 ishist && @test (x->x[]).(collector.bucket_counters) == [2, 2] Prometheus.@time collector f() = sleep(0.1) @sync begin @async f() @async f() end @test 0.7 > collector._sum > 0.4 @test collector._count == 4 ishist && @test (x->x[]).(collector.bucket_counters) == [4, 4] Prometheus.@time collector function g() sleep(0.1) end @sync begin @async g() @async g() end @test 0.9 > collector._sum > 0.6 @test collector._count == 6 ishist && @test (x->x[]).(collector.bucket_counters) == [6, 6] if ishist Prometheus.@time collector sleep(1.1) @test (x->x[]).(collector.bucket_counters) == [6, 7] end end @testset "Prometheus.@inprogress gauge::Gauge" begin gauge = Prometheus.Gauge("calls_inprogres", "Number of calls in progress"; registry=nothing) Prometheus.@inprogress gauge sleep(0.01) @test gauge.value == 0.0 Prometheus.@inprogress gauge let sleep(0.01) end @test gauge.value == 0.0 Prometheus.@inprogress gauge f() = sleep(0.01) @sync begin @async f() @async f() end @test gauge.value == 0.0 Prometheus.@inprogress gauge function g() sleep(0.01) end @sync begin @async g() @async g() end @test gauge.value == 0.0 # Concurrency tests @sync begin tsks = Vector{Task}(undef, 100) for i in 1:100 tsk = @async begin 0 <= gauge.value <= 100 || error() Prometheus.@inprogress gauge sleep(1 + rand()) end tsks[i] = tsk end # Make sure all tasks have started before testing the value while any(!istaskstarted, tsks) sleep(0.1) end @test gauge.value == 100 end @test gauge.value == 0 end # TODO: Document interface and test it @testset "Custom collector with @time/@inprogress" begin # struct Coll <: Prometheus.Collector end @test_throws Prometheus.ArgumentError Prometheus.expr_gen(:unknown, nothing, nothing) end @testset "Prometheus.Family{$Collector}" for Collector in (Prometheus.Histogram, Prometheus.Summary) r = Prometheus.CollectorRegistry() c = Prometheus.Family{Collector}( "http_request_time", "Time to process requests.", ("endpoint", "status_code"); (Collector === Prometheus.Histogram ? (; buckets = [2.0, Inf]) : (;))..., registry = r, ) @test c in r.collectors @test length(c.children) == 0 # Prometheus.inc(...) l1 = ("/foo/", "200") l2 = ("/bar/", "404") @test Prometheus.labels(c, l1) === Prometheus.labels(c, l1) === c[l1] @test Prometheus.labels(c, l2) === Prometheus.labels(c, l2) === c[l2] @test length(c.children) == 2 @test Prometheus.labels(c, l1)._count == 0 @test Prometheus.labels(c, l1)._sum == 0 @test Prometheus.labels(c, l2)._count == 0 @test Prometheus.labels(c, l2)._sum == 0 Prometheus.observe(Prometheus.labels(c, l1), 1.2) Prometheus.observe(Prometheus.labels(c, l2), 2.1) @test Prometheus.labels(c, l1)._count == 1 @test Prometheus.labels(c, l1)._sum == 1.2 @test Prometheus.labels(c, l2)._count == 1 @test Prometheus.labels(c, l2)._sum == 2.1 Prometheus.observe(Prometheus.labels(c, l1), 3.4) Prometheus.observe(Prometheus.labels(c, l2), 4.3) @test Prometheus.labels(c, l1)._count == 2 @test Prometheus.labels(c, l1)._sum == 4.6 @test Prometheus.labels(c, l2)._count == 2 @test Prometheus.labels(c, l2)._sum == 6.4 # Prometheus.collect(...) 
metrics = Prometheus.collect(c) @test length(metrics) == 1 metric = metrics[1] @test metric.metric_name == c.metric_name @test metric.help == c.help if Collector === Prometheus.Histogram buckets = Prometheus.labels(c, l1).buckets @test length(buckets) == length(Prometheus.labels(c, l2).buckets) @test length(metric.samples) == 2 * (length(buckets) + 2) # _count and _sum samples s1, s2, s5, s6 = metric.samples[[1, 2, 5, 6]] @test s1.label_values.label_values == s2.label_values.label_values == ("/bar/", "404") @test s5.label_values.label_values == s6.label_values.label_values == ("/foo/", "200") @test s1.value == 2 # _count @test s2.value == 6.4 # _sum @test s5.value == 2 # _count @test s6.value == 4.6 # _sum # {le} samples for (ls, subrange) in ((l1, 7:8), (l2, 3:4)) for (ub, counter, sample) in zip(buckets, Prometheus.labels(c, ls).bucket_counters, metric.samples[subrange]) @test sample.suffix === nothing @test (sample.label_names::Prometheus.LabelNames{3}).label_names === (:endpoint, :status_code, :le) @test (sample.label_values::Prometheus.LabelValues{3}).label_values == (ls..., string(ub)) @test sample.value == counter[] end end else # Collector === Prometheus.Summary @test length(metric.samples) == 4 s1, s2, s3, s4 = metric.samples @test s1.label_values.label_values == s2.label_values.label_values == ("/bar/", "404") @test s3.label_values.label_values == s4.label_values.label_values == ("/foo/", "200") @test s1.value == 2 # _count @test s2.value == 6.4 # _sum @test s3.value == 2 # _count @test s4.value == 4.6 # _sum # Prometheus.expose_metric(...) @test sprint(Prometheus.expose_metric, metric) == sprint(Prometheus.expose_io, r) == """ # HELP http_request_time Time to process requests. # TYPE http_request_time summary http_request_time_count{endpoint="/bar/",status_code="404"} 2 http_request_time_sum{endpoint="/bar/",status_code="404"} 6.4 http_request_time_count{endpoint="/foo/",status_code="200"} 2 http_request_time_sum{endpoint="/foo/",status_code="200"} 4.6 """ end end @testset "Label types for Prometheus.Family{C}" begin struct RequestLabels target::String status_code::Int end for fam in ( # Constructor with NTuple{N, String} names Prometheus.Family{Prometheus.Counter}( "http_requests", "Total number of HTTP requests", ("target", "status_code"); registry=nothing, ), # Constructor with NTuple{N, Symbol} names Prometheus.Family{Prometheus.Counter}( "http_requests", "Total number of HTTP requests", (:target, :status_code); registry=nothing, ), # Constructor with NamedTuple type Prometheus.Family{Prometheus.Counter}( "http_requests", "Total number of HTTP requests", @NamedTuple{target::String, status_code::Int}; registry=nothing, ), # Constructor with custom struct Prometheus.Family{Prometheus.Counter}( "http_requests", "Total number of HTTP requests", RequestLabels; registry=nothing, ), ) @test Prometheus.labels(fam, ("/api", "200")) === fam[("/api", "200")] === Prometheus.labels(fam, ("/api", 200)) === fam[("/api", 200)] === Prometheus.labels(fam, (target="/api", status_code="200")) === fam[(target="/api", status_code="200")] === Prometheus.labels(fam, (target="/api", status_code=200)) === fam[(target="/api", status_code=200)] === Prometheus.labels(fam, (status_code="200", target="/api")) === fam[(status_code="200", target="/api")] === Prometheus.labels(fam, (status_code=200, target="/api")) === fam[(status_code=200, target="/api")] === Prometheus.labels(fam, RequestLabels("/api", 200)) === fam[RequestLabels("/api", 200)] end end @testset "Prometheus.GCCollector" begin r = 
Prometheus.CollectorRegistry() c = Prometheus.GCCollector(; registry=r) @test c in r.collectors # Record before and after stats and test that the metrics are in between old_stats = Base.gc_num() x = zeros(1024^2); x = nothing; GC.gc(); GC.gc() metrics = Prometheus.collect(c) x = zeros(1024^2); x = nothing; GC.gc(); GC.gc() new_stats = Base.gc_num() @test length(metrics) == 6 gc_alloc_total = metrics[findfirst(x -> x.metric_name == "julia_gc_alloc_total", metrics)] @test old_stats.bigalloc <= gc_alloc_total.samples[1].value <= new_stats.bigalloc @test old_stats.malloc <= gc_alloc_total.samples[2].value <= new_stats.malloc @test old_stats.poolalloc <= gc_alloc_total.samples[3].value <= new_stats.poolalloc @test old_stats.realloc <= gc_alloc_total.samples[4].value <= new_stats.realloc gc_free_total = metrics[findfirst(x -> x.metric_name == "julia_gc_free_total", metrics)] @test old_stats.freecall <= gc_free_total.samples.value <= new_stats.freecall gc_alloc_bytes_total = metrics[findfirst(x -> x.metric_name == "julia_gc_alloc_bytes_total", metrics)] @test Base.gc_total_bytes(old_stats) <= gc_alloc_bytes_total.samples.value <= Base.gc_total_bytes(new_stats) gc_seconds_total = metrics[findfirst(x -> x.metric_name == "julia_gc_seconds_total", metrics)] @test old_stats.total_time / 10^9 <= gc_seconds_total.samples.value <= new_stats.total_time / 10^9 # Prometheus.expose_metric(...) str = sprint(Prometheus.expose_metric, gc_alloc_total) @test occursin( r""" # HELP julia_gc_alloc_total Total number of allocations \(calls to malloc, realloc, etc\) # TYPE julia_gc_alloc_total counter julia_gc_alloc_total{type="bigalloc"} \d+ julia_gc_alloc_total{type="malloc"} \d+ julia_gc_alloc_total{type="poolalloc"} \d+ julia_gc_alloc_total{type="realloc"} \d+ """, sprint(Prometheus.expose_metric, gc_alloc_total), ) end @testset "Prometheus.ProcessCollector" begin r = Prometheus.CollectorRegistry() c = Prometheus.ProcessCollector(; registry=r) @test c in r.collectors metrics = Prometheus.collect(c) procfs_available = c.system_boot_time > 0 if procfs_available # Prometheus.expose_metric(...) str = sprint(Prometheus.expose_io, r) @test occursin( r""" # HELP process_cpu_seconds_total Total CPU time \(user and system mode\) in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total{mode="system"} [0-9\.]+ process_cpu_seconds_total{mode="user"} [0-9\.]+ # HELP process_io_rchar_bytes_total Total number of bytes read in bytes \(rchar from /proc/\[pid\]/io\). # TYPE process_io_rchar_bytes_total counter process_io_rchar_bytes_total \d+ # HELP process_io_read_bytes_total Total number of bytes read from the file system \(read_bytes from /proc/\[pid\]/io\). # TYPE process_io_read_bytes_total counter process_io_read_bytes_total \d+ # HELP process_io_syscr_total Total number of read I/O operations \(syscalls\) \(syscr from /proc/\[pid\]/io\). # TYPE process_io_syscr_total counter process_io_syscr_total \d+ # HELP process_io_syscw_total Total number of write I/O operations \(syscalls\) \(syscw from /proc/\[pid\]/io\). # TYPE process_io_syscw_total counter process_io_syscw_total \d+ # HELP process_io_wchar_bytes_total Total number of bytes written in bytes \(wchar from /proc/\[pid\]/io\). # TYPE process_io_wchar_bytes_total counter process_io_wchar_bytes_total \d+ # HELP process_io_write_bytes_total Total number of bytes written to the file system \(write_bytes from /proc/\[pid\]/io\). 
# TYPE process_io_write_bytes_total counter process_io_write_bytes_total \d+ # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge process_open_fds \d+ # HELP process_resident_memory_bytes Resident memory size \(RSS\) in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes \d+ # HELP process_start_time_seconds Start time since unix epoch in seconds. # TYPE process_start_time_seconds gauge process_start_time_seconds .* # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes \d+ """, sprint(Prometheus.expose_io, r), ) else @test isempty(metrics) end # Test that pid function works procc = Prometheus.ProcessCollector(() -> getpid(); registry=nothing) metrics = Prometheus.collect(procc) if procfs_available @test length(metrics) > 0 else @test length(metrics) == 0 end if procfs_available # Not a pid empty!(Prometheus.DEFAULT_REGISTRY.collectors) procc = Prometheus.ProcessCollector(() -> "notapid") empty!(Prometheus.DEFAULT_REGISTRY.collectors) metrics = @test_logs (:error, r"/proc/notapid/ does not exist") Prometheus.collect(procc) @test length(metrics) == 0 # Pid function error empty!(Prometheus.DEFAULT_REGISTRY.collectors) procc = Prometheus.ProcessCollector(() -> error()) metrics = @test_logs (:error, r"pid from the lambda") Prometheus.collect(procc) @test length(metrics) == 0 end end @testset "Character escaping in exposition" begin counter = Prometheus.Family{Prometheus.Counter}( "counter_name", "Help with slash \\ and newline \n", ("label_name", ); registry = nothing, ) Prometheus.inc(Prometheus.labels(counter, ("backslash \\, quote \", newline \n", ))) metric = first(Prometheus.collect(counter)) @test sprint(Prometheus.expose_metric, metric) == """ # HELP counter_name Help with slash \\\\ and newline \\n # TYPE counter_name counter counter_name{label_name="backslash \\\\, quote \\", newline \\n"} 1 """ end @testset "Prometheus.expose(::Union{String, IO})" begin r = Prometheus.DEFAULT_REGISTRY empty!(r.collectors) Prometheus.inc(Prometheus.Counter("prom_counter", "Counting things"; registry=r)) Prometheus.set(Prometheus.Gauge("prom_gauge", "Gauging things"; registry=r), 1.2) mktempdir() do dir default = joinpath(dir, "default.prom") Prometheus.expose(default) reg = joinpath(dir, "reg.prom") Prometheus.expose(reg, r) default_io = IOBuffer() Prometheus.expose(default_io) reg_io = IOBuffer() Prometheus.expose(reg_io, r) @test read(default, String) == read(reg, String) == String(take!(default_io)) == String(take!(reg_io)) == """ # HELP prom_counter Counting things # TYPE prom_counter counter prom_counter 1 # HELP prom_gauge Gauging things # TYPE prom_gauge gauge prom_gauge 1.2 """ end end @testset "Prometheus.expose(::HTTP.Stream)" begin empty!(Prometheus.DEFAULT_REGISTRY.collectors) Prometheus.inc(Prometheus.Counter("prom_counter", "Counting things")) Prometheus.set(Prometheus.Gauge("prom_gauge", "Gauging things"), 1.2) iob = IOBuffer() Prometheus.expose(iob) reference_output = String(take!(iob)) # Spin up the server server = HTTP.listen!(8123) do http if http.message.target == "/metrics/default" return Prometheus.expose(http) elseif http.message.target == "/metrics/reg" return Prometheus.expose(http, Prometheus.DEFAULT_REGISTRY) elseif http.message.target == "/metrics/nogzip" return Prometheus.expose(http; compress=false) else HTTP.setstatus(http, 404) HTTP.startwrite(http) end end # Normal requests r_default = HTTP.request("GET", 
"http://localhost:8123/metrics/default") r_ref = HTTP.request("GET", "http://localhost:8123/metrics/reg") @test String(r_default.body) == String(r_ref.body) == reference_output # HEAD @test isempty(HTTP.request("HEAD", "http://localhost:8123/metrics/default").body) # POST (no filtering in the server above) r_post = HTTP.request("POST", "http://localhost:8123/metrics/default") @test String(r_post.body) == reference_output # Bad URI r_bad = HTTP.request("GET", "http://localhost:8123"; status_exception=false) @test r_bad.status == 404 # Compression for enc in ("gzip", "br, compress, gzip", "br;q=1.0, gzip;q=0.8, *;q=0.1") r_gzip = HTTP.request( "GET", "http://localhost:8123/metrics/default", ["Accept-Encoding" => enc] ) @test HTTP.header(r_gzip, "Content-Encoding") == "gzip" @test String(r_gzip.body) == reference_output # HTTP.jl decompresses gzip r_nogzip = HTTP.request( "GET", "http://localhost:8123/metrics/nogzip", ["Accept-Encoding" => enc] ) @test HTTP.header(r_nogzip, "Content-Encoding", nothing) === nothing @test String(r_nogzip.body) == reference_output end # Test missing Accept-Encoding (HTTP.jl adds it automatically unless explicitly set) r_nogzip = HTTP.request( "GET", "http://localhost:8123/metrics/default", ["Accept-Encoding" => ""], ) @test HTTP.header(r_nogzip, "Content-Encoding", nothing) === nothing @test String(r_nogzip.body) == reference_output # Clean up close(server) wait(server) end @testset "Utilities" begin x = 1 err = try Prometheus.@assert x === nothing; catch e; e; end @test err isa Prometheus.AssertionError @test err.msg == "x === nothing" @test occursin("`x === nothing`", sprint(showerror, err)) @test occursin("please file an issue", sprint(showerror, err)) @test occursin( "Prometheus.ArgumentError: err", sprint(showerror, Prometheus.ArgumentError("err")), ) end
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
docs
3878
# Prometheus.jl changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [v1.4.0] - 2024-07-12
### Changed
 - The public API of Prometheus.jl is now marked `public` in Julia versions that support
   it. The public names are: `CollectorRegistry`, `register`, `unregister`, `Counter`,
   `Gauge`, `Histogram`, `Summary`, `GCCollector`, `ProcessCollector`, `inc`, `dec`, `set`,
   `set_to_current_time`, `observe`, `@inprogress`, `@time`, `Family`, `labels`, `remove`,
   `clear`, and `expose`. ([#19])

## [v1.3.0] - 2023-11-29
### Added
 - `Base.getindex` is overloaded for the `Prometheus.Family` collector to have the same
   meaning as `Prometheus.labels`. `family[labels]` is equivalent to
   `Prometheus.labels(family, labels)`. ([#13])
### Fixed
 - The `ProcessCollector` is now initialized on first use in a given process. This fixes a
   bug where values cached during precompilation (e.g. system boot time) would be used
   instead of the current values. ([#14])

## [v1.2.0] - 2023-11-22
### Added
 - The fourth basic collector, `Histogram`, has been added. ([#10])

## [v1.1.0] - 2023-11-13
### Added
 - New macro `Prometheus.@time collector <expr>` for timing `<expr>` and passing the
   elapsed time to the collector. `<expr>` can be a single expression, a block, or a
   function *definition*. In the latter case, all calls to the function will be
   instrumented (no matter the call site). See documentation for more details. ([#6])
 - New macro `Prometheus.@inprogress collector <expr>` to track the number of in-progress
   concurrent evaluations of `<expr>`. Just like `Prometheus.@time`, valid `<expr>`s are
   single expressions, blocks, and function definitions. See documentation for more
   details. ([#6])
 - New ways to specify label names and label values in `Prometheus.Family{C}`. Label names
   can now be passed to the constructor as i) a tuple of strings or symbols, ii) a named
   tuple type (names used for label names), or iii) a custom struct type (field names used
   for label names). Similarly, label values (passed to e.g. `Prometheus.labels`) can be
   passed as i) tuple of strings, ii) named tuple, iii) struct instance. See documentation
   for examples and more details. ([#7])

## [v1.0.1] - 2023-11-06
### Fixed
 - Fixed verification of metric names and label names.
 - Correctly escape special characters in exposition (specifically help and label values).

## [v1.0.0] - 2023-11-05

First stable release of Prometheus.jl:
 - Supported basic collectors: Counter, Gauge, Summary
 - GCCollector for metrics about allocations and garbage collection
 - ProcessCollector for process metrics such as CPU time and I/O operations (requires the
   /proc file system).
 - Support for default and custom collector registries
 - Support for metric labeling
 - Support for exposing metrics to file and over HTTP
 - Support for gzip compression when exposing over HTTP

See [README.md](README.md) for details and documentation.
<!-- Links generated by Changelog.jl -->
[v1.0.0]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.0.0
[v1.0.1]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.0.1
[v1.1.0]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.1.0
[v1.2.0]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.2.0
[v1.3.0]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.3.0
[v1.4.0]: https://github.com/fredrikekre/Prometheus.jl/releases/tag/v1.4.0
[#6]: https://github.com/fredrikekre/Prometheus.jl/issues/6
[#7]: https://github.com/fredrikekre/Prometheus.jl/issues/7
[#10]: https://github.com/fredrikekre/Prometheus.jl/issues/10
[#13]: https://github.com/fredrikekre/Prometheus.jl/issues/13
[#14]: https://github.com/fredrikekre/Prometheus.jl/issues/14
[#19]: https://github.com/fredrikekre/Prometheus.jl/issues/19
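For reference, a small sketch of the `@time`/`@inprogress` macros introduced in v1.1.0
(the metric names are illustrative only):

```julia
using Prometheus

duration = Prometheus.Histogram("request_time", "Request duration"; registry=nothing)
inflight = Prometheus.Gauge("requests_inprogress", "Requests in progress"; registry=nothing)

# Instrumenting a function definition instruments every call site
Prometheus.@time duration function handle()
    sleep(0.01)
end
# Instrumenting a single expression (here a call)
Prometheus.@inprogress inflight handle()
```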
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
docs
2195
# Prometheus.jl - Prometheus client for Julia

| **Documentation** | **Build Status** |
|:------------------------- |:------------------------------------------------------- |
| [![][docs-img]][docs-url] | [![][ci-img]][ci-url] [![][coverage-img]][coverage-url] |

Prometheus.jl is a Julia client for [Prometheus](https://prometheus.io/).

## Quickstart

1. Install Prometheus.jl and [HTTP.jl](https://github.com/JuliaWeb/HTTP.jl) using the package manager:
   ```
   pkg> add Prometheus HTTP
   ```

2. Paste the following code into a Julia REPL.
   ```julia
   # Load the packages
   using Prometheus, HTTP

   # Create a Counter metric
   const request_counter = Prometheus.Counter("request_count", "Number of handled requests")

   # Start an HTTP server on localhost port 8000 to serve the metrics
   server = HTTP.listen!(8000) do http
       Prometheus.inc(request_counter) # Increment the request counter
       return Prometheus.expose(http) # Expose the metrics
   end
   ```

3. Visit <http://localhost:8000> in your browser. You will see something like the following
   ```
   # HELP gc_alloc_bytes_total Total number of allocated bytes
   # TYPE gc_alloc_bytes_total counter
   gc_alloc_bytes_total 365578814

   [...]

   # HELP request_count Number of handled requests
   # TYPE request_count counter
   request_count 1
   ```
   The output contains some default metrics related to the running process, as well as the
   request counter that we added ourselves. Every time you refresh, the counter will
   increment its value. `close(server)` will shut down the server.

Visit the [documentation](https://fredrikekre.github.io/Prometheus.jl/) for much more detail!

[docs-img]: https://img.shields.io/badge/docs-latest%20release-blue.svg
[docs-url]: https://fredrikekre.github.io/Prometheus.jl/
[ci-img]: https://github.com/fredrikekre/Prometheus.jl/actions/workflows/CI.yml/badge.svg?event=push
[ci-url]: https://github.com/fredrikekre/Prometheus.jl/actions/workflows/CI.yml
[coverage-img]: https://codecov.io/github/fredrikekre/Prometheus.jl/graph/badge.svg
[coverage-url]: https://codecov.io/github/fredrikekre/Prometheus.jl
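Metrics can also be written to a file instead of being served over HTTP, which can be
useful for file-based scraping setups. A minimal sketch (the file path is illustrative):

```julia
using Prometheus

Prometheus.inc(Prometheus.Counter("jobs_processed", "Number of processed jobs"))
Prometheus.expose("metrics.prom")  # atomically writes the default registry to disk
```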
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
1.4.0
c748516eb86c833395f5f90c41d1f3f11ce95f01
docs
7889
# Prometheus.jl

## Introduction

This package is a Julia client for [Prometheus](https://prometheus.io/). If you are not
familiar with Prometheus it is recommended to browse the
[upstream documentation](https://prometheus.io/docs/introduction/overview/). The
documentation here focuses on the Julia client.

Two of the basic concepts of a Prometheus client are [Registries](@ref) and
[Collectors](@ref). Registries are collections of collectors, and the collectors are the
units responsible for recording and capturing metrics. Client libraries implement a default
registry which all collectors implicitly register with, so for basic usage there is no need
to interact with a registry (see [Default registry](@ref)).

The third important concept is [Exposition](@ref) of the collected metrics. Typically
metrics are exposed over an HTTP server, as in the [Quickstart](@ref)-example just below.
See the section about [Exposition](@ref) for more details and examples on how metrics can
be exposed.

## Quickstart

1. Install Prometheus.jl and [HTTP.jl](https://github.com/JuliaWeb/HTTP.jl) using the package manager:
   ```
   pkg> add Prometheus HTTP
   ```

2. Paste the following code into a Julia REPL.
   ```julia
   # Load the packages
   using Prometheus, HTTP

   # Create a Counter metric
   const request_counter = Prometheus.Counter("request_count", "Number of handled requests")

   # Start an HTTP server on localhost port 8000 to serve the metrics
   server = HTTP.listen!(8000) do http
       Prometheus.inc(request_counter) # Increment the request counter
       return Prometheus.expose(http) # Expose the metrics
   end
   ```

3. Visit <http://localhost:8000> in your browser. You will see something like the following
   ```
   # HELP gc_alloc_bytes_total Total number of allocated bytes
   # TYPE gc_alloc_bytes_total counter
   gc_alloc_bytes_total 365578814

   [...]

   # HELP request_count Number of handled requests
   # TYPE request_count counter
   request_count 1
   ```
   The output contains some default metrics related to the running process, as well as the
   request counter that we added ourselves. Every time you refresh, the counter will
   increment its value. `close(server)` will shut down the server.

## Collectors

This section documents the collectors that are currently supported. This includes the
"basic" collectors ([Counter](@ref), [Gauge](@ref), [Histogram](@ref), [Summary](@ref)) as
well as some custom collectors ([GCCollector](@ref), [ProcessCollector](@ref)). There is
also a section on how to implement your own collector, see [Custom collectors](@ref).

Upstream documentation:
 - <https://prometheus.io/docs/concepts/metric_types/>
 - <https://prometheus.io/docs/instrumenting/writing_clientlibs/#metrics>

### Counter

Quoting the [upstream documentation](https://prometheus.io/docs/concepts/metric_types/#counter):

> A counter is a cumulative metric that represents a single monotonically increasing counter
> whose value can only increase or be reset to zero on restart. For example, you can use a
> counter to represent the number of requests served, tasks completed, or errors.
>
> Do not use a counter to expose a value that can decrease. For example, do not use a
> counter for the number of currently running processes; instead use a gauge.

#### Counter API reference

```@docs
Prometheus.Counter(::String, ::String; kwargs...)
Prometheus.inc(::Prometheus.Counter, ::Real)
```

### Gauge

Quoting the [upstream documentation](https://prometheus.io/docs/concepts/metric_types/#gauge):

> A gauge is a metric that represents a single numerical value that can arbitrarily go up
> and down.
> > Gauges are typically used for measured values like temperatures or current memory usage, > but also "counts" that can go up and down, like the number of concurrent requests. #### Gauge API reference ```@docs Prometheus.Gauge(::String, ::String; kwargs...) Prometheus.inc(::Prometheus.Gauge, ::Real) Prometheus.dec(::Prometheus.Gauge, ::Real) Prometheus.set(::Prometheus.Gauge, ::Real) Prometheus.set_to_current_time(::Prometheus.Gauge) Prometheus.@time Prometheus.@inprogress ``` ### Histogram Quoting the [upstream documentation](https://prometheus.io/docs/concepts/metric_types/#histogram): > A histogram samples observations (usually things like request durations or response sizes) > and counts them in configurable buckets. It also provides a sum of all observed values. #### Histogram API reference ```@docs Prometheus.Histogram(::String, ::String; kwargs...) Prometheus.observe(::Prometheus.Histogram, ::Real) ``` ```@docs; canonical=false Prometheus.@time ``` ### Summary Quoting the [upstream documentation](https://prometheus.io/docs/concepts/metric_types/#summary): > Similar to a histogram, a summary samples observations (usually things like request > durations and response sizes). While it also provides a total count of observations and a > sum of all observed values, it calculates configurable quantiles over a sliding time > window. #### Summary API reference ```@docs Prometheus.Summary(::String, ::String; kwargs...) Prometheus.observe(::Prometheus.Summary, ::Real) ``` ```@docs; canonical=false Prometheus.@time ``` ### GCCollector A collector that exports metrics about allocations and garbage collection (for example number of allocations, number of bytes allocated, time spent in garbage collection, etc). These metrics have the `julia_gc_` prefix in their name. A `GCCollector` is registered automatically with the default registry, see [Default registry](@ref) for more details. #### GCCollector API reference ```@docs Prometheus.GCCollector(; kwargs...) ``` ### ProcessCollector A collector that exports metrics about a running process, for example CPU seconds and metrics about I/O operations. Metrics from this collector have the `process_` prefix in their name. This collector is only available on Linux since it requires the `/proc` file system. A `ProcessCollector` for the current process is registered automatically with the default registry, see [Default registry](@ref) for more details. #### ProcessCollector API reference ```@docs Prometheus.ProcessCollector(::Function; kwargs...) ``` ### Custom collectors RandomCollector ## Labels Prometheus allows attaching labels to metrics, see the upstream documentation: - <https://prometheus.io/docs/practices/naming/#labels> - <https://prometheus.io/docs/practices/instrumentation/#use-labels> - <https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels> In this package labeling of collectors is done with [`Prometheus.Family`](@ref). A collector family consist of a number of regular collectors, the children, with unique labels. A concrete example is a HTTP request `Counter`, where we might also want to keep track of the target resource and the status code of the request. 
Such instrumentation can be implemented as follows ```julia # Custom label struct struct RequestLabels target::String status_code::Int end # Create the counter family request_counter = Prometheus.Family{Prometheus.Counter}( "http_requests", "Total number of HTTP requests", RequestLabels ) # Extract a Counter for a specific set of labels counter = Prometheus.labels(request_counter, RequestLabels("/api", 200)) # Increment the counter Prometheus.inc(counter) ``` Note that using a custom label struct is optional, refer to the constructor [`Prometheus.Family`](@ref) and [`Prometheus.labels`](@ref) for alternative methods. ### Family API reference ```@docs Prometheus.Family{C}(::String, ::String, ::Any; kwargs...) where C Prometheus.labels(::Prometheus.Family{C, N}, ::Any) where {C, N} Prometheus.remove(::Prometheus.Family{<:Any, N}, ::Any) where {N} Prometheus.clear(::Prometheus.Family) ``` ## Registries ### Default registry ## Exposition Prometheus support ```@docs Prometheus.expose ```
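The pieces above can be combined. The following is a hedged sketch, not part of the
package's API reference: it serves the labeled request counter from the [Labels](@ref)
section with the same exposition pattern as the [Quickstart](@ref); the endpoint and
label values are made up for illustration.

```julia
using Prometheus, HTTP

# Custom label struct, as in the Labels section
struct RequestLabels
    target::String
    status_code::Int
end

# Counter family keyed on target and status code
const request_counter = Prometheus.Family{Prometheus.Counter}(
    "http_requests", "Total number of HTTP requests", RequestLabels
)

# Serve the metrics on port 8000, counting every scrape of the (illustrative)
# "/metrics" target with status code 200
server = HTTP.listen!(8000) do http
    Prometheus.inc(Prometheus.labels(request_counter, RequestLabels("/metrics", 200)))
    return Prometheus.expose(http)
end
# close(server) shuts the server down
```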
Prometheus
https://github.com/fredrikekre/Prometheus.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
617
push!(LOAD_PATH, "../src/")
using Pkg; Pkg.add("Documenter")
using Documenter, SeisMain

makedocs(
    modules = [SeisMain],
    doctest = false,
    clean = true,
    checkdocs = :all,
    sitename = "SeisMain.jl",
    format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
    pages = [
        "Home" => "index.md",
        "Manual" => Any["Guide" => "man/guide.md"],
        "Library" => Any[
            "Public" => "lib/public.md",
            "Internals" => "lib/internals.md"
        ],
    ],
)

deploydocs(
    repo = "github.com/fercarozzi/SeisMain.jl.git",
    target = "build",
    deps = nothing,
    make = nothing
)
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
148
module SeisMain

using Printf
using Statistics
using Distributed

include("ReadWrite/ReadWrite.jl")
include("Utils/Utils.jl")

end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
360
""" ExtractSegyHeader(h,key) Extract a specific key value from the header array """ function ExtractSegyHeader(h::Array{SeisMain.SegyHeader,1},key::AbstractString) keytype = eval(Meta.parse("typeof(SeisMain.InitSegyHeader().$(key))")) out = keytype[] for ix = 1 : length(h) push!(out,getfield(h[ix],Symbol(key))) end return out end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
18085
mutable struct Header
    tracenum::Int32
    o1::Float32
    n1::Int32
    d1::Float32
    sx::Float32
    sy::Float32
    gx::Float32
    gy::Float32
    mx::Float32
    my::Float32
    hx::Float32
    hy::Float32
    h::Float32
    az::Float32
    ang::Float32
    isx::Int32
    isy::Int32
    igx::Int32
    igy::Int32
    imx::Int32
    imy::Int32
    ihx::Int32
    ihy::Int32
    ih::Int32
    iaz::Int32
    iang::Int32
    selev::Float32
    gelev::Float32
    sstat::Float32
    gstat::Float32
    trid::Int32
end

header_count = Dict{AbstractString,Int32}()
header_count["tracenum"] = 0
header_count["o1"]       = 4
header_count["n1"]       = 8
header_count["d1"]       = 12
header_count["sx"]       = 16
header_count["sy"]       = 20
header_count["gx"]       = 24
header_count["gy"]       = 28
header_count["mx"]       = 32
header_count["my"]       = 36
header_count["hx"]       = 40
header_count["hy"]       = 44
header_count["h"]        = 48
header_count["az"]       = 52
header_count["ang"]      = 56
header_count["isx"]      = 60
header_count["isy"]      = 64
header_count["igx"]      = 68
header_count["igy"]      = 72
header_count["imx"]      = 76
header_count["imy"]      = 80
header_count["ihx"]      = 84
header_count["ihy"]      = 88
header_count["ih"]       = 92
header_count["iaz"]      = 96
header_count["iang"]     = 100
header_count["selev"]    = 104
header_count["gelev"]    = 108
header_count["sstat"]    = 112
header_count["gstat"]    = 116
header_count["trid"]     = 120

"""
    InitSeisHeader()

Initialize a variable of composite type Header, corresponding to the data header in seis
format. All the fields are initialized to zero. Type ?SeisMain.Header for details of the
fields included.
"""
function InitSeisHeader()
    h = Header(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
               0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
               0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
               0.0)
    return h
end

"""
    GrabHeader(stream,j)

Extract the header from trace j of a file in seis format.

# Arguments
- `stream::IOStream`: data file in seis format
- `j::Integer`: Trace number
"""
function GrabHeader(stream, j)
    position = 4*length(fieldnames(Header))*(j - 1)
    seek(stream, position)
    h = InitSeisHeader()
    # read the 31 header words in declaration order
    for field in fieldnames(Header)
        setfield!(h, field, read(stream, fieldtype(Header, field)))
    end
    return h
end

"""
    PutHeader(stream,h,j)

Write the header h of trace j to a file in seis format.

# Arguments
- `stream::IOStream`: data file in seis format
- `h::Header`: header to write
- `j::Integer`: Trace number
"""
function PutHeader(stream, h, j)
    position = 4*length(fieldnames(Header))*(j - 1)
    seek(stream, position)
    # write the 31 header words in declaration order
    for field in fieldnames(Header)
        write(stream, getfield(h, field))
    end
end

primitive type Header32Bits 32 end

"""
    BitsToHeader(h_in)

Reinterpret an input header to its correct type. The function outputs a variable of
composite type Header.
"""
function BitsToHeader(h_in)
    h = InitSeisHeader()
    for (i, field) in enumerate(fieldnames(Header))
        setfield!(h, field, reinterpret(fieldtype(Header, field), h_in[i]))
    end
    return h
end

"""
    HeaderToBits(h)

Reinterpret a composite type variable of type Header to 32-bit pieces. The function
outputs a Vector with 31 elements of type Header32Bits.
"""
function HeaderToBits(h)
    h_out = [reinterpret(Header32Bits, getfield(h, field)) for field in fieldnames(Header)]
    return h_out
end

"""
    GetNumTraces(in)

Calculate the total number of traces in a data file with seis format.
"""
function GetNumTraces(in)
    filename_h = ParseHeaderName(in)
    nhead = length(fieldnames(Header))
    stream_h = open(filename_h)
    nx = round(Int, filesize(stream_h)/(nhead*4))
    close(stream_h)
    return nx
end

"""
    ParseHeaderName(file::AbstractString)

Parse the name of the header file from an extent file to a String variable.
"""
function ParseHeaderName(filename::AbstractString)
    f = open(filename, "r")
    fstring = read(f, String)
    close(f)
    ini = first(something(findlast("\theaders=", fstring), 0:-1))
    ini == 0 ? headers = "NULL" : headers = fstring[something(findnext(r"\theaders=.*", fstring, ini), 0:-1)][11:end-1]
    return headers
end

"""
    ParseDataName(file::AbstractString)

Parse the name of the data file from an extent file to a String variable.
"""
function ParseDataName(filename::AbstractString)
    f = open(filename, "r")
    fstring = read(f, String)
    close(f)
    ini = first(something(findlast("\tin=", fstring), 0:-1))
    ini == 0 ? in = "NULL" : in = fstring[something(findnext(r"\tin=.*", fstring, ini), 0:-1)][6:end-1]
    return in
end

"""
    ParseDataFormat(file::AbstractString)

Parse the format of the data type in the extent file to a String variable.
"""
function ParseDataFormat(filename::AbstractString)
    f = open(filename, "r")
    fstring = read(f, String)
    close(f)
    ini = first(something(findlast("\tdata_format=", fstring), 0:-1))
    ini == 0 ? data_format = "native_float" : data_format = fstring[something(findnext(r"\tdata_format=.*", fstring, ini), 0:-1)][15:end-1]
    return data_format
end

"""
    ParseDataESize(file::AbstractString)

Parse the esize in the extent file to an Int32 variable.
"""
function ParseDataESize(filename::AbstractString)
    f = open(filename, "r")
    fstring = read(f, String)
    close(f)
    ini = first(something(findlast("\tesize=", fstring), 0:-1))
    ini == 0 ? esize = Int32(4) : esize = parse(Int32, fstring[something(findnext(r"\tesize=.*", fstring, ini), 0:-1)][8:end])
    return esize
end

"""
    ExtractHeader(h::Array{Header,1},key::AbstractString)

Extract the values of a field in the header. The output is a vector of the same type as
the field.
"""
function ExtractHeader(h::Array{Header,1}, key::AbstractString)
    keytype = eval(Meta.parse("typeof(SeisMain.InitSeisHeader().$(key))"))
    out = keytype[]
    for ix = 1:length(h)
        push!(out, getfield(h[ix], Symbol(key)))
    end
    return out
end

mutable struct Extent
    n1::Int32
    n2::Int32
    n3::Int32
    n4::Int32
    n5::Int32
    o1::Float32
    o2::Float32
    o3::Float32
    o4::Float32
    o5::Float32
    d1::Float32
    d2::Float32
    d3::Float32
    d4::Float32
    d5::Float32
    label1::AbstractString
    label2::AbstractString
    label3::AbstractString
    label4::AbstractString
    label5::AbstractString
    unit1::AbstractString
    unit2::AbstractString
    unit3::AbstractString
    unit4::AbstractString
    unit5::AbstractString
    title::AbstractString
end

"""
    ReadTextHeader(filename)

Read the extent file of data in seis format. The result is output in a variable of
composite type Extent. Type ?SeisMain.Extent for details on the included fields.
"""
function ReadTextHeader(filename)
    f = open(filename, "r")
    fstring = read(f, String)
    close(f)
    ini = first(something(findlast("\tn1=", fstring), 0:-1))
    ini == 0 ? n1 = Int32(1) : n1 = parse(Int32, fstring[something(findnext(r"\tn1=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\tn2=", fstring), 0:-1))
    ini == 0 ? n2 = Int32(1) : n2 = parse(Int32, fstring[something(findnext(r"\tn2=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\tn3=", fstring), 0:-1))
    ini == 0 ? n3 = Int32(1) : n3 = parse(Int32, fstring[something(findnext(r"\tn3=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\tn4=", fstring), 0:-1))
    ini == 0 ? n4 = Int32(1) : n4 = parse(Int32, fstring[something(findnext(r"\tn4=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\tn5=", fstring), 0:-1))
    ini == 0 ? n5 = Int32(1) : n5 = parse(Int32, fstring[something(findnext(r"\tn5=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\to1=", fstring), 0:-1))
    ini == 0 ? o1 = Float32(0) : o1 = parse(Float32, fstring[something(findnext(r"\to1=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\to2=", fstring), 0:-1))
    ini == 0 ? o2 = Float32(0) : o2 = parse(Float32, fstring[something(findnext(r"\to2=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\to3=", fstring), 0:-1))
    ini == 0 ? o3 = Float32(0) : o3 = parse(Float32, fstring[something(findnext(r"\to3=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\to4=", fstring), 0:-1))
    ini == 0 ? o4 = Float32(0) : o4 = parse(Float32, fstring[something(findnext(r"\to4=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\to5=", fstring), 0:-1))
    ini == 0 ? o5 = Float32(0) : o5 = parse(Float32, fstring[something(findnext(r"\to5=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\td1=", fstring), 0:-1))
    ini == 0 ? d1 = Float32(1) : d1 = parse(Float32, fstring[something(findnext(r"\td1=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\td2=", fstring), 0:-1))
    ini == 0 ? d2 = Float32(1) : d2 = parse(Float32, fstring[something(findnext(r"\td2=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\td3=", fstring), 0:-1))
    ini == 0 ? d3 = Float32(1) : d3 = parse(Float32, fstring[something(findnext(r"\td3=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\td4=", fstring), 0:-1))
    ini == 0 ? d4 = Float32(1) : d4 = parse(Float32, fstring[something(findnext(r"\td4=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\td5=", fstring), 0:-1))
    ini == 0 ? d5 = Float32(1) : d5 = parse(Float32, fstring[something(findnext(r"\td5=.*", fstring, ini))][5:end])
    ini = first(something(findlast("\tlabel1=", fstring), 0:-1))
    ini == 0 ? label1 = "" : label1 = fstring[something(findnext(r"\tlabel1=.*", fstring, ini))][10:end-1]
    ini = first(something(findlast("\tlabel2=", fstring), 0:-1))
    ini == 0 ? label2 = "" : label2 = fstring[something(findnext(r"\tlabel2=.*", fstring, ini))][10:end-1]
    ini = first(something(findlast("\tlabel3=", fstring), 0:-1))
    ini == 0 ? label3 = "" : label3 = fstring[something(findnext(r"\tlabel3=.*", fstring, ini))][10:end-1]
    ini = first(something(findlast("\tlabel4=", fstring), 0:-1))
    ini == 0 ? label4 = "" : label4 = fstring[something(findnext(r"\tlabel4=.*", fstring, ini))][10:end-1]
    ini = first(something(findlast("\tlabel5=", fstring), 0:-1))
    ini == 0 ? label5 = "" : label5 = fstring[something(findnext(r"\tlabel5=.*", fstring, ini))][10:end-1]
    ini = first(something(findlast("\tunit1=", fstring), 0:-1))
    ini == 0 ? unit1 = "" : unit1 = fstring[something(findnext(r"\tunit1=.*", fstring, ini))][9:end-1]
    ini = first(something(findlast("\tunit2=", fstring), 0:-1))
    ini == 0 ? unit2 = "" : unit2 = fstring[something(findnext(r"\tunit2=.*", fstring, ini))][9:end-1]
    ini = first(something(findlast("\tunit3=", fstring), 0:-1))
    ini == 0 ? unit3 = "" : unit3 = fstring[something(findnext(r"\tunit3=.*", fstring, ini))][9:end-1]
    ini = first(something(findlast("\tunit4=", fstring), 0:-1))
    ini == 0 ? unit4 = "" : unit4 = fstring[something(findnext(r"\tunit4=.*", fstring, ini))][9:end-1]
    ini = first(something(findlast("\tunit5=", fstring), 0:-1))
    ini == 0 ? unit5 = "" : unit5 = fstring[something(findnext(r"\tunit5=.*", fstring, ini))][9:end-1]
    ini = first(something(findlast("\ttitle=", fstring), 0:-1))
    ini == 0 ? title = "" : title = fstring[something(findnext(r"\ttitle=.*", fstring, ini))][9:end-1]
    extent = Extent(convert(Int32, n1), convert(Int32, n2), convert(Int32, n3), convert(Int32, n4), convert(Int32, n5),
                    convert(Float32, o1), convert(Float32, o2), convert(Float32, o3), convert(Float32, o4), convert(Float32, o5),
                    convert(Float32, d1), convert(Float32, d2), convert(Float32, d3), convert(Float32, d4), convert(Float32, d5),
                    label1, label2, label3, label4, label5,
                    unit1, unit2, unit3, unit4, unit5,
                    title)
    return extent
end

"""
    WriteTextHeader(filename,extent,format,esize,filename_d,filename_h)

Write the extent file, corresponding to the seis format, to the file filename.

# Arguments
- `extent`: variable of type Extent to write to file
- `format`: format of the data type in the @data@ file
- `esize`: esize
- `filename_d`: path of the @data@ file as a String variable
- `filename_h`: path of the @headers@ file as a String variable
"""
function WriteTextHeader(filename, extent, format, esize, filename_d, filename_h)
    # write the text header
    stream = open(filename, "w")
    write(stream, join(["\tn1=", extent.n1, "\n"]))
    write(stream, join(["\tn2=", extent.n2, "\n"]))
    write(stream, join(["\tn3=", extent.n3, "\n"]))
    write(stream, join(["\tn4=", extent.n4, "\n"]))
    write(stream, join(["\tn5=", extent.n5, "\n"]))
    write(stream, join(["\to1=", extent.o1, "\n"]))
    write(stream, join(["\to2=", extent.o2, "\n"]))
    write(stream, join(["\to3=", extent.o3, "\n"]))
    write(stream, join(["\to4=", extent.o4, "\n"]))
    write(stream, join(["\to5=", extent.o5, "\n"]))
    write(stream, join(["\td1=", extent.d1, "\n"]))
    write(stream, join(["\td2=", extent.d2, "\n"]))
    write(stream, join(["\td3=", extent.d3, "\n"]))
    write(stream, join(["\td4=", extent.d4, "\n"]))
    write(stream, join(["\td5=", extent.d5, "\n"]))
    write(stream, join(["\tlabel1=\"", extent.label1, "\"\n"]))
    write(stream, join(["\tlabel2=\"", extent.label2, "\"\n"]))
    write(stream, join(["\tlabel3=\"", extent.label3, "\"\n"]))
    write(stream, join(["\tlabel4=\"", extent.label4, "\"\n"]))
    write(stream, join(["\tlabel5=\"", extent.label5, "\"\n"]))
    write(stream, join(["\tunit1=\"", extent.unit1, "\"\n"]))
    write(stream, join(["\tunit2=\"", extent.unit2, "\"\n"]))
    write(stream, join(["\tunit3=\"", extent.unit3, "\"\n"]))
    write(stream, join(["\tunit4=\"", extent.unit4, "\"\n"]))
    write(stream, join(["\tunit5=\"", extent.unit5, "\"\n"]))
    write(stream, join(["\ttitle=\"", extent.title, "\"\n"]))
    write(stream, join(["\tdata_format=\"", format, "\"\n"]))
    write(stream, join(["\tesize=", esize, "\n"]))
    write(stream, join(["\tin=\"", filename_d, "\"\n"]))
    write(stream, join(["\theaders=\"", filename_h, "\"\n"]))
    close(stream)
end
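# A minimal round-trip sketch of the on-disk header representation, using only the
# functions above; the values are illustrative:
#
#   h = InitSeisHeader()
#   h.n1 = Int32(1000); h.d1 = Float32(0.004)
#   bits = HeaderToBits(h)            # 31-element Vector{Header32Bits}
#   h2 = BitsToHeader(bits)
#   (h2.n1, h2.d1) == (h.n1, h.d1)    # true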
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2308
""" ReadSegyHeader(filename;<keyword arguments>) Read the headers of a input file in segy format # Arguments - `group="all"` : Options are all, some or gather - `swap_bytes=true` : If the flag equals true, the function swaps bytes - `key=[]` - `minval=0` - `maxval=0` """ function ReadSegyHeader(filename_in;swap_bytes="true",group="all",key=" ",minval=0,maxval=0) #1) Stats on segy header stream = open(filename_in) position = 3200 seek(stream, position) fh = SeisMain.GrabFileHeader(stream) ntfh = swap_bytes == true ? bswap(fh.netfh) : fh.netfh fh = 0 if ntfh == -1 error("add instructions to deal with variable extended text header") end if ntfh == 0 file_hsize = 3600 elseif ntfh > 0 # file_hsize = 3200 * (ntfh+1) + 400 file_hsize = 3200 * 1 + 400 else error("unknown data format") end stream = open(filename_in) seek(stream, SeisMain.segy_count["ns"] + file_hsize) if (swap_bytes=="true") nt = bswap(read(stream,Int16)) else nt = read(stream,Int16) end total = 60 + nt nx = round(Int,(filesize(stream)-file_hsize)/4/total) println("total number of traces: ",nx) println("number of samples per trace: ",nt) itrace = 1 ntrace = nx tt = nx if group == "gather" # Using this option means that the traces are organized as the gather. If traces are not organized, this option won't work itrace = nx ntrace = 1 for j=1:nx h1 = SeisMain.GrabSegyHeader(stream,swap_bytes,nt,file_hsize,j) aux = getfield(h1,Symbol(key)) if aux >=minval && aux<=maxval if itrace>j itrace = j elseif j > ntrace ntrace = j end end end tt = ntrace - itrace +1 println(" number of traces in gather: ",tt) end h_segy = Array{SeisMain.SegyHeader}(undef,tt) for i=1:tt # este position me parece que esta mal + no lo necesito j = i + itrace -1 position = file_hsize + total*(j-1)*4 + SeisMain.segy_count["trace"] seek(stream,position) #este solo ya sabe donde ir h_segy[i] = SeisMain.GrabSegyHeader(stream,swap_bytes,nt,file_hsize,j) end #podria haber hecho h_segy = push!(h_segy, SeisMain.GrabSegyHeader(stream,swap_bytes,nt,file_hsize,j) # me parece que no por el tema de definir h_segy pero no estoy segura close(stream) println("The output array has the segy data header.") return h_segy end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
554
export Header, SegyToSeis, SeisToSegy, SeisCopy, SeisHeaderInfo, SeisRead,
       SeisReadHeaders, SeisRemove, SeisWrite, SeisWriteHeaders,
       ExtractSegyHeader, ReadSegyHeader, SegyHeaderInfo

include("Header.jl")
include("SegyStruct.jl")
include("SegyToSeis.jl")
include("SeisToSegy.jl")
include("SeisCopy.jl")
include("SeisHeaderInfo.jl")
include("SeisRead.jl")
include("SeisReadHeaders.jl")
include("SeisRemove.jl")
include("SeisWrite.jl")
include("SeisWriteHeaders.jl")
include("ExtractSegyHeader.jl")
include("ReadSegyHeader.jl")
include("SegyHeaderInfo.jl")
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1261
""" SegyHeaderInfo(filename_in) Print segy header information to screen. The input is the name of the headers file """ function SegyHeaderInfo(h_segy) #requires using Printf, Statistics println("Calculating statistics.") #key gets all the types of the composite type SegyHeader). key = fieldnames(SeisMain.SegyHeader) min_h = zeros(Float64,length(key)) max_h = zeros(Float64,length(key)) mean_h = zeros(Float64,length(key)) for ikey=1:length(key) min_h[ikey] = convert(Float64,getfield(h_segy[1],key[ikey])) max_h[ikey] = convert(Float64,getfield(h_segy[1],key[ikey])) mean_h[ikey] += convert(Float64,getfield(h_segy[1],key[ikey])) end nx = size(h_segy,1) for itrace = 2:nx for ikey = 1 : length(key) aux = convert(Float64,getfield(h_segy[itrace],ikey)) if (aux < min_h[ikey]) min_h[ikey] = aux end if ( aux > max_h[ikey]) max_h[ikey] = aux end mean_h[ikey] += aux end end for ikey=1:length(key) mean_h[ikey] /= nx end println(" Key Minimum Maximum Mean"); println("=============================================================") for ikey=1:length(key) @printf("%10s %11.3f %11.3f %11.3f\n",string(key[ikey]),min_h[ikey],max_h[ikey],mean_h[ikey]) end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
22724
mutable struct fileHeader
    jobid::Int32
    linnum::Int32
    renum::Int32
    ntrpe::Int16
    natrpe::Int16
    dt::Int16
    dtfr::Int16
    ns::Int16
    nsfr::Int16
    fmtc::Int16
    expf::Int16
    trsc::Int16
    vsumc::Int16
    sfs::Int16
    sfe::Int16
    slen::Int16
    styp::Int16
    tnumsc::Int16
    stalens::Int16
    stalene::Int16
    tyta::Int16
    corr::Int16
    rgc::Int16
    arm::Int16
    unit::Int16
    pol::Int16
    vpol::Int16
    fvn::Int16
    fltf::Int16
    netfh::Int16
end

fileHeader_count = Dict{AbstractString,Int32}()
fileHeader_count["jobid"]   = 3200
fileHeader_count["linnum"]  = 3204
fileHeader_count["renum"]   = 3208
fileHeader_count["ntrpe"]   = 3212
fileHeader_count["natrpe"]  = 3214
fileHeader_count["dt"]      = 3216
fileHeader_count["dtfr"]    = 3218
fileHeader_count["ns"]      = 3220
fileHeader_count["nsfr"]    = 3222
fileHeader_count["fmtc"]    = 3224
fileHeader_count["expf"]    = 3226
fileHeader_count["trsc"]    = 3228
fileHeader_count["vsumc"]   = 3230
fileHeader_count["sfs"]     = 3232
fileHeader_count["sfe"]     = 3234
fileHeader_count["slen"]    = 3236
fileHeader_count["styp"]    = 3238
fileHeader_count["tnumsc"]  = 3240
fileHeader_count["stalens"] = 3242
fileHeader_count["stalene"] = 3244
fileHeader_count["tyta"]    = 3246
fileHeader_count["corr"]    = 3248
fileHeader_count["rgc"]     = 3250
fileHeader_count["arm"]     = 3252
fileHeader_count["unit"]    = 3254
fileHeader_count["pol"]     = 3256
fileHeader_count["vpol"]    = 3258
fileHeader_count["fvn"]     = 3500
fileHeader_count["fltf"]    = 3502
fileHeader_count["netfh"]   = 3504

"""
    InitFileHeader()

Initializes a variable of composite type fileHeader, corresponding to the file header in
SEGY format. All the fields are initialized to 0. Type ?SeisMain.fileHeader for details
of the fields included.
"""
function InitFileHeader()
    fh = fileHeader(0,0,0,0,0,0,0,0,0,0,
                    0,0,0,0,0,0,0,0,0,0,
                    0,0,0,0,0,0,0,0,0,0)
    return fh
end

"""
    GrabFileHeader(stream)

Extracts the file header from a standard SEGY rev0 data file, starting from byte 3200.
"""
function GrabFileHeader(stream)
    seek(stream, fileHeader_count["jobid"])
    fh = InitFileHeader()
    # read the binary file header words in declaration order
    for field in fieldnames(fileHeader)
        setfield!(fh, field, read(stream, fieldtype(fileHeader, field)))
    end
    return fh
end

"""
    PutFileHeader(stream,fh)

Writes the file header on a standard SEGY rev0 format file.

# Arguments
- `stream::IOStream`: data file in segy format
- `fh::fileHeader`: file header to write
"""
function PutFileHeader(stream, fh)
    # the binary file header starts at byte 3200, matching GrabFileHeader
    seek(stream, fileHeader_count["jobid"])
    for field in fieldnames(fileHeader)
        write(stream, getfield(fh, field))
    end
end

mutable struct SegyHeader
    tracl::Int32
    tracr::Int32
    fldr::Int32
    tracf::Int32
    ep::Int32
    cdp::Int32
    cdpt::Int32
    trid::Int16
    nva::Int16
    nhs::Int16
    duse::Int16
    offset::Int32
    gelev::Int32
    selev::Int32
    sdepth::Int32
    gdel::Int32
    sdel::Int32
    swdep::Int32
    gwdep::Int32
    scalel::Int16
    scalco::Int16
    sx::Int32
    sy::Int32
    gx::Int32
    gy::Int32
    counit::Int16
    wevel::Int16
    swevel::Int16
    sut::Int16
    gut::Int16
    sstat::Int16
    gstat::Int16
    tstat::Int16
    laga::Int16
    lagb::Int16
    delrt::Int16
    muts::Int16
    mute::Int16
    ns::Int16
    dt::Int16
    gain::Int16
    igc::Int16
    igi::Int16
    corr::Int16
    sfs::Int16
    sfe::Int16
    slen::Int16
    styp::Int16
    stas::Int16
    stae::Int16
    tatyp::Int16
    afilf::Int16
    afils::Int16
    nofilf::Int16
    nofils::Int16
    lcf::Int16
    hcf::Int16
    lcs::Int16
    hcs::Int16
    year::Int16
    day::Int16
    hour::Int16
    minute::Int16
    sec::Int16
    timbas::Int16
    trwf::Int16
    grnors::Int16
    grnofr::Int16
    grnlof::Int16
    gaps::Int16
    otrav::Int16
    d1::Float32
    f1::Float32
    d2::Float32
    f2::Float32
    ungpow::Float32
    unscale::Float32
    ntr::Int32
    mark::Int16
    unass::Int16
end

segy_count = Dict{AbstractString,Int32}()
segy_count["tracl"]   = 0
segy_count["tracr"]   = 4
segy_count["fldr"]    = 8
segy_count["tracf"]   = 12
segy_count["ep"]      = 16
segy_count["cdp"]     = 20
segy_count["cdpt"]    = 24
segy_count["trid"]    = 28
segy_count["nva"]     = 30
segy_count["nhs"]     = 32
segy_count["duse"]    = 34
segy_count["offset"]  = 36
segy_count["gelev"]   = 40
segy_count["selev"]   = 44
segy_count["sdepth"]  = 48
segy_count["gdel"]    = 52
segy_count["sdel"]    = 56
segy_count["swdep"]   = 60
segy_count["gwdep"]   = 64
segy_count["scalel"]  = 68
segy_count["scalco"]  = 70
segy_count["sx"]      = 72
segy_count["sy"]      = 76
segy_count["gx"]      = 80
segy_count["gy"]      = 84
segy_count["counit"]  = 88
segy_count["wevel"]   = 90
segy_count["swevel"]  = 92
segy_count["sut"]     = 94
segy_count["gut"]     = 96
segy_count["sstat"]   = 98
segy_count["gstat"]   = 100
segy_count["tstat"]   = 102
segy_count["laga"]    = 104
segy_count["lagb"]    = 106
segy_count["delrt"]   = 108
segy_count["muts"]    = 110
segy_count["mute"]    = 112
segy_count["ns"]      = 114
segy_count["dt"]      = 116
segy_count["gain"]    = 118
segy_count["igc"]     = 120
segy_count["igi"]     = 122
segy_count["corr"]    = 124
segy_count["sfs"]     = 126
segy_count["sfe"]     = 128
segy_count["slen"]    = 130
segy_count["styp"]    = 132
segy_count["stas"]    = 134
segy_count["stae"]    = 136
segy_count["tatyp"]   = 138
segy_count["afilf"]   = 140
segy_count["afils"]   = 142
segy_count["nofilf"]  = 144
segy_count["nofils"]  = 146
segy_count["lcf"]     = 148
segy_count["hcf"]     = 150
segy_count["lcs"]     = 152
segy_count["hcs"]     = 154
segy_count["year"]    = 156
segy_count["day"]     = 158
segy_count["hour"]    = 160
segy_count["minute"]  = 162
segy_count["sec"]     = 164
segy_count["timbas"]  = 166
segy_count["trwf"]    = 168
segy_count["grnors"]  = 170
segy_count["grnofr"]  = 172
segy_count["grnlof"]  = 174
segy_count["gaps"]    = 176
segy_count["otrav"]   = 178
segy_count["d1"]      = 180
segy_count["f1"]      = 184
segy_count["d2"]      = 188
segy_count["f2"]      = 192
segy_count["ungpow"]  = 196
segy_count["unscale"] = 200
segy_count["ntr"]     = 204
segy_count["mark"]    = 208
segy_count["unass"]   = 210
segy_count["trace"]   = 240

"""
    InitSegyHeader()

Initializes a variable of composite type SegyHeader, corresponding to the trace header in
SEGY format. All the fields are initialized to 0. Type ?SeisMain.SegyHeader for details
of the fields included.
"""
function InitSegyHeader()
    h = SegyHeader(0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0,
                   0,0,0,0,0,0,0,0,0,0)
    return h
end

"""
    GrabSegyHeader(stream,swap_bytes,nt,file_header_size,j)

Extracts the header from trace j of a file in segy format. The output is of composite
type SegyHeader.

# Arguments
- `stream::IOStream`: data file in segy format
- `swap_bytes`: boolean variable; set it according to the endianness of the file
- `nt`: time samples
- `file_header_size`: Size of file header in bytes: 0 for su files, 3600 for SEGY rev0 standard
- `j::Integer`: Trace number
"""
function GrabSegyHeader(stream, swap_bytes, nt, file_header_size, j)
    position = file_header_size + (240 + nt*4)*(j - 1)
    seek(stream, position)
    h = InitSegyHeader()
    # read the 80 header words in declaration order, byte-swapping when requested
    maybe_swap = swap_bytes == false ? identity : bswap
    for field in fieldnames(SegyHeader)
        setfield!(h, field, maybe_swap(read(stream, fieldtype(SegyHeader, field))))
    end
    return h
end

"""
    PutSegyHeader(stream,h,nt,file_header_size,j)

Writes the header of trace j to a file in segy format.

# Arguments
- `stream::IOStream`: data file in segy format
- `h::SegyHeader`: header variable to write
- `file_header_size`: Size of file header in bytes: 0 for su files, 3600 for SEGY rev0 standard
- `j::Integer`: Trace number
"""
function PutSegyHeader(stream, h, nt, file_header_size, j)
    position = file_header_size + (240 + nt*4)*(j - 1)
    seek(stream, position)
    # write the 80 header words in declaration order
    for field in fieldnames(SegyHeader)
        write(stream, getfield(h, field))
    end
end

"""
    MapHeaders(h_in,j,map_type)

Maps header attributes from seis byte location to segy byte location in the file and
backwards. The output is of type Header or SegyHeader accordingly.

# Arguments
- `h_in`: header input data
- `j::Integer`: Trace number
- `map_type`: "SegyToSeis" or "SeisToSegy"
"""
function MapHeaders(h_in, j, map_type)
    if map_type == "SegyToSeis"
        # scalco = abs(convert(Float32,h_in[1].scalco)) < 10 ? convert(Float32,h_in[1].scalco) : sign(convert(Float32,h_in[1].scalco))*log10(abs(convert(Float32,h_in[1].scalco)))
        scalco = sign(convert(Float32, h_in[1].scalco))*log10(abs(convert(Float32, h_in[1].scalco)))
        # scalel = abs(convert(Float32,h_in[1].scalel)) < 10 ? convert(Float32,h_in[1].scalel) : sign(convert(Float32,h_in[1].scalel))*log10(abs(convert(Float32,h_in[1].scalel)))
        scalel = sign(convert(Float32, h_in[1].scalel))*log10(abs(convert(Float32, h_in[1].scalel)))
        h_out = InitSeisHeader()
        h_out.tracenum = convert(typeof(h_out.tracenum), j)
        h_out.o1 = convert(typeof(h_out.o1), 0)
        h_out.n1 = convert(typeof(h_out.n1), h_in[1].ns)
        h_out.d1 = convert(typeof(h_out.d1), h_in[1].dt/1000000)
        h_out.sx = scalco >= 0 ? convert(typeof(h_out.sx), h_in[1].sx)*10^scalco : convert(typeof(h_out.sx), h_in[1].sx)/(10^abs(scalco))
        h_out.sy = scalco >= 0 ? convert(typeof(h_out.sy), h_in[1].sy)*10^scalco : convert(typeof(h_out.sy), h_in[1].sy)/(10^abs(scalco))
        h_out.gx = scalco >= 0 ? convert(typeof(h_out.gx), h_in[1].gx)*10^scalco : convert(typeof(h_out.gx), h_in[1].gx)/(10^abs(scalco))
        h_out.gy = scalco >= 0 ? convert(typeof(h_out.gy), h_in[1].gy)*10^scalco : convert(typeof(h_out.gy), h_in[1].gy)/(10^abs(scalco))
        h_out.h = convert(typeof(h_out.h), h_in[1].offset)
        h_out.isx = convert(typeof(h_out.isx), h_in[1].ep)
        h_out.imx = convert(typeof(h_out.imx), h_in[1].cdp)
        h_out.selev = scalel >= 0 ? convert(typeof(h_out.selev), h_in[1].selev)*(10^scalel) : convert(typeof(h_out.selev), h_in[1].selev)/(10^abs(scalel))
        h_out.gelev = scalel >= 0 ? convert(typeof(h_out.gelev), h_in[1].gelev)*(10^scalel) : convert(typeof(h_out.gelev), h_in[1].gelev)/(10^abs(scalel))
        h_out.trid = convert(typeof(h_out.trid), h_in[1].trid)
    else
        h_out = InitSegyHeader()
        h_out.tracl = convert(typeof(h_out.tracl), j)
        h_out.tracr = convert(typeof(h_out.tracr), j)
        h_out.scalel = convert(typeof(h_out.scalel), -3)
        h_out.scalco = convert(typeof(h_out.scalco), -3)
        h_out.counit = convert(typeof(h_out.counit), 1)
        h_out.gain = convert(typeof(h_out.gain), 1)
        h_out.corr = convert(typeof(h_out.corr), 1)
        h_out.ns = convert(typeof(h_out.ns), Int(h_in[1].n1))
        h_out.dt = convert(typeof(h_out.dt), round(Int, h_in[1].d1*1000000))
        h_out.sx = convert(typeof(h_out.sx), round(Int, h_in[1].sx*1000))
        h_out.sy = convert(typeof(h_out.sy), round(Int, h_in[1].sy*1000))
        h_out.gx = convert(typeof(h_out.gx), round(Int, h_in[1].gx*1000))
        h_out.gy = convert(typeof(h_out.gy), round(Int, h_in[1].gy*1000))
        h_out.offset = convert(typeof(h_out.offset), round(Int, h_in[1].h))
        h_out.ep = convert(typeof(h_out.ep), Int(h_in[1].isx))
        h_out.cdp = convert(typeof(h_out.cdp), Int(h_in[1].imx))
        h_out.selev = convert(typeof(h_out.selev), round(Int, h_in[1].selev*1000))
        h_out.gelev = convert(typeof(h_out.gelev), round(Int, h_in[1].gelev*1000))
        h_out.trid = convert(typeof(h_out.trid), h_in[1].trid)
    end
    return h_out
end

import Base.convert

primitive type IBMFloat32 32 end

ieeeOfPieces(fr::UInt32, exp::Int32, sgn::UInt32) =
    reinterpret(Float32, convert(UInt32, fr >>> 9) | convert(UInt32, exp << 23) | sgn)::Float32

"""
    convert(::Type{Float32}, ibm::IBMFloat32)

Extension of Base.convert to cover the IBM Float32 floating-point format.
"""
function convert(::Type{Float32}, ibm::IBMFloat32)
    local fr::UInt32 = ntoh(reinterpret(UInt32, ibm))
    local sgn::UInt32 = fr & 0x80000000  # save sign
    fr <<= 1                             # shift sign out
    local exp::Int32 = convert(Int32, fr >>> 25)  # save exponent
    fr <<= 7                             # shift exponent out
    if fr == convert(UInt32, 0)
        zero(Float32)
    else
        # normalize the significand
        local norm::UInt32 = leading_zeros(fr)
        fr <<= norm
        exp = (exp << 2) - 130 - norm
        # exp <= 0   --> ieee(0,0,sgn)
        # exp >= 255 --> ieee(0,255,sgn)
        # else       --> ieee(fr<<1, exp, sgn)
        local clexp::Int32 = exp & convert(Int32, 0xFF)
        ieeeOfPieces(clexp == exp ? fr << 1 : convert(UInt32, 0), clexp, sgn)
    end
end
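# A small sketch of the IBM-to-IEEE conversion above. 0x41100000 is the big-endian
# IBM Float32 bit pattern for 1.0; hton/ntoh make the sketch byte-order independent:
#
#   ibm = reinterpret(IBMFloat32, hton(0x41100000))
#   convert(Float32, ibm)   # 1.0f0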
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2636
""" SegyToSeis(filename_in,filename_out;<keyword arguments>) Convert SEGY or SU data to seis format. The function needs input and output filenames. # Arguments - `format="segy"` : Options are segy or su - `swap_bytes=true` : If the flag equals true, the function swaps bytes - `input_type="ibm"` : Options are ibm or ieee *Credits: AS, 2015* """ function SegyToSeis(filename_in,filename_out;format="segy",swap_bytes=true,input_type="ibm") if (format=="su") file_hsize = 0 else file_hsize = 3600 # add commands here to read text and binary headers and write them out to # filename_out.thead and filename_out.bhead stream = open(filename_in) position = 3200 seek(stream, position) fh = GrabFileHeader(stream) ntfh = swap_bytes == true ? bswap(fh.netfh) : fh.netfh if ntfh == -1 error("add instructions to deal with variable extended text header") end if ntfh == 0 file_hsize = 3600 elseif ntfh > 0 # file_hsize = 3200 * (ntfh+1) + 400 file_hsize = 3200 * 1 + 400 else error("unknown data format") end end stream = open(filename_in) seek(stream, segy_count["ns"] + file_hsize) if (swap_bytes==true) nt = bswap(read(stream,Int16)) else nt = read(stream,Int16) end total = 60 + nt nx = round(Int,(filesize(stream)-file_hsize)/4/total) println("number of traces: ",nx) println("number of samples per trace: ",nt) h_segy = Array{SegyHeader}(undef,1) h_seis = Array{Header}(undef,1) seek(stream,file_hsize + segy_count["trace"]) h_segy[1] = GrabSegyHeader(stream,swap_bytes,nt,file_hsize,1) dt = h_segy[1].dt/1000000 extent = Extent(convert(Int32,nt),convert(Int32,nx),convert(Int32,1),convert(Int32,1),convert(Int32,1), convert(Float32,0),convert(Float32,1),convert(Float32,0),convert(Float32,0),convert(Float32,0), convert(Float32,dt),convert(Float32,1),convert(Float32,1),convert(Float32,1),convert(Float32,1), "Time","Trace Number","","","", "s","index","","","", "") for j=1:nx position = file_hsize + total*(j-1)*4 + segy_count["trace"] seek(stream,position) if (input_type == "ieee") d = Array{Float32}(undef,nt); read!(stream,d) else d = Array{IBMFloat32}(undef,nt); read!(stream,d) end if (swap_bytes==true && input_type == "ieee") d = bswap_vector(d) end if (input_type != "ieee") d = convert(Array{Float32,1},d) end h_segy[1] = GrabSegyHeader(stream,swap_bytes,nt,file_hsize,j) h_seis[1] = MapHeaders(h_segy,j,"SegyToSeis") SeisWrite(filename_out,d,h_seis,extent,itrace=j) end close(stream) end function bswap_vector(a) for i = 1 : length(a) a[i] = bswap(a[i]); end return a end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
912
""" SeisCopy(in,out) Copy a seis input file to a seis output file. In and out should be of type AbstractString *Credits: AS, 2015* """ function SeisCopy(in::AbstractString,out::AbstractString) filename_data_in = ParseDataName(in) filename_headers_in = ParseHeaderName(in) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_data_out = join([DATAPATH out "@data@"]) if filename_headers_in != "NULL" filename_headers_out = join([DATAPATH out "@headers@"]) else filename_headers_out = "NULL" end extent = ReadTextHeader(in) WriteTextHeader(out,extent,"native_float",4,filename_data_out,filename_headers_out) run(`cp $filename_data_in $filename_data_out`) if filename_headers_in != "NULL" run(`cp $filename_headers_in $filename_headers_out`) end end function SeisCopy(in::Array{AbstractString,1},out::Array{AbstractString,1}) for j = 1 : length(in) SeisCopy(in[j],out[j]) end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1816
""" SeisHeaderInfo(filename;<keyword arguments>) Print Seis header information to screen. The input is the name of the data file # Arguments - `ntrace=100000` : Number of traces to analyze *Credits: AS, 2015* """ function SeisHeaderInfo(filename;ntrace=100000) key = fieldnames(Header) nhead = length(key) filename_headers = ParseHeaderName(filename) stream = open(filename_headers) NX = GetNumTraces(filename) h = GrabHeader(stream,1) println("Displaying information for ", filename," (",NX," traces):") min_h = zeros(Float32,length(key)) max_h = zeros(Float32,length(key)) mean_h = zeros(Float32,length(key)) for ikey=1:length(key) min_h[ikey] = convert(Float32,getfield(h,key[ikey])) max_h[ikey] = convert(Float32,getfield(h,key[ikey])) mean_h[ikey] += convert(Float32,getfield(h,key[ikey])) end itrace = 2 while itrace <= NX nx = NX - itrace + 1 ntrace = nx > ntrace ? ntrace : nx position = 4*nhead*(itrace-1) seek(stream,position) h1 = read!(stream,Array{Header32Bits}(undef,nhead*ntrace)) h1 = reshape(h1,nhead,convert(Int,ntrace)) for ikey = 1 : length(key) keytype = eval(Meta.parse("typeof(SeisMain.InitSeisHeader().$(string(key[ikey])))")) h2 = reinterpret(keytype,vec(h1[ikey,:])) a = minimum(h2) b = maximum(h2) c = mean(h2) if (a < min_h[ikey]) min_h[ikey] = a end if (b > max_h[ikey]) max_h[ikey] = b end mean_h[ikey] += c*ntrace end itrace += ntrace end for ikey=1:length(key) mean_h[ikey] /= NX end close(stream) println(" Key Minimum Maximum Mean"); println("=============================================================") for ikey=1:length(key) @printf("%10s %11.3f %11.3f %11.3f\n",string(key[ikey]),min_h[ikey],max_h[ikey],mean_h[ikey]) end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
3195
""" SeisRead(filename;<keyword arguments>) Read seismic data from a given filename in seis format. The format is comprised of three elements: * a text file (data extent) with geometry information * a binary file containing data (@data@) * a binary file containing headers (@headers@) # Keyword arguments - `group="all"` : Options are all, some or gather - `key=["imx","imy"]` - `itrace=1` : Number of trace where the function starts reading - `ntrace=10000` : Total number of traces to read # Out - d: data as 2d array - h: headers as 1d array - extent: extent of the data (try _fieldnames(Extent)_ to see the information this contains) # Example ```julia d,h,ext = SeisRead(filename) ``` *Credits: AS, 2015* """ function SeisRead(filename;group="all",key=["imx","imy"],itrace=1,ntrace=10000) filename_data = ParseDataName(filename) filename_headers = ParseHeaderName(filename) extent = ReadTextHeader(filename) #println(filename_data) stream_d = open(filename_data) dtype = ParseDataFormat(filename) dtype = dtype == "native_float" ? Float32 : Complex{Float32} esize = ParseDataESize(filename) total = convert(Int,filesize(stream_d)/esize) close(stream_d) nhead = length(fieldnames(Header)) curr = zeros(length(key),1) prev = 1*curr nx = extent.n2*extent.n3*extent.n4*extent.n5 if filename_headers != "NULL" stream_h = open(filename_headers) if (group == "all") ntrace = nx elseif (group == "gather") j = 1 for outer j=itrace:nx h1 = GrabHeader(stream_h,j) for ikey=1:length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end if curr != prev && j > itrace ntrace = j - itrace break end prev = 1*curr end ntrace = j < nx ? j - itrace : j - itrace + 1 else ntrace = nx - itrace + 1 > ntrace ? ntrace : nx - itrace + 1 end position_h = 4*nhead*(itrace-1) seek(stream_h,position_h) h1 = read!(stream_h,Array{Header32Bits}(undef,nhead*ntrace)) h1 = reshape(h1,nhead,convert(Int64,ntrace)) h = Header[] for j = 1 : ntrace h = push!(h,BitsToHeader(h1[:,j])) end close(stream_h) else ntrace = nx > ntrace ? ntrace : nx end stream_d = open(filename_data) position_d = 4*extent.n1*(itrace-1) seek(stream_d,position_d) d = read!(stream_d,Array{dtype}(undef,extent.n1*ntrace)) if group=="all" if extent.n5 == 1 && extent.n4 == 1 && extent.n3 == 1 && extent.n2 == 1 d = reshape(d,Int(extent.n1)) elseif extent.n5 == 1 && extent.n4 == 1 && extent.n3 == 1 d = reshape(d,convert(Int,extent.n1),convert(Int,extent.n2)) elseif extent.n5 == 1 && extent.n4 == 1 d = reshape(d,convert(Int,extent.n1),convert(Int,extent.n2),convert(Int,extent.n3)) elseif extent.n5 == 1 d = reshape(d,convert(Int,extent.n1),convert(Int,extent.n2),convert(Int,extent.n3),convert(Int,extent.n4)) else d = reshape(d,convert(Int,extent.n1),convert(Int,extent.n2),convert(Int,extent.n3),convert(Int,extent.n4),convert(Int,extent.n5)) end else d = reshape(d,convert(Int64,extent.n1),convert(Int64,ntrace)) end close(stream_d) if filename_headers != "NULL" return d,h,extent else return d,extent end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1294
""" SeisReadHeaders(filename;<keyword arguments>) Read the headers of a input file in seis format # Arguments - `group="all"` : Options are all, some or gather - `key=[]` - `itrace=1` : Number of trace where the function starts reading - `ntrace=100` : Total number of traces to read # Example ```julia h = SeisRead(filename) ``` *Credits: AS, 2015* """ function SeisReadHeaders(filename;group="all",key=[],itrace=1,ntrace=100) filename_h = ParseHeaderName(filename) stream_h = open(filename_h) nhead = length(fieldnames(Header)) curr = zeros(length(key),1) prev = 1*curr nx = round(Int,filesize(stream_h)/(4*length(fieldnames(Header)))) - itrace + 1 if (group == "all") ntrace = nx elseif (group == "gather") for j=itrace:itrace+nx-1 h1 = GrabHeader(stream_h,j) for ikey=1:length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end if curr != prev && j > itrace nx = j - itrace break end prev = 1*curr end else ntrace = nx > ntrace ? ntrace : nx end position_h = 4*nhead*(itrace-1) seek(stream_h,position_h) h1 = read!(stream_h,Array{Header32Bits}(undef,nhead*ntrace)) h1 = reshape(h1,nhead,round(Int,ntrace)) h = Header[] for itrace = 1 : ntrace h = push!(h,BitsToHeader(h1[:,itrace])) end close(stream_h) return h end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
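A usage sketch for `SeisReadHeaders`, again with an assumed input file:

```julia
using SeisMain

# Grab the first 500 headers without touching the trace data.
h = SeisReadHeaders("shots.seis", group="some", itrace=1, ntrace=500)
sx = [hj.sx for hj in h]   # e.g. collect the source x coordinates
```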
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
511
""" SeisRemove(filename) Delete a seis file (deletes the text file, binary data file, and binary header file if there is one) *Credits: AS, 2015* """ function SeisRemove(filename::AbstractString) filename_data = ParseDataName(filename) filename_headers = ParseHeaderName(filename) rm(filename); rm(filename_data); if filename_headers != "NULL" rm(filename_headers); end end function SeisRemove(filename::Array{AbstractString,1}) for j = 1 : length(filename) SeisRemove(filename[j]) end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
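A cleanup sketch; `tmp_result` is an assumed temporary seis file:

```julia
using SeisMain

# Removes "tmp_result" plus its @data@ binary and, if present, its @headers@ binary.
SeisRemove("tmp_result")
```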
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1488
""" SeisToSegy(filename_in,filename_out;<keyword arguments>) Convert seis data to SU or SEGY format. The function needs input and output filenames. # Arguments - `su=true` : If the flag equals true, converts tu SU format, otherwise to SEGY format *Credits: AS, 2015* """ function SeisToSegy(in,out;su=true) if (su==true) file_hsize = 0 else file_hsize = 900 # add commands here to read text and binary headers out.thead and # out.bhead and write them to out end filename_headers = ParseHeaderName(in) extent = ReadTextHeader(in) n1 = extent.n1 nx = extent.n2*extent.n3*extent.n4*extent.n5 stream = open(out,"w") total = 60 + n1 println("ok") h_segy = Array{SeisMain.SegyHeader}(undef,1) h_seis = Array{Header}(undef,1) h1 = Header[] push!(h1,SeisMain.InitSeisHeader()) h1[1].o1 = extent.o1 h1[1].d1 = extent.d1 h1[1].n1 = extent.n1 for j = 1 : nx if filename_headers != "NULL" d,h1,e = SeisRead(in,itrace=j,ntrace=1,group="some") else println("j=",j) d,e = SeisRead(in,itrace=j,ntrace=1,group="some") println("size(d)=",size(d)) h1[1].tracenum = j end println("test") #h_segy[1] = MapHeaders(h1,j,"SeisToSegy") h_segy = MapHeaders(h1,j,"SeisToSegy") positn = file_hsize + total*(j-1)*4 + segy_count["trace"] seek(stream,positn) write(stream,convert(Array{Float32,1},d[:])) PutSegyHeader(stream,h_segy,n1,file_hsize,j) end close(stream) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
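A conversion sketch; both file names below are placeholders:

```julia
using SeisMain

# Write an SU file (no SEGY file header block) from an existing seis file.
SeisToSegy("shots.seis", "shots.su", su=true)
```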
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1611
""" SeisWrite(filename,d,h,extent;<keyword arguments>) Write seismic data in seis format # Arguments - `filename` : Name of file to write/generate - `d`: seismic data - `h::Array{Header,1}`: headers as 1d array with elements of type Header - `extent::Extent` : extent of the data (try _names(Extent)_ to see the information this contains) - `itrace=1` : First trace number to write *Credits: AS, 2015* """ function SeisWrite(filename,d,h::Array{Header,1},extent::Extent;itrace=1) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d = join([DATAPATH filename "@data@"]) filename_h = join([DATAPATH filename "@headers@"]) if (itrace==1) WriteTextHeader(filename,extent,"native_float",4,filename_d,filename_h) stream_dout = open(filename_d,"w") stream_hout = open(filename_h,"w") else stream_dout = open(filename_d,"a") stream_hout = open(filename_h,"a") end write(stream_dout,convert(Array{Float32,1},d[:])) close(stream_dout) nt = size(d,1) d = reshape(d,nt,:); vec = size(d) nx = vec[2] h1 = Header32Bits[] for j = itrace : itrace + nx - 1 h[j - itrace + 1].tracenum = j h2 = HeaderToBits(h[j - itrace + 1]) append!(h1,h2) end write(stream_hout,h1) close(stream_hout) end function SeisWrite(filename,d,extent::Extent) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d = join([DATAPATH filename "@data@"]) WriteTextHeader(filename,extent,"native_float",4,filename_d,"NULL") stream_dout = open(filename_d,"w") write(stream_dout,convert(Array{Float32,1},d[:])) close(stream_dout) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
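A round-trip sketch that reuses the extent and headers of an existing (assumed) file, which avoids building an `Extent` by hand:

```julia
using SeisMain

d, h, ext = SeisRead("shots.seis")       # assumed existing seis file
d .*= 2.0f0                              # some trivial processing
SeisWrite("shots_scaled", d, h, ext)     # starts writing at trace 1
```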
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
806
""" SeisWriteHeaders(filename,h;<keyword arguments>) Write seismic headers in seis format # Arguments - `itrace=1` : First trace number to write - `update_tracenum=true` *Credits: AS, 2015* """ function SeisWriteHeaders(filename,h;itrace=1,update_tracenum=true) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d = join([DATAPATH filename "@data@"]) filename_h = join([DATAPATH filename "@headers@"]) if (itrace==1) stream_hout = open(filename_h,"w") else stream_hout = open(filename_h,"a") end nx = length(h) h1 = Header32Bits[] for j = itrace : itrace + nx - 1 if update_tracenum == true h[j - itrace + 1].tracenum = j end h2 = HeaderToBits(h[j - itrace + 1]) append!(h1,h2) end write(stream_hout,h1) close(stream_hout) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
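A sketch of writing headers in two chunks; the input file is assumed to exist:

```julia
using SeisMain

h = SeisReadHeaders("shots.seis")                         # assumed existing file
n = length(h) ÷ 2
SeisWriteHeaders("only_headers", h[1:n])                  # creates the @headers@ binary
SeisWriteHeaders("only_headers", h[n+1:end], itrace=n+1)  # appends and renumbers tracenum
```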
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1531
using Downloads using SHA """ download_if_needed(url::String, output::String; sha256sum::String) Download the file from `url` to `output` if the file is not already present or if its SHA256 checksum does not match `sha256sum`. # Arguments - `url::String`: The URL from which to download the file. - `output::String`: The path where the downloaded file will be saved. # Keyword arguments - `sha256sum::String`: The expected SHA256 checksum of the file to ensure its integrity. # Output - `nothing`: This function returns `nothing`. It performs the side effect of downloading a file and verifying its checksum. If the checksum does not match, the function raises an error and deletes the downloaded file. *Credits: Átila Saraiva Quintela Soares, 2024* """ function download_if_needed(url::String, output::String; sha256sum::String) file_exists = isfile(output) if file_exists file_sha256 = open(output) do io io |> sha256 |> bytes2hex end if file_sha256 == sha256sum return nothing else @warn("File exists but checksum does not match. Downloading again.") end end # Download the file Downloads.download(url, output) # Verify checksum of the downloaded file open(output) do io file_sha256 = io |> sha256 |> bytes2hex if file_sha256 != sha256sum rm(output) error("Downloaded file checksum does not match the expected checksum.") end end return nothing end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
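A usage sketch; the URL and digest below are placeholders, not a real dataset:

```julia
using SeisMain

url = "https://example.com/shots.seis"        # placeholder URL
checksum = "<expected sha256 hex digest>"     # placeholder 64-character hex digest
download_if_needed(url, "shots.seis"; sha256sum=checksum)  # no-op if file and checksum already match
```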
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
9259
""" SeisBinData(in,out; <keyword arguments>) Sequentially bin seismic data using already binned trace headers. Input arguments should be consistent with SeisBinHeaders input arguments. See also: [`SeisBinHeaders`](@ref) # Arguments - `in`: filename of input, irregularly sampled data - `out`: filename of output, regularly sampled data # Keyword arguments - `style="sxsygxgy"`: bin style. Options: "mxmyhxhy","mxmyhaz","sxsyhxhy","gxgyhxhy","sxsyhaz","gxgyhaz" - `ang=90`: inline direction measured in degrees CC from East - `gamma=1`: vp/vs ratio for PS Asymptotic Conversion Point gathers (use gamma=1 for PP data) - `osx=0`,`osy=0`,`ogx=0`,`ogy=0` : origin for source and receiver coordinate system - `omx=0`,`omy=0`,`ohx=0`,`ohy=0`: origin for midpoint and offset coordinate system - `oaz=0`,`oh=0` : origin for azimuth and offset coordinate system - `dsx=1`,`dsy=1`,`dgx=1`,`dgy=1`: source and receiver step-size - `dmx=1`,`dmy=1`,`dhx=1`,`dhy=1`: midpoint and offset step-size - `dh=1`,`daz=1`: offset and azimuth step-size - `min_isx=0`,`max_isx=0`,`min_isy=0`,`max_isy=0`: grid extreme values for sources - `min_igx=0`,`max_igx=0`,`min_igy=0`,`max_igy=0`: grid extreme values for receivers - `min_imx=0`,`max_imx=0`,`min_imy=0`,`max_imy=0`: grid extreme values for midpoints - `min_ihx=0`,`max_ihx=0`,`min_ihy=0`,`max_ihy=0`: grid extreme values for offsets - `min_ih=0`,`max_ih=0`,`min_iaz=0`,`max_iaz=0`: grid extreme values for azimuth and offset - `ntrace=10000`: maximum number of traces processed at a time # Output In file `out`, the binned data is saved. *Credits: Aaron Stanton, 2017* """ function SeisBinData(in,out;style="sxsygxgy",ang=90,gamma=1,osx=0,osy=0,ogx=0,ogy=0,omx=0,omy=0,ohx=0,ohy=0,oh=0,oaz=0,dsx=1,dsy=1,dgx=1,dgy=1,dmx=1,dmy=1,dhx=1,dhy=1,dh=1,daz=1,min_isx=0,max_isx=0,min_isy=0,max_isy=0,min_igx=0,max_igx=0,min_igy=0,max_igy=0,min_imx=0,max_imx=0,min_imy=0,max_imy=0,min_ihx=0,max_ihx=0,min_ihy=0,max_ihy=0,min_ih=0,max_ih=0,min_iaz=0,max_iaz=0,ntrace=10000) r2d = 180/pi; d2r = pi/180; gammainv = 1/gamma; if (ang > 90) ang2=-d2r*(ang-90) else ang2=d2r*(90-ang) end naz=convert(Int32,360/daz) if (style=="sxsygxgy") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nx1=nsx;nx2=nsy;nx3=ngx;nx4=ngy; elseif (style=="mxmyhxhy") nmx = max_imx - min_imx + 1 nmy = max_imy - min_imy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nx1=nmx;nx2=nmy;nx3=nhx;nx4=nhy; elseif (style=="mxmyhaz") nmx = max_imx - min_imx + 1 nmy = max_imy - min_imy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=nmx;nx2=nmy;nx3=nh;nx4=naz; elseif (style=="sxsyhxhy") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nx1=nsx;nx2=nsy;nx3=nhx;nx4=nhy; elseif (style=="gxgyhxhy") ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nx1=ngx;nx2=ngy;nx3=nhx;nx4=nhy; elseif (style=="sxsyhaz") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=nsx;nx2=nsy;nx3=nh;nx4=naz; elseif (style=="gxgyhaz") ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=ngx;nx2=ngy;nx3=nh;nx4=naz; else error("style not recognized.") end nx_out = nx1*nx2*nx3*nx4 stream = open(ParseHeaderName(in)) nx_in = round(Int,filesize(stream)/(4*length(fieldnames(Header)))) seek(stream, header_count["n1"]) nt = read(stream,Int32) seek(stream, 
header_count["o1"]) o1 = read(stream,Float32) seek(stream, header_count["d1"]) dt = read(stream,Float32) close(stream) d = zeros(Float32,nt,1) out_d = ParseDataName(out) out_h = ParseHeaderName(out) stream_d = open(out_d,"a") #Create 0 volume j = 1 for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 position_d = 4*nt*(j - 1) seek(stream_d,position_d) write(stream_d,d[:,1]) j += 1 end end end end close(stream_d) stream_d = open(out_d,"a") if (style=="sxsygxgy") j = 1 while j <= nx_in d,h,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h[k].igy - min_igy)*nx1*nx2*nx3 + (h[k].igx - min_igx)*nx2*nx1 + (h[k].isy - min_isy)*nx1 + h[k].igx - min_igx + 1 if (itrace > 0 && itrace <= nx_out) if (h[k].isx >= min_isx && h[k].isx <= max_isx && h[k].isy >= min_isy && h[k].isy <= max_isy && h[k].igx >= min_igx && h[k].igx <= max_igx && h[k].igy >= min_igy && h[k].igy <= max_igy) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="mxmyhxhy") j = 1 while j <= nx_in d,h,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h[k].ihy - min_ihy)*nx3*nx2*nx1 + (h[k].ihx - min_ihx)*nx2*nx1 + (h[k].imy - min_imy)*nx1 + h[k].imx - min_imx + 1 if (itrace > 0 && itrace <= nx_out) if (h[k].imx >= min_imx && h[k].imx <= max_imx && h[k].imy >= min_imy && h[k].imy <= max_imy && h[k].ihx >= min_ihx && h[k].ihx <= max_ihx && h[k].ihy >= min_ihy && h[k].ihy <= max_ihy) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="mxmyhaz") j = 1 while j <= nx_in d,h_in,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h_in[k].iaz - min_iaz)*nx3*nx2*nx1 + (h_in[k].ih - min_ih)*nx2*nx1 + (h_in[k].imy - min_imy)*nx1 + h_in[k].imx - min_imx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].imx >= min_imx && h_in[k].imx <= max_imx && h_in[k].imy >= min_imy && h_in[k].imy <= max_imy && h_in[k].ih >= min_ih && h_in[k].ih <= max_ih && h_in[k].iaz >= min_iaz && h_in[k].iaz <= max_iaz) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="sxsyhxhy") j = 1 while j <= nx_in d,h_in,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h_in[k].ihy - min_ihy)*nx3*nx2*nx1 + (h_in[k].ihx - min_ihx)*nx2*nx1 + (h_in[k].isy - min_isy)*nx1 + h_in[k].isx - min_isx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].isx >= min_isx && h_in[k].isx <= max_isx && h_in[k].isy >= min_isy && h_in[k].isy <= max_isy && h_in[k].ihx >= min_ihx && h_in[k].ihx <= max_ihx && h_in[k].ihy >= min_ihy && h_in[k].ihy <= max_ihy) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="gxgyhxhy") j = 1 while j <= nx_in d,h_in,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h_in[k].ihy - min_ihy)*nx3*nx2*nx1 + (h_in[k].ihx - min_ihx)*nx2*nx1 + (h_in[k].igy - min_igy)*nx1 + h_in[k].igx - min_igx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].igx >= min_igx && h_in[k].igx <= max_igx && h_in[k].igy >= min_igy && h_in[k].igy <= max_igy && h_in[k].ihx >= min_ihx && h_in[k].ihx <= max_ihx && h_in[k].ihy >= 
min_ihy && h_in[k].ihy <= max_ihy) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="sxsyhaz") j = 1 while j <= nx_in d,h_in,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h_in[k].iaz - min_iaz)*nx3*nx2*nx1 + (h_in[k].ih - min_ih)*nx2*nx1 + (h_in[k].isy - min_isy)*nx1 + h_in[k].isx - min_isx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].isx >= min_isx && h_in[k].isx <= max_isx && h_in[k].isy >= min_isy && h_in[k].isy <= max_isy && h_in[k].ih >= min_ih && h_in[k].ih <= max_ih && h_in[k].iaz >= min_iaz && h_in[k].iaz <= max_iaz) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end elseif (style=="gxgyhaz") j = 1 while j <= nx_in d,h,e = SeisRead(in,group="some",itrace=j,ntrace=ntrace) num_traces_in = size(d[:,:],2) for k = 1 : num_traces_in itrace = (h[k].iaz - min_iaz)*nx1*nx2*nx3 + (h[k].ih - min_ih)*nx1*nx2 + (h[k].igy - min_igy)*nx1 + h[k].igx - min_igx + 1 if (itrace > 0 && itrace <= nx_out) if (h[k].igx >= min_igx && h[k].igx <= max_igx && h[k].igy >= min_igy && h[k].igy <= max_igy && h[k].ih >= min_ih && h[k].ih <= max_ih && h[k].iaz >= min_iaz && h[k].iaz <= max_iaz) position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) write(stream_d,d[:,k]) end end end j += num_traces_in end end close(stream_d) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
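`SeisBinData` assumes the headers were already binned, so a typical flow (sketched below on an assumed 25 m midpoint / 50 m offset grid) runs geometry, header binning, and data binning with the same grid arguments:

```julia
using SeisMain

# Assumed grid; in practice the limits come from the survey geometry.
grid = Dict(:style=>"mxmyhxhy", :dmx=>25, :dmy=>25, :dhx=>50, :dhy=>50,
            :min_imx=>0, :max_imx=>100, :min_imy=>0, :max_imy=>100,
            :min_ihx=>0, :max_ihx=>10, :min_ihy=>0, :max_ihy=>10)
SeisGeometry("shots.seis"; dmx=25, dmy=25, dhx=50, dhy=50)  # fill header grid indices
SeisBinHeaders("shots.seis", "binned"; grid...)             # write regular headers
SeisBinData("shots.seis", "binned"; grid...)                # write regular traces
```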
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
21325
""" SeisBinHeaders(in,out; <keyword arguments>) Sequentially bin seismic headers using the available grid information. Keyword arguments should be consistent with SeisGeometry keyword arguments. # Arguments - `in`: filename of input, irregularly sampled data - `out`: filename of output, regularly sampled data # Keyword arguments - `style="sxsygxgy"`: bin style. Options: "mxmyhxhy","mxmyhaz","sxsyhxhy","gxgyhxhy","sxsyhaz","gxgyhaz" - `ang=90`: inline direction measured in degrees CC from East - `gamma=1`: vp/vs ratio for PS Asymptotic Conversion Point gathers (use gamma=1 for PP data) - `osx=0`,`osy=0`,`ogx=0`,`ogy=0` : origin for source and receiver coordinate system - `omx=0`,`omy=0`,`ohx=0`,`ohy=0`: origin for midpoint and offset coordinate system - `oaz=0`,`oh=0` : origin for azimuth and offset coordinate system - `dsx=1`,`dsy=1`,`dgx=1`,`dgy=1`: source and receiver step-size - `dmx=1`,`dmy=1`,`dhx=1`,`dhy=1`: midpoint and offset step-size - `dh=1`,`daz=1`: offset and azimuth step-size - `min_isx=0`,`max_isx=0`,`min_isy=0`,`max_isy=0`: grid extreme values for sources - `min_igx=0`,`max_igx=0`,`min_igy=0`,`max_igy=0`: grid extreme values for receivers - `min_imx=0`,`max_imx=0`,`min_imy=0`,`max_imy=0`: grid extreme values for midpoints - `min_ihx=0`,`max_ihx=0`,`min_ihy=0`,`max_ihy=0`: grid extreme values for offsets - `min_ih=0`,`max_ih=0`,`min_iaz=0`,`max_iaz=0`: grid extreme values for azimuth and offset - `ntrace=10000`: maximum number of traces processed at a time # Output In file `out@headers@`, binned headers are saved. *Credits: Aaron Stanton,2017* """ function SeisBinHeaders(in,out;style="sxsygxgy",ang=90,gamma=1,osx=0,osy=0,ogx=0,ogy=0,omx=0,omy=0,ohx=0,ohy=0,oh=0,oaz=0,dsx=1,dsy=1,dgx=1,dgy=1,dmx=1,dmy=1,dhx=1,dhy=1,dh=1,daz=1,min_isx=0,max_isx=0,min_isy=0,max_isy=0,min_igx=0,max_igx=0,min_igy=0,max_igy=0,min_imx=0,max_imx=0,min_imy=0,max_imy=0,min_ihx=0,max_ihx=0,min_ihy=0,max_ihy=0,min_ih=0,max_ih=0,min_iaz=0,max_iaz=0,ntrace=10000) r2d = 180/pi; d2r = pi/180; gammainv = 1/gamma; if (ang > 90) ang2=-d2r*(ang-90) else ang2=d2r*(90-ang) end naz=convert(Int32,360/daz) if (style=="sxsygxgy") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nx1=nsx;nx2=nsy;nx3=ngx;nx4=ngy; ox1=osx;ox2=osy;ox3=ogx;ox4=ogy; dx1=dsx;dx2=dsy;dx3=dgx;dx4=dgy; label2="sx";label3="sy";label4="gx";label5="gy"; unit2="m";unit3="m";unit4="m";unit5="m"; elseif (style=="mxmyhxhy") nmx = max_imx - min_imx + 1 nmy = max_imy - min_imy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nx1=nmx;nx2=nmy;nx3=nhx;nx4=nhy; ox1=omx;ox2=omy;ox3=ohx;ox4=ohy; dx1=dmx;dx2=dmy;dx3=dhx;dx4=dhy; label2="mx";label3="my";label4="hx";label5="hy"; unit2="m";unit3="m";unit4="m";unit5="m"; elseif (style=="mxmyhaz") nmx = max_imx - min_imx + 1 nmy = max_imy - min_imy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=nmx;nx2=nmy;nx3=nh;nx4=naz; ox1=omx;ox2=omy;ox3=oh;ox4=oaz; dx1=dmx;dx2=dmy;dx3=dh;dx4=daz; label2="mx";label3="my";label4="h";label5="az"; unit2="m";unit3="m";unit4="m";unit5="Degrees"; elseif (style=="sxsyhxhy") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nx1=nsx;nx2=nsy;nx3=nhx;nx4=nhy; ox1=osx;ox2=osy;ox3=ohx;ox4=ohy; dx1=dsx;dx2=dsy;dx3=dhx;dx4=dhy; label2="sx";label3="sy";label4="hx";label5="hy"; unit2="m";unit3="m";unit4="m";unit5="m"; elseif (style=="gxgyhxhy") ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - 
min_ihy + 1 nx1=ngx;nx2=ngy;nx3=nhx;nx4=nhy; ox1=ogx;ox2=ogy;ox3=ohx;ox4=ohy; dx1=dgx;dx2=dgy;dx3=dhx;dx4=dhy; label2="gx";label3="gy";label4="hx";label5="hy"; unit2="m";unit3="m";unit4="m";unit5="m"; elseif (style=="sxsyhaz") nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=nsx;nx2=nsy;nx3=nh;nx4=naz; ox1=osx;ox2=osy;ox3=oh;ox4=oaz; dx1=dsx;dx2=dsy;dx3=dh;dx4=daz; label2="sx";label3="sy";label4="hx";label5="az"; unit2="m";unit3="m";unit4="m";unit5="Degrees"; elseif (style=="gxgyhaz") ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 nx1=ngx;nx2=ngy;nx3=nh;nx4=naz; ox1=ogx;ox2=ogy;ox3=oh;ox4=oaz; dx1=dgx;dx2=dgy;dx3=dh;dx4=daz; label2="gx";label3="gy";label4="h";label5="az"; unit2="m";unit3="m";unit4="m";unit5="Degrees"; else error("style not recognized.") end nx_out = nx1*nx2*nx3*nx4 stream_in = open(ParseHeaderName(in)) nx_in = round(Int,filesize(stream_in)/(4*length(fieldnames(Header)))) seek(stream_in, header_count["n1"]) nt = read(stream_in,Int32) println("nt= ",nt) println("The final binned cube will have an approximate size of ", round(nx1*nx2*nx3*nx4*nt*4*1e-9,digits=3)," Gb") seek(stream_in, header_count["o1"]) o1 = read(stream_in,Float32) seek(stream_in, header_count["d1"]) dt = read(stream_in,Float32) close(stream_in) extent = Extent(convert(Int32,nt),convert(Int32,nx1),convert(Int32,nx2),convert(Int32,nx3),convert(Int32,nx4), convert(Float32,o1),convert(Float32,ox1),convert(Float32,ox2),convert(Float32,ox3),convert(Float32,ox4), convert(Float32,dt),convert(Float32,dx1),convert(Float32,dx2),convert(Float32,dx3),convert(Float32,dx4), "Time",label5,label4,label3,label2, "s",unit5,unit4,unit3,unit2, "") DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d = join([DATAPATH out "@data@"]) filename_h = join([DATAPATH out "@headers@"]) #writes extent file WriteTextHeader(out,extent,"native_float", 4,filename_d,filename_h) h = Array{Header}(undef,1) h[1] = InitSeisHeader() stream_out = open(filename_h,"a+") j = 1 if (style=="sxsygxgy") for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),o1) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) h[1].isx = convert(typeof(h[1].isx),ix1 - 1 + min_isx) h[1].isy = convert(typeof(h[1].isy),ix2 - 1 + min_isy) h[1].igx = convert(typeof(h[1].igx),ix3 - 1 + min_igx) h[1].igy = convert(typeof(h[1].igy),ix4 - 1 + min_igy) sx_rot = convert(Float32,(ix1 - 1 + min_isx)*dsx + osx); sy_rot = convert(Float32,(ix2 - 1 + min_isy)*dsy + osy); gx_rot = convert(Float32,(ix3 - 1 + min_igx)*dgx + ogx); gy_rot = convert(Float32,(ix4 - 1 + min_igy)*dgy + ogy); h[1].sx = (sx_rot-osx)*cos(ang2) + (sy_rot-osy)*sin(ang2) + osx; h[1].sy = -(sx_rot-osx)*sin(ang2) + (sy_rot-osy)*cos(ang2) + osy; h[1].gx = (gx_rot-ogx)*cos(ang2) + (gy_rot-ogy)*sin(ang2) + ogx; h[1].gy = -(gx_rot-ogx)*sin(ang2) + (gy_rot-ogy)*cos(ang2) + ogy; h[1].hx = h[1].gx - h[1].sx h[1].hy = h[1].gy - h[1].sy h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = r2d*atan((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv); h[1].my = h[1].sy + h[1].hy/(1 + gammainv); mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx; my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy; hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx; hy_rot = 
(h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy; h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz))<naz ? convert(Int32,round((h[1].az-oaz)/daz)) : 0 h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) PutHeader(stream_out,h[1],j) j += 1 end end end end h_in=SeisReadHeaders(in) hout = SeisReadHeaders(out) for k = 1 : nx_in itrace = (h_in[k].igy - min_igy)*nx3*nx2*nx1 + (h_in[k].igx - min_igx)*nx2*nx1 + (h_in[k].isy - min_isy)*nx1 + h_in[k].isx - min_isx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].isx >= min_isx && h_in[k].isx <= max_isx && h_in[k].isy >= min_isy && h_in[k].isy <= max_isy && h_in[k].igx >= min_igx && h_in[k].igx <= max_igx && h_in[k].igy >= min_igy && h_in[k].igy <= max_igy) hout=GrabHeader(stream_out,itrace) hout.sx=h_in[k].sx hout.sy=h_in[k].sy hout.gx=h_in[k].gx hout.gy=h_in[k].gy hout.mx=h_in[k].mx hout.my=h_in[k].my hout.hx=h_in[k].hx hout.hy=h_in[k].hy hout.h=h_in[k].h hout.az=h_in[k].az hout.ang=h_in[k].ang PutHeader(stream_out,hout,itrace) end end end elseif (style=="mxmyhxhy") for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),o1) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) h[1].imx = convert(typeof(h[1].imx),ix1 - 1 + min_imx) h[1].imy = convert(typeof(h[1].imy),ix2 - 1 + min_imy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) mx_rot = convert(Float32,(ix1 - 1 + min_imx)*dmx + omx); my_rot = convert(Float32,(ix2 - 1 + min_imy)*dmy + omy); hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx); hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy); h[1].mx = (mx_rot-omx)*cos(ang2) + (my_rot-omy)*sin(ang2) + omx; h[1].my = -(mx_rot-omx)*sin(ang2) + (my_rot-omy)*cos(ang2) + omy; h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx; h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy; h[1].sx = h[1].mx - h[1].hx/(1 + gammainv); h[1].sy = h[1].my - h[1].hy/(1 + gammainv); h[1].gx = h[1].mx + h[1].hx*(1-(1/(1 + gammainv))); h[1].gy = h[1].my + h[1].hy*(1-(1/(1 + gammainv))); sx_rot = (h[1].sx-osx)*cos(ang2) - (h[1].sy-osy)*sin(ang2) + osx; sy_rot = (h[1].sx-osx)*sin(ang2) + (h[1].sy-osy)*cos(ang2) + osy; gx_rot = (h[1].gx-ogx)*cos(ang2) - (h[1].gy-ogy)*sin(ang2) + ogx; gy_rot = (h[1].gx-ogx)*sin(ang2) + (h[1].gy-ogy)*cos(ang2) + ogy; h[1].isx = convert(Int32,round((sx_rot-osx)/dsx)) h[1].isy = convert(Int32,round((sy_rot-osy)/dsy)) h[1].igx = convert(Int32,round((gx_rot-ogx)/dgx)) h[1].igy = convert(Int32,round((gy_rot-ogy)/dgy)) h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = r2d*atan((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz))<naz ? 
convert(Int32,round((h[1].az-oaz)/daz)) : 0 h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) PutHeader(stream_out,h[1],j) j += 1 end end end end h_in=SeisReadHeaders(in) hout = SeisReadHeaders(out) for k = 1 : nx_in itrace = (h_in[k].ihy - min_ihy)*nx3*nx2*nx1 + (h_in[k].ihx - min_ihx)*nx2*nx1 + (h_in[k].imy - min_imy)*nx1 + h_in[k].imx - min_imx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].imx >= min_imx && h_in[k].imx <= max_imx && h_in[k].imy >= min_imy && h_in[k].imy <= max_imy && h_in[k].ihx >= min_ihx && h_in[k].ihx <= max_ihx && h_in[k].ihy >= min_ihy && h_in[k].ihy <= max_ihy) hout=GrabHeader(stream_out,itrace) hout.sx=h_in[k].sx hout.sy=h_in[k].sy hout.gx=h_in[k].gx hout.gy=h_in[k].gy hout.mx=h_in[k].mx hout.my=h_in[k].my hout.hx=h_in[k].hx hout.hy=h_in[k].hy hout.h=h_in[k].h hout.az=h_in[k].az hout.ang=h_in[k].ang PutHeader(stream_out,hout,itrace) end end end elseif (style=="mxmyhaz") for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),o1) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) h[1].imx = convert(typeof(h[1].imx),ix1 - 1 + min_imx) h[1].imy = convert(typeof(h[1].imy),ix2 - 1 + min_imy) h[1].ih = convert(typeof(h[1].ih), ix3 - 1 + min_ih) h[1].iaz = convert(typeof(h[1].iaz),ix4 - 1 + min_iaz) mx_rot = convert(Float32,(ix1 - 1 + min_imx)*dmx + omx); my_rot = convert(Float32,(ix2 - 1 + min_imy)*dmy + omy); h[1].mx = (mx_rot-omx)*cos(ang2) + (my_rot-omy)*sin(ang2) + omx; h[1].my = -(mx_rot-omx)*sin(ang2) + (my_rot-omy)*cos(ang2) + omy; h[1].h = convert(Float32,(ix3 - 1 + min_ih)*dh + oh); h[1].az = r2d*atan((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az <= 90) h[1].hx = h[1].h*cos(d2r*h[1].az); h[1].hy = h[1].h*sin(d2r*h[1].az); elseif (h[1].az > 90 && h[1].az <= 180) h[1].hx =-h[1].h*cos(pi-(d2r*h[1].az)); h[1].hy = h[1].h*sin(pi-(d2r*h[1].az)); elseif (h[1].az > 180 && h[1].az <= 270) h[1].hx =-h[1].h*cos((d2r*h[1].az)-pi); h[1].hy =-h[1].h*sin((d2r*h[1].az)-pi); else h[1].hx = h[1].h*cos(2*pi-(d2r*h[1].az)); h[1].hy =-h[1].h*sin(2*pi-(d2r*h[1].az)); end h[1].sx = h[1].mx - h[1].hx/(1 + gammainv); h[1].sy = h[1].my - h[1].hy/(1 + gammainv); h[1].gx = h[1].mx + h[1].hx*(1-(1/(1 + gammainv))); h[1].gy = h[1].my + h[1].hy*(1-(1/(1 + gammainv))); sx_rot = (h[1].sx-osx)*cos(ang2) - (h[1].sy-osy)*sin(ang2) + osx; sy_rot = (h[1].sx-osx)*sin(ang2) + (h[1].sy-osy)*cos(ang2) + osy; gx_rot = (h[1].gx-ogx)*cos(ang2) - (h[1].gy-ogy)*sin(ang2) + ogx; gy_rot = (h[1].gx-ogx)*sin(ang2) + (h[1].gy-ogy)*cos(ang2) + ogy; h[1].isx = convert(Int32,round((sx_rot-osx)/dsx)) h[1].isy = convert(Int32,round((sy_rot-osy)/dsy)) h[1].igx = convert(Int32,round((gx_rot-ogx)/dgx)) h[1].igy = convert(Int32,round((gy_rot-ogy)/dgy)) hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx; hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy; h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) PutHeader(stream_out,h[1],j) j += 1 end end end end h_in=SeisReadHeaders(in) hout = SeisReadHeaders(out) for k = 1 : nx_in itrace = (h_in[k].iaz - min_iaz)*nx3*nx2*nx1 + (h_in[k].ih - min_ih)*nx2*nx1 + (h_in[k].imy - min_imy)*nx1 + h_in[k].imx - min_imx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].imx >= 
min_imx && h_in[k].imx <= max_imx && h_in[k].imy >= min_imy && h_in[k].imy <= max_imy && h_in[k].ih >= min_ih && h_in[k].ih <= max_ih && h_in[k].iaz >= min_iaz && h_in[k].iaz <= max_iaz) hout[itrace].sx=h_in[k].sx hout[itrace].sy=h_in[k].sy hout[itrace].gx=h_in[k].gx hout[itrace].gy=h_in[k].gy hout[itrace].mx=h_in[k].mx hout[itrace].my=h_in[k].my hout[itrace].hx=h_in[k].hx hout[itrace].hy=h_in[k].hy hout[itrace].h=h_in[k].h hout[itrace].az=h_in[k].az hout[itrace].ang=h_in[k].ang PutHeader(stream_out,hout[itrace],itrace) end end end elseif (style=="sxsyhxhy") for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),o1) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) h[1].isx = convert(typeof(h[1].isx),ix1 - 1 + min_isx) h[1].isy = convert(typeof(h[1].isy),ix2 - 1 + min_isy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) sx_rot = convert(Float32,(ix1 - 1 + min_isx)*dsx + osx); sy_rot = convert(Float32,(ix2 - 1 + min_isy)*dsy + osy); hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx); hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy); h[1].sx = (sx_rot-osx)*cos(ang2) + (sy_rot-osy)*sin(ang2) + osx; h[1].sy = -(sx_rot-osx)*sin(ang2) + (sy_rot-osy)*cos(ang2) + osy; h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx; h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy; h[1].gx = h[1].sx + h[1].hx; h[1].gy = h[1].sy + h[1].hy; h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = r2d*atan((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv); h[1].my = h[1].sy + h[1].hy/(1 + gammainv); mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx; my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy; hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx; hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy; h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz))<naz ? 
convert(Int32,round((h[1].az-oaz)/daz)) : 0 h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) PutHeader(stream_out,h[1],j) j += 1 end end end end h_in=SeisReadHeaders(in) hout = SeisReadHeaders(out) for k = 1 : nx_in itrace = (h_in[k].ihy - min_ihy)*nx3*nx2*nx1 + (h_in[k].ihx - min_ihx)*nx2*nx1 + (h_in[k].isy - min_isy)*nx1 + h_in[k].isx - min_isx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].isx >= min_isx && h_in[k].isx <= max_isx && h_in[k].isy >= min_isy && h_in[k].isy <= max_isy && h_in[k].ihx >= min_ihx && h_in[k].ihx <= max_ihx && h_in[k].ihy >= min_ihy && h_in[k].ihy <= max_ihy) hout[itrace].sx=h_in[k].sx hout[itrace].sy=h_in[k].sy hout[itrace].gx=h_in[k].gx hout[itrace].gy=h_in[k].gy hout[itrace].mx=h_in[k].mx hout[itrace].my=h_in[k].my hout[itrace].hx=h_in[k].hx hout[itrace].hy=h_in[k].hy hout[itrace].h=h_in[k].h hout[itrace].az=h_in[k].az hout[itrace].ang=h_in[k].ang PutHeader(stream_out,hout[itrace],itrace) end end end elseif (style=="gxgyhxhy") for ix4 = 1 : nx4 for ix3 = 1 : nx3 for ix2 = 1 : nx2 for ix1 = 1 : nx1 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),o1) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) h[1].igx = convert(typeof(h[1].igx),ix1 - 1 + min_igx) h[1].igy = convert(typeof(h[1].igy),ix2 - 1 + min_igy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) gx_rot = convert(Float32,(ix1 - 1 + min_igx)*dgx + ogx); gy_rot = convert(Float32,(ix2 - 1 + min_igy)*dgy + ogy); hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx); hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy); h[1].gx = (gx_rot-ogx)*cos(ang2) + (gy_rot-ogy)*sin(ang2) + ogx; h[1].gy = -(gx_rot-ogx)*sin(ang2) + (gy_rot-ogy)*cos(ang2) + ogy; h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx; h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy; h[1].sx = h[1].gx - h[1].hx; h[1].sy = h[1].gy - h[1].hy; h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = r2d*atan((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv); h[1].my = h[1].sy + h[1].hy/(1 + gammainv); mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx; my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy; hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx; hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy; h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz))<naz ? 
convert(Int32,round((h[1].az-oaz)/daz)) : 0 h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) PutHeader(stream_out,h[1],j) j += 1 end end end end h_in=SeisReadHeaders(in) hout = SeisReadHeaders(out) for k = 1 : nx_in itrace = (h_in[k].ihy - min_ihy)*nx3*nx2*nx1 + (h_in[k].ihx - min_ihx)*nx2*nx1 + (h_in[k].igy - min_igy)*nx1 + h_in[k].igx - min_igx + 1 if (itrace > 0 && itrace <= nx_out) if (h_in[k].igx >= min_igx && h_in[k].igx <= max_igx && h_in[k].igy >= min_igy && h_in[k].igy <= max_igy && h_in[k].ihx >= min_ihx && h_in[k].ihx <= max_ihx && h_in[k].ihy >= min_ihy && h_in[k].ihy <= max_ihy) hout[itrace].sx=h_in[k].sx hout[itrace].sy=h_in[k].sy hout[itrace].gx=h_in[k].gx hout[itrace].gy=h_in[k].gy hout[itrace].mx=h_in[k].mx hout[itrace].my=h_in[k].my hout[itrace].hx=h_in[k].hx hout[itrace].hy=h_in[k].hy hout[itrace].h=h_in[k].h hout[itrace].az=h_in[k].az hout[itrace].ang=h_in[k].ang PutHeader(stream_out,hout[itrace],itrace) end end end elseif (style=="sxsyhaz") error("sxsyhaz not developed yet.") elseif (style=="gxgyhaz") error("gxgyhaz not developed yet.") end close(stream_out) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
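A header-binning sketch on an assumed common-midpoint grid; the function also prints an estimate of the binned cube size before writing the text header and `@headers@` file:

```julia
using SeisMain

SeisBinHeaders("shots.seis", "cmp_binned";
               style="mxmyhxhy", dmx=50, dmy=50,
               min_imx=0, max_imx=200, min_imy=0, max_imy=200,
               min_ihx=0, max_ihx=10, min_ihy=0, max_ihy=10)
```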
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2960
""" SeisGeometry(in;<keyword arguments>) Update headers with geometry information. Offsets and azimuths are calculated from source and receivers coordinates. # Arguments - `in`: input filename # Keyword arguments - `ang=90`: inline direction measured in degrees CC from East - `gamma=1`: vp/vs ratio for PS Asymptotic Conversion Point gathers (use gamma=1 for PP data) - `osx=0`,`osy=0`,`ogx=0`,`ogy=0` : origin for source and receiver coordinate system - `omx=0`,`omy=0`,`ohx=0`,`ohy=0`: origin for midpoint and offset coordinate system - `oaz=0`,`oh=0` : origin for azimuth and offset coordinate system - `dsx=1`,`dsy=1`,`dgx=1`,`dgy=1`: source and receiver step-size - `dmx=1`,`dmy=1`,`dhx=1`,`dhy=1`: midpoint and offset step-size - `dh=1`,`daz=1`: offset and azimuth step-size # Outputs the @headers@ file is updated with the following information: * hx,hy,h,az,mx,my : calculated offset, azimuth and midpoint * isx,isy,igx,igy,imx,imy,ihx,ihy,ih,iaz: calculated grid nodes for source and receiver position and midpoint, offset and azimuth. *Credits: A. Stanton, 2017* """ function SeisGeometry(in; ang=90, gamma=1, osx=0, osy=0, ogx=0, ogy=0, omx=0, omy=0, ohx=0, ohy=0, oh=0, oaz=0, dsx=1, dsy=1, dgx=1, dgy=1, dmx=1, dmy=1, dhx=1, dhy=1, dh=1, daz=1) r2d = 180/pi d2r = pi/180 gammainv = 1/gamma if (ang > 90) ang2=-d2r*(ang-90) else ang2=d2r*(90-ang) end filename = ParseHeaderName(in) stream = open(filename,"r+") nhead = 27 nx = round(Int,filesize(stream)/(4*length(fieldnames(Header)))) naz=convert(Int32,360/daz) for j=1:nx h = GrabHeader(stream,j) h.hx = h.gx - h.sx h.hy = h.gy - h.sy h.h = sqrt((h.hx^2) + (h.hy^2)) h.az = r2d*atan((h.gy-h.sy),(h.gx-h.sx)) if (h.az < 0) h.az += 360.0 end h.mx = h.sx + h.hx/(1 + gammainv); h.my = h.sy + h.hy/(1 + gammainv); sx_rot = (h.sx-osx)*cos(ang2) - (h.sy-osy)*sin(ang2) + osx; sy_rot = (h.sx-osx)*sin(ang2) + (h.sy-osy)*cos(ang2) + osy; gx_rot = (h.gx-ogx)*cos(ang2) - (h.gy-ogy)*sin(ang2) + ogx; gy_rot = (h.gx-ogx)*sin(ang2) + (h.gy-ogy)*cos(ang2) + ogy; mx_rot = (h.mx-omx)*cos(ang2) - (h.my-omy)*sin(ang2) + omx; my_rot = (h.mx-omx)*sin(ang2) + (h.my-omy)*cos(ang2) + omy; hx_rot = (h.hx-ohx)*cos(ang2) - (h.hy-ohy)*sin(ang2) + ohx; hy_rot = (h.hx-ohx)*sin(ang2) + (h.hy-ohy)*cos(ang2) + ohy; h.isx = convert(Int32,round((sx_rot-osx)/dsx)) h.isy = convert(Int32,round((sy_rot-osy)/dsy)) h.igx = convert(Int32,round((gx_rot-ogx)/dgx)) h.igy = convert(Int32,round((gy_rot-ogy)/dgy)) h.imx = convert(Int32,round((mx_rot-omx)/dmx)) h.imy = convert(Int32,round((my_rot-omy)/dmy)) h.ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h.ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h.ih = convert(Int32,round((h.h-oh)/dh)) h.iaz = convert(Int32,round((h.az-oaz)/daz))<naz ? convert(Int32,round((h.az-oaz)/daz)) : 0 PutHeader(stream,h,j) end close(stream) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
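A geometry sketch; the step sizes are assumptions that should match the acquisition:

```julia
using SeisMain

# Populates hx, hy, h, az, mx, my and the i* grid indices in shots.seis@headers@.
SeisGeometry("shots.seis"; ang=90, dmx=25, dmy=25, dhx=50, dhy=50, dh=50, daz=45)
```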
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
6638
""" SeisPatch(in,out;<keyword arguments>) Creates overlapping 5d patches from a 5d volume # Arguments - `in::String`: input filename (data should have grid information in headers) - `out::String`: prefix for output filenames # Keyword arguments - `style="sxsygxgy"`: bin style. Options: "mxmyhxhy","mxmyhaz","sxsyhxhy","gxgyhxhy","sxsyhaz","gxgyhaz" - `min_isx=0`,`max_isx=0`,`min_isy=0`,`max_isy=0`: grid extreme values for sources - `min_igx=0`,`max_igx=0`,`min_igy=0`,`max_igy=0`: grid extreme values for receivers - `min_imx=0`,`max_imx=0`,`min_imy=0`,`max_imy=0`: grid extreme values for midpoints - `min_ihx=0`,`max_ihx=0`,`min_ihy=0`,`max_ihy=0`: grid extreme values for offsets - `min_ih=0`,`max_ih=0`,`min_iaz=0`,`max_iaz=0`: grid extreme values for azimuth and offset - `it_WL=9e9`,`it_WO=0` : length and overlapping samples in time patches - `ix1_WL=9e9`,`ix1_WO=0`:length and overlapping samples in first space dimension - `ix2_WL=9e9`,`ix2_WO=0`,`ix3_WL=9e9`,`ix3_WO=0`,`ix4_WL=9e9`,`ix4_WO=0` # Output `filename,npatch`: String Array with the file name of the data patches, number of patches created *Credits: A. Stanton* """ function SeisPatch(in::String, out::String; style="sxsygxgy", min_isx=0, max_isx=0, min_isy=0, max_isy=0, min_igx=0, max_igx=0, min_igy=0, max_igy=0, min_imx=0, max_imx=0, min_imy=0, max_imy=0, min_ihx=0, max_ihx=0, min_ihy=0, max_ihy=0, min_ih=0, max_ih=0, min_iaz=0, max_iaz=0, it_WL=9e9, it_WO=0, ix1_WL=9e9, ix1_WO=0, ix2_WL=9e9, ix2_WO=0, ix3_WL=9e9, ix3_WO=0, ix4_WL=9e9, ix4_WO=0) if (style == "sxsygxgy") key = ["t","isx","isy","igx","igy"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_igx max_ix3 = max_igx min_ix4 = min_igy max_ix4 = max_igy elseif (style=="mxmyhxhy") key = ["t","imx","imy","ihx","ihy"] min_ix1 = min_imx max_ix1 = max_imx min_ix2 = min_imy max_ix2 = max_imy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="mxmyhaz") key = ["t","imx","imy","ih","iaz"] min_ix1 = min_imx max_ix1 = max_imx min_ix2 = min_imy max_ix2 = max_imy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz elseif (style=="sxsyhxhy") key = ["t","isx","isy","ihx","ihy"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="gxgyhxhy") key = ["t","igx","igy","ihx","ihy"] min_ix1 = min_igx max_ix1 = max_igx min_ix2 = min_igy max_ix2 = max_igy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="sxsyhaz") key = ["t","isx","isy","ih","iaz"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz elseif (style=="gxgyhaz") key = ["t","igx","igy","ih","iaz"] min_ix1 = min_igx max_ix1 = max_igx min_ix2 = min_igy max_ix2 = max_igy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz else error("style not defined.") end nx1 = max_ix1 - min_ix1 + 1 nx2 = max_ix2 - min_ix2 + 1 nx3 = max_ix3 - min_ix3 + 1 nx4 = max_ix4 - min_ix4 + 1 filename_data = ParseDataName(in) filename_headers = ParseHeaderName(in) extent = ReadTextHeader(in) nt = extent.n1 dt = extent.d1 ot = extent.o1 it_WL = it_WL > nt ? nt : it_WL ix1_WL = ix1_WL > nx1 ? nx1 : ix1_WL ix2_WL = ix2_WL > nx2 ? nx2 : ix2_WL ix3_WL = ix3_WL > nx3 ? nx3 : ix3_WL ix4_WL = ix4_WL > nx4 ? 
nx4 : ix4_WL tmax = ot + dt*nt it_NW = Int(floor(nt/(it_WL-it_WO))) ix1_NW = Int(floor(nx1/(ix1_WL-ix1_WO))) ix2_NW = Int(floor(nx2/(ix2_WL-ix2_WO))) ix3_NW = Int(floor(nx3/(ix3_WL-ix3_WO))) ix4_NW = Int(floor(nx4/(ix4_WL-ix4_WO))) if (ot + dt*(it_NW-1)*(it_WL-it_WO) + dt*it_WL < tmax) it_NW += 1 end if (min_ix1 + (ix1_NW-1)*(ix1_WL-ix1_WO) + ix1_WL < max_ix1) ix1_NW += 1 end if (min_ix2 + (ix2_NW-1)*(ix2_WL-ix2_WO) + ix2_WL < max_ix2) ix2_NW += 1 end if (min_ix3 + (ix3_NW-1)*(ix3_WL-ix3_WO) + ix3_WL < max_ix3) ix3_NW += 1 end if (min_ix4 + (ix4_NW-1)*(ix4_WL-ix4_WO) + ix4_WL < max_ix4) ix4_NW += 1 end println("Patches in each dimension ",it_NW," ",ix1_NW," ",ix2_NW," ",ix3_NW," ",ix4_NW) #split cube into multipatches npatch=0 NW=it_NW*ix1_NW*ix2_NW*ix3_NW*ix4_NW println("Number of patches=",NW) patch_list = Patch[] patch_names = String[] nmbr = 1 for it_W = 1 : it_NW mint = ot + dt*(it_W-1)*(it_WL-it_WO) maxt = mint + dt*(it_WL - 1) if (maxt >= tmax) maxt = tmax end for ix1_W = 1 : ix1_NW minx1 = min_ix1 + (ix1_W-1)*(ix1_WL-ix1_WO) maxx1 = minx1 + ix1_WL - 1 if (maxx1 >= max_ix1) maxx1 = max_ix1 end for ix2_W = 1 : ix2_NW minx2 = min_ix2 + (ix2_W-1)*(ix2_WL-ix2_WO) maxx2 = minx2 + ix2_WL - 1 if (maxx2 >= max_ix2) maxx2 = max_ix2 end for ix3_W = 1 : ix3_NW minx3 = min_ix3 + (ix3_W-1)*(ix3_WL-ix3_WO) maxx3 = minx3 + ix3_WL - 1 if (maxx3 >= max_ix3) maxx3 = max_ix3 end for ix4_W = 1 : ix4_NW minx4 = min_ix4 + (ix4_W-1)*(ix4_WL-ix4_WO) maxx4 = minx4 + ix4_WL - 1 if (maxx4 >= max_ix4) maxx4 = max_ix4 end patch_name = join([out "_" nmbr]) minval=[mint minx1 minx2 minx3 minx4] maxval=[maxt maxx1 maxx2 maxx3 maxx4] patch_names = push!(patch_names,patch_name) npatch += 1 nmbr +=1 it_nt=it_WL push!(patch_list, Patch(in, patch_name, key, mint, maxt,minx1, maxx1, minx2, maxx2,minx3, maxx3, minx4, maxx4,it_nt)) end end end end end pmap(grab_patch,patch_list) return patch_names,npatch end #******************************************************************************* mutable struct Patch in name key mint maxt minx1 maxx1 minx2 maxx2 minx3 maxx3 minx4 maxx4 it_nt end #******************************************************************************* function grab_patch(patch) minval=[patch.mint patch.minx1 patch.minx2 patch.minx3 patch.minx4] maxval=[patch.maxt patch.maxx1 patch.maxx2 patch.maxx3 patch.maxx4] SeisWindowPatch(patch.in,patch.name,key=patch.key,minval=minval,maxval=maxval,it_nt=patch.it_nt) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
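A patching sketch over an assumed binned midpoint volume, using 32-sample windows with 8 samples of overlap in the two midpoint dimensions:

```julia
using SeisMain

patch_names, npatch = SeisPatch("cmp_binned", "patch";
                                style="mxmyhxhy",
                                min_imx=0, max_imx=200, min_imy=0, max_imy=200,
                                ix1_WL=32, ix1_WO=8, ix2_WL=32, ix2_WO=8)
println("wrote ", npatch, " patches")
```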
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2023
""" SeisPatchProcess(in,out;<keyword arguments>) Read data from disk, split into multidimensional overlapping patches, apply processes, merge patches with tapers, and write to disk. Processing of patches can be done in parallel by running the code using (for example): julia -p 2 script_name.jl Important Notice: You must make declare global variables f and f_param on every processor. You can do this in your main function by typing (for example): @everywhere global f_param = ["style"=>"mxmyhxhy", "Niter"=> 100, "alpha"=>1,"fmax"=>80] @everywhere global f = [SeisPOCS] f is an array of functions that have the following syntax: d2, h2 = f(d1,h1,f_param), where param is a dictionary (Dict) of parameters for the function. note that param should contain parameters for the patching and unpatching operations. to execute the code type: julia -p 4 my_code.jl where 4 can be replaced with the number of processors you wish to use. *Credits: A. Stanton* """ function SeisPatchProcess(in,out,functions,param_patch=Dict(),param_f=Dict();patch_file="patch",patch_out="patch_function",rm=1) extent = SeisMain.ReadTextHeader(in) println(extent.n1) patches,npatch = SeisPatch(in,patch_file;param_patch...) list = Vector{patch_list}(undef,npatch) patches_out = Vector{String}(undef,npatch) for i=1:npatch p_out=join([patch_out i]) list[i] = patch_list(patches[i],p_out,param_f,functions) patches_out[i] = p_out end a = pmap(MyProcess,list) SeisUnPatch(patches_out,out;nt=extent.n1,param_patch...) if rm==1 for ipatch = 1 : npatch SeisRemove(patches[ipatch]) SeisRemove(patches_out[ipatch]) end end end function MyProcess(params) f = params.functions p = params.param_f d1,h1,e1 = SeisRead(params.p_file) for ifunc = 1 : length(f) func = f[ifunc] d2 = func(d1;p[ifunc]...) d1 = copy(d2) #h1 = copy(h2) end SeisWrite(params.p_out,d1,h1,e1) return(1) end mutable struct patch_list p_file p_out param_f functions end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
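A sketch of a patch-process-merge flow; `SeisPOCS` is assumed to come from a companion processing package and is not defined in this file, and all grid numbers are placeholders:

```julia
using Distributed; addprocs(2)
@everywhere using SeisMain

param_patch = Dict(:style=>"mxmyhxhy", :max_imx=>200, :max_imy=>200,
                   :ix1_WL=>32, :ix1_WO=>8)
param_f     = [Dict(:Niter=>100, :fmax=>80)]   # one Dict per function
# SeisPOCS is an assumed interpolation function from a companion package.
SeisPatchProcess("cmp_binned", "cmp_denoised", [SeisPOCS], param_patch, param_f)
```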
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
3466
""" SeisProcess(in,out,operators,parameters;<keyword arguments>) Run processing flows that read and write from disk f is a function that has the following syntax: d2,h2 = f(d1,h1,e1,param), where param is list of keyword arguments for the function. Note that f can be a vector of functions. They will be executed sequentially on the same group of traces. # Arguments - `in::String`: input filename - `out::String`: output filenames - `key=[]` """ function SeisProcess(in::String,out::String,operators,parameters;key=[]) # get list of gather lengths ext = ReadTextHeader(in) nx = ext.n2*ext.n3*ext.n4*ext.n5 filename_headers = ParseHeaderName(in) stream_h = open(filename_headers) curr = zeros(length(key),1) itrace = 1 h1 = GrabHeader(stream_h,1) for ikey = 1 : length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end prev = 1*curr L = [] for j = 1 : nx h1 = GrabHeader(stream_h,j) for ikey = 1 : length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end if curr != prev push!(L,j - itrace) itrace = j end prev = 1*curr end push!(L,nx - itrace + 1) close(stream_h) itrace_in = 1 itrace_out = 1 for igather = 1 : length(L) d1,h1,e1 = SeisRead(in, group="some",itrace=itrace_in,ntrace=L[igather]) num_traces_in = size(d1,2) for j = 1 : length(operators) op = operators[j] d2,h2 = op(d1,h1,e1;parameters[j]...) d1 = copy(d2) h1 = copy(h2) end num_traces_out = size(d1,2) SeisWrite(out,d1,h1,e1,itrace=itrace_out) itrace_in += num_traces_in itrace_out += num_traces_out end end function SeisProcess(in::Array{String,1}, out::Array{String,1}, operators, parameters;key=[]) for j = 1 : length(in) SeisProcess(in[j],out[j],operators,parameters;key=key) end end function SeisProcess(in1::String, in2::String, out::String, operators, parameters; key=[]) # Run processing flows that read 2 inputs and write 1 output # # f is a function that has the following syntax: # d3,h3 = f(d1,d2,h1,h2,param), where # param is list of keyword arguments for the function. # note that f can be a vector of functions. # They will be executed sequentially on the same group of traces. # get list of gather lengths ext = ReadTextHeader(in1) nx = ext.n2*ext.n3*ext.n4*ext.n5 filename_headers = ParseHeaderName(in1) stream_h = open(filename_headers) curr = zeros(length(key),1) itrace = 1 h1 = GrabHeader(stream_h,1) for ikey = 1 : length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end prev = 1*curr L = [] for j = 1 : nx h1 = GrabHeader(stream_h,j) for ikey = 1 : length(key) curr[ikey] = getfield(h1,Symbol(key[ikey])) end if curr != prev push!(L,j - itrace) itrace = j end prev = 1*curr end push!(L,nx - itrace + 1) close(stream_h) itrace_in = 1 itrace_out = 1 for igather = 1 : length(L) d1,h1,e1 = SeisRead(in1,group="some",itrace=itrace_in,ntrace=L[igather]) d2,h2,e2 = SeisRead(in2,group="some",itrace=itrace_in,ntrace=L[igather]) num_traces_in = size(d1,2) for j = 1 : length(operators) op = operators[j] d3,h3 = op(d1,d2,h1,h2;parameters[j]...) d1 = copy(d3) h1 = copy(h3) end num_traces_out = size(d1,2) SeisWrite(out,d1,h1,e1,itrace=itrace_out) itrace_in += num_traces_in itrace_out += num_traces_out end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
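A sketch applying a user-defined operator gather by gather; `scale_gather` is a hypothetical function with the documented `d2,h2 = f(d1,h1,e1; kwargs...)` calling convention:

```julia
using SeisMain

# Trivial operator: scale every trace in the gather by w.
scale_gather(d, h, e; w=1.0f0) = (w .* d, h)

SeisProcess("shots.seis", "shots_scaled", [scale_gather], [Dict(:w=>0.5f0)];
            key=["isx"])   # one gather per source index
```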
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
1094
""" SeisProcessHeaders(in,out,functions,parameters;<keyword arguments>) Run processing flows on the headers of a seis file. Each element of `functions` takes a Vector{Header} plus the keyword arguments in the matching element of `parameters` and returns a new Vector{Header}; headers are processed in chunks of at most `ntrace` unless `group="all"`. """ function SeisProcessHeaders(in, out, functions, parameters; group="some", key=[], ntrace=1000000, update_tracenum=true) if (group=="all") h1 = SeisReadHeaders(in,group=group,key=key,itrace=1,ntrace=ntrace) for ifunc = 1 : length(functions) f = functions[ifunc] p = parameters[ifunc] h2 = f(h1;p...) h1 = copy(h2) end SeisWriteHeaders(out,h1) else itrace_in = 1 itrace_out = 1 NX = GetNumTraces(in) while itrace_in <= NX nx = NX - itrace_in + 1 ntrace = nx > ntrace ? ntrace : nx h1 = SeisReadHeaders(in, group=group, key=key, itrace=itrace_in, ntrace=ntrace) num_traces_in = length(h1) for ifunc = 1 : length(functions) f = functions[ifunc] p = parameters[ifunc] h2 = f(h1;p...) h1 = copy(h2) end num_traces_out = length(h1) SeisWriteHeaders(out, h1, itrace=itrace_out, update_tracenum=update_tracenum) itrace_in += num_traces_in itrace_out += num_traces_out end end end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
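A sketch of a header-only flow; `shift_sx` is a hypothetical function that takes and returns a `Vector{Header}`:

```julia
using SeisMain

# Shift every source x coordinate by dx metres.
function shift_sx(h; dx=0.0f0)
    for hj in h
        hj.sx += dx
    end
    return h
end

SeisProcessHeaders("shots.seis", "shots_shifted", [shift_sx], [Dict(:dx=>10.0f0)])
```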
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2807
""" SeisSort(in, out;<keyword arguments>) Sort a seis file using its header words # Arguments - `in`: input filename >> a text file with information about data extent, data and header file names; a binary file containing data and a binary file containing headers. - `out`: output filename # Keyword arguments - `key=["imx","imy"]` - `rev=false` : sort headers in decreasing order - `ntrace=1000` : number of traces to read at a time # Output file `out` is created with data sorted. *Credits: AS, 2015* """ function SeisSort(in, out;key=["imx","imy"],rev=false,ntrace=100000) filename_h = ParseHeaderName(in) stream_h = open(filename_h) seek(stream_h, header_count["n1"]) nt = read(stream_h,Int32) nx = convert(Int64,filesize(stream_h)/(4*length(fieldnames(Header)))) h = Header[] # find min and max for each key h1 = GrabHeader(stream_h,1) minval = Array{Float32}(undef,length(key)) for ikey=1:length(key) minval[ikey] = getfield(h1,Symbol(key[ikey])) end for j=2:nx h1 = GrabHeader(stream_h,j) for ikey=1:length(key) key_val = abs(getfield(h1,Symbol(key[ikey]))) if (key_val < minval[ikey]) minval[ikey] = key_val end end end mykey = vec(zeros(Float64,nx)) seekstart(stream_h) for j=1:nx h1 = GrabHeader(stream_h,j) for ikey=1:length(key) mykey[j] += ((getfield(h1,Symbol(key[ikey])) + minval[ikey] + 1)* (10^(6*(length(key)-ikey)))) end end close(stream_h) p = convert(Array{Int32,1},sortperm(mykey,rev=rev)) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d_out = join([DATAPATH out "@data@"]) filename_h_out = join([DATAPATH out "@headers@"]) nhead = length(fieldnames(Header)) stream_h = open(filename_h_out) nx = Int(floor(filesize(stream_h)/(nhead*4))) close(stream_h) extent = ReadTextHeader(in) extent.n2 = nx extent.n3 = 1 extent.n4 = 1 extent.n5 = 1 WriteTextHeader(out,extent,"native_float",4,filename_d_out,filename_h_out) FetchHeaders(filename_h,out,p,nx) SeisMain.FetchTraces(in,out) tmp = join(["tmp_SeisSort_",string(Int(floor(rand()*100000)))]) cp(out,tmp,force=true); SeisProcessHeaders(out, tmp, [UpdateHeader], [Dict(:itmin=>1,:itmax=>nt)]) filename_h_tmp = join([DATAPATH tmp "@headers@"]) filename_h_out = join([DATAPATH out "@headers@"]) cp(filename_h_tmp,filename_h_out,force=true); rm(filename_h_tmp); rm(tmp); end function FetchHeaders(filename_h_in::String, filename_out::String, p::Array{Int32,1}, nx) stream_h = open(filename_h_in) h = Header[] for j = 1:nx append!(h,[GrabHeader(stream_h,p[j])]) end SeisWriteHeaders(filename_out,h,update_tracenum=false) end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
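A sorting sketch on the default midpoint keys; the input file is an assumed example:

```julia
using SeisMain

# Sort traces into imx-major, imy-minor order; set rev=true for decreasing keys.
SeisSort("shots.seis", "shots_cmp_sorted"; key=["imx","imy"], rev=false)
```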
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
20902
""" SeisUnPatch(in,out;<keyword arguments>) Reconstruct a 5D data volume from a set of 5D data patches. # Arguments - `in::Array{String,1}`: array containing filename of patches - `out::String`: filename for reconstructed volume # Keyword arguments - `style="sxsygxgy"`: bin style. Options: "mxmyhxhy","mxmyhaz","sxsyhxhy","gxgyhxhy","sxsyhaz","gxgyhaz" - `min_isx=0`,`max_isx=0`,`min_isy=0`,`max_isy=0`: grid extreme values for sources - `min_igx=0`,`max_igx=0`,`min_igy=0`,`max_igy=0`: grid extreme values for receivers - `min_imx=0`,`max_imx=0`,`min_imy=0`,`max_imy=0`: grid extreme values for midpoints - `min_ihx=0`,`max_ihx=0`,`min_ihy=0`,`max_ihy=0`: grid extreme values for offsets - `min_ih=0`,`max_ih=0`,`min_iaz=0`,`max_iaz=0`: grid extreme values for azimuth and offset - `it_WL=9e9`,`it_WO=0` : length and overlapping samples in time patches - `ix1_WL=9e9`,`ix1_WO=0`:length and overlapping samples in first space dimension - `ix2_WL=9e9`,`ix2_WO=0`,`ix3_WL=9e9`,`ix3_WO=0`,`ix4_WL=9e9`,`ix4_WO=0` - `nt=0`: time samples of reconstructed cube - `ang=90`: inline direction measured in degrees CC from East - `gamma=1`: vp/vs ratio for PS Asymptotic Conversion Point gathers (use gamma=1 for PP data) - `osx=0`,`osy=0`,`ogx=0`,`ogy=0` : origin for source and receiver coordinate system - `omx=0`,`omy=0`,`ohx=0`,`ohy=0`: origin for midpoint and offset coordinate system - `oaz=0`,`oh=0` : origin for azimuth and offset coordinate system - `dsx=1`,`dsy=1`,`dgx=1`,`dgy=1`: source and receiver step-size - `dmx=1`,`dmy=1`,`dhx=1`,`dhy=1`: midpoint and offset step-size - `dh=1`,`daz=1`: offset and azimuth step-size # Output In file `out`, the 5D reconstructed volume is created. *Credits: A. Stanton, F Carozzi, 2017* """ function SeisUnPatch(patch_names::Array{String,1},out::String;style="sxsygxgy",min_isx=0,max_isx=0,min_isy=0,max_isy=0,min_igx=0,max_igx=0,min_igy=0,max_igy=0,min_imx=0,max_imx=0,min_imy=0,max_imy=0,min_ihx=0,max_ihx=0,min_ihy=0,max_ihy=0,min_ih=0,max_ih=0,min_iaz=0,max_iaz=0,it_WL=9e9,it_WO=0,ix1_WL=9e9,ix1_WO=0,ix2_WL=9e9,ix2_WO=0,ix3_WL=9e9,ix3_WO=0,ix4_WL=9e9,ix4_WO=0,nt=0,ang=90,gamma=1,osx=0,osy=0,ogx=0,ogy=0,omx=0,omy=0,ohx=0,ohy=0,oh=0,oaz=0,dsx=1,dsy=1,dgx=1,dgy=1,dmx=1,dmy=1,dhx=1,dhy=1,dh=1,daz=1) if (style == "sxsygxgy") key = ["t","isx","isy","igx","igy"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_igx max_ix3 = max_igx min_ix4 = min_igy max_ix4 = max_igy elseif (style=="mxmyhxhy") key = ["t","imx","imy","ihx","ihy"] min_ix1 = min_imx max_ix1 = max_imx min_ix2 = min_imy max_ix2 = max_imy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="mxmyhaz") key = ["t","imx","imy","ih","iaz"] min_ix1 = min_imx max_ix1 = max_imx min_ix2 = min_imy max_ix2 = max_imy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz elseif (style=="sxsyhxhy") key = ["t","isx","isy","ihx","ihy"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="gxgyhxhy") key = ["t","igx","igy","ihx","ihy"] min_ix1 = min_igx max_ix1 = max_igx min_ix2 = min_igy max_ix2 = max_igy min_ix3 = min_ihx max_ix3 = max_ihx min_ix4 = min_ihy max_ix4 = max_ihy elseif (style=="sxsyhaz") key = ["t","isx","isy","ih","iaz"] min_ix1 = min_isx max_ix1 = max_isx min_ix2 = min_isy max_ix2 = max_isy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz elseif (style=="gxgyhaz") key = ["t","igx","igy","ih","iaz"] min_ix1 = 
min_igx max_ix1 = max_igx min_ix2 = min_igy max_ix2 = max_igy min_ix3 = min_ih max_ix3 = max_ih min_ix4 = min_iaz max_ix4 = max_iaz else error("style not defined.") end nx1 = max_ix1 - min_ix1 + 1 nx2 = max_ix2 - min_ix2 + 1 nx3 = max_ix3 - min_ix3 + 1 nx4 = max_ix4 - min_ix4 + 1 it_WL = it_WL > nt ? nt : it_WL ix1_WL = ix1_WL > nx1 ? nx1 : ix1_WL ix2_WL = ix2_WL > nx2 ? nx2 : ix2_WL ix3_WL = ix3_WL > nx3 ? nx3 : ix3_WL ix4_WL = ix4_WL > nx4 ? nx4 : ix4_WL rad2deg = 180/pi deg2rad = pi/180 gammainv = 1/gamma if (ang > 90) ang2=-deg2rad*(ang-90) else ang2=deg2rad*(90-ang) end nsx = max_isx - min_isx + 1 nsy = max_isy - min_isy + 1 ngx = max_igx - min_igx + 1 ngy = max_igy - min_igy + 1 nmx = max_imx - min_imx + 1 nmy = max_imy - min_imy + 1 nhx = max_ihx - min_ihx + 1 nhy = max_ihy - min_ihy + 1 nh = max_ih - min_ih + 1 naz = max_iaz - min_iaz + 1 if (style=="sxsygxgy") nx1=nsx; nx2=nsy; nx3=ngx; nx4=ngy elseif (style=="mxmyhxhy") nx1=nmx; nx2=nmy; nx3=nhx; nx4=nhy elseif (style=="mxmyhaz") nx1=nmx; nx2=nmy; nx3=nh; nx4=naz elseif (style=="sxsyhxhy") nx1=nsx; nx2=nsy; nx3=nhx; nx4=nhy elseif (style=="gxgyhxhy") nx1=ngx; nx2=ngy; nx3=nhx; nx4=nhy elseif (style=="sxsyhaz") nx1=nsx; nx2=nsy; nx3=nh; nx4=naz elseif (style=="gxgyhaz") nx1=ngx; nx2=ngy; nx3=nh; nx4=naz else error("style not recognized.") end filename_data = ParseDataName(patch_names[1]) filename_headers = ParseHeaderName(patch_names[1]) extent = ReadTextHeader(patch_names[1]) dt = extent.d1 ot = extent.o1 d = zeros(Float32,nt,1) h = Array{Header}(undef,1) h[1] = InitSeisHeader() extent = SeisMain.Extent(nt, max_ix1-min_ix1+1, max_ix2-min_ix2+1, max_ix3-min_ix3+1, max_ix4-min_ix4+1,convert(Float32,ot), 0, 0, 0, 0, convert(Float32,dt), 1, 1, 1, 1, "", "", "", "", "", "", "", "", "", "", "") println("creating headers") j = 1 for ix1 = 1 : nx1 for ix2 = 1 : nx2 for ix3 = 1 : nx3 for ix4 = 1 : nx4 h[1].tracenum = convert(typeof(h[1].tracenum),j) h[1].o1 = convert(typeof(h[1].o1),ot) h[1].n1 = convert(typeof(h[1].n1),nt) h[1].d1 = convert(typeof(h[1].d1),dt) if (style=="sxsygxgy") h[1].isx = convert(typeof(h[1].isx),ix1 - 1 + min_isx) h[1].isy = convert(typeof(h[1].isy),ix2 - 1 + min_isy) h[1].igx = convert(typeof(h[1].igx),ix3 - 1 + min_igx) h[1].igy = convert(typeof(h[1].igy),ix4 - 1 + min_igy) sx_rot = convert(Float32,(ix1 - 1 + min_isx)*dsx + osx) sy_rot = convert(Float32,(ix2 - 1 + min_isy)*dsy + osy) gx_rot = convert(Float32,(ix3 - 1 + min_igx)*dgx + ogx) gy_rot = convert(Float32,(ix4 - 1 + min_igy)*dgy + ogy) h[1].sx = (sx_rot-osx)*cos(ang2) + (sy_rot-osy)*sin(ang2) + osx h[1].sy = -(sx_rot-osx)*sin(ang2) + (sy_rot-osy)*cos(ang2) + osy h[1].gx = (gx_rot-ogx)*cos(ang2) + (gy_rot-ogy)*sin(ang2) + ogx h[1].gy = -(gx_rot-ogx)*sin(ang2) + (gy_rot-ogy)*cos(ang2) + ogy h[1].hx = h[1].gx - h[1].sx h[1].hy = h[1].gy - h[1].sy h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = rad2deg*atan2((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv) h[1].my = h[1].sy + h[1].hy/(1 + gammainv) mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = 
convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz)) elseif (style=="mxmyhxhy") h[1].imx = convert(typeof(h[1].imx),ix1 - 1 + min_imx) h[1].imy = convert(typeof(h[1].imy),ix2 - 1 + min_imy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) mx_rot = convert(Float32,(ix1 - 1 + min_imx)*dmx + omx) my_rot = convert(Float32,(ix2 - 1 + min_imy)*dmy + omy) hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx) hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy) h[1].mx = (mx_rot-omx)*cos(ang2) + (my_rot-omy)*sin(ang2) + omx h[1].my = -(mx_rot-omx)*sin(ang2) + (my_rot-omy)*cos(ang2) + omy h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy h[1].sx = h[1].mx - h[1].hx/(1 + gammainv) h[1].sy = h[1].my - h[1].hy/(1 + gammainv) h[1].gx = h[1].mx + h[1].hx*(1-(1/(1 + gammainv))) h[1].gy = h[1].my + h[1].hy*(1-(1/(1 + gammainv))) sx_rot = (h[1].sx-osx)*cos(ang2) - (h[1].sy-osy)*sin(ang2) + osx sy_rot = (h[1].sx-osx)*sin(ang2) + (h[1].sy-osy)*cos(ang2) + osy gx_rot = (h[1].gx-ogx)*cos(ang2) - (h[1].gy-ogy)*sin(ang2) + ogx gy_rot = (h[1].gx-ogx)*sin(ang2) + (h[1].gy-ogy)*cos(ang2) + ogy h[1].isx = convert(Int32,round((sx_rot-osx)/dsx)) h[1].isy = convert(Int32,round((sy_rot-osy)/dsy)) h[1].igx = convert(Int32,round((gx_rot-ogx)/dgx)) h[1].igy = convert(Int32,round((gy_rot-ogy)/dgy)) h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = rad2deg*atan2((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz)) elseif (style=="mxmyhaz") h[1].imx = convert(typeof(h[1].imx),ix1 - 1 + min_imx) h[1].imy = convert(typeof(h[1].imy),ix2 - 1 + min_imy) h[1].ih = convert(typeof(h[1].ih), ix3 - 1 + min_ih) h[1].iaz = convert(typeof(h[1].iaz),ix4 - 1 + min_iaz) mx_rot = convert(Float32,(ix1 - 1 + min_imx)*dmx + omx) my_rot = convert(Float32,(ix2 - 1 + min_imy)*dmy + omy) h[1].mx = (mx_rot-omx)*cos(ang2) + (my_rot-omy)*sin(ang2) + omx h[1].my = -(mx_rot-omx)*sin(ang2) + (my_rot-omy)*cos(ang2) + omy h[1].h = convert(Float32,(ix3 - 1 + min_ih)*dh + oh) h[1].az = convert(Float32,(ix4 - 1 + min_iaz)*daz + oaz) if (h[1].az <= 90) h[1].hx = h[1].h*cos(deg2rad*h[1].az) h[1].hy = h[1].h*sin(deg2rad*h[1].az) elseif (h[1].az > 90 && h[1].az <= 180) h[1].hx =-h[1].h*cos(pi-(deg2rad*h[1].az)) h[1].hy = h[1].h*sin(pi-(deg2rad*h[1].az)) elseif (h[1].az > 180 && h[1].az <= 270) h[1].hx =-h[1].h*cos((deg2rad*h[1].az)-pi) h[1].hy =-h[1].h*sin((deg2rad*h[1].az)-pi) else h[1].hx = h[1].h*cos(2*pi-(deg2rad*h[1].az)) h[1].hy =-h[1].h*sin(2*pi-(deg2rad*h[1].az)) end h[1].sx = h[1].mx - h[1].hx/(1 + gammainv) h[1].sy = h[1].my - h[1].hy/(1 + gammainv) h[1].gx = h[1].mx + h[1].hx*(1-(1/(1 + gammainv))) h[1].gy = h[1].my + h[1].hy*(1-(1/(1 + gammainv))) sx_rot = (h[1].sx-osx)*cos(ang2) - (h[1].sy-osy)*sin(ang2) + osx sy_rot = (h[1].sx-osx)*sin(ang2) + (h[1].sy-osy)*cos(ang2) + osy gx_rot = (h[1].gx-ogx)*cos(ang2) - (h[1].gy-ogy)*sin(ang2) + ogx gy_rot = (h[1].gx-ogx)*sin(ang2) + (h[1].gy-ogy)*cos(ang2) + ogy h[1].isx = convert(Int32,round((sx_rot-osx)/dsx)) h[1].isy = convert(Int32,round((sy_rot-osy)/dsy)) h[1].igx = convert(Int32,round((gx_rot-ogx)/dgx)) h[1].igy = convert(Int32,round((gy_rot-ogy)/dgy)) hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy h[1].ihx = 
convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) elseif (style=="sxsyhxhy") h[1].isx = convert(typeof(h[1].isx),ix1 - 1 + min_isx) h[1].isy = convert(typeof(h[1].isy),ix2 - 1 + min_isy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) sx_rot = convert(Float32,(ix1 - 1 + min_isx)*dsx + osx) sy_rot = convert(Float32,(ix2 - 1 + min_isy)*dsy + osy) hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx) hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy) h[1].sx = (sx_rot-osx)*cos(ang2) + (sy_rot-osy)*sin(ang2) + osx h[1].sy = -(sx_rot-osx)*sin(ang2) + (sy_rot-osy)*cos(ang2) + osy h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy h[1].gx = h[1].sx + h[1].hx h[1].gy = h[1].sy + h[1].hy h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = rad2deg*atan2((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv) h[1].my = h[1].sy + h[1].hy/(1 + gammainv) mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz)) elseif (style=="gxgyhxhy") h[1].igx = convert(typeof(h[1].igx),ix1 - 1 + min_igx) h[1].igy = convert(typeof(h[1].igy),ix2 - 1 + min_igy) h[1].ihx = convert(typeof(h[1].ihx),ix3 - 1 + min_ihx) h[1].ihy = convert(typeof(h[1].ihy),ix4 - 1 + min_ihy) gx_rot = convert(Float32,(ix1 - 1 + min_igx)*dgx + ogx) gy_rot = convert(Float32,(ix2 - 1 + min_igy)*dgy + ogy) hx_rot = convert(Float32,(ix3 - 1 + min_ihx)*dhx + ohx) hy_rot = convert(Float32,(ix4 - 1 + min_ihy)*dhy + ohy) h[1].gx = (gx_rot-ogx)*cos(ang2) + (gy_rot-ogy)*sin(ang2) + ogx h[1].gy = -(gx_rot-ogx)*sin(ang2) + (gy_rot-ogy)*cos(ang2) + ogy h[1].hx = (hx_rot-ohx)*cos(ang2) + (hy_rot-ohy)*sin(ang2) + ohx h[1].hy = -(hx_rot-ohx)*sin(ang2) + (hy_rot-ohy)*cos(ang2) + ohy h[1].sx = h[1].gx - h[1].hx h[1].sy = h[1].gy - h[1].hy h[1].h = sqrt((h[1].hx^2) + (h[1].hy^2)) h[1].az = rad2deg*atan2((h[1].gy-h[1].sy),(h[1].gx-h[1].sx)) if (h[1].az < 0) h[1].az += 360.0 end h[1].mx = h[1].sx + h[1].hx/(1 + gammainv) h[1].my = h[1].sy + h[1].hy/(1 + gammainv) mx_rot = (h[1].mx-omx)*cos(ang2) - (h[1].my-omy)*sin(ang2) + omx my_rot = (h[1].mx-omx)*sin(ang2) + (h[1].my-omy)*cos(ang2) + omy hx_rot = (h[1].hx-ohx)*cos(ang2) - (h[1].hy-ohy)*sin(ang2) + ohx hy_rot = (h[1].hx-ohx)*sin(ang2) + (h[1].hy-ohy)*cos(ang2) + ohy h[1].imx = convert(Int32,round((mx_rot-omx)/dmx)) h[1].imy = convert(Int32,round((my_rot-omy)/dmy)) h[1].ihx = convert(Int32,round((hx_rot-ohx)/dhx)) h[1].ihy = convert(Int32,round((hy_rot-ohy)/dhy)) h[1].ih = convert(Int32,round((h[1].h-oh)/dh)) h[1].iaz = convert(Int32,round((h[1].az-oaz)/daz)) elseif (style=="sxsyhaz") error("sxsyhaz not developed yet.") elseif (style=="gxgyhaz") error("gxgyhaz not developed yet.") end h[1].selev = convert(typeof(h[1].selev),0) h[1].gelev = convert(typeof(h[1].gelev),0) h[1].trid = convert(typeof(h[1].trid),0) SeisWrite(out,d,h,extent,itrace=j) j += 1 end end end end DATAPATH = 
get(ENV,"DATAPATH",join([pwd(),"/"])) out_d = join([DATAPATH out "@data@"]) out_h = join([DATAPATH out "@headers@"]) for ipatch = 1 : length(patch_names) println("ipatch= ",ipatch," ",patch_names[ipatch]) stream_d = open(out_d,"a+") stream_h = open(out_h,"a+") d_patch,h_patch,e_patch = SeisRead(patch_names[ipatch]) nx_patch=e_patch.n2*e_patch.n3*e_patch.n4*e_patch.n5 nt_patch = h_patch[1].n1 d_patch=reshape(d_patch,Int(nt_patch),Int(nx_patch)) ot_patch = h_patch[1].o1 min_it_patch = Int(floor((ot_patch-ot)/dt)+1) max_it_patch = Int(floor((ot_patch-ot)/dt)+nt_patch) min_ix1_patch = getfield(h_patch[1],Symbol(key[2])) max_ix1_patch = getfield(h_patch[nx_patch],Symbol(key[2])) min_ix2_patch = getfield(h_patch[1],Symbol(key[3])) max_ix2_patch = getfield(h_patch[nx_patch],Symbol(key[3])) min_ix3_patch = getfield(h_patch[1],Symbol(key[4])) max_ix3_patch = getfield(h_patch[nx_patch],Symbol(key[4])) min_ix4_patch = getfield(h_patch[1],Symbol(key[5])) max_ix4_patch = getfield(h_patch[nx_patch],Symbol(key[5])) tapti = min_it_patch > 1 ? it_WO : 0 taptf = max_it_patch < nt ? it_WO : 0 tapx1i = min_ix1_patch > min_ix1 ? ix1_WO : 0 tapx1f = max_ix1_patch < max_ix1 ? ix1_WO : 0 tapx2i = min_ix2_patch > min_ix2 ? ix2_WO : 0 tapx2f = max_ix2_patch < max_ix2 ? ix2_WO : 0 tapx3i = min_ix3_patch > min_ix3 ? ix3_WO : 0 tapx3f = max_ix3_patch < max_ix3 ? ix3_WO : 0 tapx4i = min_ix4_patch > min_ix4 ? ix4_WO : 0 tapx4f = max_ix4_patch < max_ix4 ? ix4_WO : 0 d_patch = Taper(d_patch, max_it_patch - min_it_patch + 1, max_ix1_patch - min_ix1_patch + 1, max_ix2_patch - min_ix2_patch + 1, max_ix3_patch - min_ix3_patch + 1, max_ix4_patch - min_ix4_patch + 1, tapti,taptf,tapx1i,tapx1f,tapx2i,tapx2f,tapx3i,tapx3f,tapx4i,tapx4f) for j = 1 : nx_patch h = h_patch[j] if (style=="sxsygxgy") itrace = (h.igy - min_igy)*nx3*nx2*nx1 + (h.igx - min_igx)*nx2*nx1 + (h.isy - min_isy)*nx1 + h.isx - min_isx + 1 elseif (style=="mxmyhxhy") itrace = (h.ihy - min_ihy)*nx3*nx2*nx1 + (h.ihx - min_ihx)*nx2*nx1 + (h.imy - min_imy)*nx1 + h.imx - min_imx + 1 elseif (style=="mxmyhaz") # itrace = (h.imx - min_imx)*nx2*nx3*nx4 + (h.imy - min_imy)*nx3*nx4 + (h.ih - min_ih)*nx4 + h.iaz - min_iaz + 1 itrace=(h.iaz-min_iaz)*nx3*nx2*nx1 + (h.ih - min_ih)*nx2*nx1 + (h.imy - min_imy)*nx1 + h.imx - min_imx+1 elseif (style=="sxsyhxhy") itrace =(h.ihy-min_ihy)*nx3*nx2*nx1 + (h.ihx - min_ihx)*nx2*nx1 + (h.isy - min_isy)*nx1 + h.isx - min_isx+1 elseif (style=="gxgyhxhy") itrace =(h.ihy-min_ihy)*nx3*nx2*nx1 + (h.ihx - min_ihx)*nx2*nx1 + (h.igy - min_igy)*nx1 + h.igx - min_igx+1 elseif (style=="sxsyhaz") itrace =(h.iaz-min_iaz)*nx3*nx2*nx1 + (h.ih - min_ih)*nx2*nx1 + (h.isy - min_isy)*nx1 + h.isx - min_isx+1 elseif (style=="gxgyhaz") itrace =(h.iaz-min_iaz)*nx3*nx2*nx1 + (h.ih - min_ih)*nx2*nx1 + (h.igy - min_igy)*nx1 + h.igx - min_igx+1 end if j==1 println(min_it_patch," ",max_it_patch," ",nt," ",nx_patch," ",ipatch," ",size(d_patch)) end h.tracenum = itrace h.n1 = nt h.o1 = ot position_d = 4*nt*(itrace - 1) seek(stream_d,position_d) d = read!(stream_d,Array{Float32}(undef,nt)) d[min_it_patch:max_it_patch] += d_patch[:,j] seek(stream_d,position_d) write(stream_d,d) PutHeader(stream_h,h,itrace) end close(stream_d) close(stream_h) end end function Taper(d,nt,nx1,nx2,nx3,nx4,tapti,taptf,tapx1i,tapx1f,tapx2i,tapx2f,tapx3i,tapx3f,tapx4i,tapx4f) tx1=1; tx2=1; tx3=1; tx4=1 for ix1 = 1 : nx1 if (ix1>=1 && ix1<=tapx1i) tx1 = 1.0/(tapx1i-1)*(ix1-1) end if (ix1>tapx1i && ix1<=nx1-tapx1f) tx1 = 1 end if (ix1>nx1-tapx1f && ix1<=nx1) tx1 = 1-1.0/(tapx1f-1)*(ix1-1-nx1+tapx1f) 
end for ix2 = 1 : nx2 if (ix2>=1 && ix2<=tapx2i) tx2 = 1.0/(tapx2i-1)*(ix2-1) end if (ix2>tapx2i && ix2<=nx2-tapx2f) tx2 = 1 end if (ix2>nx2-tapx2f && ix2<=nx2) tx2 = 1-1.0/(tapx2f-1)*(ix2-1-nx2+tapx2f) end for ix3 = 1 : nx3 if (ix3>=1 && ix3<=tapx3i) tx3 = 1.0/(tapx3i-1)*(ix3-1) end if (ix3>tapx3i && ix3<=nx3-tapx3f) tx3 = 1 end if (ix3>nx3-tapx3f && ix3<=nx3) tx3 = 1-1.0/(tapx3f-1)*(ix3-1-nx3+tapx3f) end for ix4 = 1 : nx4 if (ix4>=1 && ix4<=tapx4i) tx4 = 1.0/(tapx4i-1)*(ix4-1) end if (ix4>tapx4i && ix4<=nx4-tapx4f) tx4 = 1 end if (ix4>nx4-tapx4f && ix4<=nx4) tx4 = 1-1.0/(tapx4f-1)*(ix4-1-nx4+tapx4f) end ix = (ix4-1)*nx2*nx3*nx1 + (ix3-1)*nx1*nx2 + (ix2-1)*nx1 + ix1 for it = 1 : nt if (it>=1 && it<=tapti) tt = 1.0/(tapti-1)*(it-1) end if (it>tapti && it<=nt-taptf) tt = 1 end if (it>nt-taptf && it<=nt) tt = 1-1.0/(taptf-1)*(it-1-nt+taptf) end d[it,ix] = tt*tx1*tx2*tx3*tx4*d[it,ix] end end end end end return d end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
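For context, a minimal round trip through the patching utilities above, modeled directly on the package's `test_patch.jl`; the binned input file `section_bin` and the window sizes are assumptions for illustration:

```julia
using SeisMain

# geometry of the binned volume (same keywords as in the package tests)
geom = Dict(:style=>"mxmyhaz", :min_imx=>10, :max_imx=>100, :min_imy=>35, :max_imy=>45,
            :min_ih=>1, :max_ih=>6, :min_iaz=>0, :max_iaz=>7)
win = Dict(:ix1_WL=>30, :ix1_WO=>5)  # patch length and overlap in the first space dimension

# split the binned volume into overlapping patches on disk ...
patch_names, npatch = SeisPatch("section_bin", "patch"; geom..., win...)
# ... process each patch independently here ...
# ... then merge the patches back into one volume, tapering the overlaps
SeisUnPatch(patch_names, "section_bin_merged"; geom..., win..., nt=251)
```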
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
3197
""" SeisWindow(in,out;<keyword arguments>) Window a seis file using header words. # Arguments - `in::String`: filename of input - `out::String`: filename of output # Keyword arguments - `key=[]` - `minval=[]` - `maxval=[]` note that windowing along the time axis is achieved by using the key "t". *Credits: AS, 2015* """ function SeisWindow(in::String,out::String;key=[],minval=[],maxval=[]) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) extent = ReadTextHeader(in) tmin = extent.o1 tmax = extent.o1 + extent.d1*(extent.n1-1) for ikey=1:length(key) if key[ikey] == "t" tmin = minval[ikey] tmax = maxval[ikey] println("tmin= ",tmin," tmax= ",tmax) end end #itmin is the sample at which tmin is saved itmin = convert(Int32,round((tmin - extent.o1)/extent.d1) + 1) if (itmin < 1) itmin = 1 end itmax = convert(Int32,round((tmax - extent.o1)/extent.d1)) if (itmax) > extent.n1 itmax = extent.n1 end println("itmin= ",itmin," itmax= ",itmax) SeisWindowHeaders(in, out; key=key, minval=minval, maxval=maxval, tmin=tmin, tmax=tmax) FetchTraces(in,out;itmin=itmin,itmax=itmax) tmp = join(["tmp_SeisWindow_",string(round(Int,rand()*100000))]) SeisProcessHeaders(out, tmp, [UpdateHeader], [Dict(:itmin=>itmin,:itmax=>itmax)]) filename_h_tmp = join([DATAPATH tmp "@headers@"]) filename_h_out = join([DATAPATH out "@headers@"]) cp(filename_h_tmp,filename_h_out,force=true); rm(filename_h_tmp,force=true); end function FetchTraces(in::String, out::String; ntrace=500, itmin=round(Int,1), itmax=round(Int,9999999999)) NX = GetNumTraces(out) println("NX in FetchTraces= ",NX) itrace = round(Int,1) filename_data_in = ParseDataName(in) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_data_out = join([DATAPATH out "@data@"]) stream_in = open(filename_data_in) stream_out = open(filename_data_out,"w") extent = ReadTextHeader(in) nt = extent.n1 if itmin < 1 itmin = round(Int,1) end if itmax > nt itmax = round(Int,nt) end while itrace <= NX if (itrace > 1) stream_out = open(filename_data_out,"a") end nx = NX - itrace + 1 ntrace = nx > ntrace ? ntrace : nx h = SeisReadHeaders(out,group="some",itrace=itrace,ntrace=ntrace) d = zeros(Float32,itmax-itmin+1,ntrace) SeekTraces!(d,stream_in,h,itmin,itmax,nt,round(Int,ntrace)) write(stream_out,d) close(stream_out) itrace += ntrace end close(stream_in) end function SeekTraces!(d::AbstractArray{T,2}, stream_in::IOStream, h::Array{Header,1},itmin,itmax,nt,ntrace) where T d1 = zeros(Float32,nt) for ix = 1 : ntrace position = 4*nt*(h[ix].tracenum-1) seek(stream_in,position) d1 = read!(stream_in,Array{Float32}(undef,nt)) for it = itmin : itmax d[it - itmin + 1,ix] = d1[it] end end nothing end function UpdateHeader(h;itrace=1,itmin=1,itmax=9e9) ot = (itmin-1)*h[1].d1 + h[1].o1 nt = itmax - itmin + 1 for j = 1 : length(h) h[j].o1 = ot h[j].n1 = nt h[j].tracenum = j + itrace - 1 end return h end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
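A short usage sketch for `SeisWindow` above, taken from the package README; the input file name is assumed to already exist on disk:

```julia
using SeisMain

# keep only the first two seconds of every trace;
# the special key "t" windows along the time axis
SeisWindow("616_79_PR.seis", "616_79_PR_2s.seis",
           key=["t"], minval=[0.0], maxval=[2.0])
```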
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
2434
function SeisWindowHeaders(in, out; key=[], minval=[], maxval=[], tmin=0, tmax=99999, ntrace=1000000) SeisProcessHeaders(in, out, [WindowHeaders], [Dict(:key=>key,:minval=>minval,:maxval=>maxval)], group="some", key=key, ntrace=ntrace, update_tracenum=false) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d_out = join([DATAPATH out "@data@"]) filename_h_out = join([DATAPATH out "@headers@"]) nhead = length(fieldnames(Header)) stream_h = open(filename_h_out) nx = round(Int,filesize(stream_h)/(nhead*4)) h = GrabHeader(stream_h,1) close(stream_h) nt = convert(Int64,round((tmax - tmin)/h.d1)) #+ 1 if nt > h.n1 nt = h.n1 end extent = Extent(convert(Int32,nt), convert(Int32,nx), convert(Int32,1), convert(Int32,1), convert(Int32,1), convert(Float32,tmin), convert(Float32,1), convert(Float32,0), convert(Float32,0), convert(Float32,0), convert(Float32,h.d1), convert(Float32,1), convert(Float32,1), convert(Float32,1), convert(Float32,1), "Time", "Trace Number", "", "", "", "s", "index", "", "", "", "") WriteTextHeader(out,extent,"native_float",4,filename_d_out,filename_h_out) end function WindowHeaders(h_in;key=[],minval=[],maxval=[]) minval = convert(Array{Float32,1},vec(minval)) maxval = convert(Array{Float32,1},vec(maxval)) nx = length(h_in) key2 = String[] minval2 = Float32[] maxval2 = Float32[] for ikey=1:length(key) if key[ikey] != "t" key2 = push!(key2,key[ikey]) minval2 = push!(minval2,minval[ikey]) maxval2 = push!(maxval2,maxval[ikey]) end end return RejectHeaders(h_in,key2,minval2,maxval2,length(key2),length(h_in)) end function RejectHeaders(h_in::Array{Header,1}, key::Array{String,1}, minval::Array{Float32,1}, maxval::Array{Float32,1}, nkeys, nx) h_out = Header[] keep = true key_val = 0f0 for j=1:nx keep = true for ikey=1:nkeys key_val = convert(Float32,getfield(h_in[j],Symbol(key[ikey]))) if (key_val < minval[ikey] || key_val > maxval[ikey]) keep = false end end if (keep==true) h_out = push!(h_out,h_in[j]) end end return h_out end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
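`SeisWindowHeaders` above rejects traces whose header values fall outside `[minval, maxval]` for each key, which is how `SeisWindow` windows spatially. A hedged sketch (the file names and index ranges are assumptions for illustration):

```julia
using SeisMain

# keep traces whose midpoint grid indices fall inside a rectangle;
# any field of Header can serve as a key ("imx", "imy", "ihx", ...)
SeisWindow("section.seis", "section_sub.seis",
           key=["imx", "imy"], minval=[10, 35], maxval=[100, 45])
```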
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
3502
function SeisWindowHeadersPatch(in, out; key=[], minval=[], maxval=[], tmin=0, tmax=99999, ntrace=1000000,it_nt=9e9) itrace_in = 1 itrace_out = 1 NX = GetNumTraces(in) group="some" while itrace_in <= NX nx = NX - itrace_in + 1 ntrace = nx > ntrace ? ntrace : nx h1 = SeisReadHeaders(in, group=group, key=key, itrace=itrace_in, ntrace=ntrace) num_traces_in = length(h1) h2=WindowHeadersPatch(h1,key=key,minval=minval,maxval=maxval) h1 = copy(h2) num_traces_out = length(h1) SeisWriteHeaders(out, h1, itrace=itrace_out, update_tracenum=false) itrace_in += num_traces_in itrace_out += num_traces_out end DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_d_out = join([DATAPATH out "@data@"]) filename_h_out = join([DATAPATH out "@headers@"]) nhead = length(fieldnames(Header)) stream_h = open(filename_h_out) nx = round(Int,filesize(stream_h)/(nhead*4)) h = GrabHeader(stream_h,1) close(stream_h) nt = convert(Int64,round((tmax - tmin)/h.d1)) + 1 if nt > h.n1 nt = h.n1 end if nt<it_nt nt = nt-1 end minval = convert(Array{Float32,1},vec(minval)) maxval = convert(Array{Float32,1},vec(maxval)) key2 = String[] minval2 = Float32[] maxval2 = Float32[] for ikey=1:length(key) if key[ikey] != "t" key2 = push!(key2,key[ikey]) minval2 = push!(minval2,minval[ikey]) maxval2 = push!(maxval2,maxval[ikey]) end end nx1=maxval2[1]-minval2[1]+1 nx2=maxval2[2]-minval2[2]+1 nx3=maxval2[3]-minval2[3]+1 nx4=maxval2[4]-minval2[4]+1 extent = Extent(convert(Int32,nt), convert(Int32,nx1), convert(Int32,nx2), convert(Int32,nx3), convert(Int32,nx4), convert(Float32,tmin), convert(Float32,1), convert(Float32,0), convert(Float32,0), convert(Float32,0), convert(Float32,h.d1), convert(Float32,1), convert(Float32,1), convert(Float32,1), convert(Float32,1), "Time", key[2], key[3], key[4], key[5], "s", "index", "index", "index", "index", "") WriteTextHeader(out,extent,"native_float",4,filename_d_out,filename_h_out) end function WindowHeadersPatch(h_in;key=[],minval=[],maxval=[]) minval = convert(Array{Float32,1},vec(minval)) maxval = convert(Array{Float32,1},vec(maxval)) nx = length(h_in) key2 = String[] minval2 = Float32[] maxval2 = Float32[] for ikey=1:length(key) if key[ikey] != "t" key2 = push!(key2,key[ikey]) minval2 = push!(minval2,minval[ikey]) maxval2 = push!(maxval2,maxval[ikey]) end end return RejectHeadersPatch(h_in,key2,minval2,maxval2,length(key2),length(h_in)) end function RejectHeadersPatch(h_in::Array{Header,1}, key::Array{String,1}, minval::Array{Float32,1}, maxval::Array{Float32,1}, nkeys, nx) h_out = Header[] keep = true key_val = 0f0 for j=1:nx keep = true for ikey=1:nkeys key_val = convert(Float32,getfield(h_in[j],Symbol(key[ikey]))) if (key_val < minval[ikey] || key_val > maxval[ikey]) keep = false end end if (keep==true) h_out = push!(h_out,h_in[j]) end end return h_out end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
5378
""" SeisWindowPatch(in,out;<keyword arguments>) Window a seis file using header words. # Arguments - `in::String`: filename of input - `out::String`: filename of output - `key=[]`: note that windowing along the time axis is achieved by using the key "t". - `minval=[]` - `maxval=[]` - `it_nt=9e9` *Credits: AS, FC, 2017* """ function SeisWindowPatch(in::String,out::String;key=[],minval=[],maxval=[],it_nt=9e9) println("processing patch ",out) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) extent = ReadTextHeader(in) tmin = extent.o1 tmax = extent.o1 + extent.d1*(extent.n1-1) for ikey=1:length(key) if key[ikey] == "t" tmin = minval[ikey] tmax = maxval[ikey] end end #itmin is the sample at which tmin is saved itmin = convert(Int32,round((tmin - extent.o1)/extent.d1) + 1) if (itmin < 1) itmin = 1 end itmax = convert(Int32,round((tmax - extent.o1)/extent.d1) + 1) if (itmax) > extent.n1 itmax = extent.n1 end SeisWindowHeadersPatch(in, out; key=key, minval=minval, maxval=maxval, tmin=tmin, tmax=tmax,it_nt=it_nt) FetchTracesPatch(in,out;itmin=itmin,itmax=itmax ,key=key, minval=minval, maxval=maxval) tmp = join(["tmp_SeisWindow_",string(round(Int,rand()*100000))]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #@compat SeisProcessHeaders(out, tmp, [UpdateHeader],[Dict(:itmin=>itmin,:itmax=>itmax)]) itrace_in = 1 itrace_out = 1 ntrace=1000000 NX = GetNumTraces(out) while itrace_in <= NX nx = NX - itrace_in + 1 ntrace = nx > ntrace ? ntrace : nx h1 = SeisReadHeaders(out, group="some", itrace=itrace_in, ntrace=ntrace) ext=ReadTextHeader(out) num_traces_in = length(h1) h2=UpdateHeaderPatch(h1;itrace=itrace_in,itmin=itmin,itmax=itmax) h1 = copy(h2) num_traces_out = length(h1) SeisWriteHeaders(tmp, h1, itrace=itrace_out) itrace_in += num_traces_in itrace_out += num_traces_out end filename_h_tmp = join([DATAPATH tmp "@headers@"]) filename_h_out = join([DATAPATH out "@headers@"]) cp(filename_h_tmp,filename_h_out,force=true); rm(filename_h_tmp); #rm(tmp); end ######################################################################################## ##################################################################################### function FetchTracesPatch(in::String, out::String; ntrace=500, itmin=round(Int,1), itmax=round(Int,9999999999),key=[], minval=[], maxval=[]) NX = GetNumTraces(out) itrace = round(Int,1) filename_data_in = ParseDataName(in) DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"])) filename_data_out = join([DATAPATH out "@data@"]) stream_in = open(filename_data_in) stream_out = open(filename_data_out,"w") extent = ReadTextHeader(in) nt = extent.n1 if itmin < 1 itmin = round(Int,1) end if itmax > nt itmax = round(Int,nt) end nhead = length(fieldnames(Header)) while itrace <= NX if (itrace > 1) stream_out = open(filename_data_out,"a") end nx = NX - itrace + 1 ntrace = nx > ntrace ? 
ntrace : nx h = SeisReadHeaders(out,group="some",itrace=itrace,ntrace=ntrace) d = zeros(Float32,itmax-itmin+1,ntrace) SeekTracesPatch!(d,stream_in,h,itmin,itmax,nt,round(Int,ntrace)) write(stream_out,d) close(stream_out) itrace += ntrace end close(stream_in) end ############################################################################### function SeekTracesPatch!(d::AbstractArray{T,2}, stream_in::IOStream, h::Array{Header,1},itmin,itmax,nt,ntrace) where T d1 = zeros(Float32,nt) for ix = 1 : ntrace position = 4*nt*(h[ix].tracenum-1) seek(stream_in,position) d1 = read!(stream_in,Array{Float32}(undef, nt)) for it = itmin : itmax d[it - itmin + 1,ix] = d1[it] end end nothing end ############################################################################## #function SeekTracesPatch{T}(d::AbstractArray{T,2}, h::Array{Header,1},out::String, stream_in::IOStream,stream_out::IOStream, itmin,itmax,nt,ntrace;key=[],minval=[],maxval=[]) # println(itmin," ",itmax," ",nt) # d1 = zeros(Float32,nt) #extent_out=ReadTextHeader(out) #nx2=extent_out.n3 #nx3=extent_out.n4 #nx4=extent_out.n5 # for ix = 1 : ntrace # position = 4*nt*(h[ix].tracenum-1) # seek(stream_in,Int(position)) # d1 = read(stream_in,Float32,nt) # for it = itmin : itmax # d[it - itmin + 1,ix] = d1[it] # end # itrace_out=(h[ix].imx-minval[2])*nx2*nx3*nx4+(h[ix].imy-minval[3])*nx3*nx4+(h[ix].ihx-minval[4])*nx4+h[ix].ihy-minval[5]+1 # position_out=4*nt*(itrace_out-1) #println(out," ",h[ix].imx," ",minval[2]," ",itrace_out) # seek(stream_out,Int(position_out)) # write(stream_out,convert(Array{Float32,1},d[1:itmax-itmin+1,ix])) # end # nothing #end ############################################################################## ############################################################################## function UpdateHeaderPatch(h;itrace=1,itmin=1,itmax=9e9) ot = (itmin-1)*h[1].d1 + h[1].o1 nt = itmax - itmin + 1 for j = 1 : length(h) h[j].o1 = ot h[j].n1 = nt h[j].tracenum = j + itrace - 1 end return h end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
626
export SeisGeometry,
       SeisPatch,
       SeisWindowPatch,
       SeisWindowHeadersPatch,
       SeisPatchProcess,
       SeisProcess,
       SeisProcessHeaders,
       SeisSort,
       SeisUnPatch,
       SeisWindow,
       SeisWindowHeaders,
       SeisBinData,
       SeisBinHeaders,
       download_if_needed

include("SeisGeometry.jl")
include("SeisPatch.jl")
include("SeisWindowPatch.jl")
include("SeisWindowHeadersPatch.jl")
include("SeisUnPatch.jl")
include("SeisPatchProcess.jl")
include("SeisProcess.jl")
include("SeisProcessHeaders.jl")
include("SeisSort.jl")
include("SeisWindow.jl")
include("SeisWindowHeaders.jl")
include("SeisBinData.jl")
include("SeisBinHeaders.jl")
include("DownloadIfNeeded.jl")
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
97
using SeisMain
using Test

include("test_IO.jl")
include("test_bin.jl")
include("test_patch.jl")
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
917
# From USGS, National Petroleum Reserve - Alaska Data Archive
# https://energy.usgs.gov/GeochemistryGeophysics/SeismicDataProcessingInterpretation/NPRASeismicDataArchive.aspx#3862174-data-
download_if_needed("https://saigfileserver.physics.ualberta.ca/static/Datasets/Testing/16_81_PT1_PR.SGY",
                   "16_81_PT1_PR.SGY",
                   sha256sum="54791e8626215d36f6ebcecc3039da2fd74f3472518f47d8e8137c81c2ccfc2f")

SegyToSeis("16_81_PT1_PR.SGY","16_81_PT1_PR")
d,h,ext = SeisRead("16_81_PT1_PR")
imx = SeisMain.ExtractHeader(h,"imx")
println("size(d)=",size(d))
println("imx=",imx)
SeisWrite("16_81_PT1_PR_copy",d,h,ext)

for file in (
    "16_81_PT1_PR",
    "16_81_PT1_PR.SGY",
    "16_81_PT1_PR@data@",
    "16_81_PT1_PR@headers@",
    "16_81_PT1_PR_copy",
    "16_81_PT1_PR_copy@data@",
    "16_81_PT1_PR_copy@headers@",
)
    rm(joinpath(@__DIR__, file))
end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
935
println("Testing test_bin.jl")

download_if_needed("https://saigfileserver.physics.ualberta.ca/static/Datasets/Testing/prestack_section.su",
                   "section.su",
                   sha256sum="8baaf281a36dcd5656e07e728bf97e0c1b513302b12206efbb24c3edb34d9ec8");

SegyToSeis("section.su","section",format="su",input_type="ieee");

param1 = Dict( :dmx=>15, :dmy=>15, :dh=>30, :daz=>45 );
param2 = Dict(:style=>"mxmyhaz", :min_imx=>10,:max_imx=>100, :min_imy=>35, :max_imy=>45,
              :min_ih=>1, :max_ih=>6, :min_iaz=>0, :max_iaz=>7);

SeisGeometry("section"; param1...)
SeisBinHeaders("section","section_bin"; param1..., param2...);
SeisBinData("section","section_bin"; param1..., param2...);

db,hb,eb = SeisRead("section_bin");
N = size(db)
@test N == (251, 91, 11, 6, 8)

for file in (
    "section",
    "section@data@",
    "section@headers@",
    "section.su",
)
    rm(joinpath(@__DIR__, file))
end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
code
972
param2 = Dict(:style=>"mxmyhaz", :min_imx=>10,:max_imx=>100, :min_imy=>35, :max_imy=>45,
              :min_ih=>1, :max_ih=>6, :min_iaz=>0, :max_iaz=>7);
param3 = Dict( :ix1_WL=>30,:ix1_WO=>5);

file_bin = "section_bin"          # run Demo_Utils_1_Binning.ipynb first
patch_out = "patch"               # will make files patch_1, patch_2, ...
file_final = "section_bin_final"  # file put back together from patches via SeisUnPatch

patch_out,npatch = SeisPatch(file_bin,"patch"; param2..., param3...);
SeisUnPatch(patch_out,file_final; param2..., param3..., nt=251);

db,hb,eb = SeisRead(file_bin);
df,hf,ef = SeisRead(file_final);

alpha = maximum(db-df)
@test alpha < 0.1

for file in (
    "section_bin",
    "section_bin_final",
    "patch_1",
    "patch_2",
    "patch_3",
    "patch_4",
)
    rm(joinpath(@__DIR__, file))
    rm(joinpath(@__DIR__, file*"@data@"))
    rm(joinpath(@__DIR__, file*"@headers@"))
end
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
4118
# Modifying the Seismic.jl package

## Contents
* Introduction to making modifications to the package
* Files organization
* Naming conventions
* Formatting conventions
* Documentation
* Style guidelines
* Tests
* Reproducible examples

## Introduction to making modifications to the package
* We show [here](http://seismic.physics.ualberta.ca/docs/develop_SeismicJulia.pdf) the basics of how to fork the main repository, edit it, commit the changes, and make pull requests.

## Files organization
New files must be placed in the proper location. Source files are organized in the following directories within the /src directory:

* API/c
* Imaging
* Modelling
* Operators
* Plotting
* Processing
* ReadWrite
* Solvers
* Tools
* Utils
* Wavelets
* Windows

Tests (see below) must be placed into the /test directory and a line should be added in runtests.jl, while examples must be placed into the /examples directory and IJulia examples must be placed in examples/IJulia.

## Naming conventions
* Programs: SeisProgramName (eg SeisRadon)
* Linear operators
    * OperationNameOp (eg RadonOp)
* Optimization routines
    * SolverName (eg ConjugateGradients)

## Formatting conventions
* General notes: if you want a function to do fancy disk operations, use trace headers, processing on gathers, etc., first define a simple function that works on data in memory, then make other function definitions at the end of the file (multiple dispatch) where this function will accept an ASCII-string input and will do input from disk, call your function, then write to disk. The idea is to always have a very simple matlab-like code at least for the first function definition in the file. Multiple dispatch was designed for these situations. (A minimal sketch of this pattern follows this file.)
* Programs:
```julia
function foo(in1, in2; parameter1=default1, parameter2=default2, parameterN=defaultN)
    return out
end
```
* Linear operators:
```julia
function foo(m, d, adj=true; parameter1=default1, parameter2=default2, parameterN=defaultN)
    return m or return d
end
```
* Optimization routines:
```julia
function foo(m, d, op, params; parameter1=default1, parameter2=default2, parameterN=defaultN)
    return cost
end
```

## Documentation
There are two components to the documentation:
* Self documentation in the function. This is text (docstrings) that is placed at the top of the function in markdown format. Guidelines for docstrings are given [here](http://docs.julialang.org/en/release-0.4/manual/documentation/).
* The website: written in Markdown and converted to HTML using MkDocs. The HTML is then placed in the website directory.

## Style guidelines
The code should be written following these guides:
* [Julia Style.jl guidelines](https://github.com/johnmyleswhite/Style.jl)
* [Style Guide for Python Codes](https://www.python.org/dev/peps/pep-0008/#whitespace-in-expressions-and-statements)

## Tests
* Whenever a program is added, a very small, minimalistic, very fast test should be added to the /test directory and to /test/runtests.jl so that one can be sure the code will work for other people when they download the package. These tests are not so much about ensuring the geophysics or math is correct in a program, but about making sure that a code you write or modify won't make another program in the package fail (for example, if you were to modify conjugate gradients incorrectly it might cause MWNI to fail). This is tested automatically by [Travis CI](https://travis-ci.org/) when a pull request is generated, and the owners of the repository will get a notice of whether all tests have passed before deciding whether to merge the pull request into the main repository.

## Reproducible examples
* Examples showing the functionalities of the package should be added to the /examples directory. They should be small, fast, and show the basic use of the program. If possible, have the example download (small) input data from seismic.physics.ualberta.ca/data/. These examples can be written as IJulia notebooks so people can see the results inside GitHub and see how to use the package and what it can do.
* Need examples for 5d interpolation, migration, etc.
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
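As referenced in the "Formatting conventions" section above, a minimal sketch of the in-memory-first dispatch pattern; `SeisGain` and its scaling logic are hypothetical names used only to illustrate the convention:

```julia
# first definition: simple, matlab-like, operates on arrays in memory
function SeisGain(d::Array{Float32,2}; a=1.0)
    return Float32(a) .* d
end

# later definition (multiple dispatch): same name, string input, disk I/O
function SeisGain(in::String, out::String; a=1.0)
    d, h, ext = SeisRead(in)   # read data, headers, and extent from disk
    d = SeisGain(d; a=a)       # delegate to the in-memory version
    SeisWrite(out, d, h, ext)  # write the result back to disk
end
```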
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
3189
<a name="logo"/>
<div align="center">
<a href="http://saig.physics.ualberta.ca/" target="_blank">
<img src="https://saig.physics.ualberta.ca/lib/tpl/dokuwiki/images/logo.png" alt="SAIG Logo" width="240" height="106"></img>
</a>
</div>

# SeisMain.jl

[![Build Status](https://travis-ci.com/SeismicJulia/SeisMain.jl.svg?branch=master)](https://travis-ci.com/SeismicJulia/SeisMain.jl)

This package contains tools to handle seismic data with the Seis format. At the moment, it is updated and tested in Julia v1.1.

## Installation
To use this package you must first install the [Julia](http://julialang.org/downloads/) programming language. Then, run the Julia application and add the package
```
julia>using Pkg
julia>Pkg.add("SeisMain")
```
or
```
julia>]
pkg> add SeisMain
```
Finally, in the Julia prompt type
```
julia>using SeisMain
```

## Contents
1. ReadWrite
    * Exported: SegyToSeis, SeisRead, SeisReadHeaders, SeisWrite, SeisWriteHeaders, SeisCopy, SeisHeaderInfo, SeisToSegy, SeisRemove
    * Not exported: seis format: InitHeader, GrabHeader, PutHeader, GetNumTraces, ExtractHeader; su format: InitFileHeader, GrabFileHeader, PutFileHeader; segy format: InitSegyHeader, GrabSegyHeader, PutSegyHeader, MapHeaders
2. Utils
    * SeisBinGeometry, SeisBinHeaders, SeisBinData, SeisPatch, SeisUnPatch, SeisProcess, SeisSort, SeisWindow.

Functions that are not exported can be accessed with the module prefix, e.g. `SeisMain.ExtractHeader`. Use Julia help for details. For example,
```
julia>?
help?> SegyToSeis
```

## Basic usage
For SeisPlot, please refer [here](https://github.com/SeismicJulia/SeisPlot.jl). The following example produces the figure below.
```Julia
using SeisMain
using SeisPlot
run(`mkdir -p data`)
download("http://seismic.physics.ualberta.ca/data/616_79_PR.SGY", "data/616_79_PR.SGY")
SegyToSeis("data/616_79_PR.SGY", "data/616_79_PR.seis")
SeisWindow("data/616_79_PR.seis", "data/616_79_PR_2s.seis", key=["t"], minval=[0.0], maxval=[2.0])
d, head, extent = SeisRead("data/616_79_PR_2s.seis")
SeisPlotTX(d, title="Seismic Plot Example", cmap="PuOr", wbox=9, ylabel="Time(s)", xlabel="Trace Number (index)", dy=extent.d1)
```
![plot1](http://seismic.physics.ualberta.ca/figures/616_79_PR2.png)

## For developers: contributing to the project
* New at GitHub? These [basic commands](http://seismic.physics.ualberta.ca/docs/git_basic_commands.pdf) and this [dictionary](http://seismic.physics.ualberta.ca/docs/git_dictionary.pdf) might help.
* This [tutorial](http://seismic.physics.ualberta.ca/docs/develop_SeismicJulia.pdf) provides the basic steps you need to follow in order to fork the main repository, change the source code in your forked repository, commit the changes, and make pull requests using GitHub.
* For contributions to the package, please follow the general guidelines given here: [Modifications.md](https://github.com/SeismicJulia/Seismic.jl/blob/master/Modifications.md).

## Credits
If you use the SeismicJulia project, please cite the following paper
```
@article{stanton2016efficient,
  title={Efficient geophysical research in Julia},
  author={Stanton, Aaron and Sacchi, Mauricio D},
  journal={CSEG GeoConvention 2016},
  pages={1--3},
  year={2016}
}
```
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
522
# SeisMain.jl

* A package to read, write and process seismic data in Julia

## Package Features
- Convert data to a simple format
- Window data according to defined keywords
- Sort data
- Bin seismic volumes
- Organize data into patches to process independently
- Once your data is processed, unpatch to a single volume

## Installation
SeisMain, from the SeismicJulia project, can be installed using the Julia package manager.
From the Julia REPL, type `]` to enter the Pkg REPL mode and run
```
pkg> add SeisMain
```
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
584
# Internal Documentation

Documentation for the internal `SeisMain.jl` interface.

## Internals

```@docs
SeisMain.InitSeisHeader
SeisMain.GrabHeader
SeisMain.PutHeader
SeisMain.BitsToHeader
SeisMain.HeaderToBits
SeisMain.GetNumTraces
SeisMain.ParseHeaderName
SeisMain.ParseDataName
SeisMain.ParseDataFormat
SeisMain.ParseDataESize
SeisMain.ExtractHeader
SeisMain.ReadTextHeader
SeisMain.WriteTextHeader
SeisMain.InitFileHeader
SeisMain.GrabFileHeader
SeisMain.PutFileHeader
SeisMain.InitSegyHeader
SeisMain.GrabSegyHeader
SeisMain.PutSegyHeader
SeisMain.MapHeaders
SeisMain.convert
```
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
368
# Public Documentation

Documentation for the public `SeisMain.jl` interface.

## Public interface

```@docs
SegyToSeis
SeisRead
SeisReadHeaders
SeisWrite
SeisWriteHeaders
SeisCopy
SeisHeaderInfo
SeisToSegy
SeisRemove
SeisWindow
SeisPatch
SeisUnPatch
SeisGeometry
SeisBinHeaders
SeisBinData
SeisProcess
SeisSort
ReadSegyHeader
ExtractSegyHeader
SegyHeaderInfo
```
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.1.2
b7b8281ddfcad1f1343deb5e539e03f5022a79b4
docs
928
# Package guide

SeisMain.jl provides seismic data reading, writing and handling tools. Format conversion is available between the SEIS format and the SEGY and SU formats.

With SeisMain.jl installed we can do a simple example showing format conversion
```julia
using SeisMain, SeisPlot
run(`mkdir -p data`)
download("http://seismic.physics.ualberta.ca/data/616_79_PR.SGY", "data/616_79_PR.SGY")
SegyToSeis("data/616_79_PR.SGY", "data/616_79_PR.seis")
SeisWindow("data/616_79_PR.seis", "data/616_79_PR_2s.seis", key=["t"], minval=[0.0], maxval=[2.0])
d, head, extent = SeisRead("data/616_79_PR_2s.seis")
SeisPlotTX(d, title="Seismic Plot Example", cmap="PuOr", wbox=9, ylabel="Time(s)", xlabel="Trace Number(index)", dy=extent.d1)
```
![plot1](http://seismic.physics.ualberta.ca/figures/616_79_PR2.png)

In the above example, we first download the data, then convert it from SEGY format to SEIS format, window the first two seconds, and finally plot the result.
SeisMain
https://github.com/SeismicJulia/SeisMain.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
1706
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

module GeoStatsTransforms

using Meshes
using GeoTables
using GeoStatsModels
using GeoStatsProcesses

using Tables
using Unitful
using TableDistances
using TableTransforms
using DataScienceTraits
using Combinatorics
using Distances
using Clustering
using ArnoldiMethod
using CategoricalArrays
using SparseArrays
using LinearAlgebra
using Statistics
using Random

using OhMyThreads: tmap
using Unitful: AffineQuantity
using TiledIteration: TileIterator
using DataScienceTraits: Continuous
using ColumnSelectors: ColumnSelector, SingleColumnSelector
using ColumnSelectors: Column, AllSelector, NoneSelector
using ColumnSelectors: selector, selectsingle
using GeoStatsModels: GeoStatsModel, fitpredict
using GeoStatsProcesses: GeoStatsProcess

import TableTransforms: apply, revert, reapply
import TableTransforms: isrevertible

include("utils.jl")
include("interpolate.jl")
include("interpneighbors.jl")
include("interpmissing.jl")
include("interpnan.jl")
include("simulate.jl")
include("cookiecutter.jl")
include("uniquecoords.jl")
include("aggregate.jl")
include("transfer.jl")
include("upscale.jl")
include("downscale.jl")
include("clustering.jl")
include("rasterize.jl")
include("potrace.jl")
include("detrend.jl")
include("precompile.jl")

export
  # transforms
  InterpolateNeighbors,
  Interpolate,
  InterpolateMissing,
  InterpolateNaN,
  Simulate,
  CookieCutter,
  UniqueCoords,
  Aggregate,
  Transfer,
  Upscale,
  Downscale,
  Rasterize,
  Potrace,
  Detrend,
  SLIC,
  GHC,
  GSC

end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
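Since every transform exported above subtypes `TableTransform`, they compose with `|>` like any TableTransforms.jl pipeline. A minimal sketch, assuming the usual `georef`/`CartesianGrid` constructors from GeoTables.jl and Meshes.jl:

```julia
using GeoStatsTransforms, GeoTables, Meshes

# 100x100 grid with one synthetic variable
gtb = georef((z=rand(10_000),), CartesianGrid(100, 100))

# remove a linear trend, then coarsen the grid by a factor of 2 per axis
out = gtb |> Detrend(:z) |> Upscale(2, 2)
```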
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3522
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Aggregate(domain, var₁ => agg₁, var₂ => agg₂, ..., varₙ => aggₙ)
    Aggregate([g₁, g₂, ..., gₙ], var₁ => agg₁, var₂ => agg₂, ..., varₙ => aggₙ)

Aggregate variables `var₁`, `var₂`, ..., `varₙ` over geospatial `domain` using
aggregation functions `agg₁`, `agg₂`, ..., `aggₙ`.
Alternatively, aggregate variables over geometries `g₁`, `g₂`, ..., `gₙ`.
Default aggregation function is `mean` for continuous variables and `first` otherwise.

# Examples

```julia
Aggregate(domain, 1 => last, 2 => maximum)
Aggregate(domain, :a => first, :b => minimum)
Aggregate(domain, "a" => last, "b" => maximum)
Aggregate(geoms, "a" => last, "b" => maximum)
```
"""
struct Aggregate{D<:Domain,S<:ColumnSelector} <: TableTransform
  domain::D
  selector::S
  aggfuns::Vector{Function}
end

Aggregate(domain::Domain) = Aggregate(domain, NoneSelector(), Function[])
Aggregate(domain::Domain, pairs::Pair{C,<:Function}...) where {C<:Column} =
  Aggregate(domain, selector(first.(pairs)), collect(Function, last.(pairs)))
Aggregate(geoms::AbstractVector{<:Geometry}, args...) = Aggregate(GeometrySet(geoms), args...)

isrevertible(::Type{<:Aggregate}) = false

function apply(transform::Aggregate, geotable::AbstractGeoTable)
  gtb = _adjustunits(geotable)
  table = values(gtb)
  cols = Tables.columns(table)
  vars = Tables.columnnames(cols)

  # aggregation functions
  svars = transform.selector(vars)
  aggfun = Dict(zip(svars, transform.aggfuns))
  for var in vars
    if !haskey(aggfun, var)
      vals = Tables.getcolumn(cols, var)
      aggfun[var] = _defaultagg(vals)
    end
  end

  # source and target domains
  sdom = domain(gtb)
  tdom = transform.domain

  # perform aggregation
  newcols = _aggregate(sdom, tdom, cols, vars, aggfun)
  newtable = (; newcols...) |> Tables.materializer(table)

  georef(newtable, tdom), nothing
end

function _aggregate(sdom, tdom, cols, vars, aggfun)
  if sdom isa Grid && tdom isa Grid && extrema(sdom) == extrema(tdom)
    # we have two grids overlaid, and can rely on
    # tiled iteration for efficient aggregation
    _gridagg(sdom, tdom, cols, vars, aggfun)
  else
    # general case with knn search
    _knnagg(sdom, tdom, cols, vars, aggfun)
  end
end

function _gridagg(sdom, tdom, cols, vars, aggfun)
  # determine tile size for tiled iteration
  tilesize = ceil.(Int, size(sdom) ./ size(tdom))
  if any(<(1), tilesize)
    throw(ArgumentError("cannot aggregate a coarse grid over a fine grid"))
  end

  # perform aggregation
  map(vars) do var
    svals = Tables.getcolumn(cols, var)
    array = reshape(svals, size(sdom))
    titer = TileIterator(axes(array), tilesize)
    tvals = tmap(titer) do sinds
      aggfun[var](array[sinds...])
    end |> vec
    var => tvals
  end
end

function _knnagg(sdom, tdom, cols, vars, aggfun)
  # find nearest elements in target domain
  knn = KNearestSearch(tdom, 1)
  near = tmap(1:nelements(sdom)) do i
    first(search(centroid(sdom, i), knn))
  end

  # map target element to source elements
  group = Dict(tind => Int[] for tind in 1:nelements(tdom))
  for (sind, tind) in enumerate(near)
    push!(group[tind], sind)
  end

  # perform aggregation
  map(vars) do var
    svals = Tables.getcolumn(cols, var)
    tvals = tmap(1:nelements(tdom)) do tind
      aggfun[var](svals[group[tind]])
    end
    var => tvals
  end
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
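A usage sketch for `Aggregate` above; the three-argument `CartesianGrid(dims, origin, spacing)` constructor from Meshes.jl is assumed here so that both grids share the same extent and the fast tiled-iteration path applies:

```julia
using GeoStatsTransforms, GeoTables, Meshes
using Statistics: mean

fine   = CartesianGrid(100, 100)                          # source domain
coarse = CartesianGrid((50, 50), (0.0, 0.0), (2.0, 2.0))  # same extent, coarser cells

gtb = georef((z=rand(10_000),), fine)

# aggregate z over the coarse grid; unselected variables get
# the default aggregation (mean for continuous, first otherwise)
agg = gtb |> Aggregate(coarse, :z => mean)
```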
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
484
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    ClusteringTransform

A transform for geostatistical clustering.
"""
abstract type ClusteringTransform <: TableTransform end

# ----------------
# IMPLEMENTATIONS
# ----------------

include("clustering/slic.jl")
include("clustering/ghc.jl")
include("clustering/gsc.jl")
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3468
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    CookieCutter(domain, parent => process, var₁ => procmap₁, ..., varₙ => procmapₙ; [parameters])
    CookieCutter(domain, nreals, parent => process, var₁ => procmap₁, ..., varₙ => procmapₙ; [parameters])

Simulate `nreals` realizations of variable `parent` with geostatistical process
`process`, and each child variable `varᵢ` with process map `procmapᵢ`, over given `domain`.

The process map must be an iterable of pairs of the form: value => process.
Each process in the map is related to a value of the `parent` realization,
therefore the values of the child variables will be chosen according
to the values of the corresponding `parent` realization.

The `parameters` are forwarded to the `rand` method of the geostatistical processes.

# Examples

```julia
parent = QuiltingProcess(trainimg, (30, 30))
child0 = GaussianProcess(SphericalVariogram(range=20.0, sill=0.2))
child1 = GaussianProcess(SphericalVariogram(MetricBall((200.0, 20.0))))
transform = CookieCutter(domain, :parent => parent, :child => [0 => child0, 1 => child1])
```
"""
struct CookieCutter{D<:Domain,M,C,R<:AbstractRNG,K} <: TableTransform
  domain::D
  nreals::Int
  parent::M
  children::Vector{C}
  rng::R
  kwargs::K
end

function CookieCutter(
  domain::Domain,
  nreals::Int,
  parent::Pair{C,<:GeoStatsProcess},
  children::Pair{C}...;
  rng=Random.default_rng(),
  kwargs...
) where {C<:Column}
  if isempty(children)
    throw(ArgumentError("cannot create CookieCutter transform without children"))
  end
  ppair = selector(first(parent)) => last(parent)
  cpairs = [selector(first(p)) => _procmap(last(p)) for p in children]
  CookieCutter(domain, nreals, ppair, cpairs, rng, values(kwargs))
end

CookieCutter(domain::Domain, parent::Pair{C,<:GeoStatsProcess}, children::Pair{C}...; kwargs...) where {C<:Column} =
  CookieCutter(domain, 1, parent, children...; kwargs...)

function _procmap(itr)
  pairs = collect(itr)
  if !(eltype(pairs) <: Pair{<:Any,<:GeoStatsProcess})
    throw(ArgumentError("process map must be an iterable of pairs of the form: value => process"))
  end
  pairs
end

isrevertible(::Type{<:CookieCutter}) = false

function apply(transform::CookieCutter, geotable::AbstractGeoTable)
  tab = values(geotable)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  (; domain, nreals, parent, children, rng, kwargs) = transform

  pselector, pprocess = parent
  pvar = selectsingle(pselector, vars)
  psim = geotable |> Simulate(domain, nreals, pvar => pprocess; rng, kwargs...)

  csim = mapreduce(hcat, children) do (cselector, procmap)
    cvar = selectsingle(cselector, vars)
    prep = map(procmap) do (val, cprocess)
      sim = geotable |> Simulate(domain, nreals, cvar => cprocess; rng, kwargs...)
      val => sim
    end

    names = let tab = values(last(first(prep)))
      cols = Tables.columns(tab)
      Tables.columnnames(cols)
    end

    simmap = Dict(prep)
    columns = map(1:nreals) do r
      mcolumn = psim[:, r]
      map(enumerate(mcolumn)) do (i, v)
        if haskey(simmap, v)
          sim = simmap[v]
          sim[:, r][i]
        else
          missing
        end
      end
    end

    georef((; zip(names, columns)...), domain)
  end

  newgeotable = hcat(psim, csim)

  newgeotable, nothing
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
2429
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Detrend(col₁, col₂, ..., colₙ; degree=1)
    Detrend([col₁, col₂, ..., colₙ]; degree=1)
    Detrend((col₁, col₂, ..., colₙ); degree=1)

The transform that detrends columns `col₁`, `col₂`, ..., `colₙ`
with a polynomial of given `degree`.

    Detrend(regex; degree=1)

Detrends the columns that match with `regex`.

# Examples

```julia
Detrend(1, 3, 5)
Detrend([:a, :c, :e])
Detrend(("a", "c", "e"))
Detrend(r"[ace]", degree=2)
Detrend(:)
```

## References

* Menafoglio, A., Secchi, P. 2013. [A Universal Kriging predictor
  for spatially dependent functional data of a Hilbert Space](https://doi.org/10.1214/13-EJS843)
"""
struct Detrend{S<:ColumnSelector} <: TableTransform
  selector::S
  degree::Int
end

Detrend(; degree=1) = Detrend(AllSelector(), degree)
Detrend(cols; degree=1) = Detrend(selector(cols), degree)
Detrend(cols::C...; degree=1) where {C<:Column} = Detrend(selector(cols), degree)

isrevertible(::Type{<:Detrend}) = true

function apply(transform::Detrend, geotable)
  dom = domain(geotable)
  tab = values(geotable)
  cols = Tables.columns(tab)
  names = Tables.columnnames(cols)
  snames = transform.selector(names)

  gview = geotable |> Select(snames)
  model = Polynomial(transform.degree)
  fitted = GeoStatsModels.fit(model, gview)

  ncols = map(names) do name
    z = Tables.getcolumn(cols, name)
    ẑ(i) = GeoStatsModels.predict(fitted, name, centroid(dom, i))
    if name ∈ snames
      @inbounds [z[i] - ẑ(i) for i in 1:nelements(dom)]
    else
      z
    end
  end

  𝒯 = (; zip(names, ncols)...)
  newtab = 𝒯 |> Tables.materializer(tab)

  newgeotable = georef(newtab, dom)

  newgeotable, (snames, fitted)
end

function revert(::Detrend, newgeotable, cache)
  newdom = domain(newgeotable)
  newtab = values(newgeotable)
  cols = Tables.columns(newtab)
  names = Tables.columnnames(cols)

  snames, fitted = cache

  ocols = map(names) do name
    z = Tables.getcolumn(cols, name)
    ẑ(i) = GeoStatsModels.predict(fitted, name, centroid(newdom, i))
    if name ∈ snames
      @inbounds [z[i] + ẑ(i) for i in 1:nelements(newdom)]
    else
      z
    end
  end

  𝒯 = (; zip(names, ocols)...)
  table = 𝒯 |> Tables.materializer(newtab)

  georef(table, newdom)
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
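Because `Detrend` above is revertible, the fitted polynomial is kept in the cache returned by `apply` and added back by `revert`. A minimal sketch using the `apply`/`revert` API from TableTransforms.jl:

```julia
using GeoStatsTransforms, GeoTables, Meshes
using TableTransforms: apply, revert

# synthetic values with a crude trend along the element index
z = [i / 10_000 + 0.01 * randn() for i in 1:10_000]
gtb = georef((z=z,), CartesianGrid(100, 100))

detrended, cache = apply(Detrend(:z), gtb)       # residuals + fitted trend in cache
restored = revert(Detrend(:z), detrended, cache)  # trend added back
```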
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
835
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Downscale(f₁, f₂, ..., fₙ)

Downscale each dimension of the grid by given factors `f₁`, `f₂`, ..., `fₙ`.

Resulting values are obtained with the [`Transfer`](@ref) transform.

# Examples

```julia
Downscale(2, 2)
Downscale(3, 3, 2)
```
"""
struct Downscale{Dim} <: TableTransform
  factors::Dims{Dim}
end

Downscale(factors::Int...) = Downscale(factors)

isrevertible(::Type{<:Downscale}) = false

function apply(transform::Downscale, geotable::AbstractGeoTable)
  grid = domain(geotable)
  tgrid = refine(grid, RegularRefinement(transform.factors))
  newgeotable = geotable |> Transfer(tgrid)
  newgeotable, nothing
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
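A one-line sketch of `Downscale` above: each grid cell is refined into `f₁ × f₂` children and values are carried over by `Transfer`:

```julia
using GeoStatsTransforms, GeoTables, Meshes

gtb = georef((z=rand(10_000),), CartesianGrid(100, 100))

fine = gtb |> Downscale(2, 2)   # domain becomes a 200x200 grid
```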
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3721
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    InterpolateMissing(vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])

Interpolate geospatial data on its own domain, using geostatistical models
`model₁`, ..., `modelₙ` and non-missing values of the variables `vars₁`, ..., `varsₙ`.

    InterpolateMissing(model=NN(); [parameters])

Interpolate geospatial data on its own domain, using geostatistical `model`
and non-missing values of all variables.

Just like [`InterpolateNeighbors`](@ref), this transform uses neighbor search
methods to fit geostatistical models at each interpolation location with a
reduced number of measurements.

## Parameters

* `minneighbors` - Minimum number of neighbors (default to `1`)
* `maxneighbors` - Maximum number of neighbors (default to `10`)
* `neighborhood` - Search neighborhood (default to `nothing`)
* `distance` - A distance defined in Distances.jl (default to `Euclidean()`)
* `point` - Perform interpolation on point support (default to `true`)
* `prob` - Perform probabilistic interpolation (default to `false`)

The `maxneighbors` parameter can be used to perform interpolation with
a subset of measurements per prediction location. If `maxneighbors` is not
provided, then all measurements are used.

Two `neighborhood` search methods are available:

* If a `neighborhood` is provided, local prediction is performed
  by sliding the `neighborhood` in the domain.
* If a `neighborhood` is not provided, the prediction is performed
  using `maxneighbors` nearest neighbors according to `distance`.

See also [`InterpolateNaN`](@ref), [`InterpolateNeighbors`](@ref), [`Interpolate`](@ref).
"""
struct InterpolateMissing{N,M} <: TableTransform
  selectors::Vector{ColumnSelector}
  models::Vector{GeoStatsModel}
  minneighbors::Int
  maxneighbors::Int
  neighborhood::N
  distance::M
  point::Bool
  prob::Bool
end

InterpolateMissing(
  selectors,
  models;
  minneighbors=1,
  maxneighbors=10,
  neighborhood=nothing,
  distance=Euclidean(),
  point=true,
  prob=false
) = InterpolateMissing(
  collect(ColumnSelector, selectors),
  collect(GeoStatsModel, models),
  minneighbors,
  maxneighbors,
  neighborhood,
  distance,
  point,
  prob
)

InterpolateMissing(model::GeoStatsModel=NN(); kwargs...) = InterpolateMissing([AllSelector()], [model]; kwargs...)

InterpolateMissing(pairs::Pair{<:Any,<:GeoStatsModel}...; kwargs...) =
  InterpolateMissing(selector.(first.(pairs)), last.(pairs); kwargs...)

isrevertible(::Type{<:InterpolateMissing}) = false

function _interp(geotable, selectors, models, droptrans; kwargs...)
  tab = values(geotable)
  dom = domain(geotable)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  interps = map(selectors, models) do selector, model
    svars = selector(vars)
    mapreduce(hcat, svars) do var
      data = geotable[:, [var]] |> droptrans
      fitpredict(model, data, dom; kwargs...)
    end
  end

  reduce(hcat, interps)
end

function apply(transform::InterpolateMissing, geotable::AbstractGeoTable)
  selectors = transform.selectors
  models = transform.models
  kwargs = (
    minneighbors=transform.minneighbors,
    maxneighbors=transform.maxneighbors,
    neighborhood=transform.neighborhood,
    distance=transform.distance,
    point=transform.point,
    prob=transform.prob
  )

  newgeotable = _interp(geotable, selectors, models, DropMissing(); kwargs...)

  newgeotable, nothing
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3229
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    InterpolateNaN(vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])

Interpolate geospatial data on its own domain, using geostatistical models
`model₁`, ..., `modelₙ` and non-NaN values of the variables `vars₁`, ..., `varsₙ`.

    InterpolateNaN(model=NN(); [parameters])

Interpolate geospatial data on its own domain, using geostatistical `model`
and non-NaN values of all variables.

Just like [`InterpolateNeighbors`](@ref), this transform uses neighbor search
methods to fit geostatistical models at each interpolation location with a
reduced number of measurements.

## Parameters

* `minneighbors` - Minimum number of neighbors (default to `1`)
* `maxneighbors` - Maximum number of neighbors (default to `10`)
* `neighborhood` - Search neighborhood (default to `nothing`)
* `distance` - A distance defined in Distances.jl (default to `Euclidean()`)
* `point` - Perform interpolation on point support (default to `true`)
* `prob` - Perform probabilistic interpolation (default to `false`)

The `maxneighbors` parameter can be used to perform interpolation with
a subset of measurements per prediction location. If `maxneighbors` is not
provided, then all measurements are used.

Two `neighborhood` search methods are available:

* If a `neighborhood` is provided, local prediction is performed
  by sliding the `neighborhood` in the domain.
* If a `neighborhood` is not provided, the prediction is performed
  using `maxneighbors` nearest neighbors according to `distance`.

See also [`InterpolateMissing`](@ref), [`InterpolateNeighbors`](@ref), [`Interpolate`](@ref).
"""
struct InterpolateNaN{N,M} <: TableTransform
  selectors::Vector{ColumnSelector}
  models::Vector{GeoStatsModel}
  minneighbors::Int
  maxneighbors::Int
  neighborhood::N
  distance::M
  point::Bool
  prob::Bool
end

InterpolateNaN(
  selectors,
  models;
  minneighbors=1,
  maxneighbors=10,
  neighborhood=nothing,
  distance=Euclidean(),
  point=true,
  prob=false
) = InterpolateNaN(
  collect(ColumnSelector, selectors),
  collect(GeoStatsModel, models),
  minneighbors,
  maxneighbors,
  neighborhood,
  distance,
  point,
  prob
)

InterpolateNaN(model::GeoStatsModel=NN(); kwargs...) = InterpolateNaN([AllSelector()], [model]; kwargs...)

InterpolateNaN(pairs::Pair{<:Any,<:GeoStatsModel}...; kwargs...) =
  InterpolateNaN(selector.(first.(pairs)), last.(pairs); kwargs...)

isrevertible(::Type{<:InterpolateNaN}) = false

function apply(transform::InterpolateNaN, geotable::AbstractGeoTable)
  selectors = transform.selectors
  models = transform.models
  kwargs = (
    minneighbors=transform.minneighbors,
    maxneighbors=transform.maxneighbors,
    neighborhood=transform.neighborhood,
    distance=transform.distance,
    point=transform.point,
    prob=transform.prob
  )

  newgeotable = _interp(geotable, selectors, models, DropNaN(); kwargs...)

  newgeotable, nothing
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3911
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    InterpolateNeighbors(domain, vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])
    InterpolateNeighbors([g₁, g₂, ..., gₙ], vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])

Interpolate geospatial data on given `domain` or set of geometries `g₁`, `g₂`, ..., `gₙ`,
using geostatistical models `model₁`, ..., `modelₙ` for variables `vars₁`, ..., `varsₙ`.

    InterpolateNeighbors(domain, model=NN(); [parameters])
    InterpolateNeighbors([g₁, g₂, ..., gₙ], model=NN(); [parameters])

Interpolate geospatial data on given `domain` or set of geometries `g₁`, `g₂`, ..., `gₙ`,
using geostatistical `model` for all variables.

Unlike [`Interpolate`](@ref), this transform uses neighbor search methods to
fit geostatistical models at each interpolation location with a reduced number
of measurements.

## Parameters

* `minneighbors` - Minimum number of neighbors (default to `1`)
* `maxneighbors` - Maximum number of neighbors (default to `10`)
* `neighborhood` - Search neighborhood (default to `nothing`)
* `distance` - A distance defined in Distances.jl (default to `Euclidean()`)
* `point` - Perform interpolation on point support (default to `true`)
* `prob` - Perform probabilistic interpolation (default to `false`)

The `maxneighbors` parameter can be used to perform interpolation with
a subset of measurements per prediction location. If `maxneighbors` is not
provided, then all measurements are used.

Two `neighborhood` search methods are available:

* If a `neighborhood` is provided, local prediction is performed by sliding
  the `neighborhood` in the domain.
* If a `neighborhood` is not provided, the prediction is performed using
  `maxneighbors` nearest neighbors according to `distance`.

See also [`Interpolate`](@ref), [`InterpolateMissing`](@ref), [`InterpolateNaN`](@ref).
"""
struct InterpolateNeighbors{D<:Domain,N,M} <: TableTransform
  domain::D
  selectors::Vector{ColumnSelector}
  models::Vector{GeoStatsModel}
  minneighbors::Int
  maxneighbors::Int
  neighborhood::N
  distance::M
  point::Bool
  prob::Bool
end

InterpolateNeighbors(
  domain::Domain,
  selectors,
  models;
  minneighbors=1,
  maxneighbors=10,
  neighborhood=nothing,
  distance=Euclidean(),
  point=true,
  prob=false
) = InterpolateNeighbors(
  domain,
  collect(ColumnSelector, selectors),
  collect(GeoStatsModel, models),
  minneighbors,
  maxneighbors,
  neighborhood,
  distance,
  point,
  prob
)

InterpolateNeighbors(geoms::AbstractVector{<:Geometry}, selectors, models; kwargs...) =
  InterpolateNeighbors(GeometrySet(geoms), selectors, models; kwargs...)

InterpolateNeighbors(domain, model::GeoStatsModel=NN(); kwargs...) =
  InterpolateNeighbors(domain, [AllSelector()], [model]; kwargs...)

InterpolateNeighbors(domain, pairs::Pair{<:Any,<:GeoStatsModel}...; kwargs...) =
  InterpolateNeighbors(domain, selector.(first.(pairs)), last.(pairs); kwargs...)
isrevertible(::Type{<:InterpolateNeighbors}) = false

function apply(transform::InterpolateNeighbors, geotable::AbstractGeoTable)
  tab = values(geotable)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  domain = transform.domain
  selectors = transform.selectors
  models = transform.models
  minneighbors = transform.minneighbors
  maxneighbors = transform.maxneighbors
  neighborhood = transform.neighborhood
  distance = transform.distance
  point = transform.point
  prob = transform.prob

  interps = map(selectors, models) do selector, model
    svars = selector(vars)
    data = geotable[:, svars]
    fitpredict(model, data, domain; point, prob, minneighbors, maxneighbors, neighborhood, distance)
  end

  newgeotable = reduce(hcat, interps)

  newgeotable, nothing
end
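A sketch of neighbor-limited interpolation from scattered points onto a grid, again assuming the GeoStats.jl umbrella package; the coordinates, grid, and `maxneighbors` value are illustrative:

```julia
using GeoStats

# 50 scattered measurements with coordinates in [0, 10] × [0, 10]
table = (x=10rand(50), y=10rand(50), Z=rand(50))
gtb = georef(table, (:x, :y))

# predict on a 10×10 grid, using at most 8 nearest neighbors
# per prediction location instead of all 50 measurements
itp = gtb |> InterpolateNeighbors(CartesianGrid(10, 10), :Z => NN(); maxneighbors=8)
```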
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
2407
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Interpolate(domain, vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])
    Interpolate([g₁, g₂, ..., gₙ], vars₁ => model₁, ..., varsₙ => modelₙ; [parameters])

Interpolate geospatial data on given `domain` or vector of geometries `[g₁, g₂, ..., gₙ]`,
using geostatistical models `model₁`, ..., `modelₙ` for variables `vars₁`, ..., `varsₙ`.

    Interpolate(domain, model=NN(); [parameters])
    Interpolate([g₁, g₂, ..., gₙ], model=NN(); [parameters])

Interpolate geospatial data on given `domain` or vector of geometries `[g₁, g₂, ..., gₙ]`,
using geostatistical `model` for all variables.

## Parameters

* `point` - Perform interpolation on point support (default to `true`)
* `prob` - Perform probabilistic interpolation (default to `false`)

See also [`InterpolateNeighbors`](@ref), [`InterpolateMissing`](@ref), [`InterpolateNaN`](@ref).
"""
struct Interpolate{D<:Domain} <: TableTransform
  domain::D
  selectors::Vector{ColumnSelector}
  models::Vector{GeoStatsModel}
  point::Bool
  prob::Bool
end

Interpolate(domain::Domain, selectors, models; point=true, prob=false) =
  Interpolate(domain, collect(ColumnSelector, selectors), collect(GeoStatsModel, models), point, prob)

Interpolate(geoms::AbstractVector{<:Geometry}, selectors, models; kwargs...) =
  Interpolate(GeometrySet(geoms), selectors, models; kwargs...)

Interpolate(domain, model::GeoStatsModel=NN(); kwargs...) = Interpolate(domain, [AllSelector()], [model]; kwargs...)

Interpolate(domain, pairs::Pair{<:Any,<:GeoStatsModel}...; kwargs...) =
  Interpolate(domain, selector.(first.(pairs)), last.(pairs); kwargs...)

isrevertible(::Type{<:Interpolate}) = false

function apply(transform::Interpolate, geotable::AbstractGeoTable)
  tab = values(geotable)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  domain = transform.domain
  selectors = transform.selectors
  models = transform.models
  point = transform.point
  prob = transform.prob

  interps = map(selectors, models) do selector, model
    svars = selector(vars)
    data = geotable[:, svars]
    fitpredict(model, data, domain; point, prob, neighbors=false)
  end

  newgeotable = reduce(hcat, interps)

  newgeotable, nothing
end
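For contrast with `InterpolateNeighbors`, a sketch of the exhaustive variant, where every measurement informs every prediction location; `IDW` is assumed to be available from GeoStatsModels.jl alongside `NN`:

```julia
using GeoStats

# 30 scattered measurements
table = (x=10rand(30), y=10rand(30), Z=rand(30))
gtb = georef(table, (:x, :y))

# exhaustive interpolation: no neighbor search is performed
itp = gtb |> Interpolate(CartesianGrid(10, 10), :Z => IDW())
```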
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
7341
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Potrace(mask; [ϵ])
    Potrace(mask, var₁ => agg₁, ..., varₙ => aggₙ; [ϵ])

Trace polygons on 2D image data with Selinger's Potrace algorithm.

The categories stored in column `mask` are converted into binary
masks, which are then traced into multi-polygons. When provided,
the option `ϵ` is forwarded to Selinger's simplification algorithm.

Duplicates of a variable `varᵢ` are aggregated with
aggregation function `aggᵢ`. If an aggregation function
is not defined for variable `varᵢ`, the default aggregation
function will be used. The default aggregation function is
`mean` for continuous variables and `first` otherwise.

# Examples

```julia
Potrace(:mask, ϵ=0.1)
Potrace(1, 1 => last, 2 => maximum)
Potrace(:mask, :a => first, :b => minimum)
Potrace("mask", "a" => last, "b" => maximum)
```

## References

- Selinger, P. 2003. [Potrace: A polygon-based tracing algorithm](https://potrace.sourceforge.net/potrace.pdf)
"""
struct Potrace{M<:SingleColumnSelector,S<:ColumnSelector,T} <: TableTransform
  mask::M
  selector::S
  aggfuns::Vector{Function}
  ϵ::T
end

Potrace(mask::Column; ϵ=nothing) = Potrace(selector(mask), NoneSelector(), Function[], ϵ)
Potrace(mask::Column, pairs::Pair{C,<:Function}...; ϵ=nothing) where {C<:Column} =
  Potrace(selector(mask), selector(first.(pairs)), collect(Function, last.(pairs)), ϵ)

isrevertible(::Type{<:Potrace}) = true

function apply(transform::Potrace, geotable::AbstractGeoTable)
  gtb = _adjustunits(geotable)
  dom = domain(gtb)
  tab = values(gtb)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  # sanity check
  if !(dom isa Grid)
    throw(ArgumentError("potrace only defined for grid data"))
  end

  # simplification threshold
  ϵ = transform.ϵ

  # select column name
  sname = selectsingle(transform.mask, vars)

  # aggregation functions
  svars = transform.selector(vars)
  agg = Dict(zip(svars, transform.aggfuns))
  for var in vars
    if !haskey(agg, var)
      v = Tables.getcolumn(cols, var)
      agg[var] = _defaultagg(v)
    end
  end

  # convert column to image
  col = Tables.getcolumn(cols, sname)
  img = reshape(col, size(dom))

  # all possible colors
  colors = unique(img)

  # aggregate variables within each color
  preproc = map(colors) do color
    mask = isequal.(img, color)
    inds = findall(vec(mask))
    feat = Any[sname => color]
    for var in setdiff(vars, [sname])
      v = Tables.getcolumn(cols, var)
      newv = agg[var](v[inds])
      push!(feat, var => newv)
    end
    (; feat...), mask
  end

  # split preprocessing results
  feats = first.(preproc)
  masks = last.(preproc)

  # collect vertices and topology
  verts = vertices(dom)
  topo = topology(dom)

  # map pixels to vertices
  ∂ = Boundary{2,0}(topo)

  # map direction to first vertex of edge of
  # interest (i.e. edge touched by direction)
  d = Dict(:→ => 1, :↑ => 2, :← => 3, :↓ => 4)

  # map (→, i) representation to ring of points
  ring(itr) = Ring([verts[∂(i)[d[→]]] for (→, i) in itr[begin:(end - 1)]])

  # trace multi-polygons on each mask
  multis = map(masks) do mask
    rings = trace(mask)
    polys = map(rings) do (outer, inners)
      ochain = ring(outer)
      ichains = [ring(inner) for inner in inners]
      PolyArea([ochain, ichains...])
    end
    Multi(polys)
  end

  # simplify multi-polygons if necessary
  elems = isnothing(ϵ) ?
    multis : [simplify(multi, SelingerSimplification(ϵ)) for multi in multis]

  # georeference new features on new geometries
  newtab = feats |> Tables.materializer(tab)
  newdom = elems |> GeometrySet
  newgeotable = georef(newtab, newdom)

  newgeotable, dom
end

revert(::Potrace, newgeotable::AbstractGeoTable, cache) = newgeotable |> Rasterize(cache)

# trace polygonal geometries on mask
function trace(mask)
  # pad mask with inactive pixels
  M = falses(size(mask) .+ 2)
  M[(begin + 1):(end - 1), (begin + 1):(end - 1)] .= mask

  # trace paths on padded mask
  paths = tracerecursion!(M)

  # convert paths into rings
  rings = paths2rings(paths)

  # unpad and linearize indices
  linear = LinearIndices(mask)
  fun(■) = linear[■ - CartesianIndex(1, 1)]
  map(rings) do (outer, inners)
    o = [(→, fun(■)) for (□, →, ■) in outer]
    is = [[(→, fun(■)) for (□, →, ■) in inner] for inner in inners]
    o, is
  end
end

function tracerecursion!(M)
  paths = []
  while any(M)
    # trace outer path
    outer = tracepath(M)

    # invert pixels inside path
    O = copy(M)
    insideout!(M, outer)
    I = @. M & !O
    @. M = M & !I

    if any(I)
      # perform recursion
      inners = tracerecursion!(I)
      push!(paths, (outer, inners))
    else
      # single outer path
      push!(paths, (outer, []))
    end
  end

  paths
end

# trace the top-left polygon on the mask
function tracepath(M)
  # find top-left corner (□ → ■ link)
  i, j = 1, findfirst(==(1), M[1, :])
  while isnothing(j) && i < size(M, 1)
    i += 1
    j = findfirst(==(1), M[i, :])
  end

  # there must be at least one active pixel
  @assert !isnothing(j) "invalid input mask"

  # define □ → ■ link
  □ = CartesianIndex(i, j - 1)
  ■ = CartesianIndex(i, j)

  # step direction along the path
  step(□, ■) = CartesianIndex(■[2] - □[2], □[1] - ■[1])

  # direction after a given turn
  left = Dict(:→ => :↑, :↑ => :←, :← => :↓, :↓ => :→)
  right = Dict(:→ => :↓, :↓ => :←, :← => :↑, :↑ => :→)

  # find the next edge along the path
  function move((□, →, ■))
    □ₛ = □ + step(□, ■)
    ■ₛ = ■ + step(□, ■)

    # 4 possible configurations
    if M[□ₛ] == 1 && M[■ₛ] == 1
      □, right[→], □ₛ # make a right turn
    elseif M[□ₛ] == 0 && M[■ₛ] == 1
      □ₛ, →, ■ₛ # continue straight
    elseif M[□ₛ] == 0 && M[■ₛ] == 0
      ■ₛ, left[→], ■ # make a left turn
    else # cross pattern
      ■ₛ, left[→], ■ # left turn policy
    end
  end

  # build a closed path
  start = (□, :→, ■)
  next = move(start)
  path = [start, next]
  while next ≠ start
    next = move(next)
    push!(path, next)
  end

  path
end

# invert the mask inside the path
function insideout!(M, path)
  □s, ⬕s = first.(path), last.(path)
  frontier = collect(zip(□s, ⬕s))
  visited = falses(size(M))
  visited[□s] .= true
  while !isempty(frontier)
    □, ⬕ = pop!(frontier)
    if !visited[⬕]
      # flip color
      M[⬕] = 1 - M[⬕]
      visited[⬕] = true

      # update frontier
      δ = ⬕ - □
      ⬕₁ = ⬕ + δ
      ⬕₂ = ⬕ + CartesianIndex(δ[2], -δ[1])
      ⬕₃ = ⬕ + CartesianIndex(-δ[2], δ[1])
      for ⬕ₛ in [⬕₁, ⬕₂, ⬕₃]
        if !visited[⬕ₛ]
          push!(frontier, (⬕, ⬕ₛ))
        end
      end
    end
  end

  M
end

# convert forest of paths to rings
paths2rings(paths) = mapreduce(treebfs, vcat, paths)

# breadth-first-search on tree of paths
function treebfs(root)
  record(node) = (first(node), first.(last(node)))

  visited = []
  frontier = [root]
  while !isempty(frontier)
    node = popfirst!(frontier)

    seen = false
    for vnode in visited
      if first(node) ∈ last(vnode)
        seen = true
        break
      end
    end

    if !seen
      push!(visited, record(node))
    end

    for child in last(node)
      push!(frontier, child)
    end
  end

  visited
end
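A sketch tracing a categorical mask into polygons, patterned on the docstring examples and the package's precompile workload; the mask contents and the `ϵ` threshold are illustrative:

```julia
using GeoStats

# binary mask on a 10×10 grid: a 4×4 block of ones
mask = zeros(Int, 10, 10)
mask[4:7, 4:7] .= 1
gtb = georef((; mask))

# trace each category into a multi-polygon, simplifying
# the traced rings with threshold ϵ
polys = gtb |> Potrace(:mask, ϵ=0.1)
```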
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
836
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

using PrecompileTools

@setup_workload begin
  gtb = georef((; Z=rand(10, 10)))
  proc = GaussianProcess()
  @compile_workload begin
    gtb |> Interpolate(gtb.geometry)
    gtb |> InterpolateNeighbors(gtb.geometry)
    gtb |> InterpolateMissing()
    gtb |> InterpolateNaN()
    gtb |> Simulate(gtb.geometry, :Z => proc)
    gtb |> UniqueCoords()
    gtb |> Aggregate(gtb.geometry)
    gtb |> Transfer(gtb.geometry)
    gtb |> Upscale(2, 2)
    gtb |> Downscale(2, 2)
    gtb |> SLIC(3, 1.0)
    gtb |> GHC(3, 1.0)
    gtb |> GSC(3, 2.0)
    gtb |> Rasterize(gtb.geometry)
    gtb |> Potrace(:Z)
    gtb |> Detrend(:Z)
  end
end
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
3653
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Rasterize(grid)
    Rasterize(grid, var₁ => agg₁, ..., varₙ => aggₙ)

Rasterize geometries within specified `grid`.

    Rasterize(nx, ny)
    Rasterize(nx, ny, var₁ => agg₁, ..., varₙ => aggₙ)

Alternatively, use a grid of size `nx` by `ny` obtained by
discretizing the bounding box.

Duplicates of a variable `varᵢ` are aggregated with
aggregation function `aggᵢ`. If an aggregation function
is not defined for variable `varᵢ`, the default aggregation
function will be used. The default aggregation function is
`mean` for continuous variables and `first` otherwise.

# Examples

```julia
grid = CartesianGrid(10, 10)
Rasterize(grid)
Rasterize(10, 10)
Rasterize(grid, 1 => last, 2 => maximum)
Rasterize(10, 10, 1 => last, 2 => maximum)
Rasterize(grid, :a => first, :b => minimum)
Rasterize(10, 10, :a => first, :b => minimum)
Rasterize(grid, "a" => last, "b" => maximum)
Rasterize(10, 10, "a" => last, "b" => maximum)
```
"""
struct Rasterize{T<:Union{Grid,Dims},S<:ColumnSelector} <: TableTransform
  grid::T
  selector::S
  aggfuns::Vector{Function}
end

Rasterize(grid::Grid) = Rasterize(grid, NoneSelector(), Function[])
Rasterize(nx::Int, ny::Int) = Rasterize((nx, ny), NoneSelector(), Function[])

Rasterize(grid::Grid, pairs::Pair{C,<:Function}...) where {C<:Column} =
  Rasterize(grid, selector(first.(pairs)), collect(Function, last.(pairs)))

Rasterize(nx::Int, ny::Int, pairs::Pair{C,<:Function}...) where {C<:Column} =
  Rasterize((nx, ny), selector(first.(pairs)), collect(Function, last.(pairs)))

isrevertible(::Type{<:Rasterize}) = true

_grid(grid::Grid, dom) = grid
_grid(dims::Dims, dom) = CartesianGrid(extrema(boundingbox(dom))...; dims)

function apply(transform::Rasterize, geotable::AbstractGeoTable)
  gtb = _adjustunits(geotable)
  dom = domain(gtb)
  tab = values(gtb)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)
  types = Tables.schema(tab).types

  grid = _grid(transform.grid, dom)
  ncols = length(vars)
  nrows = nelements(grid)

  # aggregation functions
  svars = transform.selector(vars)
  agg = Dict(zip(svars, transform.aggfuns))
  for var in vars
    if !haskey(agg, var)
      v = Tables.getcolumn(cols, var)
      agg[var] = _defaultagg(v)
    end
  end

  mask = zeros(Int, nrows)
  rows = [[T[] for T in types] for _ in 1:nrows]
  for (ind, geom) in enumerate(dom)
    for i in indices(grid, geom)
      mask[i] = ind
      row = Tables.subset(tab, ind)
      for j in 1:ncols
        v = Tables.getcolumn(row, j)
        push!(rows[i][j], v)
      end
    end
  end

  # generate grid column
  function gencol(j, var)
    map(1:nrows) do i
      vs = rows[i][j]
      if isempty(vs)
        missing
      else
        agg[var](vs)
      end
    end
  end

  # construct new table
  pairs = (var => gencol(j, var) for (j, var) in enumerate(vars))
  newtab = (; pairs...) |> Tables.materializer(tab)

  # new spatial data
  newgeotable = georef(newtab, grid)

  newgeotable, mask
end

function revert(::Rasterize, newgeotable::AbstractGeoTable, cache)
  dom = domain(newgeotable)
  tab = values(newgeotable)
  cols = Tables.columns(tab)
  names = Tables.columnnames(cols)

  mask = :mask
  # make unique
  while mask ∈ names
    mask = Symbol(mask, :_)
  end

  pairs = (nm => Tables.getcolumn(cols, nm) for nm in names)
  newtab = (; mask => cache, pairs...)

  newgtb = georef(newtab, dom)

  newgtb |> Potrace(mask) |> Filter(row -> row[mask] > 0) |> Reject(mask)
end
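A sketch rasterizing attributed geometries onto a grid derived from their bounding box; the triangles, the attribute `A`, and the explicit `mean` aggregator are illustrative (`mean` comes from the Statistics standard library):

```julia
using GeoStats
using Statistics: mean

# two triangles carrying one attribute value each
tris = [Triangle((1.0, 1.0), (6.0, 1.0), (1.0, 6.0)),
        Triangle((9.0, 9.0), (4.0, 9.0), (9.0, 4.0))]
gtb = georef((; A=[1.0, 2.0]), tris)

# rasterize onto a 10×10 grid over the bounding box; cells covered
# by more than one triangle aggregate their duplicates with `mean`
raster = gtb |> Rasterize(10, 10, :A => mean)
```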
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
2459
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Simulate(domain, vars₁ => process₁, ..., varsₙ => processₙ; [parameters])
    Simulate(domain, nreals, vars₁ => process₁, ..., varsₙ => processₙ; [parameters])
    Simulate([g₁, g₂, ..., gₙ], vars₁ => process₁, ..., varsₙ => processₙ; [parameters])
    Simulate([g₁, g₂, ..., gₙ], nreals, vars₁ => process₁, ..., varsₙ => processₙ; [parameters])

Simulate `nreals` realizations of variables `varsᵢ` with geostatistical process
`processᵢ` over given `domain` or vector of geometries `[g₁, g₂, ..., gₙ]`.

The `parameters` are forwarded to the `rand` method of the geostatistical processes.
"""
struct Simulate{D<:Domain,R<:AbstractRNG,K} <: TableTransform
  domain::D
  nreals::Int
  selectors::Vector{ColumnSelector}
  processes::Vector{GeoStatsProcess}
  rng::R
  kwargs::K
end

Simulate(domain::Domain, nreals::Int, selectors, processes, rng, kwargs) =
  Simulate(domain, nreals, collect(ColumnSelector, selectors), collect(GeoStatsProcess, processes), rng, kwargs)

Simulate(geoms::AbstractVector{<:Geometry}, nreals::Int, selectors, processes, rng, kwargs) =
  Simulate(GeometrySet(geoms), nreals, selectors, processes, rng, kwargs)

Simulate(domain, nreals::Int, pairs::Pair{<:Any,<:GeoStatsProcess}...; rng=Random.default_rng(), kwargs...) =
  Simulate(domain, nreals, selector.(first.(pairs)), last.(pairs), rng, values(kwargs))

Simulate(domain, pairs::Pair{<:Any,<:GeoStatsProcess}...; rng=Random.default_rng(), kwargs...) =
  Simulate(domain, 1, selector.(first.(pairs)), last.(pairs), rng, values(kwargs))

isrevertible(::Type{<:Simulate}) = false

function apply(transform::Simulate, geotable::AbstractGeoTable)
  tab = values(geotable)
  cols = Tables.columns(tab)
  vars = Tables.columnnames(cols)

  (; domain, nreals, selectors, processes, rng, kwargs) = transform

  ensembles = map(selectors, processes) do selector, process
    svars = selector(vars)
    data = geotable[:, svars]
    svars => rand(rng, process, domain, data, nreals; kwargs...)
  end

  pad = ndigits(nreals)
  pairs = mapreduce(vcat, ensembles) do (vars, ensemble)
    mapreduce(vcat, 1:nreals) do i
      [Symbol(v, :_, string(i; pad)) => ensemble[v][i] for v in vars]
    end
  end

  newgeotable = georef((; pairs...), domain)

  newgeotable, nothing
end
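A sketch of multiple realizations, mirroring the precompile workload above; `GaussianProcess()` is assumed from GeoStatsProcesses.jl, and the realization count is illustrative:

```julia
using GeoStats

gtb = georef((; Z=rand(10, 10)))
proc = GaussianProcess()

# three realizations of :Z on the same grid; per the naming scheme
# in `apply`, the resulting columns are Z_1, Z_2, Z_3
sims = gtb |> Simulate(gtb.geometry, 3, :Z => proc)
```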
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
[ "MIT" ]
0.8.1
d3b0fabcf8cbba4e64f532e3c6717e1b231b75f5
code
2414
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    Transfer(domain)
    Transfer([g₁, g₂, ..., gₙ])

Transfer variables `var₁`, `var₂`, ..., `varₙ` from source domain to target `domain`.
Alternatively, transfer variables to geometries `g₁`, `g₂`, ..., `gₙ`.

# Examples

```julia
Transfer(CartesianGrid(10, 10))
Transfer(rand(Point, 100))
```
"""
struct Transfer{D<:Domain} <: TableTransform
  domain::D
end

Transfer(geoms::AbstractVector{<:Geometry}) = Transfer(GeometrySet(geoms))

isrevertible(::Type{<:Transfer}) = false

function apply(transform::Transfer, geotable::AbstractGeoTable)
  gtb = _adjustunits(geotable)
  table = values(gtb)
  cols = Tables.columns(table)
  vars = Tables.columnnames(cols)

  # source and target domains
  sdom = domain(gtb)
  tdom = transform.domain

  # perform transfer
  newcols = _transfer(sdom, tdom, cols, vars)
  newtable = (; newcols...) |> Tables.materializer(table)

  georef(newtable, tdom), nothing
end

function _transfer(sdom, tdom, cols, vars)
  if sdom isa Grid && tdom isa Grid && extrema(sdom) == extrema(tdom)
    # we have two grids overlaid, and can rely on
    # tiled iteration for efficient transfer
    _gridtransfer(sdom, tdom, cols, vars)
  else
    # general case with knn search
    _knntransfer(sdom, tdom, cols, vars)
  end
end

function _gridtransfer(sdom, tdom, cols, vars)
  # determine tile size for tiled iteration
  tilesize = ceil.(Int, size(tdom) ./ size(sdom))
  if any(<(1), tilesize)
    # fallback to general case with knn search
    _knntransfer(sdom, tdom, cols, vars)
  else
    # perform transfer with tiled iteration
    map(vars) do var
      svals = Tables.getcolumn(cols, var)
      array = similar(svals, size(tdom))
      titer = TileIterator(axes(array), tilesize)
      for (sind, tinds) in enumerate(titer)
        array[tinds...] .= svals[sind]
      end
      tvals = vec(array)
      var => tvals
    end
  end
end

function _knntransfer(sdom, tdom, cols, vars)
  # find nearest elements in source domain
  knn = KNearestSearch(sdom, 1)
  near = tmap(1:nelements(tdom)) do i
    first(search(centroid(tdom, i), knn))
  end

  # perform transfer
  map(vars) do var
    svals = Tables.getcolumn(cols, var)
    tvals = svals[near]
    var => tvals
  end
end
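A sketch exercising the fast grid-to-grid path in `_transfer`, assuming the GeoStats.jl umbrella package; the two grids are chosen to share the same bounding box so that `extrema(sdom) == extrema(tdom)` holds and the tiled-iteration branch is taken instead of the knn fallback:

```julia
using GeoStats

# coarse 10×10 grid with data, covering the box (0, 0) to (10, 10)
coarse = georef((; Z=rand(10, 10)))

# fine 20×20 grid over the same box (spacing 0.5)
fine = CartesianGrid((20, 20), (0.0, 0.0), (0.5, 0.5))

# each fine cell receives the value of the coarse cell containing it
transferred = coarse |> Transfer(fine)
```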
GeoStatsTransforms
https://github.com/JuliaEarth/GeoStatsTransforms.jl.git