licenses (sequence, lengths 1–3) | version (677 string classes) | tree_hash (string, 40 chars) | path (1 string class) | type (2 string classes) | size (string, 2–8 chars) | text (string, 25–67.1M chars) | package_name (string, 2–41 chars) | repo (string, 33–86 chars)
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1087 | #
# Plot orbits
#
"""
Plots every timestep in `sols` in `3D` space. All keyword
arguments are passed directly to `Plots.jl`.
"""
function orbitplot(sols::Trajectory{<:NBodySystem}; bodies=1:length(sols.step[1].bodies), kwargs...)
# Referencing:
# [1] https://discourse.julialang.org/t/smart-kwargs-dispatch/14571/15
# Set default kwargs (modified from [1])
defaults = (; formatter=:scientific,
legend=:topleft,
xlabel="X Position (km)",
ylabel="Y Position (km)",
zlabel="Z Position (km)",
title ="NBody Positions vs. Time")
options = merge(defaults, kwargs)
fig = Plots.plot()
for i = bodies
Plots.plot!(fig, ustrip.(u"km", map(x -> x.body[i].r̅[1], sols.step)),
ustrip.(u"km", map(x -> x.body[i].r̅[2], sols.step)),
ustrip.(u"km", map(x -> x.body[i].r̅[3], sols.step)),
label=string("Body ", i))
end
Plots.plot!(fig; options...)
return fig
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1028 | #
# CR3BP Plots
#
"""
Plots the specified Lagrange points in the rotating
(synodic) reference frame of the CR3BP system with mass parameter `μ`.
"""
function lagrangeplot(μ, L=1:5; kwargs...)
defaults = (; title="Nondimensional Lagrange Points",
xlabel="X (DU)", ylabel="Y (DU)",
labels=["Body 1" "Body 2" [string("L",i) for i ∈ L]...])
options = merge(defaults, kwargs)
    lagrange_points = lagrange(μ, L)
fig = scatter([-μ], [0]; markersize=10, markercolor=:lightblue, label="Body 1")
scatter!(fig, [1-μ], [0]; markersize=6, markercolor=:gray, label="Body 2")
colors = (:red, :orange, :tan, :cyan, :indigo)
    for (p, i) ∈ zip(lagrange_points, L)
scatter!(fig, [p[1]], [p[2]];
markershape=:x, markercolor=colors[i], label=string("L", i))
end
scatter!(fig; options...)
for i ∈ 1:min(length(fig.series_list), length(options.labels))
fig.series_list[i].plotattributes[:label] = options.labels[i]
end
fig
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 2153 | #
# Plot orbits
#
# Referencing:
# [1] https://discourse.julialang.org/t/smart-kwargs-dispatch/14571/15
"""
Plots every timestep in `sols`, in either the Cartesian (`3D`) or
Perifocal (`2D`) frame. All keyword arguments are passed directly to `Plots.jl`.
"""
function orbitplot(sols::Trajectory{<:RestrictedTwoBodySystem}, frame=:Cartesian; kwargs...)
# Provided frame can be :Cartesian, or :Perifocal
if frame == :Perifocal
return plot2d(sols; kwargs...)
elseif frame == :Cartesian
return plot3d(sols; kwargs...)
else
throw(ArgumentError("`frame` must be set to `:Cartesian` or `:Perifocal`"))
end
end
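# Hypothetical usage sketch (illustrative only; `traj` is assumed to be a
# `Trajectory{<:RestrictedTwoBodySystem}` returned by `propagate`):
#
#   orbitplot(traj)               # 3D Cartesian view (the default frame)
#   orbitplot(traj, :Perifocal)   # 2D perifocal view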
function plot2d(sols::Trajectory{<:RestrictedTwoBodySystem}; kwargs...)
# Set default kwargs (modified from [1])
defaults = (; formatter=:scientific,
legend=:topleft,
size=(900, 600),
xlabel="Xₚ (km)",
ylabel="Yₚ (km)",
title ="Twobody Orbit Positions vs. Time")
options = merge(defaults, kwargs)
fig = Plots.plot()
Plots.plot!(fig, ustrip.(u"km", map(x->periapsis_radius(x)[1], sols.step)),
ustrip.(u"km", map(x->perifocal_radius(x)[2], sols.step)),
label="Perifocal Position")
Plots.plot!(fig; options...)
return fig
end
function plot3d(sols::Trajectory{<:RestrictedTwoBodySystem}; kwargs...)
# Set default kwargs (modified from [1])
defaults = (; formatter=:scientific,
legend=:topleft,
size=(900, 600),
xlabel="X Position (km)",
ylabel="Y Position (km)",
zlabel="Z Position (km)",
title ="Twobody Orbit Positions vs. Time")
options = merge(defaults, kwargs)
fig = Plots.plot()
Plots.plot!(fig, ustrip.(u"km", map(x->radius_vector(x)[1], sols.step)),
ustrip.(u"km", map(x->radius_vector(x)[2], sols.step)),
ustrip.(u"km", map(x->radius_vector(x)[3], sols.step)),
label="Cartesian Position")
Plots.plot!(fig; options...)
return fig
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 741 | """
Contains abstractions for describing orbital states and bodies.
Implementations are provided in `TwoBody` and `NBody`.
"""
module CommonTypes
using Reexport
@reexport using Unitful, UnitfulAngles, UnitfulAstro
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
export AbstractBody, OrbitalSystem, AbstractTrajectory
"""
Abstract type for bodies in space: both `CelestialBody`s (in
`TwoBody.jl`), and `Body`s (in `NBody.jl`).
"""
abstract type AbstractBody end
"""
Abstract type describing all states in select Astrodynamics problems.
"""
abstract type OrbitalSystem end
"""
Abstract type describing a collection of states resulting from numerical integration
"""
abstract type AbstractTrajectory end
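# Hypothetical sketch (illustrative only) of how a downstream module hooks into
# this hierarchy; the real implementations live in `TwoBody` and `NBody`:
#
#   struct ExampleBody <: AbstractBody
#       m::Unitful.Mass{Float64}
#   end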
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 478 | """
Provides calculations for orbit maneuvers.
"""
module Maneuvers
using Reexport
@reexport using ..CommonTypes
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
using ..NBody
using ..TwoBody
using LinearAlgebra: norm, cross, ×, dot, ⋅
export AbstractManeuver, TwoBodyManeuver, ConstantManeuver
export escape_radius, escape_velocity, escape_time, escape_path_length
include("maneuver_types.jl")
include("twobody_maneuver_calculations.jl")
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 372 | #
# Data structures and types for orbit maneuvers
#
"""
An abstract type for all orbit maneuvers.
"""
abstract type AbstractManeuver end
"""
An abstract type for all twobody maneuvers.
"""
abstract type TwoBodyManeuver <: AbstractManeuver end
"""
A type for constant, continuous thrust.
"""
struct ConstantManeuver <: TwoBodyManeuver
aₜ::Unitful.Acceleration
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1198 | #
# Maneuver calculations for orbits within the Twobody Problem
#
"""
Provides the radius of escape.
"""
escape_radius(r₀, v₀, aₜ) = r₀ * v₀ / (20 * aₜ^2 * r₀^2)^(1/4)
escape_radius(orbit::T, m::ConstantManeuver) where T <: RestrictedTwoBodySystem = escape_radius(radius(orbit), velocity(orbit), m.aₜ)
"""
Provides the velocity at escape.
"""
escape_velocity(r₀, v₀, aₜ, μ) = √(2 * μ / escape_radius(r₀, v₀, aₜ))
escape_velocity(orbit::T, m::ConstantManeuver) where T <: RestrictedTwoBodySystem = escape_velocity(radius(orbit), velocity(orbit), m.aₜ, orbit.body.μ)
"""
Provides time delta from the provided initial orbit to escape.
"""
escape_time(r₀, v₀, aₜ) = upreferred(v₀ / aₜ) * (1 - (upreferred(20aₜ^2 * r₀^2) / upreferred(v₀^4))^(1/8))
escape_time(orbit::T, m::ConstantManeuver) where T <: RestrictedTwoBodySystem = escape_time(radius(orbit), velocity(orbit), m.aₜ)
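# Hypothetical example (illustrative values, not from the package): the escape
# helpers compose as follows for a continuous low-thrust spiral out of a
# circular low-Earth orbit.
#
#   r₀ = 6778.0u"km"                # initial orbit radius
#   v₀ = 7.669u"km/s"               # initial orbit speed
#   aₜ = 1e-4u"m/s^2"               # constant thrust acceleration
#   μ  = 3.986004418e5u"km^3/s^2"   # Earth's gravitational parameter
#   escape_radius(r₀, v₀, aₜ), escape_velocity(r₀, v₀, aₜ, μ), escape_time(r₀, v₀, aₜ)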
"""
Provides the path length from the initial condition to escape.
"""
escape_path_length(r₀, v₀, aₜ) = upreferred(v₀^2 / 2aₜ) * (1 - upreferred((1/v₀) * (20aₜ^2 * r₀^2)^(1/4)))
escape_path_length(orbit::T, m::ConstantManeuver) where T <: RestrictedTwoBodySystem = escape_path_length(radius(orbit), velocity(orbit), m.aₜ) | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 2002 | #
# Provides common DocStringExtensions abbreviations
# for UnitfulAstrodynamics.jl sub-modules. This file
# is NOT meant to be included by anything else! It
# includes `export` line(s), which will cause an
# error if this file is included in any non-module
# context.
#
using DocStringExtensions
struct TerseMethods <: DocStringExtensions.Abbreviation end
const TERSEMETHODS = TerseMethods()
function DocStringExtensions.format(::TerseMethods, buf, doc)
local binding = doc.data[:binding]
local typesig = doc.data[:typesig]
local modname = doc.data[:module]
local func = Docs.resolve(binding)
local groups = DocStringExtensions.methodgroups(func, typesig, modname; exact = false)
if !isempty(groups)
println(buf)
println(buf, "```julia")
for group in groups
for method in group
DocStringExtensions.printmethod(buf, binding, func, method)
println(buf)
end
end
println(buf, "```\n")
println(buf)
end
return nothing
end
struct SourceCode <: DocStringExtensions.Abbreviation end
const SOURCECODE = SourceCode()
function DocStringExtensions.format(abbrv::SourceCode, buf, doc)
if include_source_in_docstring
println("Adding source code!")
file = doc.data[:path]
if isfile(file)
lines = Base.Iterators.drop(eachline(file), doc.data[:linenumber] - 1)
text = join(lines, '\n')
_, from = Meta.parse(text, 1; greedy=false)
_, to = Meta.parse(text, from)
println(buf, "```julia")
println(buf, rstrip(text[from:to]))
println(buf, "```")
end
else
println("Skipping sourceode.")
end
return nothing
end
include_source_in_docstring = false
include_sourcecode(b::Bool) = include_source_in_docstring = b
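# Hypothetical usage (illustrative only): call `include_sourcecode(true)` before
# building documentation to embed each method's source in its docstring via
# the `SOURCECODE` abbreviation above.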
@template (FUNCTIONS, METHODS, MACROS) =
"""
$(METHODLIST)
$(DOCSTRING)
"""
@template (DEFAULT) =
"""
$(DOCSTRING)
""" | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 265 | #
# Aliases for Unitful dimension singletons
#
Unitful.@derived_dimension MassParameter Unitful.𝐋^3/Unitful.𝐓^2
const MassParameter = MassParameter
const Length = Unitful.Length
const Velocity = Unitful.Velocity
const Time = Unitful.Time
const Mass = Unitful.Mass | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 514 | """
Handles the non-relativistic NBody problem for planets
and other celestial bodies.
"""
module NBody
using Reexport
@reexport using ..CommonTypes
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
using LinearAlgebra: norm, cross, ×, dot, ⋅
using StaticArrays: SVector, @SVector, SMatrix, @SMatrix
export Body, NBodySystem, system_energy,
system_angular_momentum, promote, convert,
Float16, Float32, Float64, BigFloat,
length, getindex
include("NBodyStates.jl")
include("NBodyCalculations.jl")
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 716 | #
# NBody calculations
#
"""
Returns total energy for `NBodySystem`.
"""
function system_energy(sys::NBodySystem)
    E = 0.0u"J"
    for i = 1:length(sys.bodies)
        # Kinetic energy of body i
        E += 0.5 * sys.bodies[i].m * dot(sys.bodies[i].v, sys.bodies[i].v)
        # Gravitational potential energy, counted once per pair (j > i)
        for j = i+1:length(sys.bodies)
            E -= 6.6743e-11u"m^3/(kg*s^2)" * (sys.bodies[i].m * sys.bodies[j].m) /
                    norm(sys.bodies[j].r .- sys.bodies[i].r)
        end
    end
    return E
end
"""
Returns total angular momentum for `NBodySystem`.
"""
function system_angular_momentum(sys::NBodySystem)
    return reduce(+, [b.m * cross(b.r, b.v) for b ∈ sys.bodies])
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 3044 | #
# Handles NBody problem states.
#
"""
Stores the state of each body in the NBody problem.
"""
struct Body{F<:AbstractFloat} <: AbstractBody
r::SVector{3, Unitful.Length{F}}
v::SVector{3, Unitful.Velocity{F}}
m::Unitful.Mass{F}
name::String
function Body(r::VR, v::VV, m::M, name::String="") where {
T1 <: Real,
T2 <: Real,
T3 <: Real,
R <: Unitful.Length{T1},
V <: Unitful.Velocity{T2},
VR <: AbstractVector{R},
VV <: AbstractVector{V},
M <: Unitful.Mass{T3}
}
        if length(r) != 3 || length(v) != 3
throw(ArgumentError("The `Body` constructor requires 3 element vectors for position `r` and velocity `v`"))
else
T = promote_type(T1, T2, T3)
if !(T <: AbstractFloat)
@warn "Non-float parameters provided. Defaulting to Float64."
T = Float64
end
return new{T}(SVector{3}(T.(r)), SVector{3}(T.(v)), T(m), name)
end
end
function Body(r::VR, v::VV, m::M, name::String="") where {
R <: Real,
V <: Real,
VR <: AbstractVector{R},
VV <: AbstractVector{V},
M <: Real
}
@warn "No units provided! Assuming km, km/s, and kg."
return Body(r * u"km", v * u"km/s", m * u"kg", name)
end
end
Base.convert(::Type{T}, b::Body) where {T<:AbstractFloat} = Body(T.(b.r), T.(b.v), T(b.m), b.name)
Base.promote(::Type{Body{A}}, ::Type{Body{B}}) where {A<:AbstractFloat, B<:AbstractFloat} = Body{promote_type(A,B)}
Core.Float16(o::Body) = convert(Float16, o)
Core.Float32(o::Body) = convert(Float32, o)
Core.Float64(o::Body) = convert(Float64, o)
Base.MPFR.BigFloat(o::Body) = convert(BigFloat, o)
"""
Describes a system of `N` `Body`s.
"""
struct NBodySystem{N, T<:AbstractFloat} <: OrbitalSystem
bodies::SVector{N, Body{T}}
function NBodySystem(b::B...) where B<:Body
N = length(b)
bodies = promote(b...)
T = promote_type([typeof(bᵢ).parameters[1] for bᵢ ∈ b]...)
return new{length(b),T}(SVector{N, Body{T}}(bodies...))
end
NBodySystem(b::VB) where {B<:Body, VB<:AbstractVector{B}} = NBodySystem(b...)
end
"""
The `length` of an `NBodySystem` is the number of bodies in the system.
"""
Base.@pure Base.length(sys::NBodySystem{N,T}) where N where T = N
"""
The n-th `index` of an `NBodySystem` is the n-th body in the system.
"""
Base.getindex(sys::NBodySystem, i) = sys.bodies[i]
Base.convert(::Type{T}, sys::NBodySystem) where {T<:AbstractFloat} = NBodySystem(convert.(T, sys.bodies)...)
Base.promote(::Type{NBodySystem{N, A}}, ::Type{NBodySystem{N, B}}) where {A<:AbstractFloat, B<:AbstractFloat, N} = NBodySystem{N, promote_type(A,B)}
Core.Float16(o::NBodySystem) = convert(Float16, o)
Core.Float32(o::NBodySystem) = convert(Float32, o)
Core.Float64(o::NBodySystem) = convert(Float64, o)
Base.MPFR.BigFloat(o::NBodySystem) = convert(BigFloat, o)
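# Hypothetical construction example (illustrative values):
#
#   earth = Body([0.0, 0.0, 0.0]u"km", [0.0, 0.0, 0.0]u"km/s", 5.972e24u"kg", "Earth")
#   luna  = Body([384400.0, 0.0, 0.0]u"km", [0.0, 1.022, 0.0]u"km/s", 7.346e22u"kg", "Luna")
#   sys   = NBodySystem(earth, luna)
#   length(sys)   # 2
#   sys[2]        # the `luna` Body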
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1813 | #
# Propagator for the NBody problem.
#
"""
NBodyTic!
Currently not exported. Used for n-body numerical integration.
"""
function NBodyTic!(∂u, u, p, t=0)
for i = 1:length(u.body)
∂u.body[i].r = u.body[i].v
∂u.body[i].v = zero.(∂u.body[i].v)
for j = 1:length(u.body)
if i ≠ j
∂u.body[i].v += ((p.m[i] * p.m[j]) / norm(u.body[j].r .- u.body[i].r)^3 * (u.body[j].r .- u.body[i].r))
end
end
∂u.body[i].v *= (p.G / p.m[i])
end
return nothing
end
"""
Uses OrdinaryDiffEq solvers to propagate `sys` Δt into the future.
All keyword arguments are passed directly to OrdinaryDiffEq solvers.
References:
* [1] https://diffeq.sciml.ai/v4.0/tutorials/ode_example.html
* [2] https://github.com/SciML/DifferentialEquations.jl/issues/393#issuecomment-658210231
* [3] https://discourse.julialang.org/t/smart-kwargs-dispatch/14571/15
"""
function propagate(sys::NBodySystem{N,T}, Δt::Unitful.Quantity; kwargs...) where N where T
# Integration options
defaults = (; reltol=1e-14, abstol=1e-14)
options = merge(defaults, kwargs)
# Initial conditions
u₀ = ComponentArray(body=(map(b -> ComponentArray((r=ustrip.(u"m", b.r), v=ustrip.(u"m/s", b.v))), sys.bodies)))
ts = T.(ustrip.(u"s", (zero(Δt), Δt)))
p = ComponentArray((G=6.6743e-11, m=map(b->ustrip(u"kg",b.m), sys.bodies)))
# Integrate!
sols = solve(ODEProblem(NBodyTic!, u₀, ts, p); options...)
# Unpack and return
bodies = map(x->NBodySystem(
map(i->Body(u"m" * x.body[i].r, u"m/s" * x.body[i].v, sys.body[i].m),
1:length(sys.body))),
sols.u)
return Trajectory(
bodies,
u"s" * sols.t,
sols.retcode
)
end
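# Hypothetical usage (illustrative only; assumes an `NBodySystem` named `sys`,
# e.g. as constructed in NBodyStates.jl):
#
#   traj = propagate(sys, 86400.0u"s"; reltol=1e-12, abstol=1e-12)
#   traj.step[end]   # the NBodySystem state after one day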
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1943 | #
# PropagateThreeBody.jl
#
# Includes functions and structures for propagating orbits
# within the circular restricted three-body problem.
#
"""
RestrictedThreeBodyTic!
Currently not exported. Used for three-body numerical integration.
"""
function RestrictedThreeBodyTic!(∂u, u, p, t=0)
∂u.rₛ = u.vₛ
ThreeBody.accel!(∂u.vₛ, u.rₛ, u.vₛ, p.μ)
return nothing
end
"""
Uses OrdinaryDiffEq solvers to propagate `sys` Δt into the future.
All keyword arguments are passed directly to OrdinaryDiffEq solvers.
"""
function propagate(sys::NondimensionalThreeBodyState, Δt::T = sys.Δt; kwargs...) where T<:Real
# Referencing:
# [1] https://diffeq.sciml.ai/v4.0/tutorials/ode_example.html
# [2] https://github.com/SciML/DifferentialEquations.jl/issues/393#issuecomment-658210231
# [3] https://discourse.julialang.org/t/smart-kwargs-dispatch/14571/15
# Set default kwargs (modified from [3])
defaults = (; reltol=1e-14, abstol=1e-14)
options = merge(defaults, kwargs)
# Initial conditions
u₀ = ComponentArray((rₛ=sys.r, vₛ=sys.v))
ts = (zero(Δt), Δt)
p = ComponentArray((μ=sys.μ, x₁=-sys.μ, x₂=1-sys.μ))
# Numerically integrate!
sols = solve(ODEProblem(RestrictedThreeBodyTic!, u₀, ts, p); options...)
# Return PropagationResult structure
return Trajectory(
map(step->NondimensionalThreeBodyState(step.rₛ, step.vₛ, sys.μ, sys.Δt, sys.DU, sys.DT), sols.u),
sols.t,
sols.retcode
)
end
"""
Uses OrdinaryDiffEq solvers to propagate `sys` Δt into the future.
All keyword arguments are passed directly to OrdinaryDiffEq solvers.
"""
function propagate(sys::ThreeBodyState, Δt::Unitful.Time{<:Real} = sys.Δt)
traj = propagate(nondimensionalize(sys), nondimensionalize(Δt, sys.a, sys.μ₁, sys.μ₂))
    return Trajectory(
        redimensionalize.(traj.step, sys.μ₁, sys.μ₂),
        redimensionalize.(traj.t, sys.a, sys.μ₁, sys.μ₂),
        traj.status
    )
end
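# Hypothetical usage (illustrative only; `state` is assumed to be a
# `NondimensionalThreeBodyState`, e.g. returned by `halo`):
#
#   traj = propagate(state, state.Δt; reltol=1e-12, abstol=1e-12)
#   traj.step[end].r   # final nondimensional synodic position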
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 2647 | #
# propagator.jl
#
# Includes functions and structures for propagating orbits
# within the two-body problem.
#
"""
Currently not exported. Used for ideal two-body numerical integration.
"""
function RestrictedTwoBodyTic!(∂u, u, p, t)
∂u.rᵢ = u.vᵢ
∂u.vᵢ = -p.μ .* (u.rᵢ ./ norm(u.rᵢ,2)^3)
return nothing
end
"""
Currently not exported. Used for ideal two-body numerical integration.
"""
function RestrictedBiasedTwoBodyTic!(∂u, u, p, t)
∂u.rᵢ = u.vᵢ
∂u.vᵢ = (-p.μ .* (u.rᵢ ./ norm(u.rᵢ,2)^3)) .+ (normalize(u.vᵢ) .* p.T)
return nothing
end
"""
Uses OrdinaryDiffEq solvers to propagate `orbit` Δt into the future.
All keyword arguments are passed directly to OrdinaryDiffEq solvers.
References:
* [1] https://diffeq.sciml.ai/v4.0/tutorials/ode_example.html
* [2] https://github.com/SciML/DifferentialEquations.jl/issues/393#issuecomment-658210231
* [3] https://discourse.julialang.org/t/smart-kwargs-dispatch/14571/15
"""
function propagate(orbit::TwoBodyState{C, T},
Δt::Unitful.Time = period(orbit);
thrust::Unitful.Acceleration = 0.0u"N/kg",
kwargs...) where C where T
# Set default kwargs
defaults = (; reltol=1e-14, abstol=1e-14)
options = merge(defaults, kwargs)
# Initial conditions
r₀ = Array(ustrip.(u"m", radius_vector(orbit)))
v₀ = Array(ustrip.(u"m/s", velocity_vector(orbit)))
u₀ = ComponentArray((rᵢ=r₀, vᵢ=v₀))
ts = T.(ustrip.(u"s", (zero(Δt), Δt)))
f = thrust == zero(thrust) ? RestrictedTwoBodyTic! : RestrictedBiasedTwoBodyTic!
p = let
if thrust == zero(thrust)
ComponentArray((μ=ustrip(u"m^3/s^2", orbit.body.μ)))
else
ComponentArray((μ=ustrip(u"m^3/s^2", orbit.body.μ), T=ustrip(u"N/kg", thrust)))
end
end
# Integrate!
sols = solve(ODEProblem(f, u₀, ts, p); options...)
# Return PropagationResult structure
return Trajectory(
map(x -> TwoBodyState(u"m" * x.rᵢ, u"m/s" * x.vᵢ, orbit.body), sols.u),
sols.t .* u"s",
sols.retcode
)
end
"""
Uses OrdinaryDiffEq solvers to propagate `orbit` Δt into the future.
All keyword arguments are passed directly to OrdinaryDiffEq solvers.
"""
function propagate(orbit::KeplerianState,
                   Δt::Unitful.Time = period(orbit);
                   thrust::Unitful.Acceleration = 0.0u"N/kg",
                   kwargs...)
    traj = propagate(TwoBodyState(orbit), Δt; thrust=thrust, kwargs...)
return Trajectory(KeplerianState.(traj.step), traj.t, traj.status)
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 743 | """
Provides orbit propagators for the two-body problem,
and the n-body problem.
"""
module Propagators
using Reexport
@reexport using ..CommonTypes
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
using ..NBody
using ..TwoBody
using ..ThreeBody
using DifferentialEquations
using LinearAlgebra: norm, normalize, cross, ×, dot, ⋅
using ComponentArrays
using StaticArrays: SVector, @SVector, SMatrix, @SMatrix
export Trajectory,
propagate,
RestrictedTwoBodyTic!,
RestrictedBiasedTwoBodyTic!,
RestrictedThreeBodyTic!,
NBodyTic!,
show
include("Trajectory.jl")
include("PropagateTwoBody.jl")
include("PropagateThreeBody.jl")
include("PropagateNBody.jl")
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1180 | #
# Describes trajectories for all `OrbitalSystem`s.
#
"""
A structure for storing trajectories of `TwoBodySystem` orbits,
`RestrictedThreeBodySystem` orbits, and `NBodySystem` orbits.
"""
struct Trajectory{T<:OrbitalSystem} <: AbstractTrajectory
t::Vector{<:Number}
step::Vector{T}
status::Symbol
function Trajectory(step::AbstractVector{T},
t::AbstractVector{<:Number} = [i for i ∈ 1:length(step)],
status::Symbol = :notapplicable) where T <: OrbitalSystem
@assert length(step) == length(t) "Time vector and state vectors must have the same length!"
return new{T}(Vector(t), Vector(step), status)
end
end
"""
Copy constructor for `Trajectory` instances.
"""
Trajectory(traj::Trajectory) = Trajectory(traj.step, traj.t, traj.status)
"""
The `length` of a trajectory is the number of steps in the trajectory.
"""
Base.length(traj::Trajectory) = length(traj.t)
"""
The _n-th_ `index` of a trajectory is the _n-th_ step of the trajectory.
"""
Base.getindex(traj::Trajectory, i) = traj.step[i]
Base.show(io::IO, traj::Trajectory) = println(io, typeof(traj), " with ", length(traj), " steps")
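# Hypothetical usage (illustrative only; `states` and `times` are assumed to be
# equal-length vectors of `OrbitalSystem`s and times):
#
#   traj = Trajectory(states, times, :Success)
#   length(traj), traj[1], traj.status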
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 11565 | #
# Functions related to Halo orbits
#
"""
Returns the partial derivative matrix of potential `U`.
__Arguments:__
- `μ`: Non-dimensional mass parameter for the CR3BP system.
- `r`: Non-dimensional position vector for the spacecraft.
__Outputs:__
- Partial derivative matrix of potential `U`.
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/)
"""
potential_energy_hessian = let
func = include("PotentialEnergyHessian.jl")
(r,μ) -> func(r..., μ)
end
"""
Returns the derivative mapping of CR3BP state transition matrix, `F`.
__Arguments:__
- `μ`: Non-dimensional mass parameter for the CR3BP system.
- `r`: Non-dimensional position vector for the spacecraft.
__Outputs:__
- Linear mapping from Φ to Φ̇, `F`.
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/)
"""
function state_transition_dynamics(μ, r)
return SMatrix{6,6}(vcat(
hcat(zeros((3,3)), I(3)),
hcat(potential_energy_hessian(r, μ), [0 2 0; -2 0 0; 0 0 0])
))
end
"""
Returns an analytical solution for a Halo orbit about `L`.
__Arguments:__
- `μ`: Non-dimensional mass parameter for the CR3BP system.
- `Az`: Desired non-dimensional Z-amplitude for Halo orbit.
- `ϕ`: Desired Halo orbit phase.
- `steps`: Number of non-dimensional timepoints in returned state.
- `L`: Lagrange point to orbit (L1 or L2).
- `hemisphere`: Specifies northern or southern Halo orbit.
__Outputs:__
- Synodic position vector `r::Array{<:AbstractFloat}`
- Synodic velocity vector `v::Array{<:AbstractFloat}`.
- Halo orbit period `Τ`.
- Throws `ArgumentError` if L is not `1` or `2`.
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/).
"""
function analyticalhalo(μ; Az=0.00, ϕ=0.0, steps=1,
L=1, hemisphere=:northern)
if L == 1
point = first(lagrange(μ, 1))
γ = abs(1 - μ - point)
n = collect(1:4)
        c = @. (μ + (-1)^n * ((1-μ) * γ^(n+1)) / (1-γ)^(n+1)) / γ^3
elseif L == 2
point = first(lagrange(μ, 2))
γ = abs(point - 1 + μ)
n = collect(1:4)
        c = @. ((-1)^n * (μ + ((1-μ) * γ^(n+1)) / (1+γ)^(n+1))) / γ^3
else
throw(ArgumentError("Only Halo orbits about L1 or L2 are supported."))
end
ωₚ = √((2 - c[2] + √((9c[2]^2 - 8c[2])))/2)
k = (ωₚ^2 + 1 + 2c[2]) / (2ωₚ)
d₁ = (3ωₚ^2 / k) * (k*(6ωₚ^2 - 1) - 2ωₚ)
d₂ = (8ωₚ^2 / k) * (k*(11ωₚ^2 - 1) - 2ωₚ)
a₂₁ = (3c[3] * (k^2 - 2)) / (4(1 + 2c[2]))
a₂₂ = (3c[3]) / (4(1 + 2c[2]))
a₂₃ = (-3c[3]ωₚ / (4k*d₁)) * (3k^3 * ωₚ - 6k*(k-ωₚ) + 4)
a₂₄ = (-3c[3]ωₚ / (4k*d₁)) * (2 + 3k*ωₚ)
b₂₁ = (-3c[3]ωₚ / (2d₁)) * (3k*ωₚ - 4)
b₂₂ = -3c[3]*ωₚ / d₁
d₂₁ = -c[3] / (2ωₚ^2)
a₃₁ = (-9ωₚ / (4d₂)) * (4c[3] * (k*a₂₃-b₂₁) + k*c[4]*(4+k^2)) +
((9ωₚ^2 + 1 - c[2]) / (2d₂)) * (3c[3]*(2a₂₃-k*b₂₁) + c[4]*(2+3k^2))
a₃₂ = (-9ωₚ / (4d₂)) * (4c[3] * (3k*a₂₄-b₂₂) + k*c[4]) -
(3 / (2d₂)) * (9ωₚ^2 + 1 - c[2]) * (c[3]*(k*b₂₂+d₂₁-2a₂₄) - c[4])
b₃₁ = (3 / (8d₂)) * 8ωₚ * (3c[3] * (k*b₂₁ - 2a₂₃) - c[4]*(2+3k^2)) +
(3/(8d₂)) * (9ωₚ^2 + 1 + 2c[2]) * (4c[3]*(k*a₂₃-b₂₁) + k*c[4]*(4+k^2))
b₃₂ = (9ωₚ/d₂)*(c[3]*(k*b₂₂+d₂₁-2a₂₄)-c[4]) +
(3(9ωₚ^2 + 1 + 2c[2]) / (8d₂) * (4c[3]*(k*a₂₄-b₂₂)+k*c[4]))
d₃₁ = (3 / (64ωₚ^2)) * (4c[3]*a₂₄ + c[4])
    d₃₂ = (3 / (64ωₚ^2)) * (4c[3]*(a₂₃ - d₂₁) + c[4]*(4+k^2))
s₁ = (1 / (2ωₚ*(ωₚ*(1+k^2) - 2k))) *
(3c[3]/2 * (2a₂₁*(k^2 - 2) - a₂₃*(k^2 + 2) - 2k*b₂₁) -
(3c[4]/8) * (3k^4 - 8k^2 + 8))
s₂ = (1 / (2ωₚ*(ωₚ*(1+k^2) - 2k))) *
(3c[3]/2 * (2a₂₂*(k^2-2) + a₂₄*(k^2 + 2) + 2k*b₂₂ + 5d₂₁) +
(3c[4]/8) * (12 - k^2))
l₁ = (-3c[3] / 2) * (2a₂₁ + a₂₃ + 5d₂₁) - (3c[4]/8)*(12 - k^2) + 2ωₚ^2 * s₁
l₂ = (3c[3]/2) * (a₂₄ - 2a₂₂) + (9c[4]/8) + 2ωₚ^2 * s₂
Δ = ωₚ^2 - c[2]
Aᵧ = Az / γ
Aₓ = √((-l₂*Aᵧ^2 - Δ) / l₁)
ν = 1 + s₁*Aₓ^2 + s₂*Aᵧ^2
Τ = 2π / (ωₚ*ν)
τ = ν .* (steps > 1 ? range(0, stop=Τ, length=steps) : range(0, stop=Τ, length=1000))
if hemisphere == :northern
m = 1.0
elseif hemisphere == :southern
m = 3.0
else
throw(ArgumentError("`hemisphere` must be `:northern` or `:southern`."))
end
δₘ = 2 - m
τ₁ = @. ωₚ*τ + ϕ
x = @. γ * (a₂₁*Aₓ^2 + a₂₂*Aᵧ^2 - Aₓ*cos(τ₁) + (a₂₃*Aₓ^2 -
a₂₄*Aᵧ^2)*cos(2τ₁) + (a₃₁*Aₓ^3 - a₃₂*Aₓ*Aᵧ^2)*cos(3τ₁)) + 1 - μ - (L == 1 ? γ : -γ)
y = @. γ * (k*Aₓ*sin(τ₁) + (b₂₁*Aₓ^2 - b₂₂*Aᵧ^2)*sin(2τ₁) +
(b₃₁*Aₓ^3 - b₃₂*Aₓ*Aᵧ^2)*sin(3τ₁))
z = @. γ * (δₘ*Aᵧ*cos(τ₁) + δₘ*d₂₁*Aₓ*Aᵧ*(cos(2τ₁)-3) +
δₘ*(d₃₂*Aᵧ*Aₓ^2 - d₃₁*Aᵧ^3)*cos(3τ₁))
ẋ = @. γ * (ωₚ*ν*Aₓ*sin(τ₁) - 2ωₚ*ν*(a₂₃*Aₓ^2 - a₂₄*Aᵧ^2)*sin(2τ₁) -
3ωₚ*ν*(a₃₁*Aₓ^3 - a₃₂*Aₓ*Aᵧ^2)*sin(3τ₁))
ẏ = @. γ * (ωₚ*ν*k*Aₓ*cos(τ₁) + 2ωₚ*ν*(b₂₁*Aₓ^2 - b₂₂*Aᵧ^2)*cos(2τ₁) +
3ωₚ*ν*(b₃₁*Aₓ^3 - b₃₂*Aₓ*Aᵧ^2)*cos(3τ₁))
ż = @. γ * (-ωₚ*ν*δₘ*Aᵧ*sin(τ₁) - 2ωₚ*ν*δₘ*d₂₁*Aₓ*Aᵧ*sin(2τ₁) -
               3ωₚ*ν*δₘ*(d₃₂*Aᵧ*Aₓ^2 - d₃₁*Aᵧ^3)*sin(3τ₁))
return hcat(x, y, z)[1:steps, :], hcat(ẋ, ẏ, ż)[1:steps, :], Τ
end
"""
Returns a numerical solution for a Halo orbit about `L`.
__Arguments:__
- `μ`: Non-dimensional mass parameter for the CR3BP system.
- `Az`: Desired non-dimensional Z-amplitude for Halo orbit.
- `ϕ`: Desired Halo orbit phase.
- `L`: Lagrange point to orbit (L1 or L2).
- `hemisphere`: Specifies northern or southern Halo orbit.
__Outputs:__
- Tuple of initial states: `(r, v)` where `r::Vector{<:AbstractFloat}`, `v::Vector{<:AbstractFloat}`.
- Throws `ArgumentError` if L is not `1` or `2`.
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/).
"""
function halo(μ; Az=0.0, L=1, hemisphere=:northern,
tolerance=1e-8, max_iter=20,
reltol=1e-14, abstol=1e-14)
r₀, v₀, Τ = analyticalhalo(μ; Az=Az, ϕ=0.0, L=L, hemisphere=hemisphere)
r₀ = r₀[1,:]
v₀ = v₀[1,:]
τ = Τ/2
Φ = Matrix{promote_type(eltype(r₀), eltype(v₀), typeof(τ))}(undef, 6, 6)
for i ∈ 1:max_iter
problem = ODEProblem(
RestrictedThreeBodySTMTic!,
ComponentArray(rₛ = r₀,
vₛ = v₀,
Φ₁ = [1.0, 0, 0, 0, 0, 0],
Φ₂ = [0, 1.0, 0, 0, 0, 0],
Φ₃ = [0, 0, 1.0, 0, 0, 0],
Φ₄ = [0, 0, 0, 1.0, 0, 0],
Φ₅ = [0, 0, 0, 0, 1.0, 0],
Φ₆ = [0, 0, 0, 0, 0, 1.0]),
(0.0, τ),
ComponentArray(μ = μ)
)
integrator = init(problem, Vern9(); reltol=reltol, abstol=abstol)
solve!(integrator)
rₛ = integrator.u.rₛ
vₛ = integrator.u.vₛ
Φ = hcat(integrator.u.Φ₁, integrator.u.Φ₂, integrator.u.Φ₃, integrator.u.Φ₄, integrator.u.Φ₅, integrator.u.Φ₆) |> transpose
∂vₛ = accel(rₛ, vₛ, μ)
if Az ≉ 0
F = @SMatrix [
Φ[4,1] Φ[4,5] ∂vₛ[1];
Φ[6,1] Φ[6,5] ∂vₛ[3];
Φ[2,1] Φ[2,5] vₛ[2]
]
TERM1 = @SMatrix [r₀[1]; v₀[2]; τ]
TERM2 = - inv(F) * @SMatrix [vₛ[1]; vₛ[3]; rₛ[2]]
xᵪ = TERM1 + TERM2
r₀[1] = xᵪ[1]
v₀[2] = xᵪ[2]
τ = xᵪ[3]
else
F = @SMatrix [
Φ[4,3] Φ[4,5] ∂vₛ[1];
Φ[6,3] Φ[6,5] ∂vₛ[3];
Φ[2,3] Φ[2,5] vₛ[2]
]
TERM1 = @SMatrix [r₀[3]; v₀[2]; τ]
TERM2 = - inv(F) * @SMatrix [vₛ[1]; vₛ[3]; rₛ[2]]
xᵪ = TERM1 + TERM2
r₀[3] = xᵪ[1]
v₀[2] = xᵪ[2]
τ = xᵪ[3]
end
if abs(integrator.u.vₛ[1]) ≤ tolerance && abs(integrator.u.vₛ[3]) ≤ tolerance
break;
elseif i == max_iter
@warn "Desired tolerance was not reached, and iterations have hit the maximum number of iterations: $max_iter."
end
end
return r₀, v₀, 2τ
end
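# Hypothetical usage (illustrative values): an Earth-Moon L2 southern halo
# orbit with a nondimensional Z-amplitude of 0.005.
#
#   μ = 1.21506e-2   # approximate Earth-Moon mass parameter
#   r₀, v₀, T = halo(μ; Az=0.005, L=2, hemisphere=:southern)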
"""
Returns a `NondimensionalThreeBodyState` type, instead of tuple `r,v,T`.
"""
function halo(μ, DU::T1, DT::T2; kwargs...) where T1 <: Length where T2 <: Time
r, v, T = halo(μ; kwargs...)
return NondimensionalThreeBodyState(r, v, μ, T, DU, DT)
end
"""
Iterative halo solver; returns a new `NondimensionalThreeBodyState`.
"""
function halo!(state, μ; kwargs...)
r,v,T = halo(μ; kwargs...)
    # Rebinding the `state` argument cannot mutate the caller's (immutable)
    # value, so the freshly constructed state is returned instead.
    return NondimensionalThreeBodyState(r, v, μ, T)
end
"""
Returns dynamics tic for combined Halo iterative solver state vector.
__Arguments:__
- `∂u`: Derivative of state `u`.
- `u`: State vector: `[x, y, z, ẋ, ẏ, ż, Φ₁, Φ₂, Φ₃, Φ₄, Φ₅, Φ₆]`.
- `p`: Parameters (contains non-dimensional mass parameter `μ`, positions `x₁`, `x₂`, and configuration).
- `t`: Time in seconds.
__Outputs:__
- None (sets derivative `∂u` in-place).
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/).
"""
function RestrictedThreeBodySTMTic!(∂u, u, p, t)
# Cartesian state
∂u.rₛ = u.vₛ
accel!(∂u.vₛ, u.rₛ, u.vₛ, p.μ)
# State transition matrix
∂Φ = state_transition_dynamics(p.μ, u.rₛ) * SMatrix{6,6}(transpose(hcat(u.Φ₁, u.Φ₂, u.Φ₃, u.Φ₄, u.Φ₅, u.Φ₆)))
∂u.Φ₁ = copy(∂Φ[1,:])[:]
∂u.Φ₂ = copy(∂Φ[2,:])[:]
∂u.Φ₃ = copy(∂Φ[3,:])[:]
∂u.Φ₄ = copy(∂Φ[4,:])[:]
∂u.Φ₅ = copy(∂Φ[5,:])[:]
∂u.Φ₆ = copy(∂Φ[6,:])[:]
end
"""
Returns the Monodromy Matrix for a Halo orbit.
"""
function monodromy(orbit::NondimensionalThreeBodyState; check_periodicity = true, reltol = 1e-14, abstol = 1e-14, atol = 1e-8)
problem = ODEProblem(
RestrictedThreeBodySTMTic!,
ComponentArray(rₛ = orbit.r,
vₛ = orbit.v,
Φ₁ = [1.0, 0, 0, 0, 0, 0],
Φ₂ = [0, 1.0, 0, 0, 0, 0],
Φ₃ = [0, 0, 1.0, 0, 0, 0],
Φ₄ = [0, 0, 0, 1.0, 0, 0],
Φ₅ = [0, 0, 0, 0, 1.0, 0],
Φ₆ = [0, 0, 0, 0, 0, 1.0]),
(0.0, orbit.Δt),
ComponentArray(μ = orbit.μ)
)
u = solve(problem; reltol = reltol, abstol = abstol).u[end]
if check_periodicity
if !isapprox(orbit, NondimensionalThreeBodyState(u.rₛ, u.vₛ, orbit.μ, orbit.Δt, orbit.DU, orbit.DT); atol = atol)
throw(ErrorException("Provided CR3BP system is NOT periodic!"))
end
end
Matrix(transpose(hcat(u.Φ₁, u.Φ₂, u.Φ₃, u.Φ₄, u.Φ₅, u.Φ₆)))
end
"""
Returns true if a `RestrictedThreeBodySystem` is numerically periodic.
"""
function isperiodic(orbit::NondimensionalThreeBodyState; reltol = 1e-14, abstol = 1e-14, atol = 1e-8)
problem = ODEProblem(
RestrictedThreeBodySTMTic!,
ComponentArray(rₛ = orbit.r,
vₛ = orbit.v,
Φ₁ = [1.0, 0, 0, 0, 0, 0],
Φ₂ = [0, 1.0, 0, 0, 0, 0],
Φ₃ = [0, 0, 1.0, 0, 0, 0],
Φ₄ = [0, 0, 0, 1.0, 0, 0],
Φ₅ = [0, 0, 0, 0, 1.0, 0],
Φ₆ = [0, 0, 0, 0, 0, 1.0]),
(0.0, orbit.Δt),
ComponentArray(μ = orbit.μ)
)
u = solve(problem; reltol = reltol, abstol = abstol).u[end]
return isapprox(orbit, NondimensionalThreeBodyState(u.rₛ, u.vₛ, orbit.μ, orbit.Δt, orbit.DU, orbit.DT); atol = 1e-8)
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 2936 | # Autogenerated with Symbolics.jl 😃
function (r₁, r₂, r₃, μ)
begin
(SymbolicUtils.Code.create_array)(Array, nothing, Val{(3, 3)}(), (+)((+)(1, (*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 3), (+)(1, (*)(-1, μ))), (*)(3//1, μ, (^)((+)(-1, r₁, μ), 2), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (+)((*)(3//1, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5), (^)((+)(r₁, μ), 2), (+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₂, μ, (+)(-1, r₁, μ)), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₂, (+)(r₁, μ), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₃, μ, (+)(-1, r₁, μ)), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₃, (+)(r₁, μ), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₂, μ, (+)(-1, r₁, μ)), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₂, (+)(r₁, μ), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((+)(1, (*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 3), (+)(1, (*)(-1, μ))), (*)(3//1, μ, (^)(r₂, 2), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (+)((*)(3//1, (^)(r₂, 2), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5), (+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₂, r₃, μ), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₂, r₃, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₃, μ, (+)(-1, r₁, μ)), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₃, (+)(r₁, μ), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, r₂, r₃, μ), (*)((^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5))), (*)((*)(3//1, r₂, r₃, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 3), (+)(1, (*)(-1, μ))), (*)(3//1, μ, (^)(r₃, 2), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(-1, r₁, μ), 2)))), 5)), (*)(3//1, (^)(r₃, 2), (^)((inv)((sqrt)((+)((^)(r₂, 2), (^)(r₃, 2), (^)((+)(r₁, μ), 2)))), 5), (+)(1, (*)(-1, μ)))))
end
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1321 | """
Handles calculations relevant to the Circular Restricted
Three Body Problem.
"""
module ThreeBody
using Reexport
@reexport using ..CommonTypes
using ..TwoBody
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
using LinearAlgebra: norm, cross, ×, dot, ⋅, I
using StaticArrays: SVector, @SVector, SMatrix, @SMatrix
using DifferentialEquations
using ComponentArrays
using Symbolics
using Roots
export ThreeBodyState, NondimensionalThreeBodyState, RestrictedThreeBodySystem
export time_scale_factor,
nondimensionalize_length,
nondimensionalize_velocity,
nondimensionalize_time,
nondimensionalize_mass_parameter,
nondimensionalize,
redimensionalize_length,
redimensionalize_velocity,
redimensionalize_time,
redimensionalize,
potential_energy,
jacobi_constant,
lagrange,
analyticalhalo,
halo,
potential_energy_hessian,
accel,
accel!,
RestrictedThreeBodySTMTic!,
state_transition_dynamics,
nondimensional_radius,
inertial,
synodic,
convert,
promote,
monodromy,
isapprox,
isequal,
isperiodic
include("ThreeBodyStates.jl")
include("ThreeBodyCalculations.jl")
include("Halo.jl")
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 7375 | #
# ThreeBodyCalculations.jl
# Calculations for the Circular Restricted
# Three Body Problem.
#
"""
Returns time scale factor, `Tₛ`.
"""
time_scale_factor(a, μ₁, μ₂) = period(a, μ₁+μ₂)
"""
Returns nondimensional length unit, `DU`.
"""
nondimensionalize_length(rᵢ, a) = upreferred.(rᵢ ./ a)
"""
Returns nondimensional velocity unit, `DU/DT`.
"""
nondimensionalize_velocity(vᵢ, a, Tₛ) = upreferred.(vᵢ ./ (a / Tₛ))
"""
Returns nondimensional time unit, `DT`.
"""
nondimensionalize_time(t, a, μ₁, μ₂) = t / time_scale_factor(a, μ₁, μ₂)
"""
Returns nondimensional mass parameter, `μ`.
"""
nondimensionalize_mass_parameter(μ₁, μ₂) = min(μ₁,μ₂) / (μ₁+μ₂)
"""
Returns nondimensional form of (`Unitful`) scalar position.
"""
nondimensionalize(rᵢ::R, a::A) where {
R<:Length, A<:Length
} = nondimensionalize_length(rᵢ, a)
"""
Returns nondimensional form of (`Unitful`) position vector.
"""
nondimensionalize(rᵢ::R, a::A) where {
U<:Length, R<:AbstractVector{U}, A<:Length
} = nondimensionalize_length(rᵢ, a)
"""
Returns nondimensional form of (`Unitful`) scalar velocity.
"""
nondimensionalize(vᵢ::V, a::A, Tₛ::T) where {
V<:Velocity, A<:Length, T<:Time
} = nondimensionalize_velocity(vᵢ, a, Tₛ)
"""
Returns nondimensional form of (`Unitful`) velocity vector.
"""
nondimensionalize(vᵢ::V, a::A, Tₛ::T) where {
U<:Velocity, V<:AbstractVector{U}, A<:Length, T<:Time
} = nondimensionalize_velocity(vᵢ, a, Tₛ)
"""
Returns nondimensional form of (`Unitful`) velocity vector.
"""
nondimensionalize(vᵢ::V, a::A, μ₁::U1, μ₂::U2) where {
U<:Velocity, V<:AbstractVector{U}, A<:Length, U1<:MassParameter, U2<:MassParameter
} = nondimensionalize_velocity(vᵢ, a, time_scale_factor(a, μ₁, μ₂))
"""
Returns nondimensional form of (`Unitful`) time duration.
"""
nondimensionalize(t::T1, Tₛ::T2) where {
T1<:Time, T2<:Time
} = upreferred(t / Tₛ)
"""
Returns nondimensional form of (`Unitful`) time duration.
"""
nondimensionalize(t::T1, a::A, μ₁::U1, μ₂::U2) where {
T1<:Time, A<:Length, U1<:MassParameter, U2<:MassParameter
} = nondimensionalize(t, time_scale_factor(a, μ₁, μ₂))
"""
Returns nondimensional form of (`Unitful`) gravitational parameters.
"""
nondimensionalize(μ₁::U1, μ₂::U2) where {
U1<:MassParameter, U2<:MassParameter
} = min(μ₁, μ₂) / (μ₁+μ₂)
"""
Returns nondimensional Circular Restricted Three-body State.
"""
function nondimensionalize(r₃::R, v₃::V, Δt::T, μ₁::U1, μ₂::U2, a::A) where {
RT<:Length, R<:AbstractVector{RT},
VT<:Velocity, V<:AbstractVector{VT},
T<:Time, U1<:MassParameter, U2<:MassParameter,
A<:Length
}
Tₛ = time_scale_factor(a, μ₁, μ₂)
return nondimensionalize(r₃, a),
nondimensionalize(v₃, a, Tₛ),
nondimensionalize(Δt, Tₛ),
nondimensionalize(μ₁, μ₂)
end
"""
Returns the nondimensional (synodic / rotating) representation of a CR3BP state.
"""
function nondimensionalize(state::D) where D <: ThreeBodyState
return NondimensionalThreeBodyState(
nondimensionalize(state.r₃, state.a),
nondimensionalize(state.v₃, state.a, time_scale_factor(state.a, state.μ₁, state.μ₂)),
nondimensionalize(state.μ₁, state.μ₂),
nondimensionalize(state.Δt, state.a, state.μ₁, state.μ₂),
state.a, time_scale_factor(state.a, state.μ₁, state.μ₂)
)
end
"""
Returns dimensional length units.
"""
redimensionalize_length(rᵢ, a) = upreferred(rᵢ .* a)
"""
Returns dimensional velocity units.
"""
redimensionalize_velocity(vᵢ, a, Tₛ) = upreferred(vᵢ .* (a / Tₛ))
"""
Returns dimensional time unit.
"""
redimensionalize_time(t, a, μ₁, μ₂) = redimensionalize_time(t, time_scale_factor(a, μ₁, μ₂))
"""
Returns dimensional time unit.
"""
redimensionalize_time(t, Tₛ) = t * Tₛ
"""
Returns dimensional (inertial) form of (`Unitful`) scalar position.
"""
redimensionalize(rᵢ::R, a::A) where {
R<:Real, A<:Length
} = redimensionalize_length(rᵢ, a)
"""
Returns dimensional (inertial) form of (`Unitful`) velocity vector.
"""
redimensionalize(vᵢ::U, a::A, Tₛ::T) where {
U<:Real, A<:Length, T<:Time
} = redimensionalize_velocity(vᵢ, a, Tₛ)
"""
Returns dimensional (inertial) form of (`Unitful`) time duration.
"""
redimensionalize(t::T1, Tₛ::T2) where {
T1<:Real, T2<:Time
} = redimensionalize_time(t, Tₛ)
"""
Returns dimensional (inertial) form of (`Unitful`) time duration.
"""
redimensionalize(t::T1, a::A, μ₁::U1, μ₂::U2) where {
T1<:Real, A<:Length, U1<:MassParameter, U2<:MassParameter
} = redimensionalize(t, time_scale_factor(a, μ₁, μ₂))
"""
Returns the dimensional (inertial) representation of a CR3BP state.
"""
function redimensionalize(state::N, μ₁::U1, μ₂::U2) where {
N <: NondimensionalThreeBodyState,
U1 <: MassParameter, U2 <: MassParameter
}
return ThreeBodyState(
μ₁, μ₂,
state.DU,
redimensionalize.(state.r, state.DU),
redimensionalize.(state.v, state.DU, time_scale_factor(state.DU, μ₁, μ₂)),
redimensionalize(state.Δt, state.DT)
)
end
"""
Returns the spacecraft's nondimensional position w.r.t. body 1 (or 2).
"""
nondimensional_radius(r, xᵢ=0) = √( (r[1]-xᵢ)^2 + r[2]^2 + r[3]^2 )
"""
Returns the potential energy `U`.
"""
potential_energy(r, μ) = (r[1]^2 + r[2]^2)/2 + ((1-μ)/nondimensional_radius(r,-μ)) + (μ/nondimensional_radius(r,1-μ))
"""
Returns the Jacobi Constant `C`.
"""
jacobi_constant(r, v, μ) = 2*potential_energy(r, μ) - (v⋅v)
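# Hypothetical example (illustrative values): the Jacobi constant is conserved
# along a CR3BP trajectory, so it makes a convenient integration check.
#
#   r = [0.5, 0.0, 0.0]; v = [0.0, 0.5, 0.0]; μ = 1.21506e-2
#   C = jacobi_constant(r, v, μ)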
"""
Given the Synodic frame vector, returns the vector in the inertial reference frame.
"""
function inertial(vecₛ, t, ω=1.0u"rad"/unit(t))
θ = ω*t
ᴵTₛ = @SMatrix [
cos(θ) sin(θ) 0
-sin(θ) cos(θ) 0
0 0 1
]
return ᴵTₛ * vecₛ
end
"""
Returns the position and velocity vectors in the synodic (rotating) reference frame.
"""
synodic(rᵢ, vᵢ, a, Tₛ) = nondimensionalize(rᵢ, a), nondimensionalize(vᵢ, a, Tₛ)
"""
Returns the lagrange points for a CR3BP system.
__Arguments:__
- `μ`: Non-dimensional mass parameter for the CR3BP system.
- `L`: Langrange points requested, must be in range [1,5]
__Outputs:__
- Tuple of Lagrange points
- Throws `ArgumentError` if L is out of range [1,5]
__References:__
- [Rund, 2018](https://digitalcommons.calpoly.edu/theses/1853/)
"""
function lagrange(μ, L=1:5)
if !all(L[i] ∈ (1,2,3,4,5) for i ∈ 1:length(L))
throw(ArgumentError("Requested lagrange points must be in range [1,5]"))
end
expressions = @SVector [
x -> x - (1-μ)/(x+μ)^2 + μ/(x+μ-1)^2,
x -> x - (1-μ)/(x+μ)^2 - μ/(x+μ-1)^2,
x -> x + (1-μ)/(x+μ)^2 + μ/(x+μ+1)^2
]
return (map(f->[find_zero(f, (-3,3)), 0, 0], expressions)...,
[(1/2) - μ, √(3)/2, 0], [(1/2) - μ, -√(3)/2, 0])[L]
end
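# Hypothetical usage (illustrative value for the Earth-Moon system):
#
#   L1, L2 = lagrange(1.21506e-2, 1:2)   # synodic positions of L1 and L2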
"""
Returns non-dimensional acceleration for CR3BP state.
"""
function accel!(aₛ, rₛ, vₛ, μ)
x₁ = -μ
x₂ = 1-μ
r₁ = nondimensional_radius(rₛ, x₁)
r₂ = nondimensional_radius(rₛ, x₂)
aₛ[1] = 2vₛ[2] + rₛ[1] - (1-μ)*(rₛ[1] - x₁) / r₁^3 - μ*(rₛ[1] - x₂) / r₂^3
aₛ[2] = -2vₛ[1] + rₛ[2] - ((1-μ) / r₁^3 + (μ / r₂^3)) * rₛ[2]
aₛ[3] = -((1-μ) / r₁^3 + (μ / r₂^3)) * rₛ[3]
return nothing
end
"""
Returns non-dimensional acceleration for CR3BP state.
"""
function accel(rₛ, vₛ, μ)
aₛ = similar(vₛ)
accel!(aₛ, rₛ, vₛ, μ)
return aₛ
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 6557 | #
# Handles CR3BP problem states.
#
"""
Abstract type for restricted three-body systems.
"""
abstract type RestrictedThreeBodySystem <: OrbitalSystem end
"""
Describes a dimensional state of a spacecraft
within the Circular Restricted Three-body Problem in
the Synodic frame.
"""
struct ThreeBodyState{F<:AbstractFloat} <: RestrictedThreeBodySystem
μ₁::MassParameter{F}
μ₂::MassParameter{F}
a::Length{F}
r₃::SVector{3, <:Length{F}}
v₃::SVector{3, <:Velocity{F}}
Δt::Time{F}
function ThreeBodyState(μ₁::MP1, μ₂::MP2, a::A, r₃::R, v₃::V, Δt::DT) where {
MP1 <: MassParameter{<:Real},
MP2 <: MassParameter{<:Real},
A <: Length{<:Real},
RT <: Length{<:Real},
VT <: Velocity{<:Real},
R <: AbstractVector{RT},
V <: AbstractVector{VT},
DT <: Time{<:Real}
}
T = promote_type(
typeof(μ₁.val), typeof(μ₂.val), typeof(a.val),
map(x->typeof(x.val), r₃)..., map(x->typeof(x.val), v₃)...,
typeof(Δt.val)
)
if !(T <: AbstractFloat)
@warn "Non-float parameters provided: defaulting to Float64."
T = Float64
end
return new{T}(
T(μ₁), T(μ₂), T(a),
SVector{3, RT}(T.(r₃)),
SVector{3, VT}(T.(v₃)),
T(Δt)
)
end
end
Base.convert(::Type{T}, t::ThreeBodyState) where {
T<:AbstractFloat
} = ThreeBodyState(map(f -> T.(getfield(t, f)), fieldnames(typeof(t)))...)
Base.promote(::Type{ThreeBodyState{A}}, ::Type{ThreeBodyState{B}}) where {
A<:AbstractFloat, B<:AbstractFloat
} = ThreeBodyState{promote_type(A,B)}
Core.Float16(o::ThreeBodyState) = convert(Float16, o)
Core.Float32(o::ThreeBodyState) = convert(Float32, o)
Core.Float64(o::ThreeBodyState) = convert(Float64, o)
Base.MPFR.BigFloat(o::ThreeBodyState) = convert(BigFloat, o)
function Base.show(io::IO, sys::ThreeBodyState)
println(io, "Dimensioned Circular Restricted Three-body State")
println(io, " μ₁: ", sys.μ₁)
println(io, " μ₂: ", sys.μ₂)
println(io, " a: ", sys.a)
println(io, " r₃: ", transpose(ustrip.(sys.r₃)), " ", unit(first(sys.r₃)))
println(io, " v₃: ", transpose(ustrip.(sys.v₃)), " ", unit(first(sys.v₃)))
println(io, " Δt: ", sys.Δt)
end
"""
Describes the non-dimensional state of a spacecraft
within the Circular Restricted Three-body Problem in
the Synodic frame.
"""
struct NondimensionalThreeBodyState{F<:AbstractFloat} <: RestrictedThreeBodySystem
r::SVector{3, F}
v::SVector{3, F}
μ::F
Δt::F
DU::Length{F}
DT::Time{F}
function NondimensionalThreeBodyState(rₛ::AbstractVecOrMat{R}, vₛ::AbstractVecOrMat{V}, μ::U, Δt::D = 1.0,
DU::Unitful.Length{L} = NaN * u"km",
DT::Unitful.Time{C} = NaN * u"s") where {
R <: Real, V <: Real, L <: Real, C <: Real, U <: Real, D <: Real}
T = promote_type(R, V, L, C, U, D)
if !(T <: AbstractFloat)
@warn "Non-float parameters provided. Defaulting to Float64."
T = Float64
end
return new{T}(
SVector{3,T}(rₛ...),
SVector{3,T}(vₛ...),
T(μ), T(Δt), T(DU), T(DT)
)
end
end
Base.convert(::Type{T}, t::NondimensionalThreeBodyState) where {
T<:AbstractFloat
} = NondimensionalThreeBodyState(T.(Array(t.r)), T.(Array(t.v)), T(t.μ), T(t.Δt), T(t.DU), T(t.DT))
Base.promote(::Type{NondimensionalThreeBodyState{A}}, ::Type{NondimensionalThreeBodyState{B}}) where {
A<:AbstractFloat, B<:AbstractFloat
} = NondimensionalThreeBodyState{promote_type(A,B)}
Core.Float16(o::NondimensionalThreeBodyState) = convert(Float16, o)
Core.Float32(o::NondimensionalThreeBodyState) = convert(Float32, o)
Core.Float64(o::NondimensionalThreeBodyState) = convert(Float64, o)
Base.MPFR.BigFloat(o::NondimensionalThreeBodyState) = convert(BigFloat, o)
function Base.show(io::IO, sys::NondimensionalThreeBodyState)
println(io, "Nondimensional Circular Restricted Three-body State")
println(io, " μ: ", sys.μ)
println(io, " r: ", transpose(sys.r))
println(io, " v: ", transpose(sys.v))
println(io, " Δt: ", sys.Δt)
println(io, " DU: ", sys.DU)
println(io, " DT: ", sys.DT)
end
"""
Returns true if all elements in each system are within `atol` of the other.
"""
function Base.isapprox(c1::ThreeBodyState, c2::ThreeBodyState; atol = 1e-8)
ru(x) = ustrip(upreferred(x))
return isapprox(ru(c1.μ₁), ru(c2.μ₁); atol = atol) &&
isapprox(ru(c1.μ₂), ru(c2.μ₂); atol = atol) &&
isapprox(ru(c1.a), ru(c2.a); atol = atol) &&
isapprox(ru.(c1.r₃), ru.(c2.r₃); atol = atol) &&
isapprox(ru.(c1.v₃), ru.(c2.v₃); atol = atol) &&
isapprox(ru(c1.Δt), ru(c2.Δt); atol = atol)
end
"""
Returns true if all elements in each system are equal to the other.
"""
function Base.isequal(c1::ThreeBodyState, c2::ThreeBodyState)
ru(x) = ustrip(upreferred(x))
return isequal(ru(c1.μ₁), ru(c2.μ₁)) &&
isequal(ru(c1.μ₂), ru(c2.μ₂)) &&
isequal(ru(c1.a), ru(c2.a)) &&
isequal(ru.(c1.r₃), ru.(c2.r₃)) &&
isequal(ru.(c1.v₃), ru.(c2.v₃)) &&
isequal(ru(c1.Δt), ru(c2.Δt))
end
"""
Returns true if all elements in each system are within `atol` of the other.
"""
function Base.isapprox(c1::NondimensionalThreeBodyState, c2::NondimensionalThreeBodyState; atol = 1e-8)
ru(x) = ustrip(upreferred(x))
return isapprox(ru(c1.μ), ru(c2.μ); atol = atol) &&
isapprox(ru.(c1.r), ru.(c2.r); atol = atol) &&
isapprox(ru.(c1.v), ru.(c2.v); atol = atol) &&
isapprox(ru(c1.Δt), ru(c2.Δt); atol = atol) &&
           isapprox(ru(c1.DU), ru(c2.DU); atol = atol) &&
isapprox(ru(c1.DT), ru(c2.DT); atol = atol)
end
"""
Returns true if all elements in each system are equal to the other.
"""
function Base.isequal(c1::NondimensionalThreeBodyState, c2::NondimensionalThreeBodyState)
ru(x) = ustrip(upreferred(x))
return isequal(ru(c1.μ), ru(c2.μ)) &&
isequal(ru.(c1.r), ru.(c2.r)) &&
isequal(ru.(c1.v), ru.(c2.v)) &&
isequal(ru(c1.Δt), ru(c2.Δt)) &&
isequal(ru(c1.DU), ru(c2.DU)) &&
isequal(ru(c1.DT), ru(c2.DT))
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 2666 | #
# Kepler.jl
#
# Solves Kepler's problem for TwoBody orbits.
#
"""
Solves Kepler's Problem for `orbit` and `Δtᵢ`.
"""
function kepler(orbit::O, Δtᵢ::T = period(orbit); tol=1e-6, max_iter=100) where O <: RestrictedTwoBodySystem where T<:Unitful.Time
conic_section = conic(orbit)
# Guess χ₀
if conic_section == Circular || conic_section == Elliptical
Δt = mod(Δtᵢ, period(orbit))
χ₀ = √(orbit.body.μ) * Δt / semimajor_axis(orbit)
elseif conic_section == Hyperbolic
Δt = Δtᵢ
χ₀ = sign(Δt) * √(-semimajor_axis(orbit)) * log(ℯ, (-2 * orbit.body.μ / semimajor_axis(orbit) * Δt) /
(radius_vector(orbit) ⋅ velocity_vector(orbit) + (sign(Δt) * √(-orbit.body.μ * semimajor_axis(orbit)) * (1 - norm(radius_vector(orbit)) / semimajor_axis(orbit)))))
elseif conic_section == Parabolic
Δt = Δtᵢ
χ₀ = √(semi_parameter(orbit)) * tan(true_anomoly(orbit) / 2)
else
@warn "Kepler's problem failed to converge."
return Orbit([NaN, NaN, NaN] * u"km", [NaN, NaN, NaN] * u"km/s", orbit.body)
end
# Iteratively solve for χ
# TODO: Compare loop vs. recursion performance here.
# There shouldn't be too large of a difference, since this tends
# to converge with only a few iterations.
χₙ, r, ψ, C₂, C₃ = χₖ(χ₀, Δt, radius_vector(orbit), velocity_vector(orbit), semimajor_axis(orbit), orbit.body.μ, tol=tol, max_iter=max_iter)
# Convert to a Orbit
f = 1 - χₙ^2 / norm(radius_vector(orbit)) * C₂
ḟ = √(orbit.body.μ) / (norm(radius_vector(orbit)) * r) * χₙ * (ψ * C₃ - 1)
g = Δt - (χₙ^3 / √(orbit.body.μ)) * C₃
ġ = 1 - (χₙ^2 / r) * C₂
return Orbit(f * radius_vector(orbit) + g * velocity_vector(orbit), ḟ * radius_vector(orbit) + ġ * velocity_vector(orbit), orbit.body)
end
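# Hypothetical usage (illustrative only; `orbit` is assumed to be a
# `RestrictedTwoBodySystem`, e.g. from `Orbit(r̅, v̅, Earth)`):
#
#   future = kepler(orbit, 3600u"s")   # the state one hour after epoch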
function χₖ(χₙ, Δt, rᵢ₀, vᵢ₀, a, μ; iter=1, tol=1e-14, max_iter=100)
r₀ = norm(rᵢ₀)
ψ = upreferred(χₙ^2 / a)
if ψ > tol
C₂ = (1 - cos(√(ψ))) / ψ
C₃ = (√(ψ) - sin(√(ψ))) / √(ψ^3)
elseif ψ < -tol
C₂ = (1 - cosh(√(-ψ))) / ψ
C₃ = (sinh(√(-ψ)) - √(-ψ)) / √((-ψ)^3)
else
C₂ = 1.0 / 2.0
C₃ = 1.0 / 6.0
end
r = χₙ^2 * C₂ + (rᵢ₀ ⋅ vᵢ₀) * χₙ / √(μ) * (1 - ψ*C₃) + r₀ * (1 - ψ * C₂)
χₙ₊₁ = χₙ + ((√(μ) * Δt - χₙ^3 * C₃ - (rᵢ₀ ⋅ vᵢ₀) / √(μ) * χₙ^2 * C₂ - r₀ * χₙ * (1 - ψ * C₃)) / r)
if iter > max_iter
return NaN, NaN, NaN, NaN, NaN
elseif abs(χₙ₊₁ - χₙ) < oneunit(χₙ) * tol
return χₙ, r, ψ, C₂, C₃
else
return χₖ(χₙ₊₁, Δt, rᵢ₀, vᵢ₀, a, μ; iter=iter+1, tol=tol, max_iter=max_iter)
end
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1798 | #
# Solver for Lambert's problem
#
# References:
# [1] David, A. "Vallado. Fundamentals of Astrodynamics and Applications." (2013).
#
"""
Solves Lambert's problem through the use of universal variables.
"""
function lambert(r̅₁, r̅₂, μ, Δt, trajectory=:short; tol=1e-6, max_iter=100)
# Specify short way, or long way trajectory
if trajectory == :short
tₘ = 1
elseif trajectory == :long
tₘ = -1
else
throw(ArgumentError("`trajectory` must be set to `:short` or `:long`"))
end
r₁ = norm(r̅₁)
r₂ = norm(r̅₂)
cosΔν = (r̅₁⋅r̅₂) / (r₁*r₂)
Δν = asin(u"rad", tₘ * √(1 - (cosΔν)^2))
A = upreferred(tₘ * √(r₂*r₁ * (1 + cosΔν)))
if A ≈ 0
throw(ErrorException("Can't calculate the orbit."))
end
ψₙ = 0.0
C₂ = 1/2
C₃ = 1/6
ψ₊ = 4π^2
ψ₋ = -4π
yₙ = r₁ + r₂ + (A * (ψₙ*C₃ - 1) / √(C₂))
Δtₙ = Δt + 1u"s"
iter = 0
    while (iter < max_iter) &&
          ((abs(Δtₙ - Δt) > (tol * oneunit(Δt))) || (A > 0 * oneunit(A) && yₙ < 0 * oneunit(yₙ)))
yₙ = r₁ + r₂ + (A * (ψₙ*C₃ - 1) / √(C₂))
χₙ = √(yₙ / C₂)
Δtₙ = (χₙ^3 * C₃ + A*√(yₙ)) / √(μ)
if Δtₙ < Δt
ψ₋ = ψₙ
else
ψ₊ = ψₙ
end
ψₙ = (ψ₊ + ψ₋) / 2
if ψₙ > tol
C₂ = (1 - cos(√(ψₙ))) / ψₙ
C₃ = (√(ψₙ) - sin(√(ψₙ))) / √(ψₙ^3)
elseif ψₙ < -tol
C₂ = (1 - cosh(√(-ψₙ))) / ψₙ
C₃ = (sinh(√(-ψₙ)) - √(-ψₙ)) / √((-ψₙ)^3)
else
C₂ = 1.0 / 2.0
C₃ = 1.0 / 6.0
end
iter += 1
end
f = 1 - yₙ/r₁
ġ = 1 - yₙ/r₂
g = A * √(yₙ/μ)
v̅₁ = upreferred.((r̅₂ .- (f .* r̅₁)) ./ g)
v̅₂ = upreferred.(((ġ .* r̅₂) .- r̅₁) ./ g)
return v̅₁, v̅₂
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1654 | """
Provides structures & functions for the two-body problem.
"""
module TwoBody
# Dependencies
using Reexport
@reexport using ..CommonTypes
include("../Misc/DocStringExtensions.jl")
include("../Misc/UnitfulAliases.jl")
using Crayons
using LinearAlgebra: norm, cross, ×, dot, ⋅
using StaticArrays: SVector, @SVector, SMatrix, @SMatrix
# Newton's Gravitation Constant
import PhysicalConstants.CODATA2018
G = 1.0 * CODATA2018.G
# Export data structures, constants, and constructors
export RestrictedTwoBodySystem, TwoBodyState, KeplerianState, AbstractConic, Circular,
Elliptical, Parabolic, Hyperbolic, Invalid, Body, CelestialBody,
Sun, Mercury, Venus, Earth, Moon, Luna, Mars, Jupiter,
Saturn, Uranus, Neptune, Pluto, G
# Export functions
export semimajor_axis, semi_parameter, eccentricity,
eccentricity_vector, inclination, true_anomoly,
periapsis_radius, apoapsis_radius, periapsis_velocity,
apoapsis_velocity, radius, velocity, period,
radius_vector, velocity_vector, Orbit, perifocal_radius,
mass, mass_parameter, perifocal, RAAN, argument_of_periapsis,
time_since_periapsis, mean_motion, mean_motion_vector,
eccentric_anomoly, specific_angular_momentum_vector,
specific_angular_momentum, specific_energy, specific_potential_energy,
isapprox, isequal, TwobodyPropagationResult, kepler, lambert,
conic, keplerian, cartesian, promote, convert, Float16, Float32, Float64,
BigFloat
# Include all module source code
include("TwoBodyStates.jl")
include("TwoBodyCalculations.jl")
include("Kepler.jl")
include("Lambert.jl")
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 13232 | #
# TwoBodyCalculations.jl
#
# Includes simple calculations relevant to the Two Body Problem.
"""
Returns the conic section, as specified by eccentricity `e`.
"""
function conic(e::T) where T<:Number
if e ≈ 0
return Circular
elseif e ≈ 1
return Parabolic
elseif 0 < e && e < 1
return Elliptical
elseif e > 1
return Hyperbolic
else
return Invalid
end
end
conic(orbit::T) where T<:RestrictedTwoBodySystem = conic(eccentricity(orbit))
"""
Returns a Keplerian representation of a Cartesian orbital state.
Algorithm taught in ENAE601.
"""
function keplerian(rᵢ, vᵢ, μ)
    safe_acos(unit, num) = acos(clamp(num, -one(num), one(num))) * unit # guard against |num| drifting past 1 from roundoff
î = SVector{3, Float64}([1, 0, 0])
ĵ = SVector{3, Float64}([0, 1, 0])
k̂ = SVector{3, Float64}([0, 0, 1])
h̅ = specific_angular_momentum_vector(rᵢ, vᵢ)
a = semimajor_axis(norm(rᵢ), norm(vᵢ), μ)
n̅ = k̂ × specific_angular_momentum_vector(rᵢ, vᵢ)
e̅ = eccentricity_vector(rᵢ, vᵢ, μ)
e = norm(e̅) |> upreferred
i = safe_acos(u"rad", (h̅ ⋅ k̂) / norm(h̅))
Ω = ustrip(n̅ ⋅ ĵ) > 0 ?
safe_acos(u"rad", (n̅ ⋅ î) / norm(n̅)) :
2π * u"rad" - safe_acos(u"rad", (n̅ ⋅ î) / norm(n̅))
ω = ustrip(e̅ ⋅ k̂) > 0 ?
safe_acos(u"rad", (n̅ ⋅ e̅) / (norm(n̅) * e)) :
2π * u"rad" - safe_acos(u"rad", (n̅ ⋅ e̅) / (norm(n̅) * e))
ν = ustrip(rᵢ ⋅ vᵢ) > 0 ?
safe_acos(u"rad", (e̅ ⋅ rᵢ) / (e * norm(rᵢ))) :
2π * u"rad" - safe_acos(u"rad", (e̅ ⋅ rᵢ) / (e * norm(rᵢ)))
return e, uconvert(u"km", a), uconvert(u"°", i),
uconvert(u"°", Ω), uconvert(u"°", ω),
uconvert(u"°", ν)
end
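# Hypothetical round-trip example (illustrative values):
#
#   rᵢ = [8000.0, 0.0, 0.0]u"km"; vᵢ = [0.0, 7.0, 0.5]u"km/s"
#   μ  = 3.986004418e5u"km^3/s^2"
#   e, a, i, Ω, ω, ν = keplerian(rᵢ, vᵢ, μ)
#   r₂, v₂ = cartesian(e, a, i, Ω, ω, ν, μ)   # recovers rᵢ and vᵢ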
keplerian(rᵢ, vᵢ, body::CelestialBody) = keplerian(rᵢ, vᵢ, body.μ)
keplerian(orbit::KeplerianState) = orbit.e, orbit.a, orbit.i, orbit.Ω, orbit.ω, orbit.ν
keplerian(orbit::TwoBodyState) = keplerian(orbit.r, orbit.v, orbit.body)
"""
Returns a Cartesian representation of a Keplerian two-body orbital state
in an inertial frame, centered at the center of mass of the central body.
Algorithm taught in ENAE601.
"""
function cartesian(e, a, i, Ω, ω, ν, μ)
rᵢ, vᵢ = cartesian(i, Ω, ω, perifocal(a, e, ν, μ)...)
return uconvert.(u"km", rᵢ),
uconvert.(u"km/s", vᵢ)
end
cartesian(e, a, i, Ω, ω, ν, body::CelestialBody) = cartesian(e, a, i, Ω, ω, ν, body.μ)
cartesian(orbit::TwoBodyState) = orbit.r, orbit.v
cartesian(orbit::KeplerianState) = cartesian(orbit.e, orbit.a, orbit.i, orbit.Ω, orbit.ω, orbit.ν, orbit.body)
"""
Returns a Cartesian (inertial) representation of the provided Perifocal state.
"""
function cartesian(i, Ω, ω, rₚ, vₚ)
# Set up Perifocal ⟶ Cartesian conversion
R_3Ω = SMatrix{3,3}(
[cos(Ω) -sin(Ω) 0.;
sin(Ω) cos(Ω) 0.;
0. 0. 1.])
R_1i = SMatrix{3,3}(
[1. 0. 0.;
0. cos(i) -sin(i);
0. sin(i) cos(i)])
R_3ω = SMatrix{3,3}(
[cos(ω) -sin(ω) 0.
sin(ω) cos(ω) 0.
0. 0. 1.])
ᴵTₚ = R_3Ω * R_1i * R_3ω
return ᴵTₚ * rₚ, ᴵTₚ * vₚ
end
"""
Returns position and velocity vectors in the Perifocal frame.
"""
function perifocal(a, e, ν, μ)
p = semi_parameter(a, e)
r = radius(p, e, ν)
P̂=SVector{3, Float64}([1, 0, 0])
Q̂=SVector{3, Float64}([0, 1, 0])
Ŵ=SVector{3, Float64}([0, 0, 1])
rₚ = (r * cos(ν) .* P̂ .+ r * sin(ν) .* Q̂)
vₚ = √(μ/p) * ((-sin(ν) * P̂) .+ ((e + cos(ν)) .* Q̂))
return rₚ, vₚ
end
function perifocal(i, Ω, ω, rᵢ, vᵢ)
# Set up Cartesian ⟶ Perifocal conversion
R_3Ω = SMatrix{3,3}(
[cos(Ω) -sin(Ω) 0.;
sin(Ω) cos(Ω) 0.;
0. 0. 1.])
R_1i = SMatrix{3,3}(
[1. 0. 0.;
0. cos(i) -sin(i);
0. sin(i) cos(i)])
R_3ω = SMatrix{3,3}(
[cos(ω) -sin(ω) 0.
sin(ω) cos(ω) 0.
0. 0. 1.])
ᵖTᵢ = inv(R_3Ω * R_1i * R_3ω)
return ᵖTᵢ*rᵢ, ᵖTᵢ*vᵢ
end
function perifocal(orbit::T) where T <: RestrictedTwoBodySystem
return perifocal(
inclination(orbit),
RAAN(orbit),
argument_of_periapsis(orbit),
radius_vector(orbit),
velocity_vector(orbit)
)
end
"""
Returns semimajor axis parameter, a.
"""
semimajor_axis(r, v, μ) = inv( (2 / r) - (v^2 / μ) )
semimajor_axis(orbit::TwoBodyState) = semimajor_axis(radius(orbit), velocity(orbit), orbit.body.μ)
semimajor_axis(orbit::KeplerianState) = orbit.a
"""
Returns specific angular momentum vector, h̅.
"""
specific_angular_momentum_vector(rᵢ, vᵢ) = rᵢ × vᵢ
specific_angular_momentum_vector(orbit::TwoBodyState) = specific_angular_momentum_vector(orbit.r, orbit.v)
specific_angular_momentum_vector(orbit::KeplerianState) = specific_angular_momentum_vector(TwoBodyState(orbit))
"""
Returns scalar specific angular momentum vector, h.
"""
specific_angular_momentum(rᵢ, vᵢ) = norm(specific_angular_momentum_vector(rᵢ, vᵢ))
specific_angular_momentum(orbit::TwoBodyState) = specific_angular_momentum(orbit.r, orbit.v)
specific_angular_momentum(orbit::KeplerianState) = specific_angular_momentum(cartesian(orbit)...)
"""
Returns specific orbital energy, ϵ.
"""
specific_energy(a, μ) = ( -μ / (2 * a) )
specific_energy(r, v, μ) = (v^2 / 2) - (μ / r)
specific_energy(orbit::TwoBodyState) = specific_energy(orbit.r, orbit.v, orbit.body.μ)
specific_energy(orbit::KeplerianState) = specific_energy(orbit.a, orbit.body.μ)
"""
Returns potential energy for an orbit about a `CelestialBody`.
"""
specific_potential_energy(r, μ) = (μ/r)
specific_potential_energy(r, μ, R, J₂, ϕ) = (μ/r) * (1 - J₂ * (R/r)^2 * ((3/2) * (sin(ϕ))^2 - (1/2)))
specific_potential_energy(orbit::TwoBodyState) = specific_potential_energy(orbit.r, orbit.body.μ)
specific_potential_energy(orbit::KeplerianState) = specific_potential_energy(TwoBodyState(orbit))
"""
Returns orbital eccentricity vector e̅.
"""
function eccentricity_vector(rᵢ, vᵢ, μ)
    # Flush components smaller than machine epsilon to zero, to avoid spurious tiny values from floating point error
    return map(x-> abs(x) < eps(typeof(x)) ? zero(x) : x, (1 / μ) * ((vᵢ × specific_angular_momentum_vector(rᵢ, vᵢ)) - μ * rᵢ / norm(rᵢ)))
end
eccentricity_vector(orbit::TwoBodyState) = eccentricity_vector(orbit.r, orbit.v, orbit.body.μ)
eccentricity_vector(orbit::KeplerianState) = eccentricity_vector(TwoBodyState(orbit))
"""
Returns orbital eccentricity, e.
"""
eccentricity(rᵢ, vᵢ, μ) = norm(eccentricity_vector(rᵢ, vᵢ, μ)) |> upreferred
eccentricity(orbit::TwoBodyState) = eccentricity(orbit.r, orbit.v, orbit.body.μ)
eccentricity(orbit::KeplerianState) = orbit.e
"""
Returns the semi-parameter (semi-latus rectum), p.
"""
semi_parameter(a, e) = a * (1 - e^2)
semi_parameter(orbit::KeplerianState) = semi_parameter(orbit.a, orbit.e)
semi_parameter(orbit::TwoBodyState) = semi_parameter(KeplerianState(orbit))
"""
Returns radius, r.
"""
radius(p, e, ν) = upreferred(p / (1 + e * cos(ν)))
radius(orbit::KeplerianState) = radius(semi_parameter(orbit), orbit.e, orbit.ν)
radius(orbit::TwoBodyState) = norm(orbit.r)
radius(body::CelestialBody) = body.R
"""
Returns radius vector, rᵢ.
"""
radius_vector(orbit::TwoBodyState) = orbit.r
radius_vector(orbit::KeplerianState) = radius_vector(TwoBodyState(orbit))
"""
Returns instantaneous velocity, v, for any orbital representation.
"""
velocity(r, a, μ) = upreferred(√( (2 * μ / r) - (μ / a)))
velocity(orbit::KeplerianState) = velocity(radius(orbit), orbit.a, orbit.body.μ)
velocity(orbit::TwoBodyState) = norm(orbit.v)
"""
Returns velocity vector, vᵢ.
"""
velocity_vector(orbit::TwoBodyState) = orbit.v
velocity_vector(orbit::KeplerianState) = velocity_vector(TwoBodyState(orbit))
"""
Returns periapsis radius, rₚ.
"""
periapsis_radius(a, e) = a * (1 - e)
periapsis_radius(orbit::T) where T<:RestrictedTwoBodySystem = periapsis_radius(semimajor_axis(orbit), eccentricity(orbit))
"""
Returns apoapsis radius, rₐ.
"""
apoapsis_radius(a, e) = a * (1 + e)
apoapsis_radius(orbit::T) where T<:RestrictedTwoBodySystem = apoapsis_radius(semimajor_axis(orbit), eccentricity(orbit))
"""
Returns periapsis velocity, vₚ, for any orbital representation.
"""
periapsis_velocity(orbit::T) where T<:RestrictedTwoBodySystem = velocity(periapsis_radius(orbit), semimajor_axis(orbit), orbit.body.μ)
"""
Returns apoapsis velocity, v_a, for any orbital representation.
"""
apoapsis_velocity(orbit::T) where T<:RestrictedTwoBodySystem = velocity(apoapsis_radius(orbit), semimajor_axis(orbit), orbit.body.μ)
"""
Returns mass `m`.
"""
mass(body::CelestialBody) = body.μ / G
"""
Returns mass parameter `μ`.
"""
mass_parameter(body::CelestialBody) = body.μ
"""
Returns the orbital period.
"""
period(a, μ) = 2π * √(upreferred(a^3 / μ))
period(orbit::T) where T<:RestrictedTwoBodySystem = period(semimajor_axis(orbit), orbit.body.μ)
"""
Returns the true anomaly, ν.
"""
function true_anomoly(r, h, e, μ)
val = (h^2 - μ * r) / (μ * r * e)
acos(u"rad", isapprox(val, one(val)) ? one(val) : val)
end
true_anomoly(orbit::KeplerianState) = orbit.ν
true_anomoly(orbit::TwoBodyState) = true_anomoly(radius(orbit), specific_angular_momentum(orbit), eccentricity(orbit), orbit.body.μ)
"""
Returns mean motion, n.
"""
mean_motion(a, μ) = √(μ / a^3)
mean_motion(orbit::T) where T<:RestrictedTwoBodySystem = mean_motion(semimajor_axis(orbit), orbit.body.μ)
"""
Returns mean motion vector, n̄.
"""
function mean_motion_vector(orbit::T) where T<:RestrictedTwoBodySystem
# î = SVector{3, Float64}([1, 0, 0])
# ĵ = SVector{3, Float64}([0, 1, 0])
k̂ = SVector{3, Float64}([0, 0, 1])
return k̂ × specific_angular_momentum_vector(orbit)
end
"""
Returns the eccentric anomaly, E, parabolic anomaly, B, or hyperbolic
anomaly, H.
"""
function eccentric_anomoly(orbit::T) where T <: RestrictedTwoBodySystem
e = eccentricity(orbit)
ν = true_anomoly(orbit)
    return acos(u"rad", (e + cos(ν)) / (1 + e * cos(ν)))
end
"""
Returns time since periapsis, t.
"""
time_since_periapsis(n, e, E) = (E - e * sin(E)) / (n)
time_since_periapsis(orbit::T) where T <: RestrictedTwoBodySystem = time_since_periapsis(mean_motion(orbit), eccentricity(orbit), eccentric_anomoly(orbit))
"""
Returns orbital inclination, i.
"""
inclination(orbit::KeplerianState) = orbit.i
inclination(orbit::TwoBodyState) = inclination(KeplerianState(orbit))
"""
Returns Right Ascension of the Ascending Node, Ω.
"""
RAAN(orbit::KeplerianState) = orbit.Ω
RAAN(orbit::TwoBodyState) = RAAN(KeplerianState(orbit))
"""
Returns the Argument of Periapsis, ω.
"""
argument_of_periapsis(orbit::KeplerianState) = orbit.ω
argument_of_periapsis(orbit::TwoBodyState) = argument_of_periapsis(KeplerianState(orbit))
"""
Returns true if all elements in each system are within `atol` of the other.
"""
function Base.isapprox(c1::RestrictedTwoBodySystem, c2::RestrictedTwoBodySystem; atol=1e-6)
return all(ustrip.(abs.(radius_vector(c1) - radius_vector(c2))) .< atol) &&
all(ustrip.(abs.(velocity_vector(c1) - velocity_vector(c2))) .< atol) &&
ustrip(upreferred(abs(eccentricity(c1) - eccentricity(c2)))) < atol &&
ustrip(upreferred(abs(semimajor_axis(c1) - semimajor_axis(c2)))) < atol &&
ustrip(upreferred(abs(mod(inclination(c1), 180u"°") - mod(inclination(c2), 180u"°")))) < atol &&
ustrip(upreferred(abs(mod(RAAN(c1), 360u"°") - mod(RAAN(c2), 360u"°")))) < atol &&
ustrip(upreferred(abs(mod(argument_of_periapsis(c1), 360u"°") - mod(argument_of_periapsis(c2), 360u"°")))) < atol &&
ustrip(upreferred(abs(mod(true_anomoly(c1), 360u"°") - mod(true_anomoly(c2), 360u"°")))) < atol &&
isapprox(c1.body, c2.body; atol=atol)
end
"""
Returns true if all elements of each system are identically equal.
"""
function Base.isequal(c1::RestrictedTwoBodySystem, c2::RestrictedTwoBodySystem)
return all(radius_vector(c1) .== radius_vector(c2)) &&
all(velocity_vector(c1) .== velocity_vector(c2)) &&
eccentricity(c1) == eccentricity(c2) &&
semimajor_axis(c1) == semimajor_axis(c2) &&
mod(inclination(c1), 180u"°") == mod(inclination(c2), 180u"°") &&
mod(RAAN(c1), 360u"°") == mod(RAAN(c2), 360u"°") &&
mod(argument_of_periapsis(c1), 360u"°") == mod(argument_of_periapsis(c2), 360u"°") &&
mod(true_anomoly(c1), 360u"°") == mod(true_anomoly(c2), 360u"°") &&
c1.body == c2.body
end
"""
Returns true if all elements are within `atol` of the other.
"""
function Base.isapprox(b1::CelestialBody, b2::CelestialBody; atol=1e-6)
return ustrip(upreferred(abs(b1.R - b2.R))) < atol &&
ustrip(upreferred(abs(b1.μ - b2.μ))) < atol
end
"""
Returns true if all elements are identically equal.
"""
function Base.isequal(b1::CelestialBody, b2::CelestialBody)
return b1.R == b2.R && b1.μ == b2.μ
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 11486 | #
# TwoBodyStates.jl
#
# Describes Two Body Orbits through Cartesian coordinates and Orbital Elements.
#
"""
Abstract type for all four conic sections.
"""
abstract type AbstractConic end
"""
Type for orbits in the circular conic section.
"""
struct Circular <: AbstractConic end
"""
Type for orbits in the elliptical conic section.
"""
struct Elliptical <: AbstractConic end
"""
Type for orbits in the parabolic conic section.
"""
struct Parabolic <: AbstractConic end
"""
Type for orbits in the hyperbolic conic section.
"""
struct Hyperbolic <: AbstractConic end
"""
Type for invalid orbits (orbits with NaN fields)
"""
struct Invalid <: AbstractConic end
"""
Abstract type for all two-body orbital representations.
"""
abstract type RestrictedTwoBodySystem{C<:AbstractConic, F<:AbstractFloat} <: OrbitalSystem end
"""
Type representing large bodies in space. Currently, the following
solar system bodies are supported:
Sun, Mercury, Venus, Earth, Moon (Luna), Mars, Jupiter,
Saturn, Uranus, Neptune, Pluto.
"""
struct CelestialBody{F<:AbstractFloat}
R::Length{F}
μ::MassParameter{F}
name::String
function CelestialBody(m::Mass{<:AbstractFloat}, R::Length{<:AbstractFloat}, name::String="")
T = promote_type(typeof(ustrip(m)), typeof(ustrip(R)))
return new{T}(T(R), T(G * m), name)
end
function CelestialBody(μ::MassParameter{<:AbstractFloat}, R::Length{<:AbstractFloat}, name::String="")
T = promote_type(typeof(ustrip(μ)), typeof(ustrip(R)))
new{T}(R, μ, name)
end
function CelestialBody(μ::T1, R::T2, name::String="") where {T1<:AbstractFloat, T2<:AbstractFloat}
@warn "No units provided! Assuming km and km^3/s^2."
T = promote_type(T1, T2)
        return new{T}(T(R) * u"km", T(μ) * u"km^3/s^2", name)
end
function CelestialBody(μ::MassParameter, name::String="")
@warn "No radius provided! Setting to NaN."
return CelestialBody(μ, NaN * u"km", name)
end
function CelestialBody(μ::T, name::String="") where T<:AbstractFloat
@warn "No units provided! Assuming km^3/s^2."
return CelestialBody(μ * u"km^3/s^2", name)
end
CelestialBody(m::Mass) = CelestialBody(m * G)
CelestialBody(body::CelestialBody) = CelestialBody(body.μ, body.R, body.name)
end
Base.convert(::Type{T}, b::CelestialBody) where {T<:AbstractFloat} = CelestialBody(T(b.μ), T(b.R), b.name)
Base.promote(::Type{CelestialBody{A}}, ::Type{CelestialBody{B}}) where {A<:AbstractFloat, B<:AbstractFloat} = CelestialBody{promote_type(A,B)}
Core.Float16(o::CelestialBody) = convert(Float16, o)
Core.Float32(o::CelestialBody) = convert(Float32, o)
Core.Float64(o::CelestialBody) = convert(Float64, o)
Base.MPFR.BigFloat(o::CelestialBody) = convert(BigFloat, o)
"""
Custom display for `CelestialBody` instances.
"""
function Base.show(io::IO, body::CelestialBody)
println(io, crayon"blue", "CelestialBody:")
println(io, crayon"default",
" Mass: ", ustrip(u"kg", body.μ / G), " ", u"kg")
println(io, " Radius: ", ustrip(u"km", body.R), " ", u"km")
println(io, " Mass Parameter: ", ustrip(u"km^3/s^2", body.μ), " ", u"km^3/s^2")
end
"""
Struct for storing `TwoBody` Cartesian states for all conics.
"""
struct TwoBodyState{C<:AbstractConic, F<:AbstractFloat} <: RestrictedTwoBodySystem{C,F}
r::SVector{3, Length{F}}
v::SVector{3, Velocity{F}}
body::CelestialBody{F}
function TwoBodyState(r::R, v::V, body::CelestialBody) where {R <: AbstractVector{<:Length}, V <: AbstractVector{<:Velocity}}
C = conic(eccentricity(r, v, body.μ))
T = promote_type(typeof(ustrip(r[1])), typeof(ustrip(v[1])), typeof(ustrip(body.μ)))
if !(T <: AbstractFloat)
@warn "Non-float parameters provided. Defaulting to Float64."
T = Float64
end
return new{C,T}(SVector{3,Length{T}}(r...), SVector{3, Velocity{T}}(v...), T(body))
end
function TwoBodyState(r::R, v::V, body::CelestialBody) where {R <: AbstractVector{<:Real}, V <: AbstractVector{<:Real}}
C = conic(eccentricity(r, v, body.μ))
T = promote_type(typeof(ustrip(r[1])), typeof(ustrip(v[1])), typeof(ustrip(body.μ)))
if !(T <: AbstractFloat)
@warn "Non-float parameters provided. Defaulting to Float64."
T = Float64
end
return new{C,T}(SVector{3,Length{T}}((r * u"km")...), SVector{3, Velocity{T}}((v * u"km/s")...), T(body))
end
TwoBodyState(r, v, μ::T) where T <: Real = TwoBodyState(r, v, CelestialBody(μ))
TwoBodyState(orbit::TwoBodyState) = TwoBodyState(orbit.r, orbit.v, orbit.body)
end
Base.convert(::Type{T}, o::TwoBodyState) where {T<:AbstractFloat} = TwoBodyState(T.(o.r), T.(o.v), convert(T, o.body))
Base.promote(::Type{TwoBodyState{A}}, ::Type{TwoBodyState{B}}) where {A<:AbstractFloat, B<:AbstractFloat} = TwoBodyState{promote_type(A,B)}
Core.Float16(o::TwoBodyState) = convert(Float16, o)
Core.Float32(o::TwoBodyState) = convert(Float32, o)
Core.Float64(o::TwoBodyState) = convert(Float64, o)
Base.MPFR.BigFloat(o::TwoBodyState) = convert(BigFloat, o)
"""
Alias for `TwoBodyState`.
"""
Orbit(r, v, body) = TwoBodyState(r, v, body)
Orbit(e, a, i, Ω, ω, ν, body) = KeplerianState(e, a, i, Ω, ω, ν, body)
"""
Struct for storing `Keplerian` states for all conics.
"""
struct KeplerianState{C<:AbstractConic, F<:AbstractFloat} <: RestrictedTwoBodySystem{C,F}
e::F
a::Length{F}
i::DimensionlessQuantity{F}
Ω::DimensionlessQuantity{F}
ω::DimensionlessQuantity{F}
ν::DimensionlessQuantity{F}
body::CelestialBody{F}
function KeplerianState(e::E, a::Length{A}, i::DimensionlessQuantity{I},
Ω::DimensionlessQuantity{O}, ω::DimensionlessQuantity{W},
ν::DimensionlessQuantity{V}, body::CelestialBody{B}) where {
E <: Real,
A <: Real,
I <: Real,
O <: Real,
W <: Real,
V <: Real,
B <: Real
}
C = conic(e)
T = promote_type(typeof(e), typeof(ustrip(a)), typeof(ustrip(i)), typeof(ustrip(Ω)),
typeof(ustrip(ω)), typeof(ustrip(ν)), typeof(ustrip(body.μ)))
if !(T <: AbstractFloat)
@warn "Non-float parameters provided. Defaulting to Float64."
T = Float64
end
return new{C,T}(T(e), T(a), T(i), T(Ω), T(ω), T(ν), T(body))
end
function KeplerianState(e::E, a::A, i::I,
Ω::O, ω::W, ν::V,
body::CelestialBody{B}) where {
E <: Real,
A <: Real,
I <: Real,
O <: Real,
W <: Real,
V <: Real,
B <: Real
}
@warn "No units provided! Assuming km and rad."
return KeplerianState(e, a * u"km", (i % 2π) * u"rad", (Ω % 2π) * u"rad",
(ω % 2π) * u"rad", (ν % 2π) * u"rad", body)
end
KeplerianState(e, a, i, Ω, ω, ν, μ::T) where T <: Real = KeplerianState(e, a, i, Ω, ω, ν, CelestialBody(μ))
KeplerianState(orbit::KeplerianState) = KeplerianState(orbit.e, orbit.a, orbit.i, orbit.Ω, orbit.ω, orbit.ν, orbit.body)
end
Base.convert(::Type{T}, o::KeplerianState) where {T<:AbstractFloat} = KeplerianState(T(o.e), T(o.a), T(o.i), T(o.Ω), T(o.ω), T(o.ν), convert(T, o.body))
Base.promote(::Type{KeplerianState{A}}, ::Type{KeplerianState{B}}) where {A<:AbstractFloat, B<:AbstractFloat} = KeplerianState{promote_type(A,B)}
Core.Float16(o::KeplerianState) = convert(Float16, o)
Core.Float32(o::KeplerianState) = convert(Float32, o)
Core.Float64(o::KeplerianState) = convert(Float64, o)
Base.MPFR.BigFloat(o::KeplerianState) = convert(BigFloat, o)
TwoBodyState(orbit::KeplerianState) = TwoBodyState(cartesian(orbit)..., orbit.body)
KeplerianState(orbit::TwoBodyState) = KeplerianState(keplerian(orbit)..., orbit.body)
"""
Custom display for TwoBodyState instances.
"""
function Base.show(io::IO, orbit::TwoBodyState)
println(io, conic(orbit), " Two-body State (Cartesian):")
println(io, "")
println(io, " Position (inertial): [",
ustrip(u"km", orbit.r[1]), ", ",
ustrip(u"km", orbit.r[2]), ", ",
ustrip(u"km", orbit.r[3]), "] ", u"km")
println(io, " Velocity (inertial): [",
ustrip(u"km/s", orbit.v[1]), ", ",
ustrip(u"km/s", orbit.v[2]), ", ",
ustrip(u"km/s", orbit.v[3]), "] ", u"km/s")
println(io, "")
println(io, " $(orbit.body.name == "" ? "Body" : orbit.body.name) (μ): ",
ustrip(u"km^3 / s^2", orbit.body.μ), " ", u"km^3/s^2")
end
"""
Custom display for KeplerianState instances.
"""
function Base.show(io::IO, orbit::KeplerianState)
println(io, conic(orbit), " Two-body State (Keplerian):")
    println(io, "")
println(io, " Eccentricity: ",
orbit.e)
println(io, " Semimajor Axis: ",
ustrip(u"km", orbit.a), " ", u"km")
println(io, " Inclination: ",
ustrip(u"°", orbit.i), u"°")
println(io, " RAAN: ",
ustrip(u"°", orbit.Ω), u"°")
println(io, " Arg. Periapsis: ",
ustrip(u"°", orbit.ω), u"°")
    println(io, " True Anomaly: ",
ustrip(u"°", orbit.ν), u"°")
println(io, "")
println(io, " $(orbit.body.name == "" ? "Body" : orbit.body.name) (μ): ",
ustrip(u"km^3 / s^2", orbit.body.μ), " ", u"km^3/s^2")
end
# Constants
# All data pulled from the following references:
# [1] https://en.wikipedia.org/wiki/List_of_Solar_System_objects_by_size
# [2] https://docs.astropy.org/en/stable/constants/#module-astropy.constants
"""
Constant `CelestialBody` for our sun!
"""
const Sun = CelestialBody(1.327124400419393e11u"km^3/s^2", 696000.0u"km", "Sun")
"""
Constant `CelestialBody` for Mercury.
"""
const Mercury = CelestialBody(22031.78000000002u"km^3/s^2", 2439.7u"km", "Mercury")
"""
Constant `CelestialBody` for Venus.
"""
const Venus = CelestialBody(324858.592u"km^3/s^2", 6051.8u"km", "Venus")
"""
Constant `CelestialBody` for your home planet!
"""
const Earth = CelestialBody(398600.4354360959u"km^3/s^2", 6371.008366666666u"km", "Earth")
"""
Constant `CelestialBody` for our moon.
"""
const Moon = CelestialBody(4902.800066163796u"km^3/s^2", 1737.4000000000003u"km", "Moon")
"""
Constant `CelestialBody` (alias for our moon).
"""
const Luna = Moon
"""
Constant `CelestialBody` for Mars.
"""
const Mars = CelestialBody(42828.37362069909u"km^3/s^2", 3389.5266666666666u"km", "Mars")
"""
Constant `CelestialBody` for Jupiter.
"""
const Jupiter = CelestialBody(1.2668653492180079e8u"km^3/s^2", 69946.0u"km", "Jupiter")
"""
Constant `CelestialBody` for Saturn.
"""
const Saturn = CelestialBody(3.793120749865224e7u"km^3/s^2", 58300.0u"km", "Saturn")
"""
Constant `CelestialBody` for Uranus.
"""
const Uranus = CelestialBody(5.793951322279009e6u"km^3/s^2", 25363.666666666668u"km", "Uranus")
"""
Constant `CelestialBody` for Neptune.
"""
const Neptune = CelestialBody(6.835099502439672e6u"km^3/s^2", 24623.0u"km", "Neptune")
"""
Constant `CelestialBody` for Pluto. We couldn't leave you out again!
"""
const Pluto = CelestialBody(869.6138177608749u"km^3/s^2", 1195.0u"km", "Pluto")
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 236 | #
# Run all unit tests in UnitfulAstrodynamics.jl
#
include("TwoBody/test_twobody.jl")
include("ThreeBody/test_threebody.jl")
include("NBody/test_nbody.jl")
include("Propagators/test_propagators.jl")
include("AstroPlots/test_plots.jl")
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 317 | module PlotsUnitTests
using Test
using UnitfulAstrodynamics
@testset "AstroPlots" begin
# Twobody Orbit
r̅ = [0.0, 11681, 0.0] * u"km"
v̅ = [5.134, 4.226, 2.787] * u"km/s"
orbit = Orbit(r̅, v̅, Earth)
# Does plot run?
fig = orbitplot(propagate(orbit, 1u"s"))
@test true
end
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 428 | module NBodyUnitTests
using Test
using UnitfulAstrodynamics
@testset "NBody" begin
r̅₁ = [0, 0, 0]u"km"
v̅₁ = [0, 0, 0]u"km/s"
m₁ = uconvert(u"kg", 1u"Mearth")
myEarth = Body(r̅₁, v̅₁, m₁)
r̅₂ = [0, 11681, 0]u"km"
v̅₂ = [5.134, 4.226, 2.787]u"km/s"
m₂ = 150.0u"kg"
mySatellite = Body(r̅₂, v̅₂, m₂)
sys1 = NBodySystem([myEarth, mySatellite])
# No errors!
@test true
end
end | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 398 | module PropagatorUnitTests
using Test
using UnitfulAstrodynamics
@testset "Propagators" begin
# Twobody Orbit
r̅ = [0.0, 11681, 0.0] * u"km"
v̅ = [5.134, 4.226, 2.787] * u"km/s"
orbit = Orbit(r̅, v̅, Earth)
# Propagate twobody
sols = propagate(orbit, 5u"s"; save_everystep=false)
@test isapprox(sols.step[end], kepler(orbit, 5u"s"; tol=1e-14), atol=1e-6)
end
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 811 | module ThreeBodyUnitTests
using Test
using UnitfulAstrodynamics
@testset "ThreeBody" begin
# Hardcode Gravity parameters for the Sun,
# and the Earth-Moon System
μₛ = 1.32712440018e20u"m^3/s^2"
μₑ = 4.035032351966808e14u"m^3/s^2"
# Dimensional initial conditions for spacecraft
r = [2e9, 7000, 2000]u"km"
v = [0.001, 0.08, 0.02]u"km/s"
t = 500u"d"
a = 1.0u"AU"
# Construct nondimensional state
sys = ThreeBodyState(μₛ, μₑ, a, r, v, t);
@test true
# This should run!
μ = nondimensionalize(Sun.μ, Earth.μ) |> upreferred
r, v, T = halo(μ; Az = 1e-2, L = 2)
sys = NondimensionalThreeBodyState(r, v, μ, T, 1.0u"AU", 500u"d")
sys = redimensionalize(sys, Sun.μ, Earth.μ)
sys = nondimensionalize(sys)
@test true
end
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | code | 1454 | module TwoBodyUnitTests
using Test
using UnitfulAstrodynamics
@testset "Transforms" begin
rᵢ = [0.0, 11681.0, 0.0] * u"km"
vᵢ = [5.134, 4.226, 2.787] * u"km/s"
orbit = Orbit(rᵢ, vᵢ, Earth) |> KeplerianState
@test orbit.a == 24509.265399338536 * u"km"
@test orbit.e == 0.723452708202361
@test orbit.i == 151.50460766373865 * u"°"
@test orbit.ν == 89.99652573907436 * u"°"
@test orbit ≈ TwoBodyState(orbit)
e = 0.3
a = 15000. * u"km" + 1.0u"Rearth"
i = 10. * u"°"
Ω = 0. * u"°"
ω = 10. * u"°"
ν = 0. * u"°"
orbit = KeplerianState(e, a, i, Ω, ω, ν, Earth)
@test isapprox(orbit, TwoBodyState(orbit), atol=1e-6)
end
@testset "Kepler" begin
rᵢ = [0.0, 11681.0, 0.0]u"km"
vᵢ = [5.134, 4.226, 2.787]u"km/s"
orbit = Orbit(rᵢ, vᵢ, Earth)
@test kepler(orbit, period(orbit)) ≈ orbit
end
@testset "Lambert" begin
rᵢ = [0.0, 11681.0, 0.0]u"km"
vᵢ = [5.134, 4.226, 2.787]u"km/s"
initial = Orbit(rᵢ, vᵢ, Earth)
Δt = 1000u"s"
final = kepler(initial, Δt; tol=1e-12)
v₁, v₂ = lambert(radius_vector(initial), radius_vector(final), Earth.μ, Δt, :short; tol=1e-12, max_iter=1000)
@test isapprox(ustrip.(u"km/s", v₁), ustrip.(u"km/s", velocity_vector(initial)); atol=1e-6)
@test isapprox(ustrip.(u"km/s", v₂), ustrip.(u"km/s", velocity_vector(final)); atol=1e-6)
end
end
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 3353 | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 2587 | [Tests](https://github.com/cadojo/UnitfulAstrodynamics.jl/actions?query=workflow%3ATests)
[Documentation](https://cadojo.github.io/UnitfulAstrodynamics.jl/stable)
# UnitfulAstrodynamics.jl
Common astrodynamics calculations, with units!
## Features
* Restricted two-body problem equations, states, propagation, and plotting
* Restricted three-body problem equations, states, propagation, and iterative Halo orbit solvers
* N-body problem equations, states, propagation, and plotting
* A collection of fairly accurate planetary constants from our solar system (pulled from [SPICE](https://naif.jpl.nasa.gov/pub/naif/generic_kernels/) kernals)
More to come! In the near term, additional features will include...
* Manifold-based transfer equations and states within the circular restricted three-body problem
* Hohmann-based transfer equations and states within the restricted two-body problem
* Zero-velocity curve plots for circular restricted three-body problem trajectories
* Stability analysis for circular restricted three-body problem states
## Motivation
This package aims to provide a simple interface for common astrodynamics problems. It was created to learn more about Astrodynamics, and will be developed alongside a Graduate Astrodynamics course at the University of Maryland. The packages [JuliaSpace/Astrodynamics.jl](https://github.com/JuliaSpace/Astrodynamics.jl) and [JuliaAstro/AstroBase.jl](https://github.com/JuliaAstro/AstroBase.jl) are more fully featured. I will continue adding features to this package, but for a more complete feature set, use the packages provided by [JuliaSpace](https://github.com/JuliaSpace) and [JuliaAstro](https://github.com/JuliaAstro).
## Credits
\[1\] Vallado, David A. Fundamentals of astrodynamics and applications. Vol. 12. Springer Science & Business Media, 2001.
* All equations and algorithms within `UnitfulAstrodynamics` are pulled from Vallado's _Fundamentals of Astrodynamics and Applications_, as well as course notes from ENAE 601 (Astrodynamics) at the University of Maryland.
\[2\] [Unitful.jl](https://github.com/PainterQubits/Unitful.jl) and [UnitfulAstro.jl](https://github.com/JuliaAstro/UnitfulAstro.jl) are used for unit handling.
## Usage
Check out the [Getting Started](https://cadojo.github.io/UnitfulAstrodynamics.jl/stable/Overview/getting-started/#Getting-Started) documentation for code examples, and more detail about using this package. | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 776 | # UnitfulAstrodynamics.jl Documentation
## Overview
```@contents
Pages = ["Overview/about.md", "Overview/getting-started.md"]
Depth = 1
```
## TwoBody
```@contents
Pages = ["TwoBody/types.md", "TwoBody/functions.md"]
Depth = 1
```
## ThreeBody
```@contents
Pages = ["ThreeBody/types.md", "ThreeBody/functions.md"]
Depth = 1
```
## NBody
```@contents
Pages = ["NBody/types.md", "NBody/functions.md"]
Depth = 1
```
## Propagators
```@contents
Pages = ["Propagators/types.md", "Propagators/functions.md"]
Depth = 1
```
## Maneuvers
```@contents
Pages = ["Maneuvers/types.md", "Maneuvers/functions.md"]
Depth = 1
```
## Plots
```@contents
Pages = ["AstroPlots/functions.md"]
Depth = 1
```
## Common Types
```@contents
Pages = ["CommonTypes/types.md"]
Depth = 1
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 345 | # Plotting `TwoBody` and `NBody` Systems
`Trajectory` results returned by `propagate` can be plotted with the `orbitplot` function, as shown in the example below. Currently, two-body orbits can be plotted in the `Perifocal` (2D) frame and the `Cartesian` (3D) frame. N-body systems can currently only be plotted in 3D.
```@docs
orbitplot
lagrangeplot
```
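For example (a quick sketch; the Earth-Moon mass parameter below is approximate):

```julia
# Plot a propagated two-body trajectory
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
fig = orbitplot(propagate(orbit, period(orbit)))

# Plot the five Lagrange points for the (approximate) Earth-Moon mass parameter
fig = lagrangeplot(0.012150585, 1:5)
```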
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 193 | # `CommonTypes`
The following abstract types are defined, which are common parent types for all submodules within `UnitfulAstrodynamics`.
```@docs
AbstractBody
OrbitalSystem
AbstractTrajectory
```
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 216 | # `Maneuvers` Calculations
Currently, `Maneuvers` focuses on the two-body problem. The following functions have been developed. More to come!
```@docs
escape_radius
escape_velocity
escape_time
escape_path_length
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 171 | # `Maneuvers` Data Structures
Maneuvers are a current focus of development. The currently developed types are shown below.
```@docs
AbstractManeuver
ConstantManeuver
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 337 | # `NBody` Calculations
Currently, two functions are provided to calculate information for an `NBodySystem`: [`system_energy`](@ref), which calculates the total energy for the system, and [`system_angular_momentum`](@ref), which calculates the total angular momentum for the system. A sketch of their usage follows the docstrings below.
```@docs
system_energy
system_angular_momentum
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
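A minimal sketch (assuming both functions accept an `NBodySystem`, as described above):

```julia
r₁ = [0.0, 0.0, 0.0]u"km";     v₁ = [0.0, 0.0, 0.0]u"km/s"
r₂ = [0.0, 11681.0, 0.0]u"km"; v₂ = [5.134, 4.226, 2.787]u"km/s"
sys = NBodySystem([Body(r₁, v₁, uconvert(u"kg", 1u"Mearth")), Body(r₂, v₂, 150.0u"kg")])

E = system_energy(sys)            # total system energy
H = system_angular_momentum(sys)  # total system angular momentum
```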
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 394 | # `NBody` Data Structures
As with [`TwoBody` Data Structures](@ref), the `NBody` submodule includes data structures for storing multibody orbital states. The `Body` structure holds position, velocity, and mass information for a single body. An `NBodySystem` contains an array of `Body` structures, and is used to completely describe a multibody orbital state.
```@docs
Body
NBodySystem
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
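For example:

```julia
earth     = Body([0.0, 0.0, 0.0]u"km", [0.0, 0.0, 0.0]u"km/s", uconvert(u"kg", 1u"Mearth"))
satellite = Body([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", 150.0u"kg")
sys = NBodySystem([earth, satellite])
```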
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 1388 | # Package Overview
`UnitfulAstrodynamics.jl` is a [Unitful](https://github.com/PainterQubits/Unitful.jl) Astrodynamics library, which includes `TwoBody` and `NBody` problem calculations, as well as orbit propagation and plotting!
## Motivation
This package aims to provide a simple interface for common astrodynamics problems. It was created to learn more about Astrodynamics, and will be developed alongside a Graduate Astrodynamics course at the University of Maryland. The packages [JuliaSpace/Astrodynamics.jl](https://github.com/JuliaSpace/Astrodynamics.jl) and [JuliaAstro/AstroBase.jl](https://github.com/JuliaAstro/AstroBase.jl) are more fully featured. I will continue adding features to this package, but for a more complete feature set, use the packages provided by [JuliaSpace](https://github.com/JuliaSpace) and [JuliaAstro](https://github.com/JuliaAstro).
## Credits
\[1\] Vallado, David A. Fundamentals of astrodynamics and applications. Vol. 12. Springer Science & Business Media, 2001.
* All equations and algorithms within `UnitfulAstrodynamics` are pulled from Vallado's _Fundamentals of Astrodynamics and Applications_, as well as course notes from ENAE 601 (Astrodynamics) at the University of Maryland.
\[2\] [Unitful.jl](https://github.com/PainterQubits/Unitful.jl) and [UnitfulAstro.jl](https://github.com/JuliaAstro/UnitfulAstro.jl) are used for unit handling.
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 4361 | # Getting Started
## Installation
`UnitfulAstrodynamics` is included in Julia's General package registry.
```Julia
# In Julia's REPL
]add UnitfulAstrodynamics
# Or, with `Pkg`
import Pkg
Pkg.add("UnitfulAstrodynamics")
```
## Units are Provided!
`UnitfulAstrodynamics.jl` uses `Reexport.jl` to expose `Unitful`, `UnitfulAstro`, and `UnitfulAngles`. Units are required for all `TwoBody`, `ThreeBody`, and `NBody` data structures. Functions often have non-unit equivalents --
check the docstrings!
## TwoBody
The `TwoBody` module handles Astrodynamics scenarios within the two-body problem. You can make an `Orbit` by specifying a `CelestialBody` (Sun, Earth, Moon, Mars, etc.), and a Cartesian or Keplerian state.
```Julia
# Cartesian state to Orbit
r = [0.0, 11681.0, 0.0]u"km"
v = [5.134, 4.226, 2.787]u"km/s"
orbit1 = TwoBodyState(r, v, Earth)
# Keplerian state to Orbit
e = 0.3
a = 15000 * u"km" + Earth.R
i = 10 * u"°"
Ω = 0 * u"°"
ω = 10 * u"°"
ν = 0 * u"°"
orbit2 = KeplerianState(e, a, i, Ω, ω, ν, Earth)
# This is a true fact!
orbit1 ≈ orbit2
# For the rest of this section...
orbit = orbit1
```
Now you can solve __Kepler's Prediction Problem__, __propagate__ the satellite's trajectory over a specified interval in time, and __plot__ the resultant trajectory with `Plots.jl`.
```Julia
# Kepler's Prediction problem
orbit_later = kepler(orbit, period(orbit))
# Lambert's Problem
v₁, v₂ = lambert(orbit.r, orbit_later.r, Earth.μ, period(orbit), :short)
# Orbit propagation
sols = propagate(orbit, period(orbit))
# Plotting (with Plots.jl kwargs)
orbitplot(sols; title="Plots.jl keywords work!", xlabel="Woo")
# Another true fact!
sols.step[end] ≈ orbit_later
```
You may have noticed the `period` function. All common two-body problem equations have been included as functions with common arguments, `period(a, μ)`, and with `UnitfulAstrodynamics.jl` structure arguments, `period(orbit)`. The current list of supported functions is described in [`TwoBody` Calculations](@ref).
Not sure how to use one of those helper functions? Check the docstrings in Julia's REPL!
```Julia
help?> eccentricity
search: eccentricity eccentricity_vector
eccentricity(r̅, v̅, μ)
eccentricity(orbit::TwoBodyState)
eccentricity(orbit::KeplerianState)
Returns orbital eccentricity, e.
```
## ThreeBody
The `ThreeBody` module helps to solve the Circular Restricted `ThreeBody` problem.
```julia
# Hardcode Gravity parameters for the Sun,
# and the Earth-Moon System
μₛ = 1.32712440018e20u"m^3/s^2"
μₑ = 4.035032351966808e14u"m^3/s^2"
# Dimensional initial conditions for spacecraft
r = [2e9, 7000, 2000]u"km"
v = [0.001, 0.08, 0.02]u"km/s"
t = 500u"d"
a = 1u"AU"
# Construct nondimensional state
sys = ThreeBodyState(μₛ, μₑ, a, r, v, t) |> nondimensionalize
# Propagate!
sols = propagate(sys)
# Halo solvers
μ = nondimensionalize(Sun.μ, Earth.μ)
r, v, T = analyticalhalo(μ; L = 1, steps = 1000)
r, v, T = halo(μ; Az = 0.02, L = 2)
```
## NBody
The `NBody` module helps to solve the classical gravitational `NBody` problem. This is the baby version - point mass bodies, and no relativity. But it's still useful!
You can make your own `Body` by specifying an initial Cartesian state, and a mass.
```Julia
# It's MY Earth, and I want it now
r₁ = [0.0, 0.0, 0.0]u"km"
v₁ = [0.0, 0.0, 0.0]u"km/s"
m₁ = mass(Earth)
myEarth = Body(r₁, v₁, m₁)
# And we'll need a satellite...
r₂ = [0.0, 11681.0, 0.0]u"km"
v₂ = [5.134, 4.226, 2.787]u"km/s"
m₂ = 1000.0u"kg"
mySatellite = Body(r₂, v₂, m₂)
```
An `NBodySystem` contains an array of `Body` instances.
```Julia
# Construct an NBodySystem
sys = NBodySystem([myEarth, mySatellite])
```
And you can __propagate__ an `NBodySystem` through time to numerically find the final states for each `Body`. The package `DifferentialEquations.jl` is used for the numerical integration. For all __propagation__ functions in `UnitfulAstrodynamics.jl`, you can specify `kwargs` as you would for a `DifferentialEquations.jl` `solve` call.
```Julia
# Propagate n-body system
sols = propagate(sys, 10000u"s"; abstol=1e-14, reltol=1e-14)
```
As with a two-body `Orbit`, you can also plot each timestep in the n-body propagation.
```Julia
# Plot n-body propagation results
orbitplot(sols; title="Plots.jl keywords work!", xlabel="Woo")
```
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 174 | # Propagating Orbits
A function `propagate` is defined for both `TwoBody` and `NBody` orbits.
```@docs
propagate
RestrictedTwoBodyTic!
RestrictedThreeBodyTic!
NBodyTic!
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
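For example, for a two-body orbit (trailing keyword arguments are forwarded to the `DifferentialEquations.jl` `solve` call):

```julia
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
sols = propagate(orbit, period(orbit); abstol=1e-14, reltol=1e-14)
```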
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 140 | # Storing Propagation Results
Both `TwoBody` and `NBody` systems have a structure for storing propagation results.
```@docs
Trajectory
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
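Each timestep of a propagation is stored in the `step` field of the returned `Trajectory`. For example:

```julia
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
sols = propagate(orbit, period(orbit))
sols.step[end]   # the state at the final timestep
```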
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 916 | # `ThreeBody` Calculations
Common `ThreeBody` problem calculations are provided through functions.
## Frame Representations
You can convert between the Inertial and Rotating (Synodic) reference frames through
the `inertial` and `synodic` functions.
```@docs
inertial
synodic
```
## Dimensionalization
Functions to nondimensionalize spacecraft states, and re-dimensionalize spacecraft
states are provided.
```@docs
time_scale_factor
nondimensionalize_length
nondimensionalize_velocity
nondimensionalize_time
nondimensionalize_mass_parameter
nondimensionalize
redimensionalize_length
redimensionalize_velocity
redimensionalize_time
redimensionalize
```
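For example (mirroring the package tests):

```julia
# Gravity parameters for the Sun, and the Earth-Moon system
μₛ = 1.32712440018e20u"m^3/s^2"
μₑ = 4.035032351966808e14u"m^3/s^2"
μ  = nondimensionalize(μₛ, μₑ)   # nondimensional mass parameter

# Nondimensionalize a dimensional spacecraft state
r = [2e9, 7000, 2000]u"km"
v = [0.001, 0.08, 0.02]u"km/s"
sys = nondimensionalize(ThreeBodyState(μₛ, μₑ, 1.0u"AU", r, v, 500u"d"))
```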
## Halo Orbit Solvers
```@docs
analyticalhalo
halo
monodromy
```
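For example:

```julia
μ = nondimensionalize(Sun.μ, Earth.μ) |> upreferred

# Analytical (approximate) halo orbit near L1
r, v, T = analyticalhalo(μ; L = 1, steps = 1000)

# Numerically refined halo orbit about L2, with out-of-plane amplitude Az
r, v, T = halo(μ; Az = 1e-2, L = 2)
```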
## Other Common Calculations
```@docs
potential_energy
jacobi_constant
lagrange
potential_energy_hessian
accel
RestrictedThreeBodySTMTic!
state_transition_dynamics
nondimensional_radius
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
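For example, the positions of the five Lagrange points in the rotating frame can be computed directly from the nondimensional mass parameter:

```julia
μ = nondimensionalize(Sun.μ, Earth.μ)
points = lagrange(μ)
```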
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 305 | # `ThreeBody` Data Structures
The `ThreeBody` module contains two key structures: `ThreeBodyState` and `NondimensionalThreeBodyState`, which hold
the dimensional and nondimensional states of the spacecraft within the Circular Restricted Three-body Problem.
```@docs
RestrictedThreeBodySystem
ThreeBodyState
NondimensionalThreeBodyState
``` | UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
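For example (values from the package tests):

```julia
μₛ = 1.32712440018e20u"m^3/s^2"
μₑ = 4.035032351966808e14u"m^3/s^2"
r = [2e9, 7000, 2000]u"km"
v = [0.001, 0.08, 0.02]u"km/s"

sys   = ThreeBodyState(μₛ, μₑ, 1.0u"AU", r, v, 500u"d")  # dimensional state
ndsys = nondimensionalize(sys)                           # NondimensionalThreeBodyState
```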
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 1186 | # `TwoBody` Calculations
For convenience, common `TwoBody` problem calculations are provided through functions.
Often, these functions are provided with common arguments (such as `period(a, μ)`), _and_ with [`TwoBody` Data Structures](@ref) arguments (such as `period(::Orbit)`).
## Orbital Representations
You can convert between Cartesian and Keplerian `TwoBody` orbital representations by using [`cartesian`](@ref) and [`keplerian`](@ref).
```@docs
cartesian
keplerian
perifocal
```
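For example:

```julia
rᵢ = [0.0, 11681.0, 0.0]u"km"
vᵢ = [5.134, 4.226, 2.787]u"km/s"

e, a, i, Ω, ω, ν = keplerian(rᵢ, vᵢ, Earth)   # Cartesian ⟶ Keplerian
r, v = cartesian(e, a, i, Ω, ω, ν, Earth)     # Keplerian ⟶ Cartesian
```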
## Kepler's Prediction Problem
The function `kepler` can solve Kepler's Prediction Problem for an `Orbit`.
```@docs
kepler
```
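For example, propagating an orbit through one full period returns (approximately) the same state:

```julia
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
kepler(orbit, period(orbit)) ≈ orbit   # true
```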
## Lambert's Problem
The function `lambert` can solve Lambert's Problem for an `Orbit`.
```@docs
lambert
```
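For example, the transfer velocities between two position vectors separated by a time of flight `Δt` can be recovered with:

```julia
initial = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
Δt = 1000u"s"
final = kepler(initial, Δt)

v₁, v₂ = lambert(radius_vector(initial), radius_vector(final), Earth.μ, Δt, :short)
```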
## Common `TwoBody` Problem Calculations
```@docs
semimajor_axis
eccentricity
eccentricity_vector
inclination
true_anomoly
periapsis_radius
apoapsis_radius
periapsis_velocity
apoapsis_velocity
radius
radius_vector
velocity
velocity_vector
mass
mass_parameter
period
time_since_periapsis
mean_motion
mean_motion_vector
semi_parameter
eccentric_anomoly
specific_angular_momentum
specific_angular_momentum_vector
specific_energy
conic
```
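For example, each of these accepts an `Orbit` directly:

```julia
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)

eccentricity(orbit)
period(orbit)
apoapsis_radius(orbit)
```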
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 0.8.3 | 6338b3ac7b0f1c55f5f2a457df49529a9287aca2 | docs | 1490 | # `TwoBody` Data Structures
The `TwoBody` module contains two key structures: `Orbit` and `CelestialBody`.
An `Orbit` is the core structure for `TwoBody` calculations. It contains an orbital state (both Cartesian and the equivalent Keplerian representation), and a central body.
The central body within `Orbit` is of type `CelestialBody`. Common bodies in our solar system have been added for convenience, as described in [Default `CelestialBodies`](@ref), but you can also make your own.
```@docs
TwoBodyState
KeplerianState
Orbit
CelestialBody
```
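For example, you can define your own central body from a mass parameter and a radius (the values for Ceres below are approximate):

```julia
Ceres = CelestialBody(62.63u"km^3/s^2", 469.7u"km", "Ceres")
orbit = Orbit([0.0, 1000.0, 0.0]u"km", [0.1, 0.0, 0.2]u"km/s", Ceres)
```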
## Abstract Types and Pre-defined Parameters
The first section in the `TwoBody` documentation described the core [`TwoBody` Data Structures](@ref). Each data structure has an abstract parent type: all `Orbit` structures extend `RestrictedTwoBodySystem`. In addition, all `Orbit` structures are parameterized by their conic section, which is of type `AbstractConic`. All conic sections are pre-defined structures: `Circular`, `Elliptical`, `Parabolic`, and `Hyperbolic`; the `Invalid` conic is used to describe invalid orbital states (such as providing a `NaN` value to an `Orbit` constructor).
```@docs
RestrictedTwoBodySystem
AbstractConic
Circular
Elliptical
Parabolic
Hyperbolic
Invalid
```
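For example:

```julia
orbit = Orbit([0.0, 11681.0, 0.0]u"km", [5.134, 4.226, 2.787]u"km/s", Earth)
conic(orbit) === Elliptical   # the conic section is also a type parameter of the orbit
```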
## Default `CelestialBodies`
For convenience, the following common bodies in _our_ solar system have already been defined!
* `Sun`
* `Mercury`
* `Venus`
* `Earth`
* `Moon`
* `Luna`
* `Mars`
* `Jupiter`
* `Saturn`
* `Uranus`
* `Neptune`
* `Pluto`
| UnitfulAstrodynamics | https://github.com/cadojo/UnitfulAstrodynamics.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 744 | using Documenter, FASTX
DocMeta.setdocmeta!(FASTX, :DocTestSetup, :(using FASTX, BioSequences); recursive=true)
makedocs(
modules = [FASTX],
format = Documenter.HTML(),
sitename = "FASTX.jl",
doctest = true,
pages = [
"Overview" => Any[
"Quickstart" => "index.md",
"Records" => "records.md",
"File I/O" => "files.md",
],
"FASTA" => "fasta.md",
"FASTQ" => "fastq.md",
"FAI" => "fai.md"
],
authors = "Sabrina J. Ward, Jakob N. Nissen, The BioJulia Organisation and other contributors.",
checkdocs = :exports
)
deploydocs(
repo = "github.com/BioJulia/FASTX.jl.git",
push_preview = true,
deps = nothing,
make = nothing
)
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 2143 | module BioSequencesExt
import FASTX: FASTARecord, FASTQRecord, sequence, Record, seqsize, seq_data_part
using BioSequences: BioSequence, LongSequence
function sequence(
::Type{S},
record::Record,
part::UnitRange{Int}=1:seqsize(record)
)::S where S <: BioSequence
return S(sequence(record, part))
end
# Special method for LongSequence: Can operate on bytes directly
# and more efficiently
function sequence(
::Type{S},
record::Record,
part::UnitRange{Int}=1:seqsize(record)
)::S where S <: LongSequence
return S(@view(record.data[seq_data_part(record, part)]))
end
function Base.copy!(dest::LongSequence, src::Record)
resize!(dest, UInt(seqsize(src)))
copyto!(dest, 1, src, 1, seqsize(src))
end
function Base.copyto!(dest::LongSequence, src::Record)
return copyto!(dest, 1, src, 1, seqsize(src))
end
function Base.copyto!(dest::LongSequence, doff, src::Record, soff, N)
# This check is here to prevent boundserror when indexing src.sequence
iszero(N) && return dest
return copyto!(dest, doff, src.data, Int(src.description_len) + soff, N)
end
function FASTARecord(description::AbstractString, sequence::BioSequence)
buf = IOBuffer()
print(buf, '>', description, '\n')
# If the sequence is empty, we need to print a newline in order to not
# have the FASTA file truncated, thus invalid
print(buf, isempty(sequence) ? '\n' : sequence)
return parse(FASTARecord, take!(buf))
end
function FASTQRecord(
description::AbstractString,
sequence::BioSequence,
quality::AbstractString
)
if length(sequence) != ncodeunits(quality)
throw(ArgumentError("Byte length of sequence doesn't match codeunits of quality"))
end
buf = IOBuffer()
print(buf,
'@', description, '\n',
sequence, "\n+\n",
quality
)
parse(FASTQRecord, take!(buf))
end
function FASTQRecord(
description::AbstractString,
sequence::BioSequence,
quality::Vector{<:Number};
offset::Integer=33
)
    # Convert numeric quality scores to their ASCII representation (an offset of 33 corresponds to Phred+33)
    ascii_quality = String([UInt8(q + offset) for q in quality])
FASTQRecord(description, sequence, ascii_quality)
end
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 6926 | module FASTX
using StringViews: StringView
using Automa: Automa
"""
identifier(record::Record)::AbstractString
Get the sequence identifier of `record`. The identifier is the description
before any whitespace. If the identifier is missing, return an empty string.
Returns an `AbstractString` view into the record. If the record is overwritten,
the string data will be corrupted.
See also: [`description`](@ref), [`sequence`](@ref)
# Examples
```jldoctest
julia> record = parse(FASTA.Record, ">ident_here some descr \\nTAGA");
julia> identifier(record)
"ident_here"
```
"""
function identifier end
"""
description(record::Record)::AbstractString
Get the description of `record`. The description is the entire header line, minus the
leading `>` or `@` symbols for FASTA/FASTQ records, respectively, including trailing whitespace.
Returns an `AbstractString` view into the record. If the record is overwritten,
the string data will be corrupted.
See also: [`identifier`](@ref), [`sequence`](@ref)
# Examples
```jldoctest
julia> record = parse(FASTA.Record, ">ident_here some descr \\nTAGA");
julia> description(record)
"ident_here some descr "
```
"""
function description end
"""
sequence([::Type{S}], record::Record, [part::UnitRange{Int}])::S
Get the sequence of `record`.
`S` can be either a subtype of `BioSequences.BioSequence`, `AbstractString` or `String`.
If elided, `S` defaults to an `AbstractString` subtype.
If `part` argument is given, it returns the specified part of the sequence.
See also: [`identifier`](@ref), [`description`](@ref)
# Examples
```jldoctest
julia> record = parse(FASTQ.Record, "@read1\\nTAGA\\n+\\n;;]]");
julia> sequence(record)
"TAGA"
julia> sequence(LongDNA{2}, record)
4nt DNA Sequence:
TAGA
```
"""
function sequence end
"""
seqsize(::Record)::Int
Get the number of bytes in the sequence of a `Record`.
Note that in the presence of non-ASCII characters, this may differ from `length(sequence(record))`.
See also: [`sequence`](@ref)
# Examples
```jldoctest
julia> seqsize(parse(FASTA.Record, ">hdr\\nKRRLPW\\nYHS"))
9
julia> seqsize(parse(FASTA.Record, ">hdr\\nαβγδϵ"))
10
```
"""
function seqsize end
# line is nothing if the reader does not have line information after random IO access.
@noinline function throw_parser_error(
data::Vector{UInt8},
p::Integer,
line::Union{Integer, Nothing}
)
byte = data[p]
# These bytes are printable in the Julia REPL as single chars e.g. "\t"
bytestr = if byte in 0x07:0x13 || byte == 0x1b || byte in 0x20:0x7e
''' * Char(byte) * '''
else
repr(byte)
end
# These chars do not need escaping, e.g. '!', but not '\t'.
bytestr = in(byte, 0x20:0x7e) ? bytestr : escape_string(bytestr)
buf = IOBuffer()
print(
buf,
"Error when parsing FASTX file. Saw unexpected byte ",
bytestr
)
if line !== nothing
print(buf, " on line ", string(line))
        # Compute the column if possible, by looking at the last '\n'.
        # It may not be possible, because the last newline may lie outside the data buffer `data`.
lastnewline = findprev(isequal(UInt8('\n')), data, p)
if lastnewline !== nothing
col = p - lastnewline
print(buf, " col ", string(col))
end
end
error(String(take!(buf)))
end
# Truncate to at most `len` chars.
function truncate(s::AbstractString, len::Integer)
if length(s) > len
String(first(s, len-1) * '…')
else
return s
end
end
function memcmp(p1::Ptr, p2::Ptr, n::Integer)
return ccall(:memcmp, Cint, (Ptr{Cvoid}, Ptr{Cvoid}, Csize_t), p1, p2, n)
end
function appendfrom!(dst::Vector{UInt8}, dpos::Integer, src::Vector{UInt8}, spos::Integer, n::Integer)
if length(dst) < dpos + n - 1
resize!(dst, dpos + n - 1)
end
copyto!(dst, dpos, src, spos, n)
return dst
end
CONTEXT = Automa.CodeGenContext(
generator=:goto,
vars=Automa.Variables(;p=:p, p_end=:p_end, cs=:cs, data=:data, mem=:mem, byte=:byte)
)
include("fasta/fasta.jl")
include("fastq/fastq.jl")
const Record = Union{FASTA.Record, FASTQ.Record}
# Generic methods
function identifier(record::Record)::StringView
return StringView(view(record.data, 1:Int(record.identifier_len)))
end
function description(record::Record)::StringView
return StringView(view(record.data, 1:Int(record.description_len)))
end
import .FASTA: FASTA, validate_fasta, Index, faidx, index!, extract, validate_fasta, seekrecord
import .FASTQ: FASTQ, quality, quality_scores, quality_header!, QualityEncoding, validate_fastq
function FASTA.Record(record::FASTQ.Record)
slen = seqsize(record)
dlen = record.description_len
FASTA.Record(record.data[1:slen+dlen], record.identifier_len, dlen, slen)
end
"""
copy!(::FASTA.Record, ::FASTQ.Record)
Copy the content of the FASTQ record into the FASTA record.
"""
function Base.copy!(dst::FASTA.Record, src::FASTQ.Record)
dlen = src.description_len
slen = seqsize(src)
tlen = UInt(dlen + slen)
dstdata = dst.data
length(dstdata) < tlen && resize!(dstdata, tlen)
copyto!(dstdata, 1, src.data, 1, tlen)
dst.identifier_len = src.identifier_len
dst.description_len = dlen
dst.sequence_len = slen
dst
end
Base.parse(::Type{T}, s::AbstractString) where {T <: Record} = parse(T, String(s))
Base.parse(::Type{T}, s::Union{String, SubString{String}}) where {T <: Record} = parse(T, codeunits(s))
"Get the indices of `data` that correspond to sequence indices `part`"
function seq_data_part(record::Record, part::AbstractUnitRange)
start, stop = first(part), last(part)
(start < 1 || stop > seqsize(record)) && throw(BoundsError(record, start:stop))
Int(record.description_len) + start:Int(record.description_len) + stop
end
sequence(record::Record, part::UnitRange{Int}=1:seqsize(record)) = sequence(StringView, record, part)
function sequence(::Type{StringView}, record::Record, part::UnitRange{Int}=1:seqsize(record))
return StringView(view(record.data, seq_data_part(record, part)))
end
function sequence(
::Type{String},
record::Record,
part::UnitRange{Int}=1:seqsize(record)
)::String
return String(record.data[seq_data_part(record, part)])
end
const FASTARecord = FASTA.Record
const FASTQRecord = FASTQ.Record
const FASTAReader = FASTA.Reader
const FASTQReader = FASTQ.Reader
const FASTAWriter = FASTA.Writer
const FASTQWriter = FASTQ.Writer
if !isdefined(Base, :get_extension)
include("../ext/BioSequencesExt.jl")
end
include("workload.jl")
export
FASTA,
FASTQ,
FASTARecord,
FASTAReader,
FASTAWriter,
validate_fasta,
FASTQRecord,
FASTQReader,
FASTQWriter,
validate_fastq,
identifier,
description,
sequence,
seqsize,
quality,
quality_scores,
quality_header!,
QualityEncoding,
Index,
faidx,
index!,
extract,
seekrecord
end # module
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 1218 | using PrecompileTools: @setup_workload, @compile_workload
@setup_workload begin
fasta_path = joinpath(dirname(@__DIR__), "test", "data", "test.fasta")
fastq_path = joinpath(dirname(@__DIR__), "test", "data", "test.fastq")
fasta = read(fasta_path, String)
fastq = read(fastq_path, String)
@compile_workload begin
records = (
parse(FASTA.Record, fasta),
parse(FASTQ.Record, fastq)
)
for record in records
identifier(record)
description(record)
seqsize(record)
sequence(record)
            sequence(String, record)
end
# FASTQ specific
record::FASTQ.Record = last(records)
quality(record)
collect(quality_scores(record))
open(validate_fasta, fasta_path)
open(validate_fastq, fastq_path)
open(collect, FASTAReader, fasta_path)
open(collect, FASTQReader, fastq_path)
ind = open(faidx, fasta_path)
rdr = FASTAReader(open(fasta_path); index=ind)
extract(rdr, "abc", 2:3)
seekrecord(rdr, "abc")
close(rdr)
end
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 618 | # FASTA File Format
# =================
"""
FASTA
Module under FASTX with code related to FASTA files.
"""
module FASTA
using Automa: Automa, @re_str, @mark, @markpos, @relpos, @abspos, onenter!, onexit!, onall!
import BioGenerics: BioGenerics
import StringViews: StringView
import TranscodingStreams: TranscodingStreams, TranscodingStream, NoopStream
import ..FASTX: identifier, description, sequence, seqsize, truncate, memcmp, appendfrom!, CONTEXT, throw_parser_error
const Re = Automa.RegExp
include("record.jl")
include("readrecord.jl")
include("index.jl")
include("reader.jl")
include("writer.jl")
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 11399 | # FASTA Index
# ===========
#
# Index for random access to FASTA files.
#
# This file is a part of BioJulia.
# License is MIT: https://github.com/BioJulia/BioSequences.jl/blob/master/LICENSE.md
"""
Index(src::Union{IO, AbstractString})
FASTA index object, which allows constant-time seeking of FASTA files by name.
The index is assumed to be in FAI format.
Notable methods:
* `Index(::Union{IO, AbstractString})`: Read FAI file from IO or file at path
* `write(::IO, ::Index)`: Write index in FAI format
* `faidx(::IO)::Index`: Index FASTA file
* `seekrecord(::Reader, ::AbstractString)`: Go to position of seq
* `extract(::Reader, ::AbstractString)`: Extract part of sequence
Note that the FAI specs are stricter than FASTX.jl's definition of FASTA,
such that some valid FASTA records may not be indexable.
See the specs at: http://www.htslib.org/doc/faidx.html
See also: [`FASTA.Reader`](@ref)
# Examples
```jldoctest
julia> src = IOBuffer("seqname\\t9\\t14\\t6\\t8\\nA\\t1\\t3\\t1\\t2");
julia> fna = IOBuffer(">A\\nG\\n>seqname\\nACGTAC\\r\\nTTG");
julia> rdr = FASTA.Reader(fna; index=src);
julia> seekrecord(rdr, "seqname");
julia> sequence(String, first(rdr))
"ACGTACTTG"
```
"""
struct Index
# Vector index for the record's sequence by header: See above specification
names::Dict{String, Int}
lengths::Vector{Int}
offsets::Vector{Int}
# Upper bit is linewidth - linebases - 1, whose only valid values
# are 0 or 1.
encoded_linebases::Vector{UInt}
# According to specs, the index need not be ordered by the offset in the FASTA
# file. However, we make sure the Index object is, because it makes seeking easier.
function Index(
names::Dict{String, Int},
lengths::Vector{Int},
offsets::Vector{Int},
encoded_linebases::Vector{UInt}
)
issorted(offsets) && return new(names, lengths, offsets, encoded_linebases)
perm = sortperm(offsets)
new(
Dict(name => perm[i] for (name, i) in names),
lengths[perm],
offsets[perm],
encoded_linebases[perm]
)
end
end
function linebases_width(index::Index, i::Integer)
enc = index.encoded_linebases[i]
linebases = (enc % Int) & typemax(Int)
linewidth = linebases + 1 + (enc ≥ (typemin(Int) % UInt))
(linebases, linewidth)
end
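# Worked example of the packing above (illustrative values, comment only):
#   UInt(70)                          encodes 70 bases/line with a "\n" ending
#   UInt(70) | (typemin(Int) % UInt)  encodes 70 bases/line with a "\r\n" ending
# linebases_width then recovers (70, 71) and (70, 72) respectively.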
function Base.show(io::IO, index::Index)
print(io, summary(index) * ":\n")
nrows = min(10, length(index.names))
elided = nrows < length(index.names)
names = Vector{String}(undef, nrows)
found = 0
for (name, i) in index.names
if i ≤ nrows
names[i] = name
found += 1
end
found == nrows && break
end
for i in 1:nrows
print(io, " ")
writeline(io, index, i, names)
# Do not write trailing newline
if elided || i < nrows
write(io, UInt8('\n'))
end
end
elided && print(io, '⋮')
end
index_machine = let
newline = let
lf = onenter!(re"\n", :countline)
Re.opt('\r') * lf
end
# The specs refer to the SAM specs, which contain this regex
name = onexit!(onenter!(re"[0-9A-Za-z!#$%&+./:;?@^_|~-][0-9A-Za-z!#$%&*+./:;=?@^_|~-]*", :mark), :name)
number = onexit!(onall!(re"[0-9]+", :digit), :number)
line = name * re"\t" * number * re"\t" * number * re"\t" * number * re"\t" * number
fai = Re.opt(line * Re.rep(newline * line)) * Re.rep(newline)
Automa.compile(fai)
end
index_actions = Dict{Symbol, Expr}(
:mark => :(start = p),
:countline => :(linenum += 1),
:name => quote
let n = p - start
name = unsafe_string(pointer(data, start), n)
names[name] = linenum
end
end,
:digit => quote
num2 = 10*num + (byte - 0x30)
if num2 < num
error("Integer overflow on line " * string(linenum))
end
num = num2
end,
:number => quote
nnum = mod1(nnum + 1, 4)
if nnum == 1
push!(vectors[1], num)
elseif nnum == 2
num < 2 && error("First offset must be at least 2 in a valid FAI index")
push!(vectors[2], num)
# Number of basepairs per line obviously cannot exceed the sequence length.
elseif nnum == 3
if num > vectors[1][end]
error("Bases per line exceed sequence length on line ", string(linenum))
end
linebases = num
# Linewidth is linebases plus the length of the line terminator.
# Since we only accept \n and \r\n as line terminator, validate
# linewidth is linebases +1 or +2.
elseif nnum == 4
if num ∉ (linebases+1, linebases+2)
error("Linewidth must be equal to linebases +1 or +2 at line ", string(linenum))
end
# Encode linebases
encoded_linebases = (linebases % UInt)
encoded_linebases |= ifelse(num == linebases+1, UInt(0), typemin(Int) % UInt)
push!(vectors[3], encoded_linebases)
end
num = 0
end,
)
@noinline function throw_index_error(data::Vector{UInt8}, linenum::Integer, p::Integer)
p_newline = findprev(isequal(UInt8('\n')), data, p)
offset = p_newline === nothing ? 0 : Int(p_newline)
col = p - offset
error("Error when parsing FAI file: Unexpected byte at index $p (line $linenum col $col)")
end
@eval function read_faidx(data::Vector{UInt8})
start = 0
linenum = 1
names = Dict{String, Int}()
num = num2 = 0
nnum = 0
linebases = 0
linebases_num = 0
vectors = (Int[], Int[], UInt[])
$(Automa.generate_code(CONTEXT, index_machine, index_actions))
return Index(names, vectors...)
end
Index(io::IO) = read_faidx(read(io))
Index(filepath::AbstractString) = open(i -> Index(i), filepath)
function writeline(io::IO, index::Index, line::Integer, names::Vector{String})
(linebases, linewidth) = linebases_width(index, line)
write(io,
names[line], UInt8('\t'),
string(index.lengths[line]), UInt8('\t'),
string(index.offsets[line]), UInt8('\t'),
string(linebases), UInt8('\t'),
string(linewidth),
)
end
function Base.write(io::IO, index::Index)
# Put names dict in a sorted array
names = Vector{String}(undef, length(index.names))
for (name, i) in index.names
names[i] = name
end
n = 0
for i in eachindex(names)
n += writeline(io, index, i, names)
n += write(io, UInt8('\n'))
end
n
end
index_fasta_actions = Dict(
:mark => :(@mark),
:countline => :(linenum += 1),
:identifier => quote
identifier = unsafe_string(pointer(data, @markpos), p - @markpos)
end,
# The description itself is not stored in fai files. The newline kind (\n or \r\n)
# is consistent within one record, and since this is the record's first newline, we detect it here
:description => quote
uses_rn_newline = byte == UInt8('\r')
no_more_seqlines = false
# +1 for > symbol, +1 for newline, +1 if \r is used
offset += p - @markpos() + uses_rn_newline + 2
last_offset = offset
end,
:seqline => quote
# Validate the line terminator is the same, i.e. no sequence may mix \r\n and \n
if p < p_end && (uses_rn_newline ⊻ (byte == UInt8('\r')))
error("Line endings must be same within one record to index, but is not on line ", string(linenum))
end
# Validate sequence length is the same for all lines
let current_seqwidth = p - @markpos
# If on first line, seqwidth is -1, we set it correctly
if seqwidth == -1
seqwidth = current_seqwidth
# If we are not supposed to see more lines, or the next line
# is longer than expected, error
elseif no_more_seqlines || current_seqwidth > seqwidth
error("Sequence line width must be consistent to index, but is not on line ", string(linenum))
# If we see a shorter line, then it must be the last line
elseif current_seqwidth < seqwidth
no_more_seqlines = true
end
offset += current_seqwidth + 1 + uses_rn_newline
seqlen += current_seqwidth
end
end,
:record => quote
record_count += 1
names[identifier] = record_count
push!(lengths, seqlen)
push!(offsets, last_offset)
enc_linebases = (seqwidth % UInt)
enc_linebases |= ifelse(uses_rn_newline, typemin(Int) % UInt, UInt(0))
push!(encoded_linebases, enc_linebases)
seqwidth = -1
seqlen = 0
end
)
initcode = quote
names = Dict{String, Int}()
lengths = Int[]
offsets = Int[]
encoded_linebases = UInt[]
offset = 0
last_offset = 0
seqwidth = -1
seqlen = 0
linenum = 1
uses_rn_newline = false
# Marks whether the current seqline must be the last in the record
# (which is the case if its shorter than the previous)
no_more_seqlines = false
record_count = 0
end
returncode = quote
if cs < 0
throw_parser_error(data, p, linenum)
end
return Index(names, lengths, offsets, encoded_linebases)
end
Automa.generate_reader(
:faidx_,
machine,
actions = index_fasta_actions,
context = CONTEXT,
initcode = initcode,
returncode = returncode
) |> eval
# Set the reading position of `io` to the starting position of the record `name`.
function seekrecord(io::IO, index::Index, name::AbstractString)
seekrecord(io, index, index.names[name])
end
# To seek to record i, we compute the end of record i-1 from its index entry: that position is the '>' starting record i
function seekrecord(io::IO, index::Index, i::Integer)
i == 1 && return seekstart(io)
linebases, linewidth = linebases_width(index, i-1)
len = index.lengths[i-1]
prev_offset = index.offsets[i-1]
nlines = cld(len, linebases)
offset = prev_offset + len + nlines * (linewidth - linebases)
seek(io, offset)
return nothing
end
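# Worked example (hypothetical index entry, comment only): if record i-1 has
# len = 100, offset = 5, linebases = 60 and linewidth = 61 (plain "\n"), then
# nlines = cld(100, 60) = 2 and we seek to 5 + 100 + 2 * (61 - 60) = 107,
# which is the '>' byte that opens record i.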
# Note: Current implementation relies on the IO being a NoopStream exactly,
# no other transcoding stream will do.
# This is because an indexer needs to find the absolute position of the mark
# in the stream, and this is, as far as I can tell, not supported in Automa.
# As a hacky workaround, I reach into the internals of NoopStream in the
# action dict code.
"""
faidx(io::IO)::Index
Read a `FASTA.Index` from `io`.
See also: [`Index`](@ref)
# Examples
```jldoctest
julia> ind = faidx(IOBuffer(">ab\\nTA\\nT\\n>x y\\nGAG\\nGA"))
Index:
ab 3 4 2 3
x 5 14 3 4
```
"""
faidx(x::IO) = faidx_(NoopStream(x))
faidx(x::NoopStream) = faidx_(x)
# High-level interface - not sure on this yet!
"""
faidx(fnapath::AbstractString, [idxpath::AbstractString], check=true)
Index FASTA path at `fnapath` and write index to `idxpath`.
If `idxpath` is not given, default to same name as `fnapath * ".fai"`.
If `check`, throw an error if the output file already exists
See also: [`Index`](@ref)
"""
function faidx(fnapath::AbstractString, faidxpath::AbstractString; check::Bool=true)
check && ispath(faidxpath) && error("Output path $faidxpath already exists")
index = open(faidx, fnapath)
open(i -> write(i, index), faidxpath, "w")
index
end
faidx(path::AbstractString; check::Bool=true) = faidx(path, path * ".fai"; check=check)
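# Usage sketch (comment only; the path "seqs.fna" and record name "chr1" are
# hypothetical — any indexable FASTA file on disk will do):
#
#   index = faidx("seqs.fna")                    # also writes "seqs.fna.fai"
#   rdr = Reader(open("seqs.fna"); index=index)
#   seekrecord(rdr, "chr1")                      # jump straight to record "chr1"
#   rec = first(rdr)
#   close(rdr)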
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 7639 | # FASTA Reader
# ============
"""
FASTA.Reader(input::IO; index=nothing, copy::Bool=true)
Create a buffered data reader of the FASTA file format.
The reader is a `BioGenerics.IO.AbstractReader`, a stateful iterator of `FASTA.Record`.
Readers take ownership of the underlying IO. Mutating or closing the underlying IO
other than through the reader is undefined behaviour.
Closing the Reader also closes the underlying IO.
See more examples in the FASTX documentation.
See also: [`FASTA.Record`](@ref), [`FASTA.Writer`](@ref)
# Arguments
* `input`: data source
* `index`: Optional random access index (currently *fai* is supported).
`index` can be `nothing`, a `FASTA.Index`, or an `IO` in which case an index will
be parsed from the IO, or `AbstractString`, in which case it will be treated as a path
to a fai file.
* `copy::Bool`: iterating returns fresh copies instead of the same Record. Set to `false`
for improved performance, but be wary that iterating mutates records.
# Examples
```jldoctest
julia> rdr = FASTAReader(IOBuffer(">header\\nTAG\\n>another\\nAGA"));
julia> records = collect(rdr); close(rdr);
julia> foreach(println, map(identifier, records))
header
another
julia> foreach(println, map(sequence, records))
TAG
AGA
```
"""
mutable struct Reader{S <: TranscodingStream} <: BioGenerics.IO.AbstractReader
stream::S
automa_state::Int
# set to typemin(Int) if reader uses seek, then the linenum is
# irreversibly lost.
encoded_linenum::Int
index::Union{Index, Nothing}
record::Record
copy::Bool
function Reader{T}(io::T, index::Union{Index, Nothing}, copy::Bool) where {T <: TranscodingStream}
record = Record(Vector{UInt8}(undef, 2048), 0, 0, 0)
new{T}(io, 1, 1, index, record, copy)
end
end
function Reader(io::TranscodingStream; index::Union{Index, Nothing, IO, AbstractString}=nothing, copy::Bool=true)
index!(Reader{typeof(io)}(io, nothing, copy), index)
end
Reader(io::IO; kwargs...) = Reader(NoopStream(io); kwargs...)
"""
index!(r::FASTA.Reader, ind::Union{Nothing, Index, IO, AbstractString})
Set the index of `r`, and return `r`.
If `ind` isa `Union{Nothing, Index}`, directly set the index to `ind`.
If `ind` isa `IO`, parse the index from the FAI-formatted IO first.
If `ind` isa `AbstractString`, treat it as the path to a FAI file to parse.
See also: [`Index`](@ref), [`FASTA.Reader`](@ref)
"""
function index! end
index!(reader::Reader, index::Union{Index, Nothing}) = (reader.index = index; reader)
index!(reader::Reader, index::Union{IO, AbstractString}) = (reader.index = Index(index); reader)
function Base.iterate(rdr::Reader, state=nothing)
(cs, f) = _read!(rdr, rdr.record)
if !f
iszero(cs) && return nothing
# Make sure reader's record is not left invalid
empty!(rdr.record)
error("Unexpected end of file when reading FASTA record")
end
return if rdr.copy
(copy(rdr.record), nothing)
else
(rdr.record, nothing)
end
end
function Base.read!(rdr::Reader, rec::Record)
(cs, f) = _read!(rdr, rec)
if !f
cs == 0 && throw(EOFError())
throw(ArgumentError("malformed FASTA file"))
end
return rec
end
function _read!(rdr::Reader, rec::Record)
enc_linenum = rdr.encoded_linenum
cs, ln, found = readrecord!(rdr.stream, rec, (rdr.automa_state, enc_linenum))
rdr.automa_state = cs
# If enc_linenum is < 0, then it was unknown when entering readrecord!,
# and so ln is meaningless.
enc_linenum > 0 && (rdr.encoded_linenum = ln)
return (cs, found)
end
function Base.eltype(::Type{<:Reader})
return Record
end
function BioGenerics.IO.stream(reader::Reader)
return reader.stream
end
Base.close(reader::Reader) = close(reader.stream)
function Base.getindex(reader::Reader, name::AbstractString)
seekrecord(reader, name)
record = Record()
cs, _, found = readrecord!(NoopStream(reader.stream), record, (1, 1))
@assert cs ≥ 0 && found
return record
end
"""
seekrecord(reader::FASTAReader, i::Union{AbstractString, Integer})
Seek `Reader` to the `i`'th record. The next iterated record will be the `i`'th record.
`i` can be the identifier of a sequence, or the 1-based record number in the `Index`.
The `Reader` needs to be indexed for this to work.
"""
function seekrecord end
function seekrecord(reader::Reader, name::AbstractString)
index = reader.index
if index === nothing
throw(ArgumentError("no index attached"))
end
seekrecord(reader, index.names[name])
end
function seekrecord(reader::Reader, i::Integer)
index = reader.index
index === nothing && error("no index attached")
seekrecord(reader.stream, index, i)
reader.automa_state = 1
# Make linenum unrecoverably lost
reader.encoded_linenum = typemin(Int)
nothing
end
"""
extract(reader::Reader, name::AbstractString, range::Union{Nothing, UnitRange})
Extract a subsequence given by index `range` from the sequence named `name` in an
indexed `Reader`. Returns a `String`.
If `range` is nothing (the default value), return the entire sequence.
"""
function extract(
reader::Reader,
name::AbstractString,
range::Union{Nothing, AbstractUnitRange{<:Integer}}=nothing
)
# Validate that the reader has an index, that the index contains the sequence,
# and that the range is in bounds
index = reader.index
if index === nothing
throw(ArgumentError("no index attached"))
end
index_of_name = index.names[name]
len = index.lengths[index_of_name]
checked_range = if range !== nothing
checkbounds(1:len, range)
isempty(range) && return ""
range
else
1:len
end
total_bases = length(checked_range)
# Load all required bytes into a buffer, including newlines
(linebases, linewidth) = linebases_width(index, index_of_name)
len_newline = linewidth - linebases
(start_lineind_z, start_lineoff_z) = divrem(first(checked_range) - 1, linebases)
start_offset = start_lineind_z * linewidth + start_lineoff_z
(stop_lineind_z, stop_lineoff_z) = divrem(last(checked_range), linebases)
stop_offset = stop_lineind_z * linewidth + stop_lineoff_z
until_first_newline = linebases - start_lineoff_z
buffer = Vector{UInt8}(undef, stop_offset - start_offset)
start_file_offset = index.offsets[index_of_name] + start_offset
seek(reader.stream, start_file_offset)
read!(reader.stream, buffer)
# Now remove newlines in buffer by shifting the non-newline content
remaining = total_bases - until_first_newline
write_index = until_first_newline + 1
read_index = write_index + len_newline
while remaining > 0
n = min(linebases, remaining)
copyto!(buffer, write_index, buffer, read_index, n)
write_index += n
read_index += n + len_newline
remaining -= n
end
# After having removed newlines, we shrink buffer to fit
resize!(buffer, total_bases)
# Now check that there are no bad bytes in our buffer
# Note: The disallowed bytes must correspond to the allowed bytes in
# the FASTA machine to ensure we can seek the same FASTA files we can read
bad_byte = false
for byte in buffer
bad_byte |= (
(byte === UInt8('\r')) |
(byte === UInt8('\n')) |
(byte === UInt8('>'))
)
bad_byte && error("Invalid byte in FASTA sequence line: '>', '\\r' or '\\n'")
end
# Return the Reader to a usable state after having messed with its
# underlying IO, then return result
seekrecord(reader, index_of_name)
return String(buffer)
end
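# Worked example of the newline-removal loop above (comment only): take a record
# whose sequence is "ABCDEFGHIJKL" wrapped at linebases = 6 with "\n" endings,
# and extract bases 5:12. The buffer is loaded with the bytes "EF\nGHIJKL\n";
# until_first_newline = 2, so the loop copies "GHIJKL" one slot left over the
# newline, and after resizing to total_bases = 8 we return "EFGHIJKL".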
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 4398 | # Automa.jl generated readrecord! function
# ========================================
# The current implementation of the machine has the following debatable choices
# * You can put anything except \r and \n in the description, including trailing
# whitespace.
# * You can put anything in the sequence lines except \n, \r and '>'.
# The sequence must include at least one newline, i.e. ">A>A" is not valid FASTA,
# but ">A\n>A\n" is. The newlines are not considered part of the sequence lines themselves.
# This implies all whitespace except newlines, including trailing whitespace, is part
# of the sequence.
machine = let
hspace = re"[ \t\v]"
newline = let
lf = onenter!(re"\n", :countline)
Re.opt('\r') * lf
end
space = hspace | newline
# Identifier: Leading non-space
identifier = onexit!(onenter!(Re.rep(Re.any() \ Re.space()), :mark), :identifier)
# The description here includes trailing whitespace.
# This is needed for the FSM: since the description can contain arbitrary
# whitespace, the only way to know the description ends is to encounter a newline.
# NB: Make sure to also change the Index machine to match this if you change it.
description = onexit!(identifier * Re.opt(hspace * re"[^\r\n]*"), :description)
# Header: '>' then description
header = re">" * description
# Sequence line: Anything except \r, \n and >
# Note: Must be consistent with the disallowed bytes in reader.jl used for seeking
sequence_line = onexit!(onenter!(re"[^\n\r>]+", :mark), :seqline)
# Sequence: This is intentionally very liberal with whitespace.
# Any trailing whitespace is simply considered part of the sequence.
# Is this bad? Maybe.
sequence = Re.rep1(Re.opt(sequence_line) * Re.rep1(newline))
# We have sequence_eof to allow the final sequence to not end in whitespace
sequence_eof = Re.opt(sequence_line) * Re.rep(Re.rep1(newline) * Re.opt(sequence_line))
record = onexit!(header * newline * sequence, :record)
record_eof = onexit!(header * newline * sequence_eof, :record)
fasta = Re.rep(space) * Re.rep(record) * Re.opt(record_eof)
Automa.compile(fasta)
end
actions = Dict(
:mark => :(@mark),
:countline => :(linenum += 1),
:identifier => :(record.identifier_len = Int32(@relpos(p-1))),
# Append entire header line to buffer from pos 1
:description => quote
let n = @relpos(p-1)
appendfrom!(record.data, 1, data, @markpos, n)
filled += n
record.description_len = Int32(n)
end
end,
# Append sequence line to buffer
:seqline => quote
let n = @relpos(p-1)
appendfrom!(record.data, filled + 1, data, @markpos, n)
filled += n
record.sequence_len += n
end
end,
:record => quote
found = true
@escape
end
)
initcode = quote
pos = 0
filled = 0
found = false
empty!(record)
cs, linenum = state
end
loopcode = quote
if cs < 0
throw_parser_error(data, p, linenum < 0 ? nothing : linenum)
end
found && @goto __return__
end
returncode = :(return cs, linenum, found)
Automa.generate_reader(
:readrecord!,
machine,
arguments = (:(record::Record), :(state::Tuple{Int,Int})),
actions = actions,
context = CONTEXT,
initcode = initcode,
loopcode = loopcode,
returncode = returncode
) |> eval
validator_actions = Dict(k => quote nothing end for k in keys(actions))
validator_actions[:countline] = :(linenum += 1)
Automa.generate_reader(
:validate_fasta,
machine,
arguments = (),
actions= validator_actions,
context = CONTEXT,
initcode = :(linenum = 1),
loopcode = :(cs < 0 && return linenum),
returncode = :(iszero(cs) ? nothing : linenum)
) |> eval
# Currently this returns the line number if the input is invalid, but we might remove
# line numbers from the readers, since this state cannot be kept when seeking.
"""
validate_fasta(io::IO) >: Nothing
Check if `io` is a valid FASTA file.
Return `nothing` if it is, and an instance of another type if not.
# Examples
```jldoctest
julia> validate_fasta(IOBuffer(">a bc\\nTAG\\nTA")) === nothing
true
julia> validate_fasta(IOBuffer(">a bc\\nT>G\\nTA")) === nothing
false
```
"""
validate_fasta(io::IO) = validate_fasta(NoopStream(io)) | FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 5238 | # FASTA Record
# ============
"""
FASTA.Record
Mutable struct representing a FASTA record as parsed from a FASTA file.
The content of the record can be queried with the following functions:
`identifier`, `description`, `sequence`.
FASTA records are un-typed, i.e. they are agnostic to what kind of data they contain.
See also: [`FASTA.Reader`](@ref), [`FASTA.Writer`](@ref)
# Examples
```jldoctest
julia> rec = parse(FASTARecord, ">some header\\nTAqA\\nCC");
julia> identifier(rec)
"some"
julia> description(rec)
"some header"
julia> sequence(rec)
"TAqACC"
julia> typeof(description(rec)) == typeof(sequence(rec)) <: AbstractString
true
```
"""
mutable struct Record
# Data contains the description, then the sequence immediately after
# without newlines or the initial '>' symbol, and then any unused trailing bytes
data::Vector{UInt8}
# These lengths are in bytes, not chars
# Identifier is data[1:identifier_len]
identifier_len::Int32
# Description is data[1:description_len], i.e. it includes the identifier
description_len::Int32
# Sequence is data[description_len+1 : description_len+sequence_len]
sequence_len::Int
end
filled(x::Record) = Int(x.description_len) + Int(x.sequence_len)
@inline seqsize(record::Record)::Int = record.sequence_len
"""
FASTA.Record()
Create the default FASTA record.
"""
function Record()
return Record(Vector{UInt8}(), 0, 0, 0)
end
function Base.empty!(record::Record)
# Do not truncate the underlying data buffer
record.identifier_len = 0
record.description_len = 0
record.sequence_len = 0
return record
end
function Base.parse(::Type{Record}, data::AbstractVector{UInt8})
# Error early on empty data to not construct buffers
isempty(data) && throw(ArgumentError("Cannot parse empty string as FASTA record"))
record = Record(Vector{UInt8}(undef, sizeof(data)), 0, 0, 0)
stream = NoopStream(IOBuffer(data), bufsize=sizeof(data))
cs, _, found = readrecord!(stream, record, (1, 1))
# If found is not set, then the data terminated early
found || throw(ArgumentError("Incomplete FASTA record"))
# In this case, the machine ran out of data exactly after one record
p = stream.state.buffer1.bufferpos
p > sizeof(data) && iszero(cs) && return record
# Else, we check all trailing data to see it contains only \r\n
for i in p-1:sizeof(data)
if !in(data[i], (UInt8('\r'), UInt8('\n')))
throw(ArgumentError("Invalid trailing data after FASTA record"))
end
end
return record
end
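# Illustrative examples of the trailing-data rule above (comment only):
#   parse(Record, ">a\nTAG\n\r\n")   # ok — trailing bytes are all \r\n
#   parse(Record, ">a\nTAG\n>b\nT")  # throws — a second record follows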
"""
FASTA.Record(description::AbstractString, sequence)
Create a FASTA record object from `description` and `sequence`.
"""
function Record(description::AbstractString, sequence::AbstractString)
buf = IOBuffer()
print(buf, '>', description, '\n')
# If the sequence is empty, we need to print a newline so the resulting
# FASTA record is not truncated (which would make it invalid)
print(buf, isempty(sequence) ? '\n' : sequence)
return parse(Record, take!(buf))
end
function Base.:(==)(record1::Record, record2::Record)
record1.description_len == record2.description_len || return false
filled1 = filled(record1)
filled1 == filled(record2) || return false
(data1, data2) = (record1.data, record2.data)
GC.@preserve data1 data2 begin
return memcmp(pointer(data1), pointer(data2), filled1) == 0
end
end
function Base.copy(record::Record)
return Record(
record.data[1:filled(record)],
record.identifier_len,
record.description_len,
record.sequence_len
)
end
function Base.copy!(dst::Record, src::Record)
n = filled(src)
length(dst.data) < n && resize!(dst.data, n)
unsafe_copyto!(dst.data, 1, src.data, 1, n)
dst.identifier_len = src.identifier_len
dst.description_len = src.description_len
dst.sequence_len = src.sequence_len
dst
end
function Base.write(io::IO, record::Record)
data = record.data
write(io, UInt8('>'))
GC.@preserve data begin
unsafe_write(io, pointer(data), UInt(record.description_len))
write(io, UInt8('\n'))
unsafe_write(io, pointer(data) + UInt(record.description_len), UInt(record.sequence_len))
end
return filled(record) + 2 # number of bytes
end
function Base.print(io::IO, record::Record)
write(io, record)
return nothing
end
function Base.show(io::IO, record::Record)
print(io,
summary(record), '(', repr(description(record)), ", \"", truncate(sequence(record), 40), "\")"
)
end
function Base.show(io::IO, ::MIME"text/plain", record::Record)
print(io, summary(record), ':')
println(io)
println(io, " description: \"", description(record), '"')
print(io, " sequence: \"", truncate(sequence(record), 40), '"')
end
# TODO: Base's hash does not hash all elements. Do we have a better implementation?
function Base.hash(record::Record, h::UInt)
# The description length is informative of the record's content
# in a way that the sequence length and identifier length aren't.
# I.e. you could have ">A\nAG" vs ">AA\nG"
h = hash(record.description_len, h)
hash(view(record.data, filled(record)), h)
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 2528 | # FASTA Writer
# ============
"""
FASTA.Writer(output::IO; width=70)
Create a data writer of the FASTA file format.
The writer is a `BioGenerics.IO.AbstractWriter`.
Writers take ownership of the underlying IO. Mutating or closing the underlying IO
other than through the writer is undefined behaviour.
Closing the writer also closes the underlying IO.
See more examples in the FASTX documentation.
See also: [`FASTA.Record`](@ref), [`FASTA.Reader`](@ref)
# Arguments
* `output`: Data sink to write to
* `width`: Wrapping width of sequence characters. If < 1, no wrapping.
# Examples
```
julia> FASTA.Writer(open("some_file.fna", "w")) do writer
write(writer, record) # a FASTA.Record
end
```
"""
mutable struct Writer{S <: TranscodingStream} <: BioGenerics.IO.AbstractWriter
output::S
# maximum sequence width (no limit when width ≤ 0)
width::Int
function Writer{S}(output::S, width::Int) where {S <: TranscodingStream}
finalizer(new{S}(output, width)) do writer
@async close(writer.output)
end
end
end
function BioGenerics.IO.stream(writer::Writer)
return writer.output
end
Writer(io::T; width::Integer=70) where {T <: TranscodingStream} = Writer{T}(io, width)
Writer(io::IO; kwargs...) = Writer(NoopStream(io); kwargs...)
function Base.flush(writer::Writer)
# This is, bizarrely, needed for TranscodingStreams for now.
write(writer.output, TranscodingStreams.TOKEN_END)
flush(writer.output)
end
function Base.write(writer::Writer, record::Record)
output = writer.output
width = writer.width
n::Int = 0
# If the writer has no width, we can just write the record on a single line,
# as the default method does
if width ≤ 0 || width ≥ record.sequence_len
n += write(output, record, '\n')
# Else we write it in chunks.
else
data = record.data
GC.@preserve data begin
# Write header
n += write(output, UInt8('>'))
n += unsafe_write(output, pointer(data), record.description_len)
n += write(output, UInt8('\n'))
# Write sequence in a loop of chunks of width bytes
p = pointer(data, record.description_len + 1)
p_end = pointer(data, record.description_len + record.sequence_len)
while p ≤ p_end
w = min(width, p_end - p + 1)
n += unsafe_write(output, p, w)
n += write(output, UInt8('\n'))
p += w
end
end
end
return n
end
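# Worked example of the wrapping loop above (comment only): with the default
# width = 70 and a 100-base sequence, the loop emits a 70-byte chunk and then
# a 30-byte chunk, each followed by '\n', so the sequence spans two lines.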
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 571 | """
FASTQ
Module under FASTX with code related to FASTQ files.
"""
module FASTQ
using Automa: Automa, @re_str, @mark, @markpos, @relpos, @abspos, onenter!, onexit!
import BioGenerics: BioGenerics
import StringViews: StringView
import TranscodingStreams: TranscodingStreams, TranscodingStream, NoopStream
import ..FASTX: identifier, description, sequence, seqsize, truncate, memcmp, appendfrom!, CONTEXT, throw_parser_error
const Re = Automa.RegExp
include("quality.jl")
include("record.jl")
include("readrecord.jl")
include("reader.jl")
include("writer.jl")
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 2838 | # FASTQ Base Quality
# ==================
#
# A representation of positions-specific integer quality scores, as in FASTQ.
#
# This file is a part of BioJulia.
# License is MIT: https://github.com/BioJulia/BioSequences.jl/blob/master/LICENSE.md
"""
QualityEncoding(range::StepRange{Char}, offset::Integer)
FASTQ quality encoding scheme. `QualityEncoding` objects are used to
interpret the quality scores of FASTQ records.
`range` is a range of allowed ASCII chars in the encoding, e.g. `'!':'~'` for
the most common encoding scheme.
The offset is the ASCII offset, i.e. a character with ASCII value `x` encodes
the value `x - offset`.
See also: [`quality_scores`](@ref)
# Examples
```jldoctest
julia> read = parse(FASTQ.Record, "@hdr\\nAGA\\n+\\nabc");
julia> qe = QualityEncoding('a':'z', 16); # hypothetical encoding
julia> collect(quality_scores(read, qe)) == [Int8(i) - 16 for i in "abc"]
true
```
"""
struct QualityEncoding
# Lowest/highest acceptable ASCII byte
low::Int8
high::Int8
# ASCII byte offset, i.e. 33 for standard PHRED scores
offset::Int8
function QualityEncoding(ascii::StepRange{Char}, offset::Integer)
isone(step(ascii)) || error("Must use an ordinal Char range with step 1")
off = Int8(offset)
(low, high) = (Int8(first(ascii)), Int8(last(ascii)))
if low > high
error("Quality encoding range must be nonempty")
elseif high > 127
error("Quality encoding only works with ASCII charsets")
elseif offset < 0
error("Quality offset must be non-negative")
else
return new(low, high, off)
end
end
end
"Sanger (Phred+33) quality score encoding"
const SANGER_QUAL_ENCODING = QualityEncoding('!':'~', 33)
"Solexa (Solexa+64) quality score encoding"
const SOLEXA_QUAL_ENCODING = QualityEncoding(';':'~', 64)
"Illumina 1.3 (Phred+64) quality score encoding"
const ILLUMINA13_QUAL_ENCODING = QualityEncoding('@':'~', 64)
"Illumina 1.5 (Phred+64) quality score encoding"
const ILLUMINA15_QUAL_ENCODING = QualityEncoding('B':'~', 64)
"Illumina 1.8 (Phred+33) quality score encoding"
const ILLUMINA18_QUAL_ENCODING = QualityEncoding('!':'~', 33)
const DEFAULT_ENCODING = SANGER_QUAL_ENCODING
@noinline function throw_decoding_error(encoding::QualityEncoding, ascii::Integer)
error("Quality $ascii not in encoding range $(encoding.low):$(encoding.high)")
end
@inline function decode_quality(encoding::QualityEncoding, quality::Integer)
check_quality(encoding, quality) || throw_decoding_error(encoding, quality)
# We just checked it's in 0:127, so can use unsafe truncation
(quality % Int8) - encoding.offset
end
@inline function check_quality(encoding::QualityEncoding, quality::Integer)
(quality ≥ encoding.low) & (quality ≤ encoding.high)
end
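# Worked example (comment only): under the default Sanger encoding
# ('!':'~' with offset 33), the byte 'I' (ASCII 73) decodes to the PHRED
# score Int8(40), since 73 - 33 == 40; any byte outside '!':'~' throws.
#   decode_quality(SANGER_QUAL_ENCODING, UInt8('I')) == Int8(40)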
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 2484 | # FASTQ Reader
# ============
"""
FASTQ.Reader(input::IO; copy::Bool=true)
Create a buffered data reader of the FASTQ file format.
The reader is a `BioGenerics.IO.AbstractReader`, a stateful iterator of `FASTQ.Record`.
Readers take ownership of the underlying IO. Mutating or closing the underlying IO
other than through the reader is undefined behaviour.
Closing the Reader also closes the underlying IO.
See more examples in the FASTX documentation.
See also: [`FASTQ.Record`](@ref), [`FASTQ.Writer`](@ref)
# Arguments
* `input`: data source
* `copy::Bool`: iterating returns fresh copies instead of the same Record. Set to `false`
for improved performance, but be wary that iterating mutates records.
# Examples
```jldoctest
julia> rdr = FASTQReader(IOBuffer("@readname\\nGGCC\\n+\\njk;]"));
julia> record = first(rdr); close(rdr);
julia> identifier(record)
"readname"
julia> sequence(record)
"GGCC"
julia> show(collect(quality_scores(record))) # phred 33 encoding by default
Int8[73, 74, 26, 60]
```
"""
mutable struct Reader{S <: TranscodingStream} <: BioGenerics.IO.AbstractReader
stream::S
automa_state::Int
linenum::Int
record::Record
copy::Bool
function Reader{T}(io::T, copy::Bool) where {T <: TranscodingStream}
record = Record(Vector{UInt8}(undef, 2048), 0, 0, 0)
new{T}(io, 1, 1, record, copy)
end
end
Reader(io::TranscodingStream; copy::Bool=true) = Reader{typeof(io)}(io, copy)
Reader(io::IO; kwargs...) = Reader(NoopStream(io); kwargs...)
function Base.iterate(rdr::Reader, state=nothing)
(cs, f) = _read!(rdr, rdr.record)
if !f
iszero(cs) && return nothing
# Make sure reader's record is not left invalid
empty!(rdr.record)
error("Unexpected end of file when reading FASTQ record")
end
return if rdr.copy
(copy(rdr.record), nothing)
else
(rdr.record, nothing)
end
end
function Base.read!(rdr::Reader, rec::Record)
(cs, f) = _read!(rdr, rec)
if !f
cs == 0 && throw(EOFError())
throw(ArgumentError("malformed FASTQ file"))
end
return rec
end
function _read!(rdr::Reader, rec::Record)
(cs, ln, found) = readrecord!(rdr.stream, rec, (rdr.automa_state, rdr.linenum))
rdr.automa_state = cs
rdr.linenum = ln
return (cs, found)
end
function Base.eltype(::Type{<:Reader})
return Record
end
BioGenerics.IO.stream(reader::Reader) = reader.stream
Base.close(reader::Reader) = close(reader.stream)
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 5813 | machine = let
hspace = re"[ \t\v]"
header1 = let
identifier = onexit!(onenter!(Re.rep(Re.any() \ Re.space()), :mark), :header1_identifier)
# Description here means "after whitespace", not whole line
description = (Re.any() \ Re.space()) * re"[^\r\n]*"
'@' * identifier * Re.opt(Re.rep1(hspace) * Re.opt(description))
end
onexit!(header1, :header1_description)
sequence = onexit!(onenter!(re"[A-z]*", :mark), :sequence)
# The pattern recognized by header2 should be identical to header1
# with the only difference being that h1 is split into identifier
# and description
header2 = let
description2 = onexit!(onenter!(re"[^\r\n]+", :mark), :header2_description)
'+' * Re.opt(description2)
end
quality = onexit!(onenter!(re"[!-~]*", :mark), :quality)
newline = let
lf = onenter!(re"\n", :countline)
Re.opt('\r') * lf
end
record = onexit!(onenter!(header1 * newline * sequence * newline * header2 * newline * quality, :mark), :record)
fastq = Re.opt(record) * Re.rep(newline * record) * Re.opt(newline)
Automa.compile(fastq)
end
actions = Dict(
:mark => :(@mark),
:countline => :(linenum += 1),
# Since the identifier is contained in the description, we just need
# to store the length of the identifier. The bytes are copied in the description action
:header1_identifier => :(record.identifier_len = Int32(@relpos(p-1))),
# Copy description bytes and keep track of how many bytes copied
:header1_description => quote
let n = @relpos(p-1)
appendfrom!(record.data, 1, data, @markpos, n)
filled += n
record.description_len = Int32(n)
end
end,
# Copy sequence bytes and keep track of how many bytes copied
:sequence => quote
let n = @relpos(p-1)
appendfrom!(record.data, filled + 1, data, @markpos, n)
filled += n
record.has_description_seq_len = UInt(n)
end
end,
# Verify the second description is identical to the first,
# and set the top bit in record.has_description_seq_len
:header2_description => quote
let n = @relpos(p-1)
recdata = record.data
# This might look horribly unsafe, and it is.
# But Automa should guarantee that data is under GC preservation, and that the
# pointer p-n is valid. SHOULD, at least.
GC.@preserve recdata begin
if n != record.description_len || !iszero(memcmp(pointer(recdata), pointer(data, p-n), n%UInt))
error("First and second description line not identical")
end
end
record.has_description_seq_len |= (0x80_00_00_00_00_00_00_00 % UInt)
end
end,
# Verify the length of quality and sequence is identical, then copy bytes over
:quality => quote
let n = @relpos(p-1)
n == seqsize(record) || error("Length of quality must be identical to length of sequence")
appendfrom!(record.data, filled + 1, data, @markpos, n)
end
end,
# Break from the loop
:record => quote
found = true
@escape
end,
)
initcode = quote
pos = 0
found = false
filled = 0
empty!(record)
cs, linenum = state
end
loopcode = quote
if cs < 0
throw_parser_error(data, p, linenum)
end
found && @goto __return__
end
returncode = :(return cs, linenum, found)
Automa.generate_reader(
:readrecord!,
machine,
arguments = (:(record::Record), :(state::Tuple{Int,Int})),
actions = actions,
context = CONTEXT,
initcode = initcode,
loopcode = loopcode,
returncode = returncode
) |> eval
validator_actions = Dict(
:mark => :(@mark),
:countline => :(linenum += 1),
:header1_identifier => quote nothing end,
# Copy description to buffer to check if second description is same
:header1_description => quote
let n = @relpos(p-1)
appendfrom!(headerbuffer, 1, data, @markpos, n)
description_len = n
end
end,
# Copy sequence bytes and keep track of how many bytes copied
:sequence => :(sequence_len = @relpos(p-1)),
# Verify the second description is identical to the first,
:header2_description => quote
let n = @relpos(p-1)
n == description_len || return linenum
GC.@preserve headerbuffer begin
iszero(memcmp(pointer(headerbuffer), pointer(data, p-n), n%UInt)) || return linenum
end
end
end,
# Verify the length of quality and sequence is identical
:quality => quote
let n = @relpos(p-1)
n == sequence_len || return linenum
end
end,
:record => quote nothing end
)
initcode = quote
linenum = 1
description_len = 0
sequence_len = 0
headerbuffer = Vector{UInt8}(undef, 1024)
end
Automa.generate_reader(
:validate_fastq,
machine,
arguments = (),
actions= validator_actions,
context = CONTEXT,
initcode = initcode,
loopcode = :(cs < 0 && return linenum),
returncode = :(iszero(cs) ? nothing : linenum)
) |> eval
# Currently this returns the line number if the input is invalid, but we might remove
# line numbers from the readers, since this state cannot be kept when seeking.
"""
validate_fastq(io::IO) >: Nothing
Check if `io` is a valid FASTQ file.
Return `nothing` if it is, and an instance of another type if not.
# Examples
```jldoctest
julia> validate_fastq(IOBuffer("@i1 r1\\nuuag\\n+\\nHJKI")) === nothing
true
julia> validate_fastq(IOBuffer("@i1 r1\\nu;ag\\n+\\nHJKI")) === nothing
false
```
"""
validate_fastq(io::IO) = validate_fastq(NoopStream(io))
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 10054 | # FASTQ Record
# ============
"""
FASTQ.Record
Mutable struct representing a FASTQ record as parsed from a FASTQ file.
The content of the record can be queried with the following functions:
`identifier`, `description`, `sequence`, `quality`
FASTQ records are un-typed, i.e. they are agnostic to what kind of data they contain.
See also: [`FASTQ.Reader`](@ref), [`FASTQ.Writer`](@ref)
# Examples
```jldoctest
julia> rec = parse(FASTQRecord, "@ill r1\\nGGC\\n+\\njjk");
julia> identifier(rec)
"ill"
julia> description(rec)
"ill r1"
julia> sequence(rec)
"GGC"
julia> show(collect(quality_scores(rec)))
Int8[73, 73, 74]
julia> typeof(description(rec)) == typeof(sequence(rec)) <: AbstractString
true
```
"""
mutable struct Record
# Contains: description, then sequence, then quality, then any noncoding bytes.
# All the rest, including newlines and the '@' and '+' symbols, is not stored.
# The second description after + must be identical to the first description.
# The quality is not corrected for offset, i.e. it is stored as it is in the input file
data::Vector{UInt8}
# In bytes, not chars
identifier_len::Int32
description_len::Int32
# Top bit stores whether the description is repeated after the +
has_description_seq_len::UInt
end
@inline seqsize(record::Record)::Int = (record.has_description_seq_len & (typemax(Int) % UInt)) % Int
has_extra_description(record::Record) = record.has_description_seq_len ≥ (typemin(Int) % UInt)
# Number of stored bytes in data field
filled(record::Record) = record.description_len + 2 * seqsize(record)
"""
quality_header!(record::Record, x::Bool)
Set whether the record repeats its header on the quality comment line,
i.e. the line with `+`.
# Examples
```
julia> record = parse(FASTQ.Record, "@A B\\nT\\n+\\nJ");
julia> string(record)
"@A B\\nT\\n+\\nJ"
julia> quality_header!(record, true);
julia> string(record)
"@A B\\nT\\n+A B\\nJ"
```
"""
function quality_header!(record::Record, x::Bool)
bits = record.has_description_seq_len
y = if x
bits | (typemin(Int) % UInt)
else
bits & (typemax(Int) % UInt)
end
record.has_description_seq_len = y
record
end
"""
FASTQ.Record()
Create the default FASTQ record.
"""
Record() = Record(UInt8[], 0, 0, 0)
function Base.empty!(record::Record)
# Do not truncate the underlying data buffer
record.identifier_len = 0
record.description_len = 0
record.has_description_seq_len = 0
return record
end
function Base.parse(::Type{Record}, data::AbstractVector{UInt8})
# Error early on empty data to not construct buffers
isempty(data) && throw(ArgumentError("Cannot parse empty string as FASTQ record"))
record = Record(Vector{UInt8}(undef, sizeof(data)), 0, 0, 0)
stream = NoopStream(IOBuffer(data), bufsize=sizeof(data))
cs, _, found = readrecord!(stream, record, (1, 1))
# If found is not set, then the data terminated early
found || throw(ArgumentError("Incomplete FASTQ record"))
# In this case, the machine ran out of data exactly after one record
p = stream.state.buffer1.bufferpos
p > sizeof(data) && iszero(cs) && return record
# Else, we check all trailing data to see it contains only \r\n
for i in p-1:sizeof(data)
if !in(data[i], (UInt8('\r'), UInt8('\n')))
throw(ArgumentError("Invalid trailing data after FASTQ record"))
end
end
return record
end
"""
FASTQ.Record(description, sequence, quality; offset=33)
Create a FASTQ record from `description`, `sequence` and `quality`.
Arguments:
* `description::AbstractString`
* `sequence::Union{AbstractString, BioSequence}`,
* `quality::Union{AbstractString, Vector{<:Number}}`
* Keyword argument `offset` (if `quality isa Vector`): PHRED offset
"""
function Record(
description::AbstractString,
sequence::AbstractString,
quality::AbstractString
)
seqsize = sequence isa AbstractString ? ncodeunits(sequence) : length(sequence)
if seqsize != ncodeunits(quality)
throw(ArgumentError("Byte length of sequence doesn't match codeunits of quality"))
end
buf = IOBuffer()
print(buf,
'@', description, '\n',
sequence, "\n+\n",
quality
)
parse(Record, take!(buf))
end
function Record(
description::AbstractString,
sequence::AbstractString,
quality::Vector{<:Number};
offset::Integer=33
)
ascii_quality = String([UInt8(q + offset) for q in quality])
Record(description, sequence, ascii_quality)
end
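# Illustrative sketch (comment only): with the default offset of 33, numeric
# PHRED scores are converted to their ASCII form before parsing, e.g.
#   Record("read1", "TAG", [40, 40, 30])   # quality string "II?"
# because Char(40 + 33) == 'I' and Char(30 + 33) == '?'.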
function Base.:(==)(record1::Record, record2::Record)
record1.description_len == record2.description_len || return false
filled1 = filled(record1)
filled1 == filled(record2) || return false
(data1, data2) = (record1.data, record2.data)
GC.@preserve data1 data2 begin
return memcmp(pointer(data1), pointer(data2), filled1) == 0
end
end
function Base.copy(record::Record)
return Record(
record.data[1:filled(record)],
record.identifier_len,
record.description_len,
record.has_description_seq_len
)
end
function Base.copy!(dst::Record, src::Record)
n = filled(src)
length(dst.data) < n && resize!(dst.data, n)
unsafe_copyto!(dst.data, 1, src.data, 1, n)
dst.identifier_len = src.identifier_len
dst.description_len = src.description_len
dst.has_description_seq_len = src.has_description_seq_len
dst
end
function Base.write(io::IO, record::Record)
data = record.data
len = UInt(seqsize(record))
desclen = UInt(record.description_len)
GC.@preserve data begin
# Header line
nbytes = write(io, UInt8('@'))
nbytes += unsafe_write(io, pointer(data), desclen)
nbytes += write(io, '\n')
# Sequence
nbytes += unsafe_write(io, pointer(data) + desclen, len)
# + line, with additional description if applicable
nbytes += write(io, UInt8('\n'), UInt8('+'))
if has_extra_description(record)
nbytes += unsafe_write(io, pointer(data), desclen)
end
# Quality
nbytes += write(io, '\n')
nbytes += unsafe_write(io, pointer(data) + desclen + len, len)
end
return nbytes
end
function Base.print(io::IO, record::Record)
write(io, record)
return nothing
end
function Base.show(io::IO, record::Record)
print(io,
summary(record), '(',
repr(description(record)), ", \"",
truncate(sequence(record), 20), "\", \"",
truncate(quality(record), 20), "\")",
)
end
function Base.show(io::IO, ::MIME"text/plain", record::Record)
println(io, "FASTQ.Record:")
println(io, " description: \"", description(record), '"')
println(io, " sequence: \"", truncate(sequence(record), 40), '"')
print(io, " quality: \"", truncate(quality(record), 40), '"')
end
# Accessor functions
# ------------------
function quality_indices(record::Record, part::UnitRange{<:Integer})
start, stop = first(part), last(part)
(start < 1 || stop > seqsize(record)) && throw(BoundsError(record, start:stop))
offset = record.description_len + seqsize(record)
start+offset:stop+offset
end
"""
quality([T::Type{<:Union{String, StringView}}], record::FASTQ.Record, [part::UnitRange])
Get the ASCII quality of `record` at positions `part` as type `T`.
If not passed, `T` defaults to `StringView`.
If not passed, `part` defaults to the entire quality string.
# Examples
```jldoctest
julia> rec = parse(FASTQ.Record, "@hdr\\nUAGUCU\\n+\\nCCDFFG");
julia> qual = quality(rec)
"CCDFFG"
julia> qual isa AbstractString
true
```
"""
function quality(record::Record, part::UnitRange{<:Integer}=1:seqsize(record))
quality(StringView, record, part)
end
function quality(::Type{String}, record::Record, part::UnitRange{<:Integer}=1:seqsize(record))
String(record.data[quality_indices(record, part)])
end
function quality(::Type{StringView}, record::Record, part::UnitRange{<:Integer}=1:seqsize(record))
StringView(view(record.data, quality_indices(record, part)))
end
function quality_scores(record::Record, part::UnitRange{<:Integer}=1:seqsize(record))
quality_scores(record, DEFAULT_ENCODING, part)
end
"""
quality_scores(record::FASTQ.Record, [encoding::QualityEncoding], [part::UnitRange])
Get an iterator of PHRED base quality scores of `record` at positions `part`.
This iterator is corrupted if the record is mutated.
By default, `part` is the whole sequence.
By default, the encoding is PHRED33 Sanger encoding, but may be specified with a `QualityEncoding` object
"""
function quality_scores(record::Record, encoding::QualityEncoding, part::UnitRange{<:Integer}=1:seqsize(record))
start, stop = first(part), last(part)
(start < 1 || stop > seqsize(record)) && throw(BoundsError(record, start:stop))
data = record.data
offset = record.description_len + seqsize(record)
return Iterators.map(offset+start:offset+stop) do i
v = data[i]
decode_quality(encoding, v)
end
end
"""
quality_scores(record::Record, encoding_name::Symbol, [part::UnitRange])
Get an iterator of base quality scores of the slice `part` of `record`'s quality.
The `encoding_name` can be either `:sanger`, `:solexa`, `:illumina13`, `:illumina15`, or `:illumina18`.
"""
function quality_scores(
record::Record,
encoding_name::Symbol,
part::UnitRange{<:Integer}=1:seqsize(record)
)
encoding = (
encoding_name == :sanger ? SANGER_QUAL_ENCODING :
encoding_name == :solexa ? SOLEXA_QUAL_ENCODING :
encoding_name == :illumina13 ? ILLUMINA13_QUAL_ENCODING :
encoding_name == :illumina15 ? ILLUMINA15_QUAL_ENCODING :
encoding_name == :illumina18 ? ILLUMINA18_QUAL_ENCODING :
throw(ArgumentError("quality encoding ':$(encoding_name)' is not supported")))
quality_scores(record, encoding, part)
end
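# Usage sketch (comment only; values checked by hand against the encodings above):
#   rec = parse(FASTQ.Record, "@r\nTA\n+\nab")
#   collect(quality_scores(rec, :illumina13)) == Int8[33, 34]   # 97 - 64, 98 - 64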
function Base.hash(record::Record, h::UInt)
h = hash(record.description_len, h)
hash(view(record.data, filled(record)), h)
end
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 2897 | # FASTQ Writer
# ============
"""
FASTQ.Writer(output::IO; quality_header::Union{Nothing, Bool}=nothing)
Create a data writer of the FASTQ file format.
The writer is a `BioGenerics.IO.AbstractWriter`.
Writers take ownership of the underlying IO. Mutating or closing the underlying IO
other than through the writer is undefined behaviour.
Closing the writer also closes the underlying IO.
See more examples in the FASTX documentation.
See also: [`FASTQ.Record`](@ref), [`FASTQ.Reader`](@ref)
# Arguments
* `output`: Data sink to write to
* `quality_header`: Whether to print second header on the + line. If `nothing` (default),
check the individual `Record` objects for whether they contain a second header.
# Examples
```
julia> FASTQ.Writer(open("some_file.fq", "w")) do writer
write(writer, record) # a FASTQ.Record
end
```
"""
mutable struct Writer{S <: TranscodingStream} <: BioGenerics.IO.AbstractWriter
output::S
quality_header::UInt8 # 0x00: No, 0x01: Yes, 0x02: Same as when read
function Writer{S}(output::S, quality_header::UInt8) where {S <: TranscodingStream}
finalizer(new{S}(output, quality_header)) do writer
@async close(writer.output)
end
end
end
function Writer(io::T; quality_header::Union{Nothing, Bool}=nothing) where {T <: TranscodingStream}
qstate = quality_header === nothing ? 0x02 : UInt8(quality_header)
Writer{T}(io, qstate)
end
Writer(io::IO; kwargs...) = Writer(NoopStream(io); kwargs...)
function BioGenerics.IO.stream(writer::Writer)
return writer.output
end
function Base.flush(writer::Writer)
# This is, bizarrely, needed for TranscodingStreams for now.
write(writer.output, TranscodingStreams.TOKEN_END)
flush(writer.output)
end
function Base.write(writer::Writer, record::Record)
output = writer.output
n = 0
data = record.data
desclen = UInt(record.description_len)
seqlength = UInt(seqsize(record))
GC.@preserve data begin
# Header
n += write(output, UInt8('@'))
n += unsafe_write(output, pointer(data), desclen)
# Sequence
n += write(output, UInt8('\n'))
n += unsafe_write(output, pointer(data) + desclen, seqlength)
# Second header
n += write(output, "\n+")
# Write description in second header if either the writer is set to do that,
# or writer is set to look at record, and record has second header
if (
writer.quality_header == 0x01 ||
(writer.quality_header == 0x02 && has_extra_description(record))
)
n += unsafe_write(output, pointer(data), desclen)
end
# Quality
n += write(output, UInt8('\n'))
n += unsafe_write(output, pointer(data) + desclen + seqlength, seqlength)
# Final trailing newline
n += write(output, UInt8('\n'))
end
return n
end
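# Illustrative sketch of the quality_header states above (comment only; `io` is
# a hypothetical sink):
#   rec = parse(FASTQ.Record, "@a b\nT\n+a b\nJ")   # record repeats its header
#   write(Writer(io), rec)                          # 0x02: follows the record, writes "+a b"
#   write(Writer(io; quality_header=false), rec)    # 0x00: writes a bare "+"
#   write(Writer(io; quality_header=true), rec)     # 0x01: always writes "+a b"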
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 5585 | # Common tests
@testset "FASTX" begin
@testset "Copying to LongSequence" begin
@test true
strings = [
"ATCGTAGTAC", # DNA 2
"AACGMYKATNwhdvAC", # DNA 4
"AUTcutUAUU", # RNA 2
"AUGMNmuaWUAGUC", # RNA 4
"AGCGGACAAC", # DNA/RNA2
"AHCDNnnkmaAGCNvSSW", # DNA/RNA4
"KPLMQWDCB", # AA
"AKLVYhkxzX", # AA
"BOJarleaiilvw", # AA
# The next two have symbols that are in the FASTQ
# accepted range A-z. So they can be parsed, but not
# copied to sequence. If the FASTQ parsing changes,
# just remove these from the test.
"m^^jsommelig", # Invalid
"__m]]kvLMO", # Invalid
]
seqtypes = [
LongDNA{4},
LongDNA{2},
LongRNA{4},
LongRNA{2},
LongAA
]
for T in (FASTA.Record, FASTQ.Record)
empty_record = if T == FASTA.Record
record = T("name", "")
else
T("name", "", Int[])
end
success = false
seq = nothing
for seqtype in seqtypes
short_seq = seqtype()
long_seq = seqtype(undef, 100)
for str in strings
record = if T == FASTA.Record
record = T("name", str)
else
T("name", str, [75 for i in str])
end
# Empty sequence
copy!(short_seq, empty_record)
@test isempty(short_seq)
cp = copy(long_seq)
copyto!(long_seq, empty_record)
@test long_seq == cp
# Nonempty sequence
try
seq = seqtype(str)
success = true
catch error
success = false
end
if success
# copy! will change the size, whether smaller or larger
empty!(short_seq)
resize!(long_seq, 100)
copy!(long_seq, record)
copy!(short_seq, record)
@test length(short_seq) == ncodeunits(str)
@test short_seq == seq == long_seq
# copyto! will error if too short...
resize!(short_seq, ncodeunits(str) - 1)
@test_throws BoundsError copyto!(short_seq, record)
# if too long, it will leave extra symbols untouched
resize!(long_seq, 100)
rand!(long_seq)
rest = long_seq[ncodeunits(str)+1:100]
copyto!(long_seq, record)
@test long_seq[1:ncodeunits(str)] == seq
@test long_seq[ncodeunits(str)+1:100] == rest
# copyto with indices.
resize!(long_seq, ncodeunits(str))
old = copy(long_seq)
copyto!(long_seq, 3, record, 2, 6)
@test old[1:2] == long_seq[1:2]
@test long_seq[3:8] == seq[2:7]
@test long_seq[9:end] == old[9:end]
else
empty!(short_seq)
resize!(long_seq, 100)
# Test both, since it must throw no matter if the exception
# is a bounds error or an alphabet incompatibility error.
@test_throws Exception copy!(short_seq, record)
@test_throws Exception copyto!(short_seq, record)
@test_throws Exception copyto!(short_seq, 1, record, 1, ncodeunits(str))
@test_throws Exception copy!(long_seq, record)
@test_throws Exception copyto!(long_seq, record)
@test_throws Exception copyto!(long_seq, 1, record, 1, ncodeunits(str))
end
end
end
end
end
@testset "Convert FASTQ to FASTA" begin
for func in (FASTA.Record, i -> copy!(FASTA.Record(), i))
rec = func(parse(FASTQ.Record, "@ta_g^ ha||;; \nTAGJKKm\n+\njjkkmmo"))
@test description(rec) == "ta_g^ ha||;; "
@test identifier(rec) == "ta_g^"
@test sequence(rec) == "TAGJKKm"
rec = func(parse(FASTQ.Record, "@\n\n+\n"))
@test identifier(rec) == description(rec) == sequence(rec) == ""
rec = func(parse(FASTQ.Record, "@mba M\npolA\n+mba M\nTAGA"))
@test description(rec) == "mba M"
@test identifier(rec) == "mba"
@test sequence(rec) == "polA"
end
# Copying conversion does not modify the underlying record
fq = parse(FASTQ.Record, "@ta_g^ ha||;; \nTAGJKKm\n+\njjkkmmo")
fa = FASTA.Record(fq)
fill!(fa.data, UInt8('a'))
@test description(fq) == "ta_g^ ha||;; "
@test identifier(fq) == "ta_g^"
@test sequence(fq) == "TAGJKKm"
@test quality(fq) == "jjkkmmo"
end
end | FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 310 | module FASTXTests
export TestFASTA
export TestFASTQ
using FASTX: FASTA, FASTQ, identifier, description, sequence, quality
using BioSequences: LongDNA, LongAA, LongRNA
using Random: rand!
using Test
include("maintests.jl")
include("fasta/TestFASTA.jl")
include("fastq/TestFASTQ.jl")
end # module FASTXTests
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 745 | module TestFASTA
using Test
using FASTX.FASTA: FASTA, Record, identifier, description, sequence,
Reader, Writer, Index, index!, validate_fasta, faidx, seqsize, extract, seekrecord
using BioSequences: LongDNA, LongRNA, LongAA, @dna_str, @rna_str, @aa_str
using Random: rand!, shuffle!
using FormatSpecimens: list_valid_specimens, list_invalid_specimens, path_of_format, filename, hastag
using StringViews: StringView
using TranscodingStreams: NoopStream
const VALID_INDEX_CHARS = append!(vcat('0':'9', 'A':'Z', 'a':'z'), collect("!#\$%&+./:;?@^_|~-"))
const VALID_SEQ_BYTES = [i for i in 0x00:0xff if i ∉ UInt8.(Tuple(">\r\n"))]
include("record.jl")
include("io.jl")
include("index.jl")
include("specimens.jl")
end # module TestFASTA
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | code | 7514 | @testset "Index" begin
INDEX_GOOD = "abc\t100\t5\t15\t16\r\n^def?@l~2:/\t17\t200\t14\t16\nABC\t12\t55\t8\t10"
INDEX_NAME_SPACE = "abc def\t100\t5\t15\t16"
INDEX_BAD_NAME_1 = "=abc\t100\t5\t15\t16"
INDEX_BAD_NAME_2 = "abc\\def\t100\t5\t15\t16"
INDEX_NEGATIVE = "abc\t100\t5\t-4\t-3"
INDEX_OVERFLOW = "abc\t10000000000000000000000000000000\t5\t15\t16"
# Longer than len
INDEX_BAD_LINEBASES = "abc\t100\t5\t101\t102"
# Too short compared to linebases
INDEX_BAD_LINEWIDTH_1 = "abc\t100\t5\t15\t15"
# Too long compared to linebases
INDEX_BAD_LINEWIDTH_2 = "abc\t100\t5\t15\t18"
INDEX_ZERO_OFFSET = "abc\t100\t5\t15\t16\ndef\t6\t0\t1\t2"
function test_same_index(a::Index, b::Index)
@test a.names == b.names
@test a.lengths == b.lengths
@test a.offsets == b.offsets
@test a.encoded_linebases == b.encoded_linebases
@test string(a) == string(b)
end
@testset "Parsing index" begin
# Test correctly parsed _and ordered_
ind = Index(IOBuffer(INDEX_GOOD))
@test ind.names["abc"] == 1
@test ind.names["ABC"] == 2
@test ind.names["^def?@l~2:/"] == 3
@test ind.lengths == [100, 12, 17]
@test ind.offsets == [5, 55, 200]
for bad_index in [
INDEX_NAME_SPACE,
INDEX_BAD_NAME_1,
INDEX_BAD_NAME_2,
INDEX_NEGATIVE,
INDEX_OVERFLOW,
INDEX_BAD_LINEBASES,
INDEX_BAD_LINEWIDTH_1,
INDEX_BAD_LINEWIDTH_2,
INDEX_ZERO_OFFSET
]
@test_throws ErrorException Index(IOBuffer(bad_index))
end
end
random_name() = join(rand(VALID_INDEX_CHARS, rand(10:25)))
random_seqline(len::Integer) = String(rand(VALID_SEQ_BYTES, len))
function make_random_index()
buf = IOBuffer()
offset = 0
for i in 1:25
name = random_name()
print(buf, name, '\t')
offset += ncodeunits(name) + 1
len = rand(20:250)
print(buf, len, '\t')
print(buf, offset, '\t')
lineb = rand(5:len)
print(buf, lineb, '\t')
linewidth = lineb + rand(1:2)
print(buf, linewidth, '\n')
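# Advance the offset past this record's sequence: cld(len, lineb) lines, each ending
# in a (linewidth - lineb)-byte terminator, plus the len sequence bytes themselves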
offset += cld(len, lineb) * (linewidth - lineb) + len
end
seekstart(buf)
bytes = take!(buf)
(Index(IOBuffer(bytes)), bytes)
end
@testset "Writing index" begin
for i in 1:10
index, bytes = make_random_index()
buf = IOBuffer()
write(buf, index)
@test take!(buf) == bytes
end
end
@testset "Parsing index from file" begin
name = tempname()
(ind, bytes) = make_random_index()
open(name, "w") do io
write(io, bytes)
end
test_same_index(ind, Index(name))
end
function make_random_indexable_fasta()
buf = IOBuffer()
names = String[]
newlines = String[]
linelengths = Int[]
lengths = Int[]
for i in 1:15
newline = rand(("\n", "\r\n"))
push!(newlines, newline)
len = rand(100:1000)
seq = codeunits(random_seqline(len))
linelen = rand(10:len)
push!(linelengths, linelen)
name = random_name()
push!(names, name)
print(buf, '>', name, newline)
push!(lengths, len)
for i in Iterators.partition(1:len, linelen)
print(buf, String(seq[i]), newline)
end
end
return (take!(buf), names, newlines, linelengths, lengths)
end
BADFNA_LINEENDINGS = ">abc\nTA\nAG\n>def\r\nAA\nGA\r\nGG"
BADFNA_INCONSISTENT_SEQWIDTH_1 = ">A\nTT\nTTT\nT"
BADFNA_INCONSISTENT_SEQWIDTH_2 = ">A\nTTT\nTT\nTT"
BADFNA_UNPARSEABLE = "dfklgjs\r\r\r\r\n\n\n\n"
@testset "Creating index" begin
@test Index(IOBuffer("")) isa Index
# Random parseable cases
for i in 1:10
(buffer, names, newlines, linelengths, lengths) = make_random_indexable_fasta()
index = faidx(IOBuffer(buffer))
@test index.names == Dict(name => i for (i, name) in enumerate(names))
@test index.lengths == lengths
obs_lw = [FASTA.linebases_width(index, i) for i in eachindex(lengths)]
exp_lw = [(linelengths[i], linelengths[i] + length(newlines[i])) for i in eachindex(lengths)]
@test obs_lw == exp_lw
# The current implementation special-cases NoopStream, so test it explicitly
index2 = faidx(NoopStream(IOBuffer(buffer)))
test_same_index(index, index2)
end
# Failure cases
for bad_case in [
BADFNA_LINEENDINGS,
BADFNA_INCONSISTENT_SEQWIDTH_1,
BADFNA_INCONSISTENT_SEQWIDTH_2
]
@test_throws Exception faidx(IOBuffer(bad_case))
end
@test_throws Exception faidx(IOBuffer(BADFNA_UNPARSEABLE))
end
@testset "Reader with index" begin
(buffer, names, newlines, linelengths, lengths) = make_random_indexable_fasta()
idx = faidx(IOBuffer(buffer))
reader1 = Reader(IOBuffer(buffer), index=idx)
io = IOBuffer()
write(io, idx)
seekstart(io)
reader2 = Reader(IOBuffer(buffer), index=io)
name = tempname()
open(i -> write(i, idx), name, "w")
reader3 = Reader(IOBuffer(buffer), index=name)
reader4 = index!(Reader(IOBuffer(buffer)), name)
reader5 = index!(Reader(IOBuffer(buffer)), idx)
for reader in [reader1, reader2, reader3, reader4, reader5]
# Test getindex
inames = shuffle!(collect(enumerate(names)))
for (i, name) in inames
record = reader[name]
@test identifier(record) == name
@test seqsize(record) == lengths[i]
end
# Test seekrecord
for (i, name) in inames
FASTA.seekrecord(reader, name)
record = first(reader)
@test identifier(record) == name
@test seqsize(record) == lengths[i]
end
# Test extract
for (i, name) in inames
seq = extract(reader, name)
record = reader[name]
@test ncodeunits(seq) == lengths[i]
@test seq == sequence(record)
start = rand(1:seqsize(record))
stop = rand(start:seqsize(record))
seq = extract(reader, name, start:stop)
seq2 = sequence(record, start:stop)
@test seq == seq2
end
end
# Test extract with bad seq
data = Vector{UInt8}(">A\nABC")
reader = Reader(IOBuffer(data), index=faidx(IOBuffer(data)))
data[5] = UInt8('>')
@test_throws Exception extract(reader, "A")
# Test can't do these operations without an index
data = ">A\nT"
reader = Reader(IOBuffer(data))
@test_throws Exception reader["A"]
@test_throws Exception extract(reader, "A")
end
@testset "Faidx existing file" begin
name1 = tempname()
name2 = tempname()
(buffer, names, newlines, linelengths, lengths) = make_random_indexable_fasta()
open(i -> write(i, buffer), name1, "w")
# Generic
faidx(name1)
@test Index(name1 * ".fai") isa Index
# Already exists
@test_throws Exception faidx(name1)
faidx(name1, name2)
@test Index(name2) isa Index
# Force overwrite file
open(i -> print(i, "bad data"), name2, "w")
@test_throws Exception faidx(name1, name2)
faidx(name1, name2, check=false)
@test Index(name2) isa Index
end
@testset "Issue" begin
data = """>seq1 sequence
TAGAAAGCAA
TTAAAC
>seq2 sequence
AACGG
UUGC
"""
reader = FASTA.Reader(IOBuffer(data), index=faidx(IOBuffer(data)))
seekrecord(reader, "seq2")
record = first(reader)
@test sequence(record) == "AACGGUUGC"
record = reader["seq1"]
@test description(record) == "seq1 sequence"
@test extract(reader, "seq1", 3:5) == "GAA"
end
end # testset Index

@testset "IO" begin
@testset "Reader basics" begin
# Empty reader
reader = Reader(IOBuffer(""))
@test isnothing(iterate(reader))
close(reader)
# Resumable
reader = Reader(IOBuffer(">header\nTAG\nAA\n\r\n\r\n>header2\nAAA\n\nGG\n\r\n"))
(r, s) = iterate(reader)
@test identifier(r) == "header"
@test sequence(String, r) == "TAGAA"
(r, s) = iterate(reader)
@test identifier(r) == "header2"
@test sequence(String, r) == "AAAGG"
@test isnothing(iterate(reader))
close(reader)
# Copies on iteration
copy_str = ">A\nG\n>A\nG\n>A\nT"
reader = Reader(IOBuffer(copy_str))
records = collect(reader)
@test records[1] == records[2]
@test records[1] !== records[2]
@test records[1] != records[3]
close(reader)
# Test Base.read! works
reader = Reader(IOBuffer(">header string\r\nYWBL\nKKL\r\n>another\nAAGTC"))
record = Record()
read!(reader, record)
@test identifier(record) == "header"
@test description(record) == "header string"
@test sequence(record) == "YWBLKKL"
(record, _) = iterate(reader)
@test (description(record), sequence(record)) == ("another", "AAGTC")
# Does not copy on iteration if copy=false
# in this case it will iterate the same object which
# will just be overwritten.
# This is not intended to be relied on, so can be removed,
# but is currently necessary for performance, so we test it
# to prevent performance regressions on this front
reader = Reader(IOBuffer(copy_str); copy=false)
records = [first(iterate(reader)) for i in 1:3]
@test records[1] === records[2] === records[3]
@test sequence(records[1]) == "T"
close(reader)
# Test using new syntax
filename = tempname()
open(filename, "w") do io
print(io, ">ABC\nUUGG\n>LLK\nAN\nPA\n\n")
end
Reader(NoopStream(open(filename))) do reader
recs = collect(reader)
@test map(identifier, recs) == ["ABC", "LLK"]
@test map(sequence, recs) == ["UUGG", "ANPA"]
end
end
@testset "Reader edgecases" begin
good = ">A\nA\n>B\nB"
# do not accept > in sequence to detect missing newline
# when two records are concatenated without newline
bad = ">A\nA>B\nB"
@test Reader(collect, IOBuffer(good)) isa Vector{Record}
@test_throws Exception Reader(collect, IOBuffer(bad))
end
@testset "Writer basics" begin
function test_writer(records, regex::Regex)
buffer = IOBuffer()
writer = Writer(buffer)
for record in records
write(writer, record)
end
flush(writer)
str = String(take!(buffer))
close(writer)
@test occursin(regex, str)
end
# Empty writer
records = []
test_writer(records, r"^$")
# Empty records
records = [Record(), Record()]
test_writer(records, r"^>\n\n>\n\n$")
# Does not write uncoding bytes in records
records = [
Record(codeunits("someheader hereAACCGGTT"), 10, 15, 3),
Record(codeunits("fewhjlkdsjepis.."), 0, 0, 0)
]
test_writer(records, r"^>someheader here\nAAC\n>\n\n")
# Lots of records to exercise the IO a little more
# we don't test width here, that's for later
target_buffer = IOBuffer()
writer_buffer = IOBuffer()
writer = Writer(writer_buffer; width=0)
for i in 1:50
name = join(rand('A':'z', rand(20:30))) # random name of 20-30 characters
name2 = join(rand('A':'z', rand(30:40)))
descr = name * ' ' * name2
seq = join(rand(('A', 'C', 'G', 'T', 'a', 'c', 'g', 't'), rand(1000:2000)))
write(target_buffer, '>', descr, '\n', seq, '\n')
write(writer, Record(descr, seq))
end
flush(writer)
writer_bytes = take!(writer_buffer)
target_bytes = take!(target_buffer)
close(writer)
@test writer_bytes == target_bytes
# Test using new syntax
filename = tempname()
Writer(NoopStream(open(filename, "w"))) do writer
write(writer, Record("some header", "UHAGC"))
write(writer, Record("another_thing", "KJLM"))
end
@test read(filename, String) == ">some header\nUHAGC\n>another_thing\nKJLM\n"
end
# TranscodingStreams does not support flushing yet, so the FASTX implementation
# reaches into TS's internals. Hence we need to test it thoroughly
@testset "Writer flushing" begin
strings = [
">hello there \nAACC\r\nTTGG",
">\n",
">someheader\n",
"> tr wh [] ... ab **7\npwQ.---0l\r\n\r\naaaccc\r\n",
]
target_strings = mktemp() do path, io
map(strings) do string
writer = Writer(open(path, "w"))
write(writer, parse(Record, string))
close(writer)
open(io -> read(io, String), path)
end
end
# Test that flushing at arbitrary points, then writing some more
# works as expected
for i in eachindex(strings)
buf = IOBuffer()
writer = Writer(buf)
# First write some of the records and check that flush works
for j in 1:i-1
write(writer, parse(Record, strings[j]))
end
flush(writer)
str = String(take!(copy(buf)))
@test str == join(target_strings[1:i-1])
# Then write the rest of them, and check the total results is as expected
for j in i:lastindex(strings)
write(writer, parse(Record, strings[j]))
end
flush(writer)
str = String(take!(buf))
@test str == join(target_strings)
end
end
@testset "Writer width" begin
header = "some data here"
for width in (-10, 5, 25, 50)
for seqlen in [width-1, width, 3*width, 3*width+3, 75, 200]
seqlen < 1 && continue
seq = join(rand('A':'Z', seqlen))
record = Record(header, seq)
buf = IOBuffer()
writer = Writer(buf; width=width)
write(writer, record)
flush(writer)
str = String(take!(buf))
close(writer)
target_seq = if width < 1
seq
else
join(Iterators.map(join, Iterators.partition(seq, width)), '\n')
end
@test str == ">" * header * '\n' * target_seq * '\n'
end
end
end
# Records can be written, then re-read without loss,
# except arbitrary whitespace in the sequence
@testset "Round trip" begin
strings = [
">abc some
def
hgi",
">A\n\n>A B C \nlkpo",
"
> here | be [dragons > 1]
polm---GA
--PPPLLAA
>and more
AAA",
"",
]
strings = [
join(Iterators.map(lstrip, eachline(IOBuffer(s))), '\n')
for s in strings
]
for string in strings
read = collect(Reader(IOBuffer(string)))
buf = IOBuffer()
writer = Writer(buf, width=0)
for record in read
write(writer, record)
end
flush(writer)
data = String(take!(buf))
close(writer)
read2 = collect(Reader(IOBuffer(string)))
@test read == read2
end
end
end # testset IO

@testset "Record" begin
# Only using empty records here
@testset "Basic properties" begin
# Equality
record = Record()
record2 = Record()
@test record == record2
@test record !== record2
empty!(record)
@test record == record2
# Components of empty records
@test identifier(record) isa AbstractString
@test isempty(identifier(record))
@test identifier(record) == description(record)
@test sequence(String, record) === ""
@test sequence(String, record, 1:0) === ""
@test_throws BoundsError sequence(String, record, 1:1)
end
# Parsing from strings and arrays
@testset "Basic construction" begin
function test_is_equal(a::Record, b::Record)
@test a == b
@test identifier(a) == identifier(b)
@test description(a) == description(b)
@test sequence(String, a) == sequence(String, b)
end
str = ">some_identifier \tmy_description | text\nAAT\nTA\nCCG"
record = parse(Record, str)
record2 = Record()
# Identity and emptiness
@test record != record2
@test empty!(copy(record)) == record2
# Basic properties
@test identifier(record) == "some_identifier"
@test description(record) == "some_identifier \tmy_description | text"
@test sequence(String, record) == "AATTACCG"
# Construct from two strings, description and sequence
record3 = Record("some_identifier \tmy_description | text", "AATTACCG")
test_is_equal(record, record3)
# From substrings
record4 = parse(Record, SubString(str, 1:lastindex(str)))
test_is_equal(record, record4)
# From arrays
record5 = parse(Record, codeunits(str))
test_is_equal(record, record5)
record6 = parse(Record, collect(codeunits(str)))
test_is_equal(record, record6)
# From AbstractString
record3 = parse(Record, Test.GenericString(str))
test_is_equal(record, record3)
end
@testset "Construction edge cases" begin
# Minimal sequence
record = parse(Record, ">\n")
@test "" == identifier(record) == description(record) == sequence(String, record)
# Empty identifier
record = parse(Record, ">\tsome header\nTAGA\n\nAAG")
@test identifier(record) == ""
@test description(record) == "\tsome header"
@test sequence(String, record) == "TAGAAAG"
# Empty description
record = parse(Record, ">\nAAG\nWpKN.\n\n")
@test identifier(record) == description(record) == ""
@test sequence(String, record) == "AAGWpKN."
# Empty sequence
record = parse(Record, ">ag | kop[\ta]\n\n")
@test identifier(record) == "ag"
@test description(record) == "ag | kop[\ta]"
@test sequence(String, record) == ""
# Trailing description whitespace
record = parse(Record, ">hdr name\t \r\npkmn\naj")
@test identifier(record) == "hdr"
@test description(record) == "hdr name\t "
@test sequence(String, record) == "pkmnaj"
# Trailing sequence whitespace
record = parse(Record, ">here\nplKn\n.\n \t\v\n\n \n \n")
@test identifier(record) == description(record) == "here"
@test sequence(String, record) == "plKn. \t\v "
# Trailing extra record
@test_throws Exception parse(Record, ">A\nT\n>G\nA\n")
@test_throws Exception parse(Record, ">A\nT\n>")
@test parse(Record, ">A\nT\n\r\n\r\n") isa Record
end
@testset "Equality" begin
record = parse(Record, ">AAG\nWpKN.\n\n")
record2 = parse(Record, ">AAG\n\r\nWpKN.\n\n\n\r\n")
append!(record2.data, [0x05, 0x65, 0x81])
@test record == record2
record3 = parse(Record, ">AA\nGWpKN.\n\n")
@test record != record3
end
# Tests trailing bytes in data field are OK
@testset "Noncoding bytes" begin
record = parse(Record, ">abc\nOOJM\nQQ")
resize!(record.data, 1000)
@test identifier(record) == description(record) == "abc"
@test sequence(String, record) == "OOJMQQ"
cp = copy(record)
@test record == cp
@test identifier(cp) == description(record) == "abc"
@test sequence(String, cp) == "OOJMQQ"
end
@testset "Copying" begin
record = parse(Record, ">some_identifier \tmy_description | text\nAAT\nTA\nCCG")
resize!(record.data, 1000)
cp = copy(record)
@test record !== cp
@test record == cp
@test sequence(record) == sequence(cp)
@test identifier(record) == identifier(cp)
@test description(record) == description(cp)
@test record.data !== cp.data
record = parse(Record, ">another record\r\nUAGWMPK\nKKLLAAM")
@test record != cp
copy!(record, cp)
@test record !== cp
@test record == cp
end
# Get sequence as String/StringView
@testset "Get sequence" begin
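# The raw constructor takes the backing data plus the identifier, description and sequence lengths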
record = Record(codeunits("ab cAACCAAGGTTKKKMMMM"), 2, 4, 10)
@test sequence(String, record) == "AACCAAGGTT"
@test sequence(String, record, 1:0) == ""
@test sequence(String, record, 1:3) == "AAC"
@test sequence(String, record, 6:10) == "AGGTT"
@test_throws Exception sequence(String, record, 6:11)
@test_throws Exception sequence(String, record, 0:3)
# Default: StringView
@test sequence(record) isa StringView
@test sequence(record) === sequence(StringView, record)
@test sequence(record) == sequence(String, record)
@test sequence(record, 2:6) == "ACCAA"
end
# Encode to various biosequences
@testset "Encode sequence" begin
# Encode to LongSequence
record = parse(Record, ">header\naAtC\nwsdNN\n\nhhH")
@test sequence(LongDNA{4}, record) == dna"AATCWSDNNHHH"
@test sequence(LongAA, record) == aa"AATCWSDNNHHH"
@test_throws Exception sequence(LongDNA{2}, record)
@test_throws Exception sequence(LongRNA{4}, record)
# Encode empty to longsequence of any type
record = parse(Record, ">name\n\n")
for S in [
LongDNA{4}, LongDNA{2}, LongRNA{4}, LongRNA{2}, LongAA
]
@test sequence(S, record) == S("")
end
end
# Includes "unique"
@testset "Hashing" begin
records = map(i -> parse(Record, i), [
">A\n\n",
">A\nAG",
">AA\nG",
])
# Same as previous, but with noncoding data
push!(records, Record(codeunits("AAGGGG"), 2, 2, 1))
@test hash(first(records)) == hash(first(records))
@test hash(records[end]) == hash(records[end-1])
@test isequal(records[end], records[end-1])
@test !isequal(records[3], records[2])
@test length(unique(records)) == length(records) - 1
end
end # testset Record

@testset "Specimens" begin
@testset "Valid specimens" begin
# All valid specimens should be read, written, re-read, and the
# second read should be identical.
# All invalid specimens should throw an exception
function test_valid_specimen(path)
try
records = open(collect, Reader, path)
buf = IOBuffer()
writer = Writer(buf)
foreach(i -> write(writer, i), records)
flush(writer)
data = take!(buf)
close(writer)
records2 = collect(Reader(IOBuffer(data)))
issame = records == records2
if !issame
println("Valid format not parsed properly: $path")
end
@test issame
@test isnothing(open(validate_fasta, path))
catch e
println("Error when parsing $path")
@test false
end
end
for specimen in list_valid_specimens("FASTA")
path = joinpath(path_of_format("FASTA"), filename(specimen))
# We intentionally do not support comments!
if hastag(specimen, "comments")
@test_throws Exception open(collect, Reader, path)
else
test_valid_specimen(path)
end
end
end
@testset "Invalid specimens" begin
for specimen in list_invalid_specimens("FASTA")
path = joinpath(path_of_format("FASTA"), filename(specimen))
@test_throws Exception open(collect, Reader, path)
@test !isnothing(open(validate_fasta, path))
end
end
end # testset Specimens

module TestFASTQ
# Default offset for quality
const OFFSET = 33
using Test
using FASTX: FASTQ
using FASTX.FASTQ: Record, Reader, Writer, identifier, description,
sequence, quality, quality_scores, QualityEncoding, quality_header!, validate_fastq
using BioSequences: LongDNA, LongRNA, LongAA, @dna_str, @rna_str, @aa_str
using FormatSpecimens: list_valid_specimens, list_invalid_specimens, path_of_format, filename, hastag
using StringViews: StringView
using TranscodingStreams: NoopStream
TEST_RECORD_STRINGS = [
# Standard records
"@some_header\r\nAAGG\r\n+\r\njjll",
"@prkl_19900 [a b]:211\nkjmn\n+\naabb",
"@some_header\nAAGG\n+some_header\njjll\n\n", # same as #1
# Edge cases:
"@\nTAG\n+\n!!!", # empty description
"@ ||;;211name \nkakana\n+\naabbcc", # empty some_identifier
"@header here\n\n+\n", # empty sequence
#
]
TEST_BAD_RECORD_STRINGS = [
"@some\n\nTAG\n+\r\njjj", # extra newline
"@abc\nABC\n+\nABCD", # qual too long,
"@abc\nABC\n+\nAB", # qual too short,
"@A B \nC\n+A B\nA", # second header different
"@A\nC\n+AB\nA", # second header too long
"@AB\nC\n+A\nA", # second header too short,
"@AB\nC\n+AB\n\t", # qual not in range
"@AB\nABC\n+\nK V", # qual not in range
"@AB\nABC\n+\nK\x7fV", # qual not in range
]
include("record.jl")
include("io.jl")
include("specimens.jl")
end # module TestFASTQ

@testset "IO" begin
@testset "Reader basics" begin
# Empty reader
reader = Reader(IOBuffer(""))
@test isnothing(iterate(reader))
close(reader)
# Resumable
reader = Reader(IOBuffer("@A\nTAG\n+\nJJK\n@B C\nMNB\n+B C\nLLL"))
record = first(iterate(reader))
@test identifier(record) == "A"
@test sequence(record) == "TAG"
@test quality(record) == "JJK"
record = first(iterate(reader))
@test identifier(record) == "B"
@test description(record) == "B C"
@test sequence(record) == "MNB"
@test quality(record) == "LLL"
@test isnothing(iterate(reader))
close(reader)
# Copies on iteration
copy_str = "@A\nT\n+\nJ\n@A\nT\n+\nJ\n@A\nB\n+\nK"
reader = Reader(IOBuffer(copy_str))
records = collect(reader)
@test records[1] == records[2]
@test records[1] !== records[2]
@test records[1] != records[3]
close(reader)
# Test Base.read! works
reader = Reader(IOBuffer("@HWI:HDR TEXT\nTAGGCTAG\n+\nKM@BCAAC\n@A\nTAG\n+\nJJK\n"))
record = Record()
read!(reader, record)
@test identifier(record) == "HWI:HDR"
@test description(record) == "HWI:HDR TEXT"
@test sequence(record) == "TAGGCTAG"
@test quality(record) == "KM@BCAAC"
(record, _) = iterate(reader)
@test (description(record), sequence(record), quality(record)) == ("A", "TAG", "JJK")
# Does not copy on iteration if copy=false
# See comments in equivalent FASTA tests
reader = Reader(IOBuffer(copy_str); copy=false)
records = [first(iterate(reader)) for i in 1:3]
@test records[1] === records[2] === records[3]
@test sequence(records[1]) == "B"
close(reader)
end
@testset "Writer basics" begin
function test_writer(records, regex::Regex)
buffer = IOBuffer()
writer = Writer(buffer)
for record in records
write(writer, record)
end
flush(writer)
str = String(take!(buffer))
close(writer)
@test occursin(regex, str)
end
# Empty writer
records = []
test_writer(records, r"^$")
# Empty records
records = [Record(), Record()]
test_writer(records, r"^@\n\n\+\n\n@\n\n\+\n\n$")
# Does not write noncoding bytes in records
records = map(i -> parse(Record, i), [
"@ABC DEF\nkjhmn\n+ABC DEF\njjjkk",
"@pro [1-2](HLA=2);k=1\nttagga\n+\nabcdef",
])
for record in records
resize!(record.data, 512)
end
test_writer(records, r"@ABC DEF\nkjhmn\n\+ABC DEF\njjjkk\n@pro \[1-2\]\(HLA=2\);k=1\nttagga\n\+\nabcdef\n")
# Exercise the IO with larger seqs to exceed the buffer
target_buffer = IOBuffer()
writer_buffer = IOBuffer()
writer = Writer(writer_buffer)
for i in 1:250
name = join(rand('A':'z', rand(20:30))) # random name of 20-30 characters
name2 = join(rand('A':'z', rand(30:40)))
descr = name * ' ' * name2
seq = join(rand(('A', 'C', 'G', 'T', 'a', 'c', 'g', 't'), rand(200:300)))
qual_str = join(rand('A':'z', ncodeunits(seq)))
write(target_buffer, '@', descr, '\n', seq, "\n+\n", qual_str, '\n')
write(writer, Record(descr, seq, [Int8(i - OFFSET) for i in codeunits(qual_str)]))
end
flush(writer)
writer_bytes = take!(writer_buffer)
target_bytes = take!(target_buffer)
close(writer)
@test writer_bytes == target_bytes
end
# Rudimentary; see FASTA's tests
@testset "Writer flushing" begin
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
target_strings = mktemp() do path, io
map(records) do record
writer = Writer(open(path, "w"))
write(writer, record)
close(writer)
open(io -> read(io, String), path)
end
end
for i in eachindex(records)
buf = IOBuffer()
writer = Writer(buf)
# First write some of the records and check that flush works
for j in 1:i-1
write(writer, records[j])
end
flush(writer)
str = String(take!(copy(buf)))
@test str == join(target_strings[1:i-1])
# Then write the rest of them, and check the total results is as expected
for j in i:lastindex(records)
write(writer,records[j])
end
flush(writer)
str = String(take!(buf))
@test str == join(target_strings)
end
end
@testset "Writer optional second header" begin
function iowrite(records, quality_header)
buf = IOBuffer()
writer = Writer(buf; quality_header=quality_header)
for record in records
write(writer, record)
end
flush(writer)
str = String(take!(buf))
close(writer)
return str
end
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
@test iowrite(records, nothing) == join(map(string, records), '\n') * '\n'
@test iowrite(records, true) == join(map(records) do record
string(quality_header!(copy(record), true))
end, '\n') * '\n'
@test iowrite(records, false) == join(map(records) do record
string(quality_header!(copy(record), false))
end, '\n') * '\n'
end
@testset "Round trip" begin
function iowrite(records)
buf = IOBuffer()
writer = Writer(buf)
for record in records
write(writer, record)
end
flush(writer)
str = String(take!(buf))
close(writer)
return str
end
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
str = iowrite(records)
records2 = Reader(collect, IOBuffer(str))
str2 = iowrite(records2)
@test str == str2
end
end # testset IO

@testset "Record" begin
# Only using empty records here
@testset "Basic properties" begin
# Equality of empty records
record = Record()
record2 = Record()
@test record == record2
@test record !== record2
empty!(record)
@test record == record2
# Components of empty records
@test identifier(record) == ""
@test description(record) == ""
@test sequence(record) == ""
@test isempty(quality(record))
end
@testset "Basic construction" begin
function test_is_equal(a::Record, b::Record)
@test a == b
@test identifier(a) == identifier(b)
@test description(a) == description(b)
@test sequence(String, a) == sequence(String, b)
@test quality(a) == quality(b)
end
string = "@some header\nAAGG\n+\njjll"
record = parse(Record, string)
record2 = Record()
# Identity and emptiness
@test record != record2
@test empty!(copy(record)) == record2
# Test basic properties
@test identifier(record) == "some"
@test description(record) == "some header"
@test sequence(record) == "AAGG"
@test quality(record) == "jjll"
# Construct from two strings and quality
record3 = Record("some header", "AAGG", [Int8(i)-OFFSET for i in "jjll"])
test_is_equal(record, record3)
@test_throws Exception Record("some_header", "TAG", [Int8(i)-OFFSET for i in "jj"])
# From substrings
record4 = parse(Record, SubString(string, 1:lastindex(string)))
test_is_equal(record, record4)
# From arrays
cu = codeunits(string)
test_is_equal(parse(Record, cu), record)
test_is_equal(parse(Record, collect(cu)), record)
# From AbstractString
record5 = parse(Record, Test.GenericString(string))
test_is_equal(record5, record)
# From BioSequence/quality string
record6 = Record("some header", dna"AAGG", "jjll")
test_is_equal(record6, record)
end
@testset "Construction edge cases" begin
# Can construct good examples
strings = TEST_RECORD_STRINGS[1:6]
records = map(strings) do string
@test parse(Record, string) isa Any # does not throw
parse(Record, string)
end
@test description(records[4]) == ""
@test identifier(records[5]) == ""
@test description(records[5]) != ""
@test sequence(records[6]) == ""
@test isempty(quality(records[6]))
@test records[3] == records[1]
# Throws when constructing bad examples
for string in TEST_BAD_RECORD_STRINGS
@test_throws Exception parse(Record, string)
end
# Trailing extra record
@test_throws Exception parse(Record, "@A\nT\n+\nJ\n@B\nU\n+\nK")
@test_throws Exception parse(Record, "@A\nT\n+\nJ\n@B\nU\n")
@test parse(Record, "@A\nT\n+\nJ\n\r\n") isa Record
end
@testset "Equality" begin
a, b = parse(Record, TEST_RECORD_STRINGS[1]), parse(Record, TEST_RECORD_STRINGS[3])
@test a == b
push!(a.data, 0x00)
@test a == b
# The description/sequence boundary matters
@test parse(Record, "@AA\nT\n+\nA") != parse(Record, "@A\nAT\n+\nAT")
# Second header does not matter
@test parse(Record, "@AA\nT\n+\nA") == parse(Record, "@AA\nT\n+AA\nA")
end
# I.e. trailing bytes in the data field not used do not matter
@testset "Noncoding bytes" begin
record = parse(Record, TEST_RECORD_STRINGS[1])
resize!(record.data, 1000)
cp = copy(record)
for rec in (record, cp)
@test identifier(rec) == description(rec) == "some_header"
@test sequence(rec) == "AAGG"
@test quality(rec) == "jjll"
end
end
@testset "Copying" begin
record = parse(Record, "@A\nTGGAA\n+\nJJJAA")
resize!(record.data, 1000)
cp = copy(record)
@test record !== cp
@test record == cp
@test sequence(record) == sequence(cp)
@test identifier(record) == identifier(cp)
@test description(record) == description(cp)
@test quality(record) == quality(cp)
@test record.data !== cp.data
# Test that we don't unnecessarily copy noncoding data
@test length(record.data) > length(cp.data)
record = parse(Record, "@some other record\r\nTAGA\r\n+\r\nJJJK")
@test record != cp
copy!(record, cp)
@test record !== cp
@test record == cp
end
@testset "Get sequence as String/StringView" begin
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
@test sequence(String, records[1]) == "AAGG"
@test sequence(String, records[2]) == "kjmn"
@test sequence(String, records[2], 1:3) == "kjm"
@test sequence(String, records[2], 1:0) == ""
@test sequence(String, records[2], 3:4) == "mn"
@test sequence(String, records[5], 1:6) == "kakana"
@test_throws Exception sequence(String, records[5], 0:1)
@test_throws Exception sequence(String, records[5], 1:7)
@test_throws Exception sequence(String, records[5], -3:3)
# Default is StringView
@test sequence(records[1]) isa StringView
@test sequence(records[1]) === sequence(StringView, records[1])
@test sequence(records[1]) == sequence(String, records[1])
@test sequence(records[1], 2:3) == "AG"
@test_throws Exception sequence(records[1], 0:3)
@test_throws Exception sequence(records[1], 2:5)
end
# The same machinery as FASTA is used, and that is much more
# thoroughly tested, so I only test a few cases here
@testset "Encode BioSequences" begin
record1 = parse(Record, "@A\nTAG\n+\nPRJ")
record2 = parse(Record, "@A\nYJP\n+\nACG")
@test sequence(LongDNA{4}, record1) == dna"TAG"
@test sequence(LongDNA{2}, record1) == dna"TAG"
@test sequence(LongAA, record1) == aa"TAG"
@test sequence(LongAA, record2) == aa"YJP"
@test_throws Exception sequence(LongRNA{2}, record1)
@test_throws Exception sequence(LongDNA{4}, record2)
end
# We have already tested basic quality iteration in the rest of the tests
@testset "Quality" begin
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
# Slicing
@test isnothing(iterate(quality(records[1], 1:0)))
@test quality(records[1]) == "jjll"
@test quality(records[1], 1:4) == "jjll"
@test quality(records[1], 2:4) == "jll"
@test_throws BoundsError quality(records[1], 0:0)
@test_throws BoundsError quality(records[1], -2:2)
@test_throws BoundsError quality(records[1], 4:6)
# QualityEncoding
@test_throws Exception QualityEncoding('B':'A', 10)
@test_throws Exception QualityEncoding('a':'A', 10)
@test_throws Exception QualityEncoding('Z':'Y', 10)
@test_throws Exception QualityEncoding('A':'B', -1)
@test_throws Exception QualityEncoding('α':'β', 10)
@test_throws Exception QualityEncoding(Char(0xa5):Char(0xa5), 10)
# Default Quality encoding
@test collect(quality_scores(records[2])) == [Int8(i) - OFFSET for i in "aabb"]
@test collect(quality_scores(records[5])) == [Int8(i) - OFFSET for i in "aabbcc"]
@test collect(quality_scores(records[2], 3:4)) == [Int8(i) - OFFSET for i in "bb"]
@test collect(quality_scores(records[2], 4:4)) == [Int8(i) - OFFSET for i in "b"]
@test_throws BoundsError quality_scores(records[2], 0:4)
@test_throws BoundsError quality_scores(records[2], 2:5)
@test_throws BoundsError quality_scores(records[2], 5:5)
# Solexa encoding is unusual in that it can be negative
rec = Record("abc", "TAG", [20, 0, -5]; offset=64)
@test collect(quality_scores(rec, FASTQ.SOLEXA_QUAL_ENCODING)) == [20, 0, -5]
# Custom quality encoding
CustomQE = QualityEncoding('A':'Z', 12)
good = parse(Record, "@a\naaaaaa\n+\nAKPZJO")
@test collect(quality_scores(good, CustomQE)) == [Int8(i-12) for i in "AKPZJO"]
good = parse(Record, "@a\naaa\n+\nABC")
@test collect(quality_scores(good, CustomQE)) == [Int8(i-12) for i in "ABC"]
good = parse(Record, "@a\naaaa\n+\nXYZW")
@test collect(quality_scores(good, CustomQE)) == [Int8(i-12) for i in "XYZW"]
# Bad sequences
for seq in [
"BACDEf",
"ABCC!",
"abc",
"}}!]@@"
]
record = parse(Record, string("@a\n", 'a'^length(seq), "\n+\n", seq))
@test_throws Exception collect(quality_scores(record, CustomQE))
end
# As strings
@test quality(String, records[1]) == quality(StringView, records[1]) == "jjll"
@test quality(String, records[1], 2:3) == "jl"
@test quality(String, records[1], 1:3) == "jjl"
@test quality(records[1]) == quality(StringView, records[1])
@test_throws Exception quality(String, records[1], 0:3)
@test_throws Exception quality(String, records[1], 1:5)
@test quality(String, records[1]) isa String
@test quality(StringView, records[1]) isa StringView
end
@testset "Named quality encodings" begin
record = parse(Record, "@ABC\nABCDEFGHIJK\n+\n]C_Za|}~^xA")
@test_throws Exception quality(record, :fakename)
@test_throws Exception quality(record, :snager)
@test_throws Exception quality(record, :illumina) # must specify which kind
for (symbol, offset) in [
(:sanger, 33),
(:solexa, 64),
(:illumina13, 64),
(:illumina15, 64),
(:illumina18, 33),
]
@test collect(quality_scores(record, symbol, 1:10)) == [Int8(i - offset) for i in "]C_Za|}~^x"]
end
# 64-encodings do not support lower end of qual range
bad_records = map(i -> parse(Record, i), [
"@A\nABCDE\n+\n:KPab", # Colon not in range
"@A\nABCDE\n+\nJKH!I", # ! not in range
"@A\nABCDE\n+\nBDE72", # numbers not in range
])
for record in bad_records
@test_throws ErrorException collect(quality_scores(record, :solexa))
@test_throws ErrorException collect(quality_scores(record, :illumina13))
@test_throws ErrorException collect(quality_scores(record, :illumina15))
end
end
@testset "Hashing" begin
records = map(i -> parse(Record, i), TEST_RECORD_STRINGS)
@test hash(records[1]) != hash(records[2])
@test hash(records[1]) == hash(records[3])
@test !isequal(records[1], records[2])
@test isequal(records[1], records[3])
@test length(unique(records)) == length(records) - 1
cp = copy(records[2])
append!(cp.data, rand(UInt8, 128))
@test hash(cp) == hash(records[2])
@test isequal(cp, records[2])
end
end # testset Record

@testset "Specimens" begin
@testset "Valid specimens" begin
# All valid specimens should be read, written, re-read, and the
# second read should be identical.
# All invalid specimens should throw an exception
function test_valid_specimen(path)
try
records = open(collect, Reader, path)
buf = IOBuffer()
writer = Writer(buf)
foreach(i -> write(writer, i), records)
flush(writer)
data = take!(buf)
close(writer)
records2 = collect(Reader(IOBuffer(data)))
issame = records == records2
if !issame
println("Valid format not parsed properly: $path")
end
@test issame
@test isnothing(open(validate_fastq, path))
catch e
println("Error when parsing $path")
@test false
end
end
for specimen in list_valid_specimens("FASTQ")
path = joinpath(path_of_format("FASTQ"), filename(specimen))
# These files contain multiline FASTQ, which we can't currently parse,
# and parsing these is surprisingly hard (see issue #78)
if hastag(specimen, "linewrap")
@test_throws Exception open(collect, Reader, path)
else
test_valid_specimen(path)
end
end
end
@testset "Invalid specimens" begin
for specimen in list_invalid_specimens("FASTQ")
path = joinpath(path_of_format("FASTQ"), filename(specimen))
@test_throws Exception open(collect, Reader, path)
@test !isnothing(open(validate_fastq, path))
end
end
end # testset Specimens

# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [2.1.4]
* Add Base.print(::IO, ::Index)
* Touch up documentation
* Bump TranscodingStreams to 0.10
## [2.1.3]
* Add short-form show for records
* Migrate to Automa v1
* Drop ReTest test dep
## [2.1.2]
### Bugfix
* Allow non-PHRED quality scores, such as Solexa scores, which can be negative (#104)
## [2.1.1]
### Bugfix
* Fix doc examples for writer with do-syntax (#100)
## [2.1.0]
### Additions
* Implement `Base.copy!` for `FASTQRecord` and `FASTARecord`
## [2.0.1]
### Bugfix
* Fix `Base.read!(::FASTQReader, ::FASTQRecord)` (issue #95)
## [2.0.0]
Version 2 is a near-complete rewrite of FASTX.
It strives to provide an easier and more consistent API, while also being
faster, more memory efficient, and better tested than v1.
The changes are comprehensive, but code should only need a few minor tweaks to
work with v2. I recommend upgrading your packages using a static analysis tool like JET.jl.
### Breaking changes
#### Records
* `description` has changed meaning: In v1, it meant the part of the header after the '>' symbol
and up until first whitespace. Now it extends to the whole header line until the ending newline.
This implies the identifier is a prefix of the description.
* `header` has been removed, and is now replaced by `description`.
* All `Record` objects now have an identifier, a description and a sequence, and all `FASTQRecord`s
have a quality. These may be empty, but will not throw an error when accessing them.
* As a consequence, all "checker" functions like `hassequence`, `isfilled`, `hasdescription` and
so on has been removed, since the answer now is trivially "yes" in all cases.
* `identifier`, `description`, `sequence` and `quality` now return an `AbstractString` by default.
Although it is an implementation detail, it uses zero-copy string views for performance.
* You can no longer construct a record using e.g. `Record(::String)`. Instead, use `parse(Record, ::String)`.
* `seqlen` is renamed `seqsize` to emphasize that it returns the data size of the sequence,
not necessarily its length.
#### Readers/writers
* All readers/writers now take any other arguments than the main IO as a keyword for clarity
and consistency.
* FASTQ.Writers will no longer by default modify `FASTQ.Records`'s second header.
An optional keyword forces the writer to always write/skip the second header if set to `true` or `false`,
but it defaults to `nothing`, meaning it leaves it intact.
* FASTQ writers can no longer fill in ambiguous bases in Records transparently,
or otherwise transform records, when writing.
If the user wishes to transform records, they must do it by manually calling a function that transforms the records.
#### Other breaking changes
* `FASTQ.Read` has been removed. To subset a read, extract the sequence and quality, and construct
a new Record object from these.
* `transcribe` has been removed, as it is now trivial to do the same thing.
It may be added in a future release with new functionality.
### New features
* Function `quality_scores` returns the qualities of a FASTQ record as a lazy, validating iterator
of PHRED quality scores.
* New object: `QualityEncoding` can be used to construct custom PHRED/ASCII quality encodings.
By default, accessing quality scores uses an existing default object.
* Readers now have a keyword `copy` that defaults to `true`. If set to `false`, iterating over
a reader will overwrite the same record for performance. Use with care.
This makes the old `while !eof(reader)`-idiom obsolete in favor of iterating over a reader
constructed with `copy=false`.
* Users can now use the following syntax to make processing gzipped readers easier:
```
Reader(GzipDecompressorStream(open(path)); kwargs...) do reader
# stuff
end
```
this is a change in BioGenerics.jl, but is guaranteed to work in FASTX.jl v2.
* FAI (FASTX index) files can now be written as well as read.
* FASTA files can now be indexed with the new function `faidx`.
* Function `extract` can extract parts of a sequence from an indexed FASTA reader
without loading the entire sequence into memory.
You can use this to e.g. extract a small part of a large chromosome. (see #29)
* New functions `validate_fasta` and `validate_fastq` validates if an `IO` is formatted
validly, faster and more memory-efficiently than loading in the file.
### Other changes
* All practically useful functions and types are now exported directly from FASTX,
so users don't need to prepend identifiers with `FASTA.` or `FASTQ.`.
* FASTA readers are more liberal in what formats they will accept (#73)
### Removed
* The method `FASTA.sequence(::FASTA.Record)` has been removed, since the auto-detection of sequence
type could not be made reliable enough.
## [1.2.0] - 2021-07-13
### Added:
* `header(::Union{FASTA.Record, FASTQ.Record})` returns the full header line.
* `sequence_iter(::Union{FASTA.Record, FASTQ.Record})` returns a no-copy iterator over the sequence. If the record is mutated, this iterator will be in an invalid state.
* `quality_iter(::FASTQ.Record)` - same as above, but for PHRED quality.
* New type `FASTQRead` stores the same data as a FASTQ record, but in a Julia native format instead of an ASCII-encoded byte vector. (PR #35)
### Bugfixes
* Allow trailing newlines after last record of FASTA and FASTQ
* Fix parser FSM ambiguity
* Fix off-by-one error in line counting of FASTQ files
* Various small fixes to the internal parsing regex
* Writers are now parametric and buffered for increased writing speed
* Fixed a bug where Windows-style newlines would break the parser
## [1.1.0] - 2019-08-07
### Added
- `Base.copyto!` methods for copying record data to LongSequences.
- `FASTA.seqlen` & `FASTQ.seqlen` for getting the length of a sequence in a record.
### Changed
- Use BioSequence.jl v2.0 or higher.
- Use TranscodingStreams v0.9.5.
## [1.0.0] - 2019-06-30
### Added
- FASTA submodule.
- FASTQ submodule.
- User manual.
- API reference.
[Unreleased]: https://github.com/BioJulia/FASTX.jl/compare/v1.1.0...HEAD
[1.1.0]: https://github.com/BioJulia/FASTX.jl/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/BioJulia/FASTX.jl/tree/v1.0.0
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | docs | 3738 | # <img src="./sticker.svg" width="30%" align="right" /> FASTX
[](https://github.com/BioJulia/FASTX.jl/releases/latest)
[](https://github.com/BioJulia/FASTX.jl/blob/master/LICENSE)
[](https://doi.org/10.5281/zenodo.3361839)
[](https://biojulia.github.io/FASTX.jl/stable)
[](https://biojulia.github.io/FASTX.jl/latest/)
[](https://www.repostatus.org/#active)
## Description
FASTX provides I/O and utilities for manipulating FASTA- and FASTQ-formatted
sequence data files.
## Installation
You can install FASTX from the julia REPL.
Press `]` to enter pkg mode, and enter the following:
```julia
add FASTX
```
If you are interested in the cutting edge of the development, please check out
the master branch to try new features before release.
## Testing
FASTX is tested against Julia `1.X` on Linux, OS X, and Windows.
**Latest build status:**
[](https://github.com/BioJulia/FASTX.jl/actions?query=workflow%3A%22Unit+tests%22+branch%3Amaster)
[](https://github.com/BioJulia/FASTX.jl/actions?query=workflow%3ADocumentation+branch%3Amaster)
[](https://codecov.io/gh/BioJulia/FASTX.jl)
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing)
for detailed contributor and maintainer guidelines, and code of conduct.
## Backers & Sponsors
Thank you to all our backers and sponsors!
[](https://opencollective.com/biojulia#backers)
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come on over and chat to us on [the Julia Slack workspace](https://julialang.org/slack/), or you can try the [Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| FASTX | https://github.com/BioJulia/FASTX.jl.git |
|
[
"MIT"
] | 2.1.7 | cfbc767762419cc2b6b61a2c70aa81e54b27000f | docs | 279 | Thank you for making an issue.
If you are submitting a bug report, it will help us if you include the following information:
- Your version of Julia and any packages
- A small example that demonstrates the bug. If possible, please make the code copy-pastable into a fresh REPL.

Thank you for your contribution!
If you have any questions about your PR, or need help completing it, you can ping the maintainers of this repository, who will be happy to help if they can find time.
You can optionally use the following checklist when you work on your PR:
- [ ] I have updated any relevant documentation and docstrings.
- [ ] I have added unit tests, and the CodeCov bot shows tests cover my new code.
- [ ] I have mentioned my changes in the CHANGELOG.md file.

```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX
end
```
# FASTA index (FAI files)
FASTX.jl supports FASTA index (FAI) files.
When a FASTA file is indexed with a FAI file, one can seek records by their name, or extract parts of records easily.
See the FAI specification here: http://www.htslib.org/doc/faidx.html
### Making an `Index`
A FASTA index (of type `Index`) can be constructed from an `IO` object representing a FAI file:
```jldoctest
julia> io = IOBuffer("seqname\t9\t2\t6\t8");
julia> Index(io) isa Index
true
```
Or from a path representing a FAI file:
```jldoctest
julia> Index("../test/data/test.fasta.fai");
```
Alternatively, a FASTA file can be indexed to produce an `Index` using `faidx`.
```jldoctest
julia> faidx(IOBuffer(">abc\nTAGA\nTA"))
Index:
abc 6 5 4 5
```
A FASTA file can also be indexed and the index immediately written to a FAI file
by passing an `AbstractString` path to `faidx`:
```jldoctest
julia> rm("../test/data/test.fasta.fai") # remove existing fai
julia> ispath("../test/data/test.fasta.fai")
false
julia> faidx("../test/data/test.fasta");
julia> ispath("../test/data/test.fasta.fai")
true
```
Note that the restrictions on FASTA files for indexing are stricter than Julia's FASTA parser,
so not all FASTA files that can be read can be indexed:
```jldoctest
julia> str = ">\0\n\0";
julia> first(FASTAReader(IOBuffer(str))) isa FASTARecord
true
julia> Index(IOBuffer(str))
ERROR
[...]
```
### Writing a FAI file
If you have an `Index` object, you can simply `write` it to an IO:
```jldoctest
julia> index = open(i -> Index(i), "../test/data/test.fasta.fai");
julia> filename = tempname();
julia> open(i -> write(i, index), filename, "w");
julia> index2 = open(i -> Index(i), filename);
julia> string(index) == string(index2)
true
```
### Attaching an `Index` to a `Reader`
When opening a `FASTA.Reader`, you can attach an `Index` by passing the `index` keyword.
You can either pass an `Index` directly, or else an `IO`, in which case an `Index` will be parsed from the `IO`,
or an `AbstractString` that will be interpreted as a path to a FAI file:
```jldoctest
julia> str = ">abc\nTAG\nTA";
julia> idx = faidx(IOBuffer(str));
julia> rdr = FASTAReader(IOBuffer(str), index=idx);
```
You can also add an index to an existing reader using the `index!` function:
```@docs
index!
```
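For example, a minimal sketch that builds an index on the fly and attaches it to an open reader:
```jldoctest
julia> str = ">abc\nTAG\nTA";
julia> reader = FASTAReader(IOBuffer(str));
julia> index!(reader, faidx(IOBuffer(str)));
julia> sequence(reader["abc"])
"TAGTA"
```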
### Seeking using an `Index`
With an `Index` attached to a `Reader`, you can do the following operations in O(1) time.
In these examples, we will use the following FASTA file:
```
>seq1 sequence
TAGAAAGCAA
TTAAAC
>seq2 sequence
AACGG
UUGC
```
```@meta
DocTestSetup = quote
using FASTX
data = """>seq1 sequence
TAGAAAGCAA
TTAAAC
>seq2 sequence
AACGG
UUGC
"""
reader = FASTA.Reader(IOBuffer(data), index=faidx(IOBuffer(data)))
end
```
* Seek to a Record using its identifier:
```jldoctest
julia> seekrecord(reader, "seq2");
julia> record = first(reader); sequence(record)
"AACGGUUGC"
```
* Directly extract a record using its identifier
```jldoctest
julia> record = reader["seq1"];
julia> description(record)
"seq1 sequence"
```
* Extract a sequence directly without loading the whole record into memory.
This is useful for huge sequences like chromosomes
```jldoctest
julia> extract(reader, "seq1", 3:5)
"GAA"
```
```@meta
DocTestSetup = nothing
```
FASTX.jl does not yet support indexing FASTQ files.
### Reference:
```@docs
faidx
seekrecord
extract
Index
```

```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX
end
```
# FASTA formatted files
__NB: First read the overview in the sidebar__
FASTA is a text-based file format for representing biological sequences.
A FASTA file stores a list of sequence records with name, description, and
sequence.
The template of a sequence record is:
```
>{description}
{sequence}
```
Where the "identifier" is the first part of the description up to the first whitespace
(or the entire description if there is no whitespace).
Here is an example of a chromosomal sequence:
```
>chrI chromosome 1
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACC
CACACACACACATCCTAACACTACCCTAACACAGCCCTAATCTA
```
Here:
* The `identifier` is `"chrI"`
* The `description` is `"chrI chromosome 1"`, containing the identifier
* The sequence is the DNA sequence `"CCACA..."`
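For example, parsing a truncated version of the record above:
```jldoctest
julia> record = parse(FASTARecord, ">chrI chromosome 1\nCCACACCACA");
julia> identifier(record)
"chrI"
julia> description(record)
"chrI chromosome 1"
julia> sequence(record)
"CCACACCACA"
```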
## The `FASTARecord`
FASTA records are, by design, very lax in what they can contain.
They can contain almost arbitrary byte sequences, including invalid unicode, and trailing whitespace on their sequence lines, which will be interpreted as part of the sequence.
If you want to have more certainty about the format, you can either check the content of the sequences with a regex, or (preferably) convert them to the desired `BioSequence` type.
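For example, a sketch of encoding a record's sequence to a `LongDNA{4}`, which throws if the sequence contains bytes that are not valid DNA (the record here is made up for illustration):
```jldoctest
julia> using BioSequences
julia> record = parse(FASTARecord, ">seq1\nACGTMWS");
julia> sequence(LongDNA{4}, record) == dna"ACGTMWS"
true
```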
```@docs
FASTA.Record
```
## `FASTAReader` and `FASTAWriter`
`FASTAWriter` can optionally be passed the keyword `width` to control the line width.
If this is zero or negative, it will write all record sequences on a single line.
Else, it will wrap lines to the given maximal width.
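For example, a minimal sketch wrapping a 9-base record at a width of 5 (record contents made up for illustration):
```jldoctest
julia> io = IOBuffer(); writer = FASTAWriter(io; width=5);
julia> write(writer, FASTARecord("hdr", "TAGAAGATA"));
julia> flush(writer); print(String(take!(io)))
>hdr
TAGAA
GATA
```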
### Reference:
```@docs
FASTA
FASTA.Reader
FASTA.Writer
validate_fasta
```

```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX
end
```
# FASTQ formatted files
__NB: First read the overview in the sidebar__
FASTQ is a text-based file format for representing DNA sequences along with qualities for each base.
A FASTQ file stores a list of sequence records.
The template of a sequence record is:
```
@{description}
{sequence}
+{description}?
{qualities}
```
Where the "identifier" is the first part of the description up to the first whitespace
(or the entire description if there is no whitespace).
The description may optionally be present on the third line, and if so, must be identical to the description on the first line.
Here is an example of one record from a FASTQ file:
```
@FSRRS4401BE7HA
tcagTTAAGATGGGAT
+
###EEEEEEEEE##E#
```
Where:
* `identifier` is `"FSRRS4401BE7HA"`
* `description` is also `"FSRRS4401BE7HA"`
* `sequence` is `"tcagTTAAGATGGGAT"`
* `quality` is `"###EEEEEEEEE##E#"`
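For example, parsing that record:
```jldoctest
julia> record = parse(FASTQRecord, "@FSRRS4401BE7HA\ntcagTTAAGATGGGAT\n+\n###EEEEEEEEE##E#");
julia> identifier(record)
"FSRRS4401BE7HA"
julia> quality(record)
"###EEEEEEEEE##E#"
```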
## The `FASTQRecord`
`FASTQRecord`s optionally have the description repeated on the third line.
This can be toggled with `quality_header!(::Record, ::Bool)`:
```jldoctest qual
julia> record = parse(FASTQRecord, "@ILL01\nCCCGC\n+\nKM[^d");
julia> print(record)
@ILL01
CCCGC
+
KM[^d
julia> quality_header!(record, true); print(record)
@ILL01
CCCGC
+ILL01
KM[^d
```
```@docs
FASTQ.Record
```
## Qualities
Unlike `FASTARecord`s, a `FASTQRecord` contains quality scores; see the example above.
The quality string can be obtained using the `quality` method:
```jldoctest qual
julia> record = parse(FASTQRecord, "@ILL01\nCCCGC\n+\nKM[^d");
julia> quality(record)
"KM[^d"
```
Qualities are numerical values that are encoded by ASCII characters.
Unfortunately, multiple encoding schemes exist, although PHRED+33 is the most common.
The scores can be obtained using the `quality_scores` function, which returns an iterator of PHRED+33 scores:
```jldoctest qual
julia> collect(quality_scores(record))
5-element Vector{Int8}:
42
44
58
61
67
```
If you want to decode the qualities using another scheme, you can use one of the predefined `QualityEncoding` objects.
For example, Illumina v1.3 used PHRED+64:
```jldoctest qual
julia> collect(quality_scores(record, FASTQ.ILLUMINA13_QUAL_ENCODING))
5-element Vector{Int8}:
11
13
27
30
36
```
Alternatively, `quality_scores` accepts the name of a known quality encoding:
```jldoctest qual
julia> (collect(quality_scores(record, FASTQ.ILLUMINA13_QUAL_ENCODING)) ==
collect(quality_scores(record, :illumina13)))
true
```
Lastly, you can create your own:
```@docs
QualityEncoding
```
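For example, a minimal sketch of a custom encoding that maps the ASCII characters `A`-`Z` with an offset of 12 (the range and offset here are arbitrary, for illustration):
```jldoctest
julia> custom = QualityEncoding('A':'Z', 12);
julia> record = parse(FASTQRecord, "@read\nTAG\n+\nABC");
julia> collect(quality_scores(record, custom))
3-element Vector{Int8}:
53
54
55
```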
### Reference:
```@docs
quality
quality_scores
quality_header!
```
## `FASTQReader` and `FASTQWriter`
`FASTQWriter` can optionally be passed the keyword `quality_header` to control whether or not to print the description on the third line (the one with `+`).
By default this is `nothing`, meaning that it will print the second header, if present in the record itself.
If set to a `Bool` value, the `Writer` will override the `Record`s' own setting, without changing the records themselves.
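For example, a minimal sketch forcing the second header on (in-memory buffer and record made up for illustration):
```jldoctest
julia> io = IOBuffer(); writer = FASTQWriter(io; quality_header=true);
julia> write(writer, parse(FASTQRecord, "@read1\nTAG\n+\nABC"));
julia> flush(writer); print(String(take!(io)))
@read1
TAG
+read1
ABC
```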
### Reference:
```@docs
FASTQ
FASTQ.Reader
FASTQ.Writer
validate_fastq
```

```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX
end
```
# FASTX formatted files
### Readers and writers
A `Reader` and a `Writer` are structs that wrap an IO, and allow efficient reading/writing of FASTX `Record`s.
For FASTA, use `FASTAReader` and `FASTAWriter`, and for FASTQ - well I'm sure you've guessed it.
Readers and writers take control over the underlying IO, and manipulating the IO underneath a Reader/Writer, e.g. by flushing or closing it, causes them to behave in an undefined manner.
Instead, you must interact directly with the reader and writer object.
Closing readers/writers closes the underlying IO.
Because they carry their own buffers, it's important to remember to close writers in particular, else the results may not be fully written to the file.
Readers are iterables of `Record`:
```jldoctest
julia> reader = FASTAReader(IOBuffer(">A\nTAG\n>B\nAGA"));
julia> record = first(reader); typeof(record) == FASTA.Record
true
julia> sequence(record)
"TAG"
julia> # NB! Readers are mutable iterators as can be seen here:
julia> sequence(first(reader))
"AGA"
julia> iterate(reader) === nothing # now empty
true
julia> close(reader)
```
They are normally more than fast enough as they are.
To squeeze extra performance out, you can pass the keyword `copy=false`.
This will cause the reader to return the _same_ record over and over, and mutate it into place.
```jldoctest
julia> reader = FASTAReader(IOBuffer(">A\nTAG\n>B\nAGA"); copy=false);
julia> rec1 = first(reader); sequence(rec1)
"TAG"
julia> rec2 = first(reader); sequence(rec2)
"AGA"
julia> rec1 === rec2
true
julia> sequence(rec1)
"AGA"
julia> close(reader)
```
When using readers and writers, be careful that they carry their own buffer,
meaning that the underlying IO may not be updated immediately after reading/writing:
```jldoctest
julia> io = IOBuffer();
julia> writer = FASTAWriter(io);
julia> write(writer, parse(FASTARecord, ">ABC\nDEF"));
julia> take!(io) # NB: Empty!
UInt8[]
```
To use it correctly, either call `flush`, or close the writer first (which also closes the underlying stream).
It is recommended to use readers and writers with `do` syntax in the form:
```jldoctest
julia> FASTAWriter(open(tempname(), "w")) do writer
write(writer, FASTARecord("identifier", "seq"))
end
16
```
Which will work for most underlying IO types, and will close the writer when the function returns (hence also closing the underlying IO).
Alternatively, the following syntax may be used:
```jldoctest
julia> open(FASTAWriter, tempname()) do writer
write(writer, FASTARecord("identifier", "seq"))
end
16
```
However, this latter syntax does not easily extend to different types of IO, such as gzip compressed streams.
### Validate files
The functions `validate_fasta` and `validate_fastq` can be used to check if an `IO`
contains data that can be read as FASTX.
They return `nothing` if the IO is correctly formatted, and another value if not.
They are significantly faster than parsing the whole file into records,
and are memory efficient.
Be aware that the validators mutate the IO by reading it, so make sure to reset the IO before using it to parse FASTX files.
```jldoctest
julia> io = IOBuffer(">header\r\nAGG\nKK");
julia> validate_fasta(io) === nothing
true
julia> read(io) # NB: IO is now exhausted
UInt8[]
julia> validate_fastq(IOBuffer("@header\nTTT\n+\njkm")) === nothing
true
```
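For a seekable IO, you can rewind with `seekstart` before re-reading it; a sketch:
```julia
io = IOBuffer(">header\nAGG")
validate_fasta(io) === nothing  # true; io is now exhausted
seekstart(io)                   # rewind before parsing records
records = collect(FASTAReader(io))
```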
```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX, BioSequences
end
```
# FASTX
[](https://github.com/BioJulia/FASTX.jl/releases/latest)
[](https://github.com/BioJulia/FASTX.jl/blob/master/LICENSE)
[](https://doi.org/10.5281/zenodo.3663087)
[](https://www.repostatus.org/#active)
[](https://gitter.im/BioJulia/FASTX.jl)
Read and write files in FASTA and FASTQ format, the most common biological sequence file formats.
## Installation
You can install FASTX from the Julia REPL.
Press `]` to enter pkg mode, then enter the following:
```julia
(v1.8) pkg> add FASTX
```
## Quickstart
See more documentation in the sections in the sidebar.
### Read FASTA or FASTQ files
It is preferred to use the `do` syntax to automatically close the file when you're done with it:
```jldoctest
julia> FASTAReader(open("../test/data/test.fasta")) do reader
for record in reader
println(identifier(record))
end
end
abc
```
Alternatively, you can open and close the reader manually:
```jldoctest
julia> reader = FASTAReader(open("../test/data/test.fasta"));
julia> for record in reader
println(identifier(record))
end
abc
julia> close(reader)
```
### Write FASTA or FASTQ files
```jldoctest
julia> FASTQWriter(open(tempname(), "w")) do writer
write(writer, FASTQRecord("abc", "TAG", "ABC"))
end
15
```
### Read and write Gzip compressed FASTA files
```jldoctest
julia> using CodecZlib
julia> FASTAReader(GzipDecompressorStream(open("../test/data/seqs.fna.gz"))) do reader
for record in reader
println(identifier(record))
end
end
seqa
seqb
julia> FASTQWriter(GzipCompressorStream(open(tempname(), "w"))) do writer
write(writer, FASTQRecord("header", "sequence", "quality!"))
end
28
```
### Construct FASTA or FASTQ records from raw parts
```jldoctest
julia> fasta_record = FASTARecord("some header", dna"TAGAAGA");
julia> fastq_record = FASTQRecord("read1", "TAGA", "ABCD");
```
### Validate that a file (or an arbitrary `IO`) is well-formatted
The `validate_fast*` functions return `nothing` if the IO is well-formatted:
```jldoctest
julia> validate_fasta(IOBuffer(">ABC\nDEF")) === nothing
true
julia> validate_fastq(IOBuffer("@ABC\nTAG\n+\nDDD")) === nothing
true
```
To check if files are well-formatted:
```jldoctest
julia> open(validate_fasta, "../test/data/test.fasta") === nothing
true
julia> open(validate_fasta, "Project.toml") === nothing
false
```
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing) for detailed contributor and maintainer guidelines, and the code of conduct.
```@meta
CurrentModule = FASTX
DocTestSetup = quote
using FASTX
end
```
# Records
FASTX files are considered a sequence of `Record`s, `FASTA.Record` for FASTA files and `FASTQ.Record` for FASTQ.
For convenience, `FASTARecord` and `FASTQRecord` are aliases of `FASTA.Record` and `FASTQ.Record`.
A `Record` object represents the text of the FASTX record as it is, e.g. the following FASTA record:
```
>some header here
TAGATGAA
AA
```
Is stored in a `FASTA.Record` object roughly as its constituent bytes, plus some metadata.
There is no notion in the record object of being a DNA or RNA sequence - it's simply an array of bytes.
Records can be constructed from raw parts (i.e. description and sequence and, for FASTQ, quality), where
* `description::AbstractString`
* `sequence::Union{AbstractString, BioSequence}`
* `quality::Union{AbstractString, Vector{<:Number}}`
Alternatively, they can be parsed directly from a string or an `AbstractVector{UInt8}`.
```jldoctest
julia> record = parse(FASTARecord, ">abc\nAGCC\nCCGA");
julia> record2 = FASTARecord("abc", "AGCCCCGA");
julia> record == record2
true
```
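The same applies to FASTQ; a sketch (the quality is given as a string here, but a vector of numbers also works, per the list above):
```julia
fq1 = parse(FASTQRecord, "@read1\nTAGA\n+\nABCD")
fq2 = FASTQRecord("read1", "TAGA", "ABCD")
fq1 == fq2  # true
```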
Records can be queried for their information, namely identifier, description and sequence (and quality, for FASTQ).
By default, this returns an `AbstractString` view into the `Record`'s data:
```jldoctest
julia> record = parse(FASTARecord, ">ident desc\nUGU\nGA");
julia> (identifier(record), description(record), sequence(record))
("ident", "ident desc", "UGUGA")
```
However, you can request the sequence as a `String` or as any subtype of `BioSequence`:
```jldoctest
julia> record = parse(FASTARecord, ">abc\nUGC\nCCA");
julia> using BioSequences # LongRNA defined in BioSequences.jl
julia> sequence(LongRNA{2}, record)
6nt RNA Sequence:
UGCCCA
julia> sequence(String, record)
"UGCCCA"
```
The number of bytes in the sequence of a `Record` can be queried using `seqsize`:
```jldoctest
julia> record = parse(FASTARecord, ">abc\nUGC\nCCA");
julia> seqsize(record)
6
```
### Reference:
```@docs
identifier
description
sequence
seqsize
```
"""
YAML
A package to read and write YAML.
https://github.com/JuliaData/YAML.jl
Reading:
* `YAML.load` parses the first YAML document of a YAML file as a Julia object.
* `YAML.load_all` parses all YAML documents of a YAML file.
* `YAML.load_file` is the same as `YAML.load` except it reads from a file.
* `YAML.load_all_file` is the same as `YAML.load_all` except it reads from a file.
Writing:
* `YAML.write` prints a Julia object as a YAML file.
* `YAML.write_file` is the same as `YAML.write` except it writes to a file.
* `YAML.yaml` converts a given Julia object to a YAML-formatted string.
"""
module YAML
import Base: isempty, length, show, peek
import Base: iterate
using Base64: base64decode
using Dates
using Printf
using StringEncodings
# Singleton object used to indicate that a stream contains no document
# content, i.e. whitespace and comments only.
struct MissingDocument end
const missing_document = MissingDocument()
include("versions.jl")
include("queue.jl")
include("buffered_input.jl")
include("mark.jl")
include("span.jl")
include("tokens.jl")
include("scanner.jl")
include("events.jl")
include("parser.jl")
include("nodes.jl")
include("resolver.jl")
include("composer.jl")
include("constructor.jl")
include("writer.jl")
const _constructor = Union{Nothing, Dict}
const _dicttype = Union{Type,Function}
# add a dicttype-aware version of construct_mapping to the constructors
function _patch_constructors(more_constructors::_constructor, dicttype::_dicttype)
if more_constructors === nothing
more_constructors = Dict{String,Function}()
else
more_constructors = copy(more_constructors) # do not change the outside world
end
if !haskey(more_constructors, "tag:yaml.org,2002:map")
more_constructors["tag:yaml.org,2002:map"] = custom_mapping(dicttype) # map to the custom type
elseif dicttype != Dict{Any,Any} # only warn if another type has explicitly been set
@warn "dicttype=$dicttype has no effect because more_constructors has the key \"tag:yaml.org,2002:map\""
end
return more_constructors
end
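# Example (a sketch): the `dicttype` keyword is routed through this patching,
# so mappings are constructed with the requested dictionary type:
#
#     YAML.load("a: 1"; dicttype=Dict{String,Any})
#     # -> Dict{String,Any}("a" => 1)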
"""
load(x::Union{AbstractString, IO})
Parse the string or stream `x` as a YAML file, and return the first YAML document as a
Julia object.
"""
function load(tokenstream::TokenStream, constructor::Constructor)
resolver = Resolver()
eventstream = EventStream(tokenstream)
node = compose(eventstream, resolver)
construct_document(constructor, node)
end
load(input::IO, constructor::Constructor) =
missing_to_nothing(load(TokenStream(input), constructor))
load(ts::TokenStream, more_constructors::_constructor = nothing, multi_constructors::Dict = Dict(); dicttype::_dicttype = Dict{Any, Any}, constructorType::Function = SafeConstructor) =
load(ts, constructorType(_patch_constructors(more_constructors, dicttype), multi_constructors))
load(input::IO, more_constructors::_constructor = nothing, multi_constructors::Dict = Dict(); kwargs...) =
missing_to_nothing(load(TokenStream(input), more_constructors, multi_constructors ; kwargs...))
# When doing `load` or `load_file` of something that doesn't start any
# document, return `nothing`.
missing_to_nothing(::MissingDocument) = nothing
missing_to_nothing(x) = x
"""
YAMLDocIterator
An iterator type to represent multiple YAML documents. You can retrieve each YAML document
as a Julia object by iterating.
"""
mutable struct YAMLDocIterator
input::IO
ts::TokenStream
constructor::Constructor
next_doc
function YAMLDocIterator(input::IO, constructor::Constructor)
it = new(input, TokenStream(input), constructor, nothing)
it.next_doc = eof(it.input) ? missing_document : load(it.ts, it.constructor)
return it
end
end
YAMLDocIterator(input::IO, more_constructors::_constructor=nothing, multi_constructors::Dict = Dict(); dicttype::_dicttype=Dict{Any, Any}, constructorType::Function = SafeConstructor) = YAMLDocIterator(input, constructorType(_patch_constructors(more_constructors, dicttype), multi_constructors))
# It's unknown how many documents will be found. By doing this,
# functions like `collect` do not try to query the length of the
# iterator.
Base.IteratorSize(::YAMLDocIterator) = Base.SizeUnknown()
# Iteration protocol.
function iterate(it::YAMLDocIterator, _ = nothing)
it.next_doc === missing_document && return nothing
doc = it.next_doc
if eof(it.input)
it.next_doc = missing_document
else
reset!(it.ts)
it.next_doc = load(it.ts, it.constructor)
end
return doc, nothing
end
"""
load_all(x::Union{AbstractString, IO}) -> YAMLDocIterator
Parse the string or stream `x` as a YAML file, and return corresponding YAML documents.
"""
load_all(input::IO, args...; kwargs...) =
YAMLDocIterator(input, args...; kwargs...)
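# Example (a sketch): iterate the documents of a multi-document string:
#
#     for doc in YAML.load_all("a: 1\n---\nb: 2")
#         println(doc)  # Dict("a" => 1), then Dict("b" => 2)
#     end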
load(input::AbstractString, args...; kwargs...) =
load(IOBuffer(input), args...; kwargs...)
load_all(input::AbstractString, args...; kwargs...) =
load_all(IOBuffer(input), args...; kwargs...)
"""
load_file(filename::AbstractString)
Parse the YAML file `filename`, and return the first YAML document as a Julia object.
"""
load_file(filename::AbstractString, args...; kwargs...) =
open(filename, "r") do input
load(input, args...; kwargs...)
end
"""
load_all_file(filename::AbstractString) -> YAMLDocIterator
Parse the YAML file `filename`, and return corresponding YAML documents.
"""
load_all_file(filename::AbstractString, args...; kwargs...) =
open(filename, "r") do input
load_all(input, args...; kwargs...)
end
end # module
# Simple buffered input that allows peeking an arbitrary number of characters
# ahead by maintaining a typically quite small buffer of a few characters.
mutable struct BufferedInput
input::IO
buffer::Vector{Char}
offset::UInt64
avail::UInt64
function BufferedInput(input::IO)
return new(input, Char[], 0, 0)
end
end
# Read and buffer `n` more characters
function buffer!(bi::BufferedInput, n::Integer)::Nothing
for i in (bi.offset + bi.avail) .+ (1:n)
c = eof(bi.input) ? '\0' : read(bi.input, Char)
if i ≤ length(bi.buffer)
bi.buffer[i] = c
else
push!(bi.buffer, c)
end
end
bi.avail += n
nothing
end
# Peek the character in the i-th position relative to the current position.
# (0-based)
function peek(bi::BufferedInput, i::Integer=0)
i1 = i + 1
if bi.avail < i1
buffer!(bi, i1 - bi.avail)
end
bi.buffer[bi.offset + i1]
end
# Return the string formed from the first n characters from the current position
# of the stream.
function prefix(bi::BufferedInput, n::Integer=1)::String
bi.avail < n && buffer!(bi, n - bi.avail)
String(bi.buffer[bi.offset .+ (1:n)])
end
# Note: if n exceeds the number of buffered characters, the remainder is
# read (and discarded) directly from the underlying input.
# Advance the stream by n characters.
function forward!(bi::BufferedInput, n::Integer=1)
if n < bi.avail
bi.offset += n
bi.avail -= n
else
n -= bi.avail
bi.offset = 0
bi.avail = 0
while n > 0
read(bi.input, Char)
n -= 1
end
end
nothing
end
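# Example (a sketch of the API above):
#
#     bi = BufferedInput(IOBuffer("hello"))
#     peek(bi)        # 'h' -- does not advance the stream
#     prefix(bi, 3)   # "hel"
#     forward!(bi, 2)
#     peek(bi)        # 'l'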
# Ugly hack to allow peeking of `StringDecoder`s
function peek(io::StringDecoder, ::Type{UInt8})
c = read(io, UInt8)
io.skip -= 1
c
end
# The same but for Julia 1.3
peek(io::StringDecoder) = peek(io, UInt8)
struct ComposerError <: Exception
context::Union{String, Nothing}
context_mark::Union{Mark, Nothing}
problem::Union{String, Nothing}
problem_mark::Union{Mark, Nothing}
note::Union{String, Nothing}
function ComposerError(context=nothing, context_mark=nothing,
problem=nothing, problem_mark=nothing,
note=nothing)
new(context, context_mark, problem, problem_mark, note)
end
end
function show(io::IO, error::ComposerError)
if error.context !== nothing
print(io, error.context, " at ", error.context_mark, ": ")
end
print(io, error.problem, " at ", error.problem_mark)
end
mutable struct Composer
input::EventStream
anchors::Dict{String, Node}
resolver::Resolver
end
function compose(events::EventStream, resolver::Resolver)
composer = Composer(events, Dict{String, Node}(), resolver)
@assert forward!(composer.input) isa StreamStartEvent
node = compose_document(composer)
if peek(composer.input) isa StreamEndEvent
forward!(composer.input)
else
@assert peek(composer.input) isa DocumentStartEvent
end
node
end
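# Example (a sketch): compose the node tree of a small document:
#
#     ts = TokenStream(IOBuffer("a: [1, 2]"))
#     node = compose(EventStream(ts), Resolver())  # -> MappingNode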
function compose_document(composer::Composer)
peek(composer.input) isa StreamEndEvent && return missing_document
@assert forward!(composer.input) isa DocumentStartEvent
node = compose_node(composer)
@assert forward!(composer.input) isa DocumentEndEvent
empty!(composer.anchors)
node
end
function handle_event(event::AliasEvent, composer::Composer)
anchor = event.anchor
forward!(composer.input)
haskey(composer.anchors, anchor) || throw(ComposerError(
nothing, nothing, "found undefined alias '$(anchor)'", event.start_mark))
return composer.anchors[anchor]
end
handle_error(event::Event, composer::Composer, anchor::Union{String, Nothing}) =
anchor !== nothing && haskey(composer.anchors, anchor) && throw(ComposerError(
"found duplicate anchor '$(anchor)'; first occurance",
composer.anchors[anchor].start_mark, "second occurence",
event.start_mark))
function handle_event(event::ScalarEvent, composer::Composer)
anchor = event.anchor
handle_error(event, composer, anchor)
compose_scalar_node(composer, anchor)
end
function handle_event(event::SequenceStartEvent, composer::Composer)
anchor = event.anchor
handle_error(event, composer, anchor)
compose_sequence_node(composer, anchor)
end
function handle_event(event::MappingStartEvent, composer::Composer)
anchor = event.anchor
handle_error(event, composer, anchor)
compose_mapping_node(composer, anchor)
end
handle_event(event::Event, composer::Composer) = nothing
function compose_node(composer::Composer)
event = peek(composer.input)
handle_event(event, composer)
end
function _compose_scalar_node(event::ScalarEvent, composer::Composer, anchor::Union{String, Nothing})
tag = event.tag
if tag === nothing || tag == "!"
tag = resolve(composer.resolver, ScalarNode,
event.value, event.implicit)
end
node = ScalarNode(tag, event.value, event.start_mark, event.end_mark,
event.style)
if anchor !== nothing
composer.anchors[anchor] = node
end
node
end
compose_scalar_node(composer::Composer, anchor::Union{String, Nothing}) =
_compose_scalar_node(forward!(composer.input), composer, anchor)
__compose_sequence_node(event::SequenceEndEvent, composer::Composer, node::Node) = false
function __compose_sequence_node(event::Event, composer, node)
push!(node.value, compose_node(composer))
true
end
function _compose_sequence_node(start_event::SequenceStartEvent, composer::Composer, anchor::Union{String, Nothing})
tag = start_event.tag
if tag === nothing || tag == "!"
tag = resolve(composer.resolver, SequenceNode,
nothing, start_event.implicit)
end
node = SequenceNode(tag, Any[], start_event.start_mark, nothing,
start_event.flow_style)
if anchor !== nothing
composer.anchors[anchor] = node
end
while true
event = peek(composer.input)
event === nothing && break
__compose_sequence_node(event, composer, node) || break
end
end_event = forward!(composer.input)
node.end_mark = end_event.end_mark
node
end
compose_sequence_node(composer::Composer, anchor::Union{String, Nothing}) =
_compose_sequence_node(forward!(composer.input), composer, anchor)
__compose_mapping_node(event::MappingEndEvent, composer::Composer, node::Node) = false
function __compose_mapping_node(event::Event, composer::Composer, node::Node)
item_key = compose_node(composer)
item_value = compose_node(composer)
push!(node.value, (item_key, item_value))
true
end
function _compose_mapping_node(start_event::MappingStartEvent, composer::Composer, anchor::Union{String, Nothing})
tag = start_event.tag
if tag === nothing || tag == "!"
tag = resolve(composer.resolver, MappingNode,
nothing, start_event.implicit)
end
node = MappingNode(tag, Any[], start_event.start_mark, nothing,
start_event.flow_style)
if anchor !== nothing
composer.anchors[anchor] = node
end
while true
event = peek(composer.input)
__compose_mapping_node(event, composer, node) || break
end
end_event = forward!(composer.input)
node.end_mark = end_event.end_mark
node
end
compose_mapping_node(composer::Composer, anchor::Union{String, Nothing}) =
_compose_mapping_node(forward!(composer.input), composer, anchor)
struct ConstructorError <: Exception
context::Union{String, Nothing}
context_mark::Union{Mark, Nothing}
problem::Union{String, Nothing}
problem_mark::Union{Mark, Nothing}
note::Union{String, Nothing}
function ConstructorError(context=nothing, context_mark=nothing,
problem=nothing, problem_mark=nothing,
note=nothing)
new(context, context_mark, problem, problem_mark, note)
end
end
function show(io::IO, error::ConstructorError)
if error.context !== nothing
print(io, error.context, " at ", error.context_mark, ": ")
end
print(io, error.problem, " at ", error.problem_mark)
end
mutable struct Constructor
constructed_objects::Dict{Node, Any}
recursive_objects::Set{Node}
yaml_constructors::Dict{Union{String, Nothing}, Function}
yaml_multi_constructors::Dict{Union{String, Nothing}, Function}
function Constructor(single_constructors = Dict(), multi_constructors = Dict())
new(Dict{Node, Any}(), Set{Node}(),
convert(Dict{Union{String, Nothing},Function}, single_constructors),
convert(Dict{Union{String, Nothing},Function}, multi_constructors))
end
end
function add_constructor!(func::Function, constructor::Constructor, tag::Union{String, Nothing})
constructor.yaml_constructors[tag] = func
constructor
end
function add_multi_constructor!(func::Function, constructor::Constructor, tag::Union{String, Nothing})
constructor.yaml_multi_constructors[tag] = func
constructor
end
Constructor(::Nothing) = Constructor(Dict{String,Function}())
SafeConstructor(constructors::Dict = Dict(), multi_constructors::Dict = Dict()) = Constructor(merge(copy(default_yaml_constructors), constructors), multi_constructors)
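# Example (a sketch): register a constructor for a custom tag:
#
#     c = SafeConstructor()
#     add_constructor!(c, "!uppercase") do constructor, node
#         uppercase(construct_scalar(constructor, node))
#     end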
function construct_document(constructor::Constructor, node::Node)
data = construct_object(constructor, node)
empty!(constructor.constructed_objects)
empty!(constructor.recursive_objects)
data
end
construct_document(::Constructor, ::MissingDocument) = missing_document
function construct_object(constructor::Constructor, node::Node)
if haskey(constructor.constructed_objects, node)
return constructor.constructed_objects[node]
end
if in(node, constructor.recursive_objects)
throw(ConstructorError(nothing, nothing,
"found unconstructable recursive node",
node.start_mark))
end
push!(constructor.recursive_objects, node)
node_constructor = nothing
tag_suffix = nothing
if haskey(constructor.yaml_constructors, node.tag)
node_constructor = constructor.yaml_constructors[node.tag]
else
for (tag_prefix, node_const) in constructor.yaml_multi_constructors
if tag_prefix !== nothing && startswith(node.tag, tag_prefix)
tag_suffix = node.tag[length(tag_prefix) + 1:end]
node_constructor = node_const
break
end
end
if node_constructor === nothing
if haskey(constructor.yaml_multi_constructors, nothing)
tag_suffix = node.tag
node_constructor = constructor.yaml_multi_constructors[nothing]
elseif haskey(constructor.yaml_constructors, nothing)
node_constructor = constructor.yaml_constructors[nothing]
elseif node isa ScalarNode
node_constructor = construct_scalar
elseif node isa SequenceNode
node_constructor = construct_sequence
elseif node isa MappingNode
node_constructor = construct_mapping
end
end
end
if tag_suffix === nothing
data = node_constructor(constructor, node)
else
data = node_constructor(constructor, tag_suffix, node)
end
# TODO: Handle generators/iterators
constructor.constructed_objects[node] = data
delete!(constructor.recursive_objects, node)
data
end
function construct_scalar(constructor::Constructor, node::Node)
if !(node isa ScalarNode)
throw(ConstructorError(nothing, nothing,
"expected a scalar node, but found $(typeof(node))",
node.start_mark))
end
node.value
end
function construct_sequence(constructor::Constructor, node::Node)
if !(node isa SequenceNode)
throw(ConstructorError(nothing, nothing,
"expected a sequence node, but found $(typeof(node))",
node.start_mark))
end
[construct_object(constructor, child) for child in node.value]
end
function flatten_mapping(node::MappingNode)
merge = []
index = 1
while index <= length(node.value)
key_node, value_node = node.value[index]
if key_node.tag == "tag:yaml.org,2002:merge"
node.value = node.value[setdiff(axes(node.value, 1), index)]
if value_node isa MappingNode
flatten_mapping(value_node)
append!(merge, value_node.value)
elseif value_node isa SequenceNode
submerge = []
for subnode in value_node.value
if !(subnode isa MappingNode)
throw(ConstructorError("while constructing a mapping",
node.start_mark,
"expected a mapping node, but found $(typeof(subnode))",
subnode.start_mark))
end
flatten_mapping(subnode)
                push!(submerge, subnode.value)
            end
            for value in reverse(submerge)
                append!(merge, value)
            end
end
elseif key_node.tag == "tag:yaml.org,2002:value"
key_node.tag = "tag:yaml.org,2002:str"
index += 1
else
index += 1
end
end
if !isempty(merge)
node.value = vcat(merge, node.value)
end
end
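# Example (a sketch): this implements YAML merge keys ("<<"), e.g.
#
#     base: &b {x: 1}
#     derived:
#       <<: *b
#       y: 2
#
# constructs `derived` as a mapping containing both `x` and `y`.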
function construct_mapping(dicttype::Union{Type,Function}, constructor::Constructor, node::MappingNode)
flatten_mapping(node)
mapping = dicttype()
for (key_node, value_node) in node.value
key = construct_object(constructor, key_node)
value = construct_object(constructor, value_node)
        if !(key isa keytype(mapping))
try
key = keytype(mapping)(key) # try to cast
catch
throw(ConstructorError(nothing, nothing,
"Cannot cast $key to the key type of $dicttype",
node.start_mark))
end
end
try
mapping[key] = value
catch
throw(ConstructorError(nothing, nothing,
"Cannot store $key=>$value in $dicttype",
node.start_mark))
end
end
mapping
end
construct_mapping(constructor::Constructor, node::Node) = construct_mapping(Dict{Any,Any}, constructor, node)
# create a construct_mapping instance for a specific dicttype
custom_mapping(dicttype::Type{D}) where D <: AbstractDict =
(constructor::Constructor, node::Node) -> construct_mapping(dicttype, constructor, node)
function custom_mapping(dicttype::Function)
dicttype_test = try dicttype() catch
throw(ArgumentError("The dicttype Function cannot be called without arguments"))
end
if !(dicttype_test isa AbstractDict)
throw(ArgumentError("The dicttype Function does not return an AbstractDict"))
end
return (constructor::Constructor, node::Node) -> construct_mapping(dicttype, constructor, node)
end
function construct_yaml_null(constructor::Constructor, node::Node)
construct_scalar(constructor, node)
nothing
end
const bool_values = Dict(
"yes" => true,
"no" => false,
"true" => true,
"false" => false,
"on" => true,
"off" => false )
function construct_yaml_bool(constructor::Constructor, node::Node)
value = construct_scalar(constructor, node)
bool_values[lowercase(value)]
end
function construct_yaml_int(constructor::Constructor, node::Node)
value = string(construct_scalar(constructor, node))
value = lowercase(replace(value, "_" => ""))
if in(':', value)
# TODO
#throw(ConstructorError(nothing, nothing,
#"sexagesimal integers not yet implemented", node.start_mark))
@warn "sexagesimal integers not yet implemented. Returning String."
return value
end
if length(value) > 2 && value[1] == '0' && (value[2] == 'x' || value[2] == 'X')
return parse(Int, value[3:end], base = 16)
elseif length(value) > 1 && value[1] == '0'
return parse(Int, value, base = 8)
else
return parse(Int, value, base = 10)
end
end
function construct_yaml_float(constructor::Constructor, node::Node)
value = string(construct_scalar(constructor, node))
value = lowercase(replace(value, "_" => ""))
if in(':', value)
# TODO
# throw(ConstructorError(nothing, nothing,
# "sexagesimal floats not yet implemented", node.start_mark))
@warn "sexagesimal floats not yet implemented. Returning String."
return value
end
if value == ".nan"
return NaN
end
m = match(r"^([+\-]?)\.inf$", value)
if m !== nothing
if m.captures[1] == "-"
return -Inf
else
return Inf
end
end
return parse(Float64, value)
end
const timestamp_pat =
r"^(\d{4})- (?# year)
(\d\d?)- (?# month)
(\d\d?) (?# day)
(?:
(?:[Tt]|[ \t]+)
(\d\d?): (?# hour)
(\d\d): (?# minute)
(\d\d) (?# second)
(?:\.(\d*))? (?# fraction)
(?:
[ \t]*(Z|(?:[+\-])(\d\d?)
(?:
:(\d\d)
)?)
)?
)?$"x
function construct_yaml_timestamp(constructor::Constructor, node::Node)
value = construct_scalar(constructor, node)
mat = match(timestamp_pat, value)
if mat === nothing
throw(ConstructorError(nothing, nothing,
"could not make sense of timestamp format", node.start_mark))
end
yr = parse(Int, mat.captures[1])
mn = parse(Int, mat.captures[2])
dy = parse(Int, mat.captures[3])
if mat.captures[4] === nothing
return Date(yr, mn, dy)
end
h = parse(Int, mat.captures[4])
m = parse(Int, mat.captures[5])
s = parse(Int, mat.captures[6])
if mat.captures[7] === nothing
return DateTime(yr, mn, dy, h, m, s)
end
ms = 0
if mat.captures[7] !== nothing
ms = mat.captures[7]
if length(ms) > 3
ms = ms[1:3]
end
ms = parse(Int, string(ms, repeat("0", 3 - length(ms))))
end
delta_hr = 0
delta_mn = 0
if mat.captures[9] !== nothing
delta_hr = parse(Int, mat.captures[9])
end
if mat.captures[10] !== nothing
delta_mn = parse(Int, mat.captures[10])
end
    # Note: the timezone offset (delta_hr/delta_mn) is parsed but currently
    # ignored, since the stdlib DateTime has no timezone support.
return DateTime(yr, mn, dy, h, m, s, ms)
end
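# Example (a sketch):
#
#     YAML.load("t: 2001-12-14 21:59:43.10")["t"]
#     # -> DateTime(2001, 12, 14, 21, 59, 43, 100)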
function construct_yaml_omap(constructor::Constructor, node::Node)
throw(ConstructorError(nothing, nothing,
"omap type not yet implemented", node.start_mark))
end
function construct_yaml_pairs(constructor::Constructor, node::Node)
throw(ConstructorError(nothing, nothing,
"pairs type not yet implemented", node.start_mark))
end
function construct_yaml_set(constructor::Constructor, node::Node)
throw(ConstructorError(nothing, nothing,
"set type not yet implemented", node.start_mark))
end
function construct_yaml_str(constructor::Constructor, node::Node)
string(construct_scalar(constructor, node))
end
function construct_yaml_seq(constructor::Constructor, node::Node)
construct_sequence(constructor, node)
end
function construct_yaml_map(constructor::Constructor, node::Node)
construct_mapping(constructor, node)
end
function construct_yaml_object(constructor::Constructor, node::Node)
throw(ConstructorError(nothing, nothing,
"object type not yet implemented", node.start_mark))
end
function construct_undefined(constructor::Constructor, node::Node)
throw(ConstructorError(nothing, nothing,
"could not determine a constructor for the tag '$(node.tag)'",
node.start_mark))
end
function construct_yaml_binary(constructor::Constructor, node::Node)
value = replace(string(construct_scalar(constructor, node)), "\n" => "")
base64decode(value)
end
const default_yaml_constructors = Dict{Union{String, Nothing}, Function}(
"tag:yaml.org,2002:null" => construct_yaml_null,
"tag:yaml.org,2002:bool" => construct_yaml_bool,
"tag:yaml.org,2002:int" => construct_yaml_int,
"tag:yaml.org,2002:float" => construct_yaml_float,
"tag:yaml.org,2002:binary" => construct_yaml_binary,
"tag:yaml.org,2002:timestamp" => construct_yaml_timestamp,
"tag:yaml.org,2002:omap" => construct_yaml_omap,
"tag:yaml.org,2002:pairs" => construct_yaml_pairs,
"tag:yaml.org,2002:set" => construct_yaml_set,
"tag:yaml.org,2002:str" => construct_yaml_str,
"tag:yaml.org,2002:seq" => construct_yaml_seq,
"tag:yaml.org,2002:map" => construct_yaml_map,
nothing => construct_undefined,
)
abstract type Event end
struct StreamStartEvent <: Event
start_mark::Mark
end_mark::Mark
encoding::String
end
struct StreamEndEvent <: Event
start_mark::Mark
end_mark::Mark
end
struct DocumentStartEvent <: Event
start_mark::Mark
end_mark::Mark
explicit::Bool
version::Union{Tuple, Nothing}
tags::Union{Dict{String, String}, Nothing}
function DocumentStartEvent(start_mark::Mark,end_mark::Mark,
explicit::Bool, version=nothing,
tags=nothing)
new(start_mark, end_mark, explicit, version, tags)
end
end
struct DocumentEndEvent <: Event
start_mark::Mark
end_mark::Mark
explicit::Bool
end
struct AliasEvent <: Event
start_mark::Mark
end_mark::Mark
anchor::Union{String, Nothing}
end
struct ScalarEvent <: Event
start_mark::Mark
end_mark::Mark
anchor::Union{String, Nothing}
tag::Union{String, Nothing}
implicit::Tuple
value::String
style::Union{Char, Nothing}
end
struct SequenceStartEvent <: Event
start_mark::Mark
end_mark::Mark
anchor::Union{String, Nothing}
tag::Union{String, Nothing}
implicit::Bool
flow_style::Bool
end
struct SequenceEndEvent <: Event
start_mark::Mark
end_mark::Mark
end
struct MappingStartEvent <: Event
start_mark::Mark
end_mark::Mark
anchor::Union{String, Nothing}
tag::Union{String, Nothing}
implicit::Bool
flow_style::Bool
end
struct MappingEndEvent <: Event
start_mark::Mark
end_mark::Mark
end
# Position within the document being parsed
struct Mark
index::UInt64
line::UInt64
column::UInt64
end
function show(io::IO, mark::Mark)
@printf(io, "line %d, column %d", mark.line, mark.column)
end
abstract type Node end
mutable struct ScalarNode <: Node
tag::String
value::String
start_mark::Union{Mark, Nothing}
end_mark::Union{Mark, Nothing}
style::Union{Char, Nothing}
end
mutable struct SequenceNode <: Node
tag::String
value::Vector
start_mark::Union{Mark, Nothing}
end_mark::Union{Mark, Nothing}
flow_style::Bool
end
mutable struct MappingNode <: Node
tag::String
value::Vector
start_mark::Union{Mark, Nothing}
end_mark::Union{Mark, Nothing}
flow_style::Bool
end
const DEFAULT_TAGS = Dict{String,String}("!" => "!", "!!" => "tag:yaml.org,2002:")
struct ParserError <: Exception
context::Union{String, Nothing}
context_mark::Union{Mark, Nothing}
problem::Union{String, Nothing}
problem_mark::Union{Mark, Nothing}
note::Union{String, Nothing}
function ParserError(context=nothing, context_mark=nothing,
problem=nothing, problem_mark=nothing,
note=nothing)
new(context, context_mark, problem, problem_mark, note)
end
end
function show(io::IO, error::ParserError)
if error.context !== nothing
print(io, error.context, " at ", error.context_mark, ": ")
end
print(io, error.problem, " at ", error.problem_mark)
end
mutable struct EventStream
input::TokenStream
next_event::Union{Event, Nothing}
state::Union{Function, Nothing}
states::Vector{Function}
marks::Vector{Mark}
yaml_version::Union{Tuple, Nothing}
tag_handles::Dict{String, String}
end_of_stream::Union{StreamEndEvent, Nothing}
function EventStream(input::TokenStream)
new(input, nothing, parse_stream_start, Function[], Mark[],
nothing, Dict{String, String}(), nothing)
end
end
function peek(stream::EventStream)
if stream.next_event === nothing
if stream.state === nothing
return nothing
elseif stream.end_of_stream !== nothing
stream.state = nothing
return stream.end_of_stream
else
            stream.next_event = stream.state(stream)
end
end
return stream.next_event
end
function forward!(stream::EventStream)
if stream.next_event === nothing
if stream.state === nothing
nothing
elseif stream.end_of_stream !== nothing
stream.state = nothing
return stream.end_of_stream
else
stream.next_event = stream.state(stream)
end
end
e = stream.next_event
stream.next_event = nothing
return e
end
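# Example (a sketch): drain the event stream of a small document:
#
#     es = EventStream(TokenStream(IOBuffer("a: 1")))
#     while (ev = forward!(es)) !== nothing
#         println(typeof(ev))  # StreamStartEvent, DocumentStartEvent, ...
#     end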
function process_directives(stream::EventStream)
stream.yaml_version = nothing
stream.tag_handles = Dict{String, String}()
while peek(stream.input) isa DirectiveToken
token = forward!(stream.input)
if token.name == "YAML"
if stream.yaml_version !== nothing
throw(ParserError(nothing, nothing,
"found duplicate YAML directive",
firstmark(token)))
end
major, minor = token.value
if major != 1
throw(ParserError(nothing, nothing,
"found incompatible YAML document (version 1.* is required)",
firstmark(token)))
end
stream.yaml_version = token.value
elseif token.name == "TAG"
handle, prefix = token.value
if haskey(stream.tag_handles, handle)
throw(ParserError(nothing, nothing,
"duplicate tag handle $(handle)", firstmark(token)))
end
stream.tag_handles[handle] = prefix
end
end
if stream.tag_handles !== nothing
value = stream.yaml_version, copy(stream.tag_handles)
else
value = stream.yaml_version, nothing
end
for key in keys(DEFAULT_TAGS)
if !haskey(stream.tag_handles, key)
stream.tag_handles[key] = DEFAULT_TAGS[key]
end
end
value
end
# Parser state functions
function parse_stream_start(stream::EventStream)
token = forward!(stream.input) :: StreamStartToken
event = StreamStartEvent(firstmark(token), lastmark(token),
token.encoding)
stream.state = parse_implicit_document_start
event
end
function parse_implicit_document_start(stream::EventStream)
token = peek(stream.input)
# Parse a byte order mark
if token isa ByteOrderMarkToken
forward!(stream.input)
token = peek(stream.input)
end
if !(token isa Union{DirectiveToken, DocumentStartToken, StreamEndToken})
stream.tag_handles = DEFAULT_TAGS
event = DocumentStartEvent(firstmark(token), firstmark(token),
false)
push!(stream.states, parse_document_end)
stream.state = parse_block_node
event
else
parse_document_start(stream)
end
end
function parse_document_start(stream::EventStream)
# Parse any extra document end indicators.
while peek(stream.input) isa DocumentEndToken
        forward!(stream.input)
end
token = peek(stream.input)
# Parse a byte order mark if it exists
if token isa ByteOrderMarkToken
forward!(stream.input)
token = peek(stream.input)
end
# Parse explicit document.
if !(token isa StreamEndToken)
start_mark = firstmark(token)
version, tags = process_directives(stream)
if !(peek(stream.input) isa DocumentStartToken)
throw(ParserError(nothing, nothing,
"expected '<document start>' but found $(typeof(token))"))
end
token = forward!(stream.input)
event = DocumentStartEvent(start_mark, lastmark(token),
true, version, tags)
push!(stream.states, parse_document_end)
stream.state = parse_document_content
event
else
# Parse the end of the stream
token = forward!(stream.input)
event = StreamEndEvent(firstmark(token), lastmark(token))
@assert isempty(stream.states)
@assert isempty(stream.marks)
stream.state = nothing
event
end
end
function parse_document_end(stream::EventStream)
token = peek(stream.input)
start_mark = end_mark = firstmark(token)
explicit = false
if token isa DocumentEndToken
forward!(stream.input)
end_mark = lastmark(token)
explicit = true
stream.end_of_stream = StreamEndEvent(firstmark(token),
lastmark(token))
end
event = DocumentEndEvent(start_mark, end_mark, explicit)
stream.state = parse_document_start
event
end
function parse_document_content(stream::EventStream)
if peek(stream.input) isa Union{DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken}
event = process_empty_scalar(stream, firstmark(peek(stream.input)))
stream.state = pop!(stream.states)
event
else
parse_block_node(stream)
end
end
function parse_block_node(stream::EventStream)
parse_node(stream, true)
end
function parse_flow_node(stream::EventStream)
parse_node(stream)
end
function parse_block_node_or_indentless_sequence(stream::EventStream)
parse_node(stream, true, true)
end
function _parse_node(token::AliasToken, stream::EventStream, block, indentless_sequence)
forward!(stream.input)
stream.state = pop!(stream.states)
return AliasEvent(firstmark(token), lastmark(token), token.value)
end
function __parse_node(token::ScalarToken, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
forward!(stream.input)
end_mark = lastmark(token)
if (token.plain && tag === nothing) || tag == "!"
implicit = true, false
elseif tag === nothing
implicit = false, true
else
implicit = false, false
end
stream.state = pop!(stream.states)
ScalarEvent(start_mark, end_mark, anchor, tag, implicit,
token.value, token.style)
end
function __parse_node(token::FlowSequenceStartToken, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
end_mark = lastmark(token)
stream.state = parse_flow_sequence_first_entry
SequenceStartEvent(start_mark, end_mark, anchor, tag,
implicit, true)
end
function __parse_node(token::FlowMappingStartToken, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
end_mark = lastmark(token)
stream.state = parse_flow_mapping_first_key
MappingStartEvent(start_mark, end_mark, anchor, tag,
implicit, true)
end
function __parse_node(token::BlockSequenceStartToken, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
block || return nothing
end_mark = firstmark(token)
stream.state = parse_block_sequence_first_entry
SequenceStartEvent(start_mark, end_mark, anchor, tag,
implicit, false)
end
function __parse_node(token::BlockMappingStartToken, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
block || return nothing
end_mark = firstmark(token)
stream.state = parse_block_mapping_first_key
MappingStartEvent(start_mark, end_mark, anchor, tag,
implicit, false)
end
function __parse_node(token, stream::EventStream, block, start_mark, end_mark, anchor, tag, implicit)
if anchor !== nothing || tag !== nothing
stream.state = pop!(stream.states)
return ScalarEvent(start_mark, end_mark, anchor, tag,
(implicit, false), "", nothing)
else
node = block ? "block" : "flow"
throw(ParserError("while parsing a $(node) node", start_mark,
"expected the node content, but found $(typeof(token))",
firstmark(token)))
end
end
function _parse_node(token, stream::EventStream, block, indentless_sequence)
anchor = nothing
tag = nothing
start_mark = end_mark = tag_mark = nothing
if token isa AnchorToken
forward!(stream.input)
start_mark = firstmark(token)
end_mark = lastmark(token)
anchor = token.value
token = peek(stream.input)
if token isa TagToken
forward!(stream.input)
tag_mark = firstmark(token)
end_mark = lastmark(token)
tag = token.value
end
elseif token isa TagToken
forward!(stream.input)
start_mark = firstmark(token)
end_mark = lastmark(token)
tag = token.value
token = peek(stream.input)
if token isa AnchorToken
forward!(stream.input)
end_mark = lastmark(token)
anchor = token.value
end
end
if tag !== nothing
handle, suffix = tag
if handle !== nothing
if !haskey(stream.tag_handles, handle)
throw(ParserError("while parsing a node", start_mark,
"found undefined tag handle $(handle)",
tag_mark))
end
tag = string(stream.tag_handles[handle], suffix)
else
tag = suffix
end
end
token = peek(stream.input)
if start_mark === nothing
start_mark = end_mark = firstmark(token)
end
event = nothing
implicit = tag === nothing || tag == "!"
if indentless_sequence && token isa BlockEntryToken
end_mark = lastmark(token)
stream.state = parse_indentless_sequence_entry
event = SequenceStartEvent(start_mark, end_mark, anchor, tag, implicit,
false)
else
event = __parse_node(token, stream, block, start_mark, end_mark, anchor, tag, implicit)
end
event
end
function parse_node(stream::EventStream, block=false, indentless_sequence=false)
token = peek(stream.input)
_parse_node(token, stream, block, indentless_sequence)
end
function parse_block_sequence_first_entry(stream::EventStream)
token = forward!(stream.input)
push!(stream.marks, firstmark(token))
parse_block_sequence_entry(stream)
end
function parse_block_sequence_entry(stream::EventStream)
token = peek(stream.input)
if token isa BlockEntryToken
forward!(stream.input)
if !(peek(stream.input) isa Union{BlockEntryToken, BlockEndToken})
push!(stream.states, parse_block_sequence_entry)
return parse_block_node(stream)
else
stream.state = parse_block_sequence_entry
return process_empty_scalar(stream, lastmark(token))
end
end
if !(token isa BlockEndToken)
throw(ParserError("while parsing a block collection", stream.marks[end],
"expected <block end>, but found $(typeof(token))",
firstmark(token)))
end
forward!(stream.input)
pop!(stream.marks)
stream.state = pop!(stream.states)
SequenceEndEvent(firstmark(token), lastmark(token))
end
function parse_indentless_sequence_entry(stream::EventStream)
token = peek(stream.input)
if token isa BlockEntryToken
forward!(stream.input)
if !(peek(stream.input) isa Union{BlockEntryToken, KeyToken, ValueToken, BlockEndToken})
push!(stream.states, parse_indentless_sequence_entry)
return parse_block_node(stream)
else
stream.state = parse_indentless_sequence_entry
return process_empty_scalar(stream, lastmark(token))
end
end
stream.state = pop!(stream.states)
SequenceEndEvent(firstmark(token), lastmark(token))
end
function parse_block_mapping_first_key(stream::EventStream)
token = forward!(stream.input)
push!(stream.marks, firstmark(token))
parse_block_mapping_key(stream)
end
function parse_block_mapping_key(stream::EventStream)
token = peek(stream.input)
if token isa KeyToken
forward!(stream.input)
if !(peek(stream.input) isa Union{KeyToken, ValueToken, BlockEndToken})
push!(stream.states, parse_block_mapping_value)
return parse_block_node_or_indentless_sequence(stream)
else
stream.state = parse_block_mapping_value
return process_empty_scalar(stream, lastmark(token))
end
end
if !(token isa BlockEndToken)
throw(ParserError("while parsing a block mapping", stream.marks[end],
"expected <block end>, but found $(typeof(token))",
firstmark(token)))
end
forward!(stream.input)
pop!(stream.marks)
stream.state = pop!(stream.states)
MappingEndEvent(firstmark(token), lastmark(token))
end
function parse_block_mapping_value(stream::EventStream)
token = peek(stream.input)
if token isa ValueToken
forward!(stream.input)
if !(peek(stream.input) isa Union{KeyToken, ValueToken, BlockEndToken})
push!(stream.states, parse_block_mapping_key)
parse_block_node_or_indentless_sequence(stream)
else
stream.state = parse_block_mapping_key
process_empty_scalar(stream, lastmark(token))
end
else
stream.state = parse_block_mapping_key
process_empty_scalar(stream, firstmark(token))
end
end
function parse_flow_sequence_first_entry(stream::EventStream)
token = forward!(stream.input)
push!(stream.marks, firstmark(token))
parse_flow_sequence_entry(stream, true)
end
function _parse_flow_sequence_entry(token::FlowSequenceEndToken, stream::EventStream, first_entry=false)
forward!(stream.input)
pop!(stream.marks)
stream.state = pop!(stream.states)
SequenceEndEvent(firstmark(token), lastmark(token))
end
function _parse_flow_sequence_entry(token::Any, stream::EventStream, first_entry=false)
if !first_entry
if token isa FlowEntryToken
forward!(stream.input)
else
throw(ParserError("while parsing a flow sequence",
stream.marks[end],
"expected ',' or ']', but got $(typeof(token))",
firstmark(token)))
end
end
token = peek(stream.input)
if isa(token, KeyToken)
stream.state = parse_flow_sequence_entry_mapping_key
MappingStartEvent(firstmark(token), lastmark(token),
nothing, nothing, true, true)
elseif isa(token, FlowSequenceEndToken)
nothing
else
push!(stream.states, parse_flow_sequence_entry)
parse_flow_node(stream)
end
end
function parse_flow_sequence_entry(stream::EventStream, first_entry=false)
token = peek(stream.input)
    _parse_flow_sequence_entry(token, stream, first_entry)
end
function parse_flow_sequence_entry_mapping_key(stream::EventStream)
token = forward!(stream.input)
if !(token isa Union{ValueToken, FlowEntryToken, FlowSequenceEndToken})
push!(stream.states, parse_flow_sequence_entry_mapping_value)
parse_flow_node(stream)
else
stream.state = parse_flow_sequence_entry_mapping_value
process_empty_scalar(stream, lastmark(token))
end
end
function parse_flow_sequence_entry_mapping_value(stream::EventStream)
token = peek(stream.input)
if token isa ValueToken
forward!(stream.input)
if !(peek(stream.input) isa Union{FlowEntryToken, FlowSequenceEndToken})
push!(stream.states, parse_flow_sequence_entry_mapping_end)
parse_flow_node(stream)
else
stream.state = parse_flow_sequence_entry_mapping_end
process_empty_scalar(stream, lastmark(token))
end
else
stream.state = parse_flow_sequence_entry_mapping_end
process_empty_scalar(stream, firstmark(token))
end
end
function parse_flow_sequence_entry_mapping_end(stream::EventStream)
stream.state = parse_flow_sequence_entry
token = peek(stream.input)
MappingEndEvent(firstmark(token), lastmark(token))
end
function parse_flow_mapping_first_key(stream::EventStream)
token = forward!(stream.input)
push!(stream.marks, firstmark(token))
parse_flow_mapping_key(stream, true)
end
function parse_flow_mapping_key(stream::EventStream, first_entry=false)
token = peek(stream.input)
if !(token isa FlowMappingEndToken)
if !first_entry
if token isa FlowEntryToken
forward!(stream.input)
else
throw(ParserError("while parsing a flow mapping",
stream.marks[end],
"expected ',' or '}', but got $(typeof(token))",
firstmark(token)))
end
end
token = peek(stream.input)
if token isa KeyToken
forward!(stream.input)
if !(peek(stream.input) isa Union{ValueToken, FlowEntryToken, FlowMappingEndToken})
push!(stream.states, parse_flow_mapping_value)
return parse_flow_node(stream)
else
stream.state = parse_flow_mapping_value
return process_empty_scalar(stream, lastmark(token))
end
elseif !(token isa FlowMappingEndToken)
push!(stream.states, parse_flow_mapping_empty_value)
return parse_flow_node(stream)
end
end
forward!(stream.input)
pop!(stream.marks)
stream.state = pop!(stream.states)
MappingEndEvent(firstmark(token), lastmark(token))
end
function parse_flow_mapping_value(stream::EventStream)
token = peek(stream.input)
if token isa ValueToken
forward!(stream.input)
if !(peek(stream.input) isa Union{FlowEntryToken, FlowMappingEndToken})
push!(stream.states, parse_flow_mapping_key)
parse_flow_node(stream)
else
stream.state = parse_flow_mapping_key
process_empty_scalar(stream, lastmark(token))
end
else
stream.state = parse_flow_mapping_key
process_empty_scalar(stream, firstmark(token))
end
end
function parse_flow_mapping_empty_value(stream::EventStream)
stream.state = parse_flow_mapping_key
process_empty_scalar(stream, firstmark(peek(stream.input)))
end
function process_empty_scalar(stream::EventStream, mark::Mark)
ScalarEvent(mark, mark, nothing, nothing, (true, false), "", nothing)
end
mutable struct Queue{T}
data::Vector{T}
function (::Type{Queue{T}})() where T
new{T}(Vector{T}())
end
end
isempty(q::Queue) = length(q.data) == 0
length(q::Queue) = length(q.data)
peek(q::Queue) = q.data[1]
enqueue!(q::Queue{T}, value::T) where T = push!(q.data, value)
# Enqueue into kth place.
enqueue!(q::Queue{T}, value::T, k::Integer) where T = insert!(q.data, k+1, value)
dequeue!(q::Queue) = popfirst!(q.data)
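# Example (a sketch):
#
#     q = Queue{Int}()
#     enqueue!(q, 1); enqueue!(q, 2)
#     enqueue!(q, 0, 0)  # insert at the front (k = 0)
#     dequeue!(q)        # -> 0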
# TODO:
# This is a punt for now. It does not handle any sort of custom resolving tags,
# only matching default implicits.
const DEFAULT_SCALAR_TAG = "tag:yaml.org,2002:str"
const DEFAULT_SEQUENCE_TAG = "tag:yaml.org,2002:seq"
const DEFAULT_MAPPING_TAG = "tag:yaml.org,2002:map"
const default_implicit_resolvers =
[
("tag:yaml.org,2002:bool",
r"^(?:true|True|TRUE|false|False|FALSE)$"x),
("tag:yaml.org,2002:int",
r"^(?:[-+]?0b[0-1_]+
|[-+]? [0-9]+
|0o [0-7]+
|0x [0-9a-fA-F]+)$"x),
("tag:yaml.org,2002:float",
r"^(?:[-+]? ( \. [0-9]+ | [0-9]+ ( \. [0-9]* )? ) ( [eE] [-+]? [0-9]+ )?
|[-+]? (?: \.inf | \.Inf | \.INF )
|\.nan | \.NaN | \.NAN)$"x),
("tag:yaml.org,2002:merge",
r"^(?:<<)$"),
("tag:yaml.org,2002:null",
r"^(?:~|null|Null|NULL|)$"x),
("tag:yaml.org,2002:timestamp",
r"^(\d{4})- (?# year)
(\d\d?)- (?# month)
(\d\d?) (?# day)
(?:
(?:[Tt]|[ \t]+)
(\d\d?): (?# hour)
(\d\d): (?# minute)
(\d\d) (?# second)
(?:\.(\d*))? (?# fraction)
(?:
[ \t]*(Z|([+\-])(\d\d?)
(?:
:(\d\d)
)?)
)?
)?$"x),
("tag:yaml.org,2002:value",
r"^(?:=)$"),
("tag:yaml.org,2002:yaml",
r"^(?:!|&|\*)$")
]
struct Resolver
implicit_resolvers::Vector{Tuple{String,Regex}}
function Resolver()
new(copy(default_implicit_resolvers))
end
end
function resolve(resolver::Resolver, ::Type{ScalarNode}, value, implicit)
if implicit[1]
for (tag, pat) in resolver.implicit_resolvers
if occursin(pat, value)
return tag
end
end
end
DEFAULT_SCALAR_TAG
end
function resolve(resolver::Resolver, ::Type{SequenceNode}, value, implicit)
DEFAULT_SEQUENCE_TAG
end
function resolve(resolver::Resolver, ::Type{MappingNode}, value, implicit)
DEFAULT_MAPPING_TAG
end
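# Example (a sketch): implicit resolution of plain scalars:
#
#     r = Resolver()
#     resolve(r, ScalarNode, "123", (true, false))    # -> "tag:yaml.org,2002:int"
#     resolve(r, ScalarNode, "hello", (true, false))  # -> "tag:yaml.org,2002:str"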
# YAML 1.1 [27] b-char ::= b-line-feed | b-carriage-return | b-next-line | b-line-separator | b-paragraph-separator
is_b_char(::YAMLV1_1, c::Char) = c == '\n' || c == '\r' || c == '\u85' || c == '\u2028' || c == '\u2029'
# YAML 1.2 [31] s-space ::= x20
const yaml_1_2_s_space = ' '
# YAML 1.2 [32] s-tab ::= x09
const yaml_1_2_s_tab = '\t'
# YAML 1.1 [36] s-white ::= #x9 /*TAB*/ | #x20 /*SP*/
# YAML 1.2 [33] s-white ::= s-space | s-tab
is_s_white(c::Char) = c == yaml_1_2_s_space || c == yaml_1_2_s_tab
# YAML 1.1 [40] ns-hex-digit ::= ns-dec-digit | [#x41-#x46] /*A-F*/ | [#x61-#x66] /*a-f*/
# YAML 1.2 [36] ns-hex-digit ::= ns-dec-digit | [x41-x46] | [x61-x66] # 0-9 A-F a-f
is_ns_hex_digit(c::Char) = isdigit(c) || 'A' ≤ c ≤ 'F' || 'a' ≤ c ≤ 'f'
# YAML 1.1 [41] ns-ascii-letter ::= [#x41-#x5A] /*A-Z*/ | [#61-#x7A] /*a-z*/
# YAML 1.2 [37] ns-ascii-letter ::= [x41-x5A] | [x61-x7A] # A-Z a-z
is_ns_ascii_letter(c::Char) = 'A' ≤ c ≤ 'Z' || 'a' ≤ c ≤ 'z'
is_whitespace(::YAMLV1_1, c::Char) = c == '\0' || c == ' ' || c == '\t' || is_b_char(YAMLV1_1(), c)
struct SimpleKey
token_number::UInt64
required::Bool
mark::Mark
end
# Errors thrown by the scanner.
struct ScannerError <: Exception
context::Union{String, Nothing}
context_mark::Union{Mark, Nothing}
problem::String
problem_mark::Mark
end
function show(io::IO, error::ScannerError)
if error.context !== nothing
print(io, error.context, " at ", error.context_mark, ": ")
end
print(io, error.problem, " at ", error.problem_mark)
end
function detect_encoding(input::IO)::Encoding
pos = position(input)
start_bytes = Array{UInt8}(undef, 4)
bytes_read = readbytes!(input, start_bytes, 4)
seek(input, pos)
start_bytes[bytes_read+1:end] .= 1 #fill blanks with non-special bytes
intro = UInt32(start_bytes[4]) << 24 +
UInt32(start_bytes[3]) << 16 +
UInt32(start_bytes[2]) << 8 +
UInt32(start_bytes[1])
# https://yaml.org/spec/1.2/spec.html#id2771184
if intro & 0x00ffffff == 0xbfbbef #utf-8 bom
enc"UTF-8"
elseif intro == 0xfffe0000 #utf-32be BOM
enc"UTF-32BE"
elseif intro == 0x0000feff #utf-32le BOM
enc"UTF-32LE"
elseif intro & 0xffff == 0xfffe #utf-16be BOM
enc"UTF-16BE"
elseif intro & 0xffff == 0xfeff #utf-16le BOM
enc"UTF-16LE"
elseif intro & 0x00ffffff == 0 #ascii char in utf-32be
enc"UTF-32BE"
elseif intro & 0xffffff00 == 0 #ascii char in utf-32le
enc"UTF-32LE"
elseif intro & 0x00ff == 0 #ascii char in utf-16be
enc"UTF-16BE"
elseif intro & 0xff00 == 0 #ascii char in utf-16le
enc"UTF-16LE"
else
enc"UTF-8"
end
end
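# Example (a sketch): a UTF-8 BOM is recognized and the stream position restored:
#
#     io = IOBuffer([0xef, 0xbb, 0xbf, UInt8('a')])
#     detect_encoding(io)  # -> enc"UTF-8"
#     position(io)         # -> 0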
# A stream type for the scanner, which is just a IO stream with scanner state.
mutable struct TokenStream
input::BufferedInput
encoding::Encoding
# All tokens read.
done::Bool
# Tokens queued to be read.
token_queue::Queue{Token}
# Index of the start of the head of the stream. (0-based)
index::UInt64
# Index of the current column. (0-based)
column::UInt64
    # Current line number. (0-based)
line::UInt64
# Number of tokens read, not including those still in token_queue.
tokens_taken::UInt64
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
flow_level::UInt64
# Current indentation level.
indent::Int
# Past indentation levels.
indents::Vector{Int}
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
allow_simple_key::Bool
# Keep track of possible simple keys. This is a dictionary. The key
    # is `flow_level`; there can be no more than one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
possible_simple_keys::Dict{UInt64,SimpleKey}
function TokenStream(stream::IO)
encoding = detect_encoding(stream)
decoded_stream = encoding == enc"UTF-8" ? stream : StringDecoder(stream, encoding)
tokstream = new(BufferedInput(decoded_stream),
encoding, false, Queue{Token}(),
1, 0, 1, 0, 0, -1,
Int[], true, Dict())
fetch_stream_start(tokstream)
tokstream
end
end
function reset!(stream::TokenStream)
stream.done = false
fetch_stream_start(stream)
end
function get_mark(stream::TokenStream)
Mark(stream.index, stream.line, stream.column)
end
# Advance the stream by k characters.
function forwardchars!(stream::TokenStream, k::Integer=1)
for _ in 1:k
c = peek(stream.input)
forward!(stream.input)
stream.index += 1
if in(c, "\n\u0085\u2028\u2029") ||
(c == '\r' && peek(stream.input) == '\n')
stream.column = 0
stream.line += 1
else
stream.column += 1
end
end
nothing
end
function need_more_tokens(stream::TokenStream)
if stream.done
return false
elseif isempty(stream.token_queue)
return true
end
stale_possible_simple_keys(stream)
next_possible_simple_key(stream) == stream.tokens_taken
end
# peek the first token from the token stream
function peek(stream::TokenStream)::Union{Token, Nothing}
while need_more_tokens(stream)
fetch_more_tokens(stream)
end
if !isempty(stream.token_queue)
peek(stream.token_queue)
else
nothing
end
end
# advance and return the first token from the token stream
function forward!(stream::TokenStream)::Union{Token, Nothing}
while need_more_tokens(stream)
fetch_more_tokens(stream)
end
if !isempty(stream.token_queue)
stream.tokens_taken += 1
dequeue!(stream.token_queue)
else
nothing
end
end
# Read one or more tokens from the input stream.
function fetch_more_tokens(stream::TokenStream)
# Eat whitespace.
scan_to_next_token(stream::TokenStream)
# Remove obsolete possible simple keys.
stale_possible_simple_keys(stream)
# Compare the current indentation and column. It may add some tokens
# and decrease the current indentation level.
unwind_indent(stream, stream.column)
c = peek(stream.input)
if c == '\0' || c === nothing
fetch_stream_end(stream)
elseif c == '%' && check_directive(stream)
fetch_directive(stream)
elseif c == '-' && check_document_start(stream)
fetch_document_start(stream)
elseif c == '.' && check_document_end(stream)
fetch_document_end(stream)
stream.done = true
elseif c == '['
fetch_flow_sequence_start(stream)
elseif c == '{'
fetch_flow_mapping_start(stream)
elseif c == ']'
fetch_flow_sequence_end(stream)
elseif c == '}'
fetch_flow_mapping_end(stream)
elseif c == ','
fetch_flow_entry(stream)
elseif c == '-' && check_block_entry(stream)
fetch_block_entry(stream)
elseif c == '?' && check_key(stream)
fetch_key(stream)
elseif c == ':' && check_value(stream)
fetch_value(stream)
elseif c == '*'
fetch_alias(stream)
elseif c == '&'
fetch_anchor(stream)
elseif c == '!'
fetch_tag(stream)
elseif c == '|' && stream.flow_level == 0
fetch_literal(stream)
elseif c == '>' && stream.flow_level == 0
fetch_folded(stream)
elseif c == '\''
fetch_single(stream)
elseif c == '\"'
fetch_double(stream)
elseif c == '\uFEFF'
fetch_byte_order_mark(stream)
elseif check_plain(stream)
fetch_plain(stream)
else
throw(ScannerError(nothing, nothing,
"while scanning for the next token, found character '$c' that cannot start any token",
get_mark(stream)))
end
end
# Simple keys
# -----------
# Return the number of the nearest possible simple key.
function next_possible_simple_key(stream::TokenStream)
min_token_number = nothing
for (level, key) in stream.possible_simple_keys
if min_token_number === nothing || key.token_number < min_token_number
min_token_number = key.token_number
end
end
min_token_number
end
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
function stale_possible_simple_keys(stream::TokenStream)
for (level, key) in stream.possible_simple_keys
if key.mark.line != stream.line || stream.index - key.mark.index > 1024
if key.required
throw(ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", get_mark(stream)))
end
delete!(stream.possible_simple_keys, level)
end
end
end
function save_possible_simple_key(stream::TokenStream)
# Simple key required at the current position.
required = stream.flow_level == 0 && stream.indent == stream.column
@assert stream.allow_simple_key || !required
if stream.allow_simple_key
remove_possible_simple_key(stream)
token_number = stream.tokens_taken + length(stream.token_queue)
key = SimpleKey(token_number, required, get_mark(stream))
stream.possible_simple_keys[stream.flow_level] = key
end
end
function remove_possible_simple_key(stream::TokenStream)
# Remove the saved possible key position at the current flow level.
if haskey(stream.possible_simple_keys, stream.flow_level)
key = stream.possible_simple_keys[stream.flow_level]
if key.required
throw(ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", get_mark(stream)))
end
delete!(stream.possible_simple_keys, stream.flow_level)
end
end
function unwind_indent(stream::TokenStream, column)
# In the flow context, indentation is ignored. We make the scanner less
# restrictive than specification requires.
if stream.flow_level != 0
return
end
# In block context, we may need to issue the BLOCK-END tokens.
while stream.indent > column
mark = get_mark(stream)
stream.indent = pop!(stream.indents)
enqueue!(stream.token_queue, BlockEndToken(Span(mark, mark)))
end
end
function add_indent(stream::TokenStream, column)
if stream.indent < column
push!(stream.indents, stream.indent)
stream.indent = column
true
else
false
end
end
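# Illustrative sketch: add_indent and unwind_indent form a matched pair.
# Entering a deeper column pushes the old indent; returning to a shallower
# column pops it and queues one BlockEndToken per popped level.
#=
s = TokenStream(IOBuffer(""))   # fresh stream: indent == -1
add_indent(s, 2)                # true — pushes -1, sets indent to 2
unwind_indent(s, 0)             # pops back to -1, queuing one BlockEndToken
=#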
# Checkers
# --------
function check_directive(stream::TokenStream)
stream.column == 0
end
function check_document_start(stream::TokenStream)
stream.column == 0 &&
prefix(stream.input, 3) == "---" &&
is_whitespace(YAMLV1_1(), peek(stream.input, 3))
end
function check_document_end(stream::TokenStream)
stream.column == 0 &&
prefix(stream.input, 3) == "..." &&
(is_whitespace(YAMLV1_1(), peek(stream.input, 3)) || peek(stream.input, 3) === nothing)
end
function check_block_entry(stream::TokenStream)
is_whitespace(YAMLV1_1(), peek(stream.input, 1))
end
function check_key(stream::TokenStream)
stream.flow_level > 0 || is_whitespace(YAMLV1_1(), peek(stream.input, 1))
end
function check_value(stream::TokenStream)
cnext = peek(stream.input, 1)
stream.flow_level > 0 || is_whitespace(YAMLV1_1(), cnext) || cnext === nothing
end
function check_plain(stream::TokenStream)
!in(peek(stream.input), "\0 \t\r\n\u0085\u2028\u2029-?:,[]{}#&*!|>\'\"%@`\uFEFF") ||
(!is_whitespace(YAMLV1_1(), peek(stream.input, 1)) &&
(peek(stream.input) == '-' || (stream.flow_level == 0 &&
in(peek(stream.input), "?:"))))
end
# Fetchers
# --------
function fetch_stream_start(stream::TokenStream)
mark = get_mark(stream)
enqueue!(stream.token_queue,
StreamStartToken(Span(mark, mark), string(stream.encoding)))
end
function fetch_stream_end(stream::TokenStream)
# Set the current indentation to -1.
unwind_indent(stream, -1)
# Reset simple keys.
remove_possible_simple_key(stream)
stream.allow_simple_key = false
empty!(stream.possible_simple_keys)
mark = get_mark(stream)
enqueue!(stream.token_queue, StreamEndToken(Span(mark, mark)))
stream.done = true
end
function fetch_directive(stream::TokenStream)
# Set the current indentation to -1.
unwind_indent(stream, -1)
# Reset simple keys.
remove_possible_simple_key(stream)
stream.allow_simple_key = false
enqueue!(stream.token_queue, scan_directive(stream))
end
function fetch_document_start(stream::TokenStream)
fetch_document_indicator(stream, DocumentStartToken)
end
function fetch_document_end(stream::TokenStream)
fetch_document_indicator(stream, DocumentEndToken)
end
function fetch_document_indicator(stream::TokenStream, ::Type{T}) where {T<:Token}
# Set the current indentation to -1.
unwind_indent(stream, -1)
# Reset simple keys. Note that there could not be a block collection
# after '---'.
remove_possible_simple_key(stream)
stream.allow_simple_key = false
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = get_mark(stream)
forwardchars!(stream, 3)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, T(Span(start_mark, end_mark)))
end
function fetch_byte_order_mark(stream::TokenStream)
# Set the current indentation to -1.
unwind_indent(stream, -1)
start_mark = get_mark(stream)
forward!(stream.input)
stream.index += 1
end_mark = get_mark(stream)
enqueue!(stream.token_queue, ByteOrderMarkToken(Span(start_mark, end_mark)))
end
function fetch_flow_sequence_start(stream::TokenStream)
fetch_flow_collection_start(stream, FlowSequenceStartToken)
end
function fetch_flow_mapping_start(stream::TokenStream)
fetch_flow_collection_start(stream, FlowMappingStartToken)
end
function fetch_flow_collection_start(stream::TokenStream, ::Type{T}) where {T<:Token}
# '[' and '{' may start a simple key.
save_possible_simple_key(stream)
# Increase the flow level.
stream.flow_level += 1
# Simple keys are allowed after '[' and '{'.
stream.allow_simple_key = true
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, T(Span(start_mark, end_mark)))
end
function fetch_flow_sequence_end(stream::TokenStream)
fetch_flow_collection_end(stream, FlowSequenceEndToken)
end
function fetch_flow_mapping_end(stream::TokenStream)
fetch_flow_collection_end(stream, FlowMappingEndToken)
end
function fetch_flow_collection_end(stream::TokenStream, ::Type{T}) where {T<:Token}
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
# Decrease the flow level.
stream.flow_level -= 1
# No simple keys after ']' or '}'.
stream.allow_simple_key = false
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, T(Span(start_mark, end_mark)))
end
function fetch_flow_entry(stream::TokenStream)
# Simple keys are allowed after ','.
stream.allow_simple_key = true
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
# Add FLOW-ENTRY.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, FlowEntryToken(Span(start_mark, end_mark)))
end
function fetch_block_entry(stream::TokenStream)
# Block context needs additional checks.
if stream.flow_level == 0
# Are we allowed to start a new entry?
if !stream.allow_simple_key
throw(ScannerError(nothing, nothing,
"sequence entries not allowed here",
get_mark(stream)))
end
if add_indent(stream, stream.column)
mark = get_mark(stream)
enqueue!(stream.token_queue,
BlockSequenceStartToken(Span(mark, mark)))
end
# It's an error for the block entry to occur in the flow context,
# but we let the parser detect this.
else
return
end
# Simple keys are allowed after '-'.
stream.allow_simple_key = true
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
# Add BLOCK-ENTRY.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue,
BlockEntryToken(Span(start_mark, end_mark)))
end
function fetch_key(stream::TokenStream)
if stream.flow_level == 0
# Are we allowed to start a key (not necessarily a simple one)?
if !stream.allow_simple_key
throw(ScannerError(nothing, nothing,
"mapping keys are not allowed here",
get_mark(stream)))
end
# We may need to add BLOCK-MAPPING-START.
if add_indent(stream, stream.column)
mark = get_mark(stream)
enqueue!(stream.token_queue,
BlockMappingStartToken(Span(mark, mark)))
end
end
# Simple keys are allowed after '?' in the block context.
stream.allow_simple_key = stream.flow_level == 0
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
# Add KEY.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, KeyToken(Span(start_mark, end_mark)))
end
function fetch_value(stream::TokenStream)
# Simple key
if haskey(stream.possible_simple_keys, stream.flow_level)
# Add KEY.
key = stream.possible_simple_keys[stream.flow_level]
delete!(stream.possible_simple_keys, stream.flow_level)
enqueue!(stream.token_queue, KeyToken(Span(key.mark, key.mark)),
key.token_number - stream.tokens_taken)
# If this key starts a new block mapping, we need to add
# BLOCK-MAPPING-START.
if stream.flow_level == 0 && add_indent(stream, key.mark.column)
enqueue!(stream.token_queue,
BlockMappingStartToken(Span(key.mark, key.mark)),
key.token_number - stream.tokens_taken)
end
stream.allow_simple_key = false
# Complex key
else
# Block context needs additional checks.
# (Do we really need them? They will be caught by the parser
# anyway.)
if stream.flow_level == 0
# We are allowed to start a complex value if and only if
# we can start a simple key.
if !stream.allow_simple_key
throw(ScannerError(nothing, nothing,
"mapping values are not allowed here",
get_mark(stream)))
end
end
# If this value starts a new block mapping, we need to add
# BLOCK-MAPPING-START. It will be detected as an error later by
# the parser.
if stream.flow_level == 0 && add_indent(stream, stream.column)
mark = get_mark(stream)
enqueue!(stream.token_queue,
BlockMappingStartToken(Span(mark, mark)))
end
# Simple keys are allowed after ':' in the block context.
stream.allow_simple_key = stream.flow_level == 0
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
end
# Add VALUE.
start_mark = get_mark(stream)
forwardchars!(stream)
end_mark = get_mark(stream)
enqueue!(stream.token_queue, ValueToken(Span(start_mark, end_mark)))
end
function fetch_alias(stream::TokenStream)
# ALIAS could be a simple key.
save_possible_simple_key(stream)
# No simple keys after ALIAS.
stream.allow_simple_key = false
# Scan and add ALIAS.
enqueue!(stream.token_queue, scan_anchor(stream, AliasToken))
end
function fetch_anchor(stream::TokenStream)
# ANCHOR could start a simple key.
save_possible_simple_key(stream)
# No simple keys after ANCHOR.
stream.allow_simple_key = false
# Scan and add ANCHOR.
enqueue!(stream.token_queue, scan_anchor(stream, AnchorToken))
end
function fetch_tag(stream::TokenStream)
# TAG could start a simple key.
save_possible_simple_key(stream)
# No simple keys after TAG.
stream.allow_simple_key = false
# Scan and add TAG.
enqueue!(stream.token_queue, scan_tag(stream))
end
function fetch_literal(stream::TokenStream)
fetch_block_scalar(stream, '|')
end
function fetch_folded(stream::TokenStream)
fetch_block_scalar(stream, '>')
end
function fetch_block_scalar(stream::TokenStream, style::Char)
# A simple key may follow a block scalar.
stream.allow_simple_key = true
# Reset possible simple key on the current level.
remove_possible_simple_key(stream)
# Scan and add SCALAR.
enqueue!(stream.token_queue, scan_block_scalar(stream, style))
end
function fetch_single(stream::TokenStream)
fetch_flow_scalar(stream, '\'')
end
function fetch_double(stream::TokenStream)
fetch_flow_scalar(stream, '"')
end
function fetch_flow_scalar(stream::TokenStream, style::Char)
# A flow scalar could be a simple key.
save_possible_simple_key(stream)
# No simple keys after flow scalars.
stream.allow_simple_key = false
# Scan and add SCALAR.
enqueue!(stream.token_queue, scan_flow_scalar(stream, style))
end
function fetch_plain(stream::TokenStream)
save_possible_simple_key(stream)
stream.allow_simple_key = false
enqueue!(stream.token_queue, scan_plain(stream))
end
# Scanners
# --------
# If the stream is at a line break, advance past it.
#
# YAML 1.1
#
# [22] b-line-feed ::= #xA /*LF*/
# [23] b-carriage-return ::= #xD /*CR*/
# [24] b-next-line ::= #x85 /*NEL*/
# [25] b-line-separator ::= #x2028 /*LS*/
# [26] b-paragraph-separator ::= #x2029 /*PS*/
# [28] b-specific ::= b-line-separator | b-paragraph-separator
# [29] b-generic ::= ( b-carriage-return b-line-feed) | b-carriage-return | b-line-feed | b-next-line
# [30] b-as-line-feed ::= b-generic
# [31] b-normalized ::= b-as-line-feed | b-specific
#
# U+000D U+000A → U+000A
# U+000D → U+000A
# U+000A → U+000A
# U+0085 → U+000A
# U+2028 → U+2028
# U+2029 → U+2029
# otherwise → (empty)
#
function scan_line_break(::YAMLV1_1, stream::TokenStream)::String
c = peek(stream.input)
if c == '\u000d'
if peek(stream.input, 1) == '\u000a'
forwardchars!(stream, 2)
else
forwardchars!(stream)
end
"\u000a"
elseif c == '\u000a' || c == '\u0085'
forwardchars!(stream)
"\u000a"
elseif c == '\u2028' || c == '\u2029'
forwardchars!(stream)
string(c)
else
""
end
end
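# Illustrative sketch: CRLF and NEL both normalize to a single LF under the
# YAML 1.1 rules tabulated above.
#=
scan_line_break(YAMLV1_1(), TokenStream(IOBuffer("\r\nx"))) == "\u000a"
scan_line_break(YAMLV1_1(), TokenStream(IOBuffer("\u0085x"))) == "\u000a"
=#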
#
# YAML 1.2
#
# [24] b-line-feed ::= x0A
# [25] b-carriage-return ::= x0D
# [26] b-char ::= b-line-feed | b-carriage-return
# [27] nb-char ::= c-printable - b-char - c-byte-order-mark
# [28] b-break ::= ( b-carriage-return b-line-feed ) | b-carriage-return | b-line-feed
#
# U+000D U+000A → U+000A
# U+000D → U+000A
# U+000A → U+000A
# otherwise → (empty)
#
function scan_line_break(::YAMLV1_2, stream::TokenStream)::String
c = peek(stream.input)
if c == '\u000d'
if peek(stream.input, 1) == '\u000a'
forwardchars!(stream, 2)
else
forwardchars!(stream)
end
"\u000a"
elseif c == '\u000a'
forwardchars!(stream)
"\u000a"
else
""
end
end
# Scan past whitespace to the next token.
function scan_to_next_token(stream::TokenStream)
while true
# whitespace
while peek(stream.input) == ' '
forwardchars!(stream)
end
# comment
if peek(stream.input) == '#'
forwardchars!(stream)
while !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
forwardchars!(stream)
end
end
# line break
if scan_line_break(YAMLV1_1(), stream) != ""
if stream.flow_level == 0
stream.allow_simple_key = true
end
# found a token
else
break
end
end
end
function scan_directive(stream::TokenStream)
start_mark = get_mark(stream)
forwardchars!(stream)
name = scan_directive_name(stream, start_mark)
value = nothing
if name == "YAML"
value = scan_yaml_directive_value(stream, start_mark)
end_mark = get_mark(stream)
elseif name == "TAG"
tag_handle = scan_tag_directive_handle(stream, start_mark)
tag_prefix = scan_tag_directive_prefix(stream, start_mark)
value = (tag_handle, tag_prefix)
end_mark = get_mark(stream)
else
# Otherwise we warn and ignore the directive.
end_mark = get_mark(stream)
@warn """unknown directive name: "$name" at $end_mark. We ignore this."""
while !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
forwardchars!(stream)
end
end
scan_directive_ignored_line(stream, start_mark)
DirectiveToken(Span(start_mark, end_mark), name, value)
end
function scan_directive_name(stream::TokenStream, start_mark::Mark)
length = 0
c = peek(stream.input)
while is_ns_ascii_letter(c) || isdigit(c) || c == '-' || c == '_'
length += 1
c = peek(stream.input, length)
end
if length == 0
throw(ScannerError("while scanning a directive", start_mark,
"expected alphanumeric character, but found '$(c)'",
get_mark(stream)))
end
value = prefix(stream.input, length)
forwardchars!(stream, length)
c = peek(stream.input)
if !in(c, ":\0 \r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a directive", start_mark,
"expected alphanumeric character, but found '$(c)'",
get_mark(stream)))
end
value
end
function scan_yaml_directive_value(stream::TokenStream, start_mark::Mark)
while peek(stream.input) == ' '
forwardchars!(stream)
end
major = scan_yaml_directive_number(stream, start_mark)
if peek(stream.input) != '.'
throw(ScannerError("while scanning a directive", start_mark,
"expected '.' but found '$(peek(stream.input))'",
get_mark(stream)))
end
forwardchars!(stream)
minor = scan_yaml_directive_number(stream, start_mark)
if !in(peek(stream.input), "\0 \r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a directive", start_mark,
"expected ' ' or a line break, but found '$(peek(stream.input))'",
get_mark(stream)))
end
return (major, minor)
end
# scan the YAML directive's number from a stream
function scan_yaml_directive_number(stream::TokenStream, start_mark::Mark)::Int
# -------------------------------------------------
# check that the first character is a decimal digit
# -------------------------------------------------
# the current position of the character in the stream
pos = 0
# the current character
c = peek(stream.input, pos)
# throw an error if the input is not decimal digits
isdigit(c) || throw(ScannerError(
"while scanning a directive", start_mark,
"expected a digit, but found '$c'", get_mark(stream),
))
# -----------------------------------------------------------
# until the end of the decimal digits, increment the position
# -----------------------------------------------------------
while true
pos += 1
c = peek(stream.input, pos)
isdigit(c) || break
end
# ------------------------------
# get the decimal digit as `Int`
# ------------------------------
# the decimal digit as a `String`
str = prefix(stream.input, pos)
# the decimal digit as an `Int`
n = parse(Int, str)
# ---------------------------------------------------
# advance the stream by the length that has been read
# ---------------------------------------------------
forwardchars!(stream, pos)
# -----------------
# return the number
# -----------------
n
end
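# Illustrative sketch: a "%YAML 1.1" line becomes a DirectiveToken whose
# value is the (major, minor) tuple assembled from two calls to
# scan_yaml_directive_number.
#=
s = TokenStream(IOBuffer("%YAML 1.1\n---\n"))
forward!(s)              # StreamStartToken
tok = forward!(s)        # DirectiveToken
(tok.name, tok.value)    # ("YAML", (1, 1))
=#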
function scan_tag_directive_handle(stream::TokenStream, start_mark::Mark)
while peek(stream.input) == ' '
forwardchars!(stream)
end
value = scan_tag_handle(stream, "directive", start_mark)
if peek(stream.input) != ' '
throw(ScannerError("while scanning a directive", start_mark,
"expected ' ', but found '$(peek(stream.input))'",
get_mark(stream)))
end
value
end
function scan_tag_directive_prefix(stream::TokenStream, start_mark::Mark)
while peek(stream.input) == ' '
forwardchars!(stream)
end
value = scan_tag_uri(stream, "directive", start_mark)
if !in(peek(stream.input), "\0 \r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a directive", start_mark,
"expected ' ', but found $(peek(stream.input))",
get_mark(stream)))
end
value
end
function scan_directive_ignored_line(stream::TokenStream, start_mark::Mark)
while peek(stream.input) == ' '
forwardchars!(stream)
end
if peek(stream.input) == '#'
forwardchars!(stream)
while !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
forwardchars!(stream)
end
end
if !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a directive", start_mark,
"expected a comment or a line break, but found '$(peek(stream.input))'",
get_mark(stream)))
end
scan_line_break(YAMLV1_1(), stream)
end
function scan_anchor(stream::TokenStream, ::Type{T}) where {T<:Token}
start_mark = get_mark(stream)
indicator = peek(stream.input)
if indicator == '*'
name = "alias"
else
name = "anchor"
end
forwardchars!(stream)
length = 0
c = peek(stream.input)
while is_ns_ascii_letter(c) || isdigit(c) || c == '-' || c == '_'
length += 1
c = peek(stream.input, length)
end
if length == 0
throw(ScannerError("while scanning an $(name)", start_mark,
"expected an alphanumeric character, but found '$(peek(stream.input))'",
get_mark(stream)))
end
value = prefix(stream.input, length)
forwardchars!(stream, length)
if !in(peek(stream.input), "\0 \t\r\n\u0085\u2028\u2029?:,]}%@`")
throw(ScannerError("while scanning an $(name)", start_mark,
"expected an alphanumeric character, but found '$(peek(stream.input))'",
get_mark(stream)))
end
end_mark = get_mark(stream)
T(Span(start_mark, end_mark), value)
end
function scan_tag(stream::TokenStream)
start_mark = get_mark(stream)
c = peek(stream.input, 1)
if c == '<'
handle = nothing
forwardchars!(stream, 2)
suffix = scan_tag_uri(stream, "tag", start_mark)
if peek(stream.input) != '>'
throw(ScannerError("while parsing a tag", start_mark,
"expected '>', but found '$(peek(stream.input))'",
get_mark(stream)))
end
forwardchars!(stream)
elseif in(c, "\0 \t\r\n\u0085\u2028\u2029")
handle = nothing
suffix = "!"
forwardchars!(stream)
else
length = 1
use_handle = false
while !in(c, "\0 \r\n\u0085\u2028\u2029")
if c == '!'
use_handle = true
break
end
length += 1
c = peek(stream.input, length)
end
if use_handle
handle = scan_tag_handle(stream, "tag", start_mark)
else
handle = "!"
forwardchars!(stream)
end
suffix = scan_tag_uri(stream, "tag", start_mark)
end
c = peek(stream.input)
if !in(c, "\0 \r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a tag", start_mark,
"expected ' ' or a line break, but found '$(c)'",
get_mark(stream)))
end
value = (handle, suffix)
end_mark = get_mark(stream)
TagToken(Span(start_mark, end_mark), value)
end
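# Illustrative sketch: "!!str" is scanned as handle "!!" plus suffix "str",
# and the pair lands in TagToken.value.
#=
s = TokenStream(IOBuffer("!!str x\n"))
forward!(s)              # StreamStartToken
forward!(s).value        # ("!!", "str")
=#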
function scan_block_scalar(stream::TokenStream, style::Char)
folded = style == '>'
chunks = Any[]
start_mark = get_mark(stream)
# Scan the header.
forwardchars!(stream)
chomping, increment = scan_block_scalar_indicators(stream, start_mark)
scan_block_scalar_ignored_line(stream, start_mark)
# Determine the indentation level and go to the first non-empty line.
min_indent = max(1, stream.indent + 1)
if increment === nothing
breaks, max_indent, end_mark = scan_block_scalar_indentation(stream)
indent = max(min_indent, max_indent)
else
indent = min_indent + increment - 1
breaks, end_mark = scan_block_scalar_breaks(stream, indent)
end
line_break = ""
# Scan the inner part of the block scalar.
while stream.column == indent && peek(stream.input) ≠ '\0'
append!(chunks, breaks)
leading_non_space = !is_s_white(peek(stream.input))
length = 0
while !in(peek(stream.input, length), "\0\r\n\u0085\u2028\u2029")
length += 1
end
push!(chunks, prefix(stream.input, length))
forwardchars!(stream, length)
line_break = scan_line_break(YAMLV1_1(), stream)
breaks, end_mark = scan_block_scalar_breaks(stream, indent)
if stream.column == indent && peek(stream.input) != '\0'
if folded && line_break == "\n" &&
leading_non_space && !is_s_white(peek(stream.input))
if isempty(breaks)
push!(chunks, ' ')
end
else
push!(chunks, line_break)
end
else
break
end
end
# Chomp the tail.
# Chomping may be Nothing or Bool.
if chomping === nothing
push!(chunks, line_break)
elseif chomping
push!(chunks, line_break)
append!(chunks, breaks)
end
ScalarToken(Span(start_mark, end_mark), string(chunks...), false, style)
end
function scan_block_scalar_ignored_line(stream::TokenStream, start_mark::Mark)
while peek(stream.input) == ' '
forwardchars!(stream)
end
if peek(stream.input) == '#'
while !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
forwardchars!(stream)
end
end
if !in(peek(stream.input), "\0\r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a block scalal", start_mark,
"expected a comment or a line break, but found '$(peek(stream.input))'",
get_mark(stream)))
end
scan_line_break(YAMLV1_1(), stream)
end
function scan_block_scalar_indicators(stream::TokenStream, start_mark::Mark)
chomping = nothing
increment = nothing
c = peek(stream.input)
if c == '+' || c == '-'
chomping = c == '+'
forwardchars!(stream)
c = peek(stream.input)
if isdigit(c)
increment = parse(Int, c)
increment == 0 && throw(ScannerError(
"while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0", get_mark(stream),
))
end
elseif isdigit(c)
increment = parse(Int, c)
increment == 0 && throw(ScannerError(
"while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0", get_mark(stream),
))
forwardchars!(stream)
c = peek(stream.input)
if c == '+' || c == '-'
chomping = c == '+'
forwardchars!(stream)
end
end
c = peek(stream.input)
# c ∉ "\0 \r\n\u0085\u2028\u2029"
!(c == '\0' || c == ' ' || c == '\r' || c == '\n' || c == '\u85' || c == '\u2028' || c == '\u2029') && throw(ScannerError(
"while scanning a block scalar", start_mark,
"expected chomping or indentation indicators, but found '$c'", get_mark(stream),
))
chomping, increment
end
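# Illustrative sketch: header parsing for a "|2-" style block scalar, with
# the '|' already consumed. '-' means strip trailing breaks (chomping ==
# false), '+' means keep them, and a digit fixes the indentation increment.
#=
s = TokenStream(IOBuffer("2-\n x\n"))
scan_block_scalar_indicators(s, get_mark(s))   # (false, 2)
=#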
function scan_block_scalar_indentation(stream::TokenStream)
chunks = Any[]
max_indent = 0
end_mark = get_mark(stream)
while in(peek(stream.input), " \r\n\u0085\u2028\u2029")
if peek(stream.input) != ' '
push!(chunks, scan_line_break(YAMLV1_1(), stream))
end_mark = get_mark(stream)
else
forwardchars!(stream)
if stream.column > max_indent
max_indent = stream.column
end
end
end
chunks, max_indent, end_mark
end
function scan_block_scalar_breaks(stream::TokenStream, indent)
chunks = Any[]
end_mark = get_mark(stream)
while stream.column < indent && peek(stream.input) == ' '
forwardchars!(stream)
end
while is_b_char(YAMLV1_1(), peek(stream.input))
push!(chunks, scan_line_break(YAMLV1_1(), stream))
end_mark = get_mark(stream)
while stream.column < indent && peek(stream.input) == ' '
forwardchars!(stream)
end
end
chunks, end_mark
end
function scan_flow_scalar(stream::TokenStream, style::Char)
double = style == '"'
chunks = Any[]
start_mark = get_mark(stream)
q = peek(stream.input) # quote
forwardchars!(stream)
while peek(stream.input) != q || peek(stream.input, 1) == q
append!(chunks, scan_flow_scalar_spaces(stream, double, start_mark))
append!(chunks, scan_flow_scalar_non_spaces(stream, double, start_mark))
end
forwardchars!(stream)
end_mark = get_mark(stream)
ScalarToken(Span(start_mark, end_mark), string(chunks...), false, style)
end
const ESCAPE_REPLACEMENTS = Dict{Char,Char}(
'0' => '\0',
'a' => '\u0007',
'b' => '\u0008',
't' => '\u0009',
'\t' => '\u0009',
'n' => '\u000a',
'v' => '\u000b',
'f' => '\u000c',
'r' => '\u000d',
'e' => '\u001b',
' ' => '\u0020',
'"' => '"',
'\\' => '\\',
'N' => '\u0085',
'_' => '\u00A0',
'L' => '\u2028',
'P' => '\u2029'
)
const ESCAPE_CODES = Dict{Char, Int}(
'x' => 2,
'u' => 4,
'U' => 8
)
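# Illustrative sketch: the escape tables above drive double-quoted decoding,
# so "\u0041" and "\n" escape sequences in the input become 'A' and a real
# newline in the resulting scalar.
#=
s = TokenStream(IOBuffer("\"\\u0041\\n\" "))
forward!(s)              # StreamStartToken
forward!(s).value        # "A\n"
=#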
function scan_flow_scalar_non_spaces(stream::TokenStream, double::Bool,
start_mark::Mark)
chunks = Any[]
while true
length = 0
while !in(peek(stream.input, length), "\'\"\\\0 \t\r\n\u0085\u2028\u2029")
length += 1
end
if length > 0
push!(chunks, prefix(stream.input, length))
forwardchars!(stream, length)
end
c = peek(stream.input)
if !double && c == '\'' && peek(stream.input, 1) == '\''
push!(chunks, '\'')
forwardchars!(stream, 2)
elseif (double && c == '\'') || (!double && in(c, "\"\\"))
push!(chunks, c)
forward!(stream.input)
elseif double && c == '\\'
forward!(stream.input)
c = peek(stream.input)
if haskey(ESCAPE_REPLACEMENTS, c)
push!(chunks, ESCAPE_REPLACEMENTS[c])
forward!(stream.input)
elseif haskey(ESCAPE_CODES, c)
length = ESCAPE_CODES[c]
forward!(stream.input)
for k in 0:(length-1)
c = peek(stream.input, k)
if !in(peek(stream.input, k), "0123456789ABCDEFabcdef")
throw(ScannerError("while scanning a double-quoted scalar",
start_mark,
string("expected escape sequence of",
" $(length) hexadecimal",
"digits, but found '$(c)'"),
get_mark(stream)))
end
end
push!(chunks, Char(parse(Int, prefix(stream.input, length), base = 16)))
forwardchars!(stream, length)
elseif is_b_char(YAMLV1_1(), c)
scan_line_break(YAMLV1_1(), stream)
append!(chunks, scan_flow_scalar_breaks(stream, double, start_mark))
else
throw(ScannerError("while scanning a double-quoted scalar",
start_mark,
"found unknown escape character '$(c)'",
get_mark(stream)))
end
else
return chunks
end
end
end
function scan_flow_scalar_spaces(stream::TokenStream, double::Bool,
start_mark::Mark)
chunks = Any[]
length = 0
while is_s_white(peek(stream.input, length))
length += 1
end
whitespaces = prefix(stream.input, length)
forwardchars!(stream, length)
c = peek(stream.input)
if c == '\0'
throw(ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected end of stream", get_mark(stream)))
elseif is_b_char(YAMLV1_1(), c)
line_break = scan_line_break(YAMLV1_1(), stream)
breaks = scan_flow_scalar_breaks(stream, double, start_mark)
if line_break != "\n"
    push!(chunks, line_break)
elseif isempty(breaks)
push!(chunks, ' ')
end
append!(chunks, breaks)
else
push!(chunks, whitespaces)
end
chunks
end
function scan_flow_scalar_breaks(stream::TokenStream, double::Bool,
start_mark::Mark)
chunks = Any[]
while true
pref = prefix(stream.input, 3)
if (pref == "---" || pref == "...") &&
    in(peek(stream.input, 3), "\0 \t\r\n\u0085\u2028\u2029")
throw(ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document seperator",
get_mark(stream)))
end
while is_s_white(peek(stream.input))
forward!(stream.input)
end
if is_b_char(YAMLV1_1(), peek(stream.input))
push!(chunks, scan_line_break(YAMLV1_1(), stream))
else
return chunks
end
end
end
function scan_plain(stream::TokenStream)
# See the specification for details.
# We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',', ':' and '?'.
# We also keep track of the `allow_simple_key` flag here.
# Indentation rules are relaxed for the flow context.
chunks = Any[]
start_mark = get_mark(stream)
end_mark = start_mark
indent = stream.indent + 1
# We allow zero indentation for scalars, but then we need to check for
# document separators at the beginning of the line.
#if indent == 0:
# indent = 1
spaces = Any[]
while true
length = 0
if peek(stream.input) == '#'
break
end
while true
c = peek(stream.input, length)
cnext = peek(stream.input, length + 1)
if is_whitespace(YAMLV1_1(), c) ||
c === nothing ||
(stream.flow_level == 0 && c == ':' &&
(cnext === nothing || is_whitespace(YAMLV1_1(), cnext))) ||
(stream.flow_level != 0 && in(c, ",:?[]{}"))
break
end
length += 1
end
# It's not clear what we should do with ':' in the flow context.
c = peek(stream.input, length)
if stream.flow_level != 0 && c == ':' &&
!in(peek(stream.input, length + 1), "\0 \t\r\n\u0085\u2028\u2029,[]{}")
forwardchars!(stream, length)
throw(ScannerError("while scanning a plain scalar", start_mark,
"found unexpected ':'", get_mark(stream)))
end
if length == 0
break
end
# No simple key may immediately follow a plain scalar on the same line.
stream.allow_simple_key = false
append!(chunks, spaces)
push!(chunks, prefix(stream.input, length))
forwardchars!(stream, length)
end_mark = get_mark(stream)
spaces = scan_plain_spaces(stream, indent, start_mark)
if isempty(spaces) || peek(stream.input) == '#' ||
(stream.flow_level == 0 && stream.column < indent)
break
end
end
ScalarToken(Span(start_mark, end_mark), string(chunks...), true, nothing)
end
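# Illustrative sketch: plain scalars absorb internal single spaces but stop
# at ": " in block context, which is how "a b: c" splits into key and value.
#=
s = TokenStream(IOBuffer("a b: c\n"))
forward!(s); forward!(s); forward!(s)   # StreamStart, BlockMappingStart, Key
forward!(s).value                       # "a b"
=#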
function scan_plain_spaces(stream::TokenStream, indent::Integer,
start_mark::Mark)
chunks = Any[]
length = 0
while peek(stream.input, length) == ' '
length += 1
end
whitespaces = prefix(stream.input, length)
forwardchars!(stream, length)
c = peek(stream.input)
if is_b_char(YAMLV1_1(), c)
line_break = scan_line_break(YAMLV1_1(), stream)
stream.allow_simple_key = true
if peek(stream.input) == '\uFEFF'
return Any[]
end
pref = prefix(stream.input, 3)
if (pref == "---" || pref == "...") &&
    in(peek(stream.input, 3), "\0 \t\r\n\u0085\u2028\u2029")
return Any[]
end
breaks = Any[]
while in(peek(stream.input), " \r\n\u0085\u2028\u2029")
if peek(stream.input) == ' '
forwardchars!(stream)
else
push!(breaks, scan_line_break(YAMLV1_1(), stream))
if peek(stream.input) == '\uFEFF'
return Any[]
end
pref = prefix(stream.input, 3)
if (pref == "---" || pref == "...") &&
    in(peek(stream.input, 3), "\0 \t\r\n\u0085\u2028\u2029")
return Any[]
end
end
end
if line_break != "\n"
push!(chunks, line_break)
elseif isempty(breaks)
push!(chunks, ' ')
end
elseif !isempty(whitespaces)
push!(chunks, whitespaces)
end
chunks
end
function scan_tag_handle(stream::TokenStream, name::String, start_mark::Mark)
c = peek(stream.input)
if c != '!'
throw(ScannerError("while scanning a $(name)", start_mark,
"expected '!', but found '$(c)'", get_mark(stream)))
end
length = 1
c = peek(stream.input, length)
if c != ' '
while is_ns_ascii_letter(c) || isdigit(c) || c == '-' || c == '_'
length += 1
c = peek(stream.input, length)
end
if c != '!'
forwardchars!(stream, length)
throw(ScannerError("while scanning a $(name)", start_mark,
"expected '!', but found '$(c)'",
get_mark(stream)))
end
length += 1
end
value = prefix(stream.input, length)
forwardchars!(stream, length)
value
end
function scan_tag_uri(stream::TokenStream, name::String, start_mark::Mark)
chunks = Any[]
length = 0
c = peek(stream.input, length)
while is_ns_ascii_letter(c) || isdigit(c) || in(c, "-;/?:@&=+\$,_.!~*\'()[]%")
if c == '%'
push!(chunks, prefix(stream.input, length))
forwardchars!(stream, length)
length = 0
push!(chunks, scan_uri_escapes(stream, name, start_mark))
else
length += 1
end
c = peek(stream.input, length)
end
if length > 0
push!(chunks, prefix(stream.input, length))
forwardchars!(stream, length)
length = 0
end
if isempty(chunks)
throw(ScannerError("while parsing a $(name)", start_mark,
"expected URI, but found '$(c)'",
get_mark(stream)))
end
string(chunks...)
end
function scan_uri_escapes(stream::TokenStream, name::String, start_mark::Mark)::String
bytes = Char[]
while peek(stream.input) == '%'
forward!(stream.input)
# check ns-hex-digit
for k in 0:1
c = peek(stream.input, k)
is_ns_hex_digit(c) || throw(ScannerError(
"while scanning a $name", start_mark,
"expected URI escape sequence of 2 hexadecimal digits, but found '$c'", get_mark(stream),
))
end
push!(bytes, Char(parse(Int, prefix(stream.input, 2), base=16)))
forwardchars!(stream, 2)
end
String(bytes)
end
| YAML | https://github.com/JuliaData/YAML.jl.git |
|
[
"MIT"
] | 0.4.12 | dea63ff72079443240fbd013ba006bcbc8a9ac00 | code | 277 | # Where in the stream a particular token lies.
struct Span
start_mark::Mark
end_mark::Mark
end
show(io::IO, span::Span) = print(io, "(line, column) ∈ (", span.start_mark.line, ", ", span.start_mark.column, ")...(", span.end_mark.line, ", ", span.end_mark.column, ")")
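# Illustrative sketch: printing a Span built from two hypothetical marks,
# assuming the Mark(index, line, column) constructor defined elsewhere.
#=
show(stdout, Span(Mark(0, 1, 0), Mark(3, 1, 3)))
# prints: (line, column) ∈ (1, 0)...(1, 3)
=#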
| YAML | https://github.com/JuliaData/YAML.jl.git |
|
[
"MIT"
] | 0.4.12 | dea63ff72079443240fbd013ba006bcbc8a9ac00 | code | 1656 | # YAML Tokens.
abstract type Token
# span::Span
end
firstmark(token::Token) = token.span.start_mark
lastmark(token::Token) = token.span.end_mark
# The '%YAML' directive.
struct DirectiveToken <: Token
span::Span
name::String
value::Union{Tuple, Nothing}
end
# '---'
struct DocumentStartToken <: Token
span::Span
end
# '...'
struct DocumentEndToken <: Token
span::Span
end
# '\uFEFF'
struct ByteOrderMarkToken <: Token
span::Span
end
# The stream start
struct StreamStartToken <: Token
span::Span
encoding::String
end
# The stream end
struct StreamEndToken <: Token
span::Span
end
#
struct BlockSequenceStartToken <: Token
span::Span
end
#
struct BlockMappingStartToken <: Token
span::Span
end
#
struct BlockEndToken <: Token
span::Span
end
# '['
struct FlowSequenceStartToken <: Token
span::Span
end
# '{'
struct FlowMappingStartToken <: Token
span::Span
end
# ']'
struct FlowSequenceEndToken <: Token
span::Span
end
# '}'
struct FlowMappingEndToken <: Token
span::Span
end
# '?' or nothing (simple keys).
struct KeyToken <: Token
span::Span
end
# ':'
struct ValueToken <: Token
span::Span
end
# '-'
struct BlockEntryToken <: Token
span::Span
end
# ','
struct FlowEntryToken <: Token
span::Span
end
# '*anchor'
struct AliasToken <: Token
span::Span
value::String
end
# '&anchor'
struct AnchorToken <: Token
span::Span
value::String
end
# '!handle!suffix'
struct TagToken <: Token
span::Span
value
end
# A scalar.
struct ScalarToken <: Token
span::Span
value::String
plain::Bool
style::Union{Char, Nothing}
end
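# Illustrative sketch: every concrete token carries a Span, so firstmark and
# lastmark work uniformly across token types. The marks here are hypothetical.
#=
tok = KeyToken(Span(Mark(0, 0, 0), Mark(1, 0, 1)))
(firstmark(tok).column, lastmark(tok).column)   # (0, 1)
=#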
| YAML | https://github.com/JuliaData/YAML.jl.git |