licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 1863 | @testset "Thermodynamics" begin
@test mixing_ratio_to_specific_humidity(specific_humidity_to_mixing_ratio(0.5)) ≈ 0.5
@test get_saturation_vapor_pressure(273.15u"K") == 6.112u"hPa"
@test get_saturation_vapor_pressure(273.15) == 6.112
@test get_partial_vapor_pressure(0,1000u"hPa") == 0u"hPa"
@test get_partial_vapor_pressure(1,1000u"hPa") == 1000u"hPa"/(18.016/28.966 + 1.0)
@test get_mixing_ratio(0u"hPa",1000u"hPa") == 0
@test get_mixing_ratio(get_partial_vapor_pressure(0.5,1000.0), 1000.0) == 0.5
@test get_partial_vapor_pressure(0,1000) == 0
@test get_partial_vapor_pressure(1,1000) == 1000/(18.016/28.966 + 1.0)
@test get_mixing_ratio(0,1000) == 0
@test get_mixing_ratio(get_partial_vapor_pressure(0.5,1000.0), 1000.0) == 0.5
@test unit(get_specific_entropy(300u"K",0.2,1000u"hPa"))== u"J/K/kg"
@test get_potential_temperature(300,1000,1000) == 300
@test get_potential_temperature(300,1010,1000) < 300
@test get_potential_temperature(300,900,1000) > 300
@test get_virtual_temperature(300,0) == 300
@test get_virtual_temperature(300,0) == 300
@test get_virtual_temperature(300,10) > 300
@test get_potential_temperature(300u"K",1000u"hPa",1000u"hPa") == 300u"K"
@test get_potential_temperature(300u"K",1010u"hPa",1000u"hPa") < 300u"K"
@test get_potential_temperature(300u"K",900u"hPa",1000u"hPa") > 300u"K"
@test get_virtual_temperature(300u"K",0u"g/kg") == 300u"K"
@test get_virtual_temperature(300u"K",0u"g/kg") == 300u"K"
@test get_virtual_temperature(300u"K",10u"g/kg") > 300u"K"
@test get_virtual_temperature(300,mixing_ratio_to_specific_humidity(0.10)) ≈ get_virtual_temperature(300,0.10,0.10)
@test get_virtual_temperature(300,0.10) ≈ get_virtual_temperature(300,specific_humidity_to_mixing_ratio(0.10),specific_humidity_to_mixing_ratio(0.10))
end
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 2538 | pres = 1.0f0u"hPa" .* Float32.(readdlm("pressure_in_hpa")[:])
tabs = 1.0f0u"K" .* Float32.(readdlm("tabs_in_Kelvin")[:])
qv = 1.0f0u"kg/kg" .* Float32.(readdlm("specific_humidity_in_kgperkg")[:])
r = specific_humidity_to_mixing_ratio.(qv)
pparcel = pres[1]
tparcel = tabs[1]
rparcel = r[1]
pres_nu = ustrip.(pres)
tabs_nu = ustrip.(tabs)
qv_nu = ustrip.(qv)
r_nu = specific_humidity_to_mixing_ratio.(qv_nu)
pparcel_nu = pres_nu[1]
tparcel_nu = tabs_nu[1]
rparcel_nu = r_nu[1]
# With these values, the Fortran implementation yields:
# include(joinpath(@__DIR__,"emanuel_potential_intensity_wrapper.jl"))
# cape_fortran_implementation, temp_outflow_fortran_implementation = get_cape(ustrip(tparcel),ustrip(rparcel), ustrip(pparcel), ustrip.(tabs), ustrip.(r), ustrip.(pres))
# @show cape_fortran_implementation, temp_outflow_fortran_implementation
# min_pres_fortran_implementation, max_speed_fortran_implementation = get_pcmin( ustrip(tparcel) .- 273.15f0,ustrip(pparcel),ustrip.(pres),ustrip.(tabs) .- 273.15f0, 1f3.*ustrip.(r))
# @show min_pres_fortran_implementation, max_speed_fortran_implementation
cape_fortran_implementation, temp_outflow_fortran_implementation = (1106.4801f0, 240.90143f0)
min_pres_fortran_implementation, max_speed_fortran_implementation = (991.1094f0, 26.201181f0)
cape, temp_outflow, index_outflow = get_cape_and_outflow_temp_from_sounding(tparcel,rparcel,pparcel,tabs,r,pres)
min_pres, max_speed = get_potential_intensity_of_tropical_cyclone(tparcel, pparcel, pres, tabs, r)
@testset "Potential Intensity" begin
@test unit(get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,tabs,r,pres)[1]) == u"K"
@test get_buoyancy_of_lifted_parcel(tparcel_nu,rparcel_nu,pparcel_nu,tabs_nu,r_nu,pres_nu) == ustrip.(get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,tabs,r,pres))
@test get_potential_intensity_of_tropical_cyclone(tparcel_nu, pparcel_nu, pres_nu, tabs_nu, r_nu) == ustrip.(get_potential_intensity_of_tropical_cyclone(tparcel, pparcel, pres, tabs, r))
@test get_cape_and_outflow_temp_from_sounding(tparcel_nu,rparcel_nu,pparcel_nu,tabs_nu,r_nu,pres_nu) == ustrip.(get_cape_and_outflow_temp_from_sounding(tparcel,rparcel,pparcel,tabs,r,pres))
@test isapprox(cape, cape_fortran_implementation*u"J/kg", rtol = 0.01)
@test isapprox(temp_outflow, temp_outflow_fortran_implementation*u"K", rtol = 0.01)
@test isapprox(min_pres, min_pres_fortran_implementation*u"hPa", rtol = 0.01)
@test isapprox(max_speed, max_speed_fortran_implementation*u"m/s", rtol = 0.01)
end | TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 253 | using TropicalCyclonePotentialIntensity
using Test
using Unitful: @u_str, unit, ustrip, Quantity
using DelimitedFiles
@testset "TropicalCyclonePotentialIntensity.jl" begin
include("physicsfunctions.jl")
include("potentialintensity.jl")
end
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | docs | 2371 | # TropicalCyclonePotentialIntensity
[](http://www.argelramirezreyes.com/TropicalCyclonePotentialIntensity.jl/dev/)
[](http://www.argelramirezreyes.com/TropicalCyclonePotentialIntensity.jl/dev/)
[](https://codecov.io/gh/aramirezreyes/TropicalCyclonePotentialIntensity.jl)
# TropicalCyclonePotentialIntensity.jl
This package implements the functions of [Daniel Gilford](https://github.com/dgilford)'s [tcpyPI](https://github.com/dgilford/tcpyPI/blob/master/tcpyPI/pi.py) for the [Julia language](https://github.com/JuliaLang/julia). These are widely used routines in the tropical cyclone community. I hope this helps widen the atmospheric science ecosystem for Julia.
# Installation
This package is very lightweight and is registered in the Julia General Registry. To install it, hit the ```]``` key in the Julia REPL to enter Pkg mode, then type
```julia
Pkg> add TropicalCyclonePotentialIntensity
```
# Usage
TropicalCyclonePotentialIntensity.jl has three main functions, shown below.
Here, `tparcel`, `pparcel` and `rparcel` are the temperature (in Kelvin), pressure (in hPa) and mixing ratio (in kg/kg) of a parcel, and `tabs`, `r` and `pres` are the corresponding environmental profiles (equally sized arrays).
```julia
get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,tabs,r,pres)
get_potential_intensity_of_tropical_cyclone(tparcel, pparcel, pres, tabs, r)
get_cape_and_outflow_temp_from_sounding(tparcel,rparcel,pparcel,tabs,r,pres)
```
# Units
This package uses [Unitful.jl](https://github.com/PainterQubits/Unitful.jl) to manage units. We recommend using TropicalCyclonePotentialIntensity.jl with unitful quantities to help keep your results consistent, although it should also run with plain real numbers.
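For instance, here is a minimal sketch of a call with unitful inputs. The sounding values below are made up purely for illustration (surface level first); a real application would use an observed or simulated profile as in the package's tests.
```julia
using Unitful: @u_str
using TropicalCyclonePotentialIntensity

# Hypothetical environmental sounding, surface level first (illustrative values only)
pres = 1.0u"hPa" .* [1010.0, 900.0, 800.0, 700.0, 600.0, 500.0]
tabs = 1.0u"K" .* [300.0, 294.0, 289.0, 283.0, 276.0, 268.0]
r = 1.0u"kg/kg" .* [0.018, 0.014, 0.010, 0.007, 0.004, 0.002]

tparcel, rparcel, pparcel = tabs[1], r[1], pres[1]

cape, temp_outflow, index_outflow = get_cape_and_outflow_temp_from_sounding(tparcel, rparcel, pparcel, tabs, r, pres)
min_pres, max_speed = get_potential_intensity_of_tropical_cyclone(tparcel, pparcel, pres, tabs, r)
```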
## Package under development
This package is under development by Argel Ramírez Reyes. It is definitely not perfect. Bugs are expected as this has not been widely tested. Bug reports or collaborations are highly appreciated.
### Main reference
Gilford, D. M., 2021: pyPI (v1.3): Tropical Cyclone Potential Intensity Calculations in Python. Geoscientific Model Development, 14, 2351–2369, https://doi.org/10.5194/gmd-14-2351-2021. | TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | docs | 306 | ```@meta
CurrentModule = TropicalCyclonePotentialIntensity
```
# TropicalCyclonePotentialIntensity
Documentation for [TropicalCyclonePotentialIntensity](https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl).
```@index
```
```@autodocs
Modules = [TropicalCyclonePotentialIntensity]
```
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 827 | ######################################################################
# Example of how to create a window in borderless fullscreen mode.
# -----
# Licensed under MIT License
using Pkg
Pkg.activate(".")
using KirUtil
using GLFWAbstraction
let vidmode = videomode(monitor(1))
# Create window in borderless fullscreen mode
@windowhint RedBits vidmode.bits.red
@windowhint GreenBits vidmode.bits.green
@windowhint BlueBits vidmode.bits.blue
@windowhint RefreshRate vidmode.refresh_rate
let wnd = window(:mywnd, "Hello, world!", vidmode.width, vidmode.height, monitor(1))
@assert !wnd.shouldclose
use(wnd)
while !wnd.shouldclose
sleep(0.1)
swapbuffers(wnd)
pollevents()
end
end
end
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 1454 | ######################################################################
# Example for various input methods
# -----
# Licensed under MIT License
using Pkg
Pkg.activate(".")
using GLFWAbstraction
using GLFW
buffer = ""
function GLFWAbstraction.on_receive_char(::Window{:mywnd}, char::Char)
global buffer
buffer *= char
end
function GLFWAbstraction.on_key_press(::Window{:mywnd}, key::Key, scancode::Integer, modifiers::ModifierKey)
global buffer
if key === Key(:backspace) && !isempty(buffer)
buffer = chop(buffer) # drop the last character (Unicode-safe)
elseif key === Key(:enter)
println(buffer)
buffer = ""
end
end
function GLFWAbstraction.on_key_repeat(::Window{:mywnd}, key::Key, scancode::Integer, modifiers::ModifierKey)
global buffer
if key === Key(:backspace) && !isempty(buffer)
buffer = chop(buffer) # drop the last character (Unicode-safe)
end
end
function GLFWAbstraction.on_mouse_press(::Window{:mywnd}, button::MouseButton, modifiers::ModifierKey)
println(button)
end
function GLFWAbstraction.on_mouse_move(::Window{:mywnd}, xpos::AbstractFloat, ypos::AbstractFloat)
println("mouse moved to $xpos, $ypos")
end
GLFWAbstraction.on_mouse_enter(::Window{:mywnd}) = println("hi mouse :)")
GLFWAbstraction.on_mouse_leave(::Window{:mywnd}) = println("bye mouse :(")
let wnd = window(:mywnd, "Input Example", 960, 480)
while !wnd.shouldclose
sleep(0.1)
swapbuffers(wnd)
pollevents()
end
end
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 860 | ######################################################################
# General example of how to create & manipulate a window.
# -----
# Licensed under MIT License
using Pkg
Pkg.activate(".")
using KirUtil
using GLFWAbstraction
GLFWAbstraction.on_window_move(::Window{:mywnd}, posx::Integer, posy::Integer) = println("window moved to $posx, $posy")
GLFWAbstraction.on_window_resize(::Window{:mywnd}, sizex::Integer, sizey::Integer) = println("window resized to $sizex×$sizey")
GLFWAbstraction.on_window_close(::Window{:mywnd}) = println("window closing")
@windowhint TransparentFramebuffer true
let wnd = window(:mywnd, "Hello, world!", 960, 480)
@assert !wnd.shouldclose
wnd.aspect_ratio = 16//9
use(wnd)
while !wnd.shouldclose
sleep(0.1)
swapbuffers(wnd)
pollevents()
end
end
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 568 | ######################################################################
# GLFWAbstraction central include hub
# -----
# Licensed under MIT License
module GLFWAbstraction
const Optional{T} = Union{T, Nothing}
abstract type GLFWWrapper{T} end
wrapped(w::GLFWWrapper) = w.handle
wrapped_type(::Type{GLFWWrapper{T}}) where T = T
"""`lhs×rhs` creates a 2-tuple (lhs, rhs) for a familiar and convenient notation of 2D measures."""
(×)(lhs::Real, rhs::Real) = (lhs, rhs)
include("./monitors.jl")
include("./windows.jl")
include("./input.jl")
end # module GLFWAbstraction
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 3490 | ######################################################################
# Input Abstraction Layer
# -----
# Licensed under MIT License
using BitFlags
using GLFW
export InputAction, Press, Release, Repeat
@enum InputAction Press = Integer(GLFW.PRESS) Release = Integer(GLFW.RELEASE) Repeat = Integer(GLFW.REPEAT)
InputAction(action::GLFW.Action) = InputAction(Integer(action))
export ModifierKey, NoMod, ShiftMod, ControlMod, AltMod, SuperMod, CapsLockMod, NumLockMod
@bitflag ModifierKey::UInt16 NoMod = 0 ShiftMod ControlMod AltMod SuperMod CapsLockMod NumLockMod
export pollevents, waitevents, post_empty_event
pollevents() = GLFW.PollEvents()
waitevents() = GLFW.WaitEvents()
waitevents(timeout::AbstractFloat) = GLFW.WaitEvents(timeout)
post_empty_event() = GLFW.PostEmptyEvent()
include("./input.keyboard.jl")
include("./input.mouse.jl")
# It appears GLFW.jl does not support the latest version of GLFW? Gamepad support is incomplete.
# include("./input.gamepad.jl")
# ===== Events =====
"""Called when a key is pressed down."""
function on_key_press(wnd::Window, key::Key, scancode::Integer, modifiers::ModifierKey) end
"""Called when a key is released."""
function on_key_release(wnd::Window, key::Key, scancode::Integer, modifiers::ModifierKey) end
"""Called when a key is being held, triggering continuous textual input."""
function on_key_repeat(wnd::Window, key::Key, scancode::Integer, modifiers::ModifierKey) end
"""Called when a key results in a Unicode character input."""
function on_receive_char(wnd::Window, char::Char) end
"""Called when the mouse cursor is moved while within the window's boundaries."""
function on_mouse_move(wnd::Window, xpos::AbstractFloat, ypos::AbstractFloat) end
"""Called when the mouse cursor enters the window's boundaries."""
function on_mouse_enter(wnd::Window) end
"""Called when the mouse cursor leaves the window's boundaries."""
function on_mouse_leave(wnd::Window) end
"""Called when a mouse button is pressed down."""
function on_mouse_press(wnd::Window, button::MouseButton, modifiers::ModifierKey) end
"""Called when a mouse button is released."""
function on_mouse_release(wnd::Window, button::MouseButton, modifiers::ModifierKey) end
"""Called when a new gamepad is connected."""
function register_default_input_callbacks(wnd::Window)
register_key_callback(wnd) do _::Window, key::Key, scancode::Integer, action::InputAction, modifiers::ModifierKey
if action === Press
on_key_press(wnd, key, scancode, modifiers)
elseif action === Release
on_key_release(wnd, key, scancode, modifiers)
elseif action === Repeat
on_key_repeat(wnd, key, scancode, modifiers)
else
error("unknown key action $action")
end
end
register_text_callback(wnd) do _::Window, char::Char; on_receive_char(wnd, char) end
register_cursor_pos_callback(wnd) do _::Window, xpos::AbstractFloat, ypos::AbstractFloat; on_mouse_move(wnd, xpos, ypos) end
register_cursor_enter_callback(wnd) do _::Window, entered::Bool
if entered
on_mouse_enter(wnd)
else
on_mouse_leave(wnd)
end
end
register_mouse_button_callback(wnd) do _::Window, button::MouseButton, action::InputAction, modifiers::ModifierKey
@assert action !== Repeat
if action === Press
on_mouse_press(wnd, button, modifiers)
else
on_mouse_release(wnd, button, modifiers)
end
end
end
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 4669 | ######################################################################
# Keyboard-related input abstraction
# -----
# Licensed under MIT License
using GLFW
export Key
struct Key <: GLFWWrapper{GLFW.Key}
wrapped::GLFW.Key
end
Key(::Nothing) = Key(GLFW.KEY_UNKNOWN)
Key(special::Symbol) = resolve_key(Ident{special}())
function Key(digit::Integer; keypad::Bool = false)
if digit ∉ 0:9
throw(ArgumentError("number key must be single digit integers"))
end
I = Base.Enums.basetype(GLFW.Key)
Key(GLFW.Key(Integer(keypad ? GLFW.KEY_KP_0 : GLFW.KEY_0) + I(digit)))
end
function Key(char::Char)
if char ∈ 'a':'z' || char ∈ 'A':'Z'
I = Base.Enums.basetype(GLFW.Key)
return Key(GLFW.Key(Integer(GLFW.KEY_A) + convert(I, codepoint(lowercase(char)) - codepoint('a'))))
end
resolve_key(Ident{char}())
end
resolve_key(::Ident{K}) where K = throw(ArgumentError("unknown key \"$K\""))
resolve_key(::Ident{'\''}) = Key(GLFW.KEY_APOSTROPHE)
resolve_key(::Ident{','}) = Key(GLFW.KEY_COMMA)
resolve_key(::Ident{'-'}) = Key(GLFW.KEY_MINUS)
resolve_key(::Ident{'.'}) = Key(GLFW.KEY_PERIOD)
resolve_key(::Ident{'/'}) = Key(GLFW.KEY_SLASH)
resolve_key(::Ident{';'}) = Key(GLFW.KEY_SEMICOLON)
resolve_key(::Ident{'='}) = Key(GLFW.KEY_EQUAL)
resolve_key(::Ident{'['}) = Key(GLFW.KEY_LEFT_BRACKET)
resolve_key(::Ident{'\\'}) = Key(GLFW.KEY_BACKSLASH)
resolve_key(::Ident{']'}) = Key(GLFW.KEY_RIGHT_BRACKET)
resolve_key(::Ident{'`'}) = Key(GLFW.KEY_GRAVE_ACCENT)
# Generates `resolve_key` methods from a block of names: a bare `name` maps to `GLFW.KEY_NAME`,
# while `name => target` maps to `GLFW.KEY_TARGET`.
macro addresolvekeys(blk::Expr)
@assert blk.head === :block "must be a block expression enumerating the various keys/mappings"
result = Expr(:block)
for expr ∈ blk.args
if expr isa LineNumberNode continue end
key, mapping = if expr isa Expr
@assert expr.head === :call && expr.args[1] === :(=>) && expr.args[2] isa Symbol && expr.args[3] isa Symbol "must be an arrow pair (a=>b) assignment"
QuoteNode(expr.args[2]), Expr(:., :GLFW, QuoteNode(Symbol("KEY_$(uppercase(string(expr.args[3])))")))
elseif expr isa Symbol
QuoteNode(expr), Expr(:., :GLFW, QuoteNode(Symbol("KEY_$(uppercase(string(expr)))")))
else
throw(ArgumentError("invalid key mapping $expr"))
end
push!(result.args, :($(esc(:resolve_key))(::Ident{$key}) = Key($mapping)))
end
result
end
# This one needs to be added manually because 'end' is a keyword which breaks the macro
resolve_key(::Ident{:end}) = Key(GLFW.KEY_END)
@addresolvekeys begin
space
world1 => WORLD_1
world2 => WORLD_2
escape; esc => ESCAPE
enter; tab; backspace; insert; delete
right; left; down; up
pageup => PAGE_UP
pagedown => PAGE_DOWN
home
capslock => CAPS_LOCK
scrolllock => SCROLL_LOCK
numlock => NUM_LOCK
print => PRINT_SCREEN
printscreen => PRINT_SCREEN
pause
f1; f2; f3; f4; f5; f6; f7; f8; f9; f10; f11; f12; f13; f14; f15; f16; f17; f18; f19; f20; f21; f22; f23; f24; f25
decimal => KP_DECIMAL
divide => KP_DIVIDE
multiply => KP_MULTIPLY
subtract => KP_SUBTRACT
add => KP_ADD
keypad_enter => KP_ENTER
keypad_equal => KP_EQUAL
left_shift => LEFT_SHIFT
lshift => LEFT_SHIFT
left_control => LEFT_CONTROL
lcontrol => LEFT_CONTROL
lctrl => LEFT_CONTROL
left_alt => LEFT_ALT
lalt => LEFT_ALT
left_super => LEFT_SUPER
lsuper => LEFT_SUPER
right_shift => RIGHT_SHIFT
rshift => RIGHT_SHIFT
right_control => RIGHT_CONTROL
rcontrol => RIGHT_CONTROL
rctrl => RIGHT_CONTROL
right_alt => RIGHT_ALT
ralt => RIGHT_ALT
right_super => RIGHT_SUPER
rsuper => RIGHT_SUPER
menu => MENU
end
function register_key_callback(handler, wnd::Window)
wrapper(::GLFW.Window, key::GLFW.Key, scancode::Integer, action::GLFW.Action, modifiers::Integer) = handler(wnd, Key(key), scancode, InputAction(action), ModifierKey(modifiers))
GLFW.SetKeyCallback(wnd.handle, wrapper)
wnd
end
function register_text_callback(handler, wnd::Window)
wrapper(::GLFW.Window, char::Char) = handler(wnd, char)
GLFW.SetCharCallback(wnd.handle, wrapper)
wnd
end
export getkeyname
"""`getkeyname(named::Key, scancode::Integer)` attempts to retrieve a key's human readable name. If `named` is not `Key(nothing)`,
the name will be reminiscent of the named key. Otherwise, attempts to retrieve the key identified by `scancode`."""
getkeyname(named::Key, scancode::Integer) = GLFW.GetKeyName(named.wrapped, scancode)
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 2683 | ######################################################################
# Mouse-related input abstraction
# TODO: Custom Cursor Image. This is currently not supported by GLFW.jl,
# so may need to interface with GLFW_jll directly.
# -----
# Licensed under MIT License
using GLFW
using KirUtil
export CursorMode, CursorDisabled, CursorHidden, CursorNormal
@enum CursorMode CursorDisabled = GLFW.CURSOR_DISABLED CursorHidden = GLFW.CURSOR_HIDDEN CursorNormal = GLFW.CURSOR_NORMAL
export MouseButton
struct MouseButton
idx::Int32
function MouseButton(idx)
idx = idx-1
if idx ∉ 0:7
throw(ArgumentError("GLFW only supports mouse buttons 0:7; given $idx"))
end
new(idx)
end
end
# Using Ident{name} here because it allows injecting additional custom names.
MouseButton(name::Symbol) = MouseButton(Ident{name}())
MouseButton(::Ident{:left}) = MouseButton(1)
MouseButton(::Ident{:right}) = MouseButton(2)
MouseButton(::Ident{:middle}) = MouseButton(3)
export Mouse
struct Mouse
wnd::Window
end
@generate_properties Mouse begin
@get position = (pos = GLFW.GetCursorPos(self.wnd.handle); (pos.x, pos.y))
@set position = GLFW.SetCursorPos(self.wnd.handle, value[1], value[2])
@get mode = GLFW.GetInputMode(self.wnd.handle, GLFW.CURSOR)
@set mode = GLFW.SetInputMode(self.wnd.handle, GLFW.CURSOR, Integer(value))
end
isbuttondown(mouse::Mouse, button::MouseButton) = GLFW.GetMouseButton(mouse.wnd.handle, GLFW.MouseButton(button.idx)) == GLFW.PRESS
isbuttonup( mouse::Mouse, button::MouseButton) = GLFW.GetMouseButton(mouse.wnd.handle, GLFW.MouseButton(button.idx)) == GLFW.RELEASE
function register_cursor_pos_callback(handler, wnd::Window)
wrapper(::GLFW.Window, xpos::AbstractFloat, ypos::AbstractFloat) = handler(wnd, xpos, ypos)
GLFW.SetCursorPosCallback(wnd.handle, wrapper)
wnd
end
function register_cursor_enter_callback(handler, wnd::Window)
wrapper(::GLFW.Window, entered::Integer) = handler(wnd, entered != 0)
GLFW.SetCursorEnterCallback(wnd.handle, wrapper)
wnd
end
function register_mouse_button_callback(handler, wnd::Window)
wrapper(::GLFW.Window, button::GLFW.MouseButton, action::GLFW.Action, modifiers::Integer) = handler(wnd, MouseButton(Integer(button) + 1), InputAction(action), ModifierKey(modifiers)) # GLFW buttons are 0-based, MouseButton expects 1-based
GLFW.SetMouseButtonCallback(wnd.handle, wrapper)
wnd
end
function register_mouse_scroll_callback(handler, wnd::Window)
wrapper(::GLFW.Window, scrollx::AbstractFloat, scrolly::AbstractFloat) = handler(wnd, scrollx, scrolly)
GLFW.SetScrollCallback(wnd.handle, wrapper)
wnd
end
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 1436 | ######################################################################
# Abstraction of GLFW.Monitor
# -----
# Licensed under MIT License
using GLFW
export VideoMode
struct VideoMode
width::Int
height::Int
bits::NamedTuple{(:red, :green, :blue), Tuple{UInt8, UInt8, UInt8}}
refresh_rate::Int
end
export Monitor, monitor
struct Monitor <: GLFWWrapper{GLFW.Monitor}
handle::GLFW.Monitor
end
monitor(idx::Integer = 1) = Monitors[idx]
monitor(::Nothing) = nothing
export Monitors
"""`Monitors` is a meta type designed to query connected physical monitors using `Base.getindex` and `Base.iterate`."""
struct Monitors end
Base.iterate(::Type{Monitors}, idx = 1) = idx > length(Monitors) ? nothing : (Monitors[idx], idx + 1)
Base.getindex(::Type{Monitors}, idx::Integer) = collect(Monitors)[idx]
Base.length(::Type{Monitors}) = length(GLFW.GetMonitors())
export videomode, videomodes
videomode(monitor::Monitor) = videomode(GLFW.GetVideoMode(monitor.handle))
videomode(vidmode::GLFW.VidMode) = VideoMode(vidmode.width, vidmode.height, (red = vidmode.redbits, green = vidmode.greenbits, blue = vidmode.bluebits), vidmode.refreshrate)
videomodes(monitor::Monitor) = videomode.(GLFW.GetVideoModes(monitor.handle))
Base.convert(::Type{GLFW.Monitor}, monitor::Monitor) = monitor.handle
Base.convert(::Type{GLFW.Monitor}, ::Nothing) = GLFW.Monitor(C_NULL)
Base.collect(::Type{Monitors}) = collect(Monitor.(GLFW.GetMonitors()))
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | code | 13611 | ######################################################################
# Window built on GLFW library
# -----
# Licensed under MIT License
using KirUtil
using GenerateProperties
using GLFW
export WindowEvent, FramebufferSizeEvent, WindowCloseEvent, WindowContentScaleEvent, WindowFocusEvent, WindowIconifyEvent, WindowMaximizeEvent, WindowPosEvent, WindowRefreshEvent, WindowSizeEvent
"""Enum of various window-related events. These are a relatively low-level abstraction used with the
`register_window_callback` method. However, a more Julian solution exists. Refer to the documentation's *Event System*
section for more details."""
@enum WindowEvent FramebufferSizeEvent WindowCloseEvent WindowContentScaleEvent WindowFocusEvent WindowIconifyEvent WindowMaximizeEvent WindowPosEvent WindowRefreshEvent WindowSizeEvent
export ClientAPI, OpenGLAPI, OpenGLESAPI, NoAPI
"""Enum of the various possible client APIs which GLFW can use to create an OpenGL (ES) context."""
@enum ClientAPI OpenGLAPI = GLFW.OPENGL_API OpenGLESAPI = GLFW.OPENGL_ES_API NoAPI = GLFW.NO_API
export ContextCreationAPI, NativeContextAPI, EGLContextAPI
"""Enum of the various OpenGL (ES) context creation APIs which GLFW may use."""
@enum ContextCreationAPI NativeContextAPI = GLFW.NATIVE_CONTEXT_API EGLContextAPI = GLFW.EGL_CONTEXT_API
export OpenGLProfile, CoreProfile, CompatProfile, AnyProfile
"""Enum of possible OpenGL profiles."""
@enum OpenGLProfile CoreProfile = GLFW.OPENGL_CORE_PROFILE CompatProfile = GLFW.OPENGL_COMPAT_PROFILE AnyProfile = GLFW.OPENGL_ANY_PROFILE
export ContextReleaseBehavior, AnyReleaseBehavior, FlushReleaseBehavior, NoReleaseBehavior
"""Enum of context release behaviors GLFW may employ upon destroying an OpenGL (ES) context."""
@enum ContextReleaseBehavior AnyReleaseBehavior = GLFW.ANY_RELEASE_BEHAVIOR FlushReleaseBehavior = GLFW.RELEASE_BEHAVIOR_FLUSH NoReleaseBehavior = GLFW.RELEASE_BEHAVIOR_NONE
export ContextRobustness, LoseContextOnReset, NoResetNotification, NoRobustness
"""Context robustness solutions GLFW may employ when an OpenGL (ES) context fails."""
@enum ContextRobustness LoseContextOnReset = GLFW.LOSE_CONTEXT_ON_RESET NoResetNotification = GLFW.NO_RESET_NOTIFICATION NoRobustness = GLFW.NO_ROBUSTNESS
export Window, window
mutable struct Window{L} <: GLFWWrapper{GLFW.Window}
handle::GLFW.Window
function Window{L}(handle) where L
inst = new(handle)
register_default_window_callbacks(inst)
register_default_input_callbacks(inst)
inst
end
end
function window(label::Symbol, title::AbstractString, width::Integer, height::Integer, monitor::Optional{Monitor} = nothing, share::Optional{Window} = nothing)
Window{label}(GLFW.CreateWindow(width, height, title, convert(GLFW.Monitor, monitor), convert(GLFW.Window, share)))
end
# Use virtual properties to reduce namespace clutter
@generate_properties Window begin
# Getters & Setters
@get auto_iconify = GLFW.GetWindowAttrib(self.handle, GLFW.AUTO_ICONIFY)
@set auto_iconify = GLFW.SetWindowAttrib(self.handle, GLFW.AUTO_ICONIFY, value)
@get decorated = GLFW.GetWindowAttrib(self.handle, GLFW.DECORATED)
@set decorated = GLFW.SetWindowAttrib(self.handle, GLFW.DECORATED, value)
@get floating = GLFW.GetWindowAttrib(self.handle, GLFW.FLOATING)
@set floating = GLFW.SetWindowAttrib(self.handle, GLFW.FLOATING, value)
@get focus_on_show = GLFW.GetWindowAttrib(self.handle, GLFW.FOCUS_ON_SHOW)
@set focus_on_show = GLFW.SetWindowAttrib(self.handle, GLFW.FOCUS_ON_SHOW, value)
@get opacity = GLFW.GetWindowOpacity(self.handle)
@set opacity = GLFW.SetWindowOpacity(self.handle, value)
@get position = (pos = GLFW.GetWindowPos(self.handle); pos.x × pos.y)
@set position = GLFW.SetWindowPos(self.handle, value[1], value[2])
@get resizable = GLFW.GetWindowAttrib(self.handle, GLFW.RESIZABLE)
@set resizable = GLFW.SetWindowAttrib(self.handle, GLFW.RESIZABLE, value)
@get shouldclose = GLFW.WindowShouldClose(self.handle)
@set shouldclose = GLFW.SetWindowShouldClose(self.handle, value)
@get visible = GLFW.GetWindowAttrib(self.handle, GLFW.VISIBLE)
@set visible = if value; GLFW.ShowWindow(self.handle) else; GLFW.HideWindow(self.handle) end
# Setters Only
@set aspect_ratio = set_aspect_ratio(self, value)
@set icon = GLFW.SetWindowIcon(self.handle, value) # can be a single 2D matrix of pixels or an array of matrices of pixels.
@set title = GLFW.SetWindowTitle(self.handle, value)
# Getters Only
@get content_scale = (scales = GLFW.GetWindowContentScale(self.handle); scales.xscale × scales.yscale)
@get focused = GLFW.GetWindowAttrib(self.handle, GLFW.FOCUSED)
@get framebuffer_size = (size = GLFW.GetFramebufferSize(self.handle); size.width × size.height)
@get hovered = GLFW.GetWindowAttrib(self.handle, GLFW.HOVERED)
@get iconified = GLFW.GetWindowAttrib(self.handle, GLFW.ICONIFIED)
@get maximized = GLFW.GetWindowAttrib(self.handle, GLFW.MAXIMIZED)
@get transparent = GLFW.GetWindowAttrib(self.handle, GLFW.TRANSPARENT_FRAMEBUFFER)
# Context-related Properties
@get client_api = ClientAPI(GLFW.GetWindowAttrib(self.handle, GLFW.CLIENT_API))
@get context_creation_api = ContextCreationAPI(GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_CREATION_API))
@get context_version = VersionNumber(GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_VERSION_MAJOR), GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_VERSION_MINOR), GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_REVISION))
@get context_forward_compatible = GLFW.GetWindowAttrib(self.handle, GLFW.OPENGL_FORWARD_COMPAT) != 0
@get context_debug = GLFW.GetWindowAttrib(self.handle, GLFW.OPENGL_DEBUG_CONTEXT) != 0
@get context_profile = OpenGLProfile(GLFW.GetWindowAttrib(self.handle, GLFW.OPENGL_PROFILE))
@get context_release_behavior = ContextReleaseBehavior(GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_RELEASE_BEHAVIOR))
@get context_generates_errors = GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_NO_ERROR) == 0
@get context_robustness = ContextRobustness(GLFW.GetWindowAttrib(self.handle, GLFW.CONTEXT_ROBUSTNESS))
end
function Base.close(wnd::Window)
GLFW.DestroyWindow(wnd.handle)
wnd.handle = nothing
end
KirUtil.use(wnd::Window) = (GLFW.MakeContextCurrent(wnd.handle); nothing)
monitor(wnd::Window) = monitor(GLFW.GetWindowMonitor(wnd.handle))
monitor(wnd::Window, monitor::Monitor, refresh_rate::Integer) = GLFW.SetWindowMonitor(wnd.handle, monitor.handle, 0, 0, GLFW.GetWindowSize(wnd.handle)..., refresh_rate)
monitor(wnd::Window, ::Nothing; xpos::Integer = 0, ypos::Integer = 0) = GLFW.SetWindowMonitor(wnd.handle, nothing, xpos, ypos, GLFW.GetWindowSize(wnd.handle)..., GLFW.DONT_CARE)
set_aspect_ratio(wnd::Window, ::Nothing) = GLFW.SetWindowAspectRatio(wnd.handle, GLFW.DONT_CARE, GLFW.DONT_CARE)
set_aspect_ratio(wnd::Window, aspect_ratio::Rational) = GLFW.SetWindowAspectRatio(wnd.handle, numerator(aspect_ratio), denominator(aspect_ratio))
set_aspect_ratio(wnd::Window, values) = GLFW.SetWindowAspectRatio(wnd.handle, values[1], values[2])
export limitsize
function limitsize(wnd::Window; min_width::Integer = -1, min_height::Integer = -1, max_width::Integer = -1, max_height::Integer = -1)
real_limits(lim) = if lim < 1; GLFW.DONT_CARE else; lim end
real_min_width, real_min_height, real_max_width, real_max_height = real_limits.((min_width, min_height, max_width, max_height))
GLFW.SetWindowSizeLimits(wnd.handle, real_min_width, real_min_height, real_max_width, real_max_height)
wnd
end
export maximize, restore
maximize(wnd::Window) = GLFW.MaximizeWindow(wnd.handle)
restore(wnd::Window) = GLFW.RestoreWindow(wnd.handle)
export swapbuffers
swapbuffers(wnd::Window) = GLFW.SwapBuffers(wnd.handle)
export request_attention
request_attention(wnd::Window) = GLFW.RequestWindowAttention(wnd.handle)
macro make_register_window_callback(event, registrar)
esc(:(register_window_callback(handler, wnd::Window, ::Ident{$event}) = GLFW.$registrar(wnd.handle, handler)))
end
"""Registers the default callbacks for the window. These callbacks forward events to concrete, semantic signatures in a more Julian approach."""
function register_default_window_callbacks(wnd::Window)
register_window_callback(wnd, FramebufferSizeEvent) do _::GLFW.Window, width::Integer, height::Integer; on_framebuffer_resize(wnd, width, height) end
register_window_callback(wnd, WindowCloseEvent) do _::GLFW.Window; on_window_close(wnd) end
register_window_callback(wnd, WindowContentScaleEvent) do _::GLFW.Window, scalex::AbstractFloat, scaley::AbstractFloat; on_window_content_scale(wnd, scalex, scaley) end
register_window_callback(wnd, WindowFocusEvent) do _::GLFW.Window, focused::Integer; if focused != 0; on_window_focus(wnd) else; on_window_defocus(wnd) end; end
register_window_callback(wnd, WindowIconifyEvent) do _::GLFW.Window, iconified::Integer; if iconified != 0; on_window_iconify(wnd) else; on_window_restore(wnd) end; end
register_window_callback(wnd, WindowMaximizeEvent) do _::GLFW.Window, maximized::Integer; if maximized != 0; on_window_maximize(wnd) else; on_window_restore(wnd) end; end
register_window_callback(wnd, WindowPosEvent) do _::GLFW.Window, posx::Integer, posy::Integer; on_window_move(wnd, posx, posy) end
register_window_callback(wnd, WindowRefreshEvent) do _::GLFW.Window; on_window_refresh(wnd) end
register_window_callback(wnd, WindowSizeEvent) do _::GLFW.Window, sizex::Integer, sizey::Integer; on_window_resize(wnd, sizex, sizey) end
end
register_window_callback(handler, wnd::Window, event::WindowEvent) = register_window_callback(handler, wnd, Ident{event}())
@make_register_window_callback FramebufferSizeEvent SetFramebufferSizeCallback
@make_register_window_callback WindowCloseEvent SetWindowCloseCallback
@make_register_window_callback WindowContentScaleEvent SetWindowContentScaleCallback
@make_register_window_callback WindowFocusEvent SetWindowFocusCallback
@make_register_window_callback WindowIconifyEvent SetWindowIconifyCallback
@make_register_window_callback WindowMaximizeEvent SetWindowMaximizeCallback
@make_register_window_callback WindowPosEvent SetWindowPosCallback
@make_register_window_callback WindowRefreshEvent SetWindowRefreshCallback
@make_register_window_callback WindowSizeEvent SetWindowSizeCallback
# Window Events
"""Called when the window's framebuffer has been resized."""
function on_framebuffer_resize(::Window, width::Integer, height::Integer) end
"""Called when the window is *requested* to close."""
function on_window_close(::Window) end
"""Called when the window's content scale is adjusted."""
function on_window_content_scale(::Window, scalex::AbstractFloat, scaley::AbstractFloat) end
"""Called when the window loses focus."""
function on_window_defocus(::Window) end
"""Called when the window gains focus."""
function on_window_focus(::Window) end
"""Called when the window is iconified (minimized to taskbar)."""
function on_window_iconify(::Window) end
"""Called when the window is maximized."""
function on_window_maximize(::Window) end
"""Called when the window is moved."""
function on_window_move(::Window, posx::Integer, posy::Integer) end
"""Called when the window should refresh."""
function on_window_refresh(::Window) end
"""Called when the window is resized."""
function on_window_resize(::Window, sizex::Integer, sizey::Integer) end
"""Called when the window is restored, either from being iconified or maximized."""
function on_window_restore(::Window) end
export @windowhint
"""`@windowhint hint value` is a more semantic & Julian, albeit comparatively low-level interface which sets the window
creation hint. It is equivalent to `GLFW.WindowHint(...)`. However, most hints can be adjusted post creation through
`Window`'s virtual properties. Some, especially framebuffer context hints, can only be configured prior to creation
through this macro.
`hint` may be either the desired GLFW hint constant (e.g. `RED_BITS`) or a camel-cased version (e.g. `RedBits` or `redBits`).
`value` may be the appropriate value (Integer/Bool) which is passed to the hint directly.
If `value` is `nothing`, `GLFW.DONT_CARE` is passed instead.
If `value` is an `Enum`, it's `Integer(value)` is passed in. This allows setting the hint to a value such as `OpenGLAPI`
(a `ClientAPI` enum value) which is assigned the appropriate GLFW constant.
Example
-------
```julia
@windowhint TransparentFramebuffer true
@windowhint ClientAPI OpenGLAPI
```"""
macro windowhint(hint::Symbol, value)
glfw_hint = Expr(:., :GLFW, QuoteNode(Symbol(decamelcase(string(hint)))))
Expr(:call, :windowhint, glfw_hint, esc(value))
end
windowhint(hint::UInt32, ::Union{Nothing, Missing}) = windowhint(hint, GLFW.DONT_CARE)
windowhint(hint::UInt32, value::Enum) = windowhint(hint, Integer(value))
windowhint(hint::UInt32, value) = GLFW.WindowHint(hint, value)
decamelcase(sym::AbstractString) = Symbol(uppercase(replace(sym, r"([a-z])([A-Z])" => s"\1_\2")))
Base.convert(::Type{GLFW.Window}, wnd::Window) = wnd.handle
Base.convert(::Type{GLFW.Window}, ::Nothing) = GLFW.Window(C_NULL)
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.2.2 | 601805a1a8bf17062d598b847e1e74c9b5d14036 | docs | 22630 | # GLFWAbstraction.jl
Abstraction layer for [GLFW.jl](https://github.com/JuliaGL/GLFW.jl).
**Call to Action:** I'd greatly appreciate some help with the documentation. :) There's only so much you can do with markdown. A proper website would help readability and navigability of the documentation a lot, but as a sole developer working on a couple of very ambitious projects, time is a scarce luxury.
# Mission
*GLFWAbstraction* is designed to provide a more Julia-native and partly functional experience to working with the [GLFW](https://www.glfw.org/) library.
# Abstractions
Not every component of the GLFW library is abstracted here. Two reasons for this exist: A) GLFW.jl does not expose the feature itself; B) time pressure.
Following are the GLFW components which have received abstractions:
* Windows
* Monitors
* Input
* Keyboard Input
* Mouse Input
These abstractions are not implemented for given reasons:
* Joystick/Gamepad abstraction - GLFW.jl is not up-to-date.
* Input:
* Custom Cursor Images - GLFW.jl is not up-to-date.
* Time & clipboard input - GLFW.jl intentionally does not expose these as there is standard Julia functionality.
* Path dropping - lack of time.
* Vulkan - lack of time.
# Table of Contents
- [GLFWAbstraction.jl](#glfwabstractionjl)
- [Mission](#mission)
- [Abstractions](#abstractions)
- [Table of Contents](#table-of-contents)
- [Documentation](#documentation)
- [Monitors](#monitors)
- [Video Modes](#video-modes)
- [Windows](#windows)
- [Window Creation](#window-creation)
- [Entering & Leaving Fullscreen Mode](#entering--leaving-fullscreen-mode)
- [Borderless Fullscreen Mode](#borderless-fullscreen-mode)
- [Window Attributes](#window-attributes)
- [Aspect Ratio](#aspect-ratio)
- [Window Icon](#window-icon)
- [Window Creation Hints](#window-creation-hints)
- [Window Manipulation Functions](#window-manipulation-functions)
- [Events](#events)
- [Input](#input)
- [Modifier Keys](#modifier-keys)
- [Keyboard Input](#keyboard-input)
- [Events](#events-1)
- [Mouse Input](#mouse-input)
- [Mouse Interface](#mouse-interface)
- [Events](#events-2)
# Documentation
This preliminary documentation shall provide every information necessary to work with GLFWAbstraction.jl.
## Monitors
Monitors revolve around the `Monitor` struct retrieved from the `monitor` function.
Use `monitor(window::Window)` to retrieve the monitor associated with a fullscreen window. If none (i.e. the window is not in fullscreen mode), returns `nothing`. Use `monitor(n::Integer)` to retrieve the `n`-th connected monitor. Usually, you will simply call `monitor(1)` to assign a window to the primary monitor. A convenient `monitor(::Nothing) = nothing` exists if needed.
Another abstraction exists through the `Monitors` meta type. Note the plural *s*. This meta type allows querying information on the collective of connected monitors.
Use `length(Monitors)` to retrieve the total number of connected monitors. You may also use `Monitors[index]` in place of `monitor(index)`. You may also `collect(Monitors)` to retrieve a vector of all currently connected monitors, or `iterate(Monitors)` which allows usage with a regular `for` loop:
```julia
for monitor in Monitors
# do something
end
```
### Video Modes
Monitors are associated with one or more `VideoMode`s. The current video mode of the monitor - either the default desktop video mode or the video mode of its current fullscreen application - can be retrieved through `videomode(monitor)`. All of its supported video modes can be queried through `videomodes(monitor)`, respectively.
While the `VideoMode` contains the same information as `GLFW.VidMode`, it arranges it slightly differently:
```julia
struct VideoMode
width::Int
height::Int
bits::NamedTuple{(:red, :green, :blue), Tuple{UInt8, UInt8, UInt8}}
refresh_rate::Int
end
```
See section [Borderless Fullscreen Mode](#Borderless-Fullscreen-Mode) below to learn how to use this struct.
## Windows
The centerpiece of GLFW is arguably the windowing system. *GLFWAbstraction* tries to simplify it as much as possible. Various GLFW constants were wrapped in their own enums. Updating & retrieving window attributes is accomplished through virtual properties. Events are delegated through Julia's native multiple dispatch.
### Window Creation
`Window{ID}`s are created through the `window` factory. Every `Window` is decorated with an arbitrary `ID`entifier. These are used to hook into the multiple dispatch based event system. A window can be created as such:
```julia
window(id::Symbol, title::AbstractString, width::Integer, height::Integer, [monitor::Monitor], [share::Window])
```
If `monitor === nothing`, the window will be created in windowed mode. Otherwise, it will be created in fullscreen mode on the specified monitor. `share` may be provided if multiple windows need to share the same OpenGL context. One such use case is spanning multiple monitors in fullscreen with two distinct windows.
The `title` will be displayed in the window's title bar - given it uses border decorations. `width` and `height` describe the desired window's drawing size - although the size of the window need not necessarily match the framebuffer's size.
`id` is passed to the `Window{id}` such that it may be used to uniquely identify your window in the event system described below.
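For example (a minimal sketch; the `:main` identifier, title and dimensions are arbitrary):
```julia
using GLFWAbstraction

wnd = window(:main, "My First Window", 1280, 720)                 # windowed mode
# wnd = window(:main, "My First Window", 1920, 1080, monitor(1))  # fullscreen on the primary monitor
```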
### Entering & Leaving Fullscreen Mode
Entering and leaving fullscreen mode is as easy as setting the window's monitor to either a concrete instance or `nothing` respectively:
```julia
monitor(window, monitor::Monitor, refresh_rate::Integer = 0)
monitor(window, ::Nothing)
```
When `refresh_rate` is non-positive, it is synonymous with `GLFW.DONT_CARE`.
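For instance, a short sketch assuming `wnd` was created as shown above:
```julia
monitor(wnd, monitor(1), 60)   # enter fullscreen on the primary monitor at a 60 Hz refresh rate
monitor(wnd, nothing)          # back to windowed mode (optionally pass the xpos/ypos keywords)
```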
### Borderless Fullscreen Mode
*Borderless Fullscreen Mode* is a special variant of fullscreen mode where the video mode of the window in fullscreen mode matches that of the monitor's video mode in desktop mode. Note that, if another application is already in fullscreen on the queried monitor, its video mode will be retrieved. Currently, there is no way to retrieve the desktop video mode directly from the OS through GLFW.
```julia
let mon = monitor(1), vidmode = videomode(mon)
@windowhint redBits vidmode.bits.red
@windowhint greenBits vidmode.bits.green
@windowhint blueBits vidmode.bits.blue
@windowhint refreshRate vidmode.refresh_rate
wnd = window(:id, "Borderless Fullscreen Window", vidmode.width, vidmode.height, mon)
end
```
Unfortunately, window hints are still relatively low-level, borrowing their syntax almost directly from GLFW.
### Window Attributes
GLFW exposes countless window attributes - some related to the window itself, others to its underlying OpenGL context. To the best of my knowledge, all window attributes have received virtual getters and setters for convenient use. Example:
```julia
let wnd = window(...)
wnd.decorated = false
wnd.opacity = 0.5
if wnd.hovered
println("mouse cursor is currently above window")
end
end
```
**Attributes with both getters & setters include:**
| Virtual Property | Get | Set |
| ---------------: | :----------------------------------------- | :------------------------------------------ |
| `auto_iconify` | window attribute | window attribute |
| `decorated` | window attribute | window attribute |
| `floating` | window attribute | window attribute |
| `focus_on_show` | window attribute | window attribute |
| `opacity` | window attribute | window attribute |
| `position` | (unnamed) 2-tuple of `GLFW.GetWindowPos()` | (unnamed) 2-tuple to `GLFW.SetWindowPos()` |
| `resizable` | window attribute | window attribute |
| `shouldclose` | `GLFW.WindowShouldClose()` | `GLFW.SetWindowShouldClose()` |
| `visible` | window attribute | `GLFW.ShowWindow()` and `GLFW.HideWindow()` |
**Attributes with only setters include:**
| Virtual Property | Set |
| ---------------: | :------------------------------------------------ |
| `aspect_ratio` | `GLFW.SetWindowAspectRatio()` - see details below |
| `icon` | `GLFW.SetWindowIcon()` - see details below |
| `title` | `GLFW.SetWindowTitle()` |
**Attributes with only getters include:**
| Virtual Property | Get |
| -----------------: | :-------------------------------------------------- |
| `content_scale` | (unnamed) 2-tuple of `GLFW.GetWindowContentScale()` |
| `focused` | window attribute |
| `framebuffer_size` | (unnamed) 2-tuple of `GLFW.GetFramebufferSize()` |
| `hovered` | window attribute |
| `iconified` | window attribute |
| `maximized` | window attribute |
| `transparent` | window attribute `GLFW.TRANSPARENT_FRAMEBUFFER` |
**Context Attributes with only getters include:**
| Virtual Property | Get |
| ---------------------------: | :------------------------------------------------------------------------------------------------------------------------- |
| `client_api` | `ClientAPI` (enum) from window attribute |
| `context_creation_api` | `ContextCreationAPI` (enum) from window attribute |
| `context_version` | `VersionNumber` from window attributes `GLFW.CONTEXT_VERSION_MAJOR`, `GLFW.CONTEXT_VERSION_MINOR`, `GLFW.CONTEXT_REVISION` |
| `context_forward_compatible` | `Bool` from window attribute |
| `context_debug` | `Bool` from window attribute |
| `context_profile` | `OpenGLProfile` (enum) from window attribute |
| `context_release_behavior` | `ContextReleaseBehavior` (enum) from window attribute |
| `context_generates_errors` | `Bool` from window attribute `GLFW.CONTEXT_NO_ERROR` |
| `context_robustness` | `ContextRobustness` (enum) from window attribute |
### Aspect Ratio
The `wnd.aspect_ratio` attribute has received special treatment. For convenient and semantic use, it can be written as such:
```julia
let wnd = window(...)
wnd.aspect_ratio = 16//9 # |-These are equivalent
wnd.aspect_ratio = (16, 9) # |
wnd.aspect_ratio = nothing # Clear aspect ratio
end
```
When set, the windowing system will enforce the given aspect ratio upon attempting to resize it. The exact behavior is a platform-dependent implementation detail.
### Window Icon
GLFW.jl already simplifies setting the application's icon. All that is needed is to pass in either a single 2D `Matrix` of pixels resembling the icon data, or a `Vector` of such images for animated icons.
*TODO*
### Window Creation Hints
Unfortunately, window creation hints are intertwined with the above [Window Attributes](#window-attributes) and difficult to simplify. As of the time of writing, the best solution I've come up with is the `@windowhint` macro, which is comparatively low-level and follows this syntax:
```julia
@windowhint <attribute> <value>
```
Where `attribute` directly corresponds to the GLFW constants in camel-case, e.g. `TransparentFramebuffer` - although one may also choose to simply use `TRANSPARENT_FRAMEBUFFER`. `value` may then be any valid value - usually an integer. `Enum`s are converted to their integer values and `nothing` is synonymous with `GLFW.DONT_CARE`.
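A short sketch combining the forms described above (hints apply to windows created afterwards):
```julia
@windowhint TransparentFramebuffer true   # boolean hint
@windowhint RefreshRate nothing           # `nothing` stands for GLFW.DONT_CARE
@windowhint ClientAPI OpenGLAPI           # enums are passed as their integer value
wnd = window(:hinted, "Hinted Window", 960, 480)
```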
### Window Manipulation Functions
Few manipulation functions for windows are exposed:
| Function | Purpose |
| -------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `maximize(wnd)` | Maximize the window. |
| `restore(wnd)` | Restore a window from either maximized or iconified/minimized state. |
| `limitsize(wnd; min_width, min_height, max_width, max_height)` | Adjust window's minimum and maximum dimensions. All components are optional. When non-positive, the respective limit is removed. |
| `swapbuffers(wnd)` | Swap front & back buffers. |
| `request_attention(wnd)` | Request the user's attention. The exact behavior is dependent on the underlying platform. On Windows, this will cause the window's icon in the taskbar to blink. |
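For example, assuming `wnd` is an existing `Window`:
```julia
limitsize(wnd; min_width = 640, min_height = 360)   # no maximum size limits
maximize(wnd)
restore(wnd)
request_attention(wnd)
```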
### Events
Events have shifted from callbacks to a Julia-native multiple dispatch based solution. This is where the `ID` in `Window{ID}` comes into play. Following are the signatures for all currently available event handlers:
* `on_framebuffer_resize(::Window{ID}, width::Integer, height::Integer)`
* `on_window_close(::Window{ID})`
* `on_window_content_scale(::Window{ID}, scalex::AbstractFloat, scaley::AbstractFloat)`
* `on_window_defocus(::Window{ID})`
* `on_window_focus(::Window{ID})`
* `on_window_iconify(::Window{ID})`
* `on_window_maximize(::Window{ID})`
* `on_window_move(::Window{ID}, posx::Integer, posy::Integer)`
* `on_window_refresh(::Window{ID})`
* `on_window_resize(::Window{ID}, sizex::Integer, sizey::Integer)`
* `on_window_restore(::Window{ID})`
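For example, to react to events of a window created as `window(:main, ...)`, specialize the handlers on `Window{:main}` (a sketch following the examples shipped with the package):
```julia
GLFWAbstraction.on_window_resize(::Window{:main}, sizex::Integer, sizey::Integer) = println("resized to $(sizex)×$(sizey)")
GLFWAbstraction.on_window_close(::Window{:main}) = println("window closing")
```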
## Input
Few global input functions exist:
| Function | Purpose |
| ----------------------: | :----------------------------------------------------------------------------------------------------------------- |
| `pollevents()` | Poll events & trigger corresponding handlers. |
| `waitevents([timeout])` | Wait for any event & trigger corresponding handlers. If `timeout` is supplied, wait at most for `timeout` seconds. |
| `post_empty_event()` | Post an empty event to the event queue, waking up any `Task` or `Thread` waiting on events. |
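A typical main loop, as in the examples shipped with the package, polls events once per frame; `wnd` is assumed to be a previously created `Window`:
```julia
while !wnd.shouldclose
    swapbuffers(wnd)
    pollevents()
end
```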
### Modifier Keys
Both keyboard & mouse events can come with `ModifierKey`s that were active at the moment the key or mouse button was pressed or released. The pseudo-enum is built using [BitFlags.jl](https://github.com/jmert/BitFlags.jl) which has various benefits over regular enums. The following modifier keys exist:
* `NoMod` - no modifier key was held.
* `ShiftMod`
* `ControlMod`
* `AltMod`
* `SuperMod`
* `CapsLockMod`
* `NumLockMod`
Note that caps lock and num lock may not be usable yet as *GLFW.jl* is not up-to-date.
Test for specific modifier keys like such:
```julia
function foo(modifiers::ModifierKey)
if (modifiers & ShiftMod) != NoMod
# do something
end
end
```
## Keyboard Input
Keyboard input has been simplified through a more humanized `Key` wrapper and dynamic dispatch-style events. The `Key` wrapper provides both human-friendly & programmatic access to all `GLFW.KEY_*` constants. Its signatures are as follows:
```julia
Key(digit::Integer; keypad::Bool = false)
Key(char::Char)
Key(special::Symbol)
```
The `digit` overload retrieves keys `GLFW.KEY_0` through `GLFW.KEY_9`. It throws an `ArgumentError` if `digit ∉ 0:9`. If `keypad` is true, returns the corresponding `GLFW.KEY_KP_*` instead.
The `char` overload retrieves any key on a standard *US QWERTY* keyboard which produces a character. As with the `digit` overload, it throws an `ArgumentError` if the character is invalid. **Note** that, unfortunately, other languages are not supported by the underlying GLFW library itself.
The `special` overload retrieves any key on a standard *US QWERTY* keyboard which *does not* produce a character, such as the *escape*, *print screen*, arrow keys, or *enter* key. Following is a full list of supported special characters:
* Arrow keys (`:up`, `:down`, `:left`, `:right`)
* F keys (`:f1`, `:f2`, ..., `:f25`)
* Left/Right `shift`, `control`, `alt`, `super`, prefixed with `:left_`/`:l`/`:right_`/`:r` respectively
* Numpad keys: `:add`, `:decimal`, `:divide`, `:keypad_enter`, `:keypad_equal`, `:multiply`, `:subtract`
* `:capslock`, `:end`, `:enter`, `:escape`/`:esc`, `:home`, `:menu`, `:numlock`, `:pagedown`, `:pageup`, `:pause`, `:print`/`:printscreen`, `:scrolllock`, `:space`, `:world1`, `:world2`
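A few constructions combining the overloads above (a sketch; each resolves to the corresponding `GLFW.KEY_*` constant):
```julia
Key('a')                 # GLFW.KEY_A
Key(5)                   # GLFW.KEY_5
Key(5, keypad = true)    # GLFW.KEY_KP_5
Key(:escape)             # GLFW.KEY_ESCAPE
Key(:lctrl)              # GLFW.KEY_LEFT_CONTROL
```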
### Events
The following key input related events exist. One hooks into these by specializing on `ID` in `::Window{ID}`, as passed to `window(:ID, ...)`.
| Event | Trigger |
| ------------------------------------------------------------------- | --------------------------------------------------------------- |
| `on_key_press(::Window, ::Key, scancode::Integer, ::ModifierKey)` | Triggered when a key is pressed down. |
| `on_key_release(::Window, ::Key, scancode::Integer, ::ModifierKey)` | Triggered when a key is released. |
| `on_key_repeat(::Window, ::Key, scancode::Integer, ::ModifierKey)` | Triggered when a key is held down and triggers the OS's key repeat. |
| `on_receive_char(::Window, ::Char)` | Triggered when a key stroke produces a Unicode character. |
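As with window events, hook in by specializing on the window's identifier. A sketch for a window created as `window(:main, ...)`:
```julia
function GLFWAbstraction.on_key_press(wnd::Window{:main}, key::Key, scancode::Integer, modifiers::ModifierKey)
    key === Key(:escape) && (wnd.shouldclose = true)
end
```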
## Mouse Input
Akin to [Keyboard Input](#keyboard-input) above, mouse input is abstracted through three main interfaces: `MouseButton`, the `Mouse` meta type, and dynamic dispatch-style events. The `MouseButton` struct has the following signatures:
```julia
MouseButton(idx::Integer)
MouseButton(name::Symbol)
```
The underlying GLFW library only supports mouse buttons `1:8`. Any number beyond this range throws an `ArgumentError`.
The three default named mouse buttons are `:left`, `:right`, and `:middle`, representing mouse buttons 1, 2, and 3 respectively. One may introduce new names for convenience by defining:
```julia
MouseButton(::Ident{:new_button}) = MouseButton(4)
```
where `:new_button` is to be replaced with your desired name.
### Mouse Interface
One may poll the state of the mouse at any point by constructing a `Mouse(window)` instance. It exposes two virtual properties and two query functions. Its properties are:
| Virtual Property | Get | Set |
| ---------------: | :----------------------------------------- | :---------------------------------------------------- |
| `position` | (unnamed) 2-tuple of `GLFW.GetCursorPos()` | `GLFW.SetCursorPos(wnd, values[1], values[2])` |
| `mode` | `GLFW.GetInputMode(wnd, GLFW.CURSOR)` | `GLFW.SetInputMode(wnd, GLFW.Cursor, Integer(value))` |
Note that `mode` should be assigned a value from the `CursorMode` enum, which exposes values `CursorDisabled`, `CursorHidden`, and `CursorNormal`. Both *hidden* and *disabled* modes show no cursor image. Difference being *disabled* prompts GLFW to recenter the cursor on the window whereas *hidden* allows it to leave the window. One would thus, for example, use *disabled* to control a 3D camera.
The two global functions are concerned with testing `isbuttondown` and `isbuttonup` on a `Mouse(window)`. Their signatures are as follows:
```julia
isbuttondown(mouse::Mouse, button::MouseButton)::Bool
isbuttonup( mouse::Mouse, button::MouseButton)::Bool
```
Both return a `Bool` indicating whether the button is currently in the corresponding state. (The `InputAction` enum with values `Press`, `Release`, and `Repeat` is used by the lower-level callbacks; `Repeat` is never emitted for mouse buttons.)
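A short sketch, assuming `wnd` is an existing `Window`:
```julia
m = Mouse(wnd)
x, y = m.position        # current cursor position
m.mode = CursorDisabled  # grab & hide the cursor, e.g. for a 3D camera
GLFWAbstraction.isbuttondown(m, MouseButton(:left))  # qualified call, as the query functions are not exported
```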
### Events
As with keyboard input, various mouse events are triggered:
| Event Signature | Trigger |
| ---------------------------------------------------------- | -------------------------------------------------------------------------- |
| `on_mouse_move(::Window, xpos, ypos)` | Triggered when the mouse is moved while within the confines of the window. |
| `on_mouse_enter(::Window)` | Triggered when the mouse enters the window area. |
| `on_mouse_leave(::Window)` | Triggered when the mouse leaves the window area. |
| `on_mouse_press(::Window, ::MouseButton, ::ModifierKey)` | Triggered when a mouse button is pressed down. |
| `on_mouse_release(::Window, ::MouseButton, ::ModifierKey)` | Triggered when a mouse button is released. |
As with any event, one would hook into these by implementing a specialization on `ID` in `Window{ID}` as provided in the call to `window(:ID, ...)`.
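For instance (a sketch for a window created as `window(:main, ...)`, mirroring the input example shipped with the package):
```julia
GLFWAbstraction.on_mouse_press(::Window{:main}, button::MouseButton, ::ModifierKey) = println("pressed $button")
GLFWAbstraction.on_mouse_enter(::Window{:main}) = println("mouse entered the window")
```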
| GLFWAbstraction | https://github.com/VoxelPopuliEngine/GLFWAbstraction.jl.git |
|
[
"MIT"
] | 0.1.0 | e4412e9fadd14220bcdf0dbb7d0644f86ae92f2f | code | 6440 | module CBSOData3
import HTTP
import JSON
import Tables
export get_tables, get_meta, OdataTable
BASE_URL = "http://opendata.cbs.nl"
API = "ODataFeed/odata"
CATALOG = "ODataCatalog"
# Perform an HTTP GET request and parse the JSON response body.
function get_odata(url)
result = HTTP.get(url)
data = JSON.parse(String(result.body))
end
"""
get_tables(base, catalog)
Get all the tables in the catalog as a list of dicts. The `Identifier` can be
used to get the actual data. If no parameters are given, the data is retrieved
from the [CBS](https://www.cbs.nl) OData3 portal.
The optional parameters are:
- base::String, basename of the OData3 server
- catalog::String, path part of the OData3 server for catalog information
"""
function get_tables(base=BASE_URL, catalog=CATALOG)
url = base * "/" * catalog * "/Tables?\$format=json"
return get_odata(url)["value"]
end
"""
get_meta(table; base, api)
Get the metadata for a given `table`. The result is a dict with the keys `"TableInfos"`, `"DataProperties"` and all the classifications.
Optional parameters are:
- base::String, basename of the OData3 server
- api::String, path part of the OData3 server for regular data
"""
function get_meta(table; base=BASE_URL, api=API)
url = base * "/" * api * "/" * table * "?\$format=json"
metalinks = get_odata(url)["value"]
meta = Dict{String,Any}()
for link in metalinks
name = link["name"]
if name in ("UntypedDataSet", "TypedDataSet")
continue
end
linkurl = link["url"] * "?\$format=json"
meta[name] = get_odata(linkurl)["value"]
end
return meta
end
struct OdataTable
table::String
block::Vector{Any}
nextlink::Union{Nothing, String}
names::Vector{Symbol}
types::Vector{Type}
length::Int
end
function firstline(t::OdataTable)
datablock = getfield(t, :block)
nextlink = getfield(t, :nextlink)
row = 1
return (OdataRow(datablock[row], t), (datablock, nextlink, row))
end
function nextline(t::OdataTable, status)
datablock, nextlink, row = status
if row == length(datablock) && !isnothing(nextlink)
datablock, nextlink = get_block(nextlink)
row = 0
end
row += 1
if row > length(datablock)
return nothing
else
return (OdataRow(datablock[row], t), (datablock, nextlink, row))
end
end
struct OdataRow <: Tables.AbstractRow
row::Dict{String, Any}
table::OdataTable
end
Tables.istable(::OdataTable) = true
names(t::OdataTable) = getfield(t, :names)
Tables.columnnames(t::OdataTable) = getfield(t, :names)
types(t::OdataTable) = getfield(t, :types)
Tables.schema(t::OdataTable) = Tables.Schema(names(t), types(t))
Tables.rowaccess(::OdataTable) = true
Tables.rows(t::OdataTable) = t
Base.eltype(t::OdataTable) = OdataRow
Base.length(t::OdataTable) = getfield(t, :length)
Base.iterate(t::OdataTable) = firstline(t)
Base.iterate(t::OdataTable, st) = nextline(t, st)
Tables.getcolumn(r::OdataRow, s::String) = something(getfield(r, :row)[s], missing)
Tables.getcolumn(r::OdataRow, i::Int) = something(getfield(r, :row)[String(getfield(getfield(r, :table), :names)[i])], missing)
Tables.getcolumn(r::OdataRow, s::Symbol) = something(getfield(r, :row)[String(s)], missing)
Tables.columnnames(r::OdataRow) = collect(keys(getfield(r, :row)))
Base.NamedTuple(r::OdataRow) = NamedTuple{Tuple(Symbol.(keys(getfield(r, :row))))}(values(getfield(r, :row)))
function get_block(url)
data = get_odata(url)
nextlink = "odata.nextLink" in keys(data) ? data["odata.nextLink"] : nothing
return (data["value"], nextlink)
end
function get_table(table; base=BASE_URL, api=API, kind="UntypedDataSet", columns=[], filter="")
if length(columns) == 0
select = ""
else
select = "&\$select=" * join(columns, ",")
end
if length(filter) > 0
filter = "&\$filter=" * HTTP.escape(filter)
end
url = base * "/" * api * "/" * table * "/" * kind * "/?\$format=json" * select * filter
data = get_odata(url)
nextlink = "odata.nextLink" in keys(data) ? data["odata.nextLink"] : nothing
return (data["value"], nextlink)
end
"""
    ODataTable(table; typed, columns, filter, base, api, catalog)
Get a `Tables` object for the dataset defined by `table`.
Optional parameters are:
- typed::Bool, do we want a TypedDataSet (numbers as ints or floats) or an
UntypedDataSet (everything string)
- columns::Vector{String}, exact columnnames to select, empty for all columns
- filter::String, OData3 specification for row filter
- base::String, basename of the OData3 server
- api::String, path part of the OData3 server for regular data
- catalog::String, path part of the OData3 server for catalog information
# Examples
```julia
tbl = CBSOData3.ODataTable("82811NED", columns=["Perioden", "Onderzoekspopulatie_1", "Innovatoren_2"], filter="Perioden eq '2010X000'");
```
"""
function ODataTable(table; typed=true, columns=[], filter="", base=BASE_URL, api=API, catalog=CATALOG)
kind = typed ? "TypedDataSet" : "UntypedDataSet"
if length(filter) == 0
tabellen = get_tables(base, catalog)
tabelinfo = [x for x in tabellen if x["Identifier"] == table]
if length(tabelinfo) == 1
recordcount = tabelinfo[1]["RecordCount"]
else
            recordcount = nothing  # table not found in the catalog
end
nextblock, nextlink = get_table(table, base=base, api=api, kind=kind, columns=columns)
else
        # when a filter is given, read all blocks up front
nextblock, nextlink = get_table(table, base=base, api=api, kind=kind, columns=columns, filter=filter)
while !isnothing(nextlink)
nxtblk, nextlink = get_block(nextlink)
nextblock = vcat(nextblock, nxtblk)
end
recordcount = length(nextblock)
end
if length(columns) > 1
names = Symbol.(columns)
else
meta = get_meta(table)
names = Symbol.(collect(keys(nextblock[1])))
dd = [Symbol(x["Key"])=>x["Position"] for x in meta["DataProperties"] if Symbol(x["Key"]) in names]
sort!(dd, by=x->x.second)
dd = [x.first=>nr for (nr, x) in enumerate(dd)]
names = [x.first for x in dd]
end
types = [typeof(nextblock[1][String(name)]) for name in names]
if typed
types = [Union{Missing, x} for x in types]
end
return OdataTable(table, nextblock, nextlink, names, types, recordcount)
end
end # module
| CBSOData3 | https://github.com/HenricoWitvliet/CBSOData3.jl.git |
|
[
"MIT"
] | 0.1.0 | e4412e9fadd14220bcdf0dbb7d0644f86ae92f2f | docs | 1046 | CBSOData3
=========
This is a simple [Tables](https://github.com/JuliaData/Tables.jl)-interface to use the [CBS](https://opendata.cbs.nl/statline/portal.html?_la=nl&_catalog=CBS) odata3 portal to download all CBS-datasets.
How to use it
-------------
If you know the exact dataset, you can directly use it:
```julia
using DataFrames, CBSOData3
df = DataFrame(CBSOData3.ODataTable("82811NED"))
```
You can use the keyword argument `columns` to give a list of column names to select (case sensitive), and `filter` to give a filter expression for the rows (as a String) using the [OData3](https://www.odata.org/documentation/odata-version-3-0/) rules.
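For example, continuing the snippet above (the table id and column names here come from the package's own docstrings):
```julia
df = DataFrame(CBSOData3.ODataTable("82811NED",
    columns = ["Perioden", "Onderzoekspopulatie_1", "Innovatoren_2"],
    filter  = "Perioden eq '2010X000'"))
```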
For a given table you can get information about the columns in the dataset and about the used classifications by using `get_meta`:
```julia
df_meta = CBSOData3.get_meta("82811NED")
```
This gives a dict with DataProperties and the classifications.
Using `get_tables` you can get a list of dicts of all available tables. The `Identifier` can then be used to get the actual data.
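For example, to collect the identifiers of all available tables:
```julia
tables = CBSOData3.get_tables()
ids = [t["Identifier"] for t in tables]
```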
| CBSOData3 | https://github.com/HenricoWitvliet/CBSOData3.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 256 | module LinearSegmentation
using DocStringExtensions, Statistics, Graphs
# Write your package code here.
include("utils.jl")
include("slidingwindow.jl")
include("topdown.jl")
include("shortestpath.jl")
export sliding_window, top_down, shortest_path
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 3680 | """
$(TYPEDSIGNATURES)
Partition `xs` into segments that are at least longer than `min_segment_length`,
and that have a fit better than `fit_threshold`. By default, the goodness-of-fit
is measured using the coefficient of determination. Each segment must have a
minimum R² of `fit_threshold`. Root mean squared error can also be used by
setting `fit_function = :rmse`, and adjusting `fit_threshold` to a dataset
dependent error threshold. In this case, the root mean squared error must be
smaller than `fit_threshold`.
Builds a directed graph where nodes are the x-data points, and the edge weights
are the goodness-of-fit measure associated with a linear model between these
nodes. Nodes are connected only if their minimum length is greater than
`min_segment_length`, and the goodness-of-fit better than `fit_threshold`. By
default, the end of a segment is also the start of the next segment, but this
can be changed by setting `overlap` to `false` (resulting in disjoint
segmentations). Thereafter, the shortest weighted path spanning the entire
dataset is found using the A-star algorithm. This more-or-less corresponds to
the dynamic programming approach used by other segmentation algorithms.
Sorts data internally as a precomputation step. This is the slowest algorithm,
but should return a segmentation where the segments are as long as possible,
balanced against their fit.
Returns an array of indices `[idxs1, ...]`, where `idxs1` are the indices of
`xs` in the first segment, etc.
# Example
```
segments = shortest_path(xs, ys; min_segment_length=1.2)
```
See also: [`sliding_window`](@ref), [`top_down`](@ref).
"""
function shortest_path(
xs,
ys;
min_segment_length = heuristic_min_segment_length(xs),
fit_threshold = 0.9,
fit_function = :r2,
overlap = true,
)
len_xs = length(xs)
segments = Vector{Vector{Int64}}()
sxs = sortperm(xs) # do this once
g, w = build_digraph(
xs[sxs],
ys[sxs],
min_segment_length,
fit_threshold,
fit_function,
overlap,
)
path_edges = a_star(g, 1, len_xs, w)
for edge in path_edges
i = edge.src
j = edge.dst
if overlap
jj = j
else
jj = j == len_xs ? j : j - 1
end
push!(segments, sxs[i:jj])
end
segments
end
"""
$(TYPEDSIGNATURES)
Build a directed graph where each node represents an index, and edges are
segments that link indices. Enumerates all possible segments that are at least
`min_segment_length` long. Weights are assigned by the goodness-of-fit of the
linear fit between the nodes (negated R² or RMSE, depending on `fit_function`).
If the fit is worse than `fit_threshold`, the weight is set to `Inf`.
"""
function build_digraph(xs, ys, min_segment_length, fit_threshold, fit_function, overlap)
len_xs = length(xs)
g = SimpleDiGraph(len_xs)
weightmatrix = zeros(len_xs, len_xs)
threshold = fit_function == :r2 ? -fit_threshold : fit_threshold
for j in eachindex(xs)
jj = overlap ? j : j - 1
for i in eachindex(xs)
if i < j && i < jj && is_min_length(xs[i:jj], min_segment_length)
add_edge!(g, i, j) # i -> j
_xs = xs[i:jj]
_ys = ys[i:jj]
w =
fit_function == :r2 ? -rsquared(_xs, _ys, least_squares(_xs, _ys)...) :
rmse(_xs, _ys, least_squares(_xs, _ys)...)
weightmatrix[i, j] = w > threshold ? Inf : w + 1.0 # add a constant offset to make r² all positive
elseif i == j
weightmatrix[i, j] = 0.0
else
weightmatrix[i, j] = Inf
end
end
end
g, weightmatrix
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 2247 | """
$(TYPEDSIGNATURES)
Partition `xs` into segments that are at least longer than `min_segment_length`,
and that have a fit better than `fit_threshold`. By default, the goodness-of-fit
is measured using the coefficient of determination. Each segment must have a
minimum R² of `fit_threshold`. Root mean squared error can also be used by
setting `fit_function = :rmse`, and adjusting `fit_threshold` to a dataset
dependent error threshold. In this case, the root mean squared error must be
smaller than `fit_threshold`.
Uses a sliding window approach to segment the data: initially an empty segment
is made, and data added to it until `fit_threshold` is reached. Then a new
segment is made, and the process repeats until the data is exhausted. By
default, the end of a segment is also the start of the next segment, but this
can be changed by setting `overlap` to `false` (resulting in disjoint
segmentations).
Sorts data internally as a precomputation step. Fastest segmentation algorithm
implemented, but also the least accurate.
Returns an array of indices `[idxs1, ...]`, where `idxs1` are the indices of
`xs` in the first segment, etc.
# Example
```
segments = sliding_window(xs, ys; min_segment_length=1.2, fit_threshold=0.9)
```
See also: [`top_down`](@ref), [`shortest_path`](@ref).
"""
function sliding_window(
xs,
ys;
min_segment_length = heuristic_min_segment_length(xs),
fit_threshold = 0.9,
fit_function = :r2,
overlap = true,
)
sxs = sortperm(xs) # increasing order
segments = Vector{Vector{Int64}}()
start_idx = 1
for current_idx = 2:length(sxs)
_xs = xs[sxs[start_idx:current_idx]]
_ys = ys[sxs[start_idx:current_idx]]
is_min_length(_xs, min_segment_length) || continue
lmfit, threshold =
fit_function == :r2 ?
(-rsquared(_xs, _ys, least_squares(_xs, _ys)...), -fit_threshold) :
(rmse(_xs, _ys, least_squares(_xs, _ys)...), fit_threshold)
if lmfit >= threshold
push!(segments, sxs[start_idx:(current_idx-1)])
start_idx = overlap ? current_idx - 1 : current_idx # start is previous end
end
end
# clean up
push!(segments, sxs[start_idx:end])
segments
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 4727 | """
$(TYPEDSIGNATURES)
Partition `xs` into segments that are at least longer than `min_segment_length`,
and that have a fit better than `fit_threshold`. By default, the goodness-of-fit
is measured using the coefficient of determination. Each segment must have a
minimum R² of `fit_threshold`. Root mean squared error can also be used by
setting `fit_function = :rmse`, and adjusting `fit_threshold` to a dataset
dependent error threshold. In this case, the root mean squared error must be
smaller than `fit_threshold`.
Recursively splits the data into two parts with the best fit according to
`fit_function`, while obeying the minimum segment length restrictions from
`min_segment_length`, and goodness-off-fit restrictions from `fit_threshold`.
Sorts data internally as a precomputation step. By default, the end of a
segment is also the start of the next segment, but this can be changed by
setting `overlap` to `false` (resulting in disjoint segmentations).
Returns an array of indices `[idxs1, ...]`, where `idxs1` are the indices of
`xs` in the first segment, etc.
# Example
```
segments = top_down(xs, ys; min_segment_length=1.2)
```
See also: [`sliding_window`](@ref), [`shortest_path`](@ref).
"""
function top_down(
xs,
ys;
min_segment_length = heuristic_min_segment_length(xs),
fit_threshold = 0.9,
fit_function = :r2,
overlap = true,
)
segments = Vector{Vector{Int64}}()
sxs = sortperm(xs) # do this once
_top_down!(
segments,
xs,
ys,
sxs,
1,
length(xs),
min_segment_length,
fit_threshold,
fit_function,
overlap,
)
segments
end
function _top_down!(
segments,
xs,
ys,
sxs,
start_idx,
stop_idx,
min_segment_length,
fit_threshold,
fit_function,
overlap,
)
brkpnt1 = _find_optimum_break_point(
xs,
ys,
sxs,
start_idx,
stop_idx,
min_segment_length,
fit_function,
overlap,
)
if isnothing(brkpnt1)
push!(segments, sxs[start_idx:stop_idx])
return nothing
end
brkpnt2 = overlap ? brkpnt1 : brkpnt1 + 1 # end/start overlap?
_xs1 = xs[sxs[start_idx:brkpnt1]]
_ys1 = ys[sxs[start_idx:brkpnt1]]
ls1, threshold =
fit_function == :r2 ?
(-rsquared(_xs1, _ys1, least_squares(_xs1, _ys1)...), -fit_threshold) :
(rmse(_xs1, _ys1, least_squares(_xs1, _ys1)...), fit_threshold)
_xs2 = xs[sxs[brkpnt2:stop_idx]]
_ys2 = ys[sxs[brkpnt2:stop_idx]]
ls2 =
        fit_function == :r2 ? -rsquared(_xs2, _ys2, least_squares(_xs2, _ys2)...) :
rmse(_xs2, _ys2, least_squares(_xs2, _ys2)...)
if ls1 <= threshold || !is_min_length(_xs1, min_segment_length)
push!(segments, sxs[start_idx:brkpnt1])
else
_top_down!(
segments,
xs,
ys,
sxs,
start_idx,
brkpnt1,
min_segment_length,
fit_threshold,
fit_function,
overlap,
)
end
if ls2 <= threshold || !is_min_length(_xs2, min_segment_length)
push!(segments, sxs[brkpnt2:stop_idx])
else
_top_down!(
segments,
xs,
ys,
sxs,
brkpnt2,
stop_idx,
min_segment_length,
fit_threshold,
fit_function,
overlap,
)
end
nothing
end
function _find_optimum_break_point(
xs,
ys,
sxs,
start1_idx,
stop2_idx,
min_segment_length,
fit_function,
overlap,
)
brkpnts = Int64[]
losses = Float64[]
for current_idx1 = start1_idx:stop2_idx
current_idx2 = overlap ? current_idx1 : current_idx1 + 1
current_idx2 == stop2_idx && break # done
_xs1 = xs[sxs[start1_idx:current_idx1]]
_xs2 = xs[sxs[current_idx2:stop2_idx]]
# both segments need to have at least the minimum length
is_min_length(_xs1, min_segment_length) || continue
is_min_length(_xs2, min_segment_length) || break
_ys1 = ys[sxs[start1_idx:current_idx1]]
_ys2 = ys[sxs[current_idx2:stop2_idx]]
# get minimum loss and break point
v1 =
fit_function == :r2 ? -rsquared(_xs1, _ys1, least_squares(_xs1, _ys1)...) :
rmse(_xs1, _ys1, least_squares(_xs1, _ys1)...)
v2 =
fit_function == :r2 ? -rsquared(_xs2, _ys2, least_squares(_xs2, _ys2)...) :
rmse(_xs2, _ys2, least_squares(_xs2, _ys2)...)
push!(losses, min(v1, v2))
push!(brkpnts, current_idx1)
end
isempty(losses) ? nothing : brkpnts[argmin(losses)]
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 1201 |
"""
$(TYPEDSIGNATURES)
Return true if the interval is longer than `min_segment_length`.
"""
is_min_length(xs, min_segment_length) = abs(-(extrema(xs)...)) >= min_segment_length
"""
$(TYPEDSIGNATURES)
Guess an appropriate minimum segment length: the span of `xs` (maximum minus
minimum) divided by 10.
"""
heuristic_min_segment_length(xs) = abs(-(extrema(xs)...)) / 10
"""
$(TYPEDSIGNATURES)
Calculate the least squares coefficients.
"""
function least_squares(xs, ys)
ymean = mean(ys)
xmean = mean(xs)
b1 =
sum((x - xmean) * (y - ymean) for (x, y) in zip(xs, ys)) /
sum((x - xmean)^2 for x in xs)
b0 = ymean - b1 * xmean
b0, b1
end
"""
$(TYPEDSIGNATURES)
Root mean square error of a fit.
"""
rmse(xs, ys, b0, b1) = sqrt(se(xs, ys, b0, b1) / length(xs))
"""
$(TYPEDSIGNATURES)
Squared error of the fit (the residuals).
"""
se(xs, ys, b0, b1) = sum((y - b0 - b1 * x)^2 for (x, y) in zip(xs, ys))
"""
$(TYPEDSIGNATURES)
Calculate the coefficient of determination (R²) for a linear fit of `b0`, `b1`
on `xs` and `ys`.
"""
function rsquared(xs, ys, b0, b1)
ymean = mean(ys)
1 - sum((y - b0 - b1 * x)^2 for (x, y) in zip(xs, ys)) / sum((y - ymean)^2 for y in ys)
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 473 | using LinearSegmentation
using Test
using Graphs
# test settings
atol = 1e-6
# settings
min_segment_length = 1.0
max_rmse = 0.10
# generate test data
N = 100
xs = collect(range(0, 3 * pi, length = N))
ys = sin.(xs)
@testset "LinearSegmentation.jl" begin
@testset "Internals" begin
include("utils.jl")
end
@testset "Segmentation" begin
include("slidingwindow.jl")
include("topdown.jl")
include("shortestpath.jl")
end
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 1077 | @testset "Shortest path" begin
#r2
segments = shortest_path(xs, ys; min_segment_length)
@test length(segments) == 4
b0, b1 = LinearSegmentation.least_squares(xs[last(segments)], ys[last(segments)])
@test isapprox(
LinearSegmentation.rsquared(xs[last(segments)], ys[last(segments)], b0, b1),
0.9787001222049421;
atol,
)
# rmse
segments = shortest_path(
xs,
ys;
min_segment_length,
fit_threshold = max_rmse,
fit_function = :rmse,
)
@test length(segments) == 4
b0, b1 = LinearSegmentation.least_squares(xs[first(segments)], ys[first(segments)])
@test isapprox(
LinearSegmentation.rmse(xs[first(segments)], ys[first(segments)], b0, b1),
0.06440621325389449;
atol,
)
@test N != length(reduce(vcat, segments))
segments = shortest_path(
xs,
ys;
min_segment_length,
fit_threshold = max_rmse,
fit_function = :rmse,
overlap = false,
)
@test N == length(reduce(vcat, segments))
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 1214 | @testset "Sliding window" begin
# test r2
segments = sliding_window(
xs,
ys;
min_segment_length,
# fit_threshold = 0.9,
)
@test length(segments) == 4
b0, b1 = LinearSegmentation.least_squares(xs[last(segments)], ys[last(segments)])
@test isapprox(
LinearSegmentation.rsquared(xs[last(segments)], ys[last(segments)], b0, b1),
0.9926778131496851;
atol,
)
@test N != length(reduce(vcat, segments))
# test root mean square
segments = sliding_window(
xs,
ys;
min_segment_length,
fit_threshold = max_rmse,
fit_function = :rmse,
)
@test length(segments) == 4
b0, b1 = LinearSegmentation.least_squares(xs[first(segments)], ys[first(segments)])
@test isapprox(
LinearSegmentation.rmse(xs[first(segments)], ys[first(segments)], b0, b1),
0.09975406785624195;
atol,
)
@test N != length(reduce(vcat, segments))
segments = sliding_window(
xs,
ys;
min_segment_length,
fit_threshold = max_rmse,
fit_function = :rmse,
overlap = false,
)
@test N == length(reduce(vcat, segments))
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 1019 | @testset "Top down" begin
#r2
segments = top_down(xs, ys; min_segment_length)
@test length(segments) == 5
b0, b1 = LinearSegmentation.least_squares(xs[last(segments)], ys[last(segments)])
@test isapprox(
LinearSegmentation.rsquared(xs[last(segments)], ys[last(segments)], b0, b1),
0.9926778131496851;
atol,
)
# rmse
segments =
top_down(xs, ys; min_segment_length, fit_threshold = max_rmse, fit_function = :rmse)
@test length(segments) == 9
b0, b1 = LinearSegmentation.least_squares(xs[first(segments)], ys[first(segments)])
@test isapprox(
LinearSegmentation.rmse(xs[first(segments)], ys[first(segments)], b0, b1),
0.023667594679358903;
atol,
)
@test N != length(reduce(vcat, segments))
segments = top_down(
xs,
ys;
min_segment_length,
fit_threshold = max_rmse,
fit_function = :rmse,
overlap = false,
)
@test N == length(reduce(vcat, segments))
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | code | 691 | @testset "Utils" begin
xs = [1, 2, 3, 4, 5, 6]
ys = [1.1, 1.9, 3.1, 4.2, 5.3, 5.9]
b0, b1 = LinearSegmentation.least_squares(xs, ys)
@test isapprox(b0, 0.0533333; atol)
@test isapprox(b1, 1.0085714; atol)
@test isapprox(LinearSegmentation.se(xs, ys, b0, b1), 0.1270476190476182; atol)
@test isapprox(LinearSegmentation.rmse(xs, ys, b0, b1), 0.1455149585939639; atol)
@test isapprox(LinearSegmentation.rsquared(xs, ys, b0, b1), 0.9929135845097545; atol)
@test isapprox(LinearSegmentation.heuristic_min_segment_length(xs), 0.5; atol)
@test !LinearSegmentation.is_min_length(xs[1:2], 3)
@test LinearSegmentation.is_min_length(xs[1:2], 1)
end
| LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 0.3.0 | f53b547aa5bba38171c67d23b00623c6ecca06dc | docs | 3914 | # LinearSegmentation
[repostatus-url]: https://www.repostatus.org/#active
[repostatus-img]: https://www.repostatus.org/badges/latest/active.svg
[](https://github.com/stelmo/LinearSegmentation.jl/actions/workflows/CI.yml?query=branch%3Amaster) [![repostatus-img]][repostatus-url] [](https://pkgs.genieframework.com?packages=LinearSegmentation)
This is a small package that performs linear segmented regression: fitting
piecewise linear functions to data, and simultaneously estimating the best
breakpoints. Three algorithms are implemented: `sliding_window`, `top_down`, and
`shortest_path`.
## Interface
```julia
using LinearSegmentation
segments = segmentation_function(
x_values,
y_values;
min_segment_length = minimum_segment_x_length,
fit_threshold = minimum_r2,
fit_function = :r2,
overlap = true,
)
```
Where `segments = [idxs1, idxs2, ...]` is an array of indices, with `idxs1`
corresponding to the indices of `xs` in the first segment, `idxs2` the second
segment, etc. Minimum segment lengths are specified with `min_segment_length`.
By default, the goodness-of-fit is measured using the coefficient of
determination (R²). Each segment must have a minimum R² of `fit_threshold`. Root
mean squared error can also be used by setting `fit_function = :rmse`, and
adjusting `fit_threshold` to a dataset dependent error threshold. In this case,
the root mean squared error must be smaller than `fit_threshold` for each
segment. By default, the end of a segment is also the start of the next segment,
but this can be changed by setting `overlap` to `false` (resulting in disjoint
segmentations).
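For example, switching to an RMSE cutoff of `0.1` with disjoint segments (the same settings used in the package's tests; `segmentation_function`, `x_values`, and `y_values` are the placeholders from the snippet above):
```julia
segments = segmentation_function(
    x_values,
    y_values;
    min_segment_length = 1.0,
    fit_threshold = 0.1,      # interpreted as a maximum RMSE here
    fit_function = :rmse,
    overlap = false,
)
```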
## Generate some data
```julia
N = 100
xs = collect(range(0, 3 * pi, length = N)) .+ 0.1 .* randn(N)
ys = sin.(xs) .+ 0.1 .* randn(N)
```

## Sliding window
Uses a sliding window approach to segment the data: initially an empty segment
is made, and data added to it until `fit_threshold` is reached. Then a new
segment is made, and the process repeats until the data is exhausted. This
algorithm is the cheapest to run, but may generate worse fits due to its
simplicity.
```julia
segments = sliding_window(xs, ys; min_segment_length=1.2)
```

## Top down
This algorithm recursively splits the data into two parts, attempting to find
segments that are both long enough, and have a good enough fit (set via the
kwargs).
```julia
segments = top_down(xs, ys; min_segment_length=1.2)
```

## Shortest path
This algorithm is *my take* on the dynamic programming approaches used by the R
packages listed below (NB: not equivalent implementations!). In essence, a
weighted directional graph is constructed, where each node corresponds to an
index of `xs`, and the edge weight between nodes corresponds to the
goodness-of-fit measure between the two nodes (segment length restrictions and
maximum error are both incorporated). The shortest weighted path that spans `xs`
is then found with `Graphs.a_star` (see
[Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl)), and should correspond to
the best segmentation.
```julia
segments = shortest_path(xs, ys; min_segment_length=1.2)
```

## Other useful resources
1. https://cran.r-project.org/web/packages/dpseg/vignettes/dpseg.html
2. https://winvector.github.io/RcppDynProg/
3. E. Keogh, S. Chu, D. Hart and M. Pazzani, "An online algorithm for segmenting
time series," Proceedings 2001 IEEE International Conference on Data Mining,
San Jose, CA, USA, 2001, pp. 289-296, doi: 10.1109/ICDM.2001.989531. | LinearSegmentation | https://github.com/stelmo/LinearSegmentation.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 1791 | #using Pkg
#Pkg.add(path="/home/dellint5/repos/SymbolicInference.jl")
using SymbolicInference
using Random , Distributions
using RecurrenceAnalysis
using CairoMakie
using Colors
cycles = 10
rec_rate = 0.3
sd_norm = 0.5
p_window_size = 12
linear_basis = (0:0.6:2*pi*cycles)
n_sample = length(linear_basis)
time_series_sin = map(sin,linear_basis)
# White noise
rng = Random.MersenneTwister(42)
dist = Distributions.Normal(0,sd_norm)
white_n_time_series = rand(dist,n_sample)
# Noisy sine wave
time_series_sin_nois = white_n_time_series .+ time_series_sin
res_recs = map(x -> RecurrenceAnalysis.CrossRecurrenceMatrix(x,time_series_sin,rec_rate;fixedrate=true),
[time_series_sin,white_n_time_series,time_series_sin_nois])
res_recs_joint = map(x -> RecurrenceAnalysis.JointRecurrenceMatrix(x,time_series_sin,rec_rate;fixedrate=true),
[time_series_sin,white_n_time_series,time_series_sin_nois])
#rec_matrix_motifs(rec_matrix;seqs="recurrences",window_range=collect(1:6),n_motifs=2)
all_probs = map(x-> rec_matrix_motifs(x;
    window_range=collect(1:p_window_size),seqs="recurrences"),res_recs)
all_probs_joint = map(x-> rec_matrix_motifs(x;
    window_range=collect(1:p_window_size),seqs="recurrences"),res_recs_joint)
motifs_dict = all_probs[3]
motifs_dict_joint = all_probs_joint[3]
coordinates = SymbolicInference.extract_recurrences_cross(time_series_sin_nois,time_series_sin,
motifs_dict;num_windows = 12)
coordinates_joint = SymbolicInference.extract_recurrences_cross(time_series_sin_nois,time_series_sin,
motifs_dict_joint;num_windows = 12)
CairoMakie.activate!(inline=false)
p = plot_motifs_cross(time_series_sin_nois,time_series_sin,
coordinates;n_motifs=2)
p2 = plot_motifs_cross(time_series_sin_nois,time_series_sin,
coordinates_joint;n_motifs=3)
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 1368 | #using Pkg
#Pkg.add(path="/home/dellint5/repos/SymbolicInference.jl")
using SymbolicInference
using Random , Distributions
using RecurrenceAnalysis
using CairoMakie
using Colors
cycles = 30
rec_rate = 0.6
sd_norm = 0.5
p_window_size = 30
linear_basis = (0:0.6:2*pi*cycles)
n_sample = length(linear_basis)
time_series_sin = map(sin,linear_basis)
# White noise
rng = Random.MersenneTwister(42)
dist = Distributions.Normal(0,sd_norm)
white_n_time_series = rand(dist,n_sample)
# Noisy sine wave
time_series_sin_nois = white_n_time_series .+ time_series_sin
pl_persist_p = persistence_motifs(time_series_sin_nois;n_windows=30)
pl_persist_bar = persistence_barcode(time_series_sin_nois;n_windows=30)
#CairoMakie.save("../paper-vignettes/outputs/persist_sin_nois.png",
res_recs = map(x -> RecurrenceAnalysis.RecurrenceMatrix(x,rec_rate;fixedrate=true),
[time_series_sin,white_n_time_series,time_series_sin_nois])
#rec_matrix_motifs(rec_matrix;seqs="recurrences",window_range=collect(1:6),n_motifs=2)
all_probs = map(x-> SymbolicInference.rec_matrix_motifs(x;
    window_range=collect(1:p_window_size),seqs="recurrences",n_motifs=2),res_recs)
motifs_dict = all_probs[3]
coordinates = SymbolicInference.extract_recurrences(time_series_sin,motifs_dict;num_windows = 10)
CairoMakie.activate!(inline=false)
p = plot_motifs(time_series_sin_nois,coordinates;n_motifs=3)
p
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 697 | using SymbolicInference
using Documenter
DocMeta.setdocmeta!(SymbolicInference, :DocTestSetup, :(using SymbolicInference); recursive=true)
makedocs(;
modules=[SymbolicInference],
authors="= <=> and contributors",
repo="https://github.com/fargolo/SymbolicInference.jl/blob/{commit}{path}#{line}",
sitename="SymbolicInference.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://fargolo.github.io/SymbolicInference.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/fargolo/SymbolicInference.jl",
devbranch="main",
)
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 490 | module SymbolicInference
using StatsBase
using RecurrenceAnalysis
using AnalyticComb
using Distributions
using LinearAlgebra
using Colors
using CairoMakie
include("rqa_interface.jl")
include("explore.jl")
include("tda.jl")
export rec_matrix_motifs,
extract_recurrences, plot_motifs,
extract_recurrences_cross , plot_motifs_cross ,
extract_recurrences_joint , plot_motifs_joint ,
persistence_motifs, persistence_barcode
end
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 12475 |
"""
extract_recurrences(data_source::Vector{Float64}, motifs_dict::Dict{String, Vector}; num_windows::Int64 = 3)
This function returns x and y coordinates for a given window considering start and size of each motif.
The y coordinates are the values from data provided by the user.
"""
function extract_recurrences(data_source::Vector{Float64},
motifs_dict::Dict{String, Vector{Any}}; num_windows::Int64 = 3)
probs_n = first(size(motifs_dict["Probs"][1]))
probs_flat = vcat(motifs_dict["Probs"]...)
sorted_probs_inds = sortperm(probs_flat)
motifs_flat = vcat(motifs_dict["Motifs starts and duration"]...)
motifs_by_window = motifs_flat[sorted_probs_inds[1:num_windows]]
full_data_dict = []
for (index, motif) in enumerate(motifs_by_window)
motif_start, motif_size = motif
motif_range = motif_start:(motif_start+motif_size)-1
motif_y1 = data_source[motif_range]
motif_window = div(sorted_probs_inds[index],probs_n)
motif_rec_start = motif_start + motif_window
motif_rec_range = motif_rec_start:(motif_rec_start+motif_size)-1
motif_y2 = data_source[motif_rec_range]
push!(full_data_dict, Dict(
"x1" => collect(motif_range), "y1" => motif_y1,
"x2" => collect(motif_rec_range), "y2" => motif_y2,
"window" => motif_window, "size" => motif_size,
"prob" => probs_flat[sorted_probs_inds][index]))
end
return full_data_dict
end
"""
plot_motifs(time_series::Vector{Float64},coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
Plot motifs from coordinates extracted with `extract_recurrences`.
"""
function plot_motifs(time_series::Vector{Float64},
coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
pl = CairoMakie.Figure(size = plot_size)
p = Axis(pl[1, 1])
lines!(p,1:length(time_series), time_series, color=:black)
if n_motifs == 1
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
first_coordinate = coordinates[1]
lines!(p, first_coordinate["x1"], first_coordinate["y1"], linewidth=5, color=rcolor)
lines!(p, first_coordinate["x2"], first_coordinate["y2"], linewidth=5, color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x1"], first_coordinate["y1"], color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x2"], first_coordinate["y2"], color=rcolor)
CairoMakie.bracket!(p,
text = "window:"*string(first_coordinate["window"])*
"; size:"*string(first_coordinate["size"])*
"\n p-value: "*string(round(first_coordinate["prob"],digits=3)),
first_coordinate["x1"][1], first_coordinate["y1"][1],
first_coordinate["x2"][1], first_coordinate["y2"][1], color=rcolor)
else
for motif in coordinates[1:n_motifs]
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
lines!(p, motif["x1"], motif["y1"], linewidth=5, color=(rcolor,0.3))
lines!(p, motif["x2"], motif["y2"], linewidth=5, color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x1"], motif["y1"], color=rcolor)
CairoMakie.scatter!(p, motif["x2"], motif["y2"], color=rcolor)
CairoMakie.bracket!(p,
text = "window:"*string(motif["window"])*
"; size:"*string(motif["size"])*
"\n p-value: "*string(round(motif["prob"],digits=3)),
motif["x1"][1], motif["y1"][1],
motif["x2"][1], motif["y2"][1], color=rcolor)
end
end
return pl
end
"""
extract_recurrences_cross(data_source::Vector{Float64}, data_source2::Vector{Float64},
motifs_dict::Dict{String, Vector}; num_windows::Int64 = 3)
This function returns x and y coordinates for a given window
considering start and size of each motif detected from
two time-series in cross-recurrence matrices .
The y coordinates are the values from data provided by the user.
"""
function extract_recurrences_cross(data_source::Vector{Float64}, data_source2::Vector{Float64},
motifs_dict::Dict{String, Vector{Any}}; num_windows::Int64 = 3)
probs_n = first(size(motifs_dict["Probs"][1]))
probs_flat = vcat(motifs_dict["Probs"]...)
sorted_probs_inds = sortperm(probs_flat)
motifs_flat = vcat(motifs_dict["Motifs starts and duration"]...)
motifs_by_window = motifs_flat[sorted_probs_inds[1:num_windows]]
full_data_dict = []
for (index, motif) in enumerate(motifs_by_window)
motif_start, motif_size = motif
motif_range = motif_start:(motif_start+motif_size)-1
motif_y1 = data_source[motif_range]
motif_window = div(sorted_probs_inds[index],probs_n)
motif_rec_start = motif_start + motif_window
motif_rec_range = motif_rec_start:(motif_rec_start+motif_size)-1
motif_y2 = data_source2[motif_rec_range]
push!(full_data_dict, Dict(
"x1" => collect(motif_range), "y1" => motif_y1,
"x2" => collect(motif_rec_range), "y2" => motif_y2,
"window" => motif_window, "size" => motif_size,
"prob" => probs_flat[sorted_probs_inds][index]))
end
return full_data_dict
end
"""
plot_motifs_cross(time_series::Vector{Float64},time_series2::Vector{Float64},
coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
Plot motifs from coordinates extracted with `extract_recurrences_cross`.
"""
function plot_motifs_cross(time_series::Vector{Float64}, time_series2::Vector{Float64},
coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
pl = CairoMakie.Figure(size = plot_size)
p = Axis(pl[1, 1])
lines!(p,1:length(time_series), time_series, color=:black)
lines!(p,1:length(time_series2), time_series2, color=:gray)
if n_motifs == 1
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
first_coordinate = coordinates[1]
lines!(p, first_coordinate["x1"], first_coordinate["y1"], linewidth=5, color=rcolor)
lines!(p, first_coordinate["x2"], first_coordinate["y2"], linewidth=5, color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x1"], first_coordinate["y1"], color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x2"], first_coordinate["y2"], color=rcolor)
CairoMakie.bracket!(p,
text = "window:"*string(first_coordinate["window"])*
"; size:"*string(first_coordinate["size"])*
"\n p-value: "*string(round(first_coordinate["prob"],digits=3)),
first_coordinate["x1"][1], first_coordinate["y1"][1],
first_coordinate["x2"][1], first_coordinate["y2"][1], color=rcolor)
else
for motif in coordinates[1:n_motifs]
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
lines!(p, motif["x1"], motif["y1"], linewidth=5, color=(rcolor,0.3))
lines!(p, motif["x2"], motif["y2"], linewidth=5, color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x1"], motif["y1"], color=rcolor)
CairoMakie.scatter!(p, motif["x2"], motif["y2"], color=rcolor)
CairoMakie.bracket!(p,
text = "window:"*string(motif["window"])*
"; size:"*string(motif["size"])*
"\n p-value: "*string(round(motif["prob"],digits=3)),
motif["x1"][1], motif["y1"][1],
motif["x2"][1], motif["y2"][1], color=rcolor)
end
end
return pl
end
"""
extract_recurrences_joint(data_source::Vector{Float64}, data_source2::Vector{Float64},
motifs_dict::Dict{String, Vector}; num_windows::Int64 = 3)
This function returns x and y coordinates for a given window
considering start and size of each motif detected from
two time-series in joint-recurrence matrices .
The y coordinates are the values from data provided by the user.
"""
function extract_recurrences_joint(data_source::Vector{Float64}, data_source2::Vector{Float64},
motifs_dict::Dict{String, Vector{Any}}; num_windows::Int64 = 3)
probs_n = first(size(motifs_dict["Probs"][1]))
probs_flat = vcat(motifs_dict["Probs"]...)
sorted_probs_inds = sortperm(probs_flat)
motifs_flat = vcat(motifs_dict["Motifs starts and duration"]...)
motifs_by_window = motifs_flat[sorted_probs_inds[1:num_windows]]
full_data_dict = []
for (index, motif) in enumerate(motifs_by_window)
motif_start, motif_size = motif
motif_range = motif_start:(motif_start+motif_size)-1
motif_ya1 = data_source[motif_range]
motif_window = div(sorted_probs_inds[index],probs_n)
motif_rec_start = motif_start + motif_window
motif_rec_range = motif_rec_start:(motif_rec_start+motif_size)-1
motif_yb1 = data_source2[motif_range]
motif_yb2 = data_source2[motif_rec_range]
motif_ya2 = data_source[motif_rec_range]
push!(full_data_dict, Dict(
"x1" => collect(motif_range), "x2" => collect(motif_rec_range),
"ya1" => motif_ya1, "ya2" => motif_ya2,
"yb1" => motif_yb1, "yb2" => motif_yb2,
"window" => motif_window, "size" => motif_size,
"prob" => probs_flat[sorted_probs_inds][index]))
end
return full_data_dict
end
"""
plot_motifs_joint(time_series::Vector{Float64},time_series2::Vector{Float64},
coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
Plot motifs from coordinates extracted with `extract_recurrences_joint`.
"""
function plot_motifs_joint(time_series::Vector{Float64}, time_series2::Vector{Float64},
coordinates::Vector{Any}; plot_size=(2000, 1000), n_motifs=2)
pl = CairoMakie.Figure(size = plot_size)
p = Axis(pl[1, 1])
lines!(p,1:length(time_series), time_series, color=:black)
lines!(p,1:length(time_series2), time_series2, color=:gray)
if n_motifs == 1
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
first_coordinate = coordinates[1]
lines!(p, first_coordinate["x1"], first_coordinate["ya1"], linewidth=5, color=rcolor)
lines!(p, first_coordinate["x1"], first_coordinate["yb1"], linewidth=5, color=rcolor)
lines!(p, first_coordinate["x2"], first_coordinate["ya2"], linewidth=5, color=rcolor)
lines!(p, first_coordinate["x2"], first_coordinate["yb2"], linewidth=5, color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x1"], first_coordinate["ya1"], color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x1"], first_coordinate["yb1"], color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x2"], first_coordinate["ya2"], color=rcolor)
CairoMakie.scatter!(p, first_coordinate["x2"], first_coordinate["yb2"], color=rcolor)
CairoMakie.bracket!(p,
text = "window:"*string(first_coordinate["window"])*
"; size:"*string(first_coordinate["size"])*
"\n p-value: "*string(round(first_coordinate["prob"],digits=3)),
first_coordinate["x1"][1], first_coordinate["ya1"][1],
first_coordinate["x2"][1], first_coordinate["ya2"][1], color=rcolor)
else
for motif in coordinates[1:n_motifs]
R1, G1, B1 = rand(3)
rcolor = Colors.RGB(R1, G1, B1)
lines!(p, motif["x1"], motif["ya1"], linewidth=5, color=(rcolor,0.3))
lines!(p, motif["x1"], motif["yb1"], linewidth=5, color=(rcolor,0.3))
lines!(p, motif["x2"], motif["ya2"], linewidth=5, color=(rcolor,0.3))
lines!(p, motif["x2"], motif["yb2"], linewidth=5, color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x1"], motif["ya1"], color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x1"], motif["yb1"], color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x2"], motif["ya2"], color=(rcolor,0.3))
CairoMakie.scatter!(p, motif["x2"], motif["yb2"], color=(rcolor,0.3))
CairoMakie.bracket!(p,
text = "window:"*string(motif["window"])*
"; size:"*string(motif["size"])*
"\n p-value: "*string(round(motif["prob"],digits=3)),
motif["x1"][1], motif["ya1"][1],
motif["x2"][1], motif["ya2"][1], color=rcolor)
end
end
return pl
end
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 3586 | """
rec_matrix_motifs(rec_matrix::RecurrenceMatrix;seqs="recurrences",window_range=collect(1:6),n_motifs=2)
Returns set of probabilities associated with consecutive runs in off-diagonals.
Argument `seqs` sets the type of consecutive sequences: either 'double' (recurrences and non-recurrences),
'recurrences' or 'poincare' (non-recurrences). The diagonals given by `window_range` argument are considered,
along with n_motifs for each diagonal. See `AnalyticComb.weighted_bin_runs_prob` for definition of symbolic construction.
"""
function rec_matrix_motifs(
rec_matrix::Union{RecurrenceMatrix,CrossRecurrenceMatrix,JointRecurrenceMatrix};
seqs="recurrences", window_range=collect(1:6), n_motifs=2)
if seqs ∉ ["double","recurrences","poincare"]
println("'seqs' must be either 'double', 'recurrences' or 'poincare'")
return(NaN)
end
mat_len = size(rec_matrix,1)
if mat_len < window_range[end]
println("Largest range in 'window_range' must be smaller than matrix length")
return(NaN)
end
probs = []#Vector{Union{Missing,Float64}}[]
motifs_inds_duration = []#Vector{Union{Missing,{Tuple{Int64, Int64}}}}[]
# p and q values
p = RecurrenceAnalysis.recurrencerate(rec_matrix)
q = 1-p
println("\n RR is: ",p)
println("P and Q are: ",p," and ",q)
for i in window_range
# add Try catch to entire loop over window
try
cur_len = mat_len - i
col_counts = StatsBase.rle(LinearAlgebra.diag(Matrix(rec_matrix),i))
zipped_tups = collect(zip(col_counts[2],col_counts[1]))
if seqs == "recurrences"
                zipped_tups = filter(x -> x[2] == 1, zipped_tups) # RECURRENCES: keep runs whose value is 1
elseif seqs == "poincare"
                zipped_tups = filter(x -> x[2] == 0, zipped_tups) # POINCARE TIMES: keep runs whose value is 0
end
println("\n Zipped tuples: ")
print(zipped_tups)
println("\n Current segment length: ")
print(cur_len)
cur_counts = first.(zipped_tups)
max_vals_inds = partialsortperm(cur_counts, 1:n_motifs, rev=true)
max_vals = cur_counts[max_vals_inds]
println("\n Diagonal largest sequences sizes and current total length are")
print(max_vals," and ", cur_len)
cur_probs = map(x -> AnalyticComb.weighted_bin_runs_pval(p,q,x,cur_len),max_vals)
println("\n Probabilities:")
print(cur_probs)
push!(probs,cur_probs)
# Sum counts (stored in col_counts[2]) of sequences appearing before current sequence col_counts[2][max_val_inds[x] - 1]
# to obtain position of current sequence in original diagonal
cur_inds = map(x -> sum(col_counts[2][1:(max_vals_inds[x]-1)]) + 1,1:n_motifs)
zipped_motifs_inds_duration = collect(zip(cur_inds, max_vals))
push!(motifs_inds_duration, zipped_motifs_inds_duration)
catch e
println("\n Check if sequence has zero occurences or n_motifs is larger than the number of motifs in diagonal")
push!(probs,missing)
push!(motifs_inds_duration,missing)
end
end
dict_keys = ["Window","Probs","Motifs starts and duration"]
Dict(zip(dict_keys,
[window_range, # Window
probs, # Probs
motifs_inds_duration])) #Motif starts
end | SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 2101 | """
persistence_motifs(time_series; range = collect(0.1:0.1:0.9), n_windows=10)
Returns curves for p-values vs. Recurrence rate in each window.
"""
function persistence_motifs(time_series; range = collect(0.1:0.1:0.9), n_windows=10)
all_motifs = []
for cur_range in range
time_series_mat = RecurrenceAnalysis.RecurrenceMatrix(time_series, cur_range; fixedrate=true)
motifs_result = SymbolicInference.rec_matrix_motifs(time_series_mat; window_range = collect(1:n_windows), n_motifs=1)
push!(all_motifs, motifs_result)
end
num_windows = length(all_motifs[1]["Probs"]) # Assuming all have the same number of motifs
fig = Figure()
ax = Axis(fig[1, 1], limits = (0, 1, 0, 1),
xlabel="Recurrence Rates",
ylabel="Probabilities")
for j in 1:num_windows
x_vals = range
y_vals = [all_motifs[i]["Probs"][j] for i in 1:length(range)]
lines!(ax, x_vals, vcat(y_vals...), label="Window $j")
end
Legend(fig[1, 2], ax, "Motifs")
fig
end
"""
persistence_barcode(time_series; range = collect(0.1:0.1:0.9), n_windows=10,alpha_thresh=0.05)
Return barcode plot for each window.
Points are plotted whenever the p-value is smaller than `alpha_thresh`.
"""
function persistence_barcode(time_series; range = collect(0.1:0.1:0.9), n_windows=10,alpha_thresh=0.05)
all_motifs = []
for cur_range in range
time_series_mat = RecurrenceMatrix(time_series, cur_range; fixedrate=true)
motifs_result = rec_matrix_motifs(time_series_mat; window_range=collect(1:n_windows), n_motifs=1)
push!(all_motifs, motifs_result)
end
fig = Figure()
ax = Axis(fig[1, 1], limits=(0, 1, 0, n_windows + 1),
xlabel="Recurrence Rates", ylabel="Windows")
for cur_window in 1:n_windows
x_vals = range
y_vals = [all_motifs[j]["Probs"][cur_window] for j in 1:length(range)]
inds = findall(x -> x < alpha_thresh, vcat(y_vals...))
scatter!(ax, x_vals[inds], fill(cur_window,length(inds)) , label="Window $cur_window")
end
fig
end
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | code | 107 | using SymbolicInference
using Test
@testset "SymbolicInference.jl" begin
# Write your tests here.
end
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | docs | 2357 | # SymbolicInference
[](https://fargolo.github.io/SymbolicInference.jl/stable/)
[](https://fargolo.github.io/SymbolicInference.jl/dev/)
[](https://github.com/fargolo/SymbolicInference.jl/actions/workflows/CI.yml?query=branch%3Amain)
Probability-based inferences based on the symbolic method.
This software implements algorithms that leverage analytic combinatorics to make inference about different types of objects. [See AnalyticComb.jl](https://fargolo.github.io/AnalyticComb.jl/dev/)
## Time-series
### Chaotic, non-linear systems and recurrence analysis
See the [white-paper](https://osf.io/preprints/osf/3ws85) and it's [repo](https://github.com/fargolo/paper-vignettes/)
The recurrence of states, in the meaning that states are again arbitrarily close after some time of divergence, is a fundamental property of deterministic dynamical systems and is typical for nonlinear or chaotic systems.
Poincaré discusse this property in 1890 and it was later proved by Constantin Carathéodory (see Poincaré recurrence theorem). Further on, several techniques address recurrences in dynamical systems for inference.
Recurrence plots (RPs) were creature to capture such patterns and several parameters that caractherize the underlying time-series can be obtained with recurrence quantification analysis (RQA).
Since each pair of states is mapped into a binary value ('close enough',1, or 'not close enough',0), making probabilistic inference with symbolic methods is straightforward.
The procedure in `rec_matrix_motifs()` iterates over [recurrence matrices](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/) diagonals and checks for repeated motifs. Specifically, whether the size of the longest consecutive sequence in each off-diagonal is unexpectedly large. This may hint at underlying patterns, such as autocorrelations and periodic components.


## Graphs
## Genomics
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 1.0.4 | 5873fd059b5562d0bc17a8e920e14fb1629e9c95 | docs | 220 | ```@meta
CurrentModule = SymbolicInference
```
# SymbolicInference
Documentation for [SymbolicInference](https://github.com/fargolo/SymbolicInference.jl).
```@index
```
```@autodocs
Modules = [SymbolicInference]
```
| SymbolicInference | https://github.com/fargolo/SymbolicInference.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 301 | module ElementaryChemistry
include("GassLaws.jl")
include("conversions/TempratureConversion.jl")
include("conversions/PressureConversion.jl")
export charles_law, boyles_law_solve_pressure, boyles_law_solve_volume, gay_lussacs_law, avogadros_law, absolute_temperature,
grahams_law_velocity_ratio
end
| ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 3339 | ```
The gas laws to calculate volume by
temperature and/or pressure changes respectively :
Charles, Boyles, Gay Lussacs and Avogadros laws
```
"""
Calculate the volume using Charles' law, which states that the volume of a gas
is directly proportional to its absolute temperature.
curentVolume = current volume of gas in c.c.
curenttemperature = the current temperature in either C or F
newtemperature = the temperature the gas is being heated or cooled to
temperatureUnit = (c)entigrade, (f)ahrenheit, or (a)bsolute
"""
function charles_law(curentVolume, curenttemperature, newtemperature, temperatureUnit)
newVolume = 0;
curenttemperature = absolute_temperature(curenttemperature, temperatureUnit)
newtemperature = absolute_temperature(newtemperature, temperatureUnit)
newVolume = curentVolume * (newtemperature / curenttemperature)
return newVolume
end
"""
When temperature remains constant but pressure changes, we can calculate the new volume using
Boyle's law, solving for pressure.
It simply states: new volume = old volume * old pressure divided by new pressure
curentPressure = the current pressure in mm
curentVolume = the current volume of the gas
desiredPressure = The pressure we want to be at
"""
function boyles_law_solve_pressure(curentPressure, curentVolume, desiredPressure)
newVolume = 0;
newVolume = (curentVolume * curentPressure) / desiredPressure
return newVolume
end
"""
When temperature remains constant but volume changes, we can calculate the new pressure using
Boyle's law, solving for volume.
It simply states: new pressure = old pressure * current volume divided by new volume
curentPressure = the current pressure in mm
curentVolume = the current volume of the gas
desiredVolume = The volume we want to have
"""
function boyles_law_solve_volume(curentPressure, curentVolume, desiredVolume)
newPressure = 0
newPressure = (curentPressure * curentVolume) / desiredVolume
return newPressure
end
"""
The pressure of a gas of fixed mass and fixed volume is directly
proportional to the gas's absolute temperature.
If a gas's temperature increases, so does its pressure if the mass and volume are
held constant.
pressure = current pressure in atm
temperature = current temperature of the container
newTemp = temperature at which we want to know the pressure
temperatureUnit = (c)entigrade, (f)ahrenheit, or (a)bsolute
"""
function gay_lussacs_law(pressure, temperature, newTemp, temperatureUnit)
newTemp = absolute_temperature(newTemp, temperatureUnit)
temperature = absolute_temperature(temperature, temperatureUnit)
newPressure = (pressure * newTemp) / temperature
return newPressure
end
"""
Avogadro's law, under the assumption of ideal gases, teaches us that
equal volumes of any gases at the same temperature and pressure contain the
same number of molecules, or: V1/N1 = V2/N2
"""
function avogadros_law()
    return nothing
end
"""
One of the use cases of Graham's law
is that we can calculate the ratio of diffusion rates of two gases.
The rate of effusion or diffusion of a gas is inversely proportional to the square
root of the molar mass of the gas.
Give the molar masses of both gases in g/mol.
"""
function grahams_law_velocity_ratio(molar_mass_a,molar_mass_b)
ratio = sqrt(molar_mass_a/molar_mass_b)
return ratio
end
| ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 1016 | ```
Chlorine, a gas first used as a weapon by the Germans during the war, is an extremely irritating and poisonous gas.
However, it can also sterilize germ-infected water, rendering it safe for consumption.
It's made in a lab by heating Manganese dioxide and concentrated hydrochloric acid.
```
"""
Given that we use enough hydrochloric acid,
how much chlorine in Grams (default) or (L)iters at NTP do we get from x grams of Manganese dioxide.
"""
function magnese_dioxide_to_chlorine(magneseGrams,unit = "grams")
if lowercase(unit) == "grams"
return (magneseGrams/86.94) * 71
elseif lowercase(unit) == "litre" || unit == "liter"
return (magneseGrams/86.94) * 22.4
end
end
"""
How much hydrochloric acid do we need to convert X amount of Manganese dioxide into chlorine.
"""
function hydrochloric_acid_needed(magneseGrams)
hydroc = (36.5 * 4) * magneseGrams / 86.94
return hydroc
end | ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 49 | #This script will handle all pressure conversion. | ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 484 | """
Converts Fahrenheit or Centigrade to absolute temperature
temperature = temperature to Convert
temperatureUnit = (c)entigrade, (f)ahrenheit, or (a)bsolute
"""
function absolute_temperature(temperature,temperatureUnit)
if lowercase(temperatureUnit) == "c"
return round(temperature + 273)
elseif lowercase(temperatureUnit) == "f"
return round((temperature + 459) / 1.8)
elseif lowercase(temperatureUnit) == "a"
return temperature
end
end
| ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 879 | using Test
include("../src/GassLaws.jl")
include("../src/Gasses/Chlorine.jl")
include("../src/conversions/TempratureConversion.jl")
@test 288 == absolute_temperature(15,"c")
@test 288 == absolute_temperature(59,"f")
@test 93.25 == charles_law(72,15,100,"c")
@test 93.25 == charles_law(72,59,212,"F")
@test 125 == boyles_law_solve_pressure(500,100,400)
@test 67.0 == round(boyles_law_solve_pressure(500,100,750))
@test 200 == boyles_law_solve_volume(100,2,1)
@test 50 == boyles_law_solve_volume(100,2,4)
@test 11.25503355704698 == gay_lussacs_law(3,25,845,"c")
@test 1.291 == round(grahams_law_velocity_ratio(20,12), digits = 3)
@test 0.775 == round(grahams_law_velocity_ratio(12,20), digits =3)
@test 8.166551644812515 == magnese_dioxide_to_chlorine(10)
@test 2.576489533011272 == magnese_dioxide_to_chlorine(10,"litre")
@test 73.0503795721187 == hydrochloric_acid_needed(43.5) | ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | code | 83 | using Test
@testset "Temperature, Gas laws" begin
include("GassesTest.jl")
end
| ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.2 | 1a27d437079ce4a5970fef69013565ea21424df9 | docs | 410 | # chemistry.jl
An elementary introduction into chemistry,
Based on "An Elementary Chemistry" By Eric John Holmyard.
Starting with Gas Laws :
- [x] Charles law
- [x] Boyles law
- [x] Gay Lussac's law
- [x] Avogadro's law
- [x] Grahams law
Specific Gasses :
- [ ] Chlorine - in development
Extra Functionalities
- [x] Temparture (C°/F°) to K° (absolute)
- [ ] Converting pressure units (atm,mmhg, etc.)
| ElementaryChemistry | https://github.com/baobabfruit88/ElementaryChemistry.jl.git |
|
[
"MIT"
] | 0.1.0 | 30b6ee355f4fb9a79db85300f2d96e1582eb89aa | code | 590 |
module SimpleGraphConverter
using SimpleGraphs, Graphs
import SimpleGraphs: UndirectedGraph, UG
import Graphs: SimpleGraph
function UndirectedGraph(g::SimpleGraph{T})::UG{T} where {T}
G = UG{T}()
for v in vertices(g)
add!(G, v)
end
for e in edges(g)
u = e.dst
v = e.src
add!(G, u, v)
end
return G
end
function SimpleGraph(G::UG)::SimpleGraph
H = relabel(G)
n = NV(H)
g = SimpleGraph(n)
for e in H.E
u, v = e
add_edge!(g, u, v)
end
return g
end
end # module SimpleGraphConverter
| SimpleGraphConverter | https://github.com/scheinerman/SimpleGraphConverter.jl.git |
|
[
"MIT"
] | 0.1.0 | 30b6ee355f4fb9a79db85300f2d96e1582eb89aa | code | 438 | using Test, SimpleGraphs, Graphs, SimpleGraphConverter
@testset "SimpleGraph to UndirectedGraph" begin
g = path_graph(10)
G = UG(g)
@test G == Path(10)
g = cycle_graph(8)
G = UndirectedGraph(g)
@test G == Cycle(8)
end
@testset "UndirectedGraph to SimpleGraph" begin
G = Path(10)
g = SimpleGraph(G)
@test g == path_graph(10)
G = Cycle(8)
g = SimpleGraph(G)
@test g == cycle_graph(8)
end | SimpleGraphConverter | https://github.com/scheinerman/SimpleGraphConverter.jl.git |
|
[
"MIT"
] | 0.1.0 | 30b6ee355f4fb9a79db85300f2d96e1582eb89aa | docs | 2398 | # SimpleGraphConverter
This package converts graphs between the
[`Graphs`](https://github.com/JuliaGraphs/Graphs.jl) and
[`SimpleGraphs`](https://github.com/scheinerman/SimpleGraphs.jl.git) modules.
## Overview
The Julia modules `Graphs` [formerly `LightGraphs`] and `SimpleGraphs` can be used for graph theory computations.
Simple graphs (graphs without directions, loops, or multiple edges) are defined in both modules.
* In `Graphs` the datatype is `SimpleGraph`.
* In `SimpleGraphs` the data type is `UndirectedGraph` (which may be abbreviated `UG`).
This `SimpleGraphConverter` module helps with conversion from one type to the other.
* If `g` is a `SimpleGraph`, then `UndirectedGraph(g)` [or `UG(g)`] converts the graph to type `UndirectedGraph`.
* If `G` is an `UndirectedGraph`, then `SimpleGraph(G)` converts the graph to a `SimpleGraph`.
## Examples
#### Converting a `SimpleGraph` to an `UndirectedGraph`
```
julia> using Graphs, SimpleGraphs, SimpleGraphConverter
julia> g = cycle_graph(6)
{6, 6} undirected simple Int64 graph
julia> G = UG(g)
UndirectedGraph{Int64} (n=6, m=6)
julia> G == Cycle(6)
true
```
#### Converting an `UndirectedGraph` to a `SimpleGraph`
```
julia> G = Path(9)
Path (n=9, m=8)
julia> g = SimpleGraph(G)
{9, 8} undirected simple Int64 graph
julia> g == path_graph(9)
true
```
## Loss of vertex names
The vertex set of a `SimpleGraph` (from the `Graphs` module) is always a
set of integers of the form `{1,2,...,n}`.
The vertex set of an `UndirectedGraph` can contain
arbitrary types.
When converting from an `UndirectedGraph` to a `SimpleGraph`, the names
of the vertices are converted to consecutive integers.
In this example, the `Petersen()` function returns the Petersen graph as an `UndirectedGraph`. The ten vertices are the two-element subsets of `{1,2,3,4,5}`.
When we convert to a `SimpleGraph`, the resulting graph has ten vertices that are the integers from `1` to `10`. When we convert that `SimpleGraph` back to an `UndirectedGraph`, the
vertices are different (integers vs. two-element sets) from the original.
```
julia> using ShowSet
julia> G = Petersen()
Petersen (n=10, m=15)
julia> g = SimpleGraph(G)
{10, 15} undirected simple Int64 graph
julia> H = UG(g)
UndirectedGraph{Int64} (n=10, m=15)
julia> G == H
false
julia> using SimpleGraphAlgorithms
julia> is_iso(G,H) # lots of output deleted
true
```
| SimpleGraphConverter | https://github.com/scheinerman/SimpleGraphConverter.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | code | 524 | include(joinpath(dirname(@__DIR__), "src", "PenultimateDays.jl"))
using Documenter, .PenultimateDays
Documenter.makedocs(
clean = true,
doctest = true,
modules = Module[PenultimateDays],
repo = "",
highlightsig = true,
sitename = "PenultimateDays Documentation",
expandfirst = [],
pages = [
"Home" => "index.md",
"Documentation" => "documentation.md",
"Examples" => "examples.md"
]
)
deploydocs(;
repo = "github.com/jakewilliami/PenultimateDays.jl.git",
)
| PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | code | 3087 | module PenultimateDays
using Dates
# Dates (stdlib) extended
for m in (:first, :last), t in (:week, :month, :quarter, :year)
f = Symbol("$(m)dayof$(t)")
ms, ts = string(m), string(t)
@eval begin
import Dates: $f
@doc """
$($f)(dt::TimeType, d::Int) -> TimeType
Adjusts `dt` to the $($ms) occurrence of weekday `d` in its $($ts), where `d` is the day of the week as an `Int`, with `1 = Monday, 2 = Tuesday, &c...`
For example, `$($f)(dt, 6)` will find the $($ms) Saturday of the $($ts). Dates also exports integer aliases `Monday`–`Sunday`, so you can write `$($f)(dt, Saturday)`.
See also: `Dates.dayofweek`
"""
$f
end
end
## Week
function firstdayofweek(dt::TimeType, d::Int)
fd_date = firstdayofweek(dt)
return fd_date + Day(d - 1)
end
lastdayofweek(dt::TimeType, d::Int) = firstdayofweek(dt, d)
## Month
function firstdayofmonth(dt::TimeType, d::Int)
fd_date = firstdayofmonth(dt)
fd_i = dayofweek(fd_date)
fd_day = d - fd_i + 1 + 7(fd_i > d)
return fd_date + Day(fd_day - 1)
end
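# Worked example for the offset arithmetic above (illustration, assuming d uses
# Dates' convention 1 = Monday): June 2022 starts on a Wednesday (fd_i == 3), so
# asking for the first Tuesday (d == 2) gives fd_day = 2 - 3 + 1 + 7 = 7, i.e.
# 2022-06-07. The 7(fd_i > d) term supplies the wrap-around when the requested
# weekday falls before the weekday of the month's first day.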
function lastdayofmonth(dt::TimeType, d::Int)
ld_date = lastdayofmonth(dt)
ld_i = dayofweek(ld_date)
ld_day = 7(ld_i < d) - d + ld_i + 1
return ld_date - Day(ld_day - 1)
end
## Quarter
firstdayofquarter(dt::TimeType, d::Int) = firstdayofmonth(firstdayofquarter(dt), d)
lastdayofquarter(dt::TimeType, d::Int) = lastdayofmonth(lastdayofquarter(dt), d)
## Year
firstdayofyear(dt::TimeType, d::Int) = firstdayofmonth(firstdayofyear(dt), d)
lastdayofyear(dt::TimeType, d::Int) = lastdayofmonth(lastdayofyear(dt), d)
# Trivial Penultimate Functions
for t in (:week, :month, :quarter, :year)
f, f′ = Symbol("penultimatedayof$(t)"), Symbol("lastdayof$(t)")
ts = string(t)
@eval begin
@doc """
$($f)(dt::TimeType) -> TimeType
Adjusts `dt` to the penultimate (second-to-last) day of its $($ts).
"""
$f(dt::TimeType) = $f′(dt) - Day(1)
export $f
end
end
# Day-specific Penultimate Functions
## Week
penultimatedayofweek(dt::TimeType, d::Int) =
throw(ArgumentError("It is impossible to find the second-to-last day of a week (specifying the day) wth necessarily only one of each days in the week"))
## Month, Quarter, and Year
for t in (:month, :quarter, :year)
f, f′ = Symbol("penultimatedayof$(t)"), Symbol("lastdayof$(t)")
ts = string(t)
@eval begin
@doc """
$($f)(dt::TimeType, d::Int) -> TimeType
Adjusts `dt` to the penultimate (second-to-last) occurrence of weekday `d` in its $($ts), where `d` is the day of the week as an `Int`, with `1 = Monday, 2 = Tuesday, &c...`
For example, `$($f)(dt, 6)` will find the penultimate Saturday of the $($ts). Dates also exports integer aliases `Monday`–`Sunday`, so you can write `$($f)(dt, Saturday)`.
See also: `Dates.dayofweek`, `$($f′)`
"""
$f(dt::TimeType, d::Int) = $f′(dt, d) - Week(1)
end
end
end # end module
| PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | code | 9320 | using Dates
using PenultimateDays
using Test
#=Test.eval(quote
function record(ts::DefaultTestSet, t::Error) # t::Union{Fail, Error}
if t isa Error
e = only(match(r"^(\w+)\(.*\)$", t.value).captures)
if e == "ErrorException" # || e == "MethodError"
push!(ts.results, t)
end
end
end
end)
=#
@testset "PenultimateDays.jl" begin
d1 = Date(2022, 6, 24)
d2 = Date(2023, 1, 12)
d3 = Date(2033, 2, 7)
dt = DateTime("1996-01-05T12:30:00")
@testset "Trivial Penultimate Functions" begin
# Week
@test penultimatedayofweek(d1) == Date(2022, 6, 25)
@test penultimatedayofweek(d2) == Date(2023, 1, 14)
@test penultimatedayofweek(d3) == Date(2033, 2, 12)
# Month
@test penultimatedayofmonth(d1) == Date(2022, 6, 29)
@test penultimatedayofmonth(d2) == Date(2023, 1, 30)
@test penultimatedayofmonth(d3) == Date(2033, 2, 27)
# Quarter
@test penultimatedayofquarter(d1) == Date(2022, 6, 29)
@test penultimatedayofquarter(d2) == Date(2023, 3, 30)
@test penultimatedayofquarter(d3) == Date(2033, 3, 30)
# Year
@test penultimatedayofyear(d1) == Date(2022, 12, 30)
@test penultimatedayofyear(d2) == Date(2023, 12, 30)
@test penultimatedayofyear(d3) == Date(2033, 12, 30)
# Time type
@test penultimatedayofweek(dt) isa typeof(dt)
@test penultimatedayofmonth(dt) isa typeof(dt)
@test penultimatedayofquarter(dt) isa typeof(dt)
@test penultimatedayofyear(dt) isa typeof(dt)
end
@testset "Day-specific Penultimate Functions" begin
# Week
## Cannot have a second to last specified day in a week, which
## only contains one of each day
@test_throws ArgumentError penultimatedayofweek(d1, Tuesday)
# Month
@test penultimatedayofmonth(d1, Monday) == Date(2022, 6, 20)
@test penultimatedayofmonth(d1, Wednesday) == Date(2022, 6, 22)
@test penultimatedayofmonth(d1, Sunday) == Date(2022, 6, 19)
@test penultimatedayofmonth(d2, Monday) == Date(2023, 1, 23)
@test penultimatedayofmonth(d2, Wednesday) == Date(2023, 1, 18)
@test penultimatedayofmonth(d2, Sunday) == Date(2023, 1, 22)
@test penultimatedayofmonth(d3, Monday) == Date(2033, 2, 21)
@test penultimatedayofmonth(d3, Wednesday) == Date(2033, 2, 16)
@test penultimatedayofmonth(d3, Sunday) == Date(2033, 2, 20)
# Quarter
@test penultimatedayofquarter(d1, Monday) == Date(2022, 6, 20)
@test penultimatedayofquarter(d1, Wednesday) == Date(2022, 6, 22)
@test penultimatedayofquarter(d1, Sunday) == Date(2022, 6, 19)
@test penultimatedayofquarter(d2, Monday) == Date(2023, 3, 20)
@test penultimatedayofquarter(d2, Wednesday) == Date(2023, 3, 22)
@test penultimatedayofquarter(d2, Sunday) == Date(2023, 3, 19)
@test penultimatedayofquarter(d3, Monday) == Date(2033, 3, 21)
@test penultimatedayofquarter(d3, Wednesday) == Date(2033, 3, 23)
@test penultimatedayofquarter(d3, Sunday) == Date(2033, 3, 20)
# Year
@test penultimatedayofyear(d1, Monday) == Date(2022, 12, 19)
@test penultimatedayofyear(d1, Wednesday) == Date(2022, 12, 21)
@test penultimatedayofyear(d1, Sunday) == Date(2022, 12, 18)
@test penultimatedayofyear(d2, Monday) == Date(2023, 12, 18)
@test penultimatedayofyear(d2, Wednesday) == Date(2023, 12, 20)
@test penultimatedayofyear(d2, Sunday) == Date(2023, 12, 24)
@test penultimatedayofyear(d3, Monday) == Date(2033, 12, 19)
@test penultimatedayofyear(d3, Wednesday) == Date(2033, 12, 21)
@test penultimatedayofyear(d3, Sunday) == Date(2033, 12, 18)
# Time type
@test penultimatedayofmonth(dt, Tuesday) isa typeof(dt)
@test penultimatedayofquarter(dt, Tuesday) isa typeof(dt)
@test penultimatedayofyear(dt, Tuesday) isa typeof(dt)
end
@testset "Dates (stdlib) Extended" begin
# Week
@test firstdayofweek(d1, Monday) == Date(2022, 6, 20)
@test firstdayofweek(d1, Wednesday) == Date(2022, 6, 22)
@test firstdayofweek(d1, Sunday) == Date(2022, 6, 26)
@test firstdayofweek(d2, Monday) == Date(2023, 1, 9)
@test firstdayofweek(d2, Wednesday) == Date(2023, 1, 11)
@test firstdayofweek(d2, Sunday) == Date(2023, 1, 15)
@test firstdayofweek(d3, Monday) == Date(2033, 2, 7)
@test firstdayofweek(d3, Wednesday) == Date(2033, 2, 9)
@test firstdayofweek(d3, Sunday) == Date(2033, 2, 13)
@test lastdayofweek(d1, Monday) == Date(2022, 6, 20)
@test lastdayofweek(d1, Wednesday) == Date(2022, 6, 22)
@test lastdayofweek(d1, Sunday) == Date(2022, 6, 26)
@test lastdayofweek(d2, Monday) == Date(2023, 1, 9)
@test lastdayofweek(d2, Wednesday) == Date(2023, 1, 11)
@test lastdayofweek(d2, Sunday) == Date(2023, 1, 15)
@test lastdayofweek(d3, Monday) == Date(2033, 2, 7)
@test lastdayofweek(d3, Wednesday) == Date(2033, 2, 9)
@test lastdayofweek(d3, Sunday) == Date(2033, 2, 13)
# Month
@test firstdayofmonth(d1, Monday) == Date(2022, 6, 6)
@test firstdayofmonth(d1, Wednesday) == Date(2022, 6, 1)
@test firstdayofmonth(d1, Sunday) == Date(2022, 6, 5)
@test firstdayofmonth(d2, Monday) == Date(2023, 1, 2)
@test firstdayofmonth(d2, Wednesday) == Date(2023, 1, 4)
@test firstdayofmonth(d2, Sunday) == Date(2023, 1, 1)
@test firstdayofmonth(d3, Monday) == Date(2033, 2, 7)
@test firstdayofmonth(d3, Wednesday) == Date(2033, 2, 2)
@test firstdayofmonth(d3, Sunday) == Date(2033, 2, 6)
@test lastdayofmonth(d1, Monday) == Date(2022, 6, 27)
@test lastdayofmonth(d1, Wednesday) == Date(2022, 6, 29)
@test lastdayofmonth(d1, Sunday) == Date(2022, 6, 26)
@test lastdayofmonth(d2, Monday) == Date(2023, 1, 30)
@test lastdayofmonth(d2, Wednesday) == Date(2023, 1, 25)
@test lastdayofmonth(d2, Sunday) == Date(2023, 1, 29)
@test lastdayofmonth(d3, Monday) == Date(2033, 2, 28)
@test lastdayofmonth(d3, Wednesday) == Date(2033, 2, 23)
@test lastdayofmonth(d3, Sunday) == Date(2033, 2, 27)
# Quarter
@test firstdayofquarter(d1, Monday) == Date(2022, 4, 4)
@test firstdayofquarter(d1, Wednesday) == Date(2022, 4, 6)
@test firstdayofquarter(d1, Sunday) == Date(2022, 4, 3)
@test firstdayofquarter(d2, Monday) == Date(2023, 1, 2)
@test firstdayofquarter(d2, Wednesday) == Date(2023, 1, 4)
@test firstdayofquarter(d2, Sunday) == Date(2023, 1, 1)
@test firstdayofquarter(d3, Monday) == Date(2033, 1, 3)
@test firstdayofquarter(d3, Wednesday) == Date(2033, 1, 5)
@test firstdayofquarter(d3, Sunday) == Date(2033, 1, 2)
@test lastdayofquarter(d1, Monday) == Date(2022, 6, 27)
@test lastdayofquarter(d1, Wednesday) == Date(2022, 6, 29)
@test lastdayofquarter(d1, Sunday) == Date(2022, 6, 26)
@test lastdayofquarter(d2, Monday) == Date(2023, 3, 27)
@test lastdayofquarter(d2, Wednesday) == Date(2023, 3, 29)
@test lastdayofquarter(d2, Sunday) == Date(2023, 3, 26)
@test lastdayofquarter(d3, Monday) == Date(2033, 3, 28)
@test lastdayofquarter(d3, Wednesday) == Date(2033, 3, 30)
@test lastdayofquarter(d3, Sunday) == Date(2033, 3, 27)
# Year
@test firstdayofyear(d1, Monday) == Date(2022, 1, 3)
@test firstdayofyear(d1, Wednesday) == Date(2022, 1, 5)
@test firstdayofyear(d1, Sunday) == Date(2022, 1, 2)
@test firstdayofyear(d2, Monday) == Date(2023, 1, 2)
@test firstdayofyear(d2, Wednesday) == Date(2023, 1, 4)
@test firstdayofyear(d2, Sunday) == Date(2023, 1, 1)
@test firstdayofyear(d3, Monday) == Date(2033, 1, 3)
@test firstdayofyear(d3, Wednesday) == Date(2033, 1, 5)
@test firstdayofyear(d3, Sunday) == Date(2033, 1, 2)
@test lastdayofyear(d1, Monday) == Date(2022, 12, 26)
@test lastdayofyear(d1, Wednesday) == Date(2022, 12, 28)
@test lastdayofyear(d1, Sunday) == Date(2022, 12, 25)
@test lastdayofyear(d2, Monday) == Date(2023, 12, 25)
@test lastdayofyear(d2, Wednesday) == Date(2023, 12, 27)
@test lastdayofyear(d2, Sunday) == Date(2023, 12, 31)
@test lastdayofyear(d3, Monday) == Date(2033, 12, 26)
@test lastdayofyear(d3, Wednesday) == Date(2033, 12, 28)
@test lastdayofyear(d3, Sunday) == Date(2033, 12, 25)
# Time type
@test firstdayofweek(dt, Tuesday) isa typeof(dt)
@test lastdayofweek(dt, Tuesday) isa typeof(dt)
@test firstdayofmonth(dt, Tuesday) isa typeof(dt)
@test lastdayofmonth(dt, Tuesday) isa typeof(dt)
@test firstdayofquarter(dt, Tuesday) isa typeof(dt)
@test lastdayofquarter(dt, Tuesday) isa typeof(dt)
@test firstdayofyear(dt, Tuesday) isa typeof(dt)
@test lastdayofyear(dt, Tuesday) isa typeof(dt)
end
end
| PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | docs | 2096 | <h1 align="center">PenultimateDays.jl</h1>
<!-- [](https://jakewilliami.github.io/PenultimateDays.jl/stable) -->
[](https://jakewilliami.github.io/PenultimateDays.jl/dev)
[](https://github.com/jakewilliami/PenultimateDays.jl/actions?query=workflow%3ACI)
[](https://github.com/invenia/BlueStyle)
<!--  -->
Similar to [Dates](https://github.com/JuliaLang/julia/tree/master/stdlib/Dates)' `firstdayof*` and `lastdayof*`, I introduce to you `penultimatedayof*`: a family of useful functions for finding the second-to-last day of a measure of time.
Unlike Dates' `*dayof*` functions, these can also find, for example, the penultimate _Tuesday_ of a measure of time, and some of Dates' `*dayof*` functions have been extended to accept a day-of-week argument as well.
## Quick Start
```julia
julia> using Dates, PenultimateDays
julia> d = today()
2022-06-24
julia> penultimatedayofweek(d) # second-to-last day of the week
2022-06-25
julia> penultimatedayofmonth(d) # second-to-last day of the month
2022-06-29
julia> penultimatedayofmonth(d, Tuesday) # second-to-last Tuesday of the month
2022-06-21
julia> penultimatedayofquarter(d) # second-to-last day of the quarter
2022-06-29
julia> penultimatedayofquarter(d, Tuesday) # second-to-last Tuesday of the quarter
2022-06-21
julia> penultimatedayofyear(d) # second-to-last day of the year
2022-12-30
julia> penultimatedayofyear(d, Tuesday) # second-to-last Tuesday of the year
2022-12-20
```
## Extending the Dates standard library
We have also extended the Dates standard library to allow specification of day for `*dayof*` functions:
```julia
julia> using Dates, PenultimateDays
julia> d = today()
2022-06-24
julia> firstdayofmonth(d, Tuesday)
2022-06-07
julia> lastdayofmonth(d, Tuesday)
2022-06-28
``` | PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | docs | 699 | # Documentation
```@contents
Depth = 3
```
```@meta
CurrentModule = PenultimateDays
DocTestSetup = quote
using PenultimateDays
end
```
## Trivial Penultimate Functions
```@docs
penultimatedayofweek(::TimeType)
penultimatedayofmonth(::TimeType)
penultimatedayofquarter(::TimeType)
penultimatedayofyear(::TimeType)
```
## Day-specific Penultimate Functions
```@docs
penultimatedayofmonth(::TimeType, ::Int)
penultimatedayofquarter(::TimeType, ::Int)
penultimatedayofyear(::TimeType, ::Int)
```
## Dates (Standard Library) Extended
```@docs
firstdayofweek
lastdayofweek
firstdayofmonth
lastdayofmonth
firstdayofquarter
lastdayofquarter
firstdayofyear
lastdayofyear
```
# Index
```@index
``` | PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | docs | 1071 | # Examples
## Basic examples
```julia
julia> using Dates, PenultimateDays
julia> d = today()
2022-06-24
julia> penultimatedayofweek(d) # second-to-last day of the week
2022-06-25
julia> penultimatedayofmonth(d) # second-to-last day of the month
2022-06-29
julia> penultimatedayofmonth(d, Tuesday) # second-to-last Tuesday of the month
2022-06-21
julia> penultimatedayofquarter(d) # second-to-last day of the quarter
2022-06-29
julia> penultimatedayofquarter(d, Tuesday) # second-to-last Tuesday of the quarter
2022-06-21
julia> penultimatedayofyear(d) # second-to-last day of the year
2022-12-30
julia> penultimatedayofyear(d, Tuesday) # second-to-last Tuesday of the year
2022-12-20
```
## Extending the Dates standard library
We have also extended the Dates standard library to allow specification of day for `*dayof*` functions:
```julia
julia> using Dates, PenultimateDays
julia> d = today()
2022-06-24
julia> firstdayofmonth(d, Tuesday)
2022-06-07
julia> lastdayofmonth(d, Tuesday)
2022-06-28
```
See also `test/runtests.jl` in the repository. | PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.1.0 | 427088191e7fb0bd1935b74bb2d1dd69d23c3643 | docs | 230 | # PenultimateDays.jl Documentation
```@contents
```
```@meta
CurrentModule = PenultimateDays
DocTestSetup = quote
using PenultimateDays
end
```
## Adding PenultimateDays.jl
```@repl
using Pkg
Pkg.add("PenultimateDays")
```
| PenultimateDays | https://github.com/jakewilliami/PenultimateDays.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 247 | using Documenter, FeatherLib
makedocs(
modules = [FeatherLib],
sitename = "FeatherLib.jl",
analytics="UA-132838790-1",
pages = [
"Introduction" => "index.md"
]
)
deploydocs(
repo = "github.com/queryverse/FeatherLib.jl.git"
)
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 391 | module FeatherLib
using Arrow, FlatBuffers, CategoricalArrays, Mmap
export featherread, featherwrite
import Dates
const FEATHER_VERSION = 2
# wesm/feather/cpp/src/common.h
const FEATHER_MAGIC_BYTES = Vector{UInt8}(codeunits("FEA1"))
const MIN_FILE_LENGTH = 12
include("metadata.jl") # flatbuffer defintions
include("loadfile.jl")
include("read.jl")
include("write.jl")
end # module
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 1548 |
getoutputlength(version::Int32, x::Integer) = version < FEATHER_VERSION ? x : padding(x)
function validatefile(filename::AbstractString, data::AbstractVector{UInt8})
if length(data) < MIN_FILE_LENGTH
throw(ArgumentError("'$file' is not in feather format: total length of file: $(length(data))"))
end
header = data[1:4]
footer = data[(end-3):end]
if header ≠ FEATHER_MAGIC_BYTES || footer ≠ FEATHER_MAGIC_BYTES
throw(ArgumentError(string("'$filename' is not in feather format: header = $header, ",
"footer = $footer.")))
end
end
function loadfile(filename::AbstractString; use_mmap::Bool=true)
isfile(filename) || throw(ArgumentError("'$filename' is not a valid file."))
data = use_mmap ? Mmap.mmap(filename) : read(filename)
validatefile(filename, data)
data
end
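# Trailer layout implied by the index arithmetic below (a sketch, not taken from
# a spec): ... | flatbuffer metadata | Int32 metadata length | 4 magic bytes.
# metalength() reads the Int32 sitting just before the trailing magic bytes, and
# metaposition() jumps back over that length to the start of the metadata block.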
function metalength(data::AbstractVector{UInt8})
read(IOBuffer(data[(length(data)-7):(length(data)-4)]), Int32)
end
function metaposition(data::AbstractVector{UInt8}, metalen::Integer=metalength(data))
length(data) - (metalen+7)
end
function rootposition(data::AbstractVector{UInt8}, mpos::Integer=metaposition(data))
read(IOBuffer(data[mpos:(mpos+4)]), Int32)
end
function getctable(data::AbstractVector{UInt8})
metapos = metaposition(data)
rootpos = rootposition(data, metapos)
ctable = FlatBuffers.read(Metadata.CTable, data, metapos + rootpos - 1)
if ctable.version < FEATHER_VERSION
@warn("This feather file is old and may not be readable.")
end
ctable
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 5623 | module Metadata
import Dates
using FlatBuffers
@enum(DType, BOOL = 0, INT8 = 1, INT16 = 2, INT32 = 3, INT64 = 4,
UINT8 = 5, UINT16 = 6, UINT32 = 7, UINT64 = 8,
FLOAT = 9, DOUBLE = 10, UTF8 = 11, BINARY = 12,
CATEGORY = 13, TIMESTAMP = 14, DATE = 15, TIME = 16)
@enum(Encoding, PLAIN = 0, DICTIONARY = 1)
@enum(TimeUnit, SECOND = 0, MILLISECOND = 1, MICROSECOND = 2, NANOSECOND = 3)
# FlatBuffers.enumsizeof(::Type{TimeUnit}) = UInt8
mutable struct PrimitiveArray
dtype::DType
encoding::Encoding
offset::Int64
length::Int64
null_count::Int64
total_bytes::Int64
end
mutable struct CategoryMetadata
levels::PrimitiveArray
ordered::Bool
end
@DEFAULT CategoryMetadata ordered=false
mutable struct TimestampMetadata
unit::TimeUnit
timezone::String
end
mutable struct DateMetadata
end
mutable struct TimeMetadata
unit::TimeUnit
end
@UNION TypeMetadata (Nothing,CategoryMetadata,TimestampMetadata,DateMetadata,TimeMetadata)
mutable struct Column
name::String
values::PrimitiveArray
metadata_type::Int8
metadata::TypeMetadata
user_metadata::String
end
function Column(name::String, values::PrimitiveArray, metadata::TypeMetadata=nothing,
user_metadata::String="")
Column(name, values, FlatBuffers.typeorder(TypeMetadata, typeof(metadata)),
metadata, user_metadata)
end
mutable struct CTable
description::String
num_rows::Int64
columns::Vector{Column}
version::Int32
metadata::String
end
end # module
# wesm/feather/cpp/src/metadata_generated.h
# wesm/feather/cpp/src/types.h
const JULIA_TYPE_DICT = Dict{Metadata.DType,DataType}(
Metadata.BOOL => Bool,
Metadata.INT8 => Int8,
Metadata.INT16 => Int16,
Metadata.INT32 => Int32,
Metadata.INT64 => Int64,
Metadata.UINT8 => UInt8,
Metadata.UINT16 => UInt16,
Metadata.UINT32 => UInt32,
Metadata.UINT64 => UInt64,
Metadata.FLOAT => Float32,
Metadata.DOUBLE => Float64,
Metadata.UTF8 => String, # can also be WeakRefString{UInt8}
Metadata.BINARY => Vector{UInt8},
Metadata.CATEGORY => Int64,
Metadata.TIMESTAMP => Int64,
Metadata.DATE => Int64,
Metadata.TIME => Int64
)
const METADATA_TYPE_DICT = Dict{DataType,Metadata.DType}(
Bool => Metadata.BOOL,
Int8 => Metadata.INT8,
Int16 => Metadata.INT16,
Int32 => Metadata.INT32,
Int64 => Metadata.INT64,
UInt8 => Metadata.UINT8,
UInt16 => Metadata.UINT16,
UInt32 => Metadata.UINT32,
UInt64 => Metadata.UINT64,
Float32 => Metadata.FLOAT,
Float64 => Metadata.DOUBLE,
String => Metadata.UTF8,
Vector{UInt8} => Metadata.BINARY,
Dates.Time => Metadata.INT64,
Dates.DateTime => Metadata.INT64,
Dates.Date => Metadata.INT32,
# WeakRefString{UInt8} => Metadata.UTF8 # not currently being used
)
const NON_PRIMITIVE_TYPES = Set([Metadata.UTF8, Metadata.BINARY])
const JULIA_TIME_DICT = Dict{Metadata.TimeUnit,DataType}(
Metadata.SECOND => Dates.Second,
Metadata.MILLISECOND => Dates.Millisecond,
Metadata.MICROSECOND => Dates.Microsecond,
Metadata.NANOSECOND => Dates.Nanosecond
)
const METADATA_TIME_DICT = Dict{DataType,Metadata.TimeUnit}(v=>k for (k,v) in JULIA_TIME_DICT)
isprimitivetype(t::Metadata.DType) = t ∉ NON_PRIMITIVE_TYPES
juliatype(meta::Nothing, values_type::Metadata.DType) = JULIA_TYPE_DICT[values_type]
juliatype(values_type::Metadata.DType) = juliatype(nothing, values_type)
function juliatype(meta::Metadata.CategoryMetadata, values_type::Metadata.DType)
JULIA_TYPE_DICT[meta.levels.dtype]
end
function juliatype(meta::Metadata.TimestampMetadata, values_type::Metadata.DType)
Timestamp{JULIA_TIME_DICT[meta.unit]}
end
function juliatype(meta::Metadata.TimeMetadata, values_type::Metadata.DType)
TimeOfDay{JULIA_TIME_DICT[meta.unit],JULIA_TYPE_DICT[values_type]}
end
juliatype(meta::Metadata.DateMetadata, values_type::Metadata.DType) = Datestamp
function juliatype(col::Metadata.Column)
T = juliatype(col.metadata, col.values.dtype)
col.values.null_count == 0 ? T : Union{T,Missing}
end
function feathertype(::Type{T}) where T
if !haskey(METADATA_TYPE_DICT, T)
throw(ArgumentError("Type $T is not supported by the Feather format."))
else
return METADATA_TYPE_DICT[T]
end
end
feathertype(::Type{Union{T,Missing}}) where T = feathertype(T)
feathertype(::Type{<:Arrow.Datestamp}) = Metadata.INT32
feathertype(::Type{<:Arrow.Timestamp}) = Metadata.INT64
feathertype(::Type{<:Arrow.TimeOfDay{P,Int32}}) where P = Metadata.INT32
feathertype(::Type{<:Arrow.TimeOfDay{P,Int64}}) where P = Metadata.INT64
getmetadata(io::IO, ::Type{T}, A::ArrowVector) where T = nothing
getmetadata(io::IO, ::Type{Union{T,Missing}}, A::ArrowVector) where T = getmetadata(io, T, A)
getmetadata(io::IO, ::Type{Arrow.Datestamp}, A::ArrowVector) = Metadata.DateMetadata()
function getmetadata(io::IO, ::Type{Arrow.Timestamp{T}}, A::ArrowVector) where T
Metadata.TimestampMetadata(METADATA_TIME_DICT[T], "")
end
function getmetadata(io::IO, ::Type{Arrow.TimeOfDay{P,T}}, A::ArrowVector) where {P,T}
Metadata.TimeMetadata(METADATA_TIME_DICT[P])
end
# WARNING: the Arrow standard says nothing about specifying whether DictEncoding is ordered!
function getmetadata(io::IO, ::Type{T}, A::DictEncoding) where T
vals = writecontents(Metadata.PrimitiveArray, io, levels(A))
Metadata.CategoryMetadata(vals, true)
end
getmetadata(io::IO, ::Type{Union{Missing, T}}, A::DictEncoding) where T = getmetadata(io, T, A)
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 2760 | struct ResultSet
columns::AbstractVector{AbstractVector}
names::Vector{Symbol}
description::String
metadata::String
end
function featherread(filename::AbstractString; use_mmap=true)
data = loadfile(filename, use_mmap=use_mmap)
ctable = getctable(data)
ncols = length(ctable.columns)
colnames = [Symbol(col.name) for col in ctable.columns]
coltypes = [juliatype(col) for col in ctable.columns]
columns = ArrowVector[constructcolumn(coltypes[i], data, ctable.columns[i].metadata, ctable.columns[i]) for i in 1:ncols]
return ResultSet(columns, colnames, ctable.description, ctable.metadata)
end
#=====================================================================================================
new column construction stuff
=====================================================================================================#
Base.length(p::Metadata.PrimitiveArray) = p.length
startloc(p::Metadata.PrimitiveArray) = p.offset+1
Arrow.nullcount(p::Metadata.PrimitiveArray) = p.null_count
function bitmasklength(p::Metadata.PrimitiveArray)
nullcount(p) == 0 ? 0 : padding(bytesforbits(length(p)))
end
function offsetslength(p::Metadata.PrimitiveArray)
isprimitivetype(p.dtype) ? 0 : padding((length(p)+1)*sizeof(Int32))
end
valueslength(p::Metadata.PrimitiveArray) = p.total_bytes - offsetslength(p) - bitmasklength(p)
function offsetsloc(p::Metadata.PrimitiveArray)
if isprimitivetype(p.dtype)
throw(ErrorException("Trying to obtain offset values for primitive array."))
end
startloc(p) + bitmasklength(p)
end
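# Per-column buffer layout implied by the helpers above (sketch): an optional
# validity bitmask (present only when null_count > 0), then padded offsets
# (non-primitive/list types only), then the values; total_bytes covers all three.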
# override default offset type
Locate.Offsets(col::Metadata.PrimitiveArray) = Locate.Offsets{Int32}(offsetsloc(col))
Locate.length(col::Metadata.PrimitiveArray) = length(col)
Locate.values(col::Metadata.PrimitiveArray) = startloc(col) + bitmasklength(col) + offsetslength(col)
# this is only relevant for lists, values type must be UInt8
Locate.valueslength(col::Metadata.PrimitiveArray) = valueslength(col)
Locate.bitmask(col::Metadata.PrimitiveArray) = startloc(col)
function constructcolumn(::Type{T}, data::Vector{UInt8}, meta::Metadata.CategoryMetadata, col::Metadata.Column) where T
reftype = juliatype(col.values.dtype)
DictEncoding{T}(locate(data, reftype, col.values), locate(data, T, col.metadata.levels))
end
function constructcolumn(::Type{Union{T,Missing}}, data::Vector{UInt8}, meta::Metadata.CategoryMetadata, col::Metadata.Column) where T
reftype = Union{juliatype(col.values.dtype),Missing}
DictEncoding{Union{T,Missing}}(locate(data, reftype, col.values), locate(data, T, col.metadata.levels))
end
function constructcolumn(::Type{T}, data::Vector{UInt8}, meta, col::Metadata.Column) where T
locate(data, T, col.values)
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 2712 | function featherwrite(filename::AbstractString, columns, colnames; description::AbstractString="", metadata::AbstractString="")
ncol = length(columns)
nrows = length(columns[1])
cols = ArrowVector[arrowformat(_first_col_convert_pass(col)) for col in columns]
open(filename, "w+") do io
writepadded(io, FEATHER_MAGIC_BYTES)
colmetadata = Metadata.Column[writecolumn(io, string(colnames[i]), cols[i]) for i in 1:ncol]
ctable = Metadata.CTable(description, nrows, colmetadata, FEATHER_VERSION, metadata)
len = writemetadata(io, ctable)
write(io, Int32(len)) # these two writes combined are properly aligned
write(io, FEATHER_MAGIC_BYTES)
end
return nothing
end
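# Overall file layout produced above (sketch): padded magic bytes, each column's
# padded buffers, the flatbuffer metadata, an Int32 with the metadata length,
# and the magic bytes again, which is what the trailer helpers in loadfile.jl
# expect when reading the file back.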
# NOTE: the below is very inefficient, but we are forced to do it by the Feather format
# Feather requires us to encode any vector that doesn't have a missing value
# as a normal list.
_first_col_convert_pass(col) = col
function _first_col_convert_pass(col::AbstractVector{Union{T,Missing}}) where T
hasmissing = findfirst(ismissing, col)
return hasmissing == nothing ? convert(AbstractVector{T}, col) : col
end
function Metadata.PrimitiveArray(A::ArrowVector{J}, off::Integer, nbytes::Integer) where J
Metadata.PrimitiveArray(feathertype(J), Metadata.PLAIN, off, length(A), nullcount(A), nbytes)
end
function Metadata.PrimitiveArray(A::DictEncoding, off::Integer, nbytes::Integer)
Metadata.PrimitiveArray(feathertype(eltype(references(A))), Metadata.PLAIN, off, length(A),
nullcount(A), nbytes)
end
writecontents(io::IO, A::Primitive) = writepadded(io, A)
writecontents(io::IO, A::NullablePrimitive) = writepadded(io, A, bitmask, values)
writecontents(io::IO, A::List) = writepadded(io, A, offsets, values)
writecontents(io::IO, A::NullableList) = writepadded(io, A, bitmask, offsets, values)
writecontents(io::IO, A::BitPrimitive) = writepadded(io, A, values)
writecontents(io::IO, A::NullableBitPrimitive) = writepadded(io, A, bitmask, values)
writecontents(io::IO, A::DictEncoding) = writecontents(io, references(A))
function writecontents(::Type{Metadata.PrimitiveArray}, io::IO, A::ArrowVector)
a = position(io)
writecontents(io, A)
b = position(io)
Metadata.PrimitiveArray(A, a, b-a)
end
function writecolumn(io::IO, name::AbstractString, A::ArrowVector{J}) where J
vals = writecontents(Metadata.PrimitiveArray, io, A)
Metadata.Column(String(name), vals, getmetadata(io, J, A), "")
end
function writemetadata(io::IO, ctable::Metadata.CTable)
meta = FlatBuffers.build!(ctable)
rng = (meta.head+1):length(meta.bytes)
writepadded(io, view(meta.bytes, rng))
Int32(length(rng))
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 2720 | using FeatherLib, Missings, Dates, CategoricalArrays, Random, Arrow
using Test
temps = []
@testset "FeatherLib" begin
include("test_readwrite.jl")
include("test_arrow.jl")
GC.gc(); GC.gc()
for t in temps
try
rm(t)
catch
GC.gc()
try
rm(t)
catch
end
end
end
# issue #34
# data = DataFrame(A=Union{Missing, String}[randstring(10) for i ∈ 1:100], B=rand(100))
# data[2, :A] = missing
# Feather.write("testfile.feather", data)
# dfo = Feather.read("testfile.feather")
# @test size(Data.schema(dfo)) == (100, 2)
# GC.gc();
# rm("testfile.feather")
# @testset "PythonRoundtrip" begin
# try
# println("Generate a test.feather file from python...")
# run(`docker cp runtests.py feathertest:/home/runtests.py`)
# run(`docker exec feathertest python /home/runtests.py`)
# println("Read test.feather into julia...")
# run(`docker cp feathertest:/home/test.feather test.feather`)
# df = Feather.read("test.feather")
# dts = [Dates.DateTime(2016,1,1), Dates.DateTime(2016,1,2), Dates.DateTime(2016,1,3)]
# @test df[:Autf8][:] == ["hey","there","sailor"]
# @test df[:Abool][:] == [true, true, false]
# @test df[:Acat][:] == categorical(["a","b","c"]) # these violate Arrow standard by using Int8!!
# @test df[:Acatordered][:] == categorical(["d","e","f"]) # these violate Arrow standard by using Int8!!
# @test convert(Vector{Dates.DateTime}, df[:Adatetime][:]) == dts
# @test isequal(df[:Afloat32][:], [1.0, missing, 0.0])
# @test df[:Afloat64][:] == [Inf,1.0,0.0]
# df_ = Feather.read("test.feather"; use_mmap=false)
# println("Writing test2.feather from julia...")
# Feather.write("test2.feather", df)
# df2 = Feather.read("test2.feather")
# @test df2[:Autf8][:] == ["hey","there","sailor"]
# @test df2[:Abool][:] == [true, true, false]
# @test df2[:Acat][:] == categorical(["a","b","c"]) # these violate Arrow standard by using Int8!!
# @test df2[:Acatordered][:] == categorical(["d","e","f"]) # these violate Arrow standard by using Int8!!
# @test convert(Vector{Dates.DateTime}, df2[:Adatetime][:]) == dts
# @test isequal(df2[:Afloat32][:], [1.0, missing, 0.0])
# @test df2[:Afloat64][:] == [Inf,1.0,0.0]
# println("Read test2.feather into python...")
# @test (run(`docker cp test2.feather feathertest:/home/test2.feather`); true)
# @test (run(`docker cp runtests2.py feathertest:/home/runtests2.py`); true)
# @test (run(`docker exec feathertest python /home/runtests2.py`); true)
# finally
# run(`docker stop feathertest`)
# run(`docker rm feathertest`)
# rm("test.feather")
# rm("test2.feather")
# end
# end
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 3389 | using Random
const SEED = 999
const NROWS = 128
const N_IDX_TESTS = 16
arrow_tempname = tempname()
push!(temps, arrow_tempname)
Random.seed!(SEED)
randdate() = Date(rand(0:4000), rand(1:12), rand(1:27))
randtime() = Dates.Time(rand(0:23), rand(0:59), rand(0:59))
randdatetime() = randdate() + randtime()
randstrings() = String[[randstring(rand(0:20)) for i ∈ 1:(NROWS-1)]; "a"]
function randstrings(::Missing)
Union{String,Missing}[[rand(Bool) ? missing : randstring(rand(0:20)) for i ∈ 1:(NROWS-1)]; "a"]
end
convstring(str::AbstractString) = String(str)
convstring(::Missing) = missing
@testset "ArrowTests" begin
cols = [rand(Int32,NROWS),
rand(Float64,NROWS),
Date[randdate() for i ∈ 1:NROWS],
DateTime[randdatetime() for i ∈ 1:NROWS],
Dates.Time[randtime() for i ∈ 1:NROWS],
Union{Int64,Missing}[rand(Bool) ? missing : rand(Int64) for i ∈ 1:NROWS],
randstrings(),
randstrings(missing),
CategoricalArrays.categorical(randstrings()),
CategoricalArrays.categorical(randstrings(missing))]
colnames = [:ints,:floats,:dates,:datetimes,:times,:missingints,:strings,
:missingstrings,:catstrings,:catstringsmissing]
featherwrite(arrow_tempname, cols, colnames)
ndf = featherread(arrow_tempname)
@test ndf.names == colnames
@test typeof(ndf.columns[1]) == Arrow.Primitive{Int32}
@test typeof(ndf.columns[2]) == Arrow.Primitive{Float64}
@test typeof(ndf.columns[3]) == Arrow.Primitive{Arrow.Datestamp}
@test typeof(ndf.columns[4]) == Arrow.Primitive{Arrow.Timestamp{Dates.Millisecond}}
@test typeof(ndf.columns[5]) == Arrow.Primitive{Arrow.TimeOfDay{Dates.Nanosecond,Int64}}
@test typeof(ndf.columns[6]) == Arrow.NullablePrimitive{Int64}
@test typeof(ndf.columns[7]) == Arrow.List{String,Arrow.DefaultOffset,Arrow.Primitive{UInt8}}
@test typeof(ndf.columns[8]) == Arrow.NullableList{String,Arrow.DefaultOffset,Arrow.Primitive{UInt8}}
@test typeof(ndf.columns[9]) == Arrow.DictEncoding{String,Arrow.Primitive{Int32},
Arrow.List{String,Arrow.DefaultOffset,Arrow.Primitive{UInt8}}}
@test typeof(ndf.columns[10]) ==
Arrow.DictEncoding{Union{String,Missing},Arrow.NullablePrimitive{Int32},Arrow.List{String,Arrow.DefaultOffset,
Arrow.Primitive{UInt8}}}
for j ∈ 1:N_IDX_TESTS
i = rand(1:NROWS)
@test cols[1][i] == ndf.columns[1][i]
@test cols[2][i] == ndf.columns[2][i]
@test cols[3][i] == convert(Date, ndf.columns[3][i])
@test cols[4][i] == convert(DateTime, ndf.columns[4][i])
@test cols[5][i] == convert(Dates.Time, ndf.columns[5][i])
@test isequal(cols[6][i], ndf.columns[6][i])
@test cols[7][i] == ndf.columns[7][i]
@test isequal(cols[8][i], ndf.columns[8][i])
@test cols[9][i] == String(ndf.columns[9][i])
@test isequal(cols[10][i], convstring(ndf.columns[10][i]))
end
for j ∈ 1:N_IDX_TESTS
a, b = extrema(rand(1:NROWS, 2))
i = a:b
@test cols[1][i] == ndf.columns[1][i]
@test cols[2][i] == ndf.columns[2][i]
@test cols[3][i] == convert.(Date, ndf.columns[3][i])
@test cols[4][i] == convert.(DateTime, ndf.columns[4][i])
@test cols[5][i] == convert.(Dates.Time, ndf.columns[5][i])
@test isequal(cols[6][i], ndf.columns[6][i])
@test cols[7][i] == ndf.columns[7][i]
@test isequal(cols[8][i], ndf.columns[8][i])
@test cols[9][i] == String.(ndf.columns[9][i])
@test isequal(cols[10][i], convstring.(ndf.columns[10][i]))
end
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | code | 1595 | @testset "ReadWrite" begin
testdir = joinpath(@__DIR__, "data")
files = map(x -> joinpath(testdir, x), readdir(testdir))
for f in files
res = featherread(f)
columns, headers = res.columns, res.names
ncols = length(columns)
nrows = length(columns[1])
temp = tempname()
push!(temps, temp)
featherwrite(temp, columns, headers, description=res.description, metadata=res.metadata)
res2 = featherread(temp)
columns2, headers2 = res2.columns, res2.names
@test length(columns2) == ncols
@test headers==headers2
for (c1,c2) in zip(columns, columns2)
@test length(c1)==nrows
@test length(c2)==nrows
for i = 1:nrows
@test isequal(c1[i], c2[i])
end
end
@test res.description == res2.description
@test res.metadata == res2.metadata
# for (col1,col2) in zip(source.ctable.columns,sink.ctable.columns)
# @test col1.name == col2.name
# @test col1.metadata_type == col2.metadata_type
# @test typeof(col1.metadata) == typeof(col2.metadata)
# @test col1.user_metadata == col2.user_metadata
# v1 = col1.values; v2 = col2.values
# @test v1.dtype == v2.dtype
# @test v1.encoding == v2.encoding
# # @test v1.offset == v2.offset # currently not python/R compatible due to wesm/feather#182
# @test v1.length == v2.length
# @test v1.null_count == v2.null_count
# # @test v1.total_bytes == v2.total_bytes
# end
end
end
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | docs | 440 | If you have a question or are unsure if the behavior you're experiencing
is a bug, please search or post to the Data domain on the Julia Discourse
site: https://discourse.julialang.org/c/domain/data. I use the GitHub
issue tracker for bug reports and feature requests only.
By contributing code to FeatherLib.jl, you are agreeing to release it under
the [MIT License](https://github.com/davidanthoff/FeatherLib.jl/blob/master/LICENSE.md).
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | docs | 391 | # FeatherLib.jl v0.2.0
* Drop julia 0.7 support
* Move to Project.toml
* Fix a test dependency
# FeatherLib.jl v0.1.3
* Fix a method ambiguity bug on julia 1.1
# FeatherLib.jl v0.1.2
* Improve error message
# FeatherLib.jl v0.1.1
* Fix remaining julia 0.7/1.0 compat issues
# FeatherLib.jl v0.1.0
* Drop julia 0.6 support, add julia 0.7 support
# FeatherLib.jl v0.0.1
* Initial release
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.2.0 | a3d0c5ca2f08bc8fae4394775f371f8e032149ab | docs | 2250 | # FeatherLib
[](http://www.repostatus.org/#active)
[](https://travis-ci.org/queryverse/FeatherLib.jl)
[](https://ci.appveyor.com/project/queryverse/featherlib-jl/branch/master)
[](https://codecov.io/gh/queryverse/FeatherLib.jl)
## Overview
This is a low-level package to read and write feather files. It is not meant to be used by end users, but rather as a building block for other packages that expose user-friendly APIs for file IO.
End users are encouraged to use either [FeatherFiles.jl](https://github.com/queryverse/FeatherFiles.jl) or [Feather.jl](https://github.com/JuliaData/Feather.jl) to interact with feather files.
## Getting Started
The package exports two functions: ``featherread`` and ``featherwrite``.
Use the ``featherread`` function to read a feather file:
````julia
data = featherread("testfile.feather")
````
``data`` will then be of type ``ResultSet``. The field ``columns`` is a vector of vectors and holds the actual data columns. The field ``names`` holds the names of the columns. The ``description`` and ``metadata`` fields hold additional data from the feather file.
Use the ``featherwrite`` function to write a feather file:
````julia
featherwrite("testfile.feather", column_data, column_names)
````
``columns`` should be a vector of vectors that holds the data to be written. ``column_names`` should be a vector of ``Symbol``s with the column names.
## Acknowledgements
[Douglas Bates](https://github.com/dmbates), [ExpandingMan](https://github.com/ExpandingMan) and [Jacob Quinn](https://github.com/quinnj) deserve most of the credit for the code in this package: their code in the [Feather.jl](https://github.com/JuliaData/Feather.jl) package was the starting point for this package. They are of course not responsible for any errors introduced by myself here.
| FeatherLib | https://github.com/queryverse/FeatherLib.jl.git |
|
[
"MIT"
] | 0.1.1 | 1b8256c863036512246d0df3677c0f0324176c7f | code | 679 | using LibGEOSMakie
using Documenter
DocMeta.setdocmeta!(LibGEOSMakie, :DocTestSetup, :(using LibGEOSMakie); recursive=true)
makedocs(;
modules=[LibGEOSMakie],
authors="Jan Weidner <[email protected]> and contributors",
repo="https://github.com/jw3126/LibGEOSMakie.jl/blob/{commit}{path}#{line}",
sitename="LibGEOSMakie.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://jw3126.github.io/LibGEOSMakie.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/jw3126/LibGEOSMakie.jl",
devbranch="main",
)
| LibGEOSMakie | https://github.com/jw3126/LibGEOSMakie.jl.git |
|
[
"MIT"
] | 0.1.1 | 1b8256c863036512246d0df3677c0f0324176c7f | code | 116 | module LibGEOSMakie
import LibGEOS
import GeoInterfaceMakie
GeoInterfaceMakie.@enable(LibGEOS.AbstractGeometry)
end
| LibGEOSMakie | https://github.com/jw3126/LibGEOSMakie.jl.git |
|
[
"MIT"
] | 0.1.1 | 1b8256c863036512246d0df3677c0f0324176c7f | code | 688 | using Test
import LibGEOSMakie
import LibGEOS as LG
using Makie
@testset "smoketest" begin
unitsquare = LG.readgeom("POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))")
bigsquare = LG.readgeom("POLYGON((0 0, 11 0, 11 11, 0 11, 0 0))")
smallsquare = LG.readgeom("POLYGON((5 5, 8 5, 8 8, 5 8, 5 5))")
fig = Figure()
geoms = [
unitsquare,
LG.difference(bigsquare, smallsquare),
LG.boundary(unitsquare),
LG.union(smallsquare, unitsquare),
LG.readgeom("POINT(1 0)"),
LG.readgeom("MULTIPOINT(1 2, 2 3, 3 4)"),
]
for (i,geom) in enumerate(geoms)
Makie.plot!(Axis(fig[i,1], title="$(typeof(geom))"), geom)
end
fig
end
| LibGEOSMakie | https://github.com/jw3126/LibGEOSMakie.jl.git |
|
[
"MIT"
] | 0.1.1 | 1b8256c863036512246d0df3677c0f0324176c7f | docs | 563 | # LibGEOSMakie
[](https://jw3126.github.io/LibGEOSMakie.jl/stable/)
[](https://jw3126.github.io/LibGEOSMakie.jl/dev/)
[](https://github.com/jw3126/LibGEOSMakie.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/jw3126/LibGEOSMakie.jl)
| LibGEOSMakie | https://github.com/jw3126/LibGEOSMakie.jl.git |
|
[
"MIT"
] | 0.1.1 | 1b8256c863036512246d0df3677c0f0324176c7f | docs | 194 | ```@meta
CurrentModule = LibGEOSMakie
```
# LibGEOSMakie
Documentation for [LibGEOSMakie](https://github.com/jw3126/LibGEOSMakie.jl).
```@index
```
```@autodocs
Modules = [LibGEOSMakie]
```
| LibGEOSMakie | https://github.com/jw3126/LibGEOSMakie.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 12587 | ############
# TimeData #
############
mutable struct TimeData
ncalls::Int
time::Int64
allocs::Int64
firstexec::Int64
end
TimeData(ncalls, time, allocs) = TimeData(ncalls, time, allocs, time)
Base.copy(td::TimeData) = TimeData(td.ncalls, td.time, td.allocs)
TimeData() = TimeData(0, 0, 0, time_ns())
function Base.:+(self::TimeData, other::TimeData)
TimeData(self.ncalls + other.ncalls,
self.time + other.time,
self.allocs + other.allocs,
min(self.firstexec, other.firstexec))
end
###############
# TimerOutput #
###############
mutable struct TimerOutput
start_data::TimeData
accumulated_data::TimeData
inner_timers::Dict{String,TimerOutput}
timer_stack::Vector{TimerOutput}
name::String
flattened::Bool
enabled::Bool
totmeasured::Tuple{Int64,Int64}
prev_timer_label::String
prev_timer::Union{TimerOutput,Nothing}
function TimerOutput(label::String = "root")
start_data = TimeData(0, time_ns(), gc_bytes())
accumulated_data = TimeData()
inner_timers = Dict{String,TimerOutput}()
timer_stack = TimerOutput[]
return new(start_data, accumulated_data, inner_timers, timer_stack, label, false, true, (0, 0), "", nothing)
end
# Jeez...
TimerOutput(start_data, accumulated_data, inner_timers, timer_stack, name, flattened, enabled, totmeasured, prev_timer_label,
prev_timer) = new(start_data, accumulated_data, inner_timers, timer_stack, name, flattened, enabled, totmeasured, prev_timer_label,
prev_timer)
end
Base.copy(to::TimerOutput) = TimerOutput(copy(to.start_data), copy(to.accumulated_data), copy(to.inner_timers),
copy(to.timer_stack), to.name, to.flattened, to.enabled, to.totmeasured, "", nothing)
const DEFAULT_TIMER = TimerOutput()
const _timers = Dict{String, TimerOutput}("Default" => DEFAULT_TIMER)
const _timers_lock = ReentrantLock() # needed for adding new timers on different threads
"""
get_timer(name::String)
Returns the `TimerOutput` associated with `name`.
If no timers are associated with `name`, a new `TimerOutput` will be created.
"""
function get_timer(name::String)
lock(_timers_lock) do
if !haskey(_timers, name)
_timers[name] = TimerOutput(name)
end
return _timers[name]
end
end
# push! and pop!
function Base.push!(to::TimerOutput, label::String)
if length(to.timer_stack) == 0 # Root section
current_timer = to
else # Not a root section
current_timer = to.timer_stack[end]
end
# Fast path
if current_timer.prev_timer_label == label
timer = current_timer.prev_timer
else
maybe_timer = get(current_timer.inner_timers, label, nothing)
# this could be implemented more elegantly using
# get!(() -> TimerOutput(label), current_timer.inner_timers, label)
# however that causes lots of allocations in
# julia v1.3
if maybe_timer === nothing
timer = TimerOutput(label)
current_timer.inner_timers[label] = timer
else
timer = maybe_timer
end
end
timer = timer::TimerOutput
current_timer.prev_timer_label = label
current_timer.prev_timer = timer
push!(to.timer_stack, timer)
return timer.accumulated_data
end
Base.pop!(to::TimerOutput) = pop!(to.timer_stack)
# Only sum the highest parents
function totmeasured(to::TimerOutput)
t, b = Int64(0), Int64(0)
for section in values(to.inner_timers)
timedata = section.accumulated_data
t += timedata.time
b += timedata.allocs
end
return t, b
end
function longest_name(to::TimerOutput, indent = 0)
m = textwidth(to.name) + indent
for inner_timer in values(to.inner_timers)
m = max(m, longest_name(inner_timer, indent + 2))
end
return m
end
# merging timer outputs
const merge_lock = ReentrantLock() # needed for merges of objects on different threads
Base.merge(self::TimerOutput, others::TimerOutput...) = merge!(TimerOutput(), self, others...)
function Base.merge!(self::TimerOutput, others::TimerOutput...; tree_point = String[])
lock(merge_lock) do
for other in others
self.accumulated_data += other.accumulated_data
its = self.inner_timers
for point in tree_point
its = its[point].inner_timers
end
_merge(its, other.inner_timers)
end
return self
end
end
function _merge(self::Dict{String,TimerOutput}, other::Dict{String,TimerOutput})
for key in keys(other)
if haskey(self, key)
self[key].accumulated_data += other[key].accumulated_data
_merge(self[key].inner_timers, other[key].inner_timers)
else
self[key] = deepcopy(other[key])
end
end
end
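# Usage sketch: timers accumulated on different tasks/threads can be combined
# afterwards, e.g.
# merged = merge(to_main, to_worker) # non-mutating
# merge!(to_main, to_worker; tree_point = ["outer"]) # graft under an existing "outer" section
# (to_main/to_worker are illustrative names, not defined in this file.)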
#######
# API #
#######
# Accessors
ncalls(to::TimerOutput) = to.accumulated_data.ncalls
allocated(to::TimerOutput) = to.accumulated_data.allocs
time(to::TimerOutput) = to.accumulated_data.time
totallocated(to::TimerOutput) = totmeasured(to)[2]
tottime(to::TimerOutput) = totmeasured(to)[1]
time() = time(DEFAULT_TIMER)
ncalls() = ncalls(DEFAULT_TIMER)
allocated() = allocated(DEFAULT_TIMER)
totallocated() = totmeasured(DEFAULT_TIMER)[2]
tottime() = totmeasured(DEFAULT_TIMER)[1]
get_defaulttimer() = DEFAULT_TIMER
Base.@deprecate get_defaultimer get_defaulttimer
# Macro
macro timeit(args...)
return timer_expr(__module__, false, args...)
end
macro timeit_debug(args...)
if !isdefined(__module__, :timeit_debug_enabled)
Core.eval(__module__, :(timeit_debug_enabled() = false))
end
return timer_expr(__module__, true, args...)
end
function enable_debug_timings(m::Module)
if !getfield(m, :timeit_debug_enabled)()
Core.eval(m, :(timeit_debug_enabled() = true))
end
end
function disable_debug_timings(m::Module)
if getfield(m, :timeit_debug_enabled)()
Core.eval(m, :(timeit_debug_enabled() = false))
end
end
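# Usage sketch: @timeit_debug compiles down to the plain expression until debug
# timings are switched on for the module that used the macro, e.g.
# @timeit_debug to "solve" solve(problem) # no-op by default
# TimerOutputs.enable_debug_timings(MyModule) # MyModule is an illustrative name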
timer_expr(args...) = throw(ArgumentError("invalid macro usage for @timeit, use as @timeit [to] label codeblock"))
function is_func_def(f)
if isa(f, Expr) && (f.head === :function || Base.is_short_function_def(f))
return true
else
return false
end
end
function timer_expr(m::Module, is_debug::Bool, ex::Expr)
is_func_def(ex) && return timer_expr_func(m, is_debug, :($(TimerOutputs.DEFAULT_TIMER)), ex)
return esc(_timer_expr(m, is_debug, :($(TimerOutputs).DEFAULT_TIMER), ex))
end
function timer_expr(m::Module, is_debug::Bool, label_or_to, ex::Expr)
is_func_def(ex) && return timer_expr_func(m, is_debug, label_or_to, ex)
return esc(_timer_expr(m, is_debug, :($(TimerOutputs).DEFAULT_TIMER), label_or_to, ex))
end
function timer_expr(m::Module, is_debug::Bool, label::String, ex::Expr)
is_func_def(ex) && return timer_expr_func(m, is_debug, :($(TimerOutputs).DEFAULT_TIMER), ex, label)
return esc(_timer_expr(m, is_debug, :($(TimerOutputs).DEFAULT_TIMER), label, ex))
end
function timer_expr(m::Module, is_debug::Bool, to, label, ex::Expr)
is_func_def(ex) && return timer_expr_func(m, is_debug, to, ex, label)
return esc(_timer_expr(m, is_debug, to, label, ex))
end
function _timer_expr(m::Module, is_debug::Bool, to::Union{Symbol, Expr, TimerOutput}, label, ex::Expr)
@gensym local_to enabled accumulated_data b₀ t₀ val
timeit_block = quote
$local_to = $to
$enabled = $local_to.enabled
if $enabled
$accumulated_data = $(push!)($local_to, $label)
end
$b₀ = $(gc_bytes)()
$t₀ = $(time_ns)()
$(Expr(:tryfinally,
:($val = $ex),
quote
if $enabled
$(do_accumulate!)($accumulated_data, $t₀, $b₀)
$(pop!)($local_to)
end
end))
$val
end
if is_debug
return quote
if $m.timeit_debug_enabled()
$timeit_block
else
$ex
end
end
else
return timeit_block
end
end
function timer_expr_func(m::Module, is_debug::Bool, to, expr::Expr, label=nothing)
expr = macroexpand(m, expr)
def = splitdef(expr)
label === nothing && (label = string(def[:name]))
def[:body] = if is_debug
quote
@inline function inner()
$(def[:body])
end
$(_timer_expr(m, is_debug, to, label, :(inner())))
end
else
_timer_expr(m, is_debug, to, label, def[:body])
end
return esc(combinedef(def))
end
function do_accumulate!(accumulated_data, t₀, b₀)
accumulated_data.time += time_ns() - t₀
accumulated_data.allocs += gc_bytes() - b₀
accumulated_data.ncalls += 1
end
reset_timer!() = reset_timer!(DEFAULT_TIMER)
function reset_timer!(to::TimerOutput)
to.inner_timers = Dict{String,TimerOutput}()
to.start_data = TimeData(0, time_ns(), gc_bytes())
to.accumulated_data = TimeData()
to.prev_timer_label = ""
to.prev_timer = nothing
resize!(to.timer_stack, 0)
return to
end
# We can remove this now that the @timeit macro is exception safe.
# Doesn't hurt to keep it for a while though
timeit(f::Function, label::String) = timeit(f, DEFAULT_TIMER, label)
function timeit(f::Function, to::TimerOutput, label::String)
accumulated_data = push!(to, label)
b₀ = gc_bytes()
t₀ = time_ns()
local val
try
val = f()
finally
accumulated_data.time += time_ns() - t₀
accumulated_data.allocs += gc_bytes() - b₀
accumulated_data.ncalls += 1
pop!(to)
end
return val
end
Base.haskey(to::TimerOutput, name::String) = haskey(to.inner_timers, name)
Base.getindex(to::TimerOutput, name::String) = to.inner_timers[name]
function flatten(to::TimerOutput)
t, b = totmeasured(to)
inner_timers = Dict{String,TimerOutput}()
for inner_timer in values(to.inner_timers)
_flatten!(inner_timer, inner_timers)
end
toc = copy(to)
return TimerOutput(toc.start_data, toc.accumulated_data, inner_timers, TimerOutput[], "Flattened", true, true, (t, b), "", to)
end
function _flatten!(to::TimerOutput, inner_timers::Dict{String,TimerOutput})
for inner_timer in values(to.inner_timers)
_flatten!(inner_timer, inner_timers)
end
if haskey(inner_timers, to.name)
timer = inner_timers[to.name]
timer.accumulated_data += to.accumulated_data
else
toc = copy(to)
toc.inner_timers = Dict{String,TimerOutput}()
inner_timers[toc.name] = toc
end
end
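# Sketch of the effect: flatten(to) drops the nesting and sums every section
# that shares a label into a single entry, so a section timed under several
# different parents shows up once in the flattened report.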
enable_timer!(to::TimerOutput=DEFAULT_TIMER) = to.enabled = true
disable_timer!(to::TimerOutput=DEFAULT_TIMER) = to.enabled = false
# Macro to selectively disable timer for expression
macro notimeit(args...)
notimeit_expr(args...)
end
# Default function throws an error for the benefit of the user
notimeit_expr(args...) = throw(ArgumentError("invalid macro usage for @notimeit, use as @notimeit [to] codeblock"))
complement!() = complement!(DEFAULT_TIMER)
function complement!(to::TimerOutput)
if length(to.inner_timers) == 0
return nothing
end
tot_time = to.accumulated_data.time
tot_allocs = to.accumulated_data.allocs
for timer in values(to.inner_timers)
tot_time -= timer.accumulated_data.time
tot_allocs -= timer.accumulated_data.allocs
complement!(timer)
end
tot_time = max(tot_time, 0)
tot_allocs = max(tot_allocs, 0)
if !(to.name in ["root", "Flattened"])
name = string("~", to.name, "~")
timer = TimerOutput(to.start_data, TimeData(max(1,to.accumulated_data.ncalls), tot_time, tot_allocs), Dict{String,TimerOutput}(), TimerOutput[], name, false, true, (tot_time, tot_allocs), to.name, to)
to.inner_timers[name] = timer
end
return to
end
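# Sketch of the effect: complement!(to) adds a "~name~" child to each section
# whose inner timers do not account for all of its time/allocations, so the
# untimed remainder becomes visible in the printed report.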
# If @notimeit was called without a TimerOutput instance, use default timer
notimeit_expr(ex::Expr) = notimeit_expr(:($(TimerOutputs.DEFAULT_TIMER)), ex)
# Disable timer, evaluate expression, restore timer to previous value, and return expression result
function notimeit_expr(to, ex::Expr)
return quote
local to = $(esc(to))
local enabled = to.enabled
$(disable_timer!)(to)
local val
$(Expr(:tryfinally,
:(val = $(esc(ex))),
quote
if enabled
$(enable_timer!)(to)
end
end))
val
end
end
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 737 | module TimerOutputs
using ExprTools
import Base: show, time_ns
export TimerOutput, @timeit, @timeit_debug, reset_timer!, print_timer, timeit,
enable_timer!, disable_timer!, @notimeit, get_timer
# https://github.com/JuliaLang/julia/pull/33717
if VERSION < v"1.4.0-DEV.475"
gc_bytes() = Base.gc_bytes()
else
function gc_bytes()
b = Ref{Int64}(0)
Base.gc_bytes(b)
return b[]
end
end
using Printf
include("TimerOutput.jl")
include("show.jl")
include("utilities.jl")
if Base.VERSION >= v"1.4.2"
include("compile.jl")
_precompile_()
end
function __init__()
# Reset DEFAULT_TIMER; otherwise starting time is the time of precompile
reset_timer!()
end
end # module
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 863 | # To make it less likely that users measure TimerOutputs compilation time.
let
to = TimerOutput()
@timeit to "1" string(1)
end
function _precompile_()
ccall(:jl_generating_output, Cint, ()) == 1 || return nothing
@assert Base.precompile(Tuple{typeof(print_timer), typeof(stdout), TimerOutput})
@assert Base.precompile(Tuple{typeof(print_timer), TimerOutput})
@assert Base.precompile(Tuple{typeof(push!), TimerOutput, String})
@assert Base.precompile(Tuple{typeof(reset_timer!), TimerOutput})
@assert Base.precompile(Tuple{typeof(disable_timer!), TimerOutput})
@assert Base.precompile(Tuple{typeof(enable_timer!), TimerOutput})
@assert Base.precompile(Tuple{typeof(complement!), TimerOutput})
@assert Base.precompile(Tuple{typeof(do_accumulate!), TimeData, UInt64, Int64})
@assert Base.precompile(Tuple{Type{TimerOutput}, String})
end
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 6674 | print_timer(; kwargs...) = print_timer(stdout; kwargs...)
print_timer(to::TimerOutput; kwargs...) = print_timer(stdout, to; kwargs...)
print_timer(io::IO; kwargs...) = print_timer(io, DEFAULT_TIMER; kwargs...)
print_timer(io::IO, to::TimerOutput; kwargs...) = (show(io, to; kwargs...); println(io))
Base.show(to::TimerOutput; kwargs...) = show(stdout, to; kwargs...)
function Base.show(io::IO, to::TimerOutput; allocations::Bool = true, sortby::Symbol = :time, linechars::Symbol = :unicode, compact::Bool = false, title::String = "")
sortby in (:time, :ncalls, :allocations, :name, :firstexec) || throw(ArgumentError("sortby should be :time, :allocations, :ncalls, :name, or :firstexec, got $sortby"))
linechars in (:unicode, :ascii) || throw(ArgumentError("linechars should be :unicode or :ascii, got $linechars"))
t₀, b₀ = to.start_data.time, to.start_data.allocs
t₁, b₁ = time_ns(), gc_bytes()
Δt, Δb = t₁ - t₀, b₁ - b₀
∑t, ∑b = to.flattened ? to.totmeasured : totmeasured(to)
max_name = longest_name(to)
available_width = displaysize(io)[2]
requested_width = max_name
if compact
if allocations
requested_width += 43
else
requested_width += 25
end
else
if allocations
requested_width += 59
else
requested_width += 33
end
end
#requested_width = 34 + (allocations ? 27 : 0) + max_name
name_length = max(9, max_name - max(0, requested_width - available_width))
print_header(io, Δt, Δb, ∑t, ∑b, name_length, true, allocations, linechars, compact, title)
rev = !in(sortby, [:name, :firstexec])
by(x) = sortf(x, sortby)
for timer in sort!(collect(values(to.inner_timers)); rev = rev, by = by)
_print_timer(io, timer, ∑t, ∑b, 0, name_length, allocations, sortby, compact)
end
print_header(io, Δt, Δb, ∑t, ∑b, name_length, false, allocations, linechars, compact, title)
end
function sortf(x, sortby)
sortby == :time && return x.accumulated_data.time
sortby == :ncalls && return x.accumulated_data.ncalls
sortby == :allocations && return x.accumulated_data.allocs
sortby == :name && return x.name
sortby == :firstexec && return x.accumulated_data.firstexec
error("internal error")
end
# truncate string and add dots
function truncdots(str, n)
textwidth(str) <= n && return str
n <= 3 && return ""
io = IOBuffer()
for (i, c) in enumerate(str)
i == n - 2 && (write(io, "..."); break)
write(io, c)
end
return String(take!(io))
end
function print_header(io, Δt, Δb, ∑t, ∑b, name_length, header, allocations, linechars, compact, title)
global BOX_MODE, ALLOCATIONS_ENABLED
midrule = linechars == :unicode ? "─" : "-"
topbottomrule = linechars == :unicode ? "─" : "-"
sec_ncalls = string(rpad("Section", name_length, " "), " ncalls ")
time_headers = " time %tot" * (compact ? "" : " avg")
alloc_headers = allocations ? (" alloc %tot" * (compact ? "" : " avg")) : ""
total_table_width = sum(textwidth.((sec_ncalls, time_headers, alloc_headers))) + 3
# Just hardcoded shit to make things look nice
!allocations && (total_table_width -= 3)
function center(str, len)
x = (len - textwidth(str)) ÷ 2
return string(" "^x, str, " "^(len - textwidth(str) - x))
end
if header
time_alloc_pading = " "^(textwidth(sec_ncalls))
title = center(truncdots(title, textwidth(sec_ncalls)), textwidth(sec_ncalls))
if compact
time_header = " Time "
else
time_header = " Time "
end
time_underline = midrule^textwidth(time_header)
if compact
allocation_header = " Allocations "
else
allocation_header = " Allocations "
end
alloc_underline = midrule^textwidth(allocation_header)
#tot_meas_str = string(" ", rpad("Tot / % measured:", textwidth(sec_ncalls) - 1, " "))
if compact
tot_meas_str = center("Total measured:", textwidth(sec_ncalls))
else
tot_meas_str = center("Tot / % measured:", textwidth(sec_ncalls))
end
str_time = center(string(prettytime(Δt), compact ? "" : string(" / ", prettypercent(∑t, Δt))), textwidth(time_header))
str_alloc = center(string(prettymemory(Δb), compact ? "" : string(" / ", prettypercent(∑b, Δb))), textwidth(allocation_header))
header_str = string(" time %tot %timed")
tot_midstr = string(sec_ncalls, " ", header_str)
printstyled(io, " ", topbottomrule^total_table_width, "\n"; bold=true)
if ! (allocations == false && compact == true)
printstyled(io, " ", title; bold=true)
print(io, time_header)
allocations && print(io, " ", allocation_header)
print(io, "\n")
print(io, " ", time_alloc_pading, time_underline)
allocations && print(io, " ", alloc_underline)
print(io, "\n")
print(io, " ", tot_meas_str, str_time)
allocations && print(io, " ", str_alloc)
print(io, "\n\n")
end
print(io, " ", sec_ncalls, time_headers)
allocations && print(io, " ", alloc_headers)
print(io, "\n")
print(io, " ", midrule^total_table_width, "\n")
else
printstyled(io, " ", topbottomrule^total_table_width; bold=true)
end
end
function _print_timer(io::IO, to::TimerOutput, ∑t::Integer, ∑b::Integer, indent::Integer, name_length, allocations, sortby, compact)
accum_data = to.accumulated_data
t = accum_data.time
b = accum_data.allocs
name = truncdots(to.name, name_length - indent)
print(io, " ")
nc = accum_data.ncalls
print(io, " "^indent, rpad(name, name_length + 2 - indent))
print(io, lpad(prettycount(nc), 5, " "))
print(io, " ", lpad(prettytime(t), 6, " "))
print(io, " ", lpad(prettypercent(t, ∑t), 5, " "))
!compact && print(io, " ", rpad(prettytime(t / nc), 6, " "))
if allocations
print(io, " ", rpad(prettymemory(b), 9, " "))
print(io, rpad(prettypercent(b, ∑b), 5, " "))
!compact && print(io, " ", lpad(prettymemory(b / nc), 5, " "))
end
print(io, "\n")
rev = !in(sortby, [:name, :firstexec])
by(x) = sortf(x, sortby)
for timer in sort!(collect(values(to.inner_timers)); rev = rev, by = by)
_print_timer(io, timer, ∑t, ∑b, indent + 2, name_length, allocations, sortby, compact)
end
end
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 4319 | ###################
# Pretty Printing #
###################
function prettytime(t)
if t < 1e3
value, units = t, "ns"
elseif t < 1e6
value, units = t / 1e3, "μs"
elseif t < 1e9
value, units = t / 1e6, "ms"
elseif t < 3600e9
value, units = t / 1e9, "s"
# We intentionally do not show minutes
else
value, units = t / 3600e9, "h"
end
if round(value) >= 100
str = string(@sprintf("%.0f", value), units)
elseif round(value * 10) >= 100
str = string(@sprintf("%.1f", value), units)
else
str = string(@sprintf("%.2f", value), units)
end
return lpad(str, 6, " ")
end
function prettymemory(b)
if b < 1000
value, units = b, "B"
elseif b < 1000^2
value, units = b / 1024, "KiB"
elseif b < 1000^3
value, units = b / 1024^2, "MiB"
elseif b < 1000^4
value, units = b / 1024^3, "GiB"
elseif b < 1000^5
value, units = b / 1024^4, "TiB"
elseif b < 1000^6
value, units = b / 1024^5, "PiB"
else
value, units = b / 1024^6, "EiB"
end
if round(value) >= 100
str = string(@sprintf("%.0f", value), units)
elseif round(value * 10) >= 100
str = string(@sprintf("%.1f", value), units)
elseif value >= 0
str = string(@sprintf("%.2f", value), units)
else
str = "-"
end
return lpad(str, 7, " ")
end
function prettypercent(nominator, denominator)
value = nominator / denominator * 100
if denominator == 0 && nominator == 0
str = " - %"
elseif denominator == 0
str = "inf %"
else
str = string(@sprintf("%.1f", value), "%")
end
return lpad(str, 6, " ")
end
function prettycount(t::Integer)
if t < 1000
return string(t)
elseif t < 1000^2
value, units = t / 1000, "k"
elseif t < 1000^3
value, units = t / 1e6, "M"
else
value, units = t / 1e9, "B"
end
if round(value) >= 100
str = string(@sprintf("%.0f", value), units)
elseif round(value * 10) >= 100
str = string(@sprintf("%.1f", value), units)
else
str = string(@sprintf("%.2f", value), units)
end
return str
end
function rpad(
s::Union{AbstractChar,AbstractString},
n::Integer,
p::Union{AbstractChar,AbstractString}=' ',
) :: String
n = Int(n)::Int
m = signed(n) - Int(textwidth(s))::Int
m ≤ 0 && return string(s)
l = textwidth(p)
q, r = divrem(m, l)
r == 0 ? string(s, p^q) : string(s, p^q, first(p, r))
end
#################
# Serialization #
#################
"""
todict(to::TimerOutput) -> Dict{String, Any}
Converts a `TimerOutput` into a nested set of dictionaries, with keys and value types:
* `"n_calls"`: `Int`
* `"time_ns"`: `Int`
* `"allocated_bytes"`: `Int`
* `"total_allocated_bytes"`: `Int`
* `"total_time_ns"`: `Int`
* `"inner_timers"`: `Dict{String, Dict{String, Any}}`
"""
function todict(to::TimerOutput)
return Dict{String,Any}(
"n_calls" => ncalls(to),
"time_ns" => time(to),
"allocated_bytes" => allocated(to),
"total_allocated_bytes" => totallocated(to),
"total_time_ns" => tottime(to),
"inner_timers" => Dict{String, Any}(k => todict(v) for (k,v) in to.inner_timers)
)
end
##########################
# Instrumented functions #
##########################
# implemented as a callable type to get better error messages
# (i.e. you see `F` explicitly, which might be `typeof(f)` telling
# you that `f` is involved).
struct InstrumentedFunction{F} <: Function
func::F
t::TimerOutput
name::String
end
function funcname(f::F) where {F}
if @generated
string(repr(F.instance))
else
string(repr(f))
end
end
InstrumentedFunction(f, t) = InstrumentedFunction(f, t, funcname(f))
function (inst::InstrumentedFunction)(args...; kwargs...)
@timeit inst.t inst.name inst.func(args...; kwargs...)
end
"""
(t::TimerOutput)(f, name=string(repr(f))) -> InstrumentedFunction
Instruments `f` by the [`TimerOutput`](@ref) `t` returning an `InstrumentedFunction`.
This function can be used just like `f`, but whenever it is called it stores timing
results in `t`.
"""
(t::TimerOutput)(f, name=funcname(f)) = InstrumentedFunction(f, t, name)
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | code | 18999 | using TimerOutputs
using Test
import TimerOutputs: DEFAULT_TIMER, ncalls, flatten,
prettytime, prettymemory, prettypercent, prettycount, todict
reset_timer!()
# Timing from modules that don't import much
baremodule NoImports
using TimerOutputs
using Base: sleep
@timeit "baresleep" sleep(0.1)
end
@testset "TimerOutput" begin
@test "baresleep" in keys(DEFAULT_TIMER.inner_timers)
to = TimerOutput()
@timeit to "sleep" sleep(0.1)
@timeit "sleep" sleep(0.1)
@test "sleep" in keys(to.inner_timers)
@test "sleep" in keys(DEFAULT_TIMER.inner_timers)
@timeit to "multi statement" begin
1+1
sleep(0.1)
end
@timeit "multi statement" begin
1+1
sleep(0.1)
end
@test "multi statement" in keys(to.inner_timers)
@test "multi statement" in keys(DEFAULT_TIMER.inner_timers)
@timeit to "sleep" sleep(0.1)
@timeit to "sleep" sleep(0.1)
@timeit to "sleep" sleep(0.1)
@timeit "sleep" sleep(0.1)
@timeit "sleep" sleep(0.1)
@timeit "sleep" sleep(0.1)
@test haskey(to, "sleep")
@test !haskey(to, "slep")
@test ncalls(to["sleep"]) == 4
@test ncalls(DEFAULT_TIMER["sleep"]) == 4
# Check reset works
reset_timer!(to)
reset_timer!()
@test length(keys(to.inner_timers)) == 0
@test length(keys(DEFAULT_TIMER.inner_timers)) == 0
# Check return values get propagated
function foo(a)
a+a
end
to2 = TimerOutput()
a = @timeit to2 "foo" foo(5)
b = @timeit "foo" foo(5)
@test a === 10
@test b === 10
@test "foo" in collect(keys(to2.inner_timers))
@test "foo" in collect(keys(DEFAULT_TIMER.inner_timers))
# Test nested
c = @timeit to2 "nest 1" begin
sleep(0.01)
@timeit to2 "nest 2" sleep(0.02)
@timeit to2 "nest 2" sleep(0.02)
5
end
d = @timeit "nest 1" begin
sleep(0.01)
@timeit "nest 2" sleep(0.02)
@timeit "nest 2" sleep(0.02)
5
end
@test ncalls(to2["nest 1"]) == 1
@test ncalls(to2["nest 1"]["nest 2"]) == 2
@test ncalls(DEFAULT_TIMER["nest 1"])== 1
@test ncalls(DEFAULT_TIMER["nest 1"]["nest 2"]) == 2
@test c === 5
@test d == 5
# test throws
function foo2(v)
@timeit to "throwing" begin
sleep(0.01)
print(v[6]) # OOB
end
end
function foo3(v)
@timeit "throwing" begin
sleep(0.01)
print(v[6]) # OOB
end
end
try
foo2(rand(5))
catch e
isa(e, BoundsError) || rethrow(e)
end
try
foo3(rand(5))
catch e
isa(e, BoundsError) || rethrow(e)
end
@test "throwing" in keys(to.inner_timers)
@test "throwing" in keys(DEFAULT_TIMER.inner_timers)
reset_timer!(to)
@timeit to "foo" begin
sleep(0.05)
@timeit to "bar" begin
@timeit to "foo" sleep(0.05)
@timeit to "foo" sleep(0.05)
@timeit to "baz" sleep(0.05)
@timeit to "bar" sleep(0.05)
end
@timeit to "bur" sleep(0.025)
end
@timeit to "bur" sleep(0.025)
tom = flatten(to)
@test ncalls(tom["foo"]) == 3
@test ncalls(tom["bar"]) == 2
@test ncalls(tom["bur"]) == 2
@test ncalls(tom["baz"]) == 1
function many_loops()
for i in 1:10^7
@timeit to "loop" 1+1
end
end
many_loops()
a = 3
@timeit to "a$a" 1+1
@timeit "a$a" 1+1
@test "a3" in collect(keys(to.inner_timers))
@test "a3" in collect(keys(DEFAULT_TIMER.inner_timers))
reset_timer!(DEFAULT_TIMER)
toz = TimerOutput()
@timeit toz "foo" 1+1
reset_timer!(toz)
@timeit toz "foo" 1+1
@test "foo" in keys(toz.inner_timers)
tof = TimerOutput()
@timeit tof ff1(x) = x
@timeit tof ff2(x)::Float64 = x
@timeit tof function ff3(x) x end
@timeit tof function ff4(x)::Float64 x end
@timeit ff5(x) = x
@timeit ff6(x)::Float64 = x
@timeit function ff7(x) x end
@timeit function ff8(x)::Float64 x end
@timeit ff9(x::T) where {T} = x
@timeit (ff10(x::T)::Float64) where {T} = x
@timeit function ff11(x::T) where {T} x end
@timeit function ff12(x::T)::Float64 where {T} x end
@timeit "foo" ff13(x::T) where {T} = x
@timeit "bar" (ff14(x::T)::Float64) where {T} = x
@timeit "baz" function ff15(x::T) where {T} x end
@timeit "quz" function ff16(x::T)::Float64 where {T} x end
@timeit tof "foo" ff17(x::T) where {T} = x
@timeit tof "bar" (ff18(x::T)::Float64) where {T} = x
@timeit tof "baz" function ff19(x::T) where {T} x end
@timeit tof "quz" function ff20(x::T)::Float64 where {T} x end
for i in 1:2
@test ff1(1) === 1
@test ff2(1) === 1.0
@test ff3(1) === 1
@test ff4(1) === 1.0
@test ff5(1) === 1
@test ff6(1) === 1.0
@test ff7(1) === 1
@test ff8(1) === 1.0
@test ff9(1) === 1
@test ff10(1) === 1.0
@test ff11(1) === 1
@test ff12(1) === 1.0
@test ff13(1) === 1
@test ff14(1) === 1.0
@test ff15(1) === 1
@test ff16(1) === 1.0
@test ff17(1) === 1
@test ff18(1) === 1.0
@test ff19(1) === 1
@test ff20(1) === 1.0
end
@test ncalls(tof["ff1"]) == 2
@test ncalls(tof["ff2"]) == 2
@test ncalls(tof["ff3"]) == 2
@test ncalls(tof["ff4"]) == 2
@test ncalls(tof["foo"]) == 2
@test ncalls(tof["bar"]) == 2
@test ncalls(tof["baz"]) == 2
@test ncalls(tof["quz"]) == 2
@test ncalls(DEFAULT_TIMER["ff5"]) == 2
@test ncalls(DEFAULT_TIMER["ff6"]) == 2
@test ncalls(DEFAULT_TIMER["ff7"]) == 2
@test ncalls(DEFAULT_TIMER["ff8"]) == 2
@test ncalls(DEFAULT_TIMER["ff9"]) == 2
@test ncalls(DEFAULT_TIMER["ff10"]) == 2
@test ncalls(DEFAULT_TIMER["ff11"]) == 2
@test ncalls(DEFAULT_TIMER["ff12"]) == 2
@test ncalls(DEFAULT_TIMER["foo"]) == 2
@test ncalls(DEFAULT_TIMER["bar"]) == 2
@test ncalls(DEFAULT_TIMER["baz"]) == 2
@test ncalls(DEFAULT_TIMER["quz"]) == 2
function foo()
reset_timer!()
@timeit "asdf" bar()
end
bar() = print_timer()
foo()
io = IOBuffer()
show(io, to)
show(io, to; allocations = false)
show(io, to; allocations = false, compact = true)
show(io, to; sortby = :ncalls)
show(io, to; sortby = :time)
show(io, to; sortby = :allocations)
show(io, to; sortby = :name)
show(io, to; sortby = :firstexec)
show(io, to; linechars = :ascii)
show(io, to; title = "A short title")
show(io, to; title = "A very long title that will be truncated")
# issue 22: edge cases for rounding
for (t, str) in ((9999, "10.0μs"), (99999, " 100μs"),
(9999999, "10.0ms"), (99999999, " 100ms"))
@test prettytime(t) == str
end
for (b, str) in ((9.999*1024, "10.0KiB"), (99.999*1024, " 100KiB"),
(9.999*1024^2, "10.0MiB"), (99.999*1024^2, " 100MiB"),
(9.999*1024^3, "10.0GiB"), (99.999*1024^3, " 100GiB"))
@test prettymemory(b) == str
end
for (num, den, str) in ((0.9999, 1, "100.0%"), (0.09999, 1, " 10.0%"))
@test prettypercent(num, den) == str
end
for (t, str) in ((9.999*1024, "10.0KiB"), (99.999*1024, " 100KiB"),
(9.999*1024^2, "10.0MiB"), (99.999*1024^2, " 100MiB"),
(9.999*1024^3, "10.0GiB"), (99.999*1024^3, " 100GiB"))
@test prettymemory(t) == str
end
for (c, str) in ((9999, "10.0k"), (99999, "100k"),
(9999999, "10.0M"), (99999999, "100M"),
(9999999999, "10.0B"), (99999999999, "100B"))
@test prettycount(c) == str
end
# `continue` inside a timeit section
to_continue = TimerOutput()
function continue_test()
for i = 1:10
@timeit to_continue "x" @timeit to_continue "test" begin
continue
end
end
end
continue_test()
@test isempty(to_continue.inner_timers["x"].inner_timers["test"].inner_timers)
# Test @timeit_debug
to_debug = TimerOutput()
function debug_test()
@timeit_debug to_debug "sleep" sleep(0.001)
end
TimerOutputs.disable_debug_timings(@__MODULE__)
debug_test()
@test !("sleep" in keys(to_debug.inner_timers))
TimerOutputs.enable_debug_timings(@__MODULE__)
debug_test()
@test "sleep" in keys(to_debug.inner_timers)
# Test functional-form @timeit_debug with @eval'ed functions
to_debug = TimerOutput()
@timeit_debug to_debug function baz(x, y)
@timeit_debug to_debug "sleep" sleep(0.001)
return x + y * x
end
TimerOutputs.disable_debug_timings(@__MODULE__)
baz(1, 2.0)
@test isempty(to_debug.inner_timers)
TimerOutputs.enable_debug_timings(@__MODULE__)
baz(1, 2.0)
@test "baz" in keys(to_debug.inner_timers)
@test "sleep" in keys(to_debug.inner_timers["baz"].inner_timers)
TimerOutputs.disable_debug_timings(@__MODULE__)
to = TimerOutput()
@timeit to "section1" sleep(0.02)
@timeit to "section2" begin
@timeit to "section2.1" sleep(0.1)
sleep(0.01)
end
TimerOutputs.complement!(to)
tom = flatten(to)
@test ncalls(tom["~section2~"]) == 1
end # testset
struct Simulation
timer::TimerOutput
# state
end
@testset "Timer from argument" begin
get_timer(sim) = sim.timer
@timeit get_timer(sim) function step!(sim::Simulation)
# important computation
end
sim = Simulation(TimerOutputs.TimerOutput())
step!(sim)
@test TimerOutputs.ncalls(sim.timer["step!"]) == 1
step!(sim)
@test TimerOutputs.ncalls(sim.timer["step!"]) == 2
@timeit get_timer(args...; kw...) step2!(args...; kw...) = nothing
step2!(sim)
@test TimerOutputs.ncalls(sim.timer["step!"]) == 2
@test TimerOutputs.ncalls(sim.timer["step2!"]) == 1
step2!(sim)
@test TimerOutputs.ncalls(sim.timer["step2!"]) == 2
end
# default timer without explicitly loading TimerOutputs
TimerOutputs.reset_timer!()
module TestModule
using TimerOutputs: @timeit
foo(x) = x
@timeit "foo" foo(1)
end
@test "foo" in keys(DEFAULT_TIMER.inner_timers)
TimerOutputs.reset_timer!()
# Test sharing timers between modules
@test !haskey(TimerOutputs._timers, "TestModule2")
@test !haskey(TimerOutputs._timers, "my_timer")
to = get_timer("my_timer")
@timeit to "foo" sleep(0.1)
@test ncalls(get_timer("my_timer")["foo"]) == 1
module TestModule2
using TimerOutputs: @timeit, get_timer
foo(x) = x
@timeit get_timer("TestModule2") "foo" foo(1)
@timeit get_timer("my_timer") "foo" foo(1)
end
# Timer from module is accessible to root
@test haskey(TimerOutputs._timers, "TestModule2")
@test ncalls(get_timer("TestModule2")["foo"]) == 1
# Timer from root is accessible to module
@test ncalls(get_timer("my_timer")["foo"]) == 2
# Broken
#=
# Type inference with @timeit_debug
@timeit_debug function make_zeros()
dims = (3, 4)
zeros(dims)
end
@inferred make_zeros()
TimerOutputs.enable_debug_timings(@__MODULE__)
@inferred make_zeros()
=#
to = TimerOutput()
@timeit_debug to function f(x)
g(x) = 2x
g(x)
end
@test f(3) == 6
TimerOutputs.enable_debug_timings(@__MODULE__)
@test f(3) == 6
TimerOutputs.disable_debug_timings(@__MODULE__)
@testset "Not too many allocations #59" begin
function doit(timer, n)
ret = 0
for i in 1:n
@timeit timer "depth0" begin
@timeit timer "depth1" begin
@timeit timer "depth2" begin
ret += sin(i)
end
@timeit timer "depth2b" begin
ret += cos(i)
end
end
@timeit timer "depth1b" begin
end
end
end
ret
end
to = TimerOutput()
doit(to, 1)
a0 = TimerOutputs.allocated(to["depth0"])
a1 = TimerOutputs.allocated(to["depth0"]["depth1"])
a2 = TimerOutputs.allocated(to["depth0"]["depth1"]["depth2"])
to = TimerOutput()
doit(to, 100000)
to0 = to["depth0"]
to1 = to0["depth1"]
to1b = to0["depth1b"]
to2 = to1["depth2"]
to2b = to1["depth2b"]
# test that leaf timers add zero allocations
# and other timers only add allocations once
@test TimerOutputs.allocated(to0) == a0
@test TimerOutputs.allocated(to1) == a1
@test TimerOutputs.allocated(to2) == a2
@test TimerOutputs.allocated(to1b) == 0
@test TimerOutputs.allocated(to2) == 0
@test TimerOutputs.allocated(to2b) == 0
end
@testset "disable enable" begin
to = TimerOutput()
ff1() = @timeit to "ff1" 1+1
ff1()
@test ncalls(to["ff1"]) == 1
disable_timer!(to)
ff1()
@test ncalls(to["ff1"]) == 1
enable_timer!(to)
ff1()
@test ncalls(to["ff1"]) == 2
@notimeit to ff1()
ff1()
@test ncalls(to["ff1"]) == 3
end
# Type inference with @timeit_debug
@timeit_debug function make_zeros()
dims = (3, 4)
zeros(dims)
end
@inferred make_zeros()
TimerOutputs.enable_debug_timings(@__MODULE__)
@inferred make_zeros()
@testset "merge" begin
to1 = TimerOutput()
to2 = TimerOutput()
to3 = TimerOutput()
@timeit to1 "foo" identity(nothing)
@timeit to1 "baz" identity(nothing)
@timeit to1 "foobar" begin
@timeit to1 "foo" identity(nothing)
@timeit to1 "baz" identity(nothing)
end
@timeit to1 "bar" identity(nothing)
@timeit to2 "baz" identity(nothing)
@timeit to2 "foobar" begin
@timeit to2 "bar" identity(nothing)
@timeit to2 "baz" identity(nothing)
end
@timeit to3 "bar" identity(nothing)
@test_throws MethodError merge()
to_merged = merge(to1, to2, to3)
@test to_merged !== to1
merge!(to1, to2, to3)
for to in [to1, to_merged]
@test "foo" in collect(keys(to.inner_timers))
@test "bar" in collect(keys(to.inner_timers))
@test "foobar" in collect(keys(to.inner_timers))
subto = to["foobar"]
@test "foo" in collect(keys(subto.inner_timers))
@test "bar" in collect(keys(subto.inner_timers))
@test ncalls(to["foo"]) == 1
@test ncalls(to["bar"]) == 2
@test ncalls(to["baz"]) == 2
@test ncalls(subto["foo"]) == 1
@test ncalls(subto["bar"]) == 1
@test ncalls(subto["baz"]) == 2
end
end
# Issue #118
let to = TimerOutput()
@timeit to "foo" identity(nothing)
@timeit to "foobar" begin
@timeit to "foo" identity(nothing)
@timeit to "baz" identity(nothing)
end
@timeit to "baz" identity(nothing)
@test ncalls(to.inner_timers["foo"]) == 1
@test ncalls(to.inner_timers["foobar"]) == 1
@test ncalls(to.inner_timers["foobar"].inner_timers["foo"]) == 1
@test ncalls(to.inner_timers["foobar"].inner_timers["baz"]) == 1
@test ncalls(to.inner_timers["baz"]) == 1
end
@testset "sortby firstexec" begin
to = TimerOutput()
@timeit to "cccc" sleep(0.1)
@timeit to "cccc" sleep(0.1)
@timeit to "bbbb" sleep(0.1)
@timeit to "aaaa" sleep(0.1)
@timeit to "cccc" sleep(0.1)
table = sprint((io, to)->show(io, to, sortby = :firstexec), to)
@test match(r"cccc", table).offset < match(r"bbbb", table).offset < match(r"aaaa", table).offset
to = TimerOutput()
@timeit to "group" begin
@timeit to "aaaa" sleep(0.1)
@timeit to "nested_group" begin sleep(0.1)
@timeit to "bbbb" sleep(0.1)
@timeit to "cccc" sleep(0.1)
end
end
table = sprint((io, to)->show(io, to, sortby = :firstexec), to)
@test match(r"aaaa", table).offset < match(r"bbbb", table).offset < match(r"cccc", table).offset
end
@static if isdefined(Threads, Symbol("@spawn"))
@testset "merge at custom points during multithreading" begin
to = TimerOutput()
@timeit to "1" begin
@timeit to "1.1" sleep(0.1)
@timeit to "1.2" sleep(0.1)
@timeit to "1.3" sleep(0.1)
end
@sync begin
@timeit to "2" Threads.@spawn begin
to2 = TimerOutput()
@timeit to2 "2.1" sleep(0.1)
@timeit to2 "2.2" sleep(0.1)
@timeit to2 "2.3" sleep(0.1)
merge!(to, to2, tree_point = ["2"])
end
@timeit to "3" Threads.@spawn begin
to3 = TimerOutput()
@sync begin
@timeit to3 "3.1" Threads.@spawn begin
to31 = TimerOutput()
@timeit to31 "3.1.1" sleep(0.1)
@timeit to31 "3.1.2" sleep(0.1)
@timeit to31 "3.1.3" sleep(0.1)
merge!(to3, to31, tree_point = ["3.1"])
end
@timeit to3 "3.2" Threads.@spawn begin
to32 = TimerOutput()
@timeit to32 "3.2.1" sleep(0.1)
@timeit to32 "3.2.2" sleep(0.1)
@timeit to32 "3.2.3" sleep(0.1)
merge!(to3, to32, tree_point = ["3.2"])
end
end
merge!(to, to3, tree_point = ["3"])
end
end
@test "1" in collect(keys(to.inner_timers))
@test ncalls(to.inner_timers["1"]) == 1
@test "2" in collect(keys(to.inner_timers))
@test ncalls(to.inner_timers["2"]) == 1
@test "3" in collect(keys(to.inner_timers))
@test ncalls(to.inner_timers["3"]) == 1
@test !in("1.1", collect(keys(to.inner_timers)))
@test !in("2.1", collect(keys(to.inner_timers)))
@test !in("3.1", collect(keys(to.inner_timers)))
@test !in("3.1.1", collect(keys(to.inner_timers)))
@test !in("3.2", collect(keys(to.inner_timers)))
@test !in("3.2.1", collect(keys(to.inner_timers)))
to1 = to.inner_timers["1"]
@test "1.1" in collect(keys(to1.inner_timers))
@test ncalls(to1.inner_timers["1.1"]) == 1
to2 = to.inner_timers["2"]
@test "2.1" in collect(keys(to2.inner_timers))
@test ncalls(to2.inner_timers["2.1"]) == 1
@test !in("3.1", collect(keys(to2.inner_timers)))
to3 = to.inner_timers["3"]
@test "3.1" in collect(keys(to3.inner_timers))
@test ncalls(to3.inner_timers["3.1"]) == 1
@test "3.2" in collect(keys(to3.inner_timers))
@test ncalls(to3.inner_timers["3.2"]) == 1
@test !in("2.1", collect(keys(to3.inner_timers)))
to31 = to3.inner_timers["3.1"]
@test "3.1.1" in collect(keys(to31.inner_timers))
@test ncalls(to31.inner_timers["3.1.1"]) == 1
@test !in("3.2.1", collect(keys(to31.inner_timers)))
to32 = to3.inner_timers["3.2"]
@test "3.2.1" in collect(keys(to32.inner_timers))
@test ncalls(to32.inner_timers["3.2.1"]) == 1
@test !in("3.1.1", collect(keys(to32.inner_timers)))
end
end
@testset "Serialization" begin
# Setup a timer
to = TimerOutput()
@timeit to "foo" identity(nothing)
@timeit to "foobar" begin
@timeit to "foo" identity(nothing)
@timeit to "baz" identity(nothing)
end
@timeit to "baz" identity(nothing)
function compare(to, d)
@test TimerOutputs.tottime(to) == d["total_time_ns"]
@test TimerOutputs.ncalls(to) == d["n_calls"]
@test TimerOutputs.totallocated(to) == d["total_allocated_bytes"]
@test TimerOutputs.allocated(to) == d["allocated_bytes"]
@test TimerOutputs.time(to) == d["time_ns"]
for ((k1, timer), (k2, obj)) in zip(to.inner_timers, d["inner_timers"])
@test k1 == k2
compare(timer, obj)
end
end
compare(to, todict(to))
end
@testset "InstrumentedFunctions" begin
to = TimerOutput()
f = to(x -> x^2, "f")
@test isempty(to.inner_timers)
f(1)
@test ncalls(to.inner_timers["f"]) == 1
h = to(x -> f(x) + 1, "h")
h(1)
@test ncalls(to.inner_timers["h"]) == 1
@test ncalls(to.inner_timers["h"].inner_timers["f"]) == 1
s = x -> x+1
t = to(s)
t(1)
    @test ncalls(to.inner_timers[repr(s)]) == 1
end
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.5.24 | 5a13ae8a41237cff5ecf34f73eb1b8f42fff6531 | docs | 20663 | # TimerOutputs
[](https://travis-ci.com/KristofferC/TimerOutputs.jl) [](https://codecov.io/gh/KristofferC/TimerOutputs.jl)
`TimerOutputs` is a small Julia package that is used to generate formatted output from timings made in different sections of a program.
Its main functionality is the `@timeit` macro, similar to the `@time` macro in Base, except that one also assigns a label to the code section being timed.
Multiple calls to code sections with the same label (and in the same "scope") will accumulate the data for that label.
After the program has executed, it is possible to print a nicely formatted table presenting how much time, allocations and number of calls were made in each section.
The output can be customized to show only the things you are interested in.
If you find this package useful please give it a star. I like stars and it also helps me know where my development time is best spent.
## Example output
An example of the output (used in a finite element simulation) is shown below
```
───────────────────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 6.89s / 97.8% 5.20GiB / 85.0%
Section ncalls time %tot avg alloc %tot avg
───────────────────────────────────────────────────────────────────────────────
assemble 6 3.27s 48.6% 545ms 3.65GiB 82.7% 624MiB
inner assemble 240k 1.92s 28.4% 7.98μs 3.14GiB 71.1% 13.7KiB
linear solve 5 2.73s 40.5% 546ms 108MiB 2.39% 21.6MiB
create sparse matrix 6 658ms 9.77% 110ms 662MiB 14.6% 110MiB
export 1 78.4ms 1.16% 78.4ms 13.1MiB 0.29% 13.1MiB
───────────────────────────────────────────────────────────────────────────────
```
The first line shows the total (wall) time passed and allocations made since the start of the timer as well as
the percentage of those totals spent inside timed sections.
The following lines shows data for all the timed sections.
The section label is shown first followed by the number of calls made to that section.
Finally, the total time elapsed or allocations made in that section are shown together with the
percentage of the total in that section and the average (time / allocations per call).
## Usage
The easiest way to show how the package works is with a few examples of timing sections.
```julia
using TimerOutputs
# Create a TimerOutput, this is the main type that keeps track of everything.
const to = TimerOutput()
# Time a section code with the label "sleep" to the `TimerOutput` named "to"
@timeit to "sleep" sleep(0.02)
# Create a function to later time
rands() = rand(10^7)
# Time the function, @timeit returns the value being evaluated, just like Base @time
rand_vals = @timeit to "randoms" rands();
# Nested sections (sections with same name are not accumulated
# if they have different parents)
function time_test()
@timeit to "nest 1" begin
sleep(0.1)
# 3 calls to the same label
@timeit to "level 2.1" sleep(0.03)
@timeit to "level 2.1" sleep(0.03)
@timeit to "level 2.1" sleep(0.03)
@timeit to "level 2.2" sleep(0.2)
end
@timeit to "nest 2" begin
@timeit to "level 2.1" sleep(0.3)
@timeit to "level 2.2" sleep(0.4)
end
end
time_test()
# exception safe
function i_will_throw()
@timeit to "throwing" begin
sleep(0.5)
throw(error("this is fine..."))
print("nope")
end
end
i_will_throw()
# Use disable_timer! to selectively turn off a timer, enable_timer! turns it on again
disable_timer!(to)
@timeit to "not recorded" sleep(0.1)
enable_timer!(to)
# Use @notimeit to disable timer and re-enable it afterwards (if it was enabled
# before)
@notimeit to time_test()
# Call to a previously used label accumulates data
for i in 1:100
@timeit to "sleep" sleep(0.01)
end
# Can also annotate function definitions
@timeit to funcdef(x) = x
funcdef(2)
# Or to instrument an existing function:
foo(x) = x + 1
timed_foo = to(foo)
timed_foo(5)
# Print the timings in the default way
show(to)
```
Printing `to` shows a formatted table showing the number of calls,
the total time spent in each section, and the percentage of the time
spent in each section since `to` was created as well as averages (per call).
Similar information is available for allocations:
```
────────────────────────────────────────────────────────────────────────
Time Allocations
─────────────────────── ────────────────────────
Tot / % measured: 7.99s / 39.1% 207MiB / 46.7%
Section ncalls time %tot avg alloc %tot avg
────────────────────────────────────────────────────────────────────────
sleep 101 1.36s 43.4% 13.4ms 23.3KiB 0.0% 236B
nest 2 1 711ms 22.8% 711ms 2.05KiB 0.0% 2.05KiB
level 2.2 1 405ms 13.0% 405ms 144B 0.0% 144B
level 2.1 1 306ms 9.8% 306ms 448B 0.0% 448B
throwing 1 517ms 16.6% 517ms 912B 0.0% 912B
nest 1 1 417ms 13.4% 417ms 2.17KiB 0.0% 2.17KiB
level 2.2 1 202ms 6.5% 202ms 144B 0.0% 144B
level 2.1 3 108ms 3.5% 36.0ms 432B 0.0% 144B
randoms 1 120ms 3.8% 120ms 96.5MiB 100.0% 96.5MiB
funcdef 1 94.4μs 0.0% 94.4μs 0.00B 0.0% 0.00B
foo 1 1.50μs 0.0% 1.50μs 0.00B 0.0% 0.00B
────────────────────────────────────────────────────────────────────────
```
## Settings for printing:
The `print_timer([io::IO = stdout], to::TimerOutput; kwargs...)` function (or `show`) takes a number of keyword arguments to change the output. They are listed here, and a usage example follows the list:
* `title::String` ─ title for the timer
* `allocations::Bool` ─ show the allocation columns (default `true`)
* `sortby::Symbol` ─ sort the sections according to `:time` (default), `:ncalls`, `:allocations`, `:name` or `:firstexec`
* `linechars::Symbol` ─ use either `:unicode` (default) or `:ascii` to draw the horizontal lines in the table
* `compact::Bool` ─ hide the `avg` column (default `false`)
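For example, to print a compact table sorted by the number of calls (using the `to` timer from the Usage section; the exact numbers will differ on your machine):
```julia
print_timer(to; sortby = :ncalls, compact = true, linechars = :ascii, title = "My timings")
```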
## Flattening
If sections are nested like in the example below:
```julia
to = TimerOutput()
@timeit to "nest 1" begin
sleep(0.1)
@timeit to "level 2.1" sleep(0.1)
for i in 1:20; @timeit to "level 2.2" sleep(0.02); end
end
@timeit to "nest 2" begin
for i in 1:30; @timeit to "level 2.1" sleep(0.01); end
@timeit to "level 2.2" sleep(0.1)
end
```
the table is displayed as:
```julia
julia> show(to, allocations = false, compact = true)
────────────────────────────────────
Section ncalls time %tot
────────────────────────────────────
nest 1 1 669ms 60.5%
level 2.2 20 423ms 38.3%
level 2.1 1 101ms 9.15%
nest 2 1 437ms 39.5%
level 2.1 30 335ms 30.3%
level 2.2 1 101ms 9.16%
────────────────────────────────────
```
It is possible to flatten this timer using the `TimerOutputs.flatten` function that accumulates the data for all sections with identical labels:
```julia
julia> to_flatten = TimerOutputs.flatten(to);
julia> show(to_flatten; compact = true, allocations = false)
──────────────────────────────────
Section ncalls time %tot
──────────────────────────────────
nest 1 1 669ms 60.5%
level 2.2 21 525ms 47.5%
nest 2 1 437ms 39.5%
level 2.1 31 436ms 39.5%
──────────────────────────────────
```
## Merging
Two or more timers can be merged using `merge` or `merge!`:
```julia
julia> to1 = TimerOutput(); to2 = TimerOutput();
julia> @timeit to1 "outer" begin
@timeit to1 "inner" begin
sleep(1)
end
end
julia> @timeit to2 "outer" begin
sleep(1)
end
julia> show(to1; compact=true, allocations=false)
────────────────────────────────
Section ncalls time %tot
────────────────────────────────
outer 1 1.00s 100%
inner 1 1.00s 100%
────────────────────────────────
julia> show(to2; compact=true, allocations=false)
────────────────────────────────
Section ncalls time %tot
────────────────────────────────
outer 1 1.00s 100%
────────────────────────────────
julia> show(merge(to1, to2); compact=true, allocations=false)
────────────────────────────────
Section ncalls time %tot
────────────────────────────────
outer 2 2.00s 100%
inner 1 1.00s 50.0%
────────────────────────────────
```
Merging can be used to facilitate timing coverage throughout simple multi-threaded setups.
For instance, use thread-local `TimerOutput` objects that are merged at custom merge points
via the `tree_point` keyword arg, which is a vector of label strings used to navigate to
the merge point in the timing tree. `merge!` is thread-safe via a lock.
```julia
julia> using TimerOutputs
julia> to = TimerOutput()
julia> @timeit to "1" begin
@timeit to "1.1" sleep(0.1)
@timeit to "1.2" sleep(0.1)
@timeit to "1.3" sleep(0.1)
end
julia> @timeit to "2" Threads.@spawn begin
to2 = TimerOutput()
@timeit to2 "2.1" sleep(0.1)
@timeit to2 "2.2" sleep(0.1)
@timeit to2 "2.3" sleep(0.1)
merge!(to, to2, tree_point = ["2"])
end
julia> to
──────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 3.23s / 9.79% 13.5MiB / 36.9%
Section ncalls time %tot avg alloc %tot avg
──────────────────────────────────────────────────────────────────
1 1 309ms 98.0% 309ms 4.55MiB 91.5% 4.55MiB
1.3 1 106ms 33.6% 106ms 320B 0.01% 320B
1.2 1 102ms 32.3% 102ms 320B 0.01% 320B
1.1 1 101ms 32.0% 101ms 4.54MiB 91.4% 4.54MiB
2 1 6.47ms 2.05% 6.47ms 435KiB 8.54% 435KiB
2.2 1 106ms 33.6% 106ms 480B 0.01% 480B
2.3 1 105ms 33.4% 105ms 144B 0.00% 144B
2.1 1 103ms 32.5% 103ms 5.03MiB 101% 5.03MiB
──────────────────────────────────────────────────────────────────
```
## Resetting
A timer is reset by calling `reset_timer!(to::TimerOutput)`. This will remove all sections and reset the start of the timer to the current time / allocation values.
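For example:
```julia
reset_timer!(to)   # reset a specific timer
reset_timer!()     # reset the shared default timer
```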
## Indexing into a table
Any `TimerOutput` can be indexed with the name of a section which returns a new `TimerOutput` with that section as the "root". For example:
```julia
to = TimerOutput()
@timeit to "nest 1" begin
@timeit to "nest 2" begin
@timeit to "nest 3.1" sleep(0.1)
@timeit to "nest 3.2" sleep(0.1)
@timeit to "nest 3.3" sleep(0.1)
end
sleep(0.3)
end
```
```julia
julia> show(to; compact = true, allocations = false, linechars = :ascii)
-------------------------------------
Section ncalls time %tot
-------------------------------------
nest 1 1 605ms 100%
nest 2 1 304ms 50.2%
nest 3.2 1 101ms 16.7%
nest 3.1 1 101ms 16.7%
nest 3.3 1 101ms 16.7%
-------------------------------------
julia> to_2 = to["nest 1"]["nest 2"];
julia> show(to_2; compact = true, allocations = false, linechars = :ascii)
---------------------------------
Section ncalls time %tot
---------------------------------
nest 3.2 1 101ms 33.3%
nest 3.1 1 101ms 33.3%
nest 3.3 1 101ms 33.3%
---------------------------------
```
The percentages shown are now relative to that "root".
## Querying data
The (unexported) functions `ncalls`, `time`, `allocated` give the accumulated data for a section.
The returned time is in nanoseconds and the allocations are in bytes.
For example (using the `to` object from above):
```julia
julia> TimerOutputs.ncalls(to["nest 1"])
1
julia> TimerOutputs.time(to["nest 1"]["nest 2"])
350441733
julia> TimerOutputs.allocated(to["nest 1"]["nest 2"])
5280
```
Furthermore, you can request the total time spent in the "root" timer:
```julia
julia> TimerOutputs.tottime(to)
604937208
julia> TimerOutputs.totallocated(to)
7632
```
## Default Timer
It is often the case that it is enough to only use one timer. For convenience, there is therefore a version of
all the functions and macros that do not take a `TimerOutput` instance and instead use a global timer defined in the package.
Note that this global timer is shared among all users of the package.
For example:
```julia
reset_timer!()
@timeit "section" sleep(0.02)
@timeit "section2" sleep(0.1)
print_timer()
```
which prints:
```julia
julia> print_timer()
───────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 276ms / 44.3% 422KiB / 0.21%
Section ncalls time %tot avg alloc %tot avg
───────────────────────────────────────────────────────────────────
section2 1 101ms 82.7% 101ms 464B 50.0% 464B
section 1 21.1ms 17.3% 21.1ms 464B 50.0% 464B
───────────────────────────────────────────────────────────────────
```
The default timer object can be retrieved with `TimerOutputs.get_defaulttimer()`.
## Measuring time consumed outside `@timeit` blocks
Often, operations that we do not consider time consuming turn out to be relevant.
However, adding additional timing blocks just to time initializations and other
less important calls is annoying.
The `TimerOutputs.complement!` function can be used to modify a timer and add
values for the complement of timed sections. For instance:
```julia
to = TimerOutput()
@timeit to "section1" sleep(0.02)
@timeit to "section2" begin
@timeit to "section2.1" sleep(0.1)
sleep(0.01)
end
TimerOutputs.complement!(to)
```
We can print the result:
```julia
julia> print_timer(to)
───────────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 144ms / 100% 6.11KiB / 22.0%
Section ncalls time %tot avg alloc %tot avg
───────────────────────────────────────────────────────────────────────
section2 1 120ms 83.6% 120ms 1.17KiB 87.2% 1.17KiB
section2.1 1 106ms 73.9% 106ms 176B 12.8% 176B
~section2~ 1 13.9ms 9.69% 13.9ms 1.00KiB 74.4% 1.00KiB
section1 1 23.4ms 16.4% 23.4ms 176B 12.8% 176B
───────────────────────────────────────────────────────────────────────
```
In order to complement the default timer simply call `TimerOutputs.complement!()`.
## Shared Timers
It is sometimes desirable for a timer to be shared across all users of the
package. For this purpose, `get_timer` maintains a collection of named timers
defined in the package.
`get_timer(timer_name::String)` retrieves the timer `timer_name` from the
collection, creating a new timer if none already exists.
For example:
```julia
module UseTimer
using TimerOutputs: @timeit, get_timer
function foo()
to = get_timer("Shared")
@timeit get_timer("Shared") "foo" sleep(0.1)
end
end
@timeit get_timer("Shared") "section1" begin
UseTimer.foo()
sleep(0.01)
end
```
which prints:
```julia
julia> print_timer(get_timer("Shared"))
───────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 17.1s / 0.82% 44.0MiB / 2.12%
Section ncalls time %tot avg alloc %tot avg
───────────────────────────────────────────────────────────────────
section1 1 140ms 100% 140ms 956KiB 100% 956KiB
foo 1 102ms 72.7% 102ms 144B 0.01% 144B
───────────────────────────────────────────────────────────────────
```
Note that `get_timer` should not be called from the top-level of a package that is
getting precompiled, since the retrieved timer will no longer be shared with other
users getting a timer with the same name. Also, this function
is not recommended to be used extensively by libraries as the namespace is
shared and collisions are possible if two libraries happen to use the same timer
name.
## Serialization
Timers may be converted to a nested set of dictionaries with the (unexported) `TimerOutputs.todict` function. This can be used to serialize a timer as JSON, for example.
```julia
julia> to = TimerOutput();
julia> @timeit to "nest 1" begin
sleep(0.1)
@timeit to "level 2.1" sleep(0.1)
for i in 1:20; @timeit to "level 2.2" sleep(0.02); end
end
julia> TimerOutputs.todict(to)
Dict{String, Any} with 6 entries:
"total_time_ns" => 726721166
"total_allocated_bytes" => 474662
"time_ns" => 0
"n_calls" => 0
"allocated_bytes" => 0
"inner_timers" => Dict{String, Any}("nest 1"=>Dict{String, Any}("total_time_ns"=>611383374, "total_allocated_bytes"=>11888, "time_ns"=>726721166, "n_calls"=>1, "allocated_bytes"=>474662, "inner_timers"=>Dict{String, Any}("level 2.1"=>Dict{String, Any}("total_time_ns"=>0, "total_allocated_bytes"=>0, "time_ns"=>115773750, "n_calls"=>1, "allocated_bytes"=>8064, "inner_timers"=>Dict{String, Any}()), "level 2.2"=>Dict{String, Any}("total_time_ns"=>0, "total_allocated_bytes"=>0, "time_ns"=>495609624, "n_calls"=>20, "allocated_bytes"=>3824, "inner_timers"=>Dict{String, Any}()))))
julia> using JSON3 # or JSON
julia> JSON3.write(TimerOutputs.todict(to))
"{\"total_time_ns\":712143250,\"total_allocated_bytes\":5680,\"time_ns\":0,\"n_calls\":0,\"allocated_bytes\":0,\"inner_timers\":{\"nest 1\":{\"total_time_ns\":605922416,\"total_allocated_bytes\":4000,\"time_ns\":712143250,\"n_calls\":1,\"allocated_bytes\":5680,\"inner_timers\":{\"level 2.1\":{\"total_time_ns\":0,\"total_allocated_bytes\":0,\"time_ns\":106111333,\"n_calls\":1,\"allocated_bytes\":176,\"inner_timers\":{}},\"level 2.2\":{\"total_time_ns\":0,\"total_allocated_bytes\":0,\"time_ns\":499811083,\"n_calls\":20,\"allocated_bytes\":3824,\"inner_timers\":{}}}}}}"
```
## Overhead
There is a small overhead in timing a section (0.25 μs) which means that this package is not suitable for measuring sections that finish very quickly.
For proper benchmarking you want to use a more suitable tool like [*BenchmarkTools*](https://github.com/JuliaCI/BenchmarkTools.jl).
It is sometimes desirable to be able to "turn on and off" the `@timeit` macro; for instance, you may wish to instrument a package with `@timeit` macros but not deal with the overhead of the timings during normal package operation.
To enable this, we provide the `@timeit_debug` macro, which wraps the `@timeit` macro with a conditional, checking if debug timings have been enabled.
Because you may wish to turn on only certain portions of your instrumented code base (or multiple codebases may have instrumented their code), debug timings are enabled on a module-by-module basis.
By default, debug timings are disabled, and this conditional should be optimized away, allowing for truly zero-overhead.
If a user calls `TimerOutputs.enable_debug_timings(<module>)`, the `<module>.timeit_debug_enabled()` method will be redefined, causing all dependent methods to be recompiled within that module.
This may take a while, and hence is intended only for debugging usage; however, all calls to `@timeit_debug` (within that module) will thereafter be enabled.
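A minimal sketch of this workflow (the module, timer, and section names below are made up for illustration):
```julia
using TimerOutputs
module MyPkg
    using TimerOutputs: TimerOutput, @timeit_debug
    const to = TimerOutput()
    expensive(x) = @timeit_debug to "expensive" sum(abs2, x)
end
MyPkg.expensive(rand(100))                  # debug timings are off: nothing is recorded
TimerOutputs.enable_debug_timings(MyPkg)    # turns on @timeit_debug sections inside MyPkg
MyPkg.expensive(rand(100))                  # now recorded in MyPkg.to
TimerOutputs.disable_debug_timings(MyPkg)   # back to (essentially) zero overhead
```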
## Author
Kristoffer Carlsson - @KristofferC
## Acknowledgments
This package is inspired by the `TimerOutput` class in [deal.ii](https://dealii.org/).
| TimerOutputs | https://github.com/KristofferC/TimerOutputs.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 793 | using Documenter
using WannierIO
# Generate the HTML pages by Documenter.jl
makedocs(;
sitename="WannierIO.jl",
authors="Junfeng Qiao and contributors.",
clean=true,
modules=[WannierIO],
pages=[
"Home" => "index.md",
"API" => [
"Convention" => "api/convention.md",
"Utilities" => "api/util.md",
"Wannier90" => "api/w90.md",
"Volumetric data" => "api/volumetric.md",
"Quantum ESPRESSO" => "api/qe.md",
"EPW" => "api/epw.md",
"Index" => "api/index.md",
],
],
)
# Documenter will auto detect the build environment; on a local machine it will be
# skipped, so it's safe to run this script
deploydocs(; repo="github.com/qiaojunfeng/WannierIO.jl.git", devbranch="main")
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 1035 | """
`WannierIO.jl`: a package for reading and writing Wannier90 file formats.
---
$(README)
---
Exported functions:
$(EXPORTS)
"""
module WannierIO
using Printf: @printf, @sprintf
using DocStringExtensions
include("common/const.jl")
include("common/type.jl")
include("util/fortran.jl")
include("util/lattice.jl")
include("util/header.jl")
include("util/toml.jl")
include("util/parser.jl")
include("util/compare.jl")
using FortranFiles: FortranFile, FString, trimstring, Record
include("w90/win.jl")
include("w90/wout.jl")
include("w90/nnkp.jl")
include("w90/amn.jl")
include("w90/mmn.jl")
include("w90/eig.jl")
include("w90/chk.jl")
include("w90/unk.jl")
include("w90/spn.jl")
include("w90/uHu.jl")
include("w90/band.jl")
include("w90/tb.jl")
include("w90/wsvec.jl")
include("w90/hr.jl")
include("w90/r.jl")
include("w90/hh_r.jl")
include("w90/u_mat.jl")
# volumetric files
include("volume/xsf.jl")
include("volume/cube.jl")
include("volume/bxsf.jl")
include("qe/band.jl")
include("qe/xml.jl")
include("misc/epw.jl")
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 314 | """
Bohr radius in Angstrom unit.
This is the default CODATA2006 value in W90 `src/constants.F90`.
"""
const Bohr::Float64 = 0.52917721092
"""
Bohr radius in Angstrom unit.
This is the default (Physical constants, SI (NIST 2018)) value in QE
`Modules/constants.f90`.
"""
const Bohr_QE::Float64 = 0.529177210903
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 2296 | using StaticArrays
"""
3 x 3 matrix type.
For lattice and recip_lattice.
"""
const Mat3{T} = SMatrix{3,3,T,9} where {T}
"""
Length-3 vector type.
For atom positions, kpoints, etc.
"""
const Vec3{T} = SVector{3,T} where {T}
"""
`Vector{Vector}` -> `Mat3`
"""
Mat3(A::AbstractVector) = Mat3(reduce(hcat, A))
"""
`Mat3` -> `Vec3{Vec3}`
"""
Vec3(A::Mat3) = Vec3(eachcol(A))
"""
Pair type associating a `Symbol` with a `Vec3`.
For win file `atoms_frac` and `kpoint_path`.
"""
const SymbolVec3{T} = Pair{Symbol,Vec3{T}} where {T}
SymbolVec3(s, v) = SymbolVec3{eltype(v)}(s, v)
SymbolVec3(s::AbstractString, v) = SymbolVec3(Symbol(s), v)
SymbolVec3(p::Pair) = SymbolVec3(p.first, p.second)
SymbolVec3(d::Dict) = SymbolVec3(only(d))
abstract type FileFormat end
"""
Fortran formatted IO.
"""
struct FortranText <: FileFormat end
"""
Fortran unformatted IO.
"""
struct FortranBinary <: FileFormat end
"""
Fortran unformatted IO with stream access.
For example, file written using these Fortran code:
```fortran
OPEN(UNIT=11, FILE="ustream.demo", STATUS="NEW", ACCESS="STREAM", FORM="UNFORMATTED")
```
"""
struct FortranBinaryStream <: FileFormat end
"""
Plain text format for Wannier90 `win` and `nnkp` files.
The default W90 `win` or `nnkp` files are plain text but are not
simple arrays of numbers that can be read by `readdlm`; therefore, this struct
is used to indicate that the file is plain text but needs to be handled
by corresponding functions, e.g., [`read_win`](@ref), [`read_nnkp`](@ref), etc.
This somewhat overlaps with [`FortranText`](@ref), but this one is only
used for small input parameter files e.g. `win` and `nnkp` (in comparison with
the [`Wannier90Toml`](@ref) format), while the [`FortranText`](@ref) (in
comparison with the [`FortranBinary`](@ref) format) is used for large matrices
e.g. `amn`, `mmn`, `eig`, etc.
"""
struct Wannier90Text <: FileFormat end
"""
TOML file format for Wannier90 `win` and `nnkp` files.
Here we introduce a TOML format for `win` and `nnkp`, so that once the `win` or
`nnkp` files are converted into TOML, the TOML files can be loaded by standard
TOML parsers without the headache of writing custom parsers in other Julia packages.
See also [`write_win`](@ref), [`write_nnkp`](@ref), etc.
"""
struct Wannier90Toml <: FileFormat end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 8920 | """
$(SIGNATURES)
Read the EPW mmn file.
The EPW mmn format is different from that of W90. It does not contain the number
of kpoints/bvectors/bands, so they need to be provided as keyword arguments.
# Arguments
- `filename`: the mmn file name
# Keyword arguments
- `n_kpts`: number of kpoints
- `n_bvecs`: number of bvectors
- `n_bands`: number of bands
# Return
- `M`: length-`n_kpts` vector of length-`n_bvecs` vector of `n_bands * n_bands` matrices
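# Examples
A hypothetical call (the file name and the dimensions are placeholders; they must
match the calculation that produced the file):
```julia
M = read_epw_mmn("BN.mmn"; n_kpts=64, n_bvecs=8, n_bands=12)
```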
"""
function read_epw_mmn(
filename::AbstractString; n_kpts::Integer, n_bvecs::Integer, n_bands::Integer
)
return open(filename) do io
M = [[zeros(ComplexF64, n_bands, n_bands) for _ in 1:n_bvecs] for _ in 1:n_kpts]
for ik in 1:n_kpts
for ib in 1:n_bvecs
for n in 1:n_bands
for m in 1:n_bands
line = readline(io)
line = replace(strip(line), "(" => "", ")" => "", "," => " ")
line = split(line)
M[ik][ib][m, n] =
parse(Float64, line[1]) + parse(Float64, line[2]) * im
end
end
end
end
@assert eof(io) "Did not reach the end of the file, maybe wrong n_kpts, n_bvecs, or n_bands?"
return M
end
end
"""
Struct for the EPW `.ukk` file.
Similar to the W90 `.chk` file.
$(TYPEDEF)
# Fields
$(FIELDS)
"""
struct Ukk{T<:Real}
"""index of the first band"""
ibndstart::Int
"""index of the last band"""
ibndend::Int
"""number of kpoints"""
n_kpts::Int
"""number of bands"""
n_bands::Int
"""number of wannier functions"""
n_wann::Int
"""gauge matrices, length-`n_kpts` vector, each element is a `n_bands * n_wann` matrix"""
U::Vector{Matrix{Complex{T}}}
"""flag for frozen bands, length-`n_kpts` vector, each element is a length-`n_bands` vector"""
frozen_bands::Vector{BitVector}
"""flag for excluded bands, length-`n_bands + n_excl_bands` vector, where
`n_excl_bands` is the number of excluded bands"""
excluded_bands::BitVector
"""centers of WFs, length-`n_wann` vector of `Vec3`.
Note that EPW uses Cartesian coordinates w.r.t the QE `alat`, so it is dimensionless."""
centers::Vector{Vec3{T}}
end
"""
$(SIGNATURES)
Compare two `Ukk` structs.
"""
function Base.isapprox(a::Ukk, b::Ukk)
return _isapprox(a, b)
end
"""
$(SIGNATURES)
Read the EPW `.ukk` file.
# Arguments
- `filename`: the `.ukk` file name to read
# Return
- `ukk`: the [`Ukk`](@ref) struct
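# Examples
A hypothetical call (the file name is a placeholder):
```julia
ukk = read_epw_ukk("BN.ukk")
```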
"""
function read_epw_ukk(filename::AbstractString)
# Need to 1st read the last part to get the number of WFs
centers = open(filename) do io
centers = Vec3{Float64}[]
# note Julia 1.8 is required for reverse(eachline(io))
for line in Iterators.reverse(eachline(io))
r = split(strip(line))
if length(r) == 3
push!(centers, Vec3(parse.(Float64, r)))
else
break
end
end
return reverse(centers)
end
n_wann = length(centers)
@assert n_wann > 0 "n_wann = $n_wann ≤ 0"
ibndstart, ibndend, Uflat, flags = open(filename) do io
ibndstart, ibndend = parse.(Int, split(readline(io)))
# the unitary matrices
# now we don't know n_kpts and n_bands yet, so we can only read the
# complex numbers into a flat vector
Uflat = ComplexF64[]
# both the frozen_bands and excluded_bands, still flat vector
flags = Bool[]
for line in eachline(io)
line = replace(strip(line), "(" => "", ")" => "", "," => " ")
line = split(line)
if length(line) == 2
push!(Uflat, parse(Float64, line[1]) + parse(Float64, line[2]) * im)
elseif length(line) == 1
push!(flags, parse_bool(line[1]))
break
else
error("Wrong number of elements in line: $line")
end
end
for line in eachline(io)
line = strip(line)
if length(split(line)) == 1
push!(flags, parse_bool(line))
else
break
end
end
return ibndstart, ibndend, Uflat, flags
end
n_kpts_bands_wann = length(Uflat)
n_kpts_bands = n_kpts_bands_wann ÷ n_wann
# the kept bands are false in the last part of flags
excluded_bands = BitVector(flags[(n_kpts_bands + 1):end])
n_bands = count(!, excluded_bands)
n_kpts = n_kpts_bands ÷ n_bands
@assert n_kpts > 0 "n_kpts = $n_kpts ≤ 0"
@assert n_bands > 0 "n_bands = $n_bands ≤ 0"
@info "Reading ukk file" filename n_kpts n_bands n_wann
U = [zeros(ComplexF64, n_bands, n_wann) for _ in 1:n_kpts]
counter = 1
for ik in 1:n_kpts
for ib in 1:n_bands
for iw in 1:n_wann
U[ik][ib, iw] = Uflat[counter]
counter += 1
end
end
end
frozen_bands = [trues(n_bands) for _ in 1:n_kpts]
counter = 1
for ik in 1:n_kpts
for ib in 1:n_bands
            frozen_bands[ik][ib] = flags[counter]
            counter += 1
end
end
return Ukk(
ibndstart,
ibndend,
n_kpts,
n_bands,
n_wann,
U,
frozen_bands,
excluded_bands,
centers,
)
end
"""
$(SIGNATURES)
Write the EPW `.ukk` file.
# Arguments
- `filename`: the output file name
- `ukk`: the [`Ukk`](@ref) struct
# Examples
See [`Ukk(chk::Chk, alat::Real)`](@ref) for how to construct a `Ukk` from a [`Chk`](@ref).
"""
function write_epw_ukk(filename::AbstractString, ukk::Ukk)
open(filename, "w") do io
@printf(io, "%d %d\n", ukk.ibndstart, ukk.ibndend)
# the unitary matrices
for ik in 1:(ukk.n_kpts)
for ib in 1:(ukk.n_bands)
for iw in 1:(ukk.n_wann)
u = ukk.U[ik][ib, iw]
@printf(io, "(%25.18E,%25.18E)\n", real(u), imag(u))
end
end
end
# needs also lwindow when disentanglement is used
for ik in 1:(ukk.n_kpts)
for ib in 1:(ukk.n_bands)
if ukk.frozen_bands[ik][ib]
@printf(io, "T\n")
else
@printf(io, "F\n")
end
end
end
for ex in ukk.excluded_bands
if ex
@printf(io, "T\n")
else
@printf(io, "F\n")
end
end
# now write the Wannier centers to files
for iw in 1:(ukk.n_wann)
            # need more precision, otherwise the WS are not determined properly.
@printf(io, "%22.12E %22.12E %22.12E\n", ukk.centers[iw]...)
end
end
@printf("Written to %s\n", filename)
end
"""
$(SIGNATURES)
Construct a EPW [`Ukk`](@ref) from a W90 [`Chk`](@ref).
# Arguments
- `chk`: the [`Chk`](@ref) struct
- `alat`: the QE `alat` in Å. Note that the `alat` printed in the QE stdout file is
  in Bohr, so you need to convert it by multiplying with [`Bohr_QE`](@ref).
# Examples
Convert a W90 `.chk` file to an EPW `.ukk` file:
```julia
using WannierIO
chk = read_chk("BN.chk")
# Note we need QE `alat` for ukk. You can get it
# - either by inspecting the QE stdout file, from line like
# lattice parameter (alat) = 6.8330 a.u.
# where 6.8330 is the alat in Bohr. However, the Bohr constants used in W90
# and QE are slightly different, so to be exact we need to do the unit
# conversion using the QE constant:
alat = 6.8330 * WannierIO.Bohr_QE
# - or better by parsing the QE xml file, and the unit conversion is done automatically
alat = read_qe_xml("BN.xml").alat
ukk = Ukk(chk, alat)
WannierIO.write_epw_ukk("BN.ukk", ukk)
```
"""
function Ukk(chk::Chk, alat::Real)
n_bands = chk.n_bands
exclude_band_indices = chk.exclude_bands
n_kpts = chk.n_kpts
n_wann = chk.n_wann
frozen_bands = [trues(n_bands) for _ in 1:n_kpts]
n_excl_bands = length(exclude_band_indices)
n_bands_tot = n_bands + n_excl_bands
included = trues(n_bands_tot)
included[exclude_band_indices] .= false
excluded_bands = .!included
if n_excl_bands > 0
ibndstart = findfirst(included)
ibndend = n_bands_tot - findfirst(reverse(included)) + 1
else
ibndstart = 1
ibndend = n_bands_tot
end
# the centers in ukk file is dimensionless: Cartesian coordinates w.r.t alat
# the centers in chk file is Cartesian coordinates in Å
# the input arg `alat` should be in Å unit
centers = chk.r / alat
Uchk = get_U(chk)
return Ukk(
ibndstart,
ibndend,
n_kpts,
n_bands,
n_wann,
Uchk,
frozen_bands,
excluded_bands,
centers,
)
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 3249 | using LinearAlgebra
"""
$(SIGNATURES)
Read Quantum ESPRESSO `bands.x` output data file.
The data file has format
```
&plot nbnd= 20, nks= 380 /
-0.500000 0.500000 0.500000
-3.320 -0.666 5.173 5.173 7.994 9.725 9.725 14.147 16.993 16.993
17.841 17.841 17.902 19.666 25.961 26.563 28.186 28.186 28.368 28.368
-0.495000 0.495000 0.495000
-3.322 -0.664 5.173 5.173 7.994 9.725 9.725 14.148 16.980 16.980
...
```
"""
function read_qe_band(filename::AbstractString)
res = open(filename) do io
line = readline(io)
regex = r"&plot nbnd=\s*(\d+), nks=\s*(\d+) /"
m = match(regex, line)
if m !== nothing
n_bands, n_kpts = parse.(Int, m.captures)
else
            # this is my customized version, with `alat` added to the header,
            # so we can return kpoints in Å⁻¹ instead of arbitrary units
            regex = r"&plot nbnd=\s*(\d+), nks=\s*(\d+) alat=\s*([+-]?([0-9]*[.])?[0-9]+) /"
            m = match(regex, line)
            n_bands, n_kpts = parse.(Int, m.captures[1:2])
            alat = parse(Float64, m.captures[3])
end
kpoints = Vec3{Float64}[]
eigenvalues = Vector{Float64}[]
for _ in 1:n_kpts
# QE kpt are in cartesian coordinates, but scaled by `alat`
kpt = parse.(Float64, split(readline(io)))
push!(kpoints, kpt)
ib = 1
eig = zeros(Float64, n_bands)
while ib <= n_bands
e = parse.(Float64, split(readline(io)))
n_e = length(e)
eig[ib:(ib + n_e - 1)] = e
ib += n_e
end
@assert ib == (n_bands + 1)
push!(eigenvalues, eig)
end
@assert eof(io)
return (; kpoints, eigenvalues)
end
return res
end
"""
$(SIGNATURES)
Guess high symmetry points from kpoint coordinates.
If there is angle between two consecutive kpoints, then
it is labeled as a high-symmetry point.
# Arguments
- `kpoints`: Vector of `Vector` or `Vec3`, in Cartesian coordinates
# Keyword Arguments
- `atol`: Absolute tolerance for checking cross product of two vectors
# Returns
- `symm_point_indices`: Vector of indices of high-symmetry points
- `symm_point_labels`: Vector of labels of high-symmetry points, for the moment
it is empty
"""
function guess_kpath(kpoints::AbstractVector{<:AbstractVector}; atol=2e-6)
# of course index starts from 1
symm_point_indices = Vector{Int}()
symm_point_labels = Vector{String}()
n_kpts = length(kpoints)
if n_kpts == 0
return (; symm_point_indices, symm_point_labels)
end
# push the first kpt
push!(symm_point_indices, 1)
push!(symm_point_labels, "")
for ik in 2:(n_kpts - 1)
u = kpoints[ik] - kpoints[ik - 1]
v = kpoints[ik + 1] - kpoints[ik]
if !all(isapprox.(cross(u, v), 0; atol))
push!(symm_point_indices, ik)
push!(symm_point_labels, "")
end
end
# push the last kpt
push!(symm_point_indices, n_kpts)
push!(symm_point_labels, "")
return (; symm_point_indices, symm_point_labels)
end
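# Usage sketch (illustrative, not part of the original API): parse a QE `bands.x`
# output and guess the high-symmetry points along the kpath. The file name is hypothetical.
function _example_qe_band_kpath(filename::AbstractString="si.bands.dat")
    band = read_qe_band(filename)
    # kpoints from bands.x are Cartesian (scaled by alat), which is what `guess_kpath` expects
    kpath = guess_kpath(band.kpoints)
    @info "guessed high-symmetry points" kpath.symm_point_indices
    return (; band..., kpath...)
end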
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 3532 | # using GarishPrint: GarishPrint
# using Configurations: Configurations
# Base.@kwdef mutable struct AtomicWavefunction
# # to differentiate same kind of atoms
# atom_index::Int
# # e.g. "Si", use "Si1", "Si2" to differentiate same kind but different types (spin up, down)
# atom_label::String
# # orbital label, e.g. "3S"
# wfc_label::String
# # quantum numbers
# n::Int
# l::Int
# m::Int
# end
# Base.@kwdef mutable struct Projectabilities
# n_kpts::Int
# n_bands::Int
# # number of atomic wavefunctions
# num_wfcs::Int
# # atomic wavefunction types, size: num_wfcs
# wfcs_type::Vector{AtomicWavefunction}
# # projectability data, size: n_kpts * n_bands * num_wfcs
# proj::Array{Float64,3}
# end
# function read_qe_projwfcup(filename::String)
# fdat = open(filename)
# splitline() = split(strip(readline(fdat)))
# # header
# title = strip(readline(fdat), '\n')
# nr1x, nr2x, nr3x, nr1, nr2, nr3, nat, ntyp = parse.(Int, splitline())
# line = splitline()
# ibrav = parse(Int, line[1])
# celldm = parse.(Float64, line[2:end])
# line = splitline()
# # some version of projwfc.x output the unit_cell
# if length(line) == 3
# readline(fdat)
# readline(fdat)
# line = splitline()
# end
# gcutm, dual, ecutwfc = parse.(Float64, line[1:(end - 1)])
# magicnum = parse(Int, line[end])
# @assert magicnum == 9
# atm = Vector{String}(undef, ntyp)
# zv = zeros(Float64, ntyp)
# for i in 1:ntyp
# line = splitline()
# nt = parse(Int, line[1])
# @assert nt == i
# atm[i] = line[2]
# zv[i] = parse(Float64, line[3])
# end
# tau = zeros(Float64, 3, nat)
# ityp = zeros(Int, nat)
# for i in 1:nat
# line = splitline()
# na = parse(Int, line[1])
# @assert na == i
# tau[:, i] = parse.(Float64, line[2:4])
# ityp[i] = parse(Int, line[5])
# end
# natomwfc, nkstot, nbnd = parse.(Int, splitline())
# parsebool(s::Union{String,SubString}) = lowercase(s) == "t" ? true : false
# noncolin, lspinorb = parsebool.(splitline())
# @assert !noncolin && !lspinorb
# # projection data
# nlmchi = Vector{Dict}()
# proj = zeros(Float64, nkstot, nbnd, natomwfc)
# for iw in 1:natomwfc
# line = splitline()
# nwfc = parse(Int, line[1])
# @assert nwfc == iw
# na = parse(Int, line[2])
# atm_name = line[3]
# @assert atm_name == atm[ityp[na]]
# els = line[4]
# n, l, m = parse.(Int, line[5:end])
# push!(nlmchi, Dict("na" => na, "els" => els, "n" => n, "l" => l, "m" => m))
# for ik in 1:nkstot
# for ib in 1:nbnd
# line = splitline()
# k, b = parse.(Int, line[1:2])
# @assert k == ik && b == ib
# p = parse(Float64, line[3])
# proj[ik, ib, iw] = p
# end
# end
# end
# wfcs_type = Vector{AtomicWavefunction}(undef, natomwfc)
# for i in 1:natomwfc
# atom_index = nlmchi[i]["na"]
# atom_label = atm[ityp[atom_index]]
# wfc_label = nlmchi[i]["els"]
# n = nlmchi[i]["n"]
# l = nlmchi[i]["l"]
# m = nlmchi[i]["m"]
# wfc = AtomicWavefunction(atom_index, atom_label, wfc_label, n, l, m)
# wfcs_type[i] = wfc
# end
# return Projectabilities(nkstot, nbnd, natomwfc, wfcs_type, proj)
# end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 4780 | using EzXML
"""
$(SIGNATURES)
Read atomic structure and band structure from QE's XML output.
# Return
- `lattice`: `3 * 3`, Å, each column is a lattice vector
- `atom_positions`: length-`n_atoms` vector, each element is a fractional position
- `atom_labels`: length-`n_atoms` vector, each element is the label of the corresponding atom
- `recip_lattice`: `3 * 3`, Å⁻¹, each column is a reciprocal lattice vector
- `kpoints`: length-`n_kpts` vector, each element is a fractional kpoint
- `fermi_energy`: eV
- `alat`: the `alat` of QE in Å
- `eigenvalues`: length-`n_kpts` vector, each element is a length-`n_bands` vector of
  eigenvalues in eV. For spin-polarized calculations without SOC, two arrays
  `eigenvalues_up` and `eigenvalues_dn` are returned for the two spin channels.
"""
function read_qe_xml(filename::AbstractString)
# from qe/Modules/constants.f90
BOHR_RADIUS_ANGS = Bohr_QE # Angstrom
HARTREE_SI = 4.3597447222071e-18 # J
ELECTRONVOLT_SI = 1.602176634e-19 # J
AUTOEV = HARTREE_SI / ELECTRONVOLT_SI
doc = readxml(filename)
output = findfirst("/qes:espresso/output", root(doc))
# atoms
atomic_structure = findfirst("atomic_structure", output)
alat = parse(Float64, atomic_structure["alat"])
# from bohr to angstrom
alat *= BOHR_RADIUS_ANGS
n_atoms = parse(Int, atomic_structure["nat"])
# structure info, each column is a vector for position or lattice vector
atom_positions = Vec3{Float64}[]
atom_labels = Vector{String}(undef, n_atoms)
lattice = zeros(3, 3)
for (i, atom) in enumerate(eachelement(findfirst("atomic_positions", atomic_structure)))
pos = parse.(Float64, split(atom.content))
push!(atom_positions, pos)
atom_labels[i] = atom["name"]
end
# lattice
for i in 1:3
a = findfirst("cell/a$i", atomic_structure)
lattice[:, i] = parse.(Float64, split(a.content))
end
# from cartesian to fractional
inv_lattice = inv(lattice)
atom_positions = map(atom_positions) do pos
Vec3(inv_lattice * pos)
end
# from bohr to angstrom
lattice *= BOHR_RADIUS_ANGS
# reciprocal lattice
recip_lattice = zeros(3, 3)
for i in 1:3
b = findfirst("basis_set/reciprocal_lattice/b$i", output)
recip_lattice[:, i] = parse.(Float64, split(b.content))
end
# to 1/angstrom
recip_lattice *= 2π / alat
band_structure = findfirst("band_structure", output)
n_kpts = parse(Int, findfirst("nks", band_structure).content)
lsda = parse(Bool, findfirst("lsda", band_structure).content)
# noncolin = parse(Bool, findfirst("noncolin", band_structure).content)
spinorbit = parse(Bool, findfirst("spinorbit", band_structure).content)
# check spin-polarized case
if lsda && !spinorbit
nbnd_up = parse(Int, findfirst("nbnd_up", band_structure).content)
nbnd_dn = parse(Int, findfirst("nbnd_dw", band_structure).content)
# they should be the same in QE
@assert nbnd_up == nbnd_dn
n_bands = nbnd_up
eigenvalues_up = Vector{Float64}[]
eigenvalues_dn = Vector{Float64}[]
else
n_bands = parse(Int, findfirst("nbnd", band_structure).content)
eigenvalues = Vector{Float64}[]
end
kpoints = Vec3{Float64}[]
n_electrons = parse(Float64, findfirst("nelec", band_structure).content)
fermi_energy = parse(Float64, findfirst("fermi_energy", band_structure).content)
# Hartree to eV
fermi_energy *= AUTOEV
inv_recip = inv(recip_lattice)
ks_energies = findall("ks_energies", band_structure)
for ks_energy in ks_energies
k_point = findfirst("k_point", ks_energy)
kpt = parse.(Float64, split(k_point.content))
# to 1/angstrom
kpt *= 2π / alat
# from cartesian to fractional
kpt = inv_recip * kpt
push!(kpoints, kpt)
qe_eigenvalues = findfirst("eigenvalues", ks_energy)
if lsda && !spinorbit
e = parse.(Float64, split(qe_eigenvalues.content))
# Hartree to eV
e .*= AUTOEV
push!(eigenvalues_up, e[1:n_bands])
push!(eigenvalues_dn, e[(n_bands + 1):end])
else
e = parse.(Float64, split(qe_eigenvalues.content))
# Hartree to eV
e .*= AUTOEV
push!(eigenvalues, e)
end
end
lattice = Mat3(lattice)
recip_lattice = Mat3(recip_lattice)
results = (;
lattice,
atom_positions,
atom_labels,
recip_lattice,
kpoints,
n_electrons,
fermi_energy,
alat,
)
if lsda && !spinorbit
return (; results..., eigenvalues_up, eigenvalues_dn)
end
return (; results..., eigenvalues)
end
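# Usage sketch (illustrative, not part of the original API): align band energies to the
# Fermi level returned by `read_qe_xml`. The file name is hypothetical; this assumes a
# spin-unpolarized (or SOC) calculation, i.e., a single `eigenvalues` field.
function _example_qe_xml_bands(filename::AbstractString="si.xml")
    qe = read_qe_xml(filename)
    eigenvalues = map(qe.eigenvalues) do εk
        εk .- qe.fermi_energy   # eV, measured from the Fermi level
    end
    return (; kpoints=qe.kpoints, eigenvalues)
end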
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 534 | """
$(SIGNATURES)
Compare two (same-type) structs.
"""
function _isapprox(a::T, b::T) where {T}
for f in propertynames(a)
va = getfield(a, f)
vb = getfield(b, f)
if va isa String
va == vb || return false
elseif va isa Vector
            if eltype(va) <: BitVector
all(va .== vb) || return false
else
all(va .≈ vb) || return false
end
else
va ≈ vb || return false
end
end
return true
end
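# Illustrative sketch (not part of the original file): `_isapprox` works on any
# struct-like object with comparable fields; a NamedTuple is enough to try it out.
function _example_isapprox()
    a = (; name="demo", values=[1.0, 2.0])
    b = (; name="demo", values=[1.0, 2.0 + 1e-12])
    # true: the strings are equal and the vectors agree within floating-point tolerance
    return _isapprox(a, b)
end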
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 725 |
"""
$(SIGNATURES)
Check if a sequence of chars is binary.
"""
function isbinary(chars::AbstractVector{UInt8})::Bool
# normal ASCII chars
text_chars = Vector{UInt8}([7, 8, 9, 10, 12, 13, 27])
append!(text_chars, 0x20:0x99)
    # treat the null character as text: the 1024-byte read buffer in
    # `isbinary(filename)` is zero-padded, so trailing zeros should not count as binary
    push!(text_chars, 0x00)
    # DEL (0x7F) is not a text character
    deleteat!(text_chars, text_chars .== 0x7F)
# remove normal ASCII
filter!(x -> x ∉ text_chars, chars)
# display([Char(_) for _ in chars])
return length(chars) > 0
end
"""
$(SIGNATURES)
Check if the file is in binary format.
"""
function isbinary(filename::AbstractString)
raw_data = zeros(UInt8, 1024)
io = open(filename)
readbytes!(io, raw_data)
close(io)
return isbinary(raw_data)
end
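# Illustrative sketch (not part of the original file): a plain ASCII file should not
# be detected as binary.
function _example_isbinary()
    filename = tempname()
    write(filename, "created for the isbinary example\n1 2 3\n")
    return isbinary(filename)   # expected: false
end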
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 143 | using Dates: now
"""
Default header for writing wannier90 files.
"""
default_header() = @sprintf "# Created by WannierIO.jl %s" string(now())
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 253 | """
$(SIGNATURES)
Compute reciprocal lattice from lattice.
"""
get_recip_lattice(lattice::Mat3) = 2π * inv(lattice)'
"""
$(SIGNATURES)
Compute lattice from reciprocal lattice.
"""
get_lattice(recip_lattice::Mat3) = inv(recip_lattice / (2π))'
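# Illustrative sketch (not part of the original file): the two helpers are inverses of
# each other, so a round trip recovers the original lattice. The cell below is hypothetical.
function _example_lattice_roundtrip()
    lattice = Mat3([0.0 2.7 2.7; 2.7 0.0 2.7; 2.7 2.7 0.0])  # Å, fcc-like primitive cell
    recip_lattice = get_recip_lattice(lattice)               # Å⁻¹
    return get_lattice(recip_lattice) ≈ lattice              # expected: true
end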
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 2548 |
"""
$(SIGNATURES)
Parse a string as `Float64`.
This is capable of parsing Fortran outputs, e.g., `1.0D-10`, into the ordinary `1e-10`.
"""
function parse_float(s::AbstractString)
if occursin("*", s)
return NaN
else
return parse(Float64, replace(lowercase(strip(s)), "d" => "e"))
end
end
"""
$(SIGNATURES)
Parse a string as `bool`.
This is capable of parsing Fortran outputs, e.g., `.true.`, `.false.`, `true`, `T`.
"""
function parse_bool(s::AbstractString)
s = replace(lowercase(strip(s)), "." => "")[1] # only 1st char
return s == 't' || s == '1'
end
"""
$(SIGNATURES)
Parse an integer as `bool`.
- `0`: `false`
- `1` or `-1`: `true`
"""
function parse_bool(i::Integer)
return i != 0
end
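# Illustrative sketch (not part of the original file) showing the Fortran conventions
# handled by the two parsers above.
function _example_parse_fortran()
    x = parse_float("1.0D-10")      # Fortran double-precision exponent -> 1.0e-10
    overflow = parse_float("****")  # Fortran field overflow -> NaN
    flags = parse_bool.([".true.", "F", "T", ".false."])  # [true, false, true, false]
    return x, overflow, flags
end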
"""
$(SIGNATURES)
Parse a vector of `n_elements` elements of type `T` from `io`.
# Arguments
- `io`: input stream
- `T`: type of elements
- `n_elements::Int`: total number of elements
# Examples
Suppose a file `demo.txt` has the following content:
```
1 2 3 4 5 6 7 8 9 10
11 12 13 14 15 16 17 18 19 20
21 22 23
```
Then the following code parses the file and return a vector filled with 1 to 23:
```julia-repl
julia> vector = open("demo.txt") do io
parse_vector(io, Int, 23)
end
```
The number of elements in each line can be different.
"""
function parse_vector(io::IO, T::Type, n_elements::Integer)
vec = zeros(T, n_elements)
counter = 0
while counter < n_elements
@assert !eof(io) "unexpected end of file"
line = strip(readline(io))
splitted = split(line)
n_splitted = length(splitted)
vec[(counter + 1):(counter + n_splitted)] = parse.(T, splitted)
counter += n_splitted
end
return vec
end
"""
$(SIGNATURES)
Parse a string of comma-separated indices or range into a vector of integers.
E.g., the `exclude_bands` tag of `win` file.
# Examples
```julia-repl
julia> parse_indices("1-2, 5,8 -10")
6-element Vector{Int64}:
1
2
5
8
9
10
```
"""
function parse_indices(str::AbstractString)
segments = split(replace(strip(str), r"\s+" => ""), ",")
indices = Vector{Int}()
for s in segments
if isempty(s)
continue
elseif occursin(r"^\d+-\d+$", s)
start, stop = parse.(Int, split(s, "-"))
push!(indices, start:stop...)
elseif occursin(r"^\d+$", s)
push!(indices, parse(Int, s))
else
throw(ArgumentError("invalid index: $s while parsing $str"))
end
end
return indices
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 351 | """
$(SIGNATURES)
Write `kwargs` into `io` as a TOML file.
Do some type conversion before writing.
"""
function write_toml(io; kwargs...)
TOML.print(io, kwargs) do x
x isa Pair && return Dict(x)
x isa Symbol && return String(x)
x isa Mat3 && return eachcol(x)
error("unhandled type $(typeof(x))")
end
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 5305 | # BXSF format
# Specification from http://www.xcrysden.org/doc/XSF.html#__toc__14
export read_bxsf, write_bxsf
"""
$(SIGNATURES)
Read `bxsf` file.
# Return
- `fermi_energy`: Fermi energy in eV
- `origin`: `3`, Å⁻¹, origin of the grid
- `span_vectors`: `3 * 3`, Å⁻¹, each column is a spanning vector
- `X`: `nx`, fractional coordinate of grid points along the first spanning vector
- `Y`: `ny`, fractional coordinate of grid points along the second spanning vector
- `Z`: `nz`, fractional coordinate of grid points along the third spanning vector
- `E`: `n_bands * nx * ny * nz`, eigenvalues at each grid point
"""
function read_bxsf(filename::AbstractString)
io = open(filename)
fermi_energy = nothing
origin = nothing
span_vectors = nothing
X = Y = Z = nothing
E = nothing
while !eof(io)
line = strip(readline(io))
if isempty(line) || startswith(line, '#')
continue
end
if occursin("BEGIN_INFO", line)
# skip comments
line = strip(readline(io))
while isempty(line) || startswith(line, '#')
line = strip(readline(io))
end
@assert startswith(line, "Fermi Energy:")
fermi_energy = parse(Float64, split(line, ':')[2])
line = strip(readline(io))
@assert line == "END_INFO"
elseif occursin("BEGIN_BLOCK_BANDGRID_3D", line)
comment = strip(readline(io))
line = strip(readline(io))
# There should be only one data grid
@assert startswith(line, "BEGIN_BANDGRID_3D")
# identifier = chopprefix(line, "BEGIN_BANDGRID_3D_")
n_bands = parse(Int, strip(readline(io)))
n_x, n_y, n_z = parse.(Int, split(strip(readline(io))))
origin = parse.(Float64, split(strip(readline(io))))
# spanning vectors
span_vectors = zeros(Float64, 3, 3)
for i in 1:3
line = strip(readline(io))
span_vectors[:, i] = parse.(Float64, split(line))
end
E = zeros(Float64, n_bands, n_x, n_y, n_z)
# temp storage for each band, but in row-major
Eib = similar(E, n_z, n_y, n_x)
for ib in 1:n_bands
line = strip(readline(io))
@assert split(line) == ["BAND:", string(ib)]
idx = 1
while idx <= n_x * n_y * n_z
line = split(strip(readline(io)))
ncol = length(line)
Eib[idx:(idx + ncol - 1)] = parse.(Float64, line)
idx += ncol
end
@assert idx == n_x * n_y * n_z + 1
# to column-major
E[ib, :, :, :] = permutedims(Eib, [3, 2, 1])
end
@assert occursin("END_BANDGRID_3D", strip(readline(io)))
@assert strip(readline(io)) == "END_BLOCK_BANDGRID_3D"
end
end
if !isnothing(E)
_, n_x, n_y, n_z = size(E)
# the kpoint grid is a general grid, i.e., it includes the last kpoint
# which is periodic to the first kpoint
X = range(0, 1, n_x)
Y = range(0, 1, n_y)
Z = range(0, 1, n_z)
end
return (; fermi_energy, origin, span_vectors, X, Y, Z, E)
end
"""
$(SIGNATURES)
Write `bxsf` file.
# Arguments
- `fermi_energy`: Fermi energy in eV
- `origin`: `3`, Å⁻¹, origin of the grid
- `span_vectors`: `3 * 3`, Å⁻¹, each column is a spanning vector
- `E`: `n_bands * nx * ny * nz`, eigenvalues at each grid point
"""
function write_bxsf(
filename::AbstractString,
fermi_energy::T,
origin::AbstractVector{T},
span_vectors::AbstractMatrix{T},
E::AbstractArray{T,4},
) where {T<:Real}
size(origin) == (3,) || error("origin should be a 3-element vector")
size(span_vectors) == (3, 3) || error("span_vectors should be a 3×3 matrix")
@info "Writing bxsf file: " filename
io = open(filename, "w")
# header
@printf(io, "BEGIN_INFO\n")
@printf(io, " %s\n", default_header())
@printf(io, " Fermi Energy: %21.16f\n", fermi_energy)
@printf(io, "END_INFO\n\n")
@printf(io, "BEGIN_BLOCK_BANDGRID_3D\n")
@printf(io, "from_WannierIO.jl_code\n")
@printf(io, "BEGIN_BANDGRID_3D_fermi\n")
n_bands, n_x, n_y, n_z = size(E)
@printf(io, "%d\n", n_bands)
@printf(io, "%d %d %d\n", n_x, n_y, n_z)
@printf(io, "%12.7f %12.7f %12.7f\n", origin...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 1]...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 2]...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 3]...)
for ib in 1:n_bands
@printf(io, "BAND: %d\n", ib)
# row-major
ncol = 0
for i in 1:n_x
for j in 1:n_y
for k in 1:n_z
@printf(io, " %16.8e", E[ib, i, j, k])
ncol += 1
if ncol == 6
@printf(io, "\n")
ncol = 0
end
end
end
end
ncol != 0 && @printf(io, "\n")
end
@printf(io, "END_BANDGRID_3D\n")
@printf(io, "END_BLOCK_BANDGRID_3D\n")
close(io)
return nothing
end
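# Illustrative round-trip sketch (not part of the original file): write a small fake band
# grid to a temporary bxsf file and read it back. All values are hypothetical.
function _example_bxsf_roundtrip()
    fermi_energy = 0.0
    origin = zeros(3)
    span_vectors = Matrix(1.0I, 3, 3)   # Å⁻¹, hypothetical cubic reciprocal cell
    E = rand(2, 4, 4, 4)                # 2 bands on a 4 × 4 × 4 general grid
    filename = tempname() * ".bxsf"
    write_bxsf(filename, fermi_energy, origin, span_vectors, E)
    bxsf = read_bxsf(filename)
    return bxsf.E ≈ E                   # expected: true
end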
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 4083 | # Cube format
# Specification from http://paulbourke.net/dataformats/cube/
export read_cube, write_cube
"""
$(SIGNATURES)
Read `cube` file.
!!! note
    By default, `cube` files use Bohr units; here everything returned is in Cartesian coordinates in Å.
"""
function read_cube(filename::AbstractString)
@info "Reading cube file: " filename
io = open(filename)
# header
header = readline(io; keep=true)
header *= readline(io; keep=true)
print(header)
line = split(strip(readline(io)))
n_atoms = parse(Int, line[1])
origin = parse.(Float64, line[2:4])
# to Å unit
origin .*= Bohr
# number of voxels in domain
n_voxels = zeros(Int, 3)
voxel_vectors = zeros(Float64, 3, 3)
for i in 1:3
line = split(strip(readline(io)))
n_v = parse(Int, line[1])
n_voxels[i] = n_v
# bohr unit
voxel_vectors[:, i] = parse.(Float64, line[2:4])
end
# to Å unit
voxel_vectors .*= Bohr
atom_positions = zeros(Float64, 3, n_atoms)
atom_numbers = zeros(Int, n_atoms)
for i in 1:n_atoms
line = split(strip(readline(io)))
atom_numbers[i] = parse(Int, line[1])
charge = parse(Float64, line[2])
# cartesian coordinates, Bohr unit
atom_positions[:, i] = parse.(Float64, line[3:5])
end
# to Å unit
atom_positions .*= Bohr
n_x, n_y, n_z = n_voxels
    # grid coordinates in units of the voxel vectors, i.e., 0, 1, ..., n-1 along each direction
X = range(0, n_x-1, n_x)
Y = range(0, n_y-1, n_y)
Z = range(0, n_z-1, n_z)
W = zeros(Float64, n_x, n_y, n_z)
# 6 columns per line
d, r = divrem(n_z, 6)
if r > 0
nline = d + 1
else
nline = d
end
for ix in 1:n_x
for iy in 1:n_y
iz = 1
for _ in 1:nline
line = split(strip(readline(io)))
n = length(line)
# skip empty line
if n == 0
line = split(strip(readline(io)))
n = length(line)
end
W[ix, iy, iz:(iz + n - 1)] = parse.(Float64, line)
iz += n
end
end
end
close(io)
return (; atom_positions, atom_numbers, origin, voxel_vectors, X, Y, Z, W)
end
"""
$(SIGNATURES)
Write `cube` file.
# Arguments
- `atom_positions`: `3 * n_atoms`, Å, cartesian coordinates
- `atom_numbers`: `n_atoms`, atomic numbers
- `origin`: `3`, Å, origin of the grid
- `voxel_vectors`: `3 * 3`, Å, each column is a voxel vector
- `W`: `nx * ny * nz`, volumetric data
"""
function write_cube(
filename::AbstractString,
atom_positions::AbstractMatrix{T},
atom_numbers::AbstractVector{Int},
origin::AbstractVector{T},
voxel_vectors::AbstractMatrix{T},
W::AbstractArray{T,3},
) where {T<:Real}
n_atoms = length(atom_numbers)
size(atom_positions, 2) == n_atoms || error("incompatible n_atoms")
size(voxel_vectors) == (3, 3) || error("incompatible voxel_vectors")
length(origin) == 3 || error("origin must be 3-vector")
@info "Writing cube file: " filename
io = open(filename, "w")
# header
@printf(io, "%s\n", default_header())
@printf(io, "outer loop: x, middle loop: y, inner loop: z\n")
# to Bohr
origin_bohr = origin ./ Bohr
@printf(io, "%d %12.6f %12.6f %12.6f\n", n_atoms, origin_bohr...)
n_xyz = size(W)
for i in 1:3
# number of voxels
n_v = n_xyz[i]
ax = voxel_vectors[:, i] ./ Bohr
@printf(io, "%d %12.6f %12.6f %12.6f\n", n_v, ax...)
end
for i in 1:n_atoms
n = atom_numbers[i]
charge = 1.0
pos = atom_positions[:, i] ./ Bohr
@printf(io, "%d %12.6f %12.6f %12.6f %12.6f\n", n, charge, pos...)
end
for ix in 1:n_xyz[1]
for iy in 1:n_xyz[2]
for iz in 1:n_xyz[3]
@printf(io, "%12.6g ", W[ix, iy, iz])
if (iz % 6 == 0)
@printf(io, "\n")
end
end
@printf(io, "\n")
end
end
close(io)
return nothing
end
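# Illustrative round-trip sketch (not part of the original file): write a small fake
# volumetric grid for one atom to a temporary cube file and read it back. The loose
# tolerance accounts for the 6-significant-digit output format.
function _example_cube_roundtrip()
    atom_positions = zeros(3, 1)         # Å, a single atom at the origin
    atom_numbers = [14]                  # Si
    origin = zeros(3)
    voxel_vectors = Matrix(0.5I, 3, 3)   # Å, hypothetical voxel spacing
    W = rand(3, 3, 4)
    filename = tempname() * ".cube"
    write_cube(filename, atom_positions, atom_numbers, origin, voxel_vectors, W)
    cube = read_cube(filename)
    return isapprox(cube.W, W; rtol=1e-5)   # expected: true
end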
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 6547 | # XSF format
# Specification from http://www.xcrysden.org/doc/XSF.html
export read_xsf, write_xsf
"""
$(SIGNATURES)
Read `xsf` file.
# Return
- `primvec`: `3 * 3`, Å, each column is a primitive lattice vector
- `convvec`: `3 * 3`, Å, each column is a conventional lattice vector
- `atoms`: `n_atoms` String, atomic symbols or numbers
- `atom_positions`: length-`n_atoms` vector, Å, cartesian coordinates
- `origin`: `3`, Å, origin of the grid
- `span_vectors`: `3 * 3`, Å, each column is a spanning vector
- `X`: `nx`, fractional coordinate of grid points along the first spanning vector
- `Y`: `ny`, fractional coordinate of grid points along the second spanning vector
- `Z`: `nz`, fractional coordinate of grid points along the third spanning vector
- `W`: `nx * ny * nz`, volumetric data
!!! note
    Only supports reading one datagrid in `BLOCK_DATAGRID_3D`.
"""
function read_xsf(filename::AbstractString)
io = open(filename)
primvec = nothing
convvec = nothing
atoms = nothing
atom_positions = nothing
origin = nothing
span_vectors = nothing
X = Y = Z = nothing
W = nothing
while !eof(io)
line = strip(readline(io))
if isempty(line) || startswith(line, '#')
continue
end
if occursin("CRYSTAL", line)
# primitive lattice, each column is a lattice vector
@assert strip(readline(io)) == "PRIMVEC"
primvec = zeros(Float64, 3, 3)
for i in 1:3
line = strip(readline(io))
primvec[:, i] = parse.(Float64, split(line))
end
# conventional lattice, each column is a lattice vector
@assert strip(readline(io)) == "CONVVEC"
convvec = zeros(Float64, 3, 3)
for i in 1:3
line = strip(readline(io))
convvec[:, i] = parse.(Float64, split(line))
end
# read atom positions
@assert strip(readline(io)) == "PRIMCOORD"
line = strip(readline(io))
n_atom = parse(Int, split(line)[1])
atoms = Vector{String}(undef, n_atom)
# each column is a position vector
atom_positions = zeros(Vec3{Float64}, n_atom)
for i in 1:n_atom
line = strip(readline(io))
# might be element label, or atomic number
atoms[i] = split(line)[1]
atom_positions[i] = Vec3(parse.(Float64, split(line)[2:4])...)
end
elseif occursin("BEGIN_BLOCK_DATAGRID_3D", line)
comment = strip(readline(io))
line = strip(readline(io))
# I only read the 1st data grid, others are ignored
@assert startswith(line, "BEGIN_DATAGRID_3D")
# identifier = chopprefix(line, "BEGIN_DATAGRID_3D_")
ngx, ngy, ngz = parse.(Int, split(strip(readline(io))))
origin = parse.(Float64, split(strip(readline(io))))
# spanning vectors
span_vectors = zeros(Float64, 3, 3)
for i in 1:3
line = strip(readline(io))
span_vectors[:, i] = parse.(Float64, split(line))
end
# column-major
W = zeros(Float64, ngx, ngy, ngz)
idx = 1
while idx <= ngx * ngy * ngz
line = split(strip(readline(io)))
ncol = length(line)
W[idx:(idx + ncol - 1)] = parse.(Float64, line)
idx += ncol
end
@assert occursin("END_DATAGRID_3D", strip(readline(io)))
@assert strip(readline(io)) == "END_BLOCK_DATAGRID_3D"
end
end
if !isnothing(W)
n_x, n_y, n_z = size(W)
X = range(0, 1, n_x)
Y = range(0, 1, n_y)
Z = range(0, 1, n_z)
end
return (; primvec, convvec, atoms, atom_positions, origin, span_vectors, X, Y, Z, W)
end
"""
$(SIGNATURES)
Write `xsf` file.
# Arguments
- `lattice`: `3 * 3`, Å, each column is a lattice vector
- `atom_positions`: length-`n_atoms` vector, fractional coordinates
- `atom_numbers`: `n_atoms`, atomic numbers
- `origin`: `3`, Å, origin of the grid
- `span_vectors`: `3 * 3`, Å, each column is a spanning vector
- `W`: `nx * ny * nz`, volumetric data
"""
function write_xsf(
filename::AbstractString,
lattice::AbstractMatrix{T},
atom_positions::Vector{Vec3{T}},
atom_numbers::AbstractVector{Int},
origin::AbstractVector{T},
span_vectors::AbstractMatrix{T},
W::AbstractArray{T,3},
) where {T<:Real}
n_atoms = length(atom_numbers)
length(atom_positions) == n_atoms || error("incompatible n_atoms")
size(lattice) == (3, 3) || error("incompatible lattice")
size(span_vectors) == (3, 3) || error("incompatible span_vectors")
@info "Writing xsf file: " filename
io = open(filename, "w")
# header
@printf(io, "%s\n", default_header())
@printf(io, "CRYSTAL\n")
@printf(io, "PRIMVEC\n")
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 1]...)
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 2]...)
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 3]...)
@printf(io, "CONVVEC\n")
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 1]...)
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 2]...)
@printf(io, "%12.7f %12.7f %12.7f\n", lattice[:, 3]...)
@printf(io, "PRIMCOORD\n")
@printf(io, "%d 1\n", n_atoms)
for i in 1:n_atoms
pos = lattice * atom_positions[i]
@printf(io, "%d %12.7f %12.7f %12.7f\n", atom_numbers[i], pos...)
end
@printf(io, "\n")
@printf(io, "BEGIN_BLOCK_DATAGRID_3D\n")
@printf(io, "3D_field\n")
@printf(io, "BEGIN_DATAGRID_3D_UNKNOWN\n")
n_x, n_y, n_z = size(W)
@printf(io, "%d %d %d\n", n_x, n_y, n_z)
@printf(io, "%12.7f %12.7f %12.7f\n", origin...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 1]...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 2]...)
@printf(io, "%12.7f %12.7f %12.7f\n", span_vectors[:, 3]...)
# column-major
ncol = 0
for k in 1:n_z
for j in 1:n_y
for i in 1:n_x
@printf(io, " %13.5e", W[i, j, k])
ncol += 1
if ncol == 6
@printf(io, "\n")
ncol = 0
end
end
end
end
ncol != 0 && @printf(io, "\n")
@printf(io, "END_DATAGRID_3D\n")
@printf(io, "END_BLOCK_DATAGRID_3D\n")
close(io)
return nothing
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 5963 | export read_amn, write_amn
"""
read_amn(filename)
read_amn(filename, ::FortranText)
read_amn(filename, ::FortranBinaryStream)
Read wannier90 `amn` file.
# Return
- `A`: length-`n_kpts` vector, each element is a `n_bands * n_wann` matrix.
- `header`: first line of the file
Note there are three versions of this function: the 1st one is a wrapper
function that automatically detects the format (text or binary) of the file,
and does some additional pretty printing to give the user a quick hint of
the dimensions of the A matrix; it internally calls the 2nd or the 3rd version
for the actual reading.
Wannier90 only has a Fortran text format for `amn`; however, I wrote a custom version
of QE pw2wannier90.x that can output a Fortran binary format (using Fortran stream
IO) to save some disk space. The 1st function auto-detects the file format, so it is
transparent to the user.
"""
function read_amn end
function read_amn(filename::AbstractString, ::FortranText)
res = open("$filename") do io
header = strip(readline(io))
line = split(readline(io))
n_bands, n_kpts, n_wann = parse.(Int64, line[1:3])
A = [zeros(ComplexF64, n_bands, n_wann) for _ in 1:n_kpts]
while !eof(io)
line = split(readline(io))
m, n, k = parse.(Int64, line[1:3])
a = parse(Float64, line[4]) + im * parse(Float64, line[5])
A[k][m, n] = a
end
return (; A, header)
end
return res
end
function read_amn(filename::AbstractString, ::FortranBinaryStream)
# I use stream io to write amn, so I should use plain julia `open`
# io = FortranFile("$filename")
res = open("$filename") do io
header_len = 60
header = read(io, FString{header_len})
# From FString to String
header = strip(String(header))
# gfortran default integer size = 4
# https://gcc.gnu.org/onlinedocs/gfortran/KIND-Type-Parameters.html
Tint = Int32
n_bands = read(io, Tint)
n_kpts = read(io, Tint)
n_wann = read(io, Tint)
A = [zeros(ComplexF64, n_bands, n_wann) for _ in 1:n_kpts]
while !eof(io)
m = read(io, Tint)
n = read(io, Tint)
k = read(io, Tint)
r = read(io, Float64)
i = read(io, Float64)
A[k][m, n] = r + im * i
end
return (; A, header)
end
return res
end
function read_amn(filename::AbstractString)
if isbinary(filename)
format = FortranBinaryStream()
else
format = FortranText()
end
A, header = read_amn(filename, format)
n_kpts = length(A)
@assert n_kpts > 0 "A is empty"
n_bands, n_wann = size(A[1])
@info "Reading amn file" filename header n_kpts n_bands n_wann
# I do not return header here, since
# - it is already printed by @info
# - user can directly use `A = read_amn(filename)` to load it, without
# the need to unpack the NamedTuple
return A
end
"""
write_amn(filename, A; header=default_header(), binary=false)
write_amn(filename, A, ::FortranText; header=default_header())
write_amn(filename, A, ::FortranBinaryStream; header=default_header())
Write wannier90 `amn` file.
# Arguments
- `filename`: output filename
- `A`: a length-`n_kpts` vector, each element is a `n_bands * n_wann` matrix
# Keyword arguments
- `header`: 1st line of the file
- `binary`: write as Fortran unformatted file, which is the Wannier90 default.
Here the `binary` kwargs is provided for convenience.
Same as [`read_amn`](@ref) there are three versions of this function, the 1st
one is a wrapper function, it calls the 2nd or the 3rd version depending on
the `binary` kwargs.
"""
function write_amn end
function write_amn(
filename::AbstractString, A::AbstractVector, ::FortranText; header=default_header()
)
n_kpts = length(A)
@assert n_kpts > 0 "A is empty"
n_bands, n_wann = size(A[1])
open(filename, "w") do io
write(io, header, "\n")
@printf(io, "%3d %4d %4d\n", n_bands, n_kpts, n_wann)
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_bands
a = A[ik][ib, iw]
@printf(
io, "%5d %4d %4d %16.12f %16.12f\n", ib, iw, ik, real(a), imag(a)
)
end
end
end
end
return nothing
end
function write_amn(
filename::AbstractString,
A::AbstractVector,
::FortranBinaryStream;
header=default_header(),
)
n_kpts = length(A)
@assert n_kpts > 0 "A is empty"
n_bands, n_wann = size(A[1])
# I write in Fortran stream io format.
open(filename, "w") do io
# I need to convert to String instead of SubString, for FString
header = String(header)
header_len = 60
header = FString(header_len, header)
write(io, header)
# gfortran default integer is 4 bytes
Tint = Int32
write(io, Tint(n_bands))
write(io, Tint(n_kpts))
write(io, Tint(n_wann))
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_bands
write(io, Tint(ib))
write(io, Tint(iw))
write(io, Tint(ik))
a = A[ik][ib, iw]
write(io, Float64(real(a)))
write(io, Float64(imag(a)))
end
end
end
end
return nothing
end
function write_amn(
filename::AbstractString, A::AbstractVector; header=default_header(), binary=false
)
n_kpts = length(A)
@assert n_kpts > 0 "A is empty"
n_bands, n_wann = size(A[1])
@info "Writing amn file" filename header n_kpts n_bands n_wann
if binary
format = FortranBinaryStream()
else
format = FortranText()
end
return write_amn(filename, A, format; header)
end
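# Illustrative round-trip sketch (not part of the original file): write a random gauge
# matrix in both text and binary format and check that both read back correctly.
function _example_amn_roundtrip()
    n_kpts, n_bands, n_wann = 2, 4, 3
    A = [rand(ComplexF64, n_bands, n_wann) for _ in 1:n_kpts]
    ftxt = tempname() * ".amn"
    fbin = tempname() * ".amn"
    write_amn(ftxt, A)                 # Fortran text (default)
    write_amn(fbin, A; binary=true)    # Fortran binary stream
    Atxt = read_amn(ftxt)              # format is auto-detected
    Abin = read_amn(fbin)
    return all(Atxt .≈ A) && all(Abin .≈ A)   # expected: true
end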
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 9445 | using DelimitedFiles: readdlm
export read_w90_band,
write_w90_band,
read_w90_band_kpt,
read_w90_band_dat,
read_w90_band_labelinfo,
write_w90_band_kpt,
write_w90_band_dat,
write_w90_band_labelinfo
"""
$(SIGNATURES)
Read a `prefix_band.kpt` file.
# Return
- `kpoints`: a vector of length `n_kpts`, fractional coordinates
- `kweights`: a vector of length `n_kpts`, weights of kpoints
"""
function read_w90_band_kpt(filename::AbstractString)
# in fractional coordinates
kpoints = readdlm(filename, Float64; skipstart=1)
kweights = kpoints[:, 4]
# remove weights
kpoints = map(1:size(kpoints, 1)) do i
Vec3(kpoints[i, 1:3])
end
return (; kpoints, kweights)
end
"""
$(SIGNATURES)
Read `prefix_band.dat` file generated by `wannier90.x`, or `prefix-band.dat` file
generated by `postw90.x`.
# Return
- `x`: `n_kpts`, x axis value of kpath, in cartesian length
- `eigenvalues`: length-`n_kpts` vector, each element is a length-`n_bands` vector
of band energies
- `extras`: optional (the `postw90.x` might generate a file with a third column),
same size as `eigenvalues`, often the color of each eigenvalue,
e.g., spin projection
"""
function read_w90_band_dat(filename::AbstractString)
res = open(filename) do io
# Unfortunately I need to read the whole file twice:
# 1st time to get n_kpts, 2nd time to get data
n_kpts = 0
while true
line = strip(readline(io))
isempty(line) && break
n_kpts += 1
end
seekstart(io)
dat = readdlm(io, Float64)
x = reshape(dat[:, 1], n_kpts, :)[:, 1]
eigenvalues = reshape(dat[:, 2], n_kpts, :)
eigenvalues = [Vector(e) for e in eachrow(eigenvalues)]
# the prefix-bands.dat generated by postw90.x might have an additional
# column for the color of the bands, e.g., spin projection
if size(dat, 2) == 3
extras = reshape(dat[:, 3], n_kpts, :)
extras = [Vector(e) for e in eachrow(extras)]
return (; x, eigenvalues, extras)
end
return (; x, eigenvalues)
end
return res
end
"""
$(SIGNATURES)
Read `prefix_band.labelinfo` file.
# Return
- `symm_point_indices`: index of high-symmetry points in `prefix_band.dat`
- `symm_point_labels`: name of high-symmetry points
"""
function read_w90_band_labelinfo(filename::AbstractString)
labels = open(filename) do io
readlines(io)
end
n_symm = length(labels)
symm_point_indices = Vector{Int}(undef, n_symm)
symm_point_labels = Vector{String}(undef, n_symm)
for (i, line) in enumerate(labels)
lab, idx = split(line)[1:2]
symm_point_indices[i] = parse(Int, idx)
symm_point_labels[i] = lab
end
return (; symm_point_indices, symm_point_labels)
end
"""
$(SIGNATURES)
Read `prefix_band.dat`, `prefix_band.kpt`, `prefix_band.labelinfo.dat`.
# Arguments
- `prefix`: *prefix* of the filenames (or called seedname in wannier90), NOT the full filename.
# Return
- `x`: `n_kpts`, x axis value of kpath, in cartesian length
- `eigenvalues`: length-`n_kpts` vector, each element is a length-`n_bands` vector of band energies
- `kpoints`: a vector of length `n_kpts`, fractional coordinates
- `kweights`: a vector of length `n_kpts`, weights of kpoints
- `symm_point_indices`: index of high-symmetry points in `prefix_band.dat`
- `symm_point_labels`: name of high-symmetry points
"""
function read_w90_band(prefix::AbstractString)
band_dat = prefix * "_band.dat"
band_kpt = prefix * "_band.kpt"
band_labelinfo = prefix * "_band.labelinfo.dat"
dat = read_w90_band_dat(band_dat)
kpt = read_w90_band_kpt(band_kpt)
labelinfo = read_w90_band_labelinfo(band_labelinfo)
n_kpts = length(kpt.kpoints)
n_symm = length(labelinfo.symm_point_indices)
@info "Reading Wannier90 band files" band_dat band_kpt band_labelinfo n_kpts n_symm
return (; dat..., kpt..., labelinfo...)
end
"""
$(SIGNATURES)
Wannier90's default kweights in `prefix_band.kpt` are all 1.0.
"""
default_band_kpt_kweights(kpoints::AbstractVector) = ones(length(kpoints))
"""
$(SIGNATURES)
Write `prefix_band.kpt` file.
# Arguments
- `filename`: filename of `prefix_band.kpt`
# Keyword Arguments
- `kpoints`: length-`n_kpts` vector, fractional coordinates
- `kweights`: `n_kpts`, optional, weights of kpoints, default to 1.0.
"""
function write_w90_band_kpt(
filename::AbstractString;
kpoints::AbstractVector,
kweights::AbstractVector=default_band_kpt_kweights(kpoints),
)
n_kpts = length(kpoints)
length(kweights) == n_kpts || error("kweights must have same length as kpoints")
open(filename, "w") do io
@printf(io, " %5d\n", n_kpts)
for (k, w) in zip(kpoints, kweights)
length(k) == 3 || error("kpoint must be 3-vector")
@printf(io, " %10.6f %10.6f %10.6f %10.6f\n", k..., w)
end
end
end
"""
$(SIGNATURES)
Write `prefix_band.dat` file.
# Arguments
- `filename`: filename of `prefix_band.dat`
# Keyword Arguments
- `x`: `n_kpts`, x axis value, in cartesian length
- `eigenvalues`: length-`n_kpts` vector, each element is a length-`n_bands`
vector of band energies
- `extras`: optional, same size as `eigenvalues`, will be written as the third
column of `prefix_band.dat`. The `prefix-band.dat` file generated by
`postw90.x` sometimes has a third column for e.g. the color of the eigenvalues
"""
function write_w90_band_dat(
filename::AbstractString;
x::AbstractVector,
eigenvalues::AbstractVector{<:AbstractVector},
extras::Union{AbstractVector,Nothing}=nothing,
)
n_kpts = length(eigenvalues)
@assert n_kpts > 0 "eigenvalues is empty"
n_bands = length(eigenvalues[1])
length(x) == n_kpts || error("x must has same length as eigenvalues")
open(filename, "w") do io
for ib in 1:n_bands
for ik in 1:n_kpts
if isnothing(extras)
@printf(io, " %15.8E %15.8E\n", x[ik], eigenvalues[ik][ib])
else
@printf(
io,
" %15.8E %15.8E %15.8E\n",
x[ik],
eigenvalues[ik][ib],
extras[ik][ib]
)
end
end
@printf(io, "\n")
end
end
end
"""
$(SIGNATURES)
Write `prefix_band.labelinfo` file.
# Arguments
- `filename`: filename of `prefix_band.labelinfo`
# Keyword Arguments
- `x`: `n_kpts`-vector, x axis value, in cartesian length
- `kpoints`: length-`n_kpts` vector, fractional coordinates
- `symm_point_indices`: index of high-symmetry points in `prefix_band.dat`
- `symm_point_labels`: name of high-symmetry points
"""
function write_w90_band_labelinfo(
filename::AbstractString;
x::AbstractVector{<:Real},
kpoints::AbstractVector,
symm_point_indices::AbstractVector{<:Integer},
symm_point_labels::AbstractVector,
)
n_symm = length(symm_point_indices)
n_symm == length(symm_point_labels) ||
error("symm_idx and symm_label must have same length")
open(filename, "w") do io
for i in 1:n_symm
idx = symm_point_indices[i]
kpt = kpoints[idx]
length(kpt) == 3 || error("kpoint must be 3-vector")
@printf(
io,
"%2s %31d %20.10f %17.10f %17.10f %17.10f\n",
symm_point_labels[i],
idx,
x[idx],
kpt...
)
end
end
end
"""
$(SIGNATURES)
Write `prefix_band.dat, prefix_band.kpt, prefix_band.labelinfo.dat`.
# Arguments
- `prefix`: prefix of `prefix_band.dat, prefix_band.kpt, prefix_band.labelinfo.dat`
# Keyword Arguments
- `x`: `n_kpts`, x axis value, in cartesian length
- `eigenvalues`: length-`n_kpts` vector, each element is a length-`n_bands` vector of band energies
- `kpoints`: length-`n_kpts` vector, fractional coordinates
- `kweights`: a vector of length `n_kpts`, weights of kpoints
- `symm_point_indices`: index of high-symmetry points in `prefix_band.dat`
- `symm_point_labels`: name of high-symmetry points
"""
function write_w90_band(
prefix::AbstractString;
x::AbstractVector,
eigenvalues::AbstractVector{<:AbstractVector},
kpoints::AbstractVector,
kweights::AbstractVector=default_band_kpt_kweights(kpoints),
symm_point_indices::AbstractVector,
symm_point_labels::AbstractVector,
)
n_kpts = length(kpoints)
length(eigenvalues) == n_kpts || error("kpoints and eigenvalues have different n_kpts")
length(kweights) == n_kpts || error("kpoints and kweights have different n_kpts")
length(x) == n_kpts || error("kpoints and x have different n_kpts")
n_symm = length(symm_point_indices)
n_symm == length(symm_point_labels) ||
error("symm_idx and symm_label must have same length")
band_kpt = "$(prefix)_band.kpt"
band_dat = "$(prefix)_band.dat"
band_labelinfo = "$(prefix)_band.labelinfo.dat"
@info "Writing Wannier90 band files" band_kpt band_dat band_labelinfo n_kpts n_symm
write_w90_band_dat(band_dat; x, eigenvalues)
write_w90_band_kpt(band_kpt; kpoints, kweights)
return write_w90_band_labelinfo(
band_labelinfo; x, kpoints, symm_point_indices, symm_point_labels
)
end
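# Usage sketch (illustrative, not part of the original API): read an existing set of
# `prefix_band.*` files and write them back under a new prefix. The prefixes are
# hypothetical, and a plain two-column `prefix_band.dat` (no `extras`) is assumed.
function _example_w90_band_copy(prefix::AbstractString="si2", out_prefix::AbstractString="si2_copy")
    band = read_w90_band(prefix)
    write_w90_band(
        out_prefix;
        x=band.x,
        eigenvalues=band.eigenvalues,
        kpoints=band.kpoints,
        kweights=band.kweights,
        symm_point_indices=band.symm_point_indices,
        symm_point_labels=band.symm_point_labels,
    )
    return band
end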
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |
|
[
"MIT"
] | 0.2.6 | 3675a287430806f9b48d341eb0abf44e3074d1fa | code | 20049 | export read_chk, write_chk, get_U, get_Udis
"""
Struct for storing matrices in `prefix.chk` file.
$(TYPEDEF)
One-to-one mapping to the wannier90 `chk` file, but renaming the variable
names so that they are consistent with the rest of the code.
# Fields
$(FIELDS)
"""
struct Chk{T<:Real}
"The header line, usually contains date and time"
header::String
"""number of bands, can be auto set in constructor according to dimensions
of other variables"""
n_bands::Int
"number of excluded bands, can be auto set in constructor"
n_exclude_bands::Int
"""Indices of excluded bands, starts from 1.
Vector of integers, size: `n_exclude_bands`"""
exclude_bands::Vector{Int}
"Matrix of size 3 x 3, each column is a lattice vector in Å unit"
lattice::Mat3{T}
"Matrix of size 3 x 3, each column is a reciprocal lattice vector in Å⁻¹ unit"
recip_lattice::Mat3{T}
"number of kpoints, can be auto set in constructor"
n_kpts::Int
"dimensions of kpoint grid, 3 integers"
kgrid::Vec3{Int}
"kpoint coordinates, fractional, length-`n_kpts` vector"
kpoints::Vector{Vec3{T}}
"number of b-vectors, can be auto set in constructor"
n_bvecs::Int
"number of Wannier functions, can be auto set in constructor"
n_wann::Int
"""a string to indicate the current step (after disentanglement, after
maximal localization, ...) in wannier90"""
checkpoint::String
"""Have finished disentanglement or not"""
have_disentangled::Bool
"Omega invariant part of MV spreads, in Ų unit"
ΩI::T
"""
Indices of bands taking part in disentanglement, not frozen bands!
length-`n_kpts` vector, each element is a length-`n_bands` vector of bool.
This is needed since W90 puts all the disentanglement bands
in the first several rows of `Udis`,
(and the first few columns of `Udis` are the frozen bands)
so directly multiplying eigenvalues e.g.
`(Udis * U)' * diag(eigenvalues) * (Udis * U)` is wrong!
"""
dis_bands::Vector{BitVector}
"""number of bands taking part in disentanglement at each kpoint.
can be auto set in constructor from `dis_bands`"""
n_dis::Vector{Int}
"""Semi-unitary matrix for disentanglement,
length-`n_kpts` vector, each elment has size: `n_bands` x `n_wann`,
i.e., the `u_matrix_opt` in wannier90"""
Udis::Vector{Matrix{Complex{T}}}
"""Unitary matrix for maximal localization,
length-`n_kpts` vector, each element has size: `n_wann` x `n_wann`,
i.e., the `u_matrix` in wannier90.
The abbreviation `ml` stands for maximal localization, so as to
differentiate from the (combined) unitary matrix `U = Udis * Uml`."""
Uml::Vector{Matrix{Complex{T}}}
"""Wannier-gauge overlap matrix,
length-`n_kpts` vector of length-`n_bvecs` vector, each element is
a matrix of size `n_wann` x `n_wann`,
i.e., the `m_matrix` in wannier90"""
M::Vector{Vector{Matrix{Complex{T}}}}
"""Wannier function centers, length-`n_wann` vector, Cartesian coordinates
in Å unit, i.e., the `wannier_centres` variable in wannier90"""
r::Vector{Vec3{T}}
"""Wannier function spreads, length-`n_wann` vector, Ų unit,
i.e., the `wannier_spreads` variable in wannier90"""
ω::Vector{T}
end
"""
$(SIGNATURES)
Convenience constructor of [`Chk`](@ref) struct that auto set some fields.
"""
function Chk(
header::AbstractString,
exclude_bands::AbstractVector{Int},
lattice::AbstractMatrix,
recip_lattice::AbstractMatrix,
kgrid::AbstractVector{<:Integer},
kpoints::AbstractVector,
checkpoint::AbstractString,
have_disentangled::Bool,
ΩI::Real,
dis_bands::AbstractVector{BitVector},
Udis::AbstractVector,
Uml::AbstractVector,
M::AbstractVector,
r::AbstractVector,
ω::AbstractVector,
)
if have_disentangled
@assert length(Udis) > 0 "empty Udis"
n_bands = size(Udis[1], 1)
else
@assert length(Uml) > 0 "empty Uml"
n_bands = size(Uml[1], 1)
end
n_exclude_bands = length(exclude_bands)
n_kpts = length(M)
@assert n_kpts > 0 "empty M"
n_bvecs = length(M[1])
@assert length(Uml) > 0 "empty Uml"
n_wann = size(Uml[1], 1)
if have_disentangled
n_dis = zeros(Int, n_kpts)
for ik in 1:n_kpts
n_dis[ik] = count(dis_bands[ik])
end
else
n_dis = zeros(Int, 0)
end
return Chk(
header,
n_bands,
n_exclude_bands,
exclude_bands,
Mat3(lattice),
Mat3(recip_lattice),
n_kpts,
kgrid,
kpoints,
n_bvecs,
n_wann,
checkpoint,
have_disentangled,
ΩI,
dis_bands,
n_dis,
collect.(Udis),
collect.(Uml),
M,
r,
ω,
)
end
"""
read_chk(filename)
read_chk(filename, ::FortranText)
read_chk(filename, ::FortranBinary)
Read wannier90 `chk` checkpoint file.
Similar to [`read_amn`](@ref), the 1st version auto detect `chk` file format
(binary or text) and read it.
"""
function read_chk(filename::AbstractString, ::FortranText)
chk = open(filename) do io
# strip and read line
srline() = strip(readline(io))
# Read formatted chk file
header = String(srline())
n_bands = parse(Int, srline())
n_exclude_bands = parse(Int, srline())
exclude_bands = zeros(Int, n_exclude_bands)
if n_exclude_bands > 0
for i in 1:n_exclude_bands
exclude_bands[i] = parse(Int, srline())
end
end
# Each column is a lattice vector
# but W90 writes x components first, then y, z. NOT a1 first, then a2, a3.
line = parse.(Float64, split(srline()))
lattice = Mat3{Float64}(reshape(line, (3, 3))')
# Each column is a lattice vector
line = parse.(Float64, split(srline()))
recip_lattice = Mat3{Float64}(reshape(line, (3, 3))')
n_kpts = parse(Int, srline())
kgrid = Vec3{Int}(parse.(Int, split(srline())))
kpoints = zeros(Vec3{Float64}, n_kpts)
for ik in 1:n_kpts
kpoints[ik] = Vec3(parse.(Float64, split(srline()))...)
end
n_bvecs = parse(Int, srline())
n_wann = parse(Int, srline())
checkpoint = String(srline())
# 1 -> True, 0 -> False
have_disentangled = Bool(parse(Int, srline()))
if have_disentangled
# omega_invariant
ΩI = parse(Float64, srline())
dis_bands = [falses(n_bands) for _ in 1:n_kpts]
for ik in 1:n_kpts
for ib in 1:n_bands
# 1 -> True, 0 -> False
dis_bands[ik][ib] = Bool(parse(Int, srline()))
end
end
n_dis = zeros(Int, n_kpts)
for ik in 1:n_kpts
n_dis[ik] = parse(Int, srline())
@assert n_dis[ik] == count(dis_bands[ik])
end
# u_matrix_opt
Udis = [zeros(ComplexF64, n_bands, n_wann) for _ in 1:n_kpts]
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_bands
vals = parse.(Float64, split(srline()))
Udis[ik][ib, iw] = vals[1] + im * vals[2]
end
end
end
else
ΩI = -1.0
dis_bands = BitVector[]
n_dis = Int[]
Udis = Matrix{ComplexF64}[]
end
# u_matrix
Uml = [zeros(ComplexF64, n_wann, n_wann) for _ in 1:n_kpts]
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_wann
vals = parse.(Float64, split(srline()))
Uml[ik][ib, iw] = vals[1] + im * vals[2]
end
end
end
# m_matrix
M = [[zeros(ComplexF64, n_wann, n_wann) for _ in 1:n_bvecs] for _ in 1:n_kpts]
for ik in 1:n_kpts
for inn in 1:n_bvecs
for iw in 1:n_wann
for ib in 1:n_wann
vals = parse.(Float64, split(srline()))
M[ik][inn][ib, iw] = vals[1] + im * vals[2]
end
end
end
end
# wannier_centres
r = zeros(Vec3{Float64}, n_wann)
for iw in 1:n_wann
r[iw] = Vec3(parse.(Float64, split(srline()))...)
end
# wannier_spreads
ω = zeros(Float64, n_wann)
for iw in 1:n_wann
ω[iw] = parse(Float64, srline())
end
return Chk(
header,
exclude_bands,
lattice,
recip_lattice,
kgrid,
kpoints,
checkpoint,
have_disentangled,
ΩI,
dis_bands,
Udis,
Uml,
M,
r,
ω,
)
end
return chk
end
function read_chk(filename::AbstractString, ::FortranBinary)
io = FortranFile(filename)
# strip and read line
header_len = 33
header = trimstring(read(io, FString{header_len}))
# gfortran default integer is 4 bytes
Tint = Int32
n_bands = read(io, Tint)
n_exclude_bands = read(io, Tint)
exclude_bands = zeros(Int, n_exclude_bands)
exclude_bands .= read(io, (Tint, n_exclude_bands))
# I don't know why but I need to skip a record here
# probably because the FortranFiles.read does not handle 0-length arrays correctly
n_exclude_bands == 0 && read(io)
# Each column is a lattice vector
# but W90 writes x components first, then y, z. Not a1 first, then a2, a3.
lattice = read(io, (Float64, 3, 3))
lattice = Mat3{Float64}(lattice')
# Each column is a lattice vector
recip_lattice = read(io, (Float64, 3, 3))
recip_lattice = Mat3{Float64}(recip_lattice')
n_kpts = read(io, Tint)
kgrid = Vec3{Int}(read(io, (Tint, 3)))
kpoints = zeros(Vec3{Float64}, n_kpts)
read(io, kpoints)
n_bvecs = read(io, Tint)
n_wann = read(io, Tint)
checkpoint = trimstring(read(io, FString{20}))
# treat Bool as Int32
# 1 -> true, 0 -> false
have_disentangled = parse_bool(read(io, Tint))
if have_disentangled
# omega_invariant
ΩI = read(io, Float64)
tmp = parse_bool.(read(io, (Tint, n_bands, n_kpts)))
dis_bands = [tmp[:, i] for i in 1:n_kpts]
n_dis = zeros(Int, n_kpts)
n_dis .= read(io, (Tint, n_kpts))
for ik in 1:n_kpts
@assert n_dis[ik] == count(dis_bands[ik])
end
# u_matrix_opt
U_tmp = zeros(ComplexF64, n_bands, n_wann, n_kpts)
read(io, U_tmp)
Udis = [U_tmp[:, :, ik] for ik in 1:n_kpts]
else
ΩI = -1.0
dis_bands = BitVector[]
n_dis = Int[]
Udis = Matrix{ComplexF64}[]
end
# u_matrix
Uml = zeros(ComplexF64, n_wann, n_wann, n_kpts)
read(io, Uml)
# m_matrix
M = zeros(ComplexF64, n_wann, n_wann, n_bvecs, n_kpts)
read(io, M)
# wannier_centres
r = zeros(Float64, 3, n_wann)
read(io, r)
# wannier_spreads
ω = zeros(Float64, n_wann)
read(io, ω)
close(io)
return Chk(
header,
exclude_bands,
lattice,
recip_lattice,
kgrid,
kpoints,
checkpoint,
have_disentangled,
ΩI,
dis_bands,
Udis,
[Uml[:, :, ik] for ik in 1:n_kpts],
[[M[:, :, ib, ik] for ib in 1:n_bvecs] for ik in 1:n_kpts],
[Vec3(r[:, iw]) for iw in 1:n_wann],
ω,
)
end
function read_chk(filename::AbstractString)
if isbinary(filename)
format = FortranBinary()
else
format = FortranText()
end
chk = read_chk(filename, format)
n_kpts = chk.n_kpts
n_bands = chk.n_bands
n_wann = chk.n_wann
@info "Reading chk file" filename n_kpts n_bands n_wann
return chk
end
"""
write_chk(filename, chk::Chk; binary=false)
write_chk(filename, chk::Chk, ::FortranText)
write_chk(filename, chk::Chk, ::FortranBinary)
Write wannier90 `chk` file.
Similar to [`write_amn`](@ref), the 1st version is a convenience wrapper.
# Keyword arguments
- `binary`: write as Fortran binary file or not. Although wannier90 default
is Fortran binary format, here the default is `false` since Fortran binary
depends on compiler and platform, so it is not guaranteed to always work.
"""
function write_chk end
function write_chk(filename::AbstractString, chk::Chk, ::FortranText)
open(filename, "w") do io
n_bands = chk.n_bands
n_wann = chk.n_wann
n_kpts = chk.n_kpts
n_bvecs = chk.n_bvecs
# Write formatted chk file
@printf(io, "%33s\n", chk.header)
@printf(io, "%d\n", n_bands)
@printf(io, "%d\n", chk.n_exclude_bands)
if chk.n_exclude_bands > 0
for i in 1:(chk.n_exclude_bands)
@printf(io, "%d\n", chk.exclude_bands[i])
end
end
# Each column is a lattice vector
# but W90 writes x components first, then y, z. Not a1 first, then a2, a3.
for v in reshape(chk.lattice', 9)
@printf(io, "%25.17f", v)
end
@printf(io, "\n")
# Each column is a lattice vector
for v in reshape(chk.recip_lattice', 9)
@printf(io, "%25.17f", v)
end
@printf(io, "\n")
@printf(io, "%d\n", n_kpts)
@printf(io, "%d %d %d\n", chk.kgrid...)
for kpt in chk.kpoints
@printf(io, "%25.17f %25.17f %25.17f\n", kpt...)
end
@printf(io, "%d\n", n_bvecs)
@printf(io, "%d\n", n_wann)
# left-justified
@printf(io, "%-20s\n", chk.checkpoint)
# 1 -> True, 0 -> False
# v = chk.have_disentangled ? 1 : 0
@printf(io, "%d\n", chk.have_disentangled)
if chk.have_disentangled
# omega_invariant
@printf(io, "%25.17f\n", chk.ΩI)
for ik in 1:n_kpts
for ib in 1:n_bands
# 1 -> True, 0 -> False
@printf(io, "%d\n", chk.dis_bands[ik][ib])
end
end
for ik in 1:n_kpts
@printf(io, "%d\n", chk.n_dis[ik])
end
# u_matrix_opt
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_bands
v = chk.Udis[ik][ib, iw]
@printf(io, "%25.17f %25.17f\n", real(v), imag(v))
end
end
end
end
# u_matrix
for ik in 1:n_kpts
for iw in 1:n_wann
for ib in 1:n_wann
v = chk.Uml[ik][ib, iw]
@printf(io, "%25.17f %25.17f\n", real(v), imag(v))
end
end
end
# m_matrix
for ik in 1:n_kpts
for inn in 1:n_bvecs
for iw in 1:n_wann
for ib in 1:n_wann
v = chk.M[ik][inn][ib, iw]
@printf(io, "%25.17f %25.17f\n", real(v), imag(v))
end
end
end
end
# wannier_centres
for iw in 1:n_wann
@printf(io, "%25.17f %25.17f %25.17f\n", chk.r[iw]...)
end
# wannier_spreads
for iw in 1:n_wann
@printf(io, "%25.17f\n", chk.ω[iw])
end
end
end
function write_chk(filename::AbstractString, chk::Chk, ::FortranBinary)
io = FortranFile(filename, "w")
n_bands = chk.n_bands
n_wann = chk.n_wann
n_kpts = chk.n_kpts
n_bvecs = chk.n_bvecs
write(io, FString(33, chk.header))
# gfortran default integer is 4 bytes
Tint = Int32
write(io, Tint(n_bands))
write(io, Tint(chk.n_exclude_bands))
write(io, Vector{Tint}(chk.exclude_bands))
# Each column is a lattice vector
# but W90 writes x components first, then y, z. Not a1 first, then a2, a3.
write(io, Vector{Float64}(reshape(chk.lattice', 9)))
# Each column is a lattice vector
write(io, Vector{Float64}(reshape(chk.recip_lattice', 9)))
write(io, Tint(n_kpts))
write(io, Vector{Tint}(chk.kgrid))
# size = 3 * n_kpts
# Use `reduce(hcat, kpoints)`, which is much faster than `hcat(kpoints...)`
kpoints = reduce(hcat, chk.kpoints)
write(io, Matrix{Float64}(kpoints))
write(io, Tint(n_bvecs))
write(io, Tint(n_wann))
# left-justified
write(io, FString(20, chk.checkpoint))
# true -> 1, false -> 0
write(io, Tint(chk.have_disentangled))
# concatenate along dims=3
cat3(A...) = cat(A...; dims=3)
if chk.have_disentangled
# omega_invariant
write(io, Float64(chk.ΩI))
# true -> 1, false -> 0
write(io, Matrix{Tint}(reduce(hcat, chk.dis_bands)))
write(io, Vector{Tint}(chk.n_dis))
# u_matrix_opt
write(io, Array{ComplexF64}(reduce(cat3, chk.Udis)))
end
# u_matrix
write(io, Array{ComplexF64}(reduce(cat3, chk.Uml)))
# m_matrix
M = zeros(ComplexF64, n_wann, n_wann, n_bvecs, n_kpts)
for ik in 1:n_kpts
for ibvec in 1:n_bvecs
M[:, :, ibvec, ik] = chk.M[ik][ibvec]
end
end
write(io, M)
# wannier_centres
r = reduce(hcat, chk.r)
write(io, Matrix{Float64}(r))
# wannier_spreads
write(io, Vector{Float64}(chk.ω))
close(io)
return nothing
end
function write_chk(filename::AbstractString, chk::Chk; binary=false)
n_kpts = chk.n_kpts
n_bands = chk.n_bands
n_wann = chk.n_wann
@info "Writing chk file" filename n_kpts n_bands n_wann
if binary
format = FortranBinary()
else
format = FortranText()
end
return write_chk(filename, chk, format)
end
"""
$(SIGNATURES)
Extract the combined `U = Udis * Uml` matrices from `Chk`.
"""
function get_U(chk::Chk)
if !chk.have_disentangled
# Return deepcopy for safety, so that chk.Uml is not modified
return deepcopy(chk.Uml)
end
Udis = get_Udis(chk)
return map(zip(Udis, chk.Uml)) do (d, m)
d * m
end
end
"""
$(SIGNATURES)
Extract disentanglement `Udis` matrices from `Chk`.
"""
function get_Udis(chk::Chk)
n_kpts = chk.n_kpts
n_bands = chk.n_bands
n_wann = chk.n_wann
T = eltype(chk.Uml[1])
if !chk.have_disentangled
return [diagm(0 => ones(T, n_wann)) for _ in 1:n_kpts]
end
# Need to permute wavefunctions since Udis is stored in a way that
# the bands taking part in disentanglement are in the first few rows.
# Construct identity matrix
Iᵏ = Matrix{T}(I, n_bands, n_bands)
return map(1:n_kpts) do ik
# sortperm is stable, and
# need descending order (dis bands at the front)
p = sortperm(chk.dis_bands[ik]; order=Base.Order.Reverse)
# usually we don't need this permutation, but if
# 1. the dis_win_min > minimum(E), then these below
# dis_win_min bands are shifted to the last rows of Udis
# 2. use projectability disentanglement, then
# there might be cases that the lower (bonding) and
# higher (anti-bonding) bands participate in disentanglement,
# but some low-projectability bands are excluded from
# disentanglement, then these low-proj bands are shifted to
# the last rows of Udis
# so we need to permute the Bloch states before multiplying Udis
# chk.Udis: semi-unitary matrices from disentanglement
# chk.Uml: unitary matrices from maximal localization
Iᵏ[:, p] * chk.Udis[ik]
end
end
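# Usage sketch (illustrative, not part of the original API): combine the gauge matrices
# from a chk file with band energies to build the Wannier-gauge Hamiltonian at each
# kpoint. `eigenvalues` is assumed to be a length-`n_kpts` vector of length-`n_bands`
# vectors (e.g., read from a `prefix.eig` file).
function _example_wannier_gauge_hamiltonian(chk::Chk, eigenvalues::AbstractVector)
    U = get_U(chk)   # n_bands × n_wann at each kpoint, with the dis_bands permutation handled
    return map(zip(U, eigenvalues)) do (Uk, εk)
        Uk' * Diagonal(εk) * Uk   # n_wann × n_wann Hamiltonian in the Wannier gauge
    end
end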
"""
$(SIGNATURES)
Compare two `Chk` objects.
"""
function Base.isapprox(a::Chk, b::Chk)
return _isapprox(a, b)
end
| WannierIO | https://github.com/qiaojunfeng/WannierIO.jl.git |