licenses | version | tree_hash | path | type | size | text | package_name | repo |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | docs | 2057 | # Nevanlinna
[Stable](https://github.com/SpM-lab/Nevanlinna.jl/stable)
[Dev](https://github.com/SpM-lab/Nevanlinna.jl/dev)
[Build Status](https://github.com/SpM-lab/Nevanlinna.jl/actions/workflows/CI.yml?query=branch%3Amain)
## Installation
The package can be installed with the Julia package manager. From the Julia REPL, type `]` to enter the Pkg REPL mode and run:
```
pkg> add Nevanlinna
```
This will install a Command Line Interface (CLI) script, `nevanlinna`, into `~/.julia/bin`.
You can add this directory to your `PATH` in a bash shell by adding the following line to `~/.bashrc`:
```bash
export PATH="$HOME/.julia/bin:$PATH"
```
The `nevanlinna` command requires an input parameter file in TOML format.
A sample file can be downloaded from [here](https://github.com/SpM-lab/Nevanlinna.jl/blob/comonicon/comonicon/bare/config.toml).
(TODO: this link must be updated after merging into the main branch!)
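For example, assuming you have saved the parameter file as `config.toml` in the current directory, the CLI can presumably be invoked as follows (a sketch; the exact invocation depends on your setup):
```bash
nevanlinna config.toml
```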
### How to run examples
You can reproduce the examples demonstrated in our paper by running the notebooks in the `notebook` directory!
The examples include:
- $\delta$-function
- Gaussian
- Lorentzian
- Two peak
- Kondo resonance
- Tractable Hubbard gap
- Challenging Hubbard gap
- Comparison of 64-bit and 128-bit precision
- Hamburger moment problem
To run our code, please ensure that the following packages are installed:
- Nevanlinna
- Plots
- LaTeXStrings
- SparseIR
One can install these libraries as follows:
```bash
julia -e 'import Pkg; Pkg.add(["Nevanlinna", "Plots", "LaTeXStrings", "SparseIR"])'
```
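Once these packages are installed, the notebooks can be opened with Jupyter via IJulia, for instance (a sketch; IJulia is an extra dependency not listed above):
```bash
julia -e 'import Pkg; Pkg.add("IJulia"); using IJulia; notebook(dir="notebook")'
```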
### Manual installation from source (advanced)
You should almost never have to do this, but it is possible to install Nevanlinna.jl from source as follows:
```bash
git clone https://github.com/SpM-lab/Nevanlinna.jl.git
julia -e "import Pkg; Pkg.add(path=\"Nevanlinna.jl\")"
```
This is *not* recommended, as you will get the unstable development version and no future updates. | Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | docs | 186 | ```@meta
CurrentModule = Nevanlinna
```
# Nevanlinna
Documentation for [Nevanlinna](https://github.com/shinaoka/Nevanlinna.jl).
```@index
```
```@autodocs
Modules = [Nevanlinna]
```
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 3368 | using MPI
using PencilArrays
using BenchmarkTools
using Random
using PencilArrays.LocalGrids:
LocalRectilinearGrid, components
MPI.Init()
comm = MPI.COMM_WORLD
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
@inline ftest(u, x, y, z) = u * (x + 2y + z^2)
function bench_eachindex!(v, u, grid)
for I ∈ eachindex(grid)
@inbounds v[I] = ftest(u[I], grid[I]...)
end
v
end
function bench_iterators!(v, u, grid)
for (n, xyz) ∈ zip(eachindex(u), grid)
@inbounds v[n] = ftest(u[n], xyz...)
end
v
end
function bench_rawcoords!(v, u, coords)
for (n, I) ∈ zip(eachindex(u), CartesianIndices(u))
@inbounds xyz = map(getindex, coords, Tuple(I))
@inbounds v[n] = ftest(u[n], xyz...)
end
v
end
function benchmark_pencil(pen)
println(pen, "\n")
dims = size(pen)
# Note: things are roughly twice as fast if one "collects" ranges into
# regular arrays.
coords_global = map(
xs -> collect(Float64, xs),
(
range(0, 1; length = dims[1]),
range(0, 1; length = dims[2]),
[n^2 for n = 1:dims[3]],
)
)
grid = localgrid(pen, coords_global)
coords_local = map(view, coords_global, range_local(pen, LogicalOrder()))
@assert components(grid) == coords_local
u = PencilArray{Float64}(undef, pen)
randn!(u)
v = similar(u)
print("- Broadcast: ")
@btime $v .= ftest.($u, $(grid.x), $(grid.y), $(grid.z))
vcopy = copy(v)
fill!(v, 0)
print("- Eachindex: ")
@btime bench_eachindex!($v, $u, $grid)
@assert v == vcopy
fill!(v, 0)
print("- Iterators: ")
@btime bench_iterators!($v, $u, $grid)
@assert v == vcopy
fill!(v, 0)
print("- Raw coords:") # i.e. without localgrid
@btime bench_rawcoords!($v, $u, $coords_local)
@assert v == vcopy
nothing
end
dims = (60, 110, 21)
perms = [NoPermutation(), Permutation(2, 3, 1)]
for (n, perm) ∈ enumerate(perms)
s = perm == NoPermutation() ? "Without permutations" : "With permutations"
println("\n($n) ", s, "\n")
pen = Pencil(dims, comm; permute = perm)
benchmark_pencil(pen)
end
#=============================================================
Benchmark results
=================
On Julia 1.7.2 + PencilArrays v0.15.0 and 1 MPI process.
This is with --check-bounds=no.
Without that flag, things are a bit slower for the "Iterators" and "Raw coords"
cases, which probably means that there are some @inbounds missing somewhere in
the code.
(1) Without permutations
Decomposition of 3D data
Data dimensions: (60, 110, 21)
Decomposed dimensions: (2, 3)
Data permutation: NoPermutation()
Array type: Array
- Broadcast: 212.889 μs (0 allocations: 0 bytes)
- Eachindex: 171.430 μs (0 allocations: 0 bytes)
- Iterators: 182.775 μs (0 allocations: 0 bytes)
- Raw coords: 205.575 μs (0 allocations: 0 bytes)
(2) With permutations
Decomposition of 3D data
Data dimensions: (60, 110, 21)
Decomposed dimensions: (2, 3)
Data permutation: Permutation(2, 3, 1)
Array type: Array
- Broadcast: 216.302 μs (0 allocations: 0 bytes)
- Eachindex: 175.397 μs (0 allocations: 0 bytes)
- Iterators: 158.978 μs (0 allocations: 0 bytes)
- Raw coords: 312.931 μs (0 allocations: 0 bytes)
=============================================================#
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1525 | using PencilArrays
using Documenter
using Documenter.Remotes: GitHub
DocMeta.setdocmeta!(
PencilArrays, :DocTestSetup,
quote
using PencilArrays
using MPI
MPI.Initialized() || MPI.Init()
end;
recursive=true,
)
doctest(PencilArrays; fix = false)
function main()
makedocs(;
modules = [PencilArrays],
authors = "Juan Ignacio Polanco <[email protected]>",
repo = GitHub("jipolanco", "PencilArrays.jl"),
sitename = "PencilArrays.jl",
format = Documenter.HTML(;
prettyurls = true,
canonical = "https://jipolanco.github.io/PencilArrays.jl",
assets = [
"assets/custom.css",
"assets/tomate.js",
],
),
pages = [
"Home" => "index.md",
"Library" => [
"Pencils.md",
"PencilArrays.md",
"LocalGrids.md",
"Transpositions.md",
"PencilIO.md",
"MPITopology.md",
"PencilArrays_timers.md",
],
"Additional notes" => [
"notes/reductions.md",
],
],
warnonly = [:missing_docs], # TODO can we remove this?
)
deploydocs(;
repo = "github.com/jipolanco/PencilArrays.jl",
forcepush = true,
# PRs deploy at https://jipolanco.github.io/PencilArrays.jl/previews/PR**
push_preview = true,
)
nothing
end
main()
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 899 | module PencilArraysAMDGPUExt
using PencilArrays: typeof_array, typeof_ptr
using PencilArrays.Transpositions: Transpositions
using AMDGPU: ROCVector
# Workaround for `unsafe_wrap` not allowing the `own` keyword argument in the AMDGPU
# implementation.
# Moreover, one needs to pass the `lock = false` argument to indicate that we want to
# wrap an array which already lives on the GPU.
function Transpositions.unsafe_as_array(::Type{T}, x::ROCVector{UInt8}, dims::Tuple) where {T}
p = typeof_ptr(x){T}(pointer(x))
unsafe_wrap(typeof_array(x), p, dims; lock = false)
end
# Workaround for `unsafe_wrap` on ROCArrays not providing a method for dims::Integer.
# We convert that argument to a tuple, which is accepted by the implementation in AMDGPU.
function Transpositions.unsafe_as_array(::Type{T}, x::ROCVector{UInt8}, N::Integer) where {T}
Transpositions.unsafe_as_array(T, x, (N,))
end
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 333 | module PencilArraysDiffEqExt
using PencilArrays: PencilArray, length_global
using DiffEqBase
# This is used for adaptive timestepping in DifferentialEquations.jl.
# Without this, each MPI process may choose a different dt, leading to
# catastrophic consequences!
DiffEqBase.recursive_length(u::PencilArray) = length_global(u)
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 8003 | module PencilArraysHDF5Ext
using MPI
using HDF5
using PencilArrays
using PencilArrays.PencilIO
using PencilArrays: MaybePencilArrayCollection, collection_size
using StaticArrays: SVector
using TimerOutputs
function PencilIO.PHDF5Driver(;
fcpl = HDF5.FileCreateProperties(),
fapl = HDF5.FileAccessProperties(),
)
# We set fapl.fclose_degree if it hasn't been explicitly set.
if !_is_set(fapl, Val(:fclose_degree))
# This is the default in HDF5.jl -- makes sense due to GC.
fapl.fclose_degree = :strong
end
PencilIO._PHDF5Driver(fcpl, fapl)
end
# TODO Is there a better way to check if fapl.fclose_degree has already
# been set??
function _is_set(fapl::HDF5.FileAccessProperties, ::Val{:fclose_degree})
id = fapl.id
degree = Ref{Cint}()
status = ccall(
(:H5Pget_fclose_degree, HDF5.API.libhdf5), HDF5.API.herr_t,
(HDF5.API.hid_t, Ref{Cint}), id, degree)
# A negative value means failure, which we interpret here as meaning that
# "fclose_degree" has not been set.
status ≥ 0
end
PencilIO.hdf5_has_parallel() = HDF5.has_parallel()
function keywords_to_h5open(; kws...)
flags, other_kws = PencilIO.keywords_to_open(; kws...)
(
flags.read,
flags.write,
flags.create,
flags.truncate,
flags.append,
), other_kws
end
function Base.open(D::PHDF5Driver, filename::AbstractString, comm::MPI.Comm; kw...)
mode_args, other_kws = keywords_to_h5open(; kw...)
info = MPI.Info(other_kws...)
fcpl = D.fcpl
fapl = D.fapl
mpio = HDF5.Drivers.MPIO(comm, info)
HDF5.Drivers.set_driver!(fapl, mpio) # fails if no parallel support
swmr = false
# The code below is adapted from h5open in HDF5.jl v0.15
# TODO propose alternative h5open for HDF5.jl, taking keyword arguments `read`, `write`, ...
# Then we wouldn't need to copy code from HDF5.jl...
rd, wr, cr, tr, ff = mode_args
if ff && !wr
error("HDF5 does not support appending without writing")
end
fid = if cr && (tr || !isfile(filename))
flag = swmr ? HDF5.API.H5F_ACC_TRUNC | HDF5.API.H5F_ACC_SWMR_WRITE :
HDF5.API.H5F_ACC_TRUNC
HDF5.API.h5f_create(filename, flag, fcpl, fapl)
else
HDF5.ishdf5(filename) ||
error("unable to determine if $filename is accessible in the HDF5 format (file may not exist)")
flag = if wr
swmr ? HDF5.API.H5F_ACC_RDWR | HDF5.API.H5F_ACC_SWMR_WRITE :
HDF5.API.H5F_ACC_RDWR
else
swmr ? HDF5.API.H5F_ACC_RDONLY | HDF5.API.H5F_ACC_SWMR_READ :
HDF5.API.H5F_ACC_RDONLY
end
HDF5.API.h5f_open(filename, flag, fapl)
end
close(fapl)
close(fcpl)
HDF5.File(fid, filename)
end
function Base.setindex!(
g::Union{HDF5.File, HDF5.Group}, x::MaybePencilArrayCollection,
name::AbstractString;
chunks=false, collective=true, prop_pairs...,
)
to = timer(pencil(x))
@timeit_debug to "Write HDF5" begin
check_phdf5_file(g, x)
# Add extra property lists if required by keyword args.
# TODO avoid using Dict?
props = Dict{Symbol,Any}(pairs(prop_pairs))
if chunks && !haskey(prop_pairs, :chunk)
chunk = h5_chunk_size(x, MemoryOrder())
props[:chunk] = chunk
end
if collective && !haskey(prop_pairs, :dxpl_mpio)
props[:dxpl_mpio] = :collective
end
dims_global = h5_dataspace_dims(x)
@timeit_debug to "create dataset" dset =
create_dataset(g, name, h5_datatype(x), dataspace(dims_global); props...)
inds = range_local(x, MemoryOrder())
@timeit_debug to "write data" to_hdf5(dset, x, inds)
@timeit_debug to "write metadata" write_metadata(dset, x)
end
x
end
# Write metadata as HDF5 attributes attached to a dataset.
# Note that this is a collective operation (all processes must call this).
function write_metadata(dset::HDF5.Dataset, x)
meta = PencilIO.metadata(x)
for (name, val) in pairs(meta)
dset[string(name)] = to_hdf5(val)
end
dset
end
to_hdf5(val) = val
to_hdf5(val::Tuple{}) = false # empty tuple
to_hdf5(val::Tuple) = SVector(val)
to_hdf5(::Nothing) = false
function Base.read!(g::Union{HDF5.File, HDF5.Group}, x::MaybePencilArrayCollection,
name::AbstractString; collective=true, prop_pairs...)
to = timer(pencil(x))
@timeit_debug to "Read HDF5" begin
dapl = HDF5.DatasetAccessProperties(; prop_pairs...)
dxpl = HDF5.DatasetTransferProperties(; prop_pairs...)
# Add extra property lists if required by keyword args.
if collective && !haskey(prop_pairs, :dxpl_mpio)
dxpl.dxpl_mpio = :collective
end
dims_global = h5_dataspace_dims(x)
@timeit_debug to "open dataset" dset = open_dataset(g, string(name), dapl, dxpl)
check_phdf5_file(parent(dset), x)
if dims_global != size(dset)
throw(DimensionMismatch(
"incompatible dimensions of HDF5 dataset and PencilArray"))
end
inds = range_local(x, MemoryOrder())
@timeit_debug to "read data" from_hdf5!(dset, x, inds)
end
x
end
function check_phdf5_file(g, x)
fapl = HDF5.get_access_properties(HDF5.file(g))
driver = HDF5.Drivers.get_driver(fapl)
if hasfield(typeof(driver), :comm)
comm = driver.comm :: MPI.Comm
if MPI.Comm_compare(comm, get_comm(x)) ∉ (MPI.IDENT, MPI.CONGRUENT)
throw(ArgumentError(
"incompatible MPI communicators of HDF5 file and PencilArray"
))
end
else
error("HDF5 file was not opened with the MPIO driver")
end
close(fapl)
nothing
end
to_hdf5(dset, x::PencilArray, inds) = dset[inds...] = parent(x)
function from_hdf5!(dset, x::PencilArray, inds)
u = parent(x)
if stride(u, 1) != 1
u .= dset[inds...] # short and easy version (but allocates!)
return x
end
# The following is adapted from one of the _getindex() in HDF5.jl.
HDF5Scalar = HDF5.ScalarType
T = eltype(x)
if !(T <: Union{HDF5Scalar, Complex{<:HDF5Scalar}})
error("Dataset indexing (hyperslab) is available only for bits types")
end
dsel_id = HDF5.hyperslab(dset, inds...)
memtype = HDF5.datatype(u)
memspace = HDF5.dataspace(u)
try
# This only works for stride-1 arrays.
HDF5.API.h5d_read(dset.id, memtype.id, memspace.id, dsel_id, dset.xfer, u)
finally
close(memtype)
close(memspace)
HDF5.API.h5s_close(dsel_id)
end
x
end
# Define variants for collections.
for func in (:from_hdf5!, :to_hdf5)
@eval function $func(dset, col::PencilArrayCollection, inds_in)
for I in CartesianIndices(collection_size(col))
inds = (inds_in..., Tuple(I)...)
$func(dset, col[I], inds)
end
end
end
h5_datatype(x::PencilArray) = datatype(eltype(x))
h5_datatype(x::PencilArrayCollection) = h5_datatype(first(x))
h5_dataspace_dims(x::PencilArray) = size_global(x, MemoryOrder())
h5_dataspace_dims(x::PencilArrayCollection) =
(h5_dataspace_dims(first(x))..., collection_size(x)...)
function h5_chunk_size(x::PencilArray, order = MemoryOrder())
# Determine chunk size for writing to HDF5 dataset.
# The idea is that each process writes to a single separate chunk of the
# dataset, of size `dims_local`.
# This only works if the data is ideally balanced among processes, i.e. if
# the local dimensions of the dataset are the same for all processes.
dims_local = size_local(x, order)
# In the general case that the data is not well balanced, we take the
# minimum size along each dimension.
chunk = MPI.Allreduce(collect(dims_local), min, get_comm(x))
N = ndims(x)
@assert length(chunk) == N
ntuple(d -> chunk[d], Val(N))
end
h5_chunk_size(x::PencilArrayCollection, args...) =
(h5_chunk_size(first(x), args...)..., collection_size(x)...)
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1266 | module PencilArrays
using MPI
using OffsetArrays
using Reexport
using StaticPermutations
using TimerOutputs
using Base: @propagate_inbounds
import Adapt
include("Permutations.jl")
import .Permutations: permutation
include("PermutedIndices/PermutedIndices.jl")
using .PermutedIndices
include("LocalGrids/LocalGrids.jl")
@reexport using .LocalGrids
include("Pencils/Pencils.jl")
@reexport using .Pencils
import .Pencils:
get_comm,
range_local,
range_remote,
size_local,
size_global,
length_local,
length_global,
topology,
typeof_array
export PencilArray, GlobalPencilArray, PencilArrayCollection, ManyPencilArray
export pencil, permutation
export gather
export global_view
export ndims_extra, ndims_space, extra_dims, sizeof_global
# Type definitions
include("arrays.jl") # PencilArray
include("multiarrays.jl") # ManyPencilArray
include("global_view.jl") # GlobalPencilArray
include("cartesian_indices.jl") # PermutedLinearIndices, PermutedCartesianIndices
include("size.jl")
include("array_interface.jl")
include("broadcast.jl")
include("random.jl")
include("reductions.jl")
include("gather.jl")
include("Transpositions/Transpositions.jl")
@reexport using .Transpositions
include("PencilIO/PencilIO.jl")
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 71 | module Permutations
export permutation
function permutation end
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1491 | import StaticArrayInterface:
StaticArrayInterface,
StaticInt,
contiguous_axis,
contiguous_batch_size,
parent_type,
stride_rank,
dense_dims
parent_type(::Type{<:PencilArray{T,N,A}}) where {T,N,A} = A
contiguous_axis(::Type{A}) where {A <: PencilArray} =
_contiguous_axis(
contiguous_axis(parent_type(A)),
permutation(A),
)
_contiguous_axis(x::Nothing, ::AbstractPermutation) = x
_contiguous_axis(x::StaticInt, ::NoPermutation) = x
@inline function _contiguous_axis(x::StaticInt{i}, p::Permutation) where {i}
i == -1 && return x
StaticInt(p[Val(i)])
end
contiguous_batch_size(::Type{A}) where {A <: PencilArray} =
contiguous_batch_size(parent_type(A))
function stride_rank(::Type{A}) where {A <: PencilArray}
rank = stride_rank(parent_type(A))
rank === nothing && return nothing
iperm = Tuple(inv(permutation(A)))
iperm === nothing && return rank
StaticArrayInterface.permute(rank, Val(iperm))
end
function dense_dims(::Type{A}) where {A <: PencilArray}
dense = dense_dims(parent_type(A))
dense === nothing && return nothing
perm = Tuple(inv(permutation(A)))
perm === nothing && return dense
StaticArrayInterface.permute(dense, Val(perm))
end
StaticArrayInterface.static_size(A::PencilArray) =
permutation(A) * StaticArrayInterface.static_size(parent(A))
StaticArrayInterface.static_strides(A::PencilArray) =
permutation(A) * StaticArrayInterface.static_strides(parent(A))
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 17299 | """
PencilArray(pencil::Pencil, data::AbstractArray{T,N})
Create array wrapper with pencil decomposition information.
The array dimensions and element type must be consistent with those of the given
pencil.
!!! note "Index permutations"
If the `Pencil` has an associated index permutation, then `data` must have
its dimensions permuted accordingly (in *memory* order).
Unlike `data`, the resulting `PencilArray` should be accessed with
unpermuted indices (in *logical* order).
##### Example
Suppose `pencil` has local dimensions `(10, 20, 30)` before permutation, and
has an associated permutation `(2, 3, 1)`.
Then:
```julia
data = zeros(20, 30, 10) # parent array (with dimensions in memory order)
u = PencilArray(pencil, data) # wrapper with dimensions (10, 20, 30)
@assert size_local(u) === (10, 20, 30)
u[15, 25, 5] # BoundsError (15 > 10 and 25 > 20)
u[5, 15, 25] # correct
parent(u)[15, 25, 5] # correct
```
!!! note "Extra dimensions"
The data array can have one or more extra dimensions to the right (slow
indices), which are not affected by index permutations.
##### Example
```julia
dims = (20, 30, 10)
PencilArray(pencil, zeros(dims...)) # works (scalar)
PencilArray(pencil, zeros(dims..., 3)) # works (3-component vector)
PencilArray(pencil, zeros(dims..., 4, 3)) # works (4×3 tensor)
PencilArray(pencil, zeros(3, dims...)) # fails
```
---
PencilArray{T}(undef, pencil::Pencil, [extra_dims...])
Allocate an uninitialised `PencilArray` that can hold data in the local pencil.
Extra dimensions, for instance representing vector components, can be specified.
These dimensions are added to the rightmost (slowest) indices of the resulting
array.
# Example
Suppose `pencil` has local dimensions `(20, 10, 30)`. Then:
```julia
PencilArray{Float64}(undef, pencil) # array dimensions are (20, 10, 30)
PencilArray{Float64}(undef, pencil, 4, 3) # array dimensions are (20, 10, 30, 4, 3)
```
More examples:
```jldoctest
julia> pen = Pencil((20, 10, 12), MPI.COMM_WORLD);
julia> u = PencilArray{Float64}(undef, pen);
julia> summary(u)
"20×10×12 PencilArray{Float64, 3}(::Pencil{3, 2, NoPermutation, Array})"
julia> PencilArray{Float64}(undef, pen, 4, 3) |> summary
"20×10×12×4×3 PencilArray{Float64, 5}(::Pencil{3, 2, NoPermutation, Array})"
```
"""
struct PencilArray{
T,
N,
A <: AbstractArray{T,N},
Np, # number of "spatial" dimensions (i.e. dimensions of the Pencil)
E, # number of "extra" dimensions (= N - Np)
P <: Pencil,
} <: AbstractArray{T,N}
pencil :: P
data :: A
space_dims :: Dims{Np} # spatial dimensions in *logical* order
extra_dims :: Dims{E}
# This constructor is not to be used directly!
# It exists just to enforce that the type of data array is consistent with
# typeof_array(pencil).
function PencilArray(pencil::Pencil, data::AbstractArray)
_check_compatible(pencil, data)
N = ndims(data)
Np = ndims(pencil)
E = N - Np
size_data = size(data)
geom_dims = ntuple(n -> size_data[n], Np) # = size_data[1:Np]
extra_dims = ntuple(n -> size_data[Np + n], E) # = size_data[Np+1:N]
dims_local = size_local(pencil, MemoryOrder())
if geom_dims !== dims_local
throw(DimensionMismatch(
"array has incorrect dimensions: $(size_data). " *
"Local dimensions of pencil: $(dims_local)."))
end
space_dims = permutation(pencil) \ geom_dims # undo permutation
T = eltype(data)
P = typeof(pencil)
new{T, N, typeof(data), Np, E, P}(pencil, data, space_dims, extra_dims)
end
end
@inline _check_compatible(p::Pencil, u) = _check_compatible(typeof_array(p), u)
@inline function _check_compatible(::Type{A}, u, ubase = u) where {A}
typeof(u) <: A && return nothing
up = parent(u)
typeof(up) === typeof(u) && throw(ArgumentError(
"type of data array ($(typeof(ubase))) is not compatible with expected array type ($A)"
))
_check_compatible(A, up, ubase)
end
function PencilArray{T}(init, pencil::Pencil, extra_dims::Vararg{Integer}) where {T}
dims = (size_local(pencil, MemoryOrder())..., extra_dims...)
A = typeof_array(pencil)
PencilArray(pencil, A{T}(init, dims))
end
# Treat PencilArray similarly to other wrapper types.
# https://github.com/JuliaGPU/Adapt.jl/blob/master/src/wrappers.jl
function Adapt.adapt_structure(to, u::PencilArray)
A = typeof_array(to)
p = similar(pencil(u), A) # create Pencil with possibly different array type
PencilArray(p, Adapt.adapt(to, parent(u)))
end
pencil_type(::Type{PencilArray{T,N,A,M,E,P}}) where {T,N,A,M,E,P} = P
# This is called by `summary`.
function Base.showarg(io::IO, u::PencilArray, toplevel)
toplevel || print(io, "::")
print(io, nameof(typeof(u)), '{', eltype(u), ", ", ndims(u), '}')
if toplevel
print(io, '(')
Base.showarg(io, pencil(u), false)
print(io, ')')
end
nothing
end
"""
PencilArrayCollection
`UnionAll` type describing a collection of [`PencilArray`](@ref)s.
Such a collection can be a tuple or an array of `PencilArray`s.
Collections are **by assumption** homogeneous: each array has the same
properties, and in particular, is associated to the same [`Pencil`](@ref)
configuration.
For convenience, certain operations defined for `PencilArray` are also defined
for `PencilArrayCollection`, and return the same value as for a single
`PencilArray`.
Some examples are [`pencil`](@ref), [`range_local`](@ref) and
[`get_comm`](@ref).
Also note that functions from `Base`, such as `size`, `ndims` and `eltype`, are **not**
overloaded for `PencilArrayCollection`, since they already have a definition
for tuples and arrays (and redefining them would be type piracy...).
"""
const PencilArrayCollection =
Union{Tuple{Vararg{A}}, AbstractArray{A}} where {A <: PencilArray}
collection_size(x::Tuple{Vararg{PencilArray}}) = (length(x), )
collection_size(x::AbstractArray{<:PencilArray}) = size(x)
collection_size(::PencilArray) = ()
# This is convenient for iterating over one or more PencilArrays.
# A single PencilArray is treated as a "collection" of one array.
collection(x::PencilArrayCollection) = x
collection(x::PencilArray) = (x, )
const MaybePencilArrayCollection = Union{PencilArray, PencilArrayCollection}
function _apply(f::Function, x::PencilArrayCollection, args...; kwargs...)
a = first(x)
if !all(b -> pencil(a) === pencil(b), x)
throw(ArgumentError("PencilArrayCollection is not homogeneous"))
end
f(a, args...; kwargs...)
end
Base.axes(x::PencilArray) = permutation(x) \ axes(parent(x))
"""
similar(x::PencilArray, [element_type=eltype(x)], [dims]) -> PencilArray
Returns a `PencilArray` similar to `x`.
In particular, the new array shares the same parallel decomposition (the same `Pencil`) than
`x`. This means that the dimensions of the new array must be the same as those of `x`. Note
that the optional `dims` argument is allowed for the sole reason of making things work
nicely with other packages (such as StructArrays.jl), but things will fail if `dims ≠ size(x)`.
# Examples
```jldoctest
julia> pen = Pencil((20, 10, 12), MPI.COMM_WORLD);
julia> u = PencilArray{Float64}(undef, pen);
julia> similar(u) |> summary
"20×10×12 PencilArray{Float64, 3}(::Pencil{3, 2, NoPermutation, Array})"
julia> similar(u, size(u)) |> summary
"20×10×12 PencilArray{Float64, 3}(::Pencil{3, 2, NoPermutation, Array})"
julia> similar(u, ComplexF32) |> summary
"20×10×12 PencilArray{ComplexF32, 3}(::Pencil{3, 2, NoPermutation, Array})"
julia> similar(u, (4, 3, 8))
ERROR: DimensionMismatch: cannot construct a similar PencilArray with different size
julia> similar(u, (4, 3)) |> summary
ERROR: DimensionMismatch: cannot construct a similar PencilArray with different size
julia> similar(u, ComplexF32) |> summary
"20×10×12 PencilArray{ComplexF32, 3}(::Pencil{3, 2, NoPermutation, Array})"
julia> similar(u, ComplexF32, (4, 3))
ERROR: DimensionMismatch: cannot construct a similar PencilArray with different size
```
---
similar(x::PencilArray, [element_type = eltype(x)], p::Pencil)
Create a `PencilArray` with the decomposition described by the given `Pencil`.
This variant may be used to create a `PencilArray` that has a different
decomposition than the input `PencilArray`.
# Examples
```jldoctest
julia> pen_u = Pencil((20, 10, 12), (2, 3), MPI.COMM_WORLD);
julia> u = PencilArray{Float64}(undef, pen_u);
julia> pen_v = Pencil(pen_u; decomp_dims = (1, 3), permute = Permutation(2, 3, 1))
Decomposition of 3D data
Data dimensions: (20, 10, 12)
Decomposed dimensions: (1, 3)
Data permutation: Permutation(2, 3, 1)
Array type: Array
julia> v = similar(u, pen_v);
julia> summary(v)
"20×10×12 PencilArray{Float64, 3}(::Pencil{3, 2, Permutation{(2, 3, 1), 3}, Array})"
julia> pencil(v) === pen_v
true
julia> vint = similar(u, Int, pen_v);
julia> summary(vint)
"20×10×12 PencilArray{Int64, 3}(::Pencil{3, 2, Permutation{(2, 3, 1), 3}, Array})"
julia> pencil(vint) === pen_v
true
```
"""
function Base.similar(x::PencilArray, ::Type{S}) where {S}
dims_perm = permutation(x) * size_local(x)
PencilArray(x.pencil, similar(parent(x), S, dims_perm))
end
function Base.similar(x::PencilArray, ::Type{S}, dims::Dims) where {S}
dims == size(x) ||
throw(DimensionMismatch("cannot construct a similar PencilArray with different size"))
similar(x, S)
end
function Base.similar(x::PencilArray, ::Type{S}, p::Pencil) where {S}
dims_mem = (size_local(p, MemoryOrder())..., extra_dims(x)...)
PencilArray(p, similar(parent(x), S, dims_mem))
end
Base.similar(x::PencilArray, p::Pencil) = similar(x, eltype(x), p)
# Use same index style as the parent array.
Base.IndexStyle(::Type{<:PencilArray{T,N,A}} where {T,N}) where {A} =
IndexStyle(A)
# Overload Base._sub2ind for converting from Cartesian to linear index.
@inline function Base._sub2ind(x::PencilArray, I...)
# _sub2ind(axes(x), I...) <- default implementation for AbstractArray
J = permutation(x) * I
Base._sub2ind(parent(x), J...)
end
# Linear indexing
@propagate_inbounds function Base.getindex(x::PencilArray, i::Integer)
parent(x)[i]
end
@propagate_inbounds function Base.setindex!(x::PencilArray, v, i::Integer)
parent(x)[i] = v
end
# Cartesian indexing: assume input indices are unpermuted, and permute them.
# (This is similar to the implementation of PermutedDimsArray.)
@propagate_inbounds Base.getindex(
x::PencilArray{T,N}, I::Vararg{Int,N}) where {T,N} =
parent(x)[_genperm(x, I)...]
@propagate_inbounds @inline Base.setindex!(
x::PencilArray{T,N}, v, I::Vararg{Int,N}) where {T,N} =
parent(x)[_genperm(x, I)...] = v
@inline function _genperm(x::PencilArray{T,N}, I::NTuple{N,Int}) where {T,N}
permutation(x) * I
end
@inline _genperm(x::PencilArray, I::CartesianIndex) =
CartesianIndex(_genperm(x, Tuple(I)))
"""
pencil(x::PencilArray)
Return decomposition configuration associated to a `PencilArray`.
"""
pencil(x::PencilArray) = x.pencil
pencil(x::PencilArrayCollection) = _apply(pencil, x)
"""
parent(x::PencilArray)
Return array wrapped by a `PencilArray`.
"""
Base.parent(x::PencilArray) = x.data
# This enables aliasing detection (e.g. using Base.mightalias) on PencilArrays.
Base.dataids(x::PencilArray) = Base.dataids(parent(x))
# This is based on strides(::PermutedDimsArray)
function Base.strides(x::PencilArray)
s = strides(parent(x))
permutation(x) * s
end
"""
pointer(x::PencilArray)
Return pointer to the start of the underlying data.
Use with caution: this may not make a lot of sense if the underlying data is not
contiguous or strided (e.g. if the `PencilArray` is wrapping a non-strided
`SubArray`).
"""
Base.pointer(x::PencilArray) = pointer(parent(x))
"""
ndims_extra(::Type{<:PencilArray})
ndims_extra(x::PencilArray)
ndims_extra(x::PencilArrayCollection)
Number of "extra" dimensions associated to `PencilArray`.
These are the dimensions that are not associated to the domain geometry.
For instance, they may correspond to vector or tensor components.
These dimensions correspond to the rightmost indices of the array.
The total number of dimensions of a `PencilArray` is given by:
ndims(x) == ndims_space(x) + ndims_extra(x)
"""
ndims_extra(x::MaybePencilArrayCollection) = length(extra_dims(x))
ndims_extra(::Type{<:PencilArray{T,N,A,M,E}}) where {T,N,A,M,E} = E
"""
ndims_space(x::PencilArray)
ndims_space(x::PencilArrayCollection)
Number of dimensions associated to the domain geometry.
These dimensions correspond to the leftmost indices of the array.
The total number of dimensions of a `PencilArray` is given by:
ndims(x) == ndims_space(x) + ndims_extra(x)
"""
ndims_space(x::PencilArray) = ndims(x) - ndims_extra(x)
ndims_space(x::PencilArrayCollection) = _apply(ndims_space, x)
"""
extra_dims(x::PencilArray)
extra_dims(x::PencilArrayCollection)
Return tuple with size of "extra" dimensions of `PencilArray`.
"""
extra_dims(x::PencilArray) = x.extra_dims
extra_dims(x::PencilArrayCollection) = _apply(extra_dims, x)
"""
sizeof_global(x::PencilArray)
sizeof_global(x::PencilArrayCollection)
Global size of array in bytes.
"""
sizeof_global(x::PencilArray) = prod(size_global(x)) * sizeof(eltype(x))
sizeof_global(x::PencilArrayCollection) = sum(sizeof_global, x)
"""
range_local(x::PencilArray, [order = LogicalOrder()])
range_local(x::PencilArrayCollection, [order = LogicalOrder()])
Local data range held by the `PencilArray`.
By default the dimensions are returned in logical order.
"""
range_local(x::MaybePencilArrayCollection, args...; kw...) =
(range_local(pencil(x), args...; kw...)..., map(Base.OneTo, extra_dims(x))...)
"""
range_remote(x::PencilArray, coords, [order = LogicalOrder()])
range_remote(x::PencilArrayCollection, coords, [order = LogicalOrder()])
Get data range held by the `PencilArray` in a given MPI process.
The location of the MPI process in the topology is determined by the `coords`
argument, which can be given as a linear or Cartesian index.
See [`range_remote(::Pencil, ...)`](@ref range_remote(::Pencil, ::Integer,
::LogicalOrder)) variant for details.
"""
range_remote(x::MaybePencilArrayCollection, args...) =
(range_remote(pencil(x), args...)..., map(Base.OneTo, extra_dims(x))...)
"""
get_comm(x::PencilArray)
get_comm(x::PencilArrayCollection)
Get MPI communicator associated to a pencil-distributed array.
"""
get_comm(x::MaybePencilArrayCollection) = get_comm(pencil(x))
"""
permutation(::Type{<:PencilArray})
permutation(x::PencilArray)
permutation(x::PencilArrayCollection)
Get index permutation associated to the given `PencilArray`.
Returns `NoPermutation()` if there is no associated permutation.
"""
function permutation end
function permutation(::Type{A}) where {A <: PencilArray}
P = pencil_type(A)
perm = permutation(P)
E = ndims_extra(A)
append(perm, Val(E))
end
permutation(x::PencilArray) = permutation(typeof(x))
permutation(x::PencilArrayCollection) = _apply(permutation, x)
"""
topology(x::PencilArray)
topology(x::PencilArrayCollection)
Get [`MPITopology`](@ref) associated to a `PencilArray`.
"""
topology(x::MaybePencilArrayCollection) = topology(pencil(x))
## Common array operations
# We try to avoid falling onto the generic AbstractArray interface, because it
# generally uses scalar indexing which is not liked by GPU arrays.
Base.zero(x::PencilArray) = fill!(similar(x), zero(eltype(x)))
function _check_compatible_arrays(x::PencilArray, y::PencilArray)
# The condition is stronger than needed, but it's pretty common for arrays
# to share the same Pencil, and it's more efficient to compare this way.
pencil(x) === pencil(y) ||
throw(ArgumentError("arrays are not compatible"))
end
function Base.copyto!(x::PencilArray, y::PencilArray)
_check_compatible_arrays(x, y)
copyto!(parent(x), parent(y))
x
end
# Should this be an equality across all MPI processes?
function Base.:(==)(x::PencilArray, y::PencilArray)
_check_compatible_arrays(x, y)
parent(x) == parent(y)
end
function Base.isapprox(x::PencilArray, y::PencilArray; kws...)
_check_compatible_arrays(x, y)
isapprox(parent(x), parent(y); kws...)
end
function Base.fill!(A::PencilArray, x)
fill!(parent(A), x)
A
end
"""
typeof_ptr(x::AbstractArray)
typeof_ptr(x::PencilArray)
Get the type of pointer to the underlying array of a `PencilArray` or `AbstractArray`.
"""
typeof_ptr(A::AbstractArray) = typeof(pointer(A)).name.wrapper
"""
typeof_array(x::Pencil)
typeof_array(x::PencilArray)
typeof_array(x::AbstractArray)
Get the type of array (without the element type) so it can be used as a constructor.
"""
typeof_array(A::PencilArray) = typeof_array(parent(A))
"""
localgrid(x::PencilArray, args...)
Equivalent of `localgrid(pencil(x), args...)`.
"""
LocalGrids.localgrid(A::PencilArray, args...) = localgrid(pencil(A), args...)
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 3097 | # The broadcasting logic is quite tricky due to possible dimension permutations.
# The basic idea is that, when permutations are enabled, PencilArrays broadcast
# using their dimensions in memory order.
# This allows things like `u .+ parent(u)` even when `u` is a PencilArray with
# permuted dimensions.
# In this case, `u` and `parent(u)` may have different sizes [e.g. `(4, 6, 7)`
# vs `(6, 7, 4)`] but they're still allowed to broadcast, which may not be very
# natural or intuitive.
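# A small sketch of what this enables (assuming `u` is a PencilArray with a
# permuted pencil, e.g. logical size (4, 6, 7) and memory size (6, 7, 4)):
#
#     v = u .+ 2 .* parent(u)   # broadcasts in memory order
#
# Here `u` and `parent(u)` have different `size` but the same memory layout.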
using Base.Broadcast:
Broadcast,
BroadcastStyle, Broadcasted,
AbstractArrayStyle, DefaultArrayStyle
struct PencilArrayStyle{N} <: AbstractArrayStyle{N} end
struct PencilArrayBroadcastable{T, N, A <: PencilArray{T, N}}
data :: A
PencilArrayBroadcastable(u::PencilArray{T, N}) where {T, N} =
new{T, N, typeof(u)}(u)
end
_actual_parent(u::PencilArray) = parent(u)
_actual_parent(bc::PencilArrayBroadcastable) = _actual_parent(bc.data)
Broadcast.broadcastable(x::PencilArray) = PencilArrayBroadcastable(x)
Base.eltype(::Type{<:PencilArrayBroadcastable{T}}) where {T} = T
Base.size(bc::PencilArrayBroadcastable) = size(_actual_parent(bc))
function Broadcast.materialize!(u::PencilArray, bc_in::Broadcasted)
dest = _actual_parent(u)
bc = _unwrap_pa(bc_in)
Broadcast.materialize!(dest, bc)
u
end
# When materialising the broadcast, we unwrap all arrays wrapped by PencilArrays.
# This is to make sure that the right `copyto!` is called.
# For GPU arrays, this enables the use of the `copyto!` implementation in
# GPUArrays.jl, avoiding scalar indexing.
function Base.copyto!(dest_in::PencilArray, bc_in::Broadcasted{Nothing})
dest = _actual_parent(dest_in)
bc = _unwrap_pa(bc_in)
copyto!(dest, bc)
dest_in
end
function _unwrap_pa(bc::Broadcasted{Style}) where {Style}
args = map(_unwrap_pa, bc.args)
axs = axes(bc)
if Style === Nothing
Broadcasted{Nothing}(bc.f, args, axs) # used by copyto!
else
Broadcasted(bc.f, args, axs) # used by materialize!
end
end
_unwrap_pa(u::PencilArrayBroadcastable) = _actual_parent(u)
_unwrap_pa(u) = u
BroadcastStyle(::Type{<:PencilArrayBroadcastable{T, N, <:PencilArray}}) where {T, N} =
PencilArrayStyle{N}()
# PencilArrayStyle wins against other array styles
BroadcastStyle(style::PencilArrayStyle, ::AbstractArrayStyle) = style
# This is needed to avoid ambiguities
BroadcastStyle(style::PencilArrayStyle, ::DefaultArrayStyle) = style
function Base.similar(
bc::Broadcasted{<:PencilArrayStyle}, ::Type{T},
) where {T}
br = find_pa(bc) :: PencilArrayBroadcastable
A = br.data
axs_a = permutation(A) * axes(A) # in memory order
axs_b = axes(bc)
axs_a == axs_b ||
throw(DimensionMismatch("arrays cannot be broadcast; got axes $axs_a and $axs_b"))
similar(A, T)
end
# Find PencilArray among broadcast arguments.
find_pa(bc::Broadcasted) = find_pa(bc.args)
find_pa(args::Tuple) = find_pa(find_pa(args[1]), Base.tail(args))
find_pa(x) = x
find_pa(::Any, rest) = find_pa(rest)
find_pa(A::PencilArrayBroadcastable, rest) = A
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1037 | # We make LinearIndices(::PencilArray) return a PermutedLinearIndices, which
# takes index permutation into account.
Base.LinearIndices(A::PencilArray) =
PermutedLinearIndices(LinearIndices(parent(A)), permutation(A))
function Base.LinearIndices(g::GlobalPencilArray)
p = permutation(g)
axs_log = axes(g) # offset axes in logical (unpermuted) order
axs_mem = p * axs_log # offset axes in memory (permuted) order
PermutedLinearIndices(LinearIndices(axs_mem), p)
end
# We make CartesianIndices(::PencilArray) return a PermutedCartesianIndices,
# which loops faster (in memory order) when there are index permutations.
Base.CartesianIndices(A::PencilArray) =
PermutedCartesianIndices(CartesianIndices(parent(A)), permutation(A))
function Base.CartesianIndices(g::GlobalPencilArray)
p = permutation(g)
axs_log = axes(g) # offset axes in logical (unpermuted) order
axs_mem = p * axs_log # offset axes in memory (permuted) order
PermutedCartesianIndices(CartesianIndices(axs_mem), p)
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 3102 | """
gather(x::PencilArray{T, N}, [root::Integer=0]) -> Array{T, N}
Gather data from all MPI processes into one (big) array.
Data is received by the `root` process.
Returns the full array on the `root` process, and `nothing` on the other
processes.
Note that `gather` always returns a base `Array`, even when the
`PencilArray` wraps a different kind of array (e.g. a `CuArray`).
This function can be useful for testing, but it shouldn't be used with
very large datasets!
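A minimal usage sketch (assuming `pen::Pencil` and MPI are already set up):

```julia
u = PencilArray{Float64}(undef, pen)
fill!(u, MPI.Comm_rank(get_comm(u)))  # each process fills with its own rank
x = gather(u)  # full Array on rank 0; `nothing` on the other ranks
```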
"""
function gather(x::PencilArray{T,N}, root::Integer=0) where {T, N}
timer = Pencils.timer(pencil(x))
@timeit_debug timer "gather" begin
# TODO reduce allocations! see `transpose_impl!`
comm = get_comm(x)
rank = MPI.Comm_rank(comm)
mpi_tag = 42
pen = pencil(x)
extra_dims = PencilArrays.extra_dims(x)
# Each process sends its data to the root process.
# If the local indices are permuted, the permutation is reverted before
# sending the data.
data = let perm = permutation(pen)
if isidentity(perm)
parent(x)
else
# Apply inverse permutation.
p = append(inv(perm), Val(length(extra_dims)))
permutedims(parent(x), Tuple(p)) # creates copy!
end
end
# The output is a regular CPU array.
DestArray = Array{T}
# For GPU arrays, this transfers data to the CPU (allocating a new Array).
# If `data` is already an Array{T}, this is non-allocating.
data_cpu = convert(DestArray, data)
if rank != root
# Wait for data to be sent, then return.
buf = MPI.Buffer(data_cpu)
GC.@preserve buf begin
send_req = MPI.Isend(buf, comm, MPI.UnsafeRequest(); dest = root, tag = mpi_tag)
MPI.Wait(send_req)
end
return nothing
end
# Receive data (root only).
topo = pen.topology
Nproc = length(topo)
recv = Vector{Array{T,N}}(undef, Nproc)
recv_req = MPI.UnsafeMultiRequest(Nproc)
dest = DestArray(undef, size_global(x)) # output array
root_index = -1
GC.@preserve recv begin
for n = 1:Nproc
# Global data range that I will receive from process n.
rrange = pen.axes_all[n]
rdims = length.(rrange)
src_rank = topo.ranks[n] # actual rank of sending process
if src_rank == root
root_index = n
else
# TODO avoid allocation?
recv[n] = Array{T,N}(undef, rdims..., extra_dims...)
MPI.Irecv!(recv[n], comm, recv_req[n]; source = src_rank, tag = mpi_tag)
end
end
# Unpack data.
# 1. Copy local data.
colons_extra_dims = ntuple(n -> Colon(), Val(length(extra_dims)))
dest[pen.axes_local..., colons_extra_dims...] .= data_cpu
# 2. Copy remote data.
for m = 2:Nproc
n = MPI.Waitany(recv_req)
rrange = pen.axes_all[n]
dest[rrange..., colons_extra_dims...] .= recv[n]
end
end # GC.@preserve recv
end # @timeit_debug
dest
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1156 | """
GlobalPencilArray{T,N} <: AbstractArray{T,N}
Alias for an `OffsetArray` wrapping a [`PencilArray`](@ref).
Unlike `PencilArray`s, `GlobalPencilArray`s take *global* indices, which
in general don't start at 1 for a given MPI process.
The [`global_view`](@ref) function should be used to create a
`GlobalPencilArray` from a `PencilArray`.
"""
const GlobalPencilArray{T,N} = OffsetArray{T,N,A} where {A <: PencilArray}
"""
global_view(x::PencilArray)
Create an [`OffsetArray`](https://github.com/JuliaArrays/OffsetArrays.jl) of a
`PencilArray` that takes global indices in logical order.
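A small sketch (assuming `u` is an existing `PencilArray`):

```julia
ug = global_view(u)
rlocal = range_local(u)       # global index ranges held by this process
ug[first.(rlocal)...]         # first local element, addressed with global indices
```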
"""
function global_view(x::PencilArray)
r = range_local(x, LogicalOrder())
offsets = first.(r) .- 1
xo = OffsetArray(x, offsets)
@assert parent(xo) === x # OffsetArray shouldn't create a copy...
xo :: GlobalPencilArray
end
permutation(x::GlobalPencilArray) = permutation(parent(x))
Base.similar(x::GlobalPencilArray, ::Type{S}) where {S} =
global_view(similar(parent(x), S))
# Account for index permutation in global views.
@inline Base._sub2ind(x::GlobalPencilArray, I...) =
Base._sub2ind(parent(x), (I .- x.offsets)...)
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 4500 | ## Definition of AbstractManyPencilArray
"""
AbstractManyPencilArray{N,M}
Abstract type specifying a container holding `M` different
[`PencilArray`](@ref) views to the same underlying data buffer.
All views share the same dimensionality `N`. In principle, their element types
can be different.
A concrete implementation, [`ManyPencilArray`](@ref), is proposed in which all
arrays have the same element type `T`.
"""
abstract type AbstractManyPencilArray{N,M} end
Base.ndims(::AbstractManyPencilArray{N}) where {N} = N
"""
length(A::AbstractManyPencilArray)
Returns the number of [`PencilArray`](@ref)s wrapped by `A`.
"""
Base.length(::AbstractManyPencilArray{N,M}) where {N,M} = M
"""
Tuple(A::AbstractManyPencilArray) -> (u1, u2, …)
Returns the [`PencilArray`](@ref)s wrapped by `A`.
This can be useful for iterating over all the wrapped arrays.
"""
@inline Base.Tuple(A::AbstractManyPencilArray) = A.arrays
"""
first(A::AbstractManyPencilArray)
Returns the first [`PencilArray`](@ref) wrapped by `A`.
"""
Base.first(A::AbstractManyPencilArray) = first(A.arrays)
"""
last(A::AbstractManyPencilArray)
Returns the last [`PencilArray`](@ref) wrapped by `A`.
"""
Base.last(A::AbstractManyPencilArray) = last(A.arrays)
"""
getindex(A::AbstractManyPencilArray, ::Val{i})
getindex(A::AbstractManyPencilArray, i::Integer)
Returns the i-th [`PencilArray`](@ref) wrapped by `A`.
If possible, the `Val{i}` form should be preferred, as it ensures that the full
type of the returned `PencilArray` is known by the compiler.
See also [`first(::AbstractManyPencilArray)`](@ref), [`last(::AbstractManyPencilArray)`](@ref).
# Example
```julia
A = ManyPencilArray(pencil1, pencil2, pencil3)
# Get the PencilArray associated to `pencil2`.
u2 = A[2]
u2 = A[Val(2)]
```
"""
Base.getindex(A::AbstractManyPencilArray, ::Val{i}) where {i} =
_getindex(Val(i), A.arrays...)
@inline Base.getindex(A::AbstractManyPencilArray, i) = A[Val(i)]
@inline function _getindex(::Val{i}, a, t::Vararg) where {i}
i :: Integer
i <= 0 && throw(BoundsError("index must be >= 1"))
i == 1 && return a
_getindex(Val(i - 1), t...)
end
# This will happen if the index `i` initially passed is too large.
@inline _getindex(::Val) = throw(BoundsError("invalid index"))
# ====================================================================== #
## Concrete implementation: ManyPencilArray
"""
ManyPencilArray{T,N,M} <: AbstractManyPencilArray{N,M}
Container holding `M` different [`PencilArray`](@ref) views to the same
underlying data buffer. All views share the same element type `T` and
dimensionality `N`.
This can be used to perform in-place data transpositions with
[`transpose!`](@ref Transpositions.transpose!).
---
ManyPencilArray{T}(undef, pencils...; extra_dims=())
Create a `ManyPencilArray` container that can hold data of type `T` associated
to all the given [`Pencil`](@ref)s.
The optional `extra_dims` argument is the same as for [`PencilArray`](@ref).
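A minimal construction sketch (assuming MPI is initialised):

```julia
pen1 = Pencil((16, 8, 4), MPI.COMM_WORLD)
pen2 = Pencil(pen1; decomp_dims = (1, 3))
A = ManyPencilArray{Float64}(undef, pen1, pen2)
u1, u2 = A[1], A[2]  # two PencilArray views sharing the same buffer
```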
"""
struct ManyPencilArray{
T, # element type of each array
N, # number of dimensions of each array (including extra_dims)
M, # number of arrays
Arrays <: Tuple{Vararg{PencilArray,M}},
DataVector <: AbstractVector{T},
} <: AbstractManyPencilArray{N, M}
data :: DataVector
arrays :: Arrays
function ManyPencilArray{T}(
init, pfirst::Pencil{Np}, pens::Vararg{Pencil{Np}};
extra_dims::Dims=(),
) where {Np,T}
pencils = (pfirst, pens...)
BufType = typeof_array(pfirst)
@assert all(p -> typeof_array(p) === BufType, pens)
data_length = max(length.(pencils)...) * prod(extra_dims)
data = BufType{T}(init, data_length)
arrays = _make_arrays(data, extra_dims, pencils...)
N = Np + length(extra_dims)
M = length(pencils)
new{T, N, M, typeof(arrays), typeof(data)}(data, arrays)
end
end
function _make_arrays(data::DenseVector{T}, extra_dims::Dims, p::Pencil,
pens::Vararg{Pencil}) where {T}
dims = (size_local(p, MemoryOrder())..., extra_dims...)
n = prod(dims)
@assert n == length_local(p) * prod(extra_dims)
arr = unsafe_wrap(typeof_array(data), pointer(data), dims) # fixes efficiency issues with vec = view(data, Base.OneTo(n))
A = PencilArray(p, arr)
(A, _make_arrays(data, extra_dims, pens...)...)
end
_make_arrays(::DenseVector, ::Dims) = ()
Base.eltype(::ManyPencilArray{T}) where {T} = T
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 531 | using Random
function Random.rand!(rng::AbstractRNG, u::PencilArray)
rand!(rng, parent(u))
u
end
# This is to workaround scalar indexing issue with GPUArrays (or at least with JLArrays).
# GPUArrays.jl defines rand!(::AbstractRNG, ::AnyGPUArray) but not rand!(::AnyGPUArray),
# which ends up calling a generic rand! implementation in Julia base.
Random.rand!(u::PencilArray) = rand!(Random.default_rng(), u)
function Random.randn!(rng::AbstractRNG, u::PencilArray, args...)
randn!(rng, parent(u), args...)
u
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1509 | # We force specialisation on each function to avoid (tiny) allocations.
#
# Note that, for mapreduce, we can assume that the operation is commutative,
# which allows MPI to freely reorder operations.
#
# We also define mapfoldl (and mapfoldr) for completeness, even though the global
# operations are not strictly performed from left to right (or from right to
# left), since each process locally reduces first.
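# For instance, a global mean over all processes can be computed as
#
#     m = sum(u) / length_global(u)
#
# since `sum(u)` reduces locally first, then across processes via MPI.Allreduce.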
for (func, commutative) in [:mapreduce => true, :mapfoldl => false, :mapfoldr => false]
@eval function Base.$func(
f::F, op::OP, u::PencilArray, etc::Vararg{PencilArray}; kws...,
) where {F, OP}
foreach(v -> _check_compatible_arrays(u, v), etc)
comm = get_comm(u)
ups = map(parent, (u, etc...))
rlocal = $func(f, op, ups...; kws...)
op_mpi = MPI.Op(op, typeof(rlocal); iscommutative = $commutative)
MPI.Allreduce(rlocal, op_mpi, comm)
end
# Make things work with zip(u::PencilArray, v::PencilArray, ...)
@eval function Base.$func(
f::F, op::OP, z::Iterators.Zip{<:Tuple{Vararg{PencilArray}}}; kws...,
) where {F, OP}
g(args...) = f(args)
$func(g, op, z.is...; kws...)
end
end
function Base.any(f::F, u::PencilArray) where {F <: Function}
xlocal = any(f, parent(u)) :: Bool
MPI.Allreduce(xlocal, |, get_comm(u))
end
function Base.all(f::F, u::PencilArray) where {F <: Function}
xlocal = all(f, parent(u)) :: Bool
MPI.Allreduce(xlocal, &, get_comm(u))
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 1875 | """
length_local(x::PencilArray)
Get linear length of the local data held by a `PencilArray`.
"""
length_local(x::Union{<:PencilArray, <:GlobalPencilArray}) = length(parent(x))
"""
length_global(x::PencilArray)
Get linear length of the global data held by a `PencilArray`.
"""
length_global(x::Union{<:PencilArray, <:GlobalPencilArray}) = prod(size_global(x))
"""
size(x::PencilArray)
Return the *local* dimensions of a `PencilArray` in logical order.
Defined as `size_local(x, LogicalOrder())`.
"""
Base.size(x::Union{<:PencilArray, <:GlobalPencilArray}) =
size_local(x, LogicalOrder())
"""
length(x::PencilArray)
Get the *local* number of elements stored in the `PencilArray`.
Equivalent to `length_local(x)`.
"""
Base.length(x::Union{<:PencilArray, <:GlobalPencilArray}) = length(parent(x))
"""
size_local(x::PencilArray, [order = LogicalOrder()])
size_local(x::PencilArrayCollection, [order = LogicalOrder()])
Local dimensions of the data held by the `PencilArray`.
See also [`size_local(::Pencil)`](@ref).
"""
size_local(x::MaybePencilArrayCollection, args...; kwargs...) =
(size_local(pencil(x), args...; kwargs...)..., extra_dims(x)...)
size_local(x::GlobalPencilArray, args...; kwargs...) =
size_local(parent(x), args...; kwargs...)
"""
size_global(x::PencilArray, [order = LogicalOrder()])
size_global(x::PencilArrayCollection, [order = LogicalOrder()])
Global dimensions associated to the given array.
By default, the logical dimensions of the dataset are returned.
If `order = LogicalOrder()`, this is the same as `size(x)`.
See also [`size_global(::Pencil)`](@ref).
"""
size_global(x::MaybePencilArrayCollection, args...; kw...) =
(size_global(pencil(x), args...; kw...)..., extra_dims(x)...)
size_global(x::GlobalPencilArray, args...; kwargs...) =
size_global(parent(x), args...; kwargs...)
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 768 | module LocalGrids
import ..Permutations: permutation
using ..PermutedIndices
using Base.Broadcast
using Base: @propagate_inbounds
using StaticPermutations
export localgrid
"""
AbstractLocalGrid{N, Perm <: AbstractPermutation}
Abstract type specifying the local portion of an `N`-dimensional grid.
"""
abstract type AbstractLocalGrid{N, Perm <: AbstractPermutation} end
Base.ndims(::Type{<:AbstractLocalGrid{N}}) where {N} = N
Base.ndims(g::AbstractLocalGrid) = ndims(typeof(g))
permutation(g::AbstractLocalGrid) = getfield(g, :perm)
"""
LocalGrids.components(g::LocalRectilinearGrid) -> (xs, ys, zs, ...)
Get coordinates associated to the current MPI process.
"""
components(g::AbstractLocalGrid) = getfield(g, :coords)
include("rectilinear.jl")
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 5595 | """
LocalRectilinearGrid{N, Perm} <: AbstractLocalGrid{N, Perm}
Defines the local portion of a rectilinear grid in `N` dimensions.
A rectilinear grid is represented by a set of orthogonal coordinates `(x, y, z, ...)`.
"""
struct LocalRectilinearGrid{
N,
Perm <: AbstractPermutation,
LocalCoords <: Tuple{Vararg{AbstractVector, N}},
} <: AbstractLocalGrid{N, Perm}
coords :: LocalCoords # in logical order
perm :: Perm
end
"""
localgrid((xs, ys, ...), perm = NoPermutation()) -> LocalRectilinearGrid
Create a [`LocalRectilinearGrid`](@ref) from a set of orthogonal coordinates
`(xs, ys, ...)`, where each element is an `AbstractVector`.
Optionally, one can pass a static permutation (as in `Permutation(2, 1, 3)`) to
change the order in which the coordinates are iterated.
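A small sketch:

```julia
xs = range(0, 1; length = 16)
ys = range(0, 2; length = 32)
grid = localgrid((xs, ys))
grid[3, 4] == (xs[3], ys[4])  # coordinates at a grid point
```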
"""
function localgrid(
coords::Tuple{Vararg{AbstractVector}},
perm::AbstractPermutation = NoPermutation(),
)
LocalRectilinearGrid(coords, perm)
end
# Axes in logical order
Base.axes(g::LocalRectilinearGrid) = map(xs -> axes(xs, 1), components(g))
# These are needed for `collect`
Base.length(g::LocalRectilinearGrid) = prod(xs -> length(xs), components(g))
@generated function Base.eltype(
::Type{<:LocalRectilinearGrid{N, P, VecTuple}}
) where {N, P, VecTuple}
types = Tuple{map(eltype, VecTuple.parameters)...}
:( $types )
end
# We define this wrapper type to be able to control broadcasting on separate
# grid components (x, y, ...).
struct RectilinearGridComponent{
i, # dimension of this coordinate
T,
FullGrid <: LocalRectilinearGrid, # the full local grid this component belongs to
Coords <: AbstractVector{T},
} <: AbstractVector{T}
grid :: FullGrid
data :: Coords
@inline function RectilinearGridComponent(
g::LocalRectilinearGrid, ::Val{i},
) where {i}
data = components(g)[i]
new{i, eltype(data), typeof(g), typeof(data)}(g, data)
end
end
function Base.show(io::IO, xs::RectilinearGridComponent{i}) where {i}
print(io, "Component i = $i of ")
summary(io, xs.grid)
print(io, ": ", xs.data)
nothing
end
Base.IndexStyle(::Type{<:RectilinearGridComponent}) = IndexLinear()
Base.parent(xs::RectilinearGridComponent) = xs.data
Base.size(xs::RectilinearGridComponent) = size(parent(xs))
@propagate_inbounds Base.getindex(xs::RectilinearGridComponent, i) = parent(xs)[i]
@inline Base.getindex(g::LocalRectilinearGrid, i::Val) =
RectilinearGridComponent(g, i)
@inline Base.getindex(g::LocalRectilinearGrid, i::Int) = g[Val(i)]
@propagate_inbounds function Base.getindex(
g::LocalRectilinearGrid{N}, inds::Vararg{Integer, N},
) where {N}
@boundscheck checkbounds(CartesianIndices(axes(g)), inds...)
map((xs, i) -> @inbounds(xs[i]), components(g), inds)
end
@propagate_inbounds Base.getindex(g::LocalRectilinearGrid, I::CartesianIndex) =
g[Tuple(I)...]
@inline function Base.CartesianIndices(g::LocalRectilinearGrid)
perm = permutation(g)
axs = perm * axes(g) # axes in memory order
inds = CartesianIndices(axs) # each index inds[i] is in memory order
PermutedCartesianIndices(inds, perm)
end
# This is similar to definition of pairs(::IndexCartesian, ::AbstractArray)
# TODO do the same for IndexLinear?
# (not that obvious, because getindex(g, ::Int) already has a different meaning...)
Base.pairs(::IndexCartesian, g::LocalRectilinearGrid) =
Base.Pairs(g, CartesianIndices(g))
# This is to avoid default definition in base/abstractdict.jl, which uses
# generators and can be much slower.
Base.pairs(g::LocalRectilinearGrid) = pairs(IndexCartesian(), g)
# This is used by eachindex(::LocalRectilinearGrid)
@inline Base.keys(g::LocalRectilinearGrid) = CartesianIndices(g)
@inline function Base.iterate(g::LocalRectilinearGrid)
perm = permutation(g)
coords_mem = perm * components(g) # iterate in memory order
# Create and advance actual iterator
iter = Iterators.product(coords_mem...)
stuff = iterate(iter)
stuff === nothing && return nothing
x⃗_mem, next = stuff
x⃗_log = perm \ x⃗_mem # current coordinate in logical order (x, y, z, ...)
x⃗_log, (iter, next)
end
@inline function Base.iterate(g::LocalRectilinearGrid, state)
perm = permutation(g)
iter = first(state)
stuff = iterate(state...)
stuff === nothing && return nothing
x⃗_mem, next = stuff
x⃗_log = perm \ x⃗_mem # current coordinate in logical order (x, y, z, ...)
x⃗_log, (iter, next)
end
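# For instance, in a 3D grid `g`, broadcasting with `g.y` behaves as with a
# 1×Ny×1 array (up to the index permutation), so that expressions like
# `u .= f.(g.x, g.y, g.z)` evaluate `f` over the whole local grid.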
function Broadcast.broadcastable(xs::RectilinearGridComponent{i}) where {i}
g = xs.grid
N = ndims(g)
perm = permutation(g)
data = xs.data
dims = ntuple(j -> j == i ? length(data) : 1, Val(N))
reshape(xs.data, perm * dims)
end
function Base.show(io::IO, g::LocalRectilinearGrid{N}) where {N}
print(io, nameof(typeof(g)), "{$N} with ")
perm = permutation(g)
isidentity(perm) || print(io, perm, " and ")
print(io, "coordinates:")
foreach(enumerate(components(g))) do (n, xs)
print(io, "\n ($n) $xs")
end
nothing
end
function Base.summary(io::IO, g::LocalRectilinearGrid)
N = ndims(g)
print(io, nameof(typeof(g)), "{$N}")
nothing
end
# For convenience when working with up to three dimensions.
@inline function Base.getproperty(g::LocalRectilinearGrid, name::Symbol)
if ndims(g) ≥ 1 && name === :x
g[Val(1)]
elseif ndims(g) ≥ 2 && name === :y
g[Val(2)]
elseif ndims(g) ≥ 3 && name === :z
g[Val(3)]
else
getfield(g, name)
end
end
module PencilIO
using ..PencilArrays
import ..PencilArrays: MaybePencilArrayCollection, collection_size, collection
using MPI
using StaticArrays: SVector
using TimerOutputs
"""
ParallelIODriver
Abstract type specifying a parallel I/O driver.
"""
abstract type ParallelIODriver end
"""
open([f::Function], driver::ParallelIODriver, filename, comm::MPI.Comm; keywords...)
Open parallel file using the chosen driver.
## Keyword arguments
Supported keyword arguments include:
- open mode arguments: `read`, `write`, `create`, `append` and `truncate`.
These have the same behaviour and defaults as `Base.open`.
Some of them may be ignored by the chosen driver (see driver-specific docs).
- as in [`MPI.File.open`](https://juliaparallel.github.io/MPI.jl/latest/io/#MPI.File.open),
other arguments are passed via an `MPI.Info` object.
Note that driver-specific options (such as HDF5 property lists) must be passed
to each driver's constructor.
## See also
- [`open(::MPIIODriver)`](@ref) for MPI-IO specific options
- [`open(::PHDF5Driver)`](@ref) for HDF5 specific options
"""
function Base.open(::ParallelIODriver) end
function Base.open(f::Function, driver::ParallelIODriver, args...; kw...)
fid = open(driver, args...; kw...)
try
f(fid)
finally
close(fid)
end
end
# Metadata to be attached to each dataset (as HDF5 attributes or in an external
# metadata file).
function metadata(x::MaybePencilArrayCollection)
pen = pencil(x)
topo = topology(x)
edims = extra_dims(x) # this may be an empty tuple, with no type information
(
permutation = Tuple(permutation(x)),
extra_dims = SVector{length(edims),Int}(edims),
decomposed_dims = decomposition(pen),
process_dims = size(topo),
)
end
function keywords_to_open(; read=nothing, write=nothing, create=nothing,
truncate=nothing, append=nothing, other_kws...)
flags = Base.open_flags(read=read, write=write, create=create,
truncate=truncate, append=append)
flags, other_kws
end
include("mpi_io.jl")
include("hdf5.jl") # actual implementation is in ../../ext/PencilArraysHDF5Ext.jl
end
# Note: the actual implementation is in the PencilArraysHDF5Ext.jl package extension.
export PHDF5Driver
"""
PHDF5Driver(; fcpl = HDF5.FileCreateProperties(), fapl = HDF5.FileAccessProperties())
Parallel HDF5 driver using the HDF5.jl package.
HDF5 file creation and file access property lists may be specified via the
`fcpl` and `fapl` keyword arguments respectively.
Note that the MPIO file access property list does not need to be set, as this is
done automatically by this driver when the file is opened.
"""
struct PHDF5Driver{
FileCreateProperties, # type not known, since HDF5 hasn't been loaded at this point...
FileAccessProperties,
} <: ParallelIODriver
fcpl :: FileCreateProperties
fapl :: FileAccessProperties
# "Private" constructor, called in package extension (PencilArraysHDF5Ext.jl).
global _PHDF5Driver(a, b) = new{typeof(a), typeof(b)}(a, b)
end
"""
hdf5_has_parallel() -> Bool
Returns `true` if the loaded HDF5 libraries support MPI-IO.
This is exactly the same as `HDF5.has_parallel()`, and is left here for
compatibility with previous versions.
"""
function hdf5_has_parallel end
"""
open([f::Function], driver::PHDF5Driver, filename, comm::MPI.Comm; keywords...)
Open parallel file using the Parallel HDF5 driver.
See [`open(::ParallelIODriver)`](@ref) for common options for all drivers.
Driver-specific options may be passed via the `driver` argument. See
[`PHDF5Driver`](@ref) for details.
"""
function Base.open(::PHDF5Driver) end
"""
setindex!(
g::Union{HDF5.File, HDF5.Group}, x::MaybePencilArrayCollection,
name::AbstractString; chunks = false, collective = true, prop_lists...,
)
Write [`PencilArray`](@ref) or [`PencilArrayCollection`](@ref) to parallel HDF5
file.
For performance reasons, the memory layout of the data is conserved. In other
words, if the dimensions of a `PencilArray` are permuted in memory, then the
data is written in permuted form.
In the case of a `PencilArrayCollection`, each array of the collection is written
as a single component of a higher-dimension dataset.
# Optional arguments
- if `chunks = true`, data is written in chunks, with roughly one chunk
per MPI process. This may (or may not) improve performance in parallel
filesystems.
- if `collective = true`, the dataset is written collectively. This is
usually recommended for performance.
- additional property lists may be specified by key-value pairs in
`prop_lists`, following the [HDF5.jl
syntax](https://juliaio.github.io/HDF5.jl/stable/#Passing-parameters).
These property lists take precedence over keyword arguments.
For instance, if the `dxpl_mpio = :collective` option is passed,
then the value of the `collective` argument is ignored.
# Property lists
Property lists are passed to
[`h5d_create`](https://portal.hdfgroup.org/display/HDF5/H5D_CREATE2)
and [`h5d_write`](https://portal.hdfgroup.org/display/HDF5/H5D_WRITE).
The following property types are recognised:
- [link creation properties](https://portal.hdfgroup.org/display/HDF5/Attribute+and+Link+Creation+Properties),
- [dataset creation properties](https://portal.hdfgroup.org/display/HDF5/Dataset+Creation+Properties),
- [dataset access properties](https://portal.hdfgroup.org/display/HDF5/Dataset+Access+Properties),
- [dataset transfer properties](https://portal.hdfgroup.org/display/HDF5/Dataset+Transfer+Properties).
# Example
Open a parallel HDF5 file and write some `PencilArray`s to the file:
```julia
pencil = Pencil(#= ... =#)
u = PencilArray{Float64}(undef, pencil)
v = similar(u)
# [fill the arrays with interesting values...]
comm = get_comm(u)
open(PHDF5Driver(), "filename.h5", comm, write=true) do ff
ff["u", chunks=true] = u
ff["uv"] = (u, v) # this is a two-component PencilArrayCollection (assuming equal dimensions of `u` and `v`)
end
```
"""
function Base.setindex!(::PHDF5Driver) end # this is just for generating the documentation
"""
read!(g::Union{HDF5.File, HDF5.Group}, x::MaybePencilArrayCollection,
name::AbstractString; collective=true, prop_lists...)
Read [`PencilArray`](@ref) or [`PencilArrayCollection`](@ref) from parallel HDF5
file.
See [`setindex!`](@ref) for details on optional arguments.
# Property lists
Property lists are passed to
[`h5d_open`](https://portal.hdfgroup.org/display/HDF5/H5D_OPEN2)
and [`h5d_read`](https://portal.hdfgroup.org/display/HDF5/H5D_READ).
The following property types are recognised:
- [dataset access properties](https://portal.hdfgroup.org/display/HDF5/Dataset+Access+Properties),
- [dataset transfer properties](https://portal.hdfgroup.org/display/HDF5/Dataset+Transfer+Properties).
# Example
Open a parallel HDF5 file and read some `PencilArray`s:
```julia
pencil = Pencil(#= ... =#)
u = PencilArray{Float64}(undef, pencil)
v = similar(u)
comm = get_comm(u)
info = MPI.Info()
open(PHDF5Driver(), "filename.h5", comm, read=true) do ff
read!(ff, u, "u")
read!(ff, (u, v), "uv")
end
```
"""
function Base.read!(::PHDF5Driver) end
export MPIIODriver
import JSON3, VersionParsing
# Version of internal MPIIO format.
# If the version is updated, it should match the upcoming PencilArrays version.
const MPIIO_VERSION = v"0.9.4"
const IS_LITTLE_ENDIAN = ENDIAN_BOM == 0x04030201
"""
MPIIODriver(; sequential = false, uniqueopen = false, deleteonclose = false)
MPI-IO driver using the MPI.jl package.
Keyword arguments are passed to
[`MPI.File.open`](https://juliaparallel.github.io/MPI.jl/latest/io/#MPI.File.open).
This driver writes binary data along with a JSON file containing metadata.
When reading data, this JSON file is expected to be present along with the raw
data file.
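# Example

A usage sketch (assuming `u` is an existing [`PencilArray`](@ref) and
`comm = get_comm(u)`):

```julia
open(MPIIODriver(), "data.bin", comm; write = true) do ff
    ff["u"] = u  # writes raw data to data.bin and metadata to data.bin.json
end
```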
"""
Base.@kwdef struct MPIIODriver <: ParallelIODriver
sequential :: Bool = false
uniqueopen :: Bool = false
deleteonclose :: Bool = false
end
const MetadataDict = Dict{Symbol,Any}
const DatasetKey = Symbol
const DatasetDict = Dict{DatasetKey,Any}
"""
MPIFile
Wraps a `MPI.FileHandle`, also including file position information and metadata.
File position is updated when reading and writing data, and is independent of
the individual and shared file pointers defined by MPI.
"""
mutable struct MPIFile
file :: MPI.FileHandle
comm :: MPI.Comm
filename :: String
meta :: MetadataDict
position :: Int # file position in bytes
write_mode :: Bool
MPIFile(file, comm, filename, meta; write) =
new(file, comm, filename, meta, 0, write)
end
function MPIFile(comm::MPI.Comm, filename; kws...)
flags, other_kws = keywords_to_open(; kws...)
meta = if flags.write && !flags.append
mpiio_init_metadata()
else
metafile = filename_meta(filename)
if isfile(metafile)
mpiio_load_metadata(metafile)
else
# Metadata file not found.
# Assuming file contains a single dataset.
MetadataDict() # empty dict
end
end
file = MPIFile(
MPI.File.open(comm, filename; kws...),
comm, filename, meta, write=flags.write,
)
if flags.append
# Synchronise position in file.
pos = MPI.File.get_position_shared(parent(file))
seek(file, pos)
end
file
end
mpiio_init_metadata() = MetadataDict(
:driver => (type = "MPIIODriver", version = string(MPIIO_VERSION)),
:datasets => DatasetDict(),
)
function mpiio_load_metadata(filename)
isfile(filename) || error("metadata file not found: $filename")
meta = open(JSON3.read, filename, "r")
# Convert from specific JSON3 type to Dict, so that datasets can be appended.
MetadataDict(
:driver => meta.driver,
:datasets => DatasetDict(meta.datasets),
)
end
function Base.close(ff::MPIFile)
if should_write_metadata(ff)
write_metadata(ff, metadata(ff))
end
close(parent(ff))
end
function write_metadata(ff, meta)
MPI.Comm_rank(ff.comm) == 0 || return
open(filename_meta(ff), "w") do io
JSON3.pretty(io, JSON3.write(meta))
write(io, '\n')
end
nothing
end
filename_meta(fname) = string(fname, ".json")
filename_meta(ff::MPIFile) = filename_meta(get_filename(ff))
metadata(ff::MPIFile) = ff.meta
should_write_metadata(ff::MPIFile) = ff.write_mode
get_filename(ff::MPIFile) = ff.filename
Base.parent(ff::MPIFile) = ff.file
Base.position(ff::MPIFile) = ff.position
Base.skip(ff::MPIFile, offset) = ff.position += offset
Base.seek(ff::MPIFile, pos) = ff.position = pos
mpiio_version(ff::MPIFile) = mpiio_version(metadata(ff))
mpiio_version(meta::MetadataDict) =
VersionParsing.vparse(string(meta[:driver][:version]))
"""
open([f::Function], driver::MPIIODriver, filename, comm::MPI.Comm; keywords...)
Open parallel file using the MPI-IO driver.
See [`open(::ParallelIODriver)`](@ref) for common options for all drivers.
Driver-specific options may be passed via the `driver` argument. See
[`MPIIODriver`](@ref) for details.
## Driver notes
- the `truncate` keyword is ignored.
"""
function Base.open(::MPIIODriver) end
Base.open(D::MPIIODriver, filename::AbstractString, comm::MPI.Comm; keywords...) =
MPIFile(
comm, filename;
sequential=D.sequential, uniqueopen=D.uniqueopen,
deleteonclose=D.deleteonclose, keywords...,
)
"""
setindex!(file::MPIFile, x, name; chunks = false, collective = true, infokws...)
Write [`PencilArray`](@ref) to binary file using MPI-IO.
The input `x` can be a `PencilArray` or a tuple of `PencilArray`s.
# Optional arguments
- if `chunks = true`, data is written in contiguous blocks, with one block per
process.
Otherwise, each process writes to discontiguous sections of disk, using
`MPI.File.set_view!` and custom datatypes.
  Note that discontiguous I/O (the default) is more convenient, as it allows
  reading back the data using a different number or distribution of MPI processes.
- if `collective = true`, the dataset is written collectively. This is
usually recommended for performance.
- when writing discontiguous blocks, additional keyword arguments are passed via
an `MPI.Info` object to `MPI.File.set_view!`. This is ignored if `chunks = true`.
"""
function Base.setindex!(
ff::MPIFile, x::MaybePencilArrayCollection, name;
collective=true, chunks=false, kw...,
)
file = parent(ff)
offset = position(ff)
for u in collection(x)
# TODO write all collection components at once (should be easier in the
# discontiguous case)
if chunks
write_contiguous(file, u; offset=offset, collective=collective, kw...)
else
write_discontiguous(file, u; offset=offset, collective=collective, kw...)
end
offset += sizeof_global(u)
end
add_metadata(ff, x, name, chunks)
skip(ff, sizeof_global(x))
x
end
eltype_collection(x::PencilArray) = eltype(x)
eltype_collection(x::PencilArrayCollection) = eltype(first(x))
function add_metadata(file::MPIFile, x, name, chunks::Bool)
meta = metadata(file)
size_col = collection_size(x)
size_log = size_global(x, LogicalOrder())
size_mem = size_global(x, MemoryOrder())
meta[:datasets][DatasetKey(name)] = (
metadata(x)...,
julia_endian_bom = repr(ENDIAN_BOM), # write it as a string such as 0x04030201
little_endian = IS_LITTLE_ENDIAN,
element_type = eltype_collection(x),
dims_logical = (size_log..., size_col...),
dims_memory = (size_mem..., size_col...),
chunks = chunks,
offset_bytes = position(file),
size_bytes = sizeof_global(x),
)
nothing
end
"""
read!(file::MPIFile, x, name; collective = true, infokws...)
Read binary data from an MPI-IO stream, filling in [`PencilArray`](@ref).
The output `x` can be a `PencilArray` or a tuple of `PencilArray`s.
See [`setindex!`](@ref setindex!(::MPIFile)) for details on keyword arguments.
## Reading files without JSON metadata
It is also possible to read datasets from binary files in the absence of JSON metadata.
This will be typically the case of binary files created by a separate application.
In that case, the `name` argument must *not* be passed.
If the file contains more than one dataset, one can optionally pass an `offset`
keyword argument to manually select the offset of the dataset (in bytes) from the
beginning of the file.
The signature of this metadata-less variant looks like:
read!(file::MPIFile, x; offset = 0, collective = true, infokws...)
Note that, since there is no metadata, this variant blindly assumes that the
dimensions and element type of `x` match those of the dataset in the file.
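For instance, the following sketch reads a single dataset matching the
dimensions and element type of `u` (note that no dataset name is passed):

```julia
open(MPIIODriver(), "data.bin", comm; read = true) do ff
    read!(ff, u)  # metadata file not required
end
```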
"""
function Base.read!(
ff::MPIFile, x::MaybePencilArrayCollection, name;
kws...,
)
if isempty(metadata(ff)) # metadata file wasn't found
metafile = filename_meta(ff)
throw(ArgumentError(
"""
metadata file not found: $metafile.
Try calling the `read!(ff, x)` variant (without the third argument)
to attempt reading the first dataset of the file.
"""
))
else
# Get metadata associated to dataset.
meta = get(metadata(ff)[:datasets], DatasetKey(name), nothing)
meta === nothing && error("dataset '$name' not found")
version = mpiio_version(ff)
check_metadata(x, meta, version)
offset = meta.offset_bytes :: Int
chunks = meta.chunks :: Bool
chunks && check_read_chunks(x, meta.process_dims, name)
end
_read_mpiio!(ff, x, offset, chunks; kws...)
end
function Base.read!(ff::MPIFile, x::MaybePencilArrayCollection; offset = 0, kws...)
# This variant should be called when metadata file is absent.
# This assumes that the file has a single contiguous dataset with the same
# dimensions and type of `x`.
# It is also assumed that the endianness is the same that of the system.
filename = get_filename(ff)
ndata = sizeof_global(x)
nfile = filesize(filename)
if ndata > nfile
error("attempt to read file without JSON metadata failed: the file size ($nfile) is inferior to the expected dataset size ($ndata). Filename: $filename")
end
chunks = false
    _read_mpiio!(ff, x, offset, chunks; kws...)
end
function _read_mpiio!(ff, x, offset, chunks; collective = true, kw...)
file = parent(ff)
for u in collection(x)
if chunks
read_contiguous!(file, u; offset=offset, collective=collective, kw...)
else
read_discontiguous!(file, u; offset=offset, collective=collective, kw...)
end
offset += sizeof_global(u)
end
x
end
function check_metadata(x, meta, version)
T = eltype_collection(x)
file_eltype = meta.element_type
if string(T) != file_eltype
error("incompatible type of file and array: $file_eltype ≠ $T")
end
sz = (size_global(x, MemoryOrder())..., collection_size(x)...)
file_dims = Tuple(meta.dims_memory) :: typeof(sz)
if sz !== file_dims
error("incompatible dimensions of dataset in file and array: $file_dims ≠ $sz")
end
file_sizeof = meta.size_bytes
@assert sizeof_global(x) == file_sizeof
file_bom = if version < v"0.9.4"
# julia_endian_bom key didn't exist; assume ENDIAN_BOM
ENDIAN_BOM
else
parse(typeof(ENDIAN_BOM), meta.julia_endian_bom)
end
if file_bom != ENDIAN_BOM
error(
"file was not written with the same native endianness of the current system." *
" Reading a non-native endianness is not yet supported."
)
end
nothing
end
function check_read_chunks(x, pdims_file, name)
pdims = size(topology(x))
if length(pdims) != length(pdims_file) || any(pdims .!= pdims_file)
error("dataset '$name' was written in chunks with a different MPI topology" *
" ($pdims ≠ $pdims_file)")
end
nothing
end
function write_discontiguous(ff::MPI.FileHandle, x::PencilArray;
offset, collective, infokws...)
to = timer(pencil(x))
@timeit_debug to "Write MPI-IO discontiguous" begin
set_view!(ff, x, offset; infokws...)
A = parent(x)
if collective
MPI.File.write_all(ff, A)
else
MPI.File.write(ff, A)
end
end
nothing
end
function read_discontiguous!(ff::MPI.FileHandle, x::PencilArray;
offset, collective, infokws...)
to = timer(pencil(x))
@timeit_debug to "Read MPI-IO discontiguous" begin
set_view!(ff, x, offset; infokws...)
A = parent(x)
if collective
MPI.File.read_all!(ff, A)
else
MPI.File.read!(ff, A)
end
end
end
function set_view!(ff, x::PencilArray, offset; infokws...)
etype = MPI.Datatype(eltype(x))
filetype = create_discontiguous_datatype(x, MemoryOrder()) # TODO cache datatype?
datarep = "native"
MPI.File.set_view!(ff, offset, etype, filetype, datarep; infokws...)
nothing
end
function create_discontiguous_datatype(x::PencilArray, order)
sizes = size_global(x, order)
subsizes = size_local(x, order)
offsets = map(r -> first(r) - 1, range_local(x, order))
oldtype = MPI.Datatype(eltype(x))
dtype = MPI.Types.create_subarray(sizes, subsizes, offsets, oldtype)
MPI.Types.commit!(dtype)
dtype
end
function write_contiguous(ff::MPI.FileHandle, x::PencilArray;
offset, collective, infokws...)
to = timer(pencil(x))
@timeit_debug to "Write MPI-IO contiguous" begin
offset += mpi_io_offset(x)
A = parent(x)
if collective
MPI.File.write_at_all(ff, offset, A)
else
MPI.File.write_at(ff, offset, A)
end
end
nothing
end
function read_contiguous!(ff::MPI.FileHandle, x::PencilArray;
offset, collective, infokws...)
to = timer(pencil(x))
@timeit_debug to "Read MPI-IO contiguous" begin
offset += mpi_io_offset(x)
A = parent(x)
if collective
MPI.File.read_at_all!(ff, offset, A)
else
MPI.File.read_at!(ff, offset, A)
end
end
nothing
end
function mpi_io_offset(x::PencilArray)
topo = topology(pencil(x))
# Linear index of this process in the topology.
# (TODO This should be stored in MPITopology...)
n = LinearIndices(topo)[coords_local(topo)...]
off = 0
for m = 1:(n - 1)
r = range_remote(x, m)
off += length(CartesianIndices(r)) # length of data held by remote process
end
T = eltype(x)
off * sizeof(T)
end
module MPITopologies
export MPITopology
export get_comm, coords_local
import MPI
"""
MPITopology{N}
Describes an N-dimensional Cartesian MPI decomposition topology.
---
MPITopology(comm::MPI.Comm, pdims::Dims{N})
Create N-dimensional MPI topology information.
The `pdims` tuple specifies the number of MPI processes to put in every
dimension of the topology. The product of its values must be equal to the number
of processes in communicator `comm`.
# Example
Divide 2D topology into 4×2 blocks:
```julia
comm = MPI.COMM_WORLD
@assert MPI.Comm_size(comm) == 8
topology = MPITopology(comm, (4, 2))
```
---
MPITopology(comm::MPI.Comm, Val(N))
Convenient `MPITopology` constructor defining an `N`-dimensional decomposition
of data among all MPI processes in communicator.
The number of divisions along each of the `N` dimensions is automatically
determined by a call to [`MPI.Dims_create`](https://juliaparallel.org/MPI.jl/stable/reference/topology/#MPI.Dims_create).
# Example
Create 2D decomposition grid:
```julia
comm = MPI.COMM_WORLD
topology = MPITopology(comm, Val(2))
```
---
MPITopology{N}(comm_cart::MPI.Comm)
Create topology information from MPI communicator with Cartesian topology
(typically constructed using [`MPI.Cart_create`](https://juliaparallel.org/MPI.jl/stable/reference/topology/#MPI.Cart_create)).
The topology must have dimension `N`.
# Example
Divide 2D topology into 4×2 blocks:
```julia
comm = MPI.COMM_WORLD
@assert MPI.Comm_size(comm) == 8
pdims = (4, 2)
comm_cart = MPI.Cart_create(comm, pdims)
topology = MPITopology{2}(comm_cart)
```
"""
struct MPITopology{N}
# MPI communicator with Cartesian topology.
comm :: MPI.Comm
# Subcommunicators associated to the decomposed directions.
subcomms :: NTuple{N,MPI.Comm}
# Number of MPI processes along the decomposed directions.
dims :: Dims{N}
# Coordinates of the local process in the Cartesian topology.
# Indices are >= 1.
coords_local :: Dims{N}
# Maps Cartesian coordinates to MPI ranks in the `comm` communicator.
ranks :: Array{Int,N}
# Maps Cartesian coordinates to MPI ranks in each of the `subcomms`
# subcommunicators.
subcomm_ranks :: NTuple{N,Vector{Int}}
function MPITopology{N}(comm_cart::MPI.Comm) where {N}
# Get dimensions of MPI topology.
# This will fail if comm_cart doesn't have Cartesian topology!
Ndims = MPI.Cartdim_get(comm_cart)
if Ndims != N
throw(ArgumentError(
"Cartesian communicator must have $N dimensions."))
end
dims, coords_local = let
dims_vec, _, coords_vec = MPI.Cart_get(comm_cart)
coords_vec .+= 1 # switch to one-based indexing
map(X -> ntuple(n -> Int(X[n]), Val(N)), (dims_vec, coords_vec))
end
subcomms = create_subcomms(Val(N), comm_cart)
@assert MPI.Comm_size.(subcomms) === dims
ranks = get_cart_ranks(Val(N), comm_cart)
@assert ranks[coords_local...] == MPI.Comm_rank(comm_cart)
subcomm_ranks = get_cart_ranks_subcomm.(subcomms)
new{N}(comm_cart, subcomms, dims, coords_local, ranks, subcomm_ranks)
end
end
function Base.:(==)(A::MPITopology, B::MPITopology)
MPI.Comm_compare(get_comm(A), get_comm(B)) ∈ (MPI.IDENT, MPI.CONGRUENT)
end
function MPITopology(comm::MPI.Comm, dims::Dims{N}) where {N}
check_topology(comm, dims)
# Create Cartesian communicator.
periodic = map(_ -> false, dims) # this is the default
comm_cart = MPI.Cart_create(comm, dims; periodic, reorder = false)
MPITopology{N}(comm_cart)
end
function MPITopology(comm::MPI.Comm, ::Val{N}) where {N}
pdims = dims_create(comm, Val(N))
MPITopology(comm, pdims)
end
dims_create(comm::MPI.Comm, n) = dims_create(MPI.Comm_size(comm), n)
function dims_create(Nproc::Integer, ::Val{N}) where {N}
pdims_in = ntuple(_ -> zero(Cint), Val(N))
pdims = MPI.Dims_create(Nproc, pdims_in) # call lower-level MPI wrapper
ntuple(d -> Int(pdims[d]), Val(N)) :: Dims{N}
end
# Check that `pdims` argument is compatible with the number of processes in
# communicator. This is done to avoid fatal MPI error in MPI.Cart_create. Error
# message is adapted from MPICH.
function check_topology(comm, pdims)
Nproc = MPI.Comm_size(comm)
Ntopo = prod(pdims)
# Note that MPI_Cart_create allows Nproc > Ntopo, setting some processes as
# MPI_COMM_NULL. We disallow that here.
if Nproc != Ntopo
throw(ArgumentError(
"size of communicator ($Nproc) is different from size of Cartesian topology ($Ntopo)"))
end
nothing
end
function Base.show(io::IO, t::MPITopology)
M = ndims(t)
s = join(size(t), '×')
print(io, "MPI topology: $(M)D decomposition ($s processes)")
nothing
end
"""
ndims(t::MPITopology)
Get dimensionality of Cartesian topology.
"""
Base.ndims(t::MPITopology{N}) where N = N
"""
size(t::MPITopology)
Get dimensions of Cartesian topology.
"""
Base.size(t::MPITopology) = t.dims
"""
length(t::MPITopology)
Get total size of Cartesian topology (i.e. total number of MPI processes).
"""
Base.length(t::MPITopology) = prod(size(t))
"""
coords_local(t::MPITopology)
Get coordinates of local process in MPI topology.
"""
coords_local(t::MPITopology) = t.coords_local
"""
get_comm(t::MPITopology)
Get MPI communicator associated to an MPI Cartesian topology.
"""
get_comm(t::MPITopology) = t.comm
Base.CartesianIndices(t::MPITopology) = CartesianIndices(axes(t))
Base.LinearIndices(t::MPITopology) = LinearIndices(axes(t))
Base.eachindex(t::MPITopology) = LinearIndices(t)
# Get ranks of N-dimensional Cartesian communicator.
function get_cart_ranks(::Val{N}, comm::MPI.Comm) where N
@assert MPI.Cartdim_get(comm) == N # communicator should be N-dimensional
Nproc = MPI.Comm_size(comm)
dims = let
dims_vec, _, _ = MPI.Cart_get(comm)
ntuple(n -> Int(dims_vec[n]), N)
end
ranks = Array{Int,N}(undef, dims)
coords = Vector{Cint}(undef, N)
for I in CartesianIndices(dims)
coords .= Tuple(I) .- 1 # MPI uses zero-based indexing
ranks[I] = MPI.Cart_rank(comm, coords)
end
ranks
end
# Get ranks of one-dimensional Cartesian sub-communicator.
function get_cart_ranks_subcomm(subcomm::MPI.Comm)
@assert MPI.Cartdim_get(subcomm) == 1 # sub-communicator should be 1D
Nproc = MPI.Comm_size(subcomm)
ranks = Vector{Int}(undef, Nproc)
coords = Ref{Cint}()
for n = 1:Nproc
coords[] = n - 1 # MPI uses zero-based indexing
ranks[n] = MPI.Cart_rank(subcomm, coords)
end
ranks
end
function create_subcomms(::Val{N}, comm::MPI.Comm) where N
remain_dims = Vector{Cint}(undef, N)
ntuple(Val(N)) do n
fill!(remain_dims, zero(Cint))
remain_dims[n] = one(Cint)
MPI.Cart_sub(comm, remain_dims)
end
end
end
module Pencils
import ..Permutations: permutation
import ..LocalGrids
using StaticPermutations
using MPI
using Reexport
using StaticArrays: SVector
using TimerOutputs
export Pencil, MPITopology
export Permutation, NoPermutation # from StaticPermutations
export MemoryOrder, LogicalOrder
export decomposition, permutation
export get_comm, timer
export topology
export range_local, range_remote, size_local, size_global, to_local,
length_local, length_global
# Describes the portion of an array held by a given MPI process.
const ArrayRegion{N} = NTuple{N,UnitRange{Int}} where N
include("MPITopologies.jl")
@reexport using .MPITopologies
import .MPITopologies: get_comm
include("data_ranges.jl")
include("index_orders.jl")
"""
Pencil{N,M}
Describes the decomposition of an `N`-dimensional array among MPI processes
along `M` directions (with `M ≤ N`).
---
Pencil(
[A = Array],
topology::MPITopology{M}, size_global::Dims{N},
decomp_dims::Dims{M} = default_decomposition(N, Val(M));
permute::AbstractPermutation = NoPermutation(),
timer = TimerOutput(),
)
Define the decomposition of an `N`-dimensional geometry along `M` dimensions.
The dimensions of the geometry are given by `size_global = (N1, N2, ...)`. The
`Pencil` describes the decomposition of an array of dimensions `size_global`
across a group of MPI processes.
Data is distributed over the given `M`-dimensional MPI topology (with `M ≤ N`).
The decomposed dimensions may optionally be provided via the `decomp_dims`
argument. By default, the `M` rightmost dimensions are decomposed. For instance,
for a 2D decomposition of 5D data (`M = 2` and `N = 5`), the dimensions `(4, 5)`
are decomposed by default.
It is also possible to distribute over all dimensions (`M = N`).
Note that, in this specific case, [transpositions](@ref
Global-MPI-operations) are currently not possible.
The optional argument `A` allows to work with arrays other than the base `Array`
type. In particular, this should be useful for working with GPU array types such
as `CuArray`.
The optional `permute` parameter may be used to indicate a permutation of the
data indices from **logical order** (the order in which the
arrays are accessed in code) to **memory order** (the actual order of indices in
memory). Permutations must be specified using the exported `Permutation` type,
as in `permute = Permutation(3, 1, 2)`.
It is also possible to pass a `TimerOutput` to the constructor. See
[Measuring performance](@ref PencilArrays.measuring_performance) for details.
# Examples
Decompose a 3D geometry of global dimensions ``N_x × N_y × N_z = 4×8×12`` along
the second (``y``) and third (``z``) dimensions:
```jldoctest
julia> topo = MPITopology(MPI.COMM_WORLD, Val(2));
julia> Pencil(topo, (4, 8, 12), (2, 3))
Decomposition of 3D data
Data dimensions: (4, 8, 12)
Decomposed dimensions: (2, 3)
Data permutation: NoPermutation()
Array type: Array
julia> Pencil(topo, (4, 8, 12), (2, 3); permute = Permutation(3, 2, 1))
Decomposition of 3D data
Data dimensions: (4, 8, 12)
Decomposed dimensions: (2, 3)
Data permutation: Permutation(3, 2, 1)
Array type: Array
```
In the second case, the actual data is stored in `(z, y, x)` order within
each MPI process.
---
Pencil([A = Array], size_global::Dims{N}, [decomp_dims = (2, …, N)], comm::MPI.Comm; kws...)
Convenience constructor that implicitly creates a [`MPITopology`](@ref).
The number of decomposed dimensions specified by `decomp_dims` must be `M < N`.
If `decomp_dims` is not passed, dimensions `2:N` are decomposed.
Keyword arguments are passed to alternative constructor taking an `MPITopology`.
That constructor should be used if more control is desired.
# Examples
```jldoctest
julia> Pencil((4, 8, 12), MPI.COMM_WORLD)
Decomposition of 3D data
Data dimensions: (4, 8, 12)
Decomposed dimensions: (2, 3)
Data permutation: NoPermutation()
Array type: Array
julia> Pencil((4, 8, 12), (1, ), MPI.COMM_WORLD)
Decomposition of 3D data
Data dimensions: (4, 8, 12)
Decomposed dimensions: (1,)
Data permutation: NoPermutation()
Array type: Array
```
---
Pencil(
[A = Array],
p::Pencil{N,M};
decomp_dims::Dims{M} = decomposition(p),
size_global::Dims{N} = size_global(p),
permute::P = permutation(p),
timer::TimerOutput = timer(p),
)
Create new pencil configuration from an existent one.
This constructor enables sharing temporary data buffers between the two pencil
configurations, leading to reduced global memory usage.
"""
struct Pencil{
N, # spatial dimensions
    M,  # MPI topology dimensions (≤ N)
P, # optional index permutation (see Permutation)
BufVector <: AbstractVector{UInt8},
}
# M-dimensional MPI decomposition info (with M < N).
topology :: MPITopology{M}
# Global array dimensions (N1, N2, ...) in logical order.
# These dimensions are *before* permutation by perm.
size_global :: Dims{N}
# Decomposition directions.
# Example: for x-pencils, this is typically (2, 3, ..., N).
# Note that the directions don't need to be sorted.
# The order matters when determining over how many processes a given dimension is
# distributed.
# This is in particular important for determining whether two Pencil's are compatible
# for transposing between them.
decomp_dims :: Dims{M}
# Part of the array held by every process.
# These dimensions are *before* permutation by `perm`.
axes_all :: Array{ArrayRegion{N}, M}
# Part of the array held by the local process (before permutation).
axes_local :: ArrayRegion{N}
# Part of the array held by the local process (after permutation).
axes_local_perm :: ArrayRegion{N}
# Optional axes permutation.
perm :: P
# Data buffers for transpositions.
send_buf :: BufVector
recv_buf :: BufVector
# Timing information.
timer :: TimerOutput
function check_empty_dimension(topology, size_global, decomp_dims)
proc_dims = size(topology)
for (i, nproc) ∈ zip(decomp_dims, proc_dims)
# Check that dimension `i` (which has size `N = size_global[i]`) is
# being decomposed over a number of processes ≤ N.
N = size_global[i]
nproc ≤ N && continue
@warn(
"""
Dimension `i = $i` has global size `Nᵢ = $N` but is being decomposed across `Pᵢ = $nproc`
processes.
Since `Pᵢ > Nᵢ`, some processes will have no data, and therefore will do no work. This can
result in broadcasting errors and other unsupported behaviour!
To fix this, consider choosing a different configuration of processes (e.g. via the
`proc_dims` argument), or use a lower number of processes. See below for the current
values of some of these parameters.
""",
i, size_global, decomp_dims, proc_dims,
)
return
end
nothing
end
# This constructor is left undocumented and should never be called directly.
global function _Pencil(
topology::MPITopology{M}, size_global::Dims{N},
decomp_dims::Dims{M}, axes_all, perm::P,
send_buf::BufVector, recv_buf::BufVector, timer::TimerOutput,
) where {M, N, P, BufVector}
check_permutation(perm)
check_empty_dimension(topology, size_global, decomp_dims)
axes_local = axes_all[coords_local(topology)...]
axes_local_perm = perm * axes_local
check_local_data_dims(axes_local)
new{N, M, P, BufVector}(
topology, size_global, decomp_dims,
axes_all, axes_local, axes_local_perm,
perm, send_buf, recv_buf, timer,
)
end
function Pencil(
topology::MPITopology{M}, size_global::Dims{N},
decomp_dims::Dims{M} = default_decomposition(N, Val(M));
permute::AbstractPermutation = NoPermutation(),
send_buf = UInt8[], recv_buf = UInt8[],
timer = TimerOutput(),
) where {N, M}
_check_selected_dimensions(N, decomp_dims)
axes_all = generate_axes_matrix(decomp_dims, topology.dims, size_global)
_Pencil(
topology, size_global, decomp_dims, axes_all, permute,
send_buf, recv_buf, timer,
)
end
# TODO
# - automatically reorder decomp_dims to make sure that both pencils are compatible for
# transpositions
# - throw error if it's not possible to make both pencils compatible for transpositions?
function Pencil(
p::Pencil{N,M};
decomp_dims::Dims{M} = decomposition(p),
size_global::Dims{N} = size_global(p),
permute = permutation(p),
timer::TimerOutput = timer(p),
etc...,
) where {N, M}
Pencil(
p.topology, size_global, decomp_dims;
permute=permute, timer=timer,
send_buf=p.send_buf, recv_buf=p.recv_buf,
etc...,
)
end
end
function Pencil(dims::Dims, decomp::Dims{M}, comm::MPI.Comm; kws...) where {M}
topo = MPITopology(comm, Val(M))
Pencil(topo, dims, decomp; kws...)
end
Pencil(dims::Dims{N}, comm::MPI.Comm; kws...) where {N} =
Pencil(dims, default_decomposition(N, Val(N - 1)), comm; kws...)
function Pencil(::Type{A}, args...; kws...) where {A <: AbstractArray}
# We initialise the array with a single element to work around problem
# with CuArrays: if its length is zero, then the CuArray doesn't have a
# valid pointer.
send_buf = A{UInt8}(undef, 1)
Pencil(args...; kws..., send_buf, recv_buf = similar(send_buf))
end
# Strips array type:
# Array{Int, 3} -> Array
# Array{Int} -> Array
# Array -> Array
@generated function typeof_array(::Type{A′}) where {A′ <: AbstractArray}
A = A′
while A isa UnionAll
A = A.body
end
T = A.name.wrapper
:($T)
end
typeof_array(A::AbstractArray) = typeof_array(typeof(A))
typeof_array(p::Pencil) = typeof_array(p.send_buf)
"""
similar(p::Pencil, [A = typeof_array(p)], [dims = size_global(p)])
Returns a [`Pencil`](@ref) decomposition with global dimensions `dims` and with
underlying array type `A`.
Typically, `A` should be something like `Array` or `CuArray` (see
[`Pencil`](@ref) for details).
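# Example

A sketch (global dimensions below are illustrative):

```julia
p = Pencil((16, 32, 64), MPI.COMM_WORLD)
q = similar(p, (32, 64, 128))  # same topology and array type, new global dimensions
```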
"""
function Base.similar(
p::Pencil{N}, ::Type{A}, dims::Dims{N} = size_global(p),
) where {A <: AbstractArray, N}
_similar(A, typeof_array(p), p, dims)
end
Base.similar(p::Pencil{N}, dims::Dims{N} = size_global(p)) where {N} =
similar(p, typeof_array(p), dims)
# Case A === A′
function _similar(
::Type{A}, ::Type{A}, p::Pencil{N}, dims::Dims{N},
) where {N, A <: AbstractArray}
@assert typeof_array(p) === A
if dims == size_global(p)
p # avoid all copies
else
Pencil(p; size_global = dims)
end
end
# Case A !== A′ (→ change of array type)
function _similar(
::Type{A′}, ::Type{A}, p::Pencil{N}, dims::Dims{N},
) where {N, A <: AbstractArray, A′ <: AbstractArray}
@assert typeof_array(p) === A
# We initialise the array with a single element to work around problem
# with CuArrays: if its length is zero, then the CuArray doesn't have a
# valid pointer.
send_buf = A′{UInt8}(undef, 1)
recv_buf = similar(send_buf)
if dims == size_global(p)
# Avoid recomputing (and allocating a new) `axes_all`, since it doesn't
# change in the new decomposition.
_Pencil(
p.topology, dims, p.decomp_dims, p.axes_all,
p.perm, send_buf, recv_buf, p.timer,
)
else
Pencil(
p.topology, dims, p.decomp_dims;
permute = p.perm, send_buf, recv_buf, timer = p.timer,
)
end
end
function check_local_data_dims(axes_local)
# Show warning if the amount of local data is larger than typemax(Cint).
# This may cause problems with MPI, where lengths and offsets are given as Cint.
ndata = prod(length, axes_local)
if ndata > typemax(Cint)
@warn(
"""
size of local data is too large compared to typemax(Cint).
This may cause problems when calling MPI functions.
If that's the case, try increasing the number of MPI processes.
""",
ndata,
typemax(Cint),
ndata / (typemax(Cint) + 1),
)
end
nothing
end
function check_permutation(perm)
isperm(perm) && return
throw(ArgumentError("invalid permutation of dimensions: $perm"))
end
function default_decomposition(N, ::Val{M}) where {M}
@assert 0 < M ≤ N
ntuple(d -> N - M + d, Val(M))
end
# Verify that `dims` is a subselection of dimensions in 1:N.
function _check_selected_dimensions(N, dims::Dims{M}) where M
if M > N
throw(ArgumentError(
"number of decomposed dimensions `M` cannot be larger than the " *
"total number of dimensions N = $N (got M = $M)"))
end
if !allunique(dims)
throw(ArgumentError("dimensions may not be repeated. Got $dims."))
end
if !all(1 .<= dims .<= N)
throw(ArgumentError("dimensions must be in 1:$N. Got $dims."))
end
nothing
end
Base.summary(io::IO, p::Pencil) = Base.showarg(io, p, true)
function Base.showarg(io::IO, p::Pencil{N,M,P}, toplevel) where {N,M,P}
toplevel || print(io, "::")
A = typeof_array(p)
print(io, nameof(typeof(p)), "{$N, $M, $P, $A}")
end
function Base.show(io::IO, p::Pencil)
perm = permutation(p)
print(io,
"""
Decomposition of $(ndims(p))D data
Data dimensions: $(size_global(p))
Decomposed dimensions: $(decomposition(p))
Data permutation: $(perm)
Array type: $(typeof_array(p))""")
end
"""
timer(p::Pencil)
Get `TimerOutput` attached to a `Pencil`.
See [Measuring performance](@ref PencilArrays.measuring_performance) for details.
"""
timer(p::Pencil) = p.timer
"""
ndims(p::Pencil)
Number of spatial dimensions associated to pencil data.
This corresponds to the total number of dimensions of the space, which includes
the decomposed and non-decomposed dimensions.
"""
Base.ndims(::Pencil{N}) where N = N
"""
get_comm(p::Pencil)
Get MPI communicator associated to an MPI decomposition scheme.
"""
get_comm(p::Pencil) = get_comm(p.topology)
"""
permutation(::Type{<:Pencil}) -> AbstractPermutation
permutation(p::Pencil) -> AbstractPermutation
Get index permutation associated to the given pencil configuration.
Returns `NoPermutation()` if there is no associated permutation.
"""
permutation(p::Pencil) = permutation(typeof(p))
permutation(::Type{<:Pencil{N,M,P}}) where {N,M,P} = _instanceof(P)
@inline _instanceof(::Type{T}) where {T <: AbstractPermutation} = T()
@inline _instanceof(::Type{<:Permutation{p}}) where {p} = Permutation(p)
"""
decomposition(p::Pencil)
Get tuple with decomposed dimensions of the given pencil configuration.
"""
decomposition(p::Pencil) = p.decomp_dims
"""
topology(p::Pencil)
Get [`MPITopology`](@ref) attached to `Pencil`.
"""
topology(p::Pencil) = p.topology
"""
length(p::Pencil)
Get linear length of the *local* data associated to the decomposition.
Equivalent to `length_local(p)`.
"""
Base.length(p::Pencil) = prod(size(p))
"""
length_local(p::Pencil)
Get linear length of the local data associated to the decomposition.
"""
length_local(p::Pencil) = prod(size_local(p))
"""
length_global(p::Pencil)
Get linear length of the global data associated to the decomposition.
"""
length_global(p::Pencil) = prod(size_global(p))
"""
range_local(p::Pencil, [order = LogicalOrder()])
Local data range held by the pencil.
By default the dimensions are not permuted, i.e. they follow the logical order
of dimensions.
"""
range_local(p::Pencil, ::LogicalOrder) = p.axes_local
range_local(p::Pencil, ::MemoryOrder) = p.axes_local_perm
range_local(p) = range_local(p, DefaultOrder())
"""
range_remote(p::Pencil, coords, [order = LogicalOrder()])
range_remote(p::Pencil, n::Integer, [order = LogicalOrder()])
Get data range held by a given MPI process.
In the first variant, `coords` are the coordinates of the MPI process in
the Cartesian topology. They can be specified as a tuple `(i, j, ...)` or as a
`CartesianIndex`.
In the second variant, `n` is the linear index of a given process in the
topology.
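# Example

A sketch for a pencil `p` distributed over a 2D MPI topology:

```julia
rng = range_remote(p, 1)       # data range held by the first process
rng = range_remote(p, (1, 2))  # same, indexing the process by its Cartesian coordinates
```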
"""
range_remote(p::Pencil, n::Integer, ::LogicalOrder) = p.axes_all[n]
range_remote(p::Pencil{N,M}, I::CartesianIndex{M}, ::LogicalOrder) where {N,M} =
p.axes_all[I]
range_remote(p::Pencil{N,M}, I::Dims{M}, ::LogicalOrder) where {N,M} =
range_remote(p, CartesianIndex(I), LogicalOrder())
range_remote(p, I) = range_remote(p, I, LogicalOrder())
range_remote(p, I, ::MemoryOrder) =
permutation(p) * range_remote(p, I, LogicalOrder())
"""
size_local(p::Pencil, [order = LogicalOrder()])
Local dimensions of the data held by the pencil.
By default the dimensions are not permuted, i.e. they follow the logical order
of dimensions.
"""
size_local(p::Pencil, etc...) = map(length, range_local(p, etc...))
"""
size_global(p::Pencil, [order = LogicalOrder()])
Global dimensions of the Cartesian grid associated to the given domain
decomposition.
Like [`size_local`](@ref), by default the returned dimensions are in logical
order.
"""
size_global(p::Pencil, ::LogicalOrder) = p.size_global
size_global(p::Pencil, ::MemoryOrder) = permutation(p) * p.size_global
size_global(p) = size_global(p, DefaultOrder())
"""
size(p::Pencil)
Returns the *local* data dimensions associated to the decomposition, in *logical*
order.
This is defined as `size_local(p, LogicalOrder())`.
"""
Base.size(p::Pencil) = size_local(p, LogicalOrder())
"""
to_local(p::Pencil, global_inds, [order = LogicalOrder()])
Convert non-permuted (logical) global indices to local indices.
If `order = MemoryOrder()`, returned indices will be permuted using the
permutation associated to the pencil configuration `p`.
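# Example

A sketch, assuming the local portion of `p` spans the global indices `(5:8, 1:16)`:

```julia
to_local(p, (5:8, 1:16))  # returns (1:4, 1:16)
```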
"""
function to_local(p::Pencil{N}, global_inds::ArrayRegion{N},
order::AbstractIndexOrder = DefaultOrder()) where {N}
ind = map(global_inds, p.axes_local) do rg, rl
@assert step(rg) == 1
δ = 1 - first(rl)
(first(rg) + δ):(last(rg) + δ)
end :: ArrayRegion{N}
order === MemoryOrder() ? (permutation(p) * ind) : ind
end
"""
localgrid(p::Pencil, (x_global, y_global, ...)) -> LocalRectilinearGrid
localgrid(u::PencilArray, (x_global, y_global, ...)) -> LocalRectilinearGrid
Create a [`LocalRectilinearGrid`](@ref LocalGrids.LocalRectilinearGrid) from a
decomposition configuration and from a set of orthogonal global coordinates
`(x_global, y_global, ...)`.
In this case, each `*_global` is an `AbstractVector` describing the coordinates
along one dimension of the global grid.
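# Example

A sketch for a 3D decomposition (global dimensions and coordinates are
illustrative):

```julia
p = Pencil((16, 32, 64), MPI.COMM_WORLD)
xs = range(0, 1; length = 16)
ys = range(0, 1; length = 32)
zs = range(0, 1; length = 64)
grid = localgrid(p, (xs, ys, zs))
```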
"""
function LocalGrids.localgrid(p::Pencil, coords_global::Tuple{Vararg{AbstractVector}})
perm = permutation(p)
ranges = range_local(p, LogicalOrder())
coords_local = map(view, coords_global, ranges)
LocalGrids.localgrid(coords_local, perm)
end
end
# Functions determining local data ranges in the different pencil
# configurations.
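# Example (illustration): N = 10 points over P = 3 processes gives the balanced
# ranges 1:3, 4:6 and 7:10 for p = 1, 2, 3.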
function local_data_range(p, P, N)
@assert 1 <= p <= P
a = (N * (p - 1)) ÷ P + 1
b = (N * p) ÷ P
a:b
end
# "Complete" dimensions not specified in `dims` with ones.
# Examples:
# - if N = 5, dims = (2, 3) and vals = (42, 12), this returns (1, 42, 12, 1, 1).
# - if N = 5, dims = (3, 2) and vals = (42, 12), this returns (1, 12, 42, 1, 1).
function complete_dims(::Val{N}, dims::Dims{M}, vals::Dims{M}) where {N, M}
@assert N >= M
vals_all = ntuple(Val(N)) do n
i = findfirst(==(n), dims)
if i === nothing
1 # this dimension is not included in `dims`, so we put a 1
else
vals[i]
end
end
vals_all :: Dims{N}
end
# Get axes (array regions) owned by all processes in a given pencil
# configuration.
function generate_axes_matrix(
decomp_dims::Dims{M}, proc_dims::Dims{M}, size_global::Dims{N},
) where {N, M}
axes = Array{ArrayRegion{N}, M}(undef, proc_dims)
# Number of processes in every direction, including those where
# decomposition is not applied.
procs = complete_dims(Val(N), decomp_dims, proc_dims)
for I in CartesianIndices(proc_dims)
coords = complete_dims(Val(N), decomp_dims, Tuple(I))
axes[I] = local_data_range.(coords, procs, size_global)
end
axes
end
"""
AbstractIndexOrder
Abstract type determining the ordering of dimensions of an array with possibly
permuted indices.
Subtypes are [`MemoryOrder`](@ref) and [`LogicalOrder`](@ref).
"""
abstract type AbstractIndexOrder end
"""
MemoryOrder <: AbstractIndexOrder
Singleton type specifying that array dimensions should be given in memory (or
*permuted*) order.
"""
struct MemoryOrder <: AbstractIndexOrder end
"""
LogicalOrder <: AbstractIndexOrder
Singleton type specifying that array dimensions should be given in logical (or
*non-permuted*) order.
"""
struct LogicalOrder <: AbstractIndexOrder end
const DefaultOrder = LogicalOrder
module PermutedIndices
import ..Permutations: permutation
using StaticPermutations
export PermutedLinearIndices, PermutedCartesianIndices
# Custom definitions of LinearIndices and CartesianIndices to take into account
# index permutations.
#
# In particular, when array dimensions are permuted, the default
# CartesianIndices do not iterate in memory order, making them suboptimal.
# We try to work around that by adding a custom definition of CartesianIndices.
#
# (TODO Better / cleaner way to do this??)
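#
# Example (illustration): for a logical size (2, 3) with perm = Permutation(2, 1),
# iterating PermutedCartesianIndices yields the logical indices
# (1,1), (1,2), (1,3), (2,1), (2,2), (2,3), i.e. it follows the permuted
# (memory) layout.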
struct PermutedLinearIndices{
N, L <: LinearIndices, Perm,
} <: AbstractArray{Int,N}
data :: L # indices in permuted order
perm :: Perm
function PermutedLinearIndices(
ind::LinearIndices{N}, perm::Perm) where {N, Perm}
L = typeof(ind)
new{N, L, Perm}(ind, perm)
end
end
permutation(L::PermutedLinearIndices) = L.perm
Base.length(L::PermutedLinearIndices) = length(L.data)
Base.size(L::PermutedLinearIndices) = permutation(L) \ size(L.data)
Base.axes(L::PermutedLinearIndices) = permutation(L) \ axes(L.data)
Base.iterate(L::PermutedLinearIndices, args...) = iterate(L.data, args...)
Base.lastindex(L::PermutedLinearIndices) = lastindex(L.data)
@inline function Base.getindex(L::PermutedLinearIndices, i::Integer)
@boundscheck checkbounds(L.data, i)
@inbounds L.data[i]
end
# Input: indices in logical (unpermuted) order
@inline function Base.getindex(
L::PermutedLinearIndices{N}, I::Vararg{Integer,N},
) where {N}
J = permutation(L) * I
@boundscheck checkbounds(L.data, J...)
@inbounds L.data[J...]
end
struct PermutedCartesianIndices{
N, C <: CartesianIndices{N}, Perm,
} <: AbstractArray{CartesianIndex{N}, N}
data :: C # indices in memory (permuted) order
perm :: Perm # permutation (logical -> memory)
function PermutedCartesianIndices(ind::CartesianIndices{N},
perm::Perm) where {N, Perm}
C = typeof(ind)
new{N, C, Perm}(ind, perm)
end
end
permutation(C::PermutedCartesianIndices) = C.perm
Base.size(C::PermutedCartesianIndices) = permutation(C) \ size(C.data)
Base.axes(C::PermutedCartesianIndices) = permutation(C) \ axes(C.data)
@inline function Base.iterate(C::PermutedCartesianIndices, args...)
next = iterate(C.data, args...)
next === nothing && return nothing
I, state = next # `I` has permuted indices
J = permutation(C) \ I # unpermute indices
J, state
end
# Get i-th Cartesian index in memory (permuted) order.
# Returns the Cartesian index in logical (unpermuted) order.
@inline function Base.getindex(C::PermutedCartesianIndices, i::Integer)
@boundscheck checkbounds(C.data, i)
@inbounds I = C.data[i] # convert linear to Cartesian index (relatively slow...)
permutation(C) \ I # unpermute indices
end
# Not sure if this is what makes the most sense, but it's consistent with the
# behaviour of CartesianIndices(::OffsetArray). In any case, this function is
# mostly used for printing (it's used by show(::PermutedCartesianIndices)), and
# almost never for actual computations.
@inline function Base.getindex(
C::PermutedCartesianIndices{N}, I::Vararg{Integer,N},
) where {N}
@boundscheck checkbounds(C, I...)
CartesianIndex(I)
end
end
module Transpositions
import LinearAlgebra: transpose!
export transpose! # to avoid needing to import LinearAlgebra in user code
using TimerOutputs
import MPI
using ..PencilArrays
using ..PencilArrays: typeof_ptr, typeof_array
using ..Pencils: ArrayRegion
using StaticPermutations
using Strided: @strided, Strided, StridedView
# Declare transposition approaches.
abstract type AbstractTransposeMethod end
struct PointToPoint <: AbstractTransposeMethod end
struct Alltoallv <: AbstractTransposeMethod end
function Base.show(io::IO, ::T) where {T<:AbstractTransposeMethod}
print(io, nameof(T))
end
"""
Transposition
Holds data for transposition between two pencil configurations.
---
Transposition(dest::PencilArray{T,N}, src::PencilArray{T,N};
method = Transpositions.PointToPoint())
Prepare transposition of arrays from one pencil configuration to the other.
The two pencil configurations must be compatible for transposition:
- they must share the same MPI Cartesian topology,
- they must have the same global data size,
- the decomposed dimensions must be almost the same, with at most one
difference.
For instance, if the input of a 3D dataset is decomposed in `(2, 3)`, then the
output may be decomposed in `(1, 3)` or in `(2, 1)`, but not in `(1, 2)`.
**Note that the order of the decomposed dimensions (as passed to the `Pencil`
constructor) matters.**
If the decomposed dimensions are the same, then no transposition is performed,
and data is just copied if needed.
The `src` and `dest` arrays may be aliased (they can share memory space).
# Performance tuning
The `method` argument allows choosing between transposition implementations.
This can be useful to tune performance of MPI data transfers.
Two values are currently accepted:
- `Transpositions.PointToPoint()` uses non-blocking point-to-point data transfers
(`MPI_Isend` and `MPI_Irecv`).
This may be more performant since data transfers are interleaved with local
data transpositions (index permutation of received data).
This is the default.
- `Transpositions.Alltoallv()` uses collective `MPI_Alltoallv` for global data
transpositions.
"""
struct Transposition{T, N,
PencilIn <: Pencil,
PencilOut <: Pencil,
ArrayIn <: PencilArray{T,N},
ArrayOut <: PencilArray{T,N},
M <: AbstractTransposeMethod,
}
Pi :: PencilIn
Po :: PencilOut
Ai :: ArrayIn
Ao :: ArrayOut
method :: M
dim :: Union{Nothing,Int} # dimension along which transposition is performed
# Note: we can use UnsafeMultiRequest as long as the send and receive buffers stay alive
# during the whole communication time, i.e. after all calls to Waitany / Waitall.
    # This is the case because the buffers live in the `Pencil`s (`Pi` and `Po`), which are
# themselves included in the `Transposition` object.
# So we're fine as long as the `Transposition` stays alive.
# If the `Transposition` is no longer alive, we can't do wait operations anyways, so
# everything is fine!
send_requests :: MPI.UnsafeMultiRequest
recv_requests :: MPI.UnsafeMultiRequest
function Transposition(Ao::PencilArray{T,N}, Ai::PencilArray{T,N};
method = PointToPoint()) where {T,N}
Pi = pencil(Ai)
Po = pencil(Ao)
# Verifications
if extra_dims(Ai) !== extra_dims(Ao)
throw(ArgumentError(
"incompatible number of extra dimensions of PencilArrays: " *
"$(extra_dims(Ai)) != $(extra_dims(Ao))"))
end
assert_compatible(Pi, Po)
# The `decomp_dims` tuples of both pencils must differ by at most one
# value (as just checked by `assert_compatible`). The transposition
# is performed along the dimension R where that difference happens.
dim = findfirst(decomposition(Pi) .!= decomposition(Po))
send_requests = MPI.UnsafeMultiRequest()
recv_requests = MPI.UnsafeMultiRequest()
new{T, N, typeof(Pi), typeof(Po), typeof(Ai), typeof(Ao), typeof(method)}(
Pi, Po, Ai, Ao, method, dim, send_requests, recv_requests,
)
end
end
"""
MPI.Waitall(t::Transposition)
Wait for completion of all unfinished MPI communications related to the
transposition.
"""
function MPI.Waitall(t::Transposition)
isempty(t.send_requests) || MPI.Waitall(t.send_requests)
nothing
end
function MPI.Waitall!(t::Transposition)
@warn """
MPI.Waitall!(t::Transposition) is deprecated and will be soon removed.
Use MPI.Waitall(t) instead (without the `!`).
"""
MPI.Waitall(t)
end
"""
transpose!(t::Transposition; waitall=true)
transpose!(dest::PencilArray{T,N}, src::PencilArray{T,N};
method = Transpositions.PointToPoint())
Transpose data from one pencil configuration to the other.
The first variant makes it possible to delay waiting for MPI send operations to
complete.
This is useful if the caller wants to perform other operations with the already received data.
To do this, the caller should pass `waitall = false`, and manually invoke
[`MPI.Waitall`](@ref) on the `Transposition` object once the operations are
done.
Note that this option only has an effect when the transposition method is
`PointToPoint`.
See [`Transposition`](@ref) for details.
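# Example

A sketch of the delayed-wait variant (`dest` and `src` are assumed to be
compatible `PencilArray`s):

```julia
t = Transposition(dest, src)
transpose!(t; waitall = false)
# ... operate on the already received data in `dest` ...
MPI.Waitall(t)  # wait for sends to complete before the send buffers can be safely reused
```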
"""
function transpose! end
function transpose!(
dest::PencilArray, src::PencilArray;
method::AbstractTransposeMethod = PointToPoint(),
)
dest === src && return dest # same pencil & same data
t = Transposition(dest, src, method=method)
transpose!(t, waitall=true)
dest
end
function transpose!(t::Transposition; waitall=true)
timer = Pencils.timer(t.Pi)
@timeit_debug timer "transpose!" begin
transpose_impl!(t.dim, t)
if waitall
@timeit_debug timer "wait send" MPI.Waitall(t)
end
end
t
end
function assert_compatible(p::Pencil, q::Pencil)
if p.topology !== q.topology
throw(ArgumentError("pencil topologies must be the same."))
end
if p.size_global !== q.size_global
throw(ArgumentError(
"global data sizes must be the same between different pencil " *
" configurations. Got $(p.size_global) ≠ $(q.size_global)."))
end
# Check that decomp_dims differ on at most one value.
dp, dq = map(decomposition, (p, q))
if sum(dp .!= dq) > 1
throw(ArgumentError(
"pencil decompositions must differ in at most one dimension. " *
"Got decomposed dimensions $dp and $dq."))
end
nothing
end
# Reinterpret UInt8 vector as a different type of array.
# The input array should have enough space for the reinterpreted array with the
# given dimensions.
# This is a workaround to the performance issues when using `reinterpret`.
# See for instance:
# - https://discourse.julialang.org/t/big-overhead-with-the-new-lazy-reshape-reinterpret/7635
# - https://github.com/JuliaLang/julia/issues/28980
function unsafe_as_array(::Type{T}, x::AbstractVector{UInt8}, dims) where {T}
p = typeof_ptr(x){T}(pointer(x))
unsafe_wrap(typeof_array(x), p, dims, own=false)
end
# Only local transposition.
function transpose_impl!(::Nothing, t::Transposition)
Pi = t.Pi
Po = t.Po
Ai = t.Ai
Ao = t.Ao
timer = Pencils.timer(Pi)
# Both pencil configurations are identical, so we just copy the data,
# permuting dimensions if needed.
@assert size_local(Ai) === size_local(Ao)
ui = parent(Ai)
uo = parent(Ao)
if permutation(Pi) == permutation(Po)
@timeit_debug timer "copy!" copy!(uo, ui)
else
@timeit_debug timer "permute_local!" permute_local!(Ao, Ai)
end
t
end
function permute_local!(Ao::PencilArray{T,N},
Ai::PencilArray{T,N}) where {T, N}
Pi = pencil(Ai)
Po = pencil(Ao)
perm = let
perm_base = permutation(Po) / permutation(Pi) # relative permutation
p = append(perm_base, Val(ndims_extra(Ai)))
Tuple(p)
end
ui = parent(Ai)
uo = parent(Ao)
inplace = Base.mightalias(ui, uo)
if inplace
# TODO optimise in-place version?
# For now we permute into a temporary buffer, and then we copy to `Ao`.
# We reuse `recv_buf` used for MPI transposes.
buf = let x = Pi.recv_buf
n = length(uo)
dims = size(uo)
resize!(x, sizeof(T) * n)
vec = unsafe_as_array(T, x, n)
reshape(vec, dims)
end
@strided permutedims!(buf, ui, perm)
copy!(uo, buf)
else
# Permute directly onto the output.
@strided permutedims!(uo, ui, perm)
end
Ao
end
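# Create an MPI.Buffer wrapping the contiguous chunk of `buf` of the given
# length, starting right after offset `off`.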
function mpi_buffer(buf::AbstractArray, off, length)
inds = (off + 1):(off + length)
v = view(buf, inds)
MPI.Buffer(v)
end
# Transposition among MPI processes in a subcommunicator.
# R: index of MPI subgroup (dimension of MPI Cartesian topology) along which the
# transposition is performed.
function transpose_impl!(R::Int, t::Transposition{T}) where {T}
@assert t.dim === R
Pi = t.Pi
Po = t.Po
Ai = t.Ai
Ao = t.Ao
method = t.method
timer = Pencils.timer(Pi)
@assert Pi.topology === Po.topology
@assert extra_dims(Ai) === extra_dims(Ao)
topology = Pi.topology
comm = topology.subcomms[R] # exchange among the subgroup R
Nproc = topology.dims[R]
subcomm_ranks = topology.subcomm_ranks[R]
myrank = subcomm_ranks[topology.coords_local[R]] # rank in subgroup
remote_inds = get_remote_indices(R, topology.coords_local, Nproc)
# Length of data that I will "send" to myself.
length_self = let
range_intersect = map(intersect, Pi.axes_local, Po.axes_local)
prod(map(length, range_intersect)) * prod(extra_dims(Ai))
end
# Total data to be sent / received.
length_send = length(Ai) - length_self
length_recv_total = length(Ao) # includes local exchange with myself
# Resize to at least one element so that we always get a valid pointer,
# e.g. in the case of a CuArray.
resize!(Po.send_buf, sizeof(T) * max(1, length_send))
send_buf = unsafe_as_array(T, Po.send_buf, length_send)
resize!(Po.recv_buf, sizeof(T) * max(1, length_recv_total))
recv_buf = unsafe_as_array(T, Po.recv_buf, length_recv_total)
recv_offsets = Vector{Int}(undef, Nproc) # all offsets in recv_buf
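# Point-to-point transposes use one send and one receive request per remote
# process; Alltoallv is a single collective call and needs no requests.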
req_length = method === Alltoallv() ? 0 : Nproc
(; send_requests, recv_requests,) = t
resize!(send_requests, req_length)
resize!(recv_requests, req_length)
# 1. Pack and send data.
@timeit_debug timer "pack data" index_local_req = transpose_send!(
(send_buf, recv_buf),
recv_offsets,
(send_requests, recv_requests),
length_self, remote_inds,
(comm, subcomm_ranks, myrank),
Ao, Ai, method, timer,
)
# 2. Unpack data and perform local transposition.
@timeit_debug timer "unpack data" transpose_recv!(
recv_buf, recv_offsets, recv_requests,
remote_inds, index_local_req,
Ao, Ai, method, timer,
)
t
end
function transpose_send!(
(send_buf, recv_buf),
recv_offsets, requests,
length_self, remote_inds,
(comm, subcomm_ranks, myrank),
Ao::PencilArray{T}, Ai::PencilArray{T},
method::AbstractTransposeMethod,
timer::TimerOutput,
) where {T}
Pi = pencil(Ai) # input (sent data)
Po = pencil(Ao) # output (received data)
idims_local = Pi.axes_local
odims_local = Po.axes_local
idims = Pi.axes_all
odims = Po.axes_all
exdims = extra_dims(Ai)
prod_extra_dims = prod(exdims)
isend = 0 # current index in send_buf
irecv = 0 # current index in recv_buf
index_local_req = -1 # request index associated to local exchange
# Data received from other processes.
length_recv = length(Ao) - length_self
Nproc = length(subcomm_ranks)
@assert Nproc == MPI.Comm_size(comm)
@assert myrank == MPI.Comm_rank(comm)
buf_info = make_buffer_info(method, (send_buf, recv_buf), Nproc)
for (n, ind) in enumerate(remote_inds)
# Global data range that I need to send to process n.
srange = map(intersect, idims_local, odims[ind])
length_send_n = prod(map(length, srange)) * prod_extra_dims
local_send_range = to_local(Pi, srange, MemoryOrder())
# Determine amount of data to be received.
rrange = map(intersect, odims_local, idims[ind])
length_recv_n = prod(map(length, rrange)) * prod_extra_dims
recv_offsets[n] = irecv
rank = subcomm_ranks[n] # actual rank of the other process
if rank == myrank
# Copy directly from `Ai` to `recv_buf`.
# For convenience, data is put at the end of `recv_buf`.
# This makes it easier to implement an alternative based on MPI_Alltoallv.
@assert length_recv_n == length_self
recv_offsets[n] = length_recv
@timeit_debug timer "copy_range!" copy_range!(
recv_buf, length_recv, Ai, local_send_range,
)
transpose_send_self!(method, n, requests, buf_info)
index_local_req = n
else
# Copy data into contiguous buffer, then send the buffer.
@timeit_debug timer "copy_range!" copy_range!(
send_buf, isend, Ai, local_send_range,
)
transpose_send_other!(
method, buf_info, (length_send_n, length_recv_n), n,
requests, (rank, comm), eltype(Ai),
)
irecv += length_recv_n
isend += length_send_n
end
end
if method === Alltoallv()
# This @view is needed because the Alltoallv wrapper checks that the
# length of the buffer is consistent with recv_counts.
recv_buf_view = @view recv_buf[1:length_recv]
@timeit_debug timer "MPI.Alltoallv!" MPI.Alltoallv!(
MPI.VBuffer(send_buf, buf_info.send_counts),
MPI.VBuffer(recv_buf_view, buf_info.recv_counts),
comm,
)
end
index_local_req
end
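# For point-to-point communication, keep running offsets into the contiguous
# send/receive buffers; they are advanced as each message is posted.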
function make_buffer_info(::PointToPoint, (send_buf, recv_buf), Nproc)
(
send_buf = send_buf,
recv_buf = recv_buf,
send_offset = Ref(0),
recv_offset = Ref(0),
)
end
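# For Alltoallv, accumulate per-process send/receive counts instead; they are
# passed to MPI.Alltoallv! at the end of transpose_send!.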
function make_buffer_info(::Alltoallv, bufs, Nproc)
counts = Vector{Cint}(undef, Nproc)
(
send_counts = counts,
recv_counts = similar(counts),
)
end
function transpose_send_self!(::PointToPoint, n, (send_requests, recv_requests), etc...)
# Do nothing. The request send_requests[n] and recv_requests[n] should already be null.
@assert send_requests.vals[n] == MPI.REQUEST_NULL.val
@assert recv_requests.vals[n] == MPI.REQUEST_NULL.val
nothing
end
function transpose_send_self!(::Alltoallv, n, reqs, buf_info)
# Don't send data to myself via Alltoallv.
buf_info.send_counts[n] = buf_info.recv_counts[n] = zero(Cint)
nothing
end
function transpose_send_other!(
::PointToPoint, info, (length_send_n, length_recv_n),
n, (send_requests, recv_requests), (rank, comm), ::Type{T},
) where {T}
# Exchange data with the other process (non-blocking operations).
# Note: data is sent and received with the permutation associated to Pi.
tag = 42
data_send = mpi_buffer(info.send_buf, info.send_offset[], length_send_n)
data_recv = mpi_buffer(info.recv_buf, info.recv_offset[], length_recv_n)
MPI.Isend(data_send, comm, send_requests[n]; dest = rank, tag)
MPI.Irecv!(data_recv, comm, recv_requests[n]; source = rank, tag)
info.send_offset[] += length_send_n
info.recv_offset[] += length_recv_n
nothing
end
function transpose_send_other!(
::Alltoallv, buf_info, (length_send_n, length_recv_n), n, args...
)
buf_info.send_counts[n] = length_send_n
buf_info.recv_counts[n] = length_recv_n
nothing
end
function transpose_recv!(
recv_buf, recv_offsets, recv_requests,
remote_inds, index_local_req,
Ao::PencilArray, Ai::PencilArray,
method::AbstractTransposeMethod,
timer::TimerOutput,
)
Pi = pencil(Ai) # input (sent data)
Po = pencil(Ao) # output (received data)
odims_local = Po.axes_local
idims = Pi.axes_all
exdims = extra_dims(Ao)
prod_extra_dims = prod(exdims)
# Relative index permutation to go from Pi ordering to Po ordering.
perm = permutation(Po) / permutation(Pi)
Nproc = length(remote_inds)
for m = 1:Nproc
if method === Alltoallv()
n = m
elseif m == 1
n = index_local_req # copy local data first
else
@timeit_debug timer "wait receive" n = MPI.Waitany(recv_requests)
end
# Non-permuted global indices of received data.
ind = remote_inds[n]
g_range = map(intersect, odims_local, idims[ind])
# length_recv_n = prod(map(length, g_range)) * prod_extra_dims
off = recv_offsets[n]
# Local output data range in the **input** permutation.
o_range_iperm = permutation(Pi) * to_local(Po, g_range, LogicalOrder())
# Copy data to `Ao`, permuting dimensions if required.
@timeit_debug timer "copy_permuted!" copy_permuted!(
Ao, o_range_iperm, recv_buf, off, perm,
)
end
Ao
end
# Cartesian indices of the remote MPI processes included in the subgroup of
# index `R`.
# Example: if coords_local = (2, 3, 5) and R = 1, then this function returns the
# indices corresponding to (:, 3, 5).
function get_remote_indices(R::Int, coords_local::Dims{M}, Nproc::Int) where M
t = ntuple(Val(M)) do i
if i == R
1:Nproc
else
c = coords_local[i]
c:c
end
end
CartesianIndices(t)
end
# Specialisation for CPU arrays.
function copy_range!(
dest::Vector, dest_offset::Integer,
src::PencilArray, src_range_memorder::NTuple,
)
exdims = extra_dims(src)
n = dest_offset
src_p = parent(src) # array with non-permuted indices (memory order)
for K in CartesianIndices(exdims)
for I in CartesianIndices(src_range_memorder)
@inbounds dest[n += 1] = src_p[I, K]
end
end
dest
end
# Generic case avoiding scalar indexing, should work for GPU arrays.
function copy_range!(
dest::AbstractVector, dest_offset::Integer,
src::PencilArray, src_range_memorder::NTuple,
)
exdims = extra_dims(src)
n = dest_offset
src_p = parent(src) # array with non-permuted indices (memory order)
Ks = CartesianIndices(exdims)
Is = CartesianIndices(src_range_memorder)
len = length(Is) * length(Ks)
src_view = @view src_p[Is, Ks]
dst_view = @view dest[(n + 1):(n + len)]
# TODO this allocates on GPUArrays... can it be improved?
copyto!(dst_view, src_view)
dest
end
function copy_permuted!(
dst::PencilArray, o_range_iperm::NTuple,
src::AbstractVector, src_offset::Integer,
perm::AbstractPermutation,
)
N = ndims(dst)
P = length(o_range_iperm)
exdims = extra_dims(dst)
E = length(exdims)
@assert P + E == N
src_dims = (map(length, o_range_iperm)..., exdims...)
src_view = _viewreshape(src, src_dims, src_offset)
dst_inds = perm * o_range_iperm # destination indices in memory order
_permutedims!(dst, src_view, dst_inds, perm)
dst
end
# Case of CPU arrays.
# Note that Strided uses scalar indexing at some point, and for that reason it
# doesn't work with GPU arrays.
function _viewreshape(src::Vector, src_dims, n)
N = prod(src_dims)
v = Strided.sview(src, (n + 1):(n + N))
Strided.sreshape(v, src_dims)
end
# Generic case, used in particular for GPU arrays.
function _viewreshape(src::AbstractVector, src_dims, n)
@boundscheck begin
N = prod(src_dims)
checkbounds(src, (n + 1):(n + N))
end
# On GPUs, we use unsafe_wrap to make sure that the returned array is an
# AbstractGPUArray, for which `permutedims!` is implemented in GPUArrays.jl.
unsafe_wrap(typeof_array(src), pointer(src, n + 1), src_dims)
end
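# Wrap the raw (memory-order) data of `dst` in a view that includes the extra
# dimensions, then dispatch on the type of the underlying array.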
function _permutedims!(dst::PencilArray, src, dst_inds, perm)
exdims = extra_dims(dst)
v = view(parent(dst), dst_inds..., map(Base.OneTo, exdims)...)
_permutedims!(typeof_array(pencil(dst)), v, src, perm)
end
# Specialisation for CPU arrays.
# Note that v_in is the raw array (in memory order) wrapped by a PencilArray.
function _permutedims!(::Type{Array}, v_in::SubArray, src, perm)
v = StridedView(v_in)
vperm = if isidentity(perm)
v
else
E = ndims(v) - length(perm) # number of "extra dims"
pperm = append(perm, Val(E))
# This is the equivalent of a PermutedDimsArray in Strided.jl.
# Note that this is a lazy object (a StridedView)!
permutedims(v, Tuple(inv(pperm))) :: StridedView
end
copyto!(vperm, src)
end
# General case, used in particular for GPU arrays.
function _permutedims!(::Type{<:AbstractArray}, v::SubArray, src, perm)
if isidentity(perm)
copyto!(v, src)
else
E = ndims(v) - length(perm) # number of "extra dims"
pperm = append(perm, Val(E))
# On GPUs, if `src` is an AbstractGPUArray, then there is a `permutedims!`
# implementation for GPUs (in GPUArrays.jl) if the destination is also
# an AbstractGPUArray.
# Note that AbstractGPUArray <: DenseArray, and `v` is generally not
# dense, so we need an intermediate array for the destination.
tmp = similar(src, pperm * size(src)) # TODO avoid allocation!
permutedims!(tmp, src, Tuple(pperm))
copyto!(v, tmp)
end
v
end
end # module Transpositions
using Adapt
using MPI
using PencilArrays
using JLArrays
using Test
MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
rank == 0 || redirect_stdout(devnull)
pen = Pencil((12, 43), comm)
u = PencilArray(pen, rand(Float64, size_local(pen)...))
@testset "Adapt" begin
@testset "Float64 -> Float32" begin
@assert u isa PencilArray{Float64, 2}
@assert parent(u) isa Array{Float64, 2}
v = @inferred adapt(Array{Float32}, u)
@test v isa PencilArray{Float32, 2} # wrapper type is preserved
@test parent(v) isa Array{Float32, 2}
@test u ≈ v
end
# Try changing array type (Array -> JLArray)
@testset "Array -> JLArray" begin
@assert Pencils.typeof_array(u) === Array
v = @inferred adapt(JLArray, u)
@test v isa PencilArray{Float64, 2}
@test Pencils.typeof_array(v) === JLArray
@test parent(v) isa JLArray{Float64, 2}
@test JLArray(parent(u)) == parent(v)
# Similar but changing element type
v = @inferred adapt(JLArray{Float32}, u)
@test v isa PencilArray{Float32, 2}
@test Pencils.typeof_array(v) === JLArray
@test parent(v) isa JLArray{Float32, 2}
@test JLArray(parent(u)) ≈ parent(v)
end
end
#!/usr/bin/env julia
using MPI
using PencilArrays
using Random
using Test
import StaticArrayInterface:
StaticArrayInterface,
StaticInt,
StaticBool,
contiguous_axis,
contiguous_axis_indicator,
contiguous_batch_size,
dense_dims,
stride_rank
struct DummyArray{T,N} <: AbstractArray{T,N}
dims :: Dims{N}
DummyArray{T}(::UndefInitializer, dims::Dims) where {T} = new{T,length(dims)}(dims)
end
DummyArray{T}(init, dims...) where {T} = DummyArray{T}(init, dims)
Base.size(x::DummyArray) = x.dims
Base.similar(x::DummyArray{T}) where {T} = DummyArray{T}(undef, size(x))
Base.getindex(::DummyArray{T}, ind...) where {T} = zero(T)
Base.strides(x::DummyArray) = Base.size_to_strides(1, size(x)...)
function non_dense_array(::Type{T}, dims) where {T}
# Only the first dimension is dense: (True, False, False, ...).
N = length(dims)
dims_parent = ntuple(d -> (d - 1) + dims[d], Val(N))
up = view(Array{T}(undef, dims_parent), Base.OneTo.(dims)...)
@assert dense_dims(up) === ntuple(d -> StaticBool(d == 1), Val(ndims(up)))
up
end
function non_contiguous_array(::Type{T}, dims) where {T}
N = length(dims)
dims_parent = (2, dims...) # we take the slice [1, :, :, ...]
up = view(Array{T}(undef, dims_parent), 1, ntuple(d -> Colon(), Val(N))...)
@assert contiguous_axis(up) === StaticInt(-1)
@assert size(up) == dims
@assert StaticArrayInterface.static_size(up) == dims
up
end
function test_array_interface(pen_in::Pencil)
pa = Pencil(Array, pen_in)
pd = Pencil(DummyArray, pen_in)
# Test different kinds of parent arrays
dims_mem = size_local(pen_in, MemoryOrder())
up_regular = Array{Float64}(undef, dims_mem)
up_noncontig = non_contiguous_array(Float64, dims_mem)
up_nondense = non_dense_array(Float64, dims_mem)
up_dummy = DummyArray{Float64}(undef, dims_mem)
parents = (
up_regular => pa,
up_noncontig => pa,
up_nondense => pa,
up_dummy => pd,
)
@testset "Parent $(typeof(up))" for (up, p) in parents
u = PencilArray(p, up)
@test StaticArrayInterface.parent_type(u) === typeof(up)
@test StaticArrayInterface.known_length(u) === nothing
@test !StaticArrayInterface.can_change_size(u)
@test StaticArrayInterface.ismutable(u)
@test StaticArrayInterface.can_setindex(u)
@test StaticArrayInterface.aos_to_soa(u) === u
@test StaticArrayInterface.fast_scalar_indexing(u)
@test !StaticArrayInterface.isstructured(u)
# Compare outputs with equivalent PermutedDimsArray
iperm = inv(permutation(u))
vp = PermutedDimsArray(up, iperm)
functions = (
contiguous_axis, contiguous_axis_indicator,
contiguous_batch_size, stride_rank, dense_dims,
StaticArrayInterface.static_size,
StaticArrayInterface.static_strides, StaticArrayInterface.offsets,
)
for f in functions
@inferred f(u)
@test f(u) === f(vp)
end
end
nothing
end
MPI.Init()
Nxyz = (11, 21, 32)
comm = MPI.COMM_WORLD
Nproc = MPI.Comm_size(comm)
myrank = MPI.Comm_rank(comm)
let dev_null = @static Sys.iswindows() ? "nul" : "/dev/null"
MPI.Comm_rank(comm) == 0 || redirect_stdout(open(dev_null, "w"))
end
rng = MersenneTwister(42 + myrank)
topo = MPITopology(comm, Val(2))
pen1 = Pencil(topo, Nxyz, (2, 3))
pen2 = Pencil(pen1, decomp_dims=(1, 3), permute=Permutation(2, 1, 3))
pen3 = Pencil(pen2, decomp_dims=(1, 2), permute=Permutation(3, 2, 1))
pens = (pen1, pen2, pen3)
@testset "StaticArrayInterface -- Pencil$(decomposition(p))" for p in pens
test_array_interface(p)
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
|
[
"MIT"
] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | code | 6690 | # Test PencilArrays wrapping arrays of type different from the base Array.
# We use a custom TestArray type as well as the JLArray (<: AbstractGPUArray)
# type defined in the GPUArrays.jl tests.
using MPI
using PencilArrays
using PencilArrays: typeof_array
using Random
using Test
## ================================================================================ ##
using JLArrays: JLArray, DenseJLVector, JLVector, DataRef
# A bit of type piracy to help tests pass (the following functions seem to be defined for
# CuArray).
# This is a modified version of the resize! function defined in JLArrays.jl 0.1.5, which
# avoids freeing memory that will be used in the future.
function Base.resize!(a::DenseJLVector{T}, nl::Integer) where {T}
a_resized = JLVector{T}(undef, nl)
copyto!(a_resized, 1, a, 1, min(length(a), nl))
finalize(a) # free previous memory
a.data = copy(a_resized.data) # this simply increments the reference count by 1
a.offset = 0
a.dims = size(a_resized)
return a
end
function Base.unsafe_wrap(::Type{<:JLArray}, p::Ptr, dims::Dims; kws...)
T = eltype(p)
N = length(dims)
p_obj = convert(Ptr{UInt8}, p)
dims_obj = (sizeof(T) * prod(dims),)
obj = unsafe_wrap(Array, p_obj, dims_obj; kws...)
ref = DataRef(obj)
x = JLArray{T,N}(ref, dims)
@assert pointer(x) === p
x
end
Base.unsafe_wrap(::Type{T}, p::Ptr, n::Integer; kws...) where {T <: JLArray} =
unsafe_wrap(T, p, (n,); kws...)
# Random.rand!(rng::AbstractRNG, u::JLArray, ::Type{X}) where {X} = (rand!(rng, u.data, X); u)
# For some reason this kind of view doesn't work correctly in the original implementation,
# returning a copy.
function Base.view(u::DenseJLVector, I::AbstractUnitRange)
a, b = first(I), last(I)
inds = a:1:b # this kind of range works correctly
view(u, inds)
end
# Note that MPI.Buffer is also defined for CuArray.
function MPI.Buffer(u::JLArray)
obj = u.data.rc.obj :: Vector{UInt8}
count = length(u)
datatype = MPI.Datatype(eltype(u))
MPI.Buffer(obj, count, datatype)
end
## ================================================================================ ##
# Define simple array wrapper type for tests.
struct TestArray{T, N} <: DenseArray{T, N}
data :: Array{T, N}
end
TestArray{T}(args...) where {T} = TestArray(Array{T}(args...))
TestArray{T,N}(args...) where {T,N} = TestArray(Array{T,N}(args...))
Base.parent(u::TestArray) = u # this is just for the tests...
Base.size(u::TestArray) = size(u.data)
Base.similar(u::TestArray, ::Type{T}, dims::Dims) where {T} =
TestArray(similar(u.data, T, dims))
Base.getindex(u::TestArray, args...) = getindex(u.data, args...)
Base.setindex!(u::TestArray, args...) = setindex!(u.data, args...)
Base.resize!(u::TestArray, args...) = (resize!(u.data, args...); u)
Base.pointer(u::TestArray) = pointer(u.data)
Base.pointer(u::TestArray, n::Integer) = pointer(u.data, n) # needed to avoid ambiguity
Base.unsafe_wrap(::Type{<:TestArray}, p::Ptr, dims::Union{Integer, Dims}; kws...) =
TestArray(unsafe_wrap(Array, p, dims; kws...))
if isdefined(Base, :elsize)
# This seems to be needed on Julia nightly (1.12.0-DEV)
Base.elsize(::Type{<:TestArray{T, N}}) where {T, N} = Base.elsize(Array{T, N})
end
MPI.Buffer(u::TestArray) = MPI.Buffer(u.data) # for `gather`
Base.cconvert(::Type{MPI.MPIPtr}, u::TestArray{T}) where {T} =
reinterpret(MPI.MPIPtr, pointer(u))
MPI.Init()
comm = MPI.COMM_WORLD
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
@testset "Array type: $A" for A ∈ (Array, JLArray, TestArray)
pen = @inferred Pencil(A, (8, 10), comm)
@test @inferred(typeof_array(pen)) === A
@test (@inferred (p -> p.send_buf)(pen)) isa A
@test (@inferred (p -> p.recv_buf)(pen)) isa A
# Check that creating a PencilArray with incorrect type of underlying data
# fails.
ArrayOther = A === Array ? TestArray : Array
let dims = size_local(pen, MemoryOrder())
data = ArrayOther{Float32}(undef, dims)
@test_throws ArgumentError PencilArray(pen, data)
end
u = @inferred PencilArray{Float32}(undef, pen)
@test typeof(parent(u)) <: A{Float32}
@test @inferred(typeof_array(pen)) === A
@test @inferred(typeof_array(u)) === A
# This is in particular to test that, for GPU arrays, scalar indexing is not
# performed and the correct GPU functions are called.
rng = MersenneTwister(42)
@testset "Initialisation" begin
@test_nowarn fill!(u, 4)
@test_nowarn rand!(rng, u)
@test_nowarn randn!(rng, u)
end
px = @inferred Pencil(A, (20, 16, 4), (1,), comm)
@testset "Permutation: $perm" for perm ∈ (NoPermutation(), Permutation(2, 3, 1))
if perm != NoPermutation()
# Make sure we're testing the more "interesting" case in which the
# permutation is not its own inverse.
@assert inv(perm) != perm
end
py = @inferred Pencil(px; decomp_dims = (2,), permute = perm)
@test px.send_buf === py.send_buf
@test permutation(py) == perm
@test @inferred(typeof_array(px)) === A
@test @inferred(typeof_array(py)) === A
if A === JLArray
GC.gc()
@test px.send_buf.data.rc.count[] == 1
end
@testset "Transpositions" begin
ux = @test_nowarn rand!(rng, PencilArray{Float64}(undef, px))
uy = @inferred similar(ux, py)
@test pencil(uy) === py
tr = @inferred Transpositions.Transposition(uy, ux)
if A === JLArray
GC.gc()
@test px.send_buf.data.rc.count[] == 1
end
transpose!(tr)
if A === JLArray
GC.gc()
@test px.send_buf.data.rc.count[] == 1
end
@test_logs (:warn, r"is deprecated") MPI.Waitall!(tr)
# Verify transposition
gx = @inferred Nothing gather(ux)
gy = @inferred Nothing gather(uy)
if !(nothing === gx === gy)
@test typeof(gx) === typeof(gy) <: Array
@test gx == gy
end
end
@testset "Multiarrays" begin
M = @inferred ManyPencilArray{Float32}(undef, px, py)
randn!(rng, M.data)
ux = @inferred first(M)
uy = @inferred last(M)
@test ux isa PencilArray{Float32}
@test uy isa PencilArray{Float32}
uxp = parent(ux)
uyp = parent(uy)
@test uxp isa A{Float32}
@test uyp isa A{Float32}
@test @inferred(Tuple(M)) === (ux, uy)
end
end # permutation
end
using MPI
using PencilArrays
using Random
using Test
MPI.Initialized() || MPI.Init()
dims = (8, 12, 9)
comm = MPI.COMM_WORLD
pen = Pencil(dims, comm; permute = Permutation(2, 3, 1))
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
u = PencilArray{Float32}(undef, pen)
randn!(u)
ug = global_view(u)
@testset "Indices" begin
for A in (u, ug)
@test all(zip(LinearIndices(A), CartesianIndices(A))) do (n, I)
A[n] === A[I]
end
@test all(pairs(IndexLinear(), A)) do (n, v)
A[n] === v
end
@test all(pairs(IndexCartesian(), A)) do (I, v)
A[I] === v
end
end
end
using MPI
using PencilArrays
using Random
using Test
using GPUArrays
using JLArrays
MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
rank == 0 || redirect_stdout(devnull)
topo = MPITopology(comm, Val(1))
dims = (11, 12, 2)
perm = Permutation(2, 3, 1)
@assert inv(perm) != perm
pencils = (
"Non-permuted" => Pencil(topo, dims, (2, )),
"Permuted" => Pencil(topo, dims, (2, ); permute = perm),
)
@testset "$s" for (s, pen) in pencils
A = PencilArray{Float64}(undef, pen)
randn!(A)
perm = permutation(A)
@testset "Broadcast" begin
@test typeof(2A) == typeof(A)
@test typeof(A .+ A) == typeof(A)
@test typeof(A .+ A .+ 3) == typeof(A)
@test parent(2A) == 2parent(A)
let x = A, y = similar(x)
broadcast!(+, y, x, x, 3) # precompile before measuring allocations
alloc = @allocated broadcast!(+, y, x, x, 3)
@test alloc == 0
@test y ≈ 2x .+ 3
end
end
@testset "Combinations" begin
# Combine with regular Array
P = parent(A) :: Array
@test typeof(P .+ A) == typeof(A)
@test P .+ A == 2A
end
@testset "GPU arrays" begin
pp = Pencil(JLArray, pen)
u = PencilArray{Float32}(undef, pp)
randn!(u)
# Some basic stuff that should work without scalar indexing
# (Nothing to do with broadcasting though...)
v = @test_nowarn copy(u)
@test typeof(v) === typeof(u)
@test_nowarn v == u
@test v == u
@test v ≈ u
@test parent(u) isa JLArray
@test_nowarn u .+ u # should avoid scalar indexing
@test u .+ u == 2u
@test typeof(u .+ u) == typeof(u)
@test_nowarn v .= u .+ 2u
@test typeof(v) == typeof(u)
@test parent(v) ≈ 3parent(u)
end
end
using HDF5
using MPI
using PencilArrays
using PencilArrays.PencilIO
import JSON3
if !PencilIO.hdf5_has_parallel()
@warn "HDF5 has no parallel support. Skipping HDF5 tests."
exit(0)
end
using Random
using Test
# Copied from PencilArraysHDF5Ext.jl
function _is_set(fapl::HDF5.FileAccessProperties, ::Val{:fclose_degree})
id = fapl.id
degree = Ref{Cint}()
status = ccall(
(:H5Pget_fclose_degree, HDF5.API.libhdf5), HDF5.API.herr_t,
(HDF5.API.hid_t, Ref{Cint}), id, degree)
# A negative value means failure, which we interpret here as meaning that
# "fclose_degree" has not been set.
status ≥ 0
end
function test_write_mpiio(filename, u::PencilArray)
comm = get_comm(u)
root = 0
rank = MPI.Comm_rank(comm)
X = (u, u .+ 1, u .+ 2, u .+ 3, u .+ 4)
kws = Iterators.product((false, true), (false, true))
@test_nowarn open(MPIIODriver(), filename, comm, write=true, create=true) do ff
pos = 0
for (i, (collective, chunks)) in enumerate(kws)
name = "field_$i"
ff[name, collective=collective, chunks=chunks] = X[i]
pos += sizeof_global(X[i])
@test position(ff) == pos
end
end
# Append some data.
open(MPIIODriver(), filename, comm, write=true, append=true) do ff
ff["field_5", chunks=false] = X[5]
ff["collection"] = X
end
@test isfile("$filename.json")
meta = open(JSON3.read, "$filename.json", "r").datasets
# Test file contents in serial mode.
# First, gather data from all processes.
# Note that we may need to permute indices of data, since data on disk is
# written in memory (not logical) order.
perm = Tuple(permutation(u))
Xg = map(X) do x
xg = gather(x, root) # note: data is in logical order
xg === nothing && return xg
perm === nothing && return xg
PermutedDimsArray(xg, perm)
end
@test (Xg[1] === nothing) == (rank != root)
if rank == root
open(filename, "r") do ff
y = similar(Xg[1])
for (i, (collective, chunks)) in enumerate(kws)
mpiio_read_serial!(ff, y, meta, "field_$i")
if !chunks # if chunks = true, data is reordered into blocks
@test y == Xg[i]
end
end
# Verify appended data
mpiio_read_serial!(ff, y, meta, "field_5")
@test y == Xg[5]
let y = similar.(Xg)
mpiio_read_serial!(ff, y, meta, "collection")
@test all(y .== Xg)
end
end
end
# Read stuff
y = similar(X[1])
@test_nowarn open(MPIIODriver(), filename, comm, read=true) do ff
@test_throws ErrorException read!(ff, y, "field not in file")
let y = similar.(X)
read!(ff, y, "collection")
@test all(y .== X)
end
for (i, (collective, _)) in enumerate(kws)
name = "field_$i"
read!(ff, y, name, collective=collective)
@test y == X[i]
end
read!(ff, y, "field_5")
@test y == X[5]
end
# File without metadata
filename_nometa = filename * "_nometa"
rank == root && symlink(filename, filename_nometa)
MPI.Barrier(comm)
fill!(y, 0)
@test_nowarn open(
MPIIODriver(), filename_nometa, comm; read = true,
) do ff
# We can't pass a name, since metadata is not available.
@test_throws ArgumentError read!(ff, y, "name_doesnt_matter")
# This should read the first written dataset (offset = 0).
@test_nowarn read!(ff, y)
# This will only be true if X[1] was written with chunks = false.
@test y == X[1]
end
nothing
end
read_array!(ff, x) = read!(ff, x)
read_array!(ff, t::Tuple) = map(x -> read_array!(ff, x), t)
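# Seek to the dataset offset recorded in the JSON metadata, then read it.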
function mpiio_read_serial!(ff, x, meta, name)
offset = meta[Symbol(name)].offset_bytes :: Int
seek(ff, offset)
read_array!(ff, x)
end
function test_write_hdf5(filename, u::PencilArray)
comm = get_comm(u)
rank = MPI.Comm_rank(comm)
v = u .+ 1
w = u .+ 2
# Open file in serial mode first.
if rank == 0
h5open(filename, "w") do ff
# "HDF5 file was not opened with the MPIO driver"
@test_throws ErrorException ff["scalar"] = u
end
end
MPI.Barrier(comm)
@test_nowarn open(PHDF5Driver(), filename, comm, write=true) do ff
@test isopen(ff)
@test_nowarn ff["scalar", collective=true, chunks=false] = u
@test_nowarn ff["vector_tuple", collective=false, chunks=true] = (u, v, w)
@test_nowarn ff["vector_array", collective=true, chunks=true] = [u, v, w]
end
@test_nowarn open(PHDF5Driver(), filename, comm, append=true) do ff
@test isopen(ff)
@test_nowarn ff["scalar_again"] = u
end
@test_nowarn open(PHDF5Driver(), filename, comm, read=true) do ff
@test isopen(ff)
uvw = (u, v, w)
uvw_r = similar.(uvw)
ur, vr, wr = uvw_r
read!(ff, ur, "scalar")
@test u == ur
read!(ff, vr, "scalar_again")
@test vr == ur
let perm = Tuple(permutation(ur))
@test haskey(attributes(ff["scalar"]), "permutation")
expected = perm === nothing ? false : collect(perm)
@test read(ff["scalar"]["permutation"]) == expected
end
read!(ff, uvw_r, "vector_tuple")
@test all(uvw .== uvw_r)
fill!.(uvw_r, 0)
read!(ff, collect(uvw_r), "vector_array", collective=false)
@test all(uvw .== uvw_r)
end
nothing
end
MPI.Init()
Nxyz = (16, 21, 41)
comm = MPI.COMM_WORLD
Nproc = MPI.Comm_size(comm)
myrank = MPI.Comm_rank(comm)
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
@show HDF5.API.libhdf5
@testset "HDF5 properties" begin
let fapl = HDF5.FileAccessProperties()
@test _is_set(fapl, Val(:fclose_degree)) === false
fapl.fclose_degree = :strong
@test _is_set(fapl, Val(:fclose_degree)) === true
@test fapl.fclose_degree === :strong
end
end
rng = MersenneTwister(42)
perms = (NoPermutation(), Permutation(2, 3, 1))
@testset "$perm" for perm in perms
pen = Pencil(Nxyz, (1, 3), comm; permute = perm)
u = PencilArray{Float64}(undef, pen)
randn!(rng, u)
u .+= 10 * myrank
@testset "MPI-IO" begin
filename = MPI.bcast(tempname(), 0, comm)
test_write_mpiio(filename, u)
end
@testset "HDF5" begin
filename = MPI.bcast(tempname(), 0, comm)
test_write_hdf5(filename, u)
end
end
# HDF5.API.h5_close()
# MPI.Finalize()
using MPI
using PencilArrays
using Test
using PencilArrays.LocalGrids:
LocalRectilinearGrid, components
# TODO
# - return SVector for grid elements?
MPI.Init()
comm = MPI.COMM_WORLD
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
perm = Permutation(2, 3, 1)
@assert inv(perm) != perm
dims = (6, 11, 21)
pen = Pencil(dims, comm; permute = perm)
@testset "LocalRectilinearGrid" begin
coords_global = (
range(0, 1; length = dims[1]),
range(0, 1; length = dims[2]),
[n^2 for n = 1:dims[3]], # these are Int
)
grid = @inferred localgrid(pen, coords_global)
@test grid isa LocalRectilinearGrid{3}
@test permutation(grid) === permutation(pen)
@test ndims(grid) == 3
@test match(
r"^LocalRectilinearGrid\{3\} with Permutation\(.*\) and coordinates:",
repr(grid),
) !== nothing
# Components
@inferred (g -> (g.x, g.y, g.z))(grid)
@inferred (g -> (g[1], g[2], g[3]))(grid)
@test match(
r"^Component i = 2 of LocalRectilinearGrid\{3\}:", repr(grid.y),
) !== nothing
xl, yl, zl = @inferred components(grid)
@test grid.x == xl
@test all(i -> xl[i] == grid.x[i], eachindex(grid.x))
@test all(i -> yl[i] == grid.y[i], eachindex(grid.y))
@test all(i -> zl[i] == grid.z[i], eachindex(grid.z))
# Broadcasting
u = PencilArray{Float32}(undef, pen)
@test @inferred(localgrid(u, coords_global)) === grid
@test_nowarn @. u = grid.x * grid.y + grid.z
# Indexing
@test @inferred(eachindex(grid)) === @inferred(CartesianIndices(grid))
@test eachindex(grid) === CartesianIndices(u)
@test all(eachindex(grid)) do I
x, y, z = grid[I]
u[I] ≈ x * y + z
end
# Iteration: check that grids and arrays iterate in the same order
@test all(zip(u, grid)) do (v, xyz)
x, y, z = xyz
v ≈ x * y + z
end
@test all(enumerate(grid)) do (i, xyz)
x, y, z = xyz
u[i] ≈ x * y + z
end
@test all(pairs(grid)) do (I, xyz)
x, y, z = xyz
u[I] ≈ x * y + z
end
coords_col = @inferred collect(grid)
@test coords_col isa Vector
@test eltype(coords_col) === eltype(grid) ===
typeof(map(first, components(grid))) # = Tuple{Float64, Float64, Int}
end
# Test interaction with DifferentialEquations.jl.
# We solve a trivial decoupled system of ODEs.
import DiffEqBase
using MPI
using PencilArrays
using OrdinaryDiffEqTsit5
using RecursiveArrayTools: ArrayPartition
using StructArrays: StructArray
using StaticArrays: SVector
using Test
function to_structarray(us::NTuple{N, A}) where {N, A <: AbstractArray}
T = eltype(A)
Vec = SVector{N, T}
StructArray{Vec}(us)
end
MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
rank == 0 || redirect_stdout(devnull)
dims = (13, 23, 17) # prime dimensions to test an unbalanced partitioning
coords_global = map(N -> range(0, 1; length = N), dims)
perm = Permutation(2, 3, 1)
pen = Pencil(dims, comm; permute = perm)
grid = localgrid(pen, coords_global)
u0 = PencilArray{Float64}(undef, pen)
@. u0 = grid.x * grid.y + grid.z
function rhs!(du, u, p, t)
@. du = -0.1 * u
du
end
@testset "DiffEqBase" begin
unorm = DiffEqBase.ODE_DEFAULT_NORM(u0, 0.0)
unorms = MPI.Allgather(unorm, comm)
@test allequal(unorms)
# Note that ODE_DEFAULT_UNSTABLE_CHECK calls NAN_CHECK.
w = copy(u0)
wcheck = DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK(nothing, w, nothing, nothing)
@test wcheck == false
# After setting a single value to NaN, all processes should detect it.
if rank == 0
w[1] = NaN
end
wcheck = DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK(nothing, w, nothing, nothing)
@test wcheck == true
end
@testset "OrdinaryDiffEq" begin
tspan = (0.0, 1000.0)
params = (;)
prob = @inferred ODEProblem{true}(rhs!, u0, tspan, params)
# This is not fully inferred...
integrator = init(
prob, Tsit5();
adaptive = true, save_everystep = false,
)
# Check that all timesteps are the same
for _ = 1:10
local dts = MPI.Allgather(integrator.dt, comm)
@test allequal(dts)
step!(integrator)
end
@testset "ArrayPartition" begin
v0 = ArrayPartition(u0)
prob = @inferred ODEProblem{true}(rhs!, v0, tspan, params)
# TODO for now this fails when permutations are enabled due to incompatible
# broadcasting.
@test_skip integrator = init(
prob, Tsit5();
adaptive = true, save_everystep = false,
)
end
# Solve the equation for a 2D vector field represented by a StructArray.
@testset "StructArray" begin
v0 = to_structarray((u0, 2u0))
@assert eltype(v0) <: SVector{2}
tspan = (0.0, 1.0)
prob = @inferred ODEProblem{true}(rhs!, v0, tspan, params)
integrator = init(
prob, Tsit5();
adaptive = true, save_everystep = false,
)
@test integrator.u == v0
for _ ∈ 1:10
step!(integrator)
end
@test integrator.u ≠ v0
end
end
using PencilArrays
using PencilArrays.MPITopologies
using MPI
using BenchmarkTools
using Random
using Test
const BENCHMARK_ARRAYS = "--benchmark" in ARGS
Indexation(::Type{IndexLinear}) = LinearIndices
Indexation(::Type{IndexCartesian}) = CartesianIndices
# For testing `similar` for PencilArray.
struct DummyArray{T,N} <: AbstractArray{T,N}
dims :: Dims{N}
DummyArray{T}(::UndefInitializer, dims::Dims) where {T} = new{T,length(dims)}(dims)
end
DummyArray{T}(init, dims...) where {T} = DummyArray{T}(init, dims)
Base.size(x::DummyArray) = x.dims
Base.getindex(::DummyArray{T}, ind...) where {T} = zero(T)
Base.similar(x::DummyArray, ::Type{S}, dims::Dims) where {S} = DummyArray{S}(undef, dims)
function benchmark_fill!(::Type{T}, u, val) where {T <: IndexStyle}
indices = Indexation(T)(u)
@inbounds for I in indices
u[I] = val
end
u
end
function test_array_wrappers(p::Pencil, ::Type{T} = Float64) where {T}
u = PencilArray{T}(undef, p)
@test match(r"PencilArray{.*}\(::Pencil{.*}\)", summary(u)) !== nothing
for x in (42, 10)
fill!(u, x)
@test all(==(x), u)
end
perm = permutation(u)
@test perm === permutation(typeof(u))
let topo = topology(p)
@test topo === topology(u)
I = coords_local(topo)
@test range_remote(u, I) === range_remote(p, I)
end
@test parent(u) === u.data
@test eltype(u) === eltype(u.data) === T
@test length.(axes(u)) === size_local(u)
@test sizeof_global(u) == sizeof(T) * prod(size_global(u))
@test length_global(u) == prod(size_global(u))
@test sizeof_global((u, u)) == 2 * sizeof_global(u)
let umat = [u for i = 1:2, j = 1:3]
@test sizeof_global(umat) == 6 * sizeof_global(u)
end
@test length_global(u) == length_global(p)
@test size_global(u) == size_global(p)
@test length_local(u) == length_local(p)
@test size_local(u) == size_local(p)
let
A = PermutedDimsArray(parent(u), perm)
@test strides(A) === strides(u)
end
randn!(u)
@test check_iteration_order(u)
@inferred global_view(u)
ug = global_view(u)
@test check_iteration_order(ug)
@test length(u) == length_local(u)
@test size(u) == size_local(u)
if BENCHMARK_ARRAYS
for S in (IndexLinear, IndexCartesian)
@info("Filling arrays using $S (Array, PencilArray, GlobalPencilArray)",
permutation(p))
for v in (parent(u), u, ug)
val = 3 * oneunit(eltype(v))
@btime benchmark_fill!($S, $v, $val)
end
println()
end
end
@testset "similar" begin
let v = @inferred similar(u)
@test typeof(v) === typeof(u)
@test length(v) == length(u)
@test length_local(v) == length_local(u)
@test size(v) == size(u)
@test size_local(v) == size_local(u)
@test pencil(v) === pencil(u)
end
let v = @inferred similar(u, Int)
@test v isa PencilArray
@test size_local(v) == size_local(u)
@test eltype(v) === Int
@test pencil(v) === pencil(u)
end
@test (@inferred similar(u, size(u))) isa PencilArray{T}
@test (@inferred similar(u, Int, size(u))) isa PencilArray{Int}
@test_throws DimensionMismatch similar(u, 12)
@test_throws DimensionMismatch similar(u, Int, 12)
@test_throws DimensionMismatch similar(u, size(u) .+ 1)
@test_throws DimensionMismatch similar(u, Int, size(u) .+ 1)
let A = DummyArray{Int}(undef, size_local(p, MemoryOrder()))
pdummy = Pencil(DummyArray, p)
local u = @inferred PencilArray(pdummy, A)
@test parent(u) === A
v = @inferred similar(u)
@test typeof(v) === typeof(u)
@test size(v) == size(u)
end
# Test similar(u, [T], q::Pencil)
let N = ndims(p)
permute = Permutation(N, ntuple(identity, N - 1)...) # = (N, 1, 2, ..., N - 1)
decomp_dims = mod1.(decomposition(p) .+ 1, N)
q = Pencil(p; decomp_dims = decomp_dims, permute = permute)
v = @inferred similar(u, q)
@test pencil(v) === q
@test eltype(v) === eltype(u)
@test size_global(v) === size_global(u)
w = @inferred similar(u, Int, q)
@test pencil(w) === q
@test eltype(w) === Int
@test size_global(w) === size_global(u)
end
end
@test fill!(u, 42) === u
let z = @inferred zero(u)
@test all(iszero, z)
@test typeof(z) === typeof(u)
@test pencil(z) === pencil(u)
@test size(z) === size(u)
@test size_local(z) === size_local(u)
end
let v = similar(u)
@test typeof(v) === typeof(u)
psize = size_local(p, LogicalOrder())
@test psize === size_local(v) === size_local(u)
@test psize === size_local(u, LogicalOrder()) === size_local(v, LogicalOrder())
vp = parent(v)
randn!(vp)
I = size_local(v) .>> 1 # non-permuted indices
J = perm * I
@test v[I...] == vp[J...] # the parent takes permuted indices
end
let psize = size_local(p, MemoryOrder())
a = zeros(T, psize)
u = PencilArray(p, a)
@test parent(u) === a
@test IndexStyle(typeof(u)) === IndexStyle(typeof(a)) === IndexLinear()
b = zeros(T, psize .+ 2)
@test_throws DimensionMismatch PencilArray(p, b)
@test_throws DimensionMismatch PencilArray(p, zeros(T, 3, psize...))
# This is allowed.
w = PencilArray(p, zeros(T, psize..., 3))
@test size_global(w) === (size_global(p)..., 3)
@inferred PencilArray(p, zeros(T, psize..., 3))
@inferred size_global(w)
end
nothing
end
function test_multiarrays(pencils::Vararg{Pencil,M};
element_type::Type{T} = Float64) where {M,T}
@assert M >= 3
@inferred ManyPencilArray{T}(undef, pencils...)
A = ManyPencilArray{T}(undef, pencils...)
@test ndims(A) === ndims(first(pencils))
@test eltype(A) === T
@test length(A) === M
@inferred first(A)
@inferred last(A)
@inferred A[Val(2)]
@inferred A[Val(M)]
@test_throws ErrorException @inferred A[2] # type not inferred
@test A[Val(1)] === first(A) === A[Val(UInt8(1))] === A[1]
@test A[Val(2)] === A[2] === A.arrays[2] === A[Val(Int32(2))]
@test A[Val(M)] === last(A)
@test_throws BoundsError A[Val(0)]
@test_throws BoundsError A[Val(M + 1)]
@testset "In-place extra dimensions" begin
e = (3, 2)
@inferred ManyPencilArray{T}(undef, pencils...; extra_dims=e)
A = ManyPencilArray{T}(undef, pencils...; extra_dims=e)
@test extra_dims(first(A)) === extra_dims(last(A)) === e
@test ndims_extra(first(A)) == ndims_extra(last(A)) == length(e)
end
@testset "In-place transpose" begin
u = A[Val(1)]
v = A[Val(2)]
w = A[Val(3)]
randn!(u)
u_orig = copy(u)
transpose!(v, u) # this also modifies `u`!
@test compare_distributed_arrays(u_orig, v)
# In the 1D decomposition case, this is a local transpose, since v and w
# only differ in the permutation.
transpose!(w, v)
@test compare_distributed_arrays(u_orig, w)
end
nothing
end
function check_iteration_order(u::Union{PencilArray,GlobalPencilArray})
p = parent(parent(u)) :: Array # two `parent` are needed for GlobalPencilArray
cart = CartesianIndices(u)
lin = LinearIndices(u)
# Check that the behaviour of `cart` is consistent with that of
# CartesianIndices.
@assert size(CartesianIndices(p)) == size(p)
@test size(cart) == size_local(u)
# Same for `lin`.
@assert size(LinearIndices(p)) == size(p)
@test size(lin) == size_local(u)
# Check that Cartesian indices iterate in memory order.
for (n, I) in enumerate(cart)
l = lin[I]
@assert l == n
u[n] == p[n] == u[I] == u[l] || return false
end
# Also test iteration on LinearIndices and their conversion to Cartesian
# indices.
for (n, l) in enumerate(lin)
@assert l == n
# Convert linear to Cartesian index.
I = cart[l] # this is relatively slow, don't do it in real code!
u[n] == p[n] == u[I] == u[l] || return false
end
N = ndims(u)
@test ndims(lin) == ndims(cart) == N
true
end
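# Gather both distributed arrays to the root process, compare them there, and
# broadcast the result so that all ranks return the same answer.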
function compare_distributed_arrays(u_local::PencilArray, v_local::PencilArray)
comm = get_comm(u_local)
root = 0
myrank = MPI.Comm_rank(comm)
u = gather(u_local, root)
v = gather(v_local, root)
same = Ref(false)
if u !== nothing && v !== nothing
@assert myrank == root
same[] = u == v
end
MPI.Bcast!(same, root, comm)
same[]
end
MPI.Init()
Nxyz = (16, 21, 41)
comm = MPI.COMM_WORLD
Nproc = MPI.Comm_size(comm)
myrank = MPI.Comm_rank(comm)
myrank == 0 || redirect_stdout(devnull)
rng = MersenneTwister(42 + myrank)
# Let MPI_Dims_create choose the values of (P1, P2).
proc_dims = MPITopologies.dims_create(comm, Val(2))
# Note that using dims_create is the default in MPITopology
@test MPITopology(comm, proc_dims) == MPITopology(comm, Val(2))
@test_throws ArgumentError MPITopology(comm, proc_dims .- 1)
@test_throws ArgumentError MPITopology(comm, proc_dims .+ 1)
topo = MPITopology(comm, proc_dims)
@test match(
r"^MPI topology: 2D decomposition \(\d+×\d+ processes\)$",
string(topo),
) !== nothing
@test ndims(topo) == length(proc_dims) == 2
pen1 = @inferred Pencil(topo, Nxyz)
let p = @inferred Pencil(topo, Nxyz, (2, 3)) # this is the default decomposition
@test decomposition(p) === decomposition(pen1)
end
pen2 = @inferred Pencil(pen1, decomp_dims=(1, 3), permute=Permutation(2, 3, 1))
pen3 = @inferred Pencil(pen2, decomp_dims=(1, 2), permute=Permutation(3, 2, 1))
@test match(r"Pencil{3, 2, NoPermutation, Array}", summary(pen1)) !== nothing
@test match(r"Pencil{3, 2, Permutation{.*}, Array}", summary(pen2)) !== nothing
println("Pencil 1: ", pen1, "\n")
println("Pencil 2: ", pen2, "\n")
println("Pencil 3: ", pen3, "\n")
@testset "Pencil constructors" begin
comm = MPI.COMM_WORLD
p = @inferred Pencil((5, 4, 4, 3), comm)
@test decomposition(p) == (2, 3, 4)
@test ndims(topology(p)) == 3
@test permutation(p) == NoPermutation()
p = @inferred Pencil((5, 4, 4, 3), comm;
permute = Permutation(2, 3, 4, 1))
@test decomposition(p) == (2, 3, 4)
@test ndims(topology(p)) == 3
@test permutation(p) == Permutation(2, 3, 4, 1)
p = @inferred Pencil((5, 4, 4, 3), (2, 3), comm;
permute = Permutation(2, 3, 4, 1))
@test decomposition(p) == (2, 3)
@test ndims(topology(p)) == 2
@test permutation(p) == Permutation(2, 3, 4, 1)
end
@testset "ManyPencilArray" begin
test_multiarrays(pen1, pen2, pen3)
end
@testset "Topology" begin
@test topology(pen2) === topo
@test range_remote(pen2, coords_local(topo)) == range_local(pen2)
@test eachindex(topo) isa LinearIndices
for (n, I) in zip(eachindex(topo), CartesianIndices(topo))
for order in (MemoryOrder(), LogicalOrder())
@test range_remote(pen2, Tuple(I), order) ==
range_remote(pen2, n, order)
end
end
end
# Note: the permutation of pen2 was chosen such that the inverse permutation
# is different.
@assert permutation(pen2) != inv(permutation(pen2))
@testset "Pencil constructor checks" begin
# Invalid permutations
@test_throws TypeError Pencil(
topo, Nxyz, (1, 2), permute=(2, 3, 1))
@test_throws ArgumentError Pencil(
topo, Nxyz, (1, 2), permute=Permutation(0, 3, 15))
# Decomposed dimensions may not be repeated.
@test_throws ArgumentError Pencil(topo, Nxyz, (2, 2))
# Decomposed dimensions must be in 1:N = 1:3.
@test_throws ArgumentError Pencil(topo, Nxyz, (1, 4))
@test_throws ArgumentError Pencil(topo, Nxyz, (0, 2))
@test Pencils.complete_dims(Val(5), (2, 3), (42, 12)) ===
(1, 42, 12, 1, 1)
# Divide dimension of size = Nproc - 1 among Nproc processes.
# => One process will have no data!
global_dims = (12, Nproc - 1)
decomp_dims = (2,)
# These warning tests fail on Julia 1.6 for some reason.
# The warning is shown, but @test_warn apparently fails to capture the
# output.
@static if VERSION ≥ v"1.7"
@test_warn "have no data" Pencil(global_dims, decomp_dims, comm)
# Throw warning if amount of local data is larger than typemax(Cint) (#58).
if sizeof(Cint) == 4 # just in case Cint == Int64, if that can ever happen...
let Nlocal = Int64(2) * (Int64(typemax(Cint)) + 1)
local Nglobal = Int64(Nproc) * Nlocal
local Nx = Int64(64)
local Ny = Nglobal ÷ Nx
@assert Nx * Ny == Nglobal
@test_warn "size of local data is too large" Pencil((Nx, Ny), comm)
end
end
end
end
@testset "Pencil" begin
for p ∈ (pen1, pen2, pen3)
@test size(p) === size_local(p, LogicalOrder())
@test length(p) === prod(size(p))
@inferred (p -> p.send_buf)(p)
end
@testset "similar" begin
p = pen2
# Case 1a: identical pencil
let q = @inferred similar(p)
@test q === p
end
# Case 1b: different dimensions
let q = @inferred similar(p, 2 .* size_global(p))
@test size_global(q) == 2 .* size_global(p)
end
# Case 2a: same dimensions but different array type
let q = @inferred similar(p, DummyArray)
@test q !== p
@test q.axes_all === p.axes_all # array wasn't copied nor recomputed
@test size_global(q) == size_global(p)
@test Pencils.typeof_array(q) === DummyArray
end
# Case 2b: different dimensions and array type
let q = @inferred similar(p, DummyArray, 2 .* size_global(p))
@test q !== p
@test size_global(q) == 2 .* size_global(p)
@test Pencils.typeof_array(q) === DummyArray
end
end
end
@testset "PencilArray" begin
test_array_wrappers(pen1)
test_array_wrappers(pen2)
test_array_wrappers(pen3)
end
# Test arrays with extra dimensions.
@testset "extra dimensions" begin
T = Float32
u1 = PencilArray{T}(undef, pen1, 3, 4)
u2 = PencilArray{T}(undef, pen2, 3, 4)
u3 = PencilArray{T}(undef, pen3, 3, 4)
@test range_local(u2) ===
(range_local(pen2)..., Base.OneTo.((3, 4))...)
@test range_remote(u2, 1) ===
(range_remote(pen2, 1)..., Base.OneTo.((3, 4))...)
randn!(rng, u1)
transpose!(u2, u1)
@test compare_distributed_arrays(u1, u2)
transpose!(u3, u2)
@test compare_distributed_arrays(u2, u3)
for v in (u1, u2, u3)
@test check_iteration_order(v)
end
@inferred global_view(u1)
end
# Test slab (1D) decomposition.
@testset "1D decomposition" begin
T = Float32
topo = MPITopology(comm, (Nproc, ))
@test ndims(topo) == 1
pen1 = Pencil(topo, Nxyz, (1, ))
pen2 = Pencil(pen1, decomp_dims=(2, ))
# Same decomposed dimension as pen2, but different permutation.
pen3 = Pencil(pen2, permute=Permutation(3, 2, 1))
u1 = PencilArray{T}(undef, pen1)
u2 = @inferred similar(u1, pen2)
u3 = @inferred similar(u1, pen3)
@test pencil(u2) === pen2
@test pencil(u3) === pen3
randn!(rng, u1)
transpose!(u2, u1)
@test compare_distributed_arrays(u1, u2)
transpose!(u3, u2)
@test compare_distributed_arrays(u1, u3)
@test check_iteration_order(u3)
# Test transposition between two identical configurations.
transpose!(u2, u2)
@test compare_distributed_arrays(u1, u2)
let v = similar(u2)
@test pencil(u2) === pencil(v)
transpose!(v, u2)
@test compare_distributed_arrays(u1, v)
end
test_multiarrays(pen1, pen2, pen3)
end
# Test decomposition along all dimensions.
@testset "3D decomposition" begin
T = ComplexF32
topo = @inferred MPITopology(comm, Val(3))
@test ndims(topo) == 3
# Note that we can't really change the decomposition if we're decomposing
# all dimensions, but we can at least change the permutation.
pen1 = @inferred Pencil(topo, Nxyz)
pen2 = @inferred Pencil(pen1; permute = Permutation(2, 3, 1))
u1 = @inferred PencilArray{T}(undef, pen1)
u2 = @inferred similar(u1, pen2)
@test permutation(u1) == Permutation(1, 2, 3) == NoPermutation()
@test permutation(u2) == Permutation(2, 3, 1)
randn!(rng, u1)
transpose!(u2, u1)
@test compare_distributed_arrays(u1, u2)
end
@testset "Inference" begin
periods = zeros(Int, length(proc_dims))
comm_cart = MPI.Cart_create(comm, proc_dims; reorder = false)
@inferred MPITopologies.create_subcomms(Val(2), comm_cart)
@test_throws ArgumentError MPITopology{3}(comm_cart) # wrong dimensionality
@inferred MPITopology{2}(comm_cart)
@inferred MPITopologies.get_cart_ranks_subcomm(pen1.topology.subcomms[1])
@inferred PencilArrays.to_local(pen2, (1:2, 1:2, 1:2), MemoryOrder())
@inferred PencilArrays.to_local(pen2, (1:2, 1:2, 1:2), LogicalOrder())
@inferred PencilArrays.size_local(pen2, MemoryOrder())
T = Int
@inferred PencilArray{T}(undef, pen2)
@inferred PencilArray{T}(undef, pen2, 3, 4)
u1 = PencilArray{T}(undef, pen1)
u2 = similar(u1, pen2)
@inferred Nothing gather(u2)
@inferred transpose!(u2, u1)
@inferred Transpositions.get_remote_indices(1, (2, 3), 8)
end
using OffsetArrays
using PencilArrays: Permutation, PermutedLinearIndices, PermutedCartesianIndices
using Test
@testset "Permuted indices" begin
Aoff = OffsetArray(rand(3, 4, 5), -2, -1, -3)
lin = LinearIndices(Aoff)
cart = CartesianIndices(Aoff)
perm = Permutation(3, 1, 2)
plin = @inferred PermutedLinearIndices(lin, perm)
pcart = @inferred PermutedCartesianIndices(cart, perm)
for f in (size, axes)
@test f(plin) == perm \ f(lin)
@test f(pcart) == perm \ f(cart)
end
for n in LinearIndices(lin)
@test lin[n] == plin[n]
@test cart[n] == perm * pcart[n]
end
for I in CartesianIndices(lin)
@test lin[I] == plin[perm \ I]
end
for J in CartesianIndices(plin)
@test lin[perm * J] == plin[J]
end
# Iterate over permuted indices
for n in plin
@test n == lin[n] == plin[n]
@test cart[n] == perm * pcart[n]
end
for J in pcart
@test pcart[J] == J
end
for (I, J) in zip(cart, pcart)
@test I == perm * J
end
end
using MPI
using PencilArrays
using Test
MPI.Init()
comm = MPI.COMM_WORLD
nprocs = MPI.Comm_size(comm)
rank = MPI.Comm_rank(comm)
myid = rank + 1
rank == 0 || redirect_stdout(devnull)
pen = Pencil((16, 32, 14), comm)
u = PencilArray{Int32}(undef, pen)
fill!(u, 2myid)
@testset "Reductions" begin
@test minimum(u) == 2
@test maximum(u) == 2nprocs
@test minimum(abs2, u) == 2^2
@test maximum(abs2, u) == (2nprocs)^2
@test sum(u) === MPI.Allreduce(sum(parent(u)), +, comm)
@test sum(abs2, u) === MPI.Allreduce(sum(abs2, parent(u)), +, comm)
@testset "Multiple PencilArrays" begin
û = @. u + im * u
v̂ = copy(û)
a = @inferred sum(abs2, û; init = zero(eltype(û))) # `init` needed for inference when eltype(û) = Complex{Int32}...
# These should all be equivalent:
b = @inferred mapreduce((x, y) -> real(x * conj(y)), +, û, v̂)
c = @inferred sum(Base.splat((x, y) -> real(x * conj(y))), zip(û, v̂))
d = @inferred sum(xs -> real(xs[1] * conj(xs[2])), zip(û, v̂))
@test a == b == c == d
end
# These exact equalities work because we're using integers.
# They are not guaranteed to work with floats.
@test foldl(min, u) === minimum(u)
@test mapfoldl(abs2, min, u) === minimum(abs2, u)
@test foldr(min, u) === minimum(u)
@test mapfoldr(abs2, min, u) === minimum(abs2, u)
@testset "any / all" begin
@test any(==(2), u) === true # the first process has u[:] = 2myid = 2
@test any(==(-2), u) === false
@test nprocs == 1 || all(==(2), u) === false
@test all(>(0), u) === true
end
end
using MPIPreferences
@show MPIPreferences.binary
using InteractiveUtils
using MPI: MPI, mpiexec
# Load test packages to trigger precompilation
using PencilArrays
# These tests can be run in serial mode
test_files_serial = [
"permutations.jl",
]
test_files = [
"io.jl",
"localgrid.jl",
"reductions.jl",
"broadcast.jl",
"array_types.jl",
"arrays.jl",
"pencils.jl",
"transpose.jl",
"array_interface.jl",
"adapt.jl",
"ode.jl",
]
Nproc = let N = get(ENV, "JULIA_MPI_TEST_NPROCS", nothing)
N === nothing ? clamp(Sys.CPU_THREADS, 4, 6) : parse(Int, N)
end
println()
versioninfo()
MPI.versioninfo()
if MPIPreferences.binary != "system"
error("""
tests should be run with system MPI binaries for testing parallel HDF5
(found MPIPreferences.binary = $(MPIPreferences.binary))
""")
end
for fname in test_files_serial
include(fname)
end
for fname in test_files
@info "Running $fname with $Nproc processes..."
mpiexec() do cmd
run(`$cmd -n $Nproc $(Base.julia_cmd()) $fname`)
end
println()
end
using PencilArrays
using MPI
using Test
using Random
function compare_distributed_arrays(u_local::PencilArray, v_local::PencilArray)
comm = get_comm(u_local)
root = 0
myrank = MPI.Comm_rank(comm)
u = gather(u_local, root)
v = gather(v_local, root)
same = Ref(false)
if u !== nothing && v !== nothing
@assert myrank == root
same[] = u == v
end
MPI.Bcast!(same, root, comm)
same[]
end
function test_transpose(method)
dims = (16, 21, 41)
comm = MPI.COMM_WORLD
pen1 = @inferred Pencil(dims, (2, 3), comm)
pen2 = @inferred Pencil(pen1; decomp_dims = (1, 3), permute = Permutation(2, 3, 1))
pen3 = @inferred Pencil(pen2; decomp_dims = (1, 2), permute = Permutation(3, 2, 1))
T = Float64
u1 = PencilArray{T}(undef, pen1)
u2 = PencilArray{T}(undef, pen2)
u3 = PencilArray{T}(undef, pen3)
# Set initial random data.
myrank = MPI.Comm_rank(comm)
rng = MersenneTwister(42 + myrank)
randn!(rng, u1)
u1 .+= 10 * myrank
u1_orig = copy(u1)
# Direct u1 -> u3 transposition is not possible!
@test_throws ArgumentError transpose!(u3, u1; method)
# Transpose back and forth between different pencil configurations
transpose!(u2, u1; method)
@test compare_distributed_arrays(u1, u2)
transpose!(u3, u2; method)
@test compare_distributed_arrays(u2, u3)
transpose!(u2, u3; method)
@test compare_distributed_arrays(u2, u3)
transpose!(u1, u2; method)
@test compare_distributed_arrays(u1, u2)
@test u1_orig == u1
# Test transpositions without permutations.
let pen2 = Pencil(pen1; decomp_dims = (1, 3))
u2 = PencilArray{T}(undef, pen2)
transpose!(u2, u1; method)
@test compare_distributed_arrays(u1, u2)
end
# Test transpositions with unsorted decomp_dims (#57).
let pen_alt = @inferred Pencil(pen1; decomp_dims = (2, 1))
ualt = PencilArray{T}(undef, pen_alt)
transpose!(ualt, u1; method)
@test compare_distributed_arrays(u1, ualt)
end
nothing
end
MPI.Init()
comm = MPI.COMM_WORLD
MPI.Comm_rank(comm) == 0 || redirect_stdout(devnull)
transpose_methods = (
Transpositions.PointToPoint(),
Transpositions.Alltoallv(),
)
@testset "transpose! $method" for method in transpose_methods
test_transpose(method)
end
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 819 |
# Changelog
The format is based on [Keep a Changelog] and [Common Changelog].
## [0.19.0] - 2023-07-14
### Changed
- **Breaking:** change behaviour of `similar(u::PencilArray, [T], dims)` ([#83])
When the `dims` argument is passed, we now try to return a new `PencilArray` instead of another (non-distributed) array type. Since this is only possible when `dims` matches the array size, an error is now thrown if that is not the case. This allows things to play nicely with other packages such as [StructArrays.jl](https://github.com/JuliaArrays/StructArrays.jl), which in some cases end up calling `similar` with the `dims` argument.
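A sketch of the new behaviour (`u` being any `PencilArray`; not taken from the release notes):

```julia
similar(u, size(u))  # returns a new PencilArray (as before)
similar(u, (2, 3))   # now throws an error unless (2, 3) == size(u)
```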
[Keep a Changelog]: https://keepachangelog.com/en/1.1.0/
[Common Changelog]: https://common-changelog.org/
[#83]: https://github.com/jipolanco/PencilArrays.jl/pull/83
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 4912 |
# PencilArrays
[](https://jipolanco.github.io/PencilArrays.jl/stable)
[](https://jipolanco.github.io/PencilArrays.jl/dev)
[](https://doi.org/10.5281/zenodo.5148035)
[](https://github.com/jipolanco/PencilArrays.jl/actions)
[](https://codecov.io/gh/jipolanco/PencilArrays.jl)
Distributed Julia arrays using the MPI protocol.
This package provides a convenient framework for working with multidimensional
Julia arrays distributed among MPI processes.
The name of this package originates from the decomposition of 3D domains along
two out of three dimensions, sometimes called *pencil* decomposition.
This is illustrated by the figure below, which represents a distributed 3D array.
Each coloured block is managed by a different MPI process.
<p align="center">
<br/>
<img width="85%" alt="Pencil decomposition of 3D domains" src="docs/src/img/pencils.svg">
</p>
More generally, PencilArrays can decompose arrays of arbitrary dimension `N`,
along an arbitrary number of subdimensions `M ≤ N`.
(In the image above, `N = 3` and `M = 2`.)
PencilArrays is the basis for the
[PencilFFTs](https://github.com/jipolanco/PencilFFTs.jl) package, which
provides efficient and highly scalable distributed FFTs.
## Features
- distribution of `N`-dimensional arrays among MPI processes;
- decomposition of arrays along all or a subset of dimensions;
- tools for conveniently and efficiently iterating over the coordinates of distributed multidimensional geometries;
- transpositions between different decomposition configurations, using
point-to-point and collective MPI communications;
- zero-cost, convenient dimension permutations using the [StaticPermutations.jl](https://github.com/jipolanco/StaticPermutations.jl) package;
- convenient parallel I/O using either MPI-IO or the [Parallel
HDF5](https://portal.hdfgroup.org/display/HDF5/Parallel+HDF5) libraries;
- distributed FFTs and related transforms via the
[PencilFFTs.jl](https://github.com/jipolanco/PencilFFTs.jl) package.
## Installation
PencilArrays can be installed using the Julia package manager:
```julia
julia> ] add PencilArrays
```
## Quick start
```julia
using MPI
using PencilArrays
MPI.Init()
comm = MPI.COMM_WORLD # MPI communicator
rank = MPI.Comm_rank(comm) # rank of local process
# Let's decompose a 3D grid across all MPI processes.
# The resulting configuration is described by a Pencil object.
dims_global = (42, 31, 29) # global dimensions of the array
pen_x = Pencil(dims_global, comm)
# By default the 3D grid is decomposed along the two last dimensions, similarly
# to the "x-pencil" configuration in the figure above:
println(pen_x)
# Decomposition of 3D data
# Data dimensions: (42, 31, 29)
# Decomposed dimensions: (2, 3)
# Data permutation: NoPermutation()
# Array type: Array
# We can now allocate distributed arrays in the x-pencil configuration.
Ax = PencilArray{Float64}(undef, pen_x)
fill!(Ax, rank * π) # each process locally fills its part of the array
parent(Ax) # parent array holding the local data (here, an Array{Float64,3})
size(Ax) # total size of the array = (42, 31, 29)
size_local(Ax) # size of local part, e.g. (42, 8, 10) for a given process
range_local(Ax) # range of local part on global grid, e.g. (1:42, 16:23, 20:29)
# Let's associate the dimensions to a global grid of coordinates (x_i, y_j, z_k)
xs_global = range(0, 1; length = dims_global[1])
ys_global = range(0, 2; length = dims_global[2])
zs_global = range(0, 2π; length = dims_global[3])
# Part of the grid associated to the local MPI process:
grid = localgrid(pen_x, (xs_global, ys_global, zs_global))
# This is convenient for example if we want to initialise the `Ax` array as
# a function of the grid coordinates (x, y, z):
@. Ax = grid.x + (2 * grid.y * cos(grid.z))
# Alternatively (useful in higher dimensions):
@. Ax = grid[1] + (2 * grid[2] * cos(grid[3]))
# Create another pencil configuration, decomposing along dimensions (1, 3).
# We could use the same constructor as before, but it's recommended to reuse the
# previous Pencil instead to reduce memory usage.
pen_y = Pencil(pen_x; decomp_dims = (1, 3))
# Now transpose from the x-pencil to the y-pencil configuration, redistributing
# the data initially in Ax.
Ay = PencilArray{Float64}(undef, pen_y)
transpose!(Ay, Ax)
# We can check that Ax and Ay have the same data (but distributed differently)
# by combining the data from all different processes onto a single process
# (this should never be used for large datasets!)
gather(Ax) == gather(Ay) # true
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 2594 |
```@meta
CurrentModule = PencilArrays.LocalGrids
```
# Working with grids
PencilArrays.jl includes functionality for conveniently working with the
coordinates associated to a multidimensional domain.
For this, the [`localgrid`](@ref) function can be used to construct an object
describing the grid coordinates associated to the local MPI process.
This object can be used to easily and efficiently perform operations that
depend on the local coordinates.
## Creating local grids
As an example, say we are performing a 3D simulation on a [rectilinear
grid](https://en.wikipedia.org/wiki/Regular_grid#Rectilinear_grid), so that
the coordinates of a grid point are given by ``\bm{x}_{ijk} = (x_i, y_j, z_k)``
where `x`, `y` and `z` are separate one-dimensional arrays.
For instance:
```@example LocalGrids
Nx, Ny, Nz = 65, 17, 21
xs = range(0, 1; length = Nx)
ys = range(-1, 1; length = Ny)
zs = range(0, 2; length = Nz)
nothing # hide
```
Before continuing, let's create a domain decomposition configuration:
```@example LocalGrids
using MPI
using PencilArrays
MPI.Init()
comm = MPI.COMM_WORLD
pen = Pencil((Nx, Ny, Nz), comm)
```
Now, we can extract the local grid associated to the local MPI process:
```@example LocalGrids
grid = localgrid(pen, (xs, ys, zs))
```
Note that this example was run on a single MPI process, which makes things
somewhat less interesting, but the same applies to more processes.
With more than one process, the local grid is a subset of the global grid
defined by the coordinates `(xs, ys, zs)`.
## Using local grids
The `grid` object just created can be used to operate with `PencilArray`s.
In particular, say we want to initialise a `PencilArray` to a function that
depends on the domain coordinates, ``u(x, y, z) = x + 2y + z^2``.
This can be easily done using the broadcasting syntax (here we use the `@.`
macro for convenience):
```@example LocalGrids
u = PencilArray{Float64}(undef, pen) # construct PencilArray first
@. u = grid.x + 2 * grid.y + grid.z^2
nothing # hide
```
Here, `grid.x`, `grid.y` and `grid.z` are a convenient way of extracting the
three components of the grid.
Alternatively, one can use the syntax `grid[1]`, `grid[2]`, etc..., which is in
particularly useful when working in dimensions higher than 3.
Note that one could do the same as above using indexing instead of
broadcasting:
```@example LocalGrids
for I ∈ eachindex(grid)
x, y, z = grid[I]
u[I] = x + 2y + z^2
end
```
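Both approaches fill the array with the same values; as a quick sketch of this equivalence (reusing `pen`, `grid` and `u` from above):

```julia
v = PencilArray{Float64}(undef, pen)
@. v = grid.x + 2 * grid.y + grid.z^2
v == u  # true: broadcasting and explicit indexing agree
```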
## Library
```@docs
AbstractLocalGrid
LocalRectilinearGrid
localgrid
components
```
## Index
```@index
Pages = ["LocalGrids.md"]
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 2138 |
# [MPI topology](@id sec:mpi_topology)
The [`MPITopology`](@ref) type defines the MPI Cartesian topology of the
decomposition.
In other words, it contains information about the number of decomposed
dimensions, and the number of processes in each of these dimensions.
This type should only be used if more control is needed regarding the MPI
decomposition.
In particular, dealing with `MPITopology` is not required when using the
[high-level interface](@ref pencil-high-level) to construct domain
decomposition configurations.
## Construction
The main `MPITopology` constructor takes a MPI communicator and a tuple
specifying the number of processes in each dimension.
For instance, to distribute 12 MPI processes on a $3 × 4$ grid:
```julia
comm = MPI.COMM_WORLD # we assume MPI.Comm_size(comm) == 12
pdims = (3, 4)
topology = MPITopology(comm, pdims)
```
A convenience constructor is provided that automatically chooses a default
`pdims` from the number of processes and from the dimension `N` of
decomposition grid. For instance, for a two-dimensional decomposition:
```julia
topology = MPITopology(comm, Val(2))
```
Under the hood, this works by letting
[`MPI_Dims_create`](https://www.open-mpi.org/doc/current/man3/MPI_Dims_create.3.php)
choose the number of divisions along each dimension.
At the lower level, [`MPITopology`](@ref) uses
[`MPI_Cart_create`](https://www.open-mpi.org/doc/current/man3/MPI_Cart_create.3.php)
to define a Cartesian MPI communicator.
For more control, one can also create a Cartesian communicator using
[`MPI.Cart_create`](https://juliaparallel.org/MPI.jl/stable/reference/topology/#MPI.Cart_create),
and pass that to `MPITopology`:
```julia
dims = (3, 4)
comm_cart = MPI.Cart_create(comm, dims)
topology = MPITopology{2}(comm_cart) # note the "{2}"!!
```
Note that in this case, one needs to indicate the number of dimensions `M` of
the decomposition (here `M = 2`).
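Once constructed, basic properties of the topology can be queried using the accessors documented below (a short sketch; the values correspond to the 3×4 example above):

```julia
length(topology)        # total number of processes (here 12)
ndims(topology)         # number of decomposed dimensions (here 2)
size(topology)          # processes along each dimension, here (3, 4)
coords_local(topology)  # Cartesian coordinates of the local process
```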
## Types
```@docs
MPITopology
```
## Methods
```@docs
get_comm(::MPITopology)
coords_local(::MPITopology)
length(::MPITopology)
ndims(::MPITopology)
size(::MPITopology)
```
## Index
```@index
Pages = ["MPITopology.md"]
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 3780 |
# [Array wrappers](@id PencilArrays_module)
```@meta
CurrentModule = PencilArrays
```
The `PencilArrays` module defines types for handling MPI-distributed data.
The most important types are:
- [`PencilArray`](@ref): array wrapper including MPI decomposition information.
Takes *local* indices starting at 1, regardless of the location of each MPI
process on the global topology.
- [`GlobalPencilArray`](@ref): `PencilArray` wrapper that takes *global*
indices, which generally don't start at 1.
See also [Global views](@ref).
## Construction
An uninitialised `PencilArray` can be constructed from a [`Pencil`](@ref)
instance as
```julia
pencil = Pencil(#= ... =#)
A = PencilArray{Float64}(undef, pencil)
parent(A) # returns the Array wrapped by `A`
```
This allocates a new `Array` with the local dimensions and data type associated
to the `Pencil`.
One can also construct a `PencilArray` wrapper from an existing
`AbstractArray`, whose dimensions must be compatible with the `Pencil`
configuration.
For instance, the following works:
```julia
dims = size_local(pencil, MemoryOrder()) # dimensions must be in memory order!
data = zeros(dims)
A = PencilArray(pencil, data)
```
Note that `data` does not need to be a `Array`, but can be any subtype of
`AbstractArray`.
It is also possible to construct higher dimensional arrays, as in:
```julia
data = zeros(dims..., 3, 2)
A = PencilArray(pencil, data)
```
This will construct a `PencilArray` where the rightmost dimensions (called
*extra dimensions* in the PencilArrays API) will never be split among MPI
processes.
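A short sketch (assuming the `data = zeros(dims..., 3, 2)` example above):

```julia
ndims_extra(A)  # 2: the trailing (3, 2) dimensions are never decomposed
size_local(A)   # local spatial size with (3, 2) appended
```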
## Dimension permutations
Unlike the wrapped `AbstractArray`, the `PencilArray` wrapper takes indices in
logical order.
For instance, if the underlying permutation of the `Pencil` is `(2, 3, 1)`,
then `A[i, j, k]` points to the same value as `parent(A)[j, k, i]`.
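As a concrete sketch (assuming an MPI communicator `comm`, and indices chosen arbitrarily within the local range of the process):

```julia
pen = Pencil((16, 32, 64), comm; permute = Permutation(2, 3, 1))
A = PencilArray{Float64}(undef, pen)
fill!(A, 0)
A[2, 3, 4] = 1            # logical (x, y, z) indices...
parent(A)[3, 4, 2] == 1   # true: ...stored in (y, z, x) memory order
```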
## Global views
`PencilArray`s are accessed using local indices that start at 1, regardless of
the location of the subdomain associated to the local process on the global
grid.
Sometimes it may be more convenient to use global indices describing the
position of the local process in the domain.
For this, the [`global_view`](@ref) function is provided that generates an
[`OffsetArray`](https://github.com/JuliaArrays/OffsetArrays.jl) wrapper taking
global indices.
For more details, see for instance [the gradient
example](https://jipolanco.github.io/PencilFFTs.jl/stable/examples/gradient/#gradient_method_global)
in the PencilFFTs docs.
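As a brief sketch of the relation between local and global indices (for the 3D array `A` constructed above):

```julia
Ag = global_view(A)          # takes global indices
rx, ry, rz = range_local(A)  # global index ranges held by this process
Ag[first(rx), first(ry), first(rz)] == A[1, 1, 1]  # true
```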
!!! warning "Global views"
Regular `PencilArray`s have more functionality than global view wrappers.
This includes broadcasting, which is currently not supported for global views.
In general it should be preferred to work with `PencilArray`s.
## Types
```@docs
PencilArray
GlobalPencilArray
PencilArrayCollection
AbstractManyPencilArray
ManyPencilArray
```
## Methods
### PencilArray
```@docs
extra_dims(::PencilArray)
get_comm(::MaybePencilArrayCollection)
permutation
global_view(::PencilArray)
ndims_extra(::MaybePencilArrayCollection)
ndims_space(::PencilArray)
parent(::PencilArray)
pencil(::PencilArray)
pointer(::PencilArray)
range_local(::MaybePencilArrayCollection)
range_remote(::MaybePencilArrayCollection, etc...)
similar(::PencilArray)
length(::PencilArray)
length_local(::PencilArray)
length_global(::PencilArray)
size(::PencilArray)
size_local(::MaybePencilArrayCollection)
size_global(::MaybePencilArrayCollection)
sizeof_global(::PencilArray)
topology(::MaybePencilArrayCollection)
PencilArrays.typeof_array
PencilArrays.typeof_ptr
```
### ManyPencilArray
```@docs
first(::ManyPencilArray)
getindex(::ManyPencilArray)
last(::ManyPencilArray)
length(::ManyPencilArray)
Tuple(::ManyPencilArray)
```
## Index
```@index
Pages = ["PencilArrays.md"]
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 1201 |
# [Measuring performance](@id PencilArrays.measuring_performance)
It is possible to measure the time spent in different sections of the MPI data
transposition routines using the
[TimerOutputs](https://github.com/KristofferC/TimerOutputs.jl) package. This
has a (very small) performance overhead, so it is disabled by default. To
enable time measurements, call
`TimerOutputs.enable_debug_timings` after loading `PencilArrays` (see below for
an example).
For more details see the [TimerOutputs
docs](https://github.com/KristofferC/TimerOutputs.jl#overhead).
Minimal example:
```julia
using MPI
using PencilArrays
using TimerOutputs
# Enable timing of `PencilArrays` functions
TimerOutputs.enable_debug_timings(PencilArrays)
TimerOutputs.enable_debug_timings(Transpositions)
MPI.Init()
pencil = Pencil(#= args... =#)
# [do stuff with `pencil`...]
# Retrieve and print timing data associated to `plan`
to = timer(pencil)
print_timer(to)
```
By default, each `Pencil` has its own `TimerOutput`. If you already have a `TimerOutput`, you can pass it to the [`Pencil`](@ref) constructor:
```julia
to = TimerOutput()
pencil = Pencil(..., timer=to)
# [do stuff with `pencil`...]
print_timer(to)
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 6130 |
# [Parallel I/O](@id PencilIO_module)
```@meta
CurrentModule = PencilArrays.PencilIO
```
The `PencilArrays.PencilIO` module contains functions for saving and loading
[`PencilArray`](@ref)s to disk using parallel I/O.
Currently, two different output formats are supported:
- raw binary files via the MPI-IO interface;
- parallel HDF5 files.
In both cases, information on dataset sizes, names and other metadata are
included along with the binary data.
The implemented approach consists in storing the data coming from different MPI
processes in a single file.
This strategy scales better in terms of number of files, and is more
convenient, than that of storing one file per process.
However, the performance is very sensitive to the configuration of the
underlying file system.
In distributed file systems such as
[Lustre](https://en.wikipedia.org/wiki/Lustre_(file_system)), it is worth
tuning parameters such as the stripe count and stripe size.
For more information, see for instance the [Parallel HDF5
page](https://portal.hdfgroup.org/display/HDF5/Parallel+HDF5).
## Getting started
The first step before writing `PencilArray`s is to choose the parallel I/O
driver, which determines the format of the output data.
Two different drivers are currently available:
- [`MPIIODriver`](@ref): parallel I/O via the MPI-IO API and the [MPI.jl
wrappers](https://juliaparallel.github.io/MPI.jl/latest/io/). This driver
writes a raw binary file, along with a JSON file describing dataset metadata
(name, dimensions, location in file, ...);
- [`PHDF5Driver`](@ref): parallel I/O via the Parallel HDF5 API and
[HDF5.jl](https://github.com/JuliaIO/HDF5.jl). This driver requires a special
set-up, as detailed in the [dedicated section](@ref setting_up_parallel_hdf5).
### Writing data
To open a parallel file, pass the MPI communicator and an instance of the
chosen driver to [`open`](@ref).
For instance, the following opens an MPI-IO file in write mode:
```julia
using PencilArrays.PencilIO # needed for accessing parallel I/O functionality
ff = open(MPIIODriver(), "filename.bin", MPI.COMM_WORLD; write=true)
```
Datasets, in the form of `PencilArray`s, can then be written as follows:
```julia
v = PencilArray(...)
ff["velocity"] = v
```
This writing step may be customised via keyword arguments such as `chunks` and
`collective`. These options are supported by both MPI-IO and HDF5 drivers.
For instance:
```julia
ff["velocity", chunks=true, collective=false] = v
```
See [`setindex!`](@ref) for the meaning of these options for each driver, as
well as for driver-specific options.
After datasets are written, the file should be closed as usual by doing
`close(ff)`. Note that the do-block syntax is also supported, as in
```julia
open(MPIIODriver(), "filename.bin", MPI.COMM_WORLD; write=true) do ff
ff["velocity"] = v
end
```
### Reading data
Data is loaded into an existent `PencilArray` using [`read!`](@ref).
For instance:
```julia
v = PencilArray(...)
open(MPIIODriver(), "filename.bin", MPI.COMM_WORLD; read=true) do ff
read!(ff, v, "velocity")
end
```
Note that, for the MPI-IO driver, a `filename.bin.json` file must be present
along with the `filename.bin` file containing the binary data. The JSON file is
automatically generated when writing data with this driver.
Optional keyword arguments, such as `collective`, are also supported by
[`read!`](@ref).
## [Setting-up Parallel HDF5](@id setting_up_parallel_hdf5)
If using the [Parallel HDF5 driver](#PencilArrays.PencilIO.PHDF5Driver), the HDF5.jl package must
be available and configured with MPI support.
Note that HDF5.jl versions previous to
[v0.15](https://github.com/JuliaIO/HDF5.jl/releases/tag/v0.15.0) are not
supported.
Parallel HDF5 is not enabled in the default installation of HDF5.jl.
For Parallel HDF5 to work, the HDF5 C libraries wrapped by HDF5.jl must be
compiled with parallel support and linked to the specific MPI implementation
that will be used for parallel I/O.
HDF5.jl must be explicitly instructed to use parallel-enabled HDF5 libraries
available in the system.
Similarly, MPI.jl must be instructed to use the corresponding MPI libraries.
This is detailed in the sections below.
Parallel-enabled HDF5 libraries are usually included in computing clusters and
linked to the available MPI implementations.
They are also available via the package manager of a number of Linux
distributions.
(For instance, Fedora includes the `hdf5-mpich-devel` and `hdf5-openmpi-devel`
packages, respectively linked to the MPICH and OpenMPI libraries in the Fedora
repositories.)
The following step-by-step guide assumes one already has access to
parallel-enabled HDF5 libraries linked to an existent MPI installation.
### 1. Using system-provided MPI libraries
Select the system-provided MPI backend linked to the parallel HDF5 installation
following the instructions in the [MPI.jl
docs](https://juliaparallel.org/MPI.jl/v0.20/configuration/#Using-a-system-provided-MPI-backend).
### 2. Using parallel HDF5 libraries
Set the `JULIA_HDF5_PATH` environment variable to the top-level installation
directory where the HDF5 libraries compiled with parallel support are found.
Then run `]build HDF5` from Julia.
Note that the selected HDF5 library must be linked to the MPI library chosen in
the previous section.
Also note that HDF5 library versions older than 1.10.4 are not supported by HDF5.jl.
For the set-up to be persistent across HDF5.jl updates, consider setting
`JULIA_HDF5_PATH` in `~/.bashrc` or similar.
See the [HDF5.jl README](https://github.com/JuliaIO/HDF5.jl#installation) for details.
### 3. Loading PencilIO
In the `PencilIO` module, the HDF5.jl package is lazy-loaded
using [Requires](https://github.com/JuliaPackaging/Requires.jl).
This means that HDF5 functionality will be available after both the
`PencilArrays.jl` and `HDF5.jl` packages have been loaded:
```julia
using MPI
using HDF5
using PencilArrays
```
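After this, the HDF5 driver can be used just like the MPI-IO driver shown earlier. For instance (a sketch, following the same conventions as above):

```julia
open(PHDF5Driver(), "filename.h5", MPI.COMM_WORLD; write = true) do ff
    ff["velocity"] = v
end
```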
## Library
```@docs
PencilIO.ParallelIODriver
MPIIODriver
PHDF5Driver
PencilIO.MPIFile
open
setindex!
read!
hdf5_has_parallel
```
## Index
```@index
Pages = ["PencilIO.md"]
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 6317 |
```@meta
CurrentModule = PencilArrays.Pencils
```
# [Pencil configurations](@id sec:pencil_configs)
A *pencil* configuration refers to a given distribution of multidimensional
data among MPI processes.
This information is encoded in the [`Pencil`](@ref) type.
A pencil configuration includes:
- [MPI topology](@ref sec:mpi_topology) information,
- global and local dimensions of the numerical grid,
- subset of decomposed dimensions,
- definition of optional permutation of dimensions.
## Construction
### [High-level interface](@id pencil-high-level)
The simplest way of constructing a new [`Pencil`](@ref) is by passing the
global dataset dimensions and an MPI communicator to the `Pencil` constructor:
```julia-repl
julia> dims_global = (16, 32, 64);
julia> comm = MPI.COMM_WORLD;
julia> pen = Pencil(dims_global, comm)
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (2, 3)
Data permutation: NoPermutation()
```
This will decompose the data along the two last dimensions of the dataset:[^1]
```julia-repl
julia> decomposition(pen)
(2, 3)
```
For instance, if the communicator `comm` has 4 MPI processes, then each process will hold a subset of data of size `(16, 16, 32)`:
```julia-repl
julia> topology(pen)
MPI topology: 2D decomposition (2×2 processes) # assuming MPI.Comm_size(comm) == 4
julia> size_local(pen)
(16, 16, 32)
```
Instead of the default, one may want to choose the subset of dimensions that
should be decomposed.
For instance, to decompose along the first dimension only:
```julia-repl
julia> decomp_dims = (1,);
julia> pen = Pencil(dims_global, decomp_dims, comm)
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (1,)
Data permutation: NoPermutation()
julia> decomposition(pen)
(1,)
julia> topology(pen)
MPI topology: 1D decomposition (4 processes) # again, assuming 4 MPI processes
```
### Low-level interface
Note that the above high-level constructors don't require the definition of
a [`MPITopology`](@ref), which is constructed implicitly.
For more control, one may want to manually construct a `MPITopology`, and then construct a `Pencil` from that.
As above, one may also specify the list of decomposed dimensions.
For instance, we may want to decompose 32 MPI processes into a 8×4 Cartesian
topology.
This is done as follows:
```julia-repl
julia> topo = MPITopology(comm, (8, 4)) # NOTE: fails if MPI.Comm_size(comm) ≠ 32
MPI topology: 2D decomposition (8×4 processes)
julia> dims_global = (16, 32, 64);
julia> pen = Pencil(topo, dims_global)
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (2, 3)
Data permutation: NoPermutation()
```
As before, the decomposed dimensions are the rightmost ones by default (in this
case, dimensions `2` and `3`). A different set of dimensions may be
selected via an optional positional argument.
For instance, to decompose along dimensions `1` and `3` instead:
```julia-repl
julia> decomp_dims = (1, 3);
julia> pen = Pencil(topo, dims_global, decomp_dims)
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (1, 3)
Data permutation: NoPermutation()
```
### Defining multiple pencils
One may also want to work with multiple pencil configurations that differ, for
instance, on the selection of decomposed dimensions.
For this case, a constructor is available that takes an already existing
`Pencil` instance.
Calling this constructor should be preferred when possible since it allows
sharing memory buffers (used for instance for [global transpositions](@ref
Global-MPI-operations)) and thus reducing memory usage.
The following creates a `Pencil` equivalent to the one above, but with
different decomposed dimensions:
```julia-repl
julia> pen_y = Pencil(pen; decomp_dims = (1, 3))
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (1, 3)
Data permutation: NoPermutation()
```
See the [`Pencil`](@ref) documentation for more details.
## Dimension permutations
A `Pencil` may optionally be given information on dimension permutations.
In this case, the layout of the data arrays in memory is different from the
logical order of dimensions.
For performance reasons, permutations are compile-time objects defined in the
[StaticPermutations](https://github.com/jipolanco/StaticPermutations.jl)
package.
To make permutations clearer, consider the example above where the global data
dimensions are $N_x × N_y × N_z = 16 × 32 × 64$.
In this case, the logical order is $(x, y, z)$.
Now let's say that we want the memory order of the data to be $(y, z, x)$,[^2]
which corresponds to the permutation `(2, 3, 1)`.
Permutations are passed to the `Pencil` constructor via the `permute` keyword
argument.
Dimension permutations should be specified using a
[`Permutation`](https://jipolanco.github.io/StaticPermutations.jl/stable/#StaticPermutations.Permutation)
object.
For instance,
```julia-repl
julia> perm = Permutation(2, 3, 1);
julia> pen = Pencil(dims_global, comm; permute = perm)
Decomposition of 3D data
Data dimensions: (16, 32, 64)
Decomposed dimensions: (2, 3)
Data permutation: Permutation(2, 3, 1)
```
One can also pass a
[`NoPermutation`](https://jipolanco.github.io/StaticPermutations.jl/stable/#StaticPermutations.NoPermutation)
to disable permutations (this is the default).
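The permutation determines how local sizes are reported in memory order; a short sketch using the accessors documented below:

```julia
permutation(pen)                 # Permutation(2, 3, 1)
size_local(pen, LogicalOrder())  # local size in logical (x, y, z) order
size_local(pen, MemoryOrder())   # the same sizes, permuted to (y, z, x)
```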
## Types
```@docs
Pencil
Pencils.AbstractIndexOrder
MemoryOrder
LogicalOrder
```
## Methods
```@docs
topology(::Pencil)
get_comm(::Pencil)
decomposition(::Pencil)
permutation(::Pencil)
timer(::Pencil)
length(::Pencil)
size(::Pencil)
ndims(::Pencil)
range_remote(::Pencil, ::Integer, ::LogicalOrder)
range_local(::Pencil, ::LogicalOrder)
size_global(::Pencil, ::LogicalOrder)
size_local(::Pencil, etc...)
length_global(::Pencil)
length_local(::Pencil)
to_local(::Pencil)
similar(::Pencil)
```
## Index
```@index
Pages = ["Pencils.md"]
```
[^1]:
More generally, an ``N``-dimensional dataset is by default decomposed along its ``N - 1`` last dimensions.
[^2]:
Why would we want this?
One application is to efficiently perform FFTs along $y$, which, under
this permutation, would be the fastest dimension.
This is used by the [PencilFFTs](https://github.com/jipolanco/PencilFFTs.jl) package.
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 1184 |
# Global MPI operations
```@meta
CurrentModule = PencilArrays
```
One of the most time-consuming parts of a large-scale computation involving
multidimensional FFTs, is the global data transpositions between different MPI
decomposition configurations.
In `PencilArrays`, this is performed by the
[`transpose!`](@ref Transpositions.transpose!) function, which
takes two `PencilArray`s, typically associated to two different configurations.
The implementation performs comparably to similar implementations in
lower-level languages (see [PencilFFTs
benchmarks](https://jipolanco.github.io/PencilFFTs.jl/stable/benchmarks/) for
details).
Also provided is a [`gather`](@ref) function that creates a single global array
from decomposed data.
This can be useful for tests (in fact, it is used in the PencilArrays tests to
verify the correctness of the transpositions), but shouldn't be used with large
datasets.
It is generally useful for small problems where the global size of the data
easily fits in the locally available memory.
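As a minimal sketch combining both operations (following the conventions of the package's quick-start example):

```julia
pen_x = Pencil((16, 32, 64), comm)
pen_y = Pencil(pen_x; decomp_dims = (1, 3))
ux = PencilArray{Float64}(undef, pen_x)
fill!(ux, MPI.Comm_rank(comm))
uy = PencilArray{Float64}(undef, pen_y)
transpose!(uy, ux)        # redistribute data from x- to y-pencil
gather(ux) == gather(uy)  # true (only use gather with small datasets!)
```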
## Library
```@docs
Transpositions.Transposition
Transpositions.transpose!
MPI.Waitall
gather
```
## Index
```@index
Pages = ["Transpositions.md"]
```
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 4544 |
```@meta
CurrentModule = PencilArrays
```
# PencilArrays
Distributed Julia arrays using the MPI protocol.
## Introduction
This package provides a convenient framework for working with multidimensional
Julia arrays distributed among MPI processes.
The name of this package originates from the decomposition of 3D domains along
two out of three dimensions, sometimes called *pencil* decomposition.
This is illustrated by the figure below,[^1] where each coloured block is
managed by a different MPI process.
```@raw html
<div class="figure">
<img
width="85%"
src="img/pencils.svg"
alt="Pencil decomposition of 3D domains">
</div>
```
More generally, PencilArrays can decompose arrays of arbitrary dimension ``N``,
along an arbitrary subset of ``M`` dimensions.
(In the example above, ``N = 3`` and ``M = 2``.)
PencilArrays is the basis for the
[PencilFFTs](https://github.com/jipolanco/PencilFFTs.jl) package, which
provides efficient and highly scalable distributed FFTs.
## Features
- distribution of ``N``-dimensional arrays among MPI processes;
- decomposition of arrays along all or a subset of dimensions;
- tools for conveniently and efficiently iterating over the [coordinates of
distributed multidimensional geometries](@ref Working-with-grids);
- [transpositions](@ref Global-MPI-operations) between different decomposition
configurations, using point-to-point and collective MPI communications;
- zero-cost, convenient dimension permutations using [StaticPermutations.jl](https://github.com/jipolanco/StaticPermutations.jl);
- convenient [parallel I/O](@ref PencilIO_module) of distributed arrays using
either MPI-IO or the [Parallel
HDF5](https://portal.hdfgroup.org/display/HDF5/Parallel+HDF5) libraries;
- distributed FFTs and related transforms via the
[PencilFFTs.jl](https://github.com/jipolanco/PencilFFTs.jl) package.
## Installation
PencilArrays can be installed using the Julia package manager:
```julia
julia> ] add PencilArrays
```
## Quick start
```julia
using MPI
using PencilArrays
MPI.Init()
comm = MPI.COMM_WORLD # MPI communicator
rank = MPI.Comm_rank(comm) # rank of local process
# Let's decompose a 3D grid across all MPI processes.
# The resulting configuration is described by a Pencil object.
dims_global = (42, 31, 29) # global dimensions of the array
pen_x = Pencil(dims_global, comm)
# By default the 3D grid is decomposed along the two last dimensions, similarly
# to the "x-pencil" configuration in the figure above:
println(pen_x)
# Decomposition of 3D data
# Data dimensions: (42, 31, 29)
# Decomposed dimensions: (2, 3)
# Data permutation: NoPermutation()
# Array type: Array
# We can now allocate distributed arrays in the x-pencil configuration.
Ax = PencilArray{Float64}(undef, pen_x)
fill!(Ax, rank * π) # each process locally fills its part of the array
parent(Ax) # parent array holding the local data (here, an Array{Float64,3})
size(Ax) # total size of the array = (42, 31, 29)
size_local(Ax) # size of local part, e.g. (42, 8, 10) for a given process
range_local(Ax) # range of local part on global grid, e.g. (1:42, 16:23, 20:29)
# Let's associate the dimensions to a global grid of coordinates (x_i, y_j, z_k)
xs_global = range(0, 1; length = dims_global[1])
ys_global = range(0, 2; length = dims_global[2])
zs_global = range(0, 2π; length = dims_global[3])
# Part of the grid associated to the local MPI process:
grid = localgrid(pen_x, (xs_global, ys_global, zs_global))
# This is convenient for example if we want to initialise the `Ax` array as
# a function of the grid coordinates (x, y, z):
@. Ax = grid.x + (2 * grid.y * cos(grid.z))
# Alternatively (useful in higher dimensions):
@. Ax = grid[1] + (2 * grid[2] * cos(grid[3]))
# Create another pencil configuration, decomposing along dimensions (1, 3).
# We could use the same constructor as before, but it's recommended to reuse the
# previous Pencil instead to reduce memory usage.
pen_y = Pencil(pen_x; decomp_dims = (1, 3))
# Now transpose from the x-pencil to the y-pencil configuration, redistributing
# the data initially in Ax.
Ay = PencilArray{Float64}(undef, pen_y)
transpose!(Ay, Ax)
# We can check that Ax and Ay have the same data (but distributed differently)
# by combining the data from all different processes onto a single process
# (this should never be used for large datasets!)
gather(Ax) == gather(Ay) # true
```
[^1]:
Figure adapted from [this PhD thesis](https://hal.archives-ouvertes.fr/tel-02084215v1).
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 0.19.6 | 3d8a67ac2ea254cabea5d7b5118e78e6564dd75f | docs | 1548 |
# Reductions
Reduction over [`PencilArray`](@ref)s using Julia functions such as `minimum`,
`maximum`, `sum`, `any` or `all` are performed on the global data.
This involves first a local reduction over each process, followed by a global
reduction of a scalar quantity using
[`MPI.Allreduce`](https://juliaparallel.github.io/MPI.jl/latest/collective/#MPI.Allreduce).
For example:
```julia
using MPI
using PencilArrays
MPI.Init()
comm = MPI.COMM_WORLD
nprocs = MPI.Comm_size(comm)
rank = MPI.Comm_rank(comm)
id = rank + 1
pen = Pencil((16, 32, 14), comm)
u = PencilArray{Int}(undef, pen)
fill!(u, 2 * id)
minimum(u) # = 2
maximum(u) # = 2 * nprocs
minimum(abs2, u) # = 4
maximum(abs2, u) # = (2 * nprocs)^2
all(>(0), u) # true
all(==(2), u) # false if nprocs > 1
any(==(2), u) # true
```
!!! note "Note on associativity"
Associative reduction operations like
[`foldl`](https://docs.julialang.org/en/v1/base/collections/#Base.foldl-Tuple{Any,%20Any}),
[`foldr`](https://docs.julialang.org/en/v1/base/collections/#Base.foldr-Tuple{Any,%20Any}),
[`mapfoldl`](https://docs.julialang.org/en/v1/base/collections/#Base.mapfoldl-Tuple{Any,%20Any,%20Any})
and
[`mapfoldr`](https://docs.julialang.org/en/v1/base/collections/#Base.mapfoldr-Tuple{Any,%20Any,%20Any})
are also defined for consistency, but these operations are not
guaranteed to strictly respect left or right associativity.
In fact, associativity is only respected on each local process, before
results are reduced among all processes.
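In other words, a distributed fold behaves like a local fold followed by a global reduction, as in this sketch (reusing `u` from the example above):

```julia
# Sketch: a distributed fold is a local fold followed by a global reduction.
local_val  = mapfoldl(abs2, +, parent(u))              # local data only
global_val = MPI.Allreduce(local_val, +, get_comm(u))  # combine across ranks
global_val == mapfoldl(abs2, +, u)                     # true
```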
| PencilArrays | https://github.com/jipolanco/PencilArrays.jl.git |
| ["MIT"] | 1.0.0 | bcd90f754ce53b519551288418a7e85e42d21beb | code | 3748 |
module WebAssemblyInterfaces
export js_repr, js_types, js_def
function fixname(s)
# JS names have to be letter, numbers, or underscore
replace(string(s), r"[^a-zA-Z0-9_]" => s"_")
end
BuiltinTypes = Union{Bool, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64}
const default_map = Dict{Type,String}()
for T in (Bool, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64)
default_map[T] = string("'", lowercase(repr(T)), "'")
end
default_map[Float32] = "'f32'"
default_map[Float64] = "'f64'"
struct Context
type_map::Dict{Type,String}
new_types::Vector{String}
end
Context(; type_map = copy(default_map), new_types = String[]) = Context(type_map, new_types)
# Fallback for abstract/non-concrete types: no definition can be written.
function definition_repr(ctx::Context, x::Type)
if !isconcretetype(x)
return nothing
end
end
function definition_repr(ctx::Context, x::AbstractVector{T}) where T
string("[", definition_repr(ctx, x[1]), ", ", length(x), "]")
end
# Name including type parameters, e.g. tname(Y{Int64,Int64,Int64}) == "YInt64_Int64_Int64"
tname(::Type{T}) where {T} = string(nameof(T), join(T.parameters, "_"))
function definition_repr(ctx::Context, x::Type{T}) where T
if haskey(ctx.type_map, T)
return ctx.type_map[T]
end
# if isconcretetype(T) && !ismutabletype(T)
name = fixname(nameof(T))
if name in values(ctx.type_map)
# name collision (same struct name, different type parameters):
# disambiguate by appending the parameters
name = fixname(tname(T))
end
ctx.type_map[T] = name
s = string("const ", name, " = new ffi.Struct({\n")
for i in 1:fieldcount(T)
FT = fieldtype(T, i)
if sizeof(FT) > 0
s *= string(" ",
fixname(string(fieldname(T, i))), ": ",
get(ctx.type_map, FT, definition_repr(ctx, FT)),
",\n")
end
end
s *= "});\n"
push!(ctx.new_types, s)
return name
# end
end
function definition_repr(ctx::Context, ::Type{NTuple{N, T}}) where {N,T}
string("[", definition_repr(ctx, T), ", ", N, "]")
end
function definition_repr(ctx::Context, T::Type{<:Tuple})
string("ffi.rust.tuple([", join((definition_repr(ctx, p) for p in T.parameters), ","), "])")
end
function definition_repr(ctx::Context, ::Type{Base.RefValue{T}}) where T
string("types.pointer(", definition_repr(ctx, T), ")")
end
function definition_repr(ctx::Context, ::Type{<:Enum}) # kludge for now
string("'int32'")
end
function js_types(T::Type; ctx = Context())
definition_repr(ctx, T)
return join(ctx.new_types, "\n")
end
function js_def(x::T; ctx = Context()) where T
typename = definition_repr(ctx, T)
s = string("new ", typename, "({\n")
for i in 1:fieldcount(T)
FT = fieldtype(T, i)
if sizeof(FT) > 0
s *= string(fixname(string(fieldname(T, i))), ": ",
js_def(getfield(x, i), ctx = ctx),
",\n")
end
end
s *= "})"
return s
end
js_def(x::T; args...) where T <: Union{BuiltinTypes} = x
function js_def(x::NTuple{N,T}; ctx = Context()) where {N,T}
string("[", join(string.(x), ","), "]")
end
function js_def(x::Base.RefValue{T}; ctx = Context()) where T
string("new Pointer(", definition_repr(ctx, T), ", ", js_def(x[]; ctx), ")")
end
function js_def(x::T; ctx = Context()) where T <: Tuple
string("new ffi.rust.tuple([",
join((definition_repr(ctx, p) for p in T.parameters), ","), "], [",
join((js_def(z; ctx) for z in x), ", "), "])")
end
function js_def(x::Enum; ctx = Context()) # kludge for now
Int32(x)
end
function js_repr(x)
ctx = Context()
string(
js_types(typeof(x); ctx),
"\n",
js_def(x; ctx),
"\n",
)
end
end
| WebAssemblyInterfaces | https://github.com/tshort/WebAssemblyInterfaces.jl.git |
| ["MIT"] | 1.0.0 | bcd90f754ce53b519551288418a7e85e42d21beb | code | 631 |
using WebAssemblyInterfaces
using Test
mutable struct X{A,B,C}
a::A
b::B
c::C
end
struct Y{A,B,C}
a::A
b::B
c::C
end
x = X(2, Y(1.1, 2, (1, 1.1)), Y(1, 2, 3))
@testset "Basics" begin
s = js_repr(x)
println(s)
@test contains(s, """
const Y = new ffi.Struct({
a: 'f64',
b: 'int64',
c: ffi.rust.tuple(['int64','f64']),
});
""")
@test contains(s, """
new X({
a: 2,
b: new Y({
a: 1.1,
b: 2,
c: new ffi.rust.tuple(['int64','f64'], [1, 1.1]),
}),
c: new YInt64_Int64_Int64({
a: 1,
b: 2,
c: 3,
}),
})
""")
# More sophisticated tests could use NodeJS.jl to run some JavaScript/WebAssembly.
end
| WebAssemblyInterfaces | https://github.com/tshort/WebAssemblyInterfaces.jl.git |
| ["MIT"] | 1.0.0 | bcd90f754ce53b519551288418a7e85e42d21beb | docs | 3652 |
# WebAssemblyInterfaces
[](https://github.com/tshort/WebAssemblyInterfaces.jl/actions/workflows/CI.yml?query=branch%3Amain)
NOTE: This is still experimental, and not all features have been tested with WebAssembly.
For a working example, see this [Lorenz Attraction App in Julia](http://tshort.github.io/Lorenz-WebAssembly-Model.jl).
This is a small package to write out definitions in JavaScript that correspond to Julia types and object definitions. This JavaScript code is meant to be used with the [wasm-ffi](https://github.com/DeMille/wasm-ffi/tree/master) package, a great package for interfacing between JavaScript and WebAssembly. This allows JavaScript to read and write to memory that is shared by the Julia code (after being compiled to WebAssembly). The [wasm-ffi](https://github.com/DeMille/wasm-ffi/tree/master) package writes to the same memory layout used by Julia.
The following types are supported:
* Structs, tuples, named tuples
* Concrete types that include: Bool, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, and Float64
Functions and other types that don't have a size are not written. For vectors, the `MallocVector` type from [StaticTools](https://github.com/brenhinkeller/StaticTools.jl) works with the `ffi.rust.vector` type in wasm-ffi. The memory layouts do not match exactly, but it works for some uses.
`wasm-ffi` performs allocations when objects are created on the JavaScript side. It is also possible to do allocation on the Julia side. The WebAssembly file needs to include `allocate` and `free` functions.
Three functions are provided:
* `js_types(T)`: Return a string with the JavaScript definition of type `T`.
* `js_def(x)`: Return a string with the JavaScript code to define object `x`.
* `js_repr(x)`: Return a string with the JavaScript code with the types and the code to define `x`.
Here is an example of Julia code that defines a custom type and generates JavaScript interfacing code.
```jl
mutable struct X{A,B,C}
a::A
b::B
c::C
end
struct Y{A,B,C}
a::A
b::B
c::C
end
x = X(2, Y(1.1, 2, (1, 1.1)), Y(1, 2, 3))
using WebAssemblyInterfaces
print(js_repr(x))
```
Here is the JavaScript code that is printed:
```js
const Y = new ffi.Struct({
a: 'f64',
b: 'int64',
c: ffi.rust.tuple(['int64','f64']),
});
const YInt64_Int64_Int64 = new ffi.Struct({
a: 'int64',
b: 'int64',
c: 'int64',
});
const X = new ffi.Struct({
a: 'int64',
b: Y,
c: YInt64_Int64_Int64,
});
new X({
a: 2,
b: new Y({
a: 1.1,
b: 2,
c: new ffi.rust.tuple(['int64','f64'], [1, 1.1]),
}),
c: new YInt64_Int64_Int64({
a: 1,
b: 2,
c: 3,
}),
})
```
Here is a Julia function that could operate on this object. This can be compiled with [StaticCompiler](https://github.com/tshort/StaticCompiler.jl). The Julia code can read data from the object passed in, and it can write to this object in memory.
```jl
function f(x)
x.a = x.b[2] * x.c[3]
return x.c[1] + x.b.c[1]
end
# Compile it to WebAssembly:
using StaticCompiler
wasm_path = compile_wasm(f, Tuple{typeof(x)}, flags = `walloc.o`)
```
### Options going forward
* Should we include a copy of `wasm-ffi.browser.js`? It makes sense if we add support for more Julia types.
* Figure out where `walloc.o` should live. Should we add object code from other sources to make WebAssembly easier?
* We could create and package a set of method overrides for StaticCompiler that are targeted at WebAssembly. We could also develop Mixtape passes to be able to compile more code.
| WebAssemblyInterfaces | https://github.com/tshort/WebAssemblyInterfaces.jl.git |
| ["MIT"] | 0.4.0 | 9bda530fa178f41e92eeeb57312e7be1b9f47a9f | code | 11801 |
module SPCSpectra
using Dates
export SPC
mutable struct SPC
data::Vector{Tuple{Vector{<:Number}, Vector{Float32}}} # xdata can be Float64 if calculated from range specs
zdata::Vector{Float32}
xlabel::String
ylabel::String
zlabel::String
experimenttype::String
timestamp::DateTime
param_dict::Dict{String, String}
params::Vector{String}
end
# Adaptation of the python package spc by rohanisaac
# byte positon of various parts of the file
# head_siz = 512
# old_head_siz = 256
subhead_siz = 32
log_siz = 64
"Units for x,z,w axes."
const fxtype_op = ["Arbitrary",
"Wavenumber (cm-1)",
"Micrometers (um)",
"Nanometers (nm)",
"Seconds ",
"Minutes", "Hertz (Hz)",
"Kilohertz (KHz)",
"Megahertz (MHz) ",
"Mass (M/z)",
"Parts per million (PPM)",
"Days",
"Years",
"Raman Shift (cm-1)",
"eV",
"XYZ text labels in fcatxt (old 0x4D version only)",
"Diode Number",
"Channel",
"Degrees",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Data Points",
"Milliseconds (mSec)",
"Microseconds (uSec) ",
"Nanoseconds (nSec)",
"Gigahertz (GHz)",
"Centimeters (cm)",
"Meters (m)",
"Millimeters (mm)",
"Hours"]
"Units y-axis."
const fytype_op = ["Arbitrary Intensity",
"Interferogram",
"Absorbance",
"Kubelka-Munk",
"Counts",
"Volts",
"Degrees",
"Milliamps",
"Millimeters",
"Millivolts",
"Log(1/R)",
"Percent",
"Intensity",
"Relative Intensity",
"Energy",
"",
"Decibel",
"",
"",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Index of Refraction [N]",
"Extinction Coeff. [K]",
"Real",
"Imaginary",
"Complex"]
const fytype_op2 = ["Transmission",
"Reflectance",
"Arbitrary or Single Beam with Valley Peaks",
"Emission"]
const fexper_op = ["General SPC",
"Gas Chromatogram",
"General Chromatogram",
"HPLC Chromatogram",
"FT-IR, FT-NIR, FT-Raman Spectrum or Igram",
"NIR Spectrum",
"UV-VIS Spectrum",
"X-ray Diffraction Spectrum",
"Mass Spectrum ",
"NMR Spectrum or FID",
"Raman Spectrum",
"Fluorescence Spectrum",
"Atomic Spectrum",
"Chromatography Diode Array Spectra"]
# Bits of `n` as a BitVector, least-significant bit first
flag_bits(n) = BitVector(digits(n, base = 2, pad = 8 * sizeof(n)))
# Little-endian readers for scalars, fixed-size strings, arrays and mixed tuples
read_data(io::IO, T::DataType) = ltoh(read(io, T))
read_data(io::IO, ::Type{String}, n::Integer) = strip(String(read(io, n)), '\0')
read_data(io::IO, T::DataType, n::Integer...) = ltoh.(reshape(reinterpret(T, read(io, prod(n) * sizeof(T))), Int64.(n)...))
read_data(io::IO, TT::Union{NTuple{N, DataType} where N, Vector{DataType}}) = ltoh.(read.(Ref(io), TT))
"""
SPC(filename::AbstractString)
Construct SPC objects.
"""
function SPC(filename::AbstractString)
content = read(filename)
io = IOBuffer(content)
ftflg, fversn = content[1:2]
# --------------------------------------------
# NEW FORMAT (LSB)
# --------------------------------------------
fversn == 0x4b || error("Reading of file version $(repr(fversn)) not implemented")
ftflg = read_data(io, UInt8)
fversn = read_data(io, UInt8)
fexper = read_data(io, UInt8)
fexp = read_data(io, UInt8)
fnpts = read_data(io, Int32)
ffirst = read_data(io, Float64)
flast = read_data(io, Float64)
fnsub = read_data(io, Int32)
fxtype = read_data(io, UInt8)
fytype = read_data(io, UInt8)
fztype = read_data(io, UInt8)
fpost = read_data(io, UInt8)
fdate = read_data(io, Int32)
fres = read_data(io, String, 9)
fsource = read_data(io, String, 9)
fpeakpt = read_data(io, Int16)
fspare = read_data(io, String, 32)
fcmnt = read_data(io, String, 130)
fcatxt = read_data(io, String, 30)
flogoff = read_data(io, Int32)
fmods = read_data(io, Int32)
fprocs = read_data(io, UInt8)
flevel = read_data(io, UInt8)
fsampin = read_data(io, Int16)
ffactor = read_data(io, Float32)
fmethod = read_data(io, String, 48)
fzinc = read_data(io, Float32)
fwplanes = read_data(io, Int32)
fwinc = read_data(io, Float32)
fwtype = read_data(io, UInt8)
freser = read_data(io, String, 187)
# Flag bits
tsprec, tcgram, tmulti, trandm, tordrd, talabs, txyxys, txvals = flag_bits(ftflg)
# Convert date time to appropriate format
year = fdate >> 20
month = (fdate >> 16) % (2^4)
day = (fdate >> 11) % (2^5)
hour = (fdate >> 6) % (2^5)
minute = fdate % (2^6)
timestamp = DateTime(year, month, day, hour, minute)
# remove multiple spaces
cmnt = replace(fcmnt, r"\s+" => " ")
# figure out type of file
dat_multi = fnsub > 1
dat_fmt = if txyxys
# x values are stored with each subfile
"-xy"
elseif txvals
# a single global x array is stored in the file
"x-y"
else
# no x values are given, but they can be generated
"gx-y"
end
println("$dat_fmt($fnsub)")  # print detected data format and number of subfiles
x = if ! txyxys
# txyxys don't have global x data
if txvals
# if global x data is given
read_data(io, Float32, fnpts)
else
# otherwise generate them
range(ffirst, flast; length=fnpts) |> collect
end
end
# make a list of subfiles
xydata = []
zdata = Float32[]
z0 = dz = 0f0
# if subfile directory is given
if dat_fmt == "-xy" && fnpts > 0
directory = true
# loop over entries in directory
for i in 1:fnsub
ssfposn, ssfsize, ssftime = read_data(io, (Int32, Int32, Float32))
# add sufile, load defaults for npts and exp
pos = position(io)
seek(io, ssfposn) # io buffer position is zero-based!
xloc, y, z, zinc = subFile(io, 0, 0, true, tsprec, tmulti)
if i == 1
z0 = z
dz = zinc
end
seek(io, pos)
push!(xydata, (isnothing(xloc) ? x : xloc, y))
push!(zdata, z)
end
else
# don't have directory, for each subfile
for i in 1:fnsub
xloc, y, z, zinc = subFile(io, fnpts, fexp, txyxys, tsprec, tmulti)
if i == 1
z0 = z
dz = zinc
end
push!(xydata, (isnothing(xloc) ? x : xloc, y))
push!(zdata, z)
end
end
# if log data exists
# flog offset to log data offset not zero (bytes)
param_dict = Dict{String, String}()
params = String[] # put the rest into a list
if flogoff > 0
log_head_end = flogoff + log_siz
io_log = IOBuffer(content[flogoff+1:log_head_end])
# logstc_str = "<iiiii44s"
logsizd, logsizm, logtxto, logbins, logdsks = read_data(io_log, Int32, 5)
logspar = read_data(io_log, String, 44)
log_pos = flogoff + logtxto
log_end_pos = flogoff + logsizd
# line endings: get rid of any '\r' and then split on '\n'
log_content = split(strip(String(content[log_pos + 1:log_end_pos]), ['\0', '\r', '\n']), r"\r?\n")
# split log data into dictionary based on =
for x in log_content
if occursin("=", x)
# stop it from breaking if there is more than 1 =
key, value = split(x, "=")[1:2]
push!(param_dict, key => strip(value, '\0'))
else
push!(params, x)
end
end
end
labels = [
get(fxtype_op, fxtype + 1, "Unknown"),
get(fytype_op, fytype + 1, get(fytype_op2, fytype - 127, "Unknown")),
get(fxtype_op, fztype + 1, "Unknown")
]
# --------------------------
# check if labels are included as text
# --------------------------
# split it based on '\0' character
# format x, y, z
if talabs
for (i, s) in enumerate(split(fcatxt, '\0', keepempty = false))
isempty(s) || (labels[i] = s)
end
end
if (0x10 & ftflg == 0x0)
fzinc > 0 && (dz = fzinc)
zdata = z0 .+ collect(0:dz:((fnpts - 1) * dz))
end
SPC(xydata, zdata, labels..., get(fexper_op, fexper + 1, "Unknown"), timestamp, param_dict, params)
end
"""
subFile(io::IO, fnpts, fexp, txyxy, tsprec, tmulti)
Process each subfile passed to it, extracts header information and data
information and places them in data members
Data
- x: x-data (optional)
- y: y-data
- y_int: integer y-data if y-data is not floating
"""
function subFile(io::IO, fnpts, fexp, txyxy, tsprec, tmulti)
# extract subheader info
subflgs, subexp, subindx, subtime, subnext, subnois, subnpts, subscan, subwlevel, subresv = read_subheader(io)
pts = txyxy ? subnpts : fnpts
# Choosing exponent
# -----------------
# choose local vs global exponent depending on tmulti
exp = tmulti ? subexp : fexp
# Make sure it is reasonable, if it out of range zero it
(-128 < exp <= 128) || (exp = 0)
# --------------------------
# if x_data present
# --------------------------
x = if txyxy
# x_str = '<' + 'i' * pts
x_raw = read_data(io, Int32, pts)
(2.0f0^(exp - 32)) .* x_raw
else
nothing
end
# --------------------------
# extract y_data
# --------------------------
y = if exp == 128
# Floating y-values
read_data(io, Float32, pts)
else
# integer format
if tsprec
# 16 bit
y_raw16 = read_data(io, Int16, pts)
(2.0f0^(exp - 16)) .* y_raw16
else
# 32 bit, using size of subheader to figure out data type
y_raw = read_data(io, Int32, pts)
(2.0f0^(exp - 32)) .* y_raw
end
end
z = subtime
zinc = subnext - subtime
x, y, z, zinc
end
"""
read_subheader(io::IO)
Return the subheader as a list:
-------
10 item list with the following data members:
[1] subflgs
[2] subexp
[3] subindx
[4] subtime
[5] subnext
[6] subnois
[7] subnpts
[8] subscan
[9] subwlevel
[10] subresv
"""
function read_subheader(io::IO)
subflgs = read_data(io, UInt8)
subexp = read_data(io, UInt8)
subindx = read_data(io, Int16)
subtime = read_data(io, Float32)
subnext = read_data(io, Float32)
subnois = read_data(io, Float32)
subnpts = read_data(io, Int32)
subscan = read_data(io, Int32)
subwlevel = read_data(io, Float32)
subresv = read_data(io, String, 4)
subflgs, subexp, subindx, subtime, subnext, subnois, subnpts, subscan, subwlevel, subresv
end
end # module
| SPCSpectra | https://github.com/hhaensel/SPCSpectra.jl.git |
| ["MIT"] | 0.4.0 | 9bda530fa178f41e92eeeb57312e7be1b9f47a9f | code | 394 |
using Test
using SPCSpectra
@testset "SPCSpectra" begin
dir = joinpath(pkgdir(SPCSpectra), "test", "data")
filename = "4d_map.spc"
path = joinpath(dir, filename)
spc = SPC(path)
# The data is from a Nicolet FT-IR spectrometer.
# That is, a Fourier infrared spectrometer.
@test spc.param_dict["SRC"] == "IR Source"
@test spc.param_dict["MODEL"] == "Nicolet"
end
| SPCSpectra | https://github.com/hhaensel/SPCSpectra.jl.git |
| ["MIT"] | 0.4.0 | 9bda530fa178f41e92eeeb57312e7be1b9f47a9f | docs | 3312 |
# SPCSpectra
A module for working with .SPC files in Julia. SPC is a binary data format used to store a variety of spectral data, developed by Galactic Industries Corporation in the '90s and popularly used by the Thermo Fisher Scientific software GRAMS/AI. Also used by others including Ocean Optics and Jobin Yvon Horiba. It can store a variety of spectra including FT-IR, UV-VIS, X-ray Diffraction, Mass Spectroscopy, NMR, Raman and Fluorescence spectra.
The SPC file format can store either single or multiple y-values, and the x-values can either be given explicitly or even spaced x-values can be generated based on initial and final points as well as number of points. In addition the format can store various log data and parameters, as well as various information such as axis labels and scan type.
NOTE: This package is still in a beta state and has not been fully tested.
## Acknowledgement
This package is highly inspired the python package [`spc` by rohanisaac](https://github.com/rohanisaac/spc).
## Features
1. Extracts header information
2. Store x and y data of all traces into a vector `data`. Single traces can be addressed by `spc.data[1]` (, `spc.data[2]`, `spc.data[3]`, ...)
3. Attempts to interpret x-, y-, and z-labels, as well as experiment type
4. Store z values of multifiles into a vector `zdata`
Currently only file version `0x4b` is supported. Data output is not yet implemented.
## Installation
```julia
julia> ]
pkg> add SPCSpectra
```
## Usage
```julia
using SPCSpectra
datadir = joinpath(pkgdir(SPCSpectra), "test", "data")
filenames = filter(endswith(r"\.spc"i), readdir(datadir; join=true))
spc = SPC(filenames[1])
# Plotting
using PlotlyBase
plotspc(spc::SPC) = plotspc(spc.data)
plotspc(data) = Plot([scatter(x = s[1], y = s[2]) for s in data])
plotspc(spc)
```
### Accessing data
In contrast to the original spc python package, all data elements contain both x and y values.
This is not a waste of storage as x arrays are stored by reference.
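For example (a short sketch):

```julia
x1, y1 = spc.data[1]      # x and y values of the first trace
length(x1) == length(y1)  # true
spc.zdata                 # one z value per trace (for multifiles)
```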
The following fields are currently supported.
metadata | variable
------------------- | -----------
x-label | spc.xlabel
y-label | spc.ylabel
z-label | spc.zlabel
Timestamp | spc.timestamp
Experiment type | spc.experimenttype
Log dictionary | spc.param_dict
Log (remaining) | spc.params
### File versions supported
File versions are given by the second bit in the file. Currently the library supports the following `fversn` bytes.
fversn | Description | Support | Notes
------ | ---------------- | ------------ | ----------------------------------------------------------------
0x4B | New format (LSB) | Good |
0x4C | New format (MSB) | None |
0x4D | Old format | None |
0xCF | SHIMADZU format | None |
### Notes
- Used format specification from Universal Data Format Specification [1], [2]
- Loads entire file into memory
- Data uses variable naming as in SPC.H
### Todo
- support of other format versions
- data output / conversion
- integration of plot functions
## References
[1] "SPC file format", Wikipedia (<https://en.wikipedia.org/wiki/SPC_file_format>)
[2] "Universal Data Format Specification" (PDF). (<https://ensembles-eu.metoffice.gov.uk/met-res/aries/technical/GSPC_UDF.PDF>)
| SPCSpectra | https://github.com/hhaensel/SPCSpectra.jl.git |
| ["MIT"] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | code | 592 |
using Documenter, Arpack
makedocs(
format = Documenter.HTML(
canonical = "https://julialinearalgebra.github.io/Arpack.jl/stable/",
),
sitename = "Arpack.jl",
modules = [Arpack],
pages = [
"Home" => "index.md",
"Standard Eigen Decomposition" => "eigs.md",
"Generalized Eigen Decomposition" => "eigs_gen.md",
"Singular Value Decomposition" => "svds.md",
"API Reference" => "api.md",
]
)
deploydocs(
repo = "github.com/JuliaLinearAlgebra/Arpack.jl.git",
target = "build",
deps = nothing,
make = nothing
)
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | code | 15114 | # This file is a part of Julia. License is MIT: https://julialang.org/license
"""
Arnoldi and Lanczos iteration for computing eigenvalues
"""
module Arpack
# Load in our binary dependencies
using Arpack_jll
using LinearAlgebra: BlasFloat, BlasInt, Diagonal, I, SVD, UniformScaling,
checksquare, factorize, ishermitian, issymmetric, mul!,
rmul!, qr!
import LinearAlgebra
export eigs, svds
include("libarpack.jl")
## eigs
"""
eigs(A; nev=6, ncv=max(20,2*nev+1), which=:LM, tol=0.0, maxiter=300, sigma=nothing, ritzvec=true, explicittransform=:auto, v0=zeros((0,)), check=0) -> (d,[v,],nconv,niter,nmult,resid)
Computes eigenvalues `d` of `A` using implicitly restarted Lanczos or Arnoldi iterations for real symmetric or
general nonsymmetric matrices respectively. See [the manual](@ref man-eigs) for more information.
`eigs` returns the `nev` requested eigenvalues in `d`, the corresponding Ritz vectors `v`
(only if `ritzvec=true`), the number of converged eigenvalues `nconv`, the number of
iterations `niter` and the number of matrix vector multiplications `nmult`, as well as the
final residual vector `resid`. The parameter `explicittransform` takes the values `:auto`, `:none`
or `:shiftinvert`, specifying whether shift-and-invert should be explicitly invoked in Julia code.
When `check = 0`, an error is thrown if the maximum number of iterations is taken (`info = 1`). According to the ARPACK manual, this usually means all possible eigenvalues have been found.
When `check = 1`, the currently converged eigenvalues are returned when `info = 1`, and a `@warn` is given.
When `check = 2`, the currently converged eigenvalues are returned when `info = 1`.
# Examples
```jldoctest
julia> using LinearAlgebra, Arpack
julia> A = Diagonal(1:4);
julia> λ, ϕ = eigs(A, nev = 2);
julia> λ
2-element Array{Float64,1}:
3.9999999999999996
3.000000000000001
```
"""
eigs(A; kwargs...) = eigs(A, I; kwargs...)
eigs(A::AbstractMatrix{<:BlasFloat}, ::UniformScaling; kwargs...) = _eigs(A, I; kwargs...)
eigs(A::AbstractMatrix{T}, B::AbstractMatrix{T}; kwargs...) where {T<:BlasFloat} = _eigs(A, B; kwargs...)
eigs(A::AbstractMatrix{BigFloat}, B::AbstractMatrix...; kwargs...) = throw(MethodError(eigs, Any[A,B,kwargs...]))
eigs(A::AbstractMatrix{BigFloat}, B::UniformScaling; kwargs...) = throw(MethodError(eigs, Any[A,B,kwargs...]))
function eigs(A::AbstractMatrix{T}, ::UniformScaling; kwargs...) where T
Tnew = typeof(zero(T)/sqrt(one(T)))
eigs(convert(AbstractMatrix{Tnew}, A), I; kwargs...)
end
function eigs(A::AbstractMatrix, B::AbstractMatrix; kwargs...)
T = promote_type(eltype(A), eltype(B))
Tnew = typeof(zero(T)/sqrt(one(T)))
eigs(convert(AbstractMatrix{Tnew}, A), convert(AbstractMatrix{Tnew}, B); kwargs...)
end
"""
eigs(A, B; nev=6, ncv=max(20,2*nev+1), which=:LM, tol=0.0, maxiter=300, sigma=nothing, ritzvec=true, v0=zeros((0,)), check=0) -> (d,[v,],nconv,niter,nmult,resid)
Computes generalized eigenvalues `d` of `A` and `B` using implicitly restarted Lanczos or Arnoldi iterations for real symmetric or general nonsymmetric matrices respectively. See [the manual](@ref man-eigsgen) for more information.
When `check = 0`, an error is thrown if the maximum number of iterations is taken (`info = 1`). According to the ARPACK manual, this usually means all possible eigenvalues have been found.
When `check = 1`, the currently converged eigenvalues are returned when `info = 1`, and a `@warn` is given.
When `check = 2`, the currently converged eigenvalues are returned when `info = 1`.
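# Examples
A minimal usage sketch (not a doctest; the matrices below are illustrative only):
```julia
using LinearAlgebra, SparseArrays, Arpack
A = sparse(Diagonal(1.0:4.0))
B = sparse(2.0I, 4, 4)
λ, ϕ = eigs(A, B, nev = 2) # eigenvalues of the pencil (A, B), i.e. of inv(B) * A
```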
"""
eigs(A, B; kwargs...) = _eigs(A, B; kwargs...)
function _eigs(A, B;
nev::Integer=6, ncv::Integer=max(20,2*nev+1), which=:LM,
tol=0.0, maxiter::Integer=300, sigma=nothing, v0::Vector=zeros(eltype(A),(0,)),
ritzvec::Bool=true, explicittransform::Symbol=:auto, check::Integer=0)
n = checksquare(A)
eigval_postprocess = false; # If we need to shift-and-invert eigvals as postprocessing
T = eltype(A)
iscmplx = T <: Complex
isgeneral = B !== I
sym = !iscmplx && issymmetric(A) && issymmetric(B)
nevmax = sym ? n-1 : n-2
if nevmax <= 0
throw(ArgumentError("input matrix A is too small. Use eigen instead."))
end
if nev > nevmax
@warn "Adjusting nev from $nev to $nevmax"
nev = nevmax
end
if nev <= 0
throw(ArgumentError("requested number of eigenvalues (nev) must be ≥ 1, got $nev"))
end
ncvmin = nev + (sym ? 1 : 2)
if ncv < ncvmin
@warn "Adjusting ncv from $ncv to $ncvmin"
ncv = ncvmin
end
ncv = BlasInt(min(ncv, n))
bmat = isgeneral ? "G" : "I"
isshift = sigma !== nothing
if isa(which,AbstractString)
@warn "Use symbols instead of strings for specifying which eigenvalues to compute"
which=Symbol(which)
end
if (which != :LM && which != :SM && which != :LR && which != :SR &&
which != :LI && which != :SI && which != :BE)
throw(ArgumentError("which must be :LM, :SM, :LR, :SR, :LI, :SI, or :BE, got $(repr(which))"))
end
if which == :BE && !sym
throw(ArgumentError("which=:BE only possible for real symmetric problem"))
end
isshift && which == :SM && @warn "Use of :SM in shift-and-invert mode is not recommended, use :LM to find eigenvalues closest to sigma"
if (explicittransform==:auto)
# Try to automatically detect if it is good to carry out an explicittransform
if (isgeneral && (isshift || which==:LM))
explicittransform = :shiftinvert
else
explicittransform = :none
end
end
if sigma !== nothing && !iscmplx && isa(sigma,Complex)
throw(ArgumentError("complex shifts for real problems are not yet supported"))
end
sigma = isshift ? convert(T,sigma) : zero(T)
if (explicittransform==:shiftinvert && (which==:LM || which==:LR || which == :LI) && !isgeneral)
@warn "Explicit transformation with :L* for standard eigenvalue problems has no meaning. Changing to explicittransform=false."
explicittransform=:none
end
sigma0=sigma; # Store for inverted shift-and-invert
if explicittransform==:shiftinvert
isgeneral=false
bmat="I"
sym=false # Explicit transform destroys symmetry in general
sigma=zero(T);
if (isshift) # Try to keep the original meaning of which & sigma
if (which == :LM)
which = :SM
elseif (which == :SM)
which = :LM
end
if (which == :LR)
which = :SR
elseif (which == :SR)
which = :LR
end
end
end
if !isempty(v0)
if length(v0) != n
throw(DimensionMismatch())
end
if eltype(v0) != T
throw(ArgumentError("starting vector must have element type $T, got $(eltype(v0))"))
end
end
whichstr = "LM"
if which == :SM
whichstr = "SM"
end
if which == :BE
whichstr = "BE"
end
if which == :LR
whichstr = (!sym ? "LR" : "LA")
end
if which == :SR
whichstr = (!sym ? "SR" : "SA")
end
if which == :LI
if !sym
whichstr = "LI"
else
throw(ArgumentError("largest imaginary is meaningless for symmetric eigenvalue problems"))
end
end
if which == :SI
if !sym
whichstr = "SI"
else
throw(ArgumentError("smallest imaginary is meaningless for symmetric eigenvalue problems"))
end
end
# Refer to ex-*.doc files in ARPACK/DOCUMENTS for calling sequence
matvecA! = (y, x) -> mul!(y, A, x)
if !isgeneral || (explicittransform==:shiftinvert) # Standard problem
matvecB = x -> x
if (explicittransform == :none)
if !isshift # Regular mode
mode = 1
solveSI = x->x
else # Shift-invert mode
mode = 3
F = factorize(A - UniformScaling(sigma))
solveSI = x -> F \ x
end
else
# doing explicit transformation to standard eigprob
if (which == :LM || which == :LI || which == :LR)
eigval_postprocess = false # No eigenvalue postprocessing necessary since the operator is B^{-1}A
F = factorize(B);
matvecA! = (y,x) -> (y[:]= F \ (A*x))
else
eigval_postprocess = true
sigma = zero(T);
F = factorize(sigma0*B - A);
matvecA! = (y,x) -> (y[:]= F \ (B*x))
end
mode = 1;
solveSI = x -> x;
end
else # Generalized eigenproblem
matvecB = x -> B * x
if !isshift # Regular inverse mode
mode = 2
F = factorize(B)
solveSI = x -> F \ x
else # Shift-invert mode
mode = 3
F = factorize(A - sigma*B)
solveSI = x -> F \ x
end
end
# Compute the Ritz values and Ritz vectors
(resid, v, ldv, iparam, ipntr, workd, workl, lworkl, rwork, TOL) =
aupd_wrapper(T, matvecA!, matvecB, solveSI, n, sym, iscmplx, bmat, nev, ncv, whichstr, tol, maxiter, mode, v0, check)
# Postprocessing to get eigenvalues and eigenvectors
check == 1 && (iparam[5] < nev) && @warn "nev = $nev, but only $(iparam[5]) found!"
output = eupd_wrapper(T, n, sym, iscmplx, bmat, check == 0 ? nev : iparam[5], whichstr, ritzvec, TOL,
resid, ncv, v, ldv, sigma, iparam, ipntr, workd, workl, lworkl, rwork)
# Issue 10495, 10701: Check that all eigenvalues are converged
nev = length(output[1])
nconv = output[ritzvec ? 3 : 2]
nev ≤ nconv || @warn "Not all wanted Ritz pairs converged. Requested: $nev, converged: $nconv"
if (eigval_postprocess) # invert the shift-and-inverse
λ = sigma0 .- 1 ./output[1];
return (λ, output[2:end]...)
end
return output
end
## svds
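# Augmented operator [0 A; A' 0]; its eigenvalues come in ± pairs of the singular values of A.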
struct SVDAugmented{T,S} <: AbstractArray{T, 2}
X::S
SVDAugmented{T,S}(X::AbstractMatrix) where {T,S} = new(X)
end
function SVDAugmented(A::AbstractMatrix{T}) where T
Tnew = typeof(zero(T)/sqrt(one(T)))
Anew = convert(AbstractMatrix{Tnew}, A)
SVDAugmented{Tnew,typeof(Anew)}(Anew)
end
function LinearAlgebra.mul!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where T
m, mn = size(A.X, 1), length(x)
mul!( view(y, 1:m), A.X, view(x, m + 1:mn)) # left singular vector
mul!(view(y, m + 1:mn), adjoint(A.X), view(x, 1:m)) # right singular vector
return y
end
Base.size(A::SVDAugmented) = ((+)(size(A.X)...), (+)(size(A.X)...))
LinearAlgebra.ishermitian(A::SVDAugmented) = true
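# Normal-equations operator: applies A'A when size(A, 1) >= size(A, 2) and AA' otherwise,
# so its eigenvalues are the squared singular values of A.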
struct AtA_or_AAt{T,S} <: AbstractArray{T, 2}
A::S
buffer::Vector{T}
end
function AtA_or_AAt(A)
T = eltype(A)
Tnew = typeof(zero(T)/sqrt(one(T)))
return AtA_or_AAt{Tnew,typeof(A)}(A, Vector{Tnew}(undef, max(size(A)...)))
end
function LinearAlgebra.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where T
if size(A.A, 1) >= size(A.A, 2)
mul!(A.buffer, A.A, x)
return mul!(y, adjoint(A.A), A.buffer)
else
mul!(A.buffer, adjoint(A.A), x)
return mul!(y, A.A, A.buffer)
end
end
Base.size(A::AtA_or_AAt) = ntuple(i -> min(size(A.A)...), Val(2))
LinearAlgebra.ishermitian(s::AtA_or_AAt) = true
svds(A::AbstractMatrix{<:BlasFloat}; kwargs...) = _svds(A; kwargs...)
svds(A::AbstractMatrix{BigFloat}; kwargs...) = throw(MethodError(svds, Any[A, kwargs...]))
function svds(A::AbstractMatrix{T}; kwargs...) where T
Tnew = typeof(zero(T)/sqrt(one(T)))
svds(convert(AbstractMatrix{Tnew}, A); kwargs...)
end
"""
svds(A; nsv=6, ritzvec=true, tol=0.0, maxiter=1000, ncv=2*nsv, v0=zeros((0,)), check=0) -> (SVD([left_sv,] s, [right_sv,]), nconv, niter, nmult, resid)
Computes the largest singular values `s` of `A` using implicitly restarted Lanczos
iterations derived from [`eigs`](@ref). See [the manual](@ref man-svds) for more information.
When `check = 0`, an error is thrown if the maximum number of iterations is taken (`info = 1`). According to the ARPACK manual, this usually means all possible eigenvalues have been found.
When `check = 1`, the currently converged eigenvalues are returned when `info = 1`, and a `@warn` is given.
When `check = 2`, the currently converged eigenvalues are returned when `info = 1`.
"""
svds(A; kwargs...) = _svds(A; kwargs...)
function _orth!(P)
# Orthonormalize the columns of P via QR, flipping column signs so that the
# diagonal of R is nonnegative; this makes the returned orthonormal basis deterministic.
Q, R = qr!(P)
_sign(x) = iszero(x) ? one(x) : sign(x)
rsign = [_sign(R[i,i]) for i in 1:size(R,2)]
return rmul!(Matrix(Q), Diagonal(rsign))
end
function _svds(X; nsv::Int = 6, ritzvec::Bool = true, tol::Float64 = 0.0, maxiter::Int = 1000, ncv::Int = 2*nsv, v0::Vector=zeros(eltype(X),(0,)), check::Integer=0)
if nsv < 1
throw(ArgumentError("number of singular values (nsv) must be ≥ 1, got $nsv"))
end
if nsv >= minimum(size(X))
throw(ArgumentError("number of singular values (nsv) must be < $(minimum(size(X))), got $nsv"))
end
m, n = size(X)
otype = eltype(X)
if length(v0) ∉ (0, min(m, n))
min_mn = min(m, n)
throw(DimensionMismatch("length of v0, the guess for the starting right Krylov vector, must be 0, or $min_mn, got $(length(v0))"))
end
ex = eigs(AtA_or_AAt(X), I; which = :LM, ritzvec = ritzvec, nev = nsv, tol = tol, maxiter = maxiter, v0=v0, check=check)
check != 0 && (nsv = length(ex[1]))
# ind = [1:2:ncv;]
# sval = abs.(ex[1][ind])
realex1 = real.(ex[1])
threshold = max(eps(real(otype))*realex1[1], eps(real(otype)))
firstzero = findfirst(v -> v <= threshold, realex1)
r = firstzero === nothing ? nsv : firstzero-1 # rank of the decomposition
realex1[r+1:end] .= zero(real(otype))
svals = sqrt.(realex1)
if ritzvec
# calculating singular vectors
# left_sv = sqrt(2) * ex[2][ 1:size(X,1), ind ] .* sign.(ex[1][ind]')
if size(X, 1) >= size(X, 2)
V = ex[2]
# We cannot assume that X*V is a Matrix even though V is. This is not
# the case for e.g. LinearMaps.jl so we convert to Matrix explicitly
U = _orth!(rmul!(convert(Matrix, X*V), Diagonal([inv.(svals[1:r]); ones(nsv-r)])))
else
U = ex[2]
# We cannot assume that X'U is a Matrix even though U is. This is not
# the case for e.g. LinearMaps.jl so we convert to Matrix explicitly
V = _orth!(rmul!(convert(Matrix, X'U), Diagonal([inv.(svals[1:r]); ones(nsv-r)])))
end
# right_sv = sqrt(2) * ex[2][ size(X,1)+1:end, ind ]
return (SVD(U, svals, copy(V')), ex[3], ex[4], ex[5], ex[6])
else
#The sort is necessary to work around #10329
return (SVD(zeros(eltype(svals), n, 0),
svals,
zeros(eltype(svals), 0, m)),
ex[2], ex[3], ex[4], ex[5])
end
end
end # module
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | code | 16575 | # This file is a part of Julia. License is MIT: https://julialang.org/licenOAse
import LinearAlgebra: BlasInt
using Logging
# A convenient shortcut to show unexpected behavior from libarpack
const ERR_UNEXPECTED_BEHAVIOR = -999
struct XYAUPD_Exception <: Exception
info::BlasInt
end
const AUPD_ERRORS = [
(3, "No shifts could be applied during a cycle of the Implicitly restarted Arnoldi iteration. One possibility is to increase the size of NCV relative to NEV. "),
(2, "No longer an informational error. Deprecated starting with release 2 of ARPACK."),
(1, """Maximum number of iterations taken. All possible eigenvalues of OP has been found.
IPARAM(5) returns the number of wanted converged Ritz values."""),
(0, "Normal exit."),
(-1, "N must be positive."),
(-2, "NEV must be positive."),
(-3, "NCV-NEV >= 2 and less than or equal to N."),
(-4, "The maximum number of Arnoldi update iterations allowed must be greater than zero."),
(-5, " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'"),
(-6, "BMAT must be one of 'I' or 'G'."),
(-7, "Length of private work array WORKL is not sufficient."),
(-8, "Error return from LAPACK eigenvalue calculation."),
(-9, "Starting vector is zero."),
(-10, "IPARAM(7) must be 1,2,3,4."),
(-11, "IPARAM(7) = 1 and BMAT = 'G' are incompatible."),
(-12, "IPARAM(1) must be equal to 0 or 1."),
(-13, "NEV and WHICH = 'BE' are incompatible."),
(-9999, """Could not build an Arnoldi factorization.
IPARAM(5) returns the size of the current Arnoldi factorization.
The user is advised to check that enough workspace and array storage has been allocated.""")
]
function Base.showerror(io::IO, ex::XYAUPD_Exception)
info = ex.info
if info == ERR_UNEXPECTED_BEHAVIOR
@error "XYAUPD_Exception: Undefined error"
else
idx = searchsorted(AUPD_ERRORS, info, by=first, rev=true)
if isempty(idx)
@error "XYAUPD_Exception: Please check XYAUPD error codes in the ARPACK manual." info
else
@error "XYAUPD_Exception: $(last(AUPD_ERRORS[first(idx)]))" info
end
end
end
struct XYEUPD_Exception <: Exception
info::BlasInt
end
const EUPD_ERRORS = [
(1, """The Schur form computed by LAPACK routine lahqr could not be reordered by LAPACK routine trsen.
Re-enter subroutine neupd with IPARAM(5)=NCV and increase the size of the arrays DR and DI to have dimension at least NCV and allocate at least NCV columns for Z.
NOTE: Not necessary if Z and V share the same space. Please notify the authors if this error occurs."""),
(0, "Normal exit."),
(-1, "N must be positive."),
(-2, "NEV must be positive."),
(-3, "NCV-NEV >= 2 and less than or equal to N."),
(-5, "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'"),
(-6, "BMAT must be one of 'I' or 'G'."),
(-7, "Length of private work WORKL array is not sufficient."),
(-8, """Error return from calculation of a real Schur form.
"Informational error from LAPACK routine lahqr ."""),
(-9, """Error return from calculation of eigenvectors.
"Informational error from LAPACK routine dtrevc."""),
(-10, "IPARAM(7) must be 1,2,3,4."),
(-11, "IPARAM(7) = 1 and BMAT = 'G' are incompatible."),
(-12, "HOWMNY = 'S' not yet implemented"),
(-13, "HOWMNY must be one of 'A' or 'P' if RVEC = .true."),
(-14, "DNAUPD did not find any eigenvalues to sufficient accuracy."),
(-15, """DNEUPD got a different count of the number of converged Ritz values than NAUPD got.
This indicates the user probably made an error in passing data from NAUPD to NEUPD or that the data was modified before entering NEUPD""")
]
function Base.showerror(io::IO, ex::XYEUPD_Exception)
info = ex.info
if info == ERR_UNEXPECTED_BEHAVIOR
@error "XYEUPD_Exception: Undefined error"
else
idx = searchsorted(EUPD_ERRORS, info, by=first, rev=true)
if isempty(idx)
@error "XYEUPD_Exception: Please check XYEUPD error codes in the ARPACK manual." info
else
@error "XYEUPD_Exception: $(last(EUPD_ERRORS[first(idx)]))" info
end
end
end
## aupd and eupd wrappers
function aupd_wrapper(T, matvecA!::Function, matvecB::Function, solveSI::Function, n::Integer,
sym::Bool, cmplx::Bool, bmat,
nev::Integer, ncv::Integer, which,
tol::Real, maxiter::Integer, mode::Integer, v0::Vector, check::Integer)
lworkl = cmplx ? ncv * (3*ncv + 5) : (sym ? ncv * (ncv + 8) : ncv * (3*ncv + 6) )
TR = cmplx ? T.types[1] : T
TOL = Ref{TR}(tol)
v = Matrix{T}(undef, n, ncv)
workd = Vector{T}(undef, 3*n)
workl = Vector{T}(undef, lworkl)
rwork = cmplx ? Vector{TR}(undef, ncv) : Vector{TR}()
if isempty(v0)
resid = Vector{T}(undef, n)
info = Ref{BlasInt}(0)
else
resid = deepcopy(v0)
info = Ref{BlasInt}(1)
end
iparam = zeros(BlasInt, 11)
ipntr = zeros(BlasInt, (sym && !cmplx) ? 11 : 14)
ido = Ref{BlasInt}(0)
iparam[1] = BlasInt(1) # ishifts
iparam[3] = BlasInt(maxiter) # maxiter
iparam[7] = BlasInt(mode) # mode
zernm1 = 0:(n-1)
while true
if cmplx
naupd(ido, bmat, n, which, nev, TOL, resid, ncv, v, n,
iparam, ipntr, workd, workl, lworkl, rwork, info)
elseif sym
saupd(ido, bmat, n, which, nev, TOL, resid, ncv, v, n,
iparam, ipntr, workd, workl, lworkl, info)
else
naupd(ido, bmat, n, which, nev, TOL, resid, ncv, v, n,
iparam, ipntr, workd, workl, lworkl, info)
end
if info[] != 0
if info[] == 1 && check != 0
return (resid, v, n, iparam, ipntr, workd, workl, lworkl, rwork, TOL)
end
throw(XYAUPD_Exception(info[]))
end
x = view(workd, ipntr[1] .+ zernm1)
y = view(workd, ipntr[2] .+ zernm1)
if mode == 1 # corresponds to dsdrv1, dndrv1 or zndrv1
if ido[] == -1 || ido[] == 1
matvecA!(y, x)
elseif ido[] == 99
break
else
throw(XYAUPD_Exception(ERR_UNEXPECTED_BEHAVIOR))
end
elseif mode == 3 && bmat == "I" # corresponds to dsdrv2, dndrv2 or zndrv2
if ido[] == -1 || ido[] == 1
y[:] = solveSI(x)
elseif ido[] == 99
break
else
throw(XYAUPD_Exception(ERR_UNEXPECTED_BEHAVIOR))
end
elseif mode == 2 # corresponds to dsdrv3, dndrv3 or zndrv3
if ido[] == -1 || ido[] == 1
matvecA!(y, x)
if sym
x[:] = y # overwrite as per Remark 5 in dsaupd.f
end
y[:] = solveSI(y)
elseif ido[] == 2
y[:] = matvecB(x)
elseif ido[] == 99
break
else
throw(XYAUPD_Exception(ERR_UNEXPECTED_BEHAVIOR))
end
elseif mode == 3 && bmat == "G" # corresponds to dsdrv4, dndrv4 or zndrv4
if ido[] == -1
y[:] = solveSI(matvecB(x))
elseif ido[] == 1
y[:] = solveSI(view(workd,ipntr[3] .+ zernm1))
elseif ido[] == 2
y[:] = matvecB(x)
elseif ido[] == 99
break
else
throw(XYAUPD_Exception(ERR_UNEXPECTED_BEHAVIOR))
end
else
throw(ArgumentError("ARPACK mode ($mode) not yet supported"))
end
end
return (resid, v, n, iparam, ipntr, workd, workl, lworkl, rwork, TOL)
end
function eupd_wrapper(T, n::Integer, sym::Bool, cmplx::Bool, bmat,
nev::Integer, which, ritzvec::Bool,
TOL::Ref, resid, ncv::Integer, v, ldv, sigma, iparam, ipntr,
workd, workl, lworkl, rwork)
howmny = "A"
select = Vector{BlasInt}(undef, ncv)
info = Ref{BlasInt}(0)
dmap = if which == "LM" || which == "SM"
abs
elseif which == "LR" || which == "LA" || which == "BE" || which == "SR" || which == "SA"
real
elseif which == "LI" || which == "SI"
abs ∘ imag # ARPACK returns largest,smallest abs(imaginary) (complex pairs come together)
else
error("unknown which string $which")
end
rev = which[1] == 'L'
if iparam[7] == 3 # shift-and-invert
dmap = dmap ∘ (x -> 1 / (x - sigma))
end
if cmplx
d = Vector{T}(undef, nev+1)
sigmar = Ref{T}(sigma)
workev = Vector{T}(undef, 2ncv)
neupd(ritzvec, howmny, select, d, v, ldv, sigmar, workev,
bmat, n, which, nev, TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, rwork, info)
if info[] != 0
throw(XYEUPD_Exception(info[]))
end
p = sortperm(d[1:nev], by=dmap, rev=rev)
return ritzvec ? (d[p], v[1:n, p],iparam[5],iparam[3],iparam[9],resid) : (d[p],iparam[5],iparam[3],iparam[9],resid)
elseif sym
d = Vector{T}(undef, nev)
sigmar = Ref{T}(sigma)
seupd(ritzvec, howmny, select, d, v, ldv, sigmar,
bmat, n, which, nev, TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info)
if info[] != 0
throw(XYEUPD_Exception(info[]))
end
p = sortperm(d, by=dmap, rev=rev)
return ritzvec ? (d[p], v[1:n, p],iparam[5],iparam[3],iparam[9],resid) : (d[p],iparam[5],iparam[3],iparam[9],resid)
else
dr = Vector{T}(undef, nev+1)
di = Vector{T}(undef, nev+1)
fill!(dr,NaN)
fill!(di,NaN)
sigmar = Ref{T}(real(sigma))
sigmai = Ref{T}(imag(sigma))
workev = Vector{T}(undef, 3*ncv)
neupd(ritzvec, howmny, select, dr, di, v, ldv, sigmar, sigmai,
workev, bmat, n, which, nev, TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info)
if info[] != 0
throw(XYEUPD_Exception(info[]))
end
evec = complex.(Matrix{T}(undef, n, nev+1), Matrix{T}(undef, n, nev+1))
j = 1
while j <= nev
if di[j] == 0
evec[:,j] = v[:,j]
else # For complex conjugate pairs
evec[:,j] = v[:,j] + im*v[:,j+1]
evec[:,j+1] = v[:,j] - im*v[:,j+1]
j += 1
end
j += 1
end
if j == nev+1 && !isnan(di[j])
if di[j] == 0
evec[:,j] = v[:,j]
j += 1
else
throw(XYEUPD_Exception(ERR_UNEXPECTED_BEHAVIOR))
end
end
d = complex.(dr, di)
if j == nev+1
p = sortperm(d[1:nev], by=dmap, rev=rev)
else
p = sortperm(d, by=dmap, rev=rev)
p = p[1:nev]
end
return ritzvec ? (d[p], evec[1:n, p],iparam[5],iparam[3],iparam[9],resid) : (d[p],iparam[5],iparam[3],iparam[9],resid)
end
end
for (T, saupd_name, seupd_name, naupd_name, neupd_name) in
((:Float64, :dsaupd_, :dseupd_, :dnaupd_, :dneupd_),
(:Float32, :ssaupd_, :sseupd_, :snaupd_, :sneupd_))
@eval begin
function naupd(ido, bmat, n, evtype, nev, TOL::Ref{$T}, resid::Vector{$T}, ncv, v::Matrix{$T}, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl, info)
ccall(($(string(naupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong),
ido, bmat, n, evtype, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info, 1, 2)
end
function neupd(rvec, howmny, select, dr, di, z, ldz, sigmar, sigmai,
workev::Vector{$T}, bmat, n, evtype, nev, TOL::Ref{$T}, resid::Vector{$T}, ncv, v, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl, info)
ccall(($(string(neupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{$T}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ref{$T}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong),
rvec, howmny, select, dr, di, z, ldz,
sigmar, sigmai, workev, bmat, n, evtype, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info, 1, 1, 2)
end
function saupd(ido, bmat, n, which, nev, TOL::Ref{$T}, resid::Vector{$T}, ncv, v::Matrix{$T}, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl, info)
ccall(($(string(saupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong),
ido, bmat, n, which, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info, 1, 2)
end
function seupd(rvec, howmny, select, d, z, ldz, sigma,
bmat, n, evtype, nev, TOL::Ref{$T}, resid::Vector{$T}, ncv, v::Matrix{$T}, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl, info)
ccall(($(string(seupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt},
Ref{$T}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong),
rvec, howmny, select, d, z, ldz,
sigma, bmat, n, evtype, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, info, 1, 1, 2)
end
end
end
for (T, TR, naupd_name, neupd_name) in
((:ComplexF64, :Float64, :znaupd_, :zneupd_),
(:ComplexF32, :Float32, :cnaupd_, :cneupd_))
@eval begin
function naupd(ido, bmat, n, evtype, nev, TOL::Ref{$TR}, resid::Vector{$T}, ncv, v::Matrix{$T}, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl,
rwork::Vector{$TR}, info)
ccall(($(string(naupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$TR}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$TR}, Ref{BlasInt}, Clong, Clong),
ido, bmat, n, evtype, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, rwork, info, 1, 2)
end
function neupd(rvec, howmny, select, d, z, ldz, sigma, workev::Vector{$T},
bmat, n, evtype, nev, TOL::Ref{$TR}, resid::Vector{$T}, ncv, v::Matrix{$T}, ldv,
iparam, ipntr, workd::Vector{$T}, workl::Vector{$T}, lworkl,
rwork::Vector{$TR}, info)
ccall(($(string(neupd_name)), libarpack), Cvoid,
(Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt},
Ref{$T}, Ref{$T}, Ptr{UInt8}, Ref{BlasInt}, Ptr{UInt8}, Ref{BlasInt},
Ref{$TR}, Ref{$T}, Ref{BlasInt}, Ref{$T}, Ref{BlasInt},
Ref{BlasInt}, Ref{BlasInt}, Ref{$T}, Ref{$T}, Ref{BlasInt}, Ref{$TR}, Ref{BlasInt}, Clong, Clong, Clong),
rvec, howmny, select, d, z, ldz,
sigma, workev, bmat, n, evtype, nev,
TOL, resid, ncv, v, ldv,
iparam, ipntr, workd, workl, lworkl, rwork, info, 1, 1, 2)
end
end
end
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | code | 16151 | # This file is a part of Julia. License is MIT: https://julialang.org/license
using Arpack
using Test, LinearAlgebra, SparseArrays, StableRNGs
@testset "eigs" begin
rng = StableRNG(1235)
n = 10
areal = sprandn(rng, n, n, 0.4)
breal = sprandn(rng, n, n, 0.4)
acmplx = complex.(sprandn(rng, n, n, 0.4), sprandn(rng, n, n, 0.4))
bcmplx = complex.(sprandn(rng, n, n, 0.4), sprandn(rng, n, n, 0.4))
testtol = 1e-6
@testset for elty in (Float64, ComplexF64)
if elty == ComplexF32 || elty == ComplexF64
a = acmplx
b = bcmplx
else
a = areal
b = breal
end
a_evs = eigvals(Array(a))
a = convert(SparseMatrixCSC{elty}, a)
asym = copy(a') + a # symmetric indefinite
apd = a'*a # symmetric positive-definite
b = convert(SparseMatrixCSC{elty}, b)
bsym = copy(b') + b
bpd = b'*b + I
(d,v) = eigs(a, nev=3)
@test a*v[:,2] ≈ d[2]*v[:,2]
@test norm(v) > testtol # eigenvectors cannot be null vectors
(d,v) = eigs(a, LinearAlgebra.I, nev=3) # test eigs(A, B; kwargs...)
@test a*v[:,2] ≈ d[2]*v[:,2]
@test norm(v) > testtol # eigenvectors cannot be null vectors
@test_logs (:warn, "Use symbols instead of strings for specifying which eigenvalues to compute") eigs(a, which="LM")
@test_logs (:warn, "Adjusting ncv from 1 to 4") eigs(a, ncv=1, nev=2)
@test_logs (:warn, "Adjusting nev from $n to $(n - 2)") eigs(a, nev=n)
# (d,v) = eigs(a, b, nev=3, tol=1e-8) # not handled yet
# @test a*v[:,2] ≈ d[2]*b*v[:,2] atol=testtol
# @test norm(v) > testtol # eigenvectors cannot be null vectors
if elty <: LinearAlgebra.BlasComplex
sr_ind = argmin(real.(a_evs))
(d, v) = eigs(a, nev=1, which=:SR)
@test d[1] ≈ a_evs[sr_ind]
si_ind = argmin(imag.(a_evs))
(d, v) = eigs(a, nev=1, which=:SI)
@test d[1] ≈ a_evs[si_ind]
lr_ind = argmax(real.(a_evs))
(d, v) = eigs(a, nev=1, which=:LR)
@test d[1] ≈ a_evs[lr_ind]
li_ind = argmax(imag.(a_evs))
(d, v) = eigs(a, nev=1, which=:LI)
@test d[1] ≈ a_evs[li_ind]
end
(d,v) = eigs(asym, nev=3)
@test asym*v[:,1] ≈ d[1]*v[:,1]
@test eigs(asym; nev=1, sigma=d[3])[1][1] ≈ d[3]
@test norm(v) > testtol # eigenvectors cannot be null vectors
(d,v) = eigs(apd, nev=3)
@test apd*v[:,3] ≈ d[3]*v[:,3]
@test eigs(apd; nev=1, sigma=d[3])[1][1] ≈ d[3]
(d,v) = eigs(apd, bpd, nev=3, tol=1e-8)
@test apd*v[:,2] ≈ d[2]*bpd*v[:,2] atol=testtol
@test norm(v) > testtol # eigenvectors cannot be null vectors
@testset "(shift-and-)invert mode" begin
(d,v) = eigs(apd, nev=3, sigma=0)
@test apd*v[:,3] ≈ d[3]*v[:,3]
@test norm(v) > testtol # eigenvectors cannot be null vectors
(d,v) = eigs(apd, bpd, nev=3, sigma=0, tol=1e-8)
@test apd*v[:,1] ≈ d[1]*bpd*v[:,1] atol=testtol
@test norm(v) > testtol # eigenvectors cannot be null vectors
end
@testset "ArgumentErrors" begin
@test_throws ArgumentError eigs(rand(rng, elty, 2, 2))
@test_throws ArgumentError eigs(a, nev=-1)
@test_throws ArgumentError eigs(a, which=:Z)
@test_throws ArgumentError eigs(a, which=:BE)
@test_throws DimensionMismatch eigs(a, v0=zeros(elty,n+2))
@test_throws ArgumentError eigs(a, v0=zeros(Int,n))
if elty == Float64
@test_throws ArgumentError eigs(a + copy(transpose(a)), which=:SI)
@test_throws ArgumentError eigs(a + copy(transpose(a)), which=:LI)
@test_throws ArgumentError eigs(a, sigma = rand(rng, ComplexF32))
end
end
end
@testset "Symmetric generalized with singular B" begin
rng = StableRNG(127)
n = 10
k = 3
A = randn(rng, n, n); A = A'A
B = randn(rng, n, k); B = B*B'
@test sort(eigs(A, B, nev = k, sigma = 1.0, explicittransform=:none)[1]) ≈ sort(eigvals(A, B); by=abs)[1:k]
end
end
@testset "Problematic example from #6965A" begin
A6965 = [
1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
-1.0 2.0 0.0 0.0 0.0 0.0 0.0 1.0
-1.0 0.0 3.0 0.0 0.0 0.0 0.0 1.0
-1.0 0.0 0.0 4.0 0.0 0.0 0.0 1.0
-1.0 0.0 0.0 0.0 5.0 0.0 0.0 1.0
-1.0 0.0 0.0 0.0 0.0 6.0 0.0 1.0
-1.0 0.0 0.0 0.0 0.0 0.0 7.0 1.0
-1.0 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 8.0
]
d, = eigs(A6965,which=:LM,nev=2,ncv=4,tol=eps(), sigma=0.0)
@test d[1] ≈ 2.5346936860350002
@test real(d[2]) ≈ 2.6159972444834976
@test abs(imag(d[2])) ≈ 1.2917858749046127
# Requires ARPACK 3.2 or a patched 3.1.5
#T6965 = [ 0.9 0.05 0.05
# 0.8 0.1 0.1
# 0.7 0.1 0.2 ]
#d,v,nconv = eigs(T6965,nev=1,which=:LM)
# @test T6965*v ≈ d[1]*v atol=1e-6
end
# Example from Quantum Information Theory
import Base: size
mutable struct CPM{T<:LinearAlgebra.BlasFloat} <: AbstractMatrix{T} # completely positive map
kraus::Array{T,3} # kraus operator representation
end
size(Phi::CPM) = (size(Phi.kraus,1)^2,size(Phi.kraus,3)^2)
LinearAlgebra.issymmetric(Phi::CPM) = false
LinearAlgebra.ishermitian(Phi::CPM) = false
function LinearAlgebra.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:LinearAlgebra.BlasFloat}
rho = reshape(rho,(size(Phi.kraus,3),size(Phi.kraus,3)))
rho1 = zeros(T,(size(Phi.kraus,1),size(Phi.kraus,1)))
for s = 1:size(Phi.kraus,2)
As = view(Phi.kraus,:,s,:)
rho1 += As*rho*As'
end
return copyto!(rho2,rho1)
end
@testset "Test random isometry" begin
(Q, R) = qr(randn(100, 50))
Q = reshape(Array(Q), (50, 2, 50))
# Construct trace-preserving completely positive map from this
Phi = CPM(copy(Q))
(d,v,nconv,numiter,numop,resid) = eigs(Phi, nev=1, which=:LM)
# Properties: largest eigenvalue should be 1, largest eigenvector, when reshaped as matrix
# should be a Hermitian positive definite matrix (up to an arbitrary phase)
@test d[1] ≈ 1. # largest eigenvalue should be 1.
v = reshape(v, (50, 50)) # reshape to matrix
v /= tr(v) # factor out arbitrary phase
@test norm(imag(v)) ≈ 0. # it should be real
v = real(v)
# @test norm(v-v')/2 ≈ 0. # it should be Hermitian
# Since this fails sometimes (numerical precision error),this test is commented out
v = (v + v')/2
@test isposdef(v)
# Repeat with starting vector
(d2, v2, nconv2, numiter2, numop2, resid2) = eigs(Phi, nev=1, which=:LM, v0=reshape(v, (2500,)))
v2 = reshape(v2, (50,50))
v2 /= tr(v2)
@test numiter2 < numiter
@test v ≈ v2
# Adjust the tolerance a bit since matrices with repeated eigenvalues
# can be very stressful to ARPACK and this may therefore fail with
# info = 3 if the tolerance is too small
@test eigs(sparse(1.0I, 50, 50), nev=10, tol = 5e-16)[1] ≈ fill(1., 10) #Issue 4246
end
@testset "real svds" begin
A = sparse([1, 1, 2, 3, 4], [2, 1, 1, 3, 1], [2.0, -1.0, 6.1, 7.0, 1.5])
S1 = svds(A, nsv = 2)
S2 = svd(Array(A))
## singular values match:
@test S1[1].S ≈ S2.S[1:2]
@testset "singular vectors" begin
## 1st left singular vector
s1_left = sign(S1[1].U[3,1]) * S1[1].U[:,1]
s2_left = sign(S2.U[3,1]) * S2.U[:,1]
@test s1_left ≈ s2_left
## 1st right singular vector
s1_right = sign(S1[1].V[3,1]) * S1[1].V[:,1]
s2_right = sign(S2.V[3,1]) * S2.V[:,1]
@test s1_right ≈ s2_right
end
# Issue number 10329
# Ensure singular values from svds are in
# the correct order
@testset "singular values ordered correctly" begin
B = sparse(Diagonal([1.0, 2.0, 34.0, 5.0, 6.0]))
S3 = svds(B, ritzvec=false, nsv=2)
@test S3[1].S ≈ [34.0, 6.0]
S4 = svds(B, nsv=2)
@test S4[1].S ≈ [34.0, 6.0]
end
@testset "passing guess for Krylov vectors" begin
S1 = svds(A, nsv = 2, v0=rand(eltype(A), size(A,2)))
@test S1[1].S ≈ S2.S[1:2]
end
@test_throws ArgumentError svds(A, nsv=0)
@test_throws ArgumentError svds(A, nsv=20)
@test_throws DimensionMismatch svds(A, nsv=2, v0=rand(size(A,2) + 1))
@testset "Orthogonal vectors with repeated singular values $i times. Issue 16608" for i in 2:3
rng = StableRNG(126) # Fragile to compute repeated values without blocking so we set the seed
v0 = randn(rng, 20)
d = sort(rand(rng, 20), rev = true)
for j in 2:i
d[j] = d[1]
end
A = qr(randn(rng, 20, 20)).Q*Diagonal(d)*qr(randn(rng, 20, 20)).Q
@testset "Number of singular values: $j" for j in 2:6
# Default size of subspace
F = svds(A, nsv = j, v0 = v0)
@test F[1].U'F[1].U ≈ Matrix(I, j, j)
@test F[1].V'F[1].V ≈ Matrix(I, j, j)
@test F[1].S ≈ d[1:j]
for k in 3j:2:5j
# Custom size of subspace
F = svds(A, nsv = j, ncv = k, v0 = v0)
@test F[1].U'F[1].U ≈ Matrix(I, j, j)
@test F[1].V'F[1].V ≈ Matrix(I, j, j)
@test F[1].S ≈ d[1:j]
end
end
end
end
@testset "complex svds" begin
A = sparse([1, 1, 2, 3, 4], [2, 1, 1, 3, 1], exp.(im*[2.0:2:10;]), 5, 4)
S1 = svds(A, nsv = 2)
S2 = svd(Array(A))
## singular values match:
@test S1[1].S ≈ S2.S[1:2]
@testset "singular vectors" begin
## left singular vectors
s1_left = abs.(S1[1].U[:,1:2])
s2_left = abs.(S2.U[:,1:2])
@test s1_left ≈ s2_left
## right singular vectors
s1_right = abs.(S1[1].V[:,1:2])
s2_right = abs.(S2.V[:,1:2])
@test s1_right ≈ s2_right
end
@testset "passing guess for Krylov vectors" begin
S1 = svds(A, nsv = 2, v0=rand(eltype(A), size(A,2)))
@test S1[1].S ≈ S2.S[1:2]
end
@test_throws ArgumentError svds(A,nsv=0)
@test_throws ArgumentError svds(A,nsv=20)
@test_throws DimensionMismatch svds(A,nsv=2,v0=complex(rand(size(A,2)+1)))
end
@testset "promotion" begin
eigs(rand(1:10, 10, 10))
eigs(rand(1:10, 10, 10), rand(1:10, 10, 10) |> t -> t't)
svds(rand(1:10, 10, 8))
@test_throws MethodError eigs(big.(rand(1:10, 10, 10)))
@test_throws MethodError eigs(big.(rand(1:10, 10, 10)), rand(1:10, 10, 10))
@test_throws MethodError svds(big.(rand(1:10, 10, 8)))
end
struct MyOp{S}
mat::S
end
Base.size(A::MyOp) = size(A.mat)
Base.size(A::MyOp, i::Integer) = size(A.mat, i)
Base.eltype(A::MyOp) = Float64
Base.:*(A::MyOp, B::AbstractMatrix) = A.mat*B
LinearAlgebra.mul!(y::AbstractVector, A::MyOp, x::AbstractVector) = mul!(y, A.mat, x)
LinearAlgebra.adjoint(A::MyOp) = MyOp(adjoint(A.mat))
@testset "svds for non-AbstractMatrix" begin
A = MyOp(randn(10, 9))
@test svds(A, v0 = ones(9))[1].S == svds(A.mat, v0 = ones(9))[1].S
end
@testset "low rank" begin
rng = StableRNG(123)
@testset "$T coefficients" for T in [Float64, Complex{Float64}]
@testset "rank $r" for r in [2, 5, 10]
m, n = 3*r, 4*r
nsv = 2*r
FU = qr(randn(rng, T, m, r))
U = Matrix(FU.Q)
S = 0.1 .+ sort(rand(rng, r), rev=true)
FV = qr(randn(rng, T, n, r))
V = Matrix(FV.Q)
A = U*Diagonal(S)*V'
F = svds(A, nsv=nsv)[1]
@test F.S[1:r] ≈ S
if T == Complex{Float64}
# This test fails since ARPACK does not have an Hermitian solver
# for the complex case. This problem occurs for U in the "fat"
# case. In the "tall" case the same may happen for V instead.
@test_broken F.U'*F.U ≈ Matrix{T}(I, nsv, nsv)
else
@test F.U'*F.U ≈ Matrix{T}(I, nsv, nsv)
end
@test F.V'*F.V ≈ Matrix{T}(I, nsv, nsv)
end
end
end
@testset "Problematic examples from #41" begin
@test all(Matrix(svds([1. 0.; 0. 0.],nsv=1)[1]) ≈ [1. 0.; 0. 0.] for i in 1:10)
A = [1. 0. 0.; 0. 0. 0.; 0. 0. 0.]
U,s,V = svds(A,nsv=2)[1]
@test U*Diagonal(s)*V' ≈ A atol=1e-7
@test U'U ≈ I
@test V'V ≈ I
end
# Problematic example from #118
@testset "issue 118" begin
ωc = 1.2
ωa = 0.9
γ = 0.5
κ = 1.1
sz = sparse(ComplexF64[1 0; 0 -1])
sp = sparse(ComplexF64[0 1; 0 0])
sm = sparse(collect(sp'))
ids = one(sz)
a = sparse(diagm(1 => ComplexF64[sqrt(i) for i=1:10]))
ida = one(a)
Ha = kron(ida, 0.5*ωa*sz)
Hc = kron(ωc*a'*a, ids)
Hint = sparse(kron(a', sm) + kron(a, sp))
H = Ha + Hc + Hint
Ja = kron(ida, sqrt(γ)*sm)
Jc = kron(sqrt(κ)*a, ids)
J = sqrt(2) .* [Ja, Jc]
Jdagger = adjoint.(J)
rates = 0.5 .* ones(length(J))
spre(x) = kron(one(x), x)
spost(x) = kron(permutedims(x), one(x))
L = spre(-1im*H) + spost(1im*H)
for i=1:length(J)
jdagger_j = rates[i]/2*Jdagger[i]*J[i]
L -= spre(jdagger_j) + spost(jdagger_j)
L += spre(rates[i]*J[i]) * spost(Jdagger[i])
end
for _=1:100
d, rest = eigs(L, nev=2, which=:LR)
@test abs(d[1]) < 1e-9
end
end
# Problematic examples from #85
@testset "maxiter reach not throw err" begin
a = rand(100, 100)
a = a + a'
nev = 5
try
e, v = eigs(a, nev = nev, maxiter = 2)
catch err
@test isa(err, Arpack.XYAUPD_Exception)
@test err.info == 1
end
e, v = eigs(a, nev = nev, maxiter = 2, check = 2)
println("An warning 'nev = $nev, but only x found!' is expected here:")
e, v = eigs(a, nev = nev, maxiter = 2, check = 1)
e0, v0 = eigs(a, nev = nev)
n = length(e)
@test all(e[1:n] .≈ e0[1:n])
@test abs.(v[:, 1:n]'v0[:, 1:n]) ≈ I
try
e, v = svds(a, nsv = 5, maxiter = 2)
catch err
@show typeof(err)
@test isa(err, Arpack.XYAUPD_Exception)
@test err.info == 1
end
r, _ = svds(a, nsv = 5, maxiter = 2, check = 2)
println("An warning 'nev = $nev, but only x found!' is expected here:")
r, _ = svds(a, nsv = 5, maxiter = 2, check = 1)
r0, _ = svds(a, nsv = 5)
n = length(r.S)
@test all(r.S[1:n] .≈ r0.S[1:n])
@test abs.(r.U[:, 1:n]'r0.U[:, 1:n]) ≈ I
@test abs.(r.V[:, 1:n]'r0.V[:, 1:n]) ≈ I
end
# Regression test for #110.
@testset "correct Krylov vector length check" begin
m = 4
n = 8
a = sprandn(m,n,0.4)
@test svds(a, nsv=1, v0 = ones(min(m, n)))[1].S ≈ svds(a', nsv=1, v0 = ones(min(m, n)))[1].S
@test_throws DimensionMismatch svds(a, nsv=1, v0 = ones(max(m, n)))
@test_throws DimensionMismatch svds(a', nsv=1, v0 = ones(max(m, n)))
end
@testset "ordering modes" begin
N = 10
nev = 4
M = rand(N,N)
S = eigvals(M)
abs_imag = abs ∘ imag # ARPACK returns largest,smallest abs(imaginary) (complex pairs come together)
@testset "no shift-invert" begin
for (which, sortby, rev) in [(:LM, abs, true), (:LR, real, true), (:LI, abs_imag, true),
(:SM, abs, false), (:SR, real, false), (:SI, abs_imag, false)]
d, _ = eigs(M, nev=nev, which=which)
e = partialsort(S, 1:nev, by=sortby, rev=rev)
@test sortby.(e) ≈ sortby.(d)
end
end
@testset "shift-invert" begin
for (which, sortby, rev) in [(:LM, abs, true), (:LR, real, true), (:LI, abs_imag, true),
(:SM, abs, false), (:SR, real, false), (:SI, abs_imag, false)]
d, _ = eigs(M, nev=nev, which=which, sigma=0.0)
e = S[partialsortperm(S, 1:nev, by=sortby ∘ inv, rev=rev)]
@test sortby.(e) ≈ sortby.(d)
end
end
end
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 931 | # Arpack
[](https://github.com/JuliaLinearAlgebra/Arpack.jl/actions/workflows/ci.yml)
[![][docs-stable-img]][docs-stable-url]
Julia wrapper for the [arpack](https://github.com/opencollab/arpack-ng/) library
designed to solve large-scale eigenvalue problems.
## Installation
Install Arpack.jl through the Julia package manager:
```julia
julia> import Pkg; Pkg.add("Arpack")
```
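## Usage

A minimal sketch (the matrix here is illustrative only):

```julia
using Arpack, SparseArrays

A = sprandn(100, 100, 0.1)
A = A + A' # make the matrix symmetric
λ, ϕ = eigs(A; nev = 3, which = :LM) # 3 eigenvalues of largest magnitude
```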
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-latest-url]: http://arpack.JuliaLinearAlgebra.org/latest/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: http://arpack.JuliaLinearAlgebra.org/stable/
# Alternate packages
Users running into issues with this package may want to try [KrylovKit.jl](https://github.com/Jutho/KrylovKit.jl) or [ArnoldiMethod.jl](https://github.com/haampie/ArnoldiMethod.jl).
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 188 | # API
```@meta
DocTestSetup = :(using Arpack, LinearAlgebra, SparseArrays)
```
```@docs
Arpack.eigs(::Any)
Arpack.eigs(::Any, ::Any)
Arpack.svds
```
```@meta
DocTestSetup = nothing
```
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 5720 | # [Standard Eigen Decomposition](@id man-eigs)
```@meta
DocTestSetup = :(using Arpack, LinearAlgebra, SparseArrays)
```
`eigs` calculates the eigenvalues and, optionally, eigenvectors of a matrix
using implicitly restarted Lanczos or Arnoldi iterations for real symmetric or
general nonsymmetric matrices respectively. The input matrix `A` can be any
structured `AbstractMatrix` that implements the in-place product
method `LinearAlgebra.mul!(y, A, x)`.
For the single matrix version,
`eigs(A; nev=6, ncv=max(20,2*nev+1), which=:LM, tol=0.0, maxiter=300, sigma=nothing, ritzvec=true, v0=zeros((0,))) -> (d,[v,],nconv,niter,nmult,resid)`
the following keyword arguments are supported:
* `nev`: Number of eigenvalues
* `ncv`: Number of Krylov vectors used in the computation; should satisfy `nev+1 <= ncv <= n`
for real symmetric problems and `nev+2 <= ncv <= n` for other problems, where `n` is the
size of the input matrix `A`. The default is `ncv = max(20,2*nev+1)`. Note that these
restrictions limit the input matrix `A` to be of dimension at least 2.
* `which`: type of eigenvalues to compute. See the note below.
| `which` | type of eigenvalues |
|:--------|:--------------------------------------------------------------------------------------------------------------------------|
| `:LM` | eigenvalues of largest magnitude (default) |
| `:SM` | eigenvalues of smallest magnitude |
| `:LR` | eigenvalues of largest real part |
| `:SR` | eigenvalues of smallest real part |
| `:LI` | eigenvalues of largest imaginary part (nonsymmetric or complex `A` only) |
| `:SI` | eigenvalues of smallest imaginary part (nonsymmetric or complex `A` only) |
| `:BE` | compute half of the eigenvalues from each end of the spectrum, biased in favor of the high end. (real symmetric `A` only) |
* `tol`: parameter defining the relative tolerance for convergence of Ritz values (eigenvalue estimates).
A Ritz value ``θ`` is considered converged when its associated residual
is less than or equal to the product of `tol` and ``max(ɛ^{2/3}, |θ|)``,
where `ɛ = eps(real(eltype(A)))/2` is LAPACK's machine epsilon.
The residual associated with ``θ`` and its corresponding Ritz vector ``v``
is defined as the norm ``||Av - vθ||``.
The specified value of `tol` should be positive; otherwise, it is ignored
and ``ɛ`` is used instead.
Default: ``ɛ``.
* `maxiter`: Maximum number of iterations (default = 300)
* `sigma`: Specifies the level shift used in inverse iteration. If `nothing` (default),
defaults to ordinary (forward) iterations. Otherwise, find eigenvalues close to `sigma`
using shift and invert iterations.
* `ritzvec`: Returns the Ritz vectors `v` (eigenvectors) if `true`
* `v0`: starting vector from which to start the iterations
We can see the various keywords in action in the following examples:
```jldoctest; filter = r"(1|2)-element Array{(Float64|Complex{Float64}),1}:\n (.|\s)*$"
julia> A = Diagonal(1:4);
julia> λ, ϕ = eigs(A, nev = 2, which=:SM);
julia> λ
2-element Array{Float64,1}:
1.0000000000000002
2.0
julia> B = Diagonal([1., 2., -3im, 4im]);
julia> λ, ϕ = eigs(B, nev=1, which=:LI);
julia> λ
1-element Array{Complex{Float64},1}:
1.3322676295501878e-15 + 4.0im
julia> λ, ϕ = eigs(B, nev=1, which=:SI);
julia> λ
1-element Array{Complex{Float64},1}:
-2.498001805406602e-16 - 3.0000000000000018im
julia> λ, ϕ = eigs(B, nev=1, which=:LR);
julia> λ
1-element Array{Complex{Float64},1}:
2.0000000000000004 + 4.0615212488780827e-17im
julia> λ, ϕ = eigs(B, nev=1, which=:SR);
julia> λ
1-element Array{Complex{Float64},1}:
-8.881784197001252e-16 + 3.999999999999997im
julia> λ, ϕ = eigs(B, nev=1, sigma=1.5);
julia> λ
1-element Array{Complex{Float64},1}:
1.0000000000000004 + 4.0417078924070745e-18im
```
!!! note
The `sigma` and `which` keywords interact: the description of eigenvalues
searched for by `which` do *not* necessarily refer to the eigenvalues of
`A`, but rather the linear operator constructed by the specification of the
iteration mode implied by `sigma`.
| `sigma` | iteration mode | `which` refers to eigenvalues of |
|:----------------|:---------------------------------|:---------------------------------|
| `nothing` | ordinary (forward) | ``A`` |
| real or complex | inverse with level shift `sigma` | ``(A - \sigma I )^{-1}`` |
!!! note
Although `tol` has a default value, the best choice depends strongly on the
matrix `A`. We recommend that users _always_ specify a value for `tol`
which suits their specific needs.
For details of how the errors in the computed eigenvalues are estimated, see:
* B. N. Parlett, "The Symmetric Eigenvalue Problem", SIAM: Philadelphia, 2/e
(1998), Ch. 13.2, "Accessing Accuracy in Lanczos Problems", pp. 290-292 ff.
* R. B. Lehoucq and D. C. Sorensen, "Deflation Techniques for an Implicitly
Restarted Arnoldi Iteration", SIAM Journal on Matrix Analysis and
Applications (1996), 17(4), 789–821. doi:10.1137/S0895479895281484
```@meta
DocTestSetup = nothing
```
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 4486 | # [Generalized Eigen Decomposition](@id man-eigsgen)
```@meta
DocTestSetup = :(using Arpack, LinearAlgebra, SparseArrays)
```
For the two-input generalized eigensolution version,
`eigs(A, B; nev=6, ncv=max(20,2*nev+1), which=:LM, tol=0.0, maxiter=300, sigma=nothing, ritzvec=true, v0=zeros((0,))) -> (d,[v,],nconv,niter,nmult,resid)`
the following keyword arguments are supported:
* `nev`: Number of eigenvalues
* `ncv`: Number of Krylov vectors used in the computation; should satisfy `nev+1 <= ncv <= n`
for real symmetric problems and `nev+2 <= ncv <= n` for other problems, where `n` is the
size of the input matrices `A` and `B`. The default is `ncv = max(20,2*nev+1)`. Note that
these restrictions limit the input matrix `A` to be of dimension at least 2.
* `which`: type of eigenvalues to compute. See the note below.
| `which` | type of eigenvalues |
|:--------|:--------------------------------------------------------------------------------------------------------------------------|
| `:LM` | eigenvalues of largest magnitude (default) |
| `:SM` | eigenvalues of smallest magnitude |
| `:LR` | eigenvalues of largest real part |
| `:SR` | eigenvalues of smallest real part |
| `:LI` | eigenvalues of largest imaginary part (nonsymmetric or complex `A` only) |
| `:SI` | eigenvalues of smallest imaginary part (nonsymmetric or complex `A` only) |
| `:BE` | compute half of the eigenvalues from each end of the spectrum, biased in favor of the high end. (real symmetric `A` only) |
* `tol`: relative tolerance used in the convergence criterion for eigenvalues, similar to
`tol` in the [`eigs(A)`](@ref) method for the ordinary eigenvalue
problem, but effectively for the eigenvalues of ``B^{-1} A`` instead of ``A``.
See the documentation for the ordinary eigenvalue problem in
[`eigs(A)`](@ref) and the accompanying note about `tol`.
* `maxiter`: Maximum number of iterations (default = 300)
* `sigma`: Specifies the level shift used in inverse iteration. If `nothing` (default),
defaults to ordinary (forward) iterations. Otherwise, find eigenvalues close to `sigma`
using shift and invert iterations.
* `ritzvec`: Returns the Ritz vectors `v` (eigenvectors) if `true`
* `v0`: starting vector from which to start the iterations
`eigs` returns the `nev` requested eigenvalues in `d`, the corresponding Ritz vectors `v`
(only if `ritzvec=true`), the number of converged eigenvalues `nconv`, the number of
iterations `niter` and the number of matrix vector multiplications `nmult`, as well as the
final residual vector `resid`.
We can see the various keywords in action in the following examples:
```jldoctest; filter = r"(1|2)-element Array{(Float64|Complex{Float64}),1}:\n (.|\s)*$"
julia> A = sparse(1.0I, 4, 4); B = Diagonal(1:4);
julia> λ, ϕ = eigs(A, B, nev = 2);
julia> λ
2-element Array{Float64,1}:
1.0000000000000002
0.5
julia> A = Diagonal([1, -2im, 3, 4im]); B = sparse(1.0I, 4, 4);
julia> λ, ϕ = eigs(A, B, nev=1, which=:SI);
julia> λ
1-element Array{Complex{Float64},1}:
-1.5720931501039814e-16 - 1.9999999999999984im
julia> λ, ϕ = eigs(A, B, nev=1, which=:LI);
julia> λ
1-element Array{Complex{Float64},1}:
0.0 + 4.000000000000002im
```
!!! note
The `sigma` and `which` keywords interact: the description of eigenvalues searched for by
`which` do *not* necessarily refer to the eigenvalue problem ``Av = Bv\lambda``, but rather
the linear operator constructed by the specification of the iteration mode implied by `sigma`.
| `sigma` | iteration mode | `which` refers to the problem |
|:----------------|:---------------------------------|:-----------------------------------|
| `nothing` | ordinary (forward) | ``Av = Bv\lambda`` |
| real or complex | inverse with level shift `sigma` | ``(A - \sigma B )^{-1}B = v\nu`` |
```@meta
DocTestSetup = nothing
```
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 653 | # [Arpack.jl](@id man-arpack)
```@meta
DocTestSetup = :(using Arpack, LinearAlgebra, SparseArrays)
```
This package provides bindings to
[ARPACK](http://www.caam.rice.edu/software/ARPACK/), which can be used
to perform iterative solutions for eigensystems (using [`eigs`](@ref))
or singular value decompositions (using [`svds`](@ref)).
**Notes**
1. The ARPACK Fortran library is not re-entrant. `Arpack.jl` should only be used from one thread in a Julia program.
2. ARPACK uses a random starting vector by default. This causes the phase of the singular vectors to be random (or just the sign, for real values).
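For reproducible runs, an explicit starting vector can be passed via the `v0` keyword. A minimal sketch (the matrix and sizes are illustrative only):

```julia
using Arpack, SparseArrays
A = sprandn(50, 40, 0.2)
S = svds(A; nsv = 2, v0 = ones(40))[1] # a fixed v0 makes the run reproducible
```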
```@meta
DocTestSetup = nothing
```
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.5.4 | 9b9b347613394885fd1c8c7729bfc60528faa436 | docs | 2790 | # [Singular Value Decomposition](@id man-svds)
```@meta
DocTestSetup = :(using Arpack, LinearAlgebra, SparseArrays, Random)
```
`svds(A; nsv=6, ritzvec=true, tol=0.0, maxiter=1000, ncv=2*nsv, v0=zeros((0,))) -> (SVD([left_sv,] s, [right_sv,]), nconv, niter, nmult, resid)`
Computes the largest singular values `s` of `A` using implicitly restarted Lanczos
iterations derived from [`eigs`](@ref).
**Inputs**
* `A`: Linear operator whose singular values are desired. `A` may be represented as a
subtype of `AbstractArray`, e.g., a sparse matrix, or any other type supporting the four
methods `size(A)`, `eltype(A)`, `A * vector`, and `A' * vector`.
* `nsv`: Number of singular values. Default: 6.
* `ritzvec`: If `true`, return the left and right singular vectors `left_sv` and `right_sv`.
If `false`, omit the singular vectors. Default: `true`.
* `tol`: tolerance, see [`eigs`](@ref).
* `maxiter`: Maximum number of iterations, see [`eigs`](@ref). Default: 1000.
* `ncv`: Maximum size of the Krylov subspace, see [`eigs`](@ref) (there called `nev`). Default: `2*nsv`.
* `v0`: Initial guess for the first Krylov vector. It may have length `min(size(A)...)`, or 0.
**Outputs**
* `svd`: An `SVD` object containing the left singular vectors, the requested values, and the
right singular vectors. If `ritzvec = false`, the left and right singular vectors will be
empty. `U`, `S`, `V` and `Vt` can be obtained from the SVD object with `Z.U`, `Z.S`, `Z.V`
and `Z.Vt`, where `Z = svds(A)[1]` and `U * Diagonal(S) * Vt` is a low-rank approximation
of `A` with rank `nsv`. Internally `Vt` is stored and hence `Vt` is more efficient to extract than `V`.
* `nconv`: Number of converged singular values.
* `niter`: Number of iterations.
* `nmult`: Number of matrix--vector products used.
* `resid`: Final residual vector.
**Examples**
```jldoctest
julia> Random.seed!(123);
julia> A = Diagonal(1:5);
julia> Z = svds(A, nsv = 2)[1];
julia> Z.U
5×2 Array{Float64,2}:
0.0 7.80626e-18
0.0 -0.0
-1.33227e-16 5.35947e-33
-6.38552e-17 1.0
-1.0 -6.38552e-17
julia> Z.S
2-element Array{Float64,1}:
5.0
3.999999999999999
julia> Z.Vt
2×5 Array{Float64,2}:
-2.77556e-17 0.0 -2.22045e-16 -7.9819e-17 -1.0
3.1225e-17 1.89735e-19 0.0 1.0 -8.32667e-17
julia> Z.V
5×2 Adjoint{Float64,Array{Float64,2}}:
-2.77556e-17 3.1225e-17
0.0 1.89735e-19
-2.22045e-16 0.0
-7.9819e-17 1.0
-1.0 -8.32667e-17
```
!!! note "Implementation"
`svds(A)` is formally equivalent to calling [`eigs`](@ref) to perform implicitly restarted
Lanczos tridiagonalization on the Hermitian matrix ``A^\prime A`` or ``AA^\prime`` such
that the size is smallest.
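A sketch of this equivalence (illustrative only; `svds` additionally recovers the singular vectors and handles rank deficiency internally):

```julia
using Arpack, LinearAlgebra
A = randn(8, 5)
d, _ = eigs(Symmetric(A'A); nev = 2, which = :LM)
sqrt.(d) ≈ svds(A; nsv = 2)[1].S # true up to numerical tolerance
```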
```@meta
DocTestSetup = nothing
```
| Arpack | https://github.com/JuliaLinearAlgebra/Arpack.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 585 | module CairoMakieDemo
using CairoMakie: heatmap
using Kitten: text # CairoMakie also exports text
using Kitten
get("/") do
text("welcome to the random plot api!")
end
# generate a random plot
get("/plot/png") do
fig, ax, pl = heatmap(rand(50, 50)) # or something
png(fig)
end
get("/plot/svg") do
fig, ax, pl = heatmap(rand(50, 50)) # or something
svg(fig)
end
get("/plot/pdf") do
fig, ax, pl = heatmap(rand(50, 50)) # or something
pdf(fig)
end
get("/plot/html") do
fig, ax, pl = heatmap(rand(50, 50)) # or something
html(fig)
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1061 | module AuthDemo
using Kitten
using HTTP
@get "/divide/{a}/{b}" function(req::HTTP.Request, a::Float64, b::Float64)
return a / b
end
const CORS_HEADERS = [
"Access-Control-Allow-Origin" => "*",
"Access-Control-Allow-Headers" => "*",
"Access-Control-Allow-Methods" => "POST, GET, OPTIONS"
]
# https://juliaweb.github.io/HTTP.jl/stable/examples/#Cors-Server
function CorsMiddleware(handler)
return function(req::HTTP.Request)
if HTTP.method(req)=="OPTIONS"
return HTTP.Response(200, CORS_HEADERS)
else
return handler(req)
end
end
end
function AuthMiddleware(handler)
return function(req::HTTP.Request)
# ** NOT an actual security check ** #
if !HTTP.headercontains(req, "Authorization", "true")
return HTTP.Response(403)
else
return HTTP.Response(200, body=string(handler(req)))
end
end
end
# There is no hard limit on the number of middleware functions you can add
serve(middleware=[CorsMiddleware, AuthMiddleware])
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1186 | module BankingAppDemo
using Base: @kwdef
using Kitten
struct Address
street::String
city::String
state::String
zip_code::String
country::String
end
struct User
id::Int
first_name::String
last_name::String
email::String
address::Address
end
@kwdef struct BankAccount
id::Int
account_number::String
account_type::String = "checking"
balance::Float64
user::User
end
@get "/" function()
return "Welcome to the Banking App Demo"
end
"""
Setup User related routes
"""
user = router("/user", tags=["user"])
@post user("/json") function(req, data::Json{User})
return data.payload
end
@post user("/form") function(req, data::Form{User})
return data.payload
end
@post user("/headers") function(req, data::Header{User})
return data.payload
end
"""
Setup Account related routes
"""
acct = router("/account", tags=["account"])
@post acct("/json") function(req, data::Json{BankAccount})
return data.payload
end
@post acct("/form") function(req, data::Form{BankAccount})
return data.payload
end
@post acct("/headers") function(req, data::Header{BankAccount})
return data.payload
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 674 | module CorsDemo
using Kitten
using HTTP
allowed_origins = [ "Access-Control-Allow-Origin" => "*" ]
cors_headers = [
allowed_origins...,
"Access-Control-Allow-Headers" => "*",
"Access-Control-Allow-Methods" => "GET, POST"
]
function CorsHandler(handle)
return function (req::HTTP.Request)
# return headers on OPTIONS request
if HTTP.method(req) == "OPTIONS"
return HTTP.Response(200, cors_headers)
else
r = handle(req)
append!(r.headers, allowed_origins)
return r
end
end
end
get("/") do
text("hello world")
end
# more code here
serve(middleware=[CorsHandler])
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 942 | module CronDemo
using Kitten
using HTTP
using Dates
# You can use the @cron macro directly
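# Note: the first field of these cron expressions is seconds, so "*/2" fires every 2 seconds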
@cron "*/2" function()
println("every 2 seconds")
end
@cron "*/5" function every5seconds()
println("every 5 seconds")
end
value = 0
# You can also just use the 'cron' keyword that's part of the router() function
@get router("/increment", cron="*/11", interval=4) function()
global value += 1
return value
end
@get router("/getvalue") function()
return value
end
# all endpoints will inherit this cron expression
pingpong = router("/pingpong", cron="*/3")
@get pingpong("/ping") function()
println("ping")
return "ping"
end
# here we override the inherited cron expression
@get pingpong("/pong", cron="*/7") function()
println("pong")
return "pong"
end
@get "/home" function()
"home"
end
@get "/stop" function()
stopcronjobs()
end
@get "/start" function()
startcronjobs()
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 784 | module CronManagementDemo
using Kitten
using HTTP
using Dates
get("/data") do
Dict("msg" => "hello")
end
function logtime()
@info "current time: $(now())"
end
# initialize the app with an already running cron job
get(router("/log", cron="*/2")) do
logtime()
end
get("/register") do
@info "registering new job"
@cron "*/2" logtime
"registered jobs"
end
get("/start") do
@info "/start POST endpoint hit; running job"
startcronjobs()
"started jobs"
end
get("/clear") do
@info "clearing jobs"
clearcronjobs()
"cleared jobs"
end
get("/stop") do
@info "/stop POST endpoint hit"
stopcronjobs()
"stopped jobs"
end
@cron "*/3" function()
println("every 3 seconds")
end
try
serve()
finally
terminate()
end
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 656 | module CustomSerializerDemo
using Kitten
using HTTP
using JSON3
function middleware(handle)
return function(req)
try
resp = handle(req)
if resp isa HTTP.Messages.Response
return resp
end
return HTTP.Response(200, [], body=JSON3.write(resp))
catch error
@error "ERROR: " exception=(error, catch_backtrace())
return HTTP.Response(500, "The server encountered a problem")
end
end
end
@get "/hello" function (req::HTTP.Request)
return "hello"
end
# disable default serialization
serve(serialize=false, middleware=[middleware])
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 404 | module DynamicHeadersDemo
using Kitten
using HTTP
get("/") do req::HTTP.Request
return "hello world!"
end
# This function allows us to customize the headers on our static & dynamic resources
function customize_headers(route::String, content_type::String, headers::Vector)
return [headers; "My-Header" => "hello world!"]
end
staticfiles("content", set_headers=customize_headers)
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1232 | module ErgonomicsDemo
using Kitten
using StructTypes
using Dates
using HTTP
using JSON3
using Base: @kwdef
using BenchmarkTools
struct Address
street::String
city::String
state::String
zip::String
end
struct Person
name::String
age::Int
end
@kwdef struct Sample
limit::Int = 20
skip::Int = 33
end
struct Parameters
b::Int
end
@kwdef struct PersonWithDefault
name::String
age::Int
money::Float64 = 100.0
address::Address = Address("123 Main Street", "Orlando", "FL", "32810")
end
@get "/add/{a}/{b}" function(req, a::String, path::Path{Parameters}, qparams::Query{Sample}, c::Float64=3.6)
return (a=a, c=c, path=path, query=qparams)
end
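# e.g. a hypothetical request GET /add/foo/5?limit=10&c=4.2 would bind a="foo",
# path=Parameters(5), qparams=Sample(limit=10, skip=33), and c=4.2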
@get "/headers" function(req, headers = Header(Sample, s -> s.limit < 30))
return headers.payload
end
@post "/json" function(req, data = Json(PersonWithDefault, p -> p.money < 1000 ))
return data.payload
end
@post "/form" function(req, data::Form{PersonWithDefault})
return data.payload
end
@post "/body" function(req, data::Body{Int64})
return data.payload
end
@get "/get" function(req, data::Json{Sample})
return data.payload
end
@get "/" function(req)
"home"
end
serve(docs=true, metrics=true)
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 515 | module ErrorHandlingDemo
using Kitten
using HTTP
@get "/" function(req::HTTP.Request)
return "hello world!"
end
@get "/bad" function(req::HTTP.Request)
throw("whoops")
"hello"
end
function errorcatcher(handle)
function(req)
try
response = handle(req)
return response
catch e
return HTTP.Response(500, "here's a custom error response")
end
end
end
# start the web server; catch_errors=false disables the built-in error handler
# so the errorcatcher middleware above handles exceptions instead
serve(middleware=[errorcatcher], catch_errors=false)
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 3128 | module Main
using Kitten
using HTTP
using JSON3
using StructTypes
struct Animal
id::Int
type::String
name::String
end
# Add a supporting struct type definition to the Animal struct
StructTypes.StructType(::Type{Animal}) = StructTypes.Struct()
@get "/" function()
return "home"
end
@get "/killserver" function ()
terminate()
end
# add a default handler for unmatched requests
@get "*" function ()
return "looks like you hit an endpoint that doesn't exist"
end
# You can also interpolate variables into the endpoint
operations = Dict("add" => +, "multiply" => *)
for (pathname, operator) in operations
@get "/$pathname/{a}/{b}" function (req, a::Float64, b::Float64)
return operator(a, b)
end
end
# demonstrate how to use path params (without type definitions)
@get "/power/{a}/{b}" function (req::HTTP.Request, a::String, b::String)
return parse(Float64, a) ^ parse(Float64, b)
end
# demonstrate how to use path params with type definitions
@get "/divide/{a}/{b}" function (req::HTTP.Request, a::Float64, b::Float64)
return a / b
end
# Return the body of the request as a string
@post "/echo-text" function (req::HTTP.Request)
return text(req)
end
# demonstrates how to serialize JSON into a julia struct
@post "/animal" function (req)
return json(req, Animal)
end
# Return the body of the request as a JSON object
@post "/echo-json" function (req::HTTP.Request)
return json(req)
end
# You can also return your own customized HTTP.Response object from an endpoint
@get "/custom-response" function (req::HTTP.Request)
test_value = 77.8
return HTTP.Response(200, ["Content-Type" => "text/plain"], body = "$test_value")
end
# Any object returned from a function will automatically be converted into JSON (by default)
@get "/json" function(req::HTTP.Request)
return Dict("message" => "hello world", "animal" => Animal(1, "cat", "whiskers"))
end
# show how to return a file from an endpoint
@get "/files" function (req)
return file("main.jl")
end
# show how to return a string that needs to be interpreted as html
@get "/string-as-html" function (req)
message = "Hello World!"
return html("""
<!DOCTYPE html>
<html>
<body>
<h1>$message</h1>
</body>
</html>
""")
end
@route ["GET", "POST"] "/demo" function(req)
return Animal(1, "cat", "whiskers")
end
# recursively mount all files inside the content folder under the /static path
staticfiles("content")
dynamicfiles("content", "dynamic")
# CORS headers that show what kinds of complex requests are allowed to the API
headers = [
"Access-Control-Allow-Origin" => "*",
"Access-Control-Allow-Headers" => "*",
"Access-Control-Allow-Methods" => "GET, POST"
]
function CorsHandler(handle)
return function(req::HTTP.Request)
# return headers on OPTIONS request
if HTTP.method(req) == "OPTIONS"
return HTTP.Response(200, headers)
else
return handle(req)
end
end
end
# start the web server
serve(middleware=[CorsHandler])
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 333 | module CronManagementDemo
using Kitten
using HTTP
using Dates
get("/data") do
Dict("msg" => "hello")
end
get("/random/sm") do
sleep(rand(0.01:0.03))
"small random"
end
get("/random/md") do
sleep(rand(0.03:0.07))
"small random"
end
get("/random/lg") do
sleep(rand(0.07:.1))
"random"
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 2542 | module MiddlewareDemo
using Kitten
using HTTP
using JSON3
function handler1(handler)
return function(req::HTTP.Request)
println("1")
handler(req)
end
end
function handler2(handler)
return function(req::HTTP.Request)
println("2")
handler(req)
end
end
function handler3(handler)
return function(req::HTTP.Request)
println("3")
handler(req)
end
end
function handler4(handler)
return function(req::HTTP.Request)
println("4")
handler(req)
end
end
"""
Middleware rules
All middleware is additive, any middleware defined at the application, router, our route level will get combined
and get executed.
Regardless if set or not, Middleware will always get executed in the following order:
application -> router -> route
Well, what if we don't want previous layers of middleware to run?
You set middleware=[], it clears all middleware at that layer and skips all layers that come before it.
For example, setting middleware=[] at the:
- application layer: clears the application layer
- router layer: clears the router layer and skips application layer
- route layer: clears the route layer and skips the application & router layer
"""
# case 1: no middleware setup, uses the application middleware by default
@get "/add/{a}/{b}" function (req::HTTP.Request, a::Float64, b::Float64)
return a + b
end
# case 1: no middleware is defined at any level -> use application middleware
@get router("/power/{a}/{b}") function (req::HTTP.Request, a::Float64, b::Float64)
return a ^ b
end
math = router("/math", middleware=[handler3])
# case 2: middleware is cleared at route level so don't register any middleware
@get math("/cube/{a}", middleware=[]) function(req, a::Float64)
return a * a * a
end
# case 3: router-level is empty & route-level is defined
other = router("/math", middleware=[])
@get other("/multiply/{a}/{b}", middleware=[handler3]) function (req::HTTP.Request, a::Float64, b::Float64)
return a * b
end
# case 4 (both defined)
@get math("/divide/{a}/{b}", middleware=[handler4]) function(req::HTTP.Request, a::Float64, b::Float64)
return a / b
end
# case 5: only router level is defined
@get math("/subtract/{a}/{b}") function(req::HTTP.Request, a::Float64, b::Float64)
return a - b
end
# case 6: only route level middleware is defined
empty = router()
@get empty("/math/square/{a}", middleware=[handler3]) function(req, a::Float64)
return a * a
end
serve(middleware=[handler1, handler2])
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 572 | module MultiInstanceDemo
using Kitten
using HTTP
# Setup the first app
app1 = instance()
app1.get("/") do
"welcome to server #1"
end
app1.@get("/subtract/{a}/{b}") do req, a::Int, b::Int
("answer" => a - b)
end
# Setup the second app
app2 = instance()
app2.get("/") do
"welcome to server #2"
end
app2.@get("/add/{a}/{b}") do req, a::Int, b::Int
("answer" => a + b)
end
try
# start both servers together
app1.serve(port=8001, async=true)
app2.serve(port=8002)
finally
# clean it up
app1.terminate()
app2.terminate()
end
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 434 | module MultiInstanceDemo
module A
using Kitten; @oxidise
@get "/" function()
text("server A")
end
@get "/another" function()
text("another route in server A")
end
end
module B
using Kitten; @oxidise
@get "/" function()
text("server B")
end
end
try
A.serve(port=8001, async=true)
B.serve(port=8002, async=false)
finally
A.terminate()
B.terminate()
end
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 306 | module OxidiseDemo
using Kitten; @oxidise
@get("/") do req
"home"
end
@get("/nihao") do req
"你好"
end
@get "/greet" function()
"hello world!"
end
@get "/saluer" () -> begin
"Bonjour le monde!"
end
@get "/saludar" () -> "¡Hola Mundo!"
@get "/salutare" f() = "ciao mondo!"
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 732 | module ParallelDemo
using Kitten
using HTTP
using JSON3
using StructTypes
using SwaggerMarkdown
using Base.Threads
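# serveparallel() (called at the bottom of this file) handles requests across
# multiple threads, so shared mutable state must be synchronized - e.g. with
# atomics or a ReentrantLock as shown below.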
############## Atomic variable example ##############
StructTypes.StructType(::Type{Atomic{Int64}}) = StructTypes.Struct()
x = Atomic{Int64}(0);
@get "/atomic/show" function(req)
return x
end
@get "/atomic/increment" function()
atomic_add!(x, 1)
return x
end
############## ReentrantLock example ##############
global a = 0
rl = ReentrantLock()
@get "/lock/show" function()
return a
end
@get "/lock/increment" function()
lock(rl)
try
global a
a += 1
finally
unlock(rl)
end
return a
end
# start the web server in parallel mode
serveparallel()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 437 | module RepeatTasksDemo
using Kitten
const iterations = Ref{Int}(0)
get(router("/one", interval=1)) do
iterations[] += 1
end
get(router("/two", interval=2)) do
iterations[] += 1
end
@repeat 3 function()
iterations[] += 1
end
@repeat 4 "every 4 seconds" function()
iterations[] += 1
end
starttasks()
while iterations[] < 10
println("Iterations: ", iterations[])
sleep(1)
end
stoptasks()
cleartasks()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 661 | module RouterDemo
using Kitten
using HTTP
using JSON3
hellorouter = router("/hello", tags=["greeting"])
@get hellorouter("/excited", tags=["good"]) function(req)
return "excited"
end
@get hellorouter("/sad") function(req)
return "sad"
end
repeat = router("/repeat", interval = 1, tags=["repeat"])
@get repeat("/one") function(req)
println("one")
return "one"
end
emptyrouter = router()
@get emptyrouter("/empty", interval = 3) function(req)
println("empty")
return "empty"
end
# you can also pass the `router()` function itself
@get router("/spam", tags=["spam"], interval=0.25) function()
println("spam")
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 543 | module FunctionsRoutingDemo
using Kitten
using HTTP
get("/") do
"hello"
end
math = router("/math", tags=["math"])
get(math("/add/{x}/{y}")) do request::HTTP.Request, x::Int, y::Int
x + y
end
route(["POST"], math("/other/{x}/{y}")) do req, x::Int, y::Int
x - y
end
get(math("/multiply/{x}/{y}")) do request::HTTP.Request, x::Int, y::Int
x * y
end
get("/get") do
"test"
end
put("/put") do
"put"
end
patch("/patch") do
"patch"
end
delete("/delete") do
"delete"
end
# start the web server
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1460 | module SSEDemo
using JSON3
using Dates
using HTTP
using Kitten
@get "/" function()
html("""
<html>
<head>
<meta charset="UTF-8">
<title>Server-sent events demo</title>
</head>
<body>
<h3>Fetched items:</h3>
<ul id="list"></ul>
</body>
<script>
const evtSource = new EventSource("http://127.0.0.1:8080/api/events")
evtSource.onmessage = async function (event) {
const newElement = document.createElement("li");
const eventList = document.getElementById("list");
newElement.textContent = event.data;
eventList.appendChild(newElement);
}
evtSource.addEventListener("ping", function(event) {
console.log('ping:', event.data)
});
</script>
</html>
""")
end
@stream "/api/events" function(stream::HTTP.Stream)
HTTP.setheader(stream, "Access-Control-Allow-Origin" => "*")
HTTP.setheader(stream, "Access-Control-Allow-Methods" => "GET")
HTTP.setheader(stream, "Content-Type" => "text/event-stream")
HTTP.setheader(stream, "Cache-Control" => "no-cache")
while true
message = "The time is: $(now())"
write(stream, format_sse_message(message))
write(stream, format_sse_message(message; event="ping"))
sleep(1)
end
return nothing
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1711 | module StreamingChunksDemo
using JSON3
using Dates
using HTTP
using Kitten
function chunks(data::Any, nchunks::Int)
return chunks(JSON3.write(data), nchunks)
end
function chunks(data::String, nchunks::Int)
# Convert the data to binary
binarydata = Vector{UInt8}(data)
data_size = sizeof(binarydata)
# Calculate chunk size
chunk_size = ceil(Int, data_size / nchunks)
# return a generator for the chunks
return (binarydata[i:min(i + chunk_size - 1, end)] for i in 1:chunk_size:data_size)
end
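# e.g. chunks("hello world", 3) yields the byte chunks "hell", "o wo", "rld"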
nchunks = 5
data = Dict()
# Add new properties with large arrays of values
for i in 1:100
data["property$i"] = rand(1000) # Each property will have an array of 1000 random numbers
end
@stream "/api/chunked/json" function(stream::HTTP.Stream)
# Set headers
HTTP.setheader(stream, "Content-Type" => "application/json")
HTTP.setheader(stream, "Transfer-Encoding" => "chunked")
# Start writing (if you need to send headers before the body)
startwrite(stream)
# Write each chunk to the stream
for chunk in chunks(data, nchunks)
write(stream, chunk)
end
# Close the stream to end the HTTP response properly
closewrite(stream)
end
# Chunk Text
@stream "/api/chunked/text" function(stream::HTTP.Stream)
# Set headers
HTTP.setheader(stream, "Content-Type" => "text/plain")
HTTP.setheader(stream, "Transfer-Encoding" => "chunked")
# Start writing (if you need to send headers before the body)
startwrite(stream)
data = ["a", "b", "c"]
for chunk in data
write(stream, chunk)
end
# Close the stream to end the HTTP response properly
closewrite(stream)
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 2611 | module SwaggerDemo
using Kitten
using HTTP
using SwaggerMarkdown
using StructTypes
using JSON3
using Dates
@enum Fruit apple=1 orange=2 kiwi=3
struct Person
name :: String
age :: Int8
end
# Add a supporting struct type definition to the Person struct
StructTypes.StructType(::Type{Person}) = StructTypes.Struct()
StructTypes.StructType(::Type{Complex{Float64}}) = StructTypes.Struct()
@get "/fruit/{fruit}" function(req, fruit::Fruit)
return fruit
end
@get "/date/{date}" function(req, date::Date)
return date
end
@get "/datetime/{datetime}" function(req, datetime::DateTime)
return datetime
end
@get "/complex/{complex}" function(req, complex::Complex{Float64})
return complex
end
@get "/list/{list}" function(req, list::Vector{Float32})
return list
end
@get "/data/{dict}" function(req, dict::Dict{String, Any})
return dict
end
@get "/tuple/{tuple}" function(req, tuple::Tuple{String, String})
return tuple
end
@get "/union/{value}" function(req, value::Union{Bool, String, Float64})
return value
end
@get "/boolean/{bool}" function(req, bool::Bool)
return bool
end
@get "/person/{person}" function(req, person::Person)
return person
end
@get "/float/{float}" function (req::HTTP.Request, float::Float32)
return float
end
@swagger """
/divide/{a}/{b}:
get:
description: Return the value of a / b
parameters:
- name: a
in: path
required: true
description: this is your value
schema:
type: number
format: double
responses:
'200':
        description: Successfully returned a number.
"""
# demonstrate how to use path params with type definitions
@get "/divide/{a}/{b}" function (req::HTTP.Request, a::Float64, b::Float64)
return a / b
end
@get "/add/{a}/{b}" function (req::HTTP.Request, a::UInt32, b::Float16)
return a + b
end
@get "/add/{success}" function (req::HTTP.Request, success::Bool)
return success
end
@swagger """
/home:
get:
description: returns the home endpoint!!
responses:
"200":
description: Returns a string
"503":
description: something bad happened
"""
@get "/home" function()
"home"
end
# the version of the OpenAPI used is required
openApi_version = "3.0"
# the info of the API, title and version of the info are required
info = Dict{String, Any}()
info["title"] = "My custom api"
info["version"] = openApi_version
openApi = OpenAPI(openApi_version, info)
swagger_document = build(openApi)
# merge the SwaggerMarkdown schema with the internal schema
mergeschema(swagger_document)
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 862 | module WGLMakieDemo
using WGLMakie
using WGLMakie.Makie: FigureLike
using Bonito, FileIO, Colors, HTTP
using Kitten
using Kitten: text, html
get("/") do
text("home")
end
get("/plot") do
plt = heatmap(rand(50, 50))
html(plt)
end
get("/html/heatmap") do
app = App() do session::Session
return DOM.div(
DOM.h1("Random 50x50 Heatmap"),
DOM.div(heatmap(rand(50, 50)))
)
end
return html(app)
end
get("/page") do
app = App() do session::Session
hue_slider = Slider(0:360)
color_swatch = DOM.div(class="h-6 w-6 p-2 m-2 rounded shadow")
onjs(session, hue_slider.value, js"""function (hue){
$(color_swatch).style.backgroundColor = "hsl(" + hue + ",60%,50%)"
}""")
return Row(hue_slider, color_swatch)
end
return html(app)
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1409 | module WebSocketDemo
using Dates
using HTTP
using HTTP.WebSockets: send
using Kitten
@get "/" function()
html("""
<html>
<head>
<meta charset="UTF-8">
<title>WebSocket demo</title>
</head>
<body>
<h3>Sent messages:</h3>
<ul id="list"></ul>
</body>
<script>
const socket = new WebSocket("ws://127.0.0.1:8080/ws");
socket.onopen = function(event) {
setInterval(function() {
const message = "Hello, server! What time is it?";
socket.send(message);
console.log('Sent:', message);
}, 1000);
};
socket.onmessage = function(event) {
console.log('Received:', event.data);
const newElement = document.createElement("li");
const messageList = document.getElementById("list");
newElement.textContent = event.data;
messageList.appendChild(newElement);
};
socket.onerror = function(error) {
console.log('WebSocket Error:', error);
};
</script>
</html>
""")
end
@websocket "/ws" function(ws::HTTP.WebSocket)
for msg in ws
@info "Received message: $msg"
send(ws, "The time is: $(now())")
end
end
serve()
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 2070 | module ApiTester
using HTTP
using Distributions
using Random
function random_requester(urls::Array{String,1}, req_range::Tuple{Int, Int})
# Ensure the range is valid
    if req_range[1] > req_range[2] || req_range[1] <= 0
error("Invalid range of requests per second.")
end
# Calculate the time interval range in seconds (as floating point numbers)
interval_range = (1.0 / req_range[2], 1.0 / req_range[1])
while true
# Choose a random URL from the list
url = rand(urls)
# Generate a random request interval from the specified range
interval = rand(Uniform(interval_range[1], interval_range[2]))
# Send the HTTP request
try
response = HTTP.get(url)
println("Requested $(url): Status $(response.status)")
catch e
println("Failed to request $(url): $(e)")
end
# Wait for the random interval before sending the next request
sleep(interval)
end
end
# Example usage:
urls = [
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/data",
"http://localhost:8080/random/lg",
"http://localhost:8080/random/lg",
"http://localhost:8080/random/lg",
"http://localhost:8080/random/md",
"http://localhost:8080/random/md",
"http://localhost:8080/random/md",
"http://localhost:8080/random/md",
"http://localhost:8080/random/sm",
"http://localhost:8080/random/sm",
"http://localhost:8080/random/sm",
"http://localhost:8080/random/sm",
"http://localhost:8080/random/sm",
"http://localhost:8080/random/sm",
"http://localhost:8080/fake",
"http://localhost:8080/error",
"http://localhost:8080/nothing",
]
random_requester(urls, (1, 8)) # Randomly hit endpoints between 1 and 8 requests per second
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 809 |
# escape and replace text as needed
function sanitizefile(filelines)
content = ""
skips = 0
for line in filelines
if skips > 0
skips -= 1
continue
elseif contains(line, "<!-- START HTML -->")
content *= "```@raw html\n"
elseif contains(line, "<!-- END HTML -->")
content *= "```\n"
elseif contains(line, "<!-- REPLACE")
_, replace_value, _ = split(line, r"\{\{|\}\}")
content *= "$replace_value\n"
skips += 1
else
content *= "$line\n"
end
end
return content
end
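# e.g. a marker line `<!-- REPLACE {{## Title}} -->` emits "## Title" and skips the
# line that follows it, while the START/END HTML markers wrap that section in a
# Documenter `@raw html` fence.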
# generate index.md and move assets
readme = readlines("../README.md")
write("src/index.md", sanitizefile(readme))
cp("../oxygen.png", "./src/oxygen.png", force=true)
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 815 | using Documenter
using Kitten
makedocs(
sitename = "Kitten.jl",
format = Documenter.HTML(),
modules = [Kitten],
pages = [
"Overview" => "index.md",
"api.md",
"Manual" => [
"tutorial/first_steps.md",
"tutorial/request_types.md",
"tutorial/path_parameters.md",
"tutorial/query_parameters.md",
"tutorial/request_body.md",
"tutorial/cron_scheduling.md",
"tutorial/bigger_applications.md",
"tutorial/oauth2.md"
]
]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
repo = "github.com/JuliaKit/Kitten.jl.git",
push_preview = false
)
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 1486 | module Kitten
include("core.jl")
using .Core
include("instances.jl")
using .Instances
include("extensions/load.jl")
import HTTP: Request, Response
using .Core: Context, History, Server, Nullable
using .Core: GET, POST, PUT, DELETE, PATCH
const CONTEXT = Ref{Context}(Context())
import Base: get
include("methods.jl")
include("deprecated.jl")
macro oxidise()
quote
import Kitten
import Kitten: PACKAGE_DIR, Context, Nullable
import Kitten: GET, POST, PUT, DELETE, PATCH, STREAM, WEBSOCKET
const CONTEXT = Ref{Context}(Context())
include(joinpath(PACKAGE_DIR, "methods.jl"))
nothing # to hide last definition
end |> esc
end
export @oxidise, @get, @post, @put, @patch, @delete, @route,
@staticfiles, @dynamicfiles, @cron, @repeat, @stream, @websocket,
get, post, put, patch, delete, route, stream, websocket,
serve, serveparallel, terminate, internalrequest,
resetstate, instance, staticfiles, dynamicfiles,
# Util
redirect, queryparams, formdata, format_sse_message,
html, text, json, file, xml, js, css, binary,
# Extractors
Path, Query, Header, Json, JsonFragment, Form, Body, extract, validate,
# Docs
configdocs, mergeschema, setschema, getschema, router,
enabledocs, disabledocs, isdocsenabled,
# Tasks & Cron
starttasks, stoptasks, cleartasks,
startcronjobs, stopcronjobs, clearcronjobs,
# Common HTTP Types
Request, Response, Stream, WebSocket
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |
|
[
"MIT"
] | 0.1.0 | 9be647522c1dc531739aae8b9320cf6ae221e616 | code | 13560 | module AutoDoc
using HTTP
using Dates
using DataStructures
using Reexport
using RelocatableFolders
using ..Util: html, recursive_merge
using ..Constants
using ..AppContext: Context, Documenation
using ..Types: TaggedRoute, TaskDefinition, CronDefinition, Nullable, Param, isrequired
using ..Extractors: isextractor, extracttype, isreqparam
using ..Reflection: splitdef
export registerschema, swaggerhtml, redochtml, mergeschema
"""
mergeschema(route::String, customschema::Dict)
Merge the schema of a specific route
"""
function mergeschema(schema::Dict, route::String, customschema::Dict)
schema["paths"][route] = recursive_merge(get(schema["paths"], route, Dict()), customschema)
end
"""
mergeschema(customschema::Dict)
Merge the top-level autogenerated schema with a custom schema
"""
function mergeschema(schema::Dict, customschema::Dict)
updated_schema = recursive_merge(schema, customschema)
merge!(schema, updated_schema)
end
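# For example (a hypothetical sketch), attaching a description to a single route:
#   mergeschema(docs.schema, "/divide/{a}/{b}",
#       Dict("get" => Dict("description" => "Returns a / b")))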
"""
Returns the openapi equivalent of each Julia type
"""
function gettype(type::Type)::String
if type <: Bool
return "boolean"
elseif type <: AbstractFloat
return "number"
elseif type <: Integer
return "integer"
elseif type <: AbstractVector
return "array"
elseif type <: String || type == Date || type == DateTime
return "string"
elseif isstructtype(type)
return "object"
else
return "string"
end
end
"""
Returns the specific format type for a given parameter
ex.) DateTime(2022,1,1) => "date-time"
"""
function getformat(type::Type) :: Nullable{String}
if type <: AbstractFloat
if type == Float32
return "float"
elseif type == Float64
return "double"
end
elseif type <: Integer
if type == Int32
return "int32"
elseif type == Int64
return "int64"
end
elseif type == Date
return "date"
elseif type == DateTime
return "date-time"
end
return nothing
end
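# e.g. getformat(Float32) == "float", getformat(DateTime) == "date-time",
# and getformat(Int8) === nothing (no specific format)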
function getcomponent(name::AbstractString) :: String
return "#/components/schemas/$name"
end
function getcomponent(t::DataType) :: String
return getcomponent(string(nameof(t)))
end
function createparam(p::Param{T}, paramtype::String) :: Dict where {T}
schema = Dict("type" => gettype(p.type))
# Add ref if the type is a custom struct
if schema["type"] == "object"
schema["\$ref"] = getcomponent(p.type)
end
# Add optional format if it's relevant
format = getformat(p.type)
if !isnothing(format)
schema["format"] = format
end
# Add default value if it exists
if p.hasdefault
schema["default"] = string(p.default)
end
# path params are always required
param_required = paramtype == "path" ? true : isrequired(p)
param = Dict(
"in" => paramtype, # path, query, header (where the parameter is located)
"name" => String(p.name),
"required" => param_required,
"schema" => schema
)
return param
end
"""
This function helps format the individual parameters for each route in the openapi schema
"""
function formatparam!(params::Vector{Any}, p::Param{T}, paramtype::String) where T
    # Will need to flatten request extractors & append all properties to the schema
if isextractor(p) && isreqparam(p)
type = extracttype(p.type)
info = splitdef(type)
sig_names = OrderedSet{Symbol}(p.name for p in info.sig)
for name in sig_names
push!(params, createparam(info.sig_map[name], paramtype))
end
else
push!(params, createparam(p, paramtype))
end
end
"""
This function helps format the content object for each route in the openapi schema.
If similar body extractors are used, all schema's are included using an "allOf" relation.
The only exception to this is the text/plain case, which excepts the Body extractor.
If there are more than one Body extractor, the type defaults to string - since this is
the only way to represent multiple formats at the same time.
"""
function formatcontent(bodyparams::Vector) :: OrderedDict
body_refs = Dict{String,Vector{String}}()
body_types = Dict()
for p in bodyparams
inner_type = p.type |> extracttype
inner_type_name = inner_type |> nameof |> string
extractor_name = p.type |> nameof |> string
body_types[extractor_name] = gettype(inner_type)
if !is_custom_struct(inner_type)
continue
end
if !haskey(body_refs, extractor_name)
body_refs[extractor_name] = []
end
body_refs[extractor_name] = vcat(body_refs[extractor_name], getcomponent(inner_type_name))
end
jsonschema = collectschemarefs(body_refs, ["Json", "JsonFragment"])
jsonschema = merge(jsonschema, Dict("type" => "object"))
# The schema type for text/plain can vary unlike the other types
textschema = collectschemarefs(body_refs, ["Body"])
# If there are multiple Body extractors, default to string type
textschema_type = length(textschema["allOf"]) > 1 ? "string" : get(body_types, "Body", "string")
textschema = merge(textschema, Dict("type" => textschema_type))
formschema = collectschemarefs(body_refs, ["Form"])
formschema = merge(formschema, Dict("type" => "object"))
content = Dict(
"application/json" => Dict(
"schema" => jsonschema
),
"text/plain" => Dict(
"schema" => textschema
),
"application/x-www-form-urlencoded" => Dict(
"schema" => formschema
),
"application/xml" => Dict(
"schema" => Dict(
"type" => "object"
)
),
"multipart/form-data" => Dict(
"schema" => Dict(
"type" => "object",
"properties" => Dict(
"file" => Dict(
"type" => "string",
"format" => "binary"
)
),
"required" => ["file"]
)
)
)
##### Add Schemas to this route, with the preferred content type first #####
ordered_content = OrderedDict()
if !isempty(jsonschema["allOf"])
ordered_content["application/json"] = Dict("schema" => jsonschema)
end
if !isempty(textschema["allOf"])
ordered_content["text/plain"] = Dict("schema" => textschema)
end
if !isempty(formschema["allOf"])
ordered_content["application/x-www-form-urlencoded"] = Dict("schema" => formschema)
end
# Add all other content types (won't default to these, but they are available)
for (key, value) in content
if !haskey(ordered_content, key)
ordered_content[key] = value
end
end
return ordered_content
end
"""
Used to generate & register the schema for a specific endpoint
"""
function registerschema(
docs::Documenation,
path::String,
httpmethod::String,
parameters::Vector,
queryparams::Vector,
headers::Vector,
bodyparams::Vector,
returntype::Vector)
##### Add all the body parameters to the schema #####
schemas = Dict()
for p in bodyparams
inner_type = p.type |> extracttype
if is_custom_struct(inner_type)
convertobject!(inner_type, schemas)
end
end
components = Dict("components" => Dict("schemas" => schemas))
if !isempty(schemas)
mergeschema(docs.schema, components)
end
##### Append the parameter schema for the route #####
params = []
for (param_list, location) in [(parameters, "path"), (queryparams, "query"), (headers, "header")]
for p in param_list
formatparam!(params, p, location)
end
end
##### Set the schema for the body parameters #####
content = formatcontent(bodyparams)
# lookup if this route has any registered tags
if haskey(docs.taggedroutes, path) && httpmethod in docs.taggedroutes[path].httpmethods
tags = docs.taggedroutes[path].tags
else
tags = []
end
# Build the route schema
route = Dict(
"$(lowercase(httpmethod))" => Dict(
"tags" => tags,
"parameters" => params,
"responses" => Dict(
"200" => Dict("description" => "200 response"),
"500" => Dict("description" => "500 Server encountered a problem")
)
)
)
# Add a request body to the route if it's a POST, PUT, or PATCH request
if httpmethod in ["POST", "PUT", "PATCH"] || !isempty(bodyparams)
route[lowercase(httpmethod)]["requestBody"] = Dict(
# if any body param is required, mark the entire body as required
"required" => any(p -> isrequired(p), bodyparams),
"content" => content
)
end
# remove any special regex patterns from the path before adding this path to the schema
cleanedpath = replace(path, r"(?=:)(.*?)(?=}/)" => "")
mergeschema(docs.schema, cleanedpath, route)
end
function collectschemarefs(data::Dict, keys::Vector{String}; schematype="allOf")
refs = []
for key in keys
if haskey(data, key)
append!(refs, data[key])
end
end
return Dict("$schematype" => [ Dict("\$ref" => ref) for ref in refs ])
end
function is_custom_struct(T::Type)
return T.name.module ∉ (Base, Core) && (isstructtype(T) || isabstracttype(T))
end
# takes a struct and converts it into an openapi 3.0 compliant dictionary
function convertobject!(type::Type, schemas::Dict) :: Dict
typename = type |> nameof |> string
    # initialize this entry
obj = Dict("type" => "object", "properties" => Dict())
# parse out the fields of the type
info = splitdef(type)
# Make sure we have a unique set of names (in case of duplicate field names when parsing types)
# The same field names can show up as regular parameters and keyword parameters when the type is used with @kwdef
sig_names = OrderedSet{Symbol}(p.name for p in info.sig)
# loop over all unique fields
for name in sig_names
p = info.sig_map[name]
field_name = string(p.name)
current_type = p.type
current_name = string(nameof(current_type))
# Case 1: Recursively convert nested structs & register schemas
if is_custom_struct(current_type) && !haskey(schemas, current_name)
# Set the field to be a reference to the custom struct
obj["properties"][field_name] = Dict("\$ref" => getcomponent(current_name))
# Recursively convert nested structs
convertobject!(current_type, schemas)
# Case 2: Convert the individual fields of the current type to it's openapi equivalent
else
current_field = Dict("type" => gettype(current_type), "required" => isrequired(p))
# Add format if it exists
format = getformat(current_type)
if !isnothing(format)
current_field["format"] = format
end
# Add default value if it exists
if p.hasdefault
current_field["default"] = string(p.default)
end
# convert the current field
obj["properties"][field_name] = current_field
end
end
schemas[typename] = obj
return schemas
end
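# For a simple struct like `struct Person; name::String; age::Int8 end`, this
# produces roughly:
#   Dict("type" => "object", "properties" => Dict(
#       "name" => Dict("type" => "string",  "required" => true),
#       "age"  => Dict("type" => "integer", "required" => true)))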
"""
Read in a static file from the /data folder
"""
function readstaticfile(filepath::String)::String
path = joinpath(DATA_PATH, filepath)
return read(path, String)
end
function redochtml(schemapath::String, docspath::String) :: HTTP.Response
redocjs = readstaticfile("$REDOC_VERSION/redoc.standalone.js")
html("""
<!DOCTYPE html>
<html lang="en">
<head>
<title>Docs</title>
<meta charset="utf-8"/>
<meta name="description" content="Docs" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" type="image/x-icon" href="$docspath/metrics/favicon.ico">
</head>
<body>
<redoc spec-url="$schemapath"></redoc>
<script>$redocjs</script>
</body>
</html>
""")
end
"""
Return HTML page to render the autogenerated docs
"""
function swaggerhtml(schemapath::String, docspath::String) :: HTTP.Response
# load static content files
swaggerjs = readstaticfile("$SWAGGER_VERSION/swagger-ui-bundle.js")
swaggerstyles = readstaticfile("$SWAGGER_VERSION/swagger-ui.css")
html("""
<!DOCTYPE html>
<html lang="en">
<head>
<title>Docs</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="description" content="Docs" />
<style>$swaggerstyles</style>
<link rel="icon" type="image/x-icon" href="$docspath/metrics/favicon.ico">
</head>
<body>
<div id="swagger-ui"></div>
<script>$swaggerjs</script>
<script>
window.onload = () => {
window.ui = SwaggerUIBundle({
url: window.location.origin + "$schemapath",
dom_id: '#swagger-ui',
});
};
</script>
</body>
</html>
""")
end
end
| Kitten | https://github.com/JuliaKit/Kitten.jl.git |