| licenses (sequence, length 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, length 2-8) | text (string, length 25-67.1M) | package_name (string, length 2-41) | repo (string, length 33-86) |
---|---|---|---|---|---|---|---|---|
["MIT"] | 0.2.7 | e58d5904fa7ffa914a3eb60f8705e2ea3aaea1b9 | code | 183 | using Requires
function __init__()
@require Unitful="1986cc42-f94f-5a68-af5c-568840ba703d" begin
multiplier_type(::Unitful.AbstractQuantity{T}) where {T} = T
end
end
| LazyAlgebra | https://github.com/emmt/LazyAlgebra.jl.git |
|
["MIT"] | 0.2.7 | e58d5904fa7ffa914a3eb60f8705e2ea3aaea1b9 | code | 40300 | #
# lgemm.jl -
#
# Lazily Generalized Matrix-Matrix multiplication.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
"""
# Lazily Generalized Matrix-Matrix multiplication
```julia
lgemm([α=1,] [transA='N',] A, [transB='N',] B, Nc=2) -> C
```
yields `C = α*op(A)*op(B)` where `op(A)` is `A` if `transA` is `'N'`,
`transpose(A)` if `transA` is `'T'` and `adjoint(A)` if `transA` is `'C'` and
similarly for `op(B)` and `transB`. The expression `op(A)*op(B)` is a
matrix-matrix multiplication but interpreted by grouping consecutive dimensions
of `op(A)` and `op(B)` (see [`lgemv`](@ref) for explanations). Argument `Nc`
specifies the number of dimensions of the result.
The in-place version is called by:
```julia
lgemm!([α=1,] [transA='N',] A, [transB='N',] B, [β=0,] C) -> C
```
and overwrites the contents of `C` with `α*op(A)*op(B) + β*C`. Note that `C`
must not be aliased with `A` or `B`.
The multipliers `α` and `β` must both be specified or omitted; they can be any
scalar numbers but are respectively converted to `promote_eltype(A,B)` and
`eltype(C)` which may throw an `InexactError` exception.
See also: [`lgemv`](@ref), [`LinearAlgebra.BLAS.gemm`](@ref),
[`LinearAlgebra.BLAS.gemm!`](@ref).
"""
function lgemm(α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
Nc::Integer=2)
return _lgemm(Implementation(Val(:lgemm), α, transA, A, transB, B, Int(Nc)),
α, transA, A, transB, B, Int(Nc))
end
function lgemm!(α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
β::Number,
C::AbstractArray{<:Floats})
return _lgemm!(Implementation(Val(:lgemm), α, transA, A, transB, B, β, C),
α, transA, A, transB, B, β, C)
end
@doc @doc(lgemm) lgemm!
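# Usage sketch (assuming LazyAlgebra's exports are in scope): multiply two
# ordinary matrices with transposition and scaling.
#
#     A = randn(4, 3); B = randn(4, 5)
#     C = lgemm(2.0, 'T', A, 'N', B)   # C = 2*A'*B, a 3×5 matrix
#     lgemm!(1, 'T', A, 'N', B, 0, C)  # in-place: overwrite C with A'*B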
# Best implementations for lgemm and lgemm!
for (atyp, eltyp) in ((Real, BlasReal),
(Number, BlasComplex))
@eval begin
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::Matrix{T},
transB::Char,
B::Matrix{T},
Nc::Int) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::Matrix{T},
transB::Char,
B::Matrix{T},
β::$atyp,
C::Matrix{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::AbstractMatrix{T},
transB::Char,
B::AbstractMatrix{T},
Nc::Int) where {T<:$eltyp}
return (is_flat_array(A, B) ? Blas() : Basic())
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::AbstractMatrix{T},
transB::Char,
B::AbstractMatrix{T},
β::$atyp,
C::AbstractMatrix{T}) where {T<:$eltyp}
return (is_flat_array(A, B, C) ? Blas() : Basic())
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::Array{T},
transB::Char,
B::Array{T},
Nc::Int) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::Array{T},
transB::Char,
B::Array{T},
β::$atyp,
C::Array{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::AbstractArray{T},
transB::Char,
B::AbstractArray{T},
Nc::Int) where {T<:$eltyp}
return (is_flat_array(A, B) ? Blas() : Basic())
end
function Implementation(::Val{:lgemm},
α::$atyp,
transA::Char,
A::AbstractArray{T},
transB::Char,
B::AbstractArray{T},
β::$atyp,
C::AbstractArray{T}) where {T<:$eltyp}
return (is_flat_array(A, B, C) ? Blas() : Basic())
end
end
end
function Implementation(::Val{:lgemm},
α::Number,
transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
Nc::Int)
return Basic()
end
function Implementation(::Val{:lgemm},
α::Number,
transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
β::Number,
C::AbstractMatrix)
return Basic()
end
function Implementation(::Val{:lgemm},
α::Number,
transA::Char,
A::AbstractArray,
transB::Char,
B::AbstractArray,
Nc::Int)
return (is_flat_array(A, B) ? Linear() : Generic())
end
function Implementation(::Val{:lgemm},
α::Number,
transA::Char,
A::AbstractArray,
transB::Char,
B::AbstractArray,
β::Number,
C::AbstractArray)
return (is_flat_array(A, B, C) ? Linear() : Generic())
end
"""
```julia
_lgemm_ndims(Na, Nb, Nc) -> Ni, Nj, Nk
```
yields the number of consecutive indices involved in meta-indices `i`, `j` and
`k` for `lgemm` and `lgemm!` methods which compute:
```
C[i,j] = α⋅sum_k op(A)[i,k]*op(B)[k,j] + β⋅C[i,j]
```
Here `Na`, `Nb` and `Nc` are the respective number of dimensions of `A`, `B`
and `C`. Calling this method also checks the compatibility of the number of
dimensions.
"""
function _lgemm_ndims(Na::Int, Nb::Int, Nc::Int)
#
# The relations are:
#
# Na = Ni + Nk Ni = (Na + Nc - Nb)/2
# Nb = Nk + Nj <==> Nj = (Nc + Nb - Na)/2
# Nc = Ni + Nj Nk = (Nb + Na - Nc)/2
#
# Note: Ni ≥ 1 and Nj ≥ 1 and Nk ≥ 1 implies that Na ≥ 2 and Nb ≥ 2
# and Nc ≥ 2.
#
Li = Na + Nc - Nb
Lj = Nc + Nb - Na
Lk = Nb + Na - Nc
if Li > 0 && iseven(Li) && Lj > 0 && iseven(Lj) && Lk > 0 && iseven(Lk)
return Li >> 1, Lj >> 1, Lk >> 1
end
incompatible_dimensions()
end
# BLAS implementations for (generalized) matrices.
# Linear indexing is assumed (this must have been checked before).
function _lgemm(::Blas,
α::Number,
transA::Char,
A::AbstractArray{T},
transB::Char,
B::AbstractArray{T},
Nc::Int) where {T<:BlasFloat}
m, n, p, shape = _lgemm_dims(transA, A, transB, B, Nc)
return _blas_lgemm!(m, n, p, convert(T, α), transA, A,
transB, B, zero(T), Array{T}(undef, shape))
end
function _lgemm!(::Blas,
α::Number,
transA::Char,
A::AbstractArray{T},
transB::Char,
B::AbstractArray{T},
β::Number,
C::AbstractArray{T}) where {T<:BlasFloat}
m, n, p = _lgemm_dims(transA, A, transB, B, C)
return _blas_lgemm!(m, n, p, convert(T, α), transA, A,
transB, B, convert(T, β), C)
end
# Julia implementations for (generalized) matrices.
# Linear indexing is assumed (this must have been checked before).
function _lgemm(::Linear,
α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
Nc::Int)
# FIXME: check and improve type stability
m, n, p, shape = _lgemm_dims(transA, A, transB, B, Nc)
T = _lgemm_type(α, A, B)
return _linear_lgemm!(m, n, p,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(0, T),
Array{T}(undef, shape))
end
function _lgemm!(::Linear,
α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
β::Number,
C::AbstractArray{<:Floats})
m, n, p = _lgemm_dims(transA, A, transB, B, C)
return _linear_lgemm!(m, n, p,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(β, C), C)
end
# Julia implementations for any kind of abstract matrices.
function _lgemm(::Basic,
α::Number,
transA::Char,
A::AbstractMatrix{<:Floats},
transB::Char,
B::AbstractMatrix{<:Floats},
Nc::Int)
I, J, K = _lgemm_indices(transA, A, transB, B, Nc)
T = _lgemm_type(α, A, B)
return _generic_lgemm!(I, J, K,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(0, T),
similar(Array{T}, (I, J)))
end
function _lgemm!(::Basic,
α::Number,
transA::Char,
A::AbstractMatrix{<:Floats},
transB::Char,
B::AbstractMatrix{<:Floats},
β::Number,
C::AbstractMatrix{<:Floats})
I, J, K = _lgemm_indices(transA, A, transB, B, C)
return _generic_lgemm!(I, J, K,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(β, C), C)
end
# Generic Julia implementation.
function _lgemm(::Generic,
α::Number,
transA::Char,
A::AbstractMatrix{<:Floats},
transB::Char,
B::AbstractMatrix{<:Floats},
Nc::Int)
I, J, K = _lgemm_indices(transA, A, transB, B, Nc)
T = _lgemm_type(α, A, B)
return _generic_lgemm!(I, J, K,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(0, T),
similar(Array{T}, (I, J)))
end
function _lgemm!(::Generic,
α::Number,
transA::Char,
A::AbstractMatrix{<:Floats},
transB::Char,
B::AbstractMatrix{<:Floats},
β::Number,
C::AbstractMatrix{<:Floats})
I, J, K = _lgemm_indices(transA, A, transB, B, C)
return _generic_lgemm!(I, J, K,
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(β, C), C)
end
function _lgemm(::Generic,
α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
Nc::Int)
I, J, K = _lgemm_indices(transA, A, transB, B, Nc)
T = _lgemm_type(α, A, B)
return _generic_lgemm!(cartesian_indices(I),
cartesian_indices(J),
cartesian_indices(K),
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(0, T),
similar(Array{T}, (I..., J...)))
end
function _lgemm!(::Generic,
α::Number,
transA::Char,
A::AbstractArray{<:Floats},
transB::Char,
B::AbstractArray{<:Floats},
β::Number,
C::AbstractArray{<:Floats})
I, J, K = _lgemm_indices(transA, A, transB, B, C)
return _generic_lgemm!(cartesian_indices(I),
cartesian_indices(J),
cartesian_indices(K),
promote_multiplier(α, A, B), transA, A,
transB, B,
promote_multiplier(β, C), C)
end
#
# Call low-level BLAS version. The differences with LinearAlgebra.BLAS.gemm!
# are that inputs are assumed to be flat arrays (see is_flat_array) and that
# multipliers are automatically converted.
#
for (f, T) in ((:dgemm_, Float64),
(:sgemm_, Float32),
(:zgemm_, ComplexF64),
(:cgemm_, ComplexF32))
@eval begin
#
# FORTRAN prototype:
#
# SUBROUTINE ${pfx}GEMM(TRANSA,TRANSB,M,N,K,ALPHA,A,LDA,B,LDB,BETA,
# C,LDC)
# ${T} ALPHA,BETA
# INTEGER M,N,K,LDA,LDB,LDC
# CHARACTER TRANSA,TRANSB
# ${T} A(LDA,*),B(LDB,*),C(LDC,*)
#
# Scalar arguments, α and β, can just be `Number` and integer arguments
# can just be `Integer` but we want to keep the signature strict
# because it is a low-level private method.
#
function _blas_lgemm!(m::Int, n::Int, p::Int, α::($T),
transA::Char, A::AbstractArray{$T},
transB::Char, B::AbstractArray{$T},
β::($T), C::AbstractArray{$T})
lda = (transA == 'N' ? m : p)
ldb = (transB == 'N' ? p : n)
ldc = m
ccall((@blasfunc($f), libblas), Cvoid,
(Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt},
Ref{BlasInt}, Ref{$T}, Ptr{$T}, Ref{BlasInt},
Ptr{$T}, Ref{BlasInt}, Ref{$T}, Ptr{$T},
Ref{BlasInt}),
transA, transB, m, n, p, α, A, lda, B, ldb, β, C, ldc)
return C
end
end
end
#
# This method is based on the reference BLAS level 3 routine ZGEMM of LAPACK
# (version 3.7.0). It is assumed that arrays A, B and C are flat arrays which
# can be linearly indexed. Arguments are assumed to be correct, no checking is
# done.
#
function _linear_lgemm!(m::Int,
n::Int,
p::Int,
α::Number,
transA::Char,
A::AbstractArray{Ta,Na},
transB::Char,
B::AbstractArray{Tb,Nb},
β::Number,
C::AbstractArray{Tc,Nc}) where {Ta<:Floats,Na,
Tb<:Floats,Nb,
Tc<:Floats,Nc}
#
# Quick return if possible.
#
if m > 0 && n > 0 && (β != 1 || (p > 0 && α != 0))
T = promote_type(Ta, Tb)
if α == 0
#
# Quick computations when α = 0.
#
if β == 0
@inbounds @simd for k in eachindex(C)
C[k] = zero(Tc)
end
elseif β != 1
@inbounds @simd for k in eachindex(C)
C[k] *= β
end
end
elseif transB == 'N'
if transA == 'N'
#
# Form C := α*A*B + β*C.
#
@inbounds for j in 1:n
if β == 0
@simd for i in 1:m
C[m*(j - 1) + i] = zero(Tc)
end
elseif β != 1
@simd for i in 1:m
C[m*(j - 1) + i] *= β
end
end
for k in 1:p
temp = α*B[p*(j - 1) + k]
if temp != zero(temp)
@simd for i in 1:m
C[m*(j - 1) + i] += temp*A[m*(k - 1) + i]
end
end
end
end
elseif Ta <: Real || transA == 'T'
#
# Form C := α*A^T*B + β*C
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += A[p*(i - 1) + k]*B[p*(j - 1) + k]
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
else
#
# Form C := α*A^H*B + β*C.
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += conj(A[p*(i - 1) + k])*B[p*(j - 1) + k]
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
end
elseif transA == 'N'
if Tb <: Real || transB == 'T'
#
# Form C := α*A*B^T + β*C
#
@inbounds for j in 1:n
if β == 0
@simd for i in 1:m
C[m*(j - 1) + i] = zero(Tc)
end
elseif β != 1
@simd for i in 1:m
C[m*(j - 1) + i] *= β
end
end
for k in 1:p
temp = α*B[n*(k - 1) + j]
if temp != zero(temp)
@simd for i in 1:m
C[m*(j - 1) + i] += temp*A[m*(k - 1) + i]
end
end
end
end
else
#
# Form C := α*A*B^H + β*C.
#
@inbounds for j in 1:n
if β == 0
@simd for i in 1:m
C[m*(j - 1) + i] = zero(Tc)
end
elseif β != 1
@simd for i in 1:m
C[m*(j - 1) + i] *= β
end
end
for k in 1:p
temp = α*conj(B[n*(k - 1) + j])
if temp != zero(temp)
@simd for i in 1:m
C[m*(j - 1) + i] += temp*A[m*(k - 1) + i]
end
end
end
end
end
elseif Ta <: Real || transA == 'T'
if Tb <: Real || transB == 'T'
#
# Form C := α*A^T*B^T + β*C
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += A[p*(i - 1) + k]*B[n*(k - 1) + j]
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
else
#
# Form C := α*A^T*B^H + β*C
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += A[p*(i - 1) + k]*conj(B[n*(k - 1) + j])
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
end
else
if Tb <: Real || transB == 'T'
#
# Form C := α*A^H*B^T + β*C
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += conj(A[p*(i - 1) + k])*B[n*(k - 1) + j]
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
else
#
# Form C := α*A^H*B^H + β*C.
#
@inbounds for j in 1:n
for i in 1:m
temp = zero(T)
@simd for k in 1:p
temp += conj(A[p*(i - 1) + k])*conj(B[n*(k - 1) + j])
end
C[m*(j - 1) + i] = (β == 0 ? α*temp : α*temp + β*C[m*(j - 1) + i])
end
end
end
end
end
return C
end
#
# This method is based on the reference BLAS level 3 routine ZGEMM of LAPACK
# (version 3.7.0). Arguments A, B and C are any kind of arrays indexed by the
# Cartesian indices in I, J, and K. Arguments are assumed to be correct, no
# checking is done.
#
function _generic_lgemm!(I, J, K,
α::Number,
transA::Char,
A::AbstractArray{Ta,Na},
transB::Char,
B::AbstractArray{Tb,Nb},
β::Number,
C::AbstractArray{Tc,Nc}) where {Ta<:Floats,Na,
Tb<:Floats,Nb,
Tc<:Floats,Nc}
#
# Quick return if possible.
#
if length(I) > 0 && length(J) > 0 && (β != 1 || (length(K) > 0 && α != 0))
T = promote_type(Ta, Tb)
if α == 0
#
# Quick computations when α = 0.
#
if β == 0
@inbounds @simd for k in eachindex(C)
C[k] = zero(Tc)
end
elseif β != 1
@inbounds @simd for k in eachindex(C)
C[k] *= β
end
end
elseif transB == 'N'
if transA == 'N'
#
# Form C := α*A*B + β*C.
#
@inbounds for j in J
if β == 0
@simd for i in I
C[i,j] = zero(Tc)
end
elseif β != 1
@simd for i in I
C[i,j] *= β
end
end
for k in K
temp = α*B[k,j]
if temp != zero(temp)
@simd for i in I
C[i,j] += temp*A[i,k]
end
end
end
end
elseif Ta <: Real || transA == 'T'
#
# Form C := α*A^T*B + β*C
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += A[k,i]*B[k,j]
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
else
#
# Form C := α*A^H*B + β*C.
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += conj(A[k,i])*B[k,j]
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
end
elseif transA == 'N'
if Tb <: Real || transB == 'T'
#
# Form C := α*A*B^T + β*C
#
@inbounds for j in J
if β == 0
@simd for i in I
C[i,j] = zero(Tc)
end
elseif β != 1
@simd for i in I
C[i,j] *= β
end
end
for k in K
temp = α*B[j,k]
if temp != zero(temp)
@simd for i in I
C[i,j] += temp*A[i,k]
end
end
end
end
else
#
# Form C := α*A*B^H + β*C.
#
@inbounds for j in J
if β == 0
@simd for i in I
C[i,j] = zero(Tc)
end
elseif β != 1
@simd for i in I
C[i,j] *= β
end
end
for k in K
temp = α*conj(B[j,k])
if temp != zero(temp)
@simd for i in I
C[i,j] += temp*A[i,k]
end
end
end
end
end
elseif Ta <: Real || transA == 'T'
if Tb <: Real || transB == 'T'
#
# Form C := α*A^T*B^T + β*C
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += A[k,i]*B[j,k]
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
else
#
# Form C := α*A^T*B^H + β*C
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += A[k,i]*conj(B[j,k])
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
end
else
if Tb <: Real || transB == 'T'
#
# Form C := α*A^H*B^T + β*C
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += conj(A[k,i])*B[j,k]
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
else
#
# Form C := α*A^H*B^H + β*C.
#
@inbounds for j in J
for i in I
temp = zero(T)
@simd for k in K
temp += conj(A[k,i])*conj(B[j,k])
end
C[i,j] = (β == 0 ? α*temp : α*temp + β*C[i,j])
end
end
end
end
end
return C
end
#
# This method yields the type of the elements for the result of lgemm.
#
_lgemm_type(α::Real, A::AbstractArray, B::AbstractArray) =
promote_eltype(A, B)
_lgemm_type(α::Complex, A::AbstractArray, B::AbstractArray) =
complex(promote_eltype(A, B))
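# For instance (a sketch, assuming promote_eltype yields the promoted element
# type of its array arguments):
#
#     _lgemm_type(1.0,   zeros(Float32, 2, 2), zeros(Float64, 2, 2))  # -> Float64
#     _lgemm_type(1.0im, zeros(Float32, 2, 2), zeros(Float32, 2, 2))  # -> ComplexF32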
#
# This method yields the lengths of the meta-dimensions for lgemm assuming
# linear indexing and checks the arguments.
#
@inline function _lgemm_dims(transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
Nc::Int)
Nc == 2 || incompatible_dimensions()
lda = size(A, 1)
ldb = size(B, 1)
if transA == 'N'
m, p = lda, size(A, 2)
elseif transA == 'T' || transA == 'C'
p, m = lda, size(A, 2)
else
invalid_transpose_character()
end
if transB == 'N'
ldb == p || incompatible_dimensions()
n = size(B, 2)
elseif transB == 'T' || transB == 'C'
size(B, 2) == p || incompatible_dimensions()
n = ldb
else
invalid_transpose_character()
end
return m, n, p, (m, n)
end
#
# Idem for general arrays.
#
function _lgemm_dims(transA::Char,
A::AbstractArray{<:Any,Na},
transB::Char,
B::AbstractArray{<:Any,Nb},
Nc::Int) where {Na,Nb}
nota = (transA == 'N')
notb = (transB == 'N')
if ((!nota && transA != 'T' && transA != 'C') ||
(!notb && transB != 'T' && transB != 'C'))
invalid_transpose_character()
end
Ni, Nj, Nk = _lgemm_ndims(Na, Nb, Nc)
@inbounds begin
p = 1
if nota
if notb
# C[i,j] = sum_k A[i,k]*B[k,j]
for d in 1:Nk
dim = size(A, d + Ni)
size(B, d) == dim || incompatible_dimensions()
p *= dim
end
shape = ntuple(d -> d ≤ Ni ? size(A, d) : size(B, d + (Nk - Ni)), Nc)
else
# C[i,j] = sum_k A[i,k]*B[j,k]
for d in 1:Nk
dim = size(A, d + Ni)
size(B, d + Nj) == dim || incompatible_dimensions()
p *= dim
end
shape = ntuple(d -> d ≤ Ni ? size(A, d) : size(B, d - Ni), Nc)
end
else
if notb
# C[i,j] = sum_k A[k,i]*B[k,j]
for d in 1:Nk
dim = size(A, d)
size(B, d) == dim || incompatible_dimensions()
p *= dim
end
shape = ntuple(d -> d ≤ Ni ? size(A, d + Nk) : size(B, d + (Nk - Ni)), Nc)
else
# C[i,j] = sum_k A[k,i]*B[j,k]
for d in 1:Nk
dim = size(A, d)
size(B, d + Nj) == dim || incompatible_dimensions()
p *= dim
end
shape = ntuple(d -> d ≤ Ni ? size(A, d + Nk) : size(B, d - Ni), Nc)
end
end
m = 1
for d in 1:Ni
m *= shape[d]
end
n = 1
for d in Ni+1:Ni+Nj
n *= shape[d]
end
return m, n, p, shape
end
end
#
# This method yields the lengths of the meta-dimensions for lgemm! assuming
# linear indexing and checks the arguments.
#
@inline function _lgemm_dims(transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
C::AbstractMatrix)
m, n = size(C, 1), size(C, 2)
adim1, adim2 = size(A, 1), size(A, 2)
bdim1, bdim2 = size(B, 1), size(B, 2)
if transA == 'N'
adim1 == m || incompatible_dimensions()
p = adim2
elseif transA == 'T' || transA == 'C'
adim2 == m || incompatible_dimensions()
p = adim1
else
invalid_transpose_character()
end
if transB == 'N'
(bdim1 == p && bdim2 == n) || incompatible_dimensions()
elseif transB == 'T' || transB == 'C'
(bdim1 == n && bdim2 == p) || incompatible_dimensions()
else
invalid_transpose_character()
end
return m, n, p
end
#
# Idem for general arrays.
#
function _lgemm_dims(transA::Char,
A::AbstractArray{<:Any,Na},
transB::Char,
B::AbstractArray{<:Any,Nb},
C::AbstractArray{<:Any,Nc}) where {Na,Nb,Nc}
nota = (transA == 'N')
notb = (transB == 'N')
if ((!nota && transA != 'T' && transA != 'C') ||
(!notb && transB != 'T' && transB != 'C'))
invalid_transpose_character()
end
Ni, Nj, Nk = _lgemm_ndims(Na, Nb, Nc)
@inbounds begin
m = p = 1
if nota
for d in 1:Ni
dim = size(C, d)
size(A, d) == dim || incompatible_dimensions()
m *= dim
end
if notb
# C[i,j] = sum_k A[i,k]*B[k,j]
for d in 1:Nk
dim = size(A, d + Ni)
size(B, d) == dim || incompatible_dimensions()
p *= dim
end
else
# C[i,j] = sum_k A[i,k]*B[j,k]
for d in 1:Nk
dim = size(A, d + Ni)
size(B, d + Nj) == dim || incompatible_dimensions()
p *= dim
end
end
else
for d in 1:Ni
dim = size(C, d)
size(A, d + Nk) == dim || incompatible_dimensions()
m *= dim
end
if notb
# C[i,j] = sum_k A[k,i]*B[k,j]
for d in 1:Nk
dim = size(A, d)
size(B, d) == dim || incompatible_dimensions()
p *= dim
end
else
# C[i,j] = sum_k A[k,i]*B[j,k]
for d in 1:Nk
dim = size(A, d)
size(B, d + Nj) == dim || incompatible_dimensions()
p *= dim
end
end
end
n = 1
if notb
for d in 1:Nj
dim = size(C, d + Ni)
size(B, d + Nk) == dim || incompatible_dimensions()
n *= dim
end
else
for d in 1:Nj
dim = size(C, d + Ni)
size(B, d) == dim || incompatible_dimensions()
n *= dim
end
end
return m, n, p
end
end
#
# This method yields the indices of the meta-dimensions for lgemm and checks the
# arguments.
#
@inline function _lgemm_indices(transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
Nc::Int)
Nc == 2 || incompatible_dimensions()
if transA == 'N'
I, K = axes(A, 1), axes(A, 2)
elseif transA == 'T' || transA == 'C'
K, I = axes(A, 1), axes(A, 2)
else
invalid_transpose_character()
end
if transB == 'N'
axes(B, 1) == K || incompatible_dimensions()
J = axes(B, 2)
elseif transB == 'T' || transB == 'C'
axes(B, 2) == K || incompatible_dimensions()
J = axes(B, 1)
else
invalid_transpose_character()
end
return I, J, K
end
#
# Idem for general arrays.
#
function _lgemm_indices(transA::Char,
A::AbstractArray{<:Any,Na},
transB::Char,
B::AbstractArray{<:Any,Nb},
Nc::Int) where {Na,Nb}
nota = (transA == 'N')
notb = (transB == 'N')
if ((!nota && transA != 'T' && transA != 'C') ||
(!notb && transB != 'T' && transB != 'C'))
invalid_transpose_character()
end
Ni, Nj, Nk = _lgemm_ndims(Na, Nb, Nc)
@inbounds begin
if nota
I = ntuple(d -> axes(A, d), Ni)
K = ntuple(d -> axes(A, d + Ni), Nk)
else
I = ntuple(d -> axes(A, d + Nk), Ni)
K = ntuple(d -> axes(A, d), Nk)
end
if notb
for d in 1:Nk
axes(B, d) == K[d] || incompatible_dimensions()
end
J = ntuple(d -> axes(B, d + Nk), Nj)
else
for d in 1:Nk
axes(B, d + Nj) == K[d] || incompatible_dimensions()
end
J = ntuple(d -> axes(B, d), Nj)
end
return I, J, K
end
end
#
# This method yields the indices of the meta-dimensions for lgemm! and checks the
# arguments.
#
@inline function _lgemm_indices(transA::Char,
A::AbstractMatrix,
transB::Char,
B::AbstractMatrix,
C::AbstractMatrix)
I, J = axes(C, 1), axes(C, 2)
if transA == 'N'
axes(A, 1) == I || incompatible_dimensions()
K = axes(A, 2)
elseif transA == 'T' || transA == 'C'
axes(A, 2) == I || incompatible_dimensions()
K = axes(A, 1)
else
invalid_transpose_character()
end
if transB == 'N'
(axes(B, 1) == K && axes(B, 2) == J) || incompatible_dimensions()
elseif transB == 'T' || transB == 'C'
(axes(B, 1) == J && axes(B, 2) == K) || incompatible_dimensions()
else
invalid_transpose_character()
end
return I, J, K
end
#
# Idem for general arrays.
#
function _lgemm_indices(transA::Char,
A::AbstractArray{<:Any,Na},
transB::Char,
B::AbstractArray{<:Any,Nb},
C::AbstractArray{<:Any,Nc}) where {Na,Nb,Nc}
nota = (transA == 'N')
notb = (transB == 'N')
if ((!nota && transA != 'T' && transA != 'C') ||
(!notb && transB != 'T' && transB != 'C'))
invalid_transpose_character()
end
Ni, Nj, Nk = _lgemm_ndims(Na, Nb, Nc)
@inbounds begin
I = ntuple(d -> axes(C, d), Ni)
J = ntuple(d -> axes(C, d + Ni), Nj)
if nota
for d in 1:Ni
axes(A, d) == I[d] || incompatible_dimensions()
end
K = ntuple(d -> axes(A, d + Ni), Nk)
else
for d in 1:Ni
axes(A, d + Nk) == I[d] || incompatible_dimensions()
end
K = ntuple(d -> axes(A, d), Nk)
end
if notb
for d in 1:Nk
axes(B, d) == K[d] || incompatible_dimensions()
end
for d in 1:Nj
axes(B, d + Nk) == J[d] || incompatible_dimensions()
end
else
for d in 1:Nk
axes(B, d + Nj) == K[d] || incompatible_dimensions()
end
for d in 1:Nj
axes(B, d) == J[d] || incompatible_dimensions()
end
end
return I, J, K
end
end
| LazyAlgebra | https://github.com/emmt/LazyAlgebra.jl.git |
|
["MIT"] | 0.2.7 | e58d5904fa7ffa914a3eb60f8705e2ea3aaea1b9 | code | 24799 | #
# lgemv.jl -
#
# Lazily Generalized Matrix-Vector multiplication.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
"""
# Lazily Generalized Matrix-Vector multiplication
```julia
lgemv([α=1,] [tr='N',] A, x) -> y
```
yields `y = α*op(A)*x` where `op(A)` is `A` if `tr` is `'N'`, `transpose(A)` if
`tr` is `'T'` and `adjoint(A)` if `tr` is `'C'`. The expression `op(A)*x` is a
matrix-vector multiplication but interpreted by grouping consecutive dimensions
of `A` and `x` as follows:
- if `tr` is `'N'`, the trailing dimensions of `x` must match those of `A` and
the leading dimensions of `A` are those of the result `y`;
- if `tr` is `'T'` or `'C'`, the leading dimensions of `x` must match those of
`A` and the trailing dimensions of `A` are those of the result `y`.
The in-place version is called by:
```julia
lgemv!([α=1,] [tr='N',] A, x, [β=0,] y) -> y
```
and overwrites the contents of `y` with `α*op(A)*x + β*y`. Note that `x` and
`y` must not be aliased.
The multipliers `α` and `β` must both be specified or omitted; they can be any
scalar numbers but are respectively converted to `promote_eltype(A,x)` and
`eltype(y)` which may throw an `InexactError` exception.
See also: [`lgemm`](@ref), [`LinearAlgebra.BLAS.gemv`](@ref),
[`LinearAlgebra.BLAS.gemv!`](@ref).
"""
function lgemv(α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats})
return _lgemv(Implementation(Val(:lgemv), α, trans, A, x),
α, trans, A, x)
end
function lgemv!(α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats},
β::Number,
y::AbstractArray{<:Floats})
return _lgemv!(Implementation(Val(:lgemv), α, trans, A, x, β, y),
α, trans, A, x, β, y)
end
@doc @doc(lgemv) lgemv!
lgemv(A::AbstractArray, x::AbstractArray) = lgemv(1, 'N', A, x)
lgemv!(A::AbstractArray, x::AbstractArray, y::AbstractArray) =
lgemv!(1, 'N', A, x, 0, y)
lgemv(trans::Char, A::AbstractArray, x::AbstractArray) =
lgemv(1, trans, A, x)
lgemv!(trans::Char, A::AbstractArray, x::AbstractArray,
y::AbstractArray) = lgemv!(1, trans, A, x, 0, y)
lgemv(α::Number, A::AbstractArray, x::AbstractArray) = lgemv(α, 'N', A, x)
lgemv!(α::Number, A::AbstractArray, x::AbstractArray, β::Number,
y::AbstractArray) = lgemv!(α, 'N', A, x, β, y)
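# Usage sketch (assuming LazyAlgebra's exports are in scope): the wrappers
# above let the multiplier and/or the transpose character be omitted.
#
#     A = randn(4, 3); x = randn(3)
#     y = lgemv(A, x)                  # y = A*x
#     z = lgemv(2, 'T', A, randn(4))   # z = 2*A'*x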
@static if isdefined(LinearAlgebra, :Transpose)
lgemv(A::LinearAlgebra.Transpose, x::AbstractArray) =
lgemv(1, 'T', A.parent, x)
lgemv!(A::LinearAlgebra.Transpose, x::AbstractArray, y::AbstractArray) =
lgemv!(1, 'T', A.parent, x, 0, y)
lgemv(α::Number, A::LinearAlgebra.Transpose, x::AbstractArray) =
lgemv(α, 'T', A.parent, x)
lgemv!(α::Number, A::LinearAlgebra.Transpose, x::AbstractArray,
β::Number, y::AbstractArray) = lgemv!(α, 'T', A.parent, x, β, y)
end
@static if isdefined(LinearAlgebra, :Adjoint)
lgemv(A::LinearAlgebra.Adjoint, x::AbstractArray) =
lgemv(1, 'C', A.parent, x)
lgemv!(A::LinearAlgebra.Adjoint, x::AbstractArray, y::AbstractArray) =
lgemv!(1, 'C', A.parent, x, 0, y)
lgemv(α::Number, A::LinearAlgebra.Adjoint, x::AbstractArray) =
lgemv(α, 'C', A.parent, x)
lgemv!(α::Number, A::LinearAlgebra.Adjoint, x::AbstractArray, β::Number,
y::AbstractArray) = lgemv!(α, 'C', A.parent, x, β, y)
end
# Best implementations for lgemv and lgemv!
for (atyp, eltyp) in ((Real, BlasReal),
(Number, BlasComplex))
@eval begin
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::Matrix{T},
x::Vector{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::Matrix{T},
x::Vector{T},
β::$atyp,
y::Vector{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::AbstractMatrix{T},
x::AbstractVector{T}) where {T<:$eltyp}
return (is_flat_array(A, x) ? Blas() : Basic())
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::AbstractMatrix{T},
x::AbstractVector{T},
β::$atyp,
y::AbstractVector{T}) where {T<:$eltyp}
return (is_flat_array(A, x, y) ? Blas() : Basic())
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::Array{T},
x::Array{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::Array{T},
x::Array{T},
β::$atyp,
y::Array{T}) where {T<:$eltyp}
return Blas()
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::AbstractArray{T},
x::AbstractArray{T}) where {T<:$eltyp}
return (is_flat_array(A, x) ? Blas() : Basic())
end
function Implementation(::Val{:lgemv},
α::$atyp,
trans::Char,
A::AbstractArray{T},
x::AbstractArray{T},
β::$atyp,
y::AbstractArray{T}) where {T<:$eltyp}
return (is_flat_array(A, x, y) ? Blas() : Basic())
end
end
end
function Implementation(::Val{:lgemv},
α::Number,
trans::Char,
A::AbstractMatrix,
x::AbstractVector)
return Basic()
end
function Implementation(::Val{:lgemv},
α::Number,
trans::Char,
A::AbstractMatrix,
x::AbstractVector,
β::Number,
y::AbstractVector)
return Basic()
end
function Implementation(::Val{:lgemv},
α::Number,
trans::Char,
A::AbstractArray,
x::AbstractArray)
return (is_flat_array(A, x) ? Linear() : Generic())
end
function Implementation(::Val{:lgemv},
α::Number,
trans::Char,
A::AbstractArray,
x::AbstractArray,
β::Number,
y::AbstractArray)
return (is_flat_array(A, x, y) ? Linear() : Generic())
end
# BLAS implementations for (generalized) matrices and vectors.
# Linear indexing is assumed (this must have been checked before).
function _lgemv(::Blas,
α::Number,
trans::Char,
A::AbstractArray{T},
x::AbstractArray{T}) where {T<:BlasFloat}
nrows, ncols, shape = _lgemv_dims(trans, A, x)
return _blas_lgemv!(nrows, ncols, convert(T, α), trans, A, x,
zero(T), Array{T}(undef, shape))
end
function _lgemv!(::Blas,
α::Number,
trans::Char,
A::AbstractArray{T},
x::AbstractArray{T},
β::Number,
y::AbstractArray{T}) where {T<:BlasFloat}
nrows, ncols = _lgemv_dims(trans, A, x, y)
return _blas_lgemv!(nrows, ncols, convert(T, α), trans, A, x,
convert(T, β), y)
end
# Julia implementations for (generalized) matrices and vectors.
# Linear indexing is assumed (this must have been checked before).
function _lgemv(::Linear,
α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats})
nrows, ncols, shape = _lgemv_dims(trans, A, x)
T = _lgemv_type(α, A, x)
return _linear_lgemv!(nrows, ncols,
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(0, T), Array{T}(undef, shape))
end
function _lgemv!(::Linear,
α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats},
β::Number,
y::AbstractArray{<:Floats})
nrows, ncols = _lgemv_dims(trans, A, x, y)
return _linear_lgemv!(nrows, ncols,
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(β, y), y)
end
# Basic Julia implementations when vectors and matrices are, respectively, 1D
# and 2D arrays.
function _lgemv(::Basic,
α::Number,
trans::Char,
A::AbstractMatrix{<:Floats},
x::AbstractVector{<:Floats})
rows, cols = _lgemv_indices(trans, A, x)
T = _lgemv_type(α, A, x)
return _generic_lgemv!(rows, cols,
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(0, T),
similar(Array{T}, trans == 'N' ? rows : cols))
end
function _lgemv!(::Basic,
α::Number,
trans::Char,
A::AbstractMatrix{<:Floats},
x::AbstractVector{<:Floats},
β::Number,
y::AbstractVector{<:Floats})
rows, cols = _lgemv_indices(trans, A, x, y)
return _generic_lgemv!(rows, cols,
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(β, y), y)
end
# Generic implementations for any other cases.
function _lgemv(::Generic,
α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats})
rows, cols = _lgemv_indices(trans, A, x)
T = _lgemv_type(α, A, x)
return _generic_lgemv!(cartesian_indices(rows), cartesian_indices(cols),
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(0, T),
similar(Array{T}, trans == 'N' ? rows : cols))
end
function _lgemv!(::Generic,
α::Number,
trans::Char,
A::AbstractArray{<:Floats},
x::AbstractArray{<:Floats},
β::Number,
y::AbstractArray{<:Floats})
rows, cols = _lgemv_indices(trans, A, x, y)
return _generic_lgemv!(cartesian_indices(rows), cartesian_indices(cols),
promote_multiplier(α, A, x), trans, A, x,
promote_multiplier(β, y), y)
end
#
# Call low-level BLAS version. The differences with LinearAlgebra.BLAS.gemv!
# are that inputs are assumed to be flat arrays (see is_flat_array) and that
# multipliers are automatically converted.
#
for (f, T) in ((:dgemv_, Float64),
(:sgemv_, Float32),
(:zgemv_, ComplexF64),
(:cgemv_, ComplexF32))
@eval begin
#
# FORTRAN prototype:
# SUBROUTINE ${pfx}GEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY)
# ${T} ALPHA,BETA
# INTEGER M,N,LDA,INCX,INCY
# CHARACTER TRANS
# ${T} A(LDA,*),X(*),Y(*)
#
# Scalar arguments, α and β, can just be `Number` and integer arguments
# can just be `Integer` but we want to keep the signature strict
# because it is a low-level private method.
#
function _blas_lgemv!(nrows::Int,
ncols::Int,
α::($T),
trans::Char,
A::AbstractArray{$T},
x::AbstractArray{$T},
β::($T),
y::AbstractArray{$T})
#@static if DEBUG
# (length(x) == (trans == 'N' ? ncols : nrows) &&
# length(y) == (trans == 'N' ? nrows : ncols) &&
# length(A) == nrows*ncols) ||
# bad_size("incompatible sizes")
#end
ccall((@blasfunc($f), libblas), Cvoid,
(Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{$T},
Ptr{$T}, Ref{BlasInt}, Ptr{$T}, Ref{BlasInt},
Ref{$T}, Ptr{$T}, Ref{BlasInt}),
trans, nrows, ncols, α, A, nrows, x, 1, β, y, 1)
return y
end
end
end
#
# Reference (non-BLAS) version for *flat* arrays which can be linearly indexed.
# Loops are ordered assuming the coefficients of A have column-major storage
# order.
#
function _linear_lgemv!(nrows::Int, ncols::Int,
α::Floats,
trans::Char,
A::AbstractArray{Ta},
x::AbstractArray{Tx},
β::Floats,
y::AbstractArray{Ty}) where {Ta<:Floats,
Tx<:Floats,
Ty<:Floats}
if α == 0 || trans == 'N'
# Form: y := β⋅y
if β == 0
@inbounds @simd for k in eachindex(y)
y[k] = zero(Ty)
end
elseif β != 1
@inbounds @simd for k in eachindex(y)
y[k] *= β
end
end
end
if α != 0
if trans == 'N'
@inbounds for j in 1:ncols
temp = α*x[j]
if temp != zero(temp)
off = (j - 1)*nrows
@simd for i in 1:nrows
y[i] += A[off + i]*temp
end
end
end
else
T = promote_type(Ta, Tx)
if Ta <: Reals || trans == 'T'
@inbounds for j in 1:ncols
off = (j - 1)*nrows
temp = zero(T)
@simd for i in 1:nrows
temp += A[off + i]*x[i]
end
y[j] = (β == 0 ? α*temp : α*temp + β*y[j])
end
else
@inbounds for j in 1:ncols
off = (j - 1)*nrows
temp = zero(T)
@simd for i in 1:nrows
temp += conj(A[off + i])*x[i]
end
y[j] = (β == 0 ? α*temp : α*temp + β*y[j])
end
end
end
end
return y
end
#
# At the lowest level, the same code can serve for the very general case
# (multi-dimensional Cartesian indices) and the basic case (A is a 2D array
# while x and y are both vectors).
#
# The elements of A are accessed sequentially with one pass through A assuming
# they are stored in column-major order.
#
function _generic_lgemv!(I, J,
α::Floats,
trans::Char,
A::AbstractArray{Ta},
x::AbstractArray{Tx},
β::Floats,
y::AbstractArray{Ty}) where {Ta<:Floats,
Tx<:Floats,
Ty<:Floats}
if α == 0 || trans == 'N'
# Form: y := β⋅y
if β == 0
@inbounds @simd for k in eachindex(y)
y[k] = zero(Ty)
end
elseif β != 1
@inbounds @simd for k in eachindex(y)
y[k] *= β
end
end
end
if α != 0
if trans == 'N'
#
# Form y := α*A*x + y.
#
@inbounds for j in J
temp = α*x[j]
if temp != zero(temp)
@simd for i in I
y[i] += A[i,j]*temp
end
end
end
else
T = promote_type(Ta,Tx)
if Ta <: Real || trans == 'T'
#
# Form y := α*A^T*x + y
#
@inbounds for j in J
temp = zero(T)
@simd for i in I
temp += A[i,j]*x[i]
end
y[j] = (β == 0 ? α*temp : α*temp + β*y[j])
end
else
#
# Form y := α*A^H*x + y.
#
@inbounds for j in J
temp = zero(T)
@simd for i in I
temp += conj(A[i,j])*x[i]
end
y[j] = (β == 0 ? α*temp : α*temp + β*y[j])
end
end
end
end
return y
end
#
# This method yields the type of the elements for the result of lgemv.
#
_lgemv_type(α::Real, A::AbstractArray, x::AbstractArray) =
promote_eltype(A, x)
_lgemv_type(α::Complex, A::AbstractArray, x::AbstractArray) =
complex(promote_eltype(A, x))
#
# This method yields the number of rows and columns for lgemv assuming linear
# indexing and checks the arguments.
#
@inline function _lgemv_dims(trans::Char,
A::AbstractMatrix,
x::AbstractVector)
nrows, ncols = size(A, 1), size(A, 2)
if trans == 'N'
length(x) == ncols || incompatible_dimensions()
elseif trans == 'T' || trans == 'C'
length(x) == nrows || incompatible_dimensions()
else
invalid_transpose_character()
end
return nrows, ncols, (trans == 'N' ? nrows : ncols)
end
#
# Idem for general "flat" arrays.
#
function _lgemv_dims(trans::Char,
A::AbstractArray{<:Any,Na},
x::AbstractArray{<:Any,Nx}) where {Na,Nx}
1 ≤ Nx < Na || incompatible_dimensions()
@inbounds begin
Ny = Na - Nx
if trans == 'N'
ncols = 1
for d in 1:Nx
dim = size(x, d)
size(A, Ny + d) == dim || incompatible_dimensions()
ncols *= dim
end
shape = ntuple(d -> size(A, d), Ny)
nrows = prod(shape)
elseif trans == 'T' || trans == 'C'
nrows = 1
for d in 1:Nx
dim = size(x, d)
size(A, d) == dim || incompatible_dimensions()
nrows *= dim
end
shape = ntuple(d -> size(A, Nx + d), Ny)
ncols = prod(shape)
else
invalid_transpose_character()
end
return nrows, ncols, shape
end
end
#
# This method yields the number of rows and columns for lgemv! assuming linear
# indexing and checks the arguments.
#
@inline function _lgemv_dims(trans::Char,
A::AbstractMatrix,
x::AbstractVector,
y::AbstractVector)
nrows, ncols = size(A)
if trans == 'N'
(length(x) == ncols && length(y) == nrows) || incompatible_dimensions()
elseif trans == 'T' || trans == 'C'
(length(x) == nrows && length(y) == ncols) || incompatible_dimensions()
else
invalid_transpose_character()
end
return nrows, ncols
end
#
# Idem for general arrays.
#
function _lgemv_dims(trans::Char,
A::AbstractArray{<:Any,Na},
x::AbstractArray{<:Any,Nx},
y::AbstractArray{<:Any,Ny}) where {Na,Nx,Ny}
(Na == Nx + Ny && Nx ≥ 1 && Ny ≥ 1) || incompatible_dimensions()
nrows = ncols = 1
@inbounds begin
if trans == 'N'
for d in 1:Ny
dim = size(y, d)
size(A, d) == dim || incompatible_dimensions()
nrows *= dim
end
for d in 1:Nx
dim = size(x, d)
size(A, d + Ny) == dim || incompatible_dimensions()
ncols *= dim
end
elseif trans == 'T' || trans == 'C'
for d in 1:Nx
dim = size(x, d)
size(A, d) == dim || incompatible_dimensions()
nrows *= dim
end
for d in 1:Ny
dim = size(y, d)
size(A, d + Nx) == dim || incompatible_dimensions()
ncols *= dim
end
else
invalid_transpose_character()
end
end
return (nrows, ncols)
end
#
# Build tuples rows and cols of index intervals to access A[i,j] in lgemv and
# check arguments.
#
@inline function _lgemv_indices(trans::Char,
A::AbstractMatrix,
x::AbstractVector)
rows, cols = axes(A)
if trans == 'N'
axes(x) == (cols,) || incompatible_dimensions()
elseif trans == 'T' || trans == 'C'
axes(x) == (rows,) || incompatible_dimensions()
else
invalid_transpose_character()
end
return rows, cols
end
#
# Idem for general arrays.
#
@inline function _lgemv_indices(trans::Char,
A::AbstractArray{<:Any,Na},
x::AbstractArray{<:Any,Nx}) where {Na,Nx}
1 ≤ Nx < Na || incompatible_dimensions()
@inbounds begin
Ny = Na - Nx
if trans == 'N'
rows = ntuple(d -> axes(A, d), Ny)
cols = ntuple(d -> axes(A, d + Ny), Nx)
for d in 1:Nx
axes(x, d) == cols[d] || incompatible_dimensions()
end
elseif trans == 'T' || trans == 'C'
rows = ntuple(d -> axes(A, d), Nx)
cols = ntuple(d -> axes(A, d + Nx), Ny)
for d in 1:Nx
axes(x, d) == rows[d] || incompatible_dimensions()
end
else
invalid_transpose_character()
end
return rows, cols
end
end
#
# Build tuples rows and cols of index intervals to access A[i,j] in lgemv! and
# check arguments.
#
@inline function _lgemv_indices(trans::Char,
A::AbstractMatrix,
x::AbstractVector,
y::AbstractVector)
rows, cols = axes(A)
if trans == 'N'
(axes(x) == (cols,) && axes(y) == (rows,)) || incompatible_dimensions()
elseif trans == 'T' || trans == 'C'
(axes(x) == (rows,) && axes(y) == (cols,)) || incompatible_dimensions()
else
invalid_transpose_character()
end
return rows, cols
end
#
# Idem for general arrays.
#
@inline function _lgemv_indices(trans::Char,
A::AbstractArray{<:Any,Na},
x::AbstractArray{<:Any,Nx},
y::AbstractArray{<:Any,Ny}) where {Na,Nx,Ny}
(Na == Nx + Ny && Nx ≥ 1 && Ny ≥ 1) || incompatible_dimensions()
@inbounds begin
if trans == 'N'
rows = ntuple(d -> axes(A, d), Ny)
cols = ntuple(d -> axes(A, d + Ny), Nx)
for d in 1:Nx
axes(x, d) == cols[d] || incompatible_dimensions()
end
for d in 1:Ny
axes(y, d) == rows[d] || incompatible_dimensions()
end
elseif trans == 'T' || trans == 'C'
rows = ntuple(d -> axes(A, d), Nx)
cols = ntuple(d -> axes(A, d + Nx), Ny)
for d in 1:Nx
axes(x, d) == rows[d] || incompatible_dimensions()
end
for d in 1:Ny
axes(y, d) == cols[d] || incompatible_dimensions()
end
else
invalid_transpose_character()
end
return rows, cols
end
end
| LazyAlgebra | https://github.com/emmt/LazyAlgebra.jl.git |
|
["MIT"] | 0.2.7 | e58d5904fa7ffa914a3eb60f8705e2ea3aaea1b9 | code | 16042 | #
# mappings.jl -
#
# Provide basic mappings.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
#------------------------------------------------------------------------------
# IDENTITY AND UNIFORM SCALING
identical(::Identity, ::Identity) = true
@callable Identity
# Traits:
SelfAdjointType(::Identity) = SelfAdjoint()
MorphismType(::Identity) = Endomorphism()
DiagonalType(::Identity) = DiagonalMapping()
apply(::Type{<:Operations}, ::Identity, x, scratch::Bool=false) = x
# vcreate for identity always returns x (see doc. of vcreate).
vcreate(::Type{<:Operations}, ::Identity, x, scratch::Bool) = x
apply!(α::Number, ::Type{<:Operations}, ::Identity, x, ::Bool, β::Number, y) =
vcombine!(y, α, x, β, y)
# Rules to automatically convert UniformScaling from standard library module
# LinearAlgebra into λ*Id. For other operators, there is no need to extend ⋅
# (\cdot) and ∘ (\circ) as they are already converted in calls to *. But in
# the case of UniformScaling, we must explicitly do that for * and for ∘ (not
# for ⋅ which is replaced by a * by existing rules).
for op in (:(+), :(-), :(*), :(∘), :(/), Symbol("\\"))
@eval begin
Base.$op(A::UniformScaling, B::Mapping) = $op(Mapping(A), B)
Base.$op(A::Mapping, B::UniformScaling) = $op(A, Mapping(B))
end
end
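# For instance (a sketch, with A some LazyAlgebra mapping and I the
# LinearAlgebra.UniformScaling identity), both expressions yield mappings:
#
#     2I + A   # -> the mapping 2⋅Id + A
#     2I*A     # -> the mapping 2⋅A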
#------------------------------------------------------------------------------
# SYMBOLIC MAPPINGS (FOR TESTS)
struct SymbolicMapping{T} <: Mapping end
struct SymbolicLinearMapping{T} <: LinearMapping end
SymbolicMapping(id::AbstractString) = SymbolicMapping(Symbol(id))
SymbolicMapping(id::Symbol) = SymbolicMapping{Val{id}}()
SymbolicLinearMapping(id::AbstractString) = SymbolicLinearMapping(Symbol(id))
SymbolicLinearMapping(x::Symbol) = SymbolicLinearMapping{Val{x}}()
show(io::IO, A::SymbolicMapping{Val{T}}) where {T} = print(io, T)
show(io::IO, A::SymbolicLinearMapping{Val{T}}) where {T} = print(io, T)
identical(::T, ::T) where {T<:SymbolicMapping} = true
identical(::T, ::T) where {T<:SymbolicLinearMapping} = true
#------------------------------------------------------------------------------
# NON-UNIFORM SCALING
"""
Diag(A) -> NonuniformScaling(A)
yields a non-uniform scaling linear mapping (of type `NonuniformScaling`) whose
effect is to apply elementwise multiplication of its argument by the scaling
factors `A`. This mapping can be thought of as a *diagonal* operator.
The `diag` method in `LinearAlgebra` can be called to retrieve the scaling
factors:
using LinearAlgebra
W = Diag(A)
diag(W) === A # this is true
!!! note
Beware of the differences between the [`Diag`](@ref) (with an uppercase
'D') and [`diag`](@ref) (with a lowercase 'd') methods.
"""
struct NonuniformScaling{T} <: LinearMapping
diag::T
end
const Diag{T} = NonuniformScaling{T}
@callable NonuniformScaling
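# Usage sketch (assuming LazyAlgebra and LinearAlgebra are loaded):
# elementwise scaling of an array.
#
#     w = [1.0, 2.0, 3.0]
#     W = Diag(w)                # same as NonuniformScaling(w)
#     W*[1.0, 1.0, 1.0]          # -> [1.0, 2.0, 3.0]
#     diag(W) === w              # true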
# Traits:
MorphismType(::NonuniformScaling) = Endomorphism()
DiagonalType(::NonuniformScaling) = DiagonalMapping()
SelfAdjointType(A::NonuniformScaling) =
_selfadjointtype(eltype(coefficients(A)), A)
_selfadjointtype(::Type{<:Real}, ::NonuniformScaling) =
SelfAdjoint()
_selfadjointtype(::Type{<:Complex}, ::NonuniformScaling) =
NonSelfAdjoint()
coefficients(A::NonuniformScaling) = A.diag
LinearAlgebra.diag(A::NonuniformScaling) = coefficients(A)
identical(A::T, B::T) where {T<:NonuniformScaling} =
coefficients(A) === coefficients(B)
function inv(A::NonuniformScaling{<:AbstractArray{T,N}}
) where {T<:AbstractFloat, N}
q = coefficients(A)
r = similar(q)
@inbounds @simd for i in eachindex(q, r)
r[i] = one(T)/q[i]
end
return NonuniformScaling(r)
end
eltype(::Type{<:NonuniformScaling{<:AbstractArray{T,N}}}) where {T, N} = T
input_ndims(::NonuniformScaling{<:AbstractArray{T,N}}) where {T, N} = N
input_size(A::NonuniformScaling{<:AbstractArray}) = size(coefficients(A))
input_size(A::NonuniformScaling{<:AbstractArray}, i) =
size(coefficients(A), i)
output_ndims(::NonuniformScaling{<:AbstractArray{T,N}}) where {T, N} = N
output_size(A::NonuniformScaling{<:AbstractArray}) = size(coefficients(A))
output_size(A::NonuniformScaling{<:AbstractArray}, i) =
size(coefficients(A), i)
# Simplify left multiplication (and division) by a scalar.
# FIXME: α = 0 should be treated specifically
*(α::Number, A::NonuniformScaling)::NonuniformScaling =
(α == 1 ? A : NonuniformScaling(vscale(α, coefficients(A))))
# Extend composition of diagonal operators.
*(A::NonuniformScaling, B::NonuniformScaling) =
NonuniformScaling(vproduct(coefficients(A), coefficients(B)))
function apply!(α::Number,
::Type{P},
W::NonuniformScaling{<:AbstractArray{Tw,N}},
x::AbstractArray{Tx,N},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,N}) where {P<:Operations,
Tw<:Floats,
Tx<:Floats,
Ty<:Floats,N}
w = coefficients(W)
I = all_indices(w, x, y)
if α == 0
vscale!(y, β)
elseif β == 0
if α == 1
_apply_diagonal!(P, axpby_yields_x, I, 1, w, x, 0, y)
else
a = promote_multiplier(α, Tw, Tx)
_apply_diagonal!(P, axpby_yields_ax, I, a, w, x, 0, y)
end
elseif β == 1
if α == 1
_apply_diagonal!(P, axpby_yields_xpy, I, 1, w, x, 1, y)
else
a = promote_multiplier(α, Tw, Tx)
_apply_diagonal!(P, axpby_yields_axpy, I, a, w, x, 1, y)
end
else
b = promote_multiplier(β, Ty)
if α == 1
_apply_diagonal!(P, axpby_yields_xpby, I, 1, w, x, b, y)
else
a = promote_multiplier(α, Tw, Tx)
_apply_diagonal!(P, axpby_yields_axpby, I, a, w, x, b, y)
end
end
return y
end
function _apply_diagonal!(::Type{Direct}, axpby::Function, I,
α, w, x, β, y)
@inbounds @simd for i in I
y[i] = axpby(α, w[i]*x[i], β, y[i])
end
end
function _apply_diagonal!(::Type{Adjoint}, axpby::Function, I,
α, w, x, β, y)
@inbounds @simd for i in I
y[i] = axpby(α, conj(w[i])*x[i], β, y[i])
end
end
function _apply_diagonal!(::Type{Inverse}, axpby::Function, I,
α, w, x, β, y)
@inbounds @simd for i in I
y[i] = axpby(α, x[i]/w[i], β, y[i])
end
end
function _apply_diagonal!(::Type{InverseAdjoint}, axpby::Function, I,
α, w, x, β, y)
@inbounds @simd for i in I
y[i] = axpby(α, x[i]/conj(w[i]), β, y[i])
end
end
function vcreate(::Type{<:Operations},
W::NonuniformScaling{<:AbstractArray{Tw,N}},
x::AbstractArray{Tx,N},
scratch::Bool) where {Tw,Tx,N}
inds = same_axes(coefficients(W), x)
T = promote_type(Tw, Tx)
return (scratch && Tx == T ? x : similar(Array{T}, inds))
end
#------------------------------------------------------------------------------
# RANK-1 OPERATORS
"""
RankOneOperator(u, v) -> A
yields the rank one linear operator `A = u⋅v'` defined by the two *vectors* `u`
and `v` and behaving as:
A*x -> vscale(vdot(v, x), u)
A'*x -> vscale(vdot(u, x), v)
See also: [`SymmetricRankOneOperator`](@ref), [`LinearMapping`](@ref),
[`apply!`](@ref), [`vcreate`](@ref).
"""
struct RankOneOperator{U,V} <: LinearMapping
u::U
v::V
end
@callable RankOneOperator
function apply!(α::Number, ::Type{Direct}, A::RankOneOperator,
x, scratch::Bool, β::Number, y)
return _apply_rank_one!(α, A.u, A.v, x, β, y)
end
function apply!(α::Number, ::Type{Adjoint}, A::RankOneOperator,
x, scratch::Bool, β::Number, y)
return _apply_rank_one!(α, A.v, A.u, x, β, y)
end
function _apply_rank_one!(α::Number, u, v, x, β::Number, y)
if α == 0
# Lazily assume that y has correct type, dimensions, etc.
vscale!(y, β)
else
vcombine!(y, α*vdot(v, x), u, β, y)
end
return y
end
# Lazily assume that x has correct type, dimensions, etc.
# FIXME: optimize when scratch=true
vcreate(::Type{Direct}, A::RankOneOperator, x, scratch::Bool) = vcreate(A.u)
vcreate(::Type{Adjoint}, A::RankOneOperator, x, scratch::Bool) = vcreate(A.v)
input_type(A::RankOneOperator{U,V}) where {U,V} = V
input_ndims(A::RankOneOperator) = ndims(A.v)
input_size(A::RankOneOperator) = size(A.v)
input_size(A::RankOneOperator, d...) = size(A.v, d...)
input_eltype(A::RankOneOperator) = eltype(A.v)
output_type(A::RankOneOperator{U,V}) where {U,V} = U
output_ndims(A::RankOneOperator) = ndims(A.u)
output_size(A::RankOneOperator) = size(A.u)
output_size(A::RankOneOperator, d...) = size(A.u, d...)
output_eltype(A::RankOneOperator) = eltype(A.u)
identical(A::T, B::T) where {T<:RankOneOperator} =
((A.u === B.u)&(A.v === B.v))
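# Usage sketch (assuming LazyAlgebra's exports are in scope): A = u⋅v' maps x
# to vdot(v, x)*u while its adjoint maps x to vdot(u, x)*v.
#
#     u = [1.0, 0.0]; v = [2.0, 3.0]
#     A = RankOneOperator(u, v)
#     A*[1.0, 1.0]    # -> [5.0, 0.0]  (vdot(v, x) == 5)
#     A'*[1.0, 0.0]   # -> [2.0, 3.0]  (vdot(u, x) == 1)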
"""
SymmetricRankOneOperator(u) -> A
yields the symmetric rank one operator `A = u⋅u'` defined by the *vector* `u`
and behaving as follows:
A'*x -> A*x
A*x -> vscale(vdot(u, x), u)
See also: [`RankOneOperator`](@ref), [`LinearMapping`](@ref),
[`Trait`](@ref), [`apply!`](@ref), [`vcreate`](@ref).
"""
struct SymmetricRankOneOperator{U} <: LinearMapping
u::U
end
@callable SymmetricRankOneOperator
# Traits:
MorphismType(::SymmetricRankOneOperator) = Endomorphism()
SelfAdjointType(::SymmetricRankOneOperator) = SelfAdjoint()
function apply!(α::Number, ::Type{<:Union{Direct,Adjoint}},
A::SymmetricRankOneOperator, x, scratch::Bool, β::Number, y)
return _apply_rank_one!(α, A.u, A.u, x, β, y)
end
function vcreate(::Type{<:Union{Direct,Adjoint}},
A::SymmetricRankOneOperator, x, scratch::Bool)
# Lazily assume that x has correct type, dimensions, etc.
return (scratch ? x : vcreate(x))
end
input_type(A::SymmetricRankOneOperator{U}) where {U} = U
input_ndims(A::SymmetricRankOneOperator) = ndims(A.u)
input_size(A::SymmetricRankOneOperator) = size(A.u)
input_size(A::SymmetricRankOneOperator, d...) = size(A.u, d...)
input_eltype(A::SymmetricRankOneOperator) = eltype(A.u)
output_type(A::SymmetricRankOneOperator{U}) where {U} = U
output_ndims(A::SymmetricRankOneOperator) = ndims(A.u)
output_size(A::SymmetricRankOneOperator) = size(A.u)
output_size(A::SymmetricRankOneOperator, d...) = size(A.u, d...)
output_eltype(A::SymmetricRankOneOperator) = eltype(A.u)
identical(A::T, B::T) where {T<:SymmetricRankOneOperator} =
(A.u === B.u)
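# Usage sketch (assuming LazyAlgebra's exports are in scope): B = u⋅u' is
# self-adjoint, so applying B and B' gives the same result.
#
#     u = [1.0, 2.0]
#     B = SymmetricRankOneOperator(u)
#     B*[1.0, 0.0]    # -> [1.0, 2.0]  (vdot(u, x) == 1)
#     B'*[1.0, 0.0]   # same result, by self-adjointness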
#------------------------------------------------------------------------------
# GENERALIZED MATRIX AND MATRIX-VECTOR PRODUCT
"""
GeneralMatrix(A)
creates a linear mapping whose coefficients are given by a multi-dimensional
array `A` and which generalizes the definition of the matrix-vector product
without calling `reshape` to change the dimensions.
For instance, assuming that `G = GeneralMatrix(A)` with `A` a regular array,
then `y = G*x` requires that the dimensions of `x` match the trailing
dimensions of `A` and yields a result `y` whose dimensions are the remaining
leading dimensions of `A`, such that `axes(A) = (axes(y)..., axes(x)...)`.
Applying the adjoint of `G` as in `y = G'*x` requires that the dimensions of
`x` match the leading dimensions of `A` and yields a result `y` whose dimensions
are the remaining trailing dimensions of `A`, such that `axes(A) = (axes(x)...,
axes(y)...)`.
See also: [`reshape`](@ref).
"""
struct GeneralMatrix{T<:AbstractArray} <: LinearMapping
arr::T
end
@callable GeneralMatrix
coefficients(A::GeneralMatrix) = A.arr
# Make a GeneralMatrix behave like an ordinary array.
eltype(A::GeneralMatrix) = eltype(coefficients(A))
length(A::GeneralMatrix) = length(coefficients(A))
ndims(A::GeneralMatrix) = ndims(coefficients(A))
axes(A::GeneralMatrix) = axes(coefficients(A))
size(A::GeneralMatrix) = size(coefficients(A))
size(A::GeneralMatrix, i...) = size(coefficients(A), i...)
getindex(A::GeneralMatrix, i...) = getindex(coefficients(A), i...)
setindex!(A::GeneralMatrix, x, i...) = setindex!(coefficients(A), x, i...)
stride(A::GeneralMatrix, k) = stride(coefficients(A), k)
strides(A::GeneralMatrix) = strides(coefficients(A))
eachindex(A::GeneralMatrix) = eachindex(coefficients(A))
identical(A::T, B::T) where {T<:GeneralMatrix} =
(coefficients(A) === coefficients(B))
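# Usage sketch (assuming LazyAlgebra's exports are in scope): a 3-dimensional
# coefficient array maps 1D inputs to 2D outputs without any reshape.
#
#     A = randn(2, 3, 4)
#     G = GeneralMatrix(A)
#     y = G*randn(4)        # y is a 2×3 array
#     z = G'*randn(2, 3)    # z is a 4-element vector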
function apply!(α::Number,
P::Type{<:Operations},
A::GeneralMatrix{<:AbstractArray{<:GenMult.Floats}},
x::AbstractArray{<:GenMult.Floats},
scratch::Bool,
β::Number,
y::AbstractArray{<:GenMult.Floats})
return apply!(α, P, coefficients(A), x, scratch, β, y)
end
function vcreate(P::Type{<:Operations},
A::GeneralMatrix{<:AbstractArray{<:GenMult.Floats}},
x::AbstractArray{<:GenMult.Floats},
scratch::Bool)
return vcreate(P, coefficients(A), x, scratch)
end
for (T, L) in ((:Direct, 'N'), (:Adjoint, 'C'))
@eval begin
function apply!(α::Number,
::Type{$T},
A::AbstractArray{<:GenMult.Floats},
x::AbstractArray{<:GenMult.Floats},
scratch::Bool,
β::Number,
y::AbstractArray{<:GenMult.Floats})
return lgemv!(α, $L, A, x, β, y)
end
end
end
# To make apply and apply! callable with an array (instead of a mapping), we
# provide methods for the different possible signatures.
apply(A::AbstractArray, x::AbstractArray, scratch::Bool) =
apply(Direct, A, x, scratch)
apply(P::Type{<:Operations}, A::AbstractArray, x::AbstractArray, scratch::Bool) =
apply!(1, P, A, x, scratch, 0, vcreate(P, A, x, scratch))
apply!(y::AbstractArray, A::AbstractArray, x::AbstractArray) =
apply!(1, Direct, A, x, false, 0, y)
apply!(y::AbstractArray, P::Type{<:Operations}, A::AbstractArray, x::AbstractArray) =
apply!(1, P, A, x, false, 0, y)
function vcreate(P::Type{<:Union{Direct,InverseAdjoint}},
A::AbstractArray{Ta,Na},
x::AbstractArray{Tx,Nx},
scratch::Bool) where {Ta,Na,Tx,Nx}
# Non-transposed matrix. Trailing dimensions of X must match those of A,
# leading dimensions of A are those of the result. Whatever the scratch
# parameter, a new array is returned as the operation cannot be done
# in-place.
@noinline incompatible_dimensions() =
bad_size("the indices of `x` do not match the trailing indices of `A`")
1 ≤ Nx < Na || incompatible_dimensions()
Ny = Na - Nx
xinds = axes(x)
Ainds = axes(A)
@inbounds for d in 1:Nx
xinds[d] == Ainds[Ny + d] || incompatible_dimensions()
end
shape = ntuple(d -> Ainds[d], Val(Ny)) # faster than Ainds[1:Ny]
return similar(A, promote_type(Ta, Tx), shape)
end
function vcreate(P::Type{<:Union{Adjoint,Inverse}},
A::AbstractArray{Ta,Na},
x::AbstractArray{Tx,Nx},
scratch::Bool) where {Ta,Na,Tx,Nx}
# Transposed matrix. Leading dimensions of X must match those of A,
# trailing dimensions of A are those of the result. Whatever the scratch
# parameter, a new array is returned as the operation cannot be done
# in-place.
@noinline incompatible_dimensions() =
bad_size("the indices of `x` do not match the leading indices of `A`")
1 ≤ Nx < Na || incompatible_dimensions()
Ny = Na - Nx
xinds = axes(x)
Ainds = axes(A)
@inbounds for d in 1:Nx
xinds[d] == Ainds[d] || incompatible_dimensions()
end
shape = ntuple(d -> Ainds[Nx + d], Val(Ny)) # faster than Ainds[Nx+1:end]
return similar(A, promote_type(Ta, Tx), shape)
end
| LazyAlgebra | https://github.com/emmt/LazyAlgebra.jl.git |
|
["MIT"] | 0.2.7 | e58d5904fa7ffa914a3eb60f8705e2ea3aaea1b9 | code | 33608 | #
# methods.jl -
#
# Implement non-specific methods for mappings.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2021 Éric Thiébaut.
#
@noinline function unimplemented(::Type{P},
::Type{T}) where {P<:Operations, T<:Mapping}
throw(UnimplementedOperation("unimplemented operation `$P` for mapping $T"))
end
@noinline function unimplemented(func::Union{AbstractString,Symbol},
::Type{T}) where {T<:Mapping}
throw(UnimplementedMethod("unimplemented method `$func` for mapping $T"))
end
"""
@callable T
makes instances of concrete type `T` callable as a regular `LazyAlgebra`
mapping, that is `A(x)` yields `apply(A,x)` for any `A` of type `T`.
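For example, with `MyMapping` a hypothetical concrete mapping type:
```julia
@callable MyMapping
# now A(x) is equivalent to apply(A, x) for any A of type MyMapping
```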
"""
macro callable(T)
quote
(A::$(esc(T)))(x) = apply(A, x)
end
end
@callable Adjoint
@callable Inverse
@callable InverseAdjoint
@callable Jacobian
@callable Gram
@callable Scaled
@callable Sum
@callable Composition
show(io::IO, ::MIME"text/plain", A::Mapping) = show(io, A)
show(io::IO, A::Identity) = print(io, "Id")
function show(io::IO, A::Scaled)
λ, M = multiplier(A), unscaled(A)
if λ == -1
print(io, "-")
elseif λ != 1
print(io, λ, "⋅")
end
show(io, M)
end
function show(io::IO, A::Scaled{<:Sum})
λ, M = multiplier(A), unscaled(A)
if λ == -1
print(io, "-(")
elseif λ != 1
print(io, λ, "⋅(")
end
show(io, M)
if λ != 1
print(io, ")")
end
end
function show(io::IO, A::Adjoint{<:Mapping})
show(io, unveil(A))
print(io, "'")
end
function show(io::IO, A::Adjoint{T}) where {T<:Union{Scaled,Composition,Sum}}
print(io, "(")
show(io, unveil(A))
print(io, ")'")
end
function show(io::IO, A::Inverse{<:Mapping})
print(io, "inv(")
show(io, unveil(A))
print(io, ")")
end
function show(io::IO, A::InverseAdjoint{<:Mapping})
print(io, "inv(")
show(io, unveil(A))
print(io, ")'")
end
function show(io::IO, A::Jacobian{<:Mapping})
print(io, "∇(")
show(io, primitive(A))
print(io, ",x)")
end
function show(io::IO, A::Sum{N}) where {N}
function show_term(io::IO, A::Sum)
print(io, "(")
show(io, A)
print(io, ")")
end
show_term(io::IO, A::Mapping) = show(io, A)
for i in 1:N
let B = A[i]
if isa(B, Scaled)
λ, M = multiplier(B), unscaled(B)
if λ < 0
print(io, (i == 1 ? "-" : " - "))
λ = -λ
elseif i > 1
print(io, " + ")
end
if λ != 1
print(io, λ, "⋅")
end
show_term(io, M)
else
if i > 1
print(io, " + ")
end
show_term(io, B)
end
end
end
end
function show(io::IO, A::Composition{N}) where {N}
for i in 1:N
let B = A[i]
if i > 1
print(io, "⋅")
end
if isa(B, Sum) || isa(B, Scaled)
print(io, "(")
show(io, B)
print(io, ")")
else
show(io, B)
end
end
end
end
"""
terms(A)
yields the list (as a tuple) of terms that compose mapping `A`. If `A` is a
sum or a composition of mappings, the list of terms is returned; otherwise, the
1-tuple `(A,)` is returned.
If `A` is a sum or a composition of mappings, `Tuple(A)` yields the same
result as `terms(A)`.
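For example (with `A`, `B` and `C` mappings such that no further
simplification occurs):
```julia
terms(A + B + C) # -> the 3 terms of the sum (in sorted order)
terms(A)         # -> (A,)
```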
"""
terms(A::Union{Sum,Composition}) = getfield(A, :ops)
terms(A::Mapping) = (A,)
"""
unveil(A)
unveils the mapping embedded in mapping `A` if it is a *decorated* mapping (see
[`LazyAlgebra.DecoratedMapping`](@ref)); otherwise, just returns `A` if it is
not a *decorated* mapping.
As a special case, `A` may be an instance of `LinearAlgebra.UniformScaling` and
the result is the LazyAlgebra mapping corresponding to `A`.
"""
unveil(A::DecoratedMapping) = getfield(A, :op)
unveil(A::Mapping) = A
unveil(A::UniformScaling) = multiplier(A)*Id
Mapping(A::UniformScaling) = unveil(A)
"""
unscaled(A)
yields the mapping `M` of the scaled mapping `A = λ*M` (see [`Scaled`](@ref));
otherwise yields `A`. This method also works for instances of
`LinearAlgebra.UniformScaling`. Call [`multiplier`](@ref) to get the multiplier
`λ`.
"""
unscaled(A::Mapping) = A
unscaled(A::Scaled) = getfield(A, :M)
unscaled(A::UniformScaling) = Id
"""
multiplier(A)
yields the multiplier `λ` of the scaled mapping `A = λ*M` (see
[`Scaled`](@ref)); otherwise yields `1`. Note that this method also works for
instances of `LinearAlgebra.UniformScaling`. Call [`unscaled`](@ref) to get the
mapping `M`.
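For example (with `A` some mapping which is not itself scaled):
```julia
multiplier(3A) # -> 3
multiplier(A)  # -> 1
unscaled(3A)   # -> A
```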
"""
multiplier(A::Scaled) = getfield(A, :λ)
multiplier(A::Mapping) = 1
multiplier(A::UniformScaling) = getfield(A, :λ)
"""
primitive(J)
yields the mapping `A` embedded in the Jacobian `J = ∇(A,x)`. Call
[`variables`](@ref) to get `x` instead.
"""
primitive(J::Jacobian) = getfield(J, :A)
"""
variables(J)
yields the variables `x` embedded in the Jacobian `J = ∇(A,x)`. Call
[`primitive`](@ref) to get `A` instead.
"""
variables(J::Jacobian) = getfield(J, :x)
"""
identifier(A)
yields a hash value identifying almost uniquely the unscaled mapping `A`. This
identifier is used for sorting terms in a sum of mappings.
!!! warning
For now, the identifier is computed as `objectid(unscaled(A))` and is
unique with a very high probability.
"""
identifier(A::Mapping) = objectid(unscaled(A))
Base.isless(A::Mapping, B::Mapping) = isless(identifier(A), identifier(B))
# Extend base methods to simplify the code for reducing expressions.
first(A::Mapping) = A
last(A::Mapping) = A
first(A::Union{Sum,Composition}) = @inbounds A[1]
last(A::Union{Sum{N},Composition{N}}) where {N} = @inbounds A[N]
firstindex(A::Union{Sum,Composition}) = 1
lastindex(A::Union{Sum{N},Composition{N}}) where {N} = N
length(A::Union{Sum{N},Composition{N}}) where {N} = N
eltype(::Type{<:Sum{N,T}}) where {N,T} = eltype(T)
eltype(::Type{<:Composition{N,T}}) where {N,T} = eltype(T)
Tuple(A::Union{Sum,Composition}) = terms(A)
@inline @propagate_inbounds getindex(A::Union{Sum,Composition}, i) =
getindex(terms(A), i)
"""
input_type([P=Direct,] A)
output_type([P=Direct,] A)
yield the (preferred) types of the input and output arguments of the operation
`P` with mapping `A`. If `A` operates on Julia arrays, the element type, list
of dimensions, `i`-th dimension and number of dimensions for the input and
output are given by:
input_eltype([P=Direct,] A) output_eltype([P=Direct,] A)
input_size([P=Direct,] A) output_size([P=Direct,] A)
input_size([P=Direct,] A, i) output_size([P=Direct,] A, i)
input_ndims([P=Direct,] A) output_ndims([P=Direct,] A)
For mappings operating on Julia arrays, only `input_size(A)` and
`output_size(A)` have to be implemented.
Also see: [`vcreate`](@ref), [`apply!`](@ref), [`LinearMapping`](@ref),
[`Operations`](@ref).
"""
function input_type end
for sfx in (:size, :eltype, :ndims, :type),
pfx in (:output, :input)
fn1 = Symbol(pfx, "_", sfx)
for P in (Direct, Adjoint, Inverse, InverseAdjoint)
fn2 = Symbol(P === Adjoint || P === Inverse ?
(pfx === :output ? :input : :output) : pfx, "_", sfx)
T = (P === Adjoint || P === InverseAdjoint ? LinearMapping : Mapping)
# Provide basic methods for the different operations and for tagged
# mappings.
@eval $fn1(::Type{$P}, A::$T) = $fn2(A)
if P !== Direct
@eval $fn1(A::$P{<:$T}) = $fn2(unveil(A))
end
if sfx === :size
if P !== Direct
@eval $fn1(A::$P{<:$T}, dim...) = $fn2(unveil(A), dim...)
end
@eval $fn1(::Type{$P}, A::$T, dim...) = $fn2(A, dim...)
end
end
# Link documentation for the basic methods.
if fn1 !== :input_type
@eval @doc @doc(:input_type) $fn1
end
end
# Provide default methods for `$(pfx)_size(A, dim...)` and `$(pfx)_ndims(A)`.
for pfx in (:input, :output)
get_size = Symbol(pfx, "_size")
get_ndims = Symbol(pfx, "_ndims")
@eval begin
$get_ndims(A::Mapping) = length($get_size(A))
$get_size(A::Mapping, dim) = $get_size(A)[dim]
function $get_size(A::Mapping, dim...)
dims = $get_size(A)
ntuple(i -> dims[dim[i]], length(dim))
end
end
end
for f in (:input_eltype, :output_eltype, :input_size, :output_size)
@eval $f(::T) where {T<:Mapping} = unimplemented($(string(f)), T)
end
"""
nrows(A)
yields the *equivalent* number of rows of the linear operator `A`. Not all
operators extend this method.
In the implemented generalization of linear operators, the equivalent number of
rows is the number of elements of the result of applying the operator, be it
single- or multi-dimensional.
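For example (hypothetical sizes): if `A` maps arrays of size `(4,5)` to
arrays of size `(2,3)`, then:
```julia
nrows(A) # -> 6  (= 2*3)
ncols(A) # -> 20 (= 4*5)
```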
"""
nrows(A::LinearMapping) = prod(row_size(A))
@noinline nrows(A::Mapping) =
throw(ArgumentError("`nrows` is only implemented for linear mappings"))
"""
ncols(A)
yields the *equivalent* number of columns of the linear operator `A`. Not all
operators extend this method.
In the implemented generalization of linear operators, the equivalent number of
columns is the number of elements of an argument of the operator, be it single-
or multi-dimensional.
"""
ncols(A::LinearMapping) = prod(col_size(A))
@noinline ncols(A::Mapping) =
throw(ArgumentError("`ncols` is only implemented for linear mappings"))
"""
row_size(A)
yields the dimensions of the result of applying the linear operator `A`, this
is equivalent to `output_size(A)`. Not all operators extend this method.
"""
row_size(A::LinearMapping) = output_size(A)
@noinline row_size(A::Mapping) =
throw(ArgumentError("`row_size` is only implemented for linear mappings"))
"""
col_size(A)
yields the dimensions of the argument of the linear operator `A`, this is
equivalent to `input_size(A)`. Not all operators extend this method.
"""
col_size(A::LinearMapping) = input_size(A)
@noinline col_size(A::Mapping) =
throw(ArgumentError("`col_size` is only implemented for linear mappings"))
"""
coefficients(A)
yields the object backing the storage of the coefficients of the linear mapping
`A`. Not all linear mappings extend this method.
""" coefficients
"""
check(A) -> A
checks integrity of mapping `A` and returns it.
"""
check(A::Mapping) = A
"""
checkmapping(y, A, x) -> (v1, v2, v1 - v2)
yields `v1 = vdot(y, A*x)`, `v2 = vdot(A'*y, x)` and their difference for `A` a
linear mapping, `y` a *vector* of the output space of `A` and `x` a *vector* of
the input space of `A`. In principle, the two inner products should be equal
whatever `x` and `y`; otherwise the mapping has a bug.
Simple linear mappings operating on Julia arrays can be tested on random
*vectors* with:
checkmapping([T=Float64,] outdims, A, inpdims) -> (v1, v2, v1 - v2)
with `outdims` and `inpdims` the dimensions of the output and input *vectors*
for `A`. Optional argument `T` is the element type.
If `A` operates on Julia arrays and methods `input_eltype`, `input_size`,
`output_eltype` and `output_size` have been specialized for `A`, then:
checkmapping(A) -> (v1, v2, v1 - v2)
is sufficient to check `A` against automatically generated random arrays.
See also: [`vdot`](@ref), [`vcreate`](@ref), [`apply!`](@ref),
[`input_type`](@ref).
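For example, a minimal sketch checking a `GeneralMatrix` built from random
coefficients (the tolerance is indicative):
```julia
v1, v2, err = checkmapping((3,), GeneralMatrix(randn(3,4)), (4,))
abs(err) ≤ 1e-10*abs(v1) # expected to hold up to rounding errors
```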
"""
function checkmapping(y::Ty, A::Mapping, x::Tx) where {Tx, Ty}
is_linear(A) || bad_argument("expecting a linear mapping")
v1 = vdot(y, A*x)
v2 = vdot(A'*y, x)
(v1, v2, v1 - v2)
end
function checkmapping(::Type{T},
outdims::Tuple{Vararg{Int}},
A::Mapping,
inpdims::Tuple{Vararg{Int}}) where {T<:AbstractFloat}
checkmapping(randn(T, outdims), A, randn(T, inpdims))
end
function checkmapping(outdims::Tuple{Vararg{Int}},
A::Mapping,
inpdims::Tuple{Vararg{Int}})
checkmapping(Float64, outdims, A, inpdims)
end
checkmapping(A::LinearMapping) =
checkmapping(randn(output_eltype(A), output_size(A)), A,
randn(input_eltype(A), input_size(A)))
"""
identical(A, B)
yields whether `A` is the same mapping as `B` in the sense that their effects
will always be the same. This method is used to perform some simplifications
and optimizations and may have to be specialized for specific mapping types.
The default implementation is to return `A === B`.
!!! note
The returned result may be true although `A` and `B` are not necessarily
the same objects. For instance, if `A` and `B` are two sparse matrices
whose coefficients and indices are stored in the same arrays (as can be
tested with the `===` or `≡` operators), `identical(A,B)` should return
`true` because the two operators will always behave identically (any
changes in the coefficients or indices of `A` will be reflected in `B`). If
any of the arrays storing the coefficients or the indices are not the same
objects, then `identical(A,B)` must return `false` even though the stored
values may be the same because it is possible, later, to change one
operator without affecting identically the other.
"""
@inline identical(::Mapping, ::Mapping) = false # false if not same types
@inline identical(A::T, B::T) where {T<:Mapping} = (A === B)
"""
gram(A) -> A'*A
yields the Gram operator built out of the linear mapping `A`. The result is
equivalent to `A'*A` but its type depends on simplifications that may occur.
See also [`Gram`](@ref).
"""
gram(A::LinearMapping) = A'*A
gram(A::Mapping) =
is_linear(A) ? A'*A : throw_forbidden_Gram_of_non_linear_mapping()
@noinline throw_forbidden_Gram_of_non_linear_mapping() =
bad_argument("making a Gram operator out of a non-linear mapping is not allowed")
# Inlined functions called to perform `α*x + β*y` for specific values of the
# multipliers `α` and `β`. Passing these (simple) functions to another method
# simplifies the coding of vectorized methods and of the `apply!` method
# implemented by mappings. NOTE: Forcing inlining may not be necessary but it does
# not hurt.
@inline axpby_yields_zero( α, x, β, y) = zero(typeof(y)) # α = 0, β = 0
@inline axpby_yields_y( α, x, β, y) = y # α = 0, β = 1
@inline axpby_yields_my( α, x, β, y) = -y # α = 0, β = -1
@inline axpby_yields_by( α, x, β, y) = β*y # α = 0, any β
@inline axpby_yields_x( α, x, β, y) = x # α = 1, β = 0
@inline axpby_yields_xpy( α, x, β, y) = x + y # α = 1, β = 1
@inline axpby_yields_xmy( α, x, β, y) = x - y # α = 1, β = -1
@inline axpby_yields_xpby( α, x, β, y) = x + β*y # α = 1, any β
@inline axpby_yields_mx( α, x, β, y) = -x # α = -1, β = 0
@inline axpby_yields_ymx( α, x, β, y) = y - x # α = -1, β = 1
@inline axpby_yields_mxmy( α, x, β, y) = -x - y # α = -1, β = -1
@inline axpby_yields_bymx( α, x, β, y) = β*y - x # α = -1, any β
@inline axpby_yields_ax( α, x, β, y) = α*x # any α, β = 0
@inline axpby_yields_axpy( α, x, β, y) = α*x + y # any α, β = 1
@inline axpby_yields_axmy( α, x, β, y) = α*x - y # any α, β = -1
@inline axpby_yields_axpby(α, x, β, y) = α*x + β*y # any α, any β
#------------------------------------------------------------------------------
# VCREATE, APPLY AND APPLY!
"""
vcreate([P,] A, x, scratch=false) -> y
yields a new instance `y` suitable for storing the result of applying mapping
`A` to the argument `x`. Optional parameter `P ∈ Operations` is one of `Direct`
(the default), `Adjoint`, `Inverse` and/or `InverseAdjoint` and can be used to
specify how `A` is to be applied as explained in the documentation of the
[`apply`](@ref) method.
Optional argument `scratch` indicates whether input argument `x` can be
overwritten by the operation and thus used to store the result. This may be
exploited by some mappings (which are able to operate *in-place*) to avoid
allocating a new object for the result `y`.
The caller should set `scratch = true` if `x` is not needed after calling
`apply`. If `scratch = true`, then it is possible that `y` be the same object
as `x`; otherwise, `y` is a new object unless applying the operation to `x`
yields a result with the same contents as `x` (as is always the case for the
identity for instance). Thus, in general, it should not be assumed that the
returned `y` is different from the input `x`.
The method `vcreate(::Type{P}, A, x)` should be implemented by linear mappings
for any supported operations `P` and argument type for `x`. The result returned
by `vcreate` should be of predictable type to ensure *type-stability*. Checking
the validity (*e.g.* the size) of argument `x` in `vcreate` may be skipped
because this argument will be eventually checked by the `apply!` method.
See also: [`Mapping`](@ref), [`apply`](@ref).
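For example (with `A` a mapping and `x` a suitable argument):
```julia
y = vcreate(Direct, A, x)        # new y suitable for storing A*x
y = vcreate(Adjoint, A, x, true) # x may be reused to store the result
```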
"""
vcreate(A::Mapping, x, scratch::Bool=false) = vcreate(Direct, A, x, scratch)
vcreate(::Type{P}, A::Mapping, x) where {P<:Operations} =
vcreate(P, A, x, false)
"""
vmul(A, x) -> y
yields `y = A*x`. The default behavior is to call `apply(Direct,A,x,false)`.
Method [`vmul!`](@ref) is the in-place version.
"""
vmul(A, x) = apply(Direct, A, x, false)
"""
vmul!(y, A, x) -> y
overwrites `y` with the result of `A*x` and returns `y`. The default behavior
is to call `apply!(1,Direct,A,x,false,0,y)`.
!!! note
This method is intended to be used by algorithms such as the conjugate
gradient to apply operators. It may be specialized by the caller for its
needs which is much easier than specializing [`apply!`](@ref) which
requires to consider the specific values of the multipliers `α` and `β`.
"""
vmul!(y, A, x) = apply!(1, Direct, A, x, false, 0, y)
"""
apply([P=Direct,] A, x, scratch=false) -> y
yields the result `y` of applying mapping `A` to the argument `x`. Optional
parameter `P` can be used to specify how `A` is to be applied:
* `Direct` (the default) to apply `A` and yield `y = A⋅x`;
* `Adjoint` to apply the adjoint of `A` and yield `y = A'⋅x`;
* `Inverse` to apply the inverse of `A` and yield `y = A\\x`;
* `InverseAdjoint` or `AdjointInverse` to apply the inverse of `A'` and
yield `y = A'\\x`.
Not all operations may be implemented by the different types of mappings and
`Adjoint` and `InverseAdjoint` may only be applicable for linear mappings.
Optional argument `scratch` indicates whether input argument `x` can be
overwritten by the operation. This may be exploited to avoid allocating
temporary workspace(s). The caller should set `scratch = true` if `x` is not
needed after calling `apply`. If `scratch = true`, then it is possible that `y`
be the same object as `x`; otherwise, `y` is a new object unless applying the
operation to `x` yields a result with the same contents as `x` (as is always
the case for the identity for instance). Thus, in general, it should not be
assumed that the result of applying a mapping is different from the input.
Julia methods are provided so that `apply(A', x)` automatically calls
`apply(Adjoint, A, x)` so the shorter syntax may be used without impacting
performance.
See also: [`Mapping`](@ref), [`apply!`](@ref), [`vcreate`](@ref).
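For example (with `A` a mapping implementing these operations and `x` a
suitable argument):
```julia
y = apply(A, x)          # same as A*x
z = apply(Adjoint, A, y) # same as A'*y
w = apply(Inverse, A, z) # same as A\\z
```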
"""
apply(A::Mapping, x, scratch::Bool=false) = apply(Direct, A, x, scratch)
apply(::Type{P}, A::Mapping, x, scratch::Bool=false) where {P<:Operations} =
apply!(1, P, A, x, scratch, 0, vcreate(P, A, x, scratch))
*(A::Mapping, x) = apply(Direct, A, x, false)
\(A::Mapping, x) = apply(Inverse, A, x, false)
"""
apply!([α=1,] [P=Direct,] A::Mapping, x, [scratch=false,] [β=0,] y) -> y
overwrites `y` with `α*P(A)⋅x + β*y` where `P ∈ Operations` can be `Direct`,
`Adjoint`, `Inverse` and/or `InverseAdjoint` to indicate which variant of the
mapping `A` to apply. The convention is that the prior contents of `y` is not
used at all if `β = 0` so `y` can be directly used to store the result even
though it is not initialized. The `scratch` optional argument indicates whether
the input `x` is no longer needed by the caller and can thus be used as a
scratch array. Having `scratch = true` or `β = 0` may be exploited by the
specific implementation of the `apply!` method for the mapping type to avoid
allocating temporary workspace(s).
The `apply!` method can be seen as a generalization of the `LinearAlgebra.mul!`
method.
The order of arguments can be changed and the same result as above is obtained
with:
apply!([β=0,] y, [α=1,] [P=Direct,] A::Mapping, x, scratch=false) -> y
The result `y` may have been allocated by:
y = vcreate(P, A, x, scratch=false)
or by:
y = vcreate(A, x, scratch=false)
if `P` is not specified.
Mapping sub-types only need to extend `vcreate` and `apply!` with the specific
signatures:
vcreate(::Type{P}, A::M, x, scratch::Bool=false) -> y
apply!(α::Number, ::Type{P}, A::M, x, scratch::Bool, β::Number, y) -> y
for any supported operation `P` and where `M` is the type of the mapping. Of
course, the types of arguments `x` and `y` may be specified as well.
Optionally, the method with signature:
apply(::Type{P}, A::M, x, scratch::Bool=false) -> y
may also be extended to improve the default implementation which is:
apply(P::Type{<:Operations}, A::Mapping, x, scratch::Bool=false) =
apply!(1, P, A, x, scratch, 0, vcreate(P, A, x, scratch))
See also: [`Mapping`](@ref), [`apply`](@ref), [`vcreate`](@ref).
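For example (with `α` and `β` scalars and `A`, `x` and `y` compatible):
```julia
apply!(y, A, x)          # y = A*x
apply!(α, A, x, β, y)    # y = α*A*x + β*y
apply!(y, Adjoint, A, x) # y = A'*x
```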
""" apply!
# Provide fallbacks so that `Direct` is the default operation and only the
# method with signature:
#
# apply!(α::Number, ::Type{P}, A::MappingType, x::X, scratch::Bool,
# β::Number, y::Y) where {P<:Operations,X,Y}
#
# has to be implemented (possibly with restrictions on X and Y) by subtypes of
# Mapping so we provide the necessary mechanism to dispatch derived methods.
apply!(A::Mapping, x, y) =
apply!(1, Direct, A, x, false, 0, y)
apply!(α::Number, A::Mapping, x, y) =
apply!(α, Direct, A, x, false, 0, y)
apply!(A::Mapping, x, β::Number, y) =
apply!(1, Direct, A, x, false, β, y)
apply!(α::Number, A::Mapping, x, β::Number, y) =
apply!(α, Direct, A, x, false, β, y)
apply!(P::Type{<:Operations}, A::Mapping, x, y) =
apply!(1, P, A, x, false, 0, y)
apply!(α::Number, P::Type{<:Operations}, A::Mapping, x, y) =
apply!(α, P, A, x, false, 0, y)
apply!(P::Type{<:Operations}, A::Mapping, x, β::Number, y) =
apply!(1, P, A, x, false, β, y)
apply!(α::Number, P::Type{<:Operations}, A::Mapping, x, β::Number, y) =
apply!(α, P, A, x, false, β, y)
apply!(A::Mapping, x, scratch::Bool, y) =
apply!(1, Direct, A, x, scratch, 0, y)
apply!(α::Number, A::Mapping, x, scratch::Bool, y) =
apply!(α, Direct, A, x, scratch, 0, y)
apply!(A::Mapping, x, scratch::Bool, β::Number, y) =
apply!(1, Direct, A, x, scratch, β, y)
apply!(P::Type{<:Operations}, A::Mapping, x, scratch::Bool, y) =
apply!(1, P, A, x, scratch, 0, y)
apply!(α::Number, P::Type{<:Operations}, A::Mapping, x, scratch::Bool, y) =
apply!(α, P, A, x, scratch, 0, y)
apply!(P::Type{<:Operations}, A::Mapping, x, scratch::Bool, β::Number, y) =
apply!(1, P, A, x, scratch, β, y)
# Change order of arguments.
apply!(y, A::Mapping, x, scratch::Bool=false) =
apply!(1, Direct, A, x, scratch, 0, y)
apply!(y, P::Type{<:Operations}, A::Mapping, x, scratch::Bool=false) =
apply!(1, P, A, x, scratch, 0, y)
apply!(y, α::Number, A::Mapping, x, scratch::Bool=false) =
apply!(α, Direct, A, x, scratch, 0, y)
apply!(y, α::Number, P::Type{<:Operations}, A::Mapping, x, scratch::Bool=false) =
apply!(α, P, A, x, scratch, 0, y)
apply!(β::Number, y, A::Mapping, x, scratch::Bool=false) =
apply!(1, Direct, A, x, scratch, β, y)
apply!(β::Number, y, P::Type{<:Operations}, A::Mapping, x, scratch::Bool=false) =
apply!(1, P, A, x, scratch, β, y)
apply!(β::Number, y, α::Number, A::Mapping, x, scratch::Bool=false) =
apply!(α, Direct, A, x, scratch, β, y)
apply!(β::Number, y, α::Number, P::Type{<:Operations}, A::Mapping, x, scratch::Bool=false) =
apply!(α, P, A, x, scratch, β, y)
# Extend `LinearAlgebra.mul!` so that `A'*x`, `A*B*C*x`, etc. yield the
# expected result. FIXME: This should be restricted to linear mappings but
# this is not possible without overheads.
mul!(y, A::Mapping, x) = apply!(1, Direct, A, x, false, 0, y)
mul!(y, A::Mapping, x, α::Number, β::Number) =
apply!(α, Direct, A, x, false, β, y)
# Implementation of the `apply!(α,P,A,x,scratch,β,y)` and
# `vcreate(P,A,x,scratch)` methods for a scaled mapping.
for (P, expr) in ((:Direct, :(α*multiplier(A))),
(:Adjoint, :(α*conj(multiplier(A)))),
(:Inverse, :(α/multiplier(A))),
(:InverseAdjoint, :(α/conj(multiplier(A)))))
@eval begin
apply!(α::Number, ::Type{$P}, A::Scaled, x, scratch::Bool, β::Number, y) =
apply!($expr, $P, unscaled(A), x, scratch, β, y)
end
end
"""
overwritable(scratch, x, y) -> bool
yields whether the result `y` of applying a mapping to `x` with scratch flag
`scratch` can be overwritten. Arguments `x` and `y` can be reversed.
"""
overwritable(scratch::Bool, x, y) = (scratch || x !== y)
# Implement `apply` for scaled operators to avoid the need of explicitly
# calling `vcreate` as done by the default implementation of `apply`. This is
# needed for scaled compositions among others.
function apply(::Type{Direct}, A::Scaled, x, scratch::Bool)
y = apply(Direct, unscaled(A), x, scratch)
vscale!((overwritable(scratch, x, y) ? y : vcopy(y)), multiplier(A))
end
function apply(::Type{Adjoint}, A::Scaled, x, scratch::Bool)
    y = apply(Adjoint, unscaled(A), x, scratch)
    vscale!((overwritable(scratch, x, y) ? y : vcopy(y)), conj(multiplier(A)))
end
function apply(::Type{Inverse}, A::Scaled, x, scratch::Bool)
    y = apply(Inverse, unscaled(A), x, scratch)
    vscale!((overwritable(scratch, x, y) ? y : vcopy(y)), 1/multiplier(A))
end
function apply(::Type{InverseAdjoint}, A::Scaled, x, scratch::Bool)
    y = apply(InverseAdjoint, unscaled(A), x, scratch)
    vscale!((overwritable(scratch, x, y) ? y : vcopy(y)), 1/conj(multiplier(A)))
end
vcreate(P::Type{<:Operations}, A::Scaled, x, scratch::Bool) =
vcreate(P, unscaled(A), x, scratch)
# Implementation of the `vcreate(P,A,x,scratch)` and
# `apply!(α,P,A,x,scratch,β,y)` methods for the various decorations of a
# mapping so as to automatically unveil the embedded mapping.
for (T1, T2, T3) in ((:Direct, :Adjoint, :Adjoint),
(:Adjoint, :Adjoint, :Direct),
(:Inverse, :Adjoint, :InverseAdjoint),
(:InverseAdjoint, :Adjoint, :Inverse),
(:Direct, :Inverse, :Inverse),
(:Adjoint, :Inverse, :InverseAdjoint),
(:Inverse, :Inverse, :Direct),
(:InverseAdjoint, :Inverse, :Adjoint),
(:Direct, :InverseAdjoint, :InverseAdjoint),
(:Adjoint, :InverseAdjoint, :Inverse),
(:Inverse, :InverseAdjoint, :Adjoint),
(:InverseAdjoint, :InverseAdjoint, :Direct))
@eval begin
vcreate(::Type{$T1}, A::$T2, x, scratch::Bool) =
vcreate($T3, unveil(A), x, scratch)
apply!(α::Number, ::Type{$T1}, A::$T2, x, scratch::Bool, β::Number, y) =
apply!(α, $T3, unveil(A), x, scratch, β, y)
end
end
# Implementation of the `vcreate(P,A,x,scratch)` and
# `apply!(α,P,A,x,scratch,β,y)` methods for a sum of mappings. Note that
# `Sum` instances are guaranteed to have at least 2 components.
function vcreate(::Type{P}, A::Sum, x,
scratch::Bool) where {P<:Union{Direct,Adjoint}}
    # The sum only makes sense if all mappings yield the same kind of result.
# Hence we just call the vcreate method for the first mapping of the sum.
vcreate(P, A[1], x, scratch)
end
function apply!(α::Number, P::Type{<:Union{Direct,Adjoint}}, A::Sum{N},
x, scratch::Bool, β::Number, y) where {N}
if α == 0
# Just scale the destination.
vscale!(y, β)
else
# Apply first mapping with β and then other with β=1. Scratch flag is
# always false until last mapping because we must preserve x as there
# is more than one term.
apply!(α, P, A[1], x, false, β, y)
for i in 2:N
apply!(α, P, A[i], x, (scratch && i == N), 1, y)
end
end
return y
end
vcreate(::Type{<:Union{Inverse,InverseAdjoint}}, A::Sum, x, scratch::Bool) =
throw_unsupported_inverse_of_sum()
apply(::Type{<:Union{Inverse,InverseAdjoint}}, A::Sum, x, scratch::Bool) =
throw_unsupported_inverse_of_sum()
function apply!(α::Number, ::Type{<:Union{Inverse,InverseAdjoint}}, A::Sum,
x, scratch::Bool, β::Number, y)
throw_unsupported_inverse_of_sum()
end
throw_unsupported_inverse_of_sum() =
error("automatic dispatching of the inverse of a sum of mappings is not supported")
# Implementation of the `apply!(α,P,A,x,scratch,β,y)` method for a composition
# of mappings. There is no possible `vcreate(P,A,x,scratch)` method for a
# composition so we directly extend the `apply(P,A,x,scratch)` method. Note
# that `Composition` instances are guaranteed to have at least 2 components.
#
# The unrolled code (taking care of allowing as few temporaries as possible and
# for the Direct or InverseAdjoint operation) writes:
#
# w1 = apply(P, A[N], x, scratch)
# scratch = overwritable(scratch, x, w1)
#     w2 = apply(P, A[N-1], w1, scratch)
#     scratch = overwritable(scratch, w1, w2)
#     w3 = apply(P, A[N-2], w2, scratch)
# scratch = overwritable(scratch, w2, w3)
# ...
# return apply!(α, P, A[1], wNm1, scratch, β, y)
#
# To break the type barrier, this is done by a recursion. The recursion is
# just done in the other direction for the Adjoint or Inverse operation.
function vcreate(::Type{<:Operations},
A::Composition{N}, x, scratch::Bool) where {N}
error("it is not possible to create the output of a composition of mappings")
end
function apply!(α::Number, ::Type{P}, A::Composition{N}, x, scratch::Bool,
β::Number, y) where {N,P<:Union{Direct,InverseAdjoint}}
if α == 0
# Just scale the destination.
vscale!(y, β)
else
ops = terms(A)
w = apply(P, *, ops[2:N], x, scratch)
scratch = overwritable(scratch, w, x)
apply!(α, P, ops[1], w, scratch, β, y)
end
return y
end
function apply(::Type{P}, A::Composition{N}, x,
scratch::Bool) where {N,P<:Union{Direct,InverseAdjoint}}
apply(P, *, terms(A), x, scratch)
end
function apply(::Type{P}, ::typeof(*), ops::NTuple{N,Mapping}, x,
scratch::Bool) where {N,P<:Union{Direct,InverseAdjoint}}
w = apply(P, ops[N], x, scratch)
N == 1 && return w
scratch = overwritable(scratch, w, x)
apply(P, *, ops[1:N-1], w, scratch)
end
function apply!(α::Number, ::Type{P}, A::Composition{N}, x, scratch::Bool,
β::Number, y) where {N,P<:Union{Adjoint,Inverse}}
if α == 0
# Just scale the destination.
vscale!(y, β)
else
ops = terms(A)
w = apply(P, *, ops[1:N-1], x, scratch)
scratch = overwritable(scratch, w, x)
apply!(α, P, ops[N], w, scratch, β, y)
end
return y
end
function apply(::Type{P}, A::Composition{N}, x,
scratch::Bool) where {N,P<:Union{Adjoint,Inverse}}
apply(P, *, terms(A), x, scratch)
end
function apply(::Type{P}, ::typeof(*), ops::NTuple{N,Mapping}, x,
scratch::Bool) where {N,P<:Union{Adjoint,Inverse}}
w = apply(P, ops[1], x, scratch)
N == 1 && return w
scratch = overwritable(scratch, w, x)
apply(P, *, ops[2:N], w, scratch)
end
# Default rules to apply a Gram operator. Gram matrices are Hermitian by
# construction, which leaves only 2 cases to deal with.
apply!(α::Number, ::Type{Adjoint}, A::Gram, x, scratch::Bool, β::Number, y) =
apply!(α, Direct, A, x, scratch, β, y)
apply!(α::Number, ::Type{InverseAdjoint}, A::Gram, x, scratch::Bool, β::Number, y) =
apply!(α, Inverse, A, x, scratch, β, y)
function apply!(α::Number, ::Type{Direct}, A::Gram, x, scratch::Bool, β::Number, y)
if α == 0
vscale!(y, β)
else
B = unveil(A) # A ≡ B'*B
z = apply(Direct, B, x, scratch) # z <- B⋅x
apply!(α, Adjoint, B, z, (z !== x), β, y) # y <- α⋅B'⋅z + β⋅y
end
return y
end
function apply!(α::Number, ::Type{Inverse}, A::Gram, x, scratch::Bool, β::Number, y)
if α == 0
vscale!(y, β)
else
B = unveil(A) # A ≡ B'⋅B
# Compute α⋅inv(A)⋅x + β⋅y = α⋅inv(B'⋅B)⋅x + β⋅y
# = α⋅inv(B)⋅inv(B')⋅x + β⋅y
z = apply(InverseAdjoint, B, x, scratch) # z <- inv(B')⋅x
apply!(α, Inverse, B, z, (z !== x), β, y) # y <- α⋅inv(B)⋅z + β⋅y
end
return y
end
# A Gram operator is self-adjoint by construction and yields result of same
# kind as input.
vcreate(::Type{<:Operations}, ::Gram, x, scratch::Bool) = vcreate(x)
#
# rules.jl -
#
# Implement rules for automatically simplifying expressions involving mappings.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2021 Éric Thiébaut.
#
#------------------------------------------------------------------------------
# NEUTRAL ELEMENTS
# The neutral element ("zero") for the addition is zero times a mapping of the
# proper type.
zero(A::Mapping) = 0*A
iszero(A::Scaled) = iszero(multiplier(A))
iszero(::Mapping) = false
# The neutral element ("one") for the composition is the identity.
const Id = Identity()
one(::Mapping) = Id
isone(::Identity) = true
isone(::Mapping) = false
#------------------------------------------------------------------------------
# UNQUALIFIED OUTER CONSTRUCTORS
# Unqualified outer constructors are provided which call the corresponding
# inner constructors with all suitable parameters and rely on the inner
# constructors to check whether the call was allowed or not. A constraint that
# must hold is that T(A), with T an unqualified type constructor, always yields
# an instance of T.
Direct(A::Mapping) = A # provided for completeness
Adjoint(A::T) where {T<:Mapping} = Adjoint{T}(A)
Inverse(A::T) where {T<:Mapping} = Inverse{T}(A)
InverseAdjoint(A::T) where {T<:Mapping} = InverseAdjoint{T}(A)
Gram(A::T) where {T<:Mapping} = Gram{T}(A)
Jacobian(A::M, x::T) where {M<:Mapping,T} = Jacobian{M,T}(A, x)
Scaled(α::S, A::T) where {S<:Number,T<:Mapping} = Scaled{T,S}(α, A)
Sum(ops::Mapping...) = Sum(ops)
Sum(ops::T) where {N,T<:NTuple{N,Mapping}} = Sum{N,T}(ops)
Composition(ops::Mapping...) = Composition(ops)
Composition(ops::T) where {N,T<:NTuple{N,Mapping}} = Composition{N,T}(ops)
# Qualified outer constructors to forbid decoration of mappings of specific
# types when, according to the simplification rules, a simpler
# construction should be built instead. Everything not forbidden is allowed
# except that additional tests may be performed by the inner constructors
# (e.g., Adjoint check that its argument is linear).
#
# FIXME: Some restrictions may be bad ideas like adjoint of sums or
# compositions.
for (func, blacklist) in ((:Adjoint, (:Identity,
:Adjoint,
:Inverse,
:InverseAdjoint,
:Scaled,
:Sum,
:Composition)),
(:Inverse, (:Identity,
:Adjoint,
:Inverse,
:InverseAdjoint,
:Scaled,
:Composition)),
(:InverseAdjoint, (:Identity,
:Adjoint,
:Inverse,
:InverseAdjoint,
:Scaled,
:Composition)),
(:Gram, (:Inverse,
:InverseAdjoint,
:Scaled,)),
(:Jacobian, (:Scaled,)),
(:Scaled, (:Scaled,)))
for T in blacklist
if func === :Scaled
@eval $func{T,S}(α::S, A::T) where {S<:Number,T<:$T} =
illegal_call_to($func, T)
elseif func === :Jacobian
@eval $func{M,T}(A::M, x::T) where {M<:$T,T} =
illegal_call_to($func, M)
else
@eval $func{T}(A::T) where {T<:$T} = illegal_call_to($func, T)
end
end
end
@noinline illegal_call_to(::Type{Adjoint}, T::Type) =
bad_argument("the `Adjoint` constructor cannot be applied to an instance of `",
brief(T), "`, use expressions like `A'` or `adjoint(A)`")
@noinline illegal_call_to(::Type{Inverse}, T::Type) =
bad_argument("the `Inverse` constructor cannot be applied to an instance of `",
brief(T), "`, use expressions like `A\\B`, `A/B` or `inv(A)`")
@noinline illegal_call_to(::Type{InverseAdjoint}, T::Type) =
bad_argument("the `InverseAdjoint` constructor cannot be applied to an instance of `",
brief(T), "`, use expressions like `A'\\B`, `A/(B')`, `inv(A')` or `inv(A)'`")
@noinline illegal_call_to(::Type{Gram}, T::Type) =
bad_argument("the `Gram` constructor cannot be applied to an instance of `",
brief(T), "`, use expressions like `A'*A` or `gram(A)`")
@noinline illegal_call_to(::Type{Jacobian}, T::Type) =
bad_argument("the `Jacobian` constructor cannot be applied to an instance of `",
brief(T), "`, use an expression like `∇(A,x)`")
@noinline illegal_call_to(::Type{Scaled}, T::Type) =
bad_argument("the `Scaled` constructor cannot be applied to an instance of `",
brief(T), "`, use expressions like `α*A`")
brief(::Type{<:Adjoint} ) = "Adjoint"
brief(::Type{<:Inverse} ) = "Inverse"
brief(::Type{<:InverseAdjoint}) = "InverseAdjoint"
brief(::Type{<:Gram} ) = "Gram"
brief(::Type{<:Jacobian} ) = "Jacobian"
brief(::Type{<:Scaled} ) = "Scaled"
brief(::Type{<:Sum} ) = "Sum"
brief(::Type{<:Composition} ) = "Composition"
brief(::Type{<:Identity} ) = "Identity"
brief(T::Type) = repr(T)
#------------------------------------------------------------------------------
# SCALED TYPE
# Left-multiplication and left-division by a scalar. The only way to
# right-multiply or right-divide a mapping by a scalar is to right multiply or
# divide it by the scaled identity.
*(α::Number, A::Mapping) = (α == 1 ? A : Scaled(α, A))
*(α::Number, A::Scaled) = (α*multiplier(A))*unscaled(A)
\(α::Number, A::Mapping) = inv(α)*A
\(α::Number, A::Scaled) = (multiplier(A)/α)*unscaled(A)
/(α::Number, A::Mapping) = α*inv(A)
#------------------------------------------------------------------------------
# ADJOINT TYPE
# Adjoint for non-specific mappings.
adjoint(A::Mapping) = _adjoint(LinearType(A), A)
_adjoint(::Linear, A::Mapping) = _adjoint(Linear(), SelfAdjointType(A), A)
_adjoint(::Linear, ::SelfAdjoint, A::Mapping) = A
_adjoint(::Linear, ::NonSelfAdjoint, A::Mapping) = Adjoint(A)
_adjoint(::NonLinear, A::Mapping) =
throw_forbidden_adjoint_of_non_linear_mapping()
# Adjoint for specific mapping types.
adjoint(A::Identity) = Id
adjoint(A::Scaled) = conj(multiplier(A))*adjoint(unscaled(A))
adjoint(A::Adjoint) = unveil(A)
adjoint(A::Inverse) = inv(adjoint(unveil(A)))
adjoint(A::InverseAdjoint) = inv(unveil(A))
adjoint(A::Jacobian) = Jacobian(A)
adjoint(A::Gram) = A
adjoint(A::Composition) =
# It is assumed that the composition has already been simplified, so we
# just apply the mathematical formula for the adjoint of a composition.
Composition(reversemap(adjoint, terms(A)))
function adjoint(A::Sum{N}) where {N}
# It is assumed that the sum has already been simplified, so we just apply
# the mathematical formula for the adjoint of a sum and sort the resulting
# terms.
B = Vector{Mapping}(undef, N)
@inbounds for i in 1:N
B[i] = adjoint(A[i])
end
return Sum(to_tuple(sort!(B)))
end
@noinline throw_forbidden_adjoint_of_non_linear_mapping() =
bad_argument("taking the adjoint of non-linear mappings is not allowed")
#------------------------------------------------------------------------------
# JACOBIAN
"""
∇(A, x)
yields a result corresponding to the Jacobian (first partial derivatives) of
the mapping `A` for the variables `x`. If `A` is a linear mapping,
`A` is returned whatever `x`.
The call
jacobian(A, x)
is an alias for `∇(A,x)`.
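For example:
```julia
∇(Id, x) # -> Id (a linear mapping is its own Jacobian)
```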
"""
∇(A::Mapping, x) = jacobian(A, x)
jacobian(A::Mapping, x) = _jacobian(LinearType(A), A, x)
_jacobian(::Linear, A::Mapping, x) = A
_jacobian(::NonLinear, A::Mapping, x) = Jacobian(A, x)
jacobian(A::Scaled, x) = multiplier(A)*jacobian(unscaled(A), x)
@doc @doc(∇) jacobian
#------------------------------------------------------------------------------
# INVERSE TYPE
# Inverse for non-specific mappings (a simple mapping or a sum of mappings).
inv(A::T) where {T<:Mapping} = Inverse{T}(A)
# Inverse for specific mapping types.
inv(A::Identity) = Id
inv(A::Scaled) = (is_linear(unscaled(A)) ?
inv(multiplier(A))*inv(unscaled(A)) :
inv(unscaled(A))*(inv(multiplier(A))*Id))
inv(A::Inverse) = unveil(A)
inv(A::InverseAdjoint) = adjoint(unveil(A))
inv(A::Adjoint) = InverseAdjoint(unveil(A))
inv(A::Composition) =
# Even though the composition has already been simplified, taking the
# inverse may trigger other simplifications, so we must rebuild the
# composition term by term in reverse order (i.e. applying the mathematical
# formula for the inverse of a composition).
_merge_inv_mul(terms(A))
# `_merge_inv_mul([A,i,]B)` is recursively called to build the inverse of a
# composition. Argument A is a mapping (initially not specified or the
# identity) of the resulting composition, argument `i` is the index of the next
# component to take (initially not specified or set to `N` the number of
# terms), argument `B` is a tuple (initially full) of the remaining terms.
_merge_inv_mul(B::NTuple{N,Mapping}) where {N} =
# Initialize recursion.
_merge_inv_mul(inv(last(B)), N - 1, B)
function _merge_inv_mul(A::Mapping, i::Int, B::NTuple{N,Mapping}) where {N}
# Perform intermediate and last recursion step.
C = A*inv(B[i])
return (i > 1 ? _merge_inv_mul(C, i - 1, B) : C)
end
#------------------------------------------------------------------------------
# SUM OF MAPPINGS
# Unary minus and unary plus.
-(A::Mapping) = (-1)*A
-(A::Scaled) = (-multiplier(A))*unscaled(A)
+(A::Mapping) = A
# Subtraction.
-(A::Mapping, B::Mapping) = A + (-B)
# Simplify the sum of two mappings.
+(A::Mapping, B::Mapping) = add(A, B)
"""
add(A, B)
performs the final stage of simplifying the sum `A + B` of mappings `A` and
`B`. This method assumes that any other simplifications than those involving
sums of sums have been performed on `A + B` and that `A` and `B` have already
been simplified (individually).
Trivial simplifications of the sum `A + B` of mappings `A` and `B` must be
done by specializing the operator `+`; `add(A,B)` may eventually be called to
simplify the sum of `A` and `B` when at least one of these is a sum. This
method just returns `Sum(A,B)` (or `Sum(B,A)`, terms being sorted by
identifier) when neither `A` nor `B` is a sum.
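For example (with `A` and `B` two distinct simple mappings):
```julia
A + A     # -> 2A
2A + 3A   # -> 5A
A + B - A # -> B
```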
"""
add(A::Sum, B::Mapping) = _add(A, B)
add(A::Mapping, B::Sum ) = _add(A, B)
add(A::Sum, B::Sum ) = _add(A, B)
add(A::Mapping, B::Mapping) = begin
# Neither `A` nor `B` is a sum.
if identical(unscaled(A), unscaled(B))
return (multiplier(A) + multiplier(B))*unscaled(A)
elseif identifier(A) ≤ identifier(B)
return Sum(A, B)
else
return Sum(B, A)
end
end
_add(A::Mapping, B::Mapping) = begin
V = add!(as_vector(+, A), B)
length(V) == 1 ? V[1] : Sum(to_tuple(V))
end
# Add the terms of a sum one-by-one. Since terms must be re-ordered, there is
# no obviously better way to recombine them.
function add!(A::Vector{Mapping}, B::Sum{N}) where {N}
@inbounds for i in 1:N
add!(A, B[i])
end
return A
end
function add!(A::Vector{Mapping}, B::Mapping)
# Nothing to do if B is zero times anything.
multiplier(B) == 0 && return A
# If exact match found, update A in-place and return.
n = length(A)
@inbounds for i in 1:n
if identical(unscaled(A[i]), unscaled(B))
λ = multiplier(A[i]) + multiplier(B)
if λ == 1
A[i] = unscaled(B)
elseif λ != 0
A[i] = λ*unscaled(B)
else
# Multiplier is zero. Drop term if there are other terms
# or keep the single term times zero.
if n > 1
for j in i:n-1
A[j] = A[j+1]
end
resize!(A, n - 1)
else
A[1] = 0*unscaled(A[1])
end
end
return A
end
end
# If no exact match found, insert B in A in order.
id = identifier(B)
i = 1
while i ≤ n && @inbounds(identifier(A[i])) < id
i += 1
end
resize!(A, n + 1)
@inbounds for j in n:-1:i
A[j+1] = A[j]
end
A[i] = B
return A
end
#------------------------------------------------------------------------------
# COMPOSITION OF MAPPINGS
# Left and right divisions.
\(A::Mapping, B::Mapping) = inv(A)*B
/(A::Mapping, B::Mapping) = A*inv(B)
# Dot operator (\cdot + tab) involving a mapping acts as the multiply or
# compose operator.
⋅(A::Mapping, B::Mapping) = A*B
⋅(A::Mapping, B::Any ) = A*B
⋅(A::Any, B::Mapping) = A*B
# Compose operator (\circ + tab) between mappings.
∘(A::Mapping, B::Mapping) = A*B
# Rules for the composition of 2 mappings. Mappings that may behave
# specifically in a composition have type `Identity`, `Scaled` and
# `Composition`; all others have the same behavior.
# Composition with identity.
*(::Identity, ::Identity) = Id
for T in (Scaled, Composition, Sum, Mapping)
@eval begin
*(::Identity, A::$T) = A
*(A::$T, ::Identity) = A
end
end
# Simplify the composition of two mappings (including compositions).
*(A::Mapping, B::Mapping) = compose(A, B)
# Simplify compositions involving a scaled mapping.
*(A::Scaled, B::Mapping) = multiplier(A)*(unscaled(A)*B)
*(A::Mapping, B::Scaled) =
if is_linear(A)
multiplier(B)*(A*unscaled(B))
else
compose(A, B)
end
*(A::Scaled, B::Scaled) =
if is_linear(A)
(multiplier(A)*multiplier(B))*(unscaled(A)*unscaled(B))
else
multiplier(A)*(unscaled(A)*B)
end
# Simplify compositions involving an inverse mapping.
*(A::Inverse{T}, B::T) where {T<:Mapping} =
identical(unveil(A), B) ? Id : compose(A, B)
*(A::T, B::Inverse{T}) where {T<:Mapping} =
identical(A, unveil(B)) ? Id : compose(A, B)
*(A::Inverse, B::Inverse) = compose(A, B)
*(A::InverseAdjoint{T}, B::Adjoint{T}) where {T<:Mapping} =
identical(unveil(A), unveil(B)) ? Id : compose(A, B)
*(A::Adjoint{T}, B::InverseAdjoint{T}) where {T<:Mapping} =
identical(unveil(A), unveil(B)) ? Id : compose(A, B)
*(A::InverseAdjoint, B::InverseAdjoint) = compose(A, B)
# Automatically build Gram operators, Gram(A) ≡ A'*A. The following automatic
# rules are implemented (for an "allowed" linear mapping A):
#
# A'*A -> Gram(A)
# A*A' -> Gram(A')
# inv(A)*inv(A') -> inv(A'*A) -> inv(Gram(A))
# inv(A')*inv(A) -> inv(A*A') -> inv(Gram(A'))
#
# other rules implemented elsewhere:
#
# Gram(inv(A)) -> inv(Gram(A'))
# Gram(inv(A')) -> inv(Gram(A))
#
# In principle, if forming the adjoint has been allowed, it is not needed to
# check whether operands are linear mappings.
*(A::Adjoint{T}, B::T) where {T<:Mapping} =
identical(unveil(A), B) ? Gram(B) : compose(A, B)
*(A::T, B::Adjoint{T}) where {T<:Mapping} =
identical(A, unveil(B)) ? Gram(B) : compose(A, B)
*(A::Inverse{T}, B::InverseAdjoint{T}) where {T<:Mapping} =
identical(unveil(A), unveil(B)) ? Inverse(Gram(unveil(A))) :
compose(A, B)
*(A::InverseAdjoint{T}, B::Inverse{T}) where {T<:Mapping} =
identical(unveil(A), unveil(B)) ?
Inverse(Gram(Adjoint(unveil(A)))) : compose(A, B)
"""
compose(A,B)
performs the final stage of simplifying the composition `A*B` of mappings `A`
and `B`. This method assumes that any other simplifications than those
involving composition of compositions have been performed on `A*B` and that `A`
and `B` have already been simplified (individually).
Trivial simplifications of the composition `A*B` of mappings `A` and `B` must
be done by specializing the operator `*` and `compose(A,B)` may eventually be
called to simplify the composition of `A` and `B` when one of these may be a
composition. This method just returns `Composition(A,B)` when neither `A` nor
`B` is a composition.
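For example (with `A` a linear mapping and assuming no other simplification
applies):
```julia
A*Id     # -> A
inv(A)*A # -> Id (both must refer to the same instance of A)
A'*A     # -> Gram(A)
```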
""" compose
# Compose two mappings when at least one is a composition or when none is a
# composition.
compose(A::Composition, B::Composition) = _compose(A, B)
compose(A::Composition, B::Mapping ) = _compose(A, B)
compose(A::Mapping, B::Composition) = _compose(A, B)
compose(A::Mapping, B::Mapping ) = Composition(A, B)
_compose(A::Mapping, B::Mapping) = begin
C = compose!(as_vector(*, A), B)
n = length(C)
return (n == 0 ? Id :
n == 1 ? C[1] :
Composition(to_tuple(C)))
end
"""
compose!(A, B) -> A
overwrites `A` with a simplified composition of a left operand `A` and a right
operand `B`. The left operand is a composition of (zero or more) mappings
whose terms are stored in the vector of mappings `A` (if `A` is empty, the left
operand is assumed to be the identity). On return, the vector `A` is modified
to store the terms of the simplified composition of `A` and `B`. The left
operand `B` may be itself a composition (as an instance of
`LazyAlgebra.Composition` or as a vector of mappings) or any other kind of
mapping.
""" compose!
function compose!(A::Vector{Mapping}, B::Composition{N}) where {N}
@inbounds for i in 1:N
# Build the simplified composition A*B[i].
compose!(A, B[i])
if identical(last(A), B[i])
# The last term of the simplified composition A*B[i] is still B[i],
# which indicates that composing A with B[i] did not yield any
# simplifications. It is sufficient to append all the other terms
# of B to A as no further simplifications are expected.
return append_terms!(A, B, (i+1):N)
end
end
return A
end
function compose!(A::Vector{Mapping}, B::Mapping)
# Compute the simplified composition of the last term of A with B. The
# result is either a simple mapping or a simplified composition.
m = length(A); @certify m > 0
C = A[m]*B
# Replace the last term of A with C.
if C isa Composition && identical(C[1], A[m])
# Nothing has changed at the tail of the composition A. No further
# simplifications are expected. Push all terms of C to A, but the
# first term of C which is identical to the last term of A.
append_terms!(A, C, 2:length(C))
elseif m > 1
        # Drop the last term of A and compose the remaining terms with C. This
        # may trigger further simplifications.
compose!(resize!(A, m - 1), C)
elseif C isa Composition
# Replace the only term of A by all the terms of the composition C.
# This is the same as above but avoids calling `resize!` as a small
# optimization.
A[1] = C[1]
append_terms!(A, C, 2:length(C))
else
# Replace the only term of A by the simple mapping C.
A[1] = C
end
return A
end
#------------------------------------------------------------------------------
# UTILITIES FOR BUILDING SUMS AND COMPOSITIONS
"""
as_vector(op, A)
yields a vector of mappings with the terms of the mapping `A`. Argument `op`
is `+` or `*`. If `op` is `+` (resp. `*`) and `A` is a sum (resp. a
composition) of mappings, the terms of `A` are extracted in the returned
vector; otherwise, the returned vector has just one element which is `A`.
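For example (with `A` and `B` two mappings whose sum does not simplify):
```julia
as_vector(+, A + B) # -> Mapping[A, B] (terms in sorted order)
as_vector(*, A)     # -> Mapping[A]
```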
"""
function as_vector(::Union{typeof(+),typeof(*)}, A::Mapping)
V = Vector{Mapping}(undef, 1)
V[1] = A
return V
end
as_vector(::typeof(+), A::Sum ) = collect_terms(A)
as_vector(::typeof(*), A::Composition) = collect_terms(A)
"""
collect_terms(A)
collects the terms of the mapping `A` into a vector. This is similar to
`collect(A)` except that the element type of the result is forced to be
`Mapping`.
"""
function collect_terms(A::Union{Sum{N},Composition{N}}) where {N}
V = Vector{Mapping}(undef, N)
@inbounds for i in 1:N
V[i] = A[i]
end
return V
end
"""
append_terms!(A, B, I=1:length(B)) -> A
pushes all terms `B[i]` for all `i ∈ I` to `A` and returns `A`.
"""
function append_terms!(A::Vector{Mapping},
B::Union{Vector{Mapping},Composition},
I::AbstractUnitRange{<:Integer} = Base.OneTo(length(B)))
imin, imax = Int(first(I)), Int(last(I))
(1 ≤ imin && imax ≤ length(B)) ||
bad_argument("out of bounds indices in given range")
if imin ≤ imax
m = length(A)
n = imax - imin + 1
resize!(A, m + n)
k = m + 1 - imin
@inbounds for i in I
A[k+i] = B[i]
end
end
return A
end
#
# sparse.jl --
#
# Implement various formats of compressed sparse linear operators. The purpose
# of this module is mainly to convert between different formats and to provide
# LazyAlgebra wrappers to apply the corresponding linear mappings. Julia's
# SparseArrays standard package only provides "Compressed Sparse Column" (CSC)
# format.
#
# See https://en.wikipedia.org/wiki/Sparse_matrix.
#
#------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (C) 2017-2020, Éric Thiébaut.
#
module SparseOperators
export
CompressedSparseOperator,
SparseOperator,
SparseOperatorCOO,
SparseOperatorCSC,
SparseOperatorCSR,
nrows,
ncols,
row_size,
col_size,
nonzeros,
nnz
using StructuredArrays
using ZippedArrays
import LinearAlgebra
using ..LazyAlgebra
using ..Foundations
using ..LazyAlgebra: @certify
import .LazyAlgebra:
MorphismType,
multiplier_type,
apply!,
vcreate,
identical,
coefficients,
row_size,
col_size,
nrows,
ncols,
input_ndims,
input_size,
output_ndims,
output_size
import SparseArrays
using SparseArrays: SparseMatrixCSC, nonzeros, nnz
if isdefined(SparseArrays, :AbstractSparseMatrixCSC)
const AbstractSparseMatrixCSC{Tv,Ti} =
SparseArrays.AbstractSparseMatrixCSC{Tv,Ti}
else
const AbstractSparseMatrixCSC{Tv,Ti} = SparseArrays.SparseMatrixCSC{Tv,Ti}
end
import Base: getindex, setindex!, iterate
using Base: @propagate_inbounds
#------------------------------------------------------------------------------
# Convert to integer type suitable for indexing.
to_int(i::Int) = i
to_int(i::Integer) = Int(i)
# Convert to vector of indices.
to_indices(inds::AbstractVector{<:Integer}) = to_values(Int, inds)
# Convert to vector of values with given element type and make sure it is a
# fast vector.
to_values(vals::AbstractVector{T}) where {T} = to_values(T, vals)
to_values(::Type{Any}, vals::AbstractVector{T}) where {T} = to_values(T, vals)
to_values(::Type{T}, vals::Vector{T}) where {T} = vals
to_values(::Type{T}, vals::AbstractVector) where {T} = convert(Vector{T}, vals)
@inline to_values(::Type{T}, vals::AbstractVector{T}) where {T} =
_to_values(T, vals, eachindex(vals))
@inline _to_values(::Type{T}, vals::AbstractVector, inds) where {T} =
convert(Vector{T}, vals) # Convert because not a fast vector.
@inline function _to_values(::Type{T}, vals::AbstractVector,
inds::AbstractUnitRange{Int}) where {T}
(first(inds) == 1 ? vals : convert(Vector{T}, vals))
end
# Union of types acceptable to define array size and methods to convert to
# canonical form.
const ArraySize = Union{Integer,Tuple{Vararg{Integer}}}
to_size(siz::Tuple{Vararg{Int}}) = siz
to_size(siz::Tuple{Vararg{Integer}}) = map(to_int, siz)
to_size(siz::Integer) = (to_int(siz),)
as_matrix(A::AbstractMatrix, nrows::Int, ncols::Int) = begin
@certify size(A) == (nrows, ncols)
return A
end
as_matrix(A::AbstractArray, nrows::Int, ncols::Int) =
reshape(A, (nrows, ncols))
#------------------------------------------------------------------------------
"""
SparseOperator{T,M,N}
is the abstract type inherited by sparse operator types. Parameter `T` is the
type of the elements. Parameters `M` and `N` are the number of dimensions of
the *rows* and of the *columns* respectively. Sparse operators are a
generalization of sparse matrices in the sense that they implement linear
mappings which can be applied to `N`-dimensional arguments to produce
`M`-dimensional results (as explained below). See [`GeneralMatrix`](@ref) for
a similar generalization but for *dense* matrices.
See [`CompressedSparseOperator`](@ref) for usage of sparse operators
implementing compressed storage formats.
"""
abstract type SparseOperator{T,M,N} <: LinearMapping end
"""
CompressedSparseOperator{F,T,M,N}
is an abstract sub-type of `SparseOperator{T,M,N}` and is inherited by the
concrete types implementing sparse operators with compressed storage in format
`F`.
Format `F` is specified as a symbol and can be:
- `:COO` for *Compressed Sparse Coordinate* storage format. This format is not
  the most efficient; it is mostly used as an intermediate for building a
sparse operator in one of the following formats.
- `:CSC` for *Compressed Sparse Column* storage format. This format is very
efficient for applying the adjoint of the sparse operator.
- `:CSR` for *Compressed Sparse Row* storage format. This format is very
efficient for directly applying the sparse operator.
To construct (or convert to) a sparse operator with compressed storage format
`F`, you can call:
CompressedSparseOperator{F}(args...; kwds...)
CompressedSparseOperator{F,T}(args...; kwds...)
CompressedSparseOperator{F,T,M}(args...; kwds...)
CompressedSparseOperator{F,T,M,N}(args...; kwds...)
where given parameters `T`, `M` and `N`, arguments `args...` and optional
keywords `kwds...` will be passed to the concrete constructor
[`SparseOperatorCOO`](@ref), [`SparseOperatorCSC`](@ref) or
[`SparseOperatorCSR`](@ref) corresponding to the format `F`.
It is possible to use a compressed sparse operator `A` as an iterator:
```julia
for (Aij,i,j) in A # simple but slow for CSR and CSC
...
end
```
to retrieve the values `Aij` and respective row `i` and column `j` indices for
all the entries stored in `A`. It is however more efficient to access them
according to their storage order which depends on the compressed format.
- If `A` is in CSC format:
```julia
using LazyAlgebra.SparseMethods
for j in each_col(A) # loop over column index
for k in each_off(A, j) # loop over structural non-zeros in this column
i = get_row(A, k) # get row index of entry
Aij = get_val(A, k) # get value of entry
end
end
```
- If `A` is in CSR format:
```julia
using LazyAlgebra.SparseMethods
for i in each_row(A) # loop over row index
for k in each_off(A, i) # loop over structural non-zeros in this row
j = get_col(A, k) # get column index of entry
Aij = get_val(A, k) # get value of entry
end
end
```
- If `A` is in COO format:
```julia
using LazyAlgebra.SparseMethods
for k in each_off(A)
i = get_row(A, k) # get row index of entry
j = get_col(A, k) # get column index of entry
Aij = get_val(A, k) # get value of entry
end
```
The low-level methods `each_row`, `each_col`, `each_off`, `get_row`, `get_col`
and `get_val` are not automatically exported by `LazyAlgebra`, this is the
purpose of the statement `using LazyAlgebra.SparseMethods`.
"""
abstract type CompressedSparseOperator{F,T,M,N} <: SparseOperator{T,M,N} end
struct SparseOperatorCSR{T,M,N,
V<:AbstractVector{T},
J<:AbstractVector{Int},
K<:AbstractVector{Int}
} <: CompressedSparseOperator{:CSR,T,M,N}
m::Int # equivalent number of rows of the operator
n::Int # number of columns of the operator
vals::V # values of entries
cols::J # linear column indices of entries
offs::K # row offsets in arrays of entries and column indices
rowsiz::NTuple{M,Int} # dimensions of rows
colsiz::NTuple{N,Int} # dimensions of columns
# An inner constructor is defined to prevent Julia from providing a simple
    # outer constructor; it is not meant to be called directly as it does not
# check whether arguments are correct.
function SparseOperatorCSR{T,M,N,V,J,K}(m::Int,
n::Int,
vals::V,
cols::J,
offs::K,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {
T,M,N,
V<:AbstractVector{T},
J<:AbstractVector{Int},
K<:AbstractVector{Int}}
new{T,M,N,V,J,K}(m, n, vals, cols, offs, rowsiz, colsiz)
end
end
struct SparseOperatorCSC{T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
K<:AbstractVector{Int}
} <: CompressedSparseOperator{:CSC,T,M,N}
m::Int # equivalent number of rows of the operator
n::Int # number of columns of the operator
vals::V # values of entries
rows::I # linear row indices of entries
    offs::K # column offsets in arrays of entries and row indices
rowsiz::NTuple{M,Int} # dimensions of rows
colsiz::NTuple{N,Int} # dimensions of columns
# An inner constructor is defined to prevent Julia from providing a simple
    # outer constructor; it is not meant to be called directly as it does not
# check whether arguments are correct.
function SparseOperatorCSC{T,M,N,V,I,K}(m::Int,
n::Int,
vals::V,
rows::I,
offs::K,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {
T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
K<:AbstractVector{Int}}
new{T,M,N,V,I,K}(m, n, vals, rows, offs, rowsiz, colsiz)
end
end
struct SparseOperatorCOO{T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
J<:AbstractVector{Int}
} <: CompressedSparseOperator{:COO,T,M,N}
m::Int # equivalent number of rows of the operator
n::Int # number of columns of the operator
vals::V # values of entries
rows::I # linear row indices of entries
cols::J # linear column indices of entries
rowsiz::NTuple{M,Int} # dimensions of rows
colsiz::NTuple{N,Int} # dimensions of columns
# An inner constructor is defined to prevent Julia from providing a simple
# outer constructor, it is not meant to be called directly as it does not
# check whether arguments are correct.
function SparseOperatorCOO{T,M,N,V,I,J}(m::Int,
n::Int,
vals::V,
rows::I,
cols::J,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {
T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
J<:AbstractVector{Int}}
new{T,M,N,V,I,J}(m, n, vals, rows, cols, rowsiz, colsiz)
end
end
# Unions of compressed sparse operators that can be considered as being in a
# given storage format.
const AnyCSR{T,M,N} = Union{CompressedSparseOperator{:CSR,T,M,N},
Adjoint{<:CompressedSparseOperator{:CSC,T,M,N}}}
const AnyCSC{T,M,N} = Union{CompressedSparseOperator{:CSC,T,M,N},
Adjoint{<:CompressedSparseOperator{:CSR,T,M,N}}}
const AnyCOO{T,M,N} = Union{CompressedSparseOperator{:COO,T,M,N},
Adjoint{<:CompressedSparseOperator{:COO,T,M,N}}}
#------------------------------------------------------------------------------
# Accessors and basic methods.
nrows(A::SparseOperator) = getfield(A, :m)
ncols(A::SparseOperator) = getfield(A, :n)
row_size(A::SparseOperator) = getfield(A, :rowsiz)
col_size(A::SparseOperator) = getfield(A, :colsiz)
output_size(A::SparseOperator) = row_size(A)
input_size(A::SparseOperator) = col_size(A)
output_ndims(A::SparseOperator{T,M,N}) where {T,M,N} = M
input_ndims(A::SparseOperator{T,M,N}) where {T,M,N} = N
Base.eltype(A::SparseOperator{T,M,N}) where {T,M,N} = T
Base.ndims(A::SparseOperator{T,M,N}) where {T,M,N} = M+N
Base.length(A::SparseOperator) = nrows(A)*ncols(A)
Base.size(A::SparseOperator) = (row_size(A)..., col_size(A)...)
# Use constructors to perform conversion (the first method is to resolve
# ambiguities).
Base.convert(::Type{T}, A::T) where {T<:SparseOperator} = A
Base.convert(::Type{T}, A) where {T<:SparseOperator} = T(A)
# FIXME: This cannot be considered as a *pure* trait as it does not only
# depend on the type of the object.
MorphismType(A::SparseOperator) =
(row_size(A) == col_size(A) ? Endomorphism() : Morphism())
coefficients(A::SparseOperator) = get_vals(A)
identical(A::T, B::T) where {T<:CompressedSparseOperator{:CSR}} =
(get_vals(A) === get_vals(B) && get_cols(A) === get_cols(B) &&
get_offs(A) === get_offs(B) &&
row_size(A) == row_size(B) && col_size(A) == col_size(B))
identical(A::T, B::T) where {T<:CompressedSparseOperator{:CSC}} =
(get_vals(A) === get_vals(B) && get_rows(A) === get_rows(B) &&
get_offs(A) === get_offs(B) &&
row_size(A) == row_size(B) && col_size(A) == col_size(B))
identical(A::T, B::T) where {T<:CompressedSparseOperator{:COO}} =
(get_vals(A) === get_vals(B) && get_rows(A) === get_rows(B) &&
get_cols(A) === get_cols(B) &&
row_size(A) == row_size(B) && col_size(A) == col_size(B))
# Assume that a copy of a compressed sparse operator is to keep the same
# structure for the structural non-zeros but possibly change the values. So
# only duplicate the value part.
Base.copy(A::SparseOperatorCSR{T,M,N}) where {T,M,N} =
unsafe_csr(nrows(A), ncols(A), copy_vals(A), get_cols(A), get_offs(A),
row_size(A), col_size(A))
Base.copy(A::SparseOperatorCSC{T,M,N}) where {T,M,N} =
unsafe_csc(nrows(A), ncols(A), copy_vals(A), get_rows(A), get_offs(A),
row_size(A), col_size(A))
Base.copy(A::SparseOperatorCOO{T,M,N}) where {T,M,N} =
unsafe_coo(nrows(A), ncols(A), copy_vals(A), get_rows(A), get_cols(A),
row_size(A), col_size(A))
# `findnz(A) -> I,J,V` yields the row and column indices and the values of the
# entries stored in `A`.
SparseArrays.findnz(A::SparseOperator) =
(get_rows(A), get_cols(A), get_vals(A))
# Extend some methods in SparseArrays.  The "structural" non-zeros are the
# entries stored by the sparse structure, they may or may not be equal to
# zero; un-stored entries are always considered as being equal to zero.
SparseArrays.nonzeros(A::SparseOperator) = get_vals(A)
SparseArrays.nnz(A::SparseOperator) = length(nonzeros(A))
"""
get_vals(A)
yields the array storing the values of the sparse operator `A`. The returned
array is shared with `A`, call `copy_vals(A)` instead if you want to modify the
contents of the returned array with no side effects on `A`.
As a convenience, argument may also be the adjoint of a sparse operator:
get_vals(A') -> get_vals(A)
which yields the **unmodified** values of `A`, hence the caller has to take the
conjugate of these values. The method `get_val(A',k)` however takes care of
conjugating the values.
"""
get_vals(A::SparseOperator) = getfield(A, :vals)
get_vals(A::Adjoint{<:SparseOperator}) = get_vals(unveil(A))
"""
copy_vals([T = eltype(A),] A) -> vals
yields a copy of the values of the entries in sparse operator `A` converted to
type `T`. The result is a vector that is not shared by `A`, the caller may
thus modify its contents with no side effects on `A`.
"""
copy_vals(A::SparseOperator{T}) where {T} = copy_vals(T, A)
function copy_vals(::Type{T}, A::SparseOperator) where {T}
vals = get_vals(A)
copyto!(Vector{T}(undef, size(vals)), vals)
end
"""
get_rows(A)
yields the row indices of the entries of the sparse operator `A`. The returned
array may be shared with `A`, call `copy_rows(A)` instead if you want to modify
the contents of the returned array with no side effects on `A`.
"""
get_rows(A::SparseOperatorCSC) = getfield(A, :rows)
get_rows(A::SparseOperatorCOO) = getfield(A, :rows)
get_rows(A::CompressedSparseOperator{:CSR}) =
copy_rows(A) # FIXME: yield an iterator
get_rows(A::Adjoint{<:SparseOperator}) = get_cols(unveil(A))
"""
copy_rows(A) -> rows
yields a copy of the linear row indices of entries in sparse operator `A`. The
result is a vector that is not shared by `A`, the caller may thus modify its
contents with no side effects on `A`.
"""
function copy_rows(A::SparseOperator)
rows = get_rows(A)
copyto!(Vector{Int}(undef, size(rows)), rows)
end
function copy_rows(A::CompressedSparseOperator{:CSR})
rows = Vector{Int}(undef, length(get_vals(A)))
@inbounds for i in each_row(A)
@simd for k in each_off(A, i)
rows[k] = i
end
end
return rows
end
"""
get_cols(A)
yields the column indices of the entries of the sparse operator `A`. The
returned array may be shared with `A`, call `copy_cols(A)` instead if you want
to modify the contents of the returned array with no side effects on `A`.
"""
get_cols(A::SparseOperatorCSR) = getfield(A, :cols)
get_cols(A::SparseOperatorCOO) = getfield(A, :cols)
get_cols(A::Union{CompressedSparseOperator{:CSC},SparseMatrixCSC}) =
copy_cols(A) # FIXME: yield an iterator
get_cols(A::Adjoint{<:SparseOperator}) = get_rows(unveil(A))
"""
copy_cols(A) -> cols
yields a copy of the linear column indices of entries in sparse operator `A`.
The result is a vector that is not shared by `A`, the caller may thus modify
its contents with no side effects on `A`.
"""
function copy_cols(A::SparseOperator)
cols = get_cols(A)
copyto!(Vector{Int}(undef, size(cols)), cols)
end
function copy_cols(A::Union{CompressedSparseOperator{:CSC},SparseMatrixCSC})
cols = Vector{Int}(undef, length(get_vals(A)))
@inbounds for j in each_col(A)
@simd for k in each_off(A, j)
cols[k] = j
end
end
return cols
end
"""
get_offs(A)
yields the table of offsets of the sparse operator `A`. Not all operators
extend this method.
!!! warning
    The interpretation of offsets depends on the type of `A`. For instance,
assuming `offs = get_offs(A)`, then the index range of the `j`-th column of
a `SparseMatrixCSC` is `offs[j]:(offs[j+1]-1)` while the index range is
`(offs[j]+1):offs[j+1]` for a `SparseOperatorCSC`. For this reason,
it is recommended to call [`each_off`](@ref) instead or to call `get_offs`
with 2 arguments as shown below.
For a transparent usage of the offsets, the method should be called with 2
arguments:
get_offs(A, i) -> k1, k2
which yields the offsets of the first and last elements in the arrays of values
and linear column indices for the `i`-th row of the sparse operator `A` stored
in a *Compressed Sparse Row* (CSR) format. If `k2 < k1`, it means that the
`i`-th row is empty. Calling `each_off(A,i)` directly yields `k1:k2`.
get_offs(A, j) -> k1, k2
yields the offsets of the first and last elements in the arrays of values and
linear row indices for the `j`-th column of the sparse operator `A` stored in a
*Compressed Sparse Column* (CSC) format. If `k2 < k1`, it means that the
`j`-th column is empty. Calling `each_off(A,j)` directly yields `k1:k2`.
"""
get_offs(A::SparseOperatorCSR) = getfield(A, :offs)
get_offs(A::SparseOperatorCSC) = getfield(A, :offs)
get_offs(A::Adjoint{<:CompressedSparseOperator{:CSR}}) = get_offs(unveil(A))
get_offs(A::Adjoint{<:CompressedSparseOperator{:CSC}}) = get_offs(unveil(A))
@inline function get_offs(A::AnyCSR, i::Int)
offs = get_offs(A)
@boundscheck ((i < 1)|(i ≥ length(offs))) && out_of_range_row_index(A, i)
return ((@inbounds offs[i] + 1),
(@inbounds offs[i+1]))
end
@inline function get_offs(A::AnyCSC, j::Int)
offs = get_offs(A)
@boundscheck ((j < 1)|(j ≥ length(offs))) && out_of_range_column_index(A, j)
return ((@inbounds offs[j] + 1),
(@inbounds offs[j+1]))
end
@noinline out_of_range_row_index(A, i::Integer) =
throw(ErrorException(string("out of range row index ", i,
" for sparse operator with ", nrows(A),
" rows")))
@noinline out_of_range_column_index(A, j::Integer) =
throw(ErrorException(string("out of range column index ", j,
" for sparse operator with ", ncols(A),
" columns")))
"""
For a sparse operator `A` stored in a *Compressed Sparse Coordinate* (COO)
format, the call:
each_off(A)
yields an iterator over the indices `k` in the arrays of values and of linear
row and column indices, spanning all the entries stored in `A`.
---
For a sparse operator `A` stored in a *Compressed Sparse Column* (CSC) format,
the call:
each_off(A, j)
yields an iterator over the indices in the arrays of values and linear row
indices for the `j`-th column of `A`.
---
For a sparse operator `A` stored in a *Compressed Sparse Row* (CSR) format, the
call:
each_off(A, i)
yields an iterator over the indices in the arrays of values and linear column
indices for the `i`-th row of `A`.
"""
@inline each_off(A::CompressedSparseOperator{:COO}) = Base.OneTo(nnz(A))
@inline each_off(A::Adjoint{<:CompressedSparseOperator{:COO}}) =
each_off(unveil(A))
@inline @propagate_inbounds function each_off(A::AnyCSR, i::Int)
k1, k2 = get_offs(A, i)
return k1:k2
end
@inline @propagate_inbounds function each_off(A::AnyCSC, j::Int)
k1, k2 = get_offs(A, j)
return k1:k2
end
"""
each_row(A)
yields an iterator over the linear row indices of the sparse operator `A`
stored in a *Compressed Sparse Row* (CSR) format, this includes the adjoint of
a sparse operator in *Compressed Sparse Column* (CSC) format.
"""
each_row(A::CompressedSparseOperator{:CSR}) = Base.OneTo(nrows(A))
each_row(A::Adjoint{<:CompressedSparseOperator{:CSC}}) = each_col(unveil(A))
"""
each_col(A)
yields an iterator over the linear column indices of the sparse operator `A`
stored in a *Compressed Sparse Column* (CSC) format, this includes the adjoint
of a sparse operator in *Compressed Sparse Row* (CSR) format.
"""
each_col(A::CompressedSparseOperator{:CSC}) = Base.OneTo(ncols(A))
each_col(A::Adjoint{<:CompressedSparseOperator{:CSR}}) = each_row(unveil(A))
"""
get_row(A, k) -> i
yields the linear row index of the `k`-th entry of the sparse operator `A`
stored in a *Compressed Sparse Column* (CSC) or *Coordinate* (COO) formats
(this includes adjoint of sparse operators in CSR format).
"""
@inline @propagate_inbounds function get_row(
A::Union{CompressedSparseOperator{:COO},
CompressedSparseOperator{:CSC},
Adjoint{<:CompressedSparseOperator{:CSR}}}, k::Integer)
get_rows(A)[k]
end
"""
get_col(A, k) -> j
yields the linear column index of the `k`-th entry of the sparse operator `A`
stored in a *Compressed Sparse Row* (CSR) or *Coordinate* (COO) formats (this
includes adjoint of sparse operators in CSC format).
"""
@inline @propagate_inbounds function get_col(
A::Union{
CompressedSparseOperator{:COO},
CompressedSparseOperator{:CSR},
Adjoint{<:CompressedSparseOperator{:CSC}}}, k::Integer)
get_cols(A)[k]
end
"""
get_val(A, k) -> v
yields the value of the `k`-th entry of the sparse operator `A` stored in a
*Compressed Sparse Row* (CSR), *Compressed Sparse Column* (CSC) or *Coordinate*
(COO) format.
Argument may also be the adjoint of a sparse operator:
get_val(A', k) -> conj(get_val(A, k))
"""
@inline @propagate_inbounds get_val(A, k::Int) = get_vals(A)[k]
@inline @propagate_inbounds get_val(A, k::Integer) = get_val(A, Int(k)::Int)
@inline @propagate_inbounds get_val(A::Adjoint, k::Int) =
    conj(get_val(unveil(A), k))
"""
set_val!(A, k, v) -> A
assigns `v` to the value of the `k`-th entry of the sparse operator `A` stored
in a *Compressed Sparse Row* (CSR), *Compressed Sparse Column* (CSC) or
*Coordinate* (COO) format.
"""
@inline function set_val!(A, k::Int, v)
get_vals(A)[k] = v
return A
end
@inline @propagate_inbounds set_val!(A, k::Integer, v) =
set_val!(A, Int(k)::Int, v)
@inline @propagate_inbounds set_val!(A::Adjoint, k::Integer, v) =
set_val!(A, k, conj(v))
# Iterators to deliver (v,i,j).
@inline function Base.iterate(A::AnyCSR, state::Tuple{Int,Int,Int} = (0,0,0))
i, k, kmax = state
@inbounds begin
k += 1
while k > kmax
if i ≥ nrows(A)
return nothing
end
i += 1
k, kmax = get_offs(A, i)
end
v = get_val(A, k)
j = get_col(A, k)
return ((v, i, j), (i, k, kmax))
end
end
@inline function Base.iterate(A::AnyCSC, state::Tuple{Int,Int,Int} = (0,0,0))
j, k, kmax = state
@inbounds begin
k += 1
while k > kmax
if j ≥ ncols(A)
return nothing
end
j += 1
k, kmax = get_offs(A, j)
end
v = get_val(A, k)
i = get_row(A, k)
return ((v, i, j), (j, k, kmax))
end
end
@inline function Base.iterate(A::AnyCOO, state::Tuple{Int,Int} = (0, nnz(A)))
k, kmax = state
@inbounds begin
if k < kmax
k += 1
return ((get_val(A, k), get_row(A, k), get_col(A, k)), (k, kmax))
else
return nothing
end
end
end
#------------------------------------------------------------------------------
# Extend methods for SparseMatrixCSC defined in SparseArrays.
nrows(A::SparseMatrixCSC) = getfield(A, :m)
ncols(A::SparseMatrixCSC) = getfield(A, :n)
get_vals(A::SparseMatrixCSC) = getfield(A, :nzval)
get_offs(A::SparseMatrixCSC) = getfield(A, :colptr)
get_rows(A::SparseMatrixCSC) = getfield(A, :rowval)
# get_cols is already done elsewhere.
row_size(A::SparseMatrixCSC) = (nrows(A),)
col_size(A::SparseMatrixCSC) = (ncols(A),)
each_col(A::SparseMatrixCSC) = Base.OneTo(ncols(A))
@inline @propagate_inbounds each_off(A::SparseMatrixCSC, j::Integer) =
((k1, k2) = get_offs(A, j); k1:k2)
# Provide a specific version of `get_offs(A,j)` because offsets have a slightly
# different definition than our CSC format.
@inline function get_offs(A::SparseMatrixCSC, j::Integer)
offs = get_offs(A)
@boundscheck ((j < 1)|(j ≥ length(offs))) && out_of_range_column_index(A, j)
return ((@inbounds offs[j]),
(@inbounds offs[j+1]-1))
end
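# With these extensions, a SparseMatrixCSC can be traversed with the same
# pattern as a SparseOperatorCSC.  A sketch (assuming `using SparseArrays`):
#
#     S = sprand(5, 4, 0.3)
#     for j in each_col(S)            # loop over columns
#         for k in each_off(S, j)     # structural non-zeros in column j
#             i = get_rows(S)[k]      # row index of the k-th stored entry
#             v = get_vals(S)[k]      # value of the k-th stored entry
#         end
#     end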
#------------------------------------------------------------------------------
# Constructors.
"""
Sparse operators in *Compressed Sparse Coordinate* (COO) format store the
significant entries in no particular order, as a vector of values, a vector of
linear row indices and a vector of linear column indices. It is even possible
to have repeated entries. This format is very useful to build a sparse linear
operator. It can be converted to a more efficient format like *Compressed
Sparse Column* (CSC) or *Compressed Sparse Row* (CSR) for fast application of
the sparse linear mapping or of its adjoint.
A sparse operator in COO storage format can be constructed by providing all
necessary information:
SparseOperatorCOO(vals, rows, cols, rowsiz, colsiz)
where `vals` is the vector of values of the sparse entries, `rows` and `cols`
are integer valued vectors with the linear row and column indices of the sparse
entries, `rowsiz` and `colsiz` are the sizes of the row and column dimensions.
The value and the respective linear row and column indices of the `k`-th
sparse entry are given by `vals[k]`, `rows[k]` and `cols[k]`. For efficiency
reasons, sparse operators are currently limited to *fast* arrays because they
can be indexed linearly with no loss of performance. If `vals`, `rows` and/or
`cols` are not fast arrays, they are automatically converted to linearly
indexed arrays.
A sparse operator in COO storage format can be directly constructed from a
2-dimensional Julia array `A`:
SparseOperatorCOO(A, sel = (v,i,j) -> (v != zero(v)))
where optional argument `sel` is a selector function which is called as
`sel(v,i,j)` with `v`, `i` and `j` the value and the linear row and column
indices of each entry of `A`, and which is assumed to yield `true` for the
entries of `A` to be kept in the sparse structure and `false` for the entries
to be discarded. The default selector keeps all non-zeros of `A`.
The element type, say `T`, for the sparse coefficients can be imposed by
rewriting the above examples as:
SparseOperatorCOO{T}(args...)
A sparse operator in COO storage format implementing generalized matrix-vector
multiplication can also be directly constructed from an `L`-dimensional Julia
array `A` (with `L ≥ 2`) by:
SparseOperatorCOO{T,M}(A[, sel])
with `M` the number of leading dimensions of `A` corresponding to the *rows* of
the operator, the trailing `N = L - M` dimensions being assumed to correspond
to the *columns* of the operator. These dimensions are the size of,
respectively, the output and the input arrays when applying the operator. The
parameter `N` may be specified (although it can be automatically determined):
SparseOperatorCOO{T,M,N}(A[, sel])
provided the equality `M + N = ndims(A)` holds.
A last parameter `V` can be specified for the type of the vector to store the
coefficients of the sparse operator:
SparseOperatorCOO{T,M,N,V}(args...)
provided `V` implements standard linear indexing. The default is to take `V =
Vector{T}`. As a special case, you can choose a uniform boolean vector from
the `StructuredArrays` package to store the sparse coefficients:
SparseOperatorCOO{T,M,N,UniformVector{Bool}}(args...)
to get a compressed sparse operator in COO format whose values are an immutable
uniform vector of true values requiring no storage. This is useful to store
only the sparse structure of the operator, that is, the indices in COO format
of the sparse coefficients, not their values.
The `SparseOperatorCOO` constructor can also be used to convert a sparse
operator in another storage format into the COO format. In that case,
parameter `T` may also be specified to convert the type of the sparse
coefficients.
""" SparseOperatorCOO
"""
Sparse operators in *Compressed Sparse Column* (CSC) format store the
significant entries in a column-wise order, as a vector of values, a vector of
corresponding linear row indices and a vector of offsets indicating, for each
column, the range of indices in the vectors of values and of row indices. This
storage format is very suitable for fast application of the operator, notably
its adjoint.
A sparse operator in CSC storage format can be constructed by providing all
necessary information:
SparseOperatorCSC(vals, rows, offs, rowsiz, colsiz)
where `vals` is the vector of values of the sparse entries, `rows` is an
integer valued vector of the linear row indices of the sparse entries, `offs`
is a column-wise table of offsets in these arrays, `rowsiz` and `colsiz` are
the sizes of the row and column dimensions. The values and respective linear
row indices of the entries in the `j`-th column are given by `vals[k]` and
`rows[k]` with `k ∈ offs[j]+1:offs[j+1]`. The linear column index `j` is in
the range `1:n` where `n = prod(colsiz)` is the equivalent number of columns.
For efficiency reasons, sparse operators are currently limited to *fast*
arrays because they can be indexed linearly with no loss of performance. If
`vals`, `rows` and/or `offs` are not fast arrays, they are automatically
converted to linearly indexed arrays.
A sparse operator in CSC storage format can be directly constructed from a
2-dimensional Julia array `A`:
SparseOperatorCSC(A, sel = (v,i,j) -> (v != zero(v)))
where optional argument `sel` is a selector function which is called as
`sel(v,i,j)` with `v`, `i` and `j` the value and the linear row and column
indices of each entry of `A`, and which is assumed to yield `true` for the
entries of `A` to be kept in the sparse structure and `false` for the entries
to be discarded. The default selector keeps all non-zeros of `A`.
The element type, say `T`, for the sparse coefficients can be imposed by
rewriting the above examples as:
SparseOperatorCSC{T}(args...)
A sparse operator in CSC storage format implementing generalized matrix-vector
multiplication can also be directly constructed from an `L`-dimensional Julia
array `A` (with `L ≥ 2`) by:
SparseOperatorCSC{T,M}(A[, sel])
with `M` the number of leading dimensions of `A` corresponding to the *rows* of
the operator, the trailing `N = L - M` dimensions being assumed to correspond
to the *columns* of the operator. These dimensions are the size of,
respectively, the output and the input arrays when applying the operator. The
parameter `N` may be specified (although it can be automatically determined):
SparseOperatorCSC{T,M,N}(A[, sel])
provided the equality `M + N = ndims(A)` holds.
A last parameter `V` can be specified for the type of the vector to store the
coefficients of the sparse operator:
SparseOperatorCSC{T,M,N,V}(args...)
provided `V` implements standard linear indexing. The default is to take `V =
Vector{T}`. As a special case, you can choose a uniform boolean vector from
the `StructuredArrays` package to store the sparse coefficients:
SparseOperatorCSC{T,M,N,UniformVector{Bool}}(args...)
to get a compressed sparse operator in CSC format whose values are an immutable
uniform vector of true values requiring no storage. This is useful to store
only the sparse structure of the operator, that is, the indices in CSC format
of the sparse coefficients, not their values.
The `SparseOperatorCSC` constructor can also be used to convert a sparse
operator in another storage format into the CSC format. In that case,
parameter `T` may also be specified to convert the type of the sparse
coefficients.
""" SparseOperatorCSC
"""
Sparse operators in *Compressed Sparse Row* (CSR) format store the significant
entries in a row-wise order, as a vector of values, a vector of corresponding
linear column indices and a vector of offsets indicating, for each row, the
range of indices in the vectors of values and of column indices. This storage
format is very suitable for fast application of the operator.
A sparse operator in CSR storage format can be constructed by providing all
necessary information:
SparseOperatorCSR(vals, cols, offs, rowsiz, colsiz)
where `vals` is the vector of values of the sparse entries, `cols` is an
integer valued vector of the linear column indices of the sparse entries,
`offs` is a row-wise table of offsets in these arrays, `rowsiz` and `colsiz`
are the sizes of the row and column dimensions. The values and respective
linear column indices of the entries in the `i`-th row are given by `vals[k]`
and `cols[k]` with `k ∈ offs[i]+1:offs[i+1]`. The linear row index `i` is in
the range `1:m` where `m = prod(rowsiz)` is the equivalent number of rows. For
efficiency reasons, sparse operators are currently limited to *fast* arrays
because they can be indexed linearly with no loss of performance. If `vals`,
`cols` and/or `offs` are not fast arrays, they are automatically converted to
linearly indexed arrays.
A sparse operator in CSR storage format can be directly constructed from a
2-dimensional Julia array `A`:
SparseOperatorCSR(A, sel = (v,i,j) -> (v != zero(v)))
where optional argument `sel` is a selector function which is called as
`sel(v,i,j)` with `v`, `i` and `j` the value and the linear row and column
indices of each entry of `A`, and which is assumed to yield `true` for the
entries of `A` to be kept in the sparse structure and `false` for the entries
to be discarded. The default selector keeps all non-zeros of `A`.
The element type, say `T`, for the sparse coefficients can be imposed by
rewriting the above examples as:
SparseOperatorCSR{T}(args...)
A sparse operator in CSR storage format implementing generalized matrix-vector
multiplication can also be directly constructed from an `L`-dimensional Julia
array `A` (with `L ≥ 2`) by:
SparseOperatorCSR{T,M}(A[, sel])
with `M` the number of leading dimensions of `A` corresponding to the *rows* of
the operator, the trailing `N = L - M` dimensions being assumed to correspond
to the *columns* of the operator. These dimensions are the size of,
respectively, the output and the input arrays when applying the operator. The
parameter `N` may be specified (although it can be automatically determined):
SparseOperatorCSR{T,M,N}(A[, sel])
provided the equality `M + N = ndims(A)` holds.
A last parameter `V` can be specified for the type of the vector to store the
coefficients of the sparse operator:
SparseOperatorCSR{T,M,N,V}(args...)
provided `V` implements standard linear indexing. The default is to take `V =
Vector{T}`. As a special case, you can choose a uniform boolean vector from
the `StructuredArrays` package to store the sparse coefficients:
SparseOperatorCSR{T,M,N,UniformVector{Bool}}(args...)
to get a compressed sparse operator in CSR format whose values are an immutable
uniform vector of true values requiring no storage. This is useful to store
only the sparse structure of the operator, that is, the indices in CSR format
of the sparse coefficients, not their values.
The `SparseOperatorCSR` constructor can also be used to convert a sparse
operator in another storage format into the CSR format. In that case,
parameter `T` may also be specified to convert the type of the sparse
coefficients.
""" SparseOperatorCSR
# Make sparse operators callable.
@callable SparseOperatorCSR
@callable SparseOperatorCSC
@callable SparseOperatorCOO
SparseOperator(A::SparseOperator) = A
SparseOperator{T}(A::SparseOperator{T}) where {T} = A
SparseOperator{T,M}(A::SparseOperator{T,M}) where {T,M} = A
SparseOperator{T,M,N}(A::SparseOperator{T,M,N}) where {T,M,N} = A
# Change element type.
SparseOperator{T}(A::SparseOperator{<:Any,M,N}) where {T,M,N} =
SparseOperator{T,M,N}(A)
SparseOperator{T,M}(A::SparseOperator{<:Any,M,N}) where {T,M,N} =
SparseOperator{T,M,N}(A)
for F in (:SparseOperatorCSC, :SparseOperatorCSR, :SparseOperatorCOO)
@eval begin
SparseOperator{T,M,N}(A::$F{<:Any,M,N}) where {T,M,N} =
$F{T,M,N}(A)
end
end
for (fmt,func) in ((:CSC, :SparseOperatorCSC),
(:CSR, :SparseOperatorCSR),
(:COO, :SparseOperatorCOO),)
F = Expr(:quote, Symbol(fmt))
@eval begin
CompressedSparseOperator{$F}(args...; kwds...) =
$func(args...; kwds...)
CompressedSparseOperator{$F,T}(args...; kwds...) where {T} =
$func{T}(args...; kwds...)
CompressedSparseOperator{$F,T,M}(args...; kwds...) where {T,M} =
$func{T,M}(args...; kwds...)
CompressedSparseOperator{$F,T,M,N}(args...; kwds...) where {T,M,N} =
$func{T,M,N}(args...; kwds...)
end
end
# Conversion without changing format (mostly for changing element type).
CompressedSparseOperator{Any}(A::CompressedSparseOperator) = A
CompressedSparseOperator{Any,T}(A::CompressedSparseOperator{F}) where {F,T} =
CompressedSparseOperator{F,T}(A)
CompressedSparseOperator{Any,T,M}(A::CompressedSparseOperator{F}) where {F,T,M} =
CompressedSparseOperator{F,T,M}(A)
CompressedSparseOperator{Any,T,M,N}(A::CompressedSparseOperator{F}) where {F,T,M,N} =
CompressedSparseOperator{F,T,M,N}(A)
# Basic outer constructors return a fully checked structure.
function SparseOperatorCSR(vals::AbstractVector,
cols::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize)
check_structure(unsafe_csr(to_values(vals),
to_indices(cols),
to_indices(offs),
to_size(rowsiz),
to_size(colsiz)))
end
function SparseOperatorCSC(vals::AbstractVector,
rows::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize)
check_structure(unsafe_csc(to_values(vals),
to_indices(rows),
to_indices(offs),
to_size(rowsiz),
to_size(colsiz)))
end
function SparseOperatorCOO(vals::AbstractVector,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize)
check_structure(unsafe_coo(to_values(vals),
to_indices(rows),
to_indices(cols),
to_size(rowsiz),
to_size(colsiz)))
end
@inline isnonzero(v::T, i::Integer, j::Integer) where {T} = (v != zero(T))
for CS in (:SparseOperatorCSR,
:SparseOperatorCSC,
:SparseOperatorCOO)
@eval begin
# Get rid of the M,N parameters, but keep/set T for conversion of
# values.
$CS{T,M,N}(A::SparseOperator{<:Any,M,N}) where {T,M,N} = $CS{T}(A)
$CS{T,M}(A::SparseOperator{<:Any,M}) where {T,M} = $CS{T}(A)
$CS(A::SparseOperator{T}) where {T} = $CS{T}(A)
# Cases which do nothing (it makes sense that a constructor of an
# immutable type be able to just return its argument if it is already
# of the correct type).
$CS{T}(A::$CS{T}) where {T} = A
# Manage to call constructors of compressed sparse operator given a
# regular Julia array with correct parameters and selector.
$CS(A::AbstractMatrix{T}, args...; kwds...) where {T} =
$CS{T,1,1}(A, args...; kwds...)
$CS{Any}(A::AbstractMatrix{T}, args...; kwds...) where {T} =
$CS{T,1,1}(A, args...; kwds...)
$CS{T}(A::AbstractMatrix, args...; kwds...) where {T} =
$CS{T,1,1}(A, args...; kwds...)
$CS{Any,M}(A::AbstractArray{T}, args...; kwds...) where {T,M} =
$CS{T,M}(A, args...; kwds...)
$CS{Any,M,N}(A::AbstractArray{T}, args...; kwds...) where {T,M,N} =
$CS{T,M,N}(A, args...; kwds...)
$CS{Any,M,N,V}(A::AbstractArray{T}, args...; kwds...) where {T,M,N,V} =
$CS{T,M,N,V}(A, args...; kwds...)
function $CS{T,M}(A::AbstractArray{S,L}, args...; kwds...) where {S,T,L,M}
1 ≤ M < L ||
error("parameters M=$M and L=$L are not such that 1 ≤ M < L")
$CS{T,M,L-M}(A, args...; kwds...)
end
$CS{T,M,N}(A::AbstractArray, args...; kwds...) where {T,M,N} =
$CS{T,M,N,Vector{T}}(A, args...; kwds...)
end
end
# Constructors that convert array of values. Other fields have already been
# checked so do not check structure again.
SparseOperatorCSR{T}(A::SparseOperatorCSR{S,M,N}) where {S,T,M,N} =
unsafe_csr(nrows(A), ncols(A), to_values(T, get_vals(A)),
get_cols(A), get_offs(A), row_size(A), col_size(A))
SparseOperatorCSC{T}(A::SparseOperatorCSC{S,M,N}) where {S,T,M,N} =
unsafe_csc(nrows(A), ncols(A), to_values(T, get_vals(A)),
get_rows(A), get_offs(A), row_size(A), col_size(A))
SparseOperatorCOO{T}(A::SparseOperatorCOO{S,M,N}) where {S,T,M,N} =
unsafe_coo(nrows(A), ncols(A), to_values(T, get_vals(A)),
get_rows(A), get_cols(A), row_size(A), col_size(A))
# Constructors for CSR format similar to the basic ones but have parameters
# that may imply converting arguments.
function SparseOperatorCSR{T,M,N}(vals::AbstractVector,
cols::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M,N}
check_row_ndims(rowsiz, M)
check_column_ndims(colsiz, N)
SparseOperatorCSR{T}(vals, cols, offs, rowsiz, colsiz)
end
function SparseOperatorCSR{T,M}(vals::AbstractVector,
cols::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M}
check_row_ndims(rowsiz, M)
SparseOperatorCSR{T}(vals, cols, offs, rowsiz, colsiz)
end
function SparseOperatorCSR{T}(vals::AbstractVector,
cols::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T}
SparseOperatorCSR(to_values(T, vals), cols, offs, rowsiz, colsiz)
end
# Idem for CSC format.
function SparseOperatorCSC{T,M,N}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M,N}
check_row_ndims(rowsiz, M)
check_column_ndims(colsiz, N)
SparseOperatorCSC{T}(vals, rows, offs, rowsiz, colsiz)
end
function SparseOperatorCSC{T,M}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M}
check_row_ndims(rowsiz, M)
SparseOperatorCSC{T}(vals, rows, offs, rowsiz, colsiz)
end
function SparseOperatorCSC{T}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
offs::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T}
SparseOperatorCSC(to_values(T, vals), rows, offs, rowsiz, colsiz)
end
# Idem for COO format.
function SparseOperatorCOO{T,M,N}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M,N}
check_row_ndims(rowsiz, M)
check_column_ndims(colsiz, N)
SparseOperatorCOO{T}(vals, rows, cols, rowsiz, colsiz)
end
function SparseOperatorCOO{T,M}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T,M}
check_row_ndims(rowsiz, M)
SparseOperatorCOO{T}(vals, rows, cols, rowsiz, colsiz)
end
function SparseOperatorCOO{T}(vals::AbstractVector,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
rowsiz::ArraySize,
colsiz::ArraySize) where {T}
SparseOperatorCOO(to_values(T, vals), rows, cols, rowsiz, colsiz)
end
check_row_ndims(rowsiz::ArraySize, M::Integer) =
length(rowsiz) == M ||
throw_dimension_mismatch("number of row dimensions is not M=$M")
check_column_ndims(colsiz::ArraySize, N::Integer) =
length(colsiz) == N ||
throw_dimension_mismatch("number of columns dimensions is not N=$N")
# Constructors of a sparse operator in various formats given a regular Julia
# array and a selector function.  Julia arrays are usually in column-major
# order but this is not always the case.  To handle various storage orders when
# extracting selected entries, we convert the input array into an equivalent
# "matrix", that is a 2-dimensional array.
function SparseOperatorCSR{T,M,N,V}(arr::AbstractArray{S,L},
sel::Function = isnonzero) where {
S,T,L,M,N,V<:AbstractVector{T}}
# Get equivalent matrix dimensions.
nrows, ncols, rowsiz, colsiz = get_equivalent_size(arr, Val(M), Val(N))
# Convert into equivalent matrix.
A = as_matrix(arr, nrows, ncols)
# Count the number of selected entries.
nvals = count_selection(A, sel)
# Extract the selected entries and their column indices and count the
    # number of selected entries per row.  The pseudo-matrix is walked in
# row-major order.
cols = Vector{Int}(undef, nvals)
offs = Vector{Int}(undef, nrows + 1)
k = 0
if V <: UniformVector{Bool}
# Just extract the structure, not the values.
vals = V(true, nvals)
@inbounds for i in 1:nrows
offs[i] = k
for j in 1:ncols
if sel(A[i,j], i, j)
(k += 1) ≤ nvals || bad_selector()
cols[k] = j
end
end
end
else
# Extract the structure and the values.
vals = V(undef, nvals)
@inbounds for i in 1:nrows
offs[i] = k
for j in 1:ncols
v = A[i,j]
if sel(v, i, j)
(k += 1) ≤ nvals || bad_selector()
vals[k] = v
cols[k] = j
end
end
end
end
k == nvals || bad_selector()
offs[end] = nvals
# By construction, the sparse structure should be correct so just call the
# "unsafe" constructor.
return unsafe_csr(nrows, ncols, vals, cols, offs, rowsiz, colsiz)
end
function SparseOperatorCSC{T,M,N,V}(arr::AbstractArray{S,L},
sel::Function = isnonzero) where {
S,T,L,M,N,V<:AbstractVector{T}}
# Get equivalent matrix dimensions.
nrows, ncols, rowsiz, colsiz = get_equivalent_size(arr, Val(M), Val(N))
# Convert into equivalent matrix.
A = as_matrix(arr, nrows, ncols)
# Count the number of selected entries.
nvals = count_selection(A, sel)
    # Extract the selected entries and their row indices and count the number
# of selected entries per column. The pseudo-matrix is walked in
# column-major order.
rows = Vector{Int}(undef, nvals)
offs = Vector{Int}(undef, ncols + 1)
k = 0
if V <: UniformVector{Bool}
# Just extract the structure, not the values.
vals = V(true, nvals)
@inbounds for j in 1:ncols
offs[j] = k
for i in 1:nrows
if sel(A[i,j], i, j)
(k += 1) ≤ nvals || bad_selector()
rows[k] = i
end
end
end
else
# Extract the structure and the values.
vals = V(undef, nvals)
@inbounds for j in 1:ncols
offs[j] = k
for i in 1:nrows
v = A[i,j]
if sel(v, i, j)
(k += 1) ≤ nvals || bad_selector()
vals[k] = v
rows[k] = i
end
end
end
end
k == nvals || bad_selector()
offs[end] = nvals
# By construction, the sparse structure should be correct so just call the
# "unsafe" constructor.
return unsafe_csc(nrows, ncols, vals, rows, offs, rowsiz, colsiz)
end
function SparseOperatorCOO{T,M,N,V}(arr::AbstractArray{S,L},
sel::Function = isnonzero) where {
S,T,L,M,N,V<:AbstractVector{T}}
# Get equivalent matrix dimensions.
nrows, ncols, rowsiz, colsiz = get_equivalent_size(arr, Val(M), Val(N))
# Convert into equivalent matrix.
A = as_matrix(arr, nrows, ncols)
# Count the number of selected entries.
nvals = count_selection(A, sel)
# Extract the selected entries and their row and column indices. The
# pseudo-matrix is walked in column-major order since most Julia arrays are
# stored in that order.
rows = Vector{Int}(undef, nvals)
cols = Vector{Int}(undef, nvals)
k = 0
if V <: UniformVector{Bool}
# Just extract the structure, not the values.
vals = V(true, nvals)
@inbounds for j in 1:ncols
for i in 1:nrows
if sel(A[i,j], i, j)
(k += 1) ≤ nvals || bad_selector()
rows[k] = i
cols[k] = j
end
end
end
else
# Extract the structure and the values.
vals = V(undef, nvals)
@inbounds for j in 1:ncols
for i in 1:nrows
v = A[i,j]
if sel(v, i, j)
(k += 1) ≤ nvals || bad_selector()
vals[k] = v
rows[k] = i
cols[k] = j
end
end
end
end
k == nvals || bad_selector()
# By construction, the sparse structure should be correct so just call the
# "unsafe" constructor.
return unsafe_coo(nrows, ncols, vals, rows, cols, rowsiz, colsiz)
end
"""
unpack!(A, S; flatten=false) -> A
unpacks the non-zero coefficients of the sparse operator `S` into the array `A`
and returns `A`. Keyword `flatten` specifies whether to only consider the
length of `A` instead of its dimensions. In any case, `A` must have as many
elements as `length(S)` and standard linear indexing.
Just call `Array(S)` to unpack the coefficients of a sparse operator `S`
without providing the destination array.
""" unpack!
# Convert to standard Julia arrays which are stored in column-major order,
# hence the stride is the equivalent number of rows. For COO format, as
# duplicates are allowed, values must be combined by an operator.
Base.Array(A::SparseOperator{T,M,N}) where {T,M,N} = Array{T,M+N}(A)
Base.Array{T}(A::SparseOperator{<:Any,M,N}) where {T,M,N} = Array{T,M+N}(A)
function Base.Array{T,L}(A::SparseOperator{<:Any,M,N}) where {T,L,M,N}
L == M + N || throw_incompatible_number_of_dimensions()
return unpack!(Array{T}(undef, (row_size(A)..., col_size(A)...,)), A)
end
function prepare_unpack!(dst::AbstractArray,
src::SparseOperator,
flatten::Bool)
is_fast_array(dst) || throw_non_standard_indexing("destination array")
if flatten
length(dst) == length(src) ||
throw_incompatible_number_of_elements()
else
size(dst) == (row_size(src)..., col_size(src)...,) ||
throw_incompatible_dimensions()
end
fill!(dst, zero(eltype(dst)))
end
function unpack!(B::AbstractArray{T,L},
A::SparseOperatorCSR{<:Any,M,N};
flatten::Bool = false) where {T,L,M,N}
prepare_unpack!(B, A, flatten)
m = nrows(A) # used as the "stride" in B
@inbounds for i in each_row(A)
for k in each_off(A, i)
j = get_col(A, k)
v = get_val(A, k)
B[i + m*(j - 1)] = v
end
end
return B
end
function unpack!(B::AbstractArray{T,L},
A::SparseOperatorCSC{<:Any,M,N};
flatten::Bool = false) where {T,L,M,N}
prepare_unpack!(B, A, flatten)
m = nrows(A) # used as the "stride" in B
@inbounds for j in each_col(A)
for k in each_off(A, j)
i = get_row(A, k)
v = get_val(A, k)
B[i + m*(j - 1)] = v
end
end
return B
end
unpack!(B::AbstractArray, A::SparseOperatorCOO; kwds...) =
unpack!(B, A, +; kwds...)
unpack!(B::AbstractArray, A::SparseOperatorCOO{Bool}; kwds...) =
unpack!(B, A, |; kwds...)
function unpack!(B::AbstractArray{T,L},
A::SparseOperatorCOO{<:Any,M,N},
op::Function; flatten::Bool = false) where {T,L,M,N}
prepare_unpack!(B, A, flatten)
m = nrows(A) # used as the "stride" in B
@inbounds for k in each_off(A)
i = get_row(A, k)
j = get_col(A, k)
v = get_val(A, k)
l = i + m*(j - 1)
B[l] = op(B[l], v)
end
return B
end
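# A sketch of unpacking to a dense array (illustrative values; note that the
# two COO entries at (1,2) are combined by `+`):
#
#     S = SparseOperatorCOO([1.0, 2.0], [1, 1], [2, 2], (3,), (4,))
#     B = Array(S)                          # 3×4 dense array, B[1,2] == 3.0
#     unpack!(zeros(12), S; flatten=true)   # same values into a flat vector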
function check_new_shape(A::SparseOperator,
rowsiz::Tuple{Vararg{Int}},
colsiz::Tuple{Vararg{Int}})
    prod(rowsiz) == nrows(A) ||
        bad_size("product of row dimensions must equal the number of rows")
    prod(colsiz) == ncols(A) ||
        bad_size("product of column dimensions must equal the number of columns")
end
Base.reshape(A::SparseOperator, rowsiz::ArraySize, colsiz::ArraySize) =
reshape(A, to_size(rowsiz), to_size(colsiz))
function Base.reshape(A::SparseOperatorCSR,
rowsiz::Tuple{Vararg{Int}},
colsiz::Tuple{Vararg{Int}})
check_new_shape(A, rowsiz, colsiz)
unsafe_csr(nrows(A), ncols(A), get_vals(A), get_cols(A), get_offs(A),
rowsiz, colsiz)
end
function Base.reshape(A::SparseOperatorCSC,
rowsiz::Tuple{Vararg{Int}},
colsiz::Tuple{Vararg{Int}})
check_new_shape(A, rowsiz, colsiz)
unsafe_csc(nrows(A), ncols(A), get_vals(A), get_rows(A), get_offs(A),
rowsiz, colsiz)
end
function Base.reshape(A::SparseOperatorCOO,
rowsiz::Tuple{Vararg{Int}},
colsiz::Tuple{Vararg{Int}})
check_new_shape(A, rowsiz, colsiz)
unsafe_coo(nrows(A), ncols(A), get_vals(A), get_rows(A), get_cols(A),
rowsiz, colsiz)
end
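# For instance (a sketch), an operator built from a 6×4 matrix can be reshaped
# to map 2×2 input arrays to 2×3 output arrays without copying anything:
#
#     S = SparseOperatorCSC(rand(6, 4))
#     R = reshape(S, (2, 3), (2, 2))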
# Convert from other compressed sparse formats. For compressed sparse row and
# column (CSR and CSC) formats, the compressed sparse coordinate (COO) format
# is used as an intermediate representation and entries are sorted in
# row/column major order. To avoid side-effects, they must be copied first.
# Unless values are converted, there is no need to copy when converting to a
# compressed sparse coordinate (COO) format.
SparseOperatorCSR{T}(A::SparseOperator) where {T} =
coo_to_csr!(copy_vals(T, A),
copy_rows(A),
copy_cols(A),
row_size(A),
col_size(A))
SparseOperatorCSC{T}(A::SparseOperator) where {T} =
coo_to_csc!(copy_vals(T, A),
copy_rows(A),
copy_cols(A),
row_size(A),
col_size(A))
SparseOperatorCOO{T}(A::SparseOperator) where {T} =
SparseOperatorCOO(copy_vals(T, A),
get_rows(A),
get_cols(A),
row_size(A),
col_size(A))
SparseOperatorCOO{T}(A::SparseOperator{T}) where {T} =
SparseOperatorCOO(get_vals(A),
get_rows(A),
get_cols(A),
row_size(A),
col_size(A))
"""
coo_to_csr!(vals, rows, cols, rowsiz, colsiz [, mrg]) -> A
yields a compressed sparse operator in CSR format given the components
`vals`, `rows` and `cols` in the COO format and the sizes `rowsiz` and `colsiz`
of the row and column dimensions. Input arrays are modified in-place.
Optional argument `mrg` is a function called to merge values of entries with
the same row and column indices.
Input arrays must be regular Julia vectors to ensure type stability in case of
duplicates.
"""
function coo_to_csr!(vals::Vector{T},
rows::Vector{Int},
cols::Vector{Int},
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int},
mrg::Function = (T <: Bool ? (|) : (+))) where {T,M,N}
# Check row and column sizes.
nrows = check_size(rowsiz, "row")
ncols = check_size(colsiz, "column")
# Check row and column indices.
check_rows(rows, nrows)
check_cols(cols, ncols)
# Sort and merge entries in row-major order, resize arrays if needed and
# compute offsets.
nvals = sort_and_merge!(vals, rows, cols, mrg)
if nvals < length(vals)
vals = vals[1:nvals]
cols = cols[1:nvals]
end
offs = compute_offsets(nrows, rows, nvals)
    # Since everything has been checked, we can call the unsafe constructor.
return unsafe_csr(nrows, ncols, vals, cols, offs, rowsiz, colsiz)
end
"""
coo_to_csc!(vals, rows, cols, rowsiz, colsiz [, mrg]) -> A
yields a compressed sparse operator in CSC format given the components
`vals`, `rows` and `cols` in the COO format and the sizes `rowsiz` and `colsiz`
of the row and column dimensions. Input arrays are modified in-place.
Optional argument `mrg` is a function called to merge values of entries with
the same row and column indices.
Input arrays must be regular Julia vectors to ensure type stability in case of
duplicates.
"""
function coo_to_csc!(vals::Vector{T},
rows::Vector{Int},
cols::Vector{Int},
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int},
mrg::Function = (T <: Bool ? (|) : (+))) where {T,M,N}
# Check row and column sizes.
nrows = check_size(rowsiz, "row")
ncols = check_size(colsiz, "column")
# Check row and column indices.
check_rows(rows, nrows)
check_cols(cols, ncols)
# Sort and merge entries in column-major order, resize arrays if needed and
# compute offsets.
nvals = sort_and_merge!(vals, cols, rows, mrg)
if nvals < length(vals)
vals = vals[1:nvals]
rows = rows[1:nvals]
end
offs = compute_offsets(ncols, cols, nvals)
    # Since everything has been checked, we can call the unsafe constructor.
return unsafe_csc(nrows, ncols, vals, rows, offs, rowsiz, colsiz)
end
# "less-than" method for sorting entries in order, arguments are 3-tuples
# `(v,major,minor)` with `v` the entry value, `major` the major index and
# `minor` the minor index.
@inline major_order(a::Tuple{T,Int,Int},b::Tuple{T,Int,Int}) where {T} =
ifelse(a[2] == b[2], a[3] < b[3], a[2] < b[2])
"""
sort_and_merge!(vals, major, minor, mrg) -> nvals
sorts entries and merges duplicates in input arrays `vals`, `major` and
`minor`. Entries consist of the 3-tuples `(vals[i],major[i],minor[i])`. The
sorting order of the `i`-th entry is based on the value of `major[i]` and, if
equal, on the value of `minor[i]`. After sorting, duplicate entries, that is
those which have the same `(major[i],minor[i])`, are removed, merging the
associated values in `vals` with the `mrg` function. All operations are done
in-place; the number of unique entries is returned but the input arrays are
not resized, only the first `nvals` entries are valid.
"""
function sort_and_merge!(vals::AbstractVector,
major::AbstractVector{Int},
minor::AbstractVector{Int},
mrg::Function)
# Sort entries in order (this also ensures that all arrays have the same
# dimensions).
sort!(ZippedArray(vals, major, minor); lt=major_order)
# Merge duplicates.
j = 1
@inbounds for k in 2:length(vals)
if major[k] == major[j] && minor[k] == minor[j]
vals[j] = mrg(vals[j], vals[k])
elseif (j += 1) < k
vals[j], major[j], minor[j] = vals[k], major[k], minor[k]
end
end
return j
end
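# A worked example (purely illustrative): with vals = [1.0, 2.0, 3.0],
# major = [2, 1, 2], minor = [1, 1, 1] and mrg = +, sorting yields the entries
# (2.0,1,1), (1.0,2,1), (3.0,2,1); the last two have identical (major,minor)
# indices and are merged, so sort_and_merge! returns nvals = 2 with
# vals[1:2] == [2.0, 4.0], major[1:2] == [1, 2] and minor[1:2] == [1, 1].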
"""
compute_offsets(n, inds, len=length(inds)) -> offs
yields the `n+1` offsets computed from the list of indices `inds[1:len]`.
Indices must be in non-decreasing order and all in the range `1:n`.
"""
function compute_offsets(n::Int,
inds::AbstractVector{Int},
len::Int = length(inds))
@certify len ≤ length(inds)
@inbounds begin
offs = Vector{Int}(undef, n + 1)
i = 0
for k in 1:len
j = inds[k]
j == i && continue
((j < i)|(j > n)) &&
error(1 ≤ j ≤ n ?
"indices must be in non-increasing order" :
"out of bound indices")
off = k - 1
while i < j
i += 1
offs[i] = off
end
end
while i ≤ n
i += 1
offs[i] = len
end
end
return offs
end
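# A worked example (purely illustrative): with n = 3 and inds = [1, 1, 3], the
# result is offs = [0, 2, 2, 3]; group i spans k ∈ offs[i]+1:offs[i+1], hence
# 1:2 for i = 1, an empty range for i = 2 and 3:3 for i = 3.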
# This error is due to the non-zeros selector not returning the same results in
# the two selection passes.
bad_selector() = throw_argument_error("inconsistent selector function")
@inline select_non_zeros(v::Bool, i::Int, j::Int) = v
@inline select_non_zeros(v::T, i::Int, j::Int) where {T} = (v != zero(T))
@inline select_non_zeros_in_diagonal(v::T, i::Int, j::Int) where {T} =
((i == j)|(v != zero(T)))
@inline select_non_zeros_in_lower_part(v::T, i::Int, j::Int) where {T} =
((i ≥ j)&(v != zero(T)))
@inline select_non_zeros_in_upper_part(v::T, i::Int, j::Int) where {T} =
((i ≤ j)&(v != zero(T)))
"""
get_equivalent_size(A, Val(M), Val(N)) -> nrows, ncols, rowsiz, colsiz
yields equivalent matrix dimensions of array `A` assuming the *rows* account
for the `M` leading dimensions while the *columns* account for the other `N`
dimensions.
"""
function get_equivalent_size(A::AbstractArray{T,L},
::Val{M}, ::Val{N}) where {T,L,M,N}
@certify L == M + N
@certify M ≥ 1
@certify N ≥ 1
eachindex(A) == 1:length(A) ||
throw_argument_error("array must have standard linear indexing")
siz = size(A)
rowsiz = siz[1:M]
colsiz = siz[M+1:end]
nrows = prod(rowsiz)
ncols = prod(colsiz)
return nrows, ncols, rowsiz, colsiz
end
"""
count_selection(A, sel, rowmajor=false) -> nvals
yields the number of selected entries in matrix `A` such that `sel(A[i,j],i,j)`
is `true`, with `i` and `j` the row and column indices. If optional argument
`rowmajor` is true, the array is walked in row-major order; otherwise (the
default), the array is walked in column-major order.
"""
function count_selection(A::AbstractMatrix, sel::Function,
rowmajor::Bool = false)
nrows, ncols = size(A)
nvals = 0
if rowmajor
# Walk the coefficients in row-major order.
@inbounds for i in 1:nrows, j in 1:ncols
if sel(A[i,j], i, j)
nvals += 1
end
end
else
# Walk the coefficients in column-major order.
@inbounds for j in 1:ncols, i in 1:nrows
if sel(A[i,j], i, j)
nvals += 1
end
end
end
return nvals
end
"""
check_structure(A) -> A
checks the structure of the compressed sparse operator `A` throwing an
exception if there are any inconsistencies.
"""
function check_structure(A::CompressedSparseOperator{:CSR})
check_size(A)
check_vals(A)
check_cols(A)
check_offs(A)
return A
end
function check_structure(A::CompressedSparseOperator{:CSC})
check_size(A)
check_vals(A)
check_rows(A)
check_offs(A)
return A
end
function check_structure(A::CompressedSparseOperator{:COO})
check_size(A)
check_vals(A)
check_rows(A)
check_cols(A)
return A
end
"""
check_size(siz, id="array") -> len
checks the array size `siz` and returns the corresponding number of elements.
An `ArgumentError` is thrown if a dimension is invalid, using `id` to identify
the argument.
check_size(A)
checks the validity of the row and column sizes in compressed sparse operator
`A` throwing an exception if there are any inconsistencies.
"""
function check_size(siz::NTuple{N,Int}, id::String="array") where {N}
len = 1
@inbounds for i in 1:N
(dim = siz[i]) ≥ 0 || bad_dimension(dim, i, id)
len *= dim
end
return len
end
function check_size(A::SparseOperator)
check_size(row_size(A), "row") == nrows(A) ||
throw_dimension_mismatch("incompatible equivalent number of rows and row size")
check_size(col_size(A), "column") == ncols(A) ||
throw_dimension_mismatch("incompatible equivalent number of columns and column size")
nothing
end
@noinline bad_dimension(dim::Integer, i::Integer, id) =
throw_argument_error("invalid ", i, ordinal_suffix(i), " ", id,
" dimension: ", dim)
"""
check_vals(A)
checks the array of values in compressed sparse operator `A` throwing an
exception if there are any inconsistencies.
"""
function check_vals(A::SparseOperator)
vals = get_vals(A)
is_fast_array(vals) || throw_not_fast_array("array of values")
length(vals) == nnz(A) || throw_argument_error("bad number of values")
nothing
end
"""
check_rows(A)
checks the array of linear row indices in the compressed sparse operator `A`
stored in a *Compressed Sparse Column* (CSC) or *Compressed Sparse Coordinate*
(COO) format. Throws an exception in case of inconsistency.
check_rows(rows, m)
checks that the array of linear row indices `rows` is a fast vector of values
in the range `1:m`.
"""
function check_rows(A::Union{<:CompressedSparseOperator{:CSC},
<:CompressedSparseOperator{:COO}})
rows = get_rows(A)
length(rows) == nnz(A) ||
throw_argument_error("bad number of row indices")
check_rows(rows, nrows(A))
# FIXME: also check sorting for CompressedSparseOperator{:CSC}?
end
function check_rows(rows::AbstractVector{Int}, m::Int)
is_fast_array(rows) || throw_not_fast_array("array of row indices")
anyerror = false
@inbounds @simd for k in eachindex(rows)
i = rows[k]
anyerror |= ((i < 1)|(i > m))
end
anyerror && error("out of range row indices")
nothing
end
"""
check_cols(A)
checks the array of linear column indices in the compressed sparse operator `A`
stored in a *Compressed Sparse Row* (CSR) or *Compressed Sparse Coordinate*
(COO) format. Throws an exception in case of inconsistency.
check_cols(cols, n)
checks that the array of linear column indices `cols` is a fast vector of
values in the range `1:n`.
"""
function check_cols(A::Union{<:CompressedSparseOperator{:CSR},
<:CompressedSparseOperator{:COO}})
cols = get_cols(A)
length(cols) == nnz(A) ||
throw_argument_error("bad number of column indices")
check_cols(cols, ncols(A))
# FIXME: also check sorting for CompressedSparseOperator{:CSR}?
end
function check_cols(cols::AbstractVector{Int}, n::Int)
is_fast_array(cols) || throw_not_fast_array("array of column indices")
anyerror = false
@inbounds @simd for k in eachindex(cols)
j = cols[k]
anyerror |= ((j < 1)|(j > n))
end
anyerror && error("out of range column indices")
nothing
end
"""
check_offs(A)
checks the array of offsets in the compressed sparse operator `A` stored in a
*Compressed Sparse Row* (CSR) or *Compressed Sparse Column* (CSC) format.
Throws an exception in case of inconsistency.
"""
function check_offs(A::T) where {T<:Union{CompressedSparseOperator{:CSR},
CompressedSparseOperator{:CSC}}}
offs = get_offs(A)
is_fast_array(offs) || throw_not_fast_array("array of offsets")
n = (T <: CompressedSparseOperator{:CSR} ? nrows(A) : ncols(A))
length(offs) == n + 1 || error("bad number of offsets")
offs[1] == 0 || error("bad initial offset")
len = 0
anyerrors = false
@inbounds for i in 1:n
k1, k2 = offs[i], offs[i+1]
anyerrors |= (k2 < k1)
len += ifelse(k2 > k1, k2 - k1, 0)
end
anyerrors && error("offsets must be non-decreasing")
len == nnz(A) ||
error("offsets incompatible with number of structural non-zeros")
nothing
end
"""
unsafe_csr([m, n,] vals, cols, offs, rowsiz, colsiz)
yields a compressed sparse operator in *Compressed Sparse Row* (CSR) format as
an instance of `SparseOperatorCSR`. This method assumes that arguments are
correct, it just calls the inner constructor with suitable parameters. This
method is mostly used by converters and outer constructors.
"""
function unsafe_csr(m::Integer, n::Integer,
vals::V, cols::J, offs::K,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {T,M,N,
V<:AbstractVector{T},
J<:AbstractVector{Int},
K<:AbstractVector{Int}}
SparseOperatorCSR{T,M,N,V,J,K}(to_int(m), to_int(n),
vals, cols, offs,
rowsiz, colsiz)
end
function unsafe_csr(vals::AbstractVector,
cols::AbstractVector{Int},
offs::AbstractVector{Int},
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {M,N}
unsafe_csr(prod(rowsiz), prod(colsiz), vals, cols, offs, rowsiz, colsiz)
end
"""
unsafe_csc([m, n,] vals, rows, offs, rowsiz, colsiz)
yields a compressed sparse operator in *Compressed Sparse Column* (CSC) format
as an instance of `SparseOperatorCSC`. This method assumes that arguments are
correct, it just calls the inner constructor with suitable parameters. This
method is mostly used by converters and outer constructors.
"""
function unsafe_csc(m::Integer, n::Integer,
vals::V, rows::I, offs::K,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
K<:AbstractVector{Int}}
SparseOperatorCSC{T,M,N,V,I,K}(to_int(m), to_int(n),
vals, rows, offs,
rowsiz, colsiz)
end
function unsafe_csc(vals::AbstractVector,
rows::AbstractVector{Int},
offs::AbstractVector{Int},
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {M,N}
unsafe_csc(prod(rowsiz), prod(colsiz), vals, rows, offs, rowsiz, colsiz)
end
"""
unsafe_coo([m, n,] vals, rows, cols, rowsiz, colsiz)
yields a compressed sparse operator in *Compressed Sparse Coordinate* (COO)
format as an instance of `SparseOperatorCOO`. This method assumes that
arguments are correct, it just calls the inner constructor with suitable
parameters. This method is mostly used by converters and outer constructors.
"""
function unsafe_coo(m::Integer, n::Integer,
vals::V, rows::I, cols::J,
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {T,M,N,
V<:AbstractVector{T},
I<:AbstractVector{Int},
J<:AbstractVector{Int}}
SparseOperatorCOO{T,M,N,V,I,J}(to_int(m), to_int(n),
vals, rows, cols,
rowsiz, colsiz)
end
function unsafe_coo(vals::AbstractVector,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
rowsiz::NTuple{M,Int},
colsiz::NTuple{N,Int}) where {M,N}
unsafe_coo(prod(rowsiz), prod(colsiz), vals, rows, cols, rowsiz, colsiz)
end
"""
is_fast_array(A) -> bool
yields whether `A` is a *fast array*, that is an array with standard linear
indexing.
"""
is_fast_array(A::AbstractArray) = is_fast_indices(eachindex(A))
"""
is_fast_indices(inds) -> bool
yields whether `inds` is an iterator for *fast indices* that is linear indices
starting at `1`.
"""
is_fast_indices(inds::AbstractUnitRange{Int}) = (first(inds) == 1)
is_fast_indices(inds) = false
"""
check_argument(A, siz, id="array")
checks whether array `A` has size `siz` and implements standard linear
indexing. An exception is thrown if any of these do not hold.
"""
function check_argument(A::AbstractArray{<:Any,N},
siz::NTuple{N,Int},
id="array") where {N}
IndexStyle(A) === IndexLinear() || throw_non_linear_indexing(id)
inds = axes(A)
@inbounds for i in 1:N
first(inds[i]) == 1 || throw_non_standard_indexing(id)
length(inds[i]) == siz[i] || throw_incompatible_dimensions(id)
end
nothing
end
"""
throw_argument_error(args...)
throws an `ArgumentError` exception with a textual message made of `args...`.
"""
throw_argument_error(mesg::AbstractString) = throw(ArgumentError(mesg))
@noinline throw_argument_error(args...) = throw_argument_error(string(args...))
@noinline throw_not_fast_array(id) =
throw_argument_error(id, " does not implement fast indexing")
@noinline throw_non_linear_indexing(id) =
throw_argument_error(id, " does not implement linear indexing")
@noinline throw_non_standard_indexing(id) =
throw_argument_error(id, " has non-standard indexing")
"""
throw_assertion_error(args...)
throws an `AssertionError` exception with a textual message made of `args...`.
"""
throw_assertion_error(mesg::AbstractString) = throw(AssertionError(mesg))
@noinline throw_assertion_error(args...) = throw_assertion_error(string(args...))
"""
throw_dimension_mismatch(args...)
throws a `DimensionMismatch` exception with a textual message made of
`args...`.
"""
throw_dimension_mismatch(mesg::AbstractString) =
throw(DimensionMismatch(mesg))
@noinline throw_dimension_mismatch(args...) =
throw_dimension_mismatch(string(args...))
@noinline throw_incompatible_dimensions(id) =
throw_dimension_mismatch(id, " has incompatible dimensions")
@noinline throw_incompatible_dimensions() =
throw_dimension_mismatch("incompatible dimensions")
@noinline throw_incompatible_number_of_dimensions() =
throw_dimension_mismatch("incompatible number of dimensions")
@noinline throw_incompatible_number_of_elements() =
throw_dimension_mismatch("incompatible number of elements")
"""
ordinal_suffix(n) -> "st" or "nd" or "rd" or "th"
yields the ordinal suffix for integer `n`.
"""
function ordinal_suffix(n::Integer)
    # As in English, 11, 12 and 13 take the suffix "th" (11th, 112th, etc.)
    # despite ending in 1, 2 and 3.
    if n > 0 && mod(n, 100) ∉ (11, 12, 13)
        d = mod(n, 10)
        if d == 1
            return "st"
        elseif d == 2
            return "nd"
        elseif d == 3
            return "rd"
        end
    end
    return "th"
end
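# For example: `ordinal_suffix(1)` yields `"st"`, `ordinal_suffix(2)` yields
# `"nd"`, `ordinal_suffix(11)` yields `"th"` and `ordinal_suffix(21)` yields
# `"st"`.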
#------------------------------------------------------------------------------
# Apply operators.
# Extend multiplier_type for sparse operators.
multiplier_type(::SparseOperator{T}) where {T} = T
"""
dispatch_multipliers!(α, f, A, x, β, y) -> y
dispatches a call to function `f` as `f(α,A,x,β,y,axpby)` where `axpby` is a
function called with 4 scalar arguments as `axpby(α,x,β,y)` to yield `α*x +
β*y` but which is optimized for the specific values of the multipliers `α` and
`β`. For instance, if `α=1` and `β=0`, then `axpby(α,x,β,y)` just evaluates
as `x`.
The `dispatch_multipliers!` method is a helper to apply a mapping `A` to an
argument `x` and store the result in `y`. In pseudo-code, this amounts to
performing `y <- α*op(A)(x) + β*y` where `op(A)` denotes a variant of `A` which
usually depends on `f`.
"""
@inline function dispatch_multipliers!(α::Number, f::Function, A, x,
β::Number, y)
if α == 0
vscale!(y, β)
elseif α == 1
if β == 0
f(1, A, x, 0, y, axpby_yields_x)
elseif β == 1
f(1, A, x, 1, y, axpby_yields_xpy)
else
b = promote_multiplier(β, y)
f(1, A, x, b, y, axpby_yields_xpby)
end
else
a = promote_multiplier(α, A, x)
if β == 0
f(a, A, x, 0, y, axpby_yields_ax)
elseif β == 1
f(a, A, x, 1, y, axpby_yields_axpy)
else
b = promote_multiplier(β, y)
f(a, A, x, b, y, axpby_yields_axpby)
end
end
return y
end
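# Illustrative sketch (assuming the `axpby_yields_*` helpers defined elsewhere
# in this package behave as their names suggest, e.g. `axpby_yields_x(a,x,b,y)`
# yields `x` and `axpby_yields_axpby(a,x,b,y)` yields `a*x + b*y`): a call
#
#     dispatch_multipliers!(1, f, A, x, 0, y)
#
# reduces to `f(1, A, x, 0, y, axpby_yields_x)`, so the innermost loop of `f`
# boils down to `y[i] = s` with no multiplication by `α` nor `β`.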
# Generic version of `vcreate` for most compressed sparse operators.
#
# We assume that in-place operation is not possible and thus simply ignore the
# `scratch` flag. Operators which can be applied in-place shall specialize
# this method. We do not check the dimensions and indexing of `x` as this will
# be done when `apply!` is called.
function vcreate(::Type{P},
A::SparseOperator{Ta,M,N},
x::AbstractArray{Tx,N},
scratch::Bool) where {Ta,Tx,M,N,P<:Union{Direct,InverseAdjoint}}
Ty = promote_type(Ta,Tx)
return Array{Ty}(undef, row_size(A))
end
function vcreate(::Type{P},
A::SparseOperator{Ta,M,N},
x::AbstractArray{Tx,M},
scratch::Bool) where {Ta,Tx,M,N,P<:Union{Adjoint,Inverse}}
Ty = promote_type(Ta,Tx)
return Array{Ty}(undef, col_size(A))
end
# Apply a sparse linear mapping, and its adjoint, stored in Compressed Sparse
# Row (CSR) format.
function apply!(α::Number,
::Type{Direct},
A::CompressedSparseOperator{:CSR,Ta,M,N},
x::AbstractArray{Tx,N},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,M}) where {Ta,Tx,Ty,M,N}
check_argument(x, col_size(A))
check_argument(y, row_size(A))
dispatch_multipliers!(α, unsafe_apply_direct!, A, x, β, y)
end
function unsafe_apply_direct!(α::Number,
A::CompressedSparseOperator{:CSR,Ta,M,N},
x::AbstractArray{Tx,N},
β::Number,
y::AbstractArray{Ty,M},
axpby::Function) where {Ta,Tx,Ty,M,N}
@inbounds for i in each_row(A)
s = zero(promote_type(Ta, Tx))
for k in each_off(A, i)
j = get_col(A, k)
v = get_val(A, k)
s += v*x[j]
end
y[i] = axpby(α, s, β, y[i])
end
end
function apply!(α::Number,
::Type{Adjoint},
A::CompressedSparseOperator{:CSR,Ta,M,N},
x::AbstractArray{Tx,M},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,N}) where {Ta,Tx,Ty,M,N}
check_argument(x, row_size(A))
check_argument(y, col_size(A))
Tm = promote_type(Ta, Tx) # to promote multipliers
β == 1 || vscale!(y, β)
if α == 1
@inbounds for i in each_row(A)
q = promote_multiplier(x[i], Tm)
if q != 0
for k in each_off(A, i)
j = get_col(A, k)
v = get_val(A, k)
y[j] += q*conj(v)
end
end
end
elseif α != 0
a = promote_multiplier(α, Tm)
@inbounds for i in each_row(A)
q = a*promote_multiplier(x[i], Tm)
if q != 0
for k in each_off(A, i)
j = get_col(A, k)
v = get_val(A, k)
y[j] += q*conj(v)
end
end
end
end
return y
end
# Apply a sparse operator, and its adjoint, stored in Compressed Sparse Column
# (CSC) format.
function apply!(α::Number,
::Type{Direct},
A::CompressedSparseOperator{:CSC,Ta,M,N},
x::AbstractArray{Tx,N},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,M}) where {Ta,Tx,Ty,M,N}
check_argument(x, col_size(A))
check_argument(y, row_size(A))
Tm = promote_type(Ta, Tx) # to promote multipliers
β == 1 || vscale!(y, β)
if α == 1
@inbounds for j in each_col(A)
q = promote_multiplier(x[j], Tm)
if q != 0
for k in each_off(A, j)
i = get_row(A, k)
v = get_val(A, k)
y[i] += q*v
end
end
end
elseif α != 0
a = promote_multiplier(α, Tm)
@inbounds for j in each_col(A)
q = a*promote_multiplier(x[j], Tm)
if q != 0
for k in each_off(A, j)
i = get_row(A, k)
v = get_val(A, k)
y[i] += q*v
end
end
end
end
return y
end
function apply!(α::Number,
::Type{Adjoint},
A::CompressedSparseOperator{:CSC,Ta,M,N},
x::AbstractArray{Tx,M},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,N}) where {Ta,Tx,Ty,M,N}
check_argument(x, row_size(A))
check_argument(y, col_size(A))
dispatch_multipliers!(α, unsafe_apply_adjoint!, A, x, β, y)
end
function unsafe_apply_adjoint!(α::Number,
A::CompressedSparseOperator{:CSC,Ta,M,N},
x::AbstractArray{Tx,M},
β::Number,
y::AbstractArray{Ty,N},
axpby::Function) where {Ta,Tx,Ty,M,N}
@inbounds for j in each_col(A)
s = zero(promote_type(Ta, Tx))
for k in each_off(A, j)
i = get_row(A, k)
v = get_val(A, k)
s += conj(v)*x[i]
end
y[j] = axpby(α, s, β, y[j])
end
return y
end
# Apply a sparse operator, and its adjoint, stored in Compressed Sparse
# Coordinate (COO) format.
function apply!(α::Number,
::Type{Direct},
A::CompressedSparseOperator{:COO,Ta,M,N},
x::AbstractArray{Tx,N},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,M}) where {Ta,Tx,Ty,M,N}
check_argument(x, col_size(A))
check_argument(y, row_size(A))
β == 1 || vscale!(y, β)
if α != 0
V, I, J = get_vals(A), get_rows(A), get_cols(A)
if α == 1
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[i] += x[j]*v
end
elseif α == -1
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[i] -= x[j]*v
end
else
# The ordering of operations is to minimize the number of
# operations in case `v` is complex while `α` and `x` are reals.
alpha = promote_multiplier(α, Ta, Tx)
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[i] += (alpha*x[j])*v
end
end
end
return y
end
function apply!(α::Number,
::Type{Adjoint},
A::CompressedSparseOperator{:COO,Ta,M,N},
x::AbstractArray{Tx,M},
scratch::Bool,
β::Number,
y::AbstractArray{Ty,N}) where {Ta,Tx,Ty,M,N}
check_argument(x, row_size(A))
check_argument(y, col_size(A))
β == 1 || vscale!(y, β)
if α != 0
V, I, J = get_vals(A), get_rows(A), get_cols(A)
if α == 1
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[j] += x[i]*conj(v)
end
elseif α == -1
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[j] -= x[i]*conj(v)
end
else
# The ordering of operations is to minimize the number of
# operations in case `v` is complex while `α` and `x` are reals.
alpha = promote_multiplier(α, Ta, Tx)
@inbounds for k in eachindex(V, I, J)
v, i, j = V[k], I[k], J[k]
y[j] += (alpha*x[i])*conj(v)
end
end
end
return y
end
end # module SparseOperators
# The following module is to facilitate using compressed sparse operators at a
# lower level than the exported API.
module SparseMethods
export
CompressedSparseOperator,
SparseOperator,
SparseOperatorCOO,
SparseOperatorCSC,
SparseOperatorCSR,
col_size,
copy_cols,
copy_rows,
copy_vals,
each_col,
each_off,
each_row,
get_col,
get_cols,
get_offs,
get_row,
get_rows,
get_val,
get_vals,
ncols,
nnz,
nonzeros,
nrows,
row_size,
set_val!
import ..SparseOperators:
CompressedSparseOperator,
SparseOperator,
SparseOperatorCOO,
SparseOperatorCSC,
SparseOperatorCSR,
col_size,
copy_cols,
copy_rows,
copy_vals,
each_col,
each_off,
each_row,
get_col,
get_cols,
get_offs,
get_row,
get_rows,
get_val,
get_vals,
ncols,
nnz,
nonzeros,
nrows,
row_size,
set_val!
end # module SparseMethods
#
# traits.jl -
#
# Methods related to mapping traits.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2021 Éric Thiébaut.
#
"""
```julia
LinearType(A)
```
yields the *linear* trait of mapping `A` indicating whether `A` is certainly
linear. The returned value is one of the singletons `Linear()` for linear maps
or `NonLinear()` for other mappings.
See also: [`Trait`](@ref), [`is_linear`](@ref).
"""
LinearType(::Mapping) = NonLinear() # any mapping assumed non-linear by default
LinearType(::LinearMapping) = Linear()
LinearType(::Inverse{<:LinearMapping}) = Linear()
LinearType(::Scaled{<:LinearMapping}) = Linear()
LinearType(A::Inverse) = LinearType(unveil(A))
LinearType(A::Union{Sum,Composition}) =
(allof(x -> LinearType(x) === Linear(), terms(A)...) ?
Linear() : NonLinear())
LinearType(A::Scaled{T,S}) where {T,S} =
# If the multiplier λ of a scaled mapping A = (λ⋅M) is zero, then A behaves
# linearly even though M is not a linear mapping. FIXME: But acknowledging
# this as a linear mapping may give rise to troubles later.
(multiplier(A) == zero(S) ? Linear() : LinearType(unscaled(A)))
@doc @doc(LinearType) Linear
@doc @doc(LinearType) NonLinear
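# For example (with `Id` the identity mapping defined in this package):
#
#     LinearType(Id) === Linear()   # `Identity` is a `LinearMapping`
#     LinearType(3*Id) === Linear() # a scaled linear mapping is linear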
"""
```julia
SelfAdjointType(A)
```
yields the *self-adjoint* trait of mapping `A` indicating whether `A` is
certainly a self-adjoint linear map. The returned value is one of the
singletons `SelfAdjoint()` for self-adjoint linear maps and `NonSelfAdjoint()`
for other mappings.
See also: [`Trait`](@ref), [`is_selfadjoint`](@ref).
"""
SelfAdjointType(::Mapping) = NonSelfAdjoint()
SelfAdjointType(A::DecoratedMapping) = SelfAdjointType(unveil(A))
SelfAdjointType(A::Scaled) = SelfAdjointType(unscaled(A))
SelfAdjointType(A::Sum) =
(allof(x -> SelfAdjointType(x) === SelfAdjoint(), terms(A)...) ?
SelfAdjoint() : NonSelfAdjoint())
SelfAdjointType(A::Gram) = SelfAdjoint()
@doc @doc(SelfAdjointType) SelfAdjoint
@doc @doc(SelfAdjointType) NonSelfAdjoint
"""
```julia
MorphismType(A)
```
yields the *morphism* trait of mapping `A` indicating whether `A` is certainly
an endomorphism (its input and output spaces are the same). The returned value
is one of the singletons `Endomorphism()` for mappings whose input and output
spaces are the same or `Morphism()` for other mappings.
See also: [`Trait`](@ref), [`is_endomorphism`](@ref).
"""
MorphismType(::Mapping) = Morphism()
MorphismType(A::DecoratedMapping) = MorphismType(unveil(A))
MorphismType(A::Gram) = Endomorphism()
MorphismType(A::Scaled) = MorphismType(unscaled(A))
MorphismType(A::Union{Sum,Composition}) =
(allof(x -> MorphismType(x) === Endomorphism(), terms(A)...) ?
Endomorphism() : Morphism())
@doc @doc(MorphismType) Morphism
@doc @doc(MorphismType) Endomorphism
"""
```julia
DiagonalType(A)
```
yields the *diagonal* trait of mapping `A` indicating whether `A` is certainly
a diagonal linear mapping. The returned value is one of the singletons
`DiagonalMapping()` for diagonal linear maps or `NonDiagonalMapping()` for other
mappings.
See also: [`Trait`](@ref), [`is_diagonal`](@ref).
"""
DiagonalType(::Mapping) = NonDiagonalMapping()
DiagonalType(A::DecoratedMapping) = DiagonalType(unveil(A))
DiagonalType(A::Scaled) = DiagonalType(unscaled(A))
DiagonalType(A::Union{Sum,Composition}) =
(allof(x -> DiagonalType(x) === DiagonalMapping(), terms(A)...) ?
DiagonalMapping() : NonDiagonalMapping())
@doc @doc(DiagonalType) NonDiagonalMapping
@doc @doc(DiagonalType) DiagonalMapping
"""
```julia
is_linear(A)
```
yields whether `A` is certainly a linear mapping.
!!! note
This method is intended to perform certain automatic simplifications or
    optimizations. It is guaranteed to return `true` when its argument is
certainly a linear mapping but it may return `false` even though its
argument behaves linearly because it is not always possible to figure out
that a complex mapping assemblage has this property.
See also: [`LinearType`](@ref).
"""
is_linear(A::LinearMapping) = true
is_linear(A::Mapping) = _is_linear(LinearType(A))
_is_linear(::Linear) = true
_is_linear(::NonLinear) = false
"""
```julia
is_selfadjoint(A)
```
yields whether mapping `A` is certainly a self-adjoint linear mapping.
!!! note
This method is intended to perform certain automatic simplifications or
    optimizations. It is guaranteed to return `true` when its argument is
certainly a self-adjoint linear mapping but it may return `false` even
though its argument behaves like a self-adjoint linear map because it is
not always possible to figure out that a complex mapping construction has
this property or because, for efficiency reasons, the coefficients of the
mapping are not considered for this trait.
See also: [`SelfAdjointType`](@ref).
"""
is_selfadjoint(A::Mapping) = _is_selfadjoint(SelfAdjointType(A))
_is_selfadjoint(::SelfAdjoint) = true
_is_selfadjoint(::NonSelfAdjoint) = false
"""
```julia
is_endomorphism(A)
```
yields whether mapping `A` is certainly an endomorphism.
!!! note
This method is intended to perform certain automatic simplifications or
    optimizations. It is guaranteed to return `true` when its argument is
certainly an endomorphism but it may return `false` even though its
argument behaves like an endomorphism because it is not always possible to
figure out that a complex mapping assemblage has this property.
See also: [`MorphismType`](@ref).
"""
is_endomorphism(A::Mapping) = _is_endomorphism(MorphismType(A))
_is_endomorphism(::Endomorphism) = true
_is_endomorphism(::Morphism) = false
"""
```julia
is_diagonal(A)
```
yields whether mapping `A` is certainly a diagonal linear map.
!!! note
This method is intended to perform certain automatic simplifications or
    optimizations. It is guaranteed to return `true` when its argument is
certainly a diagonal linear map but it may return `false` even though its
argument behaves like a diagonal linear map because it is not always
possible to figure out that a complex mapping assemblage has this property.
See also: [`DiagonalType`](@ref).
"""
is_diagonal(A::Mapping) = _is_diagonal(DiagonalType(A))
_is_diagonal(::DiagonalMapping) = true
_is_diagonal(::NonDiagonalMapping) = false
#
# types.jl -
#
# Type definitions and (some) constructors for linear algebra.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
struct SingularSystem <: Exception
msg::String
end
showerror(io::IO, err::SingularSystem) =
print(io, "singular linear system (", err.msg, ")")
struct NonPositiveDefinite <: Exception
msg::String
end
showerror(io::IO, err::NonPositiveDefinite) =
print(io, "non-positive definite operator (", err.msg, ")")
struct UnimplementedOperation <: Exception
msg::String
end
showerror(io::IO, err::UnimplementedOperation) =
print(io, err.msg)
struct UnimplementedMethod <: Exception
msg::String
end
showerror(io::IO, err::UnimplementedMethod) =
print(io, err.msg)
"""
Reals
is the set of the floating point types. It is the numerical approximation of
reals in the mathematical sense.
This definition closely follows the semantics of the BLAS module, where
`BlasReal` is the union of all real types supported by the BLAS library.
"""
const Reals = AbstractFloat
"""
Complexes
is the set of the complexes whose real and imaginary parts are floating point.
It is the numerical approximation of complexes in the mathematical sense.
This definition closely follows the semantics of the BLAS module, where
`BlasComplex` is the union of all complex types supported by the BLAS library.
"""
const Complexes = Complex{<:Reals}
"""
Floats
is the union of all floating-point types (reals and complexes).
This definition closely follows the semantics of the BLAS module, where
`BlasFloat` is the union of all floating-point types supported by the BLAS
library.
"""
const Floats = Union{Reals,Complexes}
"""
A `Mapping` is any function between two variables spaces. Assuming upper case
Latin letters denote mappings, lower case Latin letters denote variables, and
Greek letters denote scalars, then:
* `A*x` or `A⋅x` yields the result of applying the mapping `A` to `x`;
* `A\\x` yields the result of applying the inverse of `A` to `x`;
Simple constructions are allowed for any kind of mappings and can be used to
create new instances of mappings which behave correctly. For instance:
* `B = α*A` (where `α` is a real) is a mapping which behaves as `A` times `α`;
that is `B⋅x` yields the same result as `α*(A⋅x)`.
* `C = A + B + ...` is a mapping which behaves as the sum of the mappings `A`,
`B`, ...; that is `C⋅x` yields the same result as `A⋅x + B⋅x + ...`.
* `C = A*B` or `C = A⋅B` is a mapping which behaves as the composition of the
  mappings `A` and `B`; that is `C⋅x` yields the same result as `A⋅(B⋅x)`. As
for the sum of mappings, there may be an arbitrary number of mappings in a
composition; for example, if `D = A*B*C` then `D⋅x` yields the same result as
`A⋅(B⋅(C⋅x))`.
* `C = A\\B` is a mapping such that `C⋅x` yields the same result as `A\\(B⋅x)`.
* `C = A/B` is a mapping such that `C⋅x` yields the same result as `A⋅(B\\x)`.
These constructions can be combined to build up more complex mappings. For
example:
* `D = A*(B + C)` is a mapping such that `D⋅x` yields the same result as
`A⋅(B⋅x + C⋅x)`.
A `LinearMapping` is any linear mapping between two spaces. This abstract
subtype of `Mapping` is introduced to extend the notion of *matrices* and
*vectors*. Assuming the type of `A` inherits from `LinearMapping`, then:
* `A'⋅x` and `A'*x` yields the result of applying the adjoint of the mapping
`A` to `x`;
* `A'\\x` yields the result of applying the adjoint of the inverse of mapping
`A` to `x`.
* `B = A'` is a mapping such that `B⋅x` yields the same result as `A'⋅x`.
The following methods should be implemented for a mapping `A` of specific type
`M <: Mapping`:
```julia
vcreate(::Type{P}, A::M, x, scratch::Bool) -> y
apply!(α::Number, ::Type{P}, A::M, x, scratch::Bool, β::Number, y) -> y
```
for any supported operation `P ∈ Operations` (`Direct`, `Adjoint`, `Inverse`
and/or `InverseAdjoint`). See the documentation of these methods for
explanations. Optionally, methods `P(A)` may be extended, *e.g.* to throw
exceptions if operation `P` is forbidden (or not implemented). By default, all
these operations are assumed possible (except `Adjoint` and `InverseAdjoint`
for a nonlinear mapping).
See also: [`apply`](@ref), [`apply!`](@ref), [`vcreate`](@ref),
[`LinearType`](@ref), [`Scalar`](@ref), [`Direct`](@ref),
[`Adjoint`](@ref), [`Inverse`](@ref), [`InverseAdjoint`](@ref).
"""
abstract type Mapping <: Function end
abstract type LinearMapping <: Mapping end
@doc @doc(Mapping) LinearMapping
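# Illustrative sketch (assuming `A` and `B` are linear mappings between
# compatible spaces):
#
#     C = 2*A + B # a `Sum` whose first term is a `Scaled` mapping
#     y = C*x     # same result as 2*(A*x) + B*x
#     G = A'*A    # simplifies to the Gram operator `gram(A)`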
"""
Identity()
yields the identity linear mapping. The purpose of this mapping is to be as
efficient as possible, hence the result of applying this mapping may be the
same as the input argument.
The identity is a singleton and is also available as:
Id
The `LinearAlgebra` module of the standard library exports a constant `I` which
also corresponds to the identity (but in the sense of a matrix). When `I` is
combined with any LazyAlgebra mapping, it is recognized as an alias of `Id`,
so that, for instance, `I/A`, `A\\I`, `Id/A` and `A\\Id` all yield `inv(A)`
for any LazyAlgebra mapping `A`.
"""
struct Identity <: LinearMapping; end
"""
Trait
is the abstract type inherited by types indicating specific traits.
See also: [`LinearType`](@ref), [`SelfAdjointType`](@ref),
[`DiagonalType`](@ref), [`MorphismType`](@ref).
"""
abstract type Trait end
# Trait indicating whether a mapping is certainly linear.
abstract type LinearType <: Trait end
struct NonLinear <: LinearType end
struct Linear <: LinearType end
# Trait indicating whether a mapping is certainly a self-adjoint linear map.
abstract type SelfAdjointType <: Trait end
struct NonSelfAdjoint <: SelfAdjointType end
struct SelfAdjoint <: SelfAdjointType end
# Trait indicating whether a mapping is certainly an endomorphism.
abstract type MorphismType <: Trait end
struct Morphism <: MorphismType end
struct Endomorphism <: MorphismType end
# Trait indicating whether a mapping is certainly a diagonal linear mapping.
abstract type DiagonalType <: Trait end
struct NonDiagonalMapping <: DiagonalType end
struct DiagonalMapping <: DiagonalType end
"""
Type `Direct` is a singleton type to indicate that a linear mapping should
be directly applied. This type is part of the union `Operations`.
See also: [`LinearMapping`](@ref), [`apply`](@ref), [`Operations`](@ref).
"""
struct Direct; end
"""
Adjoint(A) -> obj
yields an object instance `obj` representing `A'`, the adjoint of the linear
mapping `A`.
Directly calling this constructor is discouraged, use an expression like `A'`
instead and benefit from automatic simplification rules.
Call [`unveil(obj)`](@ref) to reveal the linear mapping `A` embedded in `obj`.
See also [`DecoratedMapping`](@ref).
"""
struct Adjoint{T<:Mapping} <: LinearMapping
op::T
    # The outer constructors prevent most illegal calls to `Adjoint(A)`; we
    # just have to check that the argument is a simple linear mapping.
function Adjoint{T}(A::T) where {T<:Mapping}
is_linear(A) || throw_forbidden_adjoint_of_non_linear_mapping()
return new{T}(A)
end
end
"""
Inverse(A) -> obj
yields an object instance `obj` representing the inverse of the mapping `A`.
Directly calling this constructor is discouraged, call `inv(A)` or use an
expression like `Id/A` instead and benefit from automatic simplification rules.
Call [`unveil(obj)`](@ref) to reveal the mapping `A` embedded in `obj`.
See also [`DecoratedMapping`](@ref).
"""
struct Inverse{T<:Mapping} <: Mapping
op::T
# The outer constructors prevent all illegal calls to `Inverse(A)` so there
# is nothing more to check.
Inverse{T}(A::T) where {T<:Mapping} = new{T}(A)
end
"""
InverseAdjoint(A) -> obj
yields an object instance `obj` representing the inverse of the adjoint of the
linear mapping `A`.
Directly calling this constructor is discouraged, use expressions like
`inv(A')`, `inv(A)'` or `Id/A'` instead and benefit from automatic
simplification rules.
Call [`unveil(obj)`](@ref) to reveal the mapping `A` embedded in `obj`.
`AdjointInverse` is an alias for `InverseAdjoint`.
See also [`DecoratedMapping`](@ref).
"""
struct InverseAdjoint{T<:Mapping} <: LinearMapping
op::T
    # The outer constructors prevent most illegal calls to `InverseAdjoint(A)`;
    # we just have to check that the argument is a simple linear mapping.
function InverseAdjoint{T}(A::T) where {T<:Mapping}
is_linear(A) ||
bad_argument("taking the inverse adjoint of non-linear mappings is not allowed")
return new{T}(A)
end
end
const AdjointInverse{T} = InverseAdjoint{T}
@doc @doc(InverseAdjoint) AdjointInverse
"""
Gram(A) -> obj
yields an object instance `obj` representing the composition `A'*A` for the
linear mapping `A`.
Directly calling this constructor is discouraged, call [`gram(A)`](@ref) or use
expression `A'*A` instead and benefit from automatic simplification rules.
Call [`unveil(obj)`](@ref) to reveal the linear mapping `A` embedded in `obj`.
See also [`gram`](@ref), [`unveil`](@ref) and [`DecoratedMapping`](@ref).
"""
struct Gram{T<:Mapping} <: LinearMapping
op::T
    # The outer constructors prevent most illegal calls to `Gram(A)`; we
    # just have to check that the argument is a simple linear mapping.
function Gram{T}(A::T) where {T<:Mapping}
is_linear(A) || throw_forbidden_Gram_of_non_linear_mapping()
return new{T}(A)
end
end
"""
DecoratedMapping
is the union of the *decorated* mapping types: [`Adjoint`](@ref),
[`Inverse`](@ref), [`InverseAdjoint`](@ref), and [`Gram`](@ref).
The method [`unveil(A)`](@ref) can be called to reveal the mapping embedded in
a decorated mapping `A`.
"""
const DecoratedMapping = Union{Adjoint,Inverse,InverseAdjoint,Gram}
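# For example, if `A` is a linear mapping, `unveil(A')` yields `A` back;
# `unveil` similarly strips `Inverse`, `InverseAdjoint` and `Gram` wrappers.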
"""
Jacobian(A,x) -> obj
yields an object instance `obj` representing the Jacobian `∇(A,x)` of the
non-linear mapping `A` for the variables `x`.
Directly calling this constructor is discouraged, call [`jacobian(A,x)`](@ref)
or [`∇(A,x)`](@ref) instead and benefit from automatic simplification rules.
"""
struct Jacobian{M<:Mapping,T} <: Mapping
A::M
x::T
    # The outer constructors prevent most illegal calls to `Jacobian(A,x)`;
    # we just have to check that the argument is not a simple linear mapping.
function Jacobian{M,T}(A::M, x::T) where {M<:Mapping,T}
is_linear(A) &&
bad_argument("the Jacobian of a linear mapping of type `",
M, "` should be the mapping itself")
return new{M,T}(A, x)
end
end
"""
Operations
is the union of the possible variants to apply a mapping: [`Direct`](@ref),
[`Adjoint`](@ref), [`Inverse`](@ref) and [`InverseAdjoint`](@ref) (or its alias
[`AdjointInverse`](@ref)).
See also: [`apply`](@ref) and [`apply!`](@ref).
"""
const Operations = Union{Direct,Adjoint,Inverse,InverseAdjoint}
"""
Scaled(λ, M) -> obj
yields an object instance `obj` representing `λ*M`, the mapping `M` multiplied
by a scalar `λ`.
Directly calling this constructor is discouraged, use expressions like `λ*M`
instead and benefit from automatic simplification rules.
Call [`multiplier(obj)`](@ref) and [`unscaled(obj)`](@ref) with a scaled
mapping `obj = λ*M` to retrieve `λ` and `M` respectively.
"""
struct Scaled{T<:Mapping,S<:Number} <: Mapping
λ::S
M::T
Scaled{T,S}(λ::S, M::Mapping) where {S<:Number,T<:Mapping} =
new{T,S}(λ, M)
end
"""
Sum(A, B, ...) -> obj
yields an object instance `obj` representing the sum `A + B + ...` of the
mappings `A`, `B`, ...
Directly calling this constructor is discouraged, use expressions like `A + B +
...` instead and benefit from automatic simplification rules.
Call [`terms(obj)`](@ref) retrieve the tuple `(A,B,...)` of the terms of the
sum stored in `obj`.
"""
struct Sum{N,T<:NTuple{N,Mapping}} <: Mapping
ops::T
# The inner constructor ensures that the number of arguments is at least 2.
function Sum{N,T}(ops::T) where {N,T<:NTuple{N,Mapping}}
N ≥ 2 ||
throw(ArgumentError("a sum of mappings has at least 2 components"))
new{N,T}(ops)
end
end
"""
Composition(A, B, ...) -> obj
yields an object instance `obj` representing the composition `A*B*...` of the
mappings `A`, `B`, ...
Directly calling this constructor is discouraged, use expressions like
`A*B*...` `A∘B∘...` or `A⋅B⋅...` instead and benefit from automatic
simplification rules.
Call [`terms(obj)`](@ref) retrieve the tuple `(A,B,...)` of the terms of the
composition stored in `obj`.
"""
struct Composition{N,T<:NTuple{N,Mapping}} <: Mapping
ops::T
# The inner constructor ensures that the number of arguments is at least 2.
function Composition{N,T}(ops::T) where {N,T<:NTuple{N,Mapping}}
N ≥ 2 ||
throw(ArgumentError("a composition of mappings has at least 2 components"))
new{N,T}(ops)
end
end
#
# utils.jl -
#
# General purpose methods.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
@noinline bad_argument(args...) = bad_argument(string(args...))
bad_argument(mesg::String) = throw(ArgumentError(mesg))
@noinline bad_size(args...) = bad_size(string(args...))
bad_size(mesg::String) = throw(DimensionMismatch(mesg))
arguments_have_incompatible_axes() =
bad_size("arguments have incompatible dimensions/indices")
operands_have_incompatible_axes() =
bad_size("operands have incompatible dimensions/indices")
"""
message([io=stdout,] header, args...; color=:blue)
prints a message on `io` with `header` text in bold followed by a space,
`args...` and a newline. Keyword `color` can be used to specify the text color
of the message.
"""
message(header::String, args...; kwds...) =
message(stdout, header, args...; kwds...)
@noinline function message(io::IO, header::String, args...;
color::Symbol=:blue)
printstyled(io, header; color=color, bold=true)
printstyled(io, " ", args...; color=color, bold=false)
println(io)
end
"""
    warn([io=stderr,] args...)
prints a warning message in yellow on `io` with `"Warning: "` in bold followed
by `args...` and a newline.
"""
warn(args...) = warn(stderr, args...)
warn(io::IO, args...) = message(io, "Warning:", args...; color=:yellow)
#inform(args...) = inform(stderr, args...)
#inform(io::IO, args...) = message(io, "Info:", args...; color=:blue)
"""
promote_multiplier(λ, T)
yields multiplier `λ` converted to a suitable floating-point type for
multiplying values or expressions of type `T`. This method is *type stable*.
The result has the same floating-point precision as `T` and is a real if `λ` is
real or a complex if `λ` is complex.
Multiple arguments can be specified after the multiplier `λ`:
promote_multiplier(λ, args...)
to have `T` the promoted type of all types in `args...` or all element types of
arrays in `args...`.
See methods [`LazyAlgebra.multiplier_type`](@ref) and
[`LazyAlgebra.multiplier_floatingpoint_type`](@ref).
""" promote_multiplier
# Note that the only direct sub-types of `Number` are abstract types `Real` and
# `Complex`. Also see discussion here
# (https://github.com/emmt/LinearInterpolators.jl/issues/7) for details about
# the following implementation.
@inline function promote_multiplier(λ::Real, args...)
T = multiplier_floatingpoint_type(args...)
return convert(T, λ)::T
end
@inline function promote_multiplier(λ::Complex{<:Real}, args...)
T = multiplier_floatingpoint_type(args...)
return convert(Complex{T}, λ)::Complex{T}
end
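# Illustrative sketch: with `x = ones(Float32, 3)`,
#
#     promote_multiplier(2, x)       # -> 2.0f0 (a `Float32`)
#     promote_multiplier(2 + 0im, x) # -> 2.0f0 + 0.0f0im (a `ComplexF32`)
#
# that is, real multipliers stay real and complex multipliers stay complex,
# both at the floating-point precision of `x`.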
"""
multiplier_floatingpoint_type(args...) -> T::AbstractFloat
yields the multiplier floating-point type for the arguments `args...` of the
multiplier. Each argument may be anything acceptable for
[`LazyAlgebra.multiplier_type`](@ref). The result is guaranteed to be a
concrete floating-point type.
See methods [`LazyAlgebra.promote_multiplier`](@ref) and
[`LazyAlgebra.multiplier_type`](@ref).
""" multiplier_floatingpoint_type
multiplier_floatingpoint_type(::Tuple{}) =
throw(ArgumentError("at least one other argument must be specified"))
@inline function multiplier_floatingpoint_type(args...)
T = promote_type(map(multiplier_type, args)...)
    (T <: Number && isconcretetype(T)) || error(
        "resulting multiplier type ", T, " is not a concrete number type")
return float(real(T))
end
"""
multiplier_type(x) -> T::Number
yields the *element* type to be imposed to multipliers of `x`. The result must
be a concrete number type. Argument `x` may be an array, a number, or a data
type. Other packages are however encouraged to specialize this method for
their needs.
See methods [`LazyAlgebra.promote_multiplier`](@ref) and
[`LazyAlgebra.multiplier_floatingpoint_type`](@ref).
"""
multiplier_type(::Type{T}) where {T<:Number} = T
multiplier_type(::AbstractArray{T}) where {T<:Number} = T
multiplier_type(::T) where {T<:Number} = T
"""
to_tuple(arg)
converts `arg` into an `N`-tuple where `N` is the number of elements of `arg`.
This is equivalent to `Tuple(arg)` or `(arg...,)` for a vector but it is much
faster for small vectors.
""" to_tuple
to_tuple(x::Tuple) = x
# The cutoff at n = 10 below reflects what is used by `ntuple`. This value is
# somewhat arbitrary, on the machines where I tested the code, the explicit
# unrolled expression for n = 10 is still about 44 times faster than `(x...,)`.
# Calling `ntuple` for n ≤ 10 is about twice slower; for n > 10, `ntuple` is
# slower than `(x...,)`.
function to_tuple(x::AbstractVector)
n = length(x)
@inbounds begin
n == 0 ? () :
n > 10 || firstindex(x) != 1 ? (x...,) :
n == 1 ? (x[1],) :
n == 2 ? (x[1], x[2]) :
n == 3 ? (x[1], x[2], x[3]) :
n == 4 ? (x[1], x[2], x[3], x[4]) :
n == 5 ? (x[1], x[2], x[3], x[4], x[5]) :
n == 6 ? (x[1], x[2], x[3], x[4], x[5], x[6]) :
n == 7 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7]) :
n == 8 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8]) :
n == 9 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]) :
(x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])
end
end
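# For example, `to_tuple([1, 2, 3])` yields `(1, 2, 3)` through the unrolled
# branch, while a 20-element vector falls back to the generic `(x...,)`.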
"""
@certify expr [mesg]
asserts that expression `expr` is true; otherwise, throws an `AssertionError`
exception with message `mesg`. If unspecified, `mesg` is `expr` converted into
a string. Compared to `@assert`, the assertion made by `@certify` may never be
disabled whatever the optimization level.
"""
macro certify(expr)
_certify(expr, string(expr))
end
macro certify(expr, mesg::Union{Expr,Symbol})
_certify(expr, :(string($(esc(mesg)))))
end
macro certify(expr, mesg::AbstractString)
_certify(expr, mesg)
end
macro certify(expr, mesg)
_certify(expr, string(mesg))
end
_certify(expr, mesg) = :($(esc(expr)) ? nothing : throw(AssertionError($mesg)))
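# Illustrative sketch:
#
#     @certify size(A) == size(B) "arrays must have the same size"
#
# throws `AssertionError("arrays must have the same size")` whenever the sizes
# differ, even with optimizations enabled (unlike `@assert`).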
#
# vectors.jl -
#
# Implement basic operations for *vectors*. Here arrays of any rank are
# considered as *vectors*, the only requirements are that, when combining
# *vectors*, they have the same list of axes (i.e. the same dimensions for
# most arrays). These methods are intended to be used for numerical
# optimization.
#
#-------------------------------------------------------------------------------
#
# This file is part of LazyAlgebra (https://github.com/emmt/LazyAlgebra.jl)
# released under the MIT "Expat" license.
#
# Copyright (c) 2017-2020 Éric Thiébaut.
#
"""
vnorm2([T,] v)
yields the Euclidean (L2) norm of `v`. The floating point type of the result
can be imposed by optional argument `T`. Also see [`vnorm1`](@ref) and
[`vnorminf`](@ref).
"""
function vnorm2(v::AbstractArray{<:Floats})
s = zero(real(eltype(v)))
@inbounds @simd for i in eachindex(v)
s += abs2(v[i])
end
return sqrt(s)
end
"""
vnorm1([T,] v)
yields the L1 norm of `v`, that is the sum of the absolute values of its
elements. The floating point type of the result can be imposed by optional
argument `T`. For a complex valued argument, the result is the sum of the
absolute values of the real part and of the imaginary part of the elements
(like BLAS `asum`).
See also [`vnorm2`](@ref) and [`vnorminf`](@ref).
"""
function vnorm1(v::AbstractArray{<:Reals})
s = zero(real(eltype(v)))
@inbounds @simd for i in eachindex(v)
s += abs(v[i])
end
return s
end
function vnorm1(v::AbstractArray{<:Complexes})
si = sr = zero(real(eltype(v)))
@inbounds @simd for i in eachindex(v)
z = v[i]
sr += abs(real(z))
si += abs(imag(z))
end
return sr + si
end
"""
vnorminf([T,] v)
yields the infinite norm of `v`, that is the maximum absolute value of its
elements. The floating point type of the result can be imposed by optional
argument `T`. Also see [`vnorm1`](@ref) and [`vnorm2`](@ref).
"""
function vnorminf(v::AbstractArray{<:Reals})
absmax = zero(real(eltype(v)))
@inbounds @simd for i in eachindex(v)
absmax = max(absmax, abs(v[i]))
end
return absmax
end
function vnorminf(v::AbstractArray{<:Complexes})
abs2max = zero(real(eltype(v)))
@inbounds @simd for i in eachindex(v)
abs2max = max(abs2max, abs2(v[i]))
end
return sqrt(abs2max)
end
# Versions with forced type of output result.
for func in (:vnorm2, :vnorm1, :vnorminf)
@eval $func(::Type{T}, v) where {T<:AbstractFloat} =
convert(T, $func(v))::T
end
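# For example, with `v = [3.0, -4.0]`: `vnorm1(v) == 7.0`, `vnorm2(v) == 5.0`
# and `vnorminf(v) == 4.0`; `vnorm2(Float32, v)` yields the same value
# converted to `Float32`.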
#------------------------------------------------------------------------------
"""
vcreate(x)
yields a new variable instance similar to `x`. If `x` is an array, the
element type of the result is a floating-point type.
Also see [`similar`](@ref).
"""
vcreate(x::AbstractArray{T}) where {R<:Real,T<:Union{R,Complex{R}}} =
similar(x, float(T))
#------------------------------------------------------------------------------
"""
vcopy!(dst, src) -> dst
copies the contents of `src` into `dst` and returns `dst`. This function
checks that the copy makes sense (for instance, for array arguments, the
`copyto!` operation does not check that the source and destination have the
same dimensions).
Also see [`copyto!`](@ref), [`vcopy`](@ref), [`vswap!`](@ref).
"""
function vcopy!(dst::AbstractArray{<:Real,N},
src::AbstractArray{<:Real,N}) where {N}
if dst !== src
axes(dst) == axes(src) || arguments_have_incompatible_axes()
copyto!(dst, src)
end
return dst
end
function vcopy!(dst::AbstractArray{<:Complex{<:Real},N},
src::AbstractArray{<:Complex{<:Real},N}) where {N}
if dst !== src
axes(dst) == axes(src) || arguments_have_incompatible_axes()
copyto!(dst, src)
end
return dst
end
"""
vcopy(x)
yields a fresh copy of the *vector* `x`. If `x` is an array, the element
type of the result is a floating-point type.
Also see [`copy`](@ref), [`vcopy!`](@ref), [`vcreate`](@ref).
"""
vcopy(x) = vcopy!(vcreate(x), x)
"""
vswap!(x, y)
exchanges the contents of `x` and `y` (which must have the same element type
and axes if they are arrays).
Also see [`vcopy!`](@ref).
"""
vswap!(x::T, y::T) where {T<:AbstractArray} =
x === y || _swap!(x, y)
vswap!(x::AbstractArray{T,N}, y::AbstractArray{T,N}) where {T,N} =
_swap!(x, y)
# Forced swapping.
_swap!(x::AbstractArray{T,N}, y::AbstractArray{T,N}) where {T,N} =
@inbounds @simd for i in all_indices(x, y)
temp = x[i]
x[i] = y[i]
y[i] = temp
end
#------------------------------------------------------------------------------
"""
vfill!(x, α) -> x
sets all elements of `x` with the scalar value `α` and return `x`. The default
implementation just calls `fill!(x,α)` but this method may be specialized for
specific types of variables `x`.
Also see [`vzero!`](@ref), [`fill!`](@ref).
"""
vfill!(x, α) = fill!(x, α)
"""
vzero!(x) -> x
fills `x` with zeros and returns it. The default implementation just calls
`fill!(x,zero(eltype(x)))` but this method may be specialized for specific
types of variables `x`.
Also see [`vfill!`](@ref).
"""
vzero!(x) = vfill!(x, zero(eltype(x)))
"""
vzeros(x)
yields a *vector* like `x` filled with zeros.
Also see [`vones`](@ref), [`vcreate`](@ref), [`vfill!`](@ref).
"""
vzeros(x) = vzero!(vcreate(x))
"""
vones(x)
yields a *vector* like `x` filled with ones.
Also see [`vzeros`](@ref), [`vcreate`](@ref), [`vfill!`](@ref).
"""
vones(x) = vfill!(vcreate(x), 1)
vones(x::AbstractArray{T}) where {T} = vfill!(vcreate(x), one(T))
#------------------------------------------------------------------------------
"""
vscale!(dst, α, src) -> dst
overwrites `dst` with `α*src` and returns `dst`. Computations are done at the
numerical precision of `promote_eltype(src,dst)`. The source argument may be
omitted to perform *in-place* scaling:
vscale!(x, α) -> x
overwrites `x` with `α*x` and returns `x`. The convention is that the result
is zero-filled if `α=0` (whatever the values in the source).
Methods are provided by default so that the order of the factor `α` and the
source vector may be reversed:
vscale!(dst, src, α) -> dst
vscale!(α, x) -> x
Also see [`vscale`](@ref), [`LinearAlgebra.rmul!`](@ref).
"""
function vscale!(dst::AbstractArray{<:Floats,N},
α::Number,
src::AbstractArray{<:Floats,N}) where {N}
if α == 1
vcopy!(dst, src)
elseif α == 0
axes(dst) == axes(src) || arguments_have_incompatible_axes()
vzero!(dst)
elseif α == -1
@inbounds @simd for i in all_indices(dst, src)
dst[i] = -src[i]
end
else
alpha = promote_multiplier(α, src)
@inbounds @simd for i in all_indices(dst, src)
dst[i] = alpha*src[i]
end
end
return dst
end
# In-place scaling.
function vscale!(x::AbstractArray{<:Floats}, α::Number)
if α == 0
vzero!(x)
elseif α == -1
@inbounds @simd for i in eachindex(x)
x[i] = -x[i]
end
elseif α != 1
alpha = promote_multiplier(α, x)
@inbounds @simd for i in eachindex(x)
x[i] *= alpha
end
end
return x
end
# In-place scaling with reverse order of arguments.
vscale!(α::Number, x) = vscale!(x, α)
# Scaling for other *vector* types.
vscale!(x, α::Number) = vscale!(x, α, x)
vscale!(dst, src, α::Number) = vscale!(dst, α, src)
# The following methods are needed to avoid looping forever.
vscale!(::Number, ::Number) = error("bad argument types")
vscale!(::Any, ::Number, ::Number) = error("bad argument types")
vscale!(::Number, ::Any, ::Number) = error("bad argument types")
vscale!(::Number, ::Number, ::Any) = error("bad argument types")
vscale!(::Number, ::Number, ::Number) = error("bad argument types")
"""
vscale(α, x)
or
vscale(x, α)
yield a new *vector* whose elements are those of `x` multiplied by the scalar
`α`.
Also see [`vscale!`](@ref), [`vcreate`](@ref).
"""
vscale(α::Number, x) = vscale!(vcreate(x), α, x)
vscale(x, α::Number) = vscale(α, x)
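# For example, `vscale(2, x)` allocates a new array holding `2*x` while
# `vscale!(x, 2)` doubles `x` in place; `vscale!(x, 0)` zero-fills `x` without
# reading it (so NaNs in `x` are wiped out), per the convention above.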
#------------------------------------------------------------------------------
# ELEMENT-WISE MULTIPLICATION
"""
vproduct(x, y) -> z
yields the element-wise multiplication of `x` by `y`. To avoid allocating the
result, the destination array `dst` can be specified with the in-place version
of the method:
vproduct!(dst, [sel,] x, y) -> dst
which overwrites `dst` with the elementwise multiplication of `x` by `y`.
Optional argument `sel` is a selection of indices to which apply the operation.
"""
vproduct(x::V, y::V) where {V} = vproduct!(vcreate(x), x, y)
vproduct(x::AbstractArray{<:Any,N}, y::AbstractArray{<:Any,N}) where {N} =
vproduct!(similar(x, promote_eltype(x,y)), x, y)
for Td in (AbstractFloat, Complex{<:AbstractFloat}),
Tx in (AbstractFloat, Complex{<:AbstractFloat}),
Ty in (AbstractFloat, Complex{<:AbstractFloat})
if Td <: Complex || (Tx <: Real && Ty <: Real)
@eval function vproduct!(dst::AbstractArray{<:$Td,N},
x::AbstractArray{<:$Tx,N},
y::AbstractArray{<:$Ty,N}) where {N}
@inbounds @simd for i in all_indices(dst, x, y)
dst[i] = x[i]*y[i]
end
return dst
end
@eval function vproduct!(dst::AbstractArray{<:$Td,N},
sel::AbstractVector{Int},
x::AbstractArray{<:$Tx,N},
y::AbstractArray{<:$Ty,N}) where {N}
if checkselection(sel, dst, x, y)
@inbounds @simd for j in eachindex(sel)
i = sel[j]
dst[i] = x[i]*y[i]
end
end
return dst
end
end
end
@doc @doc(vproduct) vproduct!
#------------------------------------------------------------------------------
# VECTOR UPDATE
"""
vupdate!(y, [sel,] α, x) -> y
overwrites `y` with `α*x + y` and returns `y`. The code is optimized for some
specific values of the multiplier `α`. For instance, if `α` is zero, then `y`
is left unchanged without using `x`. Computations are performed at the
numerical precision of `promote_eltype(x,y)`.
Optional argument `sel` is a selection of indices to which apply the operation.
Note that if an index is repeated, the operation will be performed several
times at this location.
See also: [`vscale!`](@ref), [`vcombine!`](@ref).
"""
function vupdate!(y::AbstractArray{<:Number,N},
α::Number,
x::AbstractArray{<:Number,N}) where {N}
axes(x) == axes(y) || arguments_have_incompatible_axes()
if α == 1
@inbounds @simd for i in eachindex(x, y)
y[i] += x[i]
end
elseif α == -1
@inbounds @simd for i in eachindex(x, y)
y[i] -= x[i]
end
elseif α != 0
alpha = promote_multiplier(α, x)
@inbounds @simd for i in eachindex(x, y)
y[i] += alpha*x[i]
end
end
return y
end
function vupdate!(y::AbstractArray{<:Floats,N},
sel::AbstractVector{Int},
α::Number,
x::AbstractArray{<:Floats,N}) where {N}
if checkselection(sel, x, y)
if α == 1
@inbounds @simd for j in eachindex(sel)
i = sel[j]
y[i] += x[i]
end
elseif α == -1
@inbounds @simd for j in eachindex(sel)
i = sel[j]
y[i] -= x[i]
end
elseif α != 0
alpha = promote_multiplier(α, x)
@inbounds @simd for j in eachindex(sel)
i = sel[j]
y[i] += alpha*x[i]
end
end
end
return y
end
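# Illustrative sketch of the selected-indices variant: with `sel = [1, 1, 3]`,
#
#     vupdate!(y, sel, 2, x)
#
# adds `2*x[1]` to `y[1]` twice and `2*x[3]` to `y[3]` once, illustrating that
# repeated indices are applied repeatedly.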
#------------------------------------------------------------------------------
# LINEAR COMBINATION
"""
vcombine(α, x, β, y) -> dst
yields the linear combination `dst = α*x + β*y`.
----
To avoid allocating the result, the destination array `dst` can be specified
with the in-place version of the method:
vcombine!(dst, α, x, β, y) -> dst
The code is optimized for some specific values of the multipliers `α` and `β`.
For instance, if `α` (resp. `β`) is zero, then the prior contents of `x`
(resp. `y`) is not used.
The source(s) and the destination can be the same. For instance, the two
following lines of code produce the same result:
vcombine!(dst, 1, dst, α, x)
vupdate!(dst, α, x)
See also: [`vscale!`](@ref), [`vupdate!`](@ref).
"""
vcombine(α::Number, x::V, β::Number, y::V) where {V} =
vcombine!(vcreate(x), α, x, β, y)
function vcombine!(dst::AbstractArray{<:Number,N},
α::Number, x::AbstractArray{<:Number,N},
β::Number, y::AbstractArray{<:Number,N}) where {N}
axes(dst) == axes(x) == axes(y) || arguments_have_incompatible_axes()
if α == 0
if β == 0
vzero!(dst)
elseif β == 1
_vcombine!(dst, axpby_yields_y, 0,x, 1,y)
elseif β == -1
_vcombine!(dst, axpby_yields_my, 0,x,-1,y)
else
b = promote_multiplier(β, y)
_vcombine!(dst, axpby_yields_by, 0,x, b,y)
end
elseif α == 1
if β == 0
_vcombine!(dst, axpby_yields_x, 1,x, 0,y)
elseif β == 1
_vcombine!(dst, axpby_yields_xpy, 1,x, 1,y)
elseif β == -1
_vcombine!(dst, axpby_yields_xmy, 1,x,-1,y)
else
b = promote_multiplier(β, y)
_vcombine!(dst, axpby_yields_xpby, 1,x, b,y)
end
elseif α == -1
if β == 0
_vcombine!(dst, axpby_yields_mx, -1,x, 0,y)
elseif β == 1
_vcombine!(dst, axpby_yields_ymx, -1,x, 1,y)
elseif β == -1
_vcombine!(dst, axpby_yields_mxmy, -1,x,-1,y)
else
b = promote_multiplier(β, y)
_vcombine!(dst, axpby_yields_bymx, -1,x, b,y)
end
else
a = promote_multiplier(α, x)
if β == 0
_vcombine!(dst, axpby_yields_ax, a,x, 0,y)
elseif β == 1
_vcombine!(dst, axpby_yields_axpy, a,x, 1,y)
elseif β == -1
_vcombine!(dst, axpby_yields_axmy, a,x,-1,y)
else
b = promote_multiplier(β, y)
_vcombine!(dst, axpby_yields_axpby, a,x, b,y)
end
end
return dst
end
function _vcombine!(dst::AbstractArray{<:Number,N},
f::Function,
α::Number, x::AbstractArray{<:Number,N},
β::Number, y::AbstractArray{<:Number,N}) where {N}
@inbounds @simd for i in eachindex(dst, x, y)
dst[i] = f(α, x[i], β, y[i])
end
end
@doc @doc(vcombine) vcombine!
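# For example, `vcombine!(dst, 1, x, -1, y)` stores `x - y` into `dst`,
# dispatching to the `axpby_yields_xmy` branch above so that, as the helper's
# name indicates, no actual multiplication by the multipliers takes place.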
#------------------------------------------------------------------------------
# INNER PRODUCT
"""
vdot([T,] [w,] x, y)
yields the inner product of `x` and `y`; that is, the sum of `conj(x[i])*y[i]`
or, if `w` is specified, the sum of `w[i]*conj(x[i])*y[i]` (`w` must have
real-valued elements), for all indices `i`. Optional argument `T` is the
floating point type of the result.
Another possibility is:
vdot([T,] sel, x, y)
with `sel` a selection of indices to restrict the computation of the inner
product to some selected elements. This yields the sum of `x[i]*y[i]` for all
`i ∈ sel`.
If the arguments have complex-valued elements and `T` is specified as a
floating-point type, complexes are considered as vectors of pairs of reals and
the result is:
vdot(T::Type{AbstractFloat}, x, y)
-> ((x[1].re*y[1].re + x[1].im*y[1].im) +
(x[2].re*y[2].re + x[2].im*y[2].im) + ...)
"""
vdot(::Type{T}, x, y) where {T<:AbstractFloat} = convert(T,vdot(x,y))::T
vdot(::Type{T}, w, x, y) where {T<:AbstractFloat} = convert(T,vdot(w,x,y))::T
function vdot(x::AbstractArray{<:AbstractFloat,N},
y::AbstractArray{<:AbstractFloat,N}) where {N}
s = zero(promote_eltype(x, y))
@inbounds @simd for i in all_indices(x, y)
s += x[i]*y[i]
end
return s
end
function vdot(x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(promote_eltype(x, y))
@inbounds @simd for i in all_indices(x, y)
s += conj(x[i])*y[i]
end
return s
end
# This one yields the real part of the dot product, just as if complexes were
# pairs of reals.
function vdot(T::Type{<:AbstractFloat},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(real(promote_eltype(x, y)))
@inbounds @simd for i in all_indices(x, y)
xi = x[i]
yi = y[i]
s += real(xi)*real(yi) + imag(xi)*imag(yi)
end
return convert(T, s)::T
end
function vdot(T::Type{Complex{<:AbstractFloat}},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
return convert(T, vdot(x, y))::T
end
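# Illustrative sketch: for `x = [1.0 + 2.0im]` and `y = [3.0 + 4.0im]`,
#
#     vdot(x, y)          # -> conj(1+2im)*(3+4im) = 11.0 - 2.0im
#     vdot(Float64, x, y) # -> 1*3 + 2*4 = 11.0 (complexes as pairs of reals)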
function vdot(w::AbstractArray{<:AbstractFloat,N},
x::AbstractArray{<:AbstractFloat,N},
y::AbstractArray{<:AbstractFloat,N}) where {N}
s = zero(promote_eltype(w, x, y))
@inbounds @simd for i in all_indices(w, x, y)
s += w[i]*x[i]*y[i]
end
return s
end
function vdot(T::Type{<:AbstractFloat},
w::AbstractArray{<:AbstractFloat,N},
x::AbstractArray{<:AbstractFloat,N},
y::AbstractArray{<:AbstractFloat,N}) where {N}
return convert(T, vdot(w, x, y))::T
end
function vdot(w::AbstractArray{<:AbstractFloat,N},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(promote_eltype(w, x, y))
@inbounds @simd for i in all_indices(w, x, y)
s += w[i]*conj(x[i])*y[i]
end
return s
end
function vdot(T::Type{<:AbstractFloat},
w::AbstractArray{<:AbstractFloat,N},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(real(promote_eltype(w, x, y)))
@inbounds @simd for i in all_indices(w, x, y)
xi = x[i]
yi = y[i]
s += (real(xi)*real(yi) + imag(xi)*imag(yi))*w[i]
end
return convert(T, s)::T
end
function vdot(T::Type{Complex{<:AbstractFloat}},
w::AbstractArray{<:AbstractFloat,N},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
return convert(T, vdot(w, x, y))::T
end
function vdot(sel::AbstractVector{Int},
x::AbstractArray{<:AbstractFloat,N},
y::AbstractArray{<:AbstractFloat,N}) where {N}
s = zero(promote_eltype(x, y))
if checkselection(sel, x, y)
@inbounds @simd for j in eachindex(sel)
i = sel[j]
s += x[i]*y[i]
end
end
return s
end
function vdot(T::Type{<:AbstractFloat},
sel::AbstractVector{Int},
x::AbstractArray{<:AbstractFloat,N},
y::AbstractArray{<:AbstractFloat,N}) where {N}
return convert(T, vdot(sel, x, y))::T
end
function vdot(sel::AbstractVector{Int},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(promote_eltype(x, y))
if checkselection(sel, x, y)
@inbounds @simd for j in eachindex(sel)
i = sel[j]
s += conj(x[i])*y[i]
end
end
return s
end
function vdot(T::Type{<:AbstractFloat},
sel::AbstractVector{Int},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
s = zero(real(promote_eltype(x, y)))
if checkselection(sel, x, y)
@inbounds @simd for j in eachindex(sel)
i = sel[j]
xi = x[i]
yi = y[i]
s += real(xi)*real(yi) + imag(xi)*imag(yi)
end
end
return convert(T, s)::T
end
function vdot(T::Type{Complex{<:AbstractFloat}},
sel::AbstractVector{Int},
x::AbstractArray{<:Complex{<:AbstractFloat},N},
y::AbstractArray{<:Complex{<:AbstractFloat},N}) where {N}
return convert(T, vdot(sel, x, y))::T
end
# Check compatibility of the selected indices with the other specified
# array(s) and return whether the selection is non-empty.
@inline function checkselection(sel::AbstractVector{Int},
A::AbstractArray{<:Any,N}) where {N}
@certify IndexStyle(sel) === IndexLinear()
@certify IndexStyle(A) === IndexLinear()
flag = !isempty(sel)
if flag
imin, imax = extrema(sel)
I = eachindex(IndexLinear(), A)
((first(I) ≤ imin) & (imax ≤ last(I))) || out_of_range_selection()
end
return flag
end
@inline function checkselection(sel::AbstractVector{Int},
A::AbstractArray{<:Any,N},
B::AbstractArray{<:Any,N}) where {N}
@certify IndexStyle(B) === IndexLinear()
axes(A) == axes(B) || arguments_have_incompatible_axes()
checkselection(sel, A)
end
@inline function checkselection(sel::AbstractVector{Int},
A::AbstractArray{<:Any,N},
B::AbstractArray{<:Any,N},
C::AbstractArray{<:Any,N}) where {N}
@certify IndexStyle(B) === IndexLinear()
@certify IndexStyle(C) === IndexLinear()
axes(A) == axes(B) == axes(C) || arguments_have_incompatible_axes()
checkselection(sel, A)
end
@noinline out_of_range_selection() =
bad_argument("some selected indices are out of range")
isdefined(Main, :LazyAlgebra) || include("../src/LazyAlgebra.jl")
module LazyAlgebraBenchmarks
using BenchmarkTools
using Printf              # for @printf (no longer in Base since Julia 0.7)
using LinearAlgebra: dot  # `dot` moved out of Base since Julia 0.7
using LazyAlgebra
import LazyAlgebra: vdot
vdot(::Type{Val{:BLAS}}, x, y) =
LazyAlgebra.blas_vdot(x,y)
vdot(::Type{Val{:Julia}}, x, y) =
dot(reshape(x, length(x)), reshape(y, length(y)))
function vdot(::Type{Val{:basic}},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tx, Ty, N}
@assert size(x) == size(y)
T = promote_type(Tx, Ty)
s = zero(T)
for i in eachindex(x, y)
s += convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:inbounds}},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tx, Ty, N}
@assert size(x) == size(y)
T = promote_type(Tx, Ty)
s = zero(T)
@inbounds for i in eachindex(x, y)
s += convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:simd}},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tx, Ty, N}
@assert size(x) == size(y)
T = promote_type(Tx, Ty)
s = zero(T)
@inbounds @simd for i in eachindex(x, y)
s += convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:simdlinear}},
x::DenseArray{Tx,N},
y::DenseArray{Ty,N}) where {Tx, Ty, N}
@assert size(x) == size(y)
T = promote_type(Tx, Ty)
s = zero(T)
@inbounds @simd for i in 1:length(x)
s += x[i]*y[i]
end
return s
end
function vdot(::Type{Val{:basic}},
w::AbstractArray{Tw,N},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tw, Tx, Ty, N}
    @assert size(w) == size(x) == size(y)
    T = promote_type(Tw, Tx, Ty)
    s = zero(T)
    for i in eachindex(w, x, y)
s += convert(T, w[i])*convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:inbounds}},
w::AbstractArray{Tw,N},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tw, Tx, Ty, N}
@assert size(w) == size(x) == size(y)
T = promote_type(Tw, Tx, Ty)
s = zero(T)
@inbounds for i in eachindex(w, x, y)
s += convert(T, w[i])*convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:simd}},
w::AbstractArray{Tw,N},
x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tw, Tx, Ty, N}
@assert size(w) == size(x) == size(y)
T = promote_type(Tw, Tx, Ty)
s = zero(T)
@inbounds @simd for i in eachindex(w, x, y)
s += convert(T, w[i])*convert(T, x[i])*convert(T, y[i])
end
return s
end
function vdot(::Type{Val{:simdlinear}},
w::DenseArray{Tw,N},
x::DenseArray{Tx,N},
y::DenseArray{Ty,N}) where {Tw, Tx, Ty, N}
@assert size(w) == size(x) == size(y)
T = promote_type(Tw, Tx, Ty)
s = zero(T)
@inbounds @simd for i in 1:length(x)
s += w[i]*x[i]*y[i]
end
return s
end
testdot() = testdot(33, 33)
function testdot(_dims::Integer...)
global dims, w, x, y, z
dims = Int.(_dims)
println("\n\nDot products of $dims elements")
println("\\begin{tabular}{lrr}")
println("\\hline")
println(" & \\multicolumn{2}{c}{Median time (ns) for ",
dims, " elements} \\\\")
println("Operation & \\texttt{Float32} & \\texttt{Float64}\\\\")
println("\\hline")
println("\\hline")
for p in (:Julia, :BLAS, :basic, :inbounds, :simd, :simdlinear)
@printf(" dot %-12s", string("(",p,")"))
for T in (Float32, Float64)
w = randn(T, dims)
x = randn(T, dims)
y = randn(T, dims)
z = randn(T, dims)
s = vdot(Val{p}, x, y)
            t = @benchmark vdot($(Val{p}), $x, $y)
@printf(" & %4.0f", median(t.times))
end
println(" \\\\")
end
println("\\hline")
for p in (:basic, :inbounds, :simd, :simdlinear)
@printf("wdot %-12s", string("(",p,")"))
for T in (Float32, Float64)
w = randn(T, dims)
x = randn(T, dims)
y = randn(T, dims)
z = randn(T, dims)
            s = vdot(Val{p}, w, x, y)
            t = @benchmark vdot($(Val{p}), $w, $x, $y)
@printf(" & %4.0f", median(t.times))
end
println(" \\\\")
end
println("\\hline")
println("\\end{tabular}")
end
norminf(x::AbstractArray) = ((xmn, xmx) = extrema(x); return max(-xmn, xmx))
norm1(x::AbstractArray) = sum(abs, x)
norm2(x::AbstractArray) = sqrt(sum(abs2, x))
end
#
# cg-tests.jl -
#
# Tests for linear conjugate gradients methods.
#
#isdefined(:LazyAlgebra) || include("../src/LazyAlgebra.jl")
module TestingLazyAlgebraConjGrad
using LazyAlgebra
using Test
const DEBUG = true
@testset "ConjGrad" begin
types = (Float32, Float64)
rows = (7,8)
cols = (2,3,4)
@testset "least-square fit ($T)" for T in types
H = GeneralMatrix(randn(T, rows..., cols...))
x = randn(T, cols)
y = H*x + 0.01*randn(T, rows)
# Least-squares solution using Julia linear algebra.
yj = reshape(y, length(y))
Hj = reshape(coefficients(H), length(y), length(x))
Aj = Hj'*Hj
bj = Hj'*yj
x0 = reshape(Aj\bj, cols)
# Tolerances.
rtol = 1e-4
atol = rtol*vnorm2(x0)
# LHS matrix and RHS vector of the normal equations.
A = H'*H
b = H'*y
n = length(b)
x1 = conjgrad(A, b; maxiter=10n, restart=n,
quiet=false, verb=true,
gtol=(0,0), ftol=0, xtol=rtol/10)
DEBUG && println("x1: ", vnorm2(x1 - x0)/vnorm2(x0))
@test vnorm2(x1 - x0) ≤ atol
# Exercise random starting point.
x2 = conjgrad(A, b, randn(T, cols); maxiter=2n, restart=n,
quiet=true, verb=false,
gtol=(0,0), ftol=0, xtol=rtol/10)
DEBUG && println("x2: ", vnorm2(x2 - x0)/vnorm2(x0))
@test vnorm2(x2 - x0) ≤ atol
# Exercise other convergence tests.
x3 = conjgrad(A, b; maxiter=n, restart=n,
quiet=false, verb=true,
gtol=(0,0), ftol=0, xtol=0)
DEBUG && println("x3: ", vnorm2(x3 - x0)/vnorm2(x0))
@test vnorm2(x3 - x0) ≤ atol
x4 = conjgrad(A, b; maxiter=10n, restart=n,
quiet=false, verb=true,
gtol=(0,0), ftol=1e-9, xtol=0)
DEBUG && println("x4: ", vnorm2(x4 - x0)/vnorm2(x0))
@test vnorm2(x4 - x0) ≤ atol
x5 = conjgrad(A, b; maxiter=10n, restart=n,
quiet=false, verb=true,
gtol=(0,1e-6), ftol=0, xtol=0)
DEBUG && println("x5: ", vnorm2(x5 - x0)/vnorm2(x0))
@test vnorm2(x5 - x0) ≤ atol
# Directly use an array.
x6 = conjgrad(reshape(Aj, cols..., cols...), b;
maxiter=2n, restart=n,
quiet=true, verb=false,
gtol=(0,0), ftol=0, xtol=rtol/10)
DEBUG && println("x6: ", vnorm2(x6 - x0)/vnorm2(x0))
@test vnorm2(x6 - x0) ≤ atol
# Force no iterations (should yield exactly the initial solution).
x7 = conjgrad(A, b, x0;
maxiter=0, restart=n,
quiet=true, verb=false,
gtol=(0,0), ftol=0, xtol=0)
DEBUG && println("x7: ", vnorm2(x7 - x0)/vnorm2(x0))
@test vnorm2(x7 - x0) ≤ 0
# Force restarts.
x8 = conjgrad(A, b;
maxiter=4n, restart=(n>>1),
quiet=true, verb=false,
gtol=(0,0), ftol=0, xtol=0)
DEBUG && println("x8: ", vnorm2(x8 - x0)/vnorm2(x0))
@test vnorm2(x8 - x0) ≤ atol
# Non-positive definite.
@test vnorm2(conjgrad(-A, b; strict=false)) ≤ 0
@test_throws LazyAlgebra.NonPositiveDefinite conjgrad(-A, b; verb=true,
quiet=false)
end
end
nothing
end # module
#
# common.jl -
#
# Common functions for testing.
#
"""
```julia
floating_point_type(A, B, ...)
```
yields the floating-point type for operations between arrays `A`, `B`, ...
"""
floating_point_type(args::AbstractArray...) =
float(real(promote_type(map(eltype, args)...)))
"""
```julia
relative_precision(A, B, ...)
```
yields the worst of the relative precisions of the element types of arrays `A`,
`B`, ...
"""
relative_precision(args::AbstractArray...) = max(map(relative_precision, args)...)
relative_precision(A::AbstractArray{T}) where {T} = eps(float(real(T)))
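# Quick illustration (not used by the tests): mixed integer/complex inputs
# reduce to the widest real floating-point type, and the worst relative
# precision is that of the least precise element type.
let
    @assert floating_point_type(rand(Int, 3), rand(ComplexF32, 3)) === Float32
    @assert relative_precision(rand(Float32, 2), rand(Float64, 2)) == eps(Float32)
end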
"""
```julia
test_api(P, A, x, y; atol=0, rtol=sqrt(relative_precision(x,y)))
```
test LazyAlgebra API for mapping `P(A)` using variables `x` and `y`.
"""
function test_api(::Type{P}, A::Mapping, x0::AbstractArray, y0::AbstractArray;
rtol::Real=sqrt(relative_precision(x0,y0)),
atol::Real=0) where {P<:Union{Direct,InverseAdjoint}}
x = vcopy(x0)
y = vcopy(y0)
z = apply(P, A, x)
@test x == x0
T = floating_point_type(x, y, z)
for α in (0, 1, -1, 2.71, π),
β in (0, 1, -1, -1.33, Base.MathConstants.φ),
scratch in (false, true)
@test apply!(α, P, A, x, scratch, β, vcopy(y)) ≈
T(α)*z + T(β)*y atol=atol rtol=rtol
if scratch
vcopy!(x, x0)
else
@test x == x0
end
end
end
function test_api(::Type{P}, A::Mapping, x0::AbstractArray, y0::AbstractArray;
rtol::Real=sqrt(relative_precision(x0,y0)),
atol::Real=0) where {P<:Union{Adjoint,Inverse}}
x = vcopy(x0)
y = vcopy(y0)
z = apply(P, A, y)
@test y == y0
T = floating_point_type(x, y, z)
for α in (0, 1, -1, 2.71, π),
β in (0, 1, -1, -1.33, Base.MathConstants.φ),
scratch in (false, true)
@test apply!(α, P, A, y, scratch, β, vcopy(x)) ≈
T(α)*z + T(β)*x atol=atol rtol=rtol
if scratch
vcopy!(y, y0)
else
@test y == y0
end
end
end
#
# crop-tests.jl -
#
# Tests for cropping and zero-padding.
#
module TestingLazyAlgebraCrop
using Test
using LazyAlgebra
using LazyAlgebra.Foundations
@testset "Cropping and zero-padding" begin
#
# Private methods for testing.
#
offset(outer::NTuple{N,Int}, inner::NTuple{N,Int}) where {N} =
ntuple(i -> (outer[i]>>1) - (inner[i]>>1), Val(N))
function crop(x::AbstractArray{T,N}, siz::NTuple{N,Int}) where {T,N}
return crop!(Array{T,N}(undef, siz), x)
end
function crop(x::AbstractArray{T,N}, siz::NTuple{N,Int},
off::NTuple{N,Int}) where {T,N}
return crop!(Array{T,N}(undef, siz), x, off)
end
function zeropad(x::AbstractArray{T,N}, siz::NTuple{N,Int}) where {T,N}
return zeropad!(Array{T,N}(undef, siz), x)
end
function zeropad(x::AbstractArray{T,N}, siz::NTuple{N,Int},
off::NTuple{N,Int}) where {T,N}
return zeropad!(Array{T,N}(undef, siz), x, off)
end
function subregionindices(sub::AbstractArray{<:Any,N},
big::AbstractArray{<:Any,N},
off::NTuple{N,Int}) where {N}
I = CartesianIndices(sub) # indices in smallest region
J = CartesianIndices(big) # indices in largest region
k = CartesianIndex(off) # offset index
(first(J) ≤ first(I) + k && last(I) + k ≤ last(J)) ||
error("out of range sub-region")
return I, J, k
end
function crop!(y::AbstractArray{T,N},
x::AbstractArray{<:Any,N},
off::NTuple{N,Int} = offset(size(x), size(y))) where {T,N}
I, J, k = subregionindices(y, x, off)
@inbounds @simd for i ∈ I
y[i] = x[i + k]
end
return y
end
function zeropad!(y::AbstractArray{T,N},
x::AbstractArray{<:Any,N},
off::NTuple{N,Int} = offset(size(y), size(x)),
init::Bool = false) where {T,N}
I, J, k = subregionindices(x, y, off)
init || fill!(y, zero(T))
@inbounds @simd for i ∈ I
y[i+k] = x[i]
end
return y
end
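    # Quick illustration (not part of the checks below): crop a centered
    # 2-element window from a 4-element vector, then zero-pad it back.
    let x = [1.0, 2.0, 3.0, 4.0]
        @assert crop(x, (2,)) == [2.0, 3.0]
        @assert zeropad(crop(x, (2,)), (4,)) == [0.0, 2.0, 3.0, 0.0]
    end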
#
# Miscellaneous tests.
#
@test_throws ErrorException CroppingOperator((2,3,), (2,3,4,))
@test_throws ErrorException CroppingOperator((2,3,), (2,3,4,), (0,1,))
#
# Tests for different sizes and element types.
#
for (osz, isz) in (((3,), (8,)),
((4,), (8,)),
((3,), (7,)),
((4,), (7,)),
((4,5), (6,7))),
T in (Float64, Complex{Float32})
# Basics methods.
R = real(T)
C = CroppingOperator(osz, isz)
off = ntuple(i->Int16(isz[i] - osz[i])>>1, length(isz))
@test ZeroPaddingOperator(isz, osz) === Adjoint(C)
@test ZeroPaddingOperator(isz, osz, off) === Adjoint(CroppingOperator(osz, isz, off))
@test input_ndims(C) == length(isz)
@test input_size(C) == isz
@test all(i -> input_size(C,i) == isz[i], 1:length(isz))
@test output_ndims(C) == length(osz)
@test output_size(C) == osz
@test all(i -> output_size(C,i) == osz[i], 1:length(osz))
        # Compare result of operator with reference implementation.
x = rand(T, isz)
xsav = vcopy(x)
Cx = C*x
@test x == xsav
@test Cx == crop(x, osz)
y = rand(T, osz)
ysav = vcopy(y)
Cty = C'*y
@test y == ysav
@test Cty == zeropad(y, isz)
# Test various possibilities for apply!
atol = 0
rtol = eps(R)
for α in (0, 1, -1, 2.71, π),
β in (0, 1, -1, -1.33, Base.MathConstants.φ),
scratch in (false, true)
# Test operator.
@test apply!(α, Direct, C, x, scratch, β, vcopy(y)) ≈
R(α)*Cx + R(β)*y atol=atol rtol=rtol
if scratch
vcopy!(x, xsav)
else
@test x == xsav
end
# Test adjoint.
@test apply!(α, Adjoint, C, y, scratch, β, vcopy(x)) ≈
R(α)*Cty + R(β)*x atol=atol rtol=rtol
if scratch
vcopy!(y, ysav)
else
@test y == ysav
end
end
end
end
nothing
end # module
#
# diff-tests.jl -
#
# Tests for finite difference operators.
#
module TestingLazyAlgebraDiff
using Test
using MayOptimize
using LazyAlgebra
using LazyAlgebra.Foundations
using LazyAlgebra.FiniteDifferences: limits, ArrayAxis, ArrayAxes,
differentiation_order, dimensions_of_interest, optimization_level
include("common.jl")
@inline colons(n::Integer) = ntuple(x -> Colon(), max(Int(n), 0))
# Slice along the last dimension.
slice(A::AbstractArray{T,N}, d::Int) where {T,N} =
view(A, colons(N - 1)..., d)
# Reference implementation of finite differences.
gram_diff_ref(order::Integer, A::AbstractArray; dim::Int=1) =
diff_ref(order, diff_ref(order, A; dim=dim, adj=false); dim=dim, adj=true)
diff_ref(order::Integer, A::AbstractArray; kwds...) =
(order == 1 ? diff1_ref(A; kwds...) :
order == 2 ? diff2_ref(A; kwds...) :
error("invalid derivative order ", order))
diff1_ref(A::AbstractArray; kwds...) = diff1_ref!(similar(A), A; kwds...)
diff2_ref(A::AbstractArray; kwds...) = diff2_ref!(similar(A), A; kwds...)
function diff1_ref!(dst::AbstractArray{<:Any,N},
src::AbstractArray{<:Any,N};
dim::Int=1,
adj::Bool=false) where {N}
inds = axes(src)
@assert axes(dst) == inds
@assert 1 ≤ dim ≤ N
I = colons(dim - 1)
J = inds[dim]
K = colons(N - dim)
diff1_ref!(dst, src, I, J, K, adj)
end
function diff2_ref!(dst::AbstractArray{<:Any,N},
src::AbstractArray{<:Any,N};
dim::Int=1,
adj::Bool=false) where {N}
inds = axes(src)
@assert axes(dst) == inds
@assert 1 ≤ dim ≤ N
I = colons(dim - 1)
J = inds[dim]
K = colons(N - dim)
diff2_ref!(dst, src, I, J, K)
end
#
# Code to apply 1st order finite difference operator D or its adjoint D' (given
# below) along a given dimensions of an array.
#
# D = [ -1 1 0 0
# 0 -1 1 0
# 0 0 -1 1
# 0 0 0 0];
#
# D' = [ -1 0 0 0
# 1 -1 0 0
# 0 1 -1 0
# 0 0 1 0];
#
function diff1_ref!(dst::AbstractArray{<:Any,N},
src::AbstractArray{<:Any,N},
I::Tuple{Vararg{Colon}},
J::ArrayAxis,
K::Tuple{Vararg{Colon}},
adj::Bool) where {N}
# Forward 1st order finite differences assuming flat boundary conditions
# along a given dimension.
len = length(J)
if len > 1
j_first = first(J)
j_last = last(J)
if adj
map!(-,
view(dst, I..., j_first, K...),
view(src, I..., j_first, K...))
if len > 2
map!(-,
view(dst, I..., (j_first+1):(j_last-1), K...),
view(src, I..., (j_first ):(j_last-2), K...),
view(src, I..., (j_first+1):(j_last-1), K...))
end
copyto!(view(dst, I..., j_last, K...),
view(src, I..., j_last-1, K...))
else
map!(-,
view(dst, I..., (j_first ):(j_last-1), K...),
view(src, I..., (j_first+1):(j_last ), K...),
view(src, I..., (j_first ):(j_last-1), K...))
fill!(view(dst, I..., j_last, K...), zero(eltype(dst)))
end
else
fill!(dst, zero(eltype(dst)))
end
return dst
end
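# A quick sanity check (illustrative, not part of the test sets) that the
# reference implementation matches the matrices D and D' shown above:
let x = [1.0, 3.0, 6.0, 10.0]
    @assert diff1_ref(x, dim=1, adj=false) == [2.0, 3.0, 4.0, 0.0]    # D*x
    @assert diff1_ref(x, dim=1, adj=true)  == [-1.0, -2.0, -3.0, 6.0] # D'*x
end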
#
# 2nd order finite differences D2 with flat boundary conditions is given by:
#
# D2 = [-1 1 0 0
# 1 -2 1 0
# 0 1 -2 1
# 0 0 1 -1]
#
# Note that this operator is self-adjoint, so there is no `adj` argument.
#
function diff2_ref!(dst::AbstractArray{<:Any,N},
src::AbstractArray{<:Any,N},
I::Tuple{Vararg{Colon}},
J::ArrayAxis,
K::Tuple{Vararg{Colon}}) where {N}
    # Forward 2nd order finite differences assuming flat boundary conditions
    # along a given dimension.
len = length(J)
if len ≥ 2
j_first = first(J)
j_last = last(J)
map!((b, c) -> c - b,
view(dst, I..., j_first, K...),
view(src, I..., j_first, K...),
view(src, I..., j_first+1, K...))
if len > 2
map!((a, b, c) -> (c - b) - (b - a),
view(dst, I..., (j_first+1):(j_last-1), K...),
view(src, I..., (j_first ):(j_last-2), K...),
view(src, I..., (j_first+1):(j_last-1), K...),
view(src, I..., (j_first+2):(j_last ), K...))
end
map!((a, b) -> a - b,
view(dst, I..., j_last, K...),
view(src, I..., j_last-1, K...),
view(src, I..., j_last, K...))
else
fill!(dst, zero(eltype(dst)))
end
return dst
end
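# Likewise (illustrative), check the reference implementation against the
# matrix D2 shown above:
let x = [1.0, 3.0, 6.0, 10.0]
    @assert diff2_ref(x, dim=1) == [2.0, 1.0, 1.0, -4.0] # D2*x
end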
@testset "Finite differences" begin
    # First test the correctness of the result against the above reference
    # implementation and use the scalar product to check the adjoint.  Use
    # integer values for exact computations, over a range small enough to
    # avoid overflows and to have exact floating-point representations.
vmin = -(vmax = Float64(7_000))
vals = vmin:vmax
@testset "Differentiation order = $order" for order in 1:2
# Dimensions to test: 1, 2 and any ≥ 3 for 1st order derivatives, 1:4
# and any ≥ 5 for 2nd order derivatives.
dimlist = (order == 1 ? (1, 2, 5) : ((1:4)..., 6))
sizes = Tuple{Vararg{Int}}[]
for dim1 in dimlist
push!(sizes, (dim1,))
end
for dim1 in dimlist, dim2 in dimlist
push!(sizes, (dim1,dim2))
end
for dim1 in dimlist, dim2 in dimlist, dim3 in dimlist
push!(sizes, (dim1,dim2,dim3))
end
for dims in sizes
            # Random values (and copies to check that input variables do not
            # change).
x = rand(vals, dims)
y = rand(vals, (size(x)..., ndims(x)))
xsav = copy(x)
ysav = copy(y)
atol = zero(eltype(x))
rtol = 16*eps(eltype(x))
# Apply along all dimensions specified as a colon.
D_all = Diff(order,:,Debug)
@test differentiation_order(D_all) === order
@test dimensions_of_interest(D_all) === Colon
@test optimization_level(D_all) === Debug
D_all_x = D_all*x; @test x == xsav
Dt_all_y = D_all'*y; @test y == ysav
@test vdot(x, Dt_all_y) == vdot(y, D_all_x)
test_api(Direct, D_all, x, y; atol=atol, rtol=rtol)
test_api(Adjoint, D_all, x, y; atol=atol, rtol=rtol)
DtD_all = D_all'*D_all
@test DtD_all === gram(D_all)
@test isa(DtD_all, Gram{typeof(D_all)})
@test differentiation_order(DtD_all) === order
@test dimensions_of_interest(DtD_all) === dimensions_of_interest(D_all)
@test optimization_level(DtD_all) === optimization_level(D_all)
@test DtD_all*x == D_all'*D_all_x; @test x == xsav
test_api(Direct, DtD_all, x, x; atol=atol, rtol=rtol)
# Apply along all dimensions specified as a list.
D_lst = Diff(order,((1:ndims(x))...,),Debug)
@test differentiation_order(D_lst) === order
@test dimensions_of_interest(D_lst) === ((1:ndims(x))...,)
@test optimization_level(D_lst) === Debug
D_lst_x = D_lst*x; @test x == xsav
Dt_lst_y = D_lst'*y; @test y == ysav
@test vdot(x, Dt_lst_y) == vdot(y, D_lst_x)
test_api(Direct, D_lst, x, y; atol=atol, rtol=rtol)
test_api(Adjoint, D_lst, x, y; atol=atol, rtol=rtol)
DtD_lst = D_lst'*D_lst
@test DtD_lst === gram(D_lst)
@test isa(DtD_lst, Gram{typeof(D_lst)})
@test differentiation_order(DtD_lst) === order
@test dimensions_of_interest(DtD_lst) === dimensions_of_interest(D_lst)
@test optimization_level(DtD_lst) === optimization_level(D_lst)
@test DtD_lst*x == D_lst'*D_lst_x
test_api(Direct, DtD_lst, x, x; atol=atol, rtol=rtol)
if ndims(x) > 1
                # Apply along all dimensions in reverse order, specified as a
                # range, to avoid the all-dimensions-at-a-time version.
D_rev = Diff(order,ndims(x):-1:1,Debug)
@test differentiation_order(D_rev) === order
@test dimensions_of_interest(D_rev) === ((ndims(x):-1:1)...,)
@test optimization_level(D_rev) === Debug
D_rev_x = D_rev*x; @test x == xsav
Dt_rev_y = D_rev'*y; @test y == ysav
@test vdot(x, Dt_rev_y) == vdot(y, D_rev_x)
test_api(Direct, D_rev, x, y; atol=atol, rtol=rtol)
test_api(Adjoint, D_rev, x, y; atol=atol, rtol=rtol)
DtD_rev = D_rev'*D_rev
@test DtD_rev === gram(D_rev)
@test isa(DtD_rev, Gram{typeof(D_rev)})
@test differentiation_order(DtD_rev) === order
@test dimensions_of_interest(DtD_rev) === dimensions_of_interest(D_rev)
@test optimization_level(DtD_rev) === optimization_level(D_rev)
@test DtD_rev*x == D_rev'*D_rev_x
test_api(Direct, DtD_rev, x, x; atol=atol, rtol=rtol)
end
for d in 1:ndims(x)
# Apply along a single dimension.
D_one = Diff(order,d,Debug)
@test differentiation_order(D_one) === order
@test dimensions_of_interest(D_one) === d
@test optimization_level(D_one) === Debug
z = slice(y, d)
D_ref_x = diff_ref(order, x, dim=d, adj=false)
Dt_ref_z = diff_ref(order, z, dim=d, adj=true)
@test D_one*x == D_ref_x; @test x == xsav
@test D_one'*z == Dt_ref_z; @test y == ysav # z is a view on y
@test slice(D_all_x, d) == D_ref_x
@test slice(D_lst_x, d) == D_ref_x
if ndims(x) > 1
r = ndims(x) - d + 1
@test slice(D_rev_x, r) == D_ref_x
end
test_api(Direct, D_one, x, z; atol=atol, rtol=rtol)
test_api(Adjoint, D_one, x, z; atol=atol, rtol=rtol)
DtD_one = D_one'*D_one
@test DtD_one === gram(D_one)
@test isa(DtD_one, Gram{typeof(D_one)})
@test differentiation_order(DtD_one) === order
@test dimensions_of_interest(DtD_one) === dimensions_of_interest(D_one)
@test optimization_level(DtD_one) === optimization_level(D_one)
DtD_one_x = DtD_one*x; @test x == xsav
@test DtD_one_x == gram_diff_ref(order, x, dim=d)
@test DtD_one_x == D_one'*(D_one*x)
test_api(Direct, DtD_one, x, x; atol=atol, rtol=rtol)
end
end
end
end
nothing
end # module
#
# fft-tests.jl -
#
# Tests for FFT and circulant convolution operators.
#
module TestingLazyAlgebraFFT
using Test
using LazyAlgebra
using LazyAlgebra.Foundations
using AbstractFFTs, FFTW
floats = (Float32, Float64)
types = (Float32, #Float64, Complex{Float32},
Complex{Float64})
alphas = (0, 1, -1, 2.71, π)
betas = (0, 1, -1, -1.33, Base.MathConstants.φ)
lpad_args(n::Integer, x...) = lpad(string(x...), n)
lpad_with_parentheses(n::Integer, x...) = lpad(string("(", x..., ")"), n)
@testset "FFT utilities " begin
dims1 = (1, 2, 3, 4, 5, 7, 9, 287, 511)
dims2 = (1, 2, 3, 4, 5, 8, 9, 288, 512)
dims3 = (1, 2, 3, 4, 5, 8, 9, 288, 512)
@test LazyAlgebra.FFTs.goodfftdims(dims1) == dims2
@test LazyAlgebra.FFTs.goodfftdims(map(Int16, dims1)) == dims2
@test LazyAlgebra.FFTs.goodfftdims(dims1...) == dims2
@test LazyAlgebra.FFTs.rfftdims(1,2,3,4,5) == (1,2,3,4,5)
@test LazyAlgebra.FFTs.rfftdims(2,3,4,5) == (2,3,4,5)
@test LazyAlgebra.FFTs.rfftdims(3,4,5) == (2,4,5)
@test LazyAlgebra.FFTs.rfftdims(4,5) == (3,5)
@test LazyAlgebra.FFTs.rfftdims(5) == (3,)
@test LazyAlgebra.FFTs.fftfreq(1) == [0]
@test LazyAlgebra.FFTs.fftfreq(2) == [0,-1]
@test LazyAlgebra.FFTs.fftfreq(3) == [0,1,-1]
@test LazyAlgebra.FFTs.fftfreq(4) == [0,1,-2,-1]
@test LazyAlgebra.FFTs.fftfreq(5) == [0,1,2,-2,-1]
end # testset
@testset "FFT operator $(lpad_with_parentheses(26, T))" for T in types
R = real(T)
ϵ = sqrt(eps(R)) # relative tolerance, can certainly be much tighter
for dims in ((45,), (20,), (33,12), (30,20), (4,5,6))
# Create a FFT operator given its argument.
x = rand(T, dims)
n = length(x)
xsav = vcopy(x)
F = FFTOperator(x)
@test x == xsav # check that input has been preserved
@test MorphismType(F) == (T<:Complex ? Endomorphism() : Morphism())
@test input_size(F) == dims
@test input_size(F) == ntuple(i->input_size(F,i), ndims(x))
@test output_size(F) == (T<:Complex ? dims :
Tuple(AbstractFFTs.rfft_output_size(x, 1:ndims(x))))
@test output_size(F) == ntuple(i->output_size(F,i), ndims(x))
@test input_eltype(F) == T
@test output_eltype(F) == typeof(complex(zero(R)))
@test input_ndims(F) == ndims(x)
@test output_ndims(F) == ndims(x)
if T<:Complex
@test LazyAlgebra.FFTs.destroys_input(F.forward) == true
@test LazyAlgebra.FFTs.destroys_input(F.backward) == true
else
@test LazyAlgebra.FFTs.preserves_input(F.forward) == true
@test LazyAlgebra.FFTs.preserves_input(F.backward) == false
end
@test LazyAlgebra.identical(F, F) == true
io = IOBuffer()
show(io, F)
@test String(take!(io)) == "FFT"
@test typeof(F') === Adjoint{typeof(F)}
@test typeof(inv(F)) === Inverse{typeof(F)}
@test typeof(inv(F)') === InverseAdjoint{typeof(F)}
@test typeof(inv(F')) === InverseAdjoint{typeof(F)}
@test F'*F == length(x)*Identity()
@test F*F' == length(x)*Identity()
@test inv(F)*F == Identity()
@test F*inv(F) == Identity()
@test inv(F')*F' == Identity()
@test F'*inv(F') == Identity()
@test inv(F')*inv(F) == (1//length(x))*Identity()
@test inv(F)*inv(F') == (1//length(x))*Identity()
# Create operators which should be considered as the same as F.
F1 = FFTOperator(T, dims...)
@test LazyAlgebra.identical(F1, F) == true
F2 = FFTOperator(T, map(Int16, dims))
@test LazyAlgebra.identical(F2, F) == true
# Check applying operator.
xbad = rand(T, ntuple(i -> (i == 1 ? dims[i]+1 : dims[i]), length(dims)))
@test_throws DimensionMismatch F*xbad
y = rand(Complex{R}, (T<:Complex ? input_size(F) :
output_size(F)))
z = (T<:Complex ? fft(x) : rfft(x))
@test x == xsav # check that input has been preserved
ysav = vcopy(y)
w = (T<:Complex ? ifft(y) : irfft(y, dims[1]))
@test F*x ≈ z atol=0 rtol=ϵ norm=vnorm2
@test F\y ≈ w atol=0 rtol=ϵ norm=vnorm2
@test F(x) == F*x
@test F'(y) == F'*y
@test inv(F)(y) == F\y
@test inv(F')(x) == F'\x
@test x == xsav # check that input has been preserved
@test y == ysav # check that input has been preserved
for α in alphas,
β in betas,
scratch in (false, true)
@test apply!(α, Direct, F, x, scratch, β, vcopy(y)) ≈
R(α)*z + R(β)*y atol=0 rtol=ϵ
if scratch
vcopy!(x, xsav)
else
@test x == xsav # check that input has been preserved
end
@test apply!(α, Adjoint, F, y, scratch, β, vcopy(x)) ≈
R(n*α)*w + R(β)*x atol=0 rtol=ϵ
if scratch
vcopy!(y, ysav)
else
@test y == ysav # check that input has been preserved
end
@test apply!(α, Inverse, F, y, scratch, β, vcopy(x)) ≈
R(α)*w + R(β)*x atol=0 rtol=ϵ
if scratch
vcopy!(y, ysav)
else
@test y == ysav # check that input has been preserved
end
@test apply!(α, InverseAdjoint, F, x, scratch, β, vcopy(y)) ≈
R(α/n)*z + R(β)*y atol=0 rtol=ϵ
if scratch
vcopy!(x, xsav)
else
@test x == xsav # check that input has been preserved
end
end
end
end # testset
@testset "Circular convolution $(lpad_with_parentheses(18, T))" for T in types
R = real(T)
ϵ = sqrt(eps(R)) # relative tolerance, can certainly be much tighter
n1, n2, n3 = 18, 12, 4
for dims in ((n1,), (n1,n2), (n1,n2,n3))
# Basic methods.
x = rand(T, dims)
n = length(x)
h = rand(T, dims)
H = CirculantConvolution(h, shift=false, flags=FFTW.ESTIMATE)
@test MorphismType(H) == Endomorphism()
@test input_size(H) == dims
@test input_size(H) == ntuple(i->input_size(H,i), ndims(x))
@test output_size(H) == dims
@test output_size(H) == ntuple(i->output_size(H,i), ndims(x))
@test input_eltype(H) == T
@test output_eltype(H) == T
@test input_ndims(H) == ndims(x)
@test output_ndims(H) == ndims(x)
@test eltype(H) == T
@test size(H) == (dims..., dims...)
@test ndims(H) == 2*length(dims)
@test (size(H)..., 1) == ntuple(i->size(H, i), ndims(H)+1)
# Test apply! method.
F = FFTOperator(x)
G = F\Diag(F*h)*F
y = rand(T, dims)
xsav = vcopy(x)
ysav = vcopy(y)
y1 = H*x
@test H(x) == y1
y2 = G*x
y3 = (T<:Real ? real(ifft(fft(h).*fft(x)))
: ifft(fft(h).*fft(x)))
@test y1 ≈ y2 atol=0 rtol=ϵ
@test y1 ≈ y3 atol=0 rtol=ϵ
if T<:Real
y4 = irfft(rfft(h).*rfft(x), n1)
@test y1 ≈ y4 atol=0 rtol=ϵ
end
z1 = H'*y
z2 = G'*y
z3 = (T<:Real ? real(ifft(conj.(fft(h)).*fft(y)))
: ifft(conj.(fft(h)).*fft(y)))
@test z1 ≈ z2 atol=0 rtol=ϵ
@test z1 ≈ z3 atol=0 rtol=ϵ
if T<:Real
z4 = irfft(conj.(rfft(h)).*rfft(y), n1)
@test z1 ≈ z4 atol=0 rtol=ϵ
end
for α in alphas,
β in betas,
scratch in (false, true)
@test apply!(α, Direct, H, x, scratch, β, vcopy(y)) ≈
R(α)*y1 + R(β)*y atol=0 rtol=ϵ
if scratch
vcopy!(x, xsav)
else
@test x == xsav # check that input has been preserved
end
@test apply!(α, Adjoint, H, y, scratch, β, vcopy(x)) ≈
R(α)*z1 + R(β)*x atol=0 rtol=ϵ
if scratch
vcopy!(y, ysav)
else
@test y == ysav # check that input has been preserved
end
end
end
end # testset
nothing
end # module
#
# genmult-tests.jl -
#
# Tests for generalized matrix-vector and matrix-matrix products.
#
module TestingLazyAlgebraGenMult
using Test
using Test: print_test_results
using LazyAlgebra
import LazyAlgebra: GenMult
import .GenMult: Reals, Complexes, Floats, BlasFloat
for (pfx, P) in ((:generic, GenMult.Generic()),
(:blas, GenMult.Blas()),
(:linear, GenMult.Linear()),
(:basic, GenMult.Basic()))
@eval begin
$(Symbol(pfx, "_lgemv"))(α, trans, A, x) =
GenMult._lgemv($P, α, trans, A, x)
$(Symbol(pfx, "_lgemv!"))(α, trans, A, x, β, y) =
GenMult._lgemv!($P, α, trans, A, x, β, y)
$(Symbol(pfx, "_lgemm"))(α, transA, A, transB, B, Nc::Integer=2) =
GenMult._lgemm($P, α, transA, A, transB, B, Int(Nc))
$(Symbol(pfx, "_lgemm!"))(α, transA, A, transB, B, β, C) =
GenMult._lgemm!($P, α, transA, A, transB, B, β, C)
end
end
_transp(t::Char, A::AbstractMatrix) =
t == 'N' ? A :
t == 'T' ? transpose(A) :
t == 'C' ? A' : error("invalid transpose character")
# in Julia < 0.7 randn() does not generate complex numbers.
_randn(::Type{T}, ::Tuple{}) where {T} = _randn(T)
_randn(::Type{T}, dims::Integer...) where {T} = _randn(T, map(Int, dims))
_randn(::Type{T}) where {T<:AbstractFloat} = randn(T)
_randn(::Type{<:Complex{T}}) where {T<:AbstractFloat} =
Complex(randn(T), randn(T))
function _randn(::Type{T}, dims::NTuple{N,Int}) where {T,N}
A = Array{T,N}(undef, dims)
@inbounds for i in eachindex(A)
A[i] = _randn(T)
end
return A
end
function ref_lgemv(α::Number,
trans::Char,
A::DenseArray{Ta,Na},
x::DenseArray{Tx,Nx}) where {Ta,Na,Tx,Nx}
@assert 1 ≤ Nx < Na
Ny = Na - Nx
dims = size(A)
if trans == 'N'
rows, cols = dims[1:Ny], dims[Ny+1:end]
@assert size(x) == cols
else
rows, cols = dims[1:Nx], dims[Nx+1:end]
@assert size(x) == rows
end
M = reshape(A, (prod(rows), prod(cols)))
v = reshape(x, length(x))
if trans == 'N'
return α*reshape(M*v, rows)
elseif trans == 'T'
return α*reshape(transpose(M)*v, cols)
elseif trans == 'C'
return α*reshape(M'*v, cols)
else
error("invalid transpose character")
end
end
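# Illustrative check (not part of the test sets): with A of size (2,3,4) and
# x of size (4,), the 'N' product groups the two leading dimensions of A as
# the "rows" of the result.
let A = ones(2, 3, 4), x = ones(4)
    @assert size(ref_lgemv(1, 'N', A, x)) == (2, 3)
    @assert all(ref_lgemv(1, 'N', A, x) .== 4)
end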
function ref_lgemm(α::Number,
transA::Char,
A::DenseArray{Ta,Na},
transB::Char,
B::DenseArray{Tb,Nb},
Nc::Integer=2) where {Ta,Na,Tb,Nb}
Ni, Nj, Nk = GenMult._lgemm_ndims(Na, Nb, Int(Nc))
@assert Na == Ni + Nk
@assert Nb == Nk + Nj
@assert Nc == Ni + Nj
Adims = size(A)
Bdims = size(B)
if transA == 'N'
I, K = Adims[1:Ni], Adims[Ni+1:Ni+Nk]
else
K, I = Adims[1:Nk], Adims[Nk+1:Nk+Ni]
end
if transB == 'N'
@assert Bdims[1:Nk] == K
J = Bdims[Nk+1:Nk+Nj]
else
@assert Bdims[Nj+1:Nj+Nk] == K
J = Bdims[1:Nj]
end
m = prod(I)
n = prod(J)
p = prod(K)
return α*reshape(_transp(transA, reshape(A, transA == 'N' ? (m, p) : (p, m)))*
_transp(transB, reshape(B, transB == 'N' ? (p, n) : (n, p))),
(I..., J...))
end
cnv(::Type{T}, x::T) where {T<:AbstractFloat} = x
cnv(::Type{T}, x::Real) where {T<:AbstractFloat} = convert(T, x)
cnv(::Type{T}, x::Complex{<:Real}) where {T<:AbstractFloat} =
convert(Complex{T}, x)
function similar_values(::Type{T},
A::AbstractArray,
B::AbstractArray;
atol::Real=zero(T),
rtol::Real=sqrt(eps(T))) where {T<:AbstractFloat}
if axes(A) != axes(B)
return false
end
local anrm2::T = 0
local bnrm2::T = 0
local cnrm2::T = 0
n = 0
for i in eachindex(A, B)
a, b = cnv(T, A[i]), cnv(T, B[i])
anrm2 += abs2(a)
bnrm2 += abs2(b)
cnrm2 += abs2(a - b)
n += 1
end
return sqrt(cnrm2) ≤ n*atol + rtol*sqrt(max(anrm2, bnrm2))
end
function similar_values(A::AbstractArray{Ta},
B::AbstractArray{Tb};
kwds...) where {Ta,Tb}
similar_values(float(real(promote_type(Ta,Tb))), A, B; kwds...)
end
"""
`worst_type(T1,T2)` yields the smallest floating-point real type of
real/complex types `T1` and `T2`.
"""
function worst_type(::Type{Complex{T1}},
::Type{T2}) where {T1<:AbstractFloat,
T2<:AbstractFloat}
worst_type(T1, T2)
end
function worst_type(::Type{T1},
::Type{Complex{T2}}) where {T1<:AbstractFloat,
T2<:AbstractFloat}
worst_type(T1, T2)
end
function worst_type(::Type{Complex{T1}},
::Type{Complex{T2}}) where {T1<:AbstractFloat,
T2<:AbstractFloat}
worst_type(T1, T2)
end
function worst_type(::Type{T1},
::Type{T2}) where {T1<:AbstractFloat,
T2<:AbstractFloat}
sizeof(T1) ≤ sizeof(T2) ? T1 : T2
end
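# For instance (illustrative): the complex part is stripped and the narrower
# real type wins.
@assert worst_type(Float64, ComplexF32) === Float32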
function test_lgemv(reduced::Bool=false)
# Notes:
# - π is good for testing special (non floating-point) values.
# - Use prime numbers for dimensions so that they cannot be split.
if reduced
MULTIPLIERS = (0, 1, π)
DIMENSIONS = (((3, ), (5, )),
((2, 3), (5, 7)))
TRANS = ('N', 'T', 'C')
TYPES = (Float32, Float64, ComplexF64)
else
MULTIPLIERS = (0, 1, π)
DIMENSIONS = (((3, ), (5, )),
((2, 3), (5, )),
((3, ), (2, 5)),
((2, 3), (5, 7)))
TRANS = ('N', 'T', 'C')
TYPES = (Float32, Float64, ComplexF32, ComplexF64)
end
@testset "LGEMV" begin
@testset for dims in DIMENSIONS,
Ta in TYPES, transA in TRANS,
Tx in TYPES,
α in MULTIPLIERS, β in MULTIPLIERS
Ta <: Real && transA == 'C' && continue
m, n = dims
Ty = promote_type(Ta, Tx)
A = _randn(Ta, (m..., n...))
x = _randn(Tx, transA == 'N' ? n : m)
y = _randn(Ty, transA == 'N' ? m : n)
Tw = worst_type(Ta, Tx)
ref = ref_lgemv(α, transA, A, x) + β*y
if β == 0
@test similar_values(Tw, ref, generic_lgemv(α, transA, A, x))
@test similar_values(Tw, ref, linear_lgemv(α, transA, A, x))
@test similar_values(Tw, ref, lgemv(α, transA, A, x))
if ndims(A) == 2 && ndims(x) == 1
@test similar_values(Tw, ref, basic_lgemv(α, transA, A, x))
end
if Ta == Tx == Ty && Ta <: BlasFloat
α′ = convert(Ta, α)
@test similar_values(Tw, ref, blas_lgemv(α′,transA, A, x))
end
end
@test similar_values(Tw, ref, generic_lgemv!(α, transA, A, x, β, deepcopy(y)))
@test similar_values(Tw, ref, linear_lgemv!(α, transA, A, x, β, deepcopy(y)))
@test similar_values(Tw, ref, lgemv!(α, transA, A, x, β, deepcopy(y)))
if ndims(A) == 2 && ndims(x) == 1
@test similar_values(Tw, ref, basic_lgemv!(α, transA, A, x, β, deepcopy(y)))
end
if Ta == Tx == Ty && Ta <: BlasFloat
α′ = convert(Ta, α)
β′ = convert(Ta, β)
@test similar_values(Tw, ref, blas_lgemv!(α′,transA, A, x, β′,deepcopy(y)))
end
end
end
end
function test_lgemm(reduced::Bool=false)
if reduced
MULTIPLIERS = (0, 1, π)
DIMENSIONS = (((5, ), (3, ), (2, )),
((5, 2), (4, ), (3, )),
((4, 5), (3, 2), (2, 3)))
TRANS = ('N', 'T', 'C')
TYPES = (Float32, Float64, ComplexF64)
else
MULTIPLIERS = (0, 1, π)
DIMENSIONS = (((5, ), (3, ), (2, )),
((5, 2), (3, ), (2, )),
((5, 2), (4, 3 ), (3, 2)),
((3, 4), (2, 2, 3), (2, 3)))
TRANS = ('N', 'T', 'C')
TYPES = (Float32, Float64, ComplexF32, ComplexF64)
end
@testset "LGEMM" begin
@testset for dims in DIMENSIONS,
α in MULTIPLIERS, β in MULTIPLIERS,
Ta in TYPES, transA in TRANS,
Tb in TYPES, transB in TRANS
Ta <: Real && transA == 'C' && continue
Tb <: Real && transB == 'C' && continue
m, n, p = dims
Tc = promote_type(Ta, Tb)
C = _randn(Tc, (m..., n...))
Nc = ndims(C)
A = _randn(Ta, transA == 'N' ? (m..., p...) : (p..., m...))
B = _randn(Tb, transB == 'N' ? (p..., n...) : (n..., p...))
Tw = worst_type(Ta, Tb)
ref = ref_lgemm(α, transA, A, transB, B, Nc) + β*C
#ref = generic_lgemm(α, transA, A, transB, B) + β*C
if β == 0
@test similar_values(Tw, ref, generic_lgemm(α, transA, A, transB, B, Nc))
@test similar_values(Tw, ref, linear_lgemm(α, transA, A, transB, B, Nc))
@test similar_values(Tw, ref, lgemm(α, transA, A, transB, B, Nc))
if ndims(A) == ndims(B) == 2
@test similar_values(Tw, ref, basic_lgemm(α, transA, A, transB, B, Nc))
end
if Ta == Tb == Tc && Ta <: BlasFloat
α′ = convert(Ta, α)
@test similar_values(Tw, ref, blas_lgemm(α′,transA, A, transB, B, Nc))
end
end
@test similar_values(Tw, ref, generic_lgemm!(α, transA, A, transB, B, β, deepcopy(C)))
@test similar_values(Tw, ref, linear_lgemm!(α, transA, A, transB, B, β, deepcopy(C)))
@test similar_values(Tw, ref, lgemm!(α, transA, A, transB, B, β, deepcopy(C)))
if ndims(A) == ndims(B) == 2
@test similar_values(Tw, ref, basic_lgemm!(α, transA, A, transB, B, β, deepcopy(C)))
end
if Ta == Tb == Tc && Ta <: BlasFloat
α′ = convert(Ta, α)
β′ = convert(Ta, β)
@test similar_values(Tw, ref, blas_lgemm!(α′,transA, A, transB, B, β′,deepcopy(C)))
end
end
end
end
function test_all(reduced::Bool=false)
print_test_results(test_lgemv(reduced))
print_test_results(test_lgemm(reduced))
nothing
end
end # module
TestingLazyAlgebraGenMult.test_all()
nothing
#
# gram-tests.jl -
#
# Test Gram operator.
#
module TestingLazyAlgebraRules
using LazyAlgebra
using LazyAlgebra.Foundations
using Test
@testset "Gram operators" begin
@test gram(Id) === Id
rows = (3,4,)
cols = (2,5,)
T = Float32
a = rand(T, rows..., cols...)
A = GeneralMatrix(a)
AtA = gram(A)
@test isa(AtA, Gram)
@test isa(A'*A, Gram)
@test A'*A === AtA
@test AtA' === AtA
x = rand(T, cols)
y = A'*(A*x)
z = AtA*x
@test z ≈ y
end
end # module
#
# map-tests.jl -
#
# Tests for basic mappings.
#
module TestingLazyAlgebraMappings
using Test
using LazyAlgebra
using LazyAlgebra.Foundations
using LinearAlgebra: ⋅, UniformScaling
include("common.jl")
Scaled = LazyAlgebra.Scaled
Sum = LazyAlgebra.Sum
Composition = LazyAlgebra.Composition
Endomorphism = LazyAlgebra.Endomorphism
MorphismType = LazyAlgebra.MorphismType
identical = LazyAlgebra.identical
ALPHAS = (0, 1, -1, 2.71, π)
BETAS = (0, 1, -1, -1.33, Base.MathConstants.φ)
@testset "Rules for uniform scaling " begin
# Check + operator.
@test Id + UniformScaling(1) === 2Id
@test Id + UniformScaling(2) === 3Id
@test UniformScaling(1) + Id === 2Id
@test UniformScaling(2) + Id === 3Id
# Check - operator.
@test Id - UniformScaling(1) === 0Id
@test Id - UniformScaling(2) === -Id
@test UniformScaling(1) - Id === 0Id
@test UniformScaling(2) - Id === Id
# Check * operator.
@test Id*UniformScaling(1) === Id
@test Id*UniformScaling(2) === 2Id
@test UniformScaling(1)*Id === Id
@test UniformScaling(2)*Id === 2Id
# Check \circ operator.
@test Id∘UniformScaling(1) === Id
@test Id∘UniformScaling(2) === 2Id
@test UniformScaling(1)∘Id === Id
@test UniformScaling(2)∘Id === 2Id
# \cdot is specific.
@test Id⋅UniformScaling(1) === Id
@test Id⋅UniformScaling(2) === 2Id
@test UniformScaling(1)⋅Id === Id
@test UniformScaling(2)⋅Id === 2Id
# Check / operator.
@test Id/UniformScaling(1) === Id
@test Id/UniformScaling(2) === (1/2)*Id
@test UniformScaling(1)/Id === Id
@test UniformScaling(2)/Id === 2Id
# Check \ operator.
@test Id\UniformScaling(1) === Id
@test Id\UniformScaling(2) === 2Id
@test UniformScaling(1)\Id === Id
@test UniformScaling(2)\Id === (1/2)*Id
end # testset
@testset "Uniform scaling ($T)" for T in (Float32, Float64)
dims = (3,4,5)
x = randn(T, dims)
y = randn(T, dims)
λ = sqrt(2)
U = λ*Id
atol, rtol = zero(T), sqrt(eps(T))
@test U*x ≈ λ*x atol=atol rtol=rtol
@test U'*x ≈ λ*x atol=atol rtol=rtol
@test U\x ≈ (1/λ)*x atol=atol rtol=rtol
@test U'\x ≈ (1/λ)*x atol=atol rtol=rtol
for α in ALPHAS,
β in BETAS
for P in (Direct, Adjoint)
@test apply!(α, P, U, x, β, vcopy(y)) ≈
T(α*λ)*x + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
for P in (Inverse, InverseAdjoint)
@test apply!(α, P, U, x, β, vcopy(y)) ≈
T(α/λ)*x + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
end
end # testset
@testset "Rank 1 operators ($T)" for T in (Float32, Float64)
dims = (3,4,5)
n = prod(dims)
w = rand(T, dims)
x = rand(T, dims)
y = rand(T, dims)
A = RankOneOperator(w, w)
B = RankOneOperator(w, y)
C = SymmetricRankOneOperator(w)
atol, rtol = zero(T), sqrt(eps(T))
@test LinearType(A) === Linear()
@test LinearType(C) === Linear()
@test MorphismType(C) === Endomorphism()
@test A*Id === A
@test Id*A === A
@test A*x ≈ sum(w.*x)*w atol=atol rtol=rtol norm=vnorm2
@test A'*x ≈ sum(w.*x)*w atol=atol rtol=rtol norm=vnorm2
@test B*x ≈ sum(y.*x)*w atol=atol rtol=rtol norm=vnorm2
@test B'*x ≈ sum(w.*x)*y atol=atol rtol=rtol norm=vnorm2
@test C*x ≈ sum(w.*x)*w atol=atol rtol=rtol norm=vnorm2
@test C'*x ≈ sum(w.*x)*w atol=atol rtol=rtol norm=vnorm2
z = vmul!(vcreate(y), B, x)
@test z == vmul(B, x)
@test z == B*x
for α in ALPHAS,
β in BETAS
for P in (Direct, Adjoint)
@test apply!(α, P, C, x, β, vcopy(y)) ≈
T(α*vdot(w,x))*w + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
end
end # testset
@testset "Non-uniform scaling ($T)" for T in (Float32, Float64)
dims = (3,4,5)
n = prod(dims)
w = randn(T, dims)
for i in eachindex(w)
while w[i] == 0
w[i] = randn(T)
end
end
x = randn(T, dims)
y = randn(T, dims)
z = vcreate(y)
S = NonuniformScaling(w)
@test diag(S) === w
@test Diag(w) === S
atol, rtol = zero(T), sqrt(eps(T))
@test S*x ≈ w.*x atol=atol rtol=rtol norm=vnorm2
@test S'*x ≈ w.*x atol=atol rtol=rtol norm=vnorm2
@test S\x ≈ x./w atol=atol rtol=rtol norm=vnorm2
@test S'\x ≈ x./w atol=atol rtol=rtol norm=vnorm2
for α in ALPHAS,
β in BETAS
for P in (Direct, Adjoint)
@test apply!(α, P, S, x, β, vcopy(y)) ≈
T(α)*w.*x + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
for P in (Inverse, InverseAdjoint)
@test apply!(α, P, S, x, β, vcopy(y)) ≈
T(α)*x./w + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
end
end # testset
@testset "Non-uniform scaling (Complex{$T})" for T in (Float32, Float64)
dims = (3,4,5)
n = prod(dims)
w = complex.(randn(T, dims), randn(T, dims))
for i in eachindex(w)
while w[i] == 0
w[i] = complex(randn(T), randn(T))
end
end
x = complex.(randn(T, dims), randn(T, dims))
y = complex.(randn(T, dims), randn(T, dims))
wx = w.*x
qx = x./w
z = vcreate(y)
S = NonuniformScaling(w)
atol, rtol = zero(T), sqrt(eps(T))
@test S*x ≈ w.*x atol=atol rtol=rtol norm=vnorm2
@test S'*x ≈ conj.(w).*x atol=atol rtol=rtol norm=vnorm2
@test S\x ≈ x./w atol=atol rtol=rtol norm=vnorm2
@test S'\x ≈ x./conj.(w) atol=atol rtol=rtol norm=vnorm2
for α in ALPHAS,
β in BETAS
@test apply!(α, Direct, S, x, β, vcopy(y)) ≈
T(α)*w.*x + T(β)*y atol=atol rtol=rtol norm=vnorm2
@test apply!(α, Adjoint, S, x, β, vcopy(y)) ≈
T(α)*conj.(w).*x + T(β)*y atol=atol rtol=rtol norm=vnorm2
@test apply!(α, Inverse, S, x, β, vcopy(y)) ≈
T(α)*x./w + T(β)*y atol=atol rtol=rtol norm=vnorm2
@test apply!(α, InverseAdjoint, S, x, β, vcopy(y)) ≈
T(α)*x./conj.(w) + T(β)*y atol=atol rtol=rtol norm=vnorm2
end
end # testset
@testset "Generalized matrices ($T)" for T in (Float32, Float64)
rows, cols = (2,3,4), (5,6)
nrows, ncols = prod(rows), prod(cols)
A = randn(T, rows..., cols...)
x = randn(T, cols)
y = randn(T, rows)
G = GeneralMatrix(A)
atol, rtol = zero(T), sqrt(eps(T))
mA = reshape(A, nrows, ncols)
vx = reshape(x, ncols)
vy = reshape(y, nrows)
Gx = G*x
Gty = G'*y
@test Gx ≈ reshape(mA*vx, rows) atol=atol rtol=rtol norm=vnorm2
@test Gty ≈ reshape(mA'*vy, cols) atol=atol rtol=rtol norm=vnorm2
test_api(Direct, G, x, y)
test_api(Adjoint, G, x, y)
end # testset
nothing
end # module
#
# oldrules.jl --
#
# Implement simplification rules for LazyAlgebra. This is the old version,
# kept for comparisons and benchmarking. The new version is simpler and
# faster.
#
module OldRules
using LazyAlgebra
using LazyAlgebra:
Adjoint,
    Composition,
Direct,
Gram,
Inverse,
InverseAdjoint,
LinearMapping,
Mapping,
Scaled,
Sum,
identifier,
multiplier,
    unscaled,
    unveil,
terms,
to_tuple
#------------------------------------------------------------------------------
# RULES FOR SUMS OF MAPPINGS
"""
OldRules.add(A, B)
yields a simplified sum of mappings `A + B`.
"""
add(A::Mapping, B::Mapping) =
simplify_sum((split_sum(A)..., split_sum(B)...))
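# For instance (illustrative): if `A` and `B` are distinct base mappings,
# `OldRules.add(A, A + B)` groups the two occurrences of `A` and yields a
# result equivalent to `2⋅A + B` (up to the ordering of the sorted terms).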
# `split_sum(A)` yields a tuple of the terms of `A` if it is a sum or just
# `(A,)` otherwise.
split_sum(A::Sum) = terms(A)
split_sum(A::Mapping) = (A,)
# `merge_sum(args...)` constructs a fully qualified sum. It is assumed that
# the argument(s) have already been simplified. An empty sum is forbidden
# because there is no universal neutral element ("zero") for the addition.
merge_sum(arg::Mapping) = arg
merge_sum(args::Mapping...) = merge_sum(args)
merge_sum(args::Tuple{}) = throw(ArgumentError("empty sum"))
merge_sum(args::Tuple{Mapping}) = args[1]
merge_sum(args::T) where {N,T<:NTuple{N,Mapping}} = Sum{N,T}(args)
# `simplify_sum(args...)` simplifies the sum of all terms in `args...` and
# returns a single term (possibly an instance of `Sum`). It is assumed that
# the operator `+` is associative and commutative.
# Make a sum out of 0-1 terms.
simplify_sum(args::Tuple{}) = merge_sum(args)
simplify_sum(args::Tuple{Mapping}) = args[1]
# Make a sum out of N terms (with N ≥ 2).
function simplify_sum(args::NTuple{N,Mapping}) where {N}
# First group terms corresponding to identical mappings (up to an optional
# multiplier) to produce a single (possibly scaled) term per group. FIXME:
# The following algorithm scales as O(N²) which is probably not optimal.
# Nevertheless, it never compares twice the same pair of arguments.
terms = Array{Mapping}(undef, 0)
flags = fill!(Array{Bool}(undef, N), true)
i = 1
while i != 0
# Push next ungrouped argument.
push!(terms, args[i])
flags[i] = false
# Find any other argument which is identical, possibly scaled, term.
k = i + 1
i = 0
for j = k:N
if flags[j]
if simplify_sum!(terms, terms[end], args[j])
flags[j] = false
elseif i == 0
# Next ungrouped argument to consider.
i = j
end
end
end
end
# Make a sum out of the terms after having eliminated the zeros.
return sort_sum(terms)
end
# `simplify_sum!(terms, A, B)` is a helper function for the `simplify_sum`
# method which attempts to make a trivial simplification for `A + B` when `A =
# λ⋅M` and `B = μ⋅M` for any numbers `λ` and `μ` and any mapping `M`. If such
# a simplification can be done, the result `(λ + μ)⋅M` is stored as the last
# component of `terms` and `true` is returned; otherwise, `false` is returned.
simplify_sum!(terms::Vector{Mapping}, A::Mapping, B::Mapping) = false
function simplify_sum!(terms::Vector{Mapping},
A::Scaled{T}, B::Scaled{T}) where {T<:Mapping}
unscaled(A) === unscaled(B) || return false
@inbounds terms[end] = (multiplier(A) + multiplier(B))*unscaled(A)
return true
end
function simplify_sum!(terms::Vector{Mapping},
A::Scaled{T}, B::T) where {T<:Mapping}
unscaled(A) === B || return false
@inbounds terms[end] = (multiplier(A) + one(multiplier(A)))*B
return true
end
function simplify_sum!(terms::Vector{Mapping},
A::T, B::Scaled{T}) where {T<:Mapping}
A === unscaled(B) || return false
@inbounds terms[end] = (one(multiplier(B)) + multiplier(B))*A
return true
end
function simplify_sum!(terms::Vector{Mapping},
A::T, B::T) where {T<:Mapping}
A === B || return false
@inbounds terms[end] = 2*A
return true
end
# Sort the terms of a sum (so that all equivalent expressions eventually yield
# the same result after simplifications) and eliminate the "zeros"
# (if all terms are "zero", the sum simplifies to the first one).
sort_sum(args::Mapping...) = sort_sum(args)
sort_sum(args::Tuple{Mapping}) = args[1]
function sort_sum(terms::Union{Tuple{Vararg{Mapping}},Vector{<:Mapping}})
perms = sortperm([identifier(terms[i]) for i in 1:length(terms)])
n = 0
@inbounds for i in 1:length(perms)
j = perms[i]
if ! iszero(terms[j])
n += 1
perms[n] = j
end
end
if n ≤ 1
# All terms are zero or only one term is non-zero, return the first
# sorted term.
return terms[perms[1]]
else
        # Make a sum out of the remaining sorted terms.
return merge_sum(ntuple(i -> terms[perms[i]], n))
end
end
#------------------------------------------------------------------------------
# RULES FOR COMPOSITIONS OF MAPPINGS
"""
OldRules.compose(A, B)
yields a simplified composition of mappings `A*B`.
"""
compose(A::Mapping, B::Mapping) = simplify_mul(A, B)
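# For instance (illustrative): `OldRules.compose(A*B, inv(B))` simplifies the
# pair of terms at the junction, `B*inv(B) -> Id`, and yields `A`.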
# `split_mul(A)` yields a tuple of the terms of `A` if it is a composition or
# just `(A,)` otherwise.
split_mul(A::Composition) = terms(A)
split_mul(A::Mapping) = (A,)
# `merge_mul(args...)` constructs a fully qualified composition. It is assumed
# that the argument(s) have already been simplified. An empty composition
# yields the identity which is the universal neutral element ("one") for the
# composition.
merge_mul(arg::Mapping) = arg
merge_mul(args::Mapping...) = merge_mul(args)
merge_mul(args::Tuple{}) = Id
merge_mul(args::Tuple{Mapping}) = args[1]
merge_mul(args::T) where {N,T<:NTuple{N,Mapping}} = Composition{N,T}(args)
# `simplify_mul(A,B)` simplifies the product of `A` and `B`. The result is a
# tuple `C` of the resulting terms if `A` and `B` are both tuples of mappings
# or an instance of `Mapping` if `A` and `B` are both mappings. If no
# simplification can be made, the result is just the concatenation of the terms
# in `A` and `B`. To perform simplifications, it is assumed that, if they are
# compositions, `A` and `B` have already been simplified. It is also assumed
# that the composition is associative but non-commutative.
simplify_mul(A::Mapping, B::Mapping) =
merge_mul(simplify_mul(split_mul(A), split_mul(B)))
# The following versions of `simplify_mul` are for `A` and `B` in the form of
# tuples and return a tuple. The algorithm is recursive and should work for
# any non-commutative binary operator.
simplify_mul(A::Tuple{}, B::Tuple{}) = (Id,)
simplify_mul(A::Tuple{Vararg{Mapping}}, B::Tuple{}) = A
simplify_mul(A::Tuple{}, B::Tuple{Vararg{Mapping}}) = B
function simplify_mul(A::NTuple{M,Mapping},
B::NTuple{N,Mapping}) where {M,N}
# Here M ≥ 1 and N ≥ 1.
@assert M ≥ 1 && N ≥ 1
# Attempt to simplify the product of the terms at the junction.
C = split_mul(A[M]*B[1])
len = length(C)
if len == 2
if C === (A[M], B[1])
# No simplification, just concatenate the 2 compositions.
return (A..., B...)
else
            # There have been some changes, but the result of A[M]*B[1] still
            # has two terms which cannot be further simplified.  So we
# simplify its head with the remaining leftmost operands and its
# tail with the remaining rightmost operands.
L = simplify_mul(A[1:M-1], C[1]) # simplify leftmost operands
R = simplify_mul(C[2], B[2:N]) # simplify rightmost operands
if L[end] !== C[1] || R[1] !== C[2]
# At least one of the last of resulting rightmost operands or
# the first of the resulting leftmost operands has been
# modified so there may be other possible simplifications.
return simplify_mul(L, R)
else
# No further simplifications possible.
return (L..., R...)
end
end
elseif len == 1
        # Simplifications have occurred, resulting in a single operand.  This
        # operand can be simplified with the remaining leftmost operands
        # and/or with the remaining rightmost operands.  To benefit from the
# maximum simplifications, we can either do:
#
# simplify_mul(A[1:end-1], simplify_mul(C, B[2:end]))
#
# that is, simplify right then left, or:
#
# simplify_mul(simplify_mul(A[1:end-1], C), B[2:end])
#
# that is simplify left then right. Since we want to propagate
# multipliers to the right of compositions, the former is the most
# appropriate.
return simplify_mul(A[1:end-1], simplify_mul(C, B[2:end]))
else
        # Here len == 0: the result of A[M]*B[1] is the neutral element for *,
        # thus eliminating these terms from the merging.  We just have to
        # repeat the process with the remaining terms in case further
        # simplifications are possible.
        return simplify_mul(A[1:M-1], B[2:N])
end
end
end # module
#
# rules-tests.jl -
#
# Test algebraic rules and simplifications.
#
module TestingLazyAlgebraRules
using LazyAlgebra
using LazyAlgebra.Foundations
import LazyAlgebra: ⋅,
Adjoint, Inverse, InverseAdjoint, Jacobian,
Scaled, Sum, Composition
using Test
function to_string(A::Mapping)
io = IOBuffer()
show(io, A)
String(take!(io))
end
@testset "Algebraic rules" begin
include("common.jl")
identical = LazyAlgebra.identical
Composition = LazyAlgebra.Composition
dims = (3,4,5)
M = SymbolicMapping(:M)
Q = SymbolicMapping(:Q)
R = SymbolicMapping(:R)
A = SymbolicLinearMapping(:A)
B = SymbolicLinearMapping(:B)
C = SymbolicLinearMapping(:C)
D = SymbolicLinearMapping(:D)
# Test properties.
@test M !== R
@test A !== B
@test identical(A, B) == false
let E = A
@test identical(A, E) == true
end
@test is_linear(M) == false
@test is_linear(A) == true
@test is_linear(A + B) == true
@test is_linear(A') == true
@test is_linear(inv(A)) == true
@test is_linear(inv(M)) == false
@test is_linear(A + B + C) == true
@test is_linear(A' + B + C) == true
@test is_linear(A + B' + C) == true
@test is_linear(A + B + C') == true
@test is_linear(M + B + C) == false
@test is_linear(A + M + C) == false
@test is_linear(A + B + M) == false
@test is_linear(A*B*C) == true
@test is_linear(A'*B*C) == true
@test is_linear(A*B'*C) == true
@test is_linear(A*B*C') == true
@test is_linear(M*B*C) == false
@test is_linear(A*M*C) == false
@test is_linear(A*B*M) == false
@test is_endomorphism(M) == false
@test is_endomorphism(A) == false
@test is_selfadjoint(M) == false
@test is_selfadjoint(A) == false
@test is_selfadjoint(A'*A) == true # FIXME: was broken
@test is_selfadjoint(A*A') == true # FIXME: was broken
@test_broken is_selfadjoint(B'*A*A'*B) == true
# Test identity.
@test Id === LazyAlgebra.Id
@test Id' === Id
@test inv(Id) === Id
@test 1Id === Id
@test Id*M*Id === M
@test Id*Id === Id
@test Id*Id*Id === Id
@test Id\Id === Id
@test Id/Id === Id
@test Id + Id === 2Id
@test Id - Id === 0Id
@test Id + 2Id === 3Id
@test 2Id + 3Id === 5Id
@test -4Id + (Id + 2Id) === -Id
@test Id + Id - 2Id === 0Id
@test 2Id - (Id + Id) === 0Id
@test inv(3Id) === (1/3)*Id
@test SelfAdjointType(Id) === SelfAdjoint()
@test MorphismType(Id) === Endomorphism()
@test DiagonalType(Id) === DiagonalMapping()
for T in (Float32, Float64)
atol, rtol = zero(T), sqrt(eps(T))
x = randn(T, dims)
y = randn(T, dims)
@test Id*x === x # test same object
for P in (Direct, Adjoint, Inverse, InverseAdjoint)
@test apply(P,Id,x) === x # test same object
test_api(P, Id, x, y)
end
end
# Neutral elements.
@test isone(Id)
@test iszero(Id) == false
@test iszero(A - A)
@test iszero(-M + M)
@test one(A) === Id
@test one(M) === Id
@test zero(A) === 0*A
@test zero(M) === 0*M
# Basic methods for sums and compositions of mappings.
let A1 = A + B + M, A2 = C*B*M*Q
@test eltype(A1) <: Mapping
@test eltype(A2) <: Mapping
@test Tuple(A1) === terms(A1)
@test Tuple(A2) === terms(A2)
@test length(A1) == 3
@test length(A2) == 4
@test firstindex(A1) == 1
@test firstindex(A2) == 1
@test lastindex(A1) == length(A1)
@test lastindex(A2) == length(A2)
@test first(A1) === A1[1]
@test first(A2) === A2[1]
@test last(A1) === A1[end]
@test last(A2) === A2[end]
end
@test B + A === A + B
@test B + A + Q === Q + A + B
@test B + (M + A + Q) === A + B + M + Q
@test (-1)*A === -A
@test -(-A) === A
@test 2\A === (1/2)*A
@test A + A === 2A
@test A + B - A === B
@test 2R + A - Q + B - 2A + Q + 1R === 3R + B - A
# FIXME: for the following to work, we must impose the value of the
# first multiplier of a sum
#@test A + B*(M - Q) + A - 2B*(Q - M) === 2*A + 3*B*(M - Q)
@test A + B*(M - Q) + 3A - 3B*(M - Q) === 4*A - 2*B*(M - Q)
@test A*2M === 2*(A*M)
@test A*2M === 2*(A*M)
@test 3A*2M === 6A*M
@test 3R*2M !== 6R*M
# Test adjoint and Jacobian.
x = nothing
@test (A*A')' === A*A'
for X in (A*A', A'*A, B'*A'*A*B, B'*A*A'*B)
@test X' === X
end
@test A' === Adjoint(A)
@test A' isa Adjoint
@test A'' === (A')' === A
@test adjoint(A) === A'
@test jacobian(A,x) === A
@test ∇(A,x) === A
@test (3A)' === 3*(A')
@test (A + 2B)' - A' === 2*B'
@test_throws ArgumentError Jacobian(A,x)
@test_throws ArgumentError M'
@test_throws ArgumentError adjoint(M)
@test_throws ArgumentError Adjoint(M)
@test jacobian(M,x) isa Jacobian
@test ∇(M,x) === jacobian(M,x)
@test ∇(3M,x) === 3*∇(M,x)
@test ∇(M,x) + ∇(2M,x) === 3∇(M,x)
# Inverse.
@test inv(M) === Id/M
@test inv(inv(M)) === M
@test Id/A === inv(A)
@test Id\A === A
@test M/Q === M*inv(Q)
@test M\Q === inv(M)*Q
@test inv(2M) === inv(M)*(2\Id)
@test inv(2M) === inv(M)*((1/2)*Id)
@test inv(2A) === 2\inv(A)
@test inv(2A) === (1/2)*inv(A)
@test inv(A*B) === inv(B)*inv(A)
@test A/3B === 3\A/B
@test 2/A === 2*inv(A)
@test 4A/4B === A/B
@test 4A\4B === A\B
@test inv(A*M*B*Q) === inv(Q)*inv(B)*inv(M)*inv(A)
@test inv(M)*M === Id
@test M*inv(M) === Id
let D = M*Q*(A - B)
@test inv(D)*D === Id
@test D*inv(D) === Id
end
let D = A + 2B - C
@test inv(D)*D === Id
@test D*inv(D) === Id
end
@test inv(M*Q*(A - B)) === inv(A - B)*inv(Q)*inv(M)
@test inv(A*3M) === inv(M)*(3\inv(A))
@test inv(A*3B) === 3\inv(A*B) === 3\inv(B)*inv(A)
@test inv(A*B*3M) === inv(M)*(3\inv(B))*inv(A)
@test inv(A*B*3C) === 3\inv(A*B*C) === 3\inv(C)*inv(B)*inv(A)
# Inverse-adjoint.
@test inv(A)' === inv(A')
@test inv(A')*A' === Id
@test A'*inv(A') === Id
let E = inv(A'), F = inv(A)'
@test inv(E) === A'
@test inv(F) === A'
end
# Test aliases for composition.
@test isa(M*R, Composition)
@test M⋅R === M*R
@test M∘R === M*R
# Test associativity of sum and composition.
@test (M + R) + Q === M + R + Q
@test M + (R + Q) === (M + R) + Q
@test (M*R)*Q === M*R*Q
@test M*(R*Q) === (M*R)*Q
# Test adjoint of sums and compositions.
@test (A*B)' === (B')*(A') === B'*A'
@test (A'*B)' === B'*A
@test (A*B')' === B*A'
@test (A*B*C)' === C'*B'*A'
@test (A'*B*C)' === C'*B'*A
@test (A*B'*C)' === C'*B*A'
@test (A*B*C')' === C*B'*A'
@test (A + B)' === A' + B'
@test (A' + B)' === A + B'
@test (A + B')' === A' + B
@test (A' + B + C)' === A + B' + C'
@test (A + B' + C)' === A' + B + C'
@test (A + B + C')' === A' + B' + C
# Test inverse of sums and compositions.
# Test unary plus and negation.
@test +M === M
@test -(-M) === M
@test -M === (-1)*M
@test 2A + A === 3A
@test 10M - 3M === 7M
@test M + A + M === A + 2M
# Test forbidden calls to constructors because they should yield an
# instance of a different type if simplification rules were applied.
@test_throws ArgumentError Adjoint(Id)
@test_throws ArgumentError Adjoint(A')
@test_throws ArgumentError Adjoint(inv(A))
@test_throws ArgumentError Adjoint(inv(A'))
@test_throws ArgumentError Adjoint(3A)
@test_throws ArgumentError Adjoint(A + B)
@test_throws ArgumentError Adjoint(A*B)
@test_throws ArgumentError Inverse(Id)
@test_throws ArgumentError Inverse(A')
@test_throws ArgumentError Inverse(inv(A))
@test_throws ArgumentError Inverse(inv(A'))
@test_throws ArgumentError Inverse(3A)
@test_throws ArgumentError Inverse(A*B)
@test_throws ArgumentError InverseAdjoint(Id)
@test_throws ArgumentError InverseAdjoint(A')
@test_throws ArgumentError InverseAdjoint(inv(A))
@test_throws ArgumentError InverseAdjoint(inv(A'))
@test_throws ArgumentError InverseAdjoint(3A)
@test_throws ArgumentError InverseAdjoint(A*B)
@test_throws ArgumentError Jacobian(3M,x)
@test_throws ArgumentError Scaled(2,3M)
# Check that sums and compositions must have at least 2 terms
@test_throws ArgumentError Sum()
@test_throws ArgumentError Sum(A)
@test Sum(A,B) isa Sum
@test_throws ArgumentError Composition()
@test_throws ArgumentError Composition(A)
@test Composition(A,B) isa Composition
# Test the `show` method.
@test to_string(A) == "A"
@test to_string(A') == "A'"
@test to_string(A + A) == "2⋅A"
@test to_string(A' + A') == "2⋅A'"
@test to_string(Id/A) == "inv(A)"
@test to_string(Id/(A + B)) == "inv($(to_string(A + B)))"
@test to_string(M) == "M"
@test to_string(M + M) == "2⋅M"
@test to_string(∇(M,x)) == "∇(M,x)"
@test to_string(∇(M,x) + ∇(M,x)) == "2⋅∇(M,x)"
end # testset
nothing
end # module
#
# runtests.jl -
#
# Run all tests.
#
module TestingLazyAlgebra
using Test
@testset "Utilities " begin
include("utils-tests.jl")
end
@testset "Rules " begin
include("rules-tests.jl")
end
@testset "Generalized matrix multiplication" begin
include("genmult-tests.jl")
end
@testset "Vectorized operations " begin
include("vect-tests.jl")
end
@testset "Mappings " begin
include("map-tests.jl")
end
@testset "Finite differences " begin
include("diff-tests.jl")
end
@testset "Sparse operators " begin
include("sparse-tests.jl")
end
@testset "Cropping and padding " begin
include("crop-tests.jl")
end
@testset "FFT methods " begin
include("fft-tests.jl")
end
#@testset "Gram operators " begin
include("gram-tests.jl")
#end
@testset "Conjugate gradient " begin
include("cg-tests.jl")
end
end # module
#
# sparse-benchs.jl -
#
# Benchmarking sparse operators.
#
module BenchmarkingLazyAlgebraSparseOperators
using LinearAlgebra
using SparseArrays
using StructuredArrays
using LazyAlgebra
using LazyAlgebra: identical, Adjoint, Direct
using LazyAlgebra.SparseMethods
using LazyAlgebra.SparseOperators: check_structure, compute_offsets
using BenchmarkTools
using Random
is_csc(::Any) = false
is_csc(::CompressedSparseOperator{:CSC}) = true
is_csc(::Adjoint{<:CompressedSparseOperator{:CSR}}) = true
is_csr(::Any) = false
is_csr(::CompressedSparseOperator{:CSR}) = true
is_csr(::Adjoint{<:CompressedSparseOperator{:CSC}}) = true
is_coo(::Any) = false
is_coo(::CompressedSparseOperator{:COO}) = true
is_coo(::Adjoint{<:CompressedSparseOperator{:COO}}) = true
# Generate a possibly sparse array of random values. Value are small signed
# integers so that all computations should be exact (except with non-integer
# multipliers).
genarr(T::Type, dims::Integer...; kwds...) = genarr(T, dims; kwds...)
function genarr(T::Type, dims::Tuple{Vararg{Integer}};
sparsity::Real = 0,
range::AbstractUnitRange{<:Integer} = -17:17)
@assert 0 ≤ sparsity ≤ 1
A = Array{T}(undef, dims)
for i in eachindex(A)
if sparsity > 0 && rand() ≤ sparsity
A[i] = zero(T)
elseif T <: Complex
A[i] = T(rand(range), rand(range))
else
A[i] = T(rand(range))
end
end
return A
end
# Unpack a sparse operator into a regular array using simplest iterator. There
# may be duplicates.
function unpack_with_iterator!(dest::Array{T},
A::SparseOperator{E},
op = (E === Bool ? (|) : (+))) where {T,E}
C = fill!(reshape(dest, (nrows(A), ncols(A))), zero(T))
for (Aij, i, j) in A
C[i,j] = op(C[i,j], Aij)
end
return dest
end
nickname(::AbstractMatrix) = "Matrix";
nickname(::AbstractSparseMatrix) = "SparseMatrix";
nickname(::GeneralMatrix) = "GeneralMatrix";
nickname(::CompressedSparseOperator{:COO}) = "SparseOperatorCOO";
nickname(::CompressedSparseOperator{:CSC}) = "SparseOperatorCSC";
nickname(::CompressedSparseOperator{:CSR}) = "SparseOperatorCSR";
nickname(n::Integer, x) = rpad(nickname(x), n);
function bench1(; m::Integer = 2_000, n::Integer = m, T::Type = Float64,
sparsity::Real = 0.95)
bench1(T, m, n, sparsity)
end
bench1(::Type{T}, m::Integer, n::Integer, sparsity::Real) where {T} =
bench1(T, Int(m), Int(n), Float64(sparsity))
function bench1(::Type{T}, m::Int, n::Int, sparsity::Float64) where {T}
A = genarr(T, (m, n); sparsity=sparsity);
x = genarr(T, n);
y = genarr(T, m);
x1 = similar(x);
y1 = similar(y);
x2 = similar(x);
y2 = similar(y);
coo = SparseOperatorCOO(A);
csc = SparseOperatorCSC(A);
csr = SparseOperatorCSR(A);
gen = GeneralMatrix(A);
S = sparse(A);
println("Tests are done for T=$T, (m,n)=($m,$n) and sparsity = ",
round(sparsity*1e2, sigdigits=3), "% of entries.\n")
nnz(S) == nnz(coo) || println("not same number of non-zeros (COO)");
nnz(S) == nnz(csc) || println("not same number of non-zeros (CSC)");
nnz(S) == nnz(csr) || println("not same number of non-zeros (CSR)");
mul!(y1, A, x);
mul!(x1, A', y);
for B in (gen, S, coo, csc, csr)
mul!(y2, B, x);
mul!(x2, B', y);
println("compare A*x for a ", nickname(20, B), extrema(x1 - x2))
println("compare A'*x for a ", nickname(20, B), extrema(y1 - y2))
end
println()
for B in (A, gen, S, coo, csc, csr)
print("benchmarking A*x for a ", nickname(20, B))
@btime mul!($y1, $B, $x)
end
println()
for B in (A, gen, S, coo, csc, csr)
print("benchmarking A'*x for a ", nickname(20, B))
@btime mul!($x1, $(B'), $y)
end
end
end # module
BenchmarkingLazyAlgebraSparseOperators.bench1()
#
# sparse-tests.jl -
#
# Testing sparse operators.
#
module TestingLazyAlgebraSparseOperators
using SparseArrays
using StructuredArrays
using LazyAlgebra
using LazyAlgebra: identical
using LazyAlgebra.Foundations
using LazyAlgebra.SparseMethods
using LazyAlgebra.SparseOperators: check_structure, compute_offsets
using Test
using Random
is_csc(::Any) = false
is_csc(::CompressedSparseOperator{:CSC}) = true
is_csc(::Adjoint{<:CompressedSparseOperator{:CSR}}) = true
is_csr(::Any) = false
is_csr(::CompressedSparseOperator{:CSR}) = true
is_csr(::Adjoint{<:CompressedSparseOperator{:CSC}}) = true
is_coo(::Any) = false
is_coo(::CompressedSparseOperator{:COO}) = true
is_coo(::Adjoint{<:CompressedSparseOperator{:COO}}) = true
# Generate a possibly sparse array of random values. Value are small signed
# integers so that all computations should be exact (except with non-integer
# multipliers).
genarr(T::Type, dims::Integer...; kwds...) = genarr(T, dims; kwds...)
function genarr(T::Type, dims::Tuple{Vararg{Integer}};
sparsity::Real = 0,
range::AbstractUnitRange{<:Integer} = -17:17)
@assert 0 ≤ sparsity ≤ 1
A = Array{T}(undef, dims)
for i in eachindex(A)
if sparsity > 0 && rand() ≤ sparsity
A[i] = zero(T)
elseif T <: Complex
A[i] = T(rand(range), rand(range))
else
A[i] = T(rand(range))
end
end
return A
end
# Unpack a sparse operator into a regular array using simplest iterator. There
# may be duplicates.
function unpack_with_iterator!(dest::Array{T},
A::SparseOperator{E},
op = (E === Bool ? (|) : (+))) where {T,E}
C = fill!(reshape(dest, (nrows(A), ncols(A))), zero(T))
for (Aij, i, j) in A
C[i,j] = op(C[i,j], Aij)
end
return dest
end
@testset "Low level sparse utilities" begin
@test compute_offsets(2, Int[]) == [0,0,0]
@test compute_offsets(5, [2,2,3,5]) == [0,0,2,3,3,4]
@test compute_offsets(5, [1,3,3]) == [0,1,1,3,3,3]
# Check for non-increasing order.
@test_throws ErrorException compute_offsets(5, [1,3,2])
# Check for out-of-bounds.
@test_throws ErrorException compute_offsets(5, [0,3,3,7])
@test_throws ErrorException compute_offsets(5, [1,3,7])
end
@testset "Compressed sparse formats " begin
# Parameters.
siz = (5, 6) # these tests assume that A is a 2-D array
T = Float64;
Tp = Float32; # for conversion
# Make a banded matrix with random entries.
A = genarr(T, siz) .* StructuredArray((i,j) -> -1 ≤ i - j ≤ 2, siz)
spm = sparse(A);
csr = convert(SparseOperatorCSR, A); # same as SparseOperatorCSR(A)
csc = SparseOperatorCSC(A);
coo = SparseOperatorCOO(A);
x = genarr(T, siz[2]);
y = genarr(T, siz[1]);
# Make a COO version with randomly permuted entries.
kp = randperm(nnz(coo));
coo_perm = SparseOperatorCOO(get_vals(coo)[kp],
get_rows(coo)[kp],
get_cols(coo)[kp],
row_size(coo),
col_size(coo));
# Make a COO version with randomly permuted entries and some duplicates.
# Use fractions 1/3 and 3/4 for duplicating so that there is no loss of
# precision.
l = 7
k = zeros(Int, length(kp) + l)
w = ones(T, length(k))
k[1:length(kp)] = kp
for i in 1:l
j1 = length(kp) - i + 1
j2 = length(kp) + i
w[j1] *= 1/4
w[j2] *= 3/4
k[j2] = k[j1]
end
coo_dups = SparseOperatorCOO(get_vals(coo)[k] .* w,
get_rows(coo)[k],
get_cols(coo)[k],
row_size(coo),
col_size(coo))
# Check structures.
@test check_structure(csr) === csr
@test check_structure(csc) === csc
@test check_structure(coo) === coo
# Basic array-like methods
@test eltype(csr) === eltype(A)
@test eltype(csc) === eltype(A)
@test eltype(coo) === eltype(A)
@test length(csr) === length(A)
@test length(csc) === length(A)
@test length(coo) === length(A)
@test ndims(csr) === ndims(A)
@test ndims(csc) === ndims(A)
@test ndims(coo) === ndims(A)
@test size(csr) === size(A)
@test size(csc) === size(A)
@test size(coo) === size(A)
@test nrows(csr) === size(A,1)
@test nrows(csc) === size(A,1)
@test nrows(coo) === size(A,1)
@test nrows(spm) === size(A,1)
@test ncols(csr) === size(A,2)
@test ncols(csc) === size(A,2)
@test ncols(coo) === size(A,2)
@test ncols(spm) === size(A,2)
# Number of structural non-zeros.
nvals = count(x -> x != zero(x), A);
@test nnz(csr) === nvals
@test nnz(csc) === nvals
@test nnz(coo) === nvals
@test nnz(spm) === nvals
@test length(get_vals(csr)) === nvals
@test length(get_vals(csc)) === nvals
@test length(get_vals(coo)) === nvals
@test length(get_vals(spm)) === nvals
# `nonzeros` and `get_vals` should yield the same object.
@test get_vals(csr) === nonzeros(csr)
@test get_vals(csc) === nonzeros(csc)
@test get_vals(coo) === nonzeros(coo)
@test get_vals(spm) === nonzeros(spm)
# Julia arrays are column-major so values and row indices should be the
# same in compressed sparse column (CSC) and compressed sparse coordinate
# (COO) formats.
@test get_vals(coo) == get_vals(csc)
@test get_rows(coo) == get_rows(csc)
@test get_cols(coo) == get_cols(csc)
@test get_vals(coo) == get_vals(spm)
@test get_rows(coo) == get_rows(spm)
@test get_cols(coo) == get_cols(spm)
# Check converting back to standard array.
@test Array(csr) == A
@test Array(csc) == A
@test Array(coo) == A
@test Array(spm) == A
@test Array(coo_perm) == A
@test Array(coo_dups) == A
# Check matrix-vector multiplication (more serious tests in another
# section).
Ax = A*x
Aty = A'*y
@test csr*x == Ax
@test csc*x == Ax
@test coo*x == Ax
@test csr'*y == Aty
@test csc'*y == Aty
@test coo'*y == Aty
# Check iterators.
B = Array{T}(undef, size(A))
@test unpack_with_iterator!(B, csr) == A
@test unpack_with_iterator!(B, csc) == A
@test unpack_with_iterator!(B, coo) == A
# Check conversions to COO, CSC and CSR formats.
for F in (:COO, :CSC, :CSR)
for src in (A, csc, csr, coo, coo_perm, coo_dups)
for (t, cnv) in ((T, CompressedSparseOperator{F}(src)),
(T, CompressedSparseOperator{F,T}(src)),
(Tp, CompressedSparseOperator{F,Tp}(src)),)
@test check_structure(cnv) === cnv
@test eltype(cnv) === t
if F === :COO
@test (cnv === coo) == (t === T && src === coo)
@test identical(cnv, coo) == (t === T && src === coo)
if is_csc(src) || is_csr(src)
if is_csc(src)
@test get_rows(cnv) === get_rows(src)
else
@test get_rows(cnv) == get_rows(src)
end
if is_csr(src)
@test get_cols(cnv) === get_cols(src)
else
@test get_cols(cnv) == get_cols(src)
end
if t === T
@test get_vals(cnv) === get_vals(src)
else
@test get_vals(cnv) == get_vals(src)
end
end
elseif F === :CSC
@test (cnv === csc) == (t === T && src === csc)
@test identical(cnv, csc) == (t === T && src === csc)
if is_csc(src)
@test get_rows(cnv) === get_rows(csc)
else
@test get_rows(cnv) == get_rows(csc)
end
@test each_col(cnv) === each_col(csc)
@test get_cols(cnv) == get_cols(csc)
if is_csc(src) && t === T
@test get_vals(cnv) === get_vals(csc)
else
@test get_vals(cnv) == get_vals(csc)
end
elseif F === :CSR
@test (cnv === csr) == (t === T && src === csr)
@test identical(cnv, csr) == (t === T && src === csr)
@test each_row(cnv) === each_row(csr)
@test get_rows(cnv) == get_rows(csr)
if is_csr(src)
@test get_cols(cnv) === get_cols(csr)
else
@test get_cols(cnv) == get_cols(csr)
end
if is_csr(src) && t === T
@test get_vals(cnv) === get_vals(csr)
else
@test get_vals(cnv) == get_vals(csr)
end
end
end
end
end
end # testset
@testset "Sparse operations " begin
rows = (2,3,4)
cols = (5,6)
M = length(rows)
N = length(cols)
for T in (Float32, Float64, Complex{Float64})
R = real(T);
ε = eps(R);
A = genarr(T, rows..., cols...; sparsity=0.7); # 70% of zeros
x = genarr(T, cols);
xsav = vcopy(x);
y = genarr(T, rows);
ysav = vcopy(y);
Scsc = SparseOperatorCSC{T,M,N}(A);
Scsr = SparseOperatorCSR{T,M,N}(A);
Scoo = SparseOperatorCOO{T,M,N}(A);
# Check basic methods.
for S in (Scsc, Scsr, Scoo)
@test eltype(S) === T
@test ndims(S) == length(rows) + length(cols)
@test is_endomorphism(S) == (rows == cols)
@test (LazyAlgebra.MorphismType(S) ===
LazyAlgebra.Endomorphism()) == (rows == cols)
@test row_size(S) == rows
@test col_size(S) == cols
@test nrows(S) == prod(rows)
@test ncols(S) == prod(cols)
@test output_size(S) == rows
@test input_size(S) == cols
@test SparseOperator(S) === S
@test SparseOperator{T}(S) === S
@test SparseOperator{T,M}(S) === S
@test SparseOperator{T,M,N}(S) === S
@test LazyAlgebra.identical(SparseOperator(S), S)
@test LazyAlgebra.identical(SparseOperator{T}(S), S)
@test LazyAlgebra.identical(SparseOperator{T,M}(S), S)
@test LazyAlgebra.identical(SparseOperator{T,M,N}(S), S)
# Check `apply!` and `vcreate` with integer valued multipliers so
# that exact results are expected.
Sx = S*x; @test x == xsav;
Sty = S'*y; @test y == ysav;
@test vdot(y, Sx) == vdot(Sty, x);
for α in (0, 1, -1, 3),
β in (0, 1, -1, 7),
scratch in (false, true)
# Test operator.
@test apply!(α, Direct, S, x, scratch, β, vcopy(y)) ==
R(α)*Sx + R(β)*y
if scratch
vcopy!(x, xsav)
else
@test x == xsav
end
# Test adjoint.
@test apply!(α, Adjoint, S, y, scratch, β, vcopy(x)) ==
R(α)*Sty + R(β)*x
if scratch
vcopy!(y, ysav)
else
@test y == ysav
end
end
# Compare to results with a general matrix.
G = GeneralMatrix(A);
Gx = G*x; @test x == xsav;
Gty = G'*y; @test y == ysav;
@test Sx == Gx
@test Sty == Gty
# Compare to results with a 2D matrix and 1D vectors.
Aflat = reshape(A, prod(rows), prod(cols));
xflat = reshape(x, prod(cols));
yflat = reshape(y, prod(rows));
@test Sx == reshape(Aflat*xflat, rows)
@test Sty == reshape(Aflat'*yflat, cols)
# Extract coefficients as an array or as a matrix.
A1 = Array(S);
@test eltype(A1) === eltype(S)
@test ndims(A1) == ndims(S)
@test size(A1) == (rows..., cols...,)
@test A1 == A
# FIXME: A2 = Matrix(S)
# FIXME: @test eltype(A2) === eltype(S)
# FIXME: @test ndims(A2) == 2
# FIXME: @test size(A2) == (prod(rows), prod(cols))
# FIXME: @test A2 == reshape(A, size(A2))
# FIXME: B = (A .!= 0) # make an array of booleans
# FIXME: @test Array(SparseOperator(B, length(rows))) == B
# Convert to another floating-point type.
T1 = (T <: Complex ?
(real(T) === Float32 ? Complex{Float64} : Complex{Float32}) :
(T === Float32 ? Float64 : Float32))
S1 = SparseOperator{T1}(S)
@test eltype(S1) === T1
@test ndims(S1) == ndims(S)
if is_csc(S) || is_coo(S)
@test get_rows(S1) === get_rows(S)
else
@test get_rows(S1) == get_rows(S)
end
if is_csr(S) || is_coo(S)
@test get_cols(S1) === get_cols(S)
else
@test get_cols(S1) == get_cols(S)
end
@test coefficients(S1) == coefficients(S)
@test LazyAlgebra.identical(S1, S) == false
# Check reshaping.
S2d = reshape(S, prod(output_size(S)), prod(input_size(S)))
@test eltype(S2d) === eltype(S)
@test ndims(S2d) == 2
if is_csc(S) || is_coo(S)
@test get_rows(S2d) === get_rows(S)
else
@test get_rows(S2d) == get_rows(S)
end
if is_csr(S) || is_coo(S)
@test get_cols(S2d) === get_cols(S)
else
@test get_cols(S2d) == get_cols(S)
end
@test coefficients(S2d) === coefficients(S)
@test LazyAlgebra.identical(S2d, S) == false
# FIXME: # Convert to a sparse matrix.
# FIXME: S2 = sparse(S)
# FIXME: @test eltype(S2) === eltype(S)
# FIXME: S3 = SparseOperator(S2)
# FIXME: @test eltype(S3) === eltype(S)
# FIXME: x2 = genarr(T, input_size(S3))
# FIXME: y2 = genarr(T, output_size(S3))
# FIXME: @test S2*x2 == S3*x2
# FIXME: @test S2'*y2 == S3'*y2
# FIXME:
# FIXME: # Check multiplication by a scalar.
# FIXME: @test 1*S === S
# FIXME: S0 = 0*S
# FIXME: @test isa(S0, SparseOperator)
# FIXME: @test length(get_rows(S0)) == 0
# FIXME: @test length(get_cols(S0)) == 0
# FIXME: @test length(coefficients(S0)) == 0
# FIXME: @test eltype(S0) == eltype(S)
# FIXME: @test input_size(S0) == input_size(S)
# FIXME: @test output_size(S0) == output_size(S)
# FIXME: α = R(π)
# FIXME: αS = α*S
# FIXME: @test isa(αS, SparseOperator)
# FIXME: @test get_rows(αS) === get_rows(S)
# FIXME: @test get_cols(αS) === get_cols(S)
# FIXME: @test coefficients(αS) == α*coefficients(S)
# FIXME: @test eltype(αS) == eltype(S)
# FIXME: @test input_size(αS) == input_size(S)
# FIXME: @test output_size(αS) == output_size(S)
# FIXME:
# FIXME: # Check left and right multiplication by a non-uniform rescaling
# FIXME: # operator.
# FIXME: w1 = genarr(T, output_size(S))
# FIXME: W1 = NonuniformScaling(w1)
# FIXME: W1_S = W1*S
# FIXME: c1 = (w1 .* A)[A .!= zero(T)]
# FIXME: @test isa(W1_S, SparseOperator)
# FIXME: @test eltype(W1_S) === T
# FIXME: @test output_size(W1_S) == output_size(S)
# FIXME: @test input_size(W1_S) == input_size(S)
# FIXME: @test get_rows(W1_S) === get_rows(S)
# FIXME: @test get_cols(W1_S) === get_cols(S)
# FIXME: @test coefficients(W1_S) == c1
# FIXME: w2 = genarr(T, input_size(S))
# FIXME: W2 = NonuniformScaling(w2)
# FIXME: S_W2 = S*W2
# FIXME: c2 = (A .* reshape(w2, (ones(Int, length(output_size(S)))...,
# FIXME: input_size(S)...,)))[A .!= zero(T)]
# FIXME: @test isa(S_W2, SparseOperator)
# FIXME: @test eltype(S_W2) === T
# FIXME: @test output_size(S_W2) == output_size(S)
# FIXME: @test input_size(S_W2) == input_size(S)
# FIXME: @test get_cols(S_W2) === get_cols(S)
# FIXME: @test get_rows(S_W2) === get_rows(S)
# FIXME: @test coefficients(S_W2) == c2
# FIXME:
# FIXME: # Use another constructor with integer conversion.
# FIXME: R = SparseOperator(Int32.(get_rows(S)),
# FIXME: Int64.(get_cols(S)),
# FIXME: coefficients(S),
# FIXME: Int32.(output_size(S)),
# FIXME: Int64.(input_size(S)))
# FIXME: @test Sx == R*x
# FIXME: @test Sty == R'*y
end
end
end
nothing
end # module
module TupleTests
using BenchmarkTools
@inline to_tuple1(x::AbstractVector) = Tuple(x)
@inline to_tuple2(x::AbstractVector) = (x...,)
@inline function to_tuple3(x::AbstractVector)
n = length(x)
@assert eachindex(x) == 1:n
ntuple(i->x[i], n)
end
# The cutoff at n = 10 below reflects what is used by `ntuple`. This value is
# somewhat arbitrary, on the machines where I tested the code, the explicit
# unrolled expression for n = 10 is still about 44 times faster than `(x...,)`.
# Calling `ntuple` for n ≤ 10 is about twice slower; for n > 10, `ntuple` is
# slower than `(x...,)`.
function to_tuple4(x::AbstractVector)
n = length(x)
@inbounds begin
n == 0 ? () :
n > 10 || firstindex(x) != 1 ? (x...,) :
n == 1 ? (x[1],) :
n == 2 ? (x[1], x[2]) :
n == 3 ? (x[1], x[2], x[3]) :
n == 4 ? (x[1], x[2], x[3], x[4]) :
n == 5 ? (x[1], x[2], x[3], x[4], x[5]) :
n == 6 ? (x[1], x[2], x[3], x[4], x[5], x[6]) :
n == 7 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7]) :
n == 8 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8]) :
n == 9 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]) :
(x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])
end
end
# As to_tuple4 but inlined.
@inline function to_tuple5(x::AbstractVector)
n = length(x)
@inbounds begin
n == 0 ? () :
n > 10 || firstindex(x) != 1 ? (x...,) :
n == 1 ? (x[1],) :
n == 2 ? (x[1], x[2]) :
n == 3 ? (x[1], x[2], x[3]) :
n == 4 ? (x[1], x[2], x[3], x[4]) :
n == 5 ? (x[1], x[2], x[3], x[4], x[5]) :
n == 6 ? (x[1], x[2], x[3], x[4], x[5], x[6]) :
n == 7 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7]) :
n == 8 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8]) :
n == 9 ? (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]) :
(x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])
end
end
for n in ((0:20)...,30,40,50)
println("\nmaking tuples from a vector of $n elements:")
let x = rand(n)
@assert to_tuple1(x) === Tuple(x)
@assert to_tuple2(x) === Tuple(x)
@assert to_tuple3(x) === Tuple(x)
@assert to_tuple4(x) === Tuple(x)
@assert to_tuple5(x) === Tuple(x)
print(" Tuple(x): ")
@btime to_tuple1($x)
print(" (x, ...): ")
@btime to_tuple2($x)
print(" ntuple(i->x[i], length(x)): ")
@btime to_tuple3($x)
print(" to_tuple4(x): ")
@btime to_tuple4($x)
print(" to_tuple5(x): ")
@btime to_tuple5($x)
end
end
end # module
nothing
#
# utils-tests.jl -
#
# Test utility functions.
#
module TestingLazyAlgebraUtilities
using Random
using Test
using LazyAlgebra
@testset "Multipliers " begin
#
# Tests for `promote_multiplier`.
#
let promote_multiplier = LazyAlgebra.promote_multiplier,
types = (Float32, Float16, BigFloat, Float64, ComplexF32, ComplexF64),
perms = randperm(length(types)), # prevent compilation-time optimization
n = length(types)
# The ≡ (or ===) operator is too restrictive, let's define our own
# method to compare multipliers: they must have the same value and the
# same type to be considered as identical.
identical(a::T, b::T) where {T} = (a == b)
identical(a, b) = false
for i in 1:n
T1 = types[perms[i]]
T2 = types[perms[mod(i, n) + 1]]
T3 = types[perms[mod(i+1, n) + 1]]
T4 = types[perms[mod(i+2, n) + 1]]
A1 = zeros(T1, 1)
A2 = zeros(T2, 2)
A3 = zeros(T3, 3)
A4 = zeros(T4, 4)
for λ in (1, π, 2 - 1im)
# Check with type arguments.
@test identical(promote_multiplier(λ, T1),
convert(isa(λ, Complex) ?
Complex{real(T1)} :
real(T1), λ))
@test identical(promote_multiplier(λ,T1,T2),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2))} :
real(promote_type(T1,T2)), λ))
@test identical(promote_multiplier(λ,T1,T2,T3),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2,T3))} :
real(promote_type(T1,T2,T3)), λ))
@test identical(promote_multiplier(λ,T1,T2,T3,T4),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2,T3,T4))} :
real(promote_type(T1,T2,T3,T4)), λ))
# Check with array arguments.
@test identical(promote_multiplier(λ, A1),
convert(isa(λ, Complex) ?
Complex{real(T1)} :
real(T1), λ))
@test identical(promote_multiplier(λ, A1, A2),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2))} :
real(promote_type(T1,T2)), λ))
@test identical(promote_multiplier(λ, A1, A2, A3),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2,T3))} :
real(promote_type(T1,T2,T3)), λ))
@test identical(promote_multiplier(λ, A1, A2, A3, A4),
convert(isa(λ, Complex) ?
Complex{real(promote_type(T1,T2,T3,T4))} :
real(promote_type(T1,T2,T3,T4)), λ))
end
end
for T in (AbstractFloat, Real, Complex, Integer, Number, Unsigned)[randperm(6)]
@test_throws ErrorException promote_multiplier(1, T)
end
end
end # testset
@testset "Miscellaneous" begin
#
# Tests for `to_tuple`.
#
let to_tuple = LazyAlgebra.to_tuple, x = randn(5)
@test to_tuple(x) === (x...,)
@test to_tuple(to_tuple(x)) === (x...,)
end
end # testset
@testset "Messages " begin
#
# Tests of message, etc.
#
let bad_argument = LazyAlgebra.bad_argument,
bad_size = LazyAlgebra.bad_size,
arguments_have_incompatible_axes = LazyAlgebra.arguments_have_incompatible_axes,
operands_have_incompatible_axes = LazyAlgebra.operands_have_incompatible_axes,
message = LazyAlgebra.message,
warn = LazyAlgebra.warn,
siz = (3,4,5)
@test_throws ArgumentError bad_argument("argument must be nonnegative")
@test_throws ArgumentError bad_argument("invalid size ", siz)
@test_throws DimensionMismatch arguments_have_incompatible_axes()
@test_throws DimensionMismatch operands_have_incompatible_axes()
@test_throws DimensionMismatch bad_size("invalid size")
@test_throws DimensionMismatch bad_size("invalid size ", siz)
message("Info:", "array size ", siz; color=:magenta)
message(stdout, "Info:", "array size ", siz; color=:yellow)
warn("array size ", siz)
warn(stdout, "Info:", "array size ", siz)
end
end # testset
nothing
end # module
# Benchmarks for vectorized operations.
module LazyAlgebraVectorBenchmarks
using BenchmarkTools
using LazyAlgebra
const Vn = LazyAlgebra
#------------------------------------------------------------------------------
module V1
using ArrayTools
using LazyAlgebra:
Floats, vscale!, vzero!,
promote_multiplier, arguments_have_incompatible_axes
function vupdate!(y::AbstractArray{<:Floats,N},
α::Number,
x::AbstractArray{<:Floats,N}) where {N}
I = all_indices(x, y)
if α == 1
@inbounds @simd for i in I
y[i] += x[i]
end
elseif α == -1
@inbounds @simd for i in I
y[i] -= x[i]
end
elseif α != 0
alpha = promote_multiplier(α, x)
@inbounds @simd for i in I
y[i] += alpha*x[i]
end
end
return y
end
function vcombine!(dst::AbstractArray{<:Floats,N},
α::Number,
x::AbstractArray{<:Floats,N},
β::Number,
y::AbstractArray{<:Floats,N}) where {N}
if α == 0
axes(x) == axes(dst) || arguments_have_incompatible_axes()
vscale!(dst, β, y)
elseif β == 0
axes(y) == axes(dst) || arguments_have_incompatible_axes()
vscale!(dst, α, x)
else
I = all_indices(dst, x, y)
if α == 1
if β == 1
@inbounds @simd for i in I
dst[i] = x[i] + y[i]
end
elseif β == -1
@inbounds @simd for i in I
dst[i] = x[i] - y[i]
end
else
beta = promote_multiplier(β, y)
@inbounds @simd for i in I
dst[i] = x[i] + beta*y[i]
end
end
elseif α == -1
if β == 1
@inbounds @simd for i in I
dst[i] = y[i] - x[i]
end
elseif β == -1
@inbounds @simd for i in I
dst[i] = -x[i] - y[i]
end
else
beta = promote_multiplier(β, y)
@inbounds @simd for i in I
dst[i] = beta*y[i] - x[i]
end
end
else
alpha = promote_multiplier(α, x)
if β == 1
@inbounds @simd for i in I
dst[i] = alpha*x[i] + y[i]
end
elseif β == -1
@inbounds @simd for i in I
dst[i] = alpha*x[i] - y[i]
end
else
beta = promote_multiplier(β, y)
@inbounds @simd for i in I
dst[i] = alpha*x[i] + beta*y[i]
end
end
end
end
return dst
end
end # module V1
#------------------------------------------------------------------------------
n = 10_000
T = Float32
x = rand(T,n)
y = rand(T,n)
z1 = similar(x)
zn = similar(x)
alphas = (0,1,-1,-4.0)
betas = (0,1,-1,+2.0)
v1_vupdate!(dst, y, alpha, x) = V1.vupdate!(vcopy!(dst, y), alpha, x)
vn_vupdate!(dst, y, alpha, x) = LazyAlgebra.vupdate!(vcopy!(dst, y), alpha, x)
for a in alphas
println("\nvupdate!(y, α=$a, x):")
print(" v1: ")
@btime v1_vupdate!($z1,$y,$a,$x);
print(" new: ")
@btime vn_vupdate!($zn,$y,$a,$x);
dz = vnorm2(z1 - zn)
printstyled(" -> ‖z1 - zn‖ = $dz\n"; color=(dz==0 ? :green : :red))
end
for a in alphas, b in betas
println("\nvcombine!(z, α=$a, x, β=$b, y):")
print(" v1: ")
@btime V1.vcombine!($z1,$a,$x,$b,$y);
print(" new: ")
@btime LazyAlgebra.vcombine!($zn,$a,$x,$b,$y);
dz = vnorm2(z1 - zn)
printstyled(" -> ‖z1 - zn‖ = $dz\n"; color=(dz==0 ? :green : :red))
end
end # module
#
# vect-tests.jl -
#
# Tests for vectorized operations.
#
module TestingLazyAlgebraVectorized
using LazyAlgebra
using Test
lpad_with_parentheses(n::Integer, x) = lpad(string("(", x, ")"), n)
lpad_with_parentheses(n::Integer, x, y) = lpad(string("(", x, ",", y, ")"), n)
distance(a::Real, b::Real) = abs(a - b)
distance(a::NTuple{2,Real}, b::NTuple{2,Real}) =
hypot(a[1] - b[1], a[2] - b[2])
distance(A::AbstractArray{Ta,N}, B::AbstractArray{Tb,N}) where {Ta,Tb,N} =
maximum(abs.(A - B))
makeselection(n::Integer) = begin
sel = Array{Int}(undef, 0)
j = [2,3,5]
k = 1
while k ≤ n
push!(sel, k)
k += rand(j)
end
return sel
end
alphas = (0, 1, -1, 2.71, π)
betas = (0, 1, -1, -1.33, Base.MathConstants.φ)
types = (Float32, Float64)
dims = (3,4,5)
@testset "vnorm $(lpad_with_parentheses(34,T))" for T in types
S = (T == Float32 ? Float64 : Float32)
v = randn(T, dims)
@test vnorminf(v) == maximum(abs.(v))
@test vnorminf(S, v) == S(maximum(abs.(v)))
@test vnorm1(v) ≈ sum(abs.(v))
@test vnorm2(v) ≈ sqrt(sum(v.*v))
z = complex.(randn(T, dims), randn(T, dims))
@test vnorminf(z) ≈ maximum(abs.(z))
@test vnorm1(z) ≈ sum(abs.(real.(z)) + abs.(imag.(z)))
@test vnorm2(z) ≈ sqrt(sum(abs2.(z)))
end # testset
@testset "vcopy, vswap $(lpad_with_parentheses(27,T))" for T in types
u = randn(T, dims)
uc = vcopy(u)
@test distance(u, uc) == 0
v = randn(T, dims)
vc = vcopy!(vcreate(v), v)
@test distance(v, vc) == 0
vswap!(u, v)
@test distance(u, vc) == distance(v, uc) == 0
@test_throws DimensionMismatch vcopy!(Array{T}(undef, dims .+ 1), u)
end # testset
@testset "vfill $(lpad_with_parentheses(34,T))" for T in types
a = randn(T, dims)
@test distance(vfill!(a,0), zeros(T,dims)) == 0
a = randn(T, dims)
@test distance(vfill!(a,0), vzero!(a)) == 0
a = randn(T, dims)
@test distance(vfill!(a,1), ones(T,dims)) == 0
a = randn(T, dims)
@test distance(vfill!(a,π), fill!(similar(a), π)) == 0
ac = vcopy(a)
@test distance(vzeros(a), zeros(T,dims)) == 0
@test distance(a, ac) == 0
@test distance(vones(a), ones(T,dims)) == 0
@test distance(a, ac) == 0
end # testset
@testset "vscale " begin
for T in types
a = randn(T, dims)
b = vcreate(a)
for α in (0, -1, 1, π, 2.71)
d = T(α)*a
@test distance(vscale(α,a), d) == 0
@test distance(vscale(a,α), d) == 0
@test distance(vscale!(b,α,a), d) == 0
@test distance(vscale!(b,a,α), d) == 0
c = vcopy(a)
@test distance(vscale!(c,α), d) == 0
vcopy!(c, a)
@test distance(vscale!(α,c), d) == 0
end
end
v = ones(dims)
@test_throws ErrorException vscale!(1,2)
@test_throws ErrorException vscale!(v,2,3)
@test_throws ErrorException vscale!(1,v,3)
@test_throws ErrorException vscale!(1,2,v)
@test_throws ErrorException vscale!(1,2,3)
for Ta in types, Tb in types
a = randn(Ta, dims)
ac = vcopy(a)
b = Array{Tb}(undef, dims)
e = max(eps(Ta), eps(Tb))
for α in alphas
d = α*a
@test distance(vscale!(b,α,a), d) ≤ 8e
@test distance(vscale(α,a), d) ≤ 8e
@test distance(a, ac) == 0
end
end
end # testset
@testset "vupdate $(lpad_with_parentheses(32,Ta,Tb))" for Ta in types, Tb in types
Tmin = sizeof(Ta) ≤ sizeof(Tb) ? Ta : Tb
Tmax = sizeof(Ta) ≥ sizeof(Tb) ? Ta : Tb
a = randn(Ta, dims)
b = randn(Tb, dims)
sel = makeselection(length(a))
atol, rtol = zero(Tmin), sqrt(eps(Tmin))
for α in alphas
@test vupdate!(vcopy(a),α,b) ≈
a + Tmax(α)*b atol=atol rtol=rtol norm=vnorm2
c = vcopy(a)
c[sel] .+= Tmax(α)*b[sel]
@test vupdate!(vcopy(a),sel,α,b) ≈ c atol=atol rtol=rtol norm=vnorm2
end
end # testset
@testset "vproduct $(lpad_with_parentheses(31,Ta,Tb))" for Ta in types, Tb in types
a = randn(Ta, dims)
b = randn(Tb, dims)
sel = makeselection(length(a))
c = vcreate(a)
e = max(eps(Ta), eps(Tb))
@test distance(vproduct!(c,a,b), (a .* b)) ≤ 2e
c = randn(Tb, dims)
d = vcopy(c)
d[sel] = a[sel] .* b[sel]
@test distance(vproduct!(c,sel,a,b), d) ≤ 2e
end # testset
@testset "vcombine $(lpad_with_parentheses(31,T))" for T in types
a = randn(T, dims)
b = randn(T, dims)
d = vcreate(a)
for α in alphas,
β in betas
@test distance(vcombine!(d,α,a,β,b), (T(α)*a + T(β)*b)) == 0
end
end # testset
@testset "vdot $(lpad_with_parentheses(35,Ta,Tb))" for Ta in types, Tb in types
Tmin = sizeof(Ta) ≤ sizeof(Tb) ? Ta : Tb
Tmax = sizeof(Ta) ≥ sizeof(Tb) ? Ta : Tb
a = randn(Ta, dims)
b = randn(Tb, dims)
sel = makeselection(length(a))
w = zeros(Tmax, dims)
w[sel] = rand(length(sel))
# check implementation for real-valued vectors
@test vdot(a,b) ≈ sum(a.*b)
@test vdot(w,a,b) ≈ sum(w.*a.*b)
@test vdot(sel,a,b) ≈ sum(a[sel].*b[sel])
# check ⟨a,b⟩ = ⟨b,a⟩ for real-valued vectors
@test vdot(a,b) == vdot(b,a)
@test vdot(w,a,b) ≈ vdot(w,b,a)
@test vdot(sel,a,b) == vdot(sel,b,a)
@test sqrt(vdot(a,a)) ≈ vnorm2(a)
end # testset
@testset "vdot (Complex{$Ta},Complex{$Tb})" for Ta in types, Tb in types
Tmin = sizeof(Ta) ≤ sizeof(Tb) ? Ta : Tb
Tmax = sizeof(Ta) ≥ sizeof(Tb) ? Ta : Tb
a = complex.(randn(Ta, dims), randn(Ta, dims))
b = complex.(randn(Tb, dims), randn(Tb, dims))
sel = makeselection(length(a))
w = zeros(Tmax, dims)
w[sel] = rand(length(sel))
# check implementation for complex-valued vectors
@test vdot(a,b) ≈ sum(conj.(a).*b)
@test vdot(Tmax,a,b) ≈ real(sum(conj.(a).*b))
@test vdot(w,a,b) ≈ sum(w.*conj.(a).*b)
@test vdot(Tmax,w,a,b) ≈ real(sum(w.*conj.(a).*b))
@test vdot(sel,a,b) ≈ sum(conj.(a[sel]).*b[sel])
@test vdot(Tmax,sel,a,b) ≈ real(sum(conj.(a[sel]).*b[sel]))
# check ⟨a,b⟩ = conj(⟨b,a⟩) for complex-valued vectors
@test vdot(a,b) == conj(vdot(b,a))
@test vdot(w,a,b) ≈ conj(vdot(w,b,a))
@test vdot(sel,a,b) == conj(vdot(sel,b,a))
end # testset
nothing
end # module
# Visible changes in LazyAlgebra
## Wish list for future developments
* Simplifications that are automatically done by`LazyAlgebra` may change
multipliers but must not change the coefficients of the mappings. Call
`simplify(A)` to apply further simplifications that may change the
coefficients of the mappings in `A`. For instance, assuming `a` is an array,
`inv(Diag(a))` automatically yields `Inverse(Diag(a))` while
`simplify(inv(Diag(a)))` yields `Diag(1 ./ a)`.
* Calling BLAS should be avoided in some cases, either because BLAS is slower
than optimized Julia code, or because BLAS may use more than one thread in
inappropriate places (e.g., Julia multi-threaded code).
* As far as possible, make the code more agnostic of the element type of the
arguments. This would be useful to deal with arrays whose elements have
non-standard numerical types as physical quantities in the `Unitful` package.
## Branch 0.2
### Version 0.2.5
* Make `set_val!` for sparse operators returns the same result as `setindex!`.
### Version 0.2.3
* Simplify and generalize `vfill!` and `vzero!` to be able to work with
`Unitful` elements.
* Automatically specialize `multiplier_type` for `Unitful.AbstractQuantity`.
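For instance (a sketch assuming the `Unitful` package is installed; the values
are illustrative):

```julia
using LazyAlgebra, Unitful
x = [1.0, 2.0, 3.0] .* u"m"  # a vector of quantities
vzero!(x)                    # all elements become 0.0 m
vfill!(x, 2.0u"m")           # all elements become 2.0 m
```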
### Version 0.2.2
* Improve `promote_multiplier` and make it easy to extend. The work done by
  `promote_multiplier` is broken into several functions: `multiplier_type(x)`
yields the *element type* corresponding to `x` (which can be a number, an
array of numbers, or a number type), `multiplier_floatingpoint_type(args...)`
combines the types given by `multiplier_type` for all `args...` to yield a
concrete floating-point type. The method `multiplier_type` is intended to be
extended by other packages.
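For instance, another package may hook its own number-like type into this
mechanism along these lines (a sketch; `MyQuantity` is a made-up type used
only for illustration):

```julia
using LazyAlgebra

# A made-up number-like type carrying a Float64 payload.
struct MyQuantity <: Real
    val::Float64
end

# Tell LazyAlgebra which floating-point type corresponds to it.
LazyAlgebra.multiplier_type(::MyQuantity) = Float64
```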
### Version 0.2.1
* Replace `@assert` by `@certify`. Compared to `@assert`, the assertion made by
  `@certify` is never disabled, whatever the optimization level.
* Provide default `vcreate` method for Gram operators.
### Version 0.2.0
* Sub-module `LazyAlgebra.Foundations` (previously
`LazyAlgebra.LazyAlgebraLowLevel`) exports types and methods needed to extend
or implement `LazyAlgebra` mappings.
* The finite difference operator was too limited (finite differences were
forcibly computed along all dimensions and only 1st order derivatives were
implemented) and slow (because the leading dimension was used to store the
finite differences along each dimension). The new family of operators can
compute 1st or 2nd derivatives along all or given dimensions. The last
dimension of the result is used to store finite differences along each chosen
dimensions; the operators are much faster (at least 3 times faster for
200×200 arrays for instance). Applying the Gram composition `D'*D` of a
finite difference operator `D` is optimized and is about 2 times faster than
applying `D` and then `D'`. Type `SimpleFiniteDifferences` is no longer
available, use `Diff` instead (`Diff` was available as a shortcut in previous
releases).
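For instance (a sketch; the arguments selecting the derivative order and the
dimensions to differentiate are omitted here):

```julia
using LazyAlgebra
x = rand(200, 200)
D = Diff()     # finite differences along all dimensions of its argument
d = D*x        # size (200, 200, 2): last dimension indexes the directions
g = (D'*D)*x   # the Gram composition D'*D is applied in an optimized way
```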
## Branch 0.1
### Version 0.1.0
* New rule: `α/A -> α*inv(A)`.
* Exported methods and types have been limited to the ones for the end-user.
Use `using LazyAlgebra.LazyAlgebraLowLevel` to use low-level symbols.
* Large sub-package for sparse operators which are linear mappings with few
non-zero coefficients (see doc. for `SparseOperator` and
`CompressedSparseOperator`). All common compressed sparse storage formats
(COO, CSC and CSR) are supported and easy conversion between them is
provided. Generalized matrix-vector multiplication is implemented and is as
  fast or significantly faster than with `SparseArrays.SparseMatrixCSC`. (See
  the sketch at the end of this list.)
* Method `∇(A,x)` yields the Jacobian of the mapping `A` at the variables `x`.
If `A` is a linear-mapping, then `∇(A,x)` yields `A` whatever `x`. The new
type `Jacobian` type is used to denote the Jacobian of a non-linear mapping.
The notation `A'`, which is strictly equivalent to `adjoint(A)`, is only
allowed for linear mappings and always denote the adjoint (conjugate
transpose) of `A`.
* Method `gram(A)` yields `A'*A` for the linear mapping `A`. An associated
*decorated type* `Gram` is used to denote this specific expression and some
constructions are automatically recognized as valid Gram operators. Making
this work for more complex constructions (like sums and compositions) would
require to change the simplification rules (notably for the adjoint of such
constructions).
* Methods `has_oneto_axes`, `densearray`, `densevector` and `densematrix` have
been replaced by `has_standard_indexing` and `to_flat_array` from
`ArrayTools`.
* The exported constant `I = Identity()` has been renamed as `Id` to avoid
conflicts with standard `LinearAlgebra` package. `Id` is systematically
exported while `I` was only exported if not already defined in the `Base`
module. The constant `LinearAlgebra.I` and, more generally, any instance of
`LinearAlgebra.UniformScaling` is recognized by LazyAlgebra in the sense that
they behave as the identity when combined with any LazyAlgebra mapping.
* `operand` and `operands` are deprecated in favor of `unveil` and `terms`
which are less confusing. The `terms` method behaves exactly like the former
`operands` method. Compared to `operand`, the `unveil` method has a better
defined behavior: for a *decorated* mapping (that is an instance of
`Adjoint`, `Inverse` or `InverseAdjoint`), it yields the embedded mapping;
for other LazyAlgebra mappings (including scaled ones), it returns its
argument; for an instance of `LinearAlgebra.UniformScaling`, it returns the
equivalent LazyAlgebra mapping (that is `λ⋅Id`). To get the mapping embedded
in a scaled mapping, call the `unscaled` method.
* `unscaled` is introduced as the counterpart of `multiplier` so that
`multiplier(A)*unscaled(A) === A` always holds. Previously it was wrongly
suggested to use `operand` (now `unveil`) for that but, then the strict
equality was only true for `A` being a scaled mapping. These methods also
work for instances of `LinearAlgebra.UniformScaling`.
* `NonuniformScalingOperator` deprecated in favor of `NonuniformScaling`.
* In most cases, complex-valued arrays and multipliers are supported.
* Argument `scratch` is no longer optional in low-level `vcreate`.
* Not so well defined `HalfHessian` and `Hessian` have been removed
(`HalfHessian` is somewhat equivalent to `Gram`).
* New `gram(A)` method which yields `A'*A` and alias `Gram{typeof(A)}` to
represent the type of this construction.
* The `CroppingOperators` sub-module has been renamed `Cropping`.
* Add cropping and zero-padding operators.
* Left multiplication by a scalar and left/right multiplication by a
non-uniform scaling (a.k.a. diagonal operator) is optimized for sparse and
non-uniform scaling operators.
* Provide `unpack!` method to unpack the non-zero coefficients of a sparse
operator and extend `reshape` to be applicable to a sparse operator.
* Make constructor of a sparse operator (`SparseOperator`) reminiscent of the
`sparse` method. Row and column dimensions can be a single scalar.
* Provide utility method `dimensions` which yields a dimension list out of its
arguments and associated union type `Dimensions`.
* A sparse operator (`SparseOperator`) can be converted to a regular array or
to a sparse matrix (`SparseMatrixCSC`) and reciprocally.
* Trait constructors now return trait instances (instead of type). This is more
*natural* in Julia and avoid having different method names.
* Skip bound checking when applying a `SparseOperator` (unless the operator
structure has been corrupted, checking the dimensions of the arguments is
sufficient to insure that indices are correct).
* Provide `lgemv` and `lgemv!` for *Lazily Generalized Matrix-Vector
multiplication* and `lgemm` and `lgemm!` for *Lazily Generalized
Matrix-Matrix multiplication*. The names of these methods are reminiscent of
`xGEMV` and `xGEMM` BLAS subroutines in LAPACK (with `x` the prefix
corresponding to the type of the arguments).
* Deprecated `fastrange` is replaced by `allindices` which is extended to
scalar dimension and index intervals.
* Complete rewrite of the rules for simplifying complex constructions involving
compositions and linear combination of mappings.
* Add rule for left-division by a scalar.
* `UniformScalingOperator` has been suppressed (was deprecated).
* `show` has been extend for mapping constructions.
* `contents`, too vague, has been suppressed and replaced by `operands` or
`operand`. Accessor `multiplier` is provided to query the multiplier of a
scaled mapping. Methods `getindex`, `first` and `last` are extended. In
principle, direct reference to a field of any base mapping structures is no
longer needed.
* The multiplier of a scaled mapping can now be any number although applying
linear combination of mappings is still limited to real-valued multipliers.
* Add `fftfreq`, `rfftdims`, `goodfftdim` and `goodfftdims` in
`LazyAlgebra.FFT` and re-export `fftshift` and `ifftshift` when `using
LazyAlgebra.FFT`.
* Add `is_same_mapping` to allow for automatic simplifications when building-up
sums and compositions.
* Optimal, an more general, management of temporaries is now done via the
`scratch` argument of the `vcreate` and `apply!` methods. `InPlaceType` trait
and `is_applicable_in_place` method have been removed.
* `promote_scalar` has been modified and renamed as `promote_multiplier`.
* Provide `SimpleFiniteDifferences` operator.
* Provide `SparseOperator`.
* `LinearAlgebra.UniformScaling` can be combined with mappings in LazyAlgebra.
* `UniformScalingOperator` has been deprecated in favor of a `Scaled` version
of the identity.
* Compatible with Julia 0.6, 0.7 and 1.0.
* Provide (partial) support for complex-valued arrays.
* Traits replace abstract types such as `Endomorphism`, `SelfAdjointOperator`,
etc. Some operators may be endomorphisms or not. For instance the
complex-to-complex `FFTOperator` is an endomorphism while the real-to-complex
FFT is not. Another example: `NonuniformScaling` is self-adjoint if its
coefficients are reals, not if they are complexes. This also overcomes the
fact that multiple heritage is not possible in Julia.
* The `apply!` method has been rewritten to allow for optimized combination to
do `y = α*Op(A)⋅x + β*y` (as in LAPACK and optimized if scalars have values
0, ±1):
```julia
apply!(α::Real, Op::Type{<:Operations}, A::LinearMapping, x, β::Real, y)
apply!(β::Real, y, α::Real, Op::Type{<:Operations}, A::LinearMapping, x)
```
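The sparse-operator item above can be illustrated as follows (a sketch; these
constructors are the ones exercised in the package test suite):

```julia
using LazyAlgebra
A = rand(4, 5)
A[A .< 0.7] .= 0           # make most entries zero
S = SparseOperatorCSR(A)   # also: SparseOperatorCSC(A), SparseOperatorCOO(A)
x = rand(5)
y = rand(4)
S*x ≈ A*x                  # true
S'*y ≈ A'*y                # true
```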
# Lazy algebra framework
[![Stable Doc.][doc-stable-img]][doc-stable-url]
[![Dev. Doc.][doc-dev-img]][doc-dev-url]
[![License][license-img]][license-url]
[![Build Status][github-ci-img]][github-ci-url]
[![Build Status][appveyor-img]][appveyor-url]
[![Coverage][codecov-img]][codecov-url]
**LazyAlgebra** is a [Julia](http://julialang.org/) package to generalize the
notion of matrices and vectors used in
[linear algebra](https://en.wikipedia.org/wiki/Linear_algebra).
This package is used in:
- [OptimPackNextGen](https://github.com/emmt/OptimPackNextGen.jl) which
provides optimization methods for large scale problems in pure Julia.
- [LinearInterpolators](https://github.com/emmt/LinearInterpolators.jl) which
provides methods for interpolating multi-dimensional arrays.
Similar Julia packages:
- [LinearMaps](https://github.com/Jutho/LinearMaps.jl)
- [LinearOperators](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl)
[doc-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[doc-stable-url]: https://emmt.github.io/LazyAlgebra.jl/stable
[doc-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[doc-dev-url]: https://emmt.github.io/LazyAlgebra.jl/dev
[license-url]: ./LICENSE.md
[license-img]: http://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat
[github-ci-img]: https://github.com/emmt/LazyAlgebra.jl/actions/workflows/CI.yml/badge.svg?branch=master
[github-ci-url]: https://github.com/emmt/LazyAlgebra.jl/actions/workflows/CI.yml?query=branch%3Amaster
[appveyor-img]: https://ci.appveyor.com/api/projects/status/github/emmt/LazyAlgebra.jl?branch=master
[appveyor-url]: https://ci.appveyor.com/project/emmt/LazyAlgebra-jl/branch/master
[codecov-img]: http://codecov.io/github/emmt/LazyAlgebra.jl/coverage.svg?branch=master
[codecov-url]: http://codecov.io/github/emmt/LazyAlgebra.jl?branch=master
* Define `const RealComplex{T<:Real} = Union{T,Complex{T}}` and use better
names for `Reals`, `Floats` and `Complexes`.
* In `using LinearAlgebra`, `norm(z,1)` is defined as the sum of the absolute
values of the elements of the complex-valued array `z` while `norm(z,Inf)` is
defined as the maximum absolute value of the elements of the complex-valued
array `z` .
* Fix doc. about the type argument for `vnorm2(x)`, etc.
* `promote_multiplier` should have 2 different behaviors: to allow for using
BLAS routines, all multipliers must be converted to complexes if array
arguments are complex-valued.
* Remove file `test/common.jl`.
* Rationalize exceptions and error messages.
* Separate simplifications (like `inv(A)*A -> Id`) and optimizations (like
multiplying two diagonal operators yields a single diagonal operator),
perhaps via methods `simplify(X)` and `optimize(X)` with `X` a construction
of mappings.
* Deprecate `is_same_mapping` in favor of `are_same_mappings` and
`is_same_mutable_object` in favor of `are_same_objects` which is more general.
```julia
@deprecate is_same_mapping are_same_mappings
are_same_objects(::Any, ::Any) = false # always false if types are different
are_same_objects(a::T, b::T) where T =
(T.mutable ? pointer_from_objref(a) === pointer_from_objref(b) : a === b)
```
  The above implementation does exactly what `===` does, so it is not needed.
Discuss extending `are_same_mappings` for any derived mapping type.
  Method `are_same_mappings` should default to `===` but may be extended
  for special cases like FFT operators. The documentation should explain when
  `are_same_mappings` has to be extended.
* Optimize composition of cropping and zero-padding operators. The adjoint of
a cropping or zero-padding operator is the pseudo-inverse of the operator,
  hence extend the `pinv` method. If input and output dimensions are the same
(and offsets are all zeros), a cropping/zero-padding operator is the
identity.
* `vscale!` can call `rmul!`?
* Implement *preconditioned* conjugate gradient.
* Simplify left/right multiplication of a sparse/diagonal operator by a diagonal
operator. Same thing for sparse interpolator. Take care of scaling by
a multiplier (otherwise this makes little sense).
* Automatically simplify composition of diagonal operators.
* Do not make `A*x` equivalent to `A(x)` for non-linear mappings.
* Provide means to convert a sparse operator to a regular array or to a sparse
matrix and reciprocally. Use BLAS/LAPACK routines for sparse operators?
* Write an implementation of the L-BFGS operator and of the SR1 operator and
perhaps of other low-rank operators.
* Use more extensively BLAS subroutines. Fix usage of BLAX `dot` and `axpy`
routines for dense arrays (use flat arrays).
* SelfAdjoint should not be a trait? Perhaps better to extend `adjoint(A::T) =
A` when `T` is self-adjoint.
* Provide simplification rules for sums and compositions of diagonal operators
(which are also easy to invert).
* Add rules so that the composition of two scaled linear operators, say
`(αA)⋅(βB)`, automatically yields `((αβ)A)⋅B` when `A` is a linear mapping.
This cannot easily propagate to have, *e.g.* `(αA)⋅(βB)⋅(γC)` automatically
yields `((αβγ)A)⋅B⋅C` when `A` and `B` are linear mappings. Perhaps this
could be solved with some `simplify(...)` method to be applied to constructed
mappings. In fact, the solution is to have `(αA)⋅(βB)` automatically
represented (by the magic of the constructors chain) as a *scaled composition*
that is `(αβ)(A⋅B)` (that is pull the scale factor outside the expression).
Indeed, `(αA)⋅(βB)⋅(γC)` will then automatically becomes `(αβ)(A⋅B)⋅(γC)` and
then `(αβγ)(A⋅B⋅C)` with no additional efforts.
- `α*A` => `A` if `α = 1`
- `α*A` => `O` if `α = 0` with `O` the null mapping which is represented as
`0` times a mapping, here `A`. This is needed to know the result of
applying the null mapping. In other words, there is no *universal* neutral
element for the addition of mappings; whereas the identity `Id` is the
*universal* neutral element for the composition of mappings.
- `A*(β*B)` => `β*(A*B)` if `A` is a a linear mapping.
- `(α*A)*(β*B)` => `(α*β)*(A*B)` if `A` is a linear mapping.
- As a consequence of the above rules, `(α*A)*(β*B)*(γ*C)` =>
`(α*βγ*)*(A*B*C)` if `A` and `B` are linear mappings, and so on.
- `α\A` => `(1/α)*A`
- `A/(β*B)` => `β\(A/B)` if `A` is a linear mapping.
- `(α*A)/(β*B)` => `(α/β)*(A/B)` if `A` is a linear mapping.
- `A\(β*B)` => `β*(A\B)` if `A` is a linear mapping.
- `(α*A)\(β*B)` => `(β/α)*(A\B)` if `A` is a linear mapping.
- `(α*Id)*A` => `α*A` where `Id` is the identity.
- `A/A`, `A\A`, or `inv(A)*A` => `Id` for `A` *invertible* (this trait means
that `A` can be safely assumed invertible, possibilities: `Invertible`,
`NonInvertible` to trigger an error on attempt to invert,
`PossiblyInvertible` for mappings that may be invertible but not always and
  for which it is too costly to check. For instance, checking for a uniform
scaling `(α*Id)` is trivial as it suffices to check whether `α` is
non-zero).
* Concrete implementation of mappings on arrays is not consistent for
  complex-valued arrays.
* Decide that, unless forbidden, `inv` is always possible (may be clash when
trying to apply). Or decide the opposite.
* Optimize `FiniteDifferences` for other multipliers.
* Make a demo like:
```julia
using LazyAlgebra
psf = read_image("psf.dat")
dat = read_image("data.dat")
wgt = read_image("weights.dat")
µ = 1e-3 # choose regularization level
.... # deal with sizes, zero-padding, or cropping etc.
F = FFTOperator(dat) # make a FFT operator to work with arrays similar to dat
# Build instrumental model H (convolution by the PSF)
H = F\Diag(F*ifftshift(psf))*F
W = Diag(wgt) # W is the precision matrix for independent noise
D = Diff() # D will be used for the regularization
A = H'*W*H + µ*D'*D # left hand-side matrix of the normal equations
b = H'*W*dat # right hand-side vector of the normal equations
img = conjgrad(A, b) # solve the normal equations using linear conjugate gradients
save_image(img, "result.dat")
```
Notes: (1) `D'*D` is automatically simplified into a `HalfHessian`
construction whose application to a *vector*, say `x`, is faster than
`D'*(D*x)`. (2) The evaluation of `H'*W*H` automatically uses the fewest
temporary workspaces.
* Replace `Coder` by using available meta-programming tools.
# Home
This is the documentation of the LazyAlgebra package for Julia.
The sources are [here](https://github.com/emmt/LazyAlgebra.jl).
## Contents
```@contents
Pages = [
"install.md",
"introduction.md",
"vectors.md",
"sparse.md",
"mappings.md",
"simplifications.md",
"refs.md"]
```
## Index
```@index
```
# Installation
LazyAlgebra is not yet an [offical Julia package](https://pkg.julialang.org/)
but it is easy to install it from Julia as explained below. Note that
LazyAlgebra requires the [ArrayTools](https://github.com/emmt/ArrayTools.jl)
package.
## Using the package manager
At the [REPL of
Julia](https://docs.julialang.org/en/stable/manual/interacting-with-julia/),
hit the `]` key to switch to the package manager REPL (you should get a
`... pkg>` prompt) and type:
```julia
pkg> add https://github.com/emmt/ArrayTools.jl
pkg> add https://github.com/emmt/StructuredArrays.jl
pkg> add https://github.com/emmt/ZippedArrays.jl
pkg> add https://github.com/emmt/LazyAlgebra.jl
```
where `pkg>` represents the package manager prompt and `https` protocol has
been assumed; if `ssh` is more suitable for you, then type:
```julia
pkg> add [email protected]:emmt/ArrayTools.jl
pkg> add [email protected]:emmt/StructuredArrays.jl
pkg> add [email protected]:emmt/ZippedArrays.jl
pkg> add [email protected]:emmt/LazyAlgebra.jl
```
instead. To check whether the LazyAlgebra package works correctly, type:
```julia
pkg> test LazyAlgebra
```
Later, to update to the last version (and run tests), you can type:
```julia
pkg> update LazyAlgebra
pkg> build LazyAlgebra
pkg> test LazyAlgebra
```
If something goes wrong, it may be because you already have an old version of
LazyAlgebra. Uninstall LazyAlgebra as follows:
```julia
pkg> rm LazyAlgebra
pkg> gc
pkg> add https://github.com/emmt/LazyAlgebra.jl
```
before re-installing.
To revert to Julia's REPL, hit the `Backspace` key at the `... pkg>` prompt.
## Installation in scripts
To install LazyAlgebra in a Julia script, write:
```julia
if VERSION >= v"0.7.0-"; using Pkg; end
Pkg.add(PackageSpec(url="https://github.com/emmt/LazyAlgebra.jl", rev="master"));
```
or with `url="[email protected]:emmt/LazyAlgebra.jl"` if you want to use `ssh`.
This also works from the Julia REPL.
# Lazy algebra framework
**LazyAlgebra** is a [Julia](http://julialang.org/) package to generalize the
notion of matrices and vectors used in
[linear algebra](https://en.wikipedia.org/wiki/Linear_algebra).
Many numerical methods (*e.g.* in numerical optimization or digital signal
processing) involve essentially
[linear operations](https://en.wikipedia.org/wiki/Vector_space) on the
considered variables. LazyAlgebra provides a framework to implement these
kind of numerical methods independently of the specific type of the variables.
This is exploited in
[OptimPackNextGen](https://github.com/emmt/OptimPackNextGen.jl) package, an
attempt to provide most optimization algorithms of
[OptimPack](https://github.com/emmt/OptimPack) in pure Julia.
LazyAlgebra also provides a flexible and extensible framework for creating
complex mappings and linear mappings to operate on the variables.
A few concepts are central to LazyAlgebra:
* *vectors* represent the variables of interest and can be anything providing a
few methods are implemented for their specific type;
* *mappings* are any functions between such vectors;
* *linear mappings* (a.k.a. linear operators) behave linearly with respect to
their arguments.
There are several reasons to have special methods for basic vector operations
rather than relying on Julia linear algebra methods. First, the notion of
*vector* is different: in Julia a mono-dimensional array is a vector while,
here, any object with embedded values can be assumed to be a vector provided a
subset of methods is specialized for this type of object. For instance,
LazyAlgebra provides such methods specialized for real-valued and
complex-valued (with real components) arrays of any dimensionality. Second,
the meaning of the methods may have to be different. For instance, only
real-valued functions can be minimized (or maximized) and for this task,
complex-valued variables can just be considered as real-valued variables (each
complex value being equivalent to a pair of reals).
## Mappings
LazyAlgebra features:
* flexible and extensible framework for creating complex mappings;
* *lazy* evaluation of the mappings;
* *lazy* assumptions when combining mappings;
* efficient memory allocation by avoiding temporaries.
### General mappings
A `Mapping` can be any function between two variables spaces. Using
Householder-like notation (that is upper case Latin letters denote *mappings*,
lower case Latin letters denote *variables*, and Greek letters denote
*scalars*), then:
* `A(x)`, `A*x` or `A⋅x` yields the result of applying the mapping `A` to `x`;
* `A\x` yields the result of applying the inverse of `A` to `x`;
Simple constructions are allowed for any kind of mappings and can be used to
create new instances of mappings which behave correctly. For instance:
* `B = α*A` (where `α` is a number) is a mapping which behaves as `A` times
`α`; that is `B(x)` yields the same result as `α*(A(x))`.
* `C = A + B + ...` is a mapping which behaves as the sum of the mappings `A`,
`B`, ...; that is `C(x)` yields the same result as `A(x) + B(x) + ...`.
* `C = A*B`, `C = A∘B` or `C = A⋅B` is a mapping which behaves as the
composition of the mappings `A` and `B`; that is `C⋅x` yields the same result
as `A(B(x))`. As for the sum of mappings, there may be an arbitrary number
of mappings in a composition; for example, if `D = A*B*C` then `D(x)` yields
the same result as `A(B(C(x)))`.
* `C = A\B` is a mapping such that `C(x)` yields the same result as
`inv(A)(B(x))`.
* `C = A/B` is a mapping such that `C(x)` yields the same result as
`A(inv(B)(x))`.
These constructions can be combined to build up more complex mappings. For
example:
* `D = A*(B + 3C)` is a mapping such that `D⋅x` yields the same result as
`A(B(x) + 3*C(x))`.
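For instance, with concrete operators taken from this package (a small sketch;
`NonuniformScaling` multiplies element-wise by its weights and `Id` is the
identity):

```julia
using LazyAlgebra
w = rand(4)
A = NonuniformScaling(w)    # a diagonal (element-wise scaling) operator
B = 2A + Id                 # built lazily, nothing is evaluated yet
x = rand(4)
B(x) ≈ 2 .* (w .* x) .+ x   # true (up to rounding)
```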
### Linear mappings
A `LinearMapping` can be any linear mapping between two spaces. This abstract
subtype of `Mapping` is introduced to extend the notion of *matrices* and
*vectors*. Assuming the type of `A` inherits from `LinearMapping`, then:
* for linear mappings `A` and `B`, `A⋅B` is the same as `A∘B` or `A*B` which
yields the composition of `A` and `B` whose effect is to apply `B` and then
`A`;
* `A'⋅x` and `A'*x` yield the result of applying the adjoint of the mapping
`A` to `x`;
* `A'\x` yields the result of applying the adjoint of the inverse of mapping
`A` to `x`.
* `B = A'` is a mapping such that `B⋅x` yields the same result as `A'⋅x`.
!!! note
Beware that, due to the priority of operators in Julia, `A*B(x)` is the
same as `A(B(x))` not `(A*B)(x)`.
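A quick sketch of this precedence (assuming `A` and `B` are mappings and `x`
a suitable vector):
```julia
y1 = A*B(x)    # parsed as A*(B(x)): apply B to x, then apply A to the result
y2 = (A*B)(x)  # build the composition A*B first, then apply it to x
# Both yield the same values, but only the second constructs the mapping A*B.
```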
## Automatic simplifications
An important feature of LazyAlgebra framework for mappings is that a *number
of simplifications are automatically made at construction time*. For instance,
assuming `A` is a mapping:
```julia
B = A'
C = B'
```
yields `C` which is just a reference to `A`. In other words,
`adjoint(adjoint(A)) -> A` holds. Likewise,
```julia
D = inv(A)
E = inv(D)
```
yields `E` which is another reference to `A`. In other words,
`inv(inv(A)) -> A` holds assuming by default that `A` is invertible. This
follows the principles of laziness. It is, however, possible to prevent this by
extending the `Base.inv` method so as to throw an exception when applied to the
specific type of `A`:
```julia
Base.inv(::SomeNonInvertibleMapping) = error("non-invertible mapping")
```
where `SomeNonInvertibleMapping <: Mapping` is the type of `A`.
Other example of simplifications:
```julia
B = 3A
C = 7B'
```
where mappings `B` and `C` are such that `B*x -> 3*(A*x)` and `C*x -> 21*(A*x)`
for any *vector* `x`. That is `C*x` is evaluated as `21*(A*x)` not as
`7*(3*(A*x))` thanks to simplifications occurring while the mapping `C` is
constructed.
Using the `->` to denote in the right-hand side the actual construction made by
LazyAlgebra for the expression in the left-hand side and assuming `A`, `B` and
`C` are linear mappings, the following simplifications will occur:
```julia
(A + C + B + 3C)' -> A' + B' + 4C'
(A*B*3C)' -> 3C'*B'*A'
inv(A*B*3C) -> 3\inv(C)*inv(B)*inv(A)
```
However, if `M` is a non-linear mapping, then:
```julia
inv(A*B*3M) -> inv(M)*(3\inv(B))*inv(A)
```
which can be compared to `inv(A*B*3C)` when all operands are linear mappings.
!!! note
Due to the associative rules applied by Julia, parentheses are needed
around constructions like `3*C` if it has to be interpreted as `3C` in
all contexts. Otherwise, `A*B*(3*C)` is equivalent to `A*B*3C` while
`A*B*3*C` is interpreted as `((A*B)*3)*C`; that is, compose `A` and `B`,
apply `A*B` to `3` and right multiply the result by `C`.
## Creating new mappings
LazyAlgebra provides a number of simple mappings. Creating new primitive
mapping types (not by combining existing mappings as explained above) which
benefit from the LazyAlgebra framework is as simple as declaring a new
mapping subtype of `Mapping` (or one of its abstract subtypes) and extending
two methods `vcreate` and `apply!` specialized for the new mapping type. For
mode details, see [here](mappings.md).
# Methods for mappings
`LazyAlgebra` provides a number of mappings and linear operators. To create
new primitive mapping types (not by combining existing mappings) and benefit
from the `LazyAlgebra` infrastructure, you have to:
* Create a new type derived from `Mapping` or one of its abstract sub-types
such as `LinearMapping`.
* Implement at least two methods `apply!` and `vcreate` specialized for the new
mapping type. Applying the mapping is done by the former method. The latter
method is called to create a new output variable suitable to store the result
of applying the mapping (or one of its variants) to some input variable.
* Optionally specialize method `identical` for two arguments of the new
mapping type.
## The `vcreate` method
The signature of the `vcreate` method to be implemented by specific mapping
types is:
```julia
vcreate(::Type{P}, A::Ta, x::Tx, scratch::Bool) -> y
```
where `A` is the mapping, `x` its argument and `P` is one of `Direct`,
`Adjoint`, `Inverse` and/or `InverseAdjoint` (or equivalently `AdjointInverse`)
and indicates how `A` is to be applied:
* `Direct` to apply `A` to `x`, *e.g.* to compute `A⋅x`;
* `Adjoint` to apply the adjoint of `A` to `x`, *e.g.* to compute `A'⋅x`;
* `Inverse` to apply the inverse of `A` to `x`, *e.g.* to compute `A\x`;
* `InverseAdjoint` or `AdjointInverse` to apply the inverse of `A'` to `x`,
*e.g.* to compute `A'\x`.
The result returned by `vcreate` is a new output variable suitable to store
the result of applying the mapping `A` (or one of its variants as indicated by
`P`) to the input variable `x`.
The `scratch` argument is a boolean to let the caller indicate whether the
input variable `x` may be re-used to store the result. If `scratch` is `true`
and if that makes sense, the value returned by `vcreate` may be `x`. Calling
`vcreate` with `scratch=true` can be used to limit the allocation of resources
when possible. Having `scratch=true` is only indicative and a specific
implementation of `vcreate` may legitimately always assume `scratch=false` and
return a new variable whatever the value of this argument (e.g. because
applying the considered mapping *in-place* is not possible or because the
considered mapping is not an endomorphism). Of course, the opposite behavior
(i.e., assuming that `scratch=true` while the method was called with
`scratch=false`) is forbidden.
The result returned by `vcreate` should be of predictable type to ensure
*type-stability*. Checking the validity (*e.g.* the size) of argument `x` in
`vcreate` may be skipped because this argument will be eventually checked by
the `apply!` method.
## The `apply!` method
The signature of the `apply!` method to be implemented by specific mapping
types is:
```julia
apply!(α::Number, ::Type{P}, A::Ta, x::Tx, scratch::Bool, β::Number, y::Ty) -> y
```
This method shall overwrite the contents of the output variable `y` with the
result of `α*P(A)⋅x + β*y` where `P` is one of `Direct`, `Adjoint`, `Inverse`
and/or `InverseAdjoint` (or equivalently `AdjointInverse`) and shall return
`y`. The convention is that the prior contents of `y` is not used at all if `β
= 0` so the contents of `y` does not need to be initialized in that case.
Not all operations `P` must be implemented, only the supported ones. For
iterative resolution of (inverse) problems, it is generally needed to implement
at least the `Direct` and `Adjoint` operations for linear operators. However
nonlinear mappings are not supposed to implement the `Adjoint` and derived
operations.
Argument `scratch` is a boolean to let the caller indicate whether the contents
of the input variable `x` may be overwritten during the operations. If
`scratch=false`, the `apply!` method shall not modify the contents of `x`.
## The `identical` method
The method `identical(A,B)` yields whether `A` and `B` are the same mappings in
the sense that their effects will **always** be the same. This method is used
to perform some simplifications and optimizations and may have to be
specialized for specific mapping types. The default implementation is to
return `A === B`.
The returned result may be true although `A` and `B` are not necessarily the
same object. In the below example, if `A` and `B` are two sparse matrices
whose coefficients and indices are stored in the same vectors (as can be tested
with the `===` operator) this method should return `true` because the two
operators will behave identically (any changes in the coefficients or indices
of `A` will be reflected in `B`). If any of the vectors storing the
coefficients or the indices are not the same objects, then `identical(A,B)`
must return `false` even though the stored values may be the same because it is
possible, later, to change one operator without affecting identically the
other.
## Example
The following example implements a simple sparse linear operator which is able
to operate on multi-dimensional arrays (the so-called *variables*):
```julia
# Use LazyAlgebra framework and import methods that need to be extended.
using LazyAlgebra
import LazyAlgebra: vcreate, apply!, input_size, output_size
struct SparseOperator{T<:AbstractFloat,M,N} <: LinearMapping
outdims::NTuple{M,Int}
inpdims::NTuple{N,Int}
A::Vector{T}
I::Vector{Int}
J::Vector{Int}
end
input_size(S::SparseOperator) = S.inpdims
output_size(S::SparseOperator) = S.outdims
function vcreate(::Type{Direct}, S::SparseOperator{Ts,M,N},
x::DenseArray{Tx,N},
scratch::Bool) where {Ts<:Real,Tx<:Real,M,N}
@assert size(x) == input_size(S)
Ty = promote_type(Ts, Tx)
return Array{Ty}(undef, output_size(S))
end
function vcreate(::Type{Adjoint}, S::SparseOperator{Ts,M,N},
x::DenseArray{Tx,M},
scratch::Bool) where {Ts<:Real,Tx<:Real,M,N}
@assert size(x) == output_size(S)
Ty = promote_type(Ts, Tx)
return Array{Ty}(undef, input_size(S))
end
function apply!(α::Real,
::Type{Direct},
S::SparseOperator{Ts,M,N},
x::DenseArray{Tx,N},
scratch::Bool,
β::Real,
y::DenseArray{Ty,M}) where {Ts<:Real,Tx<:Real,Ty<:Real,M,N}
@assert size(x) == input_size(S)
@assert size(y) == output_size(S)
β == 1 || vscale!(y, β)
if α != 0
A, I, J = S.A, S.I, S.J
alpha = convert(promote_type(Ts,Tx,Ty), α)
@assert length(I) == length(J) == length(A)
for k in 1:length(A)
i, j = I[k], J[k]
y[i] += alpha*A[k]*x[j]
end
end
return y
end
function apply!(α::Real,
::Type{Adjoint},
S::SparseOperator{Ts,M,N},
x::DenseArray{Tx,M},
scratch::Bool,
β::Real,
y::DenseArray{Ty,N}) where {Ts<:Real,Tx<:Real,Ty<:Real,M,N}
@assert size(x) == output_size(S)
@assert size(y) == input_size(S)
β == 1 || vscale!(y, β)
if α != 0
A, I, J = S.A, S.I, S.J
alpha = convert(promote_type(Ts,Tx,Ty), α)
@assert length(I) == length(J) == length(A)
for k in 1:length(A)
i, j = I[k], J[k]
y[j] += alpha*A[k]*x[i]
end
end
return y
end
identical(A::T, B::T) where {T<:SparseOperator} =
(A.outdims == B.outdims && A.inpdims == B.inpdims &&
A.A === B.A && A.I === B.I && A.J === B.J)
```
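For illustration, the operator defined above might be used as follows (a
sketch; the sizes, values and indices are arbitrary):
```julia
S = SparseOperator((2,), (3,), [1.0, 2.0], [1, 2], [3, 1])  # 2 "rows", 3 "columns"
x = [1.0, 0.0, 5.0]
y = S*x     # -> [5.0, 2.0] (the Direct operation)
z = S'*y    # -> [4.0, 0.0, 5.0] (the Adjoint operation)
```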
Remarks:
- In our example, arrays are restricted to be *dense* so that linear indexing
is efficient. For the sake of clarity, the above code is intended to be
correct although there are many possible optimizations.
- If `α = 0` there is nothing to do except scale `y` by `β`.
- The call to `vscale!(y, β)` is to properly initialize `y`. Remember the
convention that the contents of `y` is not used at all if `β = 0`, so `y`
does not need to be properly initialized in that case: it will simply be
zero-filled by the call to `vscale!`. The statement
```julia
β == 1 || vscale!(y, β)
```
is equivalent to:
```julia
if β != 1
vscale!(y, β)
end
```
which may be simplified to just calling `vscale!` unconditionally:
```julia
vscale!(y, β)
```
as `vscale!(y, β)` does nothing if `β = 1`.
- `@inbounds` could be used for the loops but this would require checking that
all indices are within the bounds. In this example, only `k` is guaranteed
to be valid, `i` and `j` have to be checked.
# Reference
The following provides detailed documentation about the types and methods provided
by the `LazyAlgebra` package. This information is also available from
the REPL by typing `?` followed by the name of a method or a type.
## Methods for linear mappings
```@docs
nrows
ncols
row_size
col_size
```
## Sparse operators
### Types and compressed storage formats
```@docs
SparseOperator
CompressedSparseOperator
SparseOperatorCOO
SparseOperatorCSC
SparseOperatorCSR
```
### Methods
```@docs
LazyAlgebra.SparseOperators.unpack!
```
### Low-level interface
These methods are provided by `using LazyAlgebra.SparseMethods`.
```@docs
LazyAlgebra.SparseMethods.each_row
LazyAlgebra.SparseMethods.each_col
LazyAlgebra.SparseMethods.each_off
LazyAlgebra.SparseMethods.get_row
LazyAlgebra.SparseMethods.get_rows
LazyAlgebra.SparseMethods.get_col
LazyAlgebra.SparseMethods.get_cols
LazyAlgebra.SparseMethods.get_val
LazyAlgebra.SparseMethods.get_vals
LazyAlgebra.SparseMethods.set_val!
LazyAlgebra.SparseMethods.get_offs
LazyAlgebra.SparseMethods.copy_rows
LazyAlgebra.SparseMethods.copy_cols
LazyAlgebra.SparseMethods.copy_vals
```
# Simplification and optimization of combinations of mappings
LazyAlgebra provides mappings and structures to store arbitrarily complex
combinations of mappings. When constructing such combinations, a number of
simplifications are automatically performed. These simplifications follow a
number of rules which are explained below.
## Rationale for simplification rules
A distinction must be made between:
- *basic* mappings which are the simplest mappings and are the building blocks
of more complex constructions,
- *scaled* or *decorated* mappings (see [`LazyAlgebra.DecoratedMapping`](@ref))
which are simple structures wrapped around a mapping and which are almost
costless to build,
- *combinations* of mappings which are sums or compositions of mappings.
As general guidelines, simplifications that are automatically performed at
construction time should:
- require as few computations as possible,
- avoid creating new mappings with mutable contents,
- be type stable if possible,
- preserve basic mappings embedded in more complex constructions to
make it more likely to recognize that two mappings are identical from their types.
Imposing *type stability* is not a strict requirement as it would prevent a lot
of worthwhile simplifications like:
- `λ*A` yields `A` if `λ = 1`, a scaled mapping otherwise,
- `A + B` yields `2*A` if `B ≡ A`, a sum of mappings otherwise.
There are some cases where the rules to follow are not obvious:
* Multiplying a sum of mappings, say `λ*(A + B + C)`, can be constructed as:
1. a sum of scaled mappings, `λ*A + λ*B + λ*C`;
2. a scaled sum.
* Taking the adjoint of a sum of mappings, say `(A + B + C)'`, can be:
1. simplified into a sum of adjoints, `A' + B' + C'`;
2. constructed as the adjoint of the sum.
* Taking the adjoint of a composition of mappings, say `(A*B*C)'`, can be:
1. simplified into a composition of adjoints, `C'*B'*A'`;
2. constructed as the adjoint of the composition.
* Taking the inverse of a composition of mappings, say `inv(A*B*C)'`, can be:
1. simplified into a composition of inverses, `inv(C)*inv(B)*inv(A)`;
2. constructed as the inverse of the composition.
For all these cases, the second solution requires that (i) the `apply` method
be specialized to be applicable to the resulting construction and, for
consistency, that (ii) the other possible expressions be automatically
recognized and constructed in the same way (for instance `C'*B'*A'` should be
automatically simplified as `(A*B*C)'`).
In the case of the scaling of a sum, it is more efficient to use the second
form because it factorizes the multiplication, performing it only once at the end of the computation.
On the one hand, the first solution is simpler to implement. On the other
hand, with the second solution, it is easier to write simplification rules that
apply automatically.
The current rules in LazyAlgebra (see the next section) implement the second
solution for the multiplication of a sum by a scalar and the first solution in
all other cases. This is expected to change in the future.
## Implemented simplification rules:
- Multipliers are factorized to the left as far as possible.
- Adjoint of a sum (or a composition) of terms is rewritten as the sum
(respectively composition) of the adjoint of the terms.
- Adjoint of a scaled mapping is rewritten as a scaled adjoint of the
mapping. Similarly, inverse of a scaled mapping is rewritten as a scaled
inverse of the mapping, if the mapping is linear, or as the inverse of the
mapping times a scaled identity otherwise.
- Adjoint of the inverse is rewritten as inverse of the adjoint.
- Inner constructors are fully qualified but check arguments. Un-qualified
outer constructors just call the inner constructors with the suitable
parameters.
- To simplify a sum, the terms corresponding to identical mappings (possibly
scaled) are first grouped to produce a single mapping (possibly scaled)
per group, the resulting terms are sorted (so that all equivalent
expressions yield the same result) and the "zeros" eliminated (if all
terms are "zero", the sum simplifies to the first one). For now, the
sorting is not absolutely perfect as it is based on `objectid()` hashing
method. The odds of having the same identifier for two different things
are however extremely low.
- To simplify a composition, a fusion algorithm is applied and "ones" are
eliminated. It is assumed that composition is non-commutative so the
ordering of terms is left unchanged. Thanks to this, simplification rules
for simple compositions (made of two non-composition mappings) can be
automatically performed by proper dispatching rules. Calling the fusion
algorithm is only needed for more complex compositions.
The simplification algorithm is not perfect (LazyAlgebra is not intended to be
used for symbolic computations) but does a reasonable job. In particular, complex
mappings built using the same sequences should be simplified in the same way
and thus be correctly identified as being identical.
Since applying a construction of mappings results in applying its components,
most simplification rules are implemented by specializing the usual operators for mapping arguments. For example:
```julia
+(A::Adjoint, B::Adjoint) = (A + B)'
*(A::Adjoint, B::Adjoint) = (B*A)'
*(A::Inverse, B::Inverse) = inv(B*A)
*(A::Inverse{T}, B::T) where {T<:Mapping} =
(identical(unveil(A), B) ? Id : Composition(A,B))
*(A::T, B::Inverse{T}) where {T<:Mapping} =
(identical(A, unveil(B)) ? Id : Composition(A,B))
\(A::Mapping, B::Mapping) = inv(A)*B
/(A::Mapping, B::Mapping) = A*inv(B)
adjoint(A::Adjoint) = unveil(A)
adjoint(A::AdjointInverse) = inv(A)
adjoint(A::Inverse) = AdjointInverse(A)
inv(A::Inverse) = unveil(A)
inv(A::Adjoint) = AdjointInverse(A)
inv(A::AdjointInverse) = A'
```
## Coding recommendations
To make the code easier to maintain and avoid inconsistencies, there are a few
recommendations to follow. This is especially true for coding the
simplification rules that are automatically performed.
* Simplification rules are initiated by specializing for mapping arguments the
operators (addition, multiplication, adjoint, etc.) used in Julia expressions.
Hence simplifications are automatically performed when such expressions
appear in the code.
* More complex rules may require calling auxiliary helper functions like
`simplify`. But the entry point for a simplification is always a simple
expression so that the end user shall not have to call `simplify` directly.
* To avoid building non-simplified constructions, the `Adjoint`, `Inverse`,
`AdjointInverse`, `Scaled`, `Sum`, `Composition`, and `Gram` constructors
should not be directly called by an end user who should use expressions like
`A'` to construct the adjoint, `inv(A)` for the inverse, etc. To discourage
calling constructors for combining mappings, these constructors are not
exported by LazyAlgebra.
* Trust the optimizer and resist the tendency to write very specialized
rules for complex cases; favor simpler and more
general rules that, applied together, yield the correct answer.
For instance, the following rules would be sufficient to implement the
right-multiplication and the right-division of a mapping by a scalar:
```julia
*(A::Mapping, α::Number) = (is_linear(A) ? α*A : A*(α*Id))
/(A::Mapping, α::Number) = A*inv(α)
```
Only the right-multiplication is in charge of deciding whether the operation
is commutative, and these two methods return their result as an expression to
delegate the construction of the result to the methods implementing the
left-multiplication of a mapping by a scalar, the composition of two
mappings, etc.
# Sparse operators
A sparse operator ([`SparseOperator`](@ref)) in `LazyAlgebra` is the
generalization of a sparse matrix. Like a [`GeneralMatrix`](@ref), rows and
columns may be multi-dimensional. However unlike a [`GeneralMatrix`](@ref), a
sparse operator only stores its *structural non-zero* entries and thus requires
less memory and is usually faster to apply.
There are many different possibilities for storing a sparse operator, hence
`SparseOperator{T,M,N}` is an abstract type inherited by the concrete types
implementing compressed sparse storage in various formats. Parameter `T` is
the type of the elements while parameters `M` and `N` are the number of
dimensions of the *rows* and of the *columns* respectively. Objects of this
kind are a generalization of sparse matrices in the sense that they implement
linear mappings which can be applied to `N`-dimensional arguments to produce
`M`-dimensional results (as explained below). The construction of a sparse
operator depends on its storage format. Several concrete implementations are
provided: [*Compressed Sparse Row* (CSR)](#Compressed-sparse-row-format),
[*Compressed Sparse Column* (CSC)](#Compressed-sparse-column-format) and
[*Compressed Sparse Coordinate* (COO)](#Compressed-sparse-coordinate-format).
## Basic methods
The following methods are generally applicable to any sparse operator `A`:
- `eltype(A)` yields `T`, the type of the elements of `A`;
- `row_size(A)` yields an `M`-tuple of `Int`: the size of the rows of `A`, this
is equivalent to `output_size(A)`;
- `col_size(A)` yields an `N`-tuple of `Int`: the size of the columns of `A`,
this is equivalent to `input_size(A)`;
- `nrows(A)` yields `prod(row_size(A))`, the equivalent number of rows of `A`;
- `ncols(A)` yields `prod(col_size(A))`, the equivalent number of columns of
`A`.
- `ndims(A)` yields `M + N` the number of dimensions of the regular array
corresponding to the sparse operator `A`;
- `size(A)` yields `(row_size(A)..., col_size(A)...)` the size of the regular
array corresponding to the sparse operator `A`;
- `length(A)` yields `prod(size(A))` the number of elements of the regular
array corresponding to the sparse operator `A`;
- `nnz(A)` yields the number of *structural non-zeros* in `A`;
- `nonzeros(A)` yields the vector of *structural non-zeros* in `A`.
The *structural non-zeros* are the entries stored by the sparse structure, they
may or may not be equal to zero; un-stored entries are always considered as being
equal to zero.
As can be seen above, `eltype`, `ndims`, `size` and `length` yield the same
results as if applied to the multi-dimensional array corresponding to the
sparse operator.
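For instance, a small sketch (the array and the resulting counts are
illustrative):
```julia
using LazyAlgebra
A = [1.0 0.0 2.0;
     0.0 3.0 0.0]
S = CompressedSparseOperator{:CSR}(A)  # keep the structural non-zeros of A
row_size(S)   # -> (2,)
col_size(S)   # -> (3,)
nnz(S)        # -> 3
nonzeros(S)   # -> [1.0, 2.0, 3.0], stored row by row
```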
## Generalized matrix multiplication by a sparse operator
A sparse operator `A` can be directly used as any linear mapping in
`LazyAlgebra`:
```julia
A*x
```
yields the generalized matrix multiplication of `x` by `A`. The size of `x`
must be that of the *columns* of `A`, that is `col_size(A)`. The result is an
array whose size is that of the *rows* of `A`, that is `size(A*x) =
row_size(A)`. Applying the adjoint of `A` is also implemented by the usual
syntax:
```julia
A'*y
```
to produce an array of size `col_size(A)` provided `y` is of suitable size,
i.e. `size(y) = row_size(A)`.
Sparse operators can be used as iterators, the value returned by the iterator
is a 3-tuple `(v,i,j)` with the value, the linear row index and the linear
column index of the entry. For instance:
```julia
for (v,i,j) in A
println("A[$i,$j] = $v")
end
```
This can be used to illustrate how `w = A*x` and `z = A'*y` could be computed
for the sparse operator `A`:
```julia
# Compute w = A*x:
@assert !Base.has_offset_axes(x)
@assert size(x) == col_size(A)
T = promote_type(eltype(A),eltype(x))
w = zeros(T, row_size(A))
@inbounds for (v,i,j) in A
w[i] += v*x[j]
end
```
and
```julia
# Compute z = A'*y:
@assert !Base.has_offset_axes(y)
@assert size(y) == row_size(A)
T = promote_type(eltype(A),eltype(y))
z = zeros(T, col_size(A))
@inbounds for (v,i,j) in A
z[j] += conj(v)*y[i]
end
```
Actual implementations of sparse operators in `LazyAlgebra` are equivalent to
the above examples but should be more efficient because they exploit the
specific storage format of a compressed sparse operator (see
[`CompressedSparseOperator`](@ref), [`SparseOperatorCSR`](@ref),
[`SparseOperatorCSC`](@ref) and [`SparseOperatorCOO`](@ref)).
!!! note
For now, row and column indices are restricted to be linear indices and
arguments to the linear mappings implemented by compressed sparse
operators must be arrays with conventional indexing (1-based linear
indices) and preferably implementing linear indices (not Cartesian ones).
## Simple construction of compressed sparse operators
Compressed sparse operators only store their structural non-zero elements. The
abstract super-type of these sparse operators is
`CompressedSparseOperator{F,T,M,N}` which is a direct sub-type of
`SparseOperator{T,M,N}` with an additional parameter `F` to specify the storage
format. The parameter `F` is specified as a symbol and can be:
- `:COO` for *Compressed Sparse Coordinate* storage format. This format is not
the most efficient, it is mostly used as an intermediate for building a
sparse operator in one of the following format.
- `:CSC` for *Compressed Sparse Column* storage format. This format is very
efficient for applying the adjoint of the sparse operator.
- `:CSR` for *Compressed Sparse Row* storage format. This format is very
efficient for directly applying the sparse operator.
To construct a compressed sparse operator in a given format `F` from the values
in a 2-dimensional array `A` call:
```julia
CompressedSparseOperator{F}(A, sel = (v,i,j) -> (v != zero(v)))
```
where optional argument `sel` is a selector function which is called as
`sel(v,i,j)` with `v`, `i` and `j` the value, the row and the column linear
indices for each entries of `A` and which is assumed to yield `true` for the
entries of `A` to be selected in the sparse structure and `false` for the
entries of `A` to be discarded. The default selector is such that all non-zeros of
`A` are selected. As an example, to select the non-zeros of the lower
triangular part of `A`, the constructor can be called as:
```julia
CompressedSparseOperator{F}(A, (v,i,j) -> ((i ≥ j)&(v != zero(v))))
```
Note the (slight) optimization of the expression with a bitwise AND `&` instead
of a short-circuiting logical AND `&&` to avoid branching.
By default the values of the structural non-zeros of the sparse operator have
the same type as the elements of `A`, you can enforce a specific element type
`T` with:
```julia
CompressedSparseOperator{F,T}(A[, sel])
```
To generalize the matrix-vector product, a sparse operator can emulate an array
with more than 2 dimensions. In that case, you must specify the number `M` of
leading dimensions that are considered as the *rows*:
```julia
CompressedSparseOperator{F,T,M}(A[, sel])
```
The number `N` of trailing dimensions that are considered as the *columns* can
also be specified (although they can be automatically guessed):
```julia
CompressedSparseOperator{F,T,M,N}(A[, sel])
```
with the restriction that `M ≥ 1`, `N ≥ 1` and `M + N = ndims(A)`. Note that
parameter `T` can be `Any` to keep the same element type as `A`.
Finally, the type `V` of the vector used to store the coefficients of the sparse
operator may also be specified:
```julia
CompressedSparseOperator{F,T,M,N,V}(A[, sel])
```
with the restriction that `V` must have standard linear indexing. The default
is to take `V = Vector{T}`. As a special case, you can choose a uniform
boolean vector from the
[`StructuredArrays`](https://github.com/emmt/StructuredArrays.jl) package to
store the sparse coefficients:
```julia
CompressedSparseOperator{F,T,M,N,UniformVector{Bool}}(A[, sel])
```
yields a compressed sparse operator whose values are an immutable uniform
vector of `true` values requiring no storage. This is useful if you want to
only store the sparse structure of the selected values, that is their indices
in the compressed format `F` not their values.
As explained in the last sections, compressed sparse operators can also be
constructed by providing the values of the structural non-zeros and their
respective row and column indices. As a general rule, to construct (or convert
to) a sparse operator with compressed storage format `F`, you can call:
```julia
CompressedSparseOperator{F}(args...; kwds...)
CompressedSparseOperator{F,T}(args...; kwds...)
CompressedSparseOperator{F,T,M}(args...; kwds...)
CompressedSparseOperator{F,T,M,N}(args...; kwds...)
CompressedSparseOperator{F,T,M,N,V}(args...; kwds...)
```
where given parameters `T`, `M`, `N` and `V`, arguments `args...`
and optional keywords `kwds...` will be passed to the concrete constructor
[`SparseOperatorCOO`](@ref), [`SparseOperatorCSC`](@ref) or
[`SparseOperatorCSR`](@ref) corresponding to the format `F`. For instance,
```julia
CompressedSparseOperator{:CSR}(A) -> SparseOperatorCSR(A)
```
## Accessing the structural non-zeros
It is possible to use a compressed sparse operator `A` as an iterator:
```julia
for (Aij,i,j) in A # simple but slow for CSR and CSC
...
end
```
to retrieve the values `Aij` and respective row `i` and column `j` indices for
all the entries stored in `A`. It is however more efficient to access them
according to their storage order which depends on the compressed format.
- If `A` is in CSC format:
```julia
using LazyAlgebra.SparseMethods
for j in each_col(A) # loop over column index
for k in each_off(A, j) # loop over structural non-zeros in this column
i = get_row(A, k) # get row index of entry
Aij = get_val(A, k) # get value of entry
end
end
```
- If `A` is in CSR format:
```julia
using LazyAlgebra.SparseMethods
for i in each_row(A) # loop over row index
for k in each_off(A, i) # loop over structural non-zeros in this row
j = get_col(A, k) # get column index of entry
Aij = get_val(A, k) # get value of entry
end
end
```
- If `A` is in COO format:
```julia
using LazyAlgebra.SparseMethods
for k in each_off(A)
i = get_row(A, k) # get row index of entry
j = get_col(A, k) # get column index of entry
Aij = get_val(A, k) # get value of entry
end
```
The low-level methods `each_row`, `each_col`, `each_off`, `get_row`, `get_col`
and `get_val` are not automatically exported by `LazyAlgebra`, this is the
purpose of the statement `using LazyAlgebra.SparseMethods`. These methods may
be extended to implement variants of compressed sparse operators.
## Sparse operators in COO format
Sparse operators in *Compressed Sparse Coordinate* (COO) format store the
significant entries in no particular order, as a vector of values, a vector of
linear row indices and a vector of linear column indices. It is even possible
to have repeated entries. This format is very useful to build a sparse
operator. It can be converted to a more efficient format like *Compressed
Sparse Column* or *Compressed Sparse Row* for fast application of the sparse
linear mapping or of its adjoint.
A sparse operator with COO storage can be directly constructed by:
```julia
CompressedSparseOperator{:COO}(vals, rows, cols, rowsiz, colsiz)
```
which is the same as:
```julia
SparseOperatorCOO(vals, rows, cols, rowsiz, colsiz)
```
or, if you want to force the element type of the result, one of the following:
```julia
CompressedSparseOperator{:COO,T}(vals, rows, cols, rowsiz, colsiz)
SparseOperatorCOO{T}(vals, rows, cols, rowsiz, colsiz)
```
Here, `vals` is the vector of values of the sparse entries, `rows` and `cols`
are integer valued vectors with the linear row and column indices of the sparse
entries, `rowsiz` and `colsiz` are the sizes of the row and column dimensions.
The value and the respective linear row and column indices of the `k`-th
sparse entry are given by `vals[k]`, `rows[k]` and `cols[k]`.
For efficiency reasons, sparse operators are currently limited to *fast* arrays
because they can be indexed linearly with no loss of performance. If `vals`,
`rows` and/or `cols` are not fast arrays, they will be automatically converted
to linearly indexed arrays.
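For example, a sketch building a small COO operator (the values and indices
are illustrative, and sizes are assumed to be given as dimension tuples):
```julia
vals = [1.0, 2.0]  # values of the structural non-zeros
rows = [1, 2]      # their linear row indices
cols = [3, 1]      # their linear column indices
S = SparseOperatorCOO(vals, rows, cols, (2,), (3,))
S*[1.0, 0.0, 5.0]  # -> [5.0, 2.0]
```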
## Sparse operators in CSC format
Sparse operators in *Compressed Sparse Column* (CSC) format store the
significant entries in a column-wise order, as a vector of values, a vector of
corresponding linear row indices and a vector of offsets indicating, for each
column, the range of indices in the vectors of values and of row indices.
A sparse operator with CSC storage can be directly constructed by:
```julia
CompressedSparseOperator{:CSC}(vals, rows, offs, rowsiz, colsiz)
```
which is the same as:
```julia
SparseOperatorCSC(vals, rows, offs, rowsiz, colsiz)
```
or, if you want to force the element type of the result, one of the following:
```julia
CompressedSparseOperator{:CSC,T}(vals, rows, offs, rowsiz, colsiz)
SparseOperatorCSC{T}(vals, rows, offs, rowsiz, colsiz)
```
Here, `vals` is the vector of values of the sparse entries, `rows` is an
integer valued vector of the linear row indices of the sparse entries, `offs`
is a column-wise table of offsets in these arrays, `rowsiz` and `colsiz` are
the sizes of the row and column dimensions. The values and respective
linear row indices of the `j`-th column are given by `vals[k]` and `rows[k]`
with `k ∈ offs[j]+1:offs[j+1]`. The linear column index `j` is in the range
`1:n` where `n = prod(colsiz)` is the equivalent number of columns.
For efficiency reasons, sparse operators are currently limited to *fast* arrays
because they can be indexed linearly with no loss of performance. If `vals`,
`rows` and/or `offs` are not fast arrays, they will be automatically converted
to linearly indexed arrays.
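For example, a sketch of the CSC layout for a 2×3 operator with entries at
`(2,1)` and `(1,3)` (values are illustrative, and sizes are assumed to be
given as dimension tuples):
```julia
vals = [2.0, 1.0]    # values, stored column by column
rows = [2, 1]        # linear row index of each stored entry
offs = [0, 1, 1, 2]  # column j owns entries offs[j]+1:offs[j+1]
S = SparseOperatorCSC(vals, rows, offs, (2,), (3,))
```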
## Sparse operators in CSR format
Sparse operators in *Compressed Sparse Row* (CSR) format store the significant
entries in a row-wise order, as a vector of values, a vector of corresponding
linear column indices and a vector of offsets indicating, for each row, the
range of indices in the vectors of values and of column indices.
A sparse operator with CSR storage can be directly constructed by:
```julia
CompressedSparseOperator{:CSR}(vals, cols, offs, rowsiz, colsiz)
```
which is the same as:
```julia
SparseOperatorCSR(vals, cols, offs, rowsiz, colsiz)
```
or, if you want to force the element type of the result, one of the following:
```julia
CompressedSparseOperator{:CSR,T}(vals, cols, offs, rowsiz, colsiz)
SparseOperatorCSR{T}(vals, cols, offs, rowsiz, colsiz)
```
Here, `vals` is the vector of values of the sparse entries, `cols` is an
integer valued vector of the linear column indices of the sparse entries,
`offs` is a row-wise table of offsets in these arrays, `rowsiz` and `colsiz`
are the sizes of the row and column dimensions. The values and
respective linear column indices of the `i`-th row are given by `vals[k]` and
`cols[k]` with `k ∈ offs[i]+1:offs[i+1]`. The linear row index `i` is in the
range `1:m` where `m = prod(rowsiz)` is the equivalent number of rows.
For efficiency reasons, sparse operators are currently limited to *fast* arrays
because they can be indexed linearly with no loss of performance. If `vals`,
`cols` and/or `offs` are not fast arrays, they will be automatically converted
to linearly indexed arrays.
## Conversion
Calling a sparse operator constructor can also be used to convert between
different formats or to change the type of the stored values. For example, to
convert a sparse operator `A` into a Compressed Spase Row (CSR) format, the
following calls are equivalent:
```julia
SparseOperatorCSR(A)
CompressedSparseOperator{:CSR}(A)
convert(SparseOperatorCSR, A)
convert(CompressedSparseOperator{:CSR}, A)
```
If `A` is in Compressed Sparse Coordinate (COO) format, entries are sorted and
duplicates merged. This also occurs when converting from COO format to
Compressed Sparse Column (CSC) format. Such conversions are very useful as
building a sparse operator in COO format is easier while CSC and CSR formats
are more efficient.
It is sufficient to specify the element type `T` to convert the storage format
and the type of the stored values. For example, any of the following will
convert `A` to CSC format with element type `T`:
```julia
SparseOperatorCSC{T}(A)
CompressedSparseOperator{:CSC,T}(A)
convert(SparseOperatorCSC{T}, A)
convert(CompressedSparseOperator{:CSC,T}, A)
```
If you just want to convert the type of the values stored by the sparse
operator `A` to type `T` while keeping its storage format, any of the following
will do the job:
```julia
SparseOperator{T}(A)
CompressedSparseOperator{Any,T}(A)
convert(SparseOperator{T}, A)
convert(CompressedSparseOperator{Any,T}, A)
```
As can be seen, specifying `Any` for the format parameter in
`CompressedSparseOperator` is a mean to keep the same storage format.
## Other methods
A sparse operator `S` can be reshaped:
```julia
reshape(S, rowdims, coldims)
```
where `rowdims` and `coldims` are the new list of dimensions for the rows and
the columns, their product must be equal to the product of the former lists of
dimensions (which means that you cannot change the number of elements of the
input and output of a sparse operator). The reshaped sparse operator and `S`
share the arrays of non-zero coefficients and corresponding row and column
indices, hence reshaping is a fast operation.
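For instance (a sketch assuming `S` currently has `row_size(S) == (4,)` and
`col_size(S) == (3,)`):
```julia
S2 = reshape(S, (2, 2), (3,))  # outputs are now 2×2 arrays; inputs unchanged
```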
The non-zero coefficients of a sparse operator `S` can be unpacked into
a provided array `A`:
```julia
unpack!(A, S; flatten=false) -> A
```
Keyword `flatten` specifies whether to only consider the length of `A` instead
of its dimensions. In any case, `A` must have as many elements as `length(S)`
and standard linear indexing. Just call `Array(S)` to unpack the coefficients
of the sparse operator `S` without providing the destination array or
`Array{T}(S)` if you want a specific element type `T`.
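A short sketch of unpacking (assuming `S` stores `Float64` coefficients):
```julia
A = Array{Float64}(undef, size(S))  # dense destination of the full size of S
unpack!(A, S)                       # copy the structural non-zeros into A
D = Array(S)                        # same result, allocating the destination
```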
# Methods for vectors
A *vector* is that which has the algebra of a vector space (Peano 1888, van der
Waerden 1931). See talk by Jiahao Chen:
[*Taking Vector Transposes Seriously*](https://www.youtube.com/watch?v=C2RO34b_oPM) at JuliaCon 2017.
## Vectorized methods
Most necessary operations on the variables of interest are linear operations.
Hence variables (whatever their specific type and size) are just called
*vectors* in `LazyAlgebra`. Numerical methods based on `LazyAlgebra`
manipulate the variables via a small number of vectorized methods:
* `vdot([T,][w,]x,y)` yields the inner product of `x` and `y`; that is, the sum
of `conj(x[i])*y[i]` or, if `w` is specified, the sum of
`w[i]*conj(x[i])*y[i]`, for all indices `i`. Optional argument `T` is the
type of the result; for real valued *vectors*, `T` is a floating-point type;
for complex valued *vectors*, `T` can be a complex type (with floating-point
parts) or a floating-point type to compute only the real part of the inner
product. `vdot([T,]sel,x,y)` yields the sum of `x[i]*y[i]` for all `i ∈ sel`
where `sel` is a selection of indices.
* `vnorm1([T,]x)` yields the L-1 norm of `x`, that is the sum of the absolute
values of the components of `x`. Optional argument `T` is the floating-point
type of the result.
* `vnorm2([T,]x)` yields the Euclidean (or L-2) norm of `x`, that is the square
root of sum of the squared values of the components of `x`. Optional
argument `T` is the floating-point type of the result.
* `vnorminf([T,]x)` yields the L-∞ norm of `x`, that is the maximal absolute value of the
components of `x`. Optional argument `T` is the floating-point type of the
result.
* `vcreate(x)` yields a new variable instance similar to `x`. If `x` is an
array, the element type of the result is a floating-point type.
* `vcopy!(dst,src)` copies the contents of `src` into `dst` and returns `dst`.
* `vcopy(x)` yields a fresh copy of the *vector* `x`.
* `vswap!(x,y)` exchanges the contents of `x` and `y` (which must have the same
type and size if they are arrays).
* `vfill!(x,α)` sets all elements of `x` with the scalar value `α` and return
`x`.
* `vzero!(x)` fills `x` with zeros and returns it.
* `vscale!(dst,α,src)` overwrites `dst` with `α*src` and returns `dst`. The
convention is that, if `α = 0`, then `dst` is filled with zeros whatever the
contents of `src`.
* `vscale!(x,α)` and `vscale!(α,x)` overwrite `x` with `α*x` and returns `x`.
The convention is that, if `α = 0`, then `x` is filled with zeros whatever
its prior contents.
* `vscale(α,x)` and `vscale(x,α)` yield a new *vector* whose elements are
those of `x` multiplied by the scalar `α`.
* `vproduct!(dst,[sel,]x,y)` overwrites `dst` with the elementwise
multiplication of `x` by `y`. Optional argument `sel` is a selection of
indices to consider.
* `vproduct(x,y)` yields the elementwise multiplication of `x` by `y`.
* `vupdate!(y,[sel,]α,x)` overwrites `y` with `α*x + y` and returns `y`.
Optional argument `sel` is a selection of indices to which apply the
operation (if an index is repeated, the operation will be performed several
times at this location).
* `vcombine(α,x,β,y)` yields the linear combination `α*x` or `α*x + β*y`.
* `vcombine!(dst,α,x,β,y)` overwrites `dst` with the linear combination `dst =
α*x` or `dst = α*x + β*y` and returns `dst`.
Note that the names of these methods all start with a `v` (for **v**ector) as
the conventions used by these methods may be particular. For instance,
compared to `copy!` and when applied to arrays, `vcopy!` imposes that the two
arguments have exactly the same dimensions. Another example is the `vdot`
method which has a slightly different semantics than Julia `dot` method.
`LazyAlgebra` already provides implementations of these methods for Julia
arrays with floating-point type elements. This implementation assumes that an
array is a valid *vector* provided it has suitable type and dimensions.
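For instance, a short sketch using these methods on ordinary Julia arrays
(which LazyAlgebra treats as valid *vectors*):
```julia
using LazyAlgebra
x = randn(8)
y = randn(8)
vdot(x, y)                # inner product
vnorm2(x)                 # Euclidean norm
z = vcreate(x)            # new array similar to x
vcombine!(z, 2, x, 3, y)  # z = 2*x + 3*y
vupdate!(z, -1, y)        # z = z - y
```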
## Implementing a new vector type
To have a numerical method based on `LazyAlgebra` be applicable to a new given
type of variables, it is sufficient to implement a subset of these basic
methods specialized for this kind of variables.
The various operations that should be implemented for a *vector* are:
* compute the inner product of two vectors of the same kind (`vdot(x,y)`
method);
* create a vector of a given kind (`vcreate(x)` method);
* copy a vector (`vcopy!(dst,src)`);
* fill a vector with a given value (`vfill!(x,α)` method);
* exchange the contents of two vectors (`vswap!(x,y)` method);
* linearly combine several vectors (`vcombine!(dst,α,x,β,y)` method).
Derived methods are:
* compute the Euclidean norm of a vector (`vnorm2` method, based on `vdot` by
default);
* multiply a vector by a scalar: `vscale!(dst,α,src)` and/or `vscale!(x,α)`
methods (based on `vcombine!` by default);
* update a vector by a scaled step: `vupdate!(y,α,x)` method (based on
`vcombine!` by default) and, for some constrained optimization methods,
`vupdate!(y,sel,α,x)` method;
* erase a vector: `vzero!(x)` method (based on `vfill!` by default);
* `vscale` and `vcopy` methods are implemented with `vcreate` and,
respectively, `vscale!` and `vcopy!`.
Other methods which may be required by some packages:
* compute the L-1 norm of a vector: `vnorm1(x)` method;
* compute the L-∞ norm of a vector: `vnorminf(x)` method.
Methods that must be implemented (`V` represents the vector type):
```julia
vdot(x::V, y::V)
```
```julia
vscale!(dst::V, alpha::Real, src::V) -> dst
```
Methods that may be implemented:
```julia
vscale!(alpha::Real, x::V) -> x
```
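As a partial, hedged sketch (the wrapper type `MyVec` and its field are
illustrative assumptions, and more of the methods listed above would be
needed for full support):
```julia
import LazyAlgebra: vdot, vscale!, vcreate, vcopy!
struct MyVec
    data::Vector{Float64}
end
vdot(x::MyVec, y::MyVec) = vdot(x.data, y.data)  # delegate to the array method
vscale!(dst::MyVec, α::Real, src::MyVec) = (dst.data .= α .* src.data; dst)
vcreate(x::MyVec) = MyVec(similar(x.data))
vcopy!(dst::MyVec, src::MyVec) = (copyto!(dst.data, src.data); dst)
```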
For mappings and linear operators (see
[Implementation of new mappings](mappings.md) for details), implement:
```julia
apply!(α::Scalar, P::Type{<:Operations}, A::Ta, x::Tx, β::Scalar, y::Ty) -> y
```
and
```julia
vcreate(P::Type{P}, A::Ta, x::Tx) -> y
```
for `Ta<:Mapping` and the supported operations `P<:Operations`.
push!(LOAD_PATH, "../src/")
using Documenter, Neuroimaging
makedocs(
modules = [Neuroimaging],
format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
sitename = "Neuroimaging.jl",
authors = "Robert Luke",
pages = [
"Home" => "index.md",
"Types" => "types.md",
"General EEG Processing" => Any[
"Overview"=>"eeg/eeg.md",
"Example"=>"eeg/examples.md",
"API"=>"eeg/functions.md",
],
"Steady State Responses (EEG)" => Any[
"Overview"=>"assr/assr.md",
"Example"=>"assr/examples.md",
"API"=>"assr/functions.md",
],
"Coordinate Systems" => Any["Example"=>"coord/examples.md",],
"Source Modelling" => Any["Example"=>"source/examples.md",],
"Input/Output Support" => "IO.md",
"Low-Level API" => "api.md",
],
)
deploydocs(
repo = "github.com/rob-luke/Neuroimaging.jl.git",
push_preview = true,
devbranch = "main",
)
"""
A Julia package for processing neuroimaging data.
"""
module Neuroimaging
using Logging,
Unitful,
DataFrames,
Distances,
CSV,
DelimitedFiles,
DSP,
Distributions,
Plots,
MAT,
Printf,
Statistics,
FFTW,
DataDeps,
LinearAlgebra,
Images,
BDF
using Unitful: AbstractQuantity
export new_processing_key,
find_keys_containing,
fileparts,
add_dataframe_static_rows,
_find_closest_number_idx,
# File type reading and writing
import_biosemi,
channelNames_biosemi_1020,
create_channel,
create_events,
NeuroimagingMeasurement,
read_avr,
read_bsa,
read_dat,
read_sfp,
read_elp,
write_dat,
prepare_dat,
write_avr,
read_evt,
read_rba_mat,
# Pre-processing
epoch_rejection,
channel_rejection,
highpass_filter,
lowpass_filter,
bandpass_filter,
compensate_for_filter,
remove_template,
rereference,
clean_triggers,
validate_triggers,
extra_triggers,
downsample,
# Reshaping of data
extract_epochs,
average_epochs,
create_sweeps,
# Statistics
ftest,
gfp,
# Type - Volume Image
VolumeImage,
read_VolumeImage,
plot,
normalise,
find_dipoles,
std,
mean,
isequal,
==,
# Type - SSR
EEG,
GeneralEEG,
SSR,
samplingrate,
modulationrate,
channelnames,
read_SSR,
trim_channel,
add_triggers,
remove_channel!,
keep_channel!,
add_channel,
assr_frequency,
save_results,
trigger_channel,
write_SSR,
merge_channels,
hcat,
# Source analysis
Sensor,
sensors,
Electrode,
elecrodes,
label,
labels,
x,
y,
z,
show,
match_sensors,
EEG_64_10_20,
EEG_Vanvooren_2014,
EEG_Vanvooren_2014_Left,
EEG_Vanvooren_2014_Right,
read_EEG,
Coordinate,
SPM,
BrainVision,
Talairach,
UnknownCoordinate,
convert,
conv_bv2tal,
conv_spm_mni2tal,
Dipole,
best_dipole,
Leadfield,
match_leadfield,
find_location,
project,
euclidean,
# Plotting
plot,
oplot,
plot_dat,
plot_spectrum,
plot_timeseries,
plot_single_channel_timeseries,
plot_multi_channel_timeseries,
oplot_dipoles,
SSR_spectrogram,
plot_filter_response,
plot_ftest,
Source,
Detector,
Optode
# Helper functions
include("miscellaneous/helper.jl")
include("datasets/datasets.jl")
# Pre-processing
include("preprocessing/data_rejection.jl")
include("preprocessing/filtering.jl")
include("preprocessing/reference.jl")
include("preprocessing/triggers.jl")
# Reshaping of data
include("reshaping/epochs.jl")
include("reshaping/sweeps.jl")
# Statistics
include("statistics/ftest.jl")
include("statistics/gfp.jl")
# Type - Neuroimaging
include("types/NeuroimagingMeasurement.jl")
# Type - Coordinates
include("types/Coordinates/Coordinates.jl")
# Type - Sensors
include("types/Sensors/Sensors.jl")
# Type - SSR
include("types/EEG/EEG.jl")
include("types/SSR/SSR.jl")
include("types/SSR/Preprocessing.jl")
include("types/SSR/ReadWrite.jl")
include("types/SSR/Reshaping.jl")
include("types/SSR/Statistics.jl")
include("types/SSR/Plotting.jl")
# Type - Dipole
include("types/Dipole/Dipole.jl")
include("types/Dipole/Operations.jl")
include("types/Sensors/Operations.jl")
include("types/Sensors/Sets.jl")
include("types/Coordinates/Operations.jl")
# Source analysis
include("source_analysis/dipoles.jl")
# Type - Leadfield
include("types/Leadfield/Leadfield.jl")
include("types/Leadfield/Operations.jl")
# Type - Volume Image
include("types/VolumeImage/VolumeImage.jl")
include("types/VolumeImage/ReadWrite.jl")
include("types/VolumeImage/Plotting.jl")
include("types/VolumeImage/Dipoles.jl")
include("types/VolumeImage/Operations.jl")
# Plotting functions
include("types/EEG/plotting.jl")
include("plotting/plots.jl")
include("types/Dipole/Plotting.jl")
# File type reading and writing
include("read_write/avr.jl")
include("read_write/bdf.jl")
include("read_write/bsa.jl")
include("read_write/dat.jl")
include("read_write/elp.jl")
include("read_write/evt.jl")
include("read_write/rba.jl")
include("read_write/sfp.jl")
end # module
using Neuroimaging, Lexicon
save("doc/API.md", Neuroimaging)
function __init__()
register(
DataDep(
"BioSemiTestFiles",
"Manafacturer provided example files",
["https://www.biosemi.com/download/BDFtestfiles.zip"];
post_fetch_method = [file -> run(`unzip $file`)],
),
)
register(
DataDep(
"ExampleSSR",
"Steady state response data with few channels",
[
"https://github.com/rob-luke/Neuroimaging.jl-example-data/archive/refs/heads/master.zip",
];
post_fetch_method = [file -> run(`unzip $file`)],
),
)
end
"""
Return a new processing key with the number incremented.
It checks for existing keys and returns a string with the next key to be used.
#### Arguments
* `d`: Dictionary containing existing keys
* `key_name`: Base of the key name to which a number is appended
#### Returns
* AbstractString with new key name
#### Examples
```julia
results_storage = Dict()
results_storage[new_processing_key(results_storage, "FTest")] = 4
results_storage[new_processing_key(results_storage, "FTest")] = 49
# Dict(Any, Any) with 2 entries
# "FTest1" => 4
# "FTest2" => 49
```
"""
function new_processing_key(d::Dict, key_name::AbstractString)
key_numb = 1
key = string(key_name, key_numb)
while haskey(d, key)
key_numb += 1
key = string(key_name, key_numb)
end
return key
end
"""
Find dictionary keys containing a string.
#### Arguments
* `d`: Dictionary containing existing keys
* `partial_key`: AbstractString you want to find in key names
#### Returns
* Array containing the indices of the dictionary keys containing the partial_key
#### Examples
```julia
results_storage = Dict()
results_storage[new_processing_key(results_storage, "FTest")] = 4
results_storage[new_processing_key(results_storage, "Turtle")] = 5
results_storage[new_processing_key(results_storage, "FTest")] = 49
find_keys_containing(results_storage, "FTest")
# 2-element Array{Int64,1}:
# 1
# 3
```
"""
function find_keys_containing(d::Dict, partial_key::AbstractString)
valid_keys = [startswith(i, partial_key) for i in collect(keys(d))]
findall((in)(true), valid_keys)
end
"""
Extract the path, filename and extension of a file
#### Arguments
* `fname`: AbstractString with the full path to a file
#### Output
* Three strings containing the path, file name and file extension
#### Examples
```julia
fileparts("/Users/test/subdir/test-file.bdf")
# ("/Users/test/subdir/","test-file","bdf")
```
"""
function fileparts(fname::AbstractString)
if fname == ""
pathname = ""
filename = ""
extension = ""
else
pathname = dirname(fname)
if pathname == ""
#nothing
else
pathname = string(pathname, "/")
end
filename = splitext(basename(fname))[1]
extension = splitext(basename(fname))[2][2:end]
end
return pathname, filename, extension
end
"""
Find the closest number to a target in an array and return the index
#### Arguments
* `list`: Array containing numbers
* `target`: Number to find closest to in the list
#### Output
* Index of the closest number to the target
#### Examples
```julia
_find_closest_number_idx([1, 2, 2.7, 3.2, 4, 3.1, 7], 3)
# 6
```
"""
function _find_closest_number_idx(
list::AbstractArray{T,1},
target::Number,
) where {T<:Number}
diff_array = abs.(list .- target)
targetIdx = something(findfirst(isequal(minimum(diff_array)), diff_array), 0)
end
#######################################
#
# DataFrame manipulation
#
#######################################
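# Append constant-valued columns to the DataFrame `a`: each pair in `args`
# maps a column name to a value that is repeated for every existing row.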
function add_dataframe_static_rows(a::DataFrame, args...)
@debug("Adding column(s)")
for kwargs in args
@debug(kwargs)
for k in kwargs
name = Symbol(k[1])
code = k[2]
expanded_code = vec(repeat([k[2]], size(a, 1), 1))
@debug("Name: $name Code: $code")
a[!, name] = expanded_code
# insert!(a, size(a, 2) + 1, expanded_code, name)
# rename!(a, Symbol(string("x", size(a, 2))) => name)
end
end
return a
end
#######################################
#
# Plot spectrum of signal
#
#######################################
"""
plot_spectrum(signal::Vector, fs::Real; kwargs...)
Plot the spectrum of a signal.
"""
function plot_spectrum(
signal::Vector,
fs::Real;
titletext::S = "",
Fmin::Number = 0,
Fmax::Number = 90,
targetFreq::F = 0.0,
dBPlot::Bool = true,
noise_level::Number = 0,
signal_level::Number = 0,
) where {S<:AbstractString,F<:AbstractFloat}
# Determine fft frequencies
signal_length = length(signal)
frequencies = range(0, stop = 1, length = Int(signal_length / 2 + 1)) * fs / 2
# Calculate fft and convert to power
fftSweep = 2 / signal_length * fft(signal)
spectrum = abs.(fftSweep[1:div(signal_length, 2)+1]) # Amplitude
spectrum = spectrum .^ 2
valid_idx = (frequencies .<= Fmax) .& (frequencies .>= Fmin)
spectrum = spectrum[valid_idx]
frequencies = frequencies[valid_idx]
# Want a log plot?
if dBPlot
spectrum = 10 * log10.(spectrum)
ylabel = "Response Power (dB)"
else
ylabel = "Response Power (uV^2)"
end
# Plot signal
Plots.plot(frequencies, spectrum, lab = "Spectrum", color = :black)
xlims!(Fmin, Fmax)
xlabel!("Frequency (Hz)")
ylabel!(ylabel)
p = title!(titletext)
# Plot the noise level if requested
if noise_level != 0
if dBPlot
noise_level = 10 * log10.(noise_level)
end
p = plot!(
[Fmin, targetFreq + 2],
[noise_level, noise_level],
lab = "Noise",
color = :red,
)
end
# Plot the signal level if requested
if signal_level != 0
if dBPlot
signal_level = 10 * log10.(signal_level)
end
plot!(
[Fmin, targetFreq],
[signal_level, signal_level],
lab = "Signal",
color = :green,
)
targetFreqIdx = something(
findfirst(
isequal(minimum(abs.(frequencies .- targetFreq))),
abs.(frequencies .- targetFreq),
),
0,
)
targetFreq = frequencies[targetFreqIdx]
targetResults = spectrum[targetFreqIdx]
#TODO remove label for circle rather than make it empty
p = plot!(
[targetFreq],
[targetResults],
marker = (:circle, 5, 0.1, :green),
markerstrokecolor = :green,
lab = "",
)
end
return p
end
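# Minimal usage sketch (values are illustrative, not from the package):
#
#     fs = 256
#     t = (0:fs-1) ./ fs
#     s = sin.(2π .* 10 .* t)          # a 10 Hz tone
#     plot_spectrum(s, fs; targetFreq = 10.0)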
#######################################
#
# Plot time series
#
# Automatic plotting for single channel (with units) or multichannel (with channel names) time series
#
#######################################
"""
plot_single_channel_timeseries(signal::AbstractVector{T}, fs::Real; kwargs...)
Plot a single channel time series
# Input
* signal: Vector of data
* fs: Sample rate
* channels: Name of channel to plot
* plot_points: Number of points to plot, they will be equally spread. Used to speed up plotting
* Other optional arguements are passed to gadfly plot function
# Output
Returns a figure
"""
function plot_single_channel_timeseries(
signal::AbstractVector{T},
fs::Real;
xlabel::S = "Time (s)",
ylabel::S = "Amplitude (uV)",
lab::S = "",
kwargs...,
) where {T<:Number,S<:AbstractString}
@debug("Plotting single channel waveform of size $(size(signal))")
time_s = collect(1:size(signal, 1)) / fs # Create time axis
Plots.plot(
time_s,
signal,
t = :line,
c = :black,
lab = lab,
xlabel = xlabel,
ylabel = ylabel,
)
end
"""
plot_multi_channel_timeseries(signals::Array{T,2}, fs::Number, channels::Array{S}; kwargs...)
Plot a multi channel time series
# Input
* signals: Array of data
* fs: Sample rate
* channels: Name of channels
* plot_points: Number of points to plot, they will be equally spread. Used to speed up plotting
* Other optional arguments are passed to the plot function
# Output
Returns a figure
"""
function plot_multi_channel_timeseries(
signals::Array{T,2},
fs::Number,
channels::Array{S};
xlabel::S = "Time (s)",
ylabel::S = "Amplitude (uV)",
kwargs...,
) where {T<:Number,S<:AbstractString}
@debug("Plotting multi channel waveform of size $(size(signals))")
time_s = collect(1:size(signals, 1)) / fs # Create time axis
    variances = var(signals, dims = 1) # Variance of each channel for rescaling
    mean_variance = Statistics.mean(variances) # Mean variance across channels for rescaling
    p = Plots.plot(;
        t = :line,
        c = :black,
        xlabel = xlabel,
        ylabel = ylabel,
        ylim = (-0.5, size(signals, 2) - 0.5),
        kwargs...,
    )
for c = 1:size(signals, 2) # Plot each channel
signals[:, c] = signals[:, c] .- Statistics.mean(signals[:, c]) # Remove mean
        signals[:, c] = signals[:, c] ./ (mean_variance ./ 4) .+ (c - 1) # Rescale and shift so all channels are visible
p = plot!(time_s, signals[:, c], c = :black, lab = "")
end
p = plot!(yticks = (0:length(channels)-1, channels))
return p
end
#######################################
#
# Filter response
#
#######################################
# Plot filter response
function plot_filter_response(
zpk_filter::FilterCoefficients,
fs::Integer;
lower::Number = 1,
upper::Number = 30,
sample_points::Int = 1024,
)
    frequencies = range(lower, stop = upper, length = sample_points)
h = freqresp(zpk_filter, frequencies * ((2pi) / fs))
magnitude_dB = 20 * log10.(convert(Array{Float64}, abs.(h)))
phase_response = (360 / (2 * pi)) * unwrap(convert(Array{Float64}, angle.(h)))
p1 = plot(frequencies, magnitude_dB, lab = "")
p2 = plot(frequencies, phase_response, lab = "")
p = plot(
p1,
p2,
ylabel = ["Magnitude (dB)" "Phase (degrees)"],
xlabel = "Frequency (Hz)",
layout = @layout([a; b]),
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 3597 | """
epoch_rejection(epochs::Array{T,3}, retain_percentage::AbstractFloat; rejection_method::Function = Neuroimaging.peak2peak) where {T<:Number}
Reject epochs based on the maximum peak to peak voltage within an epoch across all channels
# Arguments
* `epochs`: Array containing the epoch data in the format samples x epochs x channels
* `retain_percentage`: The percentage of epochs to retain
* `rejection_method`: Method to be used for epoch rejection (peak2peak)
# Returns
* An array with a reduced amount of entries in the epochs dimension
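# Example
A minimal sketch with random data (real epochs would come from `extract_epochs`):
```julia
epochs = rand(512, 100, 4)            # samples x epochs x channels
epochs = epoch_rejection(epochs, 0.8) # keep the 80% of epochs with the smallest peak to peak values
```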
"""
function epoch_rejection(
epochs::Array{T,3},
retain_percentage::AbstractFloat;
rejection_method::Function = Neuroimaging.peak2peak,
) where {T<:Number}
    if !(0 <= retain_percentage <= 1)
        @warn("Invalid retain_percentage value $(retain_percentage); expected a value between 0 and 1")
end
@info(
"Rejected $(round.(Int, (1 - retain_percentage) * 100))% of epochs based on $(string(rejection_method))"
)
# Epoch value should be a value or score per epoch where a lower value is better
# The lowest `retain_percentage` amount of epoch values will be kept
epoch_values = rejection_method(epochs)
cut_off_value = sort(epoch_values)[floor(Int, length(epoch_values) * retain_percentage)]
epochs = epochs[:, epoch_values.<=cut_off_value, :]
end
"""
peak2peak(epochs::Array{T,3}) where {T<:Number}
Find the peak to peak value for each epoch to be returned to epoch_rejection()
"""
function peak2peak(epochs::Array{T,3}) where {T<:Number}
epochsNum = size(epochs)[2]
peakvalues = Array{AbstractFloat}(undef, epochsNum)
for epoch = 1:epochsNum
peakvalues[epoch] =
abs.(maximum(epochs[:, epoch, :]) - minimum(epochs[:, epoch, :]))
end
return peakvalues
end
"""
channel_rejection(sigs::Array{T}, threshold_abs::Number, threshold_var::Number) where {T<:Number}
Reject channels with too great a variance.
Rejection can be based on a fixed threshold or chosen dynamically based on the variation of all channels.
# Arguments
* `signals`: Array of data in format samples x channels
* `threshold_abs`: Absolute threshold to remove channels with variance above this value
* `threshold_var`: Reject channels with a variance more than `threshold_var` standard deviations above the median of all channels
# Returns
An array indicating the channels to be kept
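# Example
A minimal sketch with random data:
```julia
sigs = randn(8192, 6)
keep = channel_rejection(sigs, 2.0, 3.0) # Bool mask of channels to keep
```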
"""
function channel_rejection(
sigs::Array{T},
threshold_abs::Number,
threshold_var::Number,
) where {T<:Number}
@debug(
"Rejecting channels for signal of $(size(sigs,2)) chanels and $(size(sigs,1)) samples"
)
variances = var(sigs, dims = 1) # Determine the variance of each channel
valid_nonzero = variances .!= 0 # The reference channel will have a variance of 0 so ignore it
# Reject channels above the threshold
valid_threshold_abs = variances .< threshold_abs
@debug("Static rejection threshold: $(threshold_abs)")
# Reject channels outside median + n * std
variances_median = median(variances[valid_nonzero]) # Use the median as usually not normal
variances_std = Statistics.std(variances[valid_nonzero]) # And ignore the reference channel
valid_threshold_var = variances .< (variances_median + threshold_var * variances_std)
@debug(
"Dynamic rejection threshold: $(variances_median + threshold_var * variances_std)"
)
valid_nonzero .& valid_threshold_abs .& valid_threshold_var # Merge all methods
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4472 | """
highpass_filter(signals::Array{T}, cutOff::Number, fs::Number, order::Int) where {T<:AbstractFloat}
High pass filter applied in forward and reverse direction
Simply a wrapper for the DSP.jl functions
# Arguments
* `signals`: Signal data in the format samples x channels
* `cutOff`: Cut off frequency in Hz
* `fs`: Sampling rate
* `order`: Filter order
# Returns
* filtered signal
* filter used on signal
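# Example
A minimal sketch removing drift below 2 Hz from data sampled at 1 kHz:
```julia
signals = randn(8192, 2)
signals, f = highpass_filter(signals, 2, 1000, 3)
```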
"""
function highpass_filter(
signals::Array{T},
cutOff::Number,
fs::Number,
order::Int,
) where {T<:AbstractFloat}
@debug("Highpass filtering $(size(signals)[end]) channels. Pass band > $(cutOff) Hz")
Wn = cutOff / (fs / 2)
highpass_filter(signals, Wn, order)
end
function highpass_filter(signals::Array{T}, Wn::Number, order::Int) where {T<:AbstractFloat}
@debug("Filter order = $order, Wn = $Wn")
f = digitalfilter(Highpass(Wn), Butterworth(order))
signals = filtfilt(f, signals)
return signals, f
end
"""
lowpass_filter(signals::Array{T}, cutOff::Number, fs::Number, order::Int) where {T<:AbstractFloat}
Low pass filter applied in forward and reverse direction
Simply a wrapper for the DSP.jl functions
# Arguments
* `signals`: Signal data in the format samples x channels
* `cutOff`: Cut off frequency in Hz
* `fs`: Sampling rate
* `order`: Filter order
# Returns
* filtered signal
* filter used on signal
"""
function lowpass_filter(
signals::Array{T},
cutOff::Number,
fs::Number,
order::Int,
) where {T<:AbstractFloat}
@debug("Lowpass filtering $(size(signals)[end]) channels. Pass band < $(cutOff) Hz")
Wn = cutOff / (fs / 2)
lowpass_filter(signals, Wn, order)
end
function lowpass_filter(signals::Array{T}, Wn::Number, order::Int) where {T<:AbstractFloat}
@debug("Filter order = $order, Wn = $Wn")
f = digitalfilter(Lowpass(Wn), Butterworth(order))
signals = filtfilt(f, signals)
return signals, f
end
"""
bandpass_filter(signals::Array, lower::Number, upper::Number, fs::Number, n::Int, rp::Number)
Bandpass filter applied in forward and reverse direction
Simply a wrapper for the DSP.jl functions
# Returns
* filtered signal
* filter used on signal
# TODO
Use filtfilt rather than custom implementation.
"""
function bandpass_filter(
signals::Array,
lower::Number,
upper::Number,
fs::Number,
n::Int,
rp::Number,
)
# Type 1 Chebychev filter
# TODO filtfilt does not work. Why not?
signals = convert(Array{Float64}, signals)
f = digitalfilter(Bandpass(lower, upper, fs = fs), Chebyshev1(n, rp))
@info("Bandpass filtering $(size(signals)[end]) channels. $lower < Hz < $upper")
@debug("Filter order = $n, fs = $fs")
signals = filt(f, signals)
signals = filt(f, reverse(signals, dims = 1))
signals = reverse(signals, dims = 1)
return signals, f
end
#######################################
#
# Filter compensation
#
#######################################
function compensate_for_filter(d::Dict, spectrum::AbstractArray, fs::Real)
frequencies = range(0, stop = 1, length = Int(size(spectrum, 1))) * fs / 2
key_name = "filter"
key_numb = 1
key = string(key_name, key_numb)
while haskey(d, key)
spectrum = compensate_for_filter(d[key], spectrum, frequencies, fs)
@debug("Accounted for $key response in spectrum estimation")
key_numb += 1
key = string(key_name, key_numb)
end
return spectrum
end
"""
compensate_for_filter(filter::FilterCoefficients, spectrum::AbstractArray, frequencies::AbstractArray, fs::Real)
Recover the spectrum of signal by compensating for filtering done.
# Arguments
* `filter`: The filter used on the spectrum
* `spectrum`: Spectrum of signal
* `frequencies`: Array of frequencies you want to apply the compensation to
* `fs`: Sampling rate
# Returns
Spectrum of the signal after compensating for the filter
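# Example
A minimal sketch, assuming `spectrum` and `frequencies` were computed as in `ftest`:
```julia
used_filter = digitalfilter(Highpass(0.01), Butterworth(3))
spectrum = compensate_for_filter(used_filter, spectrum, frequencies, 1000.0)
```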
"""
function compensate_for_filter(
filter::FilterCoefficients,
spectrum::AbstractArray,
frequencies::AbstractArray,
fs::Real,
)
filter_response = [freqresp(filter, f * ((2pi) / fs)) for f in frequencies]
for f = 1:length(filter_response)
spectrum[f, :, :] = spectrum[f, :, :] ./ abs.(filter_response[f])^2
end
return spectrum
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2817 | """
rereference(signals::Array{T,2}, refChan::Union{Int,Array{Int}}) where {T<:AbstractFloat}
rereference(signals::Array{T,2}, refChan::Union{S,Array{S}}, chanNames::Vector{S}) where {S<:AbstractString}
Rereference signals to a specific channel by index,
or by channel name from a supplied list.
If multiple channels are specified, their average is used as the reference.
# Arguments
* `signals`: Original signals to be modified
* `refChan`: Index or name of channels to be used as reference.
* `chanNames`: List of channel names associated with signals array
# Returns
Rereferenced signals
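# Example
A minimal sketch with random data:
```julia
signals = randn(8192, 3)
signals = rereference(signals, 3)                          # reference to channel 3
signals = rereference(signals, "Cz", ["Fp1", "Fp2", "Cz"]) # reference by channel name
```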
"""
function rereference(
signals::Array{T,2},
refChan::Union{Int,Array{Int}},
) where {T<:AbstractFloat}
@debug("Re referencing $(size(signals)[end]) channels to $(length(refChan)) channels")
@debug("Reference channels = $refChan")
reference_signal = signals[:, refChan]
# If using the average of several channels
if size(reference_signal, 2) > 1
reference_signal = vec(Statistics.mean(reference_signal, dims = 2))
end
remove_template(signals, reference_signal)
end
function rereference(
signals::Array{T,2},
refChan::S,
chanNames::Vector{S},
) where {S<:AbstractString,T<:AbstractFloat}
@debug("Reference channels = $refChan")
if refChan == "car" || refChan == "average"
refChan_Idx = collect(1:size(signals, 2))
elseif isa(refChan, AbstractString)
refChan_Idx = something(findfirst(isequal(refChan), chanNames), 0)
end
if refChan_Idx == 0
throw(ArgumentError("Requested channel is not in the provided list of channels"))
end
rereference(signals, refChan_Idx)
end
function rereference(
signals::Array{T,2},
refChan::Vector{S},
chanNames::Vector{S},
) where {S<:AbstractString,T<:AbstractFloat}
@debug("Reference channels = $refChan")
refChan_Idx = [something(findfirst(isequal(i), chanNames), 0) for i in refChan]
if 0 in refChan_Idx
throw(ArgumentError("Requested channel is not in the provided list of channels"))
end
rereference(signals, refChan_Idx)
end
"""
remove_template(signals::Array{T,2}, template::AbstractVector{T})
Remove a template signal from each column of an array
# Arguments
* `signals`: Original signals to be modified (samples x channels)
* `template`: Template to remove from each signal
# Returns
Signals with template removed
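# Example
A minimal sketch removing the mean across channels from every channel:
```julia
signals = randn(8192, 4)
template = vec(Statistics.mean(signals, dims = 2))
signals = remove_template(signals, template)
```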
"""
function remove_template(
signals::Array{T,2},
template::AbstractVector{T},
) where {T<:AbstractFloat}
@assert size(signals, 1) == size(template, 1)
for chan = 1:size(signals)[end]
signals[:, chan] -= template
end
return signals
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 8214 | using Statistics
# Trigger information is stored in a dictionary
# containing three fields, all referenced in samples.
# Index: Start of trigger
# Code: Code of trigger
# Duration: Duration of trigger
"""
validate_triggers(t::Dict)
Validate trigger channels have required keys and information.
Trigger information is stored in a dictionary
containing three fields, all referenced in samples:
* `Index`: Start of trigger
* `Code`: Code of trigger
* `Duration`: Duration of trigger
"""
function validate_triggers(t::Dict)
@debug("Validating trigger information")
    if length(t) > 3
throw(KeyError("Trigger channel has extra columns"))
end
if !haskey(t, "Index")
throw(KeyError("Trigger channel does not contain index information"))
end
if !haskey(t, "Code")
throw(KeyError("Trigger channel does not contain code information"))
end
if !haskey(t, "Duration")
throw(KeyError("Trigger channel does not contain duration information"))
end
if length(t["Index"]) !== length(t["Duration"])
throw(KeyError("Trigger index and duration lengths are different"))
end
if length(t["Index"]) !== length(t["Code"])
throw(KeyError("Trigger index and code lengths are different"))
end
end
"""
clean_triggers(t::Dict, valid_triggers::Array{Int}, min_epoch_length::Int, max_epoch_length::Int, remove_first::Int, max_epochs::Int)
Clean trigger channel information by removing specified epochs
if they are too long or short. Can also remove the first trigger which
often represents the start of a condition or measurement.
Can also be used to limit the number of total epochs.
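A minimal sketch (hypothetical trigger dictionary; codes are stored with a 252 offset):
```julia
triggers = Dict("Index" => [100, 600, 1100], "Code" => [253, 253, 253], "Duration" => [1, 1, 1])
triggers = clean_triggers(triggers, [1], 0, 0, 0, 0)
```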
"""
function clean_triggers(
t::Dict,
valid_triggers::Array{Int},
min_epoch_length::Int,
max_epoch_length::Int,
remove_first::Int,
max_epochs::Int,
)
@debug("Cleaning triggers")
# Ensure the passed in dictionary contains all required fields
validate_triggers(t)
# Make in to data frame for easy management
epochIndex =
DataFrame(Code = t["Code"] .- 252, Index = t["Index"], Duration = t["Duration"])
# Present information about triggers before processing
@debug("Original trigger codes $(unique(epochIndex[:Code]))")
@debug("Originally $(length(epochIndex[:Code])) triggers")
# Check for not valid indices and throw a warning
if sum([in(i, [0; valid_triggers]) for i in epochIndex[!, :Code]]) != length(epochIndex[!, :Code])
validity = Bool[]
for ep in epochIndex[!, :Code]
push!(validity, in(ep, valid_triggers))
end
non_valid = sort(unique(epochIndex[!, :Code][.!validity]))
@info("Non valid triggers found: $non_valid")
end
# Just take valid indices
valid = convert(Array{Bool}, vec([in(i, valid_triggers) for i in epochIndex[!, :Code]]))
epochIndex = epochIndex[valid, :]
# Trim values if requested
if remove_first > 0
epochIndex = epochIndex[remove_first+1:end, :]
@debug("Trimming first $remove_first triggers")
end
if max_epochs != 0
epochIndex = epochIndex[1:minimum([max_epochs, length(epochIndex[!, :Index])]), :]
@debug("Trimming to $max_epochs triggers")
end
# Throw out epochs that are the wrong length
if length(epochIndex[!, :Index]) > 2
epochIndex[!, :Length] = [0; diff(epochIndex[!, :Index])]
if min_epoch_length > 0
            epochIndex[!, :valid_length] = epochIndex[!, :Length] .> min_epoch_length
num_non_valid = sum(.!epochIndex[!, :valid_length])
if num_non_valid > 1 # Don't count the first trigger
@debug("Removed $num_non_valid triggers < length $min_epoch_length")
epochIndex = epochIndex[epochIndex[!, :valid_length], :]
end
end
        epochIndex[!, :Length] = [0; diff(epochIndex[!, :Index])]
if max_epoch_length != 0
epochIndex[!, :valid_length] = epochIndex[!, :Length] .< max_epoch_length
num_non_valid = sum(.!epochIndex[!, :valid_length])
if num_non_valid > 0
@debug("Removed $num_non_valid triggers > length $max_epoch_length")
epochIndex = epochIndex[epochIndex[!, :valid_length], :]
end
end
# Sanity check
if Statistics.std(epochIndex[!, :Length][2:end]) > 1
@info("Your epoch lengths vary too much")
@info(
string(
"Length: median=$(median(epochIndex[!, :Length][2:end])) sd=$(Statistics.std(epochIndex[!, :Length][2:end])) ",
"min=$(minimum(epochIndex[!, :Length][2:end]))",
)
)
@debug(epochIndex)
end
end
# If the trigger has been signalled by 0 status then offset this
# Otherwise when saving and reading again, nothing will be detected
if sum(epochIndex[!, :Code]) == 0
@info("Trigger status indicated by 0, shifting to 1 for further processing")
epochIndex[!, :Code] = epochIndex[!, :Code] .+ 1
end
triggers = Dict(
"Index" => vec((epochIndex[!, :Index])'),
"Code" => vec(epochIndex[!, :Code] .+ 252),
"Duration" => vec(epochIndex[!, :Duration])',
)
validate_triggers(triggers)
return triggers
end
"""
Place extra triggers a set time after existing triggers.
A new trigger with `new_trigger_code` will be placed `new_trigger_time` seconds after existing `old_trigger_code` triggers.
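A minimal sketch, inserting trigger code 20 every 0.5 s after each code 1 trigger:
```julia
triggers = extra_triggers(triggers, 1, 20, 0.5, 8192.0)
```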
"""
function extra_triggers(
t::Dict,
old_trigger_code::Union{Int,Array{Int}},
new_trigger_code::Int,
new_trigger_time::Number,
fs::Number;
trigger_code_offset::Int = 252,
max_inserted::Number = Inf,
)
# Scan through existing triggers, when you find one that has been specified to trip on
# then add a new trigger at a set time after the trip
# Calculate the delay in samples. This may not be an integer number.
# Don't round here as you will get drifting
new_trigger_delay = new_trigger_time * fs
# Find triggers we want to trip on
valid_trip = any(t["Code"] .- trigger_code_offset .== old_trigger_code', dims = 2)
valid_trip_idx = findall(valid_trip)
    valid_trip_index = [t["Index"][valid_trip_idx]; 0] # Place a 0 at the end so we don't use the last epoch
valid_trip_code = t["Code"][valid_trip_idx]
@debug("Found $(length(valid_trip_code)) exisiting valid triggers")
@debug(
"Adding new trigger $new_trigger_code after $new_trigger_time (s) = $new_trigger_delay (samples) from $old_trigger_code"
)
validate_triggers(t)
code = Int[]
index = Int[]
vt = 0 # Count which valid index we are up to
for i = 1:length(t["Index"])-1
push!(code, t["Code"][i] - trigger_code_offset)
push!(index, t["Index"][i])
if valid_trip[i]
offset = t["Index"][i] + new_trigger_delay
counter = 0
vt += 1
while offset < valid_trip_index[vt+1] && counter < max_inserted
push!(code, new_trigger_code)
push!(index, Int(round.(offset))) # Round and take integer here to minimise the drift
offset += new_trigger_delay
counter += 1
end
end
end
# Ensure triggers are sorted
v = sortperm(index)
index = index[v]
code = code[v]
# if there are any two triggers directly on top of each other then remove them
valid_idx = [true; diff(index) .!= 0]
index = index[valid_idx]
code = code[valid_idx]
triggers = Dict(
"Index" => vec((index)'),
"Code" => vec(code .+ trigger_code_offset),
"Duration" => vec([0; diff(index)])',
)
validate_triggers(triggers)
return triggers
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2439 | #######################################
#
# AVR file
#
#######################################
"""
Read AVR (.avr) file
#### Input
* `fname`: Name or path for the AVR file
#### Output
* `data`: Array of data read from AVR file. Each column represents a channel, and each row represents a point.
* `chanNames`: Channel Names
"""
function read_avr(fname::AbstractString)
@info("Reading AVR file: $fname")
# Open file
file = open(fname, "r")
# Header line
header_exp =
r"Npts= (\d*)\s+TSB= ([-+]?[0-9]*\.?[0-9]+)\s+DI= ([-+]?[0-9]*\.?[0-9]+)\s+SB= ([-+]?[0-9]*\.?[0-9]+)\s+SC= ([-+]?[0-9]*\.?[0-9]+)\s+Nchan= (\d*)"
m = match(header_exp, readline(file))
npts = parse(Int, ascii(m.captures[1])) # Number of points
tsb = m.captures[2] # Omit?
di = m.captures[3] # Omit?
sb = m.captures[4] # Omit?
sc = m.captures[5] # Omit?
nchan = parse(Int, ascii(m.captures[6])) # Number of channels
# Channel line
names_exp = r"(\w+)"
chanNames = collect((m.match for m in eachmatch(names_exp, readline(file))))
# Data
data = Array{Float64}(undef, (npts, nchan))
for c = 1:nchan
d = collect((m.match for m in eachmatch(r"([-+]?[0-9]*\.?[0-9]+)", readline(file))))
for n = 1:npts
data[n, c] = parse(Float64, ascii(d[n]))
end
end
# Close file
close(file)
return data, chanNames
end
"""
Write AVR file
"""
function write_avr(fname::AbstractString, data::Array, chanNames::Array, fs::Number)
@info("Saving avr to $fname")
fs = float(fs)
open(fname, "w") do fid
@printf(
fid,
"Npts= %d TSB= %2.6f DI= %2.6f SB= %2.3f SC= %3.1f Nchan= %d\n",
size(data, 1),
1000 / fs,
1000 / fs,
1.0,
200.0,
size(data, 2)
)
@printf(fid, "%s", chanNames[1])
for c = 2:length(chanNames)
@printf(fid, " ")
@printf(fid, "%s", chanNames[c])
end
@printf(fid, "\n")
for c = 1:size(data, 2)
@printf(fid, "%2.6f", data[1, c])
for p = 2:size(data, 1)
@printf(fid, " ")
@printf(fid, "%2.6f", data[p, c])
end
@printf(fid, " ")
@printf(fid, "\n")
end
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4975 | #######################################
#
# BDF file
#
#######################################
"""
Import Biosemi files
"""
function import_biosemi(fname::Union{AbstractString,IO}; kwargs...)
@info("Importing BIOSEMI data file")
# Read raw data using BDF.jl
data, triggers, trigger_channel, system_code_channel =
readBDF(identity(fname), transposeData = true; kwargs...)
header = readBDFHeader(identity(fname))
# Check the sample rate
sample_rate = header["sampRate"]
if sum(diff(sample_rate)) != 0
@warn("Sampling rate varies across channels")
sample_rate = NaN
else
sample_rate = sample_rate[1]
end
reference_channel = "Raw"
# Tidy the trigger channel to standard names
triggers = Dict(
"Code" => triggers["code"],
"Index" => triggers["idx"],
"Duration" => triggers["dur"],
)
# Tidy channel names if required
if any(header["chanLabels"] .== "B16") # Cz is usually present
@debug(" Converting names from BIOSEMI to 10-20")
header["chanLabels"] = channelNames_biosemi_1020(header["chanLabels"])
end
system_codes = create_events(system_code_channel, sample_rate)
return data, triggers, system_codes, sample_rate, reference_channel, header
end
# Create events from channel
############################
function create_events(channel::Array{Int16,1}, fs::Number)
startPoints = vcat(1, findall(diff(channel) .!= 0) .+ 1)
stopPoints = vcat(findall(diff(channel) .!= 0), length(channel))
trigDurs = (stopPoints - startPoints) / fs
evt = channel[startPoints]
evtTab = Dict("Code" => evt, "Index" => startPoints, "Duration" => trigDurs)
end
# Create channel from events
############################
function create_channel(t::Dict, data::Array, fs::Number; kwargs...)
create_channel(t, maximum(size(data)), fs; kwargs...)
end
function create_channel(
t::Dict,
l::Int,
fs::Number;
code::AbstractString = "Code",
index::AbstractString = "Index",
duration::AbstractString = "Duration",
)
@debug(
"Creating trigger channel from data. Length: $l Triggers: $(length(t[index])) Fs: $fs"
)
    channel = fill(Int16(252), l) # Initialise array to the 252 resting code
for i = 1:length(t[index])-1
channel[t[index][i]:t[index][i]+round.(Int, t[duration][i] * fs)] .= t[code][i]
end
return channel
end
# Change biosemi labels to 1020
#######################################
function channelNames_biosemi_1020(original::S) where {S<:AbstractString}
biosemi_1020 = [
"A01" "Fp1"
"A1" "Fp1"
"A05" "F3"
"A5" "F3"
"A09" "FC5"
"A9" "FC5"
"A13" "C3"
"A17" "CP5"
"A21" "P3"
"A25" "PO7"
"A29" "Oz"
"B01" "Fpz"
"B1" "Fpz"
"B05" "AFz"
"B5" "AFz"
"B9" "F6"
"B13" "FC4"
"B17" "C2"
"B21" "TP8"
"B25" "P2"
"B29" "P10"
"A02" "AF7"
"A2" "AF7"
"A06" "F5"
"A6" "F5"
"A10" "FC3"
"A14" "C5"
"A18" "CP3"
"A22" "P5"
"A26" "PO3"
"A30" "POz"
"B02" "Fp2"
"B2" "Fp2"
"B06" "Fz"
"B6" "Fz"
"B10" "F8"
"B14" "FC2"
"B18" "C4"
"B22" "CP6"
"B26" "P4"
"B30" "PO8"
"A03" "AF3"
"A3" "AF3"
"A07" "F7"
"A7" "F7"
"A11" "FC1"
"A15" "T7"
"A19" "CP1"
"A23" "P7"
"A27" "O1"
"A31" "Pz"
"B03" "AF8"
"B3" "AF8"
"B07" "F2"
"B7" "F2"
"B11" "FT8"
"B15" "FCz"
"B19" "C6"
"B23" "CP4"
"B27" "P6"
"B31" "PO4"
"A04" "F1"
"A4" "F1"
"A08" "FT7"
"A8" "FT7"
"A12" "C1"
"A16" "TP7"
"A20" "P1"
"A24" "P9"
"A28" "Iz"
"A32" "CPz"
"B04" "AF4"
"B4" "AF4"
"B08" "F4"
"B8" "F4"
"B12" "FC6"
"B16" "Cz"
"B20" "T8"
"B24" "CP2"
"B28" "P8"
"B32" "O2"
"Status" "Status"
]
idx = something(findfirst(isequal(original), biosemi_1020), 0)
if idx == 0
error("Channel $original is unknown")
end
converted = biosemi_1020[:, 2][idx]
end
function channelNames_biosemi_1020(original::Array{S}) where {S<:AbstractString}
converted = Array{AbstractString}(undef, size(original))
@info("Fixing channel names of $(length(original)) channels")
for i = 1:length(original)
converted[i] = channelNames_biosemi_1020(original[i])
end
return converted
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1670 | #######################################
#
# BSA file
#
#######################################
"""
Read Besa's BSA (.bsa) file
#### Input
* `fname`: Name or path for the BSA file
#### Output
* `bsa`: Dipole object
"""
function read_bsa(fname::AbstractString)
@info("Reading BSA file = $fname")
# Open file
file = open(fname, "r")
# Read version
first_line = readline(file)
separator = something(findfirst(isequal('|'), first_line), 0)
version = first_line[1:separator-1]
coordinate_system = first_line[separator+1:end-1]
# Read title line
regexp =
r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
m = match(regexp, readline(file))
# Read useless line
readline(file)
# Read dipoles
dips = Neuroimaging.Dipole[]
while !eof(file)
dm = match(regexp, readline(file))
dip = Dipole(
coordinate_system,
1u"m" * parse(Float64, dm.captures[2]) / 1000,
1u"m" * parse(Float64, dm.captures[3]) / 1000,
1u"m" * parse(Float64, dm.captures[4]) / 1000,
parse(Float64, dm.captures[5]),
parse(Float64, dm.captures[6]),
parse(Float64, dm.captures[7]),
parse(Float64, dm.captures[8]),
parse(Float64, dm.captures[9]),
parse(Float64, dm.captures[10]),
)
push!(dips, dip)
end
# Close file
close(file)
@debug("Version = $version")
@debug("Coordinate System = $coordinate_system")
@debug("Dipoles = $(length(dips))")
return dips
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 7936 | #######################################
#
# dat file
#
#######################################
"""
Read dat files
#### Arguments
* `fname`: Name or path for the dat file
#### Returns
* `x`: Range of x values
* `y`: Range of y values
* `z`: Range of z values
* `complete_data`: Array (x × y × z x t)
* `sample_times`
#### References
File specs were taken from [fieldtrip](https://github.com/fieldtrip/fieldtrip/blob/1cabb512c46cc70e5b734776f20cdc3c181243bd/external/besa/readBESAimage.m)
"""
function read_dat(fname::AbstractString)
@info("Reading dat file = $fname")
read_dat(open(fname, "r"))
end
function read_dat(fid::IO)
if isa(fid, IOBuffer)
fid.ptr = 1
end
# Ensure we are reading version 2
    version_match = match(r"(\S+):(\d.\d)", readline(fid))
    version = parse(Float64, version_match.captures[2])
    @debug("Version = $version")
    @assert version == 2
# Header info
readline(fid) # Empty line
data_file = readline(fid)
condition = readline(fid)
typeline = readline(fid)
# Types of data that can be stored
if something(findfirst("Method", typeline), 0:-1) != 0:-1 # TODO: change to imatch
@debug("File type is Method")
image_type = typeline[21:end]
image_mode = "Time"
regularization = readline(fid)[21:end-1]
# TODO: Fix for latencies. See fieldtrip
# Units
units = readline(fid)[3:end-1]
@debug("Regularisation = $regularization")
@debug("Units = $units")
elseif something(findfirst("MSBF", typeline), 0:-1) != 0:-1
image_mode = "Single Time"
image_type = "Multiple Source Beamformer"
units = condition[3:end-1]
regularization = "None"
@warn("MSBF type under development")
elseif something(findfirst("MSPS", typeline), 0:-1) != 0:-1
@warn("MSPS type not implemented yet")
elseif something(findfirst("Sens", typeline), 0:-1) != 0:-1
@warn("Sens type not implemented yet")
else
@warn("Unknown type")
end
readline(fid) # Empty line
description = readline(fid)
# Read in the dimensions
regexp = r"[X-Z]:\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+)"
xrange = match(regexp, readline(fid))
x = range(
parse(Float64, xrange.captures[1]),
stop = parse(Float64, xrange.captures[2]),
length = parse(Int, xrange.captures[3]),
)
yrange = match(regexp, readline(fid))
y = range(
parse(Float64, yrange.captures[1]),
stop = parse(Float64, yrange.captures[2]),
length = parse(Int, yrange.captures[3]),
)
zrange = match(regexp, readline(fid))
z = range(
parse(Float64, zrange.captures[1]),
stop = parse(Float64, zrange.captures[2]),
length = parse(Int, zrange.captures[3]),
)
empty = readline(fid)
# Variables to fill
t = 1
complete_data = Array{Float64}(undef, (length(x), length(y), length(z), t))
sample_times = Float64[]
description = readline(fid)
if something(findfirst("Sample", typeline), 0:-1) != 0:-1
#
# 4D file
#
println("4D dat file reading is broken since the Julia 1 update")
# s = match(r"Sample \d+, (-?\d+.\d+) ms", description)
# push!(sample_times, float(s.captures[1]))
# file_still_going = true
# while file_still_going
# for zind = 1:length(z)
# readline(fid) # Z: z
# for yind = 1:length(y)
# d = readline(fid) # values
# m = collect((m.match for m in eachmatch(r"(-?\d+.\d+)", d)))
# complete_data[:, yind, zind, t] = float(m)
# end
# readline(fid) # blank or dashed
# end
# if eof(fid)
# file_still_going = false
# else
# t += 1
# s = readline(fid) # Sample n, t.tt ms
# s = match(r"Sample \d+, (-?\d+.\d+) ms", s)
# push!(sample_times, float(s.captures[1]))
# # There is no nice way to grow a multidimensional array
# temp = complete_data
# complete_data = Array{Float64}(undef, (length(x), length(y), length(z), t))
# complete_data[:, :, :, 1:t-1] = temp
# end
# end
else
#
# 3D file
#
file_still_going = true
idx = 1
while file_still_going
for zind = 1:length(z)
for yind = 1:length(y)
d = readline(fid) # values
m = collect((m.match for m in eachmatch(r"(-?\d+.\d+e?-?\d*)", d)))
m = parse.(Float64, m)
complete_data[:, yind, zind, t] = m
end
d = readline(fid) # blank or dashed
d = readline(fid) # blank or dashed
end
if eof(fid)
file_still_going = false
else
println("File should have finished")
end
end
sample_times = [0]
end
close(fid)
return x, y, z, complete_data, sample_times
end
# """
# Write dat file. Disabled as 4D reading is broken.
# """
# function write_dat(
# fname::AbstractString,
# X::AbstractVector,
# Y::AbstractVector,
# Z::AbstractVector,
# S::Array{DataT,4},
# T::AbstractVector;
# data_file::AbstractString = "NA",
# condition::AbstractString = "NA",
# method::AbstractString = "NA",
# regularization::AbstractString = "NA",
# units::AbstractString = "NA",
# ) where {DataT<:AbstractFloat}
# if size(S, 1) != length(X)
# @warn("Data and x sizes do not match")
# end
# if size(S, 2) != length(Y)
# @warn("Data and y sizes do not match")
# end
# if size(S, 3) != length(Z)
# @warn("Data and z sizes do not match")
# end
# if size(S, 4) != length(T)
# @warn("Data and t sizes do not match")
# end
# @info("Saving dat to $fname")
# open(fname, "w") do fid
# @printf(fid, "BESA_SA_IMAGE:2.0\n")
# @printf(fid, "\n")
# @printf(fid, "Data file: %s\n", data_file)
# @printf(fid, "Condition: %s\n", condition)
# @printf(fid, "Method: %s\n", method)
# @printf(fid, "Regularization: %s\n", regularization)
# @printf(fid, " %s\n", units)
# @printf(fid, "\n")
# @printf(fid, "Grid dimensions ([min] [max] [nr of locations]):\n")
# @printf(fid, "X: %2.6f %2.6f %d\n", minimum(X), maximum(X), length(X))
# @printf(fid, "Y: %2.6f %2.6f %d\n", minimum(Y), maximum(Y), length(Y))
# @printf(fid, "Z: %2.6f %2.6f %d\n", minimum(Z), maximum(Z), length(Z))
# @printf(
# fid,
# "==============================================================================================\n"
# )
# for t = 1:size(S, 4)
# @printf(fid, "Sample %d, %1.2f ms\n", t - 1, T[t])
# for z = 1:size(S, 3)
# @printf(fid, "Z: %d\n", z - 1)
# for y = 1:size(S, 2)
# for x = 1:size(S, 1)
# @printf(fid, "%2.10f ", S[x, y, z, t])
# end
# @printf(fid, "\n")
# end
# @printf(fid, "\n")
# end
# end
# @debug("File successfully written")
# end
# end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1531 | #######################################
#
# elp file
#
#######################################
"""
Read elp file containing sensor locations
#### Input
* `fname`: Name or path for the elp file
* `coordinate`: Coordinate system for electrode location
* `r`: Radius for converting spherical coords
#### Output
* `elecs`: Array of electrode objects
"""
function read_elp(fname::AbstractString; coordinate = Talairach, r::Real = 90)
@info("Reading elp file = $fname")
# Create an empty electrode set
elecs = Electrode[]
# Read file and match to expected file format
file = read(fname, String)
regexp = r"(\S+)\s+(\S+)\s+(\S+)"
m = collect((m.match for m in eachmatch(regexp, file)))
# Convert label to ascii and remove '
for idx = 1:length(m)
local_matches = match(regexp, m[idx])
# Extract phi and theta
phi = parse(Float64, local_matches[2])
theta = parse(Float64, local_matches[3])
# Convert to x, y, z
x = r .* sin.(phi * (pi / 180)) .* cos.(theta * (pi / 180))
y = r .* sin.(phi * (pi / 180)) .* sin.(theta * (pi / 180)) - 17.5
z = r .* cos.(phi * (pi / 180))
push!(
elecs,
Electrode(
replace(local_matches[1], "'" => ""),
coordinate(x * u"m", y * u"m", z * u"m"),
Dict(),
),
)
end
@debug("Imported $(length(elecs)) electrodes")
return elecs
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 983 | #######################################
#
# evt file
#
#######################################
"""
Read *.evt file and convert to form for EEG.jl
"""
function read_evt(fname::AbstractString, fs::Number; kwargs...)
@info("Reading evt file: $fname")
fs = ustrip(fs)
d = readdlm(fname)
@assert (size(d, 2) <= 3) "EVT file has too many columns"
d = Dict(d[1, 1] => d[2:end, 1], d[1, 2] => d[2:end, 2], d[1, 3] => d[2:end, 3])
if haskey(d, "Tmu")
d["Index"] = [1 + round.(i * (1 / 1000000) * float(fs)) for i in d["Tmu"]]
elseif haskey(d, "Tsec")
d["Index"] = [1 + round.(i * float(fs)) for i in d["Tsec"]]
else
@warn("Unknown time scale in evt file")
end
d["Duration"] = ones(length(d["Code"]))
@info("Imported $(length(d["Code"])) events")
return Dict(
"Code" => d["Code"] .+ 252,
"Index" => d["Index"],
"Duration" => d["Duration"],
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2584 | #######################################
#
# rba file
#
#######################################
"""
Read rba from MAT file
"""
function read_rba_mat(mat_path)
# Define variables here so that they can be accessed within the scope of try constructs
modulation_frequency = NaN
stimulation_amplitude = NaN
stimulation_side = ""
participant_name = ""
carrier_frequency = NaN
# Old RBA format
try
rba = matread(mat_path)
modulation_frequency =
rba["properties"]["stimulation_properties"]["stimulus_1"]["rounded_modulation_frequency"]
carrier_frequency =
rba["properties"]["stimulation_properties"]["stimulus_1"]["rounded_carrier_frequency"]
@info("Imported matching .mat file in old format")
catch
# nothing
end
# New RBA format
try
rba = matopen(mat_path)
mat = read(rba, "properties")
modulation_frequency1 =
mat["stimulation_properties"]["stimulus_1"]["rounded_modulation_frequency"]
modulation_frequency2 =
mat["stimulation_properties"]["stimulus_2"]["rounded_modulation_frequency"]
carrier_frequency =
mat["stimulation_properties"]["stimulus_1"]["rounded_carrier_frequency"]
if modulation_frequency1 != modulation_frequency2
err("Different modulation frequency in each stimulus. Taking stimulus 1")
end
modulation_frequency = modulation_frequency1
stimulus_amplitude1 = mat["stimulation_properties"]["stimulus_1"]["amplitude"]
stimulus_amplitude2 = mat["stimulation_properties"]["stimulus_2"]["amplitude"]
if stimulus_amplitude1 == stimulus_amplitude2
stimulation_side = "Bilateral"
else
stimulation_side = stimulus_amplitude1 > stimulus_amplitude2 ? "Left" : "Right"
end
stimulation_amplitude = max(stimulus_amplitude1, stimulus_amplitude2)
participant_name = mat["metas"]["subject"]
@info("Imported matching .mat file in new format")
catch
# nothing
end
    if isnan(modulation_frequency) && stimulation_side == "" && participant_name == ""
@warn("Reading of .mat file failed")
end
@debug(
"Frequency: $modulation_frequency Side: $stimulation_side Name: $participant_name Carrier: $carrier_frequency"
)
return modulation_frequency,
stimulation_side,
participant_name,
stimulation_amplitude,
carrier_frequency
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1221 | #######################################
#
# sfp file
#
#######################################
"""
Read sfp file containing sensor locations
#### Input
* `fname`: Path for the sfp file
#### Output
* `elec`: Electrodes object
"""
function read_sfp(fname::AbstractString; coordinate = Talairach)
@info("Reading dat file = $fname")
# Create an empty electrode set
elecs = Electrode[]
# Read file and match to expected file format
file = read(fname, String)
regexp = r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
m = collect((m.match for m in eachmatch(regexp, file)))
# Convert label to ascii and remove '
for idx = 1:length(m)
local_matches = match(regexp, m[idx])
push!(
elecs,
Electrode(
replace(local_matches[1], "'" => ""),
coordinate(
parse(Float64, local_matches[2]) * u"m",
parse(Float64, local_matches[3]) * u"m",
parse(Float64, local_matches[4]) * u"m",
),
Dict(),
),
)
end
@debug("Imported $(length(elecs)) electrodes")
return elecs
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2836 |
#######################################
#
# Extract epochs
#
#######################################
"""
Extract epoch data from array of channels.
#### Input
* Array of raw data. Samples x Channels
* Dictionary of trigger information
* Vector of valid trigger numbers
* Number of first triggers to remove
* Number of end triggers to remove
#### Example
```julia
epochs = extract_epochs(data, triggers, [1,2], 0, 0)
```
"""
function extract_epochs(
data::Array{T,2},
triggers::Dict,
valid_triggers::Union{AbstractVector,Int},
remove_first::Int,
remove_last::Int,
trigger_offset::Int = 252,
) where {T<:Number}
@debug(
"Extracting epochs for $(size(data)[end]) channels using triggers $(valid_triggers)"
)
validate_triggers(triggers)
newTidx = [Int(i) for i in triggers["Index"]]
triggers = DataFrame(Code = triggers["Code"], Index = newTidx)
# TODO Use convert function
#=triggers = convert(DataFrame, triggers)=#
# Change offset so numbers are manageable
triggers[!, :Code] = triggers[!, :Code] .- trigger_offset
# Determine indices of triggers which are valid
valid_triggers = any(triggers[!, :Code] .== valid_triggers', dims = 2)[:, 1]
@debug("Number of valid triggers: $(length(valid_triggers))")
# Remove unwanted triggers
triggers = triggers[valid_triggers, :] # That aren't valid
triggers = triggers[remove_first+1:end-remove_last, :] # Often the first trigger is rubbish
lenEpochs = minimum(diff(triggers[!, :Index]))
numChans = size(data)[end]
# Check we aren't looking past the end of the data
start_indices = convert(Array{Int}, triggers[!, :Index])
end_indices = convert(Array{Int}, start_indices .+ lenEpochs .- 1)
while end_indices[end] > size(data, 1)
pop!(start_indices)
pop!(end_indices)
@debug("Removed end epoch as its not complete")
end
# Create variable for epochs
numEpochs = length(start_indices)
epochs = zeros(Float64, (Int(lenEpochs), Int(numEpochs), Int(numChans)))
# User feedback
@debug("Creating epochs: $lenEpochs x $numEpochs x $numChans")
for si = 1:length(start_indices)
epochs[:, si, :] = data[start_indices[si]:end_indices[si], :]
end
@info("Generated $numEpochs epochs of length $lenEpochs for $numChans channels")
return epochs
end
#######################################
#
# Create average epochs
#
#######################################
function average_epochs(ep::Array)
@info(
"Averaging down epochs to 1 epoch of length $(size(ep,1)) from $(size(ep,2)) epochs on $(size(ep,3)) channels"
)
dropdims(Statistics.mean(ep, dims = 2), dims = 2)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 854 |
#######################################
#
# Create sweeps
#
#######################################
function create_sweeps(epochs::Array, epochsPerSweep::Int)
epochsLen = size(epochs)[1]
epochsNum = size(epochs)[2]
chansNum = size(epochs)[3]
sweepLen = epochsLen * epochsPerSweep
    sweepNum = div(epochsNum, epochsPerSweep)
sweeps = zeros(Float64, (sweepLen, sweepNum, chansNum))
sweep = 1
while sweep <= sweepNum
sweepStart = (sweep - 1) * (epochsPerSweep) + 1
sweepStop = sweepStart + epochsPerSweep - 1
sweeps[:, sweep, :] =
reshape(epochs[:, sweepStart:sweepStop, :], (sweepLen, 1, chansNum))
sweep += 1
end
@info("Generated $sweepNum sweeps of length $sweepLen for $chansNum channels")
return sweeps
end
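# A minimal usage sketch (hypothetical `epochs` from `extract_epochs`):
#
#     epochs = extract_epochs(data, triggers, [1, 2], 0, 0)
#     sweeps = create_sweeps(epochs, 4) # combine 4 epochs per sweep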
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4258 | """
Find all dipole in an activity map.
Determines the local maxima in a 3 dimensional array
#### Input
* s: Activity in 3d matrix
* window: Windowing to use in each dimension for min max filter
* x,y,z: Coordinates associated with s matrix
#### Output
* dips: An array of dipoles
"""
function find_dipoles(
s::Array{T,3};
window::Array{Int} = [6, 6, 6],
x::AbstractVector{T} = 1:size(s, 1),
y::AbstractVector{T} = 1:size(s, 2),
z::AbstractVector{T} = 1:size(s, 3),
) where {T<:Number}
@debug("Finding dipoles for 3d array")
localmaxima_locations = findlocalmaxima(s)
peak_values = s[localmaxima_locations]
    smallest_peak_value = 0.1 * maximum(peak_values)
    # NOTE: thresholding candidate maxima at `smallest_peak_value` is currently
    # disabled; all local maxima are retained.
    localmaxima_locations_subset = localmaxima_locations
# Store dipoles in an array
dips = Dipole[]
for location in localmaxima_locations_subset
push!(
dips,
Dipole(
"Unknown",
location[1] * u"m",
location[2] * u"m",
location[3] * u"m",
0,
0,
0,
0,
0,
s[location],
),
)
end
    left_side = s[x.<0, :, :]
    a = argmax(left_side) # CartesianIndex of the largest value on the left side
    xidx = a[1]
    yidx = a[2]
    zidx = a[3]
push!(
dips,
Dipole(
"Unknown",
x[xidx[1]] * u"m",
y[yidx[1]] * u"m",
z[zidx[1]] * u"m",
0,
0,
0,
0,
0,
maximum(left_side),
),
)
    right_side = s[x.>0, :, :]
    a = argmax(right_side) # CartesianIndex of the largest value on the right side
    xidx = a[1]
    yidx = a[2]
    zidx = a[3]
    x_tmp = x[x.>0]
push!(
dips,
Dipole(
"Unknown",
x_tmp[xidx[1]] * u"m",
y[yidx[1]] * u"m",
z[zidx[1]] * u"m",
0,
0,
0,
0,
0,
maximum(right_side),
),
)
# Sort dipoles by size
vec(dips[sortperm([dip.size for dip in dips], rev = true)])
end
#######################################
#
# Find the best dipoles from selection
#
#######################################
"""
Find best dipole relative to reference location.
Finds the largest dipole within a specified distance of a reference location
#### Input
* ref: Reference coordinate or dipole
* dips: Dipoles to find the best dipole from
* maxdist: Maximum distance a dipole can be from the reference
#### Output
* dip: The best dipole
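#### Example
A minimal sketch, assuming `dips` was returned by `find_dipoles`:
```julia
ref = Talairach(0.0u"m", 0.0u"m", 0.0u"m")
dip = best_dipole(ref, dips, maxdist = 0.05)
```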
"""
function best_dipole(
ref::Union{Coordinate,Dipole},
dips::Array{Dipole};
maxdist::Number = 0.30,
min_dipole_size::Real = -Inf,
kwargs...,
)
@info("Calculating best dipole for $(length(dips)) dipoles")
dips = dips[findall([d.size > min_dipole_size for d in dips])]
if length(dips) > 0
# Find all dipoles within distance
dists = [euclidean(ref, dip) for dip in dips]
valid_dist = dists .< maxdist
if sum(valid_dist) >= 2
# Valid dipoles exist find the largest one
sizes = [dip.size for dip in dips]
bestdip = maximum(sizes[valid_dist])
dip = dips[findall(sizes .== bestdip)]
@debug("$(sum(valid_dist)) dipoles within $(maxdist) m. ")
elseif sum(valid_dist) == 1
# Return the one valid dipole
dip = dips[findall(valid_dist)]
@debug("Only one dipole within $(maxdist) m. ")
else
# No dipoles within distance
# Take the closest
bestdip = minimum(dists)
dip = dips[findall(dists .== bestdip)]
@debug("No dipole within $(maxdist) m. ")
end
@debug("Best = $(euclidean(ref, dip[1]))")
return dip[1]
else
return NaN
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4300 | """
Calculates the F test as is commonly implemented in SSR research.
TODO: Add references to MASTER and Luts et al
#### Parameters
* Sweep measurements. Samples x Sweeps x Channels
* Frequency(ies) of interest (Hz)
* Sampling rate (Hz)
* The amount of data to use on each side of frequency of interest to estimate noise (Hz)
* Filter used on the sweep data. If provided then is compensated for
* The number of bins to ignore on each side of the frequency of interest
#### Returns
* Signal to noise ratio in dB
* Signal phase at frequency of interest
* Signal power at frequency of interest
* Noise power estimated of side frequencies
* F statistic
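#### Example
A minimal sketch, assuming `sweeps` was built with `create_sweeps` from data sampled at 8192 Hz:
```julia
snrDb, phase, signal, noise, statistic = ftest(sweeps, 40.0, 8192, 2.0, nothing, 2)
```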
"""
function ftest(
sweeps::Union{Array{Float64,3},Array{Float32,3}},
freq_of_interest::Real,
fs::Real,
side_freq::Real,
used_filter::Union{DSP.FilterType,Nothing},
spill_bins::Int,
)
spectrum = Neuroimaging._ftest_spectrum(sweeps)
# No compensation is made here for prior filtering. See SSR.jl for an example of how to compensate filtering.
frequencies = range(0, stop = 1, length = Int(size(spectrum, 1))) * float(fs) / 2
ftest(spectrum, frequencies, freq_of_interest, side_freq, spill_bins)
end
function ftest(
spectrum::Array{Complex{T},2},
frequencies::AbstractArray,
freq_of_interest::Real,
side_freq::Real,
spill_bins::Int,
) where {T<:AbstractFloat}
@info(
"Calculating F statistic on $(size(spectrum)[end]) channels at $freq_of_interest Hz +-$(side_freq) Hz"
)
idx = _find_closest_number_idx(frequencies, freq_of_interest)
idx_Low = _find_closest_number_idx(frequencies, freq_of_interest - side_freq)
idx_High = _find_closest_number_idx(frequencies, freq_of_interest + side_freq)
# Determine signal phase
signal_phase = angle.(spectrum[idx, :]) # Biased response phase
# Determine signal power
signal_power = vec(abs.(spectrum[idx, :]) .^ 2) # Biased response power
# Determine noise power
noise_idxs = [
idx_Low-div(spill_bins, 2):idx-spill_bins
idx+spill_bins:idx_High+div(spill_bins, 2)
]
noise_bins = spectrum[noise_idxs, :]
noise_bins = abs.(noise_bins)
noise_power = vec(sum(noise_bins .^ 2, dims = 1) ./ size(noise_bins, 1)) # Recording noise power
# Calculate SNR
snr = (signal_power ./ noise_power) # Biased recording SNR
snrDb = 10 * log10.(snr)
# Calculate statistic
continuous_distribution = FDist(2, 2 * size(noise_bins, 1))
statistic = ccdf.(continuous_distribution, snr)
# Debugging information
@debug(
"Frequencies = [$(freq_of_interest - side_freq), $(freq_of_interest), $(freq_of_interest + side_freq)]"
)
@debug("Indicies = [$(minimum(noise_idxs)), $(idx), $(maximum(noise_idxs))]")
@debug("Noise bins = $(size(noise_bins,1))")
@debug("Signal = $(signal_power)")
@debug("Noise = $(noise_power)")
@debug("SNR = $(snr)")
@debug("SNR dB = $(snrDb)")
@debug("Stat = $(statistic)")
return snrDb, signal_phase, signal_power, noise_power, statistic
end
# Calculates the spectrum for ftest and plotting
function _ftest_spectrum(sweep::Union{Array{Float64,1},Array{Float64,2}}; ref::Int = 0)
# First dimension is samples, second dimension if existing is channels
sweepLen = size(sweep)[1]
    # Calculate the amplitude spectrum at each frequency along the first dimension
fftSweep = 2 / sweepLen * fft(sweep, 1)
spectrum = fftSweep[1:round.(Int, sweepLen / 2 + 1), :]
if ref > 0
refspec = spectrum[:, ref]
for i = 1:size(spectrum)[2]
spectrum[:, i] = spectrum[:, i] - refspec
end
end
return spectrum
end
function _ftest_spectrum(sweeps::Array{Float64,3}; ref = 0)
_ftest_spectrum(dropdims(Statistics.mean(sweeps, dims = 2), dims = 2), ref = ref)
end
#=function _ftest_spectrum(s::Array{Float32}; ref=0); _ftest_spectrum(convert(Array{AbstractFloat}, s), ref=ref); end=#
#=function _ftest_spectrum(s::Array{Float64}; ref=0); _ftest_spectrum(convert(Array{AbstractFloat}, s), ref=ref); end=#
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 663 | #######################################
#
# Global field power
#
#######################################
function gfp(x::Array)
samples, sensors = size(x)
@info("Computing global field power for $sensors sensors and $samples samples")
result = zeros(samples, 1)
for sample = 1:samples
u = vec(x[sample, :]) .- Statistics.mean(x[sample, :])
sumsqdif = 0
for sensor = 1:sensors
for sensor2 = 1:sensors
sumsqdif += (u[sensor] - u[sensor2])^2
end
end
        # Pairwise identity: sum_{i,j} (u_i - u_j)^2 = 2 * N^2 * var(u) for N sensors
        result[sample] = sqrt(sumsqdif / (2 * sensors^2))
end
return result
end
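# A minimal usage sketch (hypothetical data):
#
#     x = randn(8192, 32) # samples x sensors
#     g = gfp(x)          # global field power at each sample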
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 522 | """
Abstract type for storing Neuroimaging data.
All other neuroimaging types inherit from this type.
All neuroimaging types support the following functions:
* `samplingrate()`
* `channelnames()`
* `remove_channel!()`
* `keep_channel!()`
* `trim_channel()`
* `highpass_filter()`
* `lowpass_filter()`
* `rereference()`
# Examples
```julia
data = # load your neuroimaging data
samplingrate(data) # Returns the sampling rate
channelnames(data) # Returns the channel names
```
"""
abstract type NeuroimagingMeasurement end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1893 | """
Abstract type for coordinates in three dimensions
All sub types have x, y, z coordinates.
And conversion is available between subtypes using the convert function.
```julia
bv_coord = BrainVision(0.3, 2, 3.1)
tal_coord = convert(Talairach, bv_coord)
```
"""
abstract type Coordinate end
"""
Type for BrainVision coordinate system.
"""
mutable struct BrainVision <: Coordinate
x::typeof(1.0u"m")
y::typeof(1.0u"m")
z::typeof(1.0u"m")
BrainVision(x::AbstractQuantity, y::AbstractQuantity, z::AbstractQuantity) =
new(x, y, z)
BrainVision(x::Number, y::Number, z::Number) = new(x * u"m", y * u"m", z * u"m")
end
"""
Type for Talairach coordinate system.
"""
mutable struct Talairach <: Coordinate
x::typeof(1.0u"m")
y::typeof(1.0u"m")
z::typeof(1.0u"m")
Talairach(x::AbstractQuantity, y::AbstractQuantity, z::AbstractQuantity) = new(x, y, z)
Talairach(x::Number, y::Number, z::Number) = new(x * u"m", y * u"m", z * u"m")
end
"""
Type for SPM coordinate system.
"""
mutable struct SPM <: Coordinate
x::typeof(1.0u"m")
y::typeof(1.0u"m")
z::typeof(1.0u"m")
SPM(x::AbstractQuantity, y::AbstractQuantity, z::AbstractQuantity) = new(x, y, z)
SPM(x::Number, y::Number, z::Number) = new(x * u"m", y * u"m", z * u"m")
end
"""
Type to be used when the coordinate system is unknown.
"""
mutable struct UnknownCoordinate <: Coordinate
x::typeof(1.0u"m")
y::typeof(1.0u"m")
z::typeof(1.0u"m")
UnknownCoordinate(x::AbstractQuantity, y::AbstractQuantity, z::AbstractQuantity) =
new(x, y, z)
UnknownCoordinate(x::Number, y::Number, z::Number) = new(x * u"m", y * u"m", z * u"m")
end
import Base.show
function show(c::S) where {S<:Coordinate}
println(
"Coordinate: $(typeof(c)) - ($(c.x |> u"mm"), $(c.y |> u"mm"), $(c.z |> u"mm"))",
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 3215 | #######################################
#
# Wrapper convert functions
#
#######################################
import Base.convert
function convert(::Type{Talairach}, l::BrainVision)
x, y, z =
conv_bv2tal(l.x |> u"mm" |> ustrip, l.y |> u"mm" |> ustrip, l.z |> u"mm" |> ustrip)
Talairach(x * u"mm", y * u"mm", z * u"mm")
end
function convert(::Type{Talairach}, l::SPM)
x, y, z = conv_spm_mni2tal(
l.x |> u"mm" |> ustrip,
l.y |> u"mm" |> ustrip,
l.z |> u"mm" |> ustrip,
)
Talairach(x[1] * u"mm", y[1] * u"mm", z[1] * u"mm")
end
#######################################
#
# Convert brain vision to talairach
#
#######################################
function conv_bv2tal(
Xbv::Union{AbstractArray,Number},
Ybv::Union{AbstractArray,Number},
Zbv::Union{AbstractArray,Number};
offset::Number = 128,
)
X = -Zbv .+ offset
Y = -Xbv .+ offset
Z = -Ybv .+ offset
@info("Converted $(length(Xbv)) coordinates to talairach space from BV")
return X, Y, Z
end
#######################################
#
# Convert MNI to talairach
#
#######################################
function conv_spm_mni2tal(
Xspm::Union{AbstractArray,Number},
Yspm::Union{AbstractArray,Number},
Zspm::Union{AbstractArray,Number},
)
# Convert MNI ICMB152 coordinates as used in spm99 to talairach
# http://onlinelibrary.wiley.com/doi/10.1002/hbm.20345/abstract
# Port of http://www.brainmap.org/icbm2tal/icbm_spm2tal.m
inpoints = [Xspm Yspm Zspm]'
inpoints = [inpoints; ones(Float64, (1, size(inpoints)[2]))]
# Transformation matrices, different for each software package
icbm_spm = [
0.9254 0.0024 -0.0118 -1.0207
-0.0048 0.9316 -0.0871 -1.7667
0.0152 0.0883 0.8924 4.0926
0.0000 0.0000 0.0000 1.0000
]
# apply the transformation matrix
inpoints = icbm_spm * inpoints
X = inpoints[1, :]'
Y = inpoints[2, :]'
Z = inpoints[3, :]'
@info("Converted $(length(X)) coordinates to talairach space from SPM MNI")
return X, Y, Z
end
function conv_spm_mni2tal(elec::Electrode)
x, y, z = conv_spm_mni2tal(
elec.coordinate.x |> u"mm" |> ustrip,
elec.coordinate.y |> u"mm" |> ustrip,
elec.coordinate.z |> u"mm" |> ustrip,
)
Electrode(elec.label, Talairach(x[1] * u"mm", y[1] * u"mm", z[1] * u"mm"), elec.info)
end
# Euclidean distance for coordinates and dipoles
function Distances.euclidean(a::Union{Coordinate,Dipole}, b::Union{Coordinate,Dipole})
euclidean(
[float(a.x |> ustrip), float(a.y |> ustrip), float(a.z |> ustrip)],
[float(b.x |> ustrip), float(b.y |> ustrip), float(b.z |> ustrip)],
)
end
function Distances.euclidean(a::Union{Coordinate,Dipole}, b::V) where {V<:AbstractVector}
euclidean([float(a.x |> ustrip), float(a.y |> ustrip), float(a.z |> ustrip)], b)
end
function Distances.euclidean(a::V, b::Union{Coordinate,Dipole}) where {V<:AbstractVector}
euclidean(a, [float(b.x |> ustrip), float(b.y |> ustrip), float(b.z |> ustrip)])
end
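# A minimal usage sketch: distance between two Talairach coordinates (in metres)
#
#     a = Talairach(0.0, 0.0, 0.0)
#     b = Talairach(0.003, 0.004, 0.0)
#     euclidean(a, b) # 0.005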
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1156 | """
Dipole type.
#### Parameters
* coord_system: The coordinate system that the locations are stored in
* x,y,z: Location of dipole
* xori,yori,zori: Orientation of dipole
* color: Color of dipole for plotting
* state: State of dipole
* size: size of dipole
"""
mutable struct Dipole
coord_system::AbstractString
x::typeof(1.0u"m")
y::typeof(1.0u"m")
z::typeof(1.0u"m")
xori::Number
yori::Number
zori::Number
color::Number
state::Number
size::Number
end
import Base.show
function Base.show(io::IO, d::Dipole)
    @printf(
        io,
        "Dipole with coordinates x = % 6.2f m, y = % 6.2f m, z = % 6.2f m, size = % 9.5f\n",
ustrip(d.x),
ustrip(d.y),
ustrip(d.z),
ustrip(d.size)
)
end
function Base.show(io::IO, dips::Array{Dipole})
@printf("%d dipoles\n", length(dips))
for d in dips
        @printf(
            io,
            "  Dipole with coordinates x = % 6.2f m, y = % 6.2f m, z = % 6.2f m and size = % 9.5f\n",
ustrip(d.x),
ustrip(d.y),
ustrip(d.z),
ustrip(d.size)
)
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 649 | using Statistics
function mean(ds::Array{Dipole})
mean_x = Statistics.mean([d.x for d in ds])
mean_y = Statistics.mean([d.y for d in ds])
mean_z = Statistics.mean([d.z for d in ds])
mean_s = Statistics.mean([d.size for d in ds])
Dipole(ds[1].coord_system, mean_x, mean_y, mean_z, 0, 0, 0, 0, 0, mean_s)
end
function std(ds::Array{Dipole})
std_x = Statistics.std([d.x for d in ds])
std_y = Statistics.std([d.y for d in ds])
std_z = Statistics.std([d.z for d in ds])
std_s = Statistics.std([d.size for d in ds])
Dipole(ds[1].coord_system, std_x, std_y, std_z, 0, 0, 0, 0, 0, std_s)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1160 | function Neuroimaging.plot(
p::Plots.Plot,
dip::Union{Dipole,Coordinate};
c = :green,
m = (8, :rect),
l = "",
kwargs...,
)
Plots.scatter!(
p.subplots[1],
[ustrip(dip.x) * 1000],
[ustrip(dip.y) * 1000],
m = m,
c = c,
lab = l,
legend = false;
kwargs...,
)
Plots.scatter!(
p.subplots[2],
[ustrip(dip.y) * 1000],
[ustrip(dip.z) * 1000],
m = m,
c = c,
lab = l,
legend = false;
kwargs...,
)
Plots.scatter!(
p.subplots[3],
[ustrip(dip.x) * 1000],
[ustrip(dip.z) * 1000],
m = m,
c = c,
lab = l,
legend = true;
kwargs...,
)
return p
end
function Neuroimaging.plot(p::Plots.Plot, dips::Vector{Dipole}; l = "", kwargs...)
for dip = 1:length(dips)
if dip == length(dips)
p = Neuroimaging.plot(p, dips[dip], l = l; kwargs...)
else
p = Neuroimaging.plot(p, dips[dip], l = ""; kwargs...)
end
end
return p
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 14075 | """
Abstract type to represent Electroencephalography (EEG) data.
The following types inherit from the EEG type and can be used to process your data:
- `GeneralEEG`: Used to store data without assumption of any experimental paradigm.
- `SSR`: Used to store data acquired with a steady state response experiment paradigm.
# Examples
```julia
data = # load your EEG data using for example read_EEG()
samplingrate(data) # Returns the sampling rate
channelnames(data) # Returns the channel names
```
"""
abstract type EEG <: NeuroimagingMeasurement end
import Base.show
function Base.show(io::IO, a::EEG)
time_length = round.(size(a.data, 1) / samplingrate(a) / 60)
println(
io,
"EEG measurement of $time_length mins with $(size(a.data,2)) channels sampled at $(a.samplingrate)",
)
end
"""
Type for storing general EEG data without assumption of any experimental paradigm.
# Examples
```julia
s = read_EEG(filename)
s = rereference(s, "Cz")
s = remove_channel!(s, "Cz")
```
"""
mutable struct GeneralEEG <: EEG
data::Array
sensors::Array{Sensor}
triggers::Dict
system_codes::Dict
samplingrate::typeof(1.0u"Hz")
reference_channel::Array{AbstractString,1}
file_path::AbstractString
file_name::AbstractString
processing::Dict
header::Dict
end
GeneralEEG(args...; kwargs...) = read_EEG(args...; kwargs...)
"""
samplingrate(t::Type, s::EEG)
samplingrate(s::EEG)
Return the sampling rate of an EEG type in Hz as the requested type.
If no type is provided, the sampling rate is returned as a floating point number.
# Examples
```julia
s = read_EEG(filename)
samplingrate(s)
```
"""
samplingrate(s::EEG) = samplingrate(AbstractFloat, s)
samplingrate(t, s::EEG) = convert(t, s.samplingrate |> u"Hz" |> ustrip)
"""
channelnames(s::EEG)
Return the names of sensors in EEG measurement.
# Examples
```julia
s = read_EEG(filename)
channelnames(s)
```
"""
channelnames(s::EEG) = labels(s.sensors)
"""
channelnames(s::EEG, i::Int, l::AbstractString)
channelnames(s::EEG, l::AbstractVector{AbstractString})
Change the name of the `i`th sensor in an EEG measurement `s` to `l`,
or change the names of all sensors by passing a vector of strings.
# Examples
```julia
s = read_EEG(filename)
channelnames(s, 1, "Fp1")
```
"""
function channelnames(s::EEG, i::Int, l::S) where {S<:AbstractString}
s.sensors[i].label = l
return s
end
function channelnames(s::EEG, l::AbstractVector{S}) where {S<:AbstractString}
@assert length(l) == length(channelnames(s))
for li = 1:length(l)
s = channelnames(s, li, l[li])
end
return s
end
"""
sensors(s::EEG)
Returns the sensors for an EEG recording.
# Examples
```julia
s = read_EEG(filename)
sensors(s)
```
"""
sensors(s::EEG) = s.sensors
electrodes(s::EEG) = s.sensors
#######################################
#
# EEG type operations
#
#######################################
import Base.hcat
"""
hcat(a::EEG, b::EEG)
Concatenate two EEG measurements together, effectively creating a single long measurement.
# Examples
```julia
hcat(a, b)
```
"""
function hcat(a::EEG, b::EEG)
if channelnames(a) != channelnames(b)
throw(
ArgumentError(
string("Channels do not match $(channelnames(a)) != $(channelnames(b))"),
),
)
end
if haskey(a.processing, "epochs")
@warn("Epochs have already been extracted and will no longer be valid")
end
if haskey(a.processing, "statistics")
@warn("Statistics have already been calculated and will no longer be valid")
end
@debug(
"Appending two EEGs with $(size(a.data, 2)) .& $(size(b.data, 2)) channels and lengths $(size(a.data, 1)) $(size(b.data, 1))"
)
join_triggers(a, b)
a.data = [a.data; b.data]
return a
end
"""
Append the trigger information of one EEG type to another.
Places the trigger information at the end of the first file.
#### Example
```julia
join_triggers(a, b)
```
"""
function join_triggers(a, b; offset = size(a.data, 1))
a.triggers["Index"] = [a.triggers["Index"]; (b.triggers["Index"] .+ offset)]
a.triggers["Code"] = [a.triggers["Code"]; b.triggers["Code"]]
a.triggers["Duration"] = [a.triggers["Duration"]'; b.triggers["Duration"]']'
a
end
#######################################
#
# Change reference channels
#
#######################################
"""
rereference(a::EEG, refChan::Union{AbstractString, Array{AbstractString}}; kwargs...)
Reference data to specified channel(s).
#### Example
```julia
a = rereference(a, "Cz")
# or
a = rereference(a, ["P9", "P10"])
```
"""
function rereference(
a::EEG,
refChan::Union{S,Array{S}};
kwargs...,
) where {S<:AbstractString}
a.data = rereference(a.data, refChan, channelnames(a))
a.reference_channel = [refChan]
return a
end
#######################################
#
# Manipulate channels
#
#######################################
"""
add_channel(a::EEG, data::Vector, chanLabel::AbstractString)
Add a channel to the EEG type with the specified channel name.
# Examples
```julia
s = read_EEG(filename)
new_channel = mean(s.data, 2)
s = add_channel(s, new_channel, "MeanChannelData")
```
"""
function add_channel(a::EEG, data::Vector, chanLabel::AbstractString; kwargs...)
@info("Adding channel $chanLabel")
a.data = hcat(a.data, data)
    push!(a.sensors, Electrode(chanLabel, Talairach(NaN * u"m", NaN * u"m", NaN * u"m"), Dict()))
return a
end
"""
remove_channel!(a::EEG, channelname::AbstractString)
remove_channel!(a::EEG, channelnames::Array{AbstractString})
remove_channel!(a::EEG, channelidx::Int)
remove_channel!(a::EEG, channelidxs::Array{Int})
Remove channel(s) from EEG as specified by `channelname` or `channelidx`.
# Examples
```julia
a = read_EEG(filename)
remove_channel!(a, ["TP8", "Cz"])
```
"""
function remove_channel!(a::EEG, channel_name::S; kwargs...) where {S<:AbstractString}
@debug("Removing channel $(channel_name)")
remove_channel!(a, [channel_name])
end
function remove_channel!(
a::EEG,
channel_names::Array{S};
kwargs...,
) where {S<:AbstractString}
@debug("Removing channels $(join(channel_names, " "))")
remove_channel!(
a,
Int[something(findfirst(isequal(c), channelnames(a)), 0) for c in channel_names],
)
end
remove_channel!(a::EEG, channel_names::Int; kwargs...) =
remove_channel!(a, [channel_names]; kwargs...)
function remove_channel!(a::EEG, channel_idx::Array{Int}; kwargs...)
    if any(channel_idx .== 0)
        @warn("Failed to remove a channel")
    end
    channel_idx = channel_idx[channel_idx.!=0]
    @debug("Removing channel(s) $channel_idx")
keep_idx = [1:size(a.data)[end];]
for c in sort(channel_idx, rev = true)
try
splice!(keep_idx, c)
catch
# Nothing
end
end
if haskey(a.processing, "epochs")
if size(a.processing["epochs"], 3) == size(a.data, 2)
@debug("Removing channel(s) from epoch data")
a.processing["epochs"] = a.processing["epochs"][:, :, keep_idx]
end
end
if haskey(a.processing, "sweeps")
if size(a.processing["sweeps"], 3) == size(a.data, 2)
@debug("Removing channel(s) from sweep data")
a.processing["sweeps"] = a.processing["sweeps"][:, :, keep_idx]
end
end
a.data = a.data[:, keep_idx]
a.sensors = a.sensors[keep_idx]
return a
end
"""
keep_channel!(a::EEG, channelname::AbstractString)
keep_channel!(a::EEG, channelnames::Array{AbstractString})
keep_channel!(a::EEG, channelidxs::Array{Int})
Remove all channels except those requested from EEG.
# Examples
```julia
a = read_EEG(filename)
keep_channel!(a, ["P8", "Cz"])
```
"""
function keep_channel!(a::EEG, channel_name::AbstractString; kwargs...)
keep_channel!(a, [channel_name]; kwargs...)
end
function keep_channel!(a::EEG, channel_names::Array{S}; kwargs...) where {S<:AbstractString}
@info("Keeping channel(s) $(join(channel_names, " "))")
keep_channel!(
a,
vec(
round.(
Int,
[
something(findfirst(isequal(c), channelnames(a)), 0) for
c in channel_names
],
),
),
)
end
function keep_channel!(a::EEG, channel_idx::AbstractVector{Int}; kwargs...)
remove_channels = [1:size(a.data, 2);]
channel_idx = sort(channel_idx, rev = true)
for c in channel_idx
splice!(remove_channels, c)
end
remove_channel!(a, remove_channels; kwargs...)
end
#######################################
#
# Trim channels
#
#######################################
"""
trim_channel(a::EEG, stop::Int; start::Int=1)
Trim an EEG recording by removing data after the specified `stop` sample,
and optionally before the `start` sample.
# Examples
```julia
s = trim_channel(s, 8192*300, start=8192)
```
"""
function trim_channel(a::EEG, stop::Int; start::Int = 1, kwargs...)
@info("Trimming $(size(a.data)[end]) channels between $start and $stop")
a.data = a.data[start:stop, :]
to_keep = findall((a.triggers["Index"] .>= start) .& (a.triggers["Index"] .<= stop))
a.triggers["Index"] = a.triggers["Index"][to_keep]
a.triggers["Duration"] = a.triggers["Duration"][to_keep]
a.triggers["Code"] = a.triggers["Code"][to_keep]
a.triggers["Index"] .-= (start - 1)
to_keep =
findall((a.system_codes["Index"] .>= start) .& (a.system_codes["Index"] .<= stop))
a.system_codes["Index"] = a.system_codes["Index"][to_keep]
a.system_codes["Duration"] = a.system_codes["Duration"][to_keep]
a.system_codes["Code"] = a.system_codes["Code"][to_keep]
a.system_codes["Index"] .-= (start - 1)
return a
end
#######################################
#
# Merge channels
#
#######################################
"""
merge_channels(a::EEG, merge_Chans::Array{S}, new_name::S) where {S<:AbstractString}
merge_channels(a::EEG, merge_Chans::S, new_name::S) where {S<:AbstractString}
Average `EEG` channels listed in `merge_Chans` and label the averaged channel as `new_name`.
# Examples
```julia
s = merge_channels(s, ["P6", "P8"], "P68")
```
"""
function merge_channels(
a::EEG,
merge_Chans::Array{S},
new_name::S;
kwargs...,
) where {S<:AbstractString}
@debug("Number of original channels: $(length(channelnames(a)))")
keep_idxs =
vec([something(findfirst(isequal(i), channelnames(a)), 0) for i in merge_Chans])
if sum(keep_idxs .== 0) > 0
@warn(
"Could not merge as these channels don't exist: $(join(vec(merge_Chans[keep_idxs .== 0]), " "))"
)
keep_idxs = keep_idxs[keep_idxs.>0]
end
@info("Merging channels $(join(vec(channelnames(a)[keep_idxs,:]), " "))")
@debug("Merging channels $keep_idxs")
a = add_channel(
a,
vec(Statistics.mean(a.data[:, keep_idxs], dims = 2)),
new_name;
kwargs...,
)
end
function merge_channels(
a::EEG,
merge_Chans::S,
new_name::S;
kwargs...,
) where {S<:AbstractString}
a = merge_channels(a, [merge_Chans], new_name; kwargs...)
end
"""
read_EEG(fname::AbstractString)
read_EEG(args...)
Read a file or IO stream and store the data in an `GeneralEEG` type.
# Arguments
- `fname`: Name of the file to be read
- `min_epoch_length`: Minimum epoch length in samples. Shorter epochs will be removed (0)
- `max_epoch_length`: Maximum epoch length in samples. Longer epochs will be removed (0 = all)
- `valid_triggers`: Triggers that are considered valid, others are removed ([1,2])
- `stimulation_amplitude`: Amplitude of stimulation (NaN)
- `remove_first`: Number of epochs to be removed from start of recording (0)
- `max_epochs`: Maximum number of epochs to retain (0 = all)
# Supported file formats
- BIOSEMI (.bdf)
"""
function read_EEG(
fname::AbstractString;
valid_triggers::Array{Int} = [1, 2],
min_epoch_length::Int = 0,
max_epoch_length::Int = 0,
remove_first::Int = 0,
max_epochs::Int = 0,
kwargs...,
)
@info("Importing EEG from file: $fname")
file_path, file_name, ext = fileparts(fname)
#
# Read file data
#
# Import raw data
if ext == "bdf"
data, triggers, system_codes, samplingrate, reference_channel, header =
import_biosemi(fname; kwargs...)
else
warn("File type $ext is unknown")
end
# Create electrodes
elecs = Electrode[]
for e in header["chanLabels"]
push!(elecs, Electrode(e, Talairach(NaN * u"m", NaN * u"m", NaN * u"m"), Dict()))
end
# Create EEG type
    if unit(samplingrate) != unit(1.0u"Hz")
        samplingrate = samplingrate * 1.0u"Hz"
    end
a = GeneralEEG(
data,
elecs,
triggers,
system_codes,
samplingrate,
[reference_channel],
file_path,
file_name,
Dict(),
header,
)
#
# Clean up
#
# Remove status channel information
remove_channel!(a, "Status")
# Clean epoch index
a.triggers = clean_triggers(
a.triggers,
valid_triggers,
min_epoch_length,
max_epoch_length,
remove_first,
max_epochs,
)
return a
end
function trigger_channel(a::EEG; kwargs...)
create_channel(a.triggers, a.data, samplingrate(a))
end
function system_code_channel(a::EEG; kwargs...)
create_channel(a.system_codes, a.data, samplingrate(a))
end
"""
epoch_rejection(a::EEG; retain_percentage::Number = 0.95, kwargs...)
Reject epochs such that `retain_percentage` is retained.
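# Examples
A sketch assuming epochs have already been extracted:
```julia
a = epoch_rejection(a, retain_percentage = 0.90)  # keep the best 90% of epochs
```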
"""
function epoch_rejection(a::EEG; retain_percentage::Number = 0.95, kwargs...)
a.processing["epochs"] =
epoch_rejection(a.processing["epochs"], retain_percentage; kwargs...)
return a
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1801 | """
plot_timeseries(s::EEG; channels, fs, kwargs)
Plot an EEG recording.
Plot a detailed single-channel figure or a general multi-channel figure, depending on how many channels are requested.
#### Input
* `s`: EEG type
* `channels`: The channels you want to plot, all if not specified
* `fs`: Sample rate
* Other optional arguments are passed to the Plots.jl functions
#### Output
Returns a figure
#### Example
```julia
plot1 = plot_timeseries(s, channels=["P6", "Cz"], plot_points=8192*4)
savefig(plot1, "timeseries.pdf")
```
"""
function plot_timeseries(
s::EEG;
channels::Union{S,Array{S}} = channelnames(s),
fs::Number = samplingrate(s),
kwargs...,
) where {S<:AbstractString}
if isa(channels, AbstractString) || length(channels) == 1 || size(s.data, 2) == 1
@debug(
"Plotting single channel waveform for channel $channels from channels $(channelnames(s))"
)
fig = plot_single_channel_timeseries(
vec(keep_channel!(deepcopy(s), channels).data),
samplingrate(s);
kwargs...,
)
else
# Find index of requested channels
idx = [something(findfirst(isequal(n), channelnames(s)), 0) for n in channels]
        idx = idx[idx.!=0] # If some channels can't be found then plot what we can
        if length(idx) != length(channels)
            @warn("Can't find the index of all requested channels")
end
@debug("Plotting multi channel waveform for channels $(channelnames(s)[idx])")
fig = plot_multi_channel_timeseries(
s.data[:, idx],
samplingrate(s),
channelnames(s)[idx];
kwargs...,
)
end
return fig
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 926 | mutable struct Leadfield{T<:AbstractFloat,S<:AbstractString}
L::Array{T,3}
x::Vector{T}
y::Vector{T}
z::Vector{T}
sensors::Vector{S}
end
function Base.show(io::IO, l::Leadfield)
@printf "Leadfield\n"
@printf " Number of sources: %d\n" size(l.L, 1)
@printf " Number of dimensions + nulls: %d\n" size(l.L, 2)
@printf " Number of sensors: %d\n" size(l.L, 3)
end
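"""
    match_leadfield(l::Leadfield, s::SSR)

Reorder the sensor dimension of leadfield `l` to match the channel order of `s`.
An error is raised if the channels of `s` cannot all be matched to unique
leadfield sensors.
"""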
function match_leadfield(l::Leadfield, s::SSR)
@info("Matching leadfield to SSR")
idx = [something(findfirst(isequal(name), l.sensors), 0) for name in channelnames(s)]
if length(unique(idx)) < length(idx)
error(
"Not all SSR channels mapped to sensor #SSR=$(length(channelnames(s))), #L=$(length(l.sensors))",
)
end
l.L = l.L[:, :, idx]
l.sensors = l.sensors[idx]
@debug("matched $(length(idx)) sensors")
return l
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 650 | """
Find the index of the location of a coordinate or dipole in a leadfield
"""
find_location(l, d::Union{Dipole,Coordinate}) =
find_location(l::Leadfield, d.x |> ustrip, d.y |> ustrip, d.z |> ustrip)
function find_location(l::Leadfield, x::Number, y::Number, z::Number)
valid_x = l.x .== x
valid_y = l.y .== y
valid_z = l.z .== z
idx = findall(valid_x .& valid_y .& valid_z)
if isempty(idx)
dists = [euclidean([l.x[i], l.y[i], l.z[i]], [x, y, z]) for i = 1:length(l.x)]
idx = something(findfirst(isequal(minimum(dists)), dists), 0)
else
idx = idx[1]
end
return idx
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1760 | """
plot_spectrum(eeg::SSR, chan::Int; kwargs...)
plot_spectrum(eeg::SSR, chan::AbstractString; kwargs...)
Plot the spectrum of a steady state response measurement.
"""
function plot_spectrum(eeg::SSR, chan::Int; targetFreq::Number = modulationrate(eeg))
channel_name = channelnames(eeg)[1]
# Check through the processing to see if we have done a statistical test at target frequency
signal = nothing
result_idx = find_keys_containing(eeg.processing, "statistics")
for r = 1:length(result_idx)
result = get(eeg.processing, collect(keys(eeg.processing))[result_idx[r]], 0)
if result[!, :AnalysisFrequency][1] == targetFreq
result_snr = result[!, :SNRdB][chan]
signal = result[!, :SignalAmplitude][chan]^2
noise = result[!, :NoiseAmplitude][chan]^2
title = "Channel $(channel_name). SNR = $(round(result_snr, sigdigits=4)) dB"
end
end
    if signal === nothing
title = "Channel $(channel_name)"
noise = 0
signal = 0
end
title = replace(title, "_" => " ")
avg_sweep = dropdims(Statistics.mean(eeg.processing["sweeps"], dims = 2), dims = 2)
avg_sweep = avg_sweep[:, chan]
avg_sweep = convert(Array{Float64}, vec(avg_sweep))
p = plot_spectrum(
avg_sweep,
eeg.header["sampRate"][1];
titletext = title,
targetFreq = targetFreq,
noise_level = noise,
signal_level = signal,
)
return p
end
function plot_spectrum(
eeg::SSR,
chan::AbstractString;
targetFreq::Number = modulationrate(eeg),
)
return plot_spectrum(
eeg,
something(findfirst(isequal(chan), channelnames(eeg)), 0),
targetFreq = targetFreq,
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4528 | #######################################
#
# Filtering
#
#######################################
"""
highpass_filter(a::SSR; cutOff::Real=2, fs::Real=samplingrate(a), order::Int=3, tolerance::Real=0.01, kwargs...)
Apply a high pass filter.
A zero phase high pass filter is applied to the data using `filtfilt`.
A check is performed to ensure the filter does not affect the modulation rate.
The filter coefficients are stored in the processing field.
# Examples
```julia
a = read_SSR(fname)
b = highpass_filter(a)
c = highpass_filter(a, cutOff = 1)
```
"""
function highpass_filter(
a::SSR;
cutOff::Real = 2,
fs::Real = samplingrate(a),
order::Int = 3,
tolerance::Real = 0.01,
kwargs...,
)
a.data, f = highpass_filter(a.data, cutOff, fs, order)
_filter_check(f, modulationrate(a), fs, tolerance)
_append_filter(a, f)
end
"""
lowpass_filter(a::SSR; cutOff::Real=150, fs::Real=samplingrate(a), order::Int=3, tolerance::Real=0.01, kwargs...)
Apply a low pass filter.
A zero phase low pass filter is applied to the data using `filtfilt`.
A check is performed to ensure the filter does not affect the modulation rate.
The filter coefficients are stored in the processing field.
# Examples
```julia
a = read_SSR(fname)
b = lowpass_filter(a)
c = lowpass_filter(a, cutOff = 1)
```
"""
function lowpass_filter(
a::SSR;
cutOff::Real = 150,
fs::Real = samplingrate(a),
order::Int = 3,
tolerance::Real = 0.01,
kwargs...,
)
a.data, f = lowpass_filter(a.data, cutOff, fs, order)
_filter_check(f, modulationrate(a), fs, tolerance)
#= _append_filter(a, f) =#
return a
end
"""
bandpass_filter(a::SSR; lower::Number=modulationrate(a) - 1, upper::Number=modulationrate(a) + 1, fs::Real=samplingrate(a), n::Int=24, rp::Number = 0.0001, tolerance::Real=0.01, kwargs...)
Apply a band pass filter.
A check is performed to ensure the filter does not affect the modulation rate.
The filter coefficients are stored in the processing field.
# Examples
```julia
a = read_SSR(fname)
a = bandpass_filter(a)
```
"""
function bandpass_filter(
a::SSR;
lower::Number = modulationrate(a) - 1,
upper::Number = modulationrate(a) + 1,
n::Int = 24,
rp::Number = 0.0001,
tolerance::Number = 0.01,
kwargs...,
)
# Type 1 Chebychev filter
# The default options here are optimised for modulation frequencies 4, 10, 20, 40, 80
# TODO filter check does not work here. Why not?
# TODO automatic minimum filter order selection
a.data, f = bandpass_filter(a.data, lower, upper, samplingrate(a), n, rp)
_filter_check(f, modulationrate(a), samplingrate(a), tolerance)
_append_filter(a, f)
end
function _filter_check(
f::FilterCoefficients,
mod_freq::Number,
fs::Number,
tolerance::Number,
)
#
# Ensure that the filter does not alter the modulation frequency greater than a set tolerance
#
mod_change = abs.(freqresp(f, mod_freq * ((2pi) / fs)))
if mod_change > 1 + tolerance || mod_change < 1 - tolerance
@warn(
"Filtering has modified modulation frequency greater than set tolerance: $mod_change"
)
end
@debug("Filter magnitude at modulation frequency: $(mod_change)")
end
function _append_filter(a::SSR, f::FilterCoefficients; name::AbstractString = "filter")
#
# Put the filter information in the SSR processing structure
#
key_name = new_processing_key(a.processing, name)
merge!(a.processing, Dict(key_name => f))
return a
end
#######################################
#
# Downsample
#
#######################################
"""
downsample(s::SSR, ratio::Rational)
Downsample signal by specified ratio.
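# Examples
A sketch reducing the sampling rate by a factor of four:
```julia
s = downsample(s, 1 // 4)
```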
"""
function downsample(s::SSR, ratio::Rational)
@info("Downsampling SSR by ratio $ratio")
dec_filter = DSP.FIRFilter([1], ratio)
new_data =
zeros(typeof(s.data[1, 1]), round.(Int, size(s.data, 1) * ratio), size(s.data, 2))
for c = 1:size(s.data, 2)
new_data[:, c] = DSP.filt(dec_filter, vec(s.data[:, c]))
end
s.data = new_data
s.triggers["Index"] = round.(Int, s.triggers["Index"] .* ratio)
if s.triggers["Index"][1] == 0
s.triggers["Index"][1] = 1
end
s.samplingrate = float(samplingrate(s) * ratio) * 1.0u"Hz"
return s
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 5904 | #######################################
#
# Read SSR
#
#######################################
"""
## Read SSR from file or IO stream
Read a file or IO stream and store the data in an `SSR` type.
Matching .mat files are read and modulation frequency information extracted.
Failing that, user passed arguments are used or the modulation frequency is extracted from the file name.
#### Arguments
* `fname`: Name of the file to be read
* `min_epoch_length`: Minimum epoch length in samples. Shorter epochs will be removed (0)
* `max_epoch_length`: Maximum epoch length in samples. Longer epochs will be removed (0 = all)
* `valid_triggers`: Triggers that are considered valid, others are removed ([1,2])
* `stimulation_amplitude`: Amplitude of stimulation (NaN)
* `modulationrate`: Modulation frequency of SSR stimulation (NaN)
* `carrier_frequency`: Carrier frequency (NaN)
* `participant_name`: Name of participant ("")
* `remove_first`: Number of epochs to be removed from start of recording (0)
* `max_epochs`: Maximum number of epochs to retain (0 = all)
* `env` (nothing)
* `bkt` ("")
#### Supported file formats
* BIOSEMI (.bdf)
"""
function read_SSR(
fname::String;
stimulation_amplitude::Number = NaN, # User can set these
modulationrate::Number = NaN, # values, but if not
carrier_frequency::Number = NaN, # then attempt to read
stimulation_side::AbstractString = "", # from file name or mat
participant_name::AbstractString = "",
valid_triggers::Array{Int} = [1, 2],
min_epoch_length::Int = 0,
max_epoch_length::Int = 0,
remove_first::Int = 0,
max_epochs::Int = 0,
env = nothing,
bkt = "",
kwargs...,
)
@info("Importing SSR from file: $fname")
file_path, file_name, ext = fileparts(fname)
if env != nothing
@debug("File type is S3")
fname = S3.get_object(env, bkt, fname).obj
end
#
# Extract meta data
#
# Extract frequency from the file name if not set manually
if occursin("Hz", file_name) && isnan(modulationrate)
a = match(r"[-_](\d+[_.]?[\d+]?)Hz|Hz(\d+[_.]?[\d+]?)[-_]", file_name).captures
modulationrate = parse(Float64, a[[i !== nothing for i in a]][1]) * 1.0u"Hz"
@debug("Extracted modulation frequency from file name: $modulationrate")
end
# Or even better if there is a mat file read it
mat_path = string(file_path, file_name, ".mat")
if !isfile(mat_path)
mat_path = string(file_path, file_name, "-properties.mat")
end
if isfile(mat_path)
modulationrate,
stimulation_side,
participant_name,
stimulation_amplitude,
carrier_frequency = read_rba_mat(mat_path)
valid_mat = true
else
valid_mat = false
end
#
# Read file data
#
# Import raw data
if ext == "bdf"
data, triggers, system_codes, samplingrate, reference_channel, header =
import_biosemi(fname; kwargs...)
else
warn("File type $ext is unknown")
end
# Create electrodes
elecs = Electrode[]
for e in header["chanLabels"]
push!(elecs, Electrode(e, Talairach(NaN * u"m", NaN * u"m", NaN * u"m"), Dict()))
end
# Create SSR type
    if unit(modulationrate) != unit(1.0u"Hz")
        modulationrate = modulationrate * 1.0u"Hz"
    end
    if unit(samplingrate) != unit(1.0u"Hz")
        samplingrate = samplingrate * 1.0u"Hz"
    end
a = SSR(
data,
elecs,
triggers,
system_codes,
samplingrate,
modulationrate,
[reference_channel],
file_path,
file_name,
Dict(),
header,
)
# If a valid mat file was found then store that information with the raw header
if valid_mat
a.header["rbadata"] = matread(mat_path)
end
#
# Store meta data in processing dictionary
#
if stimulation_side != ""
a.processing["Side"] = stimulation_side
end
if participant_name != ""
a.processing["Name"] = participant_name
end
if !isnan(stimulation_amplitude)
a.processing["Amplitude"] = stimulation_amplitude
end
if !isnan(carrier_frequency)
a.processing["Carrier_Frequency"] = carrier_frequency
end
#
# Clean up
#
# Remove status channel information
remove_channel!(a, "Status")
# Clean epoch index
a.triggers = clean_triggers(
a.triggers,
valid_triggers,
min_epoch_length,
max_epoch_length,
remove_first,
max_epochs,
)
return a
end
#######################################
#
# Read event files
#
#######################################
function read_evt(a::SSR, fname::AbstractString; kwargs...)
d = read_evt(fname, a.samplingrate; kwargs...)
validate_triggers(d)
a.triggers = d
return a
end
#######################################
#
# File IO
#
#######################################
function write_SSR(
a::SSR,
fname::S;
chanLabels = channelnames(a),
subjID = a.header["subjID"],
startDate = a.header["startDate"],
startTime = a.header["startTime"],
kwargs...,
) where {S<:AbstractString}
fname = convert(String, fname)
@info("Saving $(size(a.data)[end]) channels to $fname")
writeBDF(
fname,
a.data',
vec(trigger_channel(a)),
vec(system_code_channel(a)),
samplingrate(Int, a),
chanLabels = convert(Array{S,1}, chanLabels),
startDate = startDate,
startTime = startTime,
subjID = subjID,
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4304 | """
function extract_epochs(a::SSR; valid_triggers::Union{AbstractArray,Int} = [1, 2], remove_first::Int = 0, remove_last::Int = 0, kwargs...)
Extract epoch data from SSR
# Arguments
* `a`: A SSR object
* `valid_triggers`: Trigger numbers that are considered valid ([1,2])
* `remove_first`: Remove the first n triggers (0)
* `remove_last`: Remove the last n triggers (0)
# Examples
```julia
epochs = extract_epochs(SSR, valid_triggers=[1,2])
```
"""
function extract_epochs(
a::SSR;
valid_triggers::Union{AbstractArray,Int} = [1, 2],
remove_first::Int = 0,
remove_last::Int = 0,
kwargs...,
)
merge!(
a.processing,
Dict(
"epochs" => extract_epochs(
a.data,
a.triggers,
valid_triggers,
remove_first,
remove_last,
),
),
)
return a
end
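"""
    create_sweeps(a::SSR; epochsPerSweep::Int = 64, kwargs...)

Combine extracted epochs into sweeps of `epochsPerSweep` epochs each and
store the result in `a.processing["sweeps"]`.
An error is raised if fewer epochs are available than requested per sweep.
# Examples
```julia
s = extract_epochs(s)
s = create_sweeps(s, epochsPerSweep = 4)
```
"""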
function create_sweeps(a::SSR; epochsPerSweep::Int = 64, kwargs...)
if epochsPerSweep > size(a.processing["epochs"], 2)
error("Sweep length is longer than number of epochs will allow")
end
merge!(
a.processing,
Dict("sweeps" => create_sweeps(a.processing["epochs"], epochsPerSweep)),
)
return a
end
#######################################
#
# Add triggers for more epochs
#
#######################################
function add_triggers(a::SSR; kwargs...)
@debug("Adding triggers to reduce SSR. Using SSR modulation frequency")
add_triggers(a, modulationrate(a); kwargs...)
end
function add_triggers(a::SSR, mod_freq::Number; kwargs...)
@debug("Adding triggers to reduce SSR. Using $(mod_freq)Hz")
epochIndex =
DataFrame(Code = a.triggers["Code"], Index = [Int(i) for i in a.triggers["Index"]])
epochIndex[!, :Code] = epochIndex[!, :Code] .- 252
add_triggers(a, mod_freq, epochIndex; kwargs...)
end
function add_triggers(
a::SSR,
mod_freq::Number,
epochIndex;
cycle_per_epoch::Int = 1,
kwargs...,
)
@info(
"Adding triggers to reduce SSR. Reducing $(mod_freq)Hz to $cycle_per_epoch cycle(s)."
)
# Existing epochs
existing_epoch_length = median(diff(epochIndex[!, :Index])) # samples
existing_epoch_length_s = existing_epoch_length / samplingrate(a)
@debug("Existing epoch length: $(existing_epoch_length_s)s")
# New epochs
new_epoch_length_s = cycle_per_epoch / mod_freq
new_epochs_num = round.(existing_epoch_length_s / new_epoch_length_s) - 2
new_epoch_times = collect(1:new_epochs_num) * new_epoch_length_s
new_epoch_indx = [0; round.(new_epoch_times * samplingrate(a))]
@debug("New epoch length = $new_epoch_length_s")
@debug("New # epochs = $new_epochs_num")
# Place new epoch indices
@debug("Was $(length(epochIndex[!, :Index])) indices")
new_indx = epochIndex[!, :Index][1:end-1] .+ new_epoch_indx'
new_indx = reshape(new_indx', length(new_indx), 1)[1:end-1]
@debug("Now $(length(new_indx)) indices")
# Place in dict
new_code = round.(Int, ones(1, length(new_indx))) .+ 252
a.triggers = Dict(
"Index" => vec((new_indx)'),
"Code" => vec(new_code),
"Duration" => ones(length(new_code), 1)',
)
    # TODO: Possibly a trigger duration of one is not long enough
return a
end
#######################################
#
# Rejection channels
#
#######################################
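"""
    channel_rejection(a::SSR; threshold_abs::Number = 1000, threshold_var::Number = 2, kwargs...)

Reject channels using an absolute amplitude threshold and a variance threshold.
Epoched data is used if it has been extracted, otherwise the raw data,
and the rejected channels are removed from the recording.
"""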
function channel_rejection(
a::SSR;
threshold_abs::Number = 1000,
threshold_var::Number = 2,
kwargs...,
)
# Run on epochs if available, else run on the raw data
if haskey(a.processing, "epochs")
data = reshape(
a.processing["epochs"],
size(a.processing["epochs"], 1) * size(a.processing["epochs"], 2),
size(a.processing["epochs"], 3),
)
else
data = a.data
end
valid = vec(channel_rejection(data, threshold_abs, threshold_var))
@info(
"Rejected $(sum(.!valid)) channels $(join(channelnames(a)[findall(.!valid)], " "))"
)
remove_channel!(a, channelnames(a)[.!valid])
return a
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 3050 | """
Type for storing data acquired with a steady state response (SSR) experimental paradigm.
In addition to the functions available for all EEG types,
the SSR type supports:
* `modulationrate()`
The following standard names are used when saving data to the processing dictionary.
* `Name`: The identifier for the participant
* `Side`: Side of stimulation
* `Carrier_Frequency`: Carrier frequency of the stimulus
* `Amplitude`: Amplitude of the stimulus
* `epochs`: The epochs extracted from the recording
* `sweeps`: The extracted sweeps from the recording
#### Example
```julia
s = read_SSR("filename")
s.modulationrate = 40.0391u"Hz"
s = rereference(s, "Cz")
```
"""
mutable struct SSR <: EEG
data::Array
sensors::Array{Sensor}
triggers::Dict
system_codes::Dict
samplingrate::typeof(1.0u"Hz")
modulationrate::typeof(1.0u"Hz")
reference_channel::Array{AbstractString,1}
file_path::AbstractString
file_name::AbstractString
processing::Dict
header::Dict
end
#######################################
#
# SSR info
#
#######################################
"""
Return the modulation rate of a steady state type.
If no type is provided, the modulation rate is returned as a floating point number.
#### Example
Return the modulation rate of a recording
```julia
s = read_SSR(filename)
modulationrate(s)
```
"""
modulationrate(t, s::SSR) = convert(t, ustrip(s.modulationrate))
modulationrate(s::SSR) = modulationrate(AbstractFloat, s)
#######################################
#
# Show
#
#######################################
import Base.show
function Base.show(io::IO, a::SSR)
time_length = round.(size(a.data, 1) / samplingrate(a) / 60)
println(
io,
"SSR measurement of $time_length mins with $(size(a.data,2)) channels sampled at $(a.samplingrate)",
)
println(io, " Modulation frequency: $(a.modulationrate )")
if haskey(a.processing, "Amplitude")
println(io, " Stimulation amplitude: $(a.processing["Amplitude"]) dB")
end
if haskey(a.processing, "Name")
println(io, " Participant name: $(a.processing["Name"] )")
end
if haskey(a.processing, "Side")
println(io, " Stimulation side: $(a.processing["Side"] )")
end
if haskey(a.processing, "Carrier_Frequency")
println(io, " Carrier frequency: $(a.processing["Carrier_Frequency"] ) Hz")
end
end
#######################################
#
# Helper functions
#
#######################################
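"""
    assr_frequency(rounded_freq::Number; stimulation_samplingrate::Number = 32000, stimulation_frames_per_epoch::Number = 32768)

Return the frequency closest to `rounded_freq` that contains an integer number
of stimulation cycles per epoch of stimulation.
# Examples
```julia
assr_frequency(40)  # 40.0390625
```
"""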
function assr_frequency(
rounded_freq::Number;
stimulation_samplingrate::Number = 32000,
stimulation_frames_per_epoch::Number = 32768,
)
round.(rounded_freq / (stimulation_samplingrate / stimulation_frames_per_epoch)) *
stimulation_samplingrate / stimulation_frames_per_epoch
end
function assr_frequency(rounded_freq::AbstractVector)
[assr_frequency(f) for f in rounded_freq]
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 3661 | #######################################
#
# FTest
#
#######################################
"""
ftest(s::SSR)
ftest(s::SSR; kwargs...)
Run F-test statistics on a steady state response measurement.
# Arguments
* `freq_of_interest`: Frequency to analyse for presence of a response
* `side_freq`: Adjacent frequencies above and below the frequency of interest used to quantify the noise
* `ID`: Participant ID for storage in resulting dataframe
* `spill_bins`: Number of bins each side of the FFT bin of interest to ignore in noise computation
* `results_key`: Dictionary key name to store results in `s.processing`
# Examples
```julia
s = read_SSR(fname)
s.modulationrate = 33.2u"Hz"
s = ftest(s)
println(s.processing["statistics"])
```
# Reference
Hofmann, M., Wouters, J. Improved Electrically Evoked Auditory Steady-State Response Thresholds in Humans. JARO 13, 573–589 (2012). https://doi.org/10.1007/s10162-012-0321-8
Luke, Robert, and Jan Wouters. "Kalman filter based estimation of auditory steady state response parameters." IEEE Transactions on Neural Systems and Rehabilitation Engineering 25.3 (2016): 196-204.
"""
function ftest(
s::SSR;
freq_of_interest::Union{Real,AbstractArray} = modulationrate(s),
side_freq::Number = 0.5,
ID::AbstractString = "",
spill_bins::Int = 2,
results_key::AbstractString = "statistics",
kwargs...,
)
# Do calculation here once, instead of in each low level call
spectrum = Neuroimaging._ftest_spectrum(s.processing["sweeps"])
spectrum = compensate_for_filter(s.processing, spectrum, samplingrate(s))
frequencies = range(0, stop = 1, length = Int(size(spectrum, 1))) * samplingrate(s) / 2
for freq in freq_of_interest
snrDb, phase, signal, noise, statistic =
ftest(spectrum, frequencies, freq, side_freq, spill_bins)
result = DataFrame(
ID = vec(repeat([ID], length(channelnames(s)), 1)),
Channel = copy(channelnames(s)),
ModulationRate = copy(modulationrate(s)),
AnalysisType = vec(repeat(["F-test"], length(channelnames(s)))),
AnalysisFrequency = vec(repeat([freq], length(channelnames(s)))),
SignalAmplitude = vec(sqrt.(signal)),
SignalPhase = vec(phase),
NoiseAmplitude = vec(sqrt.(noise)),
SNRdB = vec(snrDb),
Statistic = vec(statistic),
)
result = add_dataframe_static_rows(result, kwargs)
if haskey(s.processing, results_key)
s.processing[results_key] = vcat(s.processing[results_key], result)
else
s.processing[results_key] = result
end
end
return s
end
#######################################
#
# Helper functions
#
#######################################
"""
Save the processing information from an SSR type to a CSV file.
By default the information stored under the key `statistics` is saved,
but this can be modified by the user via `results_key`.
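# Examples
A sketch assuming an F-test has been run:
```julia
s = ftest(s)
s = save_results(s)  # writes the statistics table to "<file_name>.csv"
```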
"""
# Save ftest results to file
function save_results(
a::SSR;
name_extension::AbstractString = "",
results_key::AbstractString = "statistics",
kwargs...,
)
file_name = string(a.file_name, name_extension, ".csv")
# Rename to save space
results = a.processing
# Index of keys to be exported
result_idx = find_keys_containing(results, results_key)
if length(result_idx) > 0
to_save = get(results, collect(keys(results))[result_idx[1]], 0)
CSV.write(file_name, to_save)
end
@info("File saved to $file_name")
return a
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1589 | """
match_sensors(sens::Array{S}, lbls::Array{AS}) where {AS<:AbstractString, S<:Sensor}
Match a set of electrodes to those provided
# Examples
```julia
lf, valid = match_sensors(electrodes, sensor_labels)
```
"""
function match_sensors(sens::Array{S}, lbls::Array{AS}) where {AS<:AbstractString,S<:Sensor}
valid_idx = Int[]
for label in lbls
matched_idx = something(findfirst(isequal(label), labels(sens)), 0)
if matched_idx != 0
push!(valid_idx, matched_idx)
end
@debug(
"Label $label matched to $( matched_idx == 0 ? "!! nothing !!" : sens[matched_idx].label)"
)
end
sens = sens[valid_idx]
return sens, valid_idx
end
function match_sensors(
lf::Array,
lf_labels::Array{S},
labels::Array{S},
) where {S<:AbstractString}
# Match the sensors in a leadfield array to those provided
#
# usage: lf, valid = match_sensors(leadfield, leadfield_labels, sensor_labels)
valid_idx = Int[]
for label in labels
matched_idx = something(findfirst(isequal(label), lf_labels), 0)
if matched_idx != 0
push!(valid_idx, matched_idx)
end
@debug(
"Label $label matched to $( matched_idx == 0 ? "!! nothing !!" : lf_labels[matched_idx])"
)
end
@info(
"Leadfield had $(length(lf_labels)) channels, now has $(length(valid_idx)) channels"
)
lf = lf[:, :, valid_idx]
lf_labels = lf_labels[valid_idx]
return lf, valid_idx
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2153 | """
Abstract type for storing neuroimaging sensors.
Other types inherit from the Sensor type,
and common functions can be run on all sensor subtypes.
All sensors have a label and coordinate.
Some sensors also store additional information.
For example, fNIRS sensors may hold wavelength information.
All Sensor types support the following functions:
* `label()`
* `labels()`
* `x()`
* `y()`
* `z()`
```julia
my_sensor = # Create a electrode, optode etc
label(my_sensor) # Returns the sensor name
x(my_sensor) # Returns the x coordinate of the sensor
```
"""
abstract type Sensor end
"""
Electrode sensor type used in EEG measurements.
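# Examples
A construction sketch (coordinates are illustrative only):
```julia
e = Electrode("Cz", Talairach(0.0u"m", 0.0u"m", 0.09u"m"), Dict())
```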
"""
mutable struct Electrode <: Sensor
label::AbstractString
coordinate::Coordinate
info::Dict
end
"""
Optode abstract sensor type used in fNIRS measurements.
"""
abstract type Optode <: Sensor end
"""
Source optode sensor type used in fNIRS measurements.
"""
mutable struct Source <: Optode
label::AbstractString
coordinate::Coordinate
info::Dict
end
"""
Detector optode sensor type used in fNIRS measurements.
"""
mutable struct Detector <: Optode
label::AbstractString
coordinate::Coordinate
info::Dict
end
import Base.show
function show(s::S) where {S<:Sensor}
println(
"Sensor: $(s.label) $(typeof(s)) - ($(s.coordinate.x), $(s.coordinate.y), $(s.coordinate.z)) ($(typeof(s.coordinate)))",
)
end
function show(s::Array{S}) where {S<:Sensor}
println("$(length(s)) sensors: $(typeof(s[1])) ($(typeof(s[1].coordinate)))")
end
label(s::S) where {S<:Sensor} = s.label
label(s::Array{S,1}) where {S<:Sensor} = [si.label for si in s]
labels(s::S) where {S<:Sensor} = label(s)
labels(s::Array{S}) where {S<:Sensor} = label(s)
x(s::S) where {S<:Sensor} = s.coordinate.x
y(s::S) where {S<:Sensor} = s.coordinate.y
z(s::S) where {S<:Sensor} = s.coordinate.z
x(s::Array{S}) where {S<:Sensor} = [si.coordinate.x for si in s]
y(s::Array{S}) where {S<:Sensor} = [si.coordinate.y for si in s]
z(s::Array{S}) where {S<:Sensor} = [si.coordinate.z for si in s]
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1294 | #######################################
#
# Standard electrode sets
#
#######################################
EEG_64_10_20 = [
"Fp1",
"F3",
"FC5",
"C3",
"CP5",
"P3",
"PO7",
"Oz",
"Fpz",
"AFz",
"F6",
"FC4",
"C2",
"TP8",
"P2",
"P10",
"AF7",
"F5",
"FC3",
"C5",
"CP3",
"P5",
"PO3",
"POz",
"Fp2",
"Fz",
"F8",
"FC2",
"C4",
"CP6",
"P4",
"PO8",
"AF3",
"F7",
"FC1",
"T7",
"CP1",
"P7",
"O1",
"Pz",
"AF8",
"F2",
"FT8",
"FCz",
"C6",
"CP4",
"P6",
"PO4",
"F1",
"FT7",
"C1",
"TP7",
"P1",
"P9",
"Iz",
"CPz",
"AF4",
"F4",
"FC6",
"Cz",
"T8",
"CP2",
"P8",
"O2",
]
EEG_Vanvooren_2014 = [
"TP7",
"P9",
"P7",
"P5",
"P3",
"P1",
"PO7",
"PO3",
"O1",
"P2",
"P4",
"P6",
"P10",
"TP8",
"PO4",
"PO8",
"O2",
"TP8",
]
EEG_Vanvooren_2014_Left = ["TP7", "P9", "P7", "P5", "P3", "P1", "PO7", "PO3", "O1"]
EEG_Vanvooren_2014_Right = ["P2", "P4", "P6", "P10", "TP8", "PO4", "PO8", "O2", "TP8"]
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2256 | function find_dipoles(vi::VolumeImage; kwargs...)
@info("Finding dipoles for volume image")
if size(vi.data, 4) > 1
@warn("Can not squeeze 4d volume image to 3d. Please reduce first.")
end
x = [ustrip(xi) for xi in vi.x]
y = [ustrip(yi) for yi in vi.y]
z = [ustrip(zi) for zi in vi.z]
# List comprehension returns type any which needs to be changed
x = convert(Array{AbstractFloat}, x)
y = convert(Array{AbstractFloat}, y)
z = convert(Array{AbstractFloat}, z)
unique_dipoles(
find_dipoles(dropdims(vi.data, dims = 4), x = x, y = y, z = z; kwargs...),
)
end
# function new_dipole_method(vi::VolumeImage; min_size::Real = 1, kwargs...)
# old_dips = find_dipoles(vi; kwargs...)
# new_dips = Dipole[]
# for dip in old_dips
# threshold = 0.9 * dip.size
# tmp_vi = deepcopy(vi)
# tmp_vi.data[tmp_vi.data.<threshold] = 0
# val_x = abs.(tmp_vi.x .- (dip.x)) .> (0.015u"m")
# val_y = abs.(tmp_vi.y .- (dip.y)) .> (0.015u"m")
# val_z = abs.(tmp_vi.z .- (dip.z)) .> (0.015u"m")
# tmp_vi.data[val_x, :, :] = 0
# tmp_vi.data[:, val_y, :] = 0
# tmp_vi.data[:, :, val_z] = 0
# valid = tmp_vi.data .> threshold
# x_loc = mean(
# vi.x[findall(dropdims(sum(valid, [2, 3]), (2, 3)))] / (1.0 * SIUnits.Meter0),
# )
# y_loc = mean(
# vi.y[findall(dropdims(sum(valid, [1, 3]), (1, 3)))] / (1.0 * SIUnits.Meter0),
# )
# z_loc = mean(
# vi.z[findall(dropdims(sum(valid, [1, 2]), (1, 2)))] / (1.0 * SIUnits.Meter0),
# )
# x, y, z, t = find_location(vi, x_loc, y_loc, z_loc)
# s = vi.data[x, y, z, t]
# push!(new_dips, Dipole("Talairach", x_loc, y_loc, z_loc, 0, 0, 0, 0, 0, s))
# end
# new_dips = new_dips[findall([d.size > min_size for d in new_dips])]
# unique_dipoles(new_dips)
# end
unique_dipoles(dips::Array{Dipole}) =
    dips[findall(.![false; diff([d.size for d in dips]) .== 0])]
# lowest_dipole(dips = Array{Dipoles}) =
# dips[findall([d.z for d in dips] .== minimum([d.z for d in dips]))]
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4500 | import Base: +, -, /, *, maximum, minimum, isequal, ==
using Statistics
# +
function +(vi1::VolumeImage, vi2::VolumeImage)
dimensions_equal(vi1, vi2)
@debug("Adding two volume images with $(size(vi1.data, 4)) time instances")
vout = deepcopy(vi1)
vout.data = vi1.data .+ vi2.data
return vout
end
# -
function -(vi1::VolumeImage, vi2::VolumeImage)
dimensions_equal(vi1, vi2)
@debug("Subtracting two volume images with $(size(vi1.data, 4)) time instances")
vout = deepcopy(vi1)
vout.data = vi1.data .- vi2.data
return vout
end
# /
function /(vi1::VolumeImage, vi2::VolumeImage)
dimensions_equal(vi1, vi2)
@debug("Dividing two volume images with $(size(vi1.data, 4)) time instances")
vout = deepcopy(vi1)
vout.data = vi1.data ./ vi2.data
return vout
end
function /(vi::VolumeImage, c::Number)
vout = deepcopy(vi)
vout.data = vi.data ./ c
return vout
end
# *
function *(vi::VolumeImage, c::Number)
vout = deepcopy(vi)
vout.data = vi.data .* c
return vout
end
# mean
function mean(vi::VolumeImage)
@debug("Taking mean of one volume images with $(size(vi.data, 4)) time instances")
vout = deepcopy(vi)
vout.data = Statistics.mean(vout.data, dims = 4)
    # Store time as NaN to indicate it has been averaged
vout.t = [NaN] * u"s"
return vout
end
function mean(va::Array{VolumeImage,1})
@debug(
"Taking mean of $(length(va)) volume images with $(size(va[1].data, 4)) time instances"
)
mean_va = deepcopy(va[1])
for i = 2:length(va)
mean_va = mean_va + va[i]
end
return mean_va / length(va)
end
# maximum
function maximum(vi::VolumeImage)
maximum(vi.data)
end
function maximum(vis::Array{VolumeImage})
maximum([maximum(vi) for vi in vis])
end
# minimum
function minimum(vi::VolumeImage)
minimum(vi.data)
end
function minimum(vis::Array{VolumeImage})
minimum([minimum(vi) for vi in vis])
end
# normalise
function normalise(vi::VolumeImage)
@debug("Normalising one volume images with $(size(vi.data, 4)) time instances")
normalisation_constant = maximum(vi)
vi = deepcopy(vi) / normalisation_constant
vi.info["NormalisationConstant"] = normalisation_constant
return vi
end
function normalise(va::Array{VolumeImage,1})
@debug(
"Normalising $(length(va)) volume images with $(size(va[1].data, 4)) time instances"
)
vo = deepcopy(va)
for i = 1:length(vo)
vo[i] = normalise(vo[i])
end
return vo
end
# isequal
function isequal(a::VolumeImage, b::VolumeImage)
    a.data == b.data &&
        a.units == b.units &&
        a.x == b.x &&
        a.y == b.y &&
        a.z == b.z &&
        a.t == b.t &&
        a.method == b.method &&
        a.coord_system == b.coord_system
end
function ==(a::VolumeImage, b::VolumeImage)
isequal(a, b)
end
#
# Helper functions
# ----------------
#
function dimensions_equal(
vi1::VolumeImage,
vi2::VolumeImage;
x::Bool = true,
y::Bool = true,
z::Bool = true,
t::Bool = true,
units::Bool = true,
kwargs...,
)
    if x && !(vi1.x == vi2.x)
        throw(KeyError("X dimensions do not match"))
    end
    if y && !(vi1.y == vi2.y)
        throw(KeyError("Y dimensions do not match"))
    end
    if z && !(vi1.z == vi2.z)
        throw(KeyError("Z dimensions do not match"))
    end
    if t && !(vi1.t == vi2.t)
        throw(KeyError("T dimensions do not match"))
    end
    if units && !(vi1.units == vi2.units)
        throw(KeyError("Units do not match"))
    end
    return true
end
"""
Find the indices of a location in a VolumeImage
"""
function find_location(vi::VolumeImage, x::Real, y::Real, z::Real)
x_loc = findall(minimum(abs.(ustrip(vi.x) .- x)) .== abs.(ustrip(vi.x) .- x))[1]
y_loc = findall(minimum(abs.(ustrip(vi.y) .- y)) .== abs.(ustrip(vi.y) .- y))[1]
z_loc = findall(minimum(abs.(ustrip(vi.z) .- z)) .== abs.(ustrip(vi.z) .- z))[1]
if length(size(vi.data)) == 3
return [x_loc, y_loc, z_loc]
elseif length(size(vi.data)) == 4
return [x_loc, y_loc, z_loc, 1]
else
return [NaN]
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 8032 | using Plots
import Plots.plot
"""
Plot a volume image
#### Arguments
* `v`: A VolumeImage type
* `threshold`: Minimum value to plot, values smaller than this are plotted as `minsize`
* `min_val`: Force a minimum value for color and size scaling
* `max_val`: Force a maximum value for color and size scaling
* `minsize`: Minimum size a marker can be
* `maxsize`: Maximum size a marker can be
* `exclude`: Values not to plot
* `title`: Figure title
* `elp`: Path to elp file to overlay channel names
* `colorbar`: Should a colorbar be plotted
#### Returns
* A Plots.jl figure
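#### Example
A sketch with an illustrative file name and threshold:
```julia
vi = read_VolumeImage("image.dat")
p = plot(vi, threshold = 0.1)
```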
"""
function plot(v::VolumeImage; kwargs...)
x = AbstractFloat[xi / (1 * u"mm") for xi in v.x]
y = AbstractFloat[yi / (1 * u"mm") for yi in v.y]
z = AbstractFloat[zi / (1 * u"mm") for zi in v.z]
plot_src(dropdims(v.data, dims = 4), x, y, z; kwargs...)
end
function plot_src(
d::Array{A,3},
x::Vector{A},
y::Vector{A},
z::Vector{A};
threshold::Real = -Inf,
min_val::Real = Inf,
max_val::Real = -Inf,
minsize::Real = 2,
maxsize::Real = 6,
exclude::A = 0.0,
title::S = "",
elp::AbstractString = "",
colorbar::Bool = true,
figure_size = (1000, 400),
kwargs...,
) where {A<:AbstractFloat,S<:AbstractString}
# cols = [colorant"darkblue", colorant"orange", colorant"darkred"]
scaleval = maxsize / maximum(d)
plot_labels = false
if elp != ""
e = read_elp(elp)
plot_labels = true
end
#
# First facet
x_tmp = AbstractFloat[]
y_tmp = AbstractFloat[]
c_tmp = AbstractFloat[]
s_tmp = AbstractFloat[]
t = copy(d)
t = maximum(t, dims = 3)
t = dropdims(t, dims = 3)
for x_i = 1:size(t, 1)
for y_i = 1:size(t, 2)
val = t[x_i, y_i]
if val != exclude
push!(x_tmp, x[x_i])
push!(y_tmp, y[y_i])
if val > threshold
push!(s_tmp, max(scaleval * val, minsize))
push!(c_tmp, val)
else
push!(s_tmp, minsize)
push!(c_tmp, minsize)
end
end
end
end
if max_val > maximum(c_tmp)
@debug("Manually specifying maximum plotting value")
push!(s_tmp, max_val)
push!(c_tmp, max_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
if min_val < minimum(c_tmp)
@debug("Manually specifying minimum plotting value")
push!(s_tmp, min_val)
push!(c_tmp, min_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
p1 = plot(
x_tmp,
y_tmp,
zcolor = c_tmp,
ms = s_tmp,
legend = false,
l = :scatter,
lab = "Source",
colorbar = false,
markerstrokewidth = 0.1,
xlabel = "Left - Right (mm)",
ylabel = "Posterior - Anterior (mm)",
xlims = (-100, 100),
ylims = (-120, 90);
kwargs...,
)
if plot_labels
plotlist = [
"Fpz",
"Fp2",
"AF8",
"F8",
"FT8",
"T8",
"TP8",
"P10",
"PO8",
"O2",
"Oz",
"O1",
"PO7",
"P9",
"TP7",
"T7",
"FT7",
"F7",
"AF7",
"Fp1",
]
for elec in e
if something(findfirst(isequal(elec.label), plotlist), 0) > 0
annotate!(
p1,
(elec.coordinate.x |> u"cm" |> ustrip) - 5,
1.1 * (elec.coordinate.y |> u"cm" |> ustrip) - 2,
elec.label,
colorbar = false,
)
end
end
end
#
# Second facet
x_tmp = AbstractFloat[]
y_tmp = AbstractFloat[]
c_tmp = AbstractFloat[]
s_tmp = AbstractFloat[]
t = copy(d)
t = maximum(t, dims = 1)
t = dropdims(t, dims = 1)
for x_i = 1:size(t, 1)
for y_i = 1:size(t, 2)
val = t[x_i, y_i]
if val != exclude
push!(x_tmp, y[x_i])
push!(y_tmp, z[y_i])
if val > threshold
push!(s_tmp, max(scaleval * val, minsize))
push!(c_tmp, val)
else
push!(s_tmp, minsize)
push!(c_tmp, minsize)
end
end
end
end
if max_val > maximum(c_tmp)
@debug("Manually specifying maximum plotting value")
push!(s_tmp, max_val)
push!(c_tmp, max_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
if min_val < minimum(c_tmp)
@debug("Manually specifying minimum plotting value")
push!(s_tmp, min_val)
push!(c_tmp, min_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
p2 = plot(
x_tmp,
y_tmp,
zcolor = c_tmp,
ms = s_tmp,
legend = false,
l = :scatter,
title = title,
lab = "Source",
colorbar = false,
markerstrokewidth = 0.1,
xlabel = "Posterior - Anterior (mm)",
ylabel = "Inferior - Superior (mm)",
xlims = (-120, 90),
ylims = (-70, 100);
kwargs...,
)
if plot_labels
plotlist = ["Iz", "Oz", "POz", "Pz", "CPz", "Cz", "FCz", "Fz", "AFz", "Fpz"]
for elec in e
if something(findfirst(isequal(elec.label), plotlist), 0) > 0
annotate!(
p2,
(elec.coordinate.y |> u"cm" |> ustrip) - 5,
elec.coordinate.z |> u"cm" |> ustrip,
elec.label,
)
end
end
end
#
# Third facet
x_tmp = AbstractFloat[]
y_tmp = AbstractFloat[]
c_tmp = AbstractFloat[]
s_tmp = AbstractFloat[]
t = copy(d)
t = maximum(t, dims = 2)
t = dropdims(t, dims = 2)
for x_i = 1:size(t, 1)
for y_i = 1:size(t, 2)
val = t[x_i, y_i]
if val != exclude
push!(x_tmp, x[x_i])
push!(y_tmp, z[y_i])
if val > threshold
push!(s_tmp, max(scaleval * val, minsize))
push!(c_tmp, val)
else
push!(s_tmp, minsize)
push!(c_tmp, minsize)
end
end
end
end
if max_val > maximum(c_tmp)
@debug("Manually specifying maximum plotting value")
push!(s_tmp, max_val)
push!(c_tmp, max_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
if min_val < minimum(c_tmp)
@debug("Manually specifying minimum plotting value")
push!(s_tmp, min_val)
push!(c_tmp, min_val)
push!(x_tmp, -200)
push!(y_tmp, -200)
end
p3 = plot(
x_tmp,
y_tmp,
zcolor = c_tmp,
ms = s_tmp,
legend = false,
l = :scatter,
lab = "",
markerstrokewidth = 0.1,
colorbar = colorbar,
xlabel = "Left - Right (mm)",
ylabel = "Inferior - Superior (mm)",
xlims = (-100, 100),
ylims = (-70, 100);
kwargs...,
)
if plot_labels
plotlist = ["T7", "C5", "C3", "C1", "Cz", "C2", "C4", "C6", "T8"]
for elec in e
if something(findfirst(isequal(elec.label), plotlist), 0) > 0
                annotate!(
                    p3,
                    (elec.coordinate.x |> u"cm" |> ustrip) - 5,
                    elec.coordinate.z |> u"cm" |> ustrip,
                    elec.label,
                )
end
end
end
l = @layout([a b c])
return plot(p1, p2, p3, layout = l, size = figure_size)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 691 | function read_VolumeImage(fname::AbstractString)
@info("Creating volume image from file $fname")
if occursin(".dat", fname)
x, y, z, s, t = read_dat(fname)
method = "CLARA"
header = Dict()
units = "nAm/cm^3"
x = x / 1000
y = y / 1000
z = z / 1000
t = t / 1000
else
@warn("Unknown file type")
end
header["FileName"] = fname
coord_system = "?"
VolumeImage(
s,
units,
collect(x) * u"m",
collect(y) * u"m",
collect(z) * u"m",
collect(t) * u"s",
method,
header,
coord_system,
)
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 4012 | """
Volume Image
This composite type contains volume image information
#### Fields
* `data`: contains the recorded data
* `x`, `y`, `z`, `t` Arrays containing spatial and time information
* `method`: AbstractString of the method used to compute the tomography
* `info`: additional information in dictionary
#### `info` Fields
The following standard names are used when saving data to the info dictionary.
* `Regularisation`: Regularisation used in tomography
* `NormalisationConstant`: Value used to normalise image to maximum of 1
* `FileName`: Name of file
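#### Example
A sketch with an illustrative file name:
```julia
vi = read_VolumeImage("image.dat")
vi = normalise(vi)
```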
"""
mutable struct VolumeImage
data::Array{AbstractFloat,4}
units::AbstractString
x::Vector{typeof(1.0u"m")}
y::Vector{typeof(1.0u"m")}
z::Vector{typeof(1.0u"m")}
t::Vector{typeof(1.0u"s")}
method::AbstractString
info::Dict
coord_system::AbstractString
function VolumeImage(
data::Array{F,4},
units::S,
x::Vector{F},
y::Vector{F},
z::Vector{F},
t::Vector{F},
method::S,
info::Dict,
coord_system::S,
) where {F<:AbstractFloat,S<:AbstractString}
@assert size(data, 1) == length(x)
@assert size(data, 2) == length(y)
@assert size(data, 3) == length(z)
@assert size(data, 4) == length(t)
new(data, units, x * u"m", y * u"m", z * u"m", t * u"s", method, info, coord_system)
end
function VolumeImage(
data::Array{F,4},
units::S,
x::Vector{Met},
y::Vector{Met},
z::Vector{Met},
t::Vector{Sec},
method::S,
info::Dict,
coord_system::S,
) where {F<:AbstractFloat,S<:AbstractString,Met<:typeof(1.0u"m"),Sec<:typeof(1.0u"s")}
@assert size(data, 1) == length(x)
@assert size(data, 2) == length(y)
@assert size(data, 3) == length(z)
@assert size(data, 4) == length(t)
new(data, units, x, y, z, t, method, info, coord_system)
end
function VolumeImage(
data::Vector{F},
units::S,
x::Vector{F},
y::Vector{F},
z::Vector{F},
t::Vector{F},
method::S,
info::Dict,
coord_system::S,
) where {F<:AbstractFloat,S<:AbstractString}
@assert length(x) == length(data)
@assert length(y) == length(data)
@assert length(z) == length(data)
@assert length(t) == length(data)
newX = sort(unique(x))
newY = sort(unique(y))
newZ = sort(unique(z))
newT = sort(unique(t))
L = zeros(typeof(data[1]), length(newX), length(newY), length(newZ), length(newT))
for idx = 1:length(data)
idxX = something(findfirst(isequal(x[idx]), newX), 0)
idxY = something(findfirst(isequal(y[idx]), newY), 0)
idxZ = something(findfirst(isequal(z[idx]), newZ), 0)
idxT = something(findfirst(isequal(t[idx]), newT), 0)
L[idxX, idxY, idxZ, idxT] = data[idx]
end
new(
L,
units,
newX * u"m",
newY * u"m",
newZ * u"m",
newT * u"s",
method,
info,
coord_system,
)
end
end
#
# Basic operations
# ----------------
#
import Base: show
function Base.show(io::IO, vi::VolumeImage)
println(io, "VolumeImage of method $(vi.method) and units $(vi.units)")
println(io, " Spanning x: $(vi.x[1]) : $(vi.x[end])")
println(io, " Spanning y: $(vi.y[1]) : $(vi.y[end])")
println(io, " Spanning z: $(vi.z[1]) : $(vi.z[end])")
println(io, " Spanning t: $(vi.t[1]) : $(vi.t[end])")
if haskey(vi.info, "Regularisation")
println(io, " Regularisation: $(vi.info["Regularisation"])")
end
if haskey(vi.info, "NormalisationConstant")
println(io, " Image has been normalised")
end
end
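#
# A hypothetical REPL display produced by the `show` method above (values
# are illustrative only):
#
#     VolumeImage of method CLARA and units nAm/cm^3
#      Spanning x: -0.0725 m : 0.0725 m
#      Spanning y: -0.1038 m : 0.0712 m
#      Spanning z: -0.0582 m : 0.0768 m
#      Spanning t: 0.00012 s : 0.00024 s
#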
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 408 | using Neuroimaging
using Test
using Logging
using MAT
using Plots
using Glob
using DataDeps
using LinearAlgebra
using Unitful
logger = SimpleLogger(stdout, Logging.Warn)
#
# Run all tests
#
# Collect test files nested one to three directories deep (Glob's `**`
# matches a single path component), then drop this runner itself
tests = glob("**/*.jl")
append!(tests, glob("**/**/*.jl"))
append!(tests, glob("**/**/**/*.jl"))
filter!(x -> x != "runtests.jl", tests)
with_logger(logger) do
for t in tests
include(t)
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 772 | using Neuroimaging, DataDeps, Test
# Tests to ensure datasets are available and correct
# Basic biosemi data file
data_path = joinpath(datadep"BioSemiTestFiles", "Newtest17-2048.bdf")
s = read_SSR(data_path)
@test samplingrate(s) == 2048
@test length(channelnames(s)) == 16
@test length(s.triggers["Index"]) == length(s.triggers["Code"])
@test length(s.triggers["Code"]) == length(s.triggers["Duration"])
# Basic biosemi data file
data_path = joinpath(
datadep"ExampleSSR",
"Neuroimaging.jl-example-data-master",
"neuroimaingSSR.bdf",
)
s = read_SSR(data_path)
@test samplingrate(s) == 8192
@test length(channelnames(s)) == 7
@test length(s.triggers["Index"]) == length(s.triggers["Code"])
@test length(s.triggers["Code"]) == length(s.triggers["Duration"])
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1269 | @testset "Miscellaneous" begin
@testset "Results storage" begin
results_storage = Dict()
results_storage[new_processing_key(results_storage, "FTest")] = 4
results_storage[new_processing_key(results_storage, "Turtle")] = 5
results_storage[new_processing_key(results_storage, "FTest")] = 49
@test new_processing_key(results_storage, "FTest") == "FTest3"
@test find_keys_containing(results_storage, "FTest") == [1, 3]
@test find_keys_containing(results_storage, "Mum") == []
end
@testset "File parts" begin
a, b, c = fileparts("")
@test a == ""
@test b == ""
@test c == ""
a, b, c = fileparts("/Users/test/subdir/test-file.bdf")
@test a == "/Users/test/subdir/"
@test b == "test-file"
@test c == "bdf"
a, b, c = fileparts("/Users/test/subdir/test_file.bdf")
@test a == "/Users/test/subdir/"
@test b == "test_file"
@test c == "bdf"
a, b, c = fileparts("test-file.bdf")
@test a == ""
@test b == "test-file"
@test c == "bdf"
end
@testset "Find closest number index" begin
@test _find_closest_number_idx([1, 2, 2.7, 3.2, 4, 3.1, 7], 3) == 6
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1492 | @testset "Plotting" begin
fname = joinpath(dirname(@__FILE__), "../data", "test_Hz19.5-testing.bdf")
s = read_SSR(fname)
s = rereference(s, "Cz")
s = highpass_filter(s)
s = extract_epochs(s)
s = create_sweeps(s, epochsPerSweep = 2)
s = ftest(s)
@testset "Spectrum" begin
p = plot_spectrum(s, "20Hz_SWN_70dB_R", targetFreq = 3.0)
#= display(p) =#
p = plot_spectrum(vec(s.data[:, 1]), Int(samplingrate(s)), dBPlot = false)
#= display(p) =#
p = plot_spectrum(s, 3, targetFreq = 40.0390625)
#= display(p) =#
end
@testset "Filter reponse" begin
p = plot_filter_response(s.processing["filter1"], Int(samplingrate(s)))
#= display(p) =#
end
s = trim_channel(s, 8192 * 3)
@testset "Multi channel time series" begin
plot1 = plot_timeseries(s)
#= display(plot1) =#
plot2 = plot_timeseries(s, channels = ["40Hz_SWN_70dB_R", "Cz"])
#= display(plot2) =#
s = rereference(s, "car")
plot3 = plot_timeseries(s, channels = ["40Hz_SWN_70dB_R", "Cz"])
#= display(plot3) =#
end
@testset "Single channel time series" begin
plot4 = plot_timeseries(s, channels = ["40Hz_SWN_70dB_R"])
#= display(plot4) =#
plot5 = plot_timeseries(s, channels = "Cz")
#= display(plot5) =#
keep_channel!(s, ["40Hz_SWN_70dB_R"])
plot6 = plot_timeseries(s)
#= display(plot6) =#
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 2043 | @testset "Preprocessing" begin
fname = joinpath(dirname(@__FILE__), "..", "data", "test_Hz19.5-testing.bdf")
s = read_SSR(fname)
@testset "Triggers" begin
@testset "Validation" begin
validate_triggers(s.triggers)
s1 = deepcopy(s)
delete!(s1.triggers, "Index")
@test_throws KeyError validate_triggers(s1.triggers)
s1 = deepcopy(s)
delete!(s1.triggers, "Code")
@test_throws KeyError validate_triggers(s1.triggers)
s1 = deepcopy(s)
delete!(s1.triggers, "Duration")
@test_throws KeyError validate_triggers(s1.triggers)
s1 = deepcopy(s)
s1.triggers["test"] = [1]
@test_throws KeyError validate_triggers(s1.triggers)
s1 = deepcopy(s)
s1.triggers["Duration"] = s1.triggers["Duration"][1:4]
@test_throws KeyError validate_triggers(s1.triggers)
s1 = deepcopy(s)
s1.triggers["Code"] = s1.triggers["Code"][1:4]
@test_throws KeyError validate_triggers(s1.triggers)
end
end
end
@testset "Referencing" begin
signals = [0 1 2] .* ones(5, 3)
@testset "Remove template" begin
signals = [0 1 2] .* ones(5, 3)
template = vec(2 * ones(5))
@test remove_template(signals, template) == [-2 -1 0] .* ones(5, 3)
end
@testset "Reference to channel" begin
@test rereference(signals, 3) == [-2 -1 0] .* ones(5, 3)
@test rereference(signals, "C2", ["C1", "C2", "C3"]) == [-1 0 1] .* ones(5, 3)
end
@testset "Reference to group of channels" begin
@test rereference(signals, [1, 2, 3]) == [-1 0 1] .* ones(5, 3)
@test rereference(signals, ["C2", "C1", "C3"], ["C1", "C2", "C3"]) ==
[-1 0 1] .* ones(5, 3)
@test rereference(signals, "car", ["C1", "C2", "C3"]) == [-1 0 1] .* ones(5, 3)
@test rereference(signals, "average", ["C1", "C2", "C3"]) == [-1 0 1] .* ones(5, 3)
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 242 | fname = joinpath(dirname(@__FILE__), "..", "data", "test.avr")
sname = joinpath(dirname(@__FILE__), "..", "data", "tmp", "same.avr")
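# Round-trip check: read an .avr file, write it back out (the trailing 8192
# is assumed to be the sampling rate), and confirm the contents survive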
a, b = read_avr(fname)
write_avr(sname, a, b, 8192)
a2, b2 = read_avr(sname)
@test a == a2
@test b == b2
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 110 | fname = joinpath(dirname(@__FILE__), "../data", "test.bsa")
dips = read_bsa(fname)
@test size(dips) == (2,)
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 1284 | fname = joinpath(dirname(@__FILE__), "..", "data", "test-4d.dat")
sname = joinpath(dirname(@__FILE__), "..", "data", "tmp", "same.dat")
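# NOTE: the read_dat/write_dat assertions below are currently commented out;
# they record the values expected from the bundled test-4d.dat and
# test-3d.dat files.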
#x, y, z, s, t = read_dat(fname)
#@test size(x) == (30,)
#@test size(y) == (36,)
#@test size(z) == (28,)
#@test size(s) == (30,36,28,2)
#@test size(t) == (2,)
#@test maximum(x) == 72.5
#@test maximum(y) == 71.220001
#@test maximum(z) == 76.809998
#@test maximum(s) == 0.067409396
#@test maximum(t) == 0.24
#
#@test minimum(x) == -72.5
#@test minimum(y) == -103.779999
#@test minimum(z) == -58.189999
#@test minimum(s) == 0.0
#@test minimum(t) == 0.12
# write_dat(sname, x, y, z, s[:,:,:,:], t)
# x2, y2, z2, s2, t2 = read_dat(sname)
# @test x == x2
# @test y == y2
# @test z == z2
# @test s == s2
# @test t == t2
# fname = joinpath(dirname(@__FILE__), "..", "data", "test-3d.dat")
# x, y, z, s, t = read_dat(fname)
#@test size(x) == (30,)
#@test size(y) == (36,)
#@test size(z) == (28,)
#@test size(s) == (30,36,28,1)
#@test size(t) == (1,)
#@test maximum(x) == 72.5
#@test maximum(y) == 71.220001
#@test maximum(z) == 76.809998
#@test maximum(s) == 33.2692985535
#@test maximum(t) == 0
#@test minimum(x) == -72.5
#@test minimum(y) == -103.779999
#@test minimum(z) == -58.189999
#@test minimum(s) == -7.5189352036
#@test minimum(t) == 0
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 143 | e = read_elp(joinpath(dirname(@__FILE__), "..", "data", "test.elp"))
@test length(e) == 2
@test e[1].label == "Fpz"
@test e[2].label == "Fp2"
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 156 | s = read_SSR(joinpath(dirname(@__FILE__), "..", "data", "test_Hz19.5-testing.bdf"))
s = read_evt(s, joinpath(dirname(@__FILE__), "..", "data", "test.evt"))
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 290 | a = read_SSR(joinpath(dirname(@__FILE__), "..", "data", "test_Hz19.5-testing.bdf"))
@test a.processing["Side"] == "Bilateral"
@test a.processing["Name"] == "P2"
@test a.processing["Amplitude"] == 70.0
@test a.processing["Carrier_Frequency"] == 1000.0
@test modulationrate(a) == 40.0390625
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 448 | @testset "File formats" begin
@testset "avr" begin
include("avr.jl")
end
@testset "bsa" begin
include("bsa.jl")
end
@testset "dat" begin
include("dat.jl")
end
@testset "elp" begin
include("elp.jl")
end
@testset "evt" begin
include("evt.jl")
end
@testset "rba" begin
include("rba.jl")
end
@testset "sfp" begin
include("rba.jl")
end
end
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |
|
[
"BSD-3-Clause"
] | 1.1.2 | 658b4dd2b445c13a4fef067f98cd0f9c97a72a7c | code | 477 | fname = joinpath(dirname(@__FILE__), "..", "data", "test.sfp")
s = read_sfp(fname)
@test length(s) == 3
@test label(s) == ["Fp1", "Fpz", "Fp2"]
@test Neuroimaging.x(s) == [-27.747648 * u"m", -0.085967 * u"m", 27.676888 * u"m"]
@test Neuroimaging.y(s) == [98.803864 * u"m", 103.555275 * u"m", 99.133354 * u"m"]
@test Neuroimaging.z(s) == [34.338360 * u"m", 34.357265 * u"m", 34.457005 * u"m"]
@test typeof(s[1].coordinate) == Neuroimaging.Talairach
| Neuroimaging | https://github.com/rob-luke/Neuroimaging.jl.git |