licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 2434 |
"""
    BatchEncoder

Converts raw integer data to and from `Plaintext` instances for the BFV scheme with
batching enabled.

See also: [`Plaintext`](@ref)
"""
mutable struct BatchEncoder <: SEALObject
handle::Ptr{Cvoid}
# Keep a reference to the context so it is not garbage-collected while the encoder lives.
context::SEALContext
# Create an encoder for the given `SEALContext` via the C wrapper library.
function BatchEncoder(context)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:BatchEncoder_Create, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return BatchEncoder(handleref[], context)
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function BatchEncoder(handle::Ptr{Cvoid}, context)
object = new(handle, context)
finalizer(destroy!, object)
return object
end
end
# Release the native BatchEncoder if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::BatchEncoder)
if isallocated(object)
@check_return_value ccall((:BatchEncoder_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
"""
    slot_count(encoder::BatchEncoder)

Return the number of batching slots available for `encoder` as an `Int`.
"""
function slot_count(encoder::BatchEncoder)
nslots = Ref{UInt64}(0)
status = ccall((:BatchEncoder_GetSlotCount, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
encoder, nslots)
@check_return_value status
return Int(nslots[])
end
# Encode an unsigned-integer vector into `destination` using BFV batching.
# Returns `destination` for chaining.
function encode!(destination::Plaintext, values::DenseArray{UInt64}, encoder::BatchEncoder)
retval = ccall((:BatchEncoder_Encode1, libsealc), Clong,
(Ptr{Cvoid}, UInt64, Ref{UInt64}, Ptr{Cvoid}),
encoder, length(values), values, destination)
@check_return_value retval
return destination
end
# Signed-integer variant of the batching encoder (separate C entry point).
function encode!(destination::Plaintext, values::DenseArray{Int64}, encoder::BatchEncoder)
retval = ccall((:BatchEncoder_Encode2, libsealc), Clong,
(Ptr{Cvoid}, UInt64, Ref{Int64}, Ptr{Cvoid}),
encoder, length(values), values, destination)
@check_return_value retval
return destination
end
# Decode `plain` into the preallocated unsigned-integer vector `destination`.
# NOTE(review): `count` receives the slot count from the C side but is discarded here;
# presumably `destination` must hold at least `slot_count(encoder)` elements — confirm
# against the SEAL C wrapper documentation.
function decode!(destination::DenseVector{UInt64}, plain::Plaintext, encoder::BatchEncoder)
count = Ref{UInt64}(0)
retval = ccall((:BatchEncoder_Decode1, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt64}, Ref{UInt64}, Ptr{Cvoid}),
encoder, plain, count, destination, C_NULL)
@check_return_value retval
return destination
end
# Signed-integer variant of the batching decoder (separate C entry point).
function decode!(destination::DenseVector{Int64}, plain::Plaintext, encoder::BatchEncoder)
count = Ref{UInt64}(0)
retval = ccall((:BatchEncoder_Decode2, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt64}, Ref{Int64}, Ptr{Cvoid}),
encoder, plain, count, destination, C_NULL)
@check_return_value retval
return destination
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 4456 |
"""
    Ciphertext

A ciphertext element, consisting of two or more polynomials. It can be created from a `Plaintext`
element by encrypting it with an appropriate `Encryptor` instance. `Ciphertext` instances should
usually not be modified directly by the user but only through the corresponding functions of
`Evaluator`. Decryption is performed via a `Decryptor` instance, which converts a `Ciphertext` back
to a `Plaintext` instance.
See also: [`Plaintext`](@ref), [`Encryptor`](@ref), [`Decryptor`](@ref), [`Evaluator`](@ref)
"""
mutable struct Ciphertext <: SEALObject
handle::Ptr{Cvoid}
# Create an empty ciphertext (no context; default memory pool on the C side).
function Ciphertext()
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Ciphertext_Create1, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
C_NULL, handleref)
@check_return_value retval
return Ciphertext(handleref[])
end
# Create a ciphertext bound to `context` (uses the C wrapper's Create3 entry point).
function Ciphertext(context)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Ciphertext_Create3, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, C_NULL, handleref)
@check_return_value retval
return Ciphertext(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function Ciphertext(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native Ciphertext if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::Ciphertext)
if isallocated(object)
@check_return_value ccall((:Ciphertext_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
"""
    scale(encrypted::Ciphertext)

Return the current CKKS scale of `encrypted` as a `Float64`.
"""
function scale(encrypted::Ciphertext)
value = Ref{Cdouble}(0)
retval = ccall((:Ciphertext_Scale, libsealc), Clong,
(Ptr{Cvoid}, Ref{Cdouble}),
encrypted, value)
@check_return_value retval
return Float64(value[])
end
"""
    scale!(encrypted::Ciphertext, value::Float64)

Set the CKKS scale of `encrypted` to `value` and return `encrypted`.
"""
function scale!(encrypted::Ciphertext, value::Float64)
retval = ccall((:Ciphertext_SetScale, libsealc), Clong,
(Ptr{Cvoid}, Ref{Cdouble}),
encrypted, value)
@check_return_value retval
return encrypted
end
# Return the encryption-parameter id of `encrypted` as a 4-element UInt64 vector
# (the C API writes the id into the preallocated buffer).
function parms_id(encrypted::Ciphertext)
parms_id_ = zeros(UInt64, 4)
retval = ccall((:Ciphertext_ParmsId, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
encrypted, parms_id_)
@check_return_value retval
return parms_id_
end
# Return the ciphertext size (number of polynomials) as a 1-tuple, mirroring Base's
# `size` convention for one-dimensional containers.
function Base.size(encrypted::Ciphertext)
sizeref = Ref{UInt64}(0)
retval = ccall((:Ciphertext_Size, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
encrypted, sizeref)
@check_return_value retval
return (Int(sizeref[]),)
end
# Scalar convenience accessor derived from `size`.
Base.length(encrypted::Ciphertext) = size(encrypted)[1]
# Return an upper bound on the number of bytes needed to serialize `encrypted`
# with the given compression mode.
function save_size(compr_mode, encrypted::Ciphertext)
result = Ref{Int64}(0)
retval = ccall((:Ciphertext_SaveSize, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Int64}),
encrypted, compr_mode, result)
@check_return_value retval
return Int(result[])
end
# Convenience overload using the library's default compression mode.
save_size(encrypted::Ciphertext) = save_size(ComprModeType.default, encrypted)
# Serialize `encrypted` into `buffer` (capacity `length` bytes) using `compr_mode`.
# Returns the number of bytes actually written.
function save!(buffer::DenseVector{UInt8}, length::Integer,
compr_mode::ComprModeType.ComprModeTypeEnum, encrypted::Ciphertext)
out_bytes = Ref{Int64}(0)
retval = ccall((:Ciphertext_Save, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}, UInt64, UInt8, Ref{Int64}),
encrypted, buffer, length, compr_mode, out_bytes)
@check_return_value retval
return Int(out_bytes[])
end
# Convenience overload using the default compression mode.
function save!(buffer::DenseVector{UInt8}, length::Integer, encrypted::Ciphertext)
return save!(buffer, length, ComprModeType.default, encrypted)
end
# Convenience overload using the whole buffer capacity.
function save!(buffer::DenseVector{UInt8}, encrypted::Ciphertext)
return save!(buffer, length(buffer), encrypted)
end
# Deserialize a ciphertext from `buffer` (up to `length` bytes) into `encrypted`,
# validating against `context`. Returns the number of bytes consumed.
function load!(encrypted::Ciphertext, context::SEALContext, buffer::DenseVector{UInt8}, length)
in_bytes = Ref{Int64}(0)
retval = ccall((:Ciphertext_Load, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt8}, UInt64, Ref{Int64}),
encrypted, context, buffer, length, in_bytes)
@check_return_value retval
return Int(in_bytes[])
end
# Convenience overload using the whole buffer.
function load!(encrypted::Ciphertext, context::SEALContext, buffer::DenseVector{UInt8})
return load!(encrypted, context, buffer, length(buffer))
end
# Preallocate memory for `capacity` polynomials in `encrypted`; returns `encrypted`.
function reserve!(encrypted::Ciphertext, capacity)
retval = ccall((:Ciphertext_Reserve3, libsealc), Clong,
(Ptr{Cvoid}, UInt64),
encrypted, capacity)
@check_return_value retval
return encrypted
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 3936 |
"""
    CKKSEncoder

A `CKKSEncoder` provides functionality to convert raw data such as scalars and vectors into
`Plaintext` instances using `encode!`, and to convert `Plaintext` elements back to raw data using
`decode!`.
See also: [`Plaintext`](@ref), [`encode!`](@ref), [`decode!`](@ref)
"""
mutable struct CKKSEncoder <: SEALObject
handle::Ptr{Cvoid}
# Keep a reference to the context so it is not garbage-collected while the encoder lives.
context::SEALContext
# Create an encoder for the given `SEALContext` via the C wrapper library.
function CKKSEncoder(context)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:CKKSEncoder_Create, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return CKKSEncoder(handleref[], context)
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function CKKSEncoder(handle::Ptr{Cvoid}, context)
object = new(handle, context)
finalizer(destroy!, object)
return object
end
end
# Release the native CKKSEncoder if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::CKKSEncoder)
if isallocated(object)
@check_return_value ccall((:CKKSEncoder_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
"""
    slot_count(encoder)

Return the number of available slots for a given encoder, i.e., how many raw data values can be
stored and processed simultaneously with the given encryption setup.
"""
function slot_count(encoder::CKKSEncoder)
count = Ref{UInt64}(0)
retval = ccall((:CKKSEncoder_SlotCount, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
encoder, count)
@check_return_value retval
return Int(count[])
end
"""
    encode!(destination, data::DenseVector{Float64}, scale, encoder)
    encode!(destination, data::Float64, scale, encoder)
    encode!(destination, data::Integer, encoder)

Use `CKKSEncoder` instance `encoder` to encode raw `data`, which can either be a scalar or a dense
vector. The result is stored in the `Plaintext` instance `destination` using encoding precision
`scale` (the `Integer` method needs no scale). Note that if `data` is a vector, it must have at
least as many elements as there are slots available.
See also: [`slot_count`](@ref)
"""
function encode! end
# Vector variant: encodes `length(data)` values at the context's first parameter level.
function encode!(destination::Plaintext, data::DenseVector{Float64}, scale, encoder::CKKSEncoder)
value_count = UInt64(length(data))
parms_id = first_parms_id(encoder.context)
retval = ccall((:CKKSEncoder_Encode1, libsealc), Clong,
(Ptr{Cvoid}, UInt64, Ref{Cdouble}, Ref{UInt64}, Float64, Ptr{Cvoid}, Ptr{Cvoid}),
encoder, value_count, data, parms_id, scale, destination, C_NULL)
@check_return_value retval
return destination
end
# Scalar `Float64` variant (C wrapper's Encode3 entry point).
function encode!(destination::Plaintext, data::Float64, scale, encoder::CKKSEncoder)
parms_id = first_parms_id(encoder.context)
retval = ccall((:CKKSEncoder_Encode3, libsealc), Clong,
(Ptr{Cvoid}, Float64, Ref{UInt64}, Float64, Ptr{Cvoid}, Ptr{Cvoid}),
encoder, data, parms_id, scale, destination, C_NULL)
@check_return_value retval
return destination
end
# Integer variant: no scale parameter (C wrapper's Encode5 entry point).
function encode!(destination::Plaintext, data::Integer, encoder::CKKSEncoder)
parms_id = first_parms_id(encoder.context)
retval = ccall((:CKKSEncoder_Encode5, libsealc), Clong,
(Ptr{Cvoid}, Int64, Ref{UInt64}, Ptr{Cvoid}),
encoder, data, parms_id, destination)
@check_return_value retval
return destination
end
"""
    decode!(destination, plain, encoder)

Use `CKKSEncoder` instance `encoder` to convert the `Plaintext` instance `plain` back to raw data.
The result is stored in the dense vector `destination`, which must have at least as many elements as
there are slots available.
See also: [`slot_count`](@ref)
"""
function decode!(destination::DenseVector{Float64}, plain::Plaintext, encoder::CKKSEncoder)
# The C API is told the capacity of `destination` and fills it in place.
value_count = UInt64(length(destination))
retval = ccall((:CKKSEncoder_Decode1, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt64}, Ref{Cdouble}, Ptr{Cvoid}),
encoder, plain, value_count, destination, C_NULL)
@check_return_value retval
return destination
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 6548 |
"""
    SEALContext

Heavyweight class that validates encryption parameters of type `EncryptionParameters` and
pre-computes and stores several costly pre-computations.
See also: [`EncryptionParameters`](@ref)
"""
mutable struct SEALContext <: SEALObject
handle::Ptr{Cvoid}
# Create a context from encryption parameters. `expand_mod_chain` enables the modulus
# switching chain; `sec_level` selects the enforced security level.
function SEALContext(enc_param::EncryptionParameters;
expand_mod_chain=true, sec_level=SecLevelType.tc128)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:SEALContext_Create, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Cint, Ref{Ptr{Cvoid}}),
enc_param, expand_mod_chain, sec_level, handleref)
@check_return_value retval
return SEALContext(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function SEALContext(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native SEALContext if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::SEALContext)
if isallocated(object)
@check_return_value ccall((:SEALContext_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Return the parms_id of the first (highest-level) parameter set as a 4-element UInt64 vector.
function first_parms_id(context::SEALContext)
parms_id = zeros(UInt64, 4)
retval = ccall((:SEALContext_FirstParmsId, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
context, parms_id)
@check_return_value retval
return parms_id
end
# Return the parms_id of the last (lowest-level) parameter set as a 4-element UInt64 vector.
function last_parms_id(context::SEALContext)
parms_id = zeros(UInt64, 4)
retval = ccall((:SEALContext_LastParmsId, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
context, parms_id)
@check_return_value retval
return parms_id
end
# Look up the ContextData for a specific parms_id. The returned wrapper does not own the
# native object (destroy_on_gc=false) — the context manages its lifetime.
function get_context_data(context::SEALContext, parms_id::DenseVector{UInt64})
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:SEALContext_GetContextData, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}, Ref{Ptr{Cvoid}}),
context, parms_id, handleref)
@check_return_value retval
return ContextData(handleref[], destroy_on_gc=false)
end
# ContextData for the key parameter level (non-owning wrapper, see above).
function key_context_data(context::SEALContext)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:SEALContext_KeyContextData, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return ContextData(handleref[], destroy_on_gc=false)
end
# ContextData for the first data parameter level (non-owning wrapper, see above).
function first_context_data(context::SEALContext)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:SEALContext_FirstContextData, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return ContextData(handleref[], destroy_on_gc=false)
end
# Return the human-readable explanation of why the context's parameters were rejected.
# Uses the common two-call C pattern: first call obtains the length, second the message.
# NOTE(review): if the reported length includes a trailing NUL byte, the returned String
# would contain it — confirm against the sealc API and `rstrip` if needed.
function parameter_error_message(context::SEALContext)
len = Ref{UInt64}(0)
# First call to obtain length (message pointer is null)
retval = ccall((:SEALContext_ParameterErrorMessage, libsealc), Clong,
(Ptr{Cvoid}, Ptr{UInt8}, Ptr{UInt64}),
context, C_NULL, len)
@check_return_value retval
# Second call to obtain message
message = Vector{UInt8}(undef, len[])
retval = ccall((:SEALContext_ParameterErrorMessage, libsealc), Clong,
(Ptr{Cvoid}, Ptr{UInt8}, Ptr{UInt64}),
context, message, len)
@check_return_value retval
return String(message)
end
"""
    using_keyswitching(context::SEALContext)

Return `true` if key switching is supported by `context`, `false` otherwise.
"""
function using_keyswitching(context::SEALContext)
flag = Ref{UInt8}(0)
status = ccall((:SEALContext_UsingKeyswitching, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}),
context, flag)
@check_return_value status
return Bool(flag[])
end
"""
    ContextData

Wrapper around a SEAL `ContextData` C object holding pre-computations for one level of the
modulus switching chain. With `destroy_on_gc=false` the wrapper is non-owning (the native
object's lifetime is managed by its `SEALContext`).
"""
mutable struct ContextData <: SEALObject
handle::Ptr{Cvoid}
function ContextData(handle::Ptr{Cvoid}; destroy_on_gc=true)
object = new(handle)
# Only attach a finalizer when this wrapper owns the native object.
destroy_on_gc && finalizer(destroy!, object)
return object
end
end
# Release the native ContextData if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::ContextData)
if isallocated(object)
@check_return_value ccall((:ContextData_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
"""
    chain_index(context_data::ContextData)

Return the position of `context_data` in the modulus switching chain as an `Int`.
"""
function chain_index(context_data::ContextData)
idx = Ref{UInt64}(0)
status = ccall((:ContextData_ChainIndex, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
context_data, idx)
@check_return_value status
return Int(idx[])
end
# Return the `EncryptionParameters` associated with this chain level.
function parms(context_data::ContextData)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:ContextData_Parms, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context_data, handleref)
@check_return_value retval
return EncryptionParameters(handleref[])
end
# Convenience: parms_id of this chain level, via its encryption parameters.
function parms_id(context_data::ContextData)
enc_parms = parms(context_data)
return parms_id(enc_parms)
end
# Return the total bit count of the coefficient modulus at this chain level.
function total_coeff_modulus_bit_count(context_data::ContextData)
bit_count = Ref{Cint}(0)
retval = ccall((:ContextData_TotalCoeffModulusBitCount, libsealc), Clong,
(Ptr{Cvoid}, Ref{Cint}),
context_data, bit_count)
@check_return_value retval
return Int(bit_count[])
end
# Return the (non-owning) EncryptionParameterQualifiers of this chain level.
function qualifiers(context_data::ContextData)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:ContextData_Qualifiers, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context_data, handleref)
@check_return_value retval
return EncryptionParameterQualifiers(handleref[], destroy_on_gc=false)
end
# Return the next (lower) level in the modulus switching chain as a non-owning
# ContextData, or `nothing` when `context_data` is the last level.
function next_context_data(context_data::ContextData)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:ContextData_NextContextData, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context_data, handleref)
@check_return_value retval
if handleref[] == C_NULL
return nothing
else
return ContextData(handleref[], destroy_on_gc=false)
end
end
"""
    EncryptionParameterQualifiers

Wrapper around SEAL's qualifier flags describing properties of a validated parameter set
(e.g. whether batching is supported). Non-owning when `destroy_on_gc=false`.
"""
mutable struct EncryptionParameterQualifiers <: SEALObject
handle::Ptr{Cvoid}
function EncryptionParameterQualifiers(handle::Ptr{Cvoid}; destroy_on_gc=true)
object = new(handle)
# Only attach a finalizer when this wrapper owns the native object.
destroy_on_gc && finalizer(destroy!, object)
return object
end
end
# Release the native qualifiers object if still allocated; idempotent.
function destroy!(object::EncryptionParameterQualifiers)
if isallocated(object)
@check_return_value ccall((:EncryptionParameterQualifiers_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Return `true` if the validated parameter set supports batching.
function using_batching(epq::EncryptionParameterQualifiers)
valueref = Ref{UInt8}(0)
retval = ccall((:EPQ_UsingBatching, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}),
epq, valueref)
@check_return_value retval
return Bool(valueref[])
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 1563 |
"""
    Decryptor

A `Decryptor` can be used to decrypt a `Ciphertext` instance back into a `Plaintext` instance.
See also: [`Plaintext`](@ref), [`Ciphertext`](@ref)
"""
mutable struct Decryptor <: SEALObject
handle::Ptr{Cvoid}
# Create a decryptor for `context` using `secret_key`.
function Decryptor(context::SEALContext, secret_key::SecretKey)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Decryptor_Create, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, secret_key, handleref)
@check_return_value retval
return Decryptor(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function Decryptor(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native Decryptor if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::Decryptor)
if isallocated(object)
@check_return_value ccall((:Decryptor_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Decrypt `encrypted` into the preallocated `destination` plaintext; returns `destination`.
function decrypt!(destination::Plaintext, encrypted::Ciphertext, decryptor::Decryptor)
retval = ccall((:Decryptor_Decrypt, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
decryptor, encrypted, destination)
@check_return_value retval
return destination
end
"""
    invariant_noise_budget(encrypted, decryptor)

Return the remaining invariant noise budget of `encrypted` in bits, as an `Int`.
"""
function invariant_noise_budget(encrypted::Ciphertext, decryptor::Decryptor)
budget = Ref{Cint}(0)
status = ccall((:Decryptor_InvariantNoiseBudget, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{Cint}),
decryptor, encrypted, budget)
@check_return_value status
return Int(budget[])
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 6609 |
"""
    SchemeType

A module that only wraps the enum `SchemeTypeEnum` with values `none`, `bfv`, and `ckks`, which
indicate the type of encryption scheme. `bfv` refers to the Brakerski/Fan-Vercauteren scheme,
`ckks` refers to the Cheon-Kim-Kim-Song scheme (sometimes also called `HEAAN` in the literature),
and `none` indicates that no encryption should be used.
"""
module SchemeType
@enum SchemeTypeEnum::UInt8 none=0 bfv=1 ckks=2
end
"""
    EncryptionParameters

Stores settings for use by the encryption schemes, most importantly the polynomial modulus, the
coefficient modulus, and the plaintext modulus. An `EncryptionParameters` object is required to
create a `SEALContext` instance.
See also: [`SEALContext`](@ref)
"""
mutable struct EncryptionParameters <: SEALObject
handle::Ptr{Cvoid}
# Create an empty parameter set for the given scheme (defaults to no encryption).
function EncryptionParameters(scheme::SchemeType.SchemeTypeEnum=SchemeType.none)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:EncParams_Create1, libsealc), Clong,
(UInt8, Ref{Ptr{Cvoid}}),
scheme, handleref)
@check_return_value retval
return EncryptionParameters(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function EncryptionParameters(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native parameters object if still allocated; idempotent.
function destroy!(object::EncryptionParameters)
if isallocated(object)
@check_return_value ccall((:EncParams_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Return the polynomial modulus degree as an `Int`.
function poly_modulus_degree(enc_param::EncryptionParameters)
degree = Ref{UInt64}(0)
retval = ccall((:EncParams_GetPolyModulusDegree, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
enc_param, degree)
@check_return_value retval
return Int(degree[])
end
# Set the polynomial modulus degree; returns `enc_param` for chaining.
function set_poly_modulus_degree!(enc_param::EncryptionParameters, degree)
retval = ccall((:EncParams_SetPolyModulusDegree, libsealc), Clong,
(Ptr{Cvoid}, UInt64),
enc_param, degree)
@check_return_value retval
return enc_param
end
# Set the coefficient modulus from an iterable of `Modulus` objects; their raw C handles
# are marshalled into a pointer array for the C call. Returns `enc_param`.
function set_coeff_modulus!(enc_param::EncryptionParameters, coeff_modulus)
coeff_modulus_ptrs = Ptr{Cvoid}[gethandle(c) for c in coeff_modulus]
retval = ccall((:EncParams_SetCoeffModulus, libsealc), Clong,
(Ptr{Cvoid}, UInt64, Ref{Ptr{Cvoid}}),
enc_param, length(coeff_modulus), coeff_modulus_ptrs)
@check_return_value retval
return enc_param
end
# Return the coefficient modulus as a `Vector{Modulus}` using the two-call C pattern
# (first call yields the count, second fills the handle array).
function coeff_modulus(enc_param::EncryptionParameters)
len = Ref{UInt64}(0)
# First call to obtain length (modulus result pointer is null)
retval = ccall((:EncParams_GetCoeffModulus, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}, Ptr{Cvoid}),
enc_param, len, C_NULL)
@check_return_value retval
# Second call to obtain modulus
modulusptrs = Vector{Ptr{Cvoid}}(undef, len[])
retval = ccall((:EncParams_GetCoeffModulus, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}, Ref{Ptr{Cvoid}}),
enc_param, len, modulusptrs)
@check_return_value retval
modulus = Modulus[Modulus(ptr) for ptr in modulusptrs]
return modulus
end
"""
    scheme(enc_param::EncryptionParameters)

Return the encryption scheme configured in `enc_param` as a `SchemeType.SchemeTypeEnum`.
"""
function scheme(enc_param::EncryptionParameters)
# Local renamed from `scheme` to `scheme_ref`: the original shadowed the function's own
# name inside its body, which is confusing and blocks any recursive/qualified use.
scheme_ref = Ref{UInt8}(0)
retval = ccall((:EncParams_GetScheme, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}),
enc_param, scheme_ref)
@check_return_value retval
return SchemeType.SchemeTypeEnum(scheme_ref[])
end
# Return the plaintext modulus as a non-owning `Modulus` (lifetime managed by `enc_param`).
function plain_modulus(enc_param::EncryptionParameters)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:EncParams_GetPlainModulus, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
enc_param, handleref)
@check_return_value retval
return Modulus(handleref[], destroy_on_gc=false)
end
# Set the plaintext modulus from a `Modulus` object; returns `enc_param`.
function set_plain_modulus!(enc_param::EncryptionParameters, plain_modulus::Modulus)
retval = ccall((:EncParams_SetPlainModulus1, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}),
enc_param, plain_modulus)
@check_return_value retval
return enc_param
end
# Set the plaintext modulus directly from an integer; returns `enc_param`.
function set_plain_modulus!(enc_param::EncryptionParameters, plain_modulus::Integer)
retval = ccall((:EncParams_SetPlainModulus2, libsealc), Clong,
(Ptr{Cvoid}, UInt64),
enc_param, plain_modulus)
@check_return_value retval
return enc_param
end
# Return the parms_id of `enc_param` as a 4-element UInt64 vector.
function parms_id(enc_param::EncryptionParameters)
parms_id_ = zeros(UInt64, 4)
retval = ccall((:EncParams_GetParmsId, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt64}),
enc_param, parms_id_)
@check_return_value retval
return parms_id_
end
# Serialize `enc_param` into `buffer` (capacity `length` bytes) using `compr_mode`.
# Returns the number of bytes actually written.
function save!(buffer::DenseVector{UInt8}, length::Integer,
compr_mode::ComprModeType.ComprModeTypeEnum, enc_param::EncryptionParameters)
out_bytes = Ref{Int64}(0)
retval = ccall((:EncParams_Save, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}, UInt64, UInt8, Ref{Int64}),
enc_param, buffer, length, compr_mode, out_bytes)
@check_return_value retval
return Int(out_bytes[])
end
# Convenience overload using the default compression mode.
function save!(buffer::DenseVector{UInt8}, length::Integer, enc_param::EncryptionParameters)
return save!(buffer, length, ComprModeType.default, enc_param)
end
# Convenience overload using the whole buffer capacity.
function save!(buffer::DenseVector{UInt8}, enc_param::EncryptionParameters)
return save!(buffer, length(buffer), enc_param)
end
# Return an upper bound on the bytes needed to serialize `enc_param` with `compr_mode`.
function save_size(compr_mode, enc_param::EncryptionParameters)
result = Ref{Int64}(0)
retval = ccall((:EncParams_SaveSize, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Int64}),
enc_param, compr_mode, result)
@check_return_value retval
return Int(result[])
end
# Convenience overload using the library's default compression mode.
save_size(enc_param::EncryptionParameters) = save_size(ComprModeType.default, enc_param)
# Deserialize parameters from `buffer` (up to `length` bytes) into `enc_param`.
# Returns the number of bytes consumed.
function load!(enc_param::EncryptionParameters, buffer::DenseVector{UInt8}, length)
in_bytes = Ref{Int64}(0)
retval = ccall((:EncParams_Load, libsealc), Clong,
(Ptr{Cvoid}, Ref{UInt8}, UInt64, Ref{Int64}),
enc_param, buffer, length, in_bytes)
@check_return_value retval
return Int(in_bytes[])
end
# Convenience overload using the whole buffer.
load!(enc_param::EncryptionParameters, buffer::DenseVector{UInt8}) = load!(enc_param, buffer,
length(buffer))
# Structural equality of two parameter sets, delegated to the native comparison.
function Base.:(==)(enc_param1::EncryptionParameters, enc_param2::EncryptionParameters)
result = Ref{UInt8}(0)
retval = ccall((:EncParams_Equals, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt8}),
enc_param1, enc_param2, result)
@check_return_value retval
return Bool(result[])
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 2986 |
"""
    Encryptor

An `Encryptor` can be used to encrypt a `Plaintext` instance, yielding a `Ciphertext` instance.
It may be constructed with a public key (asymmetric encryption), a secret key (symmetric
encryption), or both.
See also: [`Plaintext`](@ref), [`Ciphertext`](@ref)
"""
mutable struct Encryptor <: SEALObject
handle::Ptr{Cvoid}
# Create an encryptor that supports both asymmetric and symmetric encryption.
function Encryptor(context::SEALContext, public_key::PublicKey, secret_key::SecretKey)
# Fixed: initialize with C_NULL like every other constructor in this file,
# instead of the integer literal 0 (same value, consistent and self-documenting).
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Encryptor_Create, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, public_key, secret_key, handleref)
@check_return_value retval
return Encryptor(handleref[])
end
# Public-key-only encryptor (asymmetric encryption).
function Encryptor(context::SEALContext, public_key::PublicKey)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Encryptor_Create, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, public_key, C_NULL, handleref)
@check_return_value retval
return Encryptor(handleref[])
end
# Secret-key-only encryptor (symmetric encryption).
function Encryptor(context::SEALContext, secret_key::SecretKey)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Encryptor_Create, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, C_NULL, secret_key, handleref)
@check_return_value retval
return Encryptor(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function Encryptor(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native Encryptor if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::Encryptor)
if isallocated(object)
@check_return_value ccall((:Encryptor_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Replace the secret key used by `encryptor` for symmetric encryption.
function set_secret_key!(encryptor::Encryptor, secret_key::SecretKey)
retval = ccall((:Encryptor_SetSecretKey, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}),
encryptor, secret_key)
@check_return_value retval
return nothing
end
# Asymmetric encryption of `plain` into the preallocated `destination`; returns it.
function encrypt!(destination::Ciphertext, plain::Plaintext, encryptor::Encryptor)
retval = ccall((:Encryptor_Encrypt, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
encryptor, plain, destination, C_NULL)
@check_return_value retval
return destination
end
# Symmetric encryption into a preallocated ciphertext. The `false` flag requests a
# non-seeded (fully expanded) ciphertext from the C API.
function encrypt_symmetric!(destination::Ciphertext, plain::Plaintext, encryptor::Encryptor)
retval = ccall((:Encryptor_EncryptSymmetric, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, UInt8, Ptr{Cvoid}, Ptr{Cvoid}),
encryptor, plain, false, destination, C_NULL)
@check_return_value retval
return destination
end
# Symmetric encryption returning a new ciphertext. The `true` flag requests the
# seeded (compact, serializable) form from the C API.
function encrypt_symmetric(plain::Plaintext, encryptor::Encryptor)
destination = Ciphertext()
retval = ccall((:Encryptor_EncryptSymmetric, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, UInt8, Ptr{Cvoid}, Ptr{Cvoid}),
encryptor, plain, true, destination, C_NULL)
@check_return_value retval
return destination
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 9861 |
"""
    Evaluator

An `Evaluator` is used to perform arithmetic and other operations on `Ciphertext` instances. These
include addition, multiplication, relinearization, and modulus switching.
See also: [`Ciphertext`](@ref)
"""
mutable struct Evaluator <: SEALObject
handle::Ptr{Cvoid}
# Create an evaluator for the given `SEALContext`.
function Evaluator(context::SEALContext)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:Evaluator_Create, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return Evaluator(handleref[])
end
# Wrap an existing C handle; a finalizer releases the native object on GC.
function Evaluator(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
# Release the native Evaluator if still allocated; idempotent (handle reset to C_NULL).
function destroy!(object::Evaluator)
if isallocated(object)
@check_return_value ccall((:Evaluator_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
# Square `encrypted`, writing the result into `destination`; returns `destination`.
function square!(destination::Ciphertext, encrypted::Ciphertext, evaluator::Evaluator)
retval = ccall((:Evaluator_Square, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant: overwrites `encrypted` with its square.
function square_inplace!(encrypted::Ciphertext, evaluator::Evaluator)
return square!(encrypted, encrypted, evaluator)
end
# Relinearize `encrypted` (reduce its size back to 2) using `relinkeys`, writing the
# result into `destination`; returns `destination`.
function relinearize!(destination::Ciphertext, encrypted::Ciphertext, relinkeys::RelinKeys,
evaluator::Evaluator)
retval = ccall((:Evaluator_Relinearize, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, relinkeys, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant of `relinearize!`.
function relinearize_inplace!(encrypted::Ciphertext, relinkeys::RelinKeys, evaluator::Evaluator)
return relinearize!(encrypted, encrypted, relinkeys, evaluator)
end
# Rescale `encrypted` down to the next level in the modulus chain (CKKS), writing the
# result into `destination`; returns `destination`.
function rescale_to_next!(destination::Ciphertext, encrypted::Ciphertext, evaluator::Evaluator)
retval = ccall((:Evaluator_RescaleToNext, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant of `rescale_to_next!`.
function rescale_to_next_inplace!(encrypted::Ciphertext, evaluator::Evaluator)
return rescale_to_next!(encrypted, encrypted, evaluator)
end
# Multiply `encrypted` by the plaintext `plain`, writing the result into `destination`;
# returns `destination`.
function multiply_plain!(destination::Ciphertext, encrypted::Ciphertext, plain::Plaintext,
evaluator::Evaluator)
retval = ccall((:Evaluator_MultiplyPlain, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, plain, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant of `multiply_plain!`.
function multiply_plain_inplace!(encrypted::Ciphertext, plain::Plaintext,
evaluator::Evaluator)
return multiply_plain!(encrypted, encrypted, plain, evaluator)
end
# Multiply two ciphertexts, writing the result into `destination`; returns `destination`.
function multiply!(destination::Ciphertext, encrypted1::Ciphertext, encrypted2::Ciphertext,
evaluator::Evaluator)
retval = ccall((:Evaluator_Multiply, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted1, encrypted2, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant: overwrites `encrypted1` with the product.
function multiply_inplace!(encrypted1::Ciphertext, encrypted2::Ciphertext, evaluator::Evaluator)
return multiply!(encrypted1, encrypted1, encrypted2, evaluator)
end
# Switch `encrypted` down to the parameter level identified by `parms_id`, writing the
# result into `destination`; returns `destination`.
function mod_switch_to!(destination::Ciphertext, encrypted::Ciphertext,
parms_id::DenseVector{UInt64}, evaluator::Evaluator)
retval = ccall((:Evaluator_ModSwitchTo1, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt64}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, parms_id, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant of the ciphertext `mod_switch_to!`.
function mod_switch_to_inplace!(encrypted::Ciphertext, parms_id::DenseVector{UInt64},
evaluator::Evaluator)
return mod_switch_to!(encrypted, encrypted, parms_id, evaluator)
end
# Switch `encrypted` down one level in the modulus chain, writing the result into
# `destination`; returns `destination`.
function mod_switch_to_next!(destination::Ciphertext, encrypted::Ciphertext, evaluator::Evaluator)
retval = ccall((:Evaluator_ModSwitchToNext1, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, destination, C_NULL)
@check_return_value retval
return destination
end
# In-place variant of `mod_switch_to_next!`.
function mod_switch_to_next_inplace!(encrypted::Ciphertext, evaluator::Evaluator)
return mod_switch_to_next!(encrypted, encrypted, evaluator)
end
# Plaintext variant: switch `plain` to the parameter level identified by `parms_id`,
# writing the result into `destination`; returns `destination`.
function mod_switch_to!(destination::Plaintext, plain::Plaintext, parms_id::DenseVector{UInt64},
evaluator::Evaluator)
retval = ccall((:Evaluator_ModSwitchTo2, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt64}, Ptr{Cvoid}),
evaluator, plain, parms_id, destination)
@check_return_value retval
return destination
end
# In-place variant of the plaintext `mod_switch_to!`.
function mod_switch_to_inplace!(plain::Plaintext, parms_id::DenseVector{UInt64},
evaluator::Evaluator)
return mod_switch_to!(plain, plain, parms_id, evaluator)
end
# Add two ciphertexts, writing the result into `destination`; returns `destination`.
function add!(destination::Ciphertext, encrypted1::Ciphertext, encrypted2::Ciphertext,
evaluator::Evaluator)
retval = ccall((:Evaluator_Add, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted1, encrypted2, destination)
@check_return_value retval
return destination
end
# In-place variant: overwrites `encrypted1` with the sum.
function add_inplace!(encrypted1::Ciphertext, encrypted2::Ciphertext, evaluator::Evaluator)
return add!(encrypted1, encrypted1, encrypted2, evaluator)
end
# Add the plaintext `plain` to `encrypted`, writing the result into `destination`;
# returns `destination`.
function add_plain!(destination::Ciphertext, encrypted::Ciphertext, plain::Plaintext,
evaluator::Evaluator)
retval = ccall((:Evaluator_AddPlain, libsealc), Clong,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
evaluator, encrypted, plain, destination)
@check_return_value retval
return destination
end
# In-place variant of `add_plain!`.
function add_plain_inplace!(encrypted::Ciphertext, plain::Plaintext, evaluator::Evaluator)
return add_plain!(encrypted, encrypted, plain, evaluator)
end
"""
    rotate_vector!(destination, encrypted, steps, galois_keys, evaluator)

Cyclically rotate the encrypted vector in `encrypted` by `steps` slots using
`galois_keys`; the result is stored in `destination`, which is also returned.
"""
function rotate_vector!(destination::Ciphertext, encrypted::Ciphertext, steps,
                        galois_keys::GaloisKeys, evaluator::Evaluator)
    # Trailing C_NULL: memory-pool handle argument of the C API is left at its default.
    status = ccall((:Evaluator_RotateVector, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Cint, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted, steps, galois_keys, destination, C_NULL)
    @check_return_value status
    return destination
end

"""
    rotate_vector_inplace!(encrypted, steps, galois_keys, evaluator)

In-place variant of [`rotate_vector!`](@ref).
"""
function rotate_vector_inplace!(encrypted::Ciphertext, steps, galois_keys::GaloisKeys,
                                evaluator::Evaluator)
    return rotate_vector!(encrypted, encrypted, steps, galois_keys, evaluator)
end

"""
    rotate_rows!(destination, encrypted, steps, galois_keys, evaluator)

Rotate the rows of the batched matrix in `encrypted` by `steps` using `galois_keys`;
the result is stored in `destination`, which is also returned.
"""
function rotate_rows!(destination::Ciphertext, encrypted::Ciphertext, steps,
                      galois_keys::GaloisKeys, evaluator::Evaluator)
    status = ccall((:Evaluator_RotateRows, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Cint, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted, steps, galois_keys, destination, C_NULL)
    @check_return_value status
    return destination
end

"""
    rotate_rows_inplace!(encrypted, steps, galois_keys, evaluator)

In-place variant of [`rotate_rows!`](@ref).
"""
function rotate_rows_inplace!(encrypted::Ciphertext, steps, galois_keys::GaloisKeys,
                              evaluator::Evaluator)
    return rotate_rows!(encrypted, encrypted, steps, galois_keys, evaluator)
end

"""
    rotate_columns!(destination, encrypted, galois_keys, evaluator)

Swap the two column halves of the batched matrix in `encrypted` using `galois_keys`;
the result is stored in `destination`, which is also returned.
"""
function rotate_columns!(destination::Ciphertext, encrypted::Ciphertext,
                         galois_keys::GaloisKeys, evaluator::Evaluator)
    status = ccall((:Evaluator_RotateColumns, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted, galois_keys, destination, C_NULL)
    @check_return_value status
    return destination
end

"""
    rotate_columns_inplace!(encrypted, galois_keys, evaluator)

In-place variant of [`rotate_columns!`](@ref).
"""
function rotate_columns_inplace!(encrypted::Ciphertext, galois_keys::GaloisKeys,
                                 evaluator::Evaluator)
    return rotate_columns!(encrypted, encrypted, galois_keys, evaluator)
end

"""
    complex_conjugate!(destination, encrypted, galois_keys, evaluator)

Apply complex conjugation to `encrypted` using `galois_keys`; the result is stored in
`destination`, which is also returned.
"""
function complex_conjugate!(destination::Ciphertext, encrypted::Ciphertext,
                            galois_keys::GaloisKeys, evaluator::Evaluator)
    status = ccall((:Evaluator_ComplexConjugate, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted, galois_keys, destination, C_NULL)
    @check_return_value status
    return destination
end

"""
    complex_conjugate_inplace!(encrypted, galois_keys, evaluator)

In-place variant of [`complex_conjugate!`](@ref).
"""
function complex_conjugate_inplace!(encrypted::Ciphertext, galois_keys::GaloisKeys,
                                    evaluator::Evaluator)
    return complex_conjugate!(encrypted, encrypted, galois_keys, evaluator)
end
"""
    negate!(destination::Ciphertext, encrypted::Ciphertext, evaluator)

Homomorphically negate `encrypted` and store the result in `destination`, which is also
returned.
"""
function negate!(destination::Ciphertext, encrypted::Ciphertext, evaluator::Evaluator)
    status = ccall((:Evaluator_Negate, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted, destination)
    @check_return_value status
    return destination
end

"""
    negate_inplace!(encrypted::Ciphertext, evaluator)

In-place variant of [`negate!`](@ref).
"""
negate_inplace!(encrypted::Ciphertext, evaluator::Evaluator) =
    negate!(encrypted, encrypted, evaluator)

"""
    sub!(destination::Ciphertext, encrypted1::Ciphertext, encrypted2::Ciphertext, evaluator)

Homomorphically subtract `encrypted2` from `encrypted1` and store the difference in
`destination`, which is also returned.
"""
function sub!(destination::Ciphertext, encrypted1::Ciphertext, encrypted2::Ciphertext,
              evaluator::Evaluator)
    status = ccall((:Evaluator_Sub, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}),
                   evaluator, encrypted1, encrypted2, destination)
    @check_return_value status
    return destination
end

"""
    sub_inplace!(encrypted1::Ciphertext, encrypted2::Ciphertext, evaluator)

Subtract `encrypted2` from `encrypted1`, overwriting `encrypted1` with the difference.
"""
sub_inplace!(encrypted1::Ciphertext, encrypted2::Ciphertext, evaluator::Evaluator) =
    sub!(encrypted1, encrypted1, encrypted2, evaluator)
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 1120 |
"""
    GaloisKeys

Stores Galois keys generated by a `KeyGenerator` instance.

See also: [`KeyGenerator`](@ref)
"""
mutable struct GaloisKeys <: SEALObject
    handle::Ptr{Cvoid}

    function GaloisKeys()
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        # Galois keys share their data layout with key switching keys, hence the
        # KSwitchKeys constructor of the C wrapper is used here.
        status = ccall((:KSwitchKeys_Create1, libsealc), Clong,
                       (Ref{Ptr{Cvoid}},),
                       ptr)
        @check_return_value status
        return GaloisKeys(ptr[])
    end

    function GaloisKeys(handle::Ptr{Cvoid})
        key = new(handle)
        finalizer(destroy!, key)
        return key
    end
end

"""
    destroy!(object::GaloisKeys)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::GaloisKeys)
    isallocated(object) || return nothing
    @check_return_value ccall((:KSwitchKeys_Destroy, libsealc), Clong, (Ptr{Cvoid},),
                              object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    parms_id(key::GaloisKeys)

Return the encryption parameter id of `key` as a vector of four `UInt64` values.
"""
function parms_id(key::GaloisKeys)
    id = zeros(UInt64, 4)
    status = ccall((:KSwitchKeys_GetParmsId, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   key, id)
    @check_return_value status
    return id
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 3247 |
"""
    KeyGenerator

Can be used to generate a pair of matching secret and public keys. In addition, the
`KeyGenerator` provides functions to obtain relinearization keys (required after
multiplication) and Galois keys (needed for rotation).

See also: [`SecretKey`](@ref), [`PublicKey`](@ref), [`RelinKeys`](@ref)
"""
mutable struct KeyGenerator <: SEALObject
    handle::Ptr{Cvoid}

    function KeyGenerator(context::SEALContext)
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:KeyGenerator_Create1, libsealc), Clong,
                       (Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
                       context, ptr)
        @check_return_value status
        return KeyGenerator(ptr[])
    end

    function KeyGenerator(handle::Ptr{Cvoid})
        keygen = new(handle)
        finalizer(destroy!, keygen)
        return keygen
    end
end

"""
    destroy!(object::KeyGenerator)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::KeyGenerator)
    isallocated(object) || return nothing
    @check_return_value ccall((:KeyGenerator_Destroy, libsealc), Clong, (Ptr{Cvoid},),
                              object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    create_public_key!(destination::PublicKey, keygen::KeyGenerator)

Generate a new public key and store it in `destination`, releasing whatever key
`destination` held before. Returns `nothing`.
"""
function create_public_key!(destination::PublicKey, keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    # The Boolean flag differs between this method (`false`) and the non-mutating
    # variant below (`true`). NOTE(review): presumably it requests a seeded/compact
    # key -- confirm against the SEAL C export before relying on it.
    status = ccall((:KeyGenerator_CreatePublicKey, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
                   keygen, false, ptr)
    @check_return_value status
    # Destroy previous key and reuse its container
    destroy!(destination)
    sethandle!(destination, ptr[])
    return nothing
end

"""
    create_public_key(keygen::KeyGenerator)

Generate and return a fresh `PublicKey`.
"""
function create_public_key(keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:KeyGenerator_CreatePublicKey, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
                   keygen, true, ptr)
    @check_return_value status
    return PublicKey(ptr[])
end

"""
    secret_key(keygen::KeyGenerator)

Return the secret key held by `keygen` as a `SecretKey`.
"""
function secret_key(keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:KeyGenerator_SecretKey, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
                   keygen, ptr)
    @check_return_value status
    return SecretKey(ptr[])
end

"""
    create_relin_keys!(destination::RelinKeys, keygen::KeyGenerator)

Generate new relinearization keys and store them in `destination`, releasing whatever
keys `destination` held before. Returns `nothing`.
"""
function create_relin_keys!(destination::RelinKeys, keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:KeyGenerator_CreateRelinKeys, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
                   keygen, false, ptr)
    @check_return_value status
    # Destroy previous key and reuse its container
    destroy!(destination)
    sethandle!(destination, ptr[])
    return nothing
end

"""
    create_relin_keys(keygen::KeyGenerator)

Generate and return fresh `RelinKeys`.
"""
function create_relin_keys(keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:KeyGenerator_CreateRelinKeys, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
                   keygen, true, ptr)
    @check_return_value status
    return RelinKeys(ptr[])
end

"""
    create_galois_keys!(destination::GaloisKeys, keygen::KeyGenerator)

Generate Galois keys (for all supported rotations) and store them in `destination`,
releasing whatever keys `destination` held before. Returns `nothing`.
"""
function create_galois_keys!(destination::GaloisKeys, keygen::KeyGenerator)
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:KeyGenerator_CreateGaloisKeysAll, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
                   keygen, false, ptr)
    @check_return_value status
    # Destroy previous key and reuse its container
    destroy!(destination)
    sethandle!(destination, ptr[])
    return nothing
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 981 |
"""
    MemoryPoolHandle

Wraps a handle to a native SEAL memory pool.
"""
mutable struct MemoryPoolHandle <: SEALObject
    handle::Ptr{Cvoid}

    function MemoryPoolHandle(handle::Ptr{Cvoid})
        pool = new(handle)
        finalizer(destroy!, pool)
        return pool
    end
end

"""
    destroy!(object::MemoryPoolHandle)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::MemoryPoolHandle)
    isallocated(object) || return nothing
    @check_return_value ccall((:MemoryPoolHandle_Destroy, libsealc), Clong, (Ptr{Cvoid},),
                              object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    alloc_byte_count(handle::MemoryPoolHandle)

Return the number of bytes currently allocated in the pool behind `handle` as an `Int`.
"""
function alloc_byte_count(handle::MemoryPoolHandle)
    nbytes = Ref{UInt64}(0)
    status = ccall((:MemoryPoolHandle_AllocByteCount, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   handle, nbytes)
    @check_return_value status
    return Int(nbytes[])
end

"""
    memory_manager_get_pool()

Return a `MemoryPoolHandle` for the memory pool provided by the SEAL memory manager.
"""
function memory_manager_get_pool()
    ptr = Ref{Ptr{Cvoid}}(C_NULL)
    status = ccall((:MemoryManager_GetPool2, libsealc), Clong,
                   (Ref{Ptr{Cvoid}},),
                   ptr)
    @check_return_value status
    return MemoryPoolHandle(ptr[])
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 2914 |
"""
    Modulus

Represents a non-negative integer modulus of up to 61 bits, e.g., for the plain modulus and the
coefficient modulus in instances of `EncryptionParameters`.

See also: [`EncryptionParameters`](@ref)
"""
mutable struct Modulus <: SEALObject
    handle::Ptr{Cvoid}

    function Modulus(value::Integer)
        handleref = Ref{Ptr{Cvoid}}(C_NULL)
        # The value must be passed as a full 64-bit unsigned integer: a `Modulus` holds up
        # to 61 bits. The previous declaration used `UInt8` here, which silently truncated
        # every value above 255 (e.g. `Modulus(1024)` became `Modulus(0)`).
        retval = ccall((:Modulus_Create1, libsealc), Clong,
                       (UInt64, Ref{Ptr{Cvoid}}),
                       value, handleref)
        @check_return_value retval
        return Modulus(handleref[])
    end

    # Wrap an existing native handle; `destroy_on_gc=false` leaves ownership with the
    # native side (no finalizer is installed).
    function Modulus(handle::Ptr{Cvoid}; destroy_on_gc=true)
        object = new(handle)
        if destroy_on_gc
            finalizer(destroy!, object)
        end
        return object
    end
end
"""
    destroy!(object::Modulus)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::Modulus)
    isallocated(object) || return nothing
    @check_return_value ccall((:Modulus_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
    sethandle!(object, C_NULL)
    return nothing
end

# Security level options; the enum value is passed to the C library as a `Cint`.
module SecLevelType
@enum SecLevelTypeEnum::Cint none=0 tc128=128 tc192=192 tc256=256
end

"""
    bit_count(modulus::Modulus)

Return the number of significant bits of `modulus` as an `Int`.
"""
function bit_count(modulus::Modulus)
    nbits = Ref{Cint}(0)
    status = ccall((:Modulus_BitCount, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{Cint}),
                   modulus, nbits)
    @check_return_value status
    return Int(nbits[])
end

"""
    value(modulus::Modulus)

Return the numeric value of `modulus` as an `Int`.
"""
function value(modulus::Modulus)
    val = Ref{UInt64}(0)
    status = ccall((:Modulus_Value, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   modulus, val)
    @check_return_value status
    return Int(val[])
end

"""
    coeff_modulus_create(poly_modulus_degree, bit_sizes)

Create and return a vector of `Modulus` primes, one for each entry of `bit_sizes`,
suitable as coefficient modulus for the given `poly_modulus_degree`.
"""
function coeff_modulus_create(poly_modulus_degree, bit_sizes)
    prime_ptrs = Vector{Ptr{Cvoid}}(undef, length(bit_sizes))
    status = ccall((:CoeffModulus_Create, libsealc), Clong,
                   (UInt64, UInt64, Ref{Cint}, Ref{Ptr{Cvoid}}),
                   poly_modulus_degree, length(bit_sizes), collect(Cint, bit_sizes),
                   prime_ptrs)
    @check_return_value status
    return Modulus[Modulus(ptr) for ptr in prime_ptrs]
end

"""
    coeff_modulus_bfv_default(poly_modulus_degree, sec_level=SecLevelType.tc128)

Return the default coefficient modulus (a vector of `Modulus` primes) for the given
polynomial modulus degree and security level.
"""
function coeff_modulus_bfv_default(poly_modulus_degree, sec_level=SecLevelType.tc128)
    count = Ref{UInt64}(0)
    # Two-pass query: the first call (null output pointer) only reports the number of
    # primes, the second call fills the pointer array.
    status = ccall((:CoeffModulus_BFVDefault, libsealc), Clong,
                   (UInt64, Cint, Ref{UInt64}, Ptr{Ptr{Cvoid}}),
                   poly_modulus_degree, sec_level, count, C_NULL)
    @check_return_value status
    prime_ptrs = Vector{Ptr{Cvoid}}(undef, count[])
    status = ccall((:CoeffModulus_BFVDefault, libsealc), Clong,
                   (UInt64, Cint, Ref{UInt64}, Ref{Ptr{Cvoid}}),
                   poly_modulus_degree, sec_level, count, prime_ptrs)
    @check_return_value status
    return Modulus[Modulus(ptr) for ptr in prime_ptrs]
end

"""
    plain_modulus_batching(poly_modulus_degree, bit_size)

Return a single `Modulus` of `bit_size` bits for the given polynomial modulus degree,
intended for use as a batching-enabled plain modulus. Delegates to
[`coeff_modulus_create`](@ref) with a one-element bit size list.
"""
function plain_modulus_batching(poly_modulus_degree, bit_size)
    return first(coeff_modulus_create(poly_modulus_degree, [bit_size]))
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 4557 |
"""
    Plaintext

A plaintext element, storing data as a polynomial modulo the plaintext modulus. It can be
used to create a `Ciphertext` element by encrypting it with an appropriate `Encryptor`
instance. Decrypting a `Ciphertext` with a `Decryptor` instance will again return a
`Plaintext` instance.

See also: [`Ciphertext`](@ref), [`Encryptor`](@ref), [`Decryptor`](@ref)
"""
mutable struct Plaintext <: SEALObject
    handle::Ptr{Cvoid}

    # Empty plaintext; the C_NULL argument is the memory-pool handle of the C API.
    function Plaintext()
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:Plaintext_Create1, libsealc), Clong,
                       (Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
                       C_NULL, ptr)
        @check_return_value status
        return Plaintext(ptr[])
    end

    # Plaintext with the given capacity and coefficient count (forwarded to the C
    # constructor).
    function Plaintext(capacity, coeff_count)
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:Plaintext_Create3, libsealc), Clong,
                       (UInt64, UInt64, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
                       capacity, coeff_count, C_NULL, ptr)
        @check_return_value status
        return Plaintext(ptr[])
    end

    # Plaintext constructed from its string representation `hex_poly` (see also
    # `to_string`).
    function Plaintext(hex_poly)
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:Plaintext_Create4, libsealc), Clong,
                       (Cstring, Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
                       hex_poly, C_NULL, ptr)
        @check_return_value status
        return Plaintext(ptr[])
    end

    function Plaintext(handle::Ptr{Cvoid})
        plain = new(handle)
        finalizer(destroy!, plain)
        return plain
    end
end
"""
    destroy!(object::Plaintext)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::Plaintext)
    isallocated(object) || return nothing
    @check_return_value ccall((:Plaintext_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
    sethandle!(object, C_NULL)
    return nothing
end
"""
    scale(plain::Plaintext)

Return the scale associated with `plain` as a `Float64`.
"""
function scale(plain::Plaintext)
    value = Ref{Cdouble}(0)
    retval = ccall((:Plaintext_Scale, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{Cdouble}),
                   plain, value)
    @check_return_value retval
    return Float64(value[])
end

"""
    scale!(plain::Plaintext, value::Float64)

Set the scale of `plain` to `value` and return `plain`.
"""
function scale!(plain::Plaintext, value::Float64)
    # The C export takes the new scale *by value* as a double. The previous declaration
    # used `Ref{Cdouble}`, which passes a pointer instead of the value itself, so the
    # native side would have read a garbage scale. (`Ref{Cdouble}` is correct only for
    # the out-parameter of the getter above.)
    retval = ccall((:Plaintext_SetScale, libsealc), Clong,
                   (Ptr{Cvoid}, Cdouble),
                   plain, value)
    @check_return_value retval
    return plain
end
"""
    parms_id(plain::Plaintext)

Return the encryption parameter id of `plain` as a vector of four `UInt64` values.
"""
function parms_id(plain::Plaintext)
    id = zeros(UInt64, 4)
    status = ccall((:Plaintext_GetParmsId, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   plain, id)
    @check_return_value status
    return id
end
"""
    to_string(plain::Plaintext)

Return the string representation of the polynomial stored in `plain`. Uses the two-pass
pattern of the SEAL C API: first query the required length, then copy the characters.
"""
function to_string(plain::Plaintext)
len = Ref{UInt64}(0)
# First call to obtain length (message pointer is null)
retval = ccall((:Plaintext_ToString, libsealc), Clong,
(Ptr{Cvoid}, Ptr{UInt8}, Ref{UInt64}),
plain, C_NULL, len)
@check_return_value retval
# Second call to obtain message
# Note: The "+1" is needed since the terminating NULL byte is included in the *copy* operation in
# SEAL, but *not* in the returned length.
message = Vector{UInt8}(undef, len[] + 1)
retval = ccall((:Plaintext_ToString, libsealc), Clong,
(Ptr{Cvoid}, Ptr{UInt8}, Ref{UInt64}),
plain, message, len)
@check_return_value retval
# Return as String but without terminating NULL byte
return String(message[1:end-1])
end
"""
    save_size([compr_mode,] plain::Plaintext)

Return an upper bound (in bytes) on the size of `plain` when serialized with
`compr_mode` (defaults to `ComprModeType.default`).
"""
function save_size(compr_mode, plain::Plaintext)
    nbytes = Ref{Int64}(0)
    status = ccall((:Plaintext_SaveSize, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Int64}),
                   plain, compr_mode, nbytes)
    @check_return_value status
    return Int(nbytes[])
end
save_size(plain::Plaintext) = save_size(ComprModeType.default, plain)

"""
    save!(buffer, [length, [compr_mode,]] plain::Plaintext)

Serialize `plain` into `buffer` (using at most `length` bytes, defaulting to the buffer
length) with compression mode `compr_mode` (defaulting to `ComprModeType.default`).
Returns the number of bytes written.
"""
function save!(buffer::DenseVector{UInt8}, length::Integer,
               compr_mode::ComprModeType.ComprModeTypeEnum, plain::Plaintext)
    written = Ref{Int64}(0)
    status = ccall((:Plaintext_Save, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt8}, UInt64, UInt8, Ref{Int64}),
                   plain, buffer, length, compr_mode, written)
    @check_return_value status
    return Int(written[])
end
save!(buffer::DenseVector{UInt8}, length::Integer, plain::Plaintext) =
    save!(buffer, length, ComprModeType.default, plain)
save!(buffer::DenseVector{UInt8}, plain::Plaintext) = save!(buffer, length(buffer), plain)

"""
    ==(plain1::Plaintext, plain2::Plaintext)

Equality of plaintexts as determined by the native SEAL comparison.
"""
function Base.:(==)(plain1::Plaintext, plain2::Plaintext)
    equal = Ref{UInt8}(0)
    status = ccall((:Plaintext_Equals, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt8}),
                   plain1, plain2, equal)
    @check_return_value status
    return Bool(equal[])
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 1030 |
"""
    PublicKey

Stores a public key generated by a `KeyGenerator` instance.

See also: [`KeyGenerator`](@ref)
"""
mutable struct PublicKey <: SEALObject
    handle::Ptr{Cvoid}

    function PublicKey()
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:PublicKey_Create1, libsealc), Clong,
                       (Ref{Ptr{Cvoid}},),
                       ptr)
        @check_return_value status
        return PublicKey(ptr[])
    end

    function PublicKey(handle::Ptr{Cvoid})
        key = new(handle)
        finalizer(destroy!, key)
        return key
    end
end

"""
    destroy!(object::PublicKey)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::PublicKey)
    isallocated(object) || return nothing
    @check_return_value ccall((:PublicKey_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    parms_id(key::PublicKey)

Return the encryption parameter id of `key` as a vector of four `UInt64` values.
"""
function parms_id(key::PublicKey)
    id = zeros(UInt64, 4)
    status = ccall((:PublicKey_ParmsId, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   key, id)
    @check_return_value status
    return id
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 2843 |
"""
    RelinKeys

Stores relinearization keys generated by a `KeyGenerator` instance.

See also: [`KeyGenerator`](@ref)
"""
mutable struct RelinKeys <: SEALObject
    handle::Ptr{Cvoid}

    function RelinKeys()
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        # Relinearization keys share their data layout with key switching keys, hence
        # the KSwitchKeys constructor of the C wrapper is used here.
        status = ccall((:KSwitchKeys_Create1, libsealc), Clong,
                       (Ref{Ptr{Cvoid}},),
                       ptr)
        @check_return_value status
        return RelinKeys(ptr[])
    end

    function RelinKeys(handle::Ptr{Cvoid})
        key = new(handle)
        finalizer(destroy!, key)
        return key
    end
end

"""
    destroy!(object::RelinKeys)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::RelinKeys)
    isallocated(object) || return nothing
    @check_return_value ccall((:KSwitchKeys_Destroy, libsealc), Clong, (Ptr{Cvoid},),
                              object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    parms_id(key::RelinKeys)

Return the encryption parameter id of `key` as a vector of four `UInt64` values.
"""
function parms_id(key::RelinKeys)
    id = zeros(UInt64, 4)
    status = ccall((:KSwitchKeys_GetParmsId, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   key, id)
    @check_return_value status
    return id
end

"""
    save_size([compr_mode,] key::RelinKeys)

Return an upper bound (in bytes) on the size of `key` when serialized with `compr_mode`
(defaults to `ComprModeType.default`).
"""
function save_size(compr_mode, key::RelinKeys)
    nbytes = Ref{Int64}(0)
    status = ccall((:KSwitchKeys_SaveSize, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Int64}),
                   key, compr_mode, nbytes)
    @check_return_value status
    return Int(nbytes[])
end
save_size(key::RelinKeys) = save_size(ComprModeType.default, key)

"""
    save!(buffer, [length, [compr_mode,]] key::RelinKeys)

Serialize `key` into `buffer` (using at most `length` bytes, defaulting to the buffer
length) with compression mode `compr_mode` (defaulting to `ComprModeType.default`).
Returns the number of bytes written.
"""
function save!(buffer::DenseVector{UInt8}, length::Integer,
               compr_mode::ComprModeType.ComprModeTypeEnum, key::RelinKeys)
    written = Ref{Int64}(0)
    status = ccall((:KSwitchKeys_Save, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt8}, UInt64, UInt8, Ref{Int64}),
                   key, buffer, length, compr_mode, written)
    @check_return_value status
    return Int(written[])
end
save!(buffer::DenseVector{UInt8}, length::Integer, key::RelinKeys) =
    save!(buffer, length, ComprModeType.default, key)
save!(buffer::DenseVector{UInt8}, key::RelinKeys) = save!(buffer, length(buffer), key)

"""
    load!(key::RelinKeys, context::SEALContext, buffer[, length])

Deserialize `key` from up to `length` bytes of `buffer` (defaulting to the buffer
length), validating against `context`. Returns the number of bytes read.
"""
function load!(key::RelinKeys, context::SEALContext, buffer::DenseVector{UInt8}, length)
    consumed = Ref{Int64}(0)
    status = ccall((:KSwitchKeys_Load, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt8}, UInt64, Ref{Int64}),
                   key, context, buffer, length, consumed)
    @check_return_value status
    return Int(consumed[])
end
function load!(key::RelinKeys, context::SEALContext, buffer::DenseVector{UInt8})
    return load!(key, context, buffer, length(buffer))
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 2526 |
"""
    SecretKey

Stores a secret key generated by a `KeyGenerator` instance.

See also: [`KeyGenerator`](@ref)
"""
mutable struct SecretKey <: SEALObject
    handle::Ptr{Cvoid}

    function SecretKey()
        ptr = Ref{Ptr{Cvoid}}(C_NULL)
        status = ccall((:SecretKey_Create1, libsealc), Clong,
                       (Ref{Ptr{Cvoid}},),
                       ptr)
        @check_return_value status
        return SecretKey(ptr[])
    end

    function SecretKey(handle::Ptr{Cvoid})
        key = new(handle)
        finalizer(destroy!, key)
        return key
    end
end

"""
    destroy!(object::SecretKey)

Release the native resources held by `object` and reset its handle. Safe to call more
than once.
"""
function destroy!(object::SecretKey)
    isallocated(object) || return nothing
    @check_return_value ccall((:SecretKey_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
    sethandle!(object, C_NULL)
    return nothing
end

"""
    parms_id(key::SecretKey)

Return the encryption parameter id of `key` as a vector of four `UInt64` values.
"""
function parms_id(key::SecretKey)
    id = zeros(UInt64, 4)
    status = ccall((:SecretKey_ParmsId, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt64}),
                   key, id)
    @check_return_value status
    return id
end

"""
    save!(buffer, [length, [compr_mode,]] key::SecretKey)

Serialize `key` into `buffer` (using at most `length` bytes, defaulting to the buffer
length) with compression mode `compr_mode` (defaulting to `ComprModeType.default`).
Returns the number of bytes written.
"""
function save!(buffer::DenseVector{UInt8}, length::Integer,
               compr_mode::ComprModeType.ComprModeTypeEnum, key::SecretKey)
    written = Ref{Int64}(0)
    status = ccall((:SecretKey_Save, libsealc), Clong,
                   (Ptr{Cvoid}, Ref{UInt8}, UInt64, UInt8, Ref{Int64}),
                   key, buffer, length, compr_mode, written)
    @check_return_value status
    return Int(written[])
end
save!(buffer::DenseVector{UInt8}, length::Integer, key::SecretKey) =
    save!(buffer, length, ComprModeType.default, key)
save!(buffer::DenseVector{UInt8}, key::SecretKey) = save!(buffer, length(buffer), key)

"""
    save_size([compr_mode,] key::SecretKey)

Return an upper bound (in bytes) on the size of `key` when serialized with `compr_mode`
(defaults to `ComprModeType.default`).
"""
function save_size(compr_mode, key::SecretKey)
    nbytes = Ref{Int64}(0)
    status = ccall((:SecretKey_SaveSize, libsealc), Clong,
                   (Ptr{Cvoid}, UInt8, Ref{Int64}),
                   key, compr_mode, nbytes)
    @check_return_value status
    return Int(nbytes[])
end
save_size(key::SecretKey) = save_size(ComprModeType.default, key)

"""
    load!(key::SecretKey, context::SEALContext, buffer[, length])

Deserialize `key` from up to `length` bytes of `buffer` (defaulting to the buffer
length), validating against `context`. Returns the number of bytes read.
"""
function load!(key::SecretKey, context::SEALContext, buffer::DenseVector{UInt8}, length)
    consumed = Ref{Int64}(0)
    status = ccall((:SecretKey_Load, libsealc), Clong,
                   (Ptr{Cvoid}, Ptr{Cvoid}, Ref{UInt8}, UInt64, Ref{Int64}),
                   key, context, buffer, length, consumed)
    @check_return_value status
    return Int(consumed[])
end
function load!(key::SecretKey, context::SEALContext, buffer::DenseVector{UInt8})
    return load!(key, context, buffer, length(buffer))
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 693 |
# Compression modes for serialized SEAL objects. The enum is backed by `UInt8` because
# its value is passed straight through to the C library (see the `save!`/`save_size`
# methods) and stored in the serialized header's `compr_mode` byte.
module ComprModeType
@enum ComprModeTypeEnum::UInt8 none=0 zlib=1 zstd=2
# Compression mode used by the convenience methods when none is given explicitly.
const default = zstd
end
"""
    SEALHeader

Mirrors the header that Microsoft SEAL prepends to serialized objects. The field order
and widths define the binary layout read by [`load_header!`](@ref) and must not change.
"""
mutable struct SEALHeader
    magic::UInt16
    header_size::UInt8
    version_major::UInt8
    version_minor::UInt8
    compr_mode::UInt8
    reserved::UInt16
    size::UInt64
end

# Zero-initialized header, to be filled by `load_header!`.
SEALHeader() = SEALHeader(0, 0, 0, 0, 0, 0, 0)

"""
    load_header!(header::SEALHeader, buffer)

Parse the leading bytes of `buffer` into `header`, reading the fields in declaration
order, and return the updated `header`.
"""
function load_header!(header::SEALHeader, buffer::DenseVector{UInt8})
    stream = IOBuffer(buffer)
    header.magic = read(stream, UInt16)
    header.header_size = read(stream, UInt8)
    header.version_major = read(stream, UInt8)
    header.version_minor = read(stream, UInt8)
    header.compr_mode = read(stream, UInt8)
    header.reserved = read(stream, UInt16)
    header.size = read(stream, UInt64)
    return header
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 1432 |
"""
    version_major()

Return the *major* version of the used SEAL library as an integer.

See also: [`version_minor`](@ref), [`version_patch`](@ref), [`version`](@ref)
"""
function version_major()
    part = Ref{UInt8}(0)
    status = ccall((:Version_Major, libsealc), Clong, (Ref{UInt8},), part)
    @check_return_value status
    return Int(part[])
end

"""
    version_minor()

Return the *minor* version of the used SEAL library as an integer.

See also: [`version_major`](@ref), [`version_patch`](@ref), [`version`](@ref)
"""
function version_minor()
    part = Ref{UInt8}(0)
    status = ccall((:Version_Minor, libsealc), Clong, (Ref{UInt8},), part)
    @check_return_value status
    return Int(part[])
end

"""
    version_patch()

Return the *patch* version of the used SEAL library as an integer.

See also: [`version_major`](@ref), [`version_minor`](@ref), [`version`](@ref)
"""
function version_patch()
    part = Ref{UInt8}(0)
    status = ccall((:Version_Patch, libsealc), Clong, (Ref{UInt8},), part)
    @check_return_value status
    return Int(part[])
end

"""
    version()

Return the version of the used SEAL library as a `VersionNumber` in the format
`v"major.minor.patch"`.

See also: [`version_major`](@ref), [`version_minor`](@ref), [`version_patch`](@ref)
"""
function version()
    return VersionNumber(version_major(), version_minor(), version_patch())
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 280 | using SEAL
using Test
# Test driver: each included file wraps one usage scenario of the SEAL.jl bindings in a
# self-contained `@testset`.
# Include files with example-specific tests
include("test_1_bfv_basics.jl")
include("test_2_encoders.jl")
include("test_3_levels.jl")
include("test_4_ckks_basics.jl")
include("test_5_rotation.jl")
include("test_6_serialization.jl")
include("test_extra.jl")
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 6408 | @testset "1_bfv_basics" begin
# End-to-end test of the BFV pipeline: parameter setup -> context -> key generation ->
# encryption -> homomorphic evaluation of 4(x^2+1)(x+1)^2 at x=6 -> decryption.
@testset "EncryptionParameters" begin
@test_nowarn EncryptionParameters(SchemeType.bfv)
end
enc_parms = EncryptionParameters(SchemeType.bfv)
@testset "polynomial modulus degree" begin
@test_nowarn set_poly_modulus_degree!(enc_parms, 4096)
end
@testset "coefficient modulus" begin
@test_nowarn coeff_modulus_bfv_default(4096)
@test_nowarn set_coeff_modulus!(enc_parms, coeff_modulus_bfv_default(4096))
end
@testset "plain modulus" begin
@test_nowarn set_plain_modulus!(enc_parms, 1024)
end
@testset "SEALContext" begin
@test_nowarn SEALContext(enc_parms)
end
context = SEALContext(enc_parms)
@testset "parameter_error_message" begin
@test parameter_error_message(context) == "valid"
end
# Key material: public/secret key pair plus relinearization keys.
@testset "KeyGenerator" begin
@test_nowarn KeyGenerator(context)
end
keygen = KeyGenerator(context)
@testset "PublicKey" begin
@test_nowarn PublicKey()
end
public_key_ = PublicKey()
@testset "create_public_key" begin
@test_nowarn create_public_key!(public_key_, keygen)
end
@testset "SecretKey" begin
@test_nowarn secret_key(keygen)
end
secret_key_ = secret_key(keygen)
@testset "RelinKeys" begin
@test_nowarn RelinKeys()
end
relin_keys_ = RelinKeys()
@testset "create_relin_keys" begin
@test_nowarn create_relin_keys!(relin_keys_, keygen)
end
@testset "Encryptor" begin
@test_nowarn Encryptor(context, public_key_)
end
encryptor = Encryptor(context, public_key_)
@testset "Evaluator" begin
@test_nowarn Evaluator(context)
end
evaluator = Evaluator(context)
@testset "Decryptor" begin
@test_nowarn Decryptor(context, secret_key_)
end
decryptor = Decryptor(context, secret_key_)
# Encrypt x = 6 and verify a decrypt round trip.
@testset "Plaintext" begin
@test_nowarn Plaintext(string(6))
end
x_plain = Plaintext(string(6))
@testset "to_string" begin
@test to_string(x_plain) == "6"
end
x_encrypted = Ciphertext()
@testset "encrypt!" begin
@test_nowarn encrypt!(x_encrypted, x_plain, encryptor)
end
@testset "length" begin
@test length(x_encrypted) == 2
end
@testset "invariant_noise_budget" begin
# Next test is fuzzy since the actual noise budget might vary
@test invariant_noise_budget(x_encrypted, decryptor) in (54, 55, 56)
end
x_decrypted = Plaintext()
@testset "decrypt!" begin
@test_nowarn decrypt!(x_decrypted, x_encrypted, decryptor)
@test to_string(x_decrypted) == "6"
end
# First pass: compute without relinearization (ciphertext sizes grow to 3 and 5).
x_sq_plus_one = Ciphertext()
@testset "square!" begin
@test_nowarn square!(x_sq_plus_one, x_encrypted, evaluator)
end
plain_one = Plaintext("1")
@testset "add_plain_inplace! and length/noise budget" begin
@test_nowarn add_plain_inplace!(x_sq_plus_one, plain_one, evaluator)
@test length(x_sq_plus_one) == 3
@test invariant_noise_budget(x_sq_plus_one, decryptor) == 33
end
decrypted_result = Plaintext()
@testset "decrypt! and check (x^2 + 1 = 37 = 0x25)" begin
@test_nowarn decrypt!(decrypted_result, x_sq_plus_one, decryptor)
@test to_string(decrypted_result) == "25"
end
x_plus_one_sq = Ciphertext()
@testset "add_plain!" begin
@test_nowarn add_plain!(x_plus_one_sq, x_encrypted, plain_one, evaluator)
end
@testset "square_inplace! and length/noise budget" begin
@test_nowarn square_inplace!(x_plus_one_sq, evaluator)
@test length(x_plus_one_sq) == 3
@test invariant_noise_budget(x_plus_one_sq, decryptor) == 33
end
@testset "decrypt! and check ((x+1)^2 = 49 = 0x31)" begin
@test_nowarn decrypt!(decrypted_result, x_plus_one_sq, decryptor)
@test to_string(decrypted_result) == "31"
end
encrypted_result = Ciphertext()
plain_four = Plaintext("4")
@testset "compute encrypted_result (4(x^2+1)(x+1)^2)" begin
@test_nowarn multiply_plain_inplace!(x_sq_plus_one, plain_four, evaluator)
@test_nowarn multiply!(encrypted_result, x_sq_plus_one, x_plus_one_sq, evaluator)
@test length(encrypted_result) == 5
# Next test is fuzzy since the actual noise budget might vary
@test invariant_noise_budget(encrypted_result, decryptor) in (3, 4, 5)
end
# Second pass: same computation with relinearization keeping ciphertext size at 2.
x_squared = Ciphertext()
@testset "compute and relinearize x_squared (x^2)" begin
@test_nowarn square!(x_squared, x_encrypted, evaluator)
@test length(x_squared) == 3
@test_nowarn relinearize_inplace!(x_squared, relin_keys_, evaluator)
@test length(x_squared) == 2
end
@testset "compute x_sq_plus_one (x^2+1) and decrypt" begin
@test_nowarn add_plain!(x_sq_plus_one, x_squared, plain_one, evaluator)
@test invariant_noise_budget(x_sq_plus_one, decryptor) == 33
@test_nowarn decrypt!(decrypted_result, x_sq_plus_one, decryptor)
@test to_string(decrypted_result) == "25"
end
x_plus_one = Ciphertext()
@testset "compute x_plus_one (x+1)" begin
@test_nowarn add_plain!(x_plus_one, x_encrypted, plain_one, evaluator)
end
@testset "compute and relinearize x_plus_one_sq ((x+1)^2)" begin
@test_nowarn square!(x_plus_one_sq, x_plus_one, evaluator)
@test length(x_plus_one_sq) == 3
@test_nowarn relinearize_inplace!(x_plus_one_sq, relin_keys_, evaluator)
@test invariant_noise_budget(x_plus_one_sq, decryptor) == 33
end
@testset "decrypt (x+1)^2 and check" begin
@test_nowarn decrypt!(decrypted_result, x_plus_one_sq, decryptor)
@test to_string(decrypted_result) == "31"
end
@testset "compute and relinearize encrypted_result (4(x^2+1)(x+1)^2)" begin
@test_nowarn multiply_plain_inplace!(x_sq_plus_one, plain_four, evaluator)
@test_nowarn multiply!(encrypted_result, x_sq_plus_one, x_plus_one_sq, evaluator)
@test length(encrypted_result) == 3
@test_nowarn relinearize_inplace!(encrypted_result, relin_keys_, evaluator)
@test length(encrypted_result) == 2
# Next test is fuzzy since the actual noise budget might vary
@test invariant_noise_budget(encrypted_result, decryptor) in (10, 11, 12)
end
@testset "decrypt encrypted_result (4(x^2+1)(x+1)^2) and check" begin
@test_nowarn decrypt!(decrypted_result, encrypted_result, decryptor)
@test to_string(decrypted_result) == "54"
end
# A degree-2048 context with the degree-4096 coefficient modulus violates the security
# standard and must be reported as invalid.
@testset "invalid parameters" begin
@test_nowarn set_poly_modulus_degree!(enc_parms, 2048)
context = SEALContext(enc_parms)
@test parameter_error_message(context) == "parameters are not compliant with HomomorphicEncryption.org security standard"
end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | code | 3477 | @testset "2_encoders" begin
# Exercises the BatchEncoder: encode/decode of UInt64 and Int64 vectors, encryption,
# plain addition + squaring + relinearization, and noise budget checks along the way.
@testset "batch_encoder" begin
parms = EncryptionParameters(SchemeType.bfv)
poly_modulus_degree = 8192
set_poly_modulus_degree!(parms, poly_modulus_degree)
set_coeff_modulus!(parms, coeff_modulus_bfv_default(poly_modulus_degree))
set_plain_modulus!(parms, plain_modulus_batching(poly_modulus_degree, 20))
context = SEALContext(parms)
@testset "first_context_data" begin
@test_nowarn first_context_data(context)
end
context_data = first_context_data(context)
@testset "qualifiers" begin
@test_nowarn qualifiers(context_data)
end
epq = qualifiers(context_data)
@testset "using_batches" begin
@test using_batching(epq) == true
end
keygen = KeyGenerator(context)
public_key_ = PublicKey()
create_public_key!(public_key_, keygen)
secret_key_ = secret_key(keygen)
relin_keys_ = RelinKeys()
create_relin_keys!(relin_keys_, keygen)
encryptor = Encryptor(context, public_key_)
evaluator = Evaluator(context)
decryptor = Decryptor(context, secret_key_)
@testset "BatchEncoder" begin
@test_nowarn BatchEncoder(context)
end
batch_encoder = BatchEncoder(context)
@testset "slot_count" begin
@test slot_count(batch_encoder) == 8192
end
slot_count_ = slot_count(batch_encoder)
row_size = div(slot_count_, 2)
# Fill the first four slots of each of the two rows of the batched matrix.
pod_matrix = zeros(UInt64, slot_count_)
pod_matrix[1] = 0
pod_matrix[2] = 1
pod_matrix[3] = 2
pod_matrix[4] = 3
pod_matrix[row_size + 1] = 4
pod_matrix[row_size + 2] = 5
pod_matrix[row_size + 3] = 6
pod_matrix[row_size + 4] = 7
plain_matrix = Plaintext()
@testset "encode!" begin
@test_nowarn encode!(plain_matrix, pod_matrix, batch_encoder)
end
pod_result = similar(pod_matrix)
@testset "decode!" begin
@test_nowarn decode!(pod_result, plain_matrix, batch_encoder)
end
# Extra: encode/decode for Int64 data type
pod_matrix_int64 = Int64.(pod_matrix)
plain_matrix_int64 = Plaintext()
@testset "encode!" begin
@test_nowarn encode!(plain_matrix_int64, pod_matrix_int64, batch_encoder)
end
pod_result_int64 = similar(pod_matrix_int64)
@testset "decode!" begin
@test_nowarn decode!(pod_result_int64, plain_matrix_int64, batch_encoder)
end
encrypted_matrix = Ciphertext()
@testset "encrypt!" begin
@test_nowarn encrypt!(encrypted_matrix, plain_matrix, encryptor)
end
@testset "noise budget 1" begin
@test invariant_noise_budget(encrypted_matrix, decryptor) in (145, 146, 147)
end
# Second operand: alternating 1/2 pattern across all slots.
pod_matrix2 = ones(UInt64, slot_count_)
pod_matrix2[2:2:slot_count_] .= 2
plain_matrix2 = Plaintext()
@testset "encode!" begin
@test_nowarn encode!(plain_matrix2, pod_matrix2, batch_encoder)
end
@testset "sum, square, and relinearize" begin
@test_nowarn add_plain_inplace!(encrypted_matrix, plain_matrix2, evaluator)
@test_nowarn square_inplace!(encrypted_matrix, evaluator)
@test_nowarn relinearize_inplace!(encrypted_matrix, relin_keys_, evaluator)
end
@testset "noise budget 2" begin
@test invariant_noise_budget(encrypted_matrix, decryptor) in (113, 114, 115)
end
plain_result = Plaintext()
@testset "decrypt! and decode!" begin
@test_nowarn decrypt!(plain_result, encrypted_matrix, decryptor)
@test_nowarn decode!(pod_result, plain_result, batch_encoder)
end
end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
# Port of SEAL's native `3_levels` example: exercises the BFV modulus switching
# chain. Verifies chain indices and parms_ids at every level, parms_id
# propagation into keys/plaintexts/ciphertexts, the noise budget consumed per
# switch, and a computation interleaved with modulus switching.
@testset "3_levels" begin
  @testset "EncryptionParameters" begin
    @test_nowarn EncryptionParameters(SchemeType.bfv)
  end
  enc_parms = EncryptionParameters(SchemeType.bfv)
  @testset "polynomial modulus degree" begin
    @test_nowarn set_poly_modulus_degree!(enc_parms, 8192)
  end
  # Five primes of 50/30/30/50/50 bits; one prime is dropped per level when
  # switching down the chain.
  @testset "coefficient modulus" begin
    @test_nowarn set_coeff_modulus!(enc_parms, coeff_modulus_create(8192, [50, 30, 30, 50, 50]))
  end
  @testset "plain modulus" begin
    @test_nowarn set_plain_modulus!(enc_parms, plain_modulus_batching(8192, 20))
  end
  @testset "SEALContext" begin
    @test_nowarn SEALContext(enc_parms)
  end
  context = SEALContext(enc_parms)
  @testset "key_context_data" begin
    @test_nowarn key_context_data(context)
  end
  context_data = key_context_data(context)
  # Key level: highest chain index, all five primes present.
  @testset "modulus switching chain (key context data)" begin
    @test chain_index(context_data) == 4
    @test parms_id(context_data) == [0x26d0ad92b6a78b12,
                                     0x667d7d6411d19434,
                                     0x18ade70427566279,
                                     0x84e0aa06442af302]
    @test_nowarn coeff_modulus(parms(context_data))
    primes = coeff_modulus(parms(context_data))
    @test value(primes[1]) == 0x3ffffffef4001
    @test value(primes[2]) == 0x3ffe8001
    @test value(primes[3]) == 0x3fff4001
    @test value(primes[4]) == 0x3fffffffcc001
    @test value(primes[5]) == 0x3ffffffffc001
  end
  @testset "first_context_data" begin
    @test_nowarn first_context_data(context)
  end
  context_data = first_context_data(context)
  # First data level: the last (special) prime has been dropped.
  @testset "modulus switching chain (first context data)" begin
    @test chain_index(context_data) == 3
    @test parms_id(context_data) == first_parms_id(context)
    @test parms_id(context_data) == [0x211ee2c43ec16b18,
                                     0x2c176ee3b851d741,
                                     0x490eacf1dd5930b3,
                                     0x3212f104b7a60a0c]
    @test_nowarn coeff_modulus(parms(context_data))
    primes = coeff_modulus(parms(context_data))
    @test value(primes[1]) == 0x3ffffffef4001
    @test value(primes[2]) == 0x3ffe8001
    @test value(primes[3]) == 0x3fff4001
    @test value(primes[4]) == 0x3fffffffcc001
  end
  @testset "next_context_data" begin
    @test_nowarn next_context_data(context_data)
  end
  # Walk down to the last level; after that the chain ends (returns nothing).
  context_data = next_context_data(context_data)
  context_data = next_context_data(context_data)
  context_data = next_context_data(context_data)
  @testset "isnothing(next_context_data)" begin
    @test isnothing(next_context_data(context_data))
  end
  @testset "modulus switching chain (last context data)" begin
    @test chain_index(context_data) == 0
    @test parms_id(context_data) == last_parms_id(context)
    @test parms_id(context_data) == [0xaf7f6dac55528cf7,
                                     0x2f532a7e2362ab73,
                                     0x03aeaedd1059515e,
                                     0xa515111177a581ca]
    @test_nowarn coeff_modulus(parms(context_data))
    primes = coeff_modulus(parms(context_data))
    @test value(primes[1]) == 0x3ffffffef4001
  end
  keygen = KeyGenerator(context)
  public_key_ = PublicKey()
  create_public_key!(public_key_, keygen)
  secret_key_ = secret_key(keygen)
  relin_keys_ = RelinKeys()
  create_relin_keys!(relin_keys_, keygen)
  galois_keys_ = GaloisKeys()
  create_galois_keys!(galois_keys_, keygen)
  # All key material lives at the key level (same parms_id as key context data).
  @testset "parms_id of generated keys" begin
    p = [0x26d0ad92b6a78b12, 0x667d7d6411d19434, 0x18ade70427566279, 0x84e0aa06442af302]
    @test parms_id(public_key_) == p
    @test parms_id(secret_key_) == p
    @test parms_id(relin_keys_) == p
    @test parms_id(galois_keys_) == p
  end
  encryptor = Encryptor(context, public_key_)
  evaluator = Evaluator(context)
  decryptor = Decryptor(context, secret_key_)
  @testset "Plaintext" begin
    @test_nowarn Plaintext("1x^3 + 2x^2 + 3x^1 + 4")
  end
  plain = Plaintext("1x^3 + 2x^2 + 3x^1 + 4")
  encrypted = Ciphertext()
  encrypt!(encrypted, plain, encryptor)
  # A freshly constructed BFV plaintext carries the all-zero parms_id.
  @testset "parms_id of plain" begin
    @test parms_id(plain) == [0x0000000000000000,
                              0x0000000000000000,
                              0x0000000000000000,
                              0x0000000000000000]
  end
  @testset "parms_id of encrypted" begin
    @test parms_id(encrypted) == [0x211ee2c43ec16b18,
                                  0x2c176ee3b851d741,
                                  0x490eacf1dd5930b3,
                                  0x3212f104b7a60a0c]
  end
  @testset "modulus switching on encrypted (level 3)" begin
    context_data = first_context_data(context)
    @test chain_index(context_data) == 3
    @test parms_id(context_data) == [0x211ee2c43ec16b18,
                                     0x2c176ee3b851d741,
                                     0x490eacf1dd5930b3,
                                     0x3212f104b7a60a0c]
    @test invariant_noise_budget(encrypted, decryptor) in (131, 132, 133)
  end
  @testset "modulus switching on encrypted (level 2)" begin
    @test mod_switch_to_next_inplace!(encrypted, evaluator) == encrypted
    context_data = next_context_data(context_data)
    @test chain_index(context_data) == 2
    @test parms_id(context_data) == [0x85626ad91458073f,
                                     0xe186437698f5ff4e,
                                     0xa1e71da26dabe039,
                                     0x9b66f4ab523b9be1]
    @test invariant_noise_budget(encrypted, decryptor) in (81, 82, 83)
  end
  @testset "modulus switching on encrypted (level 1)" begin
    @test mod_switch_to_next_inplace!(encrypted, evaluator) == encrypted
    context_data = next_context_data(context_data)
    @test chain_index(context_data) == 1
    @test parms_id(context_data) == [0x73b7dc26d10a15b9,
                                     0x56ce8bdd07324dfa,
                                     0x7ff7b8ec16a6f20f,
                                     0xb80f7319f2a28ac1]
    @test invariant_noise_budget(encrypted, decryptor) in (51, 52, 53)
  end
  # Fixed testset name: this one tests the last level (chain index 0, see the
  # assertion below), but its name duplicated the earlier "(level 3)" testset.
  @testset "modulus switching on encrypted (level 0)" begin
    @test mod_switch_to_next_inplace!(encrypted, evaluator) == encrypted
    context_data = next_context_data(context_data)
    @test chain_index(context_data) == 0
    @test parms_id(context_data) == [0xaf7f6dac55528cf7,
                                     0x2f532a7e2362ab73,
                                     0x03aeaedd1059515e,
                                     0xa515111177a581ca]
    @test invariant_noise_budget(encrypted, decryptor) in (21, 22, 23)
  end
  @testset "decrypt! and check 1" begin
    @test_nowarn decrypt!(plain, encrypted, decryptor)
    @test to_string(plain) == "1x^3 + 2x^2 + 3x^1 + 4"
  end
  # Recompute from scratch, interleaving squarings with modulus switches;
  # switching alone must not change the remaining noise budget here.
  @testset "compute with modswitching" begin
    @test_nowarn encrypt!(encrypted, plain, encryptor)
    @test invariant_noise_budget(encrypted, decryptor) in (131, 132, 133)
    @test square_inplace!(encrypted, evaluator) == encrypted
    @test relinearize_inplace!(encrypted, relin_keys_, evaluator) == encrypted
    @test invariant_noise_budget(encrypted, decryptor) in (99, 100, 101)
    @test square_inplace!(encrypted, evaluator) == encrypted
    @test relinearize_inplace!(encrypted, relin_keys_, evaluator) == encrypted
    @test invariant_noise_budget(encrypted, decryptor) in (66, 67, 68)
    @test_nowarn mod_switch_to_next_inplace!(encrypted, evaluator)
    @test invariant_noise_budget(encrypted, decryptor) in (66, 67, 68)
    @test square_inplace!(encrypted, evaluator) == encrypted
    @test relinearize_inplace!(encrypted, relin_keys_, evaluator) == encrypted
    @test invariant_noise_budget(encrypted, decryptor) in (33, 34, 35)
    @test_nowarn mod_switch_to_next_inplace!(encrypted, evaluator)
    @test invariant_noise_budget(encrypted, decryptor) in (33, 34, 35)
  end
  @testset "decrypt! and check 2" begin
    @test_nowarn decrypt!(plain, encrypted, decryptor)
    @test to_string(plain) == "1x^24 + 10x^23 + 88x^22 + 330x^21 + EFCx^20 + 3A30x^19 + C0B8x^18 + 22BB0x^17 + 58666x^16 + C88D0x^15 + 9C377x^14 + F4C0Ex^13 + E8B38x^12 + 5EE89x^11 + F8BFFx^10 + 30304x^9 + 5B9D4x^8 + 12653x^7 + 4DFB5x^6 + 879F8x^5 + 825FBx^4 + F1FFEx^3 + 3FFFFx^2 + 60000x^1 + 10000"
  end
  # Without chain expansion only the key level and a single data level exist.
  @testset "context without expanded modulus chain" begin
    @test_nowarn SEALContext(enc_parms, expand_mod_chain=false)
  end
  context = SEALContext(enc_parms, expand_mod_chain=false)
  context_data = key_context_data(context)
  @testset "modulus switching chain (chain index 1)" begin
    @test chain_index(context_data) == 1
    @test parms_id(context_data) == [0x26d0ad92b6a78b12,
                                     0x667d7d6411d19434,
                                     0x18ade70427566279,
                                     0x84e0aa06442af302]
    @test_nowarn coeff_modulus(parms(context_data))
    primes = coeff_modulus(parms(context_data))
    @test value(primes[1]) == 0x3ffffffef4001
    @test value(primes[2]) == 0x3ffe8001
    @test value(primes[3]) == 0x3fff4001
    @test value(primes[4]) == 0x3fffffffcc001
  end
  context_data = next_context_data(context_data)
  @testset "modulus switching chain (chain index 0)" begin
    @test chain_index(context_data) == 0
    @test parms_id(context_data) == [0x211ee2c43ec16b18,
                                     0x2c176ee3b851d741,
                                     0x490eacf1dd5930b3,
                                     0x3212f104b7a60a0c]
    @test_nowarn coeff_modulus(parms(context_data))
    primes = coeff_modulus(parms(context_data))
    @test value(primes[1]) == 0x3ffffffef4001
    @test value(primes[2]) == 0x3ffe8001
    @test value(primes[3]) == 0x3fff4001
  end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
# Port of SEAL's native `4_ckks_basics` example: evaluates the polynomial
# 3.14159265*x^3 + 0.4*x + 1 on 4096 equidistant points in [0, 1] under CKKS,
# exercising encoding, rescaling, scale alignment and modulus switching.
@testset "4_ckks_basics" begin
  @testset "EncryptionParameters" begin
    @test_nowarn EncryptionParameters(SchemeType.ckks)
  end
  enc_parms = EncryptionParameters(SchemeType.ckks)
  @testset "polynomial modulus degree" begin
    @test_nowarn set_poly_modulus_degree!(enc_parms, 8192)
    @test poly_modulus_degree(enc_parms) == 8192
  end
  # 60-bit outer primes, 40-bit rescaling primes (total 200 bits, checked below).
  @testset "coefficient modulus" begin
    @test_nowarn coeff_modulus_create(8192, [60, 40, 40, 60])
    @test_nowarn set_coeff_modulus!(enc_parms, coeff_modulus_create(8192, [60, 40, 40, 60]))
  end
  @testset "SEALContext" begin
    @test_nowarn SEALContext(enc_parms)
  end
  context = SEALContext(enc_parms)
  @testset "extract parameters" begin
    @test_nowarn key_context_data(context)
    context_data = key_context_data(context)
    @test_nowarn parms(context_data)
    ec = parms(context_data)
    @test scheme(ec) == SchemeType.ckks
    @test total_coeff_modulus_bit_count(context_data) == 200
    @test_nowarn coeff_modulus(ec)
    bit_counts = [bit_count(modulus) for modulus in coeff_modulus(ec)]
    @test bit_counts == [60, 40, 40, 60]
  end
  @testset "KeyGenerator" begin
    @test_nowarn KeyGenerator(context)
  end
  keygen = KeyGenerator(context)
  @testset "PublicKey" begin
    @test_nowarn PublicKey()
  end
  public_key_ = PublicKey()
  @testset "create_public_key" begin
    @test_nowarn create_public_key!(public_key_, keygen)
  end
  @testset "SecretKey" begin
    @test_nowarn secret_key(keygen)
  end
  secret_key_ = secret_key(keygen)
  @testset "RelinKeys" begin
    @test_nowarn RelinKeys()
  end
  relin_keys_ = RelinKeys()
  @testset "create_relin_keys" begin
    @test_nowarn create_relin_keys!(relin_keys_, keygen)
  end
  @testset "Encryptor" begin
    @test_nowarn Encryptor(context, public_key_)
  end
  encryptor = Encryptor(context, public_key_)
  @testset "Evaluator" begin
    @test_nowarn Evaluator(context)
  end
  evaluator = Evaluator(context)
  @testset "Decryptor" begin
    @test_nowarn Decryptor(context, secret_key_)
  end
  decryptor = Decryptor(context, secret_key_)
  @testset "CKKSEncoder" begin
    @test_nowarn CKKSEncoder(context)
  end
  encoder = CKKSEncoder(context)
  # CKKS offers poly_modulus_degree / 2 slots.
  slot_count_ = 4096
  @testset "slot_count" begin
    @test slot_count(encoder) == slot_count_
  end
  @testset "Plaintext" begin
    @test_nowarn Plaintext()
  end
  # One plaintext per polynomial coefficient, plus the encoded input vector x.
  plain_coeff3 = Plaintext()
  plain_coeff1 = Plaintext()
  plain_coeff0 = Plaintext()
  x_plain = Plaintext()
  input = collect(range(0.0, 1.0, length=slot_count_))
  initial_scale = 2.0^40
  @testset "encode!" begin
    @test_nowarn encode!(plain_coeff3, 3.14159265, initial_scale, encoder)
    @test_nowarn encode!(plain_coeff1, 0.4, initial_scale, encoder)
    @test_nowarn encode!(plain_coeff0, 1.0, initial_scale, encoder)
    @test_nowarn encode!(x_plain, input, initial_scale, encoder)
  end
  @testset "Ciphertext" begin
    @test_nowarn Ciphertext()
  end
  x1_encrypted = Ciphertext()
  @testset "encrypt!" begin
    @test_nowarn encrypt!(x1_encrypted, x_plain, encryptor)
  end
  # x3_encrypted holds x^2 first; after multiplying by pi*x it becomes pi*x^3.
  x3_encrypted = Ciphertext()
  @testset "square!" begin
    @test_nowarn square!(x3_encrypted, x1_encrypted, evaluator)
  end
  @testset "relinearize_inplace!" begin
    @test_nowarn relinearize_inplace!(x3_encrypted, relin_keys_, evaluator)
  end
  # Squaring doubles the scale (2^40 * 2^40 = 2^80); rescaling divides it back.
  @testset "scale (before rescaling)" begin
    @test isapprox(log2(scale(x3_encrypted)), 80)
  end
  @testset "rescale_to_next_inplace!" begin
    @test rescale_to_next_inplace!(x3_encrypted, evaluator) == x3_encrypted
  end
  @testset "scale (after rescaling)" begin
    @test isapprox(log2(scale(x3_encrypted)), 40)
  end
  x1_encrypted_coeff3 = Ciphertext()
  @testset "multiply_plain! and rescale" begin
    @test_nowarn multiply_plain!(x1_encrypted_coeff3, x1_encrypted, plain_coeff3, evaluator)
    @test_nowarn rescale_to_next_inplace!(x1_encrypted_coeff3, evaluator)
  end
  @testset "multiply_inplace!" begin
    @test multiply_inplace!(x3_encrypted, x1_encrypted_coeff3, evaluator) == x3_encrypted
  end
  @testset "relinearize_inplace! and rescale" begin
    @test_nowarn relinearize_inplace!(x3_encrypted, relin_keys_, evaluator)
    @test_nowarn rescale_to_next_inplace!(x3_encrypted, evaluator)
  end
  @testset "multiply_plain_inplace! and rescale" begin
    @test_nowarn multiply_plain_inplace!(x1_encrypted, plain_coeff1, evaluator)
    @test_nowarn rescale_to_next_inplace!(x1_encrypted, evaluator)
  end
  # After different numbers of rescalings the three terms live at different
  # levels of the modulus switching chain (verified via chain_index below).
  @testset "parms_id" begin
    @test parms_id(x3_encrypted) == UInt64[0x2af33fda5cee1476, 0xe1a78ed1ec9d76b3,
                                           0xed2adee911ba7c4d, 0x94f949f4f9055b1a]
    @test parms_id(x1_encrypted) == UInt64[0xdc264f695a81d156, 0x890465c10f20b410,
                                           0xf03d86f8bc932745, 0x2e67b09e17d4a44c]
    @test parms_id(plain_coeff0) == UInt64[0x12a009457dbf8b0f, 0x6364d5f3d4c92b3c,
                                           0x96a841ccd88e440c, 0x1255677018089458]
  end
  pid1 = parms_id(x3_encrypted)
  pid2 = parms_id(x1_encrypted)
  pid3 = parms_id(plain_coeff0)
  @testset "get_context_data" begin
    @test_nowarn get_context_data(context, pid1)
    @test_nowarn get_context_data(context, pid2)
    @test_nowarn get_context_data(context, pid3)
  end
  cd1 = get_context_data(context, pid1)
  cd2 = get_context_data(context, pid2)
  cd3 = get_context_data(context, pid3)
  @testset "chain_index" begin
    @test chain_index(cd1) == 0
    @test chain_index(cd2) == 1
    @test chain_index(cd3) == 2
  end
  # Align the (nearly equal) scales exactly before adding the terms.
  @testset "scale!" begin
    @test_nowarn scale!(x3_encrypted, 2.0^40)
    @test_nowarn scale!(x1_encrypted, 2.0^40)
  end
  # Bring the other operands down to the level of x3_encrypted.
  last_parms_id = parms_id(x3_encrypted)
  @testset "mod_switch_to_inplace!" begin
    @test mod_switch_to_inplace!(x1_encrypted, last_parms_id, evaluator) == x1_encrypted
    @test mod_switch_to_inplace!(plain_coeff0, last_parms_id, evaluator) == plain_coeff0
  end
  encrypted_result = Ciphertext()
  @testset "add!" begin
    @test_nowarn add!(encrypted_result, x3_encrypted, x1_encrypted, evaluator)
  end
  @testset "add_plain_inplace!" begin
    @test_nowarn add_plain_inplace!(encrypted_result, plain_coeff0, evaluator)
  end
  plain_result = Plaintext()
  @testset "decrypt!" begin
    @test_nowarn decrypt!(plain_result, encrypted_result, decryptor)
  end
  result = similar(input)
  @testset "decode!" begin
    @test_nowarn decode!(result, plain_result, encoder)
  end
  # CKKS is approximate; compare against the plain evaluation with a tolerance.
  @testset "compare results" begin
    true_result = similar(input)
    for (i, x) in enumerate(input)
      true_result[i] = (3.14159265 * x * x + 0.4) * x + 1
    end
    @test isapprox(result, true_result, atol=1e-4)
  end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
# Port of SEAL's native `5_rotation` example (CKKS part): encrypt a linear
# ramp over [0, 1], rotate it two slots to the left using Galois keys, and
# compare the decrypted result against a plain circshift of the input.
@testset "5_rotation" begin
  @testset "rotation_ckks" begin
    @testset "EncryptionParameters" begin
      @test_nowarn EncryptionParameters(SchemeType.ckks)
    end
    params = EncryptionParameters(SchemeType.ckks)
    @testset "polynomial modulus degree" begin
      @test_nowarn set_poly_modulus_degree!(params, 8192)
      @test poly_modulus_degree(params) == 8192
    end
    # Five 40-bit primes for the coefficient modulus.
    @testset "coefficient modulus" begin
      @test_nowarn coeff_modulus_create(8192, [40, 40, 40, 40, 40])
      @test_nowarn set_coeff_modulus!(params, coeff_modulus_create(8192, [40, 40, 40, 40, 40]))
    end
    @testset "SEALContext" begin
      @test_nowarn SEALContext(params)
    end
    ctx = SEALContext(params)
    @testset "KeyGenerator" begin
      @test_nowarn KeyGenerator(ctx)
    end
    generator = KeyGenerator(ctx)
    @testset "PublicKey" begin
      @test_nowarn PublicKey()
    end
    pk = PublicKey()
    @testset "create_public_key" begin
      @test_nowarn create_public_key!(pk, generator)
    end
    @testset "SecretKey" begin
      @test_nowarn secret_key(generator)
    end
    sk = secret_key(generator)
    @testset "RelinKeys" begin
      @test_nowarn RelinKeys()
    end
    rlk = RelinKeys()
    @testset "create_relin_keys" begin
      @test_nowarn create_relin_keys!(rlk, generator)
    end
    # Galois keys enable slot rotations on ciphertexts.
    @testset "GaloisKeys" begin
      @test_nowarn GaloisKeys()
    end
    glk = GaloisKeys()
    @testset "create_galois_keys" begin
      @test_nowarn create_galois_keys!(glk, generator)
    end
    @testset "Encryptor" begin
      @test_nowarn Encryptor(ctx, pk)
    end
    enc = Encryptor(ctx, pk)
    @testset "Evaluator" begin
      @test_nowarn Evaluator(ctx)
    end
    ev = Evaluator(ctx)
    @testset "Decryptor" begin
      @test_nowarn Decryptor(ctx, sk)
    end
    dec = Decryptor(ctx, sk)
    @testset "CKKSEncoder" begin
      @test_nowarn CKKSEncoder(ctx)
    end
    ckks_encoder = CKKSEncoder(ctx)
    # CKKS provides poly_modulus_degree / 2 slots.
    num_slots = 4096
    @testset "slot_count" begin
      @test slot_count(ckks_encoder) == num_slots
    end
    values = collect(range(0.0, 1.0, length=num_slots))
    pt = Plaintext()
    ct = Ciphertext()
    scale_ = 2.0^50
    @testset "encode and encrypt" begin
      @test_nowarn encode!(pt, values, scale_, ckks_encoder)
      @test_nowarn encrypt!(ct, pt, enc)
    end
    ct_rotated = Ciphertext()
    @testset "rotate 2 steps left" begin
      @test_nowarn rotate_vector!(ct_rotated, ct, 2, glk, ev)
    end
    decoded = similar(values)
    @testset "decrypt and decode" begin
      @test_nowarn decrypt!(pt, ct_rotated, dec)
      @test_nowarn decode!(decoded, pt, ckks_encoder)
    end
    # Rotating 2 steps left corresponds to circshift by -2 on the plain data.
    @testset "compare results" begin
      expected = circshift(values, -2)
      @test isapprox(decoded, expected)
    end
  end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
# Port of SEAL's native `6_serialization` example: a client/server exchange in
# which parameters, keys and ciphertexts are serialized into byte vectors
# ("streams"). Byte counts for compressed data are checked with rtol=0.01
# because the exact compressed size may vary slightly.
@testset "6_serialization" begin
  # Shared byte buffers standing in for the communication channels.
  parms_stream = UInt8[]
  data_stream1 = UInt8[]
  data_stream2 = UInt8[]
  data_stream3 = UInt8[]
  data_stream4 = UInt8[]
  sk_stream = UInt8[]
  # Server side: choose encryption parameters and send them to the client.
  @testset "server (part 1)" begin
    enc_parms = EncryptionParameters(SchemeType.ckks)
    poly_modulus_degree = 8192
    @testset "polynomial modulus degree" begin
      @test_nowarn set_poly_modulus_degree!(enc_parms, poly_modulus_degree)
    end
    @testset "coefficient modulus" begin
      @test_nowarn set_coeff_modulus!(enc_parms, coeff_modulus_create(poly_modulus_degree, [50, 20, 50]))
    end
    # save_size is an upper bound; save! returns the actual number of bytes
    # written, after which the buffer is shrunk to that size.
    @testset "save! EncryptionParameters" begin
      @test save_size(enc_parms) == 192
      resize!(parms_stream, save_size(enc_parms))
      @test save!(parms_stream, enc_parms) == 75
      out_bytes = 75
      resize!(parms_stream, out_bytes)
    end
    @testset "save_size comparison" begin
      @test save_size(ComprModeType.none, enc_parms) == 129
      @test save_size(ComprModeType.zlib, enc_parms) == 146
    end
    # Roundtrip: saved parameters must load back to an equal object.
    @testset "save! and load! EncryptionParameters" begin
      byte_buffer = Vector{UInt8}(undef, save_size(enc_parms))
      @test save!(byte_buffer, length(byte_buffer), enc_parms) == 75
      enc_parms2 = EncryptionParameters()
      @test load!(enc_parms2, byte_buffer, length(byte_buffer)) == 75
      @test enc_parms == enc_parms2
    end
  end
  # Client side: load the parameters, generate keys, and encrypt inputs.
  @testset "client (part 1)" begin
    enc_parms = EncryptionParameters()
    @testset "load! EncryptionParameters" begin
      @test load!(enc_parms, parms_stream) == 75
    end
    context = SEALContext(enc_parms)
    keygen = KeyGenerator(context)
    pk = PublicKey()
    create_public_key!(pk, keygen)
    sk = secret_key(keygen)
    @testset "save! SecretKey" begin
      @test save_size(sk) == 197464
      resize!(sk_stream, save_size(sk))
      @test isapprox(save!(sk_stream, sk), 145823, rtol=0.01)
      out_bytes = save!(sk_stream, sk)
      resize!(sk_stream, out_bytes)
    end
    # Seeded ("serializable") relinearization keys are roughly half the size
    # of fully expanded ones (compare rlk vs. rlk_big below).
    rlk = create_relin_keys(keygen)
    @testset "save! create_relin_keys" begin
      @test save_size(rlk) == 395189
      resize!(data_stream1, save_size(rlk))
      @test isapprox(save!(data_stream1, rlk), 291635, rtol=0.01)
      size_rlk = save!(data_stream1, rlk)
      resize!(data_stream1, size_rlk)
    end
    rlk_big = RelinKeys()
    create_relin_keys!(rlk_big, keygen)
    @testset "save! create_relin_keys" begin
      @test save_size(rlk_big) == 789779
      resize!(data_stream2, save_size(rlk_big))
      @test isapprox(save!(data_stream2, rlk_big), 583244, rtol=0.01)
      size_rlk_big = save!(data_stream2, rlk_big)
      resize!(data_stream2, size_rlk_big)
    end
    initial_scale = 2.0^20
    encoder = CKKSEncoder(context)
    plain1 = Plaintext()
    plain2 = Plaintext()
    @testset "encode!" begin
      @test_nowarn encode!(plain1, 2.3, initial_scale, encoder)
      @test_nowarn encode!(plain2, 4.5, initial_scale, encoder)
    end
    encryptor = Encryptor(context, pk)
    encrypted1 = Ciphertext()
    @testset "encrypt!" begin
      @test_nowarn encrypt!(encrypted1, plain1, encryptor)
    end
    # Installing the secret key enables symmetric encryption, which produces
    # smaller (seeded) ciphertexts than public-key encryption.
    @testset "set_secret_key!" begin
      @test_nowarn set_secret_key!(encryptor, sk)
    end
    @testset "encrypt_symmetric, encrypt_symmetric!" begin
      @test encrypt_symmetric(plain2, encryptor) isa Ciphertext
      c = Ciphertext()
      @test encrypt_symmetric!(c, plain2, encryptor) == c
    end
    sym_encrypted2 = encrypt_symmetric(plain2, encryptor)
    @testset "save! Ciphertext" begin
      @test save_size(encrypted1) == 263273
      resize!(data_stream2, save_size(encrypted1))
      @test isapprox(save!(data_stream2, encrypted1), 173531, rtol=0.01)
      size_encrypted1 = save!(data_stream2, encrypted1)
      resize!(data_stream2, size_encrypted1)
      @test save_size(sym_encrypted2) == 131770
      resize!(data_stream3, save_size(sym_encrypted2))
      @test isapprox(save!(data_stream3, sym_encrypted2), 86966, rtol=0.01)
      size_sym_encrypted2 = save!(data_stream3, sym_encrypted2)
      resize!(data_stream3, size_sym_encrypted2)
    end
  end
  # Server side: load keys/ciphertexts, compute the product, send it back.
  @testset "server (part 2)" begin
    enc_parms = EncryptionParameters()
    @testset "load! EncryptionParameters" begin
      @test load!(enc_parms, parms_stream) == 75
    end
    context = SEALContext(enc_parms)
    evaluator = Evaluator(context)
    rlk = RelinKeys()
    encrypted1 = Ciphertext()
    encrypted2 = Ciphertext()
    @testset "load! RelinKeys" begin
      @test isapprox(load!(rlk, context, data_stream1), 291635, rtol=0.01)
    end
    @testset "load! Ciphertext" begin
      @test isapprox(load!(encrypted1, context, data_stream2), 173702, rtol=0.01)
      @test isapprox(load!(encrypted2, context, data_stream3), 86966, rtol=0.01)
    end
    encrypted_prod = Ciphertext()
    @testset "multiply, relinearize, rescale" begin
      @test multiply!(encrypted_prod, encrypted1, encrypted2, evaluator) == encrypted_prod
      @test relinearize_inplace!(encrypted_prod, rlk, evaluator) == encrypted_prod
      @test rescale_to_next_inplace!(encrypted_prod, evaluator) == encrypted_prod
    end
    @testset "save! Ciphertext" begin
      @test save_size(encrypted_prod) == 131689
      resize!(data_stream4, save_size(encrypted_prod))
      @test isapprox(save!(data_stream4, encrypted_prod), 117909, rtol=0.01)
      size_encrypted_prod = save!(data_stream4, encrypted_prod)
      resize!(data_stream4, size_encrypted_prod)
    end
  end
  # Client side: decrypt the product and check it equals 2.3 * 4.5 = 10.35
  # (approximately, in every slot).
  @testset "client (part 2)" begin
    enc_parms = EncryptionParameters()
    load!(enc_parms, parms_stream)
    context = SEALContext(enc_parms)
    sk = SecretKey()
    @testset "load! SecretKey" begin
      @test isapprox(load!(sk, context, sk_stream), 145823, rtol=0.01)
    end
    decryptor = Decryptor(context, sk)
    encoder = CKKSEncoder(context)
    encrypted_result = Ciphertext()
    @testset "load! Ciphertext" begin
      @test_nowarn load!(encrypted_result, context, data_stream4)
    end
    plain_result = Plaintext()
    @testset "decrypt!" begin
      @test_nowarn decrypt!(plain_result, encrypted_result, decryptor)
    end
    slot_count_ = slot_count(encoder)
    result = Vector{Float64}(undef, slot_count_)
    @testset "decode! and check result" begin
      @test_nowarn decode!(result, plain_result, encoder)
      @test isapprox(result[1], 10.35, rtol=0.01)
      @test isapprox(result[2], 10.35, rtol=0.01)
      @test isapprox(result[3], 10.35, rtol=0.01)
      @test isapprox(result[end-2], 10.35, rtol=0.01)
      @test isapprox(result[end-1], 10.35, rtol=0.01)
      @test isapprox(result[end-0], 10.35, rtol=0.01)
    end
  end
  # A SEALHeader can be read from any saved object to obtain its byte size.
  pt = Plaintext("1x^2 + 3")
  stream = Vector{UInt8}(undef, save_size(pt))
  @testset "save! Plaintext" begin
    @test save!(stream, pt) == 66
    data_size = 66
    resize!(stream, data_size)
  end
  header = SEALHeader()
  @testset "load_header!" begin
    @test load_header!(header, stream) == header
    @test header.size == 66
  end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
# Additional tests to cover missing pieces
# Grab-bag of tests for API surface not covered by the example ports: version
# queries, default key constructors, memory pool access, error-code mapping,
# and various in-place evaluator operations for CKKS and BFV.
@testset "additional tests" begin
  @testset "miscellaneous" begin
    @testset "version_{major,minor,patch}" begin
      @test_nowarn version_major()
      @test_nowarn version_minor()
      @test_nowarn version_patch()
      @test_nowarn version()
    end
    # The composite version must agree with its individual components.
    @testset "version" begin
      major = version_major()
      minor = version_minor()
      patch = version_patch()
      @test version() == VersionNumber("$major.$minor.$patch")
    end
    @testset "PublicKey" begin
      @test_nowarn PublicKey()
    end
    @testset "SecretKey" begin
      @test_nowarn SecretKey()
    end
    @testset "RelinKeys" begin
      @test_nowarn RelinKeys()
    end
    @testset "GaloisKeys" begin
      @test_nowarn GaloisKeys()
    end
    # A Modulus may be 0 (unset); the value 1 is rejected by SEAL.
    @testset "Modulus" begin
      @test_nowarn Modulus(0)
      @test_throws ErrorException Modulus(1)
      m = Modulus(0)
      @test value(m) == 0
    end
    @testset "memory_manager_get_pool" begin
      @test_nowarn memory_manager_get_pool()
    end
    @testset "alloc_byte_count" begin
      @test alloc_byte_count(memory_manager_get_pool()) isa Int
    end
    # Each constant is an HRESULT-style error code that must be mapped to an
    # ErrorException; the last one is an unknown code.
    @testset "check_return_value" begin
      @test_throws ErrorException SEAL.check_return_value(0x80004003)
      @test_throws ErrorException SEAL.check_return_value(0x80070057)
      @test_throws ErrorException SEAL.check_return_value(0x8007000E)
      @test_throws ErrorException SEAL.check_return_value(0x8000FFFF)
      @test_throws ErrorException SEAL.check_return_value(0x80131620)
      @test_throws ErrorException SEAL.check_return_value(0x80131509)
      @test_throws ErrorException SEAL.check_return_value(0x11111111)
    end
    @testset "destroy!" begin
      mempool = memory_manager_get_pool()
      @test_nowarn destroy!(mempool)
    end
  end
  @testset "additional CKKS tests" begin
    enc_parms = EncryptionParameters(SchemeType.ckks)
    set_poly_modulus_degree!(enc_parms, 8192)
    set_coeff_modulus!(enc_parms, coeff_modulus_create(8192, [60, 40, 40, 60]))
    context = SEALContext(enc_parms)
    keygen = KeyGenerator(context)
    public_key_ = PublicKey()
    create_public_key!(public_key_, keygen)
    secret_key_ = secret_key(keygen)
    @testset "create_public_key" begin
      @test create_public_key(keygen) isa PublicKey
    end
    # All three Encryptor constructor variants must be accepted.
    @testset "Encryptor" begin
      @test_nowarn Encryptor(context, public_key_, secret_key_)
      @test_nowarn Encryptor(context, public_key_)
      @test_nowarn Encryptor(context, secret_key_)
    end
    @testset "scale/scale! Plaintext" begin
      p = Plaintext()
      @test isapprox(scale(p), 1.0)
      @test_nowarn scale!(p, 2.0^40)
      @test isapprox(scale(p), 2.0^40)
    end
    @testset "create_relin_keys" begin
      @test_nowarn create_relin_keys(keygen)
    end
    @testset "plain_modulus" begin
      @test_nowarn plain_modulus(enc_parms)
    end
    p = Plaintext()
    encoder = CKKSEncoder(context)
    encode!(p, 3.14159265, 2.0^40, encoder)
    encryptor = Encryptor(context, public_key_)
    evaluator = Evaluator(context)
    relin_keys_ = RelinKeys()
    create_relin_keys!(relin_keys_, keygen)
    # Fixed: the originals used `@test_nowarn typeof(f(...)) === Ciphertext`,
    # which only checks for warnings and silently discards the comparison
    # result, so the tests could never fail on a wrong return value. Assert
    # the documented contract instead: each in-place op returns its argument
    # (same pattern as the other in-place tests in this file).
    @testset "{square,relinearize,rescale_to_next}_inplace!" begin
      c1 = Ciphertext()
      encrypt!(c1, p, encryptor)
      @test square_inplace!(c1, evaluator) == c1
      @test relinearize_inplace!(c1, relin_keys_, evaluator) == c1
      @test rescale_to_next_inplace!(c1, evaluator) == c1
    end
    @testset "multiply_plain_inplace!" begin
      c2 = Ciphertext()
      encrypt!(c2, p, encryptor)
      @test_nowarn multiply_plain_inplace!(c2, p, evaluator)
    end
    @testset "multiply_inplace!" begin
      c3 = Ciphertext()
      c4 = Ciphertext()
      encrypt!(c3, p, encryptor)
      encrypt!(c4, p, encryptor)
      @test_nowarn multiply_inplace!(c3, c4, evaluator)
    end
    @testset "add_inplace!" begin
      c5 = Ciphertext()
      c6 = Ciphertext()
      encrypt!(c5, p, encryptor)
      encrypt!(c6, p, encryptor)
      @test add_inplace!(c5, c6, evaluator) == c5
    end
    @testset "add_plain_inplace!" begin
      c7 = Ciphertext()
      encrypt!(c7, p, encryptor)
      @test_nowarn add_plain_inplace!(c7, p, evaluator)
    end
    galois_keys_ = GaloisKeys()
    create_galois_keys!(galois_keys_, keygen)
    @testset "rotate_vector_inplace!" begin
      c8 = Ciphertext()
      encrypt!(c8, p, encryptor)
      @test rotate_vector_inplace!(c8, 5, galois_keys_, evaluator) == c8
    end
    @testset "complex_conjugate_inplace!" begin
      c9 = Ciphertext()
      encrypt!(c9, p, encryptor)
      @test complex_conjugate_inplace!(c9, galois_keys_, evaluator) == c9
    end
    @testset "negate!" begin
      c10 = Ciphertext()
      c11 = Ciphertext()
      encrypt!(c10, p, encryptor)
      @test negate!(c11, c10, evaluator) == c11
    end
    @testset "negate_inplace!" begin
      c12 = Ciphertext()
      encrypt!(c12, p, encryptor)
      @test negate_inplace!(c12, evaluator) == c12
    end
    @testset "sub!" begin
      c13 = Ciphertext()
      c14 = Ciphertext()
      c15 = Ciphertext()
      encrypt!(c13, p, encryptor)
      encrypt!(c14, p, encryptor)
      @test sub!(c15, c13, c14, evaluator) == c15
    end
    @testset "sub_inplace!" begin
      c16 = Ciphertext()
      c17 = Ciphertext()
      encrypt!(c16, p, encryptor)
      encrypt!(c17, p, encryptor)
      @test sub_inplace!(c16, c17, evaluator) == c16
    end
    @testset "using_keyswitching" begin
      @test using_keyswitching(context) == true
    end
    @testset "Plaintext constructor" begin
      @test Plaintext(8192, 0) isa Plaintext
    end
    @testset "Plaintext equality" begin
      p1 = Plaintext(8192, 0)
      p2 = Plaintext(8192, 0)
      @test p1 == p2
    end
    @testset "Ciphertext constructor" begin
      @test Ciphertext(context) isa Ciphertext
    end
    @testset "reserve! Ciphertext" begin
      c = Ciphertext(context)
      @test reserve!(c, 3) == c
    end
    # Integer overload of encode! (encodes the same value in all slots).
    @testset "encode!" begin
      p2 = Plaintext()
      @test encode!(p2, 17, encoder) == p2
    end
  end
  @testset "additional BFV tests" begin
    enc_parms = EncryptionParameters(SchemeType.bfv)
    poly_modulus_degree = 4096
    set_poly_modulus_degree!(enc_parms, poly_modulus_degree)
    set_coeff_modulus!(enc_parms, coeff_modulus_bfv_default(poly_modulus_degree))
    set_plain_modulus!(enc_parms, 786433)
    context = SEALContext(enc_parms)
    keygen = KeyGenerator(context)
    public_key_ = PublicKey()
    create_public_key!(public_key_, keygen)
    secret_key_ = secret_key(keygen)
    galois_keys_ = GaloisKeys()
    create_galois_keys!(galois_keys_, keygen)
    relin_keys_ = RelinKeys()
    create_relin_keys!(relin_keys_, keygen)
    batch_encoder = BatchEncoder(context)
    slot_count_ = slot_count(batch_encoder)
    encryptor = Encryptor(context, public_key_)
    decryptor = Decryptor(context, secret_key_)
    evaluator = Evaluator(context)
    plain = Plaintext()
    encode!(plain, ones(UInt64, slot_count_), batch_encoder)
    encrypted = Ciphertext()
    encrypt!(encrypted, plain, encryptor)
    # BFV batching-specific rotations (rows/columns of the 2 x N/2 matrix).
    @testset "rotate_rows_inplace!" begin
      @test rotate_rows_inplace!(encrypted, 1, galois_keys_, evaluator) == encrypted
    end
    @testset "rotate_columns_inplace!" begin
      @test rotate_columns_inplace!(encrypted, galois_keys_, evaluator) == encrypted
    end
  end
end
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 949 | # Changelog
This file only tracks changes at a very high, summarized level, omitting patch releases.
## SEAL dev
* ...
## SEAL v0.4.0
* Update the wrapper to support SEAL v3.6
## SEAL v0.3.0
* Add `examples/6_serialization.jl` and `examples/7_performance.jl` and all corresponding
functionality in the library itself
* Add `examples/examples.jl` with `seal_examples()` that allows to run the examples interactively
* New methods: `using_keyswitching`, `save_size`, `save!`, `load!`, `reserve!`, `encrypt_symmetric`,
`encrypt_symmetric!`, `alloc_byte_count`, `rotate_rows!`, `rotate_rows_inplace!`,
`rotate_columns!`, `rotate_columns_inplace!`, `complex_conjugate!`, `complex_conjugate_inplace!`
## SEAL v0.2.0
* Full support for all functionality found in all SEAL examples.
## SEAL v0.1.0
* Initial release
* Support for most functionality found in `examples/1_bfv_basics.cpp`, `examples/4_ckks_basics.cpp`,
`examples/5_rotation.cpp`
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 2274 | # Contributing
SEAL.jl is an open-source project and we are very happy to accept contributions
from the community. Please feel free to open issues or submit patches (preferably
as merge requests) any time. For planned larger contributions, it is often
beneficial to get in contact with one of the principal developers first.
SEAL.jl and its contributions are licensed under the MIT license (see
[LICENSE.md](LICENSE.md)). As a contributor, you certify that all your
contributions are in conformance with the *Developer Certificate of Origin
(Version 1.1)*, which is reproduced below.
## Developer Certificate of Origin (Version 1.1)
The following text was taken from
[https://developercertificate.org](https://developercertificate.org):
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 11151 | # SEAL.jl
[](https://juliacrypto.github.io/SEAL.jl/stable)
[](https://juliacrypto.github.io/SEAL.jl/dev)
[](https://github.com/JuliaCrypto/SEAL.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/JuliaCrypto/SEAL.jl)
[](https://opensource.org/licenses/MIT)
**SEAL.jl** is a Julia package that wraps the Microsoft
[SEAL](https://github.com/microsoft/SEAL) library for homomorphic encryption. It
supports the Brakerski/Fan-Vercauteren (BFV) and Cheon-Kim-Kim-Song (CKKS, also
known as HEAAN in literature) schemes and exposes the homomorphic encryption
capabilities of SEAL in a (mostly) intuitive and Julian way. SEAL.jl is
published under the same permissive MIT license as the Microsoft SEAL library.
Currently, SEAL.jl supports all operations that are used in the examples of the
[SEAL library](https://github.com/microsoft/SEAL/tree/master/native/examples).
This includes encoding and encryption, addition and multiplication, rotation,
relinearization and modulus switching for the BFV and CKKS schemes.
## Installation
To install SEAL.jl, start a Julia REPL, hit `]` to enter Julia's `Pkg` mode, and
then execute
```julia
(@v1.5) pkg> add SEAL
```
Alternatively, you can install SEAL.jl by using `Pkg` directly, i.e., by running
```shell
julia -e 'using Pkg; Pkg.add("SEAL")'
```
SEAL.jl depends on the binary distribution of the SEAL library, which is
available as a Julia package `SEAL_jll.jl` and which is automatically installed
as a dependency.
*Note: Currently SEAL_jll.jl is not available on Windows, thus SEAL.jl will
work only on Linux, MacOS and FreeBSD. Also, SEAL_jll.jl does not work on 32-bit
systems.*
## Getting started
### Usage
After installation, load SEAL.jl by running
```julia
using SEAL
```
in the REPL. A **minimal** working example for encrypting an array of integers using the BFV
scheme, squaring it, and decrypting it, looks as follows:
```julia
julia> using SEAL
[ Info: Precompiling SEAL [bac81e26-86e4-4b48-8696-7d0406d5dbc1]
julia> parms = EncryptionParameters(SchemeType.bfv)
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> poly_modulus_degree = 4096
4096
julia> set_poly_modulus_degree!(parms, poly_modulus_degree)
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> set_coeff_modulus!(parms, coeff_modulus_bfv_default(poly_modulus_degree))
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> set_plain_modulus!(parms, plain_modulus_batching(poly_modulus_degree, 20))
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> context = SEALContext(parms)
SEALContext(Ptr{Nothing} @0x0000000004298440)
julia> keygen = KeyGenerator(context)
KeyGenerator(Ptr{Nothing} @0x00000000021ef540)
julia> public_key_ = PublicKey()
PublicKey(Ptr{Nothing} @0x0000000002272610)
julia> create_public_key!(public_key_, keygen)
julia> secret_key_ = secret_key(keygen)
SecretKey(Ptr{Nothing} @0x0000000001cec2a0)
julia> encryptor = Encryptor(context, public_key_)
Encryptor(Ptr{Nothing} @0x0000000001cd4480)
julia> evaluator = Evaluator(context)
Evaluator(Ptr{Nothing} @0x000000000428bdd0)
julia> decryptor = Decryptor(context, secret_key_)
Decryptor(Ptr{Nothing} @0x00000000037670d0)
julia> batch_encoder = BatchEncoder(context)
BatchEncoder(Ptr{Nothing} @0x0000000001fb4bd0, SEALContext(Ptr{Nothing} @0x0000000001b87780))
julia> pod_matrix = collect(UInt64, 1:slot_count(batch_encoder));
julia> Int.(vcat(pod_matrix[1:3], pod_matrix[end-3:end]))
7-element Array{Int64,1}:
1
2
3
4093
4094
4095
4096
julia> plain_matrix = Plaintext()
Plaintext(Ptr{Nothing} @0x00000000042db6e0)
julia> encode!(plain_matrix, pod_matrix, batch_encoder)
Plaintext(Ptr{Nothing} @0x0000000002ce0370)
julia> encrypted_matrix = Ciphertext()
Ciphertext(Ptr{Nothing} @0x0000000002d91b80)
julia> encrypt!(encrypted_matrix, plain_matrix, encryptor)
Ciphertext(Ptr{Nothing} @0x0000000002d91b80)
julia> add_inplace!(encrypted_matrix, encrypted_matrix, evaluator)
Ciphertext(Ptr{Nothing} @0x0000000002ce1280)
julia> plain_result = Plaintext()
Plaintext(Ptr{Nothing} @0x0000000004591550)
julia> decrypt!(plain_result, encrypted_matrix, decryptor)
Plaintext(Ptr{Nothing} @0x0000000004591550)
julia> decode!(pod_matrix, plain_result, batch_encoder);
julia> Int.(vcat(pod_matrix[1:3], pod_matrix[end-3:end]))
7-element Array{Int64,1}:
2
4
6
8186
8188
8190
8192
```
### Examples
As you can see, using homomorphic encryption is quite involved: You need to pick
a scheme, provide sensible encryption parameters, encode your raw data into
plaintext, encrypt it to ciphertext, perform your arithmetic operations on it,
and then decrypt and decode again. Therefore, before starting to use SEAL.jl
for your own applications, it is **highly recommended** to have a look at the
examples in the
[`examples/`](examples/)
directory. Otherwise it will be very likely that you are using SEAL.jl (and SEAL) in a
way that is either not secure, will produce unexpected results, or just crashes.
The examples included in SEAL.jl follow almost line-by-line the examples provided by the
[SEAL library](https://github.com/microsoft/SEAL/tree/master/native/examples).
For example, the snippet above is based on the `example_batch_encoder()` function in
[`examples/2_encoders.jl`](examples/2_encoders.jl).
The full list of examples is as follows:
|SEAL.jl |SEAL (C++) |Description |
|--------------------|---------------------|----------------------------------------------------------------------------|
|`examples.jl` |`examples.cpp` |The example runner application |
|`1_bfv_basics.jl` |`1_bfv_basics.cpp` |Encrypted modular arithmetic using the BFV scheme |
|`2_encoders.jl` |`2_encoders.cpp` |Encoding more complex data into Microsoft SEAL plaintext objects |
|`3_levels.jl` |`3_levels.cpp` |Introduces the concept of levels; prerequisite for using the CKKS scheme |
|`4_ckks_basics.jl` |`4_ckks_basics.cpp` |Encrypted real number arithmetic using the CKKS scheme |
|`5_rotation.jl` |`5_rotation.cpp` |Performing cyclic rotations on encrypted vectors in the BFV and CKKS schemes|
|`6_serialization.jl`|`6_serialization.cpp`|Serializing objects in Microsoft SEAL |
|`7_performance.jl` |`7_performance.cpp` |Performance tests |
To run the examples, first install SEAL.jl (as shown [above](#usage)) and clone this repository:
```shell
git clone https://github.com/JuliaCrypto/SEAL.jl.git
```
Then, run Julia and include `examples/examples.jl` before executing `seal_examples()`:
```shell
julia --project=. -e 'include("SEAL.jl/examples/examples.jl"); seal_examples()'
```
You will be shown an interactive prompt that lets you run any of the available
examples:
```
Microsoft SEAL version: 3.6.2
+---------------------------------------------------------+
| The following examples should be executed while reading |
| comments in associated files in examples/. |
+---------------------------------------------------------+
| Examples | Source Files |
+----------------------------+----------------------------+
| 1. BFV Basics | 1_bfv_basics.jl |
| 2. Encoders | 2_encoders.jl |
| 3. Levels | 3_levels.jl |
| 4. CKKS Basics | 4_ckks_basics.jl |
| 5. Rotation | 5_rotation.jl |
| 6. Serialization | 6_serialization.jl |
| 7. Performance Test | 7_performance.jl |
+----------------------------+----------------------------+
[ 0 MB] Total allocation from the memory pool
> Run example (1 ~ 7) or exit (0):
```
Since the examples will not create or modify any files, feel free to run them from
any directory.
## Implementation strategy
SEAL.jl is *work-in-progress*, thus only a subset of the many capabilities of
the SEAL library are so far supported ([PRs are welcome!](CONTRIBUTING.md)). In
general, SEAL.jl makes use of the C bindings provided by SEAL, but tries to
mimic SEAL's *C++* API as close as possible. That is, file names,
function/variable names, the order of arguments etc. are as close to the
SEAL C++ code as possible. The reason for this is that the SEAL
library provides excellent inline code documentation, thus by reading (and
understanding) the comments in the C++ files you should immediately be able to
reproduce the same implementation with SEAL.jl.
However, some implementation details do not translate well from C++ to
Julia. Also, the Julia community has a few strong conventions that if violated
would make it unnecessarily difficult for experienced Julia users to use SEAL.jl
correctly. Thus, when trying to recreate SEAL examples written in C++ with
SEAL.jl in Julia, there are a few things to watch out for:
* Functions that modify their input are suffixed by `!`.
* Function arguments that are modified come first (but the rest remains in
order).
* When translating C++ member function to Julia, the "owning" object is always
passed as the last argument.
* While `x.size()` in C++ returns a scalar, length-like value, `size(x)` in
Julia is expected to return a tuple, which is also the case in SEAL.jl.
The next example shows the first three items in practice. The C++ code
snippet
```c++
evaluator.multiply_plain(x1_encrypted, plain_coeff3, x1_encrypted_coeff3);
```
is translated to the following Julia code:
```julia
multiply_plain!(x1_encrypted_coeff3, x1_encrypted, plain_coeff3, evaluator)
```
Note the trailing `!`, the fact that `x1_encrypted_coeff3` as the modified input
variable is now the first argument, and `evaluator` being passed as the last
argument.
## Authors
SEAL.jl was initiated by
[Michael Schlottke-Lakemper](https://www.mi.uni-koeln.de/NumSim/schlottke-lakemper)
(University of Cologne, Germany), who is also the principal developer of
SEAL.jl.
## License and contributing
SEAL.jl is licensed under the MIT license (see [LICENSE.md](LICENSE.md)). Since SEAL.jl is
an open-source project, we are very happy to accept contributions from the
community. Please refer to [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
## Acknowledgements
This Julia package would have not been possible without the excellent work of
the developers of the [SEAL](https://github.com/microsoft/SEAL) library. Their
high-quality code documentation plus the fact that they provide C bindings for
the entire functionality of the SEAL C++ library have made developing SEAL.jl
a breeze.
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 2265 | # Contributing
SEAL.jl is an open-source project and we are very happy to accept contributions
from the community. Please feel free to open issues or submit patches (preferably
as merge requests) any time. For planned larger contributions, it is often
beneficial to get in contact with one of the principal developers first.
SEAL.jl and its contributions are licensed under the MIT license (see
[License](@ref)). As a contributor, you certify that all your
contributions are in conformance with the *Developer Certificate of Origin
(Version 1.1)*, which is reproduced below.
## Developer Certificate of Origin (Version 1.1)
The following text was taken from
[https://developercertificate.org](https://developercertificate.org):
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 10603 | # SEAL.jl
**SEAL.jl** is a Julia package that wraps the Microsoft
[SEAL](https://github.com/microsoft/SEAL) library for homomorphic encryption. It
supports the Brakerski/Fan-Vercauteren (BFV) and Cheon-Kim-Kim-Song (CKKS, also
known as HEAAN in literature) schemes and exposes the homomorphic encryption
capabilities of SEAL in a (mostly) intuitive and Julian way. SEAL.jl is
published under the same permissive MIT license as the Microsoft SEAL library.
Currently, SEAL.jl supports all operations that are used in the examples of the
[SEAL library](https://github.com/microsoft/SEAL/tree/master/native/examples).
This includes encoding and encryption, addition and multiplication, rotation,
relinearization and modulus switching for the BFV and CKKS schemes.
## Installation
To install SEAL.jl, start a Julia REPL, hit `]` to enter Julia's `Pkg` mode, and
then execute
```julia
(@v1.5) pkg> add SEAL
```
Alternatively, you can install SEAL.jl by using `Pkg` directly, i.e., by running
```shell
julia -e 'using Pkg; Pkg.add("SEAL")'
```
SEAL.jl depends on the binary distribution of the SEAL library, which is
available as a Julia package `SEAL_jll.jl` and which is automatically installed
as a dependency.
*Note: Currently SEAL_jll.jl is not available on Windows, thus SEAL.jl will
work only on Linux, MacOS and FreeBSD. Also, SEAL_jll.jl does not work on 32-bit
systems.*
## Getting started
### Usage
After installation, load SEAL.jl by running
```julia
using SEAL
```
in the REPL. A **minimal** working example for encrypting an array of integers using the BFV
scheme, squaring it, and decrypting it, looks as follows:
```julia
julia> using SEAL
[ Info: Precompiling SEAL [bac81e26-86e4-4b48-8696-7d0406d5dbc1]
julia> parms = EncryptionParameters(SchemeType.bfv)
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> poly_modulus_degree = 4096
4096
julia> set_poly_modulus_degree!(parms, poly_modulus_degree)
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> set_coeff_modulus!(parms, coeff_modulus_bfv_default(poly_modulus_degree))
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> set_plain_modulus!(parms, plain_modulus_batching(poly_modulus_degree, 20))
EncryptionParameters(Ptr{Nothing} @0x0000000002e1d3a0)
julia> context = SEALContext(parms)
SEALContext(Ptr{Nothing} @0x0000000004298440)
julia> keygen = KeyGenerator(context)
KeyGenerator(Ptr{Nothing} @0x00000000021ef540)
julia> public_key_ = PublicKey()
PublicKey(Ptr{Nothing} @0x0000000002272610)
julia> create_public_key!(public_key_, keygen)
julia> secret_key_ = secret_key(keygen)
SecretKey(Ptr{Nothing} @0x0000000001cec2a0)
julia> encryptor = Encryptor(context, public_key_)
Encryptor(Ptr{Nothing} @0x0000000001cd4480)
julia> evaluator = Evaluator(context)
Evaluator(Ptr{Nothing} @0x000000000428bdd0)
julia> decryptor = Decryptor(context, secret_key_)
Decryptor(Ptr{Nothing} @0x00000000037670d0)
julia> batch_encoder = BatchEncoder(context)
BatchEncoder(Ptr{Nothing} @0x0000000001fb4bd0, SEALContext(Ptr{Nothing} @0x0000000001b87780))
julia> pod_matrix = collect(UInt64, 1:slot_count(batch_encoder));
julia> Int.(vcat(pod_matrix[1:3], pod_matrix[end-3:end]))
7-element Array{Int64,1}:
1
2
3
4093
4094
4095
4096
julia> plain_matrix = Plaintext()
Plaintext(Ptr{Nothing} @0x00000000042db6e0)
julia> encode!(plain_matrix, pod_matrix, batch_encoder)
Plaintext(Ptr{Nothing} @0x0000000002ce0370)
julia> encrypted_matrix = Ciphertext()
Ciphertext(Ptr{Nothing} @0x0000000002d91b80)
julia> encrypt!(encrypted_matrix, plain_matrix, encryptor)
Ciphertext(Ptr{Nothing} @0x0000000002d91b80)
julia> add_inplace!(encrypted_matrix, encrypted_matrix, evaluator)
Ciphertext(Ptr{Nothing} @0x0000000002ce1280)
julia> plain_result = Plaintext()
Plaintext(Ptr{Nothing} @0x0000000004591550)
julia> decrypt!(plain_result, encrypted_matrix, decryptor)
Plaintext(Ptr{Nothing} @0x0000000004591550)
julia> decode!(pod_matrix, plain_result, batch_encoder);
julia> Int.(vcat(pod_matrix[1:3], pod_matrix[end-3:end]))
7-element Array{Int64,1}:
2
4
6
8186
8188
8190
8192
```
### Examples
As you can see, using homomorphic encryption is quite involved: You need to pick
a scheme, provide sensible encryption parameters, encode your raw data into
plaintext, encrypt it to ciphertext, perform your arithmetic operations on it,
and then decrypt and decode again. Therefore, before starting to use SEAL.jl
for your own applications, it is **highly recommended** to have a look at the
examples in the
[`examples/`](https://github.com/JuliaCrypto/SEAL.jl/tree/main/examples/)
directory. Otherwise it will be very likely that you are using SEAL.jl (and SEAL) in a
way that is either not secure, will produce unexpected results, or just crashes.
The examples included in SEAL.jl follow almost line-by-line the examples provided by the
[SEAL library](https://github.com/microsoft/SEAL/tree/master/native/examples).
For example, the snippet above is based on the `example_batch_encoder()` function in
[`examples/2_encoders.jl`](https://github.com/JuliaCrypto/SEAL.jl/tree/main/examples/2_encoders.jl).
The full list of examples is as follows:
|SEAL.jl |SEAL (C++) |Description |
|--------------------|---------------------|----------------------------------------------------------------------------|
|`examples.jl` |`examples.cpp` |The example runner application |
|`1_bfv_basics.jl` |`1_bfv_basics.cpp` |Encrypted modular arithmetic using the BFV scheme |
|`2_encoders.jl` |`2_encoders.cpp` |Encoding more complex data into Microsoft SEAL plaintext objects |
|`3_levels.jl` |`3_levels.cpp` |Introduces the concept of levels; prerequisite for using the CKKS scheme |
|`4_ckks_basics.jl` |`4_ckks_basics.cpp` |Encrypted real number arithmetic using the CKKS scheme |
|`5_rotation.jl` |`5_rotation.cpp` |Performing cyclic rotations on encrypted vectors in the BFV and CKKS schemes|
|`6_serialization.jl`|`6_serialization.cpp`|Serializing objects in Microsoft SEAL |
|`7_performance.jl` |`7_performance.cpp` |Performance tests |
To run the examples, first install SEAL.jl (as shown [above](#usage)) and clone this repository:
```shell
git clone https://github.com/JuliaCrypto/SEAL.jl.git
```
Then, run Julia and include `examples/examples.jl` before executing `seal_examples()`:
```shell
julia --project=. -e 'include("SEAL.jl/examples/examples.jl"); seal_examples()'
```
You will be shown an interactive prompt that lets you run any of the available
examples:
```
Microsoft SEAL version: 3.6.2
+---------------------------------------------------------+
| The following examples should be executed while reading |
| comments in associated files in examples/. |
+---------------------------------------------------------+
| Examples | Source Files |
+----------------------------+----------------------------+
| 1. BFV Basics | 1_bfv_basics.jl |
| 2. Encoders | 2_encoders.jl |
| 3. Levels | 3_levels.jl |
| 4. CKKS Basics | 4_ckks_basics.jl |
| 5. Rotation | 5_rotation.jl |
| 6. Serialization | 6_serialization.jl |
| 7. Performance Test | 7_performance.jl |
+----------------------------+----------------------------+
[ 0 MB] Total allocation from the memory pool
> Run example (1 ~ 7) or exit (0):
```
Since the examples will not create or modify any files, feel free to run them from
any directory.
## Implementation strategy
SEAL.jl is *work-in-progress*, thus only a subset of the many capabilities of
the SEAL library are so far supported ([PRs are welcome!](CONTRIBUTING.md)). In
general, SEAL.jl makes use of the C bindings provided by SEAL, but tries to
mimic SEAL's *C++* API as close as possible. That is, file names,
function/variable names, the order of arguments etc. are as close to the
SEAL C++ code as possible. The reason for this is that the SEAL
library provides excellent inline code documentation, thus by reading (and
understanding) the comments in the C++ files you should immediately be able to
reproduce the same implementation with SEAL.jl.
However, some implementation details do not translate well from C++ to
Julia. Also, the Julia community has a few strong conventions that if violated
would make it unnecessarily difficult for experienced Julia users to use SEAL.jl
correctly. Thus, when trying to recreate SEAL examples written in C++ with
SEAL.jl in Julia, there are a few things to watch out for:
* Functions that modify their input are suffixed by `!`.
* Function arguments that are modified come first (but the rest remains in
order).
* When translating C++ member function to Julia, the "owning" object is always
passed as the last argument.
* While `x.size()` in C++ returns a scalar, length-like value, `size(x)` in
Julia is expected to return a tuple, which is also the case in SEAL.jl.
The next example shows the first three items in practice. The C++ code
snippet
```c++
evaluator.multiply_plain(x1_encrypted, plain_coeff3, x1_encrypted_coeff3);
```
is translated to the following Julia code:
```julia
multiply_plain!(x1_encrypted_coeff3, x1_encrypted, plain_coeff3, evaluator)
```
Note the trailing `!`, the fact that `x1_encrypted_coeff3` as the modified input
variable is now the first argument, and `evaluator` being passed as the last
argument.
## Authors
SEAL.jl was initiated by
[Michael Schlottke-Lakemper](https://www.mi.uni-koeln.de/NumSim/schlottke-lakemper)
(University of Cologne, Germany), who is also the principal developer of
SEAL.jl.
## License and contributing
SEAL.jl is licensed under the MIT license (see [License](@ref)). Since SEAL.jl is
an open-source project, we are very happy to accept contributions from the
community. Please refer to [Contributing](@ref) for more details.
## Acknowledgements
This Julia package would have not been possible without the excellent work of
the developers of the [SEAL](https://github.com/microsoft/SEAL) library. Their
high-quality code documentation plus the fact that they provide C bindings for
the entire functionality of the SEAL C++ library have made developing SEAL.jl
a breeze.
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.4.2 | 6fc3f65aca571c817968def8e74065fc98f6bce4 | docs | 119 | # Reference
```@meta
CurrentModule = SEAL
```
```@index
Modules = [SEAL]
```
```@autodocs
Modules = [
SEAL,
]
```
| SEAL | https://github.com/JuliaCrypto/SEAL.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 664 | using SimpleCaching
using Documenter
# Make `using SimpleCaching` implicit in every doctest block of the package's
# docstrings, so examples can call the exported macros directly.
DocMeta.setdocmeta!(SimpleCaching, :DocTestSetup, :(using SimpleCaching); recursive=true)

# Build the HTML documentation.
makedocs(;
    modules=[SimpleCaching],
    authors="Federico Manzella",
    repo="https://github.com/ferdiu/SimpleCaching.jl/blob/{commit}{path}#{line}",
    sitename="SimpleCaching.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI builds; locally they break filesystem browsing.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://ferdiu.github.io/SimpleCaching.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
        "Macros" => "macros.md",
    ],
)

# Deploy the built docs to the repository (Documenter's default is the
# `gh-pages` branch), tracking `main` as the development branch.
deploydocs(;
    repo="github.com/ferdiu/SimpleCaching.jl",
    devbranch="main",
)
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 349 | module SimpleCaching
using SHA            # SHA-256 digests used to fingerprint cached calls
using Dates          # timestamps / elapsed-time types for cache records
using Serialization  # fast (but Julia-version-dependent) storage backend
using JLD2           # portable storage backend

# Imported so the package can add methods to it: the caching macros use a
# multi-level `esc(ex, n)` (defined in utils.jl — TODO confirm location).
import Base: esc

# export caching macros
export @scache, @scachejld
export @scache_if, @scachejld_if

# export deprecated macros
export @scachefast

include("settings.jl")    # global `settings` object (see `SimpleCachingSettings`)
include("utils.jl")       # hashing, timing and logging helpers
include("caching.jl")     # cache primitives and the caching macros
include("deprecated.jl")  # deprecated macro names (e.g. `@scachefast`)
include("init.jl")        # module initialization

end # module
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 14752 |
# Name of the per-`type` TSV table that records metadata about cached objects.
@inline _default_table_file_name(type::AbstractString) = string(type, "_cached.tsv")

# Name of the cache file holding the object of `type` identified by `hash`.
@inline function _default_jld_file_name(type::AbstractString, hash::AbstractString)
    return string(type, "_", hash, ".jld")
end

"""
    cached_obj_exists(type, common_cache_dir, hash)

Return `true` if `common_cache_dir` exists and contains the cache file for
`type` and `hash`, `false` otherwise.
"""
function cached_obj_exists(
    type::AbstractString, common_cache_dir::AbstractString, hash::AbstractString
)::Bool
    cache_file = joinpath(common_cache_dir, _default_jld_file_name(type, hash))
    return isdir(common_cache_dir) && isfile(cache_file)
end
"""
    cache_obj(type, common_cache_dir, obj, hash, args_string; column_separator = "\\t", time_spent, use_serialize = false)

Save `obj` to disk in `common_cache_dir` under a file name built from `type` and
`hash`, optionally appending a human-readable record of the operation to a
per-`type` TSV table in the same directory.

The object is written either with `Serialization.serialize` (if `use_serialize`
is `true`) or with JLD2's `@save` (the default). The write goes to a `.tmp`
file first and is then moved into place, so a partially-written file never
shadows a valid cache entry.

# Arguments

- `type`: label used as prefix for the cache and table file names;
- `common_cache_dir`: directory holding all cache files;
- `obj`: the value to cache;
- `hash`: hex digest identifying the call that produced `obj`;
- `args_string`: textual form of the originating call, stored in the record table.

# Keywords

- `column_separator`: separator for the record table (default: TAB);
- `time_spent`: how long the computation of `obj` took (required);
- `use_serialize`: write with `Serialization` instead of JLD2 (default: `false`).
"""
function cache_obj(
    type::AbstractString,
    common_cache_dir::AbstractString,
    obj::Any,
    hash::AbstractString,
    args_string::AbstractString;
    column_separator::AbstractString = "\t",
    time_spent::Dates.Millisecond,
    use_serialize::Bool = false
)
    total_save_path = joinpath(common_cache_dir, _default_jld_file_name(type, hash))
    mkpath(dirname(total_save_path))

    # Checked before opening in append mode: `open(..., "a+")` creates the
    # file, so the header row must be written only if the table was missing.
    cache_table_exists = isfile(joinpath(common_cache_dir, _default_table_file_name(type)))

    # cache record
    if settings.create_cache_record
        table_file = open(joinpath(common_cache_dir, _default_table_file_name(type)), "a+")
        if !cache_table_exists
            # First record for this `type`: write the TSV header.
            write(table_file,
                join([
                    "TIMESTAMP",
                    "FILE NAME",
                    "COMP TIME",
                    "COMMAND",
                    "TYPE",
                    "JULIA_VERSION",
                    "\n"
                ], column_separator))
        end
        # One row per cached object: when, where, how long, which call,
        # what type, and under which Julia version it was produced.
        write(table_file, string(
            Dates.format(Dates.now(), "dd/mm/yyyy HH:MM:SS"), column_separator,
            _default_jld_file_name(type, hash), column_separator,
            human_readable_time(time_spent), column_separator,
            args_string, column_separator,
            typeof(obj), column_separator,
            VERSION, column_separator,
            "\n"))
        close(table_file)
    end

    # log
    if settings.log
        printcheckpoint(
            settings.log_output,
            "Saving $(type) to file $(total_save_path)[.tmp]...";
            format = settings.date_format
        )
    end

    # Write to a temporary file first...
    if use_serialize
        Serialization.serialize("$(total_save_path).tmp", obj)
    else
        @save "$(total_save_path).tmp" obj
    end
    # ...then move it into place so readers never observe a partial file.
    mv("$(total_save_path).tmp", "$(total_save_path)"; force = true)
end
"""
    load_cached_obj(type, common_cache_dir, hash)

Load and return the object cached in `common_cache_dir` for `type` and `hash`.

The file is first read as a JLD2 archive; if that fails, it is read with
`Serialization.deserialize` (the format written by [`cache_obj`](@ref) with
`use_serialize = true`). If neither format matches, an `ArgumentError` is
raised through `throw_n_log`.
"""
function load_cached_obj(
    type::AbstractString, common_cache_dir::AbstractString, hash::AbstractString
)
    total_load_path = joinpath(common_cache_dir, _default_jld_file_name(type, hash))

    if settings.log
        printcheckpoint(SimpleCaching.settings.log_output, "Loading $(type) from file " *
            "$(total_load_path)..."; format = settings.date_format)
    end

    # `@load` assigns the variable `obj` by name, hence the placeholder binding.
    obj = nothing
    # TODO use magic number check instead of try/catch
    try
        # Try JLD2 first (files written by `@save`).
        @load total_load_path obj
    catch e
        try
            # Fall back to `Serialization`-written files.
            obj = Serialization.deserialize(total_load_path)
        catch e
            # NOTE(review): both original exceptions are discarded here; only a
            # generic "unrecognized format" error is reported.
            throw_n_log("File $(total_load_path) is neither in JLD2 format nor a " *
                "Serialized object", ArgumentError)
        end
    end
    obj
end
"""
_scache(type, common_cache_dir, ex)
This is the macro used by all the other macros exported by `SimpleCaching` package.
!!! note
Never use this macro: use [`@scache`](@ref) and [`@scachejld`](@ref) instead.
"""
macro _scache(type, common_cache_dir, ex)
will_use_serialize = _use_serialize
esc_times = escdepth(ex)
ex = unesc_comp(ex)
!_is_call_expr(ex) && !_is_bc_expr(ex) && (throw(ArgumentError("`@scache[jld]` can " *
"be used only with function calls: passed $(ex)")))
# hyigene
type = esc(type)
common_cache_dir = esc(common_cache_dir)
ex = esc(ex, esc_times)
t = _convert_input(ex, true)
rel_esc(ex) = esc(ex, esc_times+1)
as, ks, vs, res, bc = (_toexpr(t.args), _toexpr(t.kwargs)..., t.res, t.broadcast)
# TODO: make a function that interprets `res` to be inserted in `args` or `kwargs`
return quote
_hash = get_hash_sha256((
args = $(rel_esc(as)),
kwargs = merge(
Dict{Symbol,Any}(
zip(
$(rel_esc(ks)),
$(rel_esc(vs))
)
),
Dict{Symbol,Any}(
pairs($(rel_esc(res)))
)
),
broadcast = $(rel_esc(bc))
))
if cached_obj_exists($(type), $(common_cache_dir), _hash)
load_cached_obj($(type), $(common_cache_dir), _hash)
else
if settings.log
printcheckpoint(
settings.log_output, "Computing " * $(type) * "...";
format = settings.date_format,
start = settings.line_starter
)
end
_started = _start_timer()
_result_value = $(esc(ex))
_finish_time = _stop_timer(_started)
if settings.log
printcheckpoint(
settings.log_output,
"Computed " * $(type) * " in " * human_readable_time_s(_finish_time) *
" seconds (" * human_readable_time(_finish_time) * ")";
format = settings.date_format,
start = settings.line_starter
)
end
cache_obj(
$(type),
$(common_cache_dir),
_result_value,
_hash,
$(rel_esc(_strarg(ex, true))),
time_spent = _finish_time,
use_serialize = $(esc(will_use_serialize))
)
_result_value
end
end
end
# Shared docstring fragments, interpolated into the docstrings of the
# `@scache`/`@scachejld` family of macros so the recurring notes are written
# only once.

# Note about the extension used for cache files.
const _ext_docs = """!!! note
    The file extension will be `.jld` when using both [`@scache`](@ref) and
    [`@scachejld`](@ref).
"""

# Note about argument expressions being evaluated before hashing.
const _same_args_docs = """Expressions in function argument will be computed as first step
so the cached file will be loaded even if the arguments are different but will evaluate to
the same result.
"""

# Note about the default value of `type`.
const _type_docs = """!!! note
    If `type` is omitted the function name will be used as `type`.
"""

# Note about the default value of `cache_dir`.
# (Fixed typo: "filed" -> "field".)
const _cache_dir_docs = """!!! note
    If `cache_dir` is omitted the value set in field `cache_dir` in
    [`SimpleCachingSettings`](@ref) will be used.
"""
"""
@scache [[type] cache_dir] function_call
Cache the result of `function_call` in directory `cache_dir` prefixing the saved file with
`type`.
This macro uses the `Serialization` module's `serialize` and `deserialize` functions to save
and load cached files, so it is faster and more memory efficient than the
[`@scachejld`](@ref) macro, which uses `JLD2`; the latter, on the other hand, is more
portable between different Julia versions.
$_ext_docs
$_type_docs
$_cache_dir_docs
## Examples
```julia-repl
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:42 ] Computing cute-cube...
● [ 2022-12-09 15:39:42 ] Computed cute-cube in 0.009 seconds (00:00:00)
● [ 2022-12-09 15:39:44 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:56 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
shell> ls -l cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
-rw-r--r--. 1 user user 232 9 dic 15.39 cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
```
$_same_args_docs
```julia-repl
julia> @scache "cute-cube" "./" fill(0, 3, parse(Int, "3"), 3)
● [ 2022-12-09 09:41:54 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
"""
# Three-argument form: `type` and `cache_dir` are given explicitly.
# NOTE: `_use_serialize` is a module-level flag presumably read by `@_scache`
# while it expands; setting it here selects the `Serialization`-based backend
# (see `will_use_serialize` in `@_scache`) — fragile, but intentional.
macro scache(type, common_cache_dir, ex)
    global _use_serialize = true
    :(@_scache $(esc(type)) $(esc(common_cache_dir)) $(esc(ex)))
end
# Two-argument form: derive `type` from the name of the called function and
# forward to the three-argument form.
macro scache(common_cache_dir, ex)
    stripped = unesc_comp(ex)
    if !(stripped isa Expr) || stripped.head != :call
        throw(ArgumentError("`@scache[jld]` can " *
            "be used only with function calls: passed $(stripped)"))
    end
    return :(@scache $(esc(string(stripped.args[1]))) $(esc(common_cache_dir)) $(esc(ex)))
end
# One-argument form: use `settings.cache_dir` as the cache directory; the
# two-argument method then derives `type` from the function name.
macro scache(ex)
    :(@scache $(esc(settings.cache_dir)) $(esc(ex)))
end
"""
@scachejld [[type] cache_dir] function_call
Cache the result of `function_call` in directory `cache_dir` prefixing the saved file with
`type`.
This macro uses the `JLD2` `@save` and `@load` macros to save and load cached files, so it
is slower and less memory efficient than the [`@scache`](@ref) macro, which uses
`serialize`; the latter, on the other hand, is less portable between different Julia
versions.
$_ext_docs
$_type_docs
$_cache_dir_docs
## Examples
```julia-repl
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scachejld "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:56:09 ] Computing cute-cube...
● [ 2022-12-09 15:56:09 ] Computed cute-cube in 0.0 seconds (00:00:00)
● [ 2022-12-09 15:56:09 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scachejld "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:56:19 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
shell> ls -l cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
-rw-r--r--. 1 user user 1000 9 dic 15.56 cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
```
$_same_args_docs
```julia-repl
julia> @scachejld "cute-cube" "./" fill(0, 3, round(Int64, 1.5 * 2), 3)
● [ 2022-12-09 15:59:13 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
"""
# Three-argument form of the JLD2-backed variant.
# NOTE: flipping the module-level `_use_serialize` flag to `false` selects the
# JLD2 backend, presumably read by `@_scache` while it expands — mirror image
# of the `@scache` three-argument method.
macro scachejld(type, common_cache_dir, ex)
    global _use_serialize = false
    :(@_scache $(esc(type)) $(esc(common_cache_dir)) $(esc(ex)))
end
# Two-argument form: derive `type` from the name of the called function and
# forward to the three-argument form.
macro scachejld(common_cache_dir, ex)
    stripped = unesc_comp(ex)
    if !(stripped isa Expr) || stripped.head != :call
        throw(ArgumentError("`@scache[jld]` can " *
            "be used only with function calls: passed $(stripped)"))
    end
    return :(@scachejld $(esc(string(stripped.args[1]))) $(esc(common_cache_dir)) $(esc(ex)))
end
# One-argument form: use `settings.cache_dir` as the cache directory.
macro scachejld(ex)
    :(@scachejld $(esc(settings.cache_dir)) $(esc(ex)))
end
"""
@scache_if condition [[type] cache_dir] function_call
Cache the result of `function_call` only if `condition` is `true`.
Note that, if `condition` is `false`, the cached result will not be loaded even if present.
For other parameters docs see [`@scache`](@ref).
## Examples
```julia-repl
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scache_if true "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:42 ] Computing cute-cube...
● [ 2022-12-09 15:39:42 ] Computed cute-cube in 0.009 seconds (00:00:00)
● [ 2022-12-09 15:39:44 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scache_if true "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:41:54 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
shell> ls -lh cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
-rw-r--r--. 1 user user 1000 9 dic 09.54 cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
```
but passing a `false` `condition` (note there is no loading log):
```julia-repl
julia> @scache_if false "cute-cube" "./" fill(0, 3, 3, 3)
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
"""
# Four-argument conditional form: `condition` is evaluated at *runtime* on
# every call; when it is false the expression is executed directly, bypassing
# both cache lookup and cache writing. Note that both branches are expanded at
# compile time regardless of the condition's value.
macro scache_if(condition, type, common_cache_dir, ex)
    return quote
        if $(esc(condition))
            @scache $(esc(type)) $(esc(common_cache_dir)) $(esc(ex))
        else
            $(esc(ex))
        end
    end
end
# Three-argument conditional form: derive `type` from the called function's
# name and forward to the four-argument form.
macro scache_if(condition, common_cache_dir, ex)
    stripped = unesc_comp(ex)
    if !(stripped isa Expr) || stripped.head != :call
        throw(ArgumentError("`@scache[jld]` can " *
            "be used only with function calls: passed $(stripped)"))
    end
    return :(@scache_if $(esc(condition)) $(esc(string(stripped.args[1]))) $(esc(common_cache_dir)) $(esc(ex)))
end
# Two-argument conditional form: use `settings.cache_dir` as the cache
# directory.
macro scache_if(condition, ex)
    :(@scache_if $(esc(condition)) $(esc(settings.cache_dir)) $(esc(ex)))
end
"""
@scachejld_if condition [[type] cache_dir] function_call
Cache the result of `function_call` only if `condition` is `true`.
Note that, if `condition` is `false`, the cached result will not be loaded even if present.
For other parameters docs see [`@scachejld`](@ref).
## Examples
```julia-repl
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scachejld_if true "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 16:06:42 ] Computing cute-cube...
● [ 2022-12-09 16:06:42 ] Computed cute-cube in 0.009 seconds (00:00:00)
● [ 2022-12-09 16:06:44 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scachejld_if true "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 16:07:04 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
shell> ls -lh cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
-rw-r--r--. 1 user user 1000 9 dic 16.07 cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld
```
but passing a `false` `condition` (note there is no loading log):
```julia-repl
julia> @scachejld_if false "cute-cube" "./" fill(0, 3, 3, 3)
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
"""
# Four-argument conditional form of the JLD2-backed variant: `condition` is
# evaluated at runtime on every call; when false, the expression runs
# directly with no cache lookup or write.
macro scachejld_if(condition, type, common_cache_dir, ex)
    return quote
        if $(esc(condition))
            @scachejld $(esc(type)) $(esc(common_cache_dir)) $(esc(ex))
        else
            $(esc(ex))
        end
    end
end
# Three-argument conditional form: derive `type` from the called function's
# name and forward to the four-argument form.
macro scachejld_if(condition, common_cache_dir, ex)
    stripped = unesc_comp(ex)
    if !(stripped isa Expr) || stripped.head != :call
        throw(ArgumentError("`@scache[jld]` can " *
            "be used only with function calls: passed $(stripped)"))
    end
    return :(@scachejld_if $(esc(condition)) $(esc(string(stripped.args[1]))) $(esc(common_cache_dir)) $(esc(ex)))
end
# Two-argument conditional form: use `settings.cache_dir` as the cache
# directory.
macro scachejld_if(condition, ex)
    :(@scachejld_if $(esc(condition)) $(esc(settings.cache_dir)) $(esc(ex)))
end
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 192 |
# Deprecated alias kept for backward compatibility: emits a deprecation
# warning and behaves exactly like the three-argument `@scache`.
macro scachefast(type, common_cache_dir, ex)
    Base.depwarn("`scachefast` is deprecated, use `scache` instead.", :scachefast)
    :(@scache $(esc(type)) $(esc(common_cache_dir)) $(esc(ex)))
end
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 81 |
# Module initializer: rebind the default log sink to the *runtime* `stdout`
# (the value captured at precompile time would be stale) and register a hook
# so a custom log stream, if any, is closed when Julia exits.
function __init__()
    settings.log_output = stdout;
    atexit(_closelog)
end
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 1438 |
"""
SimpleCachingSettings is a struct containing settings for exported macros `@scache` and
`@scachefast`.
## Fields
- `log`: a Bool indicating whether to log or not operations like computing, saving and
loading. Default is `false`.
- `log_output`: a IO containing the file descriptor the log will be written to; ignored if
`log` is `false`. Default is `stdout`.
- `date_format`: a String representing the format the DateTime will be printed when logging;
ignored if `log` is `false`. Default is `"yyyy-mm-dd HH:MM:SS"`.
- `line_started`: a String that will be placed at the beginning of the log; ignored if `log`
is `false`. Default is `"● "`.
- `create_cache_record`: a Bool indicating whether to create a tsv file containing human
readable informations about the saved file. Default is `false`;
- `cache_dir`: this is the default directory the cache will be saved in when not specified
in the macro call. Default is `./_sc_cache`.
"""
mutable struct SimpleCachingSettings
log::Bool
log_output::IO
date_format::AbstractString
line_starter::AbstractString
create_cache_record::Bool
cache_dir::AbstractString
end
# Global, mutable settings instance shared by all the caching macros.
const settings = SimpleCachingSettings(false, stdout, "yyyy-mm-dd HH:MM:SS", "● ", false, "./_sc_cache")
# Backend selector flipped by `@scache`/`@scachejld` right before they expand
# to `@_scache`: `true` → Serialization, `false` → JLD2.
_use_serialize = false
# Close the log sink at exit, but only for streams we may own: plain `IO`
# (e.g. `stdout`) is left untouched, `IOStream`/`Channel` are closed.
_closelog(::IO) = nothing
_closelog(io::IOStream) = close(io)
_closelog(io::Channel) = close(io)
_closelog() = _closelog(settings.log_output)
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 7770 |
"""
_start_timer()
Same as `using Dates; now()`.
"""
_start_timer() = Dates.now()
"""
_stop_timer(d)
Same as `using Dates; now() - d` where `d` is a DateTime object.
"""
_stop_timer(d::DateTime) = Dates.now() - d
"""
get_hash_sha256(var)
Get the hash of the content of `var` using SHA256 algorithm.
"""
function get_hash_sha256(var)::String
io = IOBuffer();
serialize(io, var)
result = bytes2hex(sha256(take!(io)))
close(io)
result
end
"""
printcheckpoint([io::IO], any...; format)
Print `any...` to `io` and flush preceeded by the current DateTime in `format`.
A line starter can be specified by the keyword argument `start`. Default is "● ".
"""
@inline function printcheckpoint(
io::IO, string::AbstractString...;
format = "dd/mm/yyyy HH:MM:SS",
start = "● "
)
println(io, start, Dates.format(now(), "[ $(format) ] "), string...)
flush(io)
end
@inline function printcheckpoint(string::AbstractString...; kwargs...)
return printcheckpoint(stdout, string...; kwargs...)
end
@inline function printcheckpoint(io::IO, any::Any...; kwargs...)
return printcheckpoint(io, string(any...); kwargs...)
end
@inline function printcheckpoint(any::Any...; kwargs...)
return printcheckpoint(stdout, any...; kwargs...)
end
"""
human_readable_time(milliseconds)
Print elapsed time in format "HH:MM:SS".
"""
function human_readable_time(ms::Dates.Millisecond)::String
result = ms.value / 1000
seconds = round(Int64, result % 60)
result /= 60
minutes = round(Int64, result % 60)
result /= 60
hours = round(Int64, result % 24)
string(string(hours; pad=2), ":", string(minutes; pad=2), ":", string(seconds; pad=2))
end
"""
human_readable_time_s(milliseconds)
Print elapsed time in seconds.
"""
function human_readable_time_s(ms::Dates.Millisecond)::String
string(ms.value / 1000)
end
"""
throw_n_log(message; error_type = ErrorException)
Call `@error msg` before throwning.
"""
function throw_n_log(msg::AbstractString; error_type::Type{<:Exception} = ErrorException)
@error msg
throw(error_type(msg))
end
"""
_is_call_expr(expression)
Return whether an expression is a function call or not.
"""
_is_call_expr(::Any) = false
_is_call_expr(ex::Expr) = ex.head == :call
"""
_is_bc_expr(expression)
Return whether an expression is a broadcast or not.
"""
_is_bc_expr(::Any) = false
_is_bc_expr(ex::Expr) = ex.head == :.
"""
_bc2call(expression)
Convert a broadcast expression to a simple call expression.
"""
_bc2call(ex::Any) = ex
function _bc2call(ex::Expr)
if !_is_bc_expr(ex)
return ex
end
@assert typeof(ex.args[2]) == Expr && ex.args[2].head == :tuple "Found unusual " *
"broadcast expression: $(dump(ex))"
res = deepcopy(ex)
res.head = :call
res.args = res.args[2].args
return res
end
## conversion utilities ##

"""
    _strarg(arg)

A utility function used to generate a String representing the final Expr that will cause
the cached file to be loaded.

This means that the following calls will produce the same strings:
```julia
x = 10
@scache "my-cute-vector" "./" fill(0, 10)
@scache "my-cute-vector" "./" fill(0, x)
@scache "my-cute-vector" "./" fill(0, 5 + 5)
```

This function is called inside [`@_scache`](@ref SimpleCaching.@_scache) to generate the
string that will fill the column `COMMAND` in the cache record (if generated).
"""
# Fallback: wrap any non-Expr argument in a runtime `string(...)` call so its
# *value* (not its source text) ends up in the generated string.
_strarg(arg::Any, top_level::Bool = false, broadcast::Bool = false) = Expr(:call, :string, arg)
function _strarg(arg::Expr, top_level::Bool = false, broadcast::Bool = false)
    if top_level && arg.head == :escape
        # Unwrap one escape layer and keep processing at top level.
        return _strarg(arg.args[1], true)
    elseif top_level && arg.head == :call
        # Positional arguments: everything that is not `;`-parameters or a
        # `kw = value` pair.
        _args = filter(
            x -> !(typeof(x) == Expr && (x.head == :parameters || x.head == :kw)),
            arg.args[2:end]
        )
        # Keyword arguments come either from a `:parameters` block (after `;`)
        # or from `:kw` pairs mixed in with the positional arguments.
        _params = filter(x -> typeof(x) == Expr && x.head == :parameters, arg.args[2:end])
        _kw =
            if length(_params) > 0
                vcat([p.args for p in _params]...)
            else
                []
            end
        append!(_kw, filter(x -> typeof(x) == Expr && x.head == :kw, arg.args[2:end]))
        # Build an expression that, at runtime, renders "fname(a, b; k = v)"
        # (with a "." after the name for broadcast calls).
        return Expr(:call, :string,
            "$(arg.args[1])",
            broadcast ? "." : "",
            "(",
            Expr(:call, :join, _toexpr(_strarg.(_args)), ", "),
            length(_kw) > 0 ? "; " : "",
            length(_kw) > 0 ? Expr(:call, :join, _toexpr(_strarg.(_kw, true)), ", ") : "",
            ")"
        )
    elseif top_level && arg.head == :.
        # Broadcast call: convert to a plain call first, remember it was one.
        return _strarg(_bc2call(arg), top_level, true)
    elseif top_level && arg.head == :kw
        return Expr(:call, :string, "$(arg.args[1]) = ", _strarg(arg.args[2]))
    elseif top_level && arg.head == :parameters
        return join(_strarg.(arg.args), ", ")
    elseif top_level && arg.head == :...
        # Splatted argument: strip the tuple parentheses and trailing comma
        # from its rendered form so it blends into the argument list.
        return _rem(_rem(_rem(_strarg(arg.args[1]), "^\\("), "\\)$"), ",$")
    elseif arg.head == :...
        return _strarg(Expr(:call, :join, Expr(:..., _toexpr(arg.args)), ", "))
    else
        # Any other expression: evaluate it at runtime and stringify its value.
        Expr(:call, :string, arg)
    end
end
# Wrap `s` in an expression that, when evaluated, removes the first match of
# the regex `reg` from the stringified form of `s`; helper for `_strarg`'s
# splat handling.
function _rem(s, reg::AbstractString)
    return Expr(:call, :replace, _strarg(s), Expr(:call, :(=>), Expr(:call, :Regex, reg), ""))
end
"""
_convert_input(arg)
A utility function used to generate a stable Tuple containing the information that will be
used to generate the hash of the cached file.
The resulting object is a NamedTuple{(:args,:kwargs),Tuple{T,D}} where T is a ordered
Tuple containing the arguments passed to the function called and D is a Dict{Symbol,Any}
containing the keyword arguments passed.
Note: a dictionary is used for the keyword arguments because otherwise the hash would change
based on the their order.
"""
_convert_input(arg::Any, top_level::Bool = false, broadcast::Bool = false) = arg
function _convert_input(arg::Expr, top_level::Bool = false, broadcast::Bool = false)
_splat2pairs(v::AbstractVector) = length(v) == 0 ? [] : _splat2pairs(v[1])
_splat2pairs(ex::Expr) = Expr(:call, :pairs, ex.args[1])
if top_level && arg.head == :escape
return _convert_input(arg.args[1], true)
elseif top_level && arg.head == :call
_args = filter(
x -> !(typeof(x) == Expr && (x.head == :parameters || x.head == :kw)),
arg.args[2:end]
)
_params = filter(x -> typeof(x) == Expr && x.head == :parameters, arg.args[2:end])
_kw =
if length(_params) > 0
vcat([p.args for p in _params]...)
else
[]
end
append!(_kw, filter(x -> typeof(x) == Expr && x.head == :kw, arg.args[2:end]))
_res = filter(x -> typeof(x) == Expr && x.head == :..., _kw)
_kw = filter(x -> !(typeof(x) == Expr && x.head == :...), _kw)
return (
args = [_convert_input(a, false) for a in _args],
kwargs = Dict{Symbol,Any}(_convert_input.(_kw, true)...),
res = _splat2pairs(_res),
broadcast = broadcast
)
elseif top_level && arg.head == :.
return _convert_input(_bc2call(arg), top_level, true)
elseif top_level && arg.head == :kw
return arg.args[1] => _convert_input(arg.args[2])
elseif top_level && arg.head == :parameters
return _convert_input.(arg.args)
else
return arg
end
end
## fast "toexpr" conversions
_toexpr(vec::AbstractVector) = Expr(:vect, vec...)
function _toexpr(d::AbstractDict{Symbol,Any})
n = length(d)
ks = Vector{Symbol}(undef, n)
vs = Vector{Any}(undef, n)
Threads.@threads for (i, (k, v)) in collect(enumerate(d))
ks[i] = k
vs[i] = v
end
_toexpr(Expr.(:quote, ks)), _toexpr(vs)
end
## escaping utilities ##

# Whether `ex` is an `esc(...)` wrapper, i.e. an `Expr` with head `:escape`.
isescaped(::Any) = false
# NOTE: the redundant `typeof(ex) == Expr` check (always true inside an
# `::Expr` method) was removed.
isescaped(ex::Expr) = ex.head === :escape

# Strip one escape layer; assumes `isescaped(ex)` holds (see `safeunesc`).
unesc(ex::Any) = ex
unesc(ex::Expr) = ex.args[1]

# Strip one escape layer only if there actually is one.
safeunesc(ex::Any) = ex
safeunesc(ex::Expr) = isescaped(ex) ? unesc(ex) : ex

# Number of nested escape layers wrapping `ex`.
escdepth(::Any) = 0
function escdepth(ex::Expr)
    res::Int = 0
    curr_ex = ex
    while (isescaped(curr_ex))
        curr_ex = unesc(curr_ex)
        res += 1
    end
    return res
end
# Apply `esc` to `ex` `n` times (zero applications return `ex` unchanged).
# NOTE(review): this adds a 2-argument method named `esc`; the recursive
# 1-argument call presumably resolves to `Base.esc` via an explicit
# `import Base: esc` in the main module file — confirm, otherwise this call
# would be a MethodError.
function esc(ex::Any, n::Integer)
    res = ex
    for i in 1:n
        res = esc(res)
    end
    return res
end
"""
    unesc_comp(ex)

Completely unescape `ex`, stripping every nested escape layer.
"""
unesc_comp(ex::Any) = ex
function unesc_comp(ex::Expr)
    stripped = ex
    while isescaped(stripped)
        stripped = unesc(stripped)
    end
    return stripped
end
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | code | 6804 | using SimpleCaching
using Test
# Shared fixtures: a throwaway cache directory plus a cache "type" label and
# two small matrices used by the broadcasting tests.
const testing_cache_dir = mktempdir(prefix = "SimpleCaching_test_")
const cached_type = "testing_object"
const mat1 = [1 0; 0 1]
const mat2 = [1 1; 0 1]
# Wrapper module used to check that the macros also expand correctly when
# invoked from inside another module (hygiene/escaping regression test).
module A
using SimpleCaching

export heavy_computation

function heavy_computation(cached_type, testing_cache_dir, x, s::Integer...)
    return @scache cached_type testing_cache_dir fill(x, s...)
end
end
@testset "SimpleCaching.jl" begin
# test @scache
res1 = @scache cached_type testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
res2 = @scache cached_type testing_cache_dir fill(0.0, 10, 10, 10)
res3 = @scache cached_type testing_cache_dir fill(Float64(1 - 1), 10, 10, 10)
res4 = @scache cached_type testing_cache_dir fill(res3[1,1,1], 10, 10, 10)
res = fill(0.0, 10, 10, 10)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
# test @scachejld
res1 = @scachejld cached_type testing_cache_dir fill(0.0, 20, 20, 20)
res2 = @scachejld cached_type testing_cache_dir fill(0.0, 20, 20, 20)
res3 = @scachejld cached_type testing_cache_dir fill(Float64(1 - 1), 20, 20, 20)
res4 = @scachejld cached_type testing_cache_dir fill(res3[1,1,1], 20, 20, 20)
res = fill(0.0, 20, 20, 20)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
# test broadcasting
res1_normal = mat1 * mat2
res2_normal = mat1 .* mat2
res1_cache = @scache mat1 * mat2
res2_cache = @scache mat1 .* mat2
@test res1_normal == res1_cache
@test res2_normal == res2_cache
# clean
rm(testing_cache_dir; recursive = true)
@testset "no type" begin
# test @scache
res1 = @scache testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
res2 = @scache testing_cache_dir fill(0.0, 10, 10, 10)
res3 = @scache testing_cache_dir fill(Float64(1 - 1), 10, 10, 10)
res4 = @scache testing_cache_dir fill(res3[1,1,1], 10, 10, 10)
res = fill(0.0, 10, 10, 10)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
# test @scachejld
res1 = @scachejld testing_cache_dir fill(0.0, 20, 20, 20)
res2 = @scachejld testing_cache_dir fill(0.0, 20, 20, 20)
res3 = @scachejld testing_cache_dir fill(Float64(1 - 1), 20, 20, 20)
res4 = @scachejld testing_cache_dir fill(res3[1,1,1], 20, 20, 20)
res = fill(0.0, 20, 20, 20)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
rm(testing_cache_dir; recursive = true)
@testset "no dir" begin
# test @scache
res1 = @scache fill(0.0, 10, 10, 10)
@test isdir(SimpleCaching.settings.cache_dir)
res2 = @scache fill(0.0, 10, 10, 10)
res3 = @scache fill(Float64(1 - 1), 10, 10, 10)
res4 = @scache fill(res3[1,1,1], 10, 10, 10)
res = fill(0.0, 10, 10, 10)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
# test @scachejld
res1 = @scachejld fill(0.0, 20, 20, 20)
res2 = @scachejld fill(0.0, 20, 20, 20)
res3 = @scachejld fill(Float64(1 - 1), 20, 20, 20)
res4 = @scachejld fill(res3[1,1,1], 20, 20, 20)
res = fill(0.0, 20, 20, 20)
@test res1 == res
@test res2 == res
@test res3 == res
@test res4 == res
rm(SimpleCaching.settings.cache_dir; recursive = true)
end
end
@testset "conditionals" begin
# test @scache_if
res = fill(0.0, 10, 10, 10)
res1 = @scache_if false cached_type testing_cache_dir fill(0.0, 10, 10, 10)
@test !isdir(testing_cache_dir)
@test res1 == res
res1 = @scache_if true cached_type testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
@test res1 == res
rm(testing_cache_dir; recursive = true)
# test @scachejld_if
res = fill(0.0, 10, 10, 10)
res1 = @scachejld_if false cached_type testing_cache_dir fill(0.0, 10, 10, 10)
@test !isdir(testing_cache_dir)
@test res1 == res
res1 = @scachejld_if true cached_type testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
@test res1 == res
rm(testing_cache_dir; recursive = true)
@testset "no type" begin
# test @scache_if
res = fill(0.0, 10, 10, 10)
res1 = @scache_if false testing_cache_dir fill(0.0, 10, 10, 10)
@test !isdir(testing_cache_dir)
@test res1 == res
res1 = @scache_if true testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
@test res1 == res
rm(testing_cache_dir; recursive = true)
# test @scachejld_if
res = fill(0.0, 10, 10, 10)
res1 = @scachejld_if false testing_cache_dir fill(0.0, 10, 10, 10)
@test !isdir(testing_cache_dir)
@test res1 == res
res1 = @scachejld_if true testing_cache_dir fill(0.0, 10, 10, 10)
@test isdir(testing_cache_dir)
@test res1 == res
rm(testing_cache_dir; recursive = true)
@testset "no dir" begin
# test @scache_if
res = fill(0.0, 10, 10, 10)
res1 = @scache_if false fill(0.0, 10, 10, 10)
@test !isdir(SimpleCaching.settings.cache_dir)
@test res1 == res
res1 = @scache_if true fill(0.0, 10, 10, 10)
@test isdir(SimpleCaching.settings.cache_dir)
@test res1 == res
rm(SimpleCaching.settings.cache_dir; recursive = true)
# test @scachejld_if
res = fill(0.0, 10, 10, 10)
res1 = @scachejld_if false fill(0.0, 10, 10, 10)
@test !isdir(SimpleCaching.settings.cache_dir)
@test res1 == res
res1 = @scachejld_if true fill(0.0, 10, 10, 10)
@test isdir(SimpleCaching.settings.cache_dir)
@test res1 == res
rm(SimpleCaching.settings.cache_dir; recursive = true)
end
end
end
# test using macro within another module
using .A
res = fill(0.0, 20, 20, 20)
hc = heavy_computation(cached_type, testing_cache_dir, 0.0, 20, 20, 20)
@test hc == res
# test with local variables
begin
local n = 10
@scache cached_type testing_cache_dir vcat(fill(1, n), fill(2, 2n))
end
end
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | docs | 3741 | # SimpleCaching
[](https://ferdiu.github.io/SimpleCaching.jl/stable)
[](https://ferdiu.github.io/SimpleCaching.jl/dev)
[](https://cirrus-ci.com/github/ferdiu/SimpleCaching.jl)
This package provides two macros used to cache result(s) of function calls.
The cached file will survive the julia session so it will be automatically loaded from the
disk even after a restart of the julia session.
# Usage
## Installation
```Julia
using Pkg
Pkg.add(url="https://github.com/ferdiu/SimpleCaching.jl.git")
using SimpleCaching
```
## Caching function result using Serialize
For large files or complicated structures it is advised to cache results using the macro
`@scache`, which provides faster serialization and smaller files on disk at the cost of less
portability (see [Serialization](https://docs.julialang.org/en/v1/stdlib/Serialization/)).
```Julia
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:42 ] Computing cute-cube...
● [ 2022-12-09 15:39:42 ] Computed cute-cube in 0.009 seconds (00:00:00)
● [ 2022-12-09 15:39:44 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:56 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
### Conditional caching
It is possible to cache the result of a function call based on the value returned by an
expression using the macro "@scache_if" as follows:
```Julia
julia> @scache_if true "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:41:54 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
but passing a `false` `condition` (note there is no loading log):
```Julia
julia> @scache_if false "cute-cube" "./" fill(0, 3, 3, 3)
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
just substitute an expression for `true` and `false`.
## Caching function result using JLD2
```Julia
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true;
julia> @scachejld "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:40:45 ] Computing cute-cube...
● [ 2022-12-09 15:40:45 ] Computed cute-cube in 0.0 seconds (00:00:00)
● [ 2022-12-09 15:40:45 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
julia> @scachejld "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:40:47 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
Conditional caching is possible for JLD2 macro using `@scachejld_if`.
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | docs | 2670 | ```@meta
CurrentModule = SimpleCaching
```
# SimpleCaching
```@contents
```
This package provides two macros used to cache result(s) of function calls.
The cached file will survive the julia session so it will be automatically loaded from the
disk even after a restart of the julia session.
#### Differences with other packages
Most packages that provide caching functionality offers macros that are strictly bounded to
function definitions (e.g. [Caching.jl](https://github.com/zgornel/Caching.jl))); this
means that if, for example, a `println` is added to a function definition the cached file
will not be loaded. To work around this problem, the macros exposed by this package only use
the function name and the value of the parameters at the time of the call to determine if
the result can be loaded from the cache, if any.
## Installation
This packages is registered so you can install it by executing the following commands in a
Julia REPL:
```julia
import Pkg
Pkg.add("SimpleCaching")
```
or, to install the developement version, run:
```julia
import Pkg
Pkg.add("https://github.com/ferdiu/SimpleCaching.jl#dev")
```
## Usage
Start using the macros as follow:
```julia-repl
julia> using SimpleCaching
julia> SimpleCaching.settings.log = true; # log when saving/loading cache
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:42 ] Computing cute-cube...
● [ 2022-12-09 15:39:42 ] Computed cute-cube in 0.009 seconds (00:00:00)
● [ 2022-12-09 15:39:44 ] Saving cute-cube to file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld[.tmp]...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
If the same function is called again with the same parameters, the cached result will be
loaded instead of performing the calculation again.
The cache is saved on disk, so it will be loaded even in subsequent Julia sessions, making
these macros particularly useful when executing long calculations inside loops within
Julia scripts.
```julia-repl
julia> @scache "cute-cube" "./" fill(0, 3, 3, 3)
● [ 2022-12-09 15:39:56 ] Loading cute-cube from file ./cute-cube_4bbf9c2851f2c2b3954448f1a8085f6e3d40085add71f19640343885a8b7bd6a.jld...
3×3×3 Array{Int64, 3}:
[:, :, 1] =
0 0 0
0 0 0
0 0 0
[:, :, 2] =
0 0 0
0 0 0
0 0 0
[:, :, 3] =
0 0 0
0 0 0
0 0 0
```
See [`Macros`](@ref man-macros) section for more details on how to use them.
## Settings
Some behaviour of the macros can be tweaked by changing the values of the settings:
```@docs
SimpleCachingSettings
```
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.2.3 | 5083373d67f46fead7a24efc5852c5f3306a1d2e | docs | 729 | ```@meta
CurrentModule = SimpleCaching
```
# [Macros](@id man-macros)
All the macros can be described as one:
```julia
@scache[jld][_if condition] [[type] cache_dir] function_call
```
when `jld` is specified right after `@scache`, `JLD2` will be used instead of
`Serialization`; when `_if condition` is specified right after `@scache[jld]`, caching
will be performed only if `condition` evaluates to `true`.
For more details consult [`Caching`](@ref man-macros-caching) and
[`Conditional caching`](@ref man-macros-conditional-caching) sections below.
## [Caching](@id man-macros-caching)
```@docs
@scache
@scachejld
```
## [Conditional caching](@id man-macros-conditional-caching)
```@docs
@scache_if
@scachejld_if
```
| SimpleCaching | https://github.com/ferdiu/SimpleCaching.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 557 | using DateShifting
using Documenter
# Build the documentation site with Documenter.
makedocs(;
    modules=[DateShifting],
    authors="Dilum Aluthge, contributors",
    repo="https://github.com/JuliaHealth/DateShifting.jl/blob/{commit}{path}#L{line}",
    sitename="DateShifting.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI, so local builds stay browsable from disk.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://JuliaHealth.github.io/DateShifting.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
    # Fail the build on warnings (missing docstrings, broken cross-refs, ...).
    strict = true,
)

# Push the generated site to GitHub Pages.
deploydocs(;
    repo="github.com/JuliaHealth/DateShifting.jl",
)
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 270 | module DateShifting

# De-identification helpers that shift datetimes while preserving their
# relative ordering.

export datetime_intervals
export sequence_and_random_date_shift

include("types.jl")                        # AlwaysAssertionError
include("public.jl")                       # forward declarations of the public API
include("assert.jl")                       # always_assert
include("compute_interval_nonrounded.jl")  # _compute_interval_nonrounded
include("date_shifting.jl")                # sequence_and_random_date_shift methods
include("intervals.jl")                    # datetime_intervals methods
include("sample.jl")                       # _draw_sample
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 161 | function always_assert(cond::Bool,
                       msg::String = "")
    # Unlike `@assert`, this check can never be compiled away, so it is safe to
    # use for runtime invariants. Throws `AlwaysAssertionError(msg)` on failure.
    cond || throw(AlwaysAssertionError(msg))
    return nothing
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 278 | import Dates
# Return the (non-rounded) elapsed period from `start_datetime` to
# `current_datetime`. Throws `ArgumentError` when `current_datetime` precedes
# `start_datetime`.
function _compute_interval_nonrounded(start_datetime, current_datetime)
    start_datetime <= current_datetime ||
        throw(ArgumentError("Start datetime must be less than or equal to current datetime"))
    return current_datetime - start_datetime
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 8604 | import Dates
import Distributions
import Random
import TimeZones
# Keyword-argument documentation shared (via interpolation) by the docstrings of
# both `sequence_and_random_date_shift` methods. The annotations previously read
# `day::::Union{...}` (a doubled `::`); fixed to the intended `day::Union{...}`.
const _kwargs_docstring_for_sequence_and_random_date_shift = """
- `round_to::Dates.Period`: Resolution to which all intervals should be rounded.
- `day::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Day` shift amount will be sampled.
- `hour::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Hour` shift amount will be sampled.
- `minute::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Minute` shift amount will be sampled.
- `second::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Second` shift amount will be sampled.
- `millisecond::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Millisecond` shift amount will be sampled.
- `microsecond::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Microsecond` shift amount will be sampled.
- `nanosecond::Union{Distributions.Sampleable, Nothing}`: Probability distribution from which the `Nanosecond` shift amount will be sampled.
"""
"""
sequence_and_random_date_shift(rng::Random.AbstractRNG,
input_dt_list::Vector{Dates.DateTime};
kwargs...)
## Arguments
- `rng::Random.AbstractRNG`: Random number generator that will be used for sampling.
- `input_dt_list::Vector{Dates.DateTime}`: A vector of `DateTime`s.
## Keyword Arguments
- `time_zone::Dates.TimeZone`: Time zone for the input dates.
$(_kwargs_docstring_for_sequence_and_random_date_shift)
## Example
```jldoctest; setup = :(import Random; Random.seed!(123))
julia> using Dates
julia> using DateShifting
julia> using Distributions
julia> using Random
julia> using TimeZones
julia> dates = [
DateTime("2000-01-01T00:00:00"),
DateTime("2000-02-01T00:00:00"),
DateTime("2000-01-05T00:00:00"),
DateTime("2000-01-02T04:05:06"),
DateTime("2000-01-02T01:02:03"),
]
5-element Array{DateTime,1}:
2000-01-01T00:00:00
2000-02-01T00:00:00
2000-01-05T00:00:00
2000-01-02T04:05:06
2000-01-02T01:02:03
julia> sequence, shifted_dates = sequence_and_random_date_shift(
Random.GLOBAL_RNG,
dates;
round_to = Day(1),
time_zone = TimeZone("America/New_York"),
day = DiscreteUniform(-31, 31),
hour = DiscreteUniform(-24, 24),
minute = DiscreteUniform(-60, 60),
second = DiscreteUniform(-60, 60),
millisecond = DiscreteUniform(-1000, 1000),
microsecond = DiscreteUniform(-1000, 1000),
nanosecond = DiscreteUniform(-1000, 1000),
)
([1, 5, 4, 3, 2], TimeZones.ZonedDateTime[ZonedDateTime(1999, 12, 5, tz"America/New_York"), ZonedDateTime(2000, 1, 5, tz"America/New_York"), ZonedDateTime(1999, 12, 9, tz"America/New_York"), ZonedDateTime(1999, 12, 6, tz"America/New_York"), ZonedDateTime(1999, 12, 6, tz"America/New_York")])
julia> sequence
5-element Array{Int64,1}:
1
5
4
3
2
julia> shifted_dates
5-element Array{ZonedDateTime,1}:
1999-12-05T00:00:00-05:00
2000-01-05T00:00:00-05:00
1999-12-09T00:00:00-05:00
1999-12-06T00:00:00-05:00
1999-12-06T00:00:00-05:00
```
"""
# Convert the plain `DateTime`s to `ZonedDateTime`s in `time_zone`, then
# delegate to the `ZonedDateTime` method (see its docstring for the contract).
function sequence_and_random_date_shift(rng::Random.AbstractRNG,
                                        input_dt_list::Vector{Dates.DateTime};
                                        time_zone::Dates.TimeZone,
                                        kwargs...)
    zoned = map(dt -> TimeZones.ZonedDateTime(dt, time_zone), input_dt_list)
    return sequence_and_random_date_shift(rng, zoned; time_zone = time_zone, kwargs...)
end
"""
sequence_and_random_date_shift(rng::Random.AbstractRNG,
input_zdt_list::Vector{TimeZones.ZonedDateTime};
kwargs...)
## Arguments
- `rng::Random.AbstractRNG`: Random number generator that will be used for sampling.
- `input_zdt_list::Vector{TimeZones.ZonedDateTime}`: A vector of `ZonedDateTime`s.
## Keyword Arguments
- `time_zone::Dates.TimeZone`: Time zone to which all dates should be converted.
$(_kwargs_docstring_for_sequence_and_random_date_shift)
## Example
```jldoctest; setup = :(import Random; Random.seed!(123))
julia> using Dates
julia> using DateShifting
julia> using Distributions
julia> using Random
julia> using TimeZones
julia> dates = [
ZonedDateTime(DateTime("2000-01-01T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-02-01T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-01-05T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-01-02T03:05:06"), tz"America/Chicago"),
ZonedDateTime(DateTime("2000-01-02T01:02:03"), tz"America/New_York"),
]
5-element Array{ZonedDateTime,1}:
2000-01-01T00:00:00-05:00
2000-02-01T00:00:00-05:00
2000-01-05T00:00:00-05:00
2000-01-02T03:05:06-06:00
2000-01-02T01:02:03-05:00
julia> sequence, shifted_dates = sequence_and_random_date_shift(
Random.GLOBAL_RNG,
dates;
round_to = Day(1),
time_zone = TimeZone("America/New_York"),
day = DiscreteUniform(-31, 31),
hour = DiscreteUniform(-24, 24),
minute = DiscreteUniform(-60, 60),
second = DiscreteUniform(-60, 60),
millisecond = DiscreteUniform(-1000, 1000),
microsecond = DiscreteUniform(-1000, 1000),
nanosecond = DiscreteUniform(-1000, 1000),
)
([1, 5, 4, 3, 2], TimeZones.ZonedDateTime[ZonedDateTime(1999, 12, 5, tz"America/New_York"), ZonedDateTime(2000, 1, 5, tz"America/New_York"), ZonedDateTime(1999, 12, 9, tz"America/New_York"), ZonedDateTime(1999, 12, 6, tz"America/New_York"), ZonedDateTime(1999, 12, 6, tz"America/New_York")])
julia> sequence
5-element Array{Int64,1}:
1
5
4
3
2
julia> shifted_dates
5-element Array{ZonedDateTime,1}:
1999-12-05T00:00:00-05:00
2000-01-05T00:00:00-05:00
1999-12-09T00:00:00-05:00
1999-12-06T00:00:00-05:00
1999-12-06T00:00:00-05:00
```
"""
function sequence_and_random_date_shift(rng::Random.AbstractRNG,
                                        input_zdt_list::Vector{TimeZones.ZonedDateTime};
                                        round_to::Dates.Period,
                                        time_zone::Dates.TimeZone,
                                        day::Union{Distributions.Sampleable, Nothing},
                                        hour::Union{Distributions.Sampleable, Nothing},
                                        minute::Union{Distributions.Sampleable, Nothing},
                                        second::Union{Distributions.Sampleable, Nothing},
                                        millisecond::Union{Distributions.Sampleable, Nothing},
                                        microsecond::Union{Distributions.Sampleable, Nothing},
                                        nanosecond::Union{Distributions.Sampleable, Nothing})
    # Rank order of the inputs (sequence[1] = index of the earliest datetime).
    sequence = sortperm(input_zdt_list)
    input_zdt_list_sametimezone = [TimeZones.astimezone(x, time_zone) for x in input_zdt_list]
    # Converting everything to a common time zone must not change the ordering;
    # this assert guards that invariant.
    always_assert(sortperm(input_zdt_list_sametimezone) == sequence)
    # Draw one random shift per period resolution; a `nothing` distribution
    # contributes a zero shift (see `_draw_sample`).
    day_shift_amount = _draw_sample(rng, Dates.Day, day)
    hour_shift_amount = _draw_sample(rng, Dates.Hour, hour)
    minute_shift_amount = _draw_sample(rng, Dates.Minute, minute)
    second_shift_amount = _draw_sample(rng, Dates.Second, second)
    millisecond_shift_amount = _draw_sample(rng, Dates.Millisecond, millisecond)
    microsecond_shift_amount = _draw_sample(rng, Dates.Microsecond, microsecond)
    nanosecond_shift_amount = _draw_sample(rng, Dates.Nanosecond, nanosecond)
    # The SAME total shift is applied to every datetime, so pairwise intervals
    # are preserved (exactly before rounding, approximately after).
    total_shift_amount = day_shift_amount + hour_shift_amount +
                         minute_shift_amount +
                         second_shift_amount +
                         millisecond_shift_amount +
                         microsecond_shift_amount +
                         nanosecond_shift_amount
    shifted_dates_nonrounded = [x + total_shift_amount for x in input_zdt_list_sametimezone]
    shifted_dates = [round(x, round_to) for x in shifted_dates_nonrounded]
    # Sanity checks: every output datetime carries the requested time zone.
    always_assert(all(TimeZones.timezone.(shifted_dates_nonrounded) .== time_zone))
    always_assert(all(TimeZones.timezone.(shifted_dates) .== time_zone))
    return sequence, shifted_dates
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 3252 | import Dates
import TimeZones
"""
datetime_intervals(input_dt_list::Vector{Dates.DateTime};
round_to::Dates.Period)
## Arguments
- `input_dt_list::Vector{Dates.DateTime}`: A vector of `DateTime`s.
## Keyword Arguments
- `round_to::Dates.Period`: Resolution to which all intervals should be rounded.
## Example
```jldoctest
julia> using Dates
julia> using DateShifting
julia> dates = [
DateTime("2000-01-01T00:00:00"),
DateTime("2000-02-01T00:00:00"),
DateTime("2000-01-05T00:00:00"),
DateTime("2000-01-02T04:05:06"),
DateTime("2000-01-02T01:02:03"),
]
5-element Array{DateTime,1}:
2000-01-01T00:00:00
2000-02-01T00:00:00
2000-01-05T00:00:00
2000-01-02T04:05:06
2000-01-02T01:02:03
julia> sequence, intervals = datetime_intervals(dates; round_to = Day(1))
([1, 5, 4, 3, 2], Dates.Day[0 days, 31 days, 4 days, 1 day, 1 day])
julia> sequence
5-element Array{Int64,1}:
1
5
4
3
2
julia> intervals
5-element Array{Day,1}:
0 days
31 days
4 days
1 day
1 day
```
"""
# Attach the UTC time zone to each `DateTime` and delegate to the
# `ZonedDateTime` method.
function datetime_intervals(input_dt_list::Vector{Dates.DateTime};
                            round_to::Dates.Period)
    utc = Dates.TimeZone("UTC")
    zoned = map(dt -> TimeZones.ZonedDateTime(dt, utc), input_dt_list)
    return datetime_intervals(zoned; round_to = round_to)
end
"""
datetime_intervals(input_zdt_list::Vector{TimeZones.ZonedDateTime};
round_to::Dates.Period)
## Arguments
- `input_zdt_list::Vector{TimeZones.ZonedDateTime}`: A vector of `ZonedDateTime`s.
## Keyword Arguments
- `round_to::Dates.Period`: Resolution to which all intervals should be rounded.
## Example
```jldoctest
julia> using Dates
julia> using DateShifting
julia> using TimeZones
julia> dates = [
ZonedDateTime(DateTime("2000-01-01T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-02-01T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-01-05T00:00:00"), tz"America/New_York"),
ZonedDateTime(DateTime("2000-01-02T03:05:06"), tz"America/Chicago"),
ZonedDateTime(DateTime("2000-01-02T01:02:03"), tz"America/New_York"),
]
5-element Array{ZonedDateTime,1}:
2000-01-01T00:00:00-05:00
2000-02-01T00:00:00-05:00
2000-01-05T00:00:00-05:00
2000-01-02T03:05:06-06:00
2000-01-02T01:02:03-05:00
julia> sequence, intervals = datetime_intervals(dates; round_to = Day(1))
([1, 5, 4, 3, 2], Dates.Day[0 days, 31 days, 4 days, 1 day, 1 day])
julia> sequence
5-element Array{Int64,1}:
1
5
4
3
2
julia> intervals
5-element Array{Day,1}:
0 days
31 days
4 days
1 day
1 day
```
"""
function datetime_intervals(input_zdt_list::Vector{TimeZones.ZonedDateTime};
                            round_to::Dates.Period)
    sequence = sortperm(input_zdt_list)
    # `first(sequence)` is the index of the earliest input datetime. The
    # previous implementation used `findmin(sequence)[2]`, which yields the
    # *rank* of the first input element rather than the index of the earliest
    # one; it was only correct when the earliest datetime happened to be listed
    # first, and otherwise made `_compute_interval_nonrounded` throw an
    # ArgumentError for perfectly valid inputs.
    index_of_input_start_zoned_datetime = first(sequence)
    input_start_zoned_datetime = input_zdt_list[index_of_input_start_zoned_datetime]
    # Interval from the earliest datetime to each input, rounded to `round_to`.
    intervals_nonrounded = [_compute_interval_nonrounded(input_start_zoned_datetime, x) for x in input_zdt_list]
    intervals = [round(interval, round_to) for interval in intervals_nonrounded]
    return sequence, intervals
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 76 | function datetime_intervals end
# ^ Forward declarations of the public API so docstrings/exports can attach
# here; the methods themselves are defined in `intervals.jl` and
# `date_shifting.jl`.
function sequence_and_random_date_shift end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 403 | import Distributions
import Random
# No distribution supplied for this period type: the corresponding shift is the
# zero period (e.g. `Day(0)`). `rng` is intentionally unused here.
function _draw_sample(rng::Random.AbstractRNG, T::Type, distribution::Nothing)
    return zero(T)
end
# Draw one numerical sample from `distribution` (using `rng`) and wrap it in
# the period type `T` (e.g. `Dates.Day`).
function _draw_sample(rng::Random.AbstractRNG, T::Type, distribution::Distributions.Sampleable)
    return T(rand(rng, distribution))
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 48 | struct AlwaysAssertionError <: Exception
    # Error thrown by `always_assert` when its condition is false. Subtyping
    # `Exception` (previously missing) lets it participate in Julia's error
    # machinery (`showerror`, typed `catch` logic, `Test.@test_throws`).
    msg::String
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | code | 6523 | import Dates
import DateShifting
import Distributions
import Random
import TimeZones
import Test
Test.@testset "DateShifting.jl" begin
Test.@testset "assert.jl" begin
Test.@test nothing == Test.@test_nowarn DateShifting.always_assert(1 == 1)
Test.@test_throws DateShifting.AlwaysAssertionError DateShifting.always_assert(1 == 2)
end
Test.@testset "compute_interval_nonrounded.jl" begin
Test.@test DateShifting._compute_interval_nonrounded(Dates.DateTime("2000-01-01"), Dates.DateTime("2000-01-02")) == Dates.Day(1)
Test.@test_throws ArgumentError DateShifting._compute_interval_nonrounded(Dates.DateTime("2000-01-02"), Dates.DateTime("2000-01-01"))
end
Test.@testset "sample.jl" begin
Test.@test DateShifting._draw_sample(Random.GLOBAL_RNG, Dates.Day, nothing) == Dates.Day(0)
end
Test.@testset "intervals.jl" begin
datetime_list = [
Dates.DateTime("1900-01-01T00:00:00"),
Dates.DateTime("1901-01-01T00:00:00"),
Dates.DateTime("1900-02-01T00:00:00"),
Dates.DateTime("1900-01-01T00:00:00"),
Dates.DateTime("1900-01-03T00:00:00"),
Dates.DateTime("1900-01-05T00:00:00"),
Dates.DateTime("1900-01-02T00:00:00"),
Dates.DateTime("1900-01-01T00:00:00.001"),
Dates.DateTime("1900-01-01T00:20:00"),
Dates.DateTime("1900-01-01T00:30:00"),
]
zoneddatetime_list = [TimeZones.ZonedDateTime(x, TimeZones.tz"America/New_York") for x in datetime_list]
expected_sequence = [1, 4, 8, 9, 10, 7, 5, 6, 3, 2]
expected_intervals_round_to = Dict()
expected_intervals_round_to[Dates.Day(1)] = [Dates.Day(0), Dates.Day(365), Dates.Day(31), Dates.Day(0), Dates.Day(2), Dates.Day(4), Dates.Day(1), Dates.Day(0), Dates.Day(0), Dates.Day(0)]
expected_intervals_round_to[Dates.Day(2)] = [Dates.Day(0), Dates.Day(366), Dates.Day(32), Dates.Day(0), Dates.Day(2), Dates.Day(4), Dates.Day(2), Dates.Day(0), Dates.Day(0), Dates.Day(0)]
expected_intervals_round_to[Dates.Hour(1)] = [Dates.Hour(0), Dates.Hour(8760), Dates.Hour(744), Dates.Hour(0), Dates.Hour(48), Dates.Hour(96), Dates.Hour(24), Dates.Hour(0), Dates.Hour(0), Dates.Hour(1)]
expected_intervals_round_to[Dates.Hour(2)] = [Dates.Hour(0), Dates.Hour(8760), Dates.Hour(744), Dates.Hour(0), Dates.Hour(48), Dates.Hour(96), Dates.Hour(24), Dates.Hour(0), Dates.Hour(0), Dates.Hour(0)]
expected_intervals_round_to[Dates.Minute(1)] = [Dates.Minute(0), Dates.Minute(525600), Dates.Minute(44640), Dates.Minute(0), Dates.Minute(2880), Dates.Minute(5760), Dates.Minute(1440), Dates.Minute(0), Dates.Minute(20), Dates.Minute(30)]
expected_intervals_round_to[Dates.Minute(2)] = [Dates.Minute(0), Dates.Minute(525600), Dates.Minute(44640), Dates.Minute(0), Dates.Minute(2880), Dates.Minute(5760), Dates.Minute(1440), Dates.Minute(0), Dates.Minute(20), Dates.Minute(30)]
expected_intervals_round_to[Dates.Second(1)] = [Dates.Second(0), Dates.Second(31536000), Dates.Second(2678400), Dates.Second(0), Dates.Second(172800), Dates.Second(345600), Dates.Second(86400), Dates.Second(0), Dates.Second(1200), Dates.Second(1800)]
expected_intervals_round_to[Dates.Second(2)] = [Dates.Second(0), Dates.Second(31536000), Dates.Second(2678400), Dates.Second(0), Dates.Second(172800), Dates.Second(345600), Dates.Second(86400), Dates.Second(0), Dates.Second(1200), Dates.Second(1800)]
for round_to_period in keys(expected_intervals_round_to)
for input in Any[datetime_list, zoneddatetime_list]
observed_sequence, observed_intervals = DateShifting.datetime_intervals(input;
round_to = round_to_period)
Test.@test observed_sequence == expected_sequence
Test.@test observed_intervals == expected_intervals_round_to[round_to_period]
end
end
end
Test.@testset "public_sequence_and_random_date_shift.jl" begin
datetime_list = [
Dates.DateTime("1900-01-01T00:00:00"),
Dates.DateTime("1901-01-01T00:00:00"),
Dates.DateTime("1900-02-01T00:00:00"),
Dates.DateTime("1900-01-01T00:00:00"),
Dates.DateTime("1900-01-03T00:00:00"),
Dates.DateTime("1900-01-05T00:00:00"),
Dates.DateTime("1900-01-02T00:00:00"),
Dates.DateTime("1900-01-01T00:20:00"),
Dates.DateTime("1900-01-01T00:30:00"),
]
zoneddatetime_list = [TimeZones.ZonedDateTime(x, TimeZones.tz"America/Los_Angeles") for x in datetime_list]
expected_sequence = [1, 4, 8, 9, 7, 5, 6, 3, 2]
round_to_periods = Dates.Period[
Dates.Second(1),
Dates.Second(2),
Dates.Second(3),
Dates.Second(5),
Dates.Second(10),
Dates.Millisecond(1),
Dates.Millisecond(2),
Dates.Millisecond(3),
Dates.Millisecond(5),
Dates.Millisecond(10),
Dates.Millisecond(100),
Dates.Millisecond(500),
Dates.Millisecond(1000),
]
for round_to_period in round_to_periods
for input in Any[datetime_list, zoneddatetime_list]
observed_sequence, observed_shifted_dates = DateShifting.sequence_and_random_date_shift(
Random.GLOBAL_RNG,
input;
round_to = round_to_period,
time_zone = TimeZones.TimeZone("America/Chicago"),
day = Distributions.DiscreteUniform(-31, 31),
hour = Distributions.DiscreteUniform(-24, 24),
minute = Distributions.DiscreteUniform(-60, 60),
second = Distributions.DiscreteUniform(-60, 60),
millisecond = Distributions.DiscreteUniform(-1000, 1000),
microsecond = Distributions.DiscreteUniform(-1000, 1000),
nanosecond = nothing,
)
Test.@test observed_sequence == expected_sequence
Test.@test sortperm(input) == observed_sequence
Test.@test sortperm(input) == expected_sequence
Test.@test sortperm(observed_shifted_dates) == observed_sequence
Test.@test sortperm(observed_shifted_dates) == expected_sequence
end
end
end
end
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | docs | 745 | # DateShifting
[](https://juliahealth.org/DateShifting.jl/stable/)
[](https://juliahealth.org/DateShifting.jl/dev/)
[](https://github.com/JuliaHealth/DateShifting.jl/actions)
[](https://codecov.io/gh/JuliaHealth/DateShifting.jl)
The DateShifting package implements several methods for reducing re-identification risk while preserving temporal relationships in a data set.
Please [see the documentation](https://juliahealth.org/DateShifting.jl/dev/).
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 0.1.0 | eee6928c30d119fd4ac1d8ded99f87987512217d | docs | 116 | ```@meta
CurrentModule = DateShifting
```
# DateShifting
```@index
```
```@autodocs
Modules = [DateShifting]
```
| DateShifting | https://github.com/JuliaHealth/DateShifting.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | code | 4608 | module Rfam

import Gzip_jll
import Tar
using Downloads: download
using Preferences: @set_preferences!, @load_preference
#using CodecZlib: GzipDecompressorStream

include("preferences.jl")

# Make access to the local Rfam file cache thread-safe: every helper that
# downloads/extracts files serializes on this lock.
const RFAM_LOCK = ReentrantLock()

# Base URL of the EBI Rfam mirror for the configured database version.
function base_url(; rfam_version = get_rfam_version())
    return "https://ftp.ebi.ac.uk/pub/databases/Rfam/$rfam_version"
end

# Local cache directory for the configured Rfam version (created if missing).
function version_dir(; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    return mkpath(joinpath(rfam_dir, rfam_version))
end

# Local cache directory for per-family FASTA files (created if missing).
function fasta_dir(; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    return mkpath(joinpath(version_dir(; rfam_dir, rfam_version), "fasta_files"))
end

"""
    fasta_file(family_id)

Returns local path to `.fasta` file of `family_id`.
The file is downloaded (and gunzipped) on first use, then served from the cache.
"""
function fasta_file(family_id::AbstractString; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    lock(RFAM_LOCK) do
        local_path = joinpath(fasta_dir(; rfam_dir, rfam_version), "$family_id.fa")
        if !isfile(local_path)
            @info "Downloading $family_id to $local_path ..."
            rfam_base_url = base_url(; rfam_version)
            url = "$rfam_base_url/fasta_files/$family_id.fa.gz"
            download(url, local_path * ".gz"; timeout = Inf)
            gunzip(local_path * ".gz")
        end
        return local_path
    end
end

"""
    cm()

Returns the path to `Rfam.cm` file containing the covariance models of all the families.
Downloaded (and gunzipped) on first use, then served from the cache.
"""
function cm(; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    lock(RFAM_LOCK) do
        local_path = joinpath(version_dir(; rfam_dir, rfam_version), "Rfam.cm")
        if !isfile(local_path)
            @info "Downloading Rfam.cm to $local_path ..."
            rfam_base_url = base_url(; rfam_version)
            download("$rfam_base_url/Rfam.cm.gz", "$local_path.gz"; timeout = Inf)
            gunzip(local_path * ".gz")
        end
        return local_path
    end
end

"""
    clanin()

Returns the path to `Rfam.clanin`.
Downloaded on first use (the server file is not compressed), then cached.
"""
function clanin(; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    lock(RFAM_LOCK) do
        local_path = joinpath(version_dir(; rfam_dir, rfam_version), "Rfam.clanin")
        if !isfile(local_path)
            @info "Downloading Rfam.clanin to $local_path ..."
            rfam_base_url = base_url(; rfam_version)
            download("$rfam_base_url/Rfam.clanin", "$local_path"; timeout = Inf)
        end
        return local_path
    end
end

"""
    seed()

Returns the path to `Rfam.seed`.
Downloaded (and gunzipped) on first use, then served from the cache.
"""
function seed(; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    lock(RFAM_LOCK) do
        local_path = joinpath(version_dir(; rfam_dir, rfam_version), "Rfam.seed")
        if !isfile(local_path)
            @info "Downloading Rfam.seed to $local_path ..."
            rfam_base_url = base_url(; rfam_version)
            download("$rfam_base_url/Rfam.seed.gz", "$local_path.gz"; timeout = Inf)
            gunzip("$local_path.gz")
        end
        return local_path
    end
end

"""
    seed_tree(family_id)

Returns the path to the `.seed_tree` of the family.
The full `Rfam.seed_tree` archive is downloaded and extracted on first use.
"""
function seed_tree(family_id::AbstractString; rfam_dir = get_rfam_directory(), rfam_version = get_rfam_version())
    lock(RFAM_LOCK) do
        local_path = joinpath(version_dir(; rfam_dir, rfam_version), "Rfam.seed_tree")
        if !isdir(local_path)
            @info "Downloading Rfam.seed_tree.tar.gz to $local_path ..."
            rfam_base_url = base_url(; rfam_version)
            download("$rfam_base_url/Rfam.seed_tree.tar.gz", "$local_path.tar.gz"; timeout = Inf)
            # Despite its name, `Rfam.seed_tree.tar.gz` appears to be a plain
            # (non-gzipped) Tar archive, so it is handed to `Tar.extract`
            # directly. The archive contains a nested `Rfam.seed_tree`
            # directory, so extract into a temporary location and move that
            # directory into place.
            temp = mktempdir()
            Tar.extract("$local_path.tar.gz", temp)
            mv(joinpath(temp, "Rfam.seed_tree"), local_path)
        end
        return joinpath(local_path, "$family_id.seed_tree")
    end
end

# Decompress a gzip-compressed `file` in place (drops the `.gz` suffix).
gunzip(file::AbstractString) = run(`$(Gzip_jll.gzip()) -d $file`)

# extract a tarball (.tar.gz) to a directory
# function tarball(file::AbstractString)
#     if endswith(file, ".tar.gz")
#         outdir = file[1:end - 7]
#     elseif endswith(file, ".tgz")
#         outdir = file[1:end - 4]
#     else
#         throw(ArgumentError("file name does not end with .tar.gz or .tgz"))
#     end
#     open(GzipDecompressorStream, file) do io
#         Tar.extract(io, outdir)
#     end
# end

end # module
| Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | code | 876 | # Stores downloaded Rfam files
"""
    set_rfam_directory(dir)

Persist `dir` (which must be an existing directory) as the location where
downloaded Rfam files are cached. Throws `ArgumentError` otherwise.
"""
function set_rfam_directory(dir)
    isdir(dir) || throw(ArgumentError("Invalid directory path: $dir"))
    @set_preferences!("RFAM_DIR" => dir)
    @info "RFAM Directory $dir set."
end
"""
    get_rfam_directory()

Return the stored `RFAM_DIR` preference; error if it has not been set.
"""
function get_rfam_directory()
    stored_dir = @load_preference("RFAM_DIR")
    isnothing(stored_dir) && error("RFAM_DIR not set; use `Rfam.set_rfam_directory` to set it")
    return stored_dir
end
# Determines the version of Rfam used
"""
    set_rfam_version(version)

Persist `version` as the Rfam database version to download from.
"""
function set_rfam_version(version)
    @set_preferences!("RFAM_VERSION" => version)
    @info "Rfam version $version set."
end
"""
    get_rfam_version()

Return the stored `RFAM_VERSION` preference; error if it has not been set.
"""
function get_rfam_version()
    stored_version = @load_preference("RFAM_VERSION")
    isnothing(stored_version) && error("RFAM_VERSION not set; use `Rfam.set_rfam_version` to set it")
    return stored_version
end
| Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | code | 111 | import Aqua
import Rfam
using Test: @testset
# Run Aqua.jl's full quality-assurance suite (method ambiguities, type piracy,
# stale deps, etc.) on the Rfam module.
@testset verbose = true "aqua" begin
    Aqua.test_all(Rfam)
end
| Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | code | 249 | import Rfam
using Test: @test, @testset
# Smoke tests for the download helpers: each accessor fetches its file into the
# cache directory configured in runtests.jl and must return an existing local
# path. (These tests hit the network.)
fasta = Rfam.fasta_file("RF00162")
@test isfile(fasta)

cmfile = Rfam.cm()
@test isfile(cmfile)

seed_file = Rfam.seed()
@test isfile(seed_file)

seed_tree = Rfam.seed_tree("RF00162")
@test isfile(seed_tree)
| Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | code | 162 | import Rfam
# Point the package at a throwaway cache directory and a fixed Rfam version so
# the test run is hermetic, then run each test file in its own module so their
# imports and globals stay isolated.
Rfam.set_rfam_directory(mktempdir())
Rfam.set_rfam_version("14.7")
module aqua_tests include("aqua.jl") end
module rfam_tests include("rfam.jl") end
| Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | docs | 912 | # Changelog
All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
## 3.0.0
### Breaking changes
- New preferences functions, no need to restart Julia after changing a preference. Removed constants `RFAM_DIR`, `RFAM_VERSION`.
## v2.0.4
- Add `seed_tree`.
## v2.0.3
- Fix `Rfam.seed` typo in filename.
## v2.0.2
- Use https instead of http.
## v2.0.1
- Fix bug in Rfam.cm [d02d6804b85186018f178af4430eb64e35081366].
## v2.0.0
- Allow `dir`, `version` options, bypassing LocalPreferences.toml.
- Changed some names, `base_url, version_dir, fasta_dir`.
## v1.0.0
- Release v1.0.0.
- This CHANGELOG file.
- Functions in the package always return paths to files only. You must parse and load this files. | Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 3.1.0 | 0e10fdc02183b983f9528151ec638cf05ff09485 | docs | 640 | # Rfam Julia package
Julia package to interface with the [Rfam](https://rfam.org) database. Only takes care of finding, downloading, and returning the path to files from the database (e.g. `Rfam.cm`, fasta files, etc.).
## Installation
This package is registered. Install with:
```julia
import Pkg
Pkg.add("Rfam")
```
This package does not export any symbols.
## Example
```julia
import Rfam
import FASTX
fasta = Rfam.fasta_file("RF00162"); # downloads `RF00162.fasta` file and returns local path
records = collect(FASTX.FASTA.Reader(open(fasta))); # convert to Fasta records
```
## Related
* https://github.com/cossio/Infernal.jl | Rfam | https://github.com/cossio/Rfam.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 3771 | using Revise
using VectorModesolver
using PyCall
using SparseArrays
using PyPlot
using LinearAlgebra
EMpy = pyimport("EMpy")
Numpy = pyimport("numpy")
# Convert a COO-format sparse matrix object exposing 0-based `row`/`col` and
# `data` fields (e.g. a SciPy `coo_matrix` via PyCall) into a Julia
# `SparseMatrixCSC` with 1-based indices.
function convert_to_julia_sparse(python_sparse)
    one_based(ix) = Int64.(ix .+ 1)   # SciPy indices are 0-based; Julia's are 1-based
    return sparse(one_based(python_sparse.row),
                  one_based(python_sparse.col),
                  python_sparse.data)
end
# Build the EMpy vector finite-difference mode-solver matrix for the same
# (λ, x, y, ε) problem and return it as a Julia sparse matrix; used to
# cross-check `assemble` against the reference EMpy implementation.
function getApy(λ,x,y,epsfunc)
    x = Numpy.array(x)
    y = Numpy.array(y)
    # ("0","0","0","0") are EMpy boundary-condition strings — presumably zero
    # boundaries on all four edges; TODO confirm against EMpy docs.
    solver = EMpy.modesolvers.FD.VFDModeSolver(λ, x, y, epsfunc, ("0","0","0","0"))
    mat = solver.build_matrix()
    return convert_to_julia_sparse(mat.tocoo())
end
# Define the domain
# Should return a tuple with (εxx, εxy, εyx, εyy, εzz)
# Functor form of the permittivity: `ε(x, y)` returns the tensor components
# (εxx, εxy, εyx, εyy, εzz) at that point — a 1.0 × 0.6 rectangle of ε = 4 in
# an ε = 1 background. (`εtype` is assumed to be declared elsewhere in the
# file — not visible here.)
function (ε::εtype)(x::Float64, y::Float64)
    if (0.75 < x < 1.75) && (0.95 < y < 1.55)
        return (4.0, 0.0, 0.0, 4.0, 4.0)
    end
    return (1.0, 0.0, 0.0, 1.0, 1.0)
end
ε = εtype()
# Sample ε on the tensor grid (x_, y_) into a NumPy array of shape
# (length(x_), length(y_), 5) — the layout EMpy's VFDModeSolver expects.
function epsfunc(x_, y_)
    eps = Numpy.zeros((length(x_), length(y_), 5))
    for (i, x) in enumerate(x_)
        for (j, y) in enumerate(y_)
            # Component order: 1=εxx, 2=εxy, 3=εyx, 4=εyy, 5=εzz
            eps[i,j,1] = ε(x,y)[1]
            eps[i,j,2] = ε(x,y)[2]
            eps[i,j,3] = ε(x,y)[3]
            eps[i,j,4] = ε(x,y)[4]
            eps[i,j,5] = ε(x,y)[5]
        end
    end
    return eps
end
# Solve the same problem with the reference EMpy solver (same boundary settings
# as `getApy`) and return its list of modes, for comparison with `solve`.
function getempymodes(λ,x,y,epsfunc,neigs, tol)
    x = Numpy.array(x)
    y = Numpy.array(y)
    return EMpy.modesolvers.FD.VFDModeSolver(λ, x, y, epsfunc, ("0","0","0","0")).solve(
        neigs, tol
    ).modes
end
# 1 < - - - - yj- - - - - >
# ^ [1. 1. 1. 1. 1. 1. 1.]
# | [1. 1. 1. 1. 1. 1. 1.]
# | [1. 1. 4. 4. 1. 1. 1.]
# | [1. 1. 4. 4. 1. 1. 1.]
# | [1. 1. 4. 4. 1. 1. 1.]
# xi[1. 1. 4. 4. 1. 1. 1.]
# | [1. 1. 1. 1. 1. 1. 1.]
# | [1. 1. 1. 1. 1. 1. 1.]
# | [1. 1. 1. 1. 1. 1. 1.]
# v [1. 1. 1. 1. 1. 1. 1.]
#
# W
# S + N
# E
# Parameters
λ = 1.55
x = [i for i in 0:0.03:2.5]
y = [i for i in 0:0.05:2.5]
neigs = 1
tol = 1e-8
boundary = (0,0,0,0)
# Define the modesolver
solver = VectorialModesolver(λ,x,y,boundary,ε)
# # Solve for the modes
# A = assemble(solver)
# Apy = getApy(λ,x,y,epsfunc)
# w = (A - Apy) .> 1e-5
# @show findnz(w)[1]
# @show findnz(w)[2]
# sum(abs.(A-Apy))
# sum(abs.(A-Apy))
# open("warntype_output.txt", "w") do f
# redirect_stdout(f) do
# @code_warntype optimize=true assemble(solver)
# end
# end
# a = assemble(solver)
# @code_warntype optimize=true ε(1.0,1.0)
modes = solve(solver, neigs, tol)
# emodes = getempymodes(λ,x,y,epsfunc,neigs, tol)
# # modes = solve(Apy, solver, neigs, tol)
# Generating some dummy data for fields (replace with your real data)
Ex = real.(modes[1].Ex)
Ey = real.(modes[1].Ey)
Ez = imag.(modes[1].Ez)
Hx = real.(modes[1].Hx)
Hy = real.(modes[1].Hy)
Hz = imag.(modes[1].Hz)
xx = x * ones(length(y))'
yy = ones(length(x)) * y'
eps = ((x,y)->ε(x,y)[1]).(xx, yy)
PyPlot.figure(figsize=(16, 6)) # Create a 3x2 layout
# Create the heatmaps
PyPlot.subplot(2, 4, 1)
PyPlot.imshow(Ex, cmap="RdBu")
PyPlot.title("Ex")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 2)
PyPlot.imshow(Ey, cmap="RdBu")
PyPlot.title("Ey")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 3)
PyPlot.imshow(Ez, cmap="RdBu")
PyPlot.title("Ez")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 5)
PyPlot.imshow(Hx, cmap="RdBu")
PyPlot.title("Hx")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 6)
PyPlot.imshow(Hy, cmap="RdBu")
PyPlot.title("Hy")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 7)
PyPlot.imshow(Hz, cmap="RdBu")
PyPlot.title("Hz")
PyPlot.colorbar()
PyPlot.subplot(2, 4, 4)
PyPlot.imshow(eps', cmap="Greys", label="eps")
PyPlot.title("eps")
PyPlot.savefig("/Users/ianhammond/GitHub/VectorModesolver/examples/ims.png") | VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 480 | using VectorModesolver
function ε(x::Float64, y::Float64)
if (0.75 < x < 1.75) && (0.95 < y < 1.55)
return (4.0, 0.0, 0.0, 4.0, 4.0)
end
return (1.0, 0.0, 0.0, 1.0, 1.0)
end
function main()
λ = 1.55
x = [i for i in 0:0.03:2.5]
y = [i for i in 0:0.05:2.5]
neigs = 1
tol = 1e-8
boundary = (0,0,0,0)
solver = VectorialModesolver(λ,x,y,boundary,ε)
modes = solve(solver, neigs, tol)
plot_mode_fields(modes[1])
end
main()
| VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 257 | @with_kw struct Mode
    # A solved waveguide eigenmode: effective index plus field profiles on the
    # (x, y) grid.
    λ::Float64                # wavelength of the solve
    neff::Float64             # effective index of the mode
    x::Array{Float64}         # grid coordinates, first transverse axis
    y::Array{Float64}         # grid coordinates, second transverse axis
    Ex::Array{ComplexF64}     # electric-field components on the grid
    Ey::Array{ComplexF64}
    Ez::Array{ComplexF64}
    Hx::Array{ComplexF64}     # magnetic-field components on the grid
    Hy::Array{ComplexF64}
    Hz::Array{ComplexF64}
end | VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 18661 |
@with_kw struct VectorialModesolver{F}
    # Problem definition for the vectorial finite-difference mode solver:
    # wavelength, tensor-product grid, boundary flags, and a permittivity
    # function ε(x, y) -> (εxx, εxy, εyx, εyy, εzz).
    λ::Float64                          # wavelength
    x::Vector{Float64}                  # grid coordinates along x
    y::Vector{Float64}                  # grid coordinates along y
    boundary::Tuple{Int,Int,Int,Int}    # boundary-condition flags, one per edge (order not documented here — confirm)
    ε::F                                # permittivity function of (x, y)
end
"""
    A = assemble(ms::VectorialModesolver)

Assemble the sparse finite-difference operator `A` for the full-vectorial
eigenproblem `A * [Hx; Hy] = β² * [Hx; Hy]` on the (x, y) grid. Equation
numbers in the comments refer to the formulation of "Vector Finite
Difference Modesolver for Anisotropic Dielectric Waveguides" (see README).
Boundary flags in `ms.boundary` fold the out-of-domain stencil
coefficients back into the domain at the four edges.
"""
function assemble(ms::VectorialModesolver)
    # Initialize matrix and vectors
    λ = ms.λ
    k = 2π / λ
    x = ms.x
    y = ms.y
    nx = length(x)
    ny = length(y)
    A = spzeros(Float64, 2 * nx * ny, 2 * nx * ny)
    # grid spacings, padded at both ends so edge nodes reuse the edge spacing
    diffx::Vector{Float64} = x[2:end] .- x[1:end-1]
    diffy::Vector{Float64} = y[2:end] .- y[1:end-1]
    diffx = reshape(vcat(diffx[1], diffx, diffx[end]), :)
    diffy = reshape(vcat(diffy[1], diffy, diffy[end]), :)
    # cell-center coordinates (where ε is sampled), likewise padded
    xc::Vector{Float64} = (x[1:end-1] + x[2:end]) ./ 2
    yc::Vector{Float64} = (y[1:end-1] + y[2:end]) ./ 2
    xc = reshape(vcat(xc[1], xc, xc[end]), :)
    yc = reshape(vcat(yc[1], yc, yc[end]), :)
    # Loop through all points
    for i ∈ 1:nx
        for j ∈ 1:ny # Tridiagonal
            # Get ε, nesw
            # n/s/e/w are the distances to the north/south/east/west neighbors
            n = diffy[j+1]
            s = diffy[j]
            e = diffx[i+1]
            w = diffx[i]
            # ε sampled in the four cells around node (i, j):
            # 1 = NW, 2 = SW, 3 = SE, 4 = NE
            εxx1::Float64, εxy1::Float64, εyx1::Float64, εyy1::Float64, εzz1::Float64 = ms.ε(xc[i], yc[j+1])
            εxx2::Float64, εxy2::Float64, εyx2::Float64, εyy2::Float64, εzz2::Float64 = ms.ε(xc[i], yc[j])
            εxx3::Float64, εxy3::Float64, εyx3::Float64, εyy3::Float64, εzz3::Float64 = ms.ε(xc[i+1], yc[j])
            εxx4::Float64, εxy4::Float64, εyx4::Float64, εyy4::Float64, εzz4::Float64 = ms.ε(xc[i+1], yc[j+1])
            # distance-weighted permittivity sums shared by several coefficients
            ns21 = n * εyy2 + s * εyy1
            ns34 = n * εyy3 + s * εyy4
            ew14 = e * εxx1 + w * εxx4
            ew23 = e * εxx2 + w * εxx3
            # Eq 21
            axxn = (
                (2 * εyy4 * e - εyx4 * n) * (εyy3 / εzz4) / ns34
                + (2 * εyy1 * w + εyx1 * n) * (εyy2 / εzz1) / ns21
            ) / (n * (e + w))
            # Eq 22
            axxs = (
                (2 * εyy3 * e + εyx3 * s) * (εyy4 / εzz3) / ns34
                + (2 * εyy2 * w - εyx2 * s) * (εyy1 / εzz2) / ns21
            ) / (s * (e + w))
            # Eq 23 transformed
            ayye = (2 * n * εxx4 - e * εxy4) * εxx1 / εzz4 / e / ew14 / (n + s) + (
                2 * s * εxx3 + e * εxy3
            ) * εxx2 / εzz3 / e / ew23 / (n + s)
            # Eq 24 transformed
            ayyw = (2 * εxx1 * n + εxy1 * w) * εxx4 / εzz1 / w / ew14 / (n + s) + (
                2 * εxx2 * s - εxy2 * w
            ) * εxx3 / εzz2 / w / ew23 / (n + s)
            # Eq 23
            axxe = (
                2 / (e * (e + w))
                + (εyy4 * εyx3 / εzz3 - εyy3 * εyx4 / εzz4) / (e + w) / ns34
            )
            # Eq 24
            axxw = (
                2 / (w * (e + w))
                + (εyy2 * εyx1 / εzz1 - εyy1 * εyx2 / εzz2) / (e + w) / ns21
            )
            # Eq 21 transformed
            ayyn = (
                2 / (n * (n + s))
                + (εxx4 * εxy1 / εzz1 - εxx1 * εxy4 / εzz4) / (n + s) / ew14
            )
            # Eq 22 transformed
            ayys = (
                2 / (s * (n + s))
                + (εxx2 * εxy3 / εzz3 - εxx3 * εxy2 / εzz2) / (n + s) / ew23
            )
            # Eq 25
            axxne = +εyx4 * εyy3 / εzz4 / (e + w) / ns34
            axxse = -εyx3 * εyy4 / εzz3 / (e + w) / ns34
            # Eq 26
            axxnw = -εyx1 * εyy2 / εzz1 / (e + w) / ns21
            axxsw = +εyx2 * εyy1 / εzz2 / (e + w) / ns21
            # Eq 25 transformed
            ayyne = +εxy4 * εxx1 / εzz4 / (n + s) / ew14
            ayyse = -εxy3 * εxx2 / εzz3 / (n + s) / ew23
            # Eq 26 transformed
            ayynw = -εxy1 * εxx4 / εzz1 / (n + s) / ew14
            ayysw = +εxy2 * εxx3 / εzz2 / (n + s) / ew23
            # Eq 27
            axxp = (
                - axxn - axxs - axxe - axxw - axxne - axxse - axxnw - axxsw
                + k^2 * (n + s)
                * (εyy4 * εyy3 * e / ns34 + εyy1 * εyy2 * w / ns21)
                / (e + w)
            )
            # Eq 27 transformed
            ayyp = (
                - ayyn - ayys - ayye - ayyw - ayyne - ayyse - ayynw - ayysw
                + k^2 * (e + w)
                * (εxx1 * εxx4 * n / ew14 + εxx2 * εxx3 * s / ew23)
                / (n + s)
            )
            # Eq 28
            axyn = (
                εyy3 * εyy4 / εzz4 / ns34
                - εyy2 * εyy1 / εzz1 / ns21
                + s * (εyy2 * εyy4 - εyy1 * εyy3) / ns21 / ns34
            ) / (e + w)
            # Eq 29
            axys = (
                εyy1 * εyy2 / εzz2 / ns21
                - εyy4 * εyy3 / εzz3 / ns34
                + n * (εyy2 * εyy4 - εyy1 * εyy3) / ns21 / ns34
            ) / (e + w)
            # Eq 28 transformed
            ayxe = (
                εxx1 * εxx4 / εzz4 / ew14
                - εxx2 * εxx3 / εzz3 / ew23
                + w * (εxx2 * εxx4 - εxx1 * εxx3) / ew23 / ew14
            ) / (n + s)
            # Eq 29 transformed
            ayxw = (
                εxx3 * εxx2 / εzz2 / ew23
                - εxx4 * εxx1 / εzz1 / ew14
                + e * (εxx4 * εxx2 - εxx1 * εxx3) / ew23 / ew14
            ) / (n + s)
            # Eq 30
            axye = (εyy4 * (1 + εyy3 / εzz4) - εyy3 * (1 + εyy4 / εzz4)) / ns34 / (
                e + w
            ) - (
                2 * εyx1 * εyy2 / εzz1 * n * w / ns21
                + 2 * εyx2 * εyy1 / εzz2 * s * w / ns21
                + 2 * εyx4 * εyy3 / εzz4 * n * e / ns34
                + 2 * εyx3 * εyy4 / εzz3 * s * e / ns34
                + 2 * εyy1 * εyy2 * (1.0 / εzz1 - 1.0 / εzz2) * w^2 / ns21
            ) / e / (
                e + w
            ) ^ 2
            # Eq 31
            axyw = (εyy2 * (1 + εyy1 / εzz2) - εyy1 * (1 + εyy2 / εzz2)) / ns21 / (
                e + w
            ) - (
                2 * εyx1 * εyy2 / εzz1 * n * e / ns21
                + 2 * εyx2 * εyy1 / εzz2 * s * e / ns21
                + 2 * εyx4 * εyy3 / εzz4 * n * w / ns34
                + 2 * εyx3 * εyy4 / εzz3 * s * w / ns34
                + 2 * εyy3 * εyy4 * (1.0 / εzz3 - 1.0 / εzz4) * e^2 / ns34
            ) / w / (
                e + w
            ) ^ 2
            # Eq 30 transformed
            ayxn = (εxx4 * (1 + εxx1 / εzz4) - εxx1 * (1 + εxx4 / εzz4)) / ew14 / (
                n + s
            ) - (
                2 * εxy3 * εxx2 / εzz3 * e * s / ew23
                + 2 * εxy2 * εxx3 / εzz2 * w * n / ew23
                + 2 * εxy4 * εxx1 / εzz4 * e * s / ew14
                + 2 * εxy1 * εxx4 / εzz1 * w * n / ew14
                + 2 * εxx3 * εxx2 * (1.0 / εzz3 - 1.0 / εzz2) * s^2 / ew23
            ) / n / (
                n + s
            ) ^ 2
            # Eq 31 transformed
            ayxs = (εxx2 * (1 + εxx3 / εzz2) - εxx3 * (1 + εxx2 / εzz2)) / ew23 / (
                n + s
            ) - (
                2 * εxy3 * εxx2 / εzz3 * e * n / ew23
                + 2 * εxy2 * εxx3 / εzz2 * w * n / ew23
                + 2 * εxy4 * εxx1 / εzz4 * e * s / ew14
                + 2 * εxy1 * εxx4 / εzz1 * w * s / ew14
                + 2 * εxx1 * εxx4 * (1.0 / εzz1 - 1.0 / εzz4) * n^2 / ew14
            ) / s / (
                n + s
            ) ^ 2
            # Eq 32
            axyne = +εyy3 * (1 - εyy4 / εzz4) / (e + w) / ns34
            axyse = -εyy4 * (1 - εyy3 / εzz3) / (e + w) / ns34
            # Eq 33
            axynw = -εyy2 * (1 - εyy1 / εzz1) / (e + w) / ns21
            axysw = +εyy1 * (1 - εyy2 / εzz2) / (e + w) / ns21
            # Eq 32 transformed
            ayxne = +εxx1 * (1 - εxx4 / εzz4) / (n + s) / ew14
            ayxse = -εxx2 * (1 - εxx3 / εzz3) / (n + s) / ew23
            # Eq 33 transformed
            ayxnw = -εxx4 * (1 - εxx1 / εzz1) / (n + s) / ew14
            ayxsw = +εxx3 * (1 - εxx2 / εzz2) / (n + s) / ew23
            # Eq 34
            axyp = -(axyn + axys + axye + axyw + axyne + axyse + axynw + axysw) - k^2 * (
                w * (n * εyx1 * εyy2 + s * εyx2 * εyy1) / ns21
                + e * (s * εyx3 * εyy4 + n * εyx4 * εyy3) / ns34
            ) / (e + w)
            # Eq 34 transformed
            ayxp = -(ayxn + ayxs + ayxe + ayxw + ayxne + ayxse + ayxnw + ayxsw) - k^2 * (
                n * (w * εxy1 * εxx4 + e * εxy4 * εxx1) / ew14
                + s * (w * εxy2 * εxx3 + e * εxy3 * εxx2) / ew23
            ) / (n + s)
            # Fold out-of-domain stencil entries back in at the domain edges,
            # with the sign set by the corresponding boundary flag.
            # North boundary
            if j == ny
                axxs += ms.boundary[1] * axxn
                axxse += ms.boundary[1] * axxne
                axxsw += ms.boundary[1] * axxnw
                ayxs += ms.boundary[1] * ayxn
                ayxse += ms.boundary[1] * ayxne
                ayxsw += ms.boundary[1] * ayxnw
                ayys -= ms.boundary[1] * ayyn
                ayyse -= ms.boundary[1] * ayyne
                ayysw -= ms.boundary[1] * ayynw
                axys -= ms.boundary[1] * axyn
                axyse -= ms.boundary[1] * axyne
                axysw -= ms.boundary[1] * axynw
            end
            # South boundary
            if j == 1
                axxn += ms.boundary[2] * axxs
                axxne += ms.boundary[2] * axxse
                axxnw += ms.boundary[2] * axxsw
                ayxn += ms.boundary[2] * ayxs
                ayxne += ms.boundary[2] * ayxse
                ayxnw += ms.boundary[2] * ayxsw
                ayyn -= ms.boundary[2] * ayys
                ayyne -= ms.boundary[2] * ayyse
                ayynw -= ms.boundary[2] * ayysw
                axyn -= ms.boundary[2] * axys
                axyne -= ms.boundary[2] * axyse
                axynw -= ms.boundary[2] * axysw
            end
            # East boundary
            if i == nx
                axxw += ms.boundary[3] * axxe
                axxnw += ms.boundary[3] * axxne
                axxsw += ms.boundary[3] * axxse
                ayxw += ms.boundary[3] * ayxe
                ayxnw += ms.boundary[3] * ayxne
                ayxsw += ms.boundary[3] * ayxse
                ayyw -= ms.boundary[3] * ayye
                ayynw -= ms.boundary[3] * ayyne
                ayysw -= ms.boundary[3] * ayyse
                axyw -= ms.boundary[3] * axye
                axynw -= ms.boundary[3] * axyne
                axysw -= ms.boundary[3] * axyse
            end
            # West boundary
            if i == 1
                axxe += ms.boundary[4] * axxw
                axxne += ms.boundary[4] * axxnw
                axxse += ms.boundary[4] * axxsw
                ayxe += ms.boundary[4] * ayxw
                ayxne += ms.boundary[4] * ayxnw
                ayxse += ms.boundary[4] * ayxsw
                ayye -= ms.boundary[4] * ayyw
                ayyne -= ms.boundary[4] * ayynw
                ayyse -= ms.boundary[4] * ayysw
                axye -= ms.boundary[4] * axyw
                axyne -= ms.boundary[4] * axynw
                axyse -= ms.boundary[4] * axysw
            end
            # Construct tensor
            # A is laid out in two blocks: rows/cols 1:nn act on Hx, nn+1:2nn on Hy
            nn = nx * ny
            # Diagonal
            ix = (i - 1) * ny + j # ii
            iy = (i - 1) * ny + j # ii
            A[ix, iy] = axxp
            A[ix, iy+nn] = axyp
            A[ix+nn, iy] = ayxp
            A[ix+nn, iy+nn] = ayyp
            # North
            if (j > 1)
                ix = (i - 1) * ny + j # n
                iy = (i - 1) * ny + j - 1 # s
                A[ix, iy] = axxs
                A[ix, iy+nn] = axys
                A[ix+nn, iy] = ayxs
                A[ix+nn, iy+nn] = ayys
            end
            # South
            if (j < ny)
                ix = (i - 1) * ny + j # s
                iy = (i - 1) * ny + j + 1 # n
                A[ix, iy] = axxn
                A[ix, iy+nn] = axyn
                A[ix+nn, iy] = ayxn
                A[ix+nn, iy+nn] = ayyn
            end
            # East
            if (i > 1)
                ix = (i - 1) * ny + j # e
                iy = (i - 1 - 1) * ny + j # w
                A[ix, iy] = axxw
                A[ix, iy+nn] = axyw
                A[ix+nn, iy] = ayxw
                A[ix+nn, iy+nn] = ayyw
            end
            # West
            if (i < nx)
                ix = (i - 1) * ny + j # w
                iy = (i - 1 + 1) * ny + j # e
                A[ix, iy] = axxe
                A[ix, iy+nn] = axye
                A[ix+nn, iy] = ayxe
                A[ix+nn, iy+nn] = ayye
            end
            # North-East
            if (i > 1 && j > 1)
                ix = (i - 1) * ny + j # ne
                iy = (i - 1 - 1) * ny + j - 1 # sw
                A[ix, iy] = axxsw
                A[ix, iy+nn] = axysw
                A[ix+nn, iy] = ayxsw
                A[ix+nn, iy+nn] = ayysw
            end
            # South-East
            if (j < ny && i > 1)
                ix = (i - 1) * ny + j # se
                iy = (i - 1 - 1) * ny + j + 1 # nw
                A[ix, iy] = axxnw
                A[ix, iy+nn] = axynw
                A[ix+nn, iy] = ayxnw
                A[ix+nn, iy+nn] = ayynw
            end
            # South-West
            if (j < ny && i < nx)
                ix = (i - 1) * ny + j # sw
                iy = (i - 1 + 1) * ny + j + 1 # ne
                A[ix, iy] = axxne
                A[ix, iy+nn] = axyne
                A[ix+nn, iy] = ayxne
                A[ix+nn, iy+nn] = ayyne
            end
            # North-West
            if (j > 1 && i < nx)
                ix = (i - 1) * ny + j # nw
                iy = (i - 1 + 1) * ny + j - 1 # se
                A[ix, iy] = axxse
                A[ix, iy+nn] = axyse
                A[ix+nn, iy] = ayxse
                A[ix+nn, iy+nn] = ayyse
            end
        end
    end
    return A
end
"""
    Hz = getHz(Hx, Hy, x, y, β)

Recover the longitudinal magnetic-field component from the transverse fields
via Hz = (∂Hx/∂x + ∂Hy/∂y) / (iβ). The derivatives are central-differenced
onto cell centers, and the result is interpolated (with flat extrapolation)
back onto the (y, x) grid nodes.
"""
function getHz(Hx, Hy, x, y, β)
    nrows, ncols = size(Hx, 1), size(Hx, 2)
    Δx = x[2:end] .- x[1:end-1]
    Δy = y[2:end] .- y[1:end-1]
    # Hz evaluated on the cell-centered grid
    Hz_center = zeros(ComplexF64, (nrows - 1, ncols - 1))
    for (jx, dx) in enumerate(Δx), (iy, dy) in enumerate(Δy)
        dHxdx = (Hx[iy+1, jx+1] + Hx[iy, jx+1] - Hx[iy+1, jx] - Hx[iy, jx]) / (2 * dx)
        dHydy = (Hy[iy+1, jx+1] + Hy[iy+1, jx] - Hy[iy, jx+1] - Hy[iy, jx]) / (2 * dy)
        Hz_center[iy, jx] = (dHxdx + dHydy) / (1im * β)
    end
    # interpolate from cell centers back to the grid nodes
    xmid = 0.5 .* (x[2:end] .+ x[1:end-1])
    ymid = 0.5 .* (y[2:end] .+ y[1:end-1])
    field = extrapolate(interpolate((ymid, xmid), Hz_center, Gridded(Linear())), Flat())
    Hz = zeros(ComplexF64, (nrows, ncols))
    for (jx, xv) in enumerate(x), (iy, yv) in enumerate(y)
        Hz[iy, jx] = field(yv, xv)
    end
    return Hz
end
"""
    Ex, Ey, Ez = getE(Hx, Hy, Hz, x, y, β, ω, ε)

Recover the electric field from the magnetic field via D = (1/jω) ∇×H with
∂/∂z replaced by the modal propagation factor, then E = ε⁻¹ D using the
local 2×2 transverse permittivity block (and εzz for Ez). Fields are built
at cell centers and interpolated (flat extrapolation) onto the grid nodes.
"""
function getE(Hx, Hy, Hz, x, y, β, ω, ε)
    # Init Fields
    Ex = zeros(ComplexF64, (size(Hx,1)-1, size(Hx,2)-1))
    Ey = zeros(ComplexF64, (size(Hy,1)-1, size(Hy,2)-1))
    Ez = zeros(ComplexF64, (size(Hz,1)-1, size(Hz,2)-1))
    diffx = x[2:end] .- x[1:end-1]
    diffy = y[2:end] .- y[1:end-1]
    # Get Fields
    # Dx = 1 / (jω) * (∂yHz - ∂zHy) = [∂yHz / (jω)] + (β/ω Hy)
    # Dy = 1 / (jω) * (∂zHx - ∂xHz) = - [∂zHx / (jω)] - (β/ω Hx)
    # Dz = 1 / (jω) * (∂xHy - ∂yHx) = [∂xHy / (jω)] - [∂yHx / (jω)]
    for (j, dx) in enumerate(diffx)
        for (i, dy) in enumerate(diffy)
            # Get ε at the cell center
            xc, yc = (x[j] + x[j+1]) / 2, (y[i] + y[i+1]) / 2
            εxx, εxy, εyx, εyy, εzz = ε(xc, yc)
            detε = εxx * εyy - εxy * εyx # determinant of the transverse 2×2 block
            # Build D
            Dx = (
                (Hz[i+1, j] + Hz[i+1, j+1] - Hz[i, j] - Hz[i, j+1])
                / (2im * ω * dy) + # ∂Hz/∂y
                (Hy[i, j] + Hy[i+1, j] + Hy[i, j+1] + Hy[i+1, j+1])
                * (0.25 * β/ω) # -∂Hy/∂z
            )
            Dy = (
                - (Hx[i, j] + Hx[i, j+1] + Hx[i+1, j] + Hx[i+1, j+1])
                * (0.25 * β/ω) # -∂Hx/∂z
                - (Hz[i+1, j+1] + Hz[i, j+1] - Hz[i+1, j] - Hz[i, j])
                / (2im * ω * dx) # ∂Hz/∂x
            )
            Dz = (
                (Hy[i, j+1] + Hy[i+1, j+1] - Hy[i+1, j] - Hy[i, j])
                / (2im * ω * dx) + # ∂Hy/∂x
                (Hx[i+1, j] + Hx[i+1, j+1] - Hx[i, j] - Hx[i, j+1])
                / (-2im * ω * dy) # ∂Hx/∂y
            )
            # Get E = ε⁻¹ D (explicit 2×2 inverse for the transverse block)
            Ex[i, j] = (εyy * Dx - εxy * Dy) / detε
            Ey[i, j] = (εxx * Dy - εyx * Dx) / detε
            Ez[i, j] = Dz / εzz
        end
    end
    # Interpolate Fields from cell centers back onto the grid nodes
    xc = 0.5 .* (x[2:end] .+ x[1:end-1])
    yc = 0.5 .* (y[2:end] .+ y[1:end-1])
    itpEx = interpolate((yc, xc), Ex, Gridded(Linear()))
    etpEx = extrapolate(itpEx, Flat())
    itpEy = interpolate((yc, xc), Ey, Gridded(Linear()))
    etpEy = extrapolate(itpEy, Flat())
    itpEz = interpolate((yc, xc), Ez, Gridded(Linear()))
    etpEz = extrapolate(itpEz, Flat())
    retEx = zeros(ComplexF64, (size(Ex,1)+1, size(Ex,2)+1))
    retEy = zeros(ComplexF64, (size(Ey,1)+1, size(Ey,2)+1))
    retEz = zeros(ComplexF64, (size(Ez,1)+1, size(Ez,2)+1))
    for (j, xv) in enumerate(x)
        for (i, yv) in enumerate(y)
            retEx[i, j] = etpEx(yv, xv)
            retEy[i, j] = etpEy(yv, xv)
            retEz[i, j] = etpEz(yv, xv)
        end
    end
    return retEx, retEy, retEz
end
"""
    modes = solve(A, ms, nev, tol, ncv=nothing, sigma=nothing)

Solve the assembled eigenproblem `A [Hx; Hy] = β² [Hx; Hy]` for `nev` modes
(largest real β² by default, or shift-and-invert about `sigma` when given),
reconstruct Hz and the E field for each eigenvector, and return the `Mode`s
sorted by decreasing effective index.
"""
function solve(A::SparseMatrixCSC, ms::VectorialModesolver, nev::Int, tol::Float64, ncv=nothing, sigma=nothing)
    # Solve eigenvalues (ncv defaults to 10× the number of requested modes)
    ncv = ifelse(isnothing(ncv), 10*nev, ncv)
    if isnothing(sigma)
        β²s, ϕs = eigs(A, nev=nev, ncv=ncv, tol=tol, which=:LR)
    else
        β²s, ϕs = eigs(A, nev=nev, ncv=ncv, tol=tol, which=:LM, sigma=sigma)
    end
    # Compile Modes
    modes = Vector{Mode}()
    k = ω = 2 * π / ms.λ # free-space wavenumber; ω is set numerically equal to k here
    nx, ny = length(ms.x), length(ms.y)
    for (i, β²) in enumerate(β²s)
        # Extract Hx, Hy from the stacked eigenvector (Hx block first)
        Hx = reshape(ϕs[1:nx*ny, i], (ny, nx))
        Hy = reshape(ϕs[nx*ny+1:end, i], (ny, nx))
        # Get Hz, neff, Ex, Ey, Ez
        β = √(β²)
        Hz = getHz(Hx, Hy, ms.x, ms.y, β)
        neff = β / k
        Ex, Ey, Ez = getE(Hx, Hy, Hz, ms.x, ms.y, β, ω, ms.ε)
        # Push Field
        push!(modes,
            Mode(
                λ = ms.λ,
                neff = neff,
                x = ms.x,
                y = ms.y,
                Ex = Ex,
                Ey = Ey,
                Ez = Ez,
                Hx = Hx,
                Hy = Hy,
                Hz = Hz,
            )
        )
    end
    # Sort modes by decreasing effective index
    sort!(modes, by=m->-m.neff)
    return modes
end
# Convenience wrapper: assemble the finite-difference operator for `ms`, then solve it.
solve(ms::VectorialModesolver, nev::Int, tol::Float64, ncv=nothing, sigma=nothing) =
    solve(assemble(ms), ms, nev, tol, ncv, sigma) | VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 349 | module VectorModesolver
import Arpack: eigs
import SparseArrays: spzeros, SparseMatrixCSC
import Interpolations: interpolate, Linear, Gridded, extrapolate, Flat
using Parameters
using Revise # NOTE(review): Revise is a development tool — consider removing it from package dependencies
export VectorialModesolver, assemble, solve, εtype # NOTE(review): εtype is not defined in the included files shown — confirm
include("Modesolver.jl")
include("Mode.jl")
include("Visualization.jl")
end # module VectorModesolver
| VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | code | 1608 | using CairoMakie
export plot_mode_fields
"""
    vmin, vmax = _normalize_fields(fields)

Given a collection of field arrays, return a symmetric color range
`(-m, m)` where `m` is the largest absolute value found across all arrays.
"""
function _normalize_fields(fields::AbstractVector)
    global_max = maximum(maximum.(fields))
    global_min = minimum(minimum.(fields))
    bound = max(global_max, abs(global_min))
    return -bound, bound
end
"""
    plot_mode_fields(mode_data::Mode; normalize_fields::Bool=true, kwargs...)

Plots the six-component, mode-field profiles (real parts): the E components
on the top row and the H components on the bottom row.

When `normalize_fields` is true (the default), each row shares a symmetric
color range spanning the largest absolute value in that row, with a shared
horizontal colorbar; otherwise each panel is scaled independently.
"""
function plot_mode_fields(mode_data::Mode; normalize_fields::Bool=true, kwargs...)
    f = Figure()
    # E-field row (colorbar on grid row 1, axes on row 2)
    _plot_field_row!(f, 1, [L"E_x", L"E_y", L"E_z"],
                     real.([mode_data.Ex, mode_data.Ey, mode_data.Ez]),
                     L"\Re(\textbf{E})", mode_data.x, mode_data.y, normalize_fields)
    # H-field row (colorbar on grid row 3, axes on row 4)
    _plot_field_row!(f, 3, [L"H_x", L"H_y", L"H_z"],
                     real.([mode_data.Hx, mode_data.Hy, mode_data.Hz]),
                     L"\Re(\textbf{H})", mode_data.x, mode_data.y, normalize_fields)
    resize_to_layout!(f)
    return f
end

# plot one row of three field heatmaps: axes go on grid row `cbar_row + 1`,
# with a shared horizontal colorbar on `cbar_row` when normalization is on
function _plot_field_row!(f, cbar_row, labels, fields, cbar_label, x, y, normalize_fields)
    colorrange = normalize_fields ? _normalize_fields(fields) : nothing
    for k in 1:3
        ax = Axis(f[cbar_row + 1, k],
            title = labels[k],
            xlabel = "X",
            ylabel = "Y",
            aspect = DataAspect(),
        )
        hm = isnothing(colorrange) ?
             heatmap!(ax, x, y, fields[k], colormap=:bluesreds) :
             heatmap!(ax, x, y, fields[k], colormap=:bluesreds, colorrange=colorrange)
        # a shared colorbar only makes sense when the panels share a colorrange
        if k == 1 && !isnothing(colorrange)
            Colorbar(f[cbar_row, 1:3], hm; vertical=false, label=cbar_label)
        end
    end
end
|
[
"MIT"
] | 0.1.1 | a3223ab40da6429b70703cfd546f5ebd11250fc4 | docs | 259 | # VectorModesolver
Vector Finite Difference Maxwell Modesolver
## References
Adapted from [Vector Finite Difference Modesolver for Anisotropic Dielectric Waveguides](https://ieeexplore.ieee.org/document/4542926) and [EMpy](https://github.com/lbolla/EMpy/).
| VectorModesolver | https://github.com/hammy4815/VectorModesolver.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 957 | DEBUG = false # use to toggle debugging outputs
using Distributed, Random
@everywhere using Logging
# temporarily raise the log level while activating the project on every worker
@everywhere begin ## TODO remove pre-registration dev hack
    global_logger(ConsoleLogger(stdout, Logging.Error))
    import Pkg
    Pkg.activate(".")
    global_logger(ConsoleLogger(stdout, Logging.Info))
end
# DEBUG (defined above) toggles Debug- vs Info-level console logging
global_logger(Logging.ConsoleLogger(stdout, DEBUG ? Logging.Debug : Logging.Info))
@debug "DEBUG mode"
@everywhere using MLMolGraph
MLMolGraph.banner()
# read the command line arguments
@everywhere args = parse_args()
# check args
valid, msg = MLMolGraph.validate_args(args)
if !valid
    error(msg)
end
@info "Program parameters" args...
# set the random seed (seeded randomly by default)
Random.seed!(args[:seed])
# set paths relative to the input data folder
set_paths(args[:data])
# run the data processing
run_process(args)
@info "Done!"
exit(0) ##! why does the end of the script throw some silly error when nothing has failed? | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 1333 | import Pkg
# Try to import a Python dependency; if the import fails, install the package
# via Conda (optionally from a specific channel) and import again to verify.
# `altname` is the Python module name when it differs from the Conda package
# name (e.g. Conda package "pytorch" imports as module "torch").
function check_add_dep(pkg; channel="", altname=nothing)
    module_name = isnothing(altname) ? pkg : altname
    try
        @info "Checking dependency: $pkg"
        pyimport(module_name)
    catch
        @info "Installing $pkg..."
        Conda.add(pkg, channel=channel)
        pyimport(module_name)
        @info "$pkg install verified."
    end
end
@info "Setting up Python environment..."
# force PyCall to use its own Conda-managed Python rather than a system one
ENV["PYTHON"]=""
Pkg.add("PyCall")
Pkg.build("PyCall")
using PyCall
pyimport("sys") # smoke-test that PyCall can reach a Python interpreter
@info "PyCall verified."
# check for Conda; if not found, install it
try
    @info "Checking Conda."
    using Conda
    @info "Conda verified."
catch
    @info "Installing Conda..."
    Pkg.add("Conda")
    using Conda
    @info "Conda verified."
end
# check for MLMolGraph; if not found, add it
try
    @info "Checking MLMolGraph."
    using MLMolGraph
    @info "MLMolGraph verified."
catch # if not, install it
    @info "Installing MLMolGraph..."
    Pkg.add("MLMolGraph")
    Pkg.build("MLMolGraph")
    using MLMolGraph
    @info "MLMolGraph install verified."
end
# check the deps, add if missing
check_add_dep("scipy")
check_add_dep("pymatgen", channel="conda-forge")
check_add_dep("pytorch", channel="pytorch", altname="torch")
@info "Setup complete!"
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 419 | using Documenter
using MLMolGraph
# build the documentation site from the package's docs/ sources
makedocs(
    root = joinpath(dirname(pathof(MLMolGraph)), "..", "docs"),
    modules = [MLMolGraph],
    sitename = "MLMolGraph",
    clean = true, # wipe the build directory before building
    pages = [
        "MLMolGraph" => "index.md",
        "Graphs & AI" => "graph_ml.md"
    ],
    format = Documenter.HTML(assets = ["assets/flux.css"])
)
deploydocs(repo = "github.com/SimonEnsemble/MLMolGraph.git")
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 825 | module MLMolGraph
using Distributed
using CSV, DataFrames, FIGlet, Graphs, JLD2, LinearAlgebra, Logging, MetaGraphs, ProgressMeter, PyCall, SharedArrays, Xtals
# , IOCapture,
# , , SparseArrays, StatsBase,
import Base.show, Base.display
include("processing.jl")
include("run_process.jl")
include("misc.jl")
include("argument_parsing.jl")
include("export_data.jl")
# Import an optional Python dependency into the runtime config `rc`;
# stores `nothing` (and warns) when the Python module is unavailable.
function load_pydep(dep::String)
    try
        rc[Symbol(dep)] = pyimport(dep)
    catch
        rc[Symbol(dep)] = nothing
        @warn "Python dependency $dep not found."
    end
end
# Module initialization: make sure the configured data directories exist,
# then load the optional Python dependencies (torch, numpy, pickle).
function __init__()
    for dir in values(rc[:paths])
        if ! isdir(dir)
            mkpath(dir)
        end
    end
    load_pydep.(["torch", "numpy", "pickle"])
end
export run_process, parse_args, make_arg_dict
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 951 | using ArgParse, Random
# create the argument parser object
argparser = ArgParseSettings(
    prog="MLMolGraph Data Processor", # program name to display
    description="reads .cif inputs, converts to primitive cell, infers bonds, and converts valid bonded structures to ML inputs"
)
# build the argument table
include("argument_table.jl")
# simple function for parsing command line args
# (zero-argument overload of ArgParse.parse_args bound to the parser above)
import ArgParse.parse_args
parse_args() = parse_args(argparser, as_symbols=true)
"""
    args::Dict{Symbol, Any} = make_arg_dict(target::String, args::Dict{Symbol, Any})
    args::Dict{Symbol, Any} = make_arg_dict(args::Dict{Symbol, Any})

Build a complete argument dictionary by overlaying `args` onto
`DEFAULT_ARGS` and setting the `:target` entry to `target`.
"""
function make_arg_dict(target::String=DEFAULT_ARGS[:target], args::Dict{Symbol, Any}=Dict{Symbol, Any}())::Dict{Symbol, Any}
    full_args = merge(DEFAULT_ARGS, args)
    full_args[:target] = target
    return full_args
end
make_arg_dict(args::Dict{Symbol, Any}) = make_arg_dict(args[:target], args)
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 2394 | # All args must be set for tests to pass. Keep this Dict updated w/ new args from table.
# Central defaults for every command-line option (see argument table below).
DEFAULT_ARGS = Dict(
    # `store_true` args (must be false by default)
    :vectors => false,
    :bonds => false,
    :angles => false,
    :verbose => false,
    :pvec => false,
    # string args
    :data => rc[:paths][:data],
    :target => "",
    :dataset_output => "_DATASET_.PKL",
    # int args
    :seed => Int(floor(rand() * 1e3)), # random default seed in [0, 1000)
    :samples => 0, # 0 means "use all inputs"
    # float args
    :charge_tolerance => 1e-5,
    :target_hi => Inf,
    :target_lo => -Inf
)
# command-line option definitions; defaults are centralized in DEFAULT_ARGS above
@add_arg_table! argparser begin
    "--angles"
        help = "write ML inputs for bonding angles"
        action = :store_true
    "--bonds"
        help = "write ML inputs for the bonding graph"
        action = :store_true
    "--seed"
        help = "set the random seed for reproducible random dataset sampling"
        arg_type = Int
        default = DEFAULT_ARGS[:seed]
    "--data"
        help = "root of data tree. inputs loaded from `crystals` subdirectory."
        arg_type = String
        default = DEFAULT_ARGS[:data]
    "--verbose"
        help = "whether or not to print extra @info output"
        action = :store_true
    "--vectors"
        help = "write bond vectors"
        action = :store_true
    "--charge_tolerance"
        help = "set net charge tolerance for Crystal()"
        arg_type = Float64
        default = DEFAULT_ARGS[:charge_tolerance]
    "--samples"
        help = "number of inputs to sample (0 -> use all)"
        arg_type = Int
        default = DEFAULT_ARGS[:samples]
    "--target"
        help = "name of target column for constraining values"
        arg_type = String
        default = DEFAULT_ARGS[:target]
    "--target_hi"
        help = "upper bound for target value"
        arg_type = Float64
        default = DEFAULT_ARGS[:target_hi]
    "--target_lo"
        help = "lower bound for target value"
        arg_type = Float64
        default = DEFAULT_ARGS[:target_lo]
    "--dataset_output"
        help = "path to file where processed data will be saved"
        default = DEFAULT_ARGS[:dataset_output]
    "--pvec"
        help = "whether or not to look for physical vector information"
        default = DEFAULT_ARGS[:pvec]
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 4594 | """
dataset = consolidate_data(xtal_names, element_to_int, df, args)
"""
# Gather the per-example data stored in `temp_dir` into one dataset
# dictionary keyed by data kind (names, targets, features, edges, ...).
function consolidate_data(good_xtals::Vector{String}, element_to_int::Dict{Symbol, Int}, df::DataFrame, args::Dict{Symbol, Any}, temp_dir::String)::Dict{Symbol, Any}
    # dictionary for collecting data set
    dataset = Dict{Symbol, Any}()
    # record args in dictionary
    dataset[:args] = args
    # collect names into dictionary
    dataset[:names] = good_xtals
    # collect targets df into dictionary
    dataset[:targets] = df
    # collect encoding dictionary into dataset (and inverse mapping)
    dataset[:element_to_encoding] = element_to_int
    dataset[:encoding_to_element] = Dict([value => key for (key, value) in element_to_int])
    # collect bond angle vectors into dictionary
    dataset[:bond_angles] = args[:angles] ? (@showprogress "Collecting bond angles " pmap(xtal_name -> load_data(xtal_name, temp_dir)[:angles], good_xtals)) : nothing
    # collect bond distances into dictionary
    dataset[:bond_distances] = @showprogress "Collecting bond distances " pmap(xtal_name -> load_data(xtal_name, temp_dir)[:bond_edges][3][:], good_xtals)
    # collect atom node feature matrices into dictionary
    dataset[:atom_features] = @showprogress "Collecting atom features " pmap(xtal_name -> load_data(xtal_name, temp_dir)[:atom_features], good_xtals)
    # collect (src, dst, type) edge matrices for each example's bond graph
    dataset[:graph_edges] = @showprogress "Collecting graph edge matrices " [collect_graph_edge_matrix(xtal_name, :bond_graph, temp_dir) for xtal_name in good_xtals]
    # physical-vector inputs are not implemented yet; errors if requested
    dataset[:pvec] = args[:pvec] ? error("##! TODO: pvec implementation") : nothing
    return dataset
end
"""
    atom_edge_index = reprocess((gem, atom_x))

Convert one graph-edge matrix (`gem`: parallel 0-based src/dst/type lists)
into a PyTorch `edge_index` tensor of the atom-atom (type-0) edges,
symmetrized into an undirected edge list. Returns a triple of `nothing`s
when `gem` is `nothing` (a failed example).
"""
function reprocess((gem, atom_x))
    if isnothing(gem)
        # NOTE(review): this 3-tuple sentinel differs in shape from the tensor
        # returned below; confirm downstream handling before changing it.
        return nothing, nothing, nothing
    end
    torch = rc[:torch]
    numpy = rc[:numpy]
    # pull out the src/dst lists for atom-atom edges (edge type 0)
    atom_edge_src = [gem[1][i] for i in eachindex(gem[1]) if gem[3][i] == 0]
    atom_edge_dst = [gem[2][i] for i in eachindex(gem[1]) if gem[3][i] == 0]
    # convert directed edges to undirected by appending each reversed edge
    atom_edge_src, atom_edge_dst = vcat(atom_edge_src, atom_edge_dst), vcat(atom_edge_dst, atom_edge_src)
    # convert src/dst arrays to edge_index tensor
    atom_edge_index = torch.tensor(numpy.array([atom_edge_src, atom_edge_dst]), dtype=torch.long)
    return atom_edge_index
end
# Convert the consolidated dataset into PyTorch tensors and pickle the
# result to `args[:dataset_output]` for a Python training pipeline.
function export_data(dataset::Dict{Symbol, Any}, args::Dict)
    torch = rc[:torch]
    numpy = rc[:numpy]
    pickle = rc[:pickle]
    xtal_name = dataset[:names]
    # per-example atom feature tensors
    atom_x = !isnothing(dataset[:atom_features]) ? [torch.tensor(atom_node_fts, dtype=torch.float) for atom_node_fts in dataset[:atom_features]] : nothing
    # shift 1-based Julia indices (and type labels) to 0-based for torch
    graph_edge_matrix = !isnothing(dataset[:graph_edges]) ? [[gem[:, 1] .- 1, gem[:, 2] .- 1, gem[:, 3] .- 1] for gem in dataset[:graph_edges]] : nothing
    atom_edge_index = @showprogress "Finalizing edge index list " pmap(reprocess, zip(graph_edge_matrix, atom_x))
    x_p = !isnothing(dataset[:pvec]) ? [torch.tensor(numpy.array(pvec), dtype=torch.float) for pvec in dataset[:pvec]] : nothing
    y = !isnothing(dataset[:targets]) ? torch.tensor(numpy.array(dataset[:targets][:, dataset[:args][:target]]), dtype=torch.float) : nothing
    atom_to_int = dataset[:element_to_encoding]
    data_dict = Dict([
        "xtal_name" => xtal_name
        "atom_x" => atom_x
        "x_p" => x_p
        "y" => y
        "atom_to_int" => atom_to_int
        "atom_edge_index" => atom_edge_index
    ])
    # write dataset dictionary to python-readable binary
    PyCall.open(args[:dataset_output], "w") do file
        pickle.dump(data_dict, file)
    end
end
"""
    data = load_data(name, temp_dir)

Read the data dictionary for the named input (usually a crystal identifier)
back from the temporary data store at `temp_dir`.
"""
load_data(name::String, temp_dir::String)::Dict = load_object(joinpath(temp_dir, name))
"""
    save_data(name, data, temp_dir)

Write a data dictionary to the temporary data store at `temp_dir` under
the specified name.
"""
save_data(name::String, data::Dict, temp_dir::String) = save_object(joinpath(temp_dir, name), data)
# Flatten the requested MetaGraph (`graphkey`) of a stored example into an
# (n_edges × 3) integer matrix of (src, dst, edge-type-label) rows; edges
# without a :type property default to the atom-atom label :AA.
function collect_graph_edge_matrix(xtal_name::String, graphkey::Symbol, temp_dir::String)::Matrix{Int}
    graph = load_data(xtal_name, temp_dir)[graphkey]
    gem = zeros(Int, (3, ne(graph)))
    # edge-type symbol → integer label
    edge_label = Dict([
        :AA => 1
    ])
    for (i, e) in enumerate(edges(graph))
        gem[:, i] .= src(e), dst(e), :type in keys(props(graph, e)) ? edge_label[get_prop(graph, e, :type)] : edge_label[:AA]
    end
    return Matrix(gem')
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 1213 | # calculates distance between two points in a periodic system
"""
    d = pbc_distance(pt1, pt2, box)

Distance between two points under periodic boundary conditions: each
component of the separation is wrapped by the minimum-image convention
(shifted by the box length when it exceeds half the box) before taking
the Euclidean norm.
"""
function pbc_distance(pt1::Array{Float64}, pt2::Array{Float64}, box::Vector{Float64})::Float64
    Δ = pt1 .- pt2
    for d ∈ 1:3
        half = 0.5 * box[d]
        if Δ[d] > half
            Δ[d] -= box[d]
        elseif Δ[d] ≤ -half
            Δ[d] += box[d]
        end
    end
    return norm(Δ)
end
pbc_distance(pt1::Array{Float64}, pt2::Array{Float64}, box::Box) = pbc_distance(pt1, pt2, [box.a, box.b, box.c])
"""
    ok, msg = validate_args(args)

Sanity-check the parsed command-line arguments. Returns `(true, "")` when
they are usable, otherwise `(false, reason)`.
"""
function validate_args(args::Dict{Symbol,Any})::Tuple{Bool,String}
    # the data root must exist
    if !ispath(args[:data])
        return (false, "Invalid data path")
    end
    # angle extraction requires the bond graph
    if args[:angles] && !args[:bonds]
        return (false, "Must specify --bonds when requesting --angles")
    end
    return (true, "")
end
# print the package's ASCII-art banner
function banner()
    FIGlet.render("MLMolGraph", FIGlet.availablefonts()[441]) # font #441 of the available FIGlet fonts
end
_primitive_xtal_name(xtal_name::String) = split(xtal_name, ".cif")[1] * "_primitive_cell.cif"
# Write the program arguments to data_processing_args.csv (one "key, value"
# row per argument) in the configured data directory, for provenance.
function record_args(args::Dict{Symbol, Any})
    outpath = joinpath(rc[:paths][:data], "data_processing_args.csv")
    open(outpath, "w") do io
        for (key, value) in pairs(args)
            write(io, "$key, $value\n")
        end
    end
end
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 5464 | using XtalsPyTools
# Attempt to load every named CIF file as a `Crystal` and stash the result
# (under :input) in the temporary data store; on failure the error is logged
# and an entry WITHOUT :input is saved so downstream steps can skip it.
function loadcheck(xtal_list::Vector{String}, tolerance::Float64, temp_dir::String)
    @showprogress "Checking file loadability. " @distributed for xtal_name ∈ xtal_list
        xtal_data = Dict{Symbol, Any}()
        try
            xtal_data[:input] = Crystal(xtal_name * ".cif", remove_duplicates=true, net_charge_tol=tolerance)
            save_data(xtal_name, xtal_data, temp_dir)
        catch exception
            @error xtal_name exception
            save_data(xtal_name, xtal_data, temp_dir) # saved without :input
        end
    end
end
"""
    xtals2primitive(xtal_list, temp_dir)

Compute and store the primitive cell (under :primitive_cell) for every
example that was successfully loaded by `loadcheck`; examples without a
stored :input (i.e. those that failed to load) are skipped.
"""
function xtals2primitive(xtal_list::Vector{String}, temp_dir::String)
    @showprogress "Converting to primitive cells. " @distributed for xtal_name ∈ xtal_list
        xtal_data = load_data(xtal_name, temp_dir)
        # `loadcheck` saves an empty Dict on failure, so :input may be absent;
        # `get` avoids the KeyError that direct indexing would throw.
        xtal = get(xtal_data, :input, nothing)
        if isnothing(xtal)
            continue
        else
            xtal_data[:primitive_cell] = primitive_cell(xtal)
            save_data(xtal_name, xtal_data, temp_dir)
        end
    end
end
# An example is "good" when its stored bond graph exists and has at least
# one edge; any load failure or missing key counts as bad.
function isgood(xtal_file::String, temp_dir)::Bool
    try
        bond_graph = load_data(xtal_file, temp_dir)[:bond_graph]
        return !isnothing(bond_graph) && ne(bond_graph) > 0
    catch
        return false
    end
end
# Infer bonds (with periodic-boundary vectors) for every primitive cell,
# store the resulting bond graph (or `nothing` on failure) under
# :bond_graph, and return the subset of names whose graphs are usable.
function bondNclassify(xtal_list::Vector{String}, temp_dir::String)::Vector{String}
    @showprogress "Inferring bonds. " pmap(xtal_list) do xtal_name
        try
            xtal_data = load_data(xtal_name, temp_dir)
            xtal = xtal_data[:primitive_cell]
            made_bonds = infer_bonds!(xtal, true, calculate_vectors=true)
            if made_bonds
                xtal_data[:bond_graph] = xtal.bonds
            else
                xtal_data[:bond_graph] = nothing # no bonds could be inferred
            end
            save_data(xtal_name, xtal_data, temp_dir)
        catch exception
            @error "Exception while classifying input" xtal_name exception
        end
    end
    # keep only the examples whose saved bond graph is non-empty
    good = SharedArray{Bool}(length(xtal_list))
    @sync @distributed for i ∈ eachindex(xtal_list)
        good[i] = isgood(xtal_list[i], temp_dir)
    end
    return xtal_list[good]
end
# Return the distinct atomic species present in a stored example's
# primitive cell (optionally logging them when --verbose is set).
function unique_elements(xtal_file::String, args, temp_dir::String)
    xtal_data = load_data(xtal_file, temp_dir)
    elements = unique(xtal_data[:primitive_cell].atoms.species)
    args[:verbose] && @info "$elements unique elements in $(xtal_file)"
    return elements
end
# Build the species → integer encoding used for one-hot atom features:
# scan every example for the elements it contains and enumerate the union.
# Returns (element_to_int, number_of_encoded_elements).
function determine_encoding(xtal_list::Vector{String}, args, temp_dir::String)
    n = length(xtal_list)
    all_elements = [keys(rc[:covalent_radii])...] # list of all elements known to Xtals
    elements = SharedArray{Bool}(length(all_elements)) # elements[i] == true iff keys(all_elements)[i] ∈ some xtal
    if args[:verbose]
        @info "Encoding $n structures' atoms"
        @info "$(length(all_elements)) elements"
    end
    @showprogress "Determining encoding scheme:" @distributed for i ∈ 1:n
        xtal_elements = unique_elements(xtal_list[i], args, temp_dir)
        elements[[findfirst(isequal(element), all_elements) for element in xtal_elements]] .= true
    end
    # number only the elements that actually occur, in their discovery order
    element_to_int = Dict{Symbol,Int}([element => i for (i, element) ∈ enumerate(all_elements[elements])])
    return element_to_int, length(element_to_int)
end
# Read the target-property CSV (`source`, relative to the data directory),
# keep only the `name` and target columns, and drop rows whose structure
# file is not among `examples`.
function read_targets(source::String, examples::Vector{String}, target_symbol::Symbol)::DataFrame
    targets = CSV.read(joinpath(rc[:paths][:data], source), DataFrame, delim=",")
    select!(targets, [:name, target_symbol])
    filter!(r -> "$(r.name).cif" ∈ examples, targets)
    return targets
end
# one-hot encoding of atomic species: one row per atom, one column per
# encoded element, with a 1 marking each atom's species
function node_feature_matrix(xtal::Crystal, element_to_int::Dict{Symbol,Int})
    encoding = zeros(Int, xtal.atoms.n, length(element_to_int))
    for (atom_idx, species) in enumerate(xtal.atoms.species)
        encoding[atom_idx, element_to_int[species]] = 1
    end
    return encoding
end
# Flatten a bond graph into parallel (src, dst, distance) vectors, listing
# each undirected edge in both directions.
# NOTE(review): the `type` argument is currently unused — confirm intent.
function edge_vectors(graph::MetaGraph, type::Symbol)
    edge_count = ne(graph)
    l = 2 * edge_count # two directed entries per undirected edge
    edg_srcs = zeros(Int, l)
    edg_dsts = zeros(Int, l)
    edg_prop = zeros(Float64, l)
    for (i, edge) in enumerate(edges(graph))
        # forward entry at i, reversed entry at i + edge_count
        edg_srcs[i] = edg_dsts[i + edge_count] = edge.src
        edg_dsts[i] = edg_srcs[i + edge_count] = edge.dst
        edg_prop[i] = edg_prop[i + edge_count] = get_prop(graph, edge, :distance)
    end
    return edg_srcs, edg_dsts, edg_prop
end
# Attach the ML inputs for one example to its stored data dictionary:
# one-hot atom features, (optionally) bond-graph edge vectors, and
# (optionally) a physical-property vector taken from the targets DataFrame.
function write_data(xtal_data::Dict, name::String, element_to_int::Dict{Symbol,Int}, df::DataFrame, args::Dict{Symbol,Any}, temp_dir::String)
    xtal = xtal_data[:primitive_cell]
    # node features
    xtal_data[:atom_features] = node_feature_matrix(xtal, element_to_int)
    # bond graph
    xtal_data[:bond_edges] = args[:bonds] ? edge_vectors(xtal_data[:bond_graph], :bonds) : nothing
    # pvec
    # NOTE(review): assumes args[:pvec_columns] exists when args[:pvec] is set,
    # but no such option appears in the argument table — confirm.
    xtal_data[:pvec] = args[:pvec] ? [df[findfirst(n -> n == name, df.name), prop] for prop in args[:pvec_columns]] : nothing
    save_data(name, xtal_data, temp_dir)
end
# Run `write_data` over every surviving example and return a success mask.
# NOTE(review): the returned mask is unconditionally all-true — failures in
# `write_data` are not recorded here; confirm whether that is intended.
function process_examples(good_xtals::Vector{String}, element_to_int::Dict{Symbol,Int}, df::DataFrame, args::Dict{Symbol,Any}, temp_dir::String)::Vector{Bool}
    @assert length(good_xtals) > 0 "No inputs"
    @showprogress "Processing examples " pmap(good_xtals) do xtal_name
        write_data(load_data(xtal_name, temp_dir), xtal_name, element_to_int, df, args, temp_dir)
    end
    good = trues(length(good_xtals))
    return good
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 3700 | """
`run_process(args)`
Converts inputs at `rc[:data][:crystals]` into ML model data at `rc[:data]`
"""
function run_process(args; temp_dir=nothing)
    # create temporary working directory (default: the system temp dir)
    temp_dir = isnothing(temp_dir) ? tempdir() : temp_dir
    mkpath(temp_dir)
    @assert isdir(temp_dir) "Failed to create temporary directory at $temp_dir"
    # copy selected rows into smaller CSV file (for convenience)
    @info "Reading target data."
    target_csv = joinpath(rc[:paths][:data], "properties.csv")
    df = CSV.read(target_csv, DataFrame)
    @assert nrow(df) > 0 "$target_csv contains no examples."
    @assert "name" in names(df) "$target_csv has no 'name' column."
    # get list of xtals
    xtal_dir = rc[:paths][:crystals]
    all_files = readdir(xtal_dir)
    # NOTE(review): `contains` also matches names like "foo.cif.bak";
    # `endswith` may be intended — confirm.
    file_is_cif = @showprogress "Finding example data " pmap(x -> contains(x, ".cif"), all_files)
    validation_progress = Progress(6, "Validating example data ")
    # filter for just the CIF data files
    # (`.== 1` on a Bool vector is equivalent to using the vector directly)
    cif_files = all_files[file_is_cif .== 1]
    next!(validation_progress)
    @assert length(cif_files) != 0 "$(xtal_dir) contains no CIF files."
    next!(validation_progress)
    # strip the ".cif" extension to get structure names
    mof_names = [String(split(cif_file, ".cif")[1]) for cif_file in cif_files]
    next!(validation_progress)
    found_cif = intersect(mof_names, df.name)
    next!(validation_progress)
    # filter for examples w/ target data AND structure input
    filter!(r -> r.name ∈ found_cif, df) ##! SLOW
    next!(validation_progress)
    @assert df.name != [] "$target_csv and $xtal_dir have no structure names in common."
    next!(validation_progress)
    @info "$(length(df.name)) inputs."
    # sample list (0 means "use everything")
    if args[:samples] ≠ 0
        @assert args[:samples] <= length(df.name) "Requested number of samples ($(args[:samples])) exceeds number of inputs."
        @info "Randomly sampling $(args[:samples])."
        df = df[in.(df.name, [sample(df.name, args[:samples], replace=false)]), :]
    end
    # optionally restrict examples to a target-value window [target_lo, target_hi]
    if args[:target] != ""
        @info "Checking target values."
        target_min = args[:target_lo]
        target_max = args[:target_hi]
        # filter xtal_list by
        filter!(row -> row[args[:target]] >= target_min, df)
        filter!(row -> row[args[:target]] <= target_max, df)
        @assert length(df.name) > 0 "No target values within range [$target_min, $target_max]"
    end
    # broadcast the property-vector column names to all workers
    if args[:pvec]
        @everywhere args[:pvec_columns] = readlines(joinpath(rc[:paths][:data], "pvec_columns.txt"))
    end
    # make sure names in df are correct data type
    df.name = String.(df.name)
    # check that inputs can be read and start their xtal_data temp dicts
    loadcheck(df.name, args[:charge_tolerance], temp_dir)
    # process inputs to primitive cells
    xtals2primitive(df.name, temp_dir)
    # infer bonds, test quality
    good_xtals = bondNclassify(df.name, temp_dir)
    @assert length(good_xtals) > 0 "No structures passed bonding inspection."
    @info "$(length(good_xtals)) good bonded xtals."
    # determine atom node encoding scheme
    element_to_int, encoding_length = determine_encoding(good_xtals, args, temp_dir)
    @assert encoding_length > 0
    # final selection of target df rows
    filter!(row -> row.name in good_xtals, df)
    @assert length(df.name) == length(good_xtals)
    # process the graphs into ML inputs
    good = process_examples(good_xtals, element_to_int, df, args, temp_dir)
    good_xtals = good_xtals[good]
    # gather per-structure files into a single dataset and export it
    dataset = consolidate_data(good_xtals, element_to_int, df, args, temp_dir)
    export_data(dataset, args)
    return good_xtals
end
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 408 | using MLMolGraph
import Aqua
# set `false` if ambiguity testing finds many "problems" outside the scope of this package
ambiguities=true
# to skip when checking for stale dependencies and missing compat entries
# Aqua is added in a separate CI job, so (ironically) does not work w/ itself
stale_deps = (ignore=[:Aqua],)
Aqua.test_all(MLMolGraph;
ambiguities=ambiguities,
stale_deps=stale_deps
) | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 142 | module Arg_test
using Test, MLMolGraph
@testset "target constraints" begin
args = make_arg_dict()
@test args[:target] == ""
end
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 710 | module Misc_test
using Test, MLMolGraph, Xtals
import MLMolGraph.pbc_distance
@testset "PBC distance" begin
points = [ # grid of 0.5x0.5x0.5 cube vertices
0.25 0.25 0.25 0.75 0.75 0.75 0.25 0.75;
0.25 0.25 0.75 0.25 0.25 0.75 0.75 0.75;
0.25 0.75 0.25 0.25 0.75 0.25 0.75 0.75
]
cubic_box = unit_cube()
@test pbc_distance(points[:,1], points[:,2], cubic_box) == 0.5
@test pbc_distance(points[:,1], points[:,8], cubic_box) == √(0.75)
triclinic_box = Box(1., 1., 1., π/3, π/3, π/3)
@test all([pbc_distance(points[:,i], points[:,j], cubic_box) == pbc_distance(points[:,j], points[:,i], triclinic_box) for i in 1:8 for j in 1:8])
end
end
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 460 | module Processing_test
using Graphs, Test, MLMolGraph, XtalsPyTools
temp_dir = joinpath(pwd(), ".temp")
if !isdir(temp_dir)
mkpath(temp_dir)
end
@testset "xtals2primitive" begin
xtal_name = "str_m3_o2_o18_pcu_sym.41"
MLMolGraph.loadcheck([xtal_name], 1e-4, temp_dir)
MLMolGraph.xtals2primitive([xtal_name], temp_dir)
xtal = MLMolGraph.load_data(xtal_name, temp_dir)[:primitive_cell]
@test xtal.atoms.n == 90
end
end | MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 3872 | module Run_process_test
using CSV, DataFrames, Graphs, MetaGraphs, PyCall, Test, MLMolGraph, Xtals, XtalsPyTools
target = "working_capacity_vacuum_swing [mmol/g]"
temp_dir = joinpath(pwd(), ".temp")
if !isdir(temp_dir)
mkpath(temp_dir)
end
"""
    g = rebuild_graph(xtal_name, int_to_atom, temp_dir)

Reconstruct a bonding graph (referenced by structure name) from the ML input
data previously written to `temp_dir`: node species come from the one-hot
feature matrix via `int_to_atom`, and edges come from the stored edge arrays.
"""
function rebuild_graph(xtal_name::String, int_to_atom::Dict{Int, Symbol}, temp_dir::String)::MetaGraph
    # read ML data files
    xtal_data = MLMolGraph.load_data(xtal_name, temp_dir)
    X = xtal_data[:atom_features]
    es = xtal_data[:bond_edges][1]
    ed = xtal_data[:bond_edges][2]
    @assert length(es) == length(ed) && length(es) > 0 "length mismatch in edge arrays for $xtal_name"
    # reconstruct the graph
    g = MetaGraph(size(X, 1))
    # copy node labels (decode each one-hot row back to a species symbol)
    for (i, row) in enumerate(eachrow(X))
        @assert set_prop!(g, i, :species, int_to_atom[findfirst(row .== 1)])
    end
    # copy edges (each undirected edge is stored twice; skip the reversed copy)
    for (s, d) in zip(es, ed)
        if !has_edge(g, d, s)
            @assert s <= size(X, 1) && d <= size(X, 1) && s >= 1 && d >= 1 "invalid index in edge arrays for $xtal_name: s = $s ($(maximum(es))), d = $d ($(maximum(ed))), [X] = $(size(X))"
            # NOTE(review): the assert "message" here is the tuple
            # (nv(g), s, d); an interpolated string may be clearer.
            @assert add_edge!(g, s, d) nv(g), s, d
        end
    end
    return g
end
# End-to-end check of `run_process` with bond graphs enabled: process the
# example crystals, then verify the exported dataset (feature matrices,
# encodings, graphs, and targets) against the original structures and CSV.
@testset "bonds" begin
    # set up args
    arg_dict = make_arg_dict(target)
    arg_dict[:bonds] = true
    # process the crystals
    good_xtals = run_process(arg_dict, temp_dir=temp_dir)
    # test that all inputs return usable results
    @test length(good_xtals) == 50
    # load crystals
    xtals = Crystal.(good_xtals .* [".cif"])
    # load feature matrices from the pickled dataset via PyCall
    py"""
    import pickle
    file = open("_DATASET_.PKL", "rb")
    dataset = pickle.load(file)
    file.close()
    """
    dataset = py"dataset"
    atom_x = dataset["atom_x"]
    # convert the Python per-graph tensors into nested Julia arrays
    Xs = [[[get(get(atom_x[graph], node-1), i-1).item() for i in 1:length(get(atom_x[graph], node-1))] for node in 1:length(atom_x[graph])] for graph in eachindex(atom_x)]
    # test that feature matrix rows are one-hot encodings
    @test all([all([sum(Xs[i][j]) .== 1 for j in eachindex(Xs[i])]) for i in eachindex(Xs)])
    # convert to primitive cells (to keep consistent w/ processing)
    xtals = primitive_cell.(xtals)
    # read atom encoding dictionary
    atom_to_int = Dict([Symbol(value) => key for (value, key) in dataset["atom_to_int"]])
    # test that atom encodings match original species by atom_to_int mapping for all structures
    @test all([all([atom_to_int[xtals[i].atoms.species[j]] == findfirst(Xs[i][j] .== 1) for j in eachindex(Xs[i])]) for i in eachindex(Xs)])
    # build bonding graphs
    good_bonds = infer_bonds!.(xtals, [true])
    @assert all(good_bonds) # make sure the bonding went right
    # invert encoding dictionary
    int_to_atom = Dict([label => key for (key, label) in atom_to_int])
    @assert length(int_to_atom) == length(atom_to_int) # check bijectivity
    # reconstruct graphs from numpy data
    graphs = rebuild_graph.(good_xtals, [int_to_atom], temp_dir)
    @test all([xtal.bonds for xtal in xtals] .== graphs) # test reconstructed graphs against originals
    # check target data against source CSV
    properties_df = CSV.read(joinpath(rc[:paths][:data], "properties.csv"), DataFrame)
    targets = [y.item() for y in dataset["y"]]
    # BUGFIX: size the flag vector by the number of processed structures, not
    # by the number of rows in properties.csv. The loop below only sets one
    # flag per entry of `good_xtals`, so extra rows in the CSV would leave
    # trailing `false` entries and make `@test all(test_flags)` fail spuriously.
    test_flags = falses(length(good_xtals))
    for (i, name) in enumerate(good_xtals)
        # extract values from independent locations by name
        pt = filter(row -> row.name == name, properties_df)[:, target][1]
        tt = targets[findfirst(dataset["xtal_name"] .== name)]
        # confirm that targets in each location are the same
        test_flags[i] = isapprox(pt, tt, atol=1e-4)
    end
    @test all(test_flags)
end
end
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | code | 585 | testfiles = [
"run_process.jl"
"processing.jl"
"argument_parsing.jl"
"misc.jl"
]
const DEBUG = true #* toggle debug mode
@assert VERSION.major == 1
@assert VERSION.minor > 6
using Test, Logging
global_logger(ConsoleLogger(stdout, getproperty(Logging, DEBUG ? :Debug : :Info)))
@debug "DEBUG mode"
using MLMolGraph
MLMolGraph.banner()
for testfile ∈ testfiles
@info "Running test/$(testfile)"
@time include(testfile)
end
temp_dir = joinpath(pwd(), ".temp")
if isdir(temp_dir)
rm(temp_dir, recursive=true)
end
@info "Done!"
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | docs | 927 | | **Documentation** | **Build Status** | **Test Coverage** |
|:---:|:---:|:---:|
| [![Docs]()]() | [](https://github.com/eahenle/MLMolGraph.jl/actions/workflows/ci_testing.yml) | [![codecov]()]() [](https://github.com/JuliaTesting/Aqua.jl) |
# Data Processing
Process crystals in `data/crystals` to machine-learning inputs for graph neural networks by:
```
julia --project [-p numprocs] process_crystals.jl [OPTS]
```
Set `-p numprocs`, where `numprocs` is the desired number of worker processes for distributed processing.
Use `-h` or `--help` to see command line options.
**Example uses**
Generate ML inputs using 12 cores for a model based on the bonding network with angle information:
```
julia --project -p 12 process_crystals.jl --bonds --angles
```
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | docs | 701 | # Why Graphs?
For many applications, it is advantageous to represent chemical structures as graphs, where the atoms are the graph nodes and the bonds are the graph edges.
This is particularly true in machine learning, where neural networks based on the topology of the chemical graph can be trained to provide on-demand prediction of material properties which would normally require long calculations on powerful computers.
`MLMolGraph` uses the [`Xtals.jl`](https://github.com/SimonEnsemble/Xtals.jl) package to read chemical structure data and build the bonding graph, and then exports graph information as a series of vectors, formatted on disk for easy application with `torch` or `Flux`.
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"MIT"
] | 0.0.1 | 95b048776b19737e500c53fb3461bdeb6c0163d4 | docs | 424 | # MLMolGraph.jl
`MLMolGraph.jl` is a package for converting chemical structures to model inputs for graph neural networks.
Getting started is easy!
1. Install Julia 1.6.0 or higher.
2. In the Julia REPL, hit `]` to enter `Pkg` mode, and install the library:
``` Pkg> add MLMolGraph```
3. In the REPL or a script, import the namespace:
```julia> using MLMolGraph```
Once that's done, you're ready to go!
| MLMolGraph | https://github.com/eahenle/MLMolGraph.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 1068 | using ClimaAnalysis
using Documenter
import GeoMakie
DocMeta.setdocmeta!(
ClimaAnalysis,
:DocTestSetup,
:(using ClimaAnalysis;
using ClimaAnalysis.Utils;
using ClimaAnalysis.Var;
using ClimaAnalysis.Atmos;
using ClimaAnalysis.Sim);
recursive = true,
)
makedocs(;
modules = [
ClimaAnalysis,
Base.get_extension(ClimaAnalysis, :ClimaAnalysisMakieExt),
Base.get_extension(ClimaAnalysis, :ClimaAnalysisGeoMakieExt),
],
authors = "Climate Modelling Alliance",
sitename = "ClimaAnalysis.jl",
format = Documenter.HTML(;
prettyurls = !isempty(get(ENV, "CI", "")),
collapselevel = 1,
),
checkdocs = :exports,
pages = [
"Home" => "index.md",
"OutputVars" => "var.md",
"Visualizing OutputVars" => "visualize.md",
"RMSEVariables" => "rmse_var.md",
"Visualizing RMSEVariables" => "visualize_rmse_var.md",
"APIs" => "api.md",
"How do I?" => "howdoi.md",
],
)
deploydocs(; repo = "github.com/CliMA/ClimaAnalysis.jl")
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 11784 | module ClimaAnalysisGeoMakieExt
import GeoMakie
import GeoMakie: Makie
import ClimaAnalysis
import ClimaAnalysis: Visualize
MakiePlace = Union{Makie.Figure, Makie.GridLayout}
"""
    oceanmask()

Return a collection of polygons that cover the ocean, for use as a mask.
Plot with `Makie.poly`.
"""
function Visualize.oceanmask()
    # Bathymetry polygons at elevation 0 trace sea level, so they cover
    # exactly the ocean.
    return GeoMakie.NaturalEarth.bathymetry(0)
end
"""
    landmask()

Return a collection of polygons that cover the continents, for use as a mask.
Plot with `Makie.poly`.
"""
Visualize.landmask() = GeoMakie.land()
"""
    _geomakie_plot_on_globe!(place, var; kwargs...)

Shared implementation for globe plots: set up a `GeoAxis` at `p_loc` in
`place`, draw `var` (a 2D lon-lat `OutputVar`) with `plot_fn`, and optionally
overlay a mask, the coastline, and a colorbar. `more_kwargs` carries
per-component keyword dictionaries (`:axis`, `:plot`, `:cb`, `:coast`,
`:mask`).
"""
function _geomakie_plot_on_globe!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    plot_coastline = true,
    plot_colorbar = true,
    mask = nothing,
    more_kwargs = Dict(
        :plot => Dict(),
        :cb => Dict(),
        :axis => Dict(),
        :coast => Dict(:color => :black),
        :mask => Dict(),
    ),
    plot_fn = Makie.surface!,
)
    length(var.dims) == 2 || error("Can only plot 2D variables")
    # identify which dimension is longitude and which is latitude
    lon_name = ""
    lat_name = ""
    for dim in var.index2dim
        if dim in ClimaAnalysis.Var.LONGITUDE_NAMES
            lon_name = dim
        elseif dim in ClimaAnalysis.Var.LATITUDE_NAMES
            lat_name = dim
        else
            error("$dim is neither longitude nor latitude")
        end
    end
    lon = var.dims[lon_name]
    lat = var.dims[lat_name]
    units = ClimaAnalysis.units(var)
    short_name = var.attributes["short_name"]
    colorbar_label = "$short_name [$units]"
    axis_kwargs = get(more_kwargs, :axis, Dict())
    plot_kwargs = get(more_kwargs, :plot, Dict())
    cb_kwargs = get(more_kwargs, :cb, Dict())
    coast_kwargs = get(more_kwargs, :coast, Dict(:color => :black))
    # NOTE(review): this fallback (:color => :white) differs from the default
    # in the signature (empty Dict for :mask) — confirm which is intended.
    mask_kwargs = get(more_kwargs, :mask, Dict(:color => :white))
    plot_mask = !isnothing(mask)
    # NOTE: mutates `var` — the long_name attribute is line-wrapped in place
    # so it fits as a plot title.
    var.attributes["long_name"] =
        ClimaAnalysis.Utils.warp_string(var.attributes["long_name"])
    title = get(axis_kwargs, :title, var.attributes["long_name"])
    # a :title entry in axis_kwargs is also splatted below; the splatted
    # (last) occurrence wins
    GeoMakie.GeoAxis(place[p_loc...]; title, axis_kwargs...)
    plot = plot_fn(lon, lat, var.data; plot_kwargs...)
    # draw order: data first, then mask, then coastline on top
    plot_mask && Makie.poly!(mask; mask_kwargs...)
    plot_coastline && Makie.lines!(GeoMakie.coastlines(); coast_kwargs...)
    if plot_colorbar
        # colorbar goes in the grid cell immediately to the right of the plot
        p_loc_cb = Tuple([p_loc[1], p_loc[2] + 1])
        Makie.Colorbar(
            place[p_loc_cb...],
            plot,
            label = colorbar_label;
            cb_kwargs...,
        )
    end
end
"""
heatmap2D_on_globe!(fig::Makie.Figure,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
plot_coastline = true,
plot_colorbar = true,
mask = nothing,
more_kwargs)
heatmap2D_on_globe!(grid_layout::Makie.GridLayout,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
plot_coastline = true,
plot_colorbar = true,
mask = nothing,
more_kwargs)
Plot a heatmap of the given 2D `var`iable on a projected geoid.
The plot comes with labels, units, and a colorbar.
This function assumes that the following attributes are available:
- long_name
- short_name
- units
The dimensions have to be longitude and latitude.
`mask` has to be an object that can be plotted by `Makie.poly`. Typically, an ocean or land
mask. `ClimaAnalysis` comes with predefined masks, check out [`Visualize.oceanmask`](@ref) and
[`Visualize.landmask`](@ref).
!!! note Masking does not affect the colorbar. If you have values defined beneath the map,
they can still affect the colorbar.
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
- the coastline (`:coast`)
- the mask (`:mask`)
The coastline is plotted from `GeoMakie.coastline` using the `lines!` plotting function.
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.heatmap2D_on_globe!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    plot_coastline = true,
    plot_colorbar = true,
    mask = nothing,
    more_kwargs = Dict(
        :plot => Dict(),
        :cb => Dict(),
        :axis => Dict(),
        :coast => Dict(:color => :black),
        :mask => Dict(),
    ),
)
    # Delegate to the shared globe-plotting routine, selecting the
    # heatmap-style renderer (Makie.surface!).
    return _geomakie_plot_on_globe!(
        place,
        var;
        p_loc = p_loc,
        plot_coastline = plot_coastline,
        plot_colorbar = plot_colorbar,
        mask = mask,
        more_kwargs = more_kwargs,
        plot_fn = Makie.surface!,
    )
end
"""
contour2D_on_globe!(fig::Makie.Figure,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
plot_coastline = true,
plot_colorbar = true,
plot_contours = true,
mask = nothing,
more_kwargs)
contour2D_on_globe!(grid_layout::Makie.GridLayout,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
plot_coastline = true,
plot_colorbar = true,
plot_contours = true,
mask = nothing,
more_kwargs)
Plot discrete contours of the given 2D `var`iable on a projected geoid.
The plot comes with labels, units, and a colorbar.
This function assumes that the following attributes are available:
- long_name
- short_name
- units
The dimensions have to be longitude and latitude.
`mask` has to be an object that can be plotted by `Makie.poly`. Typically, an ocean or land
mask. `ClimaAnalysis` comes with predefined masks, check out [`Visualize.oceanmask`](@ref) and
[`Visualize.landmask`](@ref).
!!! note Masking does not affect the colorbar. If you have values defined beneath the map,
they can still affect the colorbar.
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
- the coastline (`:coast`)
- the mask (`:mask`)
The coastline is plotted from `GeoMakie.coastline` using the `lines!` plotting function.
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.contour2D_on_globe!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    plot_coastline = true,
    plot_colorbar = true,
    mask = nothing,
    more_kwargs = Dict(
        :plot => Dict(),
        :cb => Dict(),
        :axis => Dict(),
        :coast => Dict(:color => :black),
        :mask => Dict(),
    ),
)
    # Delegate to the shared globe-plotting routine, selecting the filled
    # contour renderer (Makie.contourf!).
    return _geomakie_plot_on_globe!(
        place,
        var;
        p_loc = p_loc,
        plot_coastline = plot_coastline,
        plot_colorbar = plot_colorbar,
        mask = mask,
        more_kwargs = more_kwargs,
        plot_fn = Makie.contourf!,
    )
end
"""
plot_bias_on_globe!(fig::Makie.Figure,
sim::ClimaAnalysis.OutputVar,
obs::ClimaAnalysis.OutputVar;
cmap_extrema = extrema(ClimaAnalysis.bias(sim, obs).data),
p_loc = (1, 1),
plot_coastline = true,
plot_colorbar = true,
mask = nothing,
more_kwargs)
plot_bias_on_globe!(grid_layout::Makie.GridLayout,
sim::ClimaAnalysis.OutputVar,
obs::ClimaAnalysis.OutputVar;
cmap_extrema = extrema(ClimaAnalysis.bias(sim, obs).data),
p_loc = (1, 1),
plot_coastline = true,
plot_colorbar = true,
mask = nothing,
more_kwargs)
Plot the bias (`sim.data - obs.data`) on a projected geoid. The global bias and root mean
squared error (RMSE) are computed and can be found in the title of the plot. This function
plots the returned `OutputVar` of `ClimaAnalysis.bias(sim, obs)`. See also
[`ClimaAnalysis.bias`](@ref).
The plot comes with labels, units, and a colorbar. This function uses a constrained colormap
based on the values of `cmap_extrema`.
The dimensions have to be longitude and latitude.
`mask` has to be an object that can be plotted by `Makie.poly`. `ClimaAnalysis` comes with
predefined masks, check out [`Visualize.oceanmask`](@ref) and [`Visualize.landmask`](@ref).
!!! note
Masking does not affect the colorbar. If you have values defined beneath the map, they
can still affect the colorbar.
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
- the coastline (`:coast`)
- the mask (`:mask`)
The coastline is plotted from `GeoMakie.coastline` using the `lines!` plotting function.
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.plot_bias_on_globe!(
    place::MakiePlace,
    sim::ClimaAnalysis.OutputVar,
    obs::ClimaAnalysis.OutputVar;
    cmap_extrema = extrema(ClimaAnalysis.bias(sim, obs).data),
    p_loc = (1, 1),
    plot_coastline = true,
    plot_colorbar = true,
    mask = nothing,
    more_kwargs = Dict(
        :plot => Dict(),
        :cb => Dict(),
        :axis => Dict(),
        :coast => Dict(:color => :black),
        :mask => Dict(),
    ),
)
    # compute the bias field and summary statistics for the plot title
    bias_var = ClimaAnalysis.bias(sim, obs)
    global_bias = round(bias_var.attributes["global_bias"], sigdigits = 3)
    rmse = round(ClimaAnalysis.global_rmse(sim, obs), sigdigits = 3)
    units = ClimaAnalysis.units(bias_var)
    # append the statistics to the long_name so they appear in the title
    bias_var.attributes["long_name"] *= " (RMSE: $rmse $units, Global bias: $global_bias $units)"
    min_level, max_level = cmap_extrema
    # Make sure that 0 is at the center
    cmap = Visualize._constrained_cmap(
        Makie.cgrad(:vik).colors,
        min_level,
        max_level;
        categorical = true,
    )
    nlevels = 11
    # Offset so that it covers 0
    levels = collect(range(min_level, max_level, length = nlevels))
    offset = levels[argmin(abs.(levels))]
    levels = levels .- offset
    # integer-looking tick labels at the contour levels
    ticklabels = map(x -> string(round(x; digits = 0)), levels)
    ticks = (levels, ticklabels)
    default_kwargs = Dict(
        :plot => Dict(
            :colormap => cmap,
            :levels => levels,
            # extend the colormap beyond the level range on both ends
            :extendhigh => :auto,
            :extendlow => :auto,
        ),
        :cb => Dict(:ticks => ticks),
    )
    # Function for recursively merging two dictionaries if the values of the dictionaries
    # are dictionaries and the values of those are also dictionaries and so on
    # See: https://discourse.julialang.org/t/multi-layer-dict-merge/27261/6
    recursive_merge(x::AbstractDict...) = merge(recursive_merge, x...)
    recursive_merge(x...) = x[end]
    # user-supplied options take precedence over the defaults
    default_and_more_kwargs = recursive_merge(default_kwargs, more_kwargs)
    return Visualize.contour2D_on_globe!(
        place,
        bias_var;
        p_loc = p_loc,
        plot_coastline = plot_coastline,
        plot_colorbar = plot_colorbar,
        mask = mask,
        more_kwargs = default_and_more_kwargs,
    )
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 28047 | module ClimaAnalysisMakieExt
import Makie
import ClimaAnalysis
import ClimaAnalysis: Visualize
MakiePlace = Union{Makie.Figure, Makie.GridLayout}
"""
heatmap2D!(fig::Makie.Figure,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
more_kwargs)
heatmap2D!(grid_layout::Makie.GridLayout,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
more_kwargs)
Plot a heatmap of the given 2D `var`iable in the given place and location.
The place can be a `Figure` or a `GridLayout`.
The plot comes with labels, units, and a colorbar.
This function assumes that the following attributes are available:
- long_name
- short_name
- units (also for the dimensions)
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.heatmap2D!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
)
    length(var.dims) == 2 || error("Can only plot 2D variables")
    dim1_name, dim2_name = var.index2dim
    dim1 = var.dims[dim1_name]
    dim2 = var.dims[dim2_name]
    units = ClimaAnalysis.units(var)
    short_name = var.attributes["short_name"]
    colorbar_label = "$short_name [$units]"
    dim1_units = var.dim_attributes[dim1_name]["units"]
    dim2_units = var.dim_attributes[dim2_name]["units"]
    axis_kwargs = get(more_kwargs, :axis, Dict())
    plot_kwargs = get(more_kwargs, :plot, Dict())
    cb_kwargs = get(more_kwargs, :cb, Dict())
    # NOTE: mutates `var` — the long_name is line-wrapped in place so it fits
    # as a plot title.
    var.attributes["long_name"] =
        ClimaAnalysis.Utils.warp_string(var.attributes["long_name"])
    # explicit axis options win over the derived defaults
    title = get(axis_kwargs, :title, var.attributes["long_name"])
    xlabel = get(axis_kwargs, :xlabel, "$dim1_name [$dim1_units]")
    ylabel = get(axis_kwargs, :ylabel, "$dim2_name [$dim2_units]")
    # dim_on_y is only supported by plot_line1D. We remove it here to ensure that we can a
    # consistent entry point between plot_line1D and heatmap2D. It we left it here, it would
    # be passed down and lead to a unknown argument error.
    #
    # TODO: Refactor: We shouldn't have to deal with dim_on_y if we don't use it!
    if haskey(axis_kwargs, :dim_on_y)
        axis_kwargs_dict = Dict(axis_kwargs)
        pop!(axis_kwargs_dict, :dim_on_y)
        axis_kwargs = pairs(axis_kwargs_dict)
    end
    Makie.Axis(place[p_loc...]; title, xlabel, ylabel, axis_kwargs...)
    plot = Makie.heatmap!(dim1, dim2, var.data; plot_kwargs...)
    # colorbar goes in the grid cell immediately to the right of the plot
    p_loc_cb = Tuple([p_loc[1], p_loc[2] + 1])
    Makie.Colorbar(
        place[p_loc_cb...],
        plot,
        label = colorbar_label;
        cb_kwargs...,
    )
end
"""
Private function to define `sliced_` functions.
It slices a given variable and applies `func` to it.
"""
function _sliced_plot_generic(
    func,
    fig,
    var,
    cut;
    p_loc,
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
)
    # Treat `nothing` the same as "no slicing requested".
    slices = isnothing(cut) ? Dict() : cut
    # Slice away each requested dimension at its given value, then hand the
    # reduced variable to the actual plotting function.
    sliced = var
    for (dim_name, value) in slices
        sliced = ClimaAnalysis.Var._slice_general(sliced, value, dim_name)
    end
    return func(fig, sliced; p_loc, more_kwargs)
end
"""
Private function to define `plot!` functions.
It composes a `cut` from given `kwargs`. Used with `sliced` functions.
"""
function _plot_generic_kwargs(
    func,
    fig,
    var;
    p_loc,
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
    kwargs...,
)
    # Convert keyword arguments (e.g. `time = 100`) into a "cut" dictionary
    # keyed by dimension name as a string, as expected by the sliced_ functions.
    cut = Dict(string(k) => v for (k, v) in kwargs)
    # No keyword arguments at all means "do not slice".
    isempty(cut) && (cut = nothing)
    return func(fig, var, cut; p_loc, more_kwargs)
end
"""
sliced_heatmap!(fig::Makie.Figure,
var::ClimaAnalysis.OutputVar,
cut::Union{Nothing, AbstractDict{String, <: Real}};
p_loc = (1,1),
more_kwargs,
)
sliced_heatmap!(grid_layout::Makie.GridLayout,
var::ClimaAnalysis.OutputVar,
cut::Union{Nothing, AbstractDict{String, <: Real}};
p_loc = (1,1),
more_kwargs,
)
Take a `var`iable, slice as directed, and plot a 2D heatmap in the given place and
location.
The place can be a `Figure` or a `GridLayout`.
The plot comes with labels, units, and a colorbar.
Arguments
=========
If the variable is not 2D, `cut` has to be a dictionary that maps the dimension that has to
be sliced and the value where to cut.
For example, if `var` has four dimensions: `time`, `long`, `lat`, `z`, this function can be
used to plot a `lat-long` heatmap at fixed `time` and `z`. Assuming we want to plot
time `100.` and altitude `50.`, `cut` should be `Dict("time" => 100., "z" => 50.)`.
This function assumes that the following attributes are available:
- long_name
- short_name
- units (also for the dimensions)
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.sliced_heatmap!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar,
    cut::Union{Nothing, AbstractDict{String, <:Real}} = nothing;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
)
    # Slice `var` down to 2D per `cut`, then render it as a heatmap.
    return _sliced_plot_generic(
        Visualize.heatmap2D!, place, var, cut;
        p_loc, more_kwargs,
    )
end
"""
heatmap!(place::MakiePlace,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
more_kwargs,
kwargs...
)
Syntactic sugar for `sliced_heatmap` with `kwargs` instead of `cut`.
Example
=======
`heatmap!(fig, var, time = 100, lat = 70)` plots a heatmap by slicing `var` along
the time nearest to 100 and latitude nearest 70.
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.heatmap!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
    kwargs...,
)
    # Keyword sugar: extra kwargs (e.g. `time = 100`) become the slicing cut.
    return _plot_generic_kwargs(
        Visualize.sliced_heatmap!, place, var;
        p_loc, more_kwargs, kwargs...,
    )
end
"""
line_plot1D!(place::Makie.Figure,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
more_kwargs
)
line_plot1D!(place::Makie.GridLayout,
var::ClimaAnalysis.OutputVar;
p_loc = (1,1),
more_kwargs
)
Plot a line plot of the given 1D `var`iable in the given place and location.
The place can be a `Figure` or a `GridLayout`.
The plot comes with labels, units.
This function assumes that the following attributes are available:
- long_name
- short_name
- units (also for the dimensions)
Additional arguments to the plotting and axis functions
=======================================================
`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
A special argument that can be passed to `:axis` is `:dim_on_y`, which puts the dimension on
the y axis instead of the variable. This is useful to plot columns with `z` on the vertical
axis instead of the horizontal one.
"""
function Visualize.line_plot1D!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :axis => Dict()),
)
    length(var.dims) == 1 || error("Can only plot 1D variables")
    dim_name = var.index2dim[]
    dim = var.dims[dim_name]
    units = ClimaAnalysis.units(var)
    short_name = var.attributes["short_name"]
    dim_units = var.dim_attributes[dim_name]["units"]
    axis_kwargs = get(more_kwargs, :axis, Dict())
    plot_kwargs = get(more_kwargs, :plot, Dict())
    # NOTE: mutates `var` — the long_name is line-wrapped in place so it fits
    # as a plot title.
    var.attributes["long_name"] =
        ClimaAnalysis.Utils.warp_string(var.attributes["long_name"])
    # explicit axis options win over the derived defaults
    title = get(axis_kwargs, :title, var.attributes["long_name"])
    xlabel = get(axis_kwargs, :xlabel, "$dim_name [$dim_units]")
    ylabel = get(axis_kwargs, :ylabel, "$short_name [$units]")
    x, y = dim, var.data
    # dim_on_y swaps the axes, e.g. to plot columns with z on the vertical axis
    if get(axis_kwargs, :dim_on_y, false)
        xlabel, ylabel = ylabel, xlabel
        x, y = y, x
        # dim_on_y is not a real keyword for Axis, so we have to remove it from the
        # arguments. Since axis_kwargs is a Pairs object, we have to go through its
        # underlying dictionary first
        axis_kwargs_dict = Dict(axis_kwargs)
        pop!(axis_kwargs_dict, :dim_on_y)
        axis_kwargs = pairs(axis_kwargs_dict)
    end
    Makie.Axis(place[p_loc...]; title, xlabel, ylabel, axis_kwargs...)
    Makie.lines!(x, y; plot_kwargs...)
end
"""
    sliced_line_plot!(place::Makie.Figure,
                      var::ClimaAnalysis.OutputVar,
                      cut::Union{Nothing, AbstractDict{String, <: Real}};
                      p_loc = (1,1),
                      more_kwargs
                      )
    sliced_line_plot!(place::Makie.GridLayout,
                      var::ClimaAnalysis.OutputVar,
                      cut::Union{Nothing, AbstractDict{String, <: Real}};
                      p_loc = (1,1),
                      more_kwargs
                      )

Take a `var`iable, slice as directed, and plot a 1D line plot in the given place and
location. The place can be a `Figure` or a `GridLayout`.

The plot comes with labels and units.

# Arguments

If the variable is not 1D, `cut` has to be a dictionary that maps the dimensions that
have to be sliced to the values where to cut.

For example, if `var` has four dimensions: `time`, `long`, `lat`, `z`, this function
can be used to plot a 1D profile by cutting the other three dimensions (e.g., `cut =
Dict("time" => 100., "long" => 0., "z" => 50.)`).

This function assumes that the following attributes are available:
- long_name
- short_name
- units (also for the dimensions)

# Additional arguments to the plotting and axis functions

`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)

The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.sliced_line_plot!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar,
    cut::Union{Nothing, AbstractDict{String, <:Real}} = nothing;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :axis => Dict()),
)
    # Delegate slicing and placement to the shared helper, using the 1D line plotter
    return _sliced_plot_generic(
        Visualize.line_plot1D!,
        place,
        var,
        cut;
        p_loc,
        more_kwargs,
    )
end
"""
    line_plot!(place::Makie.Figure,
               var::ClimaAnalysis.OutputVar;
               p_loc = (1,1),
               more_kwargs,
               kwargs...
               )
    line_plot!(place::Makie.GridLayout,
               var::ClimaAnalysis.OutputVar;
               p_loc = (1,1),
               more_kwargs,
               kwargs...
               )

Syntactic sugar for `sliced_line_plot` with `kwargs` instead of `cut`.

# Example

`line_plot!(fig, var, time = 100, lat = 70)` plots a line plot by slicing `var` along
the time nearest to 100 and the latitude nearest to 70.

# Additional arguments to the plotting and axis functions

`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)

The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.line_plot!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :axis => Dict()),
    kwargs...,
)
    # kwargs (e.g. time = 100) are converted into a `cut` dictionary by the helper
    _plot_generic_kwargs(
        Visualize.sliced_line_plot!,
        place,
        var;
        p_loc,
        more_kwargs,
        kwargs...,
    )
end
"""
    sliced_plot!(place::Makie.Figure,
                 var::ClimaAnalysis.OutputVar,
                 cut::Union{Nothing, AbstractDict{String, <: Real}};
                 p_loc = (1,1),
                 more_kwargs
                 )
    sliced_plot!(place::Makie.GridLayout,
                 var::ClimaAnalysis.OutputVar,
                 cut::Union{Nothing, AbstractDict{String, <: Real}};
                 p_loc = (1,1),
                 more_kwargs
                 )

Take a `var`iable, slice as directed, and plot a 1D line plot or 2D heatmap in the
given place and location. The place can be a `Figure` or a `GridLayout`.

The plot comes with labels and units (and possibly a colorbar).

# Arguments

If the variable is not 1D/2D, `cut` has to be a dictionary that maps the dimensions
that have to be sliced to the values where to cut.

For example, if `var` has four dimensions: `time`, `long`, `lat`, `z`, this function
can be used to plot a `lat-long` heatmap at fixed `time` and `z`. Assuming we want to
plot time `100.` and altitude `50.`, `cut` should be `Dict("time" => 100., "z" => 50.)`.

This function assumes that the following attributes are available:
- long_name
- short_name
- units (also for the dimensions)

# Additional arguments to the plotting and axis functions

`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)

The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.sliced_plot!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar,
    cut::Union{Nothing, AbstractDict{String, <:Real}} = nothing;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
)
    # The number of dimensions left after slicing decides line plot vs heatmap
    final_dim = length(var.dims) - (isnothing(cut) ? 0 : length(cut))
    plot_fn = if final_dim == 1
        Visualize.line_plot1D!
    elseif final_dim == 2
        Visualize.heatmap2D!
    else
        error("Sliced variable has $final_dim dimensions (needed 1 or 2)")
    end
    return _sliced_plot_generic(plot_fn, place, var, cut; p_loc, more_kwargs)
end
"""
    plot!(place::Makie.Figure,
          var::ClimaAnalysis.OutputVar;
          p_loc = (1,1),
          more_kwargs,
          kwargs...
          )
    plot!(place::Makie.GridLayout,
          var::ClimaAnalysis.OutputVar;
          p_loc = (1,1),
          more_kwargs,
          kwargs...
          )

Syntactic sugar for `sliced_plot` with `kwargs` instead of `cut`.

# Example

`plot!(fig, var, time = 100, lat = 70)` plots a line plot or a heatmap by slicing
`var` along the time nearest to 100 and the latitude nearest to 70.

# Additional arguments to the plotting and axis functions

`more_kwargs` can be a dictionary that maps symbols to additional options for:
- the axis (`:axis`)
- the plotting function (`:plot`)
- the colorbar (`:cb`)

The values are splatted in the relevant functions. Populate them with a
Dictionary of `Symbol`s => values to pass additional options.
"""
function Visualize.plot!(
    place::MakiePlace,
    var::ClimaAnalysis.OutputVar;
    p_loc = (1, 1),
    more_kwargs = Dict(:plot => Dict(), :cb => Dict(), :axis => Dict()),
    kwargs...,
)
    # kwargs are turned into a `cut` dictionary by the helper
    _plot_generic_kwargs(
        Visualize.sliced_plot!,
        place,
        var;
        p_loc,
        more_kwargs,
        kwargs...,
    )
end
"""
    _to_unitrange(x::Number, lo::Number, hi::Number)

Linearly map `x ∈ [lo, hi]` onto `[0, 1]`.
"""
function _to_unitrange(x::Number, lo::Number, hi::Number)
    return (x - lo) / (hi - lo)
end
"""
    _constrained_cmap(cols::Vector, lo, hi; mid = 0, categorical = false, rev = false)
    _constrained_cmap(cols::Makie.ColorScheme, lo, hi; mid = 0, categorical = false, rev = false)

Constrain a colormap to a given range.

Given a colormap implicitly defined in `± maximum(abs, (lo, hi))`, constrain it to the
range [lo, hi]. This is useful to ensure that a colormap which is desired to diverge
symmetrically around zero maps the same color intensity to the same magnitude.

# Arguments
- `cols`: a vector of colors, or a ColorScheme
- `lo`: lower bound of the range
- `hi`: upper bound of the range

# Keyword Arguments
- `mid`: midpoint of the range # TODO: test `mid` better
- `categorical`: flag for whether returned colormap should be categorical or continuous
- `rev`: flag for whether to reverse the colormap before constraining cmap

# Returns
- `cmap::Makie.ColorGradient`: a colormap
"""
function Visualize._constrained_cmap(
    cols::Vector,
    lo,
    hi;
    mid = 0,
    categorical = false,
    rev = false,
)
    # Wrap the raw color vector in a ColorScheme and defer to the core method
    scheme = Makie.ColorScheme(cols)
    return Visualize._constrained_cmap(scheme, lo, hi; mid, categorical, rev)
end
# Core implementation: see the docstring on the `Vector` method above for details.
function Visualize._constrained_cmap(
    cols::Makie.ColorScheme,
    lo,
    hi;
    mid = 0,
    categorical = false,
    rev = false,
)
    # Reverse colorscheme if requested, don't reverse below in `cgrad`
    rev && (cols = reverse(cols))
    # Half-width of the symmetric interval around `mid` the colormap spans
    absmax = maximum(abs, (lo, hi) .- mid)
    # Map lo, hi ∈ [-absmax, absmax] onto [0,1] to sample their corresponding colors
    lo_m, hi_m = _to_unitrange.((lo, hi) .- mid, -absmax, absmax)
    # Values on [0,1] where each color in cols is defined
    colsvals = range(0, 1; length = length(cols))
    # Filter colsvals, keep only values in [lo_m, hi_m] + the endpoints lo_m and hi_m
    filter_colsvals =
        filter(x -> lo_m <= x <= hi_m, unique([lo_m; colsvals; hi_m]))
    # Select colors in filtered range; interpolate new low and hi colors
    newcols = Makie.get(cols, filter_colsvals)
    # Values on [0,1] where the new colors are defined
    new_colsvals = _to_unitrange.(filter_colsvals, lo_m, hi_m)
    # rev = false here: any reversal has already been applied above
    cmap = Makie.cgrad(newcols, new_colsvals; categorical, rev = false)
    return cmap
end
"""
    Visualize.plot_boxplot!(fig,
                            rmse_var::ClimaAnalysis.RMSEVariable;
                            model_names = ["CliMA"],
                            ploc = (1, 1),
                            best_and_worst_category_name = "ANN",
                            legend_text_width = 10)

Plot a Tukey style boxplot for each category in `rmse_var`.

The best and worst single models are found for the category
`best_and_worst_category_name` and are plotted on the boxplot. When finding the best
and worst single models, any models in `model_names` will be excluded. Additionally,
any model in `model_names` will also be plotted on the boxplot.

The parameter `ploc` determines where to place the plot on the figure. The parameter
`legend_text_width` determines the number of characters on each line in the legend.
"""
function Visualize.plot_boxplot!(
    fig,
    rmse_var::ClimaAnalysis.RMSEVariable;
    model_names = ["CliMA"],
    ploc = (1, 1),
    best_and_worst_category_name = "ANN",
    legend_text_width = 10,
)
    # Unit checking
    ClimaAnalysis.Leaderboard._unit_check(rmse_var)
    num_cats = length(rmse_var.category2index)
    # All models share the same unit (guaranteed by _unit_check above), so pick the first
    units = values(rmse_var.units) |> collect |> first
    # Title and labels for x-axis and y-axis
    ax = Makie.Axis(
        fig[ploc...],
        ylabel = "$(rmse_var.short_name) [$units]",
        xticks = (1:num_cats, ClimaAnalysis.category_names(rmse_var)),
        title = "Global RMSE $(rmse_var.short_name) [$units]",
    )
    # Set up for box plot: one category index per (model, category) RMSE entry
    cats = reduce(
        vcat,
        [
            fill(cat_val, length(rmse_var.model2index)) for
            cat_val in 1:length(rmse_var.category2index)
        ],
    )
    vals = reduce(vcat, rmse_var.RMSEs)
    # Filter out NaNs because we can't plot with NaNs
    not_nan_idices = findall(!isnan, vals)
    cats = cats[not_nan_idices]
    vals = vals[not_nan_idices]
    # Add box plot
    Makie.boxplot!(
        ax,
        cats,
        vals,
        whiskerwidth = 1,
        width = 0.35,
        mediancolor = :black,
        color = :gray,
        whiskerlinewidth = 1,
    )
    # Delete any model in model_names to exclude them when finding best and worst models
    rmse_var_delete =
        ClimaAnalysis.Leaderboard._delete_model(rmse_var, model_names...)
    # Find and plot best and worst models
    absolute_worst_values, absolute_worst_model_name =
        ClimaAnalysis.find_worst_single_model(
            rmse_var_delete,
            category_name = best_and_worst_category_name,
        )
    absolute_best_values, absolute_best_model_name =
        ClimaAnalysis.find_best_single_model(
            rmse_var_delete,
            category_name = best_and_worst_category_name,
        )
    best_pt = Makie.scatter!(
        ax,
        1:num_cats,
        absolute_best_values,
        label = absolute_best_model_name,
    )
    worst_pt = Makie.scatter!(
        ax,
        1:num_cats,
        absolute_worst_values,
        label = absolute_worst_model_name,
    )
    # Plotting the median model (invisible: only kept so it shows up in the legend)
    median_pt = Makie.scatter!(
        ax,
        1:num_cats,
        ClimaAnalysis.median(rmse_var),
        label = "Median",
        color = :black,
        marker = :hline,
        markersize = 10,
        visible = false,
    )
    # Keep track of points plotted by scatter! and names for plotting the legend
    # later
    pts_on_boxplot = [median_pt, best_pt, worst_pt]
    names_on_legend = vcat(
        ["Median", absolute_best_model_name, absolute_worst_model_name],
        model_names,
    )
    # Plot CliMA model and other models
    for model_name in model_names
        ClimaAnalysis.Leaderboard._model_name_check(rmse_var, model_name)
        if model_name == "CliMA"
            Makie.scatter!(
                ax,
                1:num_cats,
                rmse_var[model_name],
                label = model_name,
                marker = :star5,
                markersize = 20,
                color = :green,
            ) |> pt -> push!(pts_on_boxplot, pt)
        else
            Makie.scatter!(
                ax,
                1:num_cats,
                rmse_var[model_name],
                label = model_name,
                color = :red,
            ) |> pt -> push!(pts_on_boxplot, pt)
        end
    end
    # Add a new line character every `legend_text_width` characters to prevent legend from
    # overflowing onto the box plot
    # Soln from
    # https://stackoverflow.com/questions/40545980/insert-a-newline-character-every-10-characters-in-a-string-using-julia
    names_on_legend = map(
        name ->
            replace(name, Regex("(.{$legend_text_width})") => s"\1\n") |>
            (name -> rstrip(name, '\n')),
        names_on_legend,
    )
    # Hack to make legend appear better
    Makie.axislegend(ax, pts_on_boxplot, names_on_legend)
    Makie.scatter!(ax, [num_cats + 2.5], [0.1], markersize = 0.01)
end
"""
    Visualize.plot_leaderboard!(fig,
                                rmse_vars::ClimaAnalysis.RMSEVariable...;
                                ploc = (1, 1),
                                model_names = ["CliMA"],
                                best_category_name = "ANN")

Plot a heatmap over the categories and models. The models that appear are the best
model as found for the category `best_category_name` and any other models in
`model_names`. The root mean squared errors for each variable of interest are
normalized by dividing by the median root mean squared error of each variable.

The parameter `ploc` determines where to place the plot on the figure.
"""
function Visualize.plot_leaderboard!(
    fig,
    rmse_vars::ClimaAnalysis.RMSEVariable...;
    ploc = (1, 1),
    model_names = ["CliMA"],
    best_category_name = "ANN",
)
    # Check if rmse_model_vars all have the same categories
    categories_names = ClimaAnalysis.category_names.(rmse_vars)
    categories_same = length(unique(categories_names)) == 1
    categories_same ||
        error("Categories are not all the same across the RMSEVariable")
    rmse_var = first(rmse_vars)
    categ_names = ClimaAnalysis.category_names(rmse_var)
    num_variables = length(rmse_vars)
    num_boxes = length(categ_names) # number of categories
    num_models = 1 + length(model_names) # best model plus the other models in model_names
    # Initialize variables we need for storing RMSEs for plotting and short names for axis
    rmse_normalized_arr = zeros(num_boxes * num_models, num_variables)
    short_names = String[]
    # NOTE(review): rmse_vars is reversed, presumably so the first variable appears
    # as the top row of the heatmap — confirm against the y-axis orientation
    for (idx, var) in enumerate(reverse(rmse_vars))
        # Get all the short name of the rmse_vars
        push!(short_names, var.short_name)
        # Compute median and best values for RMSE
        med_vals = ClimaAnalysis.median(var)
        best_vals, _ = ClimaAnalysis.find_best_single_model(
            var,
            category_name = best_category_name,
        )
        # Find normalized values for the models we are interested in and the normalized best
        # value and store them
        normalized_vals = [var[model] ./ med_vals for model in model_names]
        normalized_vals = reduce(vcat, normalized_vals)
        rmse_normalized_arr[:, idx] =
            vcat(normalized_vals, best_vals ./ med_vals)'
    end
    # Finding the midpoint for placing labels
    start_x_tick = div(num_boxes, 2, RoundUp)
    ax_bottom_and_left = Makie.Axis(
        fig[ploc...],
        yticks = (1:length(short_names), short_names),
        xticks = (
            [start_x_tick, start_x_tick + num_boxes],
            vcat(model_names, ["Best model"]),
        ),
        aspect = num_boxes * num_models,
        xgridvisible = false,
        ygridvisible = false,
    )
    # Second (overlaid) axis only carries the category tick labels on top
    ax_top = Makie.Axis(
        fig[ploc...],
        xaxisposition = :top,
        xticks = (0.5:1.0:length(categ_names), categ_names),
        aspect = num_boxes * num_models,
        xgridvisible = false,
        ygridvisible = false,
    )
    Makie.hidespines!(ax_top)
    Makie.hideydecorations!(ax_top)
    colormap = Makie.Reverse(:RdYlGn)
    # Filter out NaNs here because we need to take the maximum and extrema for the
    # colorrange and limits
    rmse_no_nan_vec = rmse_normalized_arr |> vec |> filter(!isnan)
    Makie.heatmap!(
        ax_bottom_and_left,
        rmse_normalized_arr,
        colormap = colormap,
        # Trick to exclude the zeros
        lowclip = :white,
        colorrange = (1e-10, maximum(rmse_no_nan_vec)),
    )
    # Vertical separators between each model's group of categories
    for idx in eachindex(model_names)
        Makie.vlines!(ax_top, num_boxes * idx, color = :black, linewidth = 3.0)
    end
    # Place the colorbar in the column to the right of the heatmap
    row, col = ploc
    col += 1
    Makie.Colorbar(
        fig[row, col],
        limits = extrema(rmse_no_nan_vec),
        label = "RMSE/median(RMSE)",
        colormap = colormap,
    )
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 2601 | module ClimaAnalysisUnitfulExt
# ClimaAnalysisUnitfulExt is implemented as extension in case we decide to turn Unitful into
# an extension in the future, but, right now, everything is included directly in
# ClimaAnalysis.
import Unitful
import ClimaAnalysis: Var
"""
    _maybe_convert_to_unitful(value)

Try converting `value` to a `Unitful` object. If unsuccessful, just return it.
"""
function Var._maybe_convert_to_unitful(value)
    # Already parsed: nothing to do
    value isa Unitful.Units && return value
    # "" cannot be parsed but returns a wrong error (MethodError on lookup_units),
    # so we handle it manually
    value == "" && return value
    # This function is inherently type-unstable
    try
        return Unitful.uparse(value)
    catch exc
        # ParseError when it cannot be parsed
        # ArgumentError when symbols are not available
        if exc isa Base.Meta.ParseError || exc isa ArgumentError
            return value
        else
            rethrow(exc)
        end
    end
end
"""
    _converted_data(data, conversion_function)

Apply `conversion_function` element-wise to `data`.
"""
function _converted_data(data, conversion_function)
    return broadcast(conversion_function, data)
end
"""
    _converted_data_unitful(data, old_units, new_units)

Convert `data` from `old_units` to `new_units` with Unitful and return a plain
array with the same element type as the input.
"""
function _converted_data_unitful(data, old_units, new_units)
    # We add FT because sometimes convert changes the type and because
    # ustrip only reinterprets the given array
    FT = eltype(data)
    return FT.(Unitful.ustrip(Unitful.uconvert.(new_units, data * old_units)))
end
"""
    convert_units(var, new_units; conversion_function = nothing)

Return a new `OutputVar` with `data` converted to `new_units`.

When both the current and the new units are parseable by Unitful, the conversion is
performed automatically (and `conversion_function` is ignored with a warning).
Otherwise, an explicit `conversion_function` is required.
"""
function Var.convert_units(
    var::Var.OutputVar,
    new_units::AbstractString;
    conversion_function = nothing,
)
    has_unitful_units =
        Var.has_units(var) && (var.attributes["units"] isa Unitful.Units)
    new_units_maybe_unitful = Var._maybe_convert_to_unitful(new_units)
    new_units_are_unitful = new_units_maybe_unitful isa Unitful.Units
    if has_unitful_units && new_units_are_unitful
        # Automatic conversion is possible; a user-supplied function is redundant
        isnothing(conversion_function) ||
            @warn "Ignoring conversion_function, units are parseable."
        convert_function =
            data -> _converted_data_unitful(
                data,
                var.attributes["units"],
                new_units_maybe_unitful,
            )
    else
        isnothing(conversion_function) && error(
            "Conversion function required for var with non-parseable/absent units.",
        )
        convert_function = data -> _converted_data(data, conversion_function)
    end
    new_data = convert_function(var.data)
    new_attribs = copy(var.attributes)
    # The constructor will take care of converting new_units to Unitful
    new_attribs["units"] = new_units
    return Var.OutputVar(new_attribs, var.dims, var.dim_attributes, new_data)
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 3720 | """
The `Atmos` module contains functions that are primarily useful when working
with atmospheric simulations.
"""
module Atmos
import Interpolations as Intp
import ..OutputVar, ..arecompatible
import ..short_name
import ..altitude_name
"""
    _resample_column!(dest, var1d, origin_pressure, target_pressure)

Linearly interpolate `var1d` from `origin_pressure` to `target_pressure`, writing
the result into `dest`.

Note: Values outside of the range are linearly extrapolated.
"""
function _resample_column!(dest, var1d, origin_pressure, target_pressure)
    # Interpolations.jl requires increasing knots, but pressure is decreasing, so
    # we have to reverse both the knots and the values
    knots = reverse(origin_pressure)
    samples = reverse(var1d)
    interpolant = Intp.interpolate(
        (knots,),
        samples,
        Intp.Gridded(Intp.Linear()),
    )
    var1d_of_P = Intp.extrapolate(interpolant, Intp.Line())
    dest .= var1d_of_P.(target_pressure)
    return nothing
end
"""
    to_pressure_coordinates(var::OutputVar, pressure::OutputVar; target_pressure = nothing)

Change the vertical dimension of `var` to be in pressure coordinates.

If `target_pressure` is nothing, the target pressure levels are computed by
linearly sampling the interval `minimum(pressure), maximum(pressure)`. Then, for
each column in `var`, the values are linearly interpolated onto this new grid.
`target_pressure` can be set to a `Vector` to specify custom pressure levels.

The return value is a new `OutputVar` where the vertical dimension is pressure.

!!! important
    Values outside of the range are linearly extrapolated, so do not trust them
    too much!
"""
function to_pressure_coordinates(
    var::OutputVar,
    pressure::OutputVar;
    target_pressure = nothing,
)
    arecompatible(var, pressure) ||
        error("Pressure and variable are not compatible")
    z_name = altitude_name(var)
    z_index = var.dim2index[z_name]
    pressure_name = short_name(pressure)
    # First, we construct the target pressure grid. For this, we take the
    # extrema of pressure and divide the interval linearly with the same number
    # of points we originally had in z
    if isnothing(target_pressure)
        # TODO: Pick this more sensibly
        # TODO: Make it go from max to min? (This is not supported by Interpolations.jl...)
        target_pressure = range(
            minimum(pressure.data),
            maximum(pressure.data),
            length = length(var.dims[z_name]),
        )
    end
    # Then, we prepare the output variable
    ret_attributes = copy(var.attributes)
    TypeOfDims = typeof(var.dims)
    ret_dims = TypeOfDims(
        k != z_name ? k => v : pressure_name => target_pressure for
        (k, v) in var.dims
    )
    TypeOfDimAttributes = typeof(var.dim_attributes)
    ret_dim_attributes = TypeOfDimAttributes(
        k != z_name ? k => v : pressure_name => pressure.attributes for
        (k, v) in var.dim_attributes
    )
    num_dims = ndims(var.data)
    # Account for possible custom target_pressure
    ret_size = ntuple(
        i -> (i != z_index ? size(var.data)[i] : length(target_pressure)),
        num_dims,
    )
    ret_data = zeros(ret_size...)
    # We have to loop over all the possible columns
    ranges = [1:size(var.data)[i] for i in 1:num_dims if i != z_index]
    # Iterate over the Cartesian product of these ranges
    for idx in CartesianIndices(Tuple(ranges))
        # `idx` indexes the reduced (z-removed) dimensions, so dimensions past
        # z_index are shifted down by one. (The previous code used `idx[i]`
        # directly, which was only correct when z is the last dimension.)
        indices = ntuple(num_dims) do i
            i == z_index ? Colon() : (i < z_index ? idx[i] : idx[i - 1])
        end
        _resample_column!(
            view(ret_data, indices...),
            view(var.data, indices...),
            view(pressure.data, indices...),
            target_pressure,
        )
    end
    return OutputVar(ret_attributes, ret_dims, ret_dim_attributes, ret_data)
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 440 | module ClimaAnalysis
import Reexport: @reexport
include("Utils.jl")
import .Utils
include("Numerics.jl")
include("Var.jl")
@reexport using .Var
include("Sim.jl")
@reexport using .Sim
include("Leaderboard.jl")
@reexport using .Leaderboard
include("Visualize.jl")
@reexport using .Visualize
include("Atmos.jl")
# In case we want to turn Unitful into an extension
include("../ext/ClimaAnalysisUnitfulExt.jl")
end # module ClimaAnalysis
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 20465 | module Leaderboard
import OrderedCollections: OrderedDict
import NaNStatistics: nanmedian
export RMSEVariable,
model_names,
category_names,
rmse_units,
read_rmses,
getindex,
setindex!,
add_category,
add_model,
add_unit!,
find_best_single_model,
find_worst_single_model,
median
"""
    RMSEVariable

Holds root mean squared errors over multiple categories and models for a single
variable.
"""
struct RMSEVariable{
    FT <: AbstractFloat,
    S1 <: AbstractString,
    S2 <: AbstractString,
}
    "Short name of variable of interest"
    short_name::AbstractString
    "Map model name (like `CliMA`) to index"
    model2index::OrderedDict{S1, Int}
    "Map category name (like `ANN` and seasons) to index"
    category2index::OrderedDict{S2, Int}
    "Array of RMSEs (rows correspond to model names and columns correspond to categories)"
    RMSEs::Array{FT, 2}
    "Map model_name to units"
    units::Dict{S1, String}
end
"""
    RMSEVariable(short_name::String,
                 model_names::Vector{String},
                 category_names::Vector{String},
                 RMSEs,
                 units::Dict)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models in
`model_names`, the categories in `category_names`, the root mean squared errors in `RMSEs`,
and `units`.
"""
function RMSEVariable(
    short_name::String,
    model_names::Vector{String},
    category_names::Vector{String},
    RMSEs,
    units::Dict,
)
    # Check if the dimensions of model_names and category_names match the dimensions of RMSE array
    (length(model_names), length(category_names)) != size(RMSEs) && error(
        "The size of RMSEs ($(size(RMSEs))) does not fit the number of model names ($(length(model_names))) and categories ($(length(category_names)))",
    )
    # Check if RMSE is negative
    any(RMSEs .< 0.0) && error("RMSEs cannot be negative")
    # Check for uniqueness
    length(unique(model_names)) == length(model_names) ||
        error("Model names are not unique")
    length(unique(category_names)) == length(category_names) ||
        error("Category names are not unique")
    model2index = OrderedDict(model_names |> enumerate |> collect .|> reverse)
    category2index =
        OrderedDict(category_names |> enumerate |> collect .|> reverse)
    # Work on a copy so the caller's dictionary is not mutated by the
    # normalization below
    units = copy(units)
    # Add missing model_name and key in model_names so that each model present in
    # model_names is missing units or has an unit
    for model_name in model_names
        !haskey(units, model_name) && (units[model_name] = "")
    end
    # Delete model_name in units if they do not appear in model_names. We do not want to
    # have unnecessary units in the Dict. Collect the keys first so we do not
    # delete entries while iterating the dictionary
    for key in collect(keys(units))
        !(key in model_names) && delete!(units, key)
    end
    # Check number of model to unit pairs match the number of models
    # (was `$length(units)`, which interpolated the function name instead of the count)
    length(units) != length(model_names) && error(
        "The number of units for each model ($(length(units))) is not equal to the number of models ($(length(model_names)))",
    )
    return RMSEVariable(short_name, model2index, category2index, RMSEs, units)
end
"""
    RMSEVariable(short_name, model_names::Vector{String})

Construct a RMSEVariable with the `short_name` of the variable and the names of the
models in `model_names`.

The categories default to "ANN", "DJF", "MAM", "JJA", "SON". The root mean square
errors default to `NaN`. The unit for each model is missing, which is denoted by an
empty string.
"""
function RMSEVariable(short_name, model_names::Vector{String})
    default_categories = ["ANN", "DJF", "MAM", "JJA", "SON"]
    nan_rmses = fill(NaN, length(model_names), length(default_categories))
    # "" marks a missing unit for every model
    blank_units = Dict{valtype(model_names), String}(
        name => "" for name in model_names
    )
    return RMSEVariable(
        short_name,
        model_names,
        default_categories,
        nan_rmses,
        blank_units,
    )
end
"""
    RMSEVariable(short_name, model_names::Vector{String}, units::Dict)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models
in `model_names`, and the units provided in the dictionary `units` that maps model name
to unit.

The categories default to "ANN", "DJF", "MAM", "JJA", "SON". The root mean square
errors default to `NaN`. Any model missing from the dictionary `units` will have a
missing unit, which is denoted by an empty string.
"""
function RMSEVariable(short_name, model_names::Vector{String}, units::Dict)
    default_categories = ["ANN", "DJF", "MAM", "JJA", "SON"]
    nan_rmses = fill(NaN, length(model_names), length(default_categories))
    return RMSEVariable(
        short_name,
        model_names,
        default_categories,
        nan_rmses,
        units,
    )
end
"""
    RMSEVariable(short_name,
                 model_names::Vector{String},
                 category_names::Vector{String},
                 units::Dict)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models
in `model_names`, the categories in `category_names`, and the units provided in the
dictionary `units` that maps model name to unit.

The root mean square errors default to `NaN`. Any model missing from the dictionary
`units` will have a missing unit, which is denoted by an empty string.
"""
function RMSEVariable(
    short_name,
    model_names::Vector{String},
    category_names::Vector{String},
    units::Dict,
)
    nan_rmses = fill(NaN, length(model_names), length(category_names))
    return RMSEVariable(
        short_name,
        model_names,
        category_names,
        nan_rmses,
        units,
    )
end
"""
    RMSEVariable(short_name::String,
                 model_names::Vector{String},
                 category_names::Vector{String},
                 RMSEs,
                 units::String)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models
in `model_names`, the categories in `category_names`, the root mean squared errors in
`RMSEs`, and units which map each model name to `units`.

This is useful if all the models share the same unit.
"""
function RMSEVariable(
    short_name::String,
    model_names::Vector{String},
    category_names::Vector{String},
    RMSEs,
    units::String,
)
    # Every model shares the same unit string
    shared_units = Dict(name => units for name in model_names)
    return RMSEVariable(
        short_name,
        model_names,
        category_names,
        RMSEs,
        shared_units,
    )
end
"""
    RMSEVariable(short_name, model_names::Vector{String}, units::String)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models
in `model_names`, and units which map each model name to `units`.

The categories default to "ANN", "DJF", "MAM", "JJA", "SON". The root mean square
errors default to `NaN`.

This is useful if all the models share the same unit.
"""
function RMSEVariable(short_name, model_names::Vector{String}, units::String)
    # Every model shares the same unit string
    shared_units = Dict(name => units for name in model_names)
    return RMSEVariable(short_name, model_names, shared_units)
end
"""
    RMSEVariable(short_name,
                 model_names::Vector{String},
                 category_names::Vector{String},
                 units::String)

Construct a RMSEVariable with the `short_name` of the variable, the names of the models
in `model_names`, the categories in `category_names`, and units which map each model
name to `units`.

The root mean square errors default to `NaN`.

This is useful if all the models share the same unit.
"""
function RMSEVariable(
    short_name,
    model_names::Vector{String},
    category_names::Vector{String},
    units::String,
)
    nan_rmses = fill(NaN, length(model_names), length(category_names))
    return RMSEVariable(
        short_name,
        model_names,
        category_names,
        nan_rmses,
        units,
    )
end
"""
    model_names(rmse_var::RMSEVariable)

Return all the model names in `rmse_var`.
"""
function model_names(rmse_var::RMSEVariable)
    return collect(keys(rmse_var.model2index))
end
"""
    category_names(rmse_var::RMSEVariable)

Return all the category names in `rmse_var`.
"""
function category_names(rmse_var::RMSEVariable)
    return collect(keys(rmse_var.category2index))
end
"""
    rmse_units(rmse_var::RMSEVariable)

Return the dictionary mapping each model in `rmse_var` to its unit.
"""
function rmse_units(rmse_var::RMSEVariable)
    return rmse_var.units
end
"""
    read_rmses(csv_file::String, short_name::String; units = nothing)

Read a CSV file and create a RMSEVariable with the `short_name` of the variable.

The format of the CSV file should have a header consisting of the entry "model_name"
(or any other text as it is ignored by the function) and the rest of the entries should
be the category names. Each row after the header should start with the model name and
the root mean squared errors for each category for that model. The entries of the CSV
file should be separated by commas.

The parameter `units` can be a dictionary mapping model name to unit or a string. If
`units` is a string, then units will be the same across all models. If `units` is
`nothing`, then the unit is missing for each model, which is denoted by an empty
string.
"""
function read_rmses(csv_file::String, short_name::String; units = nothing)
    # Initialize variables we need to construct RMSEVariable
    model_names = Vector{String}()
    model_rmse_vec = []
    category_names = nothing
    open(csv_file, "r") do io
        header = readline(io)
        # Get categories (e.g. DJF, MAM, JJA, SON, ANN)
        category_names = String.(split(header, ','))
        # get rid of the first column name which is the column named "model_name"
        category_names |> popfirst!
        # Process each line
        for (line_num, line) in enumerate(eachline(io))
            # Split the line by comma
            fields = split(line, ',')
            # Check if any entry is missing in the CSV file
            length(fields) != (length(category_names) + 1) &&
                error("Missing RMSEs for line $(line_num + 1) in CSV file")
            # Grab model name
            model_name = fields[1]
            # the rest of the row is the rmse for each category
            model_rmse = map(x -> parse(Float64, x), fields[2:end])
            push!(model_names, model_name)
            push!(model_rmse_vec, model_rmse)
        end
    end
    # Stack per-model rows into a (num_models, num_categories) matrix
    model_rmses = stack(model_rmse_vec, dims = 1)
    # Normalize `units` to a Dict{String, String}: empty strings when absent,
    # the same string for every model when a single String is given
    isnothing(units) && (
        units = Dict{valtype(model_names), String}([
            (model_name, "") for model_name in model_names
        ])
    )
    units isa String && (
        units = Dict{valtype(model_names), String}([
            model_name => units for model_name in model_names
        ])
    )
    return RMSEVariable(
        short_name,
        model_names,
        category_names,
        model_rmses,
        units,
    )
end
"""
    _index_convert(key2index, ::Colon)

Convert the colon symbol into the full list of indices for indexing.
"""
_index_convert(key2index, ::Colon) = collect(values(key2index))
"""
    _index_convert(key2index, key::AbstractString)

Convert a string key into its index for indexing.
"""
function _index_convert(key2index, key::AbstractString)
    haskey(key2index, key) ||
        error("Key ($key) is not present in ($(keys(key2index)))")
    return key2index[key]
end
"""
    _index_convert(key2index,
                   key_vec::AbstractVector{S})
                   where {S <: AbstractString}

Convert a vector of string keys to indices for indexing.
"""
function _index_convert(
    key2index,
    # Renamed from `keys`: that name shadowed `Base.keys`, so the error path
    # below called the Vector as a function (MethodError) instead of producing
    # the intended message
    key_vec::AbstractVector{S},
) where {S <: AbstractString}
    for key in key_vec
        !haskey(key2index, key) &&
            error("Key ($key) is not present in ($(keys(key2index)))")
    end
    return [key2index[key] for key in key_vec]
end
"""
    _index_convert(key2index, index::Integer)

Validate that an integer is a legal index and return it unchanged.
"""
function _index_convert(key2index, index::Integer)
    index in values(key2index) ||
        error("Index ($index) is not present in ($(values(key2index)))")
    return index
end
"""
    _index_convert(key2index,
                   indices::AbstractVector{I})
                   where {I <: Integer}

Validate that each integer in `indices` is a legal index and return them unchanged.
"""
function _index_convert(
    key2index,
    indices::AbstractVector{I},
) where {I <: Integer}
    foreach(indices) do index
        index in values(key2index) ||
            error("Index ($index) is not present in ($(values(key2index)))")
    end
    return indices
end
"""
    Base.getindex(rmse_var::RMSEVariable, model_name, category)

Return a subset of the array holding the root mean square errors as specified by
`model_name` and `category`. Supports indexing by `String` and `Int`.
"""
function Base.getindex(rmse_var::RMSEVariable, model_name, category)
    row = _index_convert(rmse_var.model2index, model_name)
    col = _index_convert(rmse_var.category2index, category)
    return rmse_var.RMSEs[row, col]
end
"""
    Base.getindex(rmse_var::RMSEVariable, model_name::String)

Return the row of root mean square errors for `model_name` (all categories).
Supports indexing by `String`; linear indexing is not supported.
"""
Base.getindex(rmse_var::RMSEVariable, model_name::String) =
    rmse_var[model_name, :]
"""
Base.setindex!(rmse_var::RMSEVariable, rmse, model_name, category)
Store a value or values from an array in the array of root mean squared errors in
`rmse_var`. Support indexing by `String` and `Int`.
"""
function Base.setindex!(rmse_var::RMSEVariable, rmse, model_name, category)
model_idx = _index_convert(rmse_var.model2index, model_name)
cat_idx = _index_convert(rmse_var.category2index, category)
rmse_var.RMSEs[model_idx, cat_idx] = rmse
end
"""
Base.setindex!(rmse_var::RMSEVariable, rmse, model_name::String)
Store a value or values from an array into the array of root mean squared errors in
`rmse_var`. Support indexing by `String`. Do not support linear indexing.
"""
function Base.setindex!(rmse_var::RMSEVariable, rmse, model_name::String)
model_idx = _index_convert(rmse_var.model2index, model_name)
rmse_var.RMSEs[model_idx, :] = rmse
end
"""
add_category(rmse_var::RMSEVariable, categories::String...)
Add one or more categories named `categories` to `rmse_var`.
"""
function add_category(rmse_var::RMSEVariable, categories::String...)
# Add new category
categ_names = category_names(rmse_var)
push!(categ_names, categories...)
# Add new column
mdl_names = model_names(rmse_var)
num_mdl_names = length(mdl_names)
nan_vecs = (fill(NaN, num_mdl_names) for _ in categories)
rmses = hcat(rmse_var.RMSEs, nan_vecs...)
return RMSEVariable(
rmse_var.short_name,
mdl_names,
categ_names,
rmses,
rmse_var.units |> deepcopy,
)
end
"""
add_model(rmse_var::RMSEVariable, models::String...)
Add one or more models named `models` to `rmse_var`.
"""
function add_model(rmse_var::RMSEVariable, models::String...)
# Add new model name
mdl_names = model_names(rmse_var)
push!(mdl_names, models...)
# Add new row
categ_names = category_names(rmse_var)
num_categ_names = length(categ_names)
nan_vecs = (fill(NaN, num_categ_names)' for _ in models)
rmses = vcat(rmse_var.RMSEs, nan_vecs...)
# Add missing units for model
units = rmse_var.units |> deepcopy
for name in models
units[name] = ""
end
return RMSEVariable(
rmse_var.short_name,
mdl_names,
categ_names,
rmses,
units,
)
end
"""
_delete_model(rmse_var::RMSEVariable, models::String...)
Delete one or more models named `models` from `rmse_var`.
"""
function _delete_model(rmse_var::RMSEVariable, models::String...)
# Delete model name
mdl_names = model_names(rmse_var)
num_rows = length(mdl_names)
setdiff!(mdl_names, models)
# Delete model
rmses = rmse_var.RMSEs |> copy
indices_to_delete = (rmse_var.model2index[model] for model in models)
rmses = rmse_var.RMSEs[setdiff(1:num_rows, indices_to_delete), :]
# Delete unit for model
units = rmse_var.units |> deepcopy
for name in models
delete!(units, name)
end
return RMSEVariable(
rmse_var.short_name,
mdl_names,
category_names(rmse_var),
rmses,
units,
)
end
"""
_model_name_check(rmse_var::RMSEVariable, model_name)
Check if `model_name` is present in the model names of `rmse_var`.
Return nothing if `model_name` is present in the model names of `rmse_var`. Otherwise,
return an error.
"""
function _model_name_check(rmse_var::RMSEVariable, model_name)
mdl_names = model_names(rmse_var)
(model_name in mdl_names) ||
error("Model name ($model_name) is not in $mdl_names")
return nothing
end
"""
add_unit!(rmse_var::RMSEVariable, model_name, unit)
Add a unit named `unit` to a model named `model_name` in `rmse_var`.
"""
function add_unit!(rmse_var::RMSEVariable, model_name, unit)
_model_name_check(rmse_var, model_name)
rmse_var.units[model_name] = unit
return nothing
end
"""
add_unit!(rmse_var::RMSEVariable, model_name2unit::Dict)
Add all model name and unit pairs in the dictionary `model_name2unit` to `rmse_var`.
"""
function add_unit!(rmse_var::RMSEVariable, model_name2unit::Dict)
for (model_name, unit) in model_name2unit
_model_name_check(rmse_var, model_name)
rmse_var.units[model_name] = unit
end
return nothing
end
"""
_unit_check(rmse_var::RMSEVariable)
Return nothing if units are not missing and units are the same across all models. Otherwise,
return an error.
"""
function _unit_check(rmse_var::RMSEVariable)
units = values(rmse_var.units) |> collect
unit_equal = all(unit -> unit == first(units), units)
(!unit_equal || first(units) == "") &&
error("Units are not the same across all models or units are missing")
return nothing
end
"""
find_best_single_model(rmse_var::RMSEVariable; category_name = "ANN")
Return a tuple of the best single model and the name of the model. Find the best single
model using the root mean squared errors of the category `category_name`.
"""
function find_best_single_model(rmse_var::RMSEVariable; category_name = "ANN")
_unit_check(rmse_var)
categ_names = category_names(rmse_var)
ann_idx = categ_names |> (x -> findfirst(y -> (y == category_name), x))
isnothing(ann_idx) &&
error("The category $category_name does not exist in $categ_names")
rmse_vec = rmse_var[:, ann_idx] |> copy
# Replace all NaN with Inf so that we do not get NaN as a result
# We do this instead of filtering because if we filter, then we need to keep track of
# original indices
replace!(rmse_vec, NaN => Inf)
_, model_idx = findmin(rmse_vec)
mdl_names = model_names(rmse_var)
return rmse_var[model_idx, :], mdl_names[model_idx]
end
"""
find_worst_single_model(rmse_var::RMSEVariable; category_name = "ANN")
Return a tuple of the worst single model and the name of the model. Find the worst single
model using the root mean squared errors of the category `category_name`.
"""
function find_worst_single_model(rmse_var::RMSEVariable; category_name = "ANN")
_unit_check(rmse_var)
categ_names = category_names(rmse_var)
ann_idx = categ_names |> (x -> findfirst(y -> (y == category_name), x))
isnothing(ann_idx) && error("Annual does not exist in $categ_names")
rmse_vec = rmse_var[:, ann_idx] |> copy
# Replace all NaN with Inf so that we do not get NaN as a result
# We do this instead of filtering because if we filter, then we need to keep track of
# original indices
replace!(rmse_vec, NaN => -Inf)
_, model_idx = findmax(rmse_vec)
mdl_names = model_names(rmse_var)
return rmse_var[model_idx, :], mdl_names[model_idx]
end
"""
median(rmse_var::RMSEVariable)
Find the median using the root mean squared errors across all categories.
Any `NaN` is ignored in computing the median.
"""
function median(rmse_var::RMSEVariable)
_unit_check(rmse_var)
# Drop dimension so that size is (n,) instead of (1,n) so that it is consistent with the
# size of the arrays returned from find_worst_single_model and find_best_single_model
return dropdims(nanmedian(rmse_var.RMSEs, dims = 1), dims = 1)
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 4311 | module Numerics
import ..Utils: _isequispaced
"""
_integrate_lon(data::AbstractArray, lon::AbstractVector; dims)
Integrate out longitude from `data`. `data` has to be discretized on `lon`.
`dims` indicates which axis of `data` is longitude.
If the points are equispaced, it is assumed that each point correspond to the midpoint of a
cell which results in rectangular integration using the midpoint rule. Otherwise, the
integration being done is rectangular integration using the left endpoints. The unit for
longitude should be degrees.
"""
function _integrate_lon(data::AbstractArray, lon::AbstractVector; dims)
length(lon) == 1 &&
error("Cannot integrate when longitude is a single point")
_isequispaced(lon) ?
int_weights = _integration_weights_lon_equispaced(lon) :
int_weights = _integration_weights_lon_left(lon)
return _integrate_over_angle(data, lon, dims, int_weights)
end
"""
_integrate_lat(data::AbstractArray, lat::AbstractVector; dims)
Integrate out latitude from `data`. `data` has to be discretized on `lat`.
`dims` indicates which axis of `data` is latitude.
If the points are equispaced, it is assumed that each point correspond to the midpoint of a
cell which results in rectangular integration using the midpoint rule. Otherwise, the
integration being done is rectangular integration using the left endpoints. The unit for
latitude should be degrees.
"""
function _integrate_lat(data::AbstractArray, lat::AbstractVector; dims)
length(lat) == 1 &&
error("Cannot integrate when latitude is a single point")
_isequispaced(lat) ?
int_weights = _integration_weights_lat_equispaced(lat) :
int_weights = _integration_weights_lat_left(lat)
return _integrate_over_angle(data, lat, dims, int_weights)
end
"""
_integrate_over_angle(
data::AbstractArray,
angle_arr::AbstractVector,
angle_idx,
int_weights::AbstractVector,
)
Integrate out angle (latitude or longitude) from `data` using the weights `int_weights`.
`data` has to be discretized on `angle_arr`.
`angle_idx` indicates which axis of `data` is angle.
"""
function _integrate_over_angle(
data::AbstractArray,
angle_arr::AbstractVector,
angle_idx,
int_weights,
)
# Reshape to add extra dimensions for int_weights for broadcasting if needed
size_to_reshape =
(i == angle_idx ? length(int_weights) : 1 for i in 1:ndims(data))
int_weights = reshape(int_weights, size_to_reshape...)
int_on_angle = sum(data .* int_weights, dims = angle_idx)
return int_on_angle
end
"""
_integration_weights_lon_left(lon)
Return integration weights for rectangular integration using left endpoints for integrating
along longitude.
"""
function _integration_weights_lon_left(lon)
# This is where we use the assumption that units are degrees
d_lon = deg2rad.(diff(lon))
# We are doing integration using the left endpoints, so we weight the rightmost endpoint
# zero so that it make no contribution to the integral
push!(d_lon, zero(eltype(d_lon)))
return d_lon
end
"""
_integration_weights_lat_left(lat)
Return integration weights for rectangular integration using left endpoints for integrating
along latitude.
"""
function _integration_weights_lat_left(lat)
d_lat = deg2rad.(diff(lat))
# We are doing integration using the left endpoints, so we weight the rightmost endpoint
# zero so that it make no contribution to the integral
push!(d_lat, zero(eltype(d_lat)))
cos_lat = cosd.(lat)
return d_lat .* cos_lat
end
"""
_integration_weights_lon_equispaced(lon)
Return integration weights for rectangular integration when the points are equispaced for
integrating along longitude.
"""
function _integration_weights_lon_equispaced(lon)
# This is where we use the assumption that units are degrees
# Use fill to make a zero dimensional array so reshaping is possible
return fill(deg2rad(lon[begin + 1] - lon[begin]))
end
"""
_integration_weights_lat_equispaced(lat)
Return integration weights for rectangular integration when the points are equispaced for
integrating along latitude.
"""
function _integration_weights_lat_equispaced(lat)
d_lat = deg2rad.(lat[begin + 1] - lat[begin])
cos_lat = cosd.(lat)
return d_lat .* cos_lat
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 6263 | module Sim
import Base: get
export SimDir, available_vars, available_reductions, available_periods
import ..Utils
import ..Var: read_var
"""
SimDir(simulation_path::String)
Object that describes all the `ClimaAtmos` output found in the given `simulation_path`.
"""
struct SimDir{DV <: Dict, DOV <: Dict}
"Path where the output data is stored"
simulation_path::String
"Dictionary of dictionaries that maps variables/reductions/periods to files"
variable_paths::DV
"Dictionary of dictionaries that maps variables/reductions/periods to OutputVars"
vars::DOV
"List of files that ClimaAtmos knows how to process"
allfiles::Set{String}
end
# Scan `simulation_path` and build a SimDir indexing every recognized NetCDF
# output file by short name, reduction, and period.
function SimDir(simulation_path::String)
    paths_by_var = Dict()
    vars_by_var = Dict()
    known_files = Set{String}()
    for fname in readdir(simulation_path)
        parsed = Utils.match_nc_filename(fname)
        # Skip files that do not follow the ClimaAtmos naming convention
        isnothing(parsed) && continue
        short_name, period, reduction = parsed
        full_path = joinpath(simulation_path, fname)
        # Build three layers of nested dictionaries mapping
        # short name -> reduction -> period to the file that contains that
        # chain, e.g. {"ta" : {"max": {"6.0h" => file.nc}}}
        reductions_for_var = get!(paths_by_var, short_name, Dict())
        periods_for_reduction = get!(reductions_for_var, reduction, Dict())
        periods_for_reduction[period] = full_path
        # Same structure for the (lazily read) OutputVars
        var_reductions = get!(vars_by_var, short_name, Dict())
        var_periods = get!(var_reductions, reduction, Dict())
        var_periods[period] = nothing
        push!(known_files, full_path)
    end
    return SimDir(simulation_path, paths_by_var, vars_by_var, known_files)
end
"""
available_vars(simdir::SimDir)
Return the short names of the variables found in the given `simdir`.
"""
available_vars(simdir::SimDir) = keys(simdir.vars) |> Set
"""
available_reductions(simdir::SimDir, short_name::String)
Return the reductions available for the given variable in the given `simdir`.
"""
function available_reductions(simdir::SimDir; short_name::String)
if !(short_name in available_vars(simdir))
error(
"Variable $short_name not found. Available: $(available_vars(simdir))",
)
end
return keys(simdir.vars[short_name]) |> Set
end
"""
available_periods(simdir::SimDir, short_name::String, reduction::String)
Return the periods associated to the given variable and reduction.
"""
function available_periods(
simdir::SimDir;
short_name::String,
reduction::String,
)
if !(reduction in available_reductions(simdir; short_name))
error(
"Reduction $reduction not available for $short_name. Available: $(available_reductions(simdir; short_name))",
)
end
return keys(simdir.vars[short_name][reduction]) |> Set
end
# Print a human-readable tree of the available variables, reductions, and
# periods in `simdir`.
function Base.summary(io::IO, simdir::SimDir)
    print(io, "Output directory: $(simdir.simulation_path)\n")
    print(io, "Variables:")
    for short_name in available_vars(simdir)
        print(io, "\n- $short_name")
        for reduction in available_reductions(simdir; short_name)
            periods = available_periods(simdir; short_name, reduction)
            print(io, "\n    $reduction")
            print(io, " (", join(periods, ", "), ")")
        end
    end
end
"""
get(simdir::SimDir;
short_name,
reduction = nothing,
period = nothing)
Return a `OutputVar` for the corresponding combination of `short_name`, `reduction`,
and `period` (if it exists).
The variable is read only once and saved into the `simdir`.
Keyword arguments
==================
When passing `nothing` to `reduction` and `period`, `ClimaAnalysis` will try to
automatically deduce the value. An error will be thrown if this is not possible.
For instance, if the simulation has only one `ta`, you do not need to specify `short_name`,
`reduction`, and `period` (`short_name` is enough). Similarly, if there is only one
`ta_average` (ie, not multiple periods), `short_name` and `reduction` will be enough.
"""
function get(
simdir::SimDir;
short_name::String,
reduction::Union{String, Nothing} = nothing,
period::Union{String, Nothing} = nothing,
)
if isnothing(reduction)
reductions = available_reductions(simdir; short_name)
length(reductions) == 1 || error(
"Found multiple reductions for $short_name: $reductions. You have to specify it.",
)
reduction = pop!(reductions)
end
if isnothing(period)
periods = available_periods(simdir; short_name, reduction)
length(periods) == 1 || error(
"Found multiple periods for $short_name: $periods. You have to specify it.",
)
period = pop!(periods)
else
if !(period in available_periods(simdir; short_name, reduction))
error(
"Period $period not available for $short_name and reduction $reduction. " *
"Available: $(available_periods(simdir; short_name, reduction))",
)
end
end
# Variable has not been read before. Read it now.
if isnothing(simdir.vars[short_name][reduction][period])
file_path = simdir.variable_paths[short_name][reduction][period]
simdir.vars[short_name][reduction][period] = read_var(file_path)
end
return simdir.vars[short_name][reduction][period]
end
"""
get(simdir::SimDir, short_name)
If only one reduction and period exist for `short_name`, return the corresponding
`OutputVar`.
"""
function get(simdir::SimDir, short_name::String)
return get(simdir; short_name, reduction = nothing, period = nothing)
end
"""
isempty(simdir::SimDir)
Check if the given SimDir contains OutputVars.
"""
Base.isempty(simdir::SimDir) = isempty(simdir.vars)
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 11614 | module Utils
export match_nc_filename,
squeeze, nearest_index, kwargs, seconds_to_prettystr, warp_string
import Dates
"""
match_nc_filename(filename::String)
Return `short_name`, `period`, `reduction` extracted from the filename, if matching the
expected convention.
The convention is: `shortname_(period)_reduction.nc`, with `period` being optional.
Examples
=========
```jldoctest
julia> match_nc_filename("bob")
```
```jldoctest
julia> match_nc_filename("ta_1d_average.nc")
("ta", "1d", "average")
```
```jldoctest
julia> match_nc_filename("pfull_6.0m_max.nc")
("pfull", "6.0m", "max")
```
```jldoctest
julia> match_nc_filename("hu_inst.nc")
("hu", nothing, "inst")
```
"""
function match_nc_filename(filename::String)
# Let's unpack this regular expression to find files names like "orog_inst.nc" or
# "ta_3.0h_average.nc" and extract information from there.
# ^: Matches the beginning of the string
# (\w+?): Matches one or more word characters (letters, numbers, or underscore)
# non-greedily and captures it as the first group (variable name)
# _: Matches the underscore separating the variable name and the optional time
# resolution.
# ((?:[0-9]|m|M|d|s|y|_|\.)*?): Matches zero or more occurrences of the allowed
# characters (digits, time units, underscore, or dot) non-greedily and captures the
# entire time resolution string as the second group
# _?: Matches an optional underscore (to handle cases where there's no time resolution)
# ([a-zA-Z0-9]+): Matches one or more alphanumeric characters and captures it as the
# third group (statistic)
# \.nc: Matches the literal ".nc" file extension
# $: Matches the end of the string
re = r"^(\w+?)_((?:[0-9]|m|M|d|s|y|h|_|\.)*?)_?([a-zA-Z0-9]+)\.nc$"
m = match(re, filename)
if !isnothing(m)
# m.captures returns `SubString`s (or nothing). We want to have actual `String`s (or
# nothing) so that we can assume we have `String`s everywhere. We also take care of
# the case where the period is matched to an empty string and return nothing instead
return Tuple(
(isnothing(cap) || cap == "") ? nothing : String(cap) for
cap in m.captures
)
else
return nothing
end
end
"""
squeeze(A :: AbstractArray; dims)
Return an array that has no dimensions with size 1.
When an iterable `dims` is passed, only try to squeeze the given `dim`ensions.
Examples
=========
```jldoctest
julia> A = [[1 2] [3 4]];
julia> size(A)
(1, 4)
julia> A_squeezed = squeeze(A);
julia> size(A_squeezed)
(4,)
julia> A_not_squeezed = squeeze(A; dims = (2, ));
julia> size(A_not_squeezed)
(1, 4)
```
"""
function squeeze(A::AbstractArray; dims = nothing)
isnothing(dims) && (dims = Tuple(1:length(size(A))))
# TODO: (Refactor)
#
# Find a cleaner way to identify `keepdims`
dims_to_drop = Tuple(
dim for (dim, len) in enumerate(size(A)) if dim in dims && len == 1
)
keepdims = Tuple(
len for (dim, len) in enumerate(size(A)) if !(dim in dims_to_drop)
)
# We use reshape because of
# https://stackoverflow.com/questions/52505760/dropping-singleton-dimensions-in-julia
return reshape(A, keepdims)
end
"""
nearest_index(A::AbstractArray, val)
Return the index in `A` closest to the given `val`.
Examples
=========
```jldoctest
julia> A = [-1, 0, 1, 2, 3, 4, 5];
julia> nearest_index(A, 3)
5
julia> nearest_index(A, 0.1)
2
```
"""
function nearest_index(A::AbstractArray, val)
val < minimum(A) && return findmin(A)[2]
val > maximum(A) && return findmax(A)[2]
return findmin(A -> abs(A - val), A)[2]
end
"""
kwargs(; kwargs...)
Convert keyword arguments in a dictionary that maps `Symbol`s to values.
Useful to pass keyword arguments to different constructors in a function.
Examples
=========
```jldoctest
julia> kwargs(a = 1)
pairs(::NamedTuple) with 1 entry:
:a => 1
```
"""
kwargs(; kwargs...) = kwargs
"""
seconds_to_prettystr(seconds::Real)
Convert the given `seconds` into a string with rich time information.
One year is defined as having 365 days.
Examples
=========
```jldoctest
julia> seconds_to_prettystr(10)
"10s"
julia> seconds_to_prettystr(600)
"10m"
julia> seconds_to_prettystr(86400)
"1d"
julia> seconds_to_prettystr(864000)
"10d"
julia> seconds_to_prettystr(864010)
"10d 10s"
julia> seconds_to_prettystr(24 * 60 * 60 * 365 + 1)
"1y 1s"
```
"""
function seconds_to_prettystr(seconds::Real)
time = String[]
years, rem_seconds = divrem(seconds, 24 * 60 * 60 * 365)
days, rem_seconds = divrem(rem_seconds, 24 * 60 * 60)
hours, rem_seconds = divrem(rem_seconds, 60 * 60)
minutes, seconds = divrem(rem_seconds, 60)
# At this point, days, hours, minutes, seconds have to be integers.
# Let us force them to be such so that we can have a consistent string output.
years, days, hours, minutes = map(Int, (years, days, hours, minutes))
years > 0 && push!(time, "$(years)y")
days > 0 && push!(time, "$(days)d")
hours > 0 && push!(time, "$(hours)h")
minutes > 0 && push!(time, "$(minutes)m")
seconds > 0 && push!(time, "$(seconds)s")
return join(time, " ")
end
"""
warp_string(str::AbstractString; max_width = 70)
Return a string where each line is at most `max_width` characters or less
or at most one word.
Examples
=========
```jldoctest
julia> warp_string("space", max_width = 5)
"space"
julia> warp_string("space", max_width = 4)
"space"
julia> warp_string("\\tspace ", max_width = 4)
"space"
julia> warp_string("space space", max_width = 5)
"space\\nspace"
julia> warp_string("space space", max_width = 4)
"space\\nspace"
julia> warp_string("\\n space \\n space", max_width = 4)
"space\\nspace"
```
"""
function warp_string(str::AbstractString; max_width = 70)
return_str = ""
current_width = 0
for word in split(str, isspace)
word_width = length(word)
if word_width + current_width <= max_width
return_str *= "$word "
current_width += word_width + 1
else
# Ensure that spaces never precede newlines
return_str = rstrip(return_str)
return_str *= "\n$word "
current_width = word_width + 1
end
end
# Remove new line character when the first word is longer than
# `max_width` characters and remove leading and trailing
# whitespace
return strip(lstrip(return_str, '\n'))
end
"""
split_by_season(dates::AbstractArray{<: Dates.DateTime})
Return four vectors with `dates` split by seasons.
The months of the seasons are March to May, June to August, September to November, and
December to February. The order of the tuple is MAM, JJA, SON, and DJF.
Examples
=========
```jldoctest
julia> import Dates
julia> dates = [Dates.DateTime(2024, 1, 1), Dates.DateTime(2024, 3, 1), Dates.DateTime(2024, 6, 1), Dates.DateTime(2024, 9, 1)];
julia> split_by_season(dates)
([Dates.DateTime("2024-03-01T00:00:00")], [Dates.DateTime("2024-06-01T00:00:00")], [Dates.DateTime("2024-09-01T00:00:00")], [Dates.DateTime("2024-01-01T00:00:00")])
```
"""
function split_by_season(dates::AbstractArray{<:Dates.DateTime})
MAM, JJA, SON, DJF = Vector{Dates.DateTime}(),
Vector{Dates.DateTime}(),
Vector{Dates.DateTime}(),
Vector{Dates.DateTime}()
for date in dates
if Dates.Month(3) <= Dates.Month(date) <= Dates.Month(5)
push!(MAM, date)
elseif Dates.Month(6) <= Dates.Month(date) <= Dates.Month(8)
push!(JJA, date)
elseif Dates.Month(9) <= Dates.Month(date) <= Dates.Month(11)
push!(SON, date)
else
push!(DJF, date)
end
end
return (MAM, JJA, SON, DJF)
end
"""
_isequispaced(arr::Vector)
Return whether the array is equispaced or not.
Examples
=========
```jldoctest
julia> Utils._isequispaced([1.0, 2.0, 3.0])
true
julia> Utils._isequispaced([0.0, 2.0, 3.0])
false
```
"""
function _isequispaced(arr::Vector)
return all(diff(arr) .≈ arr[begin + 1] - arr[begin])
end
"""
time_to_date(start_date::Dates.DateTime, time::AbstractFloat)
Convert the given time to a calendar date starting from `start_date`.
Examples
=========
```jldoctest
julia> import Dates
julia> Utils.time_to_date(Dates.DateTime(2013, 7, 1, 12), 86400.0)
2013-07-02T12:00:00
julia> Utils.time_to_date(Dates.DateTime(2013, 7, 1, 12), 3600.0)
2013-07-01T13:00:00
julia> Utils.time_to_date(Dates.DateTime(2013, 7, 1, 12), 60.0)
2013-07-01T12:01:00
julia> Utils.time_to_date(Dates.DateTime(2013, 7, 1, 12), 1.0)
2013-07-01T12:00:01
```
"""
function time_to_date(start_date::Dates.DateTime, time::AbstractFloat)
# We go through milliseconds to allow fractions of a second (otherwise, Second(0.8)
# would fail). Milliseconds is the level of resolution that one gets when taking the
# difference between two DateTimes. In addition to this, we add a round to account for
# floating point errors. If the floating point error is small enough, round will correct
# it.
milliseconds = Dates.Millisecond.(round.(1_000 * time))
return start_date + milliseconds
end
"""
date_to_time(reference_date::Dates.DateTime, date::Dates.DateTime)
Convert the given calendar date to a time (in seconds) where t=0 is `reference_date`.
Examples
=========
```jldoctest
julia> import Dates
julia> Utils.date_to_time(Dates.DateTime(2013, 7, 1, 12), Dates.DateTime(2013, 7, 2, 12))
86400.0
julia> Utils.date_to_time(Dates.DateTime(2013, 7, 1, 12), Dates.DateTime(2013, 7, 1, 13))
3600.0
julia> Utils.date_to_time(Dates.DateTime(2013, 7, 1, 12), Dates.DateTime(2013, 7, 1, 12, 1))
60.0
julia> Utils.date_to_time(Dates.DateTime(2013, 7, 1, 12), Dates.DateTime(2013, 7, 1, 12, 0, 1))
1.0
```
"""
function date_to_time(reference_date::Dates.DateTime, date::Dates.DateTime)
return period_to_seconds_float(date - reference_date)
end
"""
period_to_seconds_float(period::Dates.Period)
Convert the given `period` to seconds in Float64.
Examples
=========
```jldoctest
julia> import Dates
julia> Utils.period_to_seconds_float(Dates.Millisecond(1))
0.001
julia> Utils.period_to_seconds_float(Dates.Second(1))
1.0
julia> Utils.period_to_seconds_float(Dates.Minute(1))
60.0
julia> Utils.period_to_seconds_float(Dates.Hour(1))
3600.0
julia> Utils.period_to_seconds_float(Dates.Day(1))
86400.0
julia> Utils.period_to_seconds_float(Dates.Week(1))
604800.0
julia> Utils.period_to_seconds_float(Dates.Month(1))
2.629746e6
julia> Utils.period_to_seconds_float(Dates.Year(1))
3.1556952e7
```
"""
function period_to_seconds_float(period::Dates.Period)
# See https://github.com/JuliaLang/julia/issues/55406
period isa Dates.OtherPeriod &&
(period = Dates.Second(Dates.Day(1)) * Dates.days(period))
return period / Dates.Second(1)
end
"""
_data_at_dim_vals(data, dim_arr, dim_idx, vals)
Return a view of `data` by slicing along `dim_idx`. The slices are indexed by the indices
corresponding to values in `dim_arr` closest to the values in `vals`.
Examples
=========
```jldoctest
julia> data = [[1, 4, 7] [2, 5, 8] [3, 6, 9]];
julia> dim_arr = [1.0, 2.0, 4.0];
julia> dim_idx = 2;
julia> vals = [1.1, 4.0];
julia> Utils._data_at_dim_vals(data, dim_arr, dim_idx, vals)
3×2 view(::Matrix{Int64}, :, [1, 3]) with eltype Int64:
1 3
4 6
7 9
```
"""
function _data_at_dim_vals(data, dim_arr, dim_idx, vals)
nearest_indices = map(val -> nearest_index(dim_arr, val), vals)
return selectdim(data, dim_idx, nearest_indices)
end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 52937 | module Var
import Dates
import NCDatasets
import OrderedCollections: OrderedDict
import Interpolations as Intp
import Statistics: mean
import NaNStatistics: nanmean
import ..Numerics
import ..Utils:
nearest_index,
seconds_to_prettystr,
squeeze,
split_by_season,
time_to_date,
date_to_time,
_data_at_dim_vals,
_isequispaced
export OutputVar,
read_var,
average_lat,
weighted_average_lat,
average_lon,
average_x,
average_y,
average_xy,
average_time,
is_z_1D,
slice,
window,
arecompatible,
center_longitude!,
short_name,
long_name,
units,
dim_units,
range_dim,
reordered_as,
resampled_as,
has_units,
convert_units,
integrate_lonlat,
integrate_lon,
integrate_lat,
isempty,
split_by_season,
bias,
global_bias,
squared_error,
global_mse,
global_rmse,
set_units,
shift_to_start_of_previous_month
"""
Representing an output variable
"""
struct OutputVar{T <: AbstractArray, A <: AbstractArray, B, C, ITP}
"Attributes associated to this variable, such as short/long name"
attributes::Dict{String, B}
"Dimensions over which the variable is defined"
dims::OrderedDict{String, T}
"Attributes associated to the dimensions"
dim_attributes::OrderedDict{String, C}
"Array that contains all the data"
data::A
"Dictionary that maps dimension name to its array index"
dim2index::Dict{String, Int}
"Array that maps name array index to the dimension name"
index2dim::Vector{String}
"Interpolant from Interpolations.jl, used to evaluate the OutputVar onto any given point."
interpolant::ITP
end
"""
_make_interpolant(dims, data)
Make a linear interpolant from `dims`, a dictionary mapping dimension name to array and
`data`, an array containing data. Used in constructing a `OutputVar`.
If any element of the arrays in `dims` is a Dates.DateTime, then no interpolant is returned.
Interpolations.jl does not support interpolating on dates. If the longitudes span the entire
range and are equispaced, then a periodic boundary condition is added for the longitude
dimension. If the latitudes span the entire range and are equispaced, then a flat boundary
condition is added for the latitude dimension. In all other cases, an error is thrown when
extrapolating outside of `dim_array`.
"""
function _make_interpolant(dims, data)
# If any element is DateTime, then return nothing for the interpolant because
# Interpolations.jl do not support DateTimes
for dim_array in values(dims)
eltype(dim_array) <: Dates.DateTime && return nothing
end
# We can only create interpolants when we have 1D dimensions
if isempty(dims) || any(d -> ndims(d) != 1 || length(d) == 1, values(dims))
return nothing
end
# Dimensions are all 1D, check that they are compatible with data
size_data = size(data)
for (dim_index, (dim_name, dim_array)) in enumerate(dims)
dim_length = length(dim_array)
data_length = size_data[dim_index]
if dim_length != data_length
error(
"Dimension $dim_name has inconsistent size with provided data ($dim_length != $data_length)",
)
end
end
# Find boundary conditions for extrapolation
extp_bound_conds = (
_find_extp_bound_cond(dim_name, dim_array) for
(dim_name, dim_array) in dims
)
dims_tuple = tuple(values(dims)...)
extp_bound_conds_tuple = tuple(extp_bound_conds...)
return Intp.extrapolate(
Intp.interpolate(dims_tuple, data, Intp.Gridded(Intp.Linear())),
extp_bound_conds_tuple,
)
end
"""
_find_extp_bound_cond(dim_name, dim_array)
Find the appropriate boundary condition for the `dim_name` dimension.
"""
function _find_extp_bound_cond(dim_name, dim_array)
min_of_dim, max_of_dim = extrema(dim_array)
dim_size = max_of_dim - min_of_dim
dsize = dim_array[begin + 1] - dim_array[begin]
# If the dimension array span the entire range and is equispaced, then add the
# appropriate boundary condition
# We do not handle the cases when the array is not equispaced
(
conventional_dim_name(dim_name) == "longitude" &&
_isequispaced(dim_array) &&
isapprox(dim_size + dsize, 360.0)
) && return Intp.Periodic()
(
conventional_dim_name(dim_name) == "latitude" &&
_isequispaced(dim_array) &&
isapprox(dim_size + dsize, 180.0)
) && return Intp.Flat()
return Intp.Throw()
end
# Inner-data constructor: derive the index bookkeeping and the interpolant
# from `dims` and `data`, and normalize the "units" attribute.
function OutputVar(attribs, dims, dim_attribs, data)
    index2dim = collect(keys(dims))
    dim2index = Dict(
        dim_name => index for (index, dim_name) in enumerate(keys(dims))
    )
    # TODO: Make this lazy: we should compute the spline the first time we use
    # it, not when we create the object
    itp = _make_interpolant(dims, data)
    # Recreate the attributes so that "units" is possibly converted to a
    # Unitful object and the container has a consistent type
    if !isempty(attribs)
        attribs = Dict(
            k == "units" ? k => _maybe_convert_to_unitful(v) : k => v for
            (k, v) in attribs
        )
    end
    # TODO: Support units for dimensions too
    return OutputVar(
        attribs,
        OrderedDict(dims),
        OrderedDict(dim_attribs),
        data,
        dim2index,
        index2dim,
        itp,
    )
end
"""
    OutputVar(dims, data)

Construct an `OutputVar` with the given dimensions and data, with empty
attributes and empty dimension attributes.
"""
function OutputVar(dims, data)
    return OutputVar(Dict{String, Any}(), dims, Dict{String, Any}(), data)
end
"""
    OutputVar(path,
              short_name = nothing;
              new_start_date = nothing,
              shift_by = identity)

Read the NetCDF file in `path` as an `OutputVar`.

If `short_name` is `nothing`, automatically find the name.

Dates in the time dimension are automatically converted to seconds with respect to the first
date in the time dimension array or the `new_start_date`. The parameter `new_start_date` can
be any string parseable by the [Dates](https://docs.julialang.org/en/v1/stdlib/Dates/)
module or a `Dates.DateTime` object. The start date is added to the attributes of the
`OutputVar`. The parameter `shift_by` is a function that takes in `Dates.DateTime` elements
and returns `Dates.DateTime` elements. This function is applied to each element of the time
array. Shifting the dates and converting to seconds is done in that order.
"""
function OutputVar(
    path::String,
    short_name = nothing;
    new_start_date = nothing,
    shift_by = identity,
)
    var = read_var(path; short_name)
    # Convert dates to seconds when the time dimension holds DateTime elements
    # (interpolation is not possible over dates)
    if (has_time(var) && eltype(times(var)) <: Dates.DateTime)
        # Reuse the variable we already read instead of reading the file a
        # second time
        var = _dates_to_seconds(
            var,
            new_start_date = new_start_date,
            shift_by = shift_by,
        )
    end
    return var
end
"""
    read_var(path::String; short_name = nothing)

Read the `short_name` variable in the given NetCDF file.

When `short_name` is `nothing`, automatically identify the name of the variable. If multiple
variables are present, the last one in alphabetical order is chosen.

When `units` is among the attributes, try to parse it and convert it into an
[`Unitful`](https://painterqubits.github.io/Unitful.jl) object. `OutputVar`s with `Unitful`
support automatic unit conversions.

If you want to access `units` as a string, look at [`units`](@ref) function.

Example
=========

```julia
simdir = SimDir("my_output")
read_var(simdir.variable_paths["hu"]["inst"])
read_var("my_netcdf_file.nc", short_name = "ts")
```
"""
function read_var(path::String; short_name = nothing)
    NCDatasets.NCDataset(path) do nc
        # If short_name was not provided, identify the variable as a dataset
        # key that is not one of the dimensions
        unordered_dims = NCDatasets.dimnames(nc)
        if isnothing(short_name)
            short_name = pop!(setdiff(keys(nc), unordered_dims))
        end
        dims = OrderedDict(
            dim_name => Array(nc[dim_name]) for
            dim_name in NCDatasets.dimnames(nc[short_name])
        )
        attribs = Dict(pair for pair in nc[short_name].attrib)
        dim_attribs = OrderedDict(
            dim_name => Dict(nc[dim_name].attrib) for dim_name in keys(dims)
        )
        data = Array(nc[short_name])
        return OutputVar(attribs, dims, dim_attribs, data)
    end
end
"""
    short_name(var::OutputVar)

Return the `short_name` of the given `var`, if available.

If not available, return an empty string.
"""
function short_name(var::OutputVar)
    return get(var.attributes, "short_name", "")
end
"""
    long_name(var::OutputVar)

Return the `long_name` of the given `var`, if available.

If not available, return an empty string.
"""
function long_name(var::OutputVar)
    return get(var.attributes, "long_name", "")
end
"""
    units(var::OutputVar)

Return the `units` of the given `var`, if available.

If not available, return an empty string.
"""
function units(var::OutputVar)
    # The stored value may be a Unitful object, so normalize it to a string
    return string(get(var.attributes, "units", ""))
end
"""
    has_units(var::OutputVar)

Return whether the given `var` has `units` or not.
"""
has_units(var::OutputVar) = haskey(var.attributes, "units")
# Implemented in ClimaAnalysisUnitfulExt
# NOTE(review): presumably parses a `units` value into a Unitful object when
# possible and returns the input otherwise — confirm against the extension.
function _maybe_convert_to_unitful end
"""
    Var.convert_units(var, new_units; conversion_function = nothing)

Return a new `OutputVar` with converted physical units of `var` to `new_units`, if possible.

Automatic conversion happens when the units for `var` and `new_units` are both parseable.
When `var` does not have units (see also [`Var.has_units`](@ref)) or has no parseable units,
a conversion function `conversion_function` is required.

`conversion_function` has to be a function that takes one data point and returns the
transformed value.

Being parseable means that `Unitful.uparse` can parse the expression. Please, refer to the
documentation for [Unitful.jl](https://painterqubits.github.io/Unitful.jl/stable/) for more
information.

Examples
=======

Let us set up a trivial 1D `OutputVar` with units of meters per second and automatically
convert it to centimeters per second.

```jldoctest example1
julia> values = 0:100.0 |> collect;
julia> data = copy(values);
julia> attribs = Dict("long_name" => "speed", "units" => "m/s");
julia> dim_attribs = Dict{String, Any}();
julia> var = ClimaAnalysis.OutputVar(attribs, Dict("distance" => values), dim_attribs, data);
julia> ClimaAnalysis.has_units(var)
true
julia> var_cms = ClimaAnalysis.convert_units(var, "cm/s");
julia> extrema(var_cms.data)
(0.0, 10000.0)
```

Not all the units can be properly parsed, for example, assuming `bob=5lol`

```jldoctest example1
julia> attribs = Dict("long_name" => "speed", "units" => "bob/s");
julia> var_bob = ClimaAnalysis.OutputVar(attribs, Dict("distance" => values), dim_attribs, data);
julia> var_lols = ClimaAnalysis.convert_units(var, "lol/s", conversion_function = (x) -> 5x);
julia> extrema(var_lols.data)
(0.0, 500.0)
```

Failure to specify the `conversion_function` will produce an error.
"""
function convert_units end
"""
    set_units(var::OutputVar, units::AbstractString)

Set `units` for data in `var`.

!!! warning "Override existing units"
    If units already exist, this will override the units for data in `var`. To convert
    units, see [`Var.convert_units`](@ref)
"""
function set_units(var::OutputVar, units::AbstractString)
    # `identity` because we are overriding the units, not converting the data
    return convert_units(var, units, conversion_function = identity)
end
"""
    is_z_1D(var::OutputVar)

Return whether the given `var`iable has an altitude dimension that is 1D.

When topography is present, the altitude dimension in the output variable is typically
multidimensional. The dimensions are (X, Y, Z), where (X, Y) are the horizontal dimensions.
In this case, `dims["z"]` is essentially a map that identifies the physical altitude of the
given point.
"""
function is_z_1D(var::OutputVar)
    has_altitude(var) || error("Variable does not have an altitude dimension")
    return ndims(altitudes(var)) == 1
end
"""
    isempty(var::OutputVar)

Determine whether an OutputVar is empty.
"""
function Base.isempty(var::OutputVar)
    # Skip :interpolant, because var.interpolant is Nothing when the data is
    # zero dimensional and empty, and isempty(Nothing) throws an error
    relevant_fields = filter(!=(:interpolant), fieldnames(OutputVar))
    return all(name -> isempty(getproperty(var, name)), relevant_fields)
end
function Base.copy(var::OutputVar)
    # deepcopy every field: the fields are nested containers and the copy must
    # not share ownership with the original
    copied_fields =
        (deepcopy(getfield(var, name)) for name in fieldnames(OutputVar))
    return OutputVar(copied_fields...)
end
"""
    _reduce_over(reduction::F, dim::String, var::OutputVar, args...; kwargs...)

Apply the given reduction over the given dimension.

`reduction` has to support the `dims` key. Additional arguments are passed to `reduction`.

The return type is an `OutputVar` with the same attributes, the new data, and the dimension
dropped.

Example
=========

Average over latitudes

```julia
import Statistics: mean
long = 0.:180. |> collect
lat = 0.:90. |> collect
data = reshape(1.:91*181., (181, 91))
dims = Dict(["lat" => lat, "long" => long])
var = OutputVar(dims, data)
_reduce_over(mean, "lat", var)
```
"""
function _reduce_over(
    reduction::F,
    dim::String,
    var::OutputVar,
    args...;
    kwargs...,
) where {F <: Function}
    dim_index = var.dim2index[dim]

    # The semicolon is needed so that `kwargs` are splatted as keyword
    # arguments (with a comma they would be splatted as positional `Pair`s)
    # squeeze removes the unnecessary singleton dimension
    data = squeeze(
        reduction(var.data, args...; dims = dim_index, kwargs...),
        dims = (dim_index,),
    )

    # If we reduce over a dimension, we have to remove it
    dims = copy(var.dims)
    dim_attributes = copy(var.dim_attributes)
    pop!(dims, dim)
    haskey(var.dim_attributes, dim) && pop!(dim_attributes, dim)
    return OutputVar(copy(var.attributes), dims, dim_attributes, copy(data))
end
"""
    average_lat(var::OutputVar; ignore_nan = true, weighted = false)

Return a new OutputVar where the values on the latitudes are averaged arithmetically.

When `weighted` is `true`, weight the average over `cos(lat)`.
"""
function average_lat(var; ignore_nan = true, weighted = false)
    if weighted
        # Work on a copy because the weighting mutates var.data below
        var = copy(var)
        lats = latitudes(var)
        # Heuristic: latitudes in degrees should exceed 0.5π ≈ 1.57 somewhere;
        # smaller values suggest radians, which would break cosd below
        abs(maximum(lats)) >= 0.5π ||
            @warn "Detected latitudes are small. If units are radians, results will be wrong"
        weights_1d = cosd.(lats)
        lat_index = var.dim2index[latitude_name(var)]
        weights = ones(size(var.data))
        # Create a bitmask for the NaN's, we use this to remove weights in the normalization (with nanmean)
        nan_mask = ifelse.(isnan.(var.data), NaN, 1)
        # Normalize the weights slice by slice along the latitude dimension so
        # that NaN entries do not contribute to the normalization
        for index in CartesianIndices(weights)
            index_tuple =
                ntuple(d -> d == lat_index ? Colon() : index[d], ndims(weights))
            weights[index_tuple...] .= weights_1d
            weights[index_tuple...] ./=
                nanmean(nan_mask[index_tuple...] .* weights_1d)
        end
        var.data .*= weights
    end
    reduced_var =
        _reduce_over(ignore_nan ? nanmean : mean, latitude_name(var), var)
    # Flag the weighting in the long name before the generic update appends
    # the "averaged over ..." description
    weighted &&
        haskey(var.attributes, "long_name") &&
        (reduced_var.attributes["long_name"] *= " weighted")
    _update_long_name_generic!(reduced_var, var, latitude_name(var), "averaged")
    return reduced_var
end
"""
    weighted_average_lat(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values on the latitudes are averaged arithmetically
with weights of `cos(lat)`.
"""
function weighted_average_lat(var; ignore_nan = true)
    return average_lat(var; ignore_nan, weighted = true)
end
"""
    average_lon(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values on the longitudes are averaged arithmetically.
"""
function average_lon(var; ignore_nan = true)
    avg = ignore_nan ? nanmean : mean
    reduced_var = _reduce_over(avg, longitude_name(var), var)
    _update_long_name_generic!(
        reduced_var,
        var,
        longitude_name(var),
        "averaged",
    )
    return reduced_var
end
"""
    average_x(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values along the `x` dimension are averaged arithmetically.
"""
function average_x(var; ignore_nan = true)
    avg = ignore_nan ? nanmean : mean
    reduced_var = _reduce_over(avg, "x", var)
    _update_long_name_generic!(reduced_var, var, "x", "averaged")
    return reduced_var
end
"""
    average_y(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values along the `y` dimension are averaged arithmetically.
"""
function average_y(var; ignore_nan = true)
    avg = ignore_nan ? nanmean : mean
    reduced_var = _reduce_over(avg, "y", var)
    _update_long_name_generic!(reduced_var, var, "y", "averaged")
    return reduced_var
end
"""
    average_xy(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values along both horizontal dimensions `x` and `y`
are averaged arithmetically.
"""
function average_xy(var; ignore_nan = true)
    avg = ignore_nan ? nanmean : mean
    # Reduce over y first, then over x
    reduced_var = _reduce_over(avg, "x", _reduce_over(avg, "y", var))
    first_x, last_x = range_dim(var, "x")
    first_y, last_y = range_dim(var, "y")
    units_x = dim_units(var, "x")
    units_y = dim_units(var, "y")
    if haskey(var.attributes, "long_name")
        reduced_var.attributes["long_name"] *= " averaged horizontally over x ($first_x to $last_x$units_x) and y ($first_y to $last_y$units_y)"
    end
    return reduced_var
end
"""
    average_time(var::OutputVar; ignore_nan = true)

Return a new OutputVar where the values are averaged arithmetically in time.
"""
function average_time(var; ignore_nan = true)
    avg = ignore_nan ? nanmean : mean
    reduced_var = _reduce_over(avg, time_name(var), var)
    _update_long_name_generic!(reduced_var, var, time_name(var), "averaged")
    return reduced_var
end
"""
    dim_units(var::OutputVar, dim_name)

Return the `units` of the given `dim_name` in `var`, if available.

If not available, return an empty string.
"""
function dim_units(var::OutputVar, dim_name)
    haskey(var.dims, dim_name) ||
        error("Var does not have dimension $dim_name, found $(keys(var.dims))")
    # Nested get because var.dim_attributes maps dimension names to attribute
    # dictionaries
    return get(get(var.dim_attributes, dim_name, Dict()), "units", "")
end
"""
    range_dim(var::OutputVar, dim_name)

Return the range of the dimension `dim_name` in `var`.

Range here is a tuple with the minimum and maximum of `dim_name`.
"""
function range_dim(var::OutputVar, dim_name)
    haskey(var.dims, dim_name) ||
        error("Var does not have dimension $dim_name, found $(keys(var.dims))")
    dim_array = var.dims[dim_name]
    return first(dim_array), last(dim_array)
end
"""
    _update_long_name_generic!(
        reduced_var::OutputVar,
        var::OutputVar,
        dim_name,
        operation_name,
    )

Used by reductions (e.g., average) to update the long name of `reduced_var` by describing
the operation being used to reduce the data and the associated units.
"""
function _update_long_name_generic!(
    reduced_var::OutputVar,
    var::OutputVar,
    dim_name,
    operation_name,
)
    # Nothing to do when there is no long name to update
    haskey(var.attributes, "long_name") || return nothing
    dim_of_units = dim_units(var, dim_name)
    first_elt, last_elt = range_dim(var, dim_name)
    reduced_var.attributes["long_name"] *= " $operation_name over $dim_name ($first_elt to $last_elt$dim_of_units)"
    return nothing
end
"""
    center_longitude!(var::OutputVar, lon::Real)

Shift the longitudes in `var` so that `lon` is the center one.

This is useful to center the global projection to the 180 meridian instead of the 0.
"""
function center_longitude!(var, lon)
    lon_name = longitude_name(var)
    old_center_lon_index = nearest_index(var.dims[lon_name], lon)
    half_index = div(length(var.dims[lon_name]), 2)
    # half_index = old_index + shift => shift = half_index - old_index
    shift = half_index - old_center_lon_index
    # We do not use circshift! because it can lead to unpredictable problems when mutating
    shifted_lon = circshift(var.dims[lon_name], shift)
    var.dims[lon_name] = shifted_lon
    lon_dim_index = var.dim2index[lon_name]
    # Prepare the shift tuple for the data array: do not shift, except for the
    # dimension corresponding to the longitude. Passing a scalar shift would
    # rotate the first dimension regardless of which one is longitude.
    shift_tuple = zeros(Int, length(var.dims))
    shift_tuple[lon_dim_index] = shift
    shifted_data = circshift(var.data, shift_tuple)
    var.data .= shifted_data
end
"""
    _slice_general(var::OutputVar, val, dim_name)

Return a new OutputVar by selecting the available index closest to the given `val` for the
given dimension
"""
function _slice_general(var, val, dim_name)
    haskey(var.dims, dim_name) ||
        error("Var does not have dimension $dim_name, found $(keys(var.dims))")
    nearest_index_val = nearest_index(var.dims[dim_name], val)
    # Slicing is expressed as a "reduction" that picks one index along dim
    _slice_over(data; dims) = selectdim(data, dims, nearest_index_val)
    reduced_var = _reduce_over(_slice_over, dim_name, var)

    # Let's try adding this operation to the long_name, if possible (ie, if the correct
    # attributes are available)
    if haskey(var.attributes, "long_name") &&
       haskey(var.dim_attributes, dim_name) &&
       haskey(var.dim_attributes[dim_name], "units")
        dim_array = var.dims[dim_name]
        dim_units = var.dim_attributes[dim_name]["units"]
        cut_point = dim_array[nearest_index_val]
        if (dim_name == "time" || dim_name == "t") && dim_units == "s"
            # Dimension is time and units are seconds. Let's convert them to something nicer
            pretty_timestr = seconds_to_prettystr(cut_point)
            reduced_var.attributes["long_name"] *= " $dim_name = $pretty_timestr"
        else
            reduced_var.attributes["long_name"] *= " $dim_name = $cut_point $dim_units"
        end
        # Record which value was sliced out so that downstream consumers can
        # recover it from the attributes
        reduced_var.attributes["slice_$dim_name"] = "$cut_point"
        reduced_var.attributes["slice_$(dim_name)_units"] = dim_units
    end
    return reduced_var
end
"""
    slice(var::OutputVar, kwargs...)

Return a new OutputVar by slicing across dimensions as defined by the keyword arguments.

Example
===========
```julia
slice(var, lat = 30, lon = 20, time = 100)
```
"""
function slice(var; kwargs...)
    # Apply the slices one dimension at a time
    result = var
    for (dim_name, val) in pairs(kwargs)
        result = _slice_general(result, val, String(dim_name))
    end
    return result
end
"""
    window(var::OutputVar, dim_name; left = nothing, right = nothing)

Return a new OutputVar by selecting the values of the given `dim`ension that are between
`left` and `right`.

If `left` and/or `right` are `nothing`, assume beginning (or end) of the array.

Example
===========
```julia
window(var, 'lat', left = -50, right = 50)
```
"""
function window(var, dim_name; left = nothing, right = nothing)
    haskey(var.dims, dim_name) ||
        error("Var does not have dimension $dim_name, found $(keys(var.dims))")
    dim_array = var.dims[dim_name]
    left_index = isnothing(left) ? 1 : nearest_index(dim_array, left)
    right_index =
        isnothing(right) ? length(dim_array) : nearest_index(dim_array, right)
    left_index <= right_index ||
        error("Right window value has to be larger than left one")
    # Keep only what's between the two selected indices
    reduced_data =
        selectdim(var.data, var.dim2index[dim_name], left_index:right_index)
    dims = copy(var.dims)
    dims[dim_name] = dim_array[left_index:right_index]
    dim_attributes = copy(var.dim_attributes)
    return OutputVar(copy(var.attributes), dims, dim_attributes, reduced_data)
end
"""
    (x::OutputVar)(target_coord)

Interpolate variable `x` onto the given `target_coord` coordinate using
multilinear interpolation.

Extrapolation is not allowed and will throw a `BoundsError` in most cases.

If any element of the arrays of the dimensions is a Dates.DateTime, then interpolation is
not possible. Interpolations.jl do not support making interpolations for dates. If the
longitudes span the entire range and are equispaced, then a periodic boundary condition is
added for the longitude dimension. If the latitudes span the entire range and are
equispaced, then a flat boundary condition is added for the latitude dimension. In all other
cases, an error is thrown when extrapolating outside of the array of the dimension.

Example
=======
```jldoctest
julia> import ClimaAnalysis;
julia> time = 100.0:110.0 |> collect;
julia> z = 0.0:20.0 |> collect;
julia> data = reshape(1.0:(11 * 21), (11, 21));
julia> var2d = ClimaAnalysis.OutputVar(Dict("time" => time, "z" => z), data); var2d.([[105., 10.], [105.5, 10.5]])
2-element Vector{Float64}:
 116.0
 122.0
```
"""
function (x::OutputVar)(target_coord)
    # The interpolant is `nothing` when any dimension is not 1D (see
    # _make_interpolant), so interpolation is impossible
    isnothing(x.interpolant) && error(
        "Splines cannot be constructed because one (or more) of the dimensions of variable is not 1D",
    )
    return x.interpolant(target_coord...)
end
"""
    arecompatible(x::OutputVar, y::OutputVar)

Return whether two `OutputVar` are defined on the same physical space

This is accomplished by comparing `dims` and `dim_attributes` (the latter because they might contain information about the units).

We assume that:
- `dim2index` and `index2dim` were correctly created and they reflect `dims`
- `data` is also consistent with `dims`,

We also *do not* check units for `data`.
"""
function arecompatible(x::OutputVar, y::OutputVar)
    # Compare units dimension by dimension, warning on missing units
    for (x_dim, y_dim) in zip(keys(x.dims), keys(y.dims))
        x_unit = dim_units(x, x_dim)
        y_unit = dim_units(y, y_dim)
        x_unit == "" && @warn "Missing units for dimension $x_dim in x"
        y_unit == "" && @warn "Missing units for dimension $y_dim in y"
        x_unit != y_unit && return false
    end
    return x.dims == y.dims
end
"""
    _check_dims_consistent(x::OutputVar, y::OutputVar)

Check if the number, name, and unit of dimensions in `x` and `y` are consistent.

If the unit for a dimension is missing, then the unit is not consistent for that dimension.
"""
function _check_dims_consistent(x::OutputVar, y::OutputVar)
    # Check if the number of dimensions is the same
    x_num_dims = length(x.dims)
    y_num_dims = length(y.dims)
    x_num_dims != y_num_dims && error(
        "Number of dimensions do not match between x ($x_num_dims) and y ($y_num_dims)",
    )
    # Check if the dimensions agree with each other (by conventional name, so
    # e.g. "lon" and "long" are treated as the same dimension)
    conventional_dim_name_x = conventional_dim_name.(keys(x.dims))
    conventional_dim_name_y = conventional_dim_name.(keys(y.dims))
    mismatch_conventional_dim_name =
        conventional_dim_name_x .!= conventional_dim_name_y
    any(mismatch_conventional_dim_name) && error(
        "Dimensions do not agree between x ($conventional_dim_name_x) and y ($conventional_dim_name_y)",
    )
    x_dims = collect(keys(x.dims))
    y_dims = collect(keys(y.dims))
    x_units = [dim_units(x, dim_name) for dim_name in x_dims]
    y_units = [dim_units(y, dim_name) for dim_name in y_dims]
    # Check for any missing units (missing units are represented with an empty string)
    missing_x = (x_units .== "")
    missing_y = (y_units .== "")
    (any(missing_x) && any(missing_y)) && error(
        "Units for dimensions $(x_dims[missing_x]) are missing in x and units for dimensions $(y_dims[missing_y]) are missing in y",
    )
    any(missing_x) &&
        error("Units for dimensions $(x_dims[missing_x]) are missing in x")
    # Report y's own dimension names here (previously x_dims was interpolated
    # by mistake)
    any(missing_y) &&
        error("Units for dimensions $(y_dims[missing_y]) are missing in y")
    # Check if units match between dimensions
    not_consistent_units = (x_units .!= y_units)
    any(not_consistent_units) && error(
        "Units for dimensions $(x_dims[not_consistent_units]) in x is not consistent with units for dimensions $(y_dims[not_consistent_units]) in y",
    )
    return nothing
end
"""
    reordered_as(src_var::OutputVar, dest_var::OutputVar)

Reorder the dimensions in `src_var` to match the ordering of dimensions in `dest_var`.
"""
function reordered_as(src_var::OutputVar, dest_var::OutputVar)
    # Get the conventional dim names for both src_var and dest_var
    conventional_dim_name_src = conventional_dim_name.(keys(src_var.dims))
    conventional_dim_name_dest = conventional_dim_name.(keys(dest_var.dims))

    # Check if the dimensions are the same (order does not matter)
    Set(conventional_dim_name_src) == Set(conventional_dim_name_dest) || error(
        "Dimensions are not the same between src ($conventional_dim_name_src) and dest ($conventional_dim_name_dest)",
    )

    # Find permutation indices to reorder dims
    reorder_indices =
        indexin(conventional_dim_name_dest, conventional_dim_name_src)

    # Reorder dims, dim_attribs, and data, but not attribs
    ret_dims = deepcopy(src_var.dims)
    ret_dims = OrderedDict(collect(ret_dims)[reorder_indices])

    ret_attribs = deepcopy(src_var.attributes)

    # Cannot assume that every dimension is present in dim_attribs so we loop to reorder the
    # best we can and merge with src_var.dim_attributes to add any remaining pairs to
    # ret_dim_attribs
    ret_dim_attribs = empty(src_var.dim_attributes)
    src_var_dim_attribs = src_var.dim_attributes |> deepcopy
    src_var_dim_names = collect(keys(src_var.dims))
    for idx in reorder_indices
        dim_name = src_var_dim_names[idx]
        haskey(src_var_dim_attribs, dim_name) &&
            (ret_dim_attribs[dim_name] = src_var_dim_attribs[dim_name])
    end
    # merge! only adds pairs whose keys were not inserted by the loop above,
    # so the reordered entries keep their positions
    merge!(ret_dim_attribs, src_var_dim_attribs)

    ret_data = copy(src_var.data)
    ret_data = permutedims(ret_data, reorder_indices)

    return OutputVar(ret_attribs, ret_dims, ret_dim_attribs, ret_data)
end
"""
    resampled_as(src_var::OutputVar, dest_var::OutputVar)

Resample `data` in `src_var` to `dims` in `dest_var`.

The resampling performed here is a 1st-order linear resampling.
"""
function resampled_as(src_var::OutputVar, dest_var::OutputVar)
    src_var = reordered_as(src_var, dest_var)
    _check_dims_consistent(src_var, dest_var)

    # Evaluate src_var's interpolant on every point of the destination grid
    resampled_data =
        [src_var(pt) for pt in Base.product(values(dest_var.dims)...)]

    # Keep the dimension names from src_var (they could differ from dest_var,
    # e.g., `long` in one and `lon` in the other) but take the values from
    # dest_var
    ret_dims = empty(src_var.dims)
    for (dim_name, dim_data) in zip(keys(src_var.dims), values(dest_var.dims))
        ret_dims[dim_name] = copy(dim_data)
    end

    return OutputVar(
        deepcopy(src_var.attributes),
        ret_dims,
        deepcopy(src_var.dim_attributes),
        resampled_data,
    )
end
"""
    integrate_lonlat(var::OutputVar)

Integrate `data` in `var` on longitude and latitude with a first-order scheme. `data` has to
be discretized on longitude and latitude.

If the points are equispaced, it is assumed that each point correspond to the midpoint of a
cell which results in rectangular integration using the midpoint rule. Otherwise, the
integration being done is rectangular integration using the left endpoints for integrating
longitude and latitude. The units for longitude and latitude should be degrees.
"""
function integrate_lonlat(var::OutputVar)
    integrated_lon = integrate_lon(var)
    # Update long name so that we get "...integrated over lat... and integrated over lon..."
    # instead of "...integrated over lat... integrated over lon..."
    if haskey(integrated_lon.attributes, "long_name")
        integrated_lon.attributes["long_name"] *= " and"
    end
    return integrate_lat(integrated_lon)
end
"""
    integrate_lon(var::OutputVar)

Integrate `data` in `var` on longitude with a first-order scheme. `data` has to be
discretized on longitude.

If the points are equispaced, it is assumed that each point correspond to the midpoint of a
cell which results in rectangular integration using the midpoint rule. Otherwise, the
integration being done is rectangular integration using the left endpoints. The unit for
longitude should be degrees.
"""
function integrate_lon(var::OutputVar)
    # Fixed error-message grammar ("does not has" -> "does not have")
    has_longitude(var) || error("var does not have longitude as a dimension")
    lon_name = longitude_name(var)
    return _integrate_over_angle(var, Numerics._integrate_lon, lon_name)
end
"""
    integrate_lat(var::OutputVar)

Integrate `data` in `var` on latitude with a first-order scheme. `data` has to be
discretized on latitude.

If the points are equispaced, it is assumed that each point correspond to the midpoint of a
cell which results in rectangular integration using the midpoint rule. Otherwise, the
integration being done is rectangular integration using the left endpoints. The unit for
latitude should be degrees.
"""
function integrate_lat(var::OutputVar)
    # Fixed error-message grammar ("does not has" -> "does not have")
    has_latitude(var) || error("var does not have latitude as a dimension")
    lat_name = latitude_name(var)
    return _integrate_over_angle(var, Numerics._integrate_lat, lat_name)
end
"""
    _integrate_over_angle(var::OutputVar, integrate_on, angle_dim_name)

Integrate `data` in `var` on `angle_dim_name` with a first-order scheme. `data` has to be
discretized on `angle_dim_name`.

`angle_dim_name` is the name of the angle that is being integrated over. `integrate_on` is a
function that compute the integration for data over `angle_dim_name`.
"""
function _integrate_over_angle(var::OutputVar, integrate_on, angle_dim_name)
    # The integration weights are computed assuming degrees (see functions
    # _integration_weights_lon_left and _integration_weights_lon_equispaced
    # in Numerics.jl), so enforce that the unit is a degree
    deg_unit_names =
        ("degrees", "degree", "deg", "degs", "°", "degrees_north", "degrees_east")
    angle_dim_unit = dim_units(var, angle_dim_name)
    lowercase(angle_dim_unit) in deg_unit_names ||
        error("The unit for $angle_dim_name is missing or is not degree")

    integrated_var = _reduce_over(
        integrate_on,
        angle_dim_name,
        var,
        var.dims[angle_dim_name],
    )
    _update_long_name_generic!(
        integrated_var,
        var,
        angle_dim_name,
        "integrated",
    )
    return integrated_var
end
"""
    split_by_season(var::OutputVar)

Return a vector of four `OutputVar`s split by season.

The months of the seasons are March to May, June to August, September to November, and
December to February. The order of the vector is MAM, JJA, SON, and DJF. If there are no
dates found for a season, then the `OutputVar` for that season will be an empty `OutputVar`.

The function will use the start date in `var.attributes["start_date"]`. The unit of time is
expected to be second. Also, the interpolations will be inaccurate in time intervals
outside of their respective season for the returned `OutputVar`s.
"""
function split_by_season(var::OutputVar)
    # Check time exists and unit is second
    has_time(var) || error("Time is not a dimension in var")
    dim_units(var, time_name(var)) == "s" ||
        error("Unit for time is not second")

    # Check start date exists (ternary with assignment in the true branch)
    haskey(var.attributes, "start_date") ?
    start_date = Dates.DateTime(var.attributes["start_date"]) :
    error("Start date is not found in var")

    # Convert the time dimension to dates, bucket the dates into the four
    # seasons, and convert back to seconds relative to start_date
    season_dates = split_by_season(time_to_date.(start_date, times(var)))
    season_times =
        (date_to_time.(start_date, season) for season in season_dates)

    # Split data according to seasons
    season_data = (
        collect(
            _data_at_dim_vals(
                var.data,
                times(var),
                var.dim2index[time_name(var)],
                season_time,
            ),
        ) for season_time in season_times
    )

    # Construct an OutputVar for each season; a season with no dates yields an
    # empty OutputVar
    return map(season_times, season_data) do time, data
        if isempty(time)
            dims = empty(var.dims)
            data = similar(var.data, 0)
            return OutputVar(dims, data)
        end
        ret_dims = deepcopy(var.dims)
        ret_attribs = deepcopy(var.attributes)
        ret_dim_attribs = deepcopy(var.dim_attributes)
        ret_dims[time_name(var)] = time
        OutputVar(ret_attribs, ret_dims, ret_dim_attribs, data)
    end
end
"""
    _check_sim_obs_units_consistent(sim::OutputVar, obs::OutputVar)

Check if the number of dimensions are two, the `data` in `sim` and `obs` is missing units or
not, and if the units of data are the same in `sim` and `obs`.

This function does not check if the dimensions are longitude and latitude in `sim` and `obs`
because `integrate_lonlat` (in `bias` and `squared_error`) handles that. The function also
does not check if the units of dimensions in `sim` and `obs` are the same because
`resampled_as` (in `bias` and `squared_error`) handles that.
"""
function _check_sim_obs_units_consistent(sim::OutputVar, obs::OutputVar)
    # Both variables must be two dimensional
    sim_num_dims = length(sim.dims)
    obs_num_dims = length(obs.dims)
    ((sim_num_dims != 2) || (obs_num_dims != 2)) && error(
        "There are not only two dimensions in sim ($sim_num_dims) or obs ($obs_num_dims).",
    )

    # Data units must be present in both variables
    sim_data_units = units(sim)
    obs_data_units = units(obs)
    sim_data_units == "" && error("Unit is missing in data for sim")
    obs_data_units == "" && error("Unit is missing in data for obs")

    # And the units must agree
    sim_data_units == obs_data_units || error(
        "Units do not match between the data in sim ($sim_data_units) and obs ($obs_data_units)",
    )
    return nothing
end
"""
bias(sim::OutputVar, obs::OutputVar)
Return a `OutputVar` whose data is the bias (`sim.data - obs.data`) and compute the global
bias of `data` in `sim` and `obs` over longitude and latitude. The result is stored in
`var.attributes["global_bias"]`.
This function is currently implemented for `OutputVar`s with only the dimensions longitude
and latitude. Units must be supplied for data and dimensions in `sim` and `obs`. The units
for longitude and latitude should be degrees. Resampling is done automatically by resampling
`obs` on `sim`. Attributes in `sim` and `obs` will be thrown away. The long name and short
name of the returned `OutputVar` will be updated to reflect that a bias is computed.
See also [`global_bias`](@ref), [`squared_error`](@ref), [`global_mse`](@ref),
[`global_rmse`](@ref).
"""
function bias(sim::OutputVar, obs::OutputVar)
    _check_sim_obs_units_consistent(sim, obs)

    # Put obs on the same grid as sim so the data arrays and dims line up
    obs_on_sim_grid = resampled_as(obs, sim)

    # Pointwise bias
    bias = sim - obs_on_sim_grid

    # Rebuild the attributes with value type Any: the global bias should be stored
    # as-is (its unit could be a Unitful quantity), not stringified
    ret_attributes = Dict{keytype(bias.attributes), Any}(bias.attributes)

    # Restore the data units (the subtraction discards them)
    ret_attributes["units"] = units(sim)

    # Name the result so it is clearly a bias, tagging on sim's short name if present
    ret_attributes["short_name"] = "sim-obs"
    ret_attributes["long_name"] = "SIM - OBS"
    sim_short_name = short_name(sim)
    if !isempty(sim_short_name)
        ret_attributes["short_name"] = ret_attributes["short_name"] * "_" * sim_short_name
        ret_attributes["long_name"] = ret_attributes["long_name"] * " " * sim_short_name
    end

    # Global bias = area-weighted mean of the bias: integral of the bias divided by
    # the integral of one over the same domain
    ones_var = OutputVar(
        bias.attributes,
        bias.dims,
        bias.dim_attributes,
        ones(size(bias.data)),
    )
    integrated_bias = integrate_lonlat(bias).data
    normalization = integrate_lonlat(ones_var).data
    # ./ rather than / because both operands are zero dimensional arrays
    ret_attributes["global_bias"] = integrated_bias ./ normalization

    return OutputVar(ret_attributes, bias.dims, bias.dim_attributes, bias.data)
end
"""
global_bias(sim::OutputVar, obs::OutputVar)
Return the global bias of `data` in `sim` and `obs` over longitude and latitude.
This function is currently only implemented for `OutputVar`s with only the dimensions
longitude and latitude. Units must be supplied for data and dimensions in `sim` and `obs`.
The units for longitude and latitude should be degrees. Resampling is done automatically by
resampling `obs` on `sim`.
See also [`bias`](@ref), [`squared_error`](@ref), [`global_mse`](@ref),
[`global_rmse`](@ref).
"""
function global_bias(sim::OutputVar, obs::OutputVar)
    # bias() stores the scalar global bias as an attribute; just look it up
    return bias(sim, obs).attributes["global_bias"]
end
"""
squared_error(sim::OutputVar, obs::OutputVar)
Return an `OutputVar` whose data is the squared error (`(sim.data - obs.data)^2`) and compute
the global mean squared error (MSE) and global root mean squared error (RMSE) of `data` in
`sim` and `obs` over longitude and latitude. The result is stored in `var.attributes["mse"]`
and `var.attributes["rmse"]`.
This function is currently implemented for `OutputVar`s with only the dimensions longitude
and latitude. Units must be supplied for data and dimensions in `sim` and `obs`. The units
for longitude and latitude should be degrees. Resampling is done automatically by resampling
`obs` on `sim`. Attributes in `sim` and `obs` will be thrown away. The long name and short
name of the returned `OutputVar` will be updated to reflect that a squared error is computed.
See also [`global_mse`](@ref), [`global_rmse`](@ref), [`bias`](@ref), [`global_bias`](@ref).
"""
function squared_error(sim::OutputVar, obs::OutputVar)
    _check_sim_obs_units_consistent(sim, obs)
    # Resample obs on sim to ensure the size of data in sim and obs are the same and the
    # dims are the same
    obs_resampled = resampled_as(obs, sim)
    # Compute squared error
    # Do not use ^ since ^ is not defined between an OutputVar and Real; compute the
    # difference once (each subtraction allocates a full OutputVar) and square it by
    # multiplication
    diff = sim - obs_resampled
    squared_error = diff * diff
    # Do this because we do not want to store global mse and rmse as strings
    ret_attributes = Dict{String, Any}(squared_error.attributes)
    # Add units back for the squared error
    # Always treat as a string since the string representation of something type Unitful is
    # not always parseable as a Unitful.Unit (see:
    # https://github.com/PainterQubits/Unitful.jl/issues/412)
    ret_attributes["units"] = "($(units(sim)))^2"
    # Add short and long name
    ret_attributes["short_name"] = "(sim-obs)^2"
    ret_attributes["long_name"] = "(SIM - OBS)^2"
    if !isempty(short_name(sim))
        ret_attributes["short_name"] *= "_" * short_name(sim)
        ret_attributes["long_name"] *= " " * short_name(sim)
    end
    # Compute global mse and global rmse and store them as attributes; the MSE is the
    # area-weighted mean of the squared error: integral of the squared error divided by
    # the integral of one over the same domain
    integrated_squared_error = integrate_lonlat(squared_error).data
    normalization =
        integrate_lonlat(
            OutputVar(
                squared_error.attributes,
                squared_error.dims,
                squared_error.dim_attributes,
                ones(size(squared_error.data)),
            ),
        ).data
    # Do ./ instead of / because we are dividing between zero dimensional arrays
    mse = integrated_squared_error ./ normalization
    ret_attributes["global_mse"] = mse
    ret_attributes["global_rmse"] = sqrt(mse)
    return OutputVar(
        ret_attributes,
        squared_error.dims,
        squared_error.dim_attributes,
        squared_error.data,
    )
end
"""
global_mse(sim::OutputVar, obs::OutputVar)
Return the global mean squared error (MSE) of `data` in `sim` and `obs` over longitude and
latitude.
This function is currently implemented for `OutputVar`s with only the dimensions longitude
and latitude. Units must be supplied for data and dimensions in `sim` and `obs`. The units
for longitude and latitude should be degrees. Resampling is done automatically by resampling
`obs` on `sim`.
See also [`squared_error`](@ref), [`global_rmse`](@ref), [`bias`](@ref), [`global_bias`](@ref).
"""
function global_mse(sim::OutputVar, obs::OutputVar)
    # squared_error() stores the scalar global MSE as an attribute; just look it up
    return squared_error(sim, obs).attributes["global_mse"]
end
"""
global_rmse(sim::OutputVar, obs::OutputVar)
Return the global root mean squared error (RMSE) of `data` in `sim` and `obs` over longitude
and latitude.
This function is currently implemented for `OutputVar`s with only the dimensions longitude
and latitude. Units must be supplied for data and dimensions in `sim` and `obs`. The units
for longitude and latitude should be degrees. Resampling is done automatically by resampling
`obs` on `sim`.
See also [`squared_error`](@ref), [`global_mse`](@ref), [`bias`](@ref), [`global_bias`](@ref).
"""
function global_rmse(sim::OutputVar, obs::OutputVar)
    # squared_error() stores the scalar global RMSE as an attribute; just look it up
    return squared_error(sim, obs).attributes["global_rmse"]
end
"""
_dates_to_seconds(var::OutputVar;
new_start_date = nothing,
shift_by = identity)
Convert dates in time dimension to seconds with respect to the first date in the time
dimension array or the `new_start_date`.
Dates in the time dimension are automatically converted to seconds with respect to the first
date in the time dimension array or the `new_start_date`. The parameter `new_start_date` can
be any string parseable by the [Dates](https://docs.julialang.org/en/v1/stdlib/Dates/)
module or a `Dates.DateTime` object. The start date is added to the
attributes of the `OutputVar`. The parameter `shift_by` is a function that takes in
`Dates.DateTime` elements and returns `Dates.DateTime` elements; it is applied to
each element of the time array. Shifting the dates and converting to seconds is done in that
order.
Note that this function only works for the time dimension and will not work for the date
dimension.
"""
function _dates_to_seconds(
    var::OutputVar;
    new_start_date = nothing,
    shift_by = identity,
)
    # Only the time dimension is supported, and its elements must be dates
    has_time(var) || error(
        "Converting from dates to seconds is only supported for the time dimension",
    )
    eltype(times(var)) <: Dates.DateTime ||
        error("Type of time dimension is not dates")

    # Shift every date before converting (no-op when shift_by is nothing)
    time_arr = copy(times(var))
    isnothing(shift_by) || (time_arr .= shift_by.(time_arr))

    # The reference date is the first (shifted) date unless the caller supplied one;
    # a string is parsed into a Dates.DateTime
    start_date = isnothing(new_start_date) ? time_arr[begin] : new_start_date
    if start_date isa AbstractString
        start_date = Dates.DateTime(start_date)
    end
    seconds_arr = [date_to_time(start_date, date) for date in time_arr]

    # Remake the OutputVar, recording the start date and the new unit ("s") and
    # swapping the time dimension for the seconds array
    ret_attribs = deepcopy(var.attributes)
    ret_attribs["start_date"] = string(start_date)
    ret_dim_attribs = deepcopy(var.dim_attributes)
    ret_dim_attribs[time_name(var)]["units"] = "s"
    ret_dims = OrderedDict(
        conventional_dim_name(dim_name) == "time" ? dim_name => seconds_arr :
        dim_name => dim_data for (dim_name, dim_data) in deepcopy(var.dims)
    )
    return OutputVar(ret_attribs, ret_dims, ret_dim_attribs, copy(var.data))
end
"""
shift_to_start_of_previous_month(var::OutputVar)
Shift the times in the time dimension to the start of the previous month.
After applying this function, the start date in the attributes corresponds to the first
element in the time array.
This function is helpful in ensuring consistency in dates between simulation and
observational data. One example of this is when adjusting monthly averaged data. For
instance, suppose that data on 2010-02-01 in the `OutputVar` corresponds to the monthly
average for January. This function shifts the times so that 2010-01-01 will correspond to
the monthly average for January.
Note that this function only works for the time dimension and will not work for the date
dimension.
"""
function shift_to_start_of_previous_month(var::OutputVar)
    # Check if time dimension exists, floats are in the array, and unit of data is
    # second
    has_time(var) || error("Time is not a dimension of var")
    # Bug fix: the message was previously not wrapped in error(), so this check
    # evaluated a string and silently did nothing
    eltype(times(var)) <: Dates.DateTime && error("Dates found in time array")
    dim_units(var, time_name(var)) != "s" &&
        error("Unit of data is not in second")
    # Convert from seconds to dates using the stored start date
    date_arr =
        Dates.Second.(times(var)) .+
        Dates.DateTime.(var.attributes["start_date"])
    # Apply transformations (find first day of month and subtract one month)
    date_arr .=
        date_arr .|> Dates.firstdayofmonth .|> date -> date - Dates.Month(1)
    # Convert from dates back to seconds, relative to the new first date
    start_date = date_arr[begin]
    time_arr = map(date -> date_to_time(start_date, date), date_arr)
    # Remake the OutputVar with the shifted times and the updated start date
    ret_attribs = deepcopy(var.attributes)
    ret_attribs["start_date"] = string(start_date)
    ret_dims = deepcopy(var.dims)
    ret_dims["time"] = time_arr
    ret_dim_attributes = deepcopy(var.dim_attributes)
    ret_data = copy(var.data)
    return OutputVar(ret_attribs, ret_dims, ret_dim_attributes, ret_data)
end
"""
    @overload_binary_op(op)
Add methods to overload the given binary `op`erator for `OutputVars` and `Real`s.
Attributes that are not `short_name`, `long_name`, are discarded in the process.
"""
macro overload_binary_op(op)
    quote
        # OutputVar op OutputVar: requires compatible vars (checked by arecompatible)
        # and combines the data pointwise
        function Base.$op(x::OutputVar, y::OutputVar)
            arecompatible(x, y) || error("Input OutputVars are not compatible")
            ret_attributes = Dict{String, Any}()
            # Only short_name and long_name survive; every other attribute is
            # discarded (see the macro docstring). The kept ones are joined as
            # e.g. "<x name> + <y name>"
            specific_attributes = ("short_name", "long_name")
            for attr in specific_attributes
                if haskey(x.attributes, attr) && haskey(y.attributes, attr)
                    ret_attributes[attr] = string(
                        x.attributes[attr],
                        " ",
                        string($op),
                        " ",
                        y.attributes[attr],
                    )
                end
            end
            # NOTE(review): this method aliases x.dims and x.dim_attributes in the
            # result, while the scalar methods below deepcopy them — confirm this
            # asymmetry is intentional
            ret_dims = x.dims
            ret_dim_attributes = x.dim_attributes
            # Fused elementwise application of op over the data arrays
            ret_data = @. $op(x.data, y.data)
            return OutputVar(
                ret_attributes,
                ret_dims,
                ret_dim_attributes,
                ret_data,
            )
        end
        # OutputVar op Real: op applied elementwise with the scalar on the right
        function Base.$op(x::OutputVar, y::Real)
            ret_attributes = empty(x.attributes)
            # Only short_name and long_name are kept, e.g. "<x name> / 2"
            specific_attributes = ("short_name", "long_name")
            for attr in specific_attributes
                if haskey(x.attributes, attr)
                    ret_attributes[attr] =
                        string(x.attributes[attr], " ", string($op), " ", y)
                end
            end
            ret_dims = deepcopy(x.dims)
            ret_dim_attributes = deepcopy(x.dim_attributes)
            ret_data = @. $op(x.data, y)
            return OutputVar(
                ret_attributes,
                ret_dims,
                ret_dim_attributes,
                ret_data,
            )
        end
        # Real op OutputVar: op applied elementwise with the scalar on the left
        function Base.$op(x::Real, y::OutputVar)
            ret_attributes = empty(y.attributes)
            # Only short_name and long_name are kept, e.g. "2 * <y name>"
            specific_attributes = ("short_name", "long_name")
            for attr in specific_attributes
                if haskey(y.attributes, attr)
                    ret_attributes[attr] =
                        string(x, " ", string($op), " ", y.attributes[attr])
                end
            end
            ret_dims = deepcopy(y.dims)
            ret_dim_attributes = deepcopy(y.dim_attributes)
            ret_data = @. $op(x, y.data)
            return OutputVar(
                ret_attributes,
                ret_dims,
                ret_dim_attributes,
                ret_data,
            )
        end
    end
end
# Generate the +, -, *, and / methods between OutputVars (and Reals)
@overload_binary_op (+)
@overload_binary_op (-)
@overload_binary_op (*)
@overload_binary_op (/)
include("outvar_dimensions.jl")
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
|
[
"Apache-2.0"
] | 0.5.10 | 1f6c4859eafc66f1b6df4932bd7040747581d816 | code | 486 | module Visualize
export plot!

# The functions below are declared without any methods ("function stubs").
# Presumably their methods are added elsewhere, e.g. in package extensions that
# load when a plotting backend is available — TODO confirm against the package's
# ext/ directory.
function _constrained_cmap end
function oceanmask end
function landmask end
function heatmap2D! end
function sliced_heatmap! end
function heatmap! end
function line_plot1D! end
function sliced_line_plot! end
function line_plot! end
function sliced_plot! end
function plot! end
function contour2D_on_globe! end
function heatmap2D_on_globe! end
function plot_bias_on_globe! end
function plot_boxplot! end
function plot_leaderboard! end
end
| ClimaAnalysis | https://github.com/CliMA/ClimaAnalysis.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.