column         kind              range
licenses       sequencelengths   1 .. 3
version        stringclasses     677 values
tree_hash      stringlengths     40 .. 40
path           stringclasses     1 value
type           stringclasses     2 values
size           stringlengths     2 .. 8
text           stringlengths     25 .. 67.1M
package_name   stringlengths     2 .. 41
repo           stringlengths     33 .. 86
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48610
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_56 Documentation not found. """ @cenum __JL_Ctag_56::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of tag before reseting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher and create new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the inital construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the inital construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)() will fail if this function is returning false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Retuns the current state of the cipher. Ther state of the cipher can be ready for use, finalized, or has encountered an error. if the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
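The hashing wrappers in the code above are easiest to see with a small end-to-end usage sketch. The example below is not part of the generated bindings; it assumes the companion LibAwsCommon.jl package exposes the aws-c-common helpers `aws_default_allocator`, `aws_byte_cursor_from_c_str`, `aws_byte_buf_init`, and `aws_byte_buf_clean_up`, and that the Clang.jl-generated `aws_byte_buf` struct has the positional constructor `(len, buffer, capacity, allocator)`.

```julia
# One-shot SHA-256 via the LibAwsCal wrappers — a hedged sketch, not from the package source.
using LibAwsCal, LibAwsCommon   # assumes LibAwsCommon.jl provides the aws-c-common helpers used below

alloc = aws_default_allocator()
aws_cal_library_init(alloc)                        # one-time, process-wide initialization

msg = "hello world"                                # keep the String alive while the cursor points at it
input = Ref(aws_byte_cursor_from_c_str(msg))

# Empty aws_byte_buf (assumes the generated positional constructor), then let aws-c-common allocate it.
digest = Ref(aws_byte_buf(0, C_NULL, 0, C_NULL))
aws_byte_buf_init(digest, alloc, AWS_SHA256_LEN)

# truncate_to = 0 requests the full 32-byte digest (see the aws_hash_finalize docstring above).
aws_sha256_compute(alloc, input, digest, 0) == 0 || error("aws_sha256_compute failed")

buf = digest[]
println(bytes2hex(unsafe_wrap(Array, buf.buffer, Int(buf.len))))

aws_byte_buf_clean_up(digest)
aws_cal_library_clean_up()
```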
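In the same spirit, a hedged sketch of the ECC sign/verify flow the docstrings describe: sign a 32-byte digest with a freshly generated P-256 key, then verify the DER-encoded signature. It relies on the same LibAwsCommon.jl assumptions as the previous sketch; `aws_byte_cursor_from_buf` is the aws-c-common helper that views a byte buffer as a cursor, and the "digest" here is only a stand-in value.

```julia
# ECDSA P-256 sign/verify round trip — hedged sketch, not from the package source.
using LibAwsCal, LibAwsCommon

alloc = aws_default_allocator()
aws_cal_library_init(alloc)

key = aws_ecc_key_pair_new_generate_random(alloc, AWS_CAL_ECDSA_P256)
key == C_NULL && error("key generation failed (per the docs, not supported on all Apple platforms)")

# The API expects a digest, not the raw message; this 32-byte string stands in for a SHA-256 digest.
fake_digest = "0123456789abcdef0123456789abcdef"
digest = Ref(aws_byte_cursor_from_c_str(fake_digest))

sig = Ref(aws_byte_buf(0, C_NULL, 0, C_NULL))
aws_byte_buf_init(sig, alloc, aws_ecc_key_pair_signature_length(key))

aws_ecc_key_pair_sign_message(key, digest, sig) == 0 || error("sign failed")

sig_cur = Ref(aws_byte_cursor_from_buf(sig))       # view the DER-encoded signature as a cursor
aws_ecc_key_pair_verify_signature(key, digest, sig_cur) == 0 || error("verify failed")

aws_byte_buf_clean_up(sig)
aws_ecc_key_pair_release(key)
aws_cal_library_clean_up()
```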
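Finally, a hedged sketch of the symmetric-cipher workflow (create, encrypt, finalize, read the GCM tag). Passing NULL for key, IV, and AAD lets the cipher generate its own key and IV, which can then be read back through the getters; the same LibAwsCommon.jl assumptions apply.

```julia
# AES-256-GCM encryption with an internally generated key/IV — hedged sketch, not from the package source.
using LibAwsCal, LibAwsCommon

alloc = aws_default_allocator()
aws_cal_library_init(alloc)

# NULL key/iv/aad: the cipher generates a key and IV internally; no additional authenticated data.
cipher = aws_aes_gcm_256_new(alloc, C_NULL, C_NULL, C_NULL)
cipher == C_NULL && error("cipher creation failed")

msg = "attack at dawn"                              # keep alive while the cursor is in use
plaintext = aws_byte_cursor_from_c_str(msg)

ciphertext = Ref(aws_byte_buf(0, C_NULL, 0, C_NULL))
aws_byte_buf_init(ciphertext, alloc, sizeof(msg) + 2 * AWS_AES_256_CIPHER_BLOCK_SIZE)

aws_symmetric_cipher_encrypt(cipher, plaintext, ciphertext) == 0 || error("encrypt failed")
aws_symmetric_cipher_finalize_encryption(cipher, ciphertext) == 0 || error("finalize failed")

tag = aws_symmetric_cipher_get_tag(cipher)                     # GCM tag (cursor into the cipher's internal buffer)
key = aws_symmetric_cipher_get_key(cipher)                     # generated key, needed later for decryption
iv  = aws_symmetric_cipher_get_initialization_vector(cipher)   # generated IV

aws_byte_buf_clean_up(ciphertext)
aws_symmetric_cipher_destroy(cipher)
aws_cal_library_clean_up()
```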
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48612
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
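### Example

A minimal one-shot sketch (not part of the generated bindings), assuming the aws-c-common bindings in scope provide `aws_default_allocator`, `aws_byte_cursor_from_c_str`, `aws_byte_buf_init`, and `aws_byte_buf_clean_up` as in the C API:

```julia
alloc = aws_default_allocator()
msg = "hello world"
out = Ref{aws_byte_buf}()
aws_byte_buf_init(out, alloc, AWS_MD5_LEN) == 0 || error("allocation failed")
GC.@preserve msg begin
    input = Ref(aws_byte_cursor_from_c_str(msg))
    aws_md5_compute(alloc, input, out, 0) == 0 || error("aws_md5_compute failed")
end
digest = copy(unsafe_wrap(Array, out[].buffer, Int(out[].len)))  # 16-byte MD5 digest
aws_byte_buf_clean_up(out)
```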
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\_hmac. This can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over to\_hmac and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\_to to 16. If you want the full digest size, just set this to 0.
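### Example

A minimal one-shot sketch (not part of the generated bindings), assuming the aws-c-common bindings in scope provide `aws_default_allocator`, `aws_byte_cursor_from_c_str`, `aws_byte_buf_init`, and `aws_byte_buf_clean_up` as in the C API:

```julia
alloc = aws_default_allocator()
secret = "my-secret-key"            # hypothetical key material
data = "message to authenticate"
out = Ref{aws_byte_buf}()
aws_byte_buf_init(out, alloc, AWS_SHA256_HMAC_LEN) == 0 || error("allocation failed")
GC.@preserve secret data begin
    secret_cur = Ref(aws_byte_cursor_from_c_str(secret))
    data_cur = Ref(aws_byte_cursor_from_c_str(data))
    aws_sha256_hmac_compute(alloc, secret_cur, data_cur, out, 0) == 0 || error("hmac failed")
end
mac = copy(unsafe_wrap(Array, out[].buffer, Int(out[].len)))  # 32-byte HMAC-SHA256
aws_byte_buf_clean_up(out)
```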
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_164 Documentation not found. """ @cenum __JL_Ctag_164::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
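### Example

The usual ownership pattern looks like this sketch (illustrative only; `key_pair` stands for a previously created [`aws_rsa_key_pair`](@ref)):

```julia
extra_ref = aws_rsa_key_pair_acquire(key_pair)    # ref count += 1, returns the same pointer
# ... hand `extra_ref` to another component ...
extra_ref = aws_rsa_key_pair_release(extra_ref)   # ref count -= 1, always returns C_NULL
key_pair = aws_rsa_key_pair_release(key_pair)     # final release destroys the key pair
```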
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
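### Example

A hedged sketch of wrapping key material with an internally generated KEK (not part of the generated bindings; assumes `aws_default_allocator`, `aws_byte_cursor_from_array`, `aws_byte_buf_init`, and `aws_byte_buf_clean_up` from the aws-c-common bindings):

```julia
alloc = aws_default_allocator()
cipher = aws_aes_keywrap_256_new(alloc, C_NULL)       # KEK is generated internally
cipher == C_NULL && error("cipher creation failed")
key_material = rand(UInt8, 32)                        # placeholder secret to wrap
out = Ref{aws_byte_buf}()
aws_byte_buf_init(out, alloc, length(key_material) + AWS_AES_256_CIPHER_BLOCK_SIZE)
GC.@preserve key_material begin
    cur = aws_byte_cursor_from_array(pointer(key_material), length(key_material))
    aws_symmetric_cipher_encrypt(cipher, cur, out)
end
aws_symmetric_cipher_finalize_encryption(cipher, out)
wrapped = copy(unsafe_wrap(Array, out[].buffer, Int(out[].len)))
kek_cur = aws_symmetric_cipher_get_key(cipher)        # cursor over the generated KEK; copy it before destroy
aws_byte_buf_clean_up(out)
aws_symmetric_cipher_destroy(cipher)
```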
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\_encrypt + an extra BLOCK to account for padding etc... returns AWS\_OP\_SUCCESS on success. Call aws\_last\_error() to determine the failure cause if it returns AWS\_OP\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\_decrypt + an extra BLOCK to account for padding etc... returns AWS\_OP\_SUCCESS on success. Call aws\_last\_error() to determine the failure cause if it returns AWS\_OP\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\_OP\_SUCCESS on success.
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of tag before reseting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher and create new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
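### Example

An illustrative sketch of taking ownership of the tag bytes (assumes a GCM cipher whose encryption has already been finalized):

```julia
# after aws_symmetric_cipher_finalize_encryption(cipher, out) has succeeded:
tag_cur = aws_symmetric_cipher_get_tag(cipher)
tag = tag_cur.len == 0 ? UInt8[] : copy(unsafe_wrap(Array, tag_cur.ptr, Int(tag_cur.len)))
```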
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)() will fail if this function is returning false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or has encountered an error. If the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found.
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48610
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
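A minimal sketch along the same lines (the `aws_default_allocator`, `aws_byte_cursor_from_c_str`, `aws_byte_buf_init`, and `aws_byte_buf_clean_up` helpers are assumed to come from the aws-c-common bindings, e.g. LibAwsCommon.jl):

```julia
alloc  = aws_default_allocator()
secret = Ref(aws_byte_cursor_from_c_str("my-secret-key"))
data   = Ref(aws_byte_cursor_from_c_str("payload"))
out    = Ref{aws_byte_buf}()
aws_byte_buf_init(out, alloc, AWS_SHA256_HMAC_LEN)    # 32 bytes
aws_sha256_hmac_compute(alloc, secret, data, out, 0)  # 0 = full digest
mac = copy(unsafe_wrap(Vector{UInt8}, out[].buffer, out[].len))
aws_byte_buf_clean_up(out)
```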
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_56 Documentation not found. """ @cenum __JL_Ctag_56::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
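Because the return value is always NULL, a convenient idiom (sketch) is to assign it back to the variable holding the reference:

```julia
key_pair = aws_rsa_key_pair_release(key_pair)   # reference dropped; key_pair is now a null pointer
```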
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
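A minimal creation sketch (`alloc` is assumed to be an allocator pointer, e.g. from `aws_default_allocator()` in the aws-c-common bindings):

```julia
cipher = aws_aes_keywrap_256_new(alloc, C_NULL)   # passing NULL lets the cipher generate a key
key = aws_symmetric_cipher_get_key(cipher)        # cursor over the internally generated key
# ... wrap/unwrap with aws_symmetric_cipher_encrypt / aws_symmetric_cipher_decrypt ...
aws_symmetric_cipher_destroy(cipher)
```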
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and, if there is any, writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note: encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials set up for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first.
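For example (sketch), after finalizing a GCM encryption you might copy the tag into a Julia-owned array before making any further calls on the cipher:

```julia
tag = aws_symmetric_cipher_get_tag(cipher)                   # cursor into internal memory
my_tag = copy(unsafe_wrap(Vector{UInt8}, tag.ptr, tag.len))  # safe to keep after further calls
```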
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support a tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function returns false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in error. If the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found.
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48612
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_159 Documentation not found. """ @cenum __JL_Ctag_159::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
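### Example

A small sketch of letting the cipher generate its own key and reading it back (assumes `alloc` came from an aws-c-common helper such as `aws_default_allocator()`):

```julia
cipher = aws_aes_keywrap_256_new(alloc, C_NULL)   # NULL key: one is generated internally
cipher == C_NULL && error("aws_aes_keywrap_256_new failed")
key = aws_symmetric_cipher_get_key(cipher)        # cursor into the cipher's internal key buffer
# ... wrap/unwrap with aws_symmetric_cipher_encrypt()/aws_symmetric_cipher_decrypt() ...
aws_symmetric_cipher_destroy(cipher)
```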
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc..., and, if there is any, writes the remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc..., and, if there is any, writes the remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note: encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials set up for immediate reuse. Note: the GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API, as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
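### Example

One way to take that copy in Julia (a sketch; it assumes the `aws_byte_cursor` struct exposes the usual aws-c-common `len`/`ptr` fields):

```julia
tag = aws_symmetric_cipher_get_tag(cipher)   # only valid until the next call on this cipher
tag_copy = Vector{UInt8}(undef, tag.len)
GC.@preserve tag_copy unsafe_copyto!(pointer(tag_copy), tag.ptr, tag.len)
```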
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support a tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function returns false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in error. If the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48610
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
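### Example

A small sketch of the lookup (no allocator needed; on success the cursor typically refers to static data inside the library):

```julia
oid = Ref{aws_byte_cursor}()
aws_ecc_oid_from_curve_name(AWS_CAL_ECDSA_P256, oid)   # returns AWS_OP_SUCCESS (0) on success
oid[].len   # byte length of the DER-encoded OID payload
```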
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
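### Example

A one-shot sketch (assumes `alloc` plus the cursor/buffer helpers `aws_byte_cursor_from_c_str()` and `aws_byte_buf_init()` from aws-c-common / LibAwsCommon):

```julia
input  = aws_byte_cursor_from_c_str("hello world")
digest = Ref{aws_byte_buf}()
aws_byte_buf_init(digest, alloc, AWS_MD5_LEN)
aws_md5_compute(alloc, Ref(input), digest, 0)   # 0 = full 16-byte digest
```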
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
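### Example

A one-shot sketch (same assumed aws-c-common helpers as above: `alloc`, `aws_byte_cursor_from_c_str()`, `aws_byte_buf_init()`):

```julia
secret  = aws_byte_cursor_from_c_str("my-signing-key")
message = aws_byte_cursor_from_c_str("data to authenticate")
mac     = Ref{aws_byte_buf}()
aws_byte_buf_init(mac, alloc, AWS_SHA256_HMAC_LEN)
aws_sha256_hmac_compute(alloc, Ref(secret), Ref(message), mac, 0)   # 0 = full 32-byte MAC
```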
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_56 Documentation not found. """ @cenum __JL_Ctag_56::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc., and writes any remaining decrypted data to out, if there is any. If out is dynamic, it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. Returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note: encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv, etc. materials set up for immediate reuse. Note: the GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. Returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
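### Example

A hedged sketch of the copy-before-further-use advice above (not from the upstream C docs). It assumes `cipher` is an AES GCM cipher on which [`aws_symmetric_cipher_finalize_encryption`](@ref)() has already succeeded, and that an `aws_byte_cursor_from_array` wrapper is available from the companion aws-c-common bindings.

```julia
tag_view = aws_symmetric_cipher_get_tag(cipher)   # unsafe view into the cipher's internal buffer
tag = Vector{UInt8}(undef, tag_view.len)          # own copy, safe across later API calls
GC.@preserve tag unsafe_copyto!(pointer(tag), tag_view.ptr, tag_view.len)
# Later, e.g. after aws_symmetric_cipher_reset(), hand the copy back for decryption:
# aws_symmetric_cipher_set_tag(cipher, aws_byte_cursor_from_array(tag, length(tag)))  # assumed aws-c-common helper
```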
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support a tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function returns false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in an error state. If the cipher is in a finalized or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48612
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_159 Documentation not found. """ @cenum __JL_Ctag_159::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of tag before reseting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher and create new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the inital construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the inital construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)() will fail if this function is returning false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Retuns the current state of the cipher. Ther state of the cipher can be ready for use, finalized, or has encountered an error. if the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48612
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be performed depends on whether encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic Curve public key from x and y coordinates encoded as hex strings. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length). On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier.
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hmac. This can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0.
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_162 Documentation not found. """ @cenum __JL_Ctag_162::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes, the tag can be provided via the [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, the tag must be provided before the first decrypt call (this is a Windows bcrypt limitation, but for consistency's sake the same limitation is extended to other platforms). The tag generated during encryption can be retrieved using the [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)(). If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause.
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note: encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials set up for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first.
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support a tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function is returning false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or error. If the cipher is in a finalized or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found.
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48610
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if you use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO and you do not call this function, you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_56 Documentation not found. """ @cenum __JL_Ctag_56::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
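A small sketch of the ref-count lifecycle; because release always returns NULL, the usual idiom is to assign the return value back to the local variable. `alloc` and `priv_key_cursor` below are assumed to be set up elsewhere.

### Example

```julia
# Hypothetical sketch; `priv_key_cursor` is an aws_byte_cursor over PKCS#1 RSAPrivateKey DER bytes.
pair = aws_rsa_key_pair_new_from_private_key_pkcs1(alloc, priv_key_cursor)
extra = aws_rsa_key_pair_acquire(pair)    # ref count is now 2
extra = aws_rsa_key_pair_release(extra)   # ref count 1, returns NULL
pair = aws_rsa_key_pair_release(pair)     # ref count 0, key pair is destroyed
```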
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the caller's job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the caller's job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set, it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes, the tag can be provided via the [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, the tag must be provided before the first decrypt is called (this is a Windows BCrypt limitation, but for consistency's sake the same limitation is extended to other platforms). The tag generated during encryption can be retrieved using the [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
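A rough wrap-flow sketch follows; the `key_to_wrap` cursor and the byte-buffer helpers from the aws-c-common bindings (`aws_byte_buf_init`) are assumptions here, and real code should check every return value.

### Example

```julia
# Hypothetical sketch; error handling omitted.
cipher = aws_aes_keywrap_256_new(alloc, C_NULL)             # NULL key => generated internally
wrapped = Ref{aws_byte_buf}()
aws_byte_buf_init(wrapped, alloc, 128)                      # assumed large enough for this demo
aws_symmetric_cipher_encrypt(cipher, key_to_wrap, wrapped)  # key_to_wrap is an aws_byte_cursor
aws_symmetric_cipher_finalize_encryption(cipher, wrapped)   # flushes the remaining output
kek = aws_symmetric_cipher_get_key(cipher)                  # view into internal memory; copy before destroy
aws_symmetric_cipher_destroy(cipher)
```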
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs, etc., and if there is any, writes the remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs, etc., and if there is any, writes the remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack-based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note: encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv, etc. materials set up for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR. ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
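For example, when round-tripping with AES GCM on one cipher instance, the tag needs to be copied right after finalizing encryption and handed back before the first decrypt. In the sketch below, the copy helpers `aws_byte_buf_init_copy_from_cursor` and `aws_byte_cursor_from_buf` are assumed to come from the aws-c-common bindings.

### Example

```julia
# Hypothetical sketch; error handling omitted.
aws_symmetric_cipher_finalize_encryption(cipher, ciphertext)
tag_view = aws_symmetric_cipher_get_tag(cipher)               # unsafe view into internal memory
tag_copy = Ref{aws_byte_buf}()
aws_byte_buf_init_copy_from_cursor(tag_copy, alloc, tag_view) # own the bytes before doing anything else
aws_symmetric_cipher_reset(cipher)
aws_symmetric_cipher_set_tag(cipher, aws_byte_cursor_from_buf(tag_copy))  # must happen before decrypt
```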
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function is returning false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in an error state. If the cipher is in a finalized or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials set up for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of the tag before resetting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of resetting the cipher, destroy the cipher and create a new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function returns false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in an error state. If the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
48610
using CEnum """ aws_cal_errors Documentation not found. """ @cenum aws_cal_errors::UInt32 begin AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = 7168 AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT = 7169 AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM = 7170 AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER = 7171 AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED = 7172 AWS_ERROR_CAL_MISMATCHED_DER_TYPE = 7173 AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM = 7174 AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM = 7175 AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM = 7176 AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT = 7177 AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT = 7178 AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED = 7179 AWS_ERROR_CAL_END_RANGE = 8191 end """ aws_cal_log_subject Documentation not found. """ @cenum aws_cal_log_subject::UInt32 begin AWS_LS_CAL_GENERAL = 7168 AWS_LS_CAL_ECC = 7169 AWS_LS_CAL_HASH = 7170 AWS_LS_CAL_HMAC = 7171 AWS_LS_CAL_DER = 7172 AWS_LS_CAL_LIBCRYPTO_RESOLVE = 7173 AWS_LS_CAL_RSA = 7174 AWS_LS_CAL_LAST = 8191 end """ aws_cal_library_init(allocator) Documentation not found. ### Prototype ```c void aws_cal_library_init(struct aws_allocator *allocator); ``` """ function aws_cal_library_init(allocator) ccall((:aws_cal_library_init, libaws_c_cal), Cvoid, (Ptr{aws_allocator},), allocator) end """ aws_cal_library_clean_up() Documentation not found. ### Prototype ```c void aws_cal_library_clean_up(void); ``` """ function aws_cal_library_clean_up() ccall((:aws_cal_library_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_cal_thread_clean_up() Documentation not found. ### Prototype ```c void aws_cal_thread_clean_up(void); ``` """ function aws_cal_thread_clean_up() ccall((:aws_cal_thread_clean_up, libaws_c_cal), Cvoid, ()) end """ aws_ecc_curve_name Documentation not found. """ @cenum aws_ecc_curve_name::UInt32 begin AWS_CAL_ECDSA_P256 = 0 AWS_CAL_ECDSA_P384 = 1 end # typedef void aws_ecc_key_pair_destroy_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_destroy_fn = Cvoid # typedef int aws_ecc_key_pair_sign_message_fn ( const struct aws_ecc_key_pair * key_pair , const struct aws_byte_cursor * message , struct aws_byte_buf * signature_output ) """ Documentation not found. """ const aws_ecc_key_pair_sign_message_fn = Cvoid # typedef int aws_ecc_key_pair_derive_public_key_fn ( struct aws_ecc_key_pair * key_pair ) """ Documentation not found. """ const aws_ecc_key_pair_derive_public_key_fn = Cvoid # typedef int aws_ecc_key_pair_verify_signature_fn ( const struct aws_ecc_key_pair * signer , const struct aws_byte_cursor * message , const struct aws_byte_cursor * signature ) """ Documentation not found. """ const aws_ecc_key_pair_verify_signature_fn = Cvoid # typedef size_t aws_ecc_key_pair_signature_length_fn ( const struct aws_ecc_key_pair * signer ) """ Documentation not found. """ const aws_ecc_key_pair_signature_length_fn = Cvoid """ aws_ecc_key_pair_vtable Documentation not found. """ struct aws_ecc_key_pair_vtable destroy::Ptr{aws_ecc_key_pair_destroy_fn} derive_pub_key::Ptr{aws_ecc_key_pair_derive_public_key_fn} sign_message::Ptr{aws_ecc_key_pair_sign_message_fn} verify_signature::Ptr{aws_ecc_key_pair_verify_signature_fn} signature_length::Ptr{aws_ecc_key_pair_signature_length_fn} end """ aws_ecc_key_pair Documentation not found. 
""" struct aws_ecc_key_pair allocator::Ptr{aws_allocator} ref_count::aws_atomic_var curve_name::aws_ecc_curve_name key_buf::aws_byte_buf pub_x::aws_byte_buf pub_y::aws_byte_buf priv_d::aws_byte_buf vtable::Ptr{aws_ecc_key_pair_vtable} impl::Ptr{Cvoid} end """ aws_ecc_key_pair_acquire(key_pair) Adds one to an ecc key pair's ref count. ### Prototype ```c void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_acquire(key_pair) ccall((:aws_ecc_key_pair_acquire, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_release(key_pair) Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. ### Prototype ```c void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_release(key_pair) ccall((:aws_ecc_key_pair_release, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) Creates an Elliptic Curve private key that can be used for signing. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: priv\\_key::len must match the appropriate length for the selected curve\\_name. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); ``` """ function aws_ecc_key_pair_new_from_private_key(allocator, curve_name, priv_key) ccall((:aws_ecc_key_pair_new_from_private_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}), allocator, curve_name, priv_key) end """ aws_ecc_key_pair_new_generate_random(allocator, curve_name) Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: On Apple platforms this function is only supported on MacOS. This is due to usage of SecItemExport, which is only available on MacOS 10.7+ (yes, MacOS only and no other Apple platforms). There are alternatives for ios and other platforms, but they are ugly to use. Hence for now it only supports this call on MacOS. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_pair_new_generate_random(allocator, curve_name) ccall((:aws_ecc_key_pair_new_generate_random, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name), allocator, curve_name) end """ aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) Creates an Elliptic Curve public key that can be used for verifying. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Note: public\\_key\\_x::len and public\\_key\\_y::len must match the appropriate length for the selected curve\\_name. 
### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); ``` """ function aws_ecc_key_pair_new_from_public_key(allocator, curve_name, public_key_x, public_key_y) ccall((:aws_ecc_key_pair_new_from_public_key, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, curve_name, public_key_x, public_key_y) end """ aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) Creates an Elliptic Curve public/private key pair from a DER encoded key pair. Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. Whether or not signing or verification can be perform depends on if encoded\\_keys is a public/private pair or a public key. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); ``` """ function aws_ecc_key_pair_new_from_asn1(allocator, encoded_keys) ccall((:aws_ecc_key_pair_new_from_asn1, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, encoded_keys) end """ aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) Creates an Elliptic curve public key from x and y coordinates encoded as hex strings Returns a new instance of [`aws_ecc_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); ``` """ function aws_ecc_key_new_from_hex_coordinates(allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) ccall((:aws_ecc_key_new_from_hex_coordinates, libaws_c_cal), Ptr{aws_ecc_key_pair}, (Ptr{aws_allocator}, aws_ecc_curve_name, aws_byte_cursor, aws_byte_cursor), allocator, curve_name, pub_x_hex_cursor, pub_y_hex_cursor) end """ aws_ecc_key_pair_derive_public_key(key_pair) Derives a public key from the private key if supported by this operating system (not supported on OSX). key\\_pair::pub\\_x and key\\_pair::pub\\_y will be set with the raw key buffers. ### Prototype ```c int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_derive_public_key(key_pair) ccall((:aws_ecc_key_pair_derive_public_key, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_curve_name_from_oid(oid, curve_name) Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include type specifier or length. On success, the value of curve\\_name will be set. ### Prototype ```c int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); ``` """ function aws_ecc_curve_name_from_oid(oid, curve_name) ccall((:aws_ecc_curve_name_from_oid, libaws_c_cal), Cint, (Ptr{aws_byte_cursor}, Ptr{aws_ecc_curve_name}), oid, curve_name) end """ aws_ecc_oid_from_curve_name(curve_name, oid) Get the DER encoded OID from the curve\\_name. The OID in this case will not contain the type or the length specifier. 
### Prototype ```c int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); ``` """ function aws_ecc_oid_from_curve_name(curve_name, oid) ccall((:aws_ecc_oid_from_curve_name, libaws_c_cal), Cint, (aws_ecc_curve_name, Ptr{aws_byte_cursor}), curve_name, oid) end """ aws_ecc_key_pair_sign_message(key_pair, message, signature) Uses the key\\_pair's private key to sign message. The output will be in signature. Signature must be large enough to hold the signature. Check [`aws_ecc_key_pair_signature_length`](@ref)() for the appropriate size. Signature will be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); ``` """ function aws_ecc_key_pair_sign_message(key_pair, message, signature) ccall((:aws_ecc_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}), key_pair, message, signature) end """ aws_ecc_key_pair_verify_signature(key_pair, message, signature) Uses the key\\_pair's public key to verify signature of message. Signature should be DER encoded. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. ### Prototype ```c int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); ``` """ function aws_ecc_key_pair_verify_signature(key_pair, message, signature) ccall((:aws_ecc_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, message, signature) end """ aws_ecc_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); ``` """ function aws_ecc_key_pair_signature_length(key_pair) ccall((:aws_ecc_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_ecc_key_pair},), key_pair) end """ aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); ``` """ function aws_ecc_key_pair_get_public_key(key_pair, pub_x, pub_y) ccall((:aws_ecc_key_pair_get_public_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), key_pair, pub_x, pub_y) end """ aws_ecc_key_pair_get_private_key(key_pair, private_d) Documentation not found. ### Prototype ```c void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); ``` """ function aws_ecc_key_pair_get_private_key(key_pair, private_d) ccall((:aws_ecc_key_pair_get_private_key, libaws_c_cal), Cvoid, (Ptr{aws_ecc_key_pair}, Ptr{aws_byte_cursor}), key_pair, private_d) end """ aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) Documentation not found. 
### Prototype ```c size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); ``` """ function aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name) ccall((:aws_ecc_key_coordinate_byte_size_from_curve_name, libaws_c_cal), Csize_t, (aws_ecc_curve_name,), curve_name) end """ aws_hash_vtable Documentation not found. """ struct aws_hash_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hash Documentation not found. """ struct aws_hash allocator::Ptr{aws_allocator} vtable::Ptr{aws_hash_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hash * ( aws_hash_new_fn ) ( struct aws_allocator * allocator ) """ Documentation not found. """ const aws_hash_new_fn = Cvoid """ aws_sha256_new(allocator) Allocates and initializes a sha256 hash instance. ### Prototype ```c struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); ``` """ function aws_sha256_new(allocator) ccall((:aws_sha256_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_sha1_new(allocator) Allocates and initializes a sha1 hash instance. ### Prototype ```c struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); ``` """ function aws_sha1_new(allocator) ccall((:aws_sha1_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_md5_new(allocator) Allocates and initializes an md5 hash instance. ### Prototype ```c struct aws_hash *aws_md5_new(struct aws_allocator *allocator); ``` """ function aws_md5_new(allocator) ccall((:aws_md5_new, libaws_c_cal), Ptr{aws_hash}, (Ptr{aws_allocator},), allocator) end """ aws_hash_destroy(hash) Cleans up and deallocates hash. ### Prototype ```c void aws_hash_destroy(struct aws_hash *hash); ``` """ function aws_hash_destroy(hash) ccall((:aws_hash_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hash},), hash) end """ aws_hash_update(hash, to_hash) Updates the running hash with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); ``` """ function aws_hash_update(hash, to_hash) ccall((:aws_hash_update, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_cursor}), hash, to_hash) end """ aws_hash_finalize(hash, output, truncate_to) Completes the hash computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hash_finalize(hash, output, truncate_to) ccall((:aws_hash_finalize, libaws_c_cal), Cint, (Ptr{aws_hash}, Ptr{aws_byte_buf}, Csize_t), hash, output, truncate_to) end """ aws_md5_compute(allocator, input, output, truncate_to) Computes the md5 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. 
### Prototype ```c int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_md5_compute(allocator, input, output, truncate_to) ccall((:aws_md5_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha256_compute(allocator, input, output, truncate_to) Computes the sha256 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_compute(allocator, input, output, truncate_to) ccall((:aws_sha256_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_sha1_compute(allocator, input, output, truncate_to) Computes the sha1 hash over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example, if you want a SHA1 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha1_compute(allocator, input, output, truncate_to) ccall((:aws_sha1_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, input, output, truncate_to) end """ aws_set_md5_new_fn(fn) Set the implementation of md5 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_md5_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_md5_new_fn(fn) ccall((:aws_set_md5_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha256_new_fn(fn) Set the implementation of sha256 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha256_new_fn(fn) ccall((:aws_set_sha256_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_set_sha1_new_fn(fn) Set the implementation of sha1 to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. 
If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha1_new_fn(aws_hash_new_fn *fn); ``` """ function aws_set_sha1_new_fn(fn) ccall((:aws_set_sha1_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hash_new_fn},), fn) end """ aws_hmac_vtable Documentation not found. """ struct aws_hmac_vtable alg_name::Ptr{Cchar} provider::Ptr{Cchar} destroy::Ptr{Cvoid} update::Ptr{Cvoid} finalize::Ptr{Cvoid} end """ aws_hmac Documentation not found. """ struct aws_hmac allocator::Ptr{aws_allocator} vtable::Ptr{aws_hmac_vtable} digest_size::Csize_t good::Bool impl::Ptr{Cvoid} end # typedef struct aws_hmac * ( aws_hmac_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * secret ) """ Documentation not found. """ const aws_hmac_new_fn = Cvoid """ aws_sha256_hmac_new(allocator, secret) Allocates and initializes a sha256 hmac instance. Secret is the key to be used for the hmac process. ### Prototype ```c struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); ``` """ function aws_sha256_hmac_new(allocator, secret) ccall((:aws_sha256_hmac_new, libaws_c_cal), Ptr{aws_hmac}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, secret) end """ aws_hmac_destroy(hmac) Cleans up and deallocates hmac. ### Prototype ```c void aws_hmac_destroy(struct aws_hmac *hmac); ``` """ function aws_hmac_destroy(hmac) ccall((:aws_hmac_destroy, libaws_c_cal), Cvoid, (Ptr{aws_hmac},), hmac) end """ aws_hmac_update(hmac, to_hmac) Updates the running hmac with to\\_hash. this can be called multiple times. ### Prototype ```c int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); ``` """ function aws_hmac_update(hmac, to_hmac) ccall((:aws_hmac_update, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_cursor}), hmac, to_hmac) end """ aws_hmac_finalize(hmac, output, truncate_to) Completes the hmac computation and writes the final digest to output. Allocation of output is the caller's responsibility. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. ### Prototype ```c int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_hmac_finalize(hmac, output, truncate_to) ccall((:aws_hmac_finalize, libaws_c_cal), Cint, (Ptr{aws_hmac}, Ptr{aws_byte_buf}, Csize_t), hmac, output, truncate_to) end """ aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) Computes the sha256 hmac over input and writes the digest output to 'output'. Use this if you don't need to stream the data you're hashing and you can load the entire input to hash into memory. If you specify truncate\\_to to something other than 0, the output will be truncated to that number of bytes. For example if you want a SHA256 HMAC digest as the first 16 bytes, set truncate\\_to to 16. If you want the full digest size, just set this to 0. 
### Prototype ```c int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); ``` """ function aws_sha256_hmac_compute(allocator, secret, to_hmac, output, truncate_to) ccall((:aws_sha256_hmac_compute, libaws_c_cal), Cint, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_buf}, Csize_t), allocator, secret, to_hmac, output, truncate_to) end """ aws_set_sha256_hmac_new_fn(fn) Set the implementation of sha256 hmac to use. If you compiled without BYO\\_CRYPTO, you do not need to call this. However, if use this, we will honor it, regardless of compile options. This may be useful for testing purposes. If you did set BYO\\_CRYPTO, and you do not call this function you will segfault. ### Prototype ```c void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); ``` """ function aws_set_sha256_hmac_new_fn(fn) ccall((:aws_set_sha256_hmac_new_fn, libaws_c_cal), Cvoid, (Ptr{aws_hmac_new_fn},), fn) end """ Documentation not found. """ mutable struct aws_rsa_key_pair end """ aws_rsa_encryption_algorithm Documentation not found. """ @cenum aws_rsa_encryption_algorithm::UInt32 begin AWS_CAL_RSA_ENCRYPTION_PKCS1_5 = 0 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 = 1 AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512 = 2 end """ aws_rsa_signature_algorithm Documentation not found. """ @cenum aws_rsa_signature_algorithm::UInt32 begin AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 = 0 AWS_CAL_RSA_SIGNATURE_PSS_SHA256 = 1 end """ __JL_Ctag_50 Documentation not found. """ @cenum __JL_Ctag_50::UInt32 begin AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024 AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096 end """ aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_public_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). Returns a new instance of [`aws_rsa_key_pair`](@ref) if the key was successfully built. Otherwise returns NULL. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); ``` """ function aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, key) ccall((:aws_rsa_key_pair_new_from_private_key_pkcs1, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_allocator}, aws_byte_cursor), allocator, key) end """ aws_rsa_key_pair_acquire(key_pair) Adds one to an RSA key pair's ref count. Returns key\\_pair pointer. ### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_acquire(key_pair) ccall((:aws_rsa_key_pair_acquire, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_release(key_pair) Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. Always returns NULL. 
### Prototype ```c struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_release(key_pair) ccall((:aws_rsa_key_pair_release, libaws_c_cal), Ptr{aws_rsa_key_pair}, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) Max plaintext size that can be encrypted by the key (i.e. max data size supported by the key - bytes needed for padding). ### Prototype ```c size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); ``` """ function aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) ccall((:aws_rsa_key_pair_max_encrypt_plaintext_size, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm), key_pair, algorithm) end """ aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_encrypt(key_pair, algorithm, plaintext, out) ccall((:aws_rsa_key_pair_encrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, plaintext, out) end """ aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_decrypt(key_pair, algorithm, ciphertext, out) ccall((:aws_rsa_key_pair_decrypt, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_encryption_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, ciphertext, out) end """ aws_rsa_key_pair_block_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_block_length(key_pair) ccall((:aws_rsa_key_pair_block_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) Uses the key\\_pair's private key to sign message. The output will be in out. out must be large enough to hold the signature. Check [`aws_rsa_key_pair_signature_length`](@ref)() for the appropriate size. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. ### Prototype ```c int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_sign_message(key_pair, algorithm, digest, out) ccall((:aws_rsa_key_pair_sign_message, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, Ptr{aws_byte_buf}), key_pair, algorithm, digest, out) end """ aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) Uses the key\\_pair's public key to verify signature of message. It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually something like a SHA256. returns AWS\\_OP\\_SUCCESS if the signature is valid. 
raises AWS\\_ERROR\\_CAL\\_SIGNATURE\\_VALIDATION\\_FAILED if signature validation failed ### Prototype ```c int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); ``` """ function aws_rsa_key_pair_verify_signature(key_pair, algorithm, digest, signature) ccall((:aws_rsa_key_pair_verify_signature, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_signature_algorithm, aws_byte_cursor, aws_byte_cursor), key_pair, algorithm, digest, signature) end """ aws_rsa_key_pair_signature_length(key_pair) Documentation not found. ### Prototype ```c size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); ``` """ function aws_rsa_key_pair_signature_length(key_pair) ccall((:aws_rsa_key_pair_signature_length, libaws_c_cal), Csize_t, (Ptr{aws_rsa_key_pair},), key_pair) end """ aws_rsa_key_export_format Documentation not found. """ @cenum aws_rsa_key_export_format::UInt32 begin AWS_CAL_RSA_KEY_EXPORT_PKCS1 = 0 end """ aws_rsa_key_pair_get_public_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_public_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_public_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ aws_rsa_key_pair_get_private_key(key_pair, format, out) Documentation not found. ### Prototype ```c int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); ``` """ function aws_rsa_key_pair_get_private_key(key_pair, format, out) ccall((:aws_rsa_key_pair_get_private_key, libaws_c_cal), Cint, (Ptr{aws_rsa_key_pair}, aws_rsa_key_export_format, Ptr{aws_byte_buf}), key_pair, format, out) end """ Documentation not found. """ mutable struct aws_symmetric_cipher end # typedef struct aws_symmetric_cipher * ( aws_aes_cbc_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_cbc_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_ctr_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv ) """ Documentation not found. """ const aws_aes_ctr_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_gcm_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key , const struct aws_byte_cursor * iv , const struct aws_byte_cursor * aad ) """ Documentation not found. """ const aws_aes_gcm_256_new_fn = Cvoid # typedef struct aws_symmetric_cipher * ( aws_aes_keywrap_256_new_fn ) ( struct aws_allocator * allocator , const struct aws_byte_cursor * key ) """ Documentation not found. """ const aws_aes_keywrap_256_new_fn = Cvoid """ aws_symmetric_cipher_state Documentation not found. """ @cenum aws_symmetric_cipher_state::UInt32 begin AWS_SYMMETRIC_CIPHER_READY = 0 AWS_SYMMETRIC_CIPHER_FINALIZED = 1 AWS_SYMMETRIC_CIPHER_ERROR = 2 end """ aws_aes_cbc_256_new(allocator, key, iv) Creates an instance of AES CBC with 256-bit key. If key and iv are NULL, they will be generated internally. 
You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_cbc_256_new(allocator, key, iv) ccall((:aws_aes_cbc_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_ctr_256_new(allocator, key, iv) Creates an instance of AES CTR with 256-bit key. If key and iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If they are set, that key and iv will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); ``` """ function aws_aes_ctr_256_new(allocator, key, iv) ccall((:aws_aes_ctr_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv) end """ aws_aes_gcm_256_new(allocator, key, iv, aad) Creates an instance of AES GCM with 256-bit key. If key, iv are NULL, they will be generated internally. You can get the generated key and iv back by calling: [`aws_symmetric_cipher_get_key`](@ref)() and [`aws_symmetric_cipher_get_initialization_vector`](@ref)() respectively. If aad is set it will be copied and applied to the cipher. If they are set, that key and iv will be copied internally and used by the cipher. For decryption purposes tag can be provided via [`aws_symmetric_cipher_set_tag`](@ref) method. Note: for decrypt operations, tag must be provided before first decrypt is called. (this is a windows bcrypt limitations, but for consistency sake same limitation is extended to other platforms) Tag generated during encryption can be retrieved using [`aws_symmetric_cipher_get_tag`](@ref) method after finalize is called. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. ### Prototype ```c struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad); ``` """ function aws_aes_gcm_256_new(allocator, key, iv, aad) ccall((:aws_aes_gcm_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}, Ptr{aws_byte_cursor}), allocator, key, iv, aad) end """ aws_aes_keywrap_256_new(allocator, key) Creates an instance of AES Keywrap with 256-bit key. If key is NULL, it will be generated internally. You can get the generated key back by calling: [`aws_symmetric_cipher_get_key`](@ref)() If key is set, that key will be copied internally and used by the cipher. Returns NULL on failure. You can check aws\\_last\\_error() to get the error code indicating the failure cause. 
### Prototype ```c struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); ``` """ function aws_aes_keywrap_256_new(allocator, key) ccall((:aws_aes_keywrap_256_new, libaws_c_cal), Ptr{aws_symmetric_cipher}, (Ptr{aws_allocator}, Ptr{aws_byte_cursor}), allocator, key) end """ aws_symmetric_cipher_destroy(cipher) Cleans up internal resources and state for cipher and then deallocates it. ### Prototype ```c void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_destroy(cipher) ccall((:aws_symmetric_cipher_destroy, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) Encrypts the value in to\\_encrypt and writes the encrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the encrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_encrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_encrypt(cipher, to_encrypt, out) ccall((:aws_symmetric_cipher_encrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_encrypt, out) end """ aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) Decrypts the value in to\\_decrypt and writes the decrypted data into out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of to\\_decrypt + an extra BLOCK to account for padding etc... returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_decrypt(cipher, to_decrypt, out) ccall((:aws_symmetric_cipher_decrypt, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, aws_byte_cursor, Ptr{aws_byte_buf}), cipher, to_decrypt, out) end """ aws_symmetric_cipher_finalize_encryption(cipher, out) Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. 
Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_encryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_encryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_finalize_decryption(cipher, out) Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for padding etc... After invoking this function, you MUST call [`aws_symmetric_cipher_reset`](@ref)() before invoking any encrypt/decrypt operations on this cipher again. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); ``` """ function aws_symmetric_cipher_finalize_decryption(cipher, out) ccall((:aws_symmetric_cipher_finalize_decryption, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher}, Ptr{aws_byte_buf}), cipher, out) end """ aws_symmetric_cipher_reset(cipher) Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for immediate reuse. Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure to make a copy of tag before reseting the cipher and pass that copy for decryption. Warning: In most cases it's a really bad idea to reset a cipher and perform another operation using that cipher. Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher and create new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration. returns AWS\\_OP\\_SUCCESS on success. Call aws\\_last\\_error() to determine the failure cause if it returns AWS\\_OP\\_ERR; ### Prototype ```c int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_reset(cipher) ccall((:aws_symmetric_cipher_reset, libaws_c_cal), Cint, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_tag(cipher) Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Only use this function between other calls to this API as any function call can alter the value of this tag. If you need to access it in a different pattern, copy the values to your own buffer first. 
### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_tag(cipher) ccall((:aws_symmetric_cipher_get_tag, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_set_tag(cipher, tag) Sets the GMAC tag on the cipher. Does nothing for ciphers that do not support a tag. ### Prototype ```c void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor tag); ``` """ function aws_symmetric_cipher_set_tag(cipher, tag) ccall((:aws_symmetric_cipher_set_tag, libaws_c_cal), Cvoid, (Ptr{aws_symmetric_cipher}, aws_byte_cursor), cipher, tag) end """ aws_symmetric_cipher_get_initialization_vector(cipher) Gets the original initialization vector as a cursor. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. For some algorithms, such as AES Keywrap, this will return an empty cursor. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_initialization_vector(cipher) ccall((:aws_symmetric_cipher_get_initialization_vector, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_key(cipher) Gets the original key. The memory in this cursor is unsafe as it refers to the internal buffer. This was done because the use case doesn't require fetching these during an encryption or decryption operation and it dramatically simplifies the API. Unlike some other fields, this value does not change after the initial construction of the cipher. ### Prototype ```c struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_key(cipher) ccall((:aws_symmetric_cipher_get_key, libaws_c_cal), aws_byte_cursor, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_is_good(cipher) Returns true if the state of the cipher is good, and otherwise returns false. Most operations, other than [`aws_symmetric_cipher_reset`](@ref)(), will fail if this function returns false. [`aws_symmetric_cipher_reset`](@ref)() will reset the state to a good state if possible. ### Prototype ```c bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_is_good(cipher) ccall((:aws_symmetric_cipher_is_good, libaws_c_cal), Bool, (Ptr{aws_symmetric_cipher},), cipher) end """ aws_symmetric_cipher_get_state(cipher) Returns the current state of the cipher. The state of the cipher can be ready for use, finalized, or in an error state. If the cipher is in a finished or error state, it must be reset before further use. ### Prototype ```c enum aws_symmetric_cipher_state aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher); ``` """ function aws_symmetric_cipher_get_state(cipher) ccall((:aws_symmetric_cipher_get_state, libaws_c_cal), aws_symmetric_cipher_state, (Ptr{aws_symmetric_cipher},), cipher) end """ Documentation not found. """ const AWS_C_CAL_PACKAGE_ID = 7 """ Documentation not found. """ const AWS_SHA256_LEN = 32 """ Documentation not found. 
""" const AWS_SHA1_LEN = 20 """ Documentation not found. """ const AWS_MD5_LEN = 16 """ Documentation not found. """ const AWS_SHA256_HMAC_LEN = 32 """ Documentation not found. """ const AWS_AES_256_CIPHER_BLOCK_SIZE = 16 """ Documentation not found. """ const AWS_AES_256_KEY_BIT_LEN = 256 """ Documentation not found. """ const AWS_AES_256_KEY_BYTE_LEN = AWS_AES_256_KEY_BIT_LEN ÷ 8
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
2057
module LibAwsCal using aws_c_cal_jll using LibAwsCommon const IS_LIBC_MUSL = occursin("musl", Base.BUILD_TRIPLET) if Sys.isapple() && Sys.ARCH === :aarch64 include("../lib/aarch64-apple-darwin20.jl") elseif Sys.islinux() && Sys.ARCH === :aarch64 && !IS_LIBC_MUSL include("../lib/aarch64-linux-gnu.jl") elseif Sys.islinux() && Sys.ARCH === :aarch64 && IS_LIBC_MUSL include("../lib/aarch64-linux-musl.jl") elseif Sys.islinux() && startswith(string(Sys.ARCH), "arm") && !IS_LIBC_MUSL include("../lib/armv7l-linux-gnueabihf.jl") elseif Sys.islinux() && startswith(string(Sys.ARCH), "arm") && IS_LIBC_MUSL include("../lib/armv7l-linux-musleabihf.jl") elseif Sys.islinux() && Sys.ARCH === :i686 && !IS_LIBC_MUSL include("../lib/i686-linux-gnu.jl") elseif Sys.islinux() && Sys.ARCH === :i686 && IS_LIBC_MUSL include("../lib/i686-linux-musl.jl") elseif Sys.iswindows() && Sys.ARCH === :i686 error("LibAwsCommon.jl does not support i686 windows https://github.com/JuliaPackaging/Yggdrasil/blob/bbab3a916ae5543902b025a4a873cf9ee4a7de68/A/aws_c_common/build_tarballs.jl#L48-L49") elseif Sys.islinux() && Sys.ARCH === :powerpc64le include("../lib/powerpc64le-linux-gnu.jl") elseif Sys.isapple() && Sys.ARCH === :x86_64 include("../lib/x86_64-apple-darwin14.jl") elseif Sys.islinux() && Sys.ARCH === :x86_64 && !IS_LIBC_MUSL include("../lib/x86_64-linux-gnu.jl") elseif Sys.islinux() && Sys.ARCH === :x86_64 && IS_LIBC_MUSL include("../lib/x86_64-linux-musl.jl") elseif Sys.isbsd() && !Sys.isapple() include("../lib/x86_64-unknown-freebsd13.2.jl") elseif Sys.iswindows() && Sys.ARCH === :x86_64 include("../lib/x86_64-w64-mingw32.jl") else error("Unknown platform: $(Base.BUILD_TRIPLET)") end # exports for name in names(@__MODULE__; all=true) if name == :eval || name == :include || contains(string(name), "#") continue end @eval export $name end function init(allocator=default_aws_allocator()) LibAwsCommon.init(allocator) aws_cal_library_init(allocator) return end end
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
code
457
using Test, Aqua, LibAwsCal, LibAwsCommon @testset "LibAwsCal" begin @testset "aqua" begin Aqua.test_all(LibAwsCal, ambiguities=false) Aqua.test_ambiguities(LibAwsCal) end @testset "basic usage to test the library loads" begin alloc = aws_default_allocator() # important! this shouldn't need to be qualified! if we generate a definition for it in LibAwsCal that is a bug. aws_cal_library_init(alloc) end end
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
docs
475
[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaServices.github.io/LibAwsCal.jl/stable) [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://JuliaServices.github.io/LibAwsCal.jl/dev) [![CI](https://github.com/JuliaServices/LibAwsCal.jl/actions/workflows/ci.yml/badge.svg)](https://github.com/JuliaServices/LibAwsCal.jl/actions/workflows/ci.yml) # LibAwsCal.jl Julia bindings for the [aws-c-cal](https://github.com/awslabs/aws-c-cal) library.
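A minimal quickstart sketch (assuming the package is registered; `default_aws_allocator` comes from LibAwsCommon, and `LibAwsCal.init` initializes the underlying C libraries):

```julia
using Pkg
Pkg.add("LibAwsCal")

using LibAwsCal, LibAwsCommon

# Initialize aws-c-common and aws-c-cal with the default allocator before using
# any of the generated `aws_*` bindings.
LibAwsCal.init()
alloc = default_aws_allocator()
```

All generated symbols are exported from the module, so after `init` the `aws_*` functions and constants can be called directly without qualification.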
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
1.1.0
ea84a3fc5acae82883d2c3ce3579d461d26a47e2
docs
186
```@meta CurrentModule = LibAwsCal ``` # LibAwsCal Documentation for [LibAwsCal](https://github.com/JuliaServices/LibAwsCal.jl). ```@index ``` ```@autodocs Modules = [LibAwsCal] ```
LibAwsCal
https://github.com/JuliaServices/LibAwsCal.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
914
using SBMLToolkit using Documenter cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true) cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true) DocMeta.setdocmeta!(SBMLToolkit, :DocTestSetup, :(using SBMLToolkit); recursive = true) makedocs(; modules = [SBMLToolkit], authors = "paulflang, anandijain", sitename = "SBMLToolkit.jl", clean = true, doctest = false, linkcheck = true, warnonly = [:missing_docs, :cross_references], linkcheck_ignore = ["https://www.linkedin.com/in/paul-lang-7b54a81a3/"], format = Documenter.HTML(; prettyurls = get(ENV, "CI", "false") == "true", canonical = "https://docs.sciml.ai/SBMLToolkit/stable/", assets = ["assets/favicon.ico"]), pages = [ "Home" => "index.md", "API documentation" => "api.md" ]) deploydocs(; repo = "github.com/SciML/SBMLToolkit.jl")
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
509
module SBMLToolkit using Catalyst using SBML using SymbolicUtils include("drafts.jl") include("systems.jl") include("reactions.jl") include("rules.jl") include("events.jl") include("utils.jl") @deprecate convert_simplify_math convert_promotelocals_expandfuns export ReactionSystem, ODESystem export readSBML, readSBMLFromString, set_level_and_version, convert_simplify_math, convert_promotelocals_expandfuns, checksupport_file export DefaultImporter, ReactionSystemImporter, ODESystemImporter end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
230
function is_event_assignment(k, model) for ev in values(model.events) for as in ev.event_assignments if as.variable == k return true end end end return false end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
1336
""" Creates ContinuousVectorCallbacks """ function get_events(model) # Todo: implement up or downpass and parameters subsdict = get_substitutions(model) # Todo: use SUBSDICT evs = model.events mtk_evs = Pair{Vector{Equation}, Vector{Equation}}[] for (_, e) in evs trigger = SBML.extensive_kinetic_math(model, e.trigger.math) trigger = Symbolics.unwrap(interpret_as_num(trigger, model)) lhs, rhs = map(x -> substitute(x, subsdict), trigger.arguments) trig = [lhs ~ rhs] mtk_evas = Equation[] for eva in e.event_assignments math = eva.math if haskey(model.species, eva.variable) vc = get_volume_correction(model, eva.variable) if !isnothing(vc) math = SBML.MathApply("*", [SBML.MathIdent(vc), math]) end end var = create_var(eva.variable, IV; irreducible = true) math = substitute(Symbolics.unwrap(interpret_as_num(math, model)), subsdict) effect = var ~ math push!(mtk_evas, effect) end push!(mtk_evs, trig => mtk_evas) end !isempty(evs) && @warn "SBMLToolkit currently fires events regardless of uppass or downpass trigger." isempty(mtk_evs) ? nothing : mtk_evs end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
7209
""" Convert SBML.Reaction to MTK.Reaction """ function get_reactions(model::SBML.Model) subsdict = get_substitutions(model) # Todo: replace with SUBSDICT rxs = Reaction[] for reaction in values(model.reactions) extensive_math = SBML.extensive_kinetic_math(model, reaction.kinetic_math) symbolic_math = interpret_as_num(extensive_math, model) reactant_references = reaction.reactants product_references = reaction.products if reaction.reversible symbolic_math = get_unidirectional_components(symbolic_math) kl_fw, kl_rv = [substitute(x, subsdict) for x in symbolic_math] enforce_rate = isequal(kl_rv, 0) add_reaction!(rxs, kl_fw, reactant_references, product_references, model; enforce_rate = enforce_rate) add_reaction!(rxs, kl_rv, product_references, reactant_references, model; enforce_rate = enforce_rate) else kl = substitute(symbolic_math, subsdict) # Todo: use SUBSDICT add_reaction!(rxs, kl, reactant_references, product_references, model) end end rxs end """ Infer forward and reverse components of bidirectional kineticLaw """ function get_unidirectional_components(bidirectional_math) bm = ModelingToolkit.value(bidirectional_math) # Symbolics.tosymbol(bidirectional_math) bm = expand(simplify(bm)) if !SymbolicUtils.isadd(bm) @warn "Cannot separate bidirectional kineticLaw `$bidirectional_math` to forward and reverse part. Setting forward to `$bidirectional_math` and reverse to `0`. Stochastic simulations will be inexact." return (bidirectional_math, Num(0)) end terms = SymbolicUtils.arguments(ModelingToolkit.value(bm)) fw_terms = [] rv_terms = [] for term in terms if SymbolicUtils.ismul(ModelingToolkit.value(term)) && (term.coeff < 0) push!(rv_terms, Num(-term)) # PL: @Anand: Perhaps we should to create_var(term) or so? else push!(fw_terms, Num(term)) # PL: @Anand: Perhaps we should to create_var(term) or so? end end if (length(fw_terms) != 1) || (length(rv_terms) != 1) @warn "Cannot separate bidirectional kineticLaw `$bidirectional_math` to forward and reverse part. Setting forward to `$bidirectional_math` and reverse to `0`. Stochastic simulations will be inexact." return (bidirectional_math, Num(0)) end return (fw_terms[1], rv_terms[1]) end function add_reaction!(rxs::Vector{Reaction}, kl::Num, reactant_references::Vector{SBML.SpeciesReference}, product_references::Vector{SBML.SpeciesReference}, model::SBML.Model; enforce_rate = false) reactants, products, rstoichvals, pstoichvals = get_reagents(reactant_references, product_references, model) isnothing(reactants) && isnothing(products) && return rstoichvals = stoich_convert_to_ints(rstoichvals) pstoichvals = stoich_convert_to_ints(pstoichvals) kl, our = use_rate(kl, reactants, rstoichvals) our = enforce_rate ? true : our push!(rxs, Catalyst.Reaction(kl, reactants, products, rstoichvals, pstoichvals; only_use_rate = our)) end function stoich_convert_to_ints(xs) (xs !== nothing && all(isinteger(x) for x in xs)) ? Int.(xs) : xs end """ Get reagents """ function get_reagents(reactant_references::Vector{SBML.SpeciesReference}, product_references::Vector{SBML.SpeciesReference}, model::SBML.Model) reactants = String[] products = String[] rstoich = Float64[] pstoich = Float64[] for rr in reactant_references sn = rr.species stoich = rr.stoichiometry if isnothing(stoich) @warn "SBML SpeciesReferences does not contain stoichiometries. Assuming stoichiometry of 1." 
maxlog=1 stoich = 1.0 end iszero(stoich) && @error("Stoichiometry of $sn must be non-zero") if sn in reactants idx = findfirst(isequal(sn), reactants) rstoich[idx] += stoich else push!(reactants, sn) push!(rstoich, stoich) end if model.species[sn].boundary_condition == true if sn in products idx = findfirst(isequal(sn), products) pstoich[idx] += stoich else push!(products, sn) push!(pstoich, stoich) end end end for pr in product_references sn = pr.species stoich = pr.stoichiometry if isnothing(stoich) @warn "Stoichiometries of SpeciesReferences are not defined. Setting to 1." maxlog=1 stoich = 1.0 end iszero(stoich) && @error("Stoichiometry of $sn must be non-zero") if model.species[sn].boundary_condition != true if sn in products idx = findfirst(isequal(sn), products) pstoich[idx] += stoich else push!(products, sn) push!(pstoich, stoich) end end end reactants = map(x -> Num(create_var(x, IV)), reactants) products = map(x -> Num(create_var(x, IV)), products) if (length(reactants) == 0) reactants = nothing rstoich = nothing end if (length(products) == 0) products = nothing pstoich = nothing end (reactants, products, rstoich, pstoich) end """ Get kineticLaw for use in MTK.Reaction """ function use_rate(kl::Num, react::Union{Vector{Num}, Nothing}, stoich::Union{Vector{<:Real}, Nothing}) rate_const = get_massaction(kl, react, stoich) if !isnan(rate_const) kl = rate_const our = false else our = true end return (kl, our) end """ Get rate constant of mass action kineticLaws """ function get_massaction(kl::Num, reactants::Union{Vector{Num}, Nothing}, stoich::Union{Vector{<:Real}, Nothing}) function check_args(x::SymbolicUtils.BasicSymbolic{Real}) check_args(Val(SymbolicUtils.istree(x)), x) end function check_args(::Val{true}, x::SymbolicUtils.BasicSymbolic{Real}) for arg in SymbolicUtils.arguments(x) if isnan(check_args(arg)) || isequal(arg, default_t()) return NaN end end return 0 end check_args(::Val{false}, x::SymbolicUtils.BasicSymbolic{Real}) = isspecies(x) ? NaN : 0 # Species vs Parameter leaf node check_args(::Real) = 0 # Real leaf node check_args(x) = throw(ErrorException("Cannot handle $(typeof(x)) types.")) # Unknow leaf node if isnothing(reactants) && isnothing(stoich) rate_const = kl elseif isnothing(reactants) | isnothing(stoich) throw(ErrorException("`reactants` and `stoich` are inconsistent: `reactants` are $(reactants) and `stoich` is $(stoich).")) elseif max(stoich...) > 100 # simplify_fractions might StackOverflow rate_const = kl else rate_const = SymbolicUtils.simplify_fractions(kl / *((.^(reactants, stoich))...)) end isnan(check_args(ModelingToolkit.value(rate_const))) ? NaN : rate_const end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
2253
function get_rules(model) subsdict = get_substitutions(model) # Todo: use SUBSDICT obseqs = Equation[] algeqs = Equation[] raterules = Equation[] for r in model.rules if r isa SBML.AlgebraicRule push!(algeqs, 0 ~ interpret_as_num(r.math, model)) elseif r isa SBML.AssignmentRule var, ass = get_var_and_assignment(model, r) push!(obseqs, var ~ ass) elseif r isa SBML.RateRule var, ass = get_var_and_assignment(model, r) push!(raterules, D(var) ~ ass) else error("Rule must be of type SBML.AlgebraicRule, SBML.AssignmentRule, or SBML.RateRule.") end end algeqs, obseqs, raterules = map(x -> substitute(x, subsdict), (algeqs, obseqs, raterules)) algeqs, obseqs, raterules end function get_var_and_assignment(model, rule) if !haskey(merge(model.species, model.compartments, model.parameters), rule.variable) error("Cannot find target for rule with ID `$(rule.variable)`") end var = create_var(rule.variable, IV) math = SBML.extensive_kinetic_math(model, rule.math) vc = get_volume_correction(model, rule.variable) if !isnothing(vc) math = SBML.MathApply("*", [SBML.MathIdent(vc), math]) end assignment = interpret_as_num(math, model) if rule isa SBML.RateRule && haskey(model.species, rule.variable) sp = model.species[rule.variable] comp = model.compartments[sp.compartment] comp.constant == false && sp.only_substance_units == false && begin c = create_var(sp.compartment, IV) assignment = c * assignment + var / c * D(c) end end var, assignment end function get_volume_correction(model, s_id) haskey(model.species, s_id) || return nothing sp = model.species[s_id] comp = model.compartments[sp.compartment] sp.only_substance_units == true && return nothing isnothing(comp.size) && !SBML.seemsdefined(sp.compartment, model) && comp.spatial_dimensions != 0 && # remove this line when SBML test suite is fixed throw(DomainError(sp.compartment, "compartment size is insufficiently defined")) sp.compartment end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
7395
"DefaultImporter to use in conjunction with `readSBML`" struct DefaultImporter end "ReactionSystemImporter to use in conjunction with `readSBML`" struct ReactionSystemImporter end "ODESystemImporter to use in conjunction with `readSBML`" struct ODESystemImporter end """ readSBML(sbmlfile::String, ::DefaultImporter) Create a `SBML.Model` from an SBML file, using the default import settings for use as Catalyst and ModelingToolkit types. See also [`Model`](@ref) and [`DefaultImporter`](@ref). """ function SBML.readSBML(sbmlfile::String, ::DefaultImporter) # Returns an SBML.Model SBMLToolkit.checksupport_file(sbmlfile) readSBML(sbmlfile, importdefaults) end """ readSBML(sbmlfile::String, ::ReactionSystemImporter) Create a `Catalyst.ReactionSystem` from an SBML file, using the default import settings. See also [`Model`](@ref) and [`ReactionSystemImporter`](@ref). """ function SBML.readSBML(sbmlfile::String, ::ReactionSystemImporter; kwargs...) # Returns a Catalyst.ReactionSystem ReactionSystem(readSBML(sbmlfile::String, DefaultImporter()), kwargs...) end """ readSBML(sbmlfile::String, ::ODESystemImporter) Create a `ModelingToolkit.ODESystem` from an SBML file, using the default import settings. See also [`Model`](@ref) and [`ODESystemImporter`](@ref). """ function SBML.readSBML(sbmlfile::String, ::ODESystemImporter; include_zero_odes::Bool = true, kwargs...) # Returns an MTK.ODESystem odesys = convert(ODESystem, readSBML(sbmlfile, ReactionSystemImporter(), kwargs...), include_zero_odes = include_zero_odes) complete(odesys) end """ ReactionSystem(model::SBML.Model; kwargs...) Create a `ReactionSystem` from an `SBML.Model`. See also [`ODESystem`](@ref). """ function Catalyst.ReactionSystem(model::SBML.Model; kwargs...) # Todo: requires unique parameters (i.e. SBML must have been imported with localParameter promotion in libSBML) # length(model.events) > 0 ? error("Model contains events. Please import with `ODESystem(model)`") : nothing @Anand: how to suppress this when called from ODESystem rxs = get_reactions(model) u0map, parammap, initial_assignment_map = get_mappings(model) defs = Dict{Num, Any}() for (k, v) in vcat(u0map, parammap, initial_assignment_map) # initial_assignments override u0map and parammap defs[k] = v end # defs = ModelingToolkit._merge(Dict(u0map), Dict(parammap)) algrules, obsrules, raterules = get_rules(model) obsrules_rearranged = Equation[] for o in obsrules rhs = o.rhs for r in raterules if isequal(rhs, r.lhs) rhs = r.rhs end end defs[o.lhs] = ModelingToolkit.fixpoint_sub(rhs, defs) # ModelingToolkit._merge(defs, # Dict(Catalyst.DEFAULT_IV.val => 0))) push!(obsrules_rearranged, 0 ~ rhs - o.lhs) end raterules_subs = [] for o in raterules rhs = o.rhs for r in raterules if isequal(rhs, r.lhs) rhs = r.rhs end end defs[o.lhs] = ModelingToolkit.fixpoint_sub(rhs, defs) # ModelingToolkit._merge(defs, # Dict(Catalyst.DEFAULT_IV.val => 0))) push!(raterules_subs, rhs ~ o.lhs) end if haskey(kwargs, :defaults) defs = ModelingToolkit._merge(defs, kwargs[:defaults]) kwargs = filter(x -> !isequal(first(x), :defaults), kwargs) end rs = ReactionSystem([rxs..., algrules..., raterules_subs..., obsrules_rearranged...], IV, first.(u0map), first.(parammap); defaults = defs, name = gensym(:SBML), continuous_events = get_events(model), combinatoric_ratelaws = false, kwargs...) return complete(rs) # Todo: maybe add a `complete=True` kwarg end """ ODESystem(model::SBML.Model; include_zero_odes = true, kwargs...) Create an `ODESystem` from an `SBML.Model`. See also [`ReactionSystem`](@ref). 
""" function ModelingToolkit.ODESystem(model::SBML.Model; include_zero_odes::Bool = true, kwargs...) rs = ReactionSystem(model; kwargs...) odesys = convert(ODESystem, rs; include_zero_odes = include_zero_odes) complete(odesys) end function get_mappings(model::SBML.Model) inits = Dict(SBML.initial_amounts(model, convert_concentrations = true)) u0map = Pair[] parammap = Pair[] initial_assignment_map = Pair[] for (k, v) in model.species var = create_symbol(k, model) if v.constant == true push!(parammap, var => inits[k]) else push!(u0map, var => inits[k]) end end for (k, v) in model.parameters var = create_symbol(k, model) if v.constant == false && (SBML.seemsdefined(k, model) || is_event_assignment(k, model)) push!(u0map, var => v.value) elseif v.constant == true && isnothing(v.value) # Todo: maybe add this branch also to model.compartments val = model.initial_assignments[k] push!(parammap, var => interpret_as_num(val, model)) else push!(parammap, var => v.value) end end for (k, v) in model.compartments var = create_symbol(k, model) if v.constant == false && SBML.seemsdefined(k, model) push!(u0map, var => v.size) else push!(parammap, var => v.size) end end for (k, v) in model.initial_assignments var = create_symbol(k, model) push!(initial_assignment_map, var => interpret_as_num(v, model)) end u0map, parammap, initial_assignment_map end function netstoich(id, reaction) netstoich = 0 rdict = Dict(getproperty.(reaction.reactants, :species) .=> getproperty.(reaction.reactants, :stoichiometry)) pdict = Dict(getproperty.(reaction.products, :species) .=> getproperty.(reaction.products, :stoichiometry)) netstoich -= get(rdict, id, 0) netstoich += get(pdict, id, 0) end """ checksupport_file(filename::String) Check if SBML file is supported by SBMLToolkit.jl. """ function checksupport_file(filename::String) string = open(filename) do file read(file, String) end checksupport_string(string) end """ checksupport_string(filename::String) Check if SBML passed as string is supported by SBMLToolkit.jl. """ function checksupport_string(xml::String) not_implemented = ["listOfConstraints", "/delay", "<priority>", "factorial", "id=\"case00387\"", # Case 00387 requires event directionality "id=\"case01071\"", # require event directionality, I think "</eventAssignment>\n <eventAssignment"] for item in not_implemented occursin(item, xml) && throw(ErrorException("SBML models with $item are not yet implemented.")) end occursin("<sbml xmlns:fbc=", xml) && throw(ErrorException("This model was designed for constrained-based optimisation. Please use COBREXA.jl instead of SBMLToolkit.")) occursin("<sbml xmlns:comp=", xml) && throw(ErrorException("This model uses the SBML \"comp\" package, which is not yet implemented.")) !(occursin("<reaction", xml) || occursin("rateRule", xml)) && throw(ErrorException("Models that contain neither reactions or rateRules will fail in simulation.")) true end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
5202
# Conversion to symbolics const IV = default_t() const D = default_time_deriv() symbolicsRateOf(x) = D(x) const symbolics_mapping = Dict(SBML.default_function_mapping..., "rateOf" => symbolicsRateOf) function interpret_as_num(x::SBML.Math, model::SBML.Model) SBML.interpret_math(x; map_apply = (x::SBML.MathApply, interpret::Function) -> Num(symbolics_mapping[x.fn](interpret.(x.args)...)), map_const = (x::SBML.MathConst) -> Num(SBML.default_constants[x.id]), map_ident = x -> map_symbolics_ident(x, model), map_lambda = (_, _) -> throw(ErrorException("Symbolics.jl does not support lambda functions")), map_time = (x::SBML.MathTime) -> IV, map_value = (x::SBML.MathVal) -> x.val, map_avogadro = (x::SBML.MathAvogadro) -> SBML.default_constants["avogadro"]) end """ Get dictionary to change types in kineticLaw """ function get_substitutions(model) u0map, parammap = get_mappings(model) subsdict = Dict() for item in first.(u0map) k = create_var(string(item.f.name)) subsdict[k] = item end for item in first.(parammap) k = create_var(string(item.name)) subsdict[k] = item end subsdict end function map_symbolics_ident(x::SBML.Math, model::SBML.Model) k = x.id category = k in keys(model.species) ? :species : k in keys(model.parameters) ? :parameter : k in keys(model.compartments) ? :compartment : error("Unknown category for $k") if k in keys(model.species) v = model.species[k] if v.constant == true var = create_param(k; isconstantspecies = true) else var = create_var(k, IV; isbcspecies = has_rule_type(k, model, SBML.RateRule) || has_rule_type(k, model, SBML.AssignmentRule) || (has_rule_type(k, model, SBML.AlgebraicRule) && (all([netstoich(k, r) == 0 for r in values(model.reactions)]) || v.boundary_condition == true))) # To remove species that are otherwise defined end elseif k in keys(model.parameters) v = model.parameters[k] if v.constant == false && (SBML.seemsdefined(k, model) || is_event_assignment(k, model)) var = create_var(k, IV; isbcspecies = true) elseif v.constant == true && isnothing(v.value) # Todo: maybe add this branch also to model.compartments var = create_param(k) else var = create_param(k) end elseif k in keys(model.compartments) v = model.compartments[k] if v.constant == false && SBML.seemsdefined(k, model) var = create_var(k, IV; isbcspecies = true) else var = create_param(k) end else error("$k must be in the model species, parameters, or compartments.") end Num(var) end function create_var(x; isbcspecies = false) sym = Symbol(x) Symbolics.unwrap(first(@species $sym [isbcspecies = isbcspecies])) end function create_var(x, iv; isbcspecies = false, irreducible = false) sym = Symbol(x) Symbolics.unwrap(first(@species $sym(iv) [ isbcspecies = isbcspecies, irreducible = irreducible ])) end function create_param(x; isconstantspecies = false) sym = Symbol(x) Symbolics.unwrap(first(@parameters $sym [isconstantspecies = isconstantspecies])) end function has_rule_type(id::String, m::SBML.Model, T::Type{<:SBML.Rule}) T == SBML.AlgebraicRule && return any(SBML.isfreein(id, r.math) for r in m.rules if r isa SBML.AlgebraicRule) any(r.variable == id for r in m.rules if r isa T) end const importdefaults = doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end function create_symbol(k::String, model::SBML.Model) if k in keys(model.species) v = model.species[k] if v.constant == true sym = create_param(k; isconstantspecies = true) else sym = create_var(k, IV; isbcspecies = has_rule_type(k, model, SBML.RateRule) || has_rule_type(k, model, SBML.AssignmentRule) || 
(has_rule_type(k, model, SBML.AlgebraicRule) && (all([netstoich(k, r) == 0 for r in values(model.reactions)]) || v.boundary_condition == true))) # To remove species that are otherwise defined end elseif k in keys(model.parameters) v = model.parameters[k] if v.constant == false && (SBML.seemsdefined(k, model) || is_event_assignment(k, model)) sym = create_var(k, IV; isbcspecies = true) else sym = create_param(k) end elseif k in keys(model.compartments) v = model.compartments[k] if v.constant == false && SBML.seemsdefined(k, model) sym = create_var(k, IV; isbcspecies = true) else sym = create_param(k) end end sym end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
963
using SBMLToolkit using Catalyst, SBMLToolkitTestSuite using Test const IV = default_t() @parameters compartment @species S1(IV) S2(IV) function readmodel(sbml) SBMLToolkit.readSBMLFromString(sbml, doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end) end # Test get_events sbml, _, _ = SBMLToolkitTestSuite.read_case("00001") m = readmodel(sbml) @test isnothing(SBMLToolkit.get_events(m)) sbml, _, _ = SBMLToolkitTestSuite.read_case("00026") # 1 single trigger, single affect m = readmodel(sbml) events = SBMLToolkit.get_events(m) events_true = [[S1 / compartment ~ 0.1] => [S1 ~ compartment]] @test isequal(events, events_true) sbml, _, _ = SBMLToolkitTestSuite.read_case("00041") # multiple events m = readmodel(sbml) events = SBMLToolkit.get_events(m) events_true = [[S1 / compartment ~ 0.1] => [S1 ~ compartment], [S2 / compartment ~ 0.5] => [S2 ~ 0]] @test isequal(events, events_true)
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
446
using SBMLToolkit, Aqua @testset "Aqua" begin Aqua.find_persistent_tasks_deps(SBMLToolkit) Aqua.test_ambiguities(SBMLToolkit, recursive = false) Aqua.test_deps_compat(SBMLToolkit) Aqua.test_piracies(SBMLToolkit, treat_as_own = [SBMLToolkit.SBML.Model]) Aqua.test_project_extras(SBMLToolkit) Aqua.test_stale_deps(SBMLToolkit) Aqua.test_unbound_args(SBMLToolkit) Aqua.test_undefined_exports(SBMLToolkit) end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
6261
using SBMLToolkit using Catalyst, SBML using Test cd(@__DIR__) sbmlfile = joinpath("data", "reactionsystem_01.xml") const IV = default_t() @parameters k1, c1 @species s1(IV), s2(IV), s1s2(IV) COMP1 = SBML.Compartment("c1", true, 3, 2.0, "nl", nothing, nothing, nothing, nothing, SBML.CVTerm[]) SPECIES1 = SBML.Species(name = "s1", compartment = "c1", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = false) SPECIES2 = SBML.Species(name = "s2", compartment = "c1", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = false) SPECIES3 = SBML.Species(name = "s1s2", compartment = "c1", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = false) KINETICMATH1 = SBML.MathIdent("k1") KINETICMATH2 = SBML.MathApply("*", SBML.Math[SBML.MathIdent("k1"), SBML.MathIdent("s2")]) REACTION1 = SBML.Reaction( products = [ SBML.SpeciesReference(species = "s1", stoichiometry = 1) ], kinetic_math = KINETICMATH1, reversible = false) PARAM1 = SBML.Parameter(name = "k1", value = 1.0, constant = true) MODEL1 = SBML.Model(parameters = Dict("k1" => PARAM1), compartments = Dict("c1" => COMP1), species = Dict("s1" => SPECIES1, "s2" => SPECIES2, "s1s2" => SPECIES3), reactions = Dict("r1" => REACTION1)) # PL: For instance in the compartments dict, we may want to enforce that key and compartment.name are identical # Test get_reactions reaction = SBMLToolkit.get_reactions(MODEL1)[1] truereaction = Catalyst.Reaction(k1, nothing, [s1], nothing, [1]) # Todo: implement Sam's suggestion on mass action kinetics @test isequal(reaction, truereaction) km = SBML.MathTime("x") reac = SBML.Reaction( reactants = [ SBML.SpeciesReference(species = "s1", stoichiometry = 1.0) ], kinetic_math = km, reversible = false) model = SBML.Model(parameters = Dict("k1" => PARAM1), compartments = Dict("c1" => COMP1), species = Dict("s1" => SPECIES1), reactions = Dict("r1" => reac)) @test isequal(IV, SBMLToolkit.get_reactions(model)[1].rate) # Test get_unidirectional_components km = SBML.MathApply("-", SBML.Math[KINETICMATH1, SBML.MathIdent("c1")]) sm = SBMLToolkit.interpret_as_num(km, MODEL1) kl = SBMLToolkit.get_unidirectional_components(sm) @test isequal(kl, (k1, c1)) km = SBML.MathApply("-", SBML.Math[KINETICMATH1, KINETICMATH2]) sm = SBMLToolkit.interpret_as_num(km, MODEL1) fw, rv = SBMLToolkit.get_unidirectional_components(sm) rv = substitute(rv, Dict(SBMLToolkit.create_var("s2") => SBMLToolkit.create_var("s2", IV))) @test isequal((fw, rv), (k1, k1 * s2)) km = SBML.MathIdent("s1s2") sm1 = SBMLToolkit.interpret_as_num(km, MODEL1) sm2 = sm - sm1 @test isequal(SBMLToolkit.get_unidirectional_components(sm2), (sm2, Num(0))) @test isequal(SBMLToolkit.get_unidirectional_components(k1), (k1, Num(0))) # Test add_reaction! 
rxs = Catalyst.Reaction[] SBMLToolkit.add_reaction!(rxs, k1, SBML.SpeciesReference[], SBML.SpeciesReference[], MODEL1) @test isequal(rxs, Catalyst.Reaction[]) rxs = Catalyst.Reaction[] SBMLToolkit.add_reaction!(rxs, k1 * s1, [SBML.SpeciesReference(species = "s1", stoichiometry = 1.0)], SBML.SpeciesReference[], MODEL1) reaction_true = Catalyst.Reaction(k1, [s1], nothing, [1], nothing, only_use_rate = false) @test isequal(rxs[1], reaction_true) rxs = Catalyst.Reaction[] SBMLToolkit.add_reaction!(rxs, k1 * s1, [SBML.SpeciesReference(species = "s1", stoichiometry = 1.0)], SBML.SpeciesReference[], MODEL1, enforce_rate = true) reaction_true = Catalyst.Reaction(k1, [s1], nothing, [1], nothing, only_use_rate = true) @test isequal(rxs, [reaction_true]) # Test stoich_convert_to_ints @test isnothing(SBMLToolkit.stoich_convert_to_ints(nothing)) t = typeof(SBMLToolkit.stoich_convert_to_ints([1.0])[1]) @test isequal(t, Int64) t = typeof(SBMLToolkit.stoich_convert_to_ints([1.1, 1])[1]) @test isequal(t, Float64) # Test get_reagents @test isequal((nothing, [s1], nothing, [1.0]), SBMLToolkit.get_reagents(REACTION1.reactants, REACTION1.products, MODEL1)) s = SBML.Species(name = "s", compartment = "c1", boundary_condition = true, initial_amount = 1.0, substance_units = "substance", only_substance_units = true) var = SBMLToolkit.create_var("s", IV) r = SBML.Reaction(reactants = [SBML.SpeciesReference(species = "s", stoichiometry = 1.0)], reversible = false) m = SBML.Model(species = Dict("s" => s), reactions = Dict("r1" => r)) @test isequal(([var], [var], [1.0], [1.0]), SBMLToolkit.get_reagents(r.reactants, r.products, m)) @test isequal((nothing, nothing, nothing, nothing), SBMLToolkit.get_reagents(r.products, r.reactants, m)) r = SBML.Reaction( reactants = [SBML.SpeciesReference(species = "s", stoichiometry = 1.0), SBML.SpeciesReference(species = "s", stoichiometry = 1.0)], reversible = false) m = SBML.Model(species = Dict("s" => s), reactions = Dict("r1" => r)) @test isequal(([var], [var], [2.0], [2.0]), SBMLToolkit.get_reagents(r.reactants, r.products, m)) # Test use_rate @test isequal(SBMLToolkit.use_rate(k1 * s1, [s1], [1]), (k1, false)) # Case hOSU=true @test isequal(SBMLToolkit.use_rate(k1 * s1 * s2 / (c1 + s2), [s1], [1]), (k1 * s1 * s2 / (c1 + s2), true)) # Case Michaelis-Menten kinetics # Test get_massaction @test isequal(SBMLToolkit.get_massaction(k1 * s1, [s1], [1]), k1) # Case hOSU=true @test isequal(SBMLToolkit.get_massaction(k1 * s1 / c1, [s1], [1]), k1 / c1) # Case hOSU=false @test isequal(SBMLToolkit.get_massaction(k1 + c1, nothing, nothing), k1 + c1) # Case zero order kineticLaw @test isnan(SBMLToolkit.get_massaction(k1 * s1 * s2 / (c1 + s2), [s1], [1])) # Case Michaelis-Menten kinetics @test isnan(SBMLToolkit.get_massaction(k1 * s1 * IV, [s1], [1])) # Case kineticLaw with time @test isnan(SBMLToolkit.get_massaction(k1 * s1, [s1], [101])) # Case no simplification performed due to large rstoich @test isnan(SBMLToolkit.get_massaction(k1 * s1 * s2, [s1], [1])) @test isnan(SBMLToolkit.get_massaction(k1 + c1, [s1], [1])) @test_throws ErrorException SBMLToolkit.get_massaction(k1, nothing, [1])
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
2201
using SBMLToolkit using SBML, SBMLToolkitTestSuite using Catalyst, ModelingToolkit, OrdinaryDiffEq using Test const IV = default_t() @parameters k1, compartment @species S1(IV), S2(IV) function readmodel(sbml) SBMLToolkit.readSBMLFromString(sbml, doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end) end # Test get_rules sbml, _, _ = SBMLToolkitTestSuite.read_case("00029") # assignmentRule m = readmodel(sbml) a, o, r = SBMLToolkit.get_rules(m) o_true = [S1 ~ 7 * compartment] @test isequal(o, o_true) sbml, _, _ = SBMLToolkitTestSuite.read_case("00031") # rateRule m = readmodel(sbml) a, o, r = SBMLToolkit.get_rules(m) r_true = [default_time_deriv()(S1) ~ 7 * compartment] @test isequal(r, r_true) sbml, _, _ = SBMLToolkitTestSuite.read_case("00039") # algebraicRule m = readmodel(sbml) a, o, r = SBMLToolkit.get_rules(m) a_true = [0 ~ S1 + S2 - k1] @test isequal(a, a_true) # Test get_var_and_assignment sbml, _, _ = SBMLToolkitTestSuite.read_case("00031") m = readmodel(sbml) var, assignment = SBMLToolkit.get_var_and_assignment(m, m.rules[1]) var_true = S1 assignment_true = 7 * compartment @test isequal(var, var_true) @test isequal(assignment, assignment_true) r = SBML.AssignmentRule("S2", SBML.MathVal(1)) @test_throws ErrorException("Cannot find target for rule with ID `S2`") SBMLToolkit.get_var_and_assignment( m, r) # Test get_volume_correction vc = SBMLToolkit.get_volume_correction(m, "notaspecies") @test isnothing(vc) vc = SBMLToolkit.get_volume_correction(m, "S1") @test isequal(vc, "compartment") sbml, _, _ = SBMLToolkitTestSuite.read_case("00060") # hOSU="true" species m = readmodel(sbml) vc = SBMLToolkit.get_volume_correction(m, "S1") @test isnothing(vc) # tests that non-constant parameters become variables sbml, _, _ = SBMLToolkitTestSuite.read_case("00033") m = readmodel(sbml) @named sys = ODESystem(m) @species k1(IV) @test isequal(k1, unknowns(sys)[end]) # tests that non-constant compartments become variables sbml, _, _ = SBMLToolkitTestSuite.read_case("00051") # hOSU="true" species m = readmodel(sbml) @named sys = ODESystem(m) @species C(IV) @test isequal(C, unknowns(sys)[end])
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
647
using SafeTestsets, Test @safetestset "SBMLToolkit.jl" begin @safetestset "Quality Assurance" begin include("qa.jl") end @safetestset "Systems" begin include("systems.jl") end @safetestset "Reactions" begin include("reactions.jl") end @safetestset "Rules" begin include("rules.jl") end @safetestset "Events" begin include("events.jl") end @safetestset "Utils" begin include("utils.jl") end @safetestset "Simulation results" begin include("simresults.jl") end @safetestset "Wuschel" begin include("wuschel.jl") end end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
795
using SBMLToolkitTestSuite using Test const case_ids = [7, # boundary_condition 22, # non-integer stoichiometry 23, # species with constant=boundaryCondition="true" 41, # events 140, # compartment size overridden with assignmentRule 170, # Model using parameters and rules only 325, # One reactions and two rate rules with four species in a 2D compartment 679 # Initial value calculated by assignmentRule in compartment of non-unit size # 1208, # Non-hOSU species with rateRule in variable compartment -> require MTK fix. ] const logdir = joinpath(@__DIR__, "logs") ispath(logdir) && rm(logdir, recursive = true) mkdir(logdir) df = verify_all(case_ids, logdir) for i in 1:length(case_ids) @test sum(Vector(df[i, ["expected_err", "res"]])) == 1 end
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
7239
using SBMLToolkit using Catalyst, SBML using Test cd(@__DIR__) sbmlfile = joinpath("data", "reactionsystem_01.xml") const IV = default_t() @parameters k1, c1 @species s1(IV), s2(IV), s1s2(IV) COMP1 = SBML.Compartment("c1", true, 3, 2.0, "nl", nothing, nothing, nothing, nothing, SBML.CVTerm[]) SPECIES1 = SBML.Species(name = "s1", compartment = "c1", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = false) # Todo: Maybe not support units in initial_concentration? SPECIES2 = SBML.Species(name = "s2", compartment = "c1", initial_amount = 1.0, substance_units = "substance/nl", only_substance_units = false) KINETICMATH1 = SBML.MathIdent("k1") KINETICMATH2 = SBML.MathApply("*", SBML.Math[SBML.MathIdent("k1"), SBML.MathIdent("s2")]) KINETICMATH3 = SBML.MathApply("-", SBML.Math[SBML.MathApply("*", SBML.Math[SBML.MathIdent("k1"), SBML.MathIdent("s1")]), KINETICMATH1]) REACTION1 = SBML.Reaction( products = [ SBML.SpeciesReference(species = "s1", stoichiometry = 1.0) ], kinetic_math = KINETICMATH1, reversible = false) REACTION2 = SBML.Reaction( reactants = [ SBML.SpeciesReference(species = "s1", stoichiometry = 1.0) ], kinetic_math = KINETICMATH3, reversible = true) PARAM1 = SBML.Parameter(name = "k1", value = 1.0, constant = true) MODEL1 = SBML.Model(parameters = Dict("k1" => PARAM1), compartments = Dict("c1" => COMP1), species = Dict("s1" => SPECIES1), reactions = Dict("r1" => REACTION1)) # PL: For instance in the compartments dict, we may want to enforce that key and compartment.name are identical MODEL2 = SBML.Model(parameters = Dict("k1" => PARAM1), compartments = Dict("c1" => COMP1), species = Dict("s1" => SPECIES1), reactions = Dict("r3" => REACTION2)) # Test ReactionSystem constructor rs = ReactionSystem(MODEL1) @test isequal(Catalyst.get_eqs(rs), Catalyst.Reaction[Catalyst.Reaction(k1, nothing, [s1], nothing, [1.0])]) @test isequal(Catalyst.get_iv(rs), IV) @test isequal(Catalyst.get_species(rs), [s1]) @test isequal(Catalyst.get_ps(rs), [k1, c1]) @named rs = ReactionSystem(MODEL1) isequal(nameof(rs), :rs) rs = ReactionSystem(readSBML(sbmlfile)) @test isequal(Catalyst.get_eqs(rs), Catalyst.Reaction[Catalyst.Reaction(k1 / c1, [s1, s2], [s1s2], [1.0, 1.0], [1.0])]) @test isequal(Catalyst.get_iv(rs), IV) @test isequal(Catalyst.get_species(rs), [s1, s1s2, s2]) @test isequal(Catalyst.get_ps(rs), [k1, c1]) @named rs = ReactionSystem(MODEL1) isequal(nameof(rs), :rs) rs = ReactionSystem(MODEL2) # Contains reversible reaction @test isequal(Catalyst.get_eqs(rs), Catalyst.Reaction[Catalyst.Reaction(k1, [s1], nothing, [1], nothing), Catalyst.Reaction(k1, nothing, [s1], nothing, [1])]) @test isequal(Catalyst.get_iv(rs), IV) @test isequal(Catalyst.get_species(rs), [s1]) @test isequal(Catalyst.get_ps(rs), [k1, c1]) @test convert(ModelingToolkit.ODESystem, rs) isa ODESystem @test structural_simplify(convert(ModelingToolkit.ODESystem, rs)) isa ODESystem # Test ODESystem constructor odesys = ODESystem(MODEL1) trueeqs = Equation[default_time_deriv()(s1) ~ k1] @test isequal(Catalyst.get_eqs(odesys), trueeqs) @test isequal(Catalyst.get_iv(odesys), IV) @test isequal(Catalyst.get_unknowns(odesys), [s1]) @test isequal(Catalyst.get_ps(odesys), [k1, c1]) u0 = [s1 => 1.0] par = [k1 => 1.0, c1 => 2.0] @test isequal(ModelingToolkit.defaults(odesys), ModelingToolkit._merge(u0, par)) # PL: @Anand: for some reason this does not work with `Catalyst.get_default()` @named odesys = ODESystem(MODEL1) isequal(nameof(odesys), :odesys) @test 
structural_simplify(odesys) isa ODESystem odesys = ODESystem(readSBML(sbmlfile)) m = readSBML(sbmlfile) trueeqs = Equation[default_time_deriv()(s1) ~ -((k1 * s1 * s2) / c1), default_time_deriv()(s1s2) ~ (k1 * s1 * s2) / c1, default_time_deriv()(s2) ~ -((k1 * s1 * s2) / c1)] @test isequal(Catalyst.get_eqs(odesys), trueeqs) @test isequal(Catalyst.get_iv(odesys), IV) @test isequal(Catalyst.get_unknowns(odesys), [s1, s1s2, s2]) @test isequal(Catalyst.get_ps(odesys), [k1, c1]) u0 = [s1 => 2 * 1.0, s2 => 2 * 1.0, s1s2 => 2 * 1.0] par = [k1 => 1.0, c1 => 2.0] @test isequal(ModelingToolkit.defaults(odesys), ModelingToolkit._merge(u0, par)) @named odesys = ODESystem(MODEL1) isequal(nameof(odesys), :odesys) @test ODEProblem(odesys, [], [0.0, 1.0], []) isa ODEProblem # # Test ODEProblem # oprob = ODEProblem(ODESystem(MODEL1), [], [0.0, 1.0], []) # sol = solve(oprob, Tsit5()) # @test isapprox(sol.u, [[1.0], [2.0]]) # @test_nowarn ODEProblem(ODESystem(readSBML(sbmlfile)), [], [0.0, 1.0], []) # Test get_mappings u0map, parammap, initial_assignment_map = SBMLToolkit.get_mappings(MODEL1) u0map_true = [s1 => 1.0] parammap_true = [k1 => 1.0, c1 => 2.0] initial_assignment_map_true = Pair[] @test isequal(u0map, u0map_true) @test isequal(parammap, parammap_true) @test isequal(initial_assignment_map, initial_assignment_map_true) p = SBML.Parameter(name = "k2", value = 1.0, constant = false) m = SBML.Model(parameters = Dict("k2" => p), rules = SBML.Rule[SBML.RateRule("k2", KINETICMATH2)]) u0map, parammap, initial_assignment_map = SBMLToolkit.get_mappings(m) u0map_true = [SBMLToolkit.create_var("k2", IV; isbcspecies = true) => 1.0] @test isequal(u0map, u0map_true) @test Catalyst.isbc(first(u0map[1])) p = SBML.Parameter(name = "k2", value = nothing, constant = true) ia = Dict("k2" => KINETICMATH1) m = SBML.Model(parameters = Dict("k1" => PARAM1, "k2" => p), initial_assignments = ia) u0map, parammap, initial_assignment_map = SBMLToolkit.get_mappings(m) parammap_true = [k1 => 1.0, SBMLToolkit.create_var("k2") => k1] initial_assignment_map_true = [SBMLToolkit.create_var("k2") => k1] @test isequal(parammap, parammap_true) @test isequal(initial_assignment_map, initial_assignment_map_true) m = SBML.Model(species = Dict("s2" => SPECIES2), rules = SBML.Rule[SBML.AlgebraicRule(KINETICMATH2)]) u0map, parammap, initial_assignment_map = SBMLToolkit.get_mappings(m) Catalyst.isbc(first(u0map[1])) # Test get_netstoich r = SBML.Reaction(reactants = [SBML.SpeciesReference(species = "s1", stoichiometry = 1.0)], kinetic_math = KINETICMATH1, reversible = false) ns = SBMLToolkit.netstoich("s1", r) @test isequal(ns, -1) r = SBML.Reaction(reactants = [SBML.SpeciesReference(species = "s1", stoichiometry = 1.0)], products = [SBML.SpeciesReference(species = "s1", stoichiometry = 2.0)], kinetic_math = KINETICMATH1, reversible = false) ns = SBMLToolkit.netstoich("s1", r) @test isequal(ns, 1) # Test checksupport_file @test_nowarn SBMLToolkit.checksupport_file(sbmlfile) @test_throws ErrorException SBMLToolkit.checksupport_file(joinpath("data", "unsupported.sbml")) # Test checksupport_string @test_nowarn SBMLToolkit.checksupport_string("all good <reaction") @test_throws ErrorException SBMLToolkit.checksupport_string("contains </delay>") # Test convenience functions @test readSBML(sbmlfile, DefaultImporter()) isa SBML.Model @test readSBML(sbmlfile, ReactionSystemImporter()) isa ReactionSystem @test readSBML(sbmlfile, ODESystemImporter()) isa ODESystem
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
4125
using SBMLToolkit using Catalyst, SBML, SBMLToolkitTestSuite using Test function readmodel(sbml) SBMLToolkit.readSBMLFromString(sbml, doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end) end COMP1 = SBML.Compartment("C", true, 3, 2.0, "nl", nothing, nothing, nothing, nothing, SBML.CVTerm[]) SPECIES1 = SBML.Species(name = "B", compartment = "C", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = false) # Todo: Maybe not support units in initial_concentration? PARAM1 = SBML.Parameter(name = "D", value = 1.0, constant = true) SPECIES2 = SBML.Species(name = "Bc", compartment = "C", initial_amount = 1.0, substance_units = "substance", only_substance_units = true, boundary_condition = false, constant = true) # Todo: Maybe not support units in initial_concentration? PARAM2 = SBML.Parameter(name = "Dv", value = 1.0, constant = false) RULE1 = SBML.AssignmentRule("Dv", SBML.MathIdent("B")) MODEL1 = SBML.Model(parameters = Dict("D" => PARAM1, "Dv" => PARAM2), compartments = Dict("C" => COMP1), species = Dict("B" => SPECIES1, "Bc" => SPECIES2), rules = [RULE1]) const IV = default_t() @species s1(IV) # Test symbolicsRateOf rate = SBMLToolkit.symbolicsRateOf(s1) rate_true = SBMLToolkit.D(s1) @test isequal(rate, rate_true) # Test map_symbolics_ident sym = SBMLToolkit.map_symbolics_ident(SBML.MathIdent("B"), MODEL1) @species B(IV) [irreducible = false; isbcspecies = false] @test isequal(sym, B) # Test interpret_as_num @species B(IV) Dv(IV) @parameters C D Bc test = SBML.MathApply("*", SBML.Math[ SBML.MathApply("+", SBML.Math[ SBML.MathApply("*", SBML.Math[SBML.MathAvogadro("A"), SBML.MathIdent("B")]), SBML.MathApply("-", SBML.Math[SBML.MathApply("*", SBML.Math[SBML.MathIdent("C"), SBML.MathIdent("D"), SBML.MathIdent("Dv"), SBML.MathIdent("Bc")])])]), SBML.MathTime("Time")]) @test isequal(SBMLToolkit.interpret_as_num(test, MODEL1), IV * (6.02214076e23 * B - C * D * Dv * Bc)) # Test get_substitutions sbml, _, _ = SBMLToolkitTestSuite.read_case("00001") m = readmodel(sbml) subsdict = SBMLToolkit.get_substitutions(m) @parameters k1, compartment @species S1(IV), S2(IV) subsdict_true = Dict(Num(Symbolics.variable(:S1; T = Real)) => S1, Num(Symbolics.variable(:S2; T = Real)) => S2, Num(Symbolics.variable(:k1; T = Real)) => k1, Num(Symbolics.variable(:compartment; T = Real)) => compartment) @test isequal(subsdict, subsdict_true) # Test create_var var = SBMLToolkit.create_var("s2") @species s2 @test isequal(var, s2) var = SBMLToolkit.create_var("s2", IV, isbcspecies = true) Catalyst.isbc(var) # Test create_param par = SBMLToolkit.create_param("k") @parameters k @test isequal(par, k) par = SBMLToolkit.create_param("k", isconstantspecies = true) @test Catalyst.isconstant(par) # Test create_symbol # Comment in when https://github.com/SciML/ModelingToolkit.jl/issues/2228 is fixed # @species B(IV) Dv(IV) # @parameters C D Bc # sym = SBMLToolkit.create_symbol("B", MODEL1) # @test isequal(B, sym) # sym = SBMLToolkit.create_symbol("Dv", MODEL1) # @test isequal(Dv, sym) # sym = SBMLToolkit.create_symbol("C", MODEL1) # @test isequal(C, sym) # sym = SBMLToolkit.create_symbol("D", MODEL1) # @test isequal(D, sym) # sym = SBMLToolkit.create_symbol("Bc", MODEL1) # @test isequal(Bc, sym) # Test has_rule_type sbml, _, _ = SBMLToolkitTestSuite.read_case("00031") # rateRule m = readmodel(sbml) res = SBMLToolkit.has_rule_type("S1", m, SBML.RateRule) @test res res = SBMLToolkit.has_rule_type("nospecies", m, SBML.RateRule) @test 
!res sbml, _, _ = SBMLToolkitTestSuite.read_case("00039") # algebraicRule m = readmodel(sbml) res = SBMLToolkit.has_rule_type("S1", m, SBML.AlgebraicRule) @test res res = SBMLToolkit.has_rule_type("nospecies", m, SBML.AlgebraicRule) @test !res
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
code
814
# this file tests that a big model simulates # SBML.Model with 4139 reactions, 1265 species, and 522 parameters. (1012 equations) using SBMLToolkit using Downloads, ModelingToolkit, OrdinaryDiffEq using Test sbml_url = "https://www.ebi.ac.uk/biomodels/model/download/MODEL1112100000.2?filename=MODEL1112100000_url.xml" sbml = String(take!(Downloads.download(sbml_url, IOBuffer()))) m = readSBMLFromString(sbml, doc -> begin # set_level_and_version(3, 2)(doc) # fails on wuschel convert_promotelocals_expandfuns(doc) end) sys = ODESystem(m) @test length(equations(sys)) == 1012 @test length(unknowns(sys)) == 1012 #ssys = structural_simplify(sys) # Todo: Figure out why this complains about ExtraVariablesSystemException prob = ODEProblem(sys, [], (0, 10.0)) solve(prob, Tsit5(), save_everystep = false)
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
docs
4427
# SBMLToolkit [![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) [![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/SBMLToolkit/stable/) [![codecov](https://codecov.io/gh/SciML/SBMLToolkit.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/SciML/SBMLToolkit.jl) [![Build Status](https://github.com/SciML/SBMLToolkit.jl/workflows/CI/badge.svg)](https://github.com/SciML/SBMLToolkit.jl/actions?query=workflow%3ACI) [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac) [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) SBMLToolkit.jl is a lightweight tool to import models specified in the Systems Biology Markup Language (SBML) into the Julia SciML ecosystem. There are multiple ways to specify the same model in SBML. Please help us improve SBMLToolkit.jl by creating a GitHub issue if you experience errors when converting your SBML model. SBMLToolkit uses the [SBML.jl](https://github.com/LCSB-BioCore/SBML.jl) wrapper of the [libSBML](https://model.caltech.edu/software/libsbml/) library to lower dynamical SBML models into completed dynamical systems. If you would like to import BioNetGen models, use the `writeSBML()` export function or import the `.net` file with [ReactionNetworkImporters.jl](https://github.com/SciML/ReactionNetworkImporters.jl). For constrained-based modeling, please have a look at [COBREXA.jl](https://github.com/COBREXA/COBREXA.jl). We also recommend trying [SBMLImporter.jl](https://github.com/sebapersson/SBMLImporter.jl). While SBMLToolkit.jl has a slightly cleaner interface, SBMLImporter.jl respects directionality of events, can output concentrations or amounts, and provides better simulation performance for models including time-triggered events and SBML `piecewise` expressions. ## Installation To install SBMLToolkit.jl, use the Julia package manager: ```julia using Pkg Pkg.add("SBMLToolkit") ``` ## Tutorial SBML models can be simulated with the following steps (note that `sol` is in absolute quantities rather than concentrations): ```julia using SBMLToolkit, OrdinaryDiffEq odesys = readSBML("my_model.xml", ODESystemImporter()) tspan = (0.0, 1.0) prob = ODEProblem(odesys, [], tspan, []) sol = solve(prob, Tsit5()) ``` While this imports an `ODESystem` directly, you can also import a Catalyst.jl `ReactionSystem`: ```julia using SBMLToolkit rs = readSBML("my_model.xml", ReactionSystemImporter()) ``` One common case where this is useful is if you want to run stochastic instead of ODE simulations. In the very unlikely case that you need fine-grained control over the SBML import, you can create an SBML.jl `Model` (we strongly recommend manually running `checksupport_file("my_model.xml")` beforehand): ```julia using SBML mdl = readSBML("my_model.xml", doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end) ``` The conversion to SBML level 3 version 2 is necessary because older versions are not well supported in SBMLToolkit. `convert_promotelocals_expandfuns` basically flattens the SBML before the import. Once you have obtained the `Model`, you can convert it to a `ReactionSystem` and `ODESystem`. 
```julia using SBMLToolkit rs = ReactionSystem(mdl) odesys = convert(ODESystem, rs) ``` ## License The package is released under the [MIT license](https://github.com/SciML/SBMLToolkit.jl/blob/main/LICENSE). ## Questions and comments Please use GitHub issues and the #sciml-sysbio channel in the [Julia Slack workspace](https://julialang.org/slack/) with any questions or comments. ## Citation If you use SBMLToolkit.jl in your research, please cite [this paper](https://www.degruyter.com/document/doi/10.1515/jib-2024-0003/html): ``` @article{lang_sbmltoolkitjl_2024, title = {{SBMLToolkit}.jl: a {Julia} package for importing {SBML} into the {SciML} ecosystem}, doi = {10.1515/jib-2024-0003}, journal = {Journal of Integrative Bioinformatics}, author = {Lang, Paul F. and Jain, Anand and Rackauckas, Christopher}, year = {2024}, } ```
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
docs
103
# API documentation ```@autodocs Modules = [SBMLToolkit, SBMLToolkit.SBML] Pages = ["systems.jl"] ```
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
0.1.28
68f50c5b34d17d625710062de62605e3bf6a77d3
docs
4599
```@meta CurrentModule = SBMLToolkit ``` # SBMLToolkit SBMLToolkit.jl is a lightweight tool to import models specified in the Systems Biology Markup Language (SBML) into the Julia SciML ecosystem. There are multiple ways to specify the same model in SBML. Please help us improve SBMLToolkit.jl by creating a GitHub issue if you experience errors when converting your SBML model. SBMLToolkit uses the [SBML.jl](https://github.com/LCSB-BioCore/SBML.jl) wrapper of the [libSBML](https://sbml.org/software/libsbml/) library to lower dynamical SBML models into dynamical systems. If you would like to import BioNetGen models, use the `writeSBML()` export function or import the `.net` file with [ReactionNetworkImporters.jl](https://github.com/SciML/ReactionNetworkImporters.jl). For constrained-based modeling, please have a look at [COBREXA.jl](https://github.com/LCSB-BioCore/COBREXA.jl). ## Installation To install SBMLToolkit.jl, use the Julia package manager: ```julia using Pkg Pkg.add("SBMLToolkit") ``` ## Contributing - Please refer to the [SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://github.com/SciML/ColPrac/blob/master/README.md) for guidance on PRs, issues, and other matters relating to contributing to SciML. - See the [SciML Style Guide](https://github.com/SciML/SciMLStyle) for common coding practices and other style decisions. - There are a few community forums: + The #diffeq-bridged and #sciml-bridged channels in the [Julia Slack](https://julialang.org/slack/) + The #diffeq-bridged and #sciml-bridged channels in the [Julia Zulip](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) + On the [Julia Discourse forums](https://discourse.julialang.org) + See also [SciML Community page](https://sciml.ai/community/) ## Tutorial SBML models can be simulated with the following steps (note that `sol` is in absolute quantities rather than concentrations): ```julia using SBMLToolkit, ModelingToolkit, OrdinaryDiffEq SBMLToolkit.checksupport_file("my_model.xml") mdl = readSBML("my_model.xml", doc -> begin set_level_and_version(3, 2)(doc) convert_promotelocals_expandfuns(doc) end) rs = ReactionSystem(mdl) # If you want to create a reaction system odesys = convert(ODESystem, rs) # Alternatively: ODESystem(mdl) tspan = (0.0, 1.0) prob = ODEProblem(odesys, [], tspan, []) sol = solve(prob, Tsit5()) ``` SBMLToolkit also provides the following convenience functions to import `SBML.Models`, `Catalyst.ReactionSystems` and `ModelingToolkit.ODESystems`: ```julia mdl = readSBML(sbmlfile, DefaultImporter()) rs = readSBML(sbmlfile, ReactionSystemImporter()) odesys = readSBML(sbmlfile, ODESystemImporter()) ``` ## License The package is released under the [MIT license](https://github.com/SciML/SBMLToolkit.jl/blob/main/LICENSE). ## Development team This package was developed by [Paul F. Lang](https://www.linkedin.com/in/paul-lang-7b54a81a3/) at the University of Oxford, UK and [Anand Jain](https://github.com/anandijain) at the University of Chicago, USA. ## Questions and comments Please use GitHub issues, the #sciml-sysbio channel in the [Julia Slack workspace](https://julialang.org/slack/) or email [Paul F. Lang](mailto:[email protected]) or [Anand Jain](mailto:[email protected]) with any questions or comments. 
## Reproducibility ```@raw html <details><summary>The documentation of this SciML package was built using these direct dependencies,</summary> ``` ```@example using Pkg # hide Pkg.status() # hide ``` ```@raw html </details> ``` ```@raw html <details><summary>and using this machine and Julia version.</summary> ``` ```@example using InteractiveUtils # hide versioninfo() # hide ``` ```@raw html </details> ``` ```@raw html <details><summary>A more complete overview of all dependencies and their versions is also provided.</summary> ``` ```@example using Pkg # hide Pkg.status(; mode = PKGMODE_MANIFEST) # hide ``` ```@raw html </details> ``` ```@eval using TOML using Markdown version = TOML.parse(read("../../Project.toml", String))["version"] name = TOML.parse(read("../../Project.toml", String))["name"] link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version * "/assets/Manifest.toml" link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version * "/assets/Project.toml" Markdown.parse("""You can also download the [manifest]($link_manifest) file and the [project]($link_project) file. """) ```
SBMLToolkit
https://github.com/SciML/SBMLToolkit.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
3338
using LowRankModels, DataFrames, Random, StatsBase Random.seed!(0) println("censored data example") # boolean example with only entries greater than threshold t observed # ie, censored data # example with only entries greater than threshold t observed m,n,k,ktrue = 100,100,1,1 A = rand(m,ktrue)*rand(ktrue,n) B = round.(Int, ktrue*rand(m,n) .>= A) # Bernoulli samples with probability proportional to A losses = fill(QuadLoss(),n) r = QuadReg(.1) obs = Array{Tuple{Int,Int}}(undef,0) for i=1:m for j=1:n if B[i,j] == 1 push!(obs, (i,j)) end end end (train_observed_features, train_observed_examples, test_observed_features, test_observed_examples) = get_train_and_test(obs, m, n, .2) train_glrm = GLRM(B,losses,r,r,k, observed_features=train_observed_features, observed_examples=train_observed_examples,) test_glrm = GLRM(B,losses,r,r,k, observed_features=test_observed_features, observed_examples=test_observed_examples) function censored_regularization_path(train_glrm::GLRM, test_glrm::GLRM; params=Params(), reg_params=exp10.(range(2,stop=-2,length=5)), holdout_proportion=.1, verbose=true, ch::ConvergenceHistory=ConvergenceHistory("reg_path")) m,n = size(train_glrm.A) ntrain = sum(map(length, train_glrm.observed_features)) ntest = sum(map(length, test_glrm.observed_features)) train_error = Array{Float64}(undef,length(reg_params)) test_error = Array{Float64}(undef,length(reg_params)) solution = Array{Tuple{Float64,Float64}}(undef, length(reg_params)) train_time = Array{Float64}(undef, length(reg_params)) for iparam=1:length(reg_params) reg_param = reg_params[iparam] # evaluate train and test error if verbose println("fitting train GLRM for reg_param $reg_param") end mul!(train_glrm.rx, reg_param) mul!(train_glrm.ry, reg_param) train_glrm.X, train_glrm.Y = randn(train_glrm.k,m), randn(train_glrm.k,n) X, Y, ch = fit!(train_glrm; params=params, ch=ch, verbose=verbose) train_time[iparam] = ch.times[end] if verbose println("computing train and test error for reg_param $reg_param:") end train_error[iparam] = objective(train_glrm, X, Y, include_regularization=false) / ntrain if verbose println("\ttrain error: $(train_error[iparam])") end test_error[iparam] = objective(test_glrm, X, Y, include_regularization=false) / ntest if verbose println("\ttest error: $(test_error[iparam])") end solution[iparam] = (sum(X)+sum(Y), sum(abs.(X))+sum(abs.(Y))) if verbose println("\tsum of solution, one norm of solution: $(solution[iparam])") end end return train_error, test_error, train_time, reg_params, solution end (train_error, test_error, train_time, reg_params, solution) = censored_regularization_path(train_glrm, test_glrm, params=ProxGradParams(1, max_iter=50, abs_tol=.001, min_stepsize=.1), reg_params=exp10.(range(2, stop=-2, length=3)) ) df = DataFrame(train_error = train_error, test_error = test_error, train_time = train_time, reg_param = reg_params, solution_1norm = [s[2] for s in solution]) println(df) println(solution)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1400
using DataFrames, LowRankModels, Random, SparseArrays println("cross validation example") Random.seed!(5) do_cv = true do_cv_by_iter = true do_reg_path = true do_plot = false m,n,k = 50,50,3 A = randn(m,k)*randn(k,n) + k*sprandn(m,n,.05) losses = fill(HuberLoss(),n) r = QuadReg(.1) glrm = GLRM(A,losses,r,r,k+2) if do_cv println("Computing cross validation error for each fold") params = Params(1.0, max_iter=100, abs_tol=0.0, min_stepsize=.001) train_error, test_error, train_glrms, test_glrms = cross_validate(glrm, nfolds=5, params=params) df = DataFrame(train_error = train_error, test_error = test_error) end if do_cv_by_iter println("Computing training and testing error as a function of iteration number") train_error, test_error = cv_by_iter(glrm) df = DataFrame(train_error = train_error, test_error = test_error) end if do_reg_path println("Computing regularization path") params = Params(1.0, max_iter=50, abs_tol=.00001, min_stepsize=.01) train_error, test_error, train_time, reg_params = regularization_path(glrm, params=params, reg_params=exp10.(range(2,stop=-2,length=15))) df = DataFrame(train_error = train_error, test_error = test_error, train_time = train_time, reg_param = reg_params) if do_plot p = plot(df, :reg_param, [:train_error, :test_error]; scale = :log, filename = nothing) end end println(df)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
864
import RDatasets import DataFrames: DataFrame using LowRankModels # pick a data set df = RDatasets.dataset("psych", "msq") # make a GLRM on the whole dataframe using type imputation auto_glrm, labels = GLRM(df, 3) # fit!(auto_glrm) - this doesn't work yet, because the data contains some trivial columns (ordinals with <2 levels) # now we'll try it without type imputation # we'll just fit four of the columns, to try out all four data types dd = DataFrame([df[s] for s in [:TOD, :Scale, :Vigorous, :Wakeful]]) dd[!,end] = (dd[:,end].==1) datatypes = [:real, :cat, :ord, :bool] # fit it! glrm = GLRM(dd, 2, datatypes) println("initializing") init_svd!(glrm) println("fitting") X, Y, ch = fit!(glrm) # print results println(ch.objective) println("imputing") impute(glrm) println("crossvalidating") cross_validate(glrm, do_obs_check=false, init=init_svd!)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
451
using RDatasets using LowRankModels # pick a data set df = RDatasets.dataset("psych", "msq") # initialize glrm, labels = GLRM(df,2) println("Fitting model with random initialization") X, Y, ch = fit!(glrm) println("final objective is ", objective(glrm)) println("Fitting model with svd initialization; fit should be faster and final solution (slightly) better") init_svd!(glrm) X, Y, ch = fit!(glrm) println("final objective is ", objective(glrm))
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1333
using DataFrames, LowRankModels # boolean example with only entries greater than threshold t observed # ie, censored data # example with only entries greater than threshold t observed m,n,k,ktrue = 100,100,1,1 A = rand(m,ktrue)*rand(ktrue,n) println("max value of A is ",maximum(maximum(A))," which is less than $ktrue") B = round.(Int, ktrue*rand(m,n) .>= A) # Bernoulli samples with probability proportional to A losses = fill(QuadLoss(),n) r = QuadReg(.1) obs = Array{Tuple{Int,Int}}(undef, 0) for i=1:m for j=1:n if B[i,j] == 1 push!(obs, (i,j)) end end end (train_observed_features, train_observed_examples, test_observed_features, test_observed_examples) = get_train_and_test(obs, m, n, .2) train_glrm = GLRM(B,losses,r,r,k, observed_features=train_observed_features, observed_examples=train_observed_examples) train_error, test_error, prec_at_k, train_time, reg_params, solution = precision_at_k(train_glrm, test_observed_features, params=Params(1, max_iter=200, abs_tol=0.00001, min_stepsize=0.01), reg_params=exp10.(range(2,step=-2,length=9))) df = DataFrame(train_error = train_error, prec_at_k = prec_at_k, train_time = train_time, reg_param = reg_params, solution_1norm = [s[2] for s in solution]); println(df)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
376
using RDatasets include("censored.jl") include("cross_validate.jl") #= Initialize example fails because we're passing -1 as the third argument to ``` evaluate(::MultinomialOrdinalLoss, ::Array{Float64,1}, ::Int32) ``` src/evaluate_fit.jl:15 ... initialize.jl:11 =# # include("initialize.jl") include("precision_at_k.jl") include("simple_glrms.jl") include("fit_rdataset.jl")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
499
println("loading LowRankModels") @time @everywhere using LowRankModels function fit_pca(m,n,k) # matrix to encode Random.seed!(1) A = randn(m,k)*randn(k,n) X=randn(k,m) Y=randn(k,n) losses = fill(QuadLoss(),n) r = QuadReg() glrm = GLRM(A,losses,r,r,k, X=X, Y=Y) glrm = share(glrm) p = Params() p.max_iter = 10 X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) println("Time/iter:",ch.times[end]/10) return A,X,Y,ch end @everywhere Random.seed!(1) fit_pca(100,100,50)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2552
using LowRankModels import StatsBase: sample println("simple glrm examples") # minimize ||A - XY||^2 function fit_pca(m,n,k) # matrix to encode A = randn(m,k)*randn(k,n) loss = QuadLoss() r = ZeroReg() glrm = GLRM(A,loss,r,r,k) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end # minimize_{X>=0, Y>=0} ||A - XY||^2 function fit_nnmf(m,n,k) # matrix to encode A = rand(m,k)*rand(k,n) loss = QuadLoss() r = NonNegConstraint() glrm = GLRM(A,loss,r,r,k) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end # minimize ||A - XY||^2 + .1||X||^2 + .1||Y||^2 function fit_pca_nucnorm(m,n,k) # matrix to encode A = randn(m,k)*randn(k,n) loss = QuadLoss() r = QuadReg(.1) glrm = GLRM(A,loss,r,r,k) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end # minimize_{X<=0} ||A - XY||^2 function fit_kmeans(m,n,k) # matrix to encode Y = randn(k,n) A = zeros(m,n) for i=1:m A[i,:] = Y[mod(i,k)+1,:] end loss = QuadLoss() ry = ZeroReg() rx = UnitOneSparseConstraint() glrm = GLRM(A,loss,rx,ry,k+4) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end function fit_pca_nucnorm_sparse(m,n,k,s) # matrix to encode A = randn(m,k)*randn(k,n) loss = QuadLoss() r = QuadReg(.1) obsx = sample(1:m,s); obsy = sample(1:n,s) obs = [(obsx[i],obsy[i]) for i=1:s] glrm = GLRM(A,loss,r,r,k, obs=obs) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end function fit_pca_nucnorm_sparse_nonuniform(m,n,k,s) # matrix to encode A = randn(m,k)*randn(k,n) loss = QuadLoss() r = QuadReg(.1) obsx = [sample(1:round(Int,m/4),round(Int,s/2)); sample(round(Int,m/4)+1:m,s-round(Int,s/2))] obsy = sample(1:n,s) obs = [(obsx[i],obsy[i]) for i=1:s] glrm = GLRM(A,loss,r,r,k, obs=obs) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end function fit_soft_kmeans(m,n,k) # PCA with loadings constrained to lie on unit simplex # constrain columns of X to lie on unit simplex Xreal = rand(k,m) Xreal ./= sum(Xreal,1) A = Xreal' * randn(k,n) loss = QuadLoss() rx = SimplexConstraint() ry = ZeroReg() glrm = GLRM(A,loss,rx,ry,k) X,Y,ch = fit!(glrm) println("Convergence history:",ch.objective) return A,X,Y,ch end if true Random.seed!(10) fit_pca(100,100,2) fit_pca_nucnorm(100,100,2) fit_pca_nucnorm_sparse(500,500,2,10000) fit_pca_nucnorm_sparse_nonuniform(1000,1000,2,20000) fit_kmeans(50,50,10) fit_nnmf(50,50,2) end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1320
__precompile__() module LowRankModels using LinearAlgebra using Printf using SharedArrays using SparseArrays using Random using Statistics using DataFrames import LinearAlgebra: dot, norm, Diagonal, rmul!, mul! import Base: show import StatsBase: fit!, mode, mean, var, std # define losses, regularizers, convergence history include("domains.jl") include("losses.jl") include("impute_and_err.jl") include("regularizers.jl") include("convergence.jl") # define basic data type(s) include("glrm.jl") include("shareglrm.jl") # modify models (eg scaling and offsets) and evaluate fit include("modify_glrm.jl") include("evaluate_fit.jl") # fitting algorithms include("fit.jl") if Threads.nthreads() > 1 include("algorithms/proxgrad_multithread.jl") else include("algorithms/proxgrad.jl") end include("algorithms/sparse_proxgrad.jl") include("algorithms/quad_streaming.jl") # initialization methods include("rsvd.jl") include("initialize.jl") # fancy fun on top of low rank models include("simple_glrms.jl") include("cross_validate.jl") include("fit_dataframe.jl") include("sample.jl") # this takes to long to load for normal use # include("plot.jl") # utilities include("utilities/conveniencemethods.jl") include("utilities/deprecated.jl") # ScikitLearn.jl compatibility include("scikitlearn.jl") end # module
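A note on the conditional `include` above: the multithreaded proximal-gradient solver is chosen once, when the package is loaded, based on the number of Julia threads. A small editorial sketch of how a user would opt in (the thread count is fixed at Julia startup):

```julia
# Start Julia with e.g. `julia --threads 4` (or set JULIA_NUM_THREADS)
# *before* loading the package, then verify:
@show Base.Threads.nthreads()   # > 1 means algorithms/proxgrad_multithread.jl is used
using LowRankModels
```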
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1458
export ConvergenceHistory, update_ch! mutable struct ConvergenceHistory name::AbstractString objective::Array dual_objective::Array primal_residual::Array dual_residual::Array times::Array stepsizes::Array optval end ConvergenceHistory(name::AbstractString,optval=0) = ConvergenceHistory(name,Float64[],Float64[],Float64[],Float64[],Float64[],Float64[],optval) ConvergenceHistory() = ConvergenceHistory("unnamed_convergence_history") function update_ch!(ch::ConvergenceHistory, dt::Number, obj::Number, stepsize::Number=0, pr::Number=0, dr::Number=0) push!(ch.objective,obj) push!(ch.primal_residual,pr) push!(ch.dual_residual,dr) push!(ch.stepsizes,stepsize) if isempty(ch.times) push!(ch.times,dt) else push!(ch.times,ch.times[end]+dt) end end function update_ch!(ch::ConvergenceHistory, dt; obj=0, stepsize=0, pr=0, dr=0, dual_obj=0) push!(ch.objective,obj) push!(ch.dual_objective,dual_obj) push!(ch.primal_residual,pr) push!(ch.dual_residual,dr) push!(ch.stepsizes,stepsize) if isempty(ch.times) push!(ch.times,dt) else push!(ch.times,ch.times[end]+dt) end end function show(ch::ConvergenceHistory) print("Convergence History for $(ch.name)\n\n") @printf "%16s%16s\n" "time (s)" "objective" for i=1:length(ch.objective) @printf "%16.2e%16.4e\n" ch.times[i] ch.objective[i] end end
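A brief editorial sketch of how the history type above is used on its own (field meanings inferred from the constructor and `update_ch!`):

```julia
using LowRankModels

ch = ConvergenceHistory("demo")
update_ch!(ch, 0.5, 12.0)   # 0.5 s elapsed this iteration, objective 12.0
update_ch!(ch, 0.5, 7.5)    # times accumulate: ch.times == [0.5, 1.0]
show(ch)                    # prints the time/objective table
```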
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
14717
export cross_validate, cv_by_iter, regularization_path, get_train_and_test, precision_at_k # the loss function evaluates the objective minus the regularization # it is the default error metric loss_fn(args...; kwargs...) = objective(args...; include_regularization=false, kwargs...) # to use with error_metric when we have domains in the namespace, call as: # cross_validate(glrm, error_fn = error_metric(glrm,domains,glrm.X,glrm.Y)) function cross_validate(glrm::AbstractGLRM; nfolds=5, params=Params(), verbose=true, use_folds=nfolds, error_fn=loss_fn, init=nothing, do_obs_check = false) if verbose println("flattening observations") end # obs = flattenarray(map(ijs->map(j->(ijs[1],j),ijs[2]),zip(1:length(glrm.observed_features),glrm.observed_features))) obs = flatten_observations(glrm.observed_features) if verbose println("computing CV folds") end folds = getfolds(obs, nfolds, size(glrm.A)..., do_check = do_obs_check) train_glrms = Array{typeof(glrm)}(undef, nfolds) test_glrms = Array{typeof(glrm)}(undef, nfolds) train_error = Array{Float64}(undef, nfolds) test_error = Array{Float64}(undef, nfolds) for ifold=1:use_folds if verbose println("\nforming train and test GLRM for fold $ifold") end train_observed_features, train_observed_examples, test_observed_features, test_observed_examples = folds[ifold] ntrain = sum(map(length, train_observed_features)) ntest = sum(map(length, test_observed_features)) if verbose println("training model on $ntrain samples and testing on $ntest") end # form glrm on training dataset train_glrms[ifold] = copy_estimate(glrm) train_glrms[ifold].observed_examples = train_observed_examples train_glrms[ifold].observed_features = train_observed_features # form glrm on testing dataset test_glrms[ifold] = copy_estimate(glrm) test_glrms[ifold].observed_examples = test_observed_examples test_glrms[ifold].observed_features = test_observed_features # evaluate train and test error if verbose println("fitting train GLRM for fold $ifold") end if init != nothing init(train_glrms[ifold]) end fit!(train_glrms[ifold], params, verbose=verbose) if verbose println("computing train and test error for fold $ifold:") end train_error[ifold] = error_fn(train_glrms[ifold], parameter_estimate(train_glrms[ifold])...) / ntrain if verbose println("\ttrain error: $(train_error[ifold])") end test_error[ifold] = error_fn(test_glrms[ifold], parameter_estimate(train_glrms[ifold])...) / ntest if verbose println("\ttest error: $(test_error[ifold])") end end return train_error, test_error, train_glrms, test_glrms end function getfolds(obs::Array{Tuple{Int,Int},1}, nfolds, m, n; ntrials = 5, do_check = true) # partition elements of obs into nfolds groups groups = Array{Int}(undef, size(obs)) rand!(groups, 1:nfolds) # fill an array with random 1 through N # create the training and testing observations for each fold folds = Array{Tuple}(undef, nfolds) for itrial = 1:ntrials enough_observations = 0 for ifold=1:nfolds train = obs[filter(i->groups[i]!=ifold, 1:length(obs))] # all the obs that didn't get the ifold label train_observed_features, train_observed_examples = sort_observations(train,m,n) if !do_check || (check_enough_observations(train_observed_features) && check_enough_observations(train_observed_examples)) enough_observations += 1 else @warn("Not enough data to cross validate; one of the cross validation folds has no observations in one row or column. 
Trying again...") break end test = obs[filter(i->groups[i]==ifold, 1:length(obs))] # all the obs that did test_observed_features, test_observed_examples = sort_observations(test,m,n,check_empty=false) folds[ifold] = (train_observed_features, train_observed_examples, test_observed_features, test_observed_examples) end if enough_observations == nfolds return folds end end error("Not enough data to cross validate automatically.") end function check_enough_observations(observed_examples_or_features) all(map(length, observed_examples_or_features) .> 0) end function get_train_and_test(obs, m, n, holdout_proportion=.1) # generate random uniform number for each observation groups = Array{Float64}(undef, size(obs)) rand!(groups) # create the training and testing observations # observation is in test set if random number < holdout_proportion train = obs[filter(i->(groups[i]>=holdout_proportion), 1:length(obs))] train_observed_features, train_observed_examples = sort_observations(train,m,n) test = obs[filter(i->(groups[i]<holdout_proportion), 1:length(obs))] test_observed_features, test_observed_examples = sort_observations(test,m,n,check_empty=false) return (train_observed_features, train_observed_examples, test_observed_features, test_observed_examples) end function flatten_observations(observed_features::ObsArray) obs = Array{Tuple{Int,Int}}(undef, 0) for (i, features_in_example_i) in enumerate(observed_features) for j in features_in_example_i push!(obs, (i,j)) end end return obs end function flatten(x, y) state = start(x) if state==false push!(y, x) else while !done(x, state) (item, state) = next(x, state) flatten(item, y) end end y end flatten(x::Array{T}) where T=flatten(x,Array(T, 0)) function flattenarray(x, y) if typeof(x)<:Array for xi in x flattenarray(xi, y) end else push!(y, x) end y end flattenarray(x::Array{T}) where T=flattenarray(x,Array(T, 0)) function cv_by_iter(glrm::AbstractGLRM, holdout_proportion=.1, params=Params(100,max_iter=1,abs_tol=.01,min_stepsize=.01), ch = ConvergenceHistory("cv_by_iter"); verbose=true) # obs = flattenarray(map(ijs->map(j->(ijs[1],j),ijs[2]),zip(1:length(glrm.observed_features),glrm.observed_features))) obs = flatten_observations(glrm.observed_features) train_observed_features, train_observed_examples, test_observed_features, test_observed_examples = get_train_and_test(obs, size(glrm.A)..., holdout_proportion) # form glrm on training dataset train_glrm = copy_estimate(glrm) train_glrm.observed_examples = train_observed_examples train_glrm.observed_features = train_observed_features # form glrm on testing dataset test_glrm = copy_estimate(glrm) test_glrm.observed_examples = test_observed_examples test_glrm.observed_features = test_observed_features ntrain = sum(map(length, train_glrm.observed_features)) ntest = sum(map(length, test_glrm.observed_features)) niters = params.max_iter params.max_iter = 1 train_error = Array{Float64}(undef, niters) test_error = Array{Float64}(undef, niters) if verbose @printf("%12s%12s%12s\n", "train error", "test error", "time") t0 = time() end for iter=1:niters # evaluate train and test error fit!(train_glrm, params, ch=ch, verbose=false) train_error[iter] = ch.objective[end] # objective(train_glrm, parameter_estimate(train_glrm)..., include_regularization=false)/ntrain test_error[iter] = objective(test_glrm, parameter_estimate(train_glrm)..., include_regularization=false)/ntest if verbose @printf("%12.4e%12.4e%12.4e\n", train_error[iter], test_error[iter], time() - t0) end end return train_error, test_error end function 
regularization_path(glrm::AbstractGLRM; params=Params(), reg_params=exp10.(range(2,stop=-2,length=5)), holdout_proportion=.1, verbose=true, ch::ConvergenceHistory=ConvergenceHistory("reg_path")) if verbose println("flattening observations") end # obs = flattenarray(map(ijs->map(j->(ijs[1],j),ijs[2]),zip(1:length(glrm.observed_features),glrm.observed_features))) obs = flatten_observations(glrm.observed_features) if verbose println("splitting train and test sets") end train_observed_features, train_observed_examples, test_observed_features, test_observed_examples = get_train_and_test(obs, size(glrm.A)..., holdout_proportion) if verbose println("forming train and test GLRMs") end # form glrm on training dataset train_glrm = copy_estimate(glrm) train_glrm.observed_examples = train_observed_examples train_glrm.observed_features = train_observed_features # form glrm on testing dataset test_glrm = copy_estimate(glrm) test_glrm.observed_examples = test_observed_examples test_glrm.observed_features = test_observed_features return regularization_path(train_glrm, test_glrm; params=params, reg_params=reg_params, verbose=verbose, ch=ch) end # For each value of the regularization parameter, # compute the training error, ie, average error (sum over (i,j) in train_glrm.obs of L_j(A_ij, x_i y_j)) # and the test error, ie, average error (sum over (i,j) in test_glrm.obs of L_j(A_ij, x_i y_j)) function regularization_path(train_glrm::AbstractGLRM, test_glrm::AbstractGLRM; params=Params(), reg_params=exp10.(range(2,stop=-2,length=5)), verbose=true, ch::ConvergenceHistory=ConvergenceHistory("reg_path")) train_error = Array{Float64}(undef, length(reg_params)) test_error = Array{Float64}(undef, length(reg_params)) ntrain = sum(map(length, train_glrm.observed_features)) ntest = sum(map(length, test_glrm.observed_features)) if verbose println("training model on $ntrain samples and testing on $ntest") end @show params train_time = Array{Float64}(undef, length(reg_params)) for iparam=1:length(reg_params) reg_param = reg_params[iparam] # evaluate train and test error if verbose println("fitting train GLRM for reg_param $reg_param") end scale_regularizer!(train_glrm, reg_param) # no need to restart glrm X and Y even if they went to zero at the higher regularization # b/c fit! 
does that automatically fit!(train_glrm, params, ch=ch, verbose=verbose) train_time[iparam] = ch.times[end] if verbose println("computing mean train and test error for reg_param $reg_param:") end train_error[iparam] = objective(train_glrm, parameter_estimate(train_glrm)..., include_regularization=false) / ntrain if verbose println("\ttrain error: $(train_error[iparam])") end test_error[iparam] = objective(test_glrm, parameter_estimate(train_glrm)..., include_regularization=false) / ntest if verbose println("\ttest error: $(test_error[iparam])") end end return train_error, test_error, train_time, reg_params end function precision_at_k(train_glrm::GLRM, test_observed_features; params=Params(), reg_params=exp10.(range(2,stop=-2,length=5)), holdout_proportion=.1, verbose=true, ch::ConvergenceHistory=ConvergenceHistory("reg_path"), kprec=10) m,n = size(train_glrm.A) ntrain = sum(map(length, train_glrm.observed_features)) ntest = sum(map(length, test_observed_features)) train_observed_features = train_glrm.observed_features train_error = Array{Float64}(undef, length(reg_params)) test_error = Array{Float64}(undef, length(reg_params)) prec_at_k = Array{Float64}(undef, length(reg_params)) solution = Array{Tuple{Float64,Float64}}(undef, length(reg_params)) train_time = Array{Float64}(undef, length(reg_params)) test_glrm = GLRM(train_glrm.A, train_glrm.losses, train_glrm.rx, train_glrm.ry, train_glrm.k, X=copy(train_glrm.X), Y=copy(train_glrm.Y), observed_features = test_observed_features) for iparam=1:length(reg_params) reg_param = reg_params[iparam] # evaluate train error if verbose println("fitting train GLRM for reg_param $reg_param") end mul!(train_glrm.rx, reg_param) mul!(train_glrm.ry, reg_param) train_glrm.X, train_glrm.Y = randn(train_glrm.k,m), randn(train_glrm.k,n) # this bypasses the error checking in GLRM(). Risky. X, Y, ch = fit!(train_glrm, params, ch=ch, verbose=verbose) train_time[iparam] = ch.times[end] if verbose println("computing train error and precision at k for reg_param $reg_param:") end train_error[iparam] = objective(train_glrm, X, Y, include_regularization=false) / ntrain if verbose println("\ttrain error: $(train_error[iparam])") end test_error[iparam] = objective(test_glrm, X, Y, include_regularization=false) / ntrain if verbose println("\ttest error: $(test_error[iparam])") end # precision at k XY = X'*Y q = sort(XY[:],rev=true)[ntrain] # the ntest+ntrain largest value in the model XY true_pos = 0; false_pos = 0 kfound = 0 for i=1:m if kfound >= kprec break end for j=1:n if kfound >= kprec break end if XY[i,j] >= q # i predict 1 and (i,j) was in my test set and i observed 1 if j in test_observed_features[i] true_pos += 1 kfound += 1 # i predict 1 and i did not observe a 1 (in either my test *or* train set) elseif !(j in train_observed_features[i]) false_pos += 1 kfound += 1 end end end end prec_at_k[iparam] = true_pos / (true_pos + false_pos) if verbose println("\tprec_at_k: $(prec_at_k[iparam])") end solution[iparam] = (sum(X)+sum(Y), sum(abs.(X))+sum(abs.(Y))) if verbose println("\tsum of solution, one norm of solution: $(solution[iparam])") end end return train_error, test_error, prec_at_k, train_time, reg_params, solution end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2986
# Supported domains: Real, Boolean, Ordinal, Periodic, Count # The purpose of domains is to be able to impute over different possible values of `a` regardless of # the loss that was used in the GLRM. The reason for doing this is to evaluate the performance of GLRMS. # For instance, let's say we use PCA (QuadLoss losses) to model a binary data frame (not the best idea). # In order to override the standard imputation with `impute(QuadLoss(), u)`, which assumes imputation over the reals, # we can use `impute(BoolDomain(), QuadLoss(), u)` and see which of {-1,1} is best. The reason we want to be able to # do this is to compare a baseline model (e.g. PCA) with a more logical model using heterogenous losses, # yet still give each model the same amount of information regarding how imputation should be done. # In order to accomplish this we define a series of domains that tell imputation methods # what values the data can take. The imputation methods are defined in impute_and_err.jl # Domains should be assigned to each column of the data and are not part of the low-rank model itself. # They serve as a way to evaluate the performance of the low-rank model. export Domain, # the abstract type RealDomain, BoolDomain, OrdinalDomain, PeriodicDomain, CountDomain, CategoricalDomain, # the domains copy abstract type Domain end ########################################## REALS ########################################## # Real data can take values from ℜ struct RealDomain<:Domain end ########################################## BOOLS ########################################## # Boolean data should take values from {true, false} struct BoolDomain<:Domain end ########################################## ORDINALS ########################################## # Ordinal data should take integer values ranging from `min` to `max` struct OrdinalDomain<:Domain min::Int max::Int function OrdinalDomain(min, max) if max - min < 2 @warn("The ordinal variable you've created is degenerate: it has only two levels. Consider using a Boolean variable instead; ordinal loss functions may have unexpected behavior on a degenerate ordinal domain.") end return new(min, max) end end ########################################## ORDINALS ########################################## # Categorical data should take integer values ranging from 1 to `max` struct CategoricalDomain<:Domain min::Int max::Int end CategoricalDomain(m::Int) = CategoricalDomain(1,m) ########################################## PERIODIC ########################################## # Periodic data can take values from ℜ, but given a period T, we should have error_metric(a,a+T) = 0 struct PeriodicDomain<:Domain T::Float64 # the period end ########################################## COUNTS ########################################## # Count data can take values over ℕ, which we approximate as {0, 1, 2 ... `max_count`} struct CountDomain<:Domain max_count::Int # the biggest possible count end
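As a concrete editorial illustration of the override described in the comments above (the `impute`/`error_metric` methods themselves live in impute_and_err.jl):

```julia
using LowRankModels

u = 0.3   # a low-rank prediction x_i' y_j from a QuadLoss (PCA-style) fit

impute(RealDomain(), QuadLoss(), u)               # real domain: returns u itself
impute(BoolDomain(), QuadLoss(), u)               # Boolean domain: picks the better of the two levels
error_metric(BoolDomain(), QuadLoss(), u, true)   # misclassification error of that imputation
```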
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
6254
export objective, error_metric, impute, impute_missing ### OBJECTIVE FUNCTION EVALUATION FOR MPCA function objective(glrm::GLRM, X::Array{Float64,2}, Y::Array{Float64,2}, XY::Array{Float64,2}; yidxs = get_yidxs(glrm.losses), # mapping from columns of A to columns of Y; by default, the identity include_regularization=true) m,n = size(glrm.A) @assert(size(XY)==(m,yidxs[end][end])) @assert(size(Y)==(glrm.k,yidxs[end][end])) @assert(size(X)==(glrm.k,m)) err = 0.0 for j=1:n for i in glrm.observed_examples[j] err += evaluate(glrm.losses[j], XY[i,yidxs[j]], glrm.A[i,j]) end end # add regularization penalty if include_regularization err += calc_penalty(glrm,X,Y; yidxs = yidxs) end return err end function row_objective(glrm::AbstractGLRM, i::Int, x::AbstractArray, Y::Array{Float64,2} = glrm.Y; yidxs = get_yidxs(glrm.losses), # mapping from columns of A to columns of Y; by default, the identity include_regularization=true) m,n = size(glrm.A) err = 0.0 XY = x'*Y for j in glrm.observed_features[i] err += evaluate(glrm.losses[j], XY[1,yidxs[j]], glrm.A[i,j]) end # add regularization penalty if include_regularization err += evaluate(glrm.rx[i], x) end return err end function col_objective(glrm::AbstractGLRM, j::Int, y::AbstractArray, X::Array{Float64,2} = glrm.X; include_regularization=true) m,n = size(glrm.A) sz = size(y) if length(sz) == 1 colind = 1 else colind = 1:sz[2] end err = 0.0 XY = X'*y obsex = glrm.observed_examples[j] @inbounds XYj = XY[obsex,colind] @inbounds Aj = convert(Array, glrm.A[obsex,j]) err += evaluate(glrm.losses[j], XYj, Aj) # add regularization penalty if include_regularization err += evaluate(glrm.ry[j], y) end return err end # The user can also pass in X and Y and `objective` will compute XY for them function objective(glrm::GLRM, X::Array{Float64,2}, Y::Array{Float64,2}; sparse=false, include_regularization=true, yidxs = get_yidxs(glrm.losses), kwargs...) @assert(size(Y)==(glrm.k,yidxs[end][end])) @assert(size(X)==(glrm.k,size(glrm.A,1))) XY = Array{Float64}(undef, (size(X,2), size(Y,2))) if sparse # Calculate X'*Y only at observed entries of A m,n = size(glrm.A) err = 0.0 for j=1:n for i in glrm.observed_examples[j] err += evaluate(glrm.losses[j], dot(X[:,i],Y[:,yidxs[j]]), glrm.A[i,j]) end end if include_regularization err += calc_penalty(glrm,X,Y; yidxs = yidxs) end return err else # dense calculation variant (calculate XY up front) gemm!('T','N',1.0,X,Y,0.0,XY) return objective(glrm, X, Y, XY; include_regularization=include_regularization, yidxs = yidxs, kwargs...) end end # Or just the GLRM and `objective` will use glrm.X and .Y objective(glrm::GLRM; kwargs...) = objective(glrm, glrm.X, glrm.Y; kwargs...) 
# For shared arrays # TODO: compute objective in parallel objective(glrm::ShareGLRM, X::SharedArray{Float64,2}, Y::SharedArray{Float64,2}) = objective(glrm, X.s, Y.s) # Helper function to calculate the regularization penalty for X and Y function calc_penalty(glrm::AbstractGLRM, X::Array{Float64,2}, Y::Array{Float64,2}; yidxs = get_yidxs(glrm.losses)) m,n = size(glrm.A) @assert(size(Y)==(glrm.k,yidxs[end][end])) @assert(size(X)==(glrm.k,m)) penalty = 0.0 for i=1:m penalty += evaluate(glrm.rx[i], view(X,:,i)) end for f=1:n penalty += evaluate(glrm.ry[f], view(Y,:,yidxs[f])) end return penalty end ## ERROR METRIC EVALUATION (BASED ON DOMAINS OF THE DATA) function raw_error_metric(glrm::AbstractGLRM, XY::Array{Float64,2}, domains::Array{Domain,1}; yidxs = get_yidxs(glrm.losses)) m,n = size(glrm.A) err = 0.0 for j=1:n for i in glrm.observed_examples[j] err += error_metric(domains[j], glrm.losses[j], XY[i,yidxs[j]], glrm.A[i,j]) end end return err end function std_error_metric(glrm::AbstractGLRM, XY::Array{Float64,2}, domains::Array{Domain,1}; yidxs = get_yidxs(glrm.losses)) m,n = size(glrm.A) err = 0.0 for j=1:n column_mean = 0.0 column_err = 0.0 for i in glrm.observed_examples[j] column_mean += glrm.A[i,j]^2 column_err += error_metric(domains[j], glrm.losses[j], XY[i,yidxs[j]], glrm.A[i,j]) end column_mean = column_mean/length(glrm.observed_examples[j]) if column_mean != 0 column_err = column_err/column_mean end err += column_err end return err end function error_metric(glrm::AbstractGLRM, XY::Array{Float64,2}, domains::Array{Domain,1}; standardize=false, yidxs = get_yidxs(glrm.losses)) m,n = size(glrm.A) @assert(size(XY)==(m,yidxs[end][end])) if standardize return std_error_metric(glrm, XY, domains; yidxs = yidxs) else return raw_error_metric(glrm, XY, domains; yidxs = yidxs) end end # The user can also pass in X and Y and `error_metric` will compute XY for them function error_metric(glrm::AbstractGLRM, X::Array{Float64,2}, Y::Array{Float64,2}, domains::Array{Domain,1}=Domain[l.domain for l in glrm.losses]; kwargs...) XY = Array{Float64}(undef,(size(X,2), size(Y,2))) gemm!('T','N',1.0,X,Y,0.0,XY) error_metric(glrm, XY, domains; kwargs...) end # Or just the GLRM and `error_metric` will use glrm.X and .Y error_metric(glrm::AbstractGLRM, domains::Array{Domain,1}; kwargs...) = error_metric(glrm, glrm.X, glrm.Y, domains; kwargs...) error_metric(glrm::AbstractGLRM; kwargs...) = error_metric(glrm, Domain[l.domain for l in glrm.losses]; kwargs...) # Use impute and errors over GLRMS impute(glrm::AbstractGLRM) = impute(glrm.losses, glrm.X'*glrm.Y) function impute_missing(glrm::AbstractGLRM) Ahat = impute(glrm) for j in 1:size(glrm.A,2) for i in glrm.observed_examples[j] Ahat[i,j] = glrm.A[i,j] end end return Ahat end
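A small editorial sketch of the evaluation helpers defined above, run on a synthetic low-rank matrix (not part of the original file):

```julia
using LowRankModels, Random

Random.seed!(1)
A = randn(30, 2) * randn(2, 20)
glrm = GLRM(A, QuadLoss(), ZeroReg(), ZeroReg(), 2)
fit!(glrm)

@show objective(glrm)      # loss plus (here zero) regularization at the current X, Y
@show error_metric(glrm)   # domain-aware error; squared error for RealDomain
Ahat = impute(glrm)        # per-entry minimizer of each loss over its domain
```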
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1029
export fit, fit!, Params ### PARAMETERS TYPE abstract type AbstractParams end Params(args...; kwargs...) = ProxGradParams(args...; kwargs...) # default in-place fitting uses proximal gradient method function fit!(glrm::AbstractGLRM; kwargs...) kwdict = Dict(kwargs) if :params in keys(kwdict) return fit!(glrm, kwdict[:params]; kwargs...) else if isa(glrm.A,SparseMatrixCSC) # Default to sparse algorithm for a sparse dataset return fit!(glrm, SparseProxGradParams(); kwargs...) else # Classic proximal gradient method for non-sparse data return fit!(glrm, ProxGradParams(); kwargs...) end end end # fit without modifying the glrm object function fit(glrm::AbstractGLRM, args...; kwargs...) X0 = Array{Float64}(undef, size(glrm.X)) Y0 = Array{Float64}(undef, size(glrm.Y)) copy!(X0, glrm.X); copy!(Y0, glrm.Y) X,Y,ch = fit!(glrm, args...; kwargs...) copy!(glrm.X, X0); copy!(glrm.Y, Y0) return X',Y,ch end
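A tiny editorial sketch distinguishing `fit!` (updates the model in place) from `fit` (restores `glrm.X` and `glrm.Y` afterwards and returns the transposed factor, per the source above):

```julia
using LowRankModels, Random

Random.seed!(3)
A = randn(20, 2) * randn(2, 15)
glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 2)

Xt, Y, ch = fit(glrm)    # non-mutating: glrm.X, glrm.Y are left as before; Xt is m × k
X, Y, ch = fit!(glrm)    # in place: glrm.X and glrm.Y now hold the fitted factors
```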
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
8873
# ======================================== # REVIEW THIS IN LIGHT OF NEW DATAFRAMES # ======================================== import Base: isnan import DataFrames: DataFrame, ncol, convert export GLRM, observations, expand_categoricals!, NaNs_to_NAs!, NAs_to_0s!, NaNs_to_Missing!, ismissing_vec include("fit_dataframe_w_type_imputation.jl") probabilistic_losses = Dict{Symbol, Any}( :real => QuadLoss, :bool => LogisticLoss, :ord => MultinomialOrdinalLoss, :cat => MultinomialLoss ) robust_losses = Dict{Symbol, Any}( :real => HuberLoss, :bool => LogisticLoss, :ord => BvSLoss, :cat => OvALoss ) function GLRM(df::DataFrame, k::Int, datatypes::Array{Symbol,1}; loss_map = probabilistic_losses, rx = QuadReg(.01), ry = QuadReg(.01), offset = true, scale = false, prob_scale = true, transform_data_to_numbers = true, NaNs_to_Missing = true) # check input if ncol(df)!=length(datatypes) error("third argument (datatypes) must have one entry for each column of data frame.") end # validate input for dt in datatypes if !(dt in keys(loss_map)) error("data types must be either :real, :bool, :ord, or :cat, not $dt") end end # clean up dataframe if needed A = copy(df) if NaNs_to_Missing NaNs_to_Missing!(A) end # define loss functions for each column losses = Array{Loss}(undef, ncol(A)) for j=1:ncol(df) losstype = loss_map[datatypes[j]] if transform_data_to_numbers map_to_numbers!(A, j, datatypes[j]) end losses[j] = pick_loss(losstype, A[:,j]) end # identify which entries in data frame have been observed (ie are not missing) obs = observations(df) # form model rys = Array{Regularizer}(undef, length(losses)) for i=1:length(losses) if isa(losses[i].domain, OrdinalDomain) && embedding_dim(losses[i])>1 # losses[i], MultinomialOrdinalLoss) || isa(losses[i], OrdisticLoss) rys[i] = OrdinalReg(copy(ry)) else rys[i] = copy(ry) end end glrm = GLRM(A, losses, rx, rys, k, obs=obs, offset=offset, scale=scale) # scale model so it really computes the MAP estimator of the parameters if prob_scale prob_scale!(glrm) end return glrm end ## transform data to numbers function is_number_or_null(x) isa(x, Number) || ismissing(x) # (:value in fieldnames(x) && isa(x.value, Number)) end function is_int_or_null(x) isa(x, Int) || ismissing(x) # (:value in fieldnames(x) && isa(x.value, Int)) end function map_to_numbers!(df, j::Int, datatype::Symbol) # easy case if datatype == :real if all(xi -> is_number_or_null(xi), df[:,j][.!ismissing_vec(df[:,j])]) return df[:,j] else error("column contains non-numerical values") end end # harder cases col = copy(df[:,j]) levels = Set(col[.!ismissing_vec(col)]) if datatype == :bool if length(levels)>2 error("Boolean variable should have at most two levels; instead, got:\n$levels") end colmap = Dict{Any,Int}(zip(sort(collect(levels)), [-1,1][1:length(levels)])) elseif datatype == :cat || datatype == :ord colmap = Dict{Any,Int}(zip(sort(collect(levels)), 1:length(levels))) else error("datatype $datatype not recognized") end m = size(df,1) df[!,j] = Array{Union{Missing, Int},1}(undef, m) for i in 1:length(col) if !ismissing(col[i]) df[i,j] = getval(colmap[col[i]]) end end return df[:,j] end getval(x::Union{T, Nothing}) where T = x.value getval(x::T) where T<:Number = x function map_to_numbers!(df, j::Int, loss::Type{QuadLoss}) if all(xi -> is_number_or_null(xi), df[:,j][!ismissing_vec(df[:,j])]) return df[:,j] else error("column contains non-numerical values") end end function map_to_numbers!(df, j::Int, loss::Type{LogisticLoss}) col = copy(df[:,j]) levels = Set(col[!ismissing_vec(col)]) if length(levels)>2 
error("Boolean variable should have at most two levels") end colmap = Dict{Any,Int}(zip(sort(collect(levels)), [-1,1][1:length(levels)])) df[:,j] = DataArray(Int, length(df[:,j])) for i in 1:length(col) if !ismissing(col[i]) df[i,j] = colmap[col[i]] end end return df[:,j] end function map_to_numbers!(df, j::Int, loss::Type{MultinomialLoss}) col = copy(df[:,j]) levels = Set(col[!ismissing_vec(col)]) colmap = Dict{Any,Int}(zip(sort(collect(levels)), 1:length(levels))) df[:,j] = DataArray(Int, length(df[:,j])) for i in 1:length(col) if !ismissing(col[i]) df[i,j] = colmap[col[i]] end end return df[:,j] end function map_to_numbers!(df, j::Int, loss::Type{MultinomialOrdinalLoss}) col = copy(df[:,j]) levels = Set(col[!ismissing_vec(col)]) colmap = Dict{Any,Int}(zip(sort(collect(levels)), 1:length(levels))) df[:,j] = DataArray(Int, length(df[:,j])) for i in 1:length(col) if !ismissing(col[i]) df[i,j] = colmap[col[i]] end end return df[:,j] end ## sanity check the choice of loss # this default definition could be tighter: only needs to be defined for arguments of types that subtype Loss function pick_loss(l, col) return l() end function pick_loss(l::Type{LogisticLoss}, col) if all(xi -> ismissing(xi) || xi in [-1,1], col) return l() else error("LogisticLoss can only be used on data taking values in {-1, 1}") end end function pick_loss(l::Type{MultinomialLoss}, col) if all(xi -> ismissing(xi) || (is_int_or_null(xi) && xi >= 1), col) return l(maximum(skipmissing(col))) else error("MultinomialLoss can only be used on data taking positive integer values") end end function pick_loss(l::Type{MultinomialOrdinalLoss}, col) if all(xi -> ismissing(xi) || (isa(xi, Int) && xi >= 1), col) return l(maximum(skipmissing(col))) else error("MultinomialOrdinalLoss can only be used on data taking positive integer values") end end observations(da::Array{Union{T, Missing}}) where T = df_observations(da) observations(df::DataFrame) = df_observations(df) # isnan -> ismissing function df_observations(da) obs = Tuple{Int, Int}[] m,n = size(da) for j=1:n # follow column-major order. First element of index in innermost loop for i=1:m if !ismissing(da[i,j]) push!(obs,(i,j)) end end end return obs end # TODO.. 
Missings in the data frame will be replaced by the number `z` function df2array(df::DataFrame, z::Number) A = zeros(size(df)) for i=1:size(A,2) if issubtype(typeof(df[:,i]), Array) A[:,i] = df[:,i] elseif typeof(df[i]) == Bool A[:,i] = convert(Array, (2*df[i]-1), z) else A[:,i] = convert(Array, df[i], z) end end return A end df2array(df::DataFrame) = df2array(df, 0) # expand categorical columns, given as column indices, into one boolean column for each level function expand_categoricals!(df::DataFrame,categoricals::Array{Int,1}) # map from names to indices; not used: categoricalidxs = map(y->df.colindex[y], categoricals) # create one boolean column for each level of categorical column colnames = names(df) for col in categoricals levels = sort(unique(df[:,col])) for level in levels if !ismissing(level) colname = Symbol(string(colnames[col])*"="*string(level)) df[colname] = (df[:,col] .== level) end end end # remove the original categorical columns for cat in sort(categoricals, rev=true) delete!(df, cat) end return df end function expand_categoricals!(df::DataFrame,categoricals::UnitRange{Int}) expand_categoricals!(df, Int[i for i in categoricals]) end # expand categoricals given as names of columns rather than column indices function expand_categoricals!(df::DataFrame,categoricals::Array) # map from names to indices categoricalidxs = map(y->df.colindex[y], categoricals) return expand_categoricals!(df, categoricalidxs) end # convert NaNs to NAs # isnan(x::NAtype) = false isnan(x::AbstractString) = false isnan(x::Union{T, Nothing}) where T = isnan(x.value) # same functionality as above. function NaNs_to_Missing!(df::DataFrame) m,n = size(df) for j=1:n df[!,j] = [ismissing(df[i,j]) || isnan(df[i,j]) ? missing : value for (i,value) in enumerate(df[:,j])]; end return df end ismissing_vec(V::AbstractArray) = Bool[ismissing(x) for x in V[:]]
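A compact editorial sketch of the DataFrame front end defined above, using a tiny synthetic table (column names and values are illustrative only):

```julia
using LowRankModels, DataFrames

df = DataFrame(height = [1.60, 1.70, 1.80, 1.75],    # :real -> QuadLoss
               smoker = [true, false, false, true],   # :bool -> LogisticLoss
               rating = [1, 3, 2, 3])                 # :ord  -> MultinomialOrdinalLoss

glrm = GLRM(df, 2, [:real, :bool, :ord])
X, Y, ch = fit!(glrm)
```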
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
4829
import Base: isnan import DataFrames: DataFrame, ncol, convert export GLRM # TODO: identify categoricals automatically from PooledDataArray columns default_real_loss = HuberLoss default_bool_loss = LogisticLoss default_ord_loss = MultinomialOrdinalLoss function GLRM(df::DataFrame, k::Int; losses = Loss[], rx = QuadReg(.01), ry = QuadReg(.01), offset = true, scale = false, prob_scale = true, NaNs_to_Missing = true) if NaNs_to_Missing df = copy(df) NaNs_to_Missing!(df) end if losses == Loss[] # if losses not specified, identify ordinal, boolean and real columns # change the wal get_reals, etc work. #reals, real_losses = get_reals(df) #bools, bool_losses = get_bools(df) #ordinals, ordinal_losses = get_ordinals(df) #easier to use just one function for this usecase. reals, real_losses, bools, bool_losses, ordinals, ordinal_losses = get_loss_types(df) A = [df[:,reals] df[:,bools] df[:,ordinals]] labels = [names(df)[reals]; names(df)[bools]; names(df)[ordinals]] losses = [real_losses; bool_losses; ordinal_losses] else # otherwise require one loss function per column A = df ncol(df)==length(losses) ? labels = names(df) : error("please input one loss per column of dataframe") end # identify which entries in data frame have been observed (ie are not N/A) obs = observations(A) # initialize X and Y X = randn(k,size(A,1)) Y = randn(k,embedding_dim(losses)) # form model rys = Array{Regularizer}(undef, length(losses)) for i=1:length(losses) if isa(losses[i].domain, OrdinalDomain) && embedding_dim(losses[i])>1 #losses[i], MultinomialOrdinalLoss) || isa(losses[i], OrdisticLoss) rys[i] = OrdinalReg(copy(ry)) else rys[i] = copy(ry) end end glrm = GLRM(A, losses, rx, rys, k, obs=obs, X=X, Y=Y, offset=offset, scale=scale) # scale model so it really computes the MAP estimator of the parameters if prob_scale prob_scale!(glrm) end return glrm, labels end function get_loss_types(df::DataFrame) m,n = size(df) reals = fill(false,n) bools = fill(false,n) ordinals = fill(false,n) for j in 1:n # assuming there are no columns with *all* values missing. (which would make it a non-informative column) t = eltype(collect(skipmissing(df[:,j]))[1]) if(t == Float64) reals[j] = true elseif (t == Bool) bools[j] = true elseif (t == Int) || (t == Int32) || (t == Int64) ordinals[j] = true end end n1 = sum(reals) real_losses = Array{Loss}(undef, n1) for i=1:n1 real_losses[i] = default_real_loss() end n2 = sum(bools) bool_losses = Array{Loss}(undef, n2) for i in 1:n2 bool_losses[i] = default_bool_loss() end n3 = sum(ordinals) ord_idx = (1:size(df,2))[ordinals] maxs = zeros(n3,1) mins = zeros(n3,1) for j in 1:n3 col = df[:,ord_idx[j]] try maxs[j] = maximum(skipmissing(col)) mins[j] = minimum(skipmissing(col)) catch nothing end end # set losses and regularizers ord_losses = Array{Loss}(undef, n3) for i=1:n3 ord_losses[i] = default_ord_loss(Int(maxs[i])) end return reals,real_losses,bools,bool_losses,ordinals,ord_losses end function get_reals(df::DataFrame) m,n = size(df) reals = [typeof(df[:,i])<:AbstractArray{Float64,1} for i in 1:n] n1 = sum(reals) losses = Array{Loss}(undef, n1) for i=1:n1 losses[i] = default_real_loss() end return reals, losses end function get_bools(df::DataFrame) m,n = size(df) bools = [isa(df[:,i], AbstractArray{Bool,1}) for i in 1:n] n1 = sum(bools) losses = Array{Loss}(undef, n1) for i=1:n1 losses[i] = default_bool_loss() end return bools, losses end function get_ordinals(df::DataFrame) m,n = size(df) # there must be a better way to check types... 
ordinals = [(isa(df[:,i], AbstractArray{Int,1}) || isa(df[:,i], AbstractArray{Int32,1}) || isa(df[:,i], AbstractArray{Int64,1})) for i in 1:n] nord = sum(ordinals) ord_idx = (1:size(df,2))[ordinals] maxs = zeros(nord,1) mins = zeros(nord,1) for i in 1:nord col = df[:,ord_idx[i]] try maxs[i] = maximum(dropmissing(col)) mins[i] = minimum(dropmissing(col)) catch nothing end end # set losses and regularizers losses = Array{Loss}(undef, nord) for i=1:nord losses[i] = default_ord_loss(Int(maxs[i])) end return ordinals, losses end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
4510
import LinearAlgebra: size, axpy! import LinearAlgebra.BLAS: gemm! abstract type AbstractGLRM end export AbstractGLRM, GLRM, getindex, size, scale_regularizer! const ObsArray = Union{Array{Array{Int,1},1}, Array{UnitRange{Int},1}} ### GLRM TYPE mutable struct GLRM<:AbstractGLRM A # The data table losses::Array{Loss,1} # array of loss functions rx::Array{Regularizer,1} # Array of regularizers to be applied to each column of X ry::Array{Regularizer,1} # Array of regularizers to be applied to each column of Y k::Int # Desired rank observed_features::ObsArray # for each example, an array telling which features were observed observed_examples::ObsArray # for each feature, an array telling in which examples the feature was observed X::AbstractArray{Float64,2} # Representation of data in low-rank space. A ≈ X'Y Y::AbstractArray{Float64,2} # Representation of features in low-rank space. A ≈ X'Y end # usage notes: # * providing argument `obs` overwrites arguments `observed_features` and `observed_examples` # * offset and scale are *false* by default to avoid unexpected behavior # * convenience methods for calling are defined in utilities/conveniencemethods.jl function GLRM(A, losses::Array, rx::Array, ry::Array, k::Int; # the following tighter definition fails when you form an array of a tighter subtype than the abstract type, eg Array{QuadLoss,1} # function GLRM(A::AbstractArray, losses::Array{Loss,1}, rx::Array{Regularizer,1}, ry::Array{Regularizer,1}, k::Int; X = randn(k,size(A,1)), Y = randn(k,embedding_dim(losses)), obs = nothing, # [(i₁,j₁), (i₂,j₂), ... (iₒ,jₒ)] observed_features = fill(1:size(A,2), size(A,1)), # [1:n, 1:n, ... 1:n] m times observed_examples = fill(1:size(A,1), size(A,2)), # [1:m, 1:m, ... 1:m] n times offset = false, scale = false, checknan = true, sparse_na = true) # Check dimensions of the arguments m,n = size(A) if length(losses)!=n error("There must be as many losses as there are columns in the data matrix") end if length(rx)!=m error("There must be either one X regularizer or as many X regularizers as there are rows in the data matrix") end if length(ry)!=n error("There must be either one Y regularizer or as many Y regularizers as there are columns in the data matrix") end if size(X)!=(k,m) error("X must be of size (k,m) where m is the number of rows in the data matrix. This is the transpose of the standard notation used in the paper, but it makes for better memory management. \nsize(X) = $(size(X)), size(A) = $(size(A)), k = $k") end if size(Y)!=(k,embedding_dim(losses)) error("Y must be of size (k,d) where d is the sum of the embedding dimensions of all the losses. 
\n(1 for real-valued losses, and the number of categories for categorical losses).") end # Determine observed entries of data if obs==nothing && sparse_na && isa(A,SparseMatrixCSC) obs = findall(!iszero, A) # observed indices (list of CartesianIndices) end if obs==nothing # if no specified array of tuples, use what was explicitly passed in or the defaults (all) # println("no obs given, using observed_features and observed_examples") glrm = GLRM(A,losses,rx,ry,k, observed_features, observed_examples, X,Y) else # otherwise unpack the tuple list into arrays # println("unpacking obs into array") glrm = GLRM(A,losses,rx,ry,k, sort_observations(obs,size(A)...)..., X,Y) end # check to make sure X is properly oriented if size(glrm.X) != (k, size(A,1)) # println("transposing X") glrm.X = glrm.X' end # check none of the observations are NaN if checknan for i=1:size(A,1) for j=glrm.observed_features[i] if isnan(A[i,j]) error("Observed value in entry ($i, $j) is NaN.") end end end end if scale # scale losses (and regularizers) so they all have equal variance equilibrate_variance!(glrm) end if offset # don't penalize the offset of the columns add_offset!(glrm) end return glrm end parameter_estimate(glrm::GLRM) = (glrm.X, glrm.Y) function scale_regularizer!(glrm::GLRM, newscale::Number) mul!(glrm.rx, newscale) mul!(glrm.ry, newscale) return glrm end
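An editorial sketch of the `obs` keyword described in the usage notes above, followed by `scale_regularizer!` to change the regularization strength on the same model:

```julia
using LowRankModels, Random

Random.seed!(0)
A = randn(10, 8)
# Keep roughly 80% of the entries; `obs` overrides observed_features/observed_examples
obs = [(i, j) for i in 1:10 for j in 1:8 if rand() < 0.8]

glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 2, obs = obs)
fit!(glrm)

scale_regularizer!(glrm, 10.0)   # set the scale of rx and ry to 10.0, then refit
fit!(glrm)
```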
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
8058
# Supported domains: Real, Boolean, Ordinal, Periodic, Count # The purpose of domains is to be able to impute over different possible values of `a` regardless of # the loss that was used in the GLRM. The reason for doing this is to evaluate the performance of GLRMS. # For instance, let's say we use PCA (QuadLoss losses) to model a binary data frame (not the best idea). # In order to override the standard imputation with `impute(QuadLoss(), u)`, which assumes imputation over the reals, # we can use `impute(BoolDomain(), QuadLoss(), u)` and see which of {-1,1} is best. The reason we want to be able to # do this is to compare a baseline model (e.g. PCA) with a more logical model using heterogenous losses, # yet still give each model the same amount of information regarding how imputation should be done. # The domains themselves are defined in domains.jl # In order to accomplish this we define a series of domains that describe how imputation should be performed over # them. Each combination of domain and loss must have the following: # Methods: # `impute(D::my_Domain, l::my_loss_type, u::Float64) ::Float64` # Imputes aᵤ = argmin l(u,a) over the range of possible values of a. The range of # possible values of a should be implicitly or explicitly provided by `D`. # There should be an impute method for every combination of datatype and loss. # `error_metric(D::my_Domain, l::my_loss_type, u::Float64, a::Number) ::Float64` # First calls aᵤ = impute(l,u), then uses the type of `my_D` to pick a # good measure of error- either 1-0 misclassification or squared difference. # DataTypes are assigned to each column of the data and are not part of the low-rank model itself, they just serve # as a way to evaluate the performance of the low-rank model. export impute, error_metric, errors # function for general use roundcutoff(x,a::T,b::T) where T<:Number = T(min(max(round(x),a),b)) # Error metrics for general use squared_error(a_imputed::Number, a::Number) = (a_imputed-a)^2 misclassification(a_imputed::T, a::T) where T = float(!(a_imputed==a)) # return 0.0 if equal, 1.0 else # use the default loss domain imputation if no domain provided impute(l::Loss, u::Float64) = impute(l.domain, l, u) ########################################## REALS ########################################## # Real data can take values from ℜ impute(D::RealDomain, l::DiffLoss, u::Float64) = u # by the properties of any DiffLoss impute(D::RealDomain, l::PoissonLoss, u::Float64) = exp(u) impute(D::RealDomain, l::OrdinalHingeLoss, u::Float64) = roundcutoff(u, l.min, l.max) impute(D::RealDomain, l::LogisticLoss, u::Float64) = error("Logistic loss always imputes either +∞ or -∞ given a∈ℜ") function impute(D::RealDomain, l::WeightedHingeLoss, u::Float64) @warn("It doesn't make sense to use HingeLoss to impute data that can take values in ℜ") 1/u end function error_metric(D::RealDomain, l::Loss, u::Float64, a::Number) a_imputed = impute(D, l, u) squared_error(a_imputed, a) end ########################################## BOOLS ########################################## # Boolean data should take values from {-1,1} # sign of u impute(D::BoolDomain, l::ClassificationLoss, u::Float64) = u>=0 ? true : false # Evaluate w/ a=-1 and a=1 and see which is better according to that loss. # This is fast and works for any loss. impute(D::BoolDomain, l::Loss, u::Float64) = evaluate(l,u,false)<evaluate(l,u,true) ? 
false : true function error_metric(D::BoolDomain, l::Loss, u::Float64, a::Number) a_imputed = impute(D, l, u) misclassification(a_imputed, a) end ########################################## ORDINALS ########################################## # Ordinal data should take integer values ranging from `min` to `max` impute(D::OrdinalDomain, l::DiffLoss, u::Float64) = roundcutoff(u, D.min, D.max) impute(D::OrdinalDomain, l::PoissonLoss, u::Float64) = roundcutoff(exp(u), D.min , D.max) impute(D::OrdinalDomain, l::OrdinalHingeLoss, u::Float64) = roundcutoff(u, D.min, D.max) impute(D::OrdinalDomain, l::LogisticLoss, u::Float64) = u>0 ? D.max : D.min function impute(D::OrdinalDomain, l::WeightedHingeLoss, u::Float64) @warn("It doesn't make sense to use HingeLoss to impute ordinals") a_imputed = (u>0 ? ceil(1/u) : floor(1/u)) roundcutoff(a_imputed, D.min, D.max) end impute(D::OrdinalDomain, l::OrdisticLoss, u::AbstractArray) = argmin(u.^2) # MultinomialOrdinalLoss # l(u, a) = -log(p(u, a)) # = u[1] + ... + u[a-1] - u[a] - ... - u[end] + # log(sum_{a'}(exp(u[1] + ... + u[a'-1] - u[a'] - ... - u[end]))) # # so given u, # the most probable value a is the index of the first # positive entry of u function impute(D::OrdinalDomain, l::MultinomialOrdinalLoss, u::AbstractArray) enforce_MNLOrdRules!(u) eu = exp.(u) p = [1-eu[1], -diff(eu)..., eu[end]] return argmax(p) end # generic method function impute(D::OrdinalDomain, l::Loss, u::AbstractArray) (D.min:D.max)[argmin([evaluate(l, u, i) for i in D.min:D.max])] end function error_metric(D::OrdinalDomain, l::Loss, u::Float64, a::Number) a_imputed = impute(D, l, u) squared_error(a_imputed, a) end ########################################## CATEGORICALS ########################################## # Categorical data should take integer values ranging from 1 to `max` impute(D::CategoricalDomain, l::MultinomialLoss, u::Array{Float64}) = argmax(u) impute(D::CategoricalDomain, l::OvALoss, u::Array{Float64}) = argmax(u) function error_metric(D::CategoricalDomain, l::Loss, u::Array{Float64}, a::Number) a_imputed = impute(D, l, u) misclassification(a_imputed, a) end ########################################## PERIODIC ########################################## # Periodic data can take values from ℜ, but given a period T, we should have error_metric(a,a+T) = 0 # Since periodic data can take any real value, we can use the real-valued imputation methods impute(D::PeriodicDomain, l::Loss, u::Float64) = impute(RealDomain(), l, u) # When imputing a periodic variable, we restrict ourselves to the domain [0,T] pos_mod(T::Float64, x::Float64) = x>0 ? x%T : (x%T)+T # takes a value and finds its equivalent positive modulus function error_metric(D::PeriodicDomain, l::Loss, u::Float64, a::Number) a_imputed = impute(D, l, u) # remap both a and a_imputed to [0,T] to check for a ≡ a_imputed squared_error(pos_mod(D.T,a_imputed), pos_mod(D.T,a)) end ########################################## COUNTS ########################################## # Count data can take values over ℕ, which we approximate as {0, 1, 2 ... 
`max_count`} # Our approximation of ℕ is really an ordinal impute(D::CountDomain, l::Loss, u::Float64) = impute(OrdinalDomain(0,D.max_count), l, u) function error_metric(D::CountDomain, l::Loss, u::Float64, a::Number) a_imputed = impute(D, l, u) squared_error(a_imputed, a) end #################################################################################### # Use impute and error_metric over arrays function impute(domains::Array{DomainSubtype,1}, losses::Array{LossSubtype,1}, U::AbstractArray) where {DomainSubtype<:Domain, LossSubtype<:Loss} m = size(U,1) n = length(losses) yidxs = get_yidxs(losses) A_imputed = Array{Number}(undef, (m, n)); for f in 1:n for i in 1:m if length(yidxs[f]) > 1 A_imputed[i,f] = impute(domains[f], losses[f], vec(U[i,yidxs[f]])) else A_imputed[i,f] = impute(domains[f], losses[f], U[i,yidxs[f]]) end end end return A_imputed end function impute(losses::Array{LossSubtype,1}, U::AbstractArray) where LossSubtype<:Loss domains = Domain[l.domain for l in losses] impute(domains, losses, U) end function impute(loss::LossSubtype, U::AbstractArray) where LossSubtype<:Loss impute(Loss[loss], U) end function errors(domains::Array{Domain,1}, losses::Array{Loss,1}, U::AbstractArray, A::AbstractArray ) err = zeros(size(A)) m,n = size(A) for j in 1:n for i in 1:m err[i,j] = error_metric(domains[j], losses[j], U[i,j], A[i,j]) end end return err end
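A small, hypothetical example of the imputation and error helpers above, using the Boolean-domain override discussed in the header comment; the value of `u` is arbitrary.

```julia
# Impute a Boolean value from a quadratic-loss estimate and score it.
u = 0.3                                                   # low-rank estimate of the entry
a_hat = impute(BoolDomain(), QuadLoss(), u)               # picks the better of {false, true}
err   = error_metric(BoolDomain(), QuadLoss(), u, true)   # misclassification error (1.0 here)
```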
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
5136
import StatsBase: sample, wsample export init_kmeanspp!, init_svd!, init_nndsvd! import Arpack: svds # kmeans++ initialization, but with missing data # we make sure never to look at "unobserved" entries in A # so that models can be honestly cross validated, for example function init_kmeanspp!(glrm::GLRM) m,n = size(glrm.A) k = glrm.k possible_centers = Set(1:m) glrm.Y = randn(k,n) # assign first center randomly i = sample(1:m) setdiff!(possible_centers, i) glrm.Y[1,glrm.observed_features[i]] = Array(glrm.A[i,glrm.observed_features[i]]) # assign next centers one by one for l=1:k-1 min_dists_per_obs = zeros(m) for i in possible_centers d = zeros(l) for j in glrm.observed_features[i] for ll=1:l d[ll] += evaluate(glrm.losses[j], glrm.Y[ll,j], glrm.A[i,j]) end end min_dists_per_obs[i] = minimum(d)/length(glrm.observed_features[i]) end furthest_index = wsample(1:m,min_dists_per_obs) glrm.Y[l+1,glrm.observed_features[furthest_index]] = glrm.A[furthest_index,glrm.observed_features[furthest_index]] end return glrm end function init_svd!(glrm::GLRM; offset=true, scale=true, TOL = 1e-10) # only offset if the glrm model is offset offset = offset && typeof(glrm.rx) == lastentry1 # only scale if we also offset scale = scale && offset m,n = size(glrm.A) k = glrm.k # find spans of loss functions (for multidimensional losses) yidxs = get_yidxs(glrm.losses) d = maximum(yidxs[end]) # create a matrix representation of A with the same dimensions as X*Y # by expanding out all data types with embedding dimension greater than 1 if all(map(length, yidxs) .== 1) Areal = glrm.A # save time, but in this case we'll still have a DataFrame else Areal = zeros(m, d) for f=1:n if length(yidxs[f]) == 1 Areal[glrm.observed_examples[f], yidxs[f]] = glrm.A[glrm.observed_examples[f], f] else if isa(glrm.losses[f].domain, CategoricalDomain) levels = datalevels(glrm.losses[f]) for e in glrm.observed_examples[f] for ilevel in 1:length(levels) Areal[e, yidxs[f][ilevel]] = (glrm.A[e, f] == levels[ilevel] ? 1 : -1) end end elseif isa(glrm.losses[f].domain, OrdinalDomain) embed_dim = embedding_dim(glrm.losses[f]) mymean = mean(glrm.A[glrm.observed_examples[f], f]) levels = datalevels(glrm.losses[f]) for e in glrm.observed_examples[f] for ilevel in 1:(length(levels)-1) Areal[e, yidxs[f][ilevel]] = (glrm.A[e, f] > levels[ilevel] ? 1 : -1) end end else error("No default mapping to real valued matrix for domains of type $typeof(glrm.losses[f].domain)") end end end end # standardize A, respecting missing values means = zeros(d) stds = zeros(d) Astd = zeros(m, d) for f in 1:n for j in yidxs[f] nomissing = Areal[glrm.observed_examples[f],j] means[j] = mean(nomissing) if isnan(means[j]) means[j] = 1 end stds[j] = std(nomissing) if stds[j] < TOL || isnan(stds[j]) stds[j] = 1 end Astd[glrm.observed_examples[f],j] = Areal[glrm.observed_examples[f],j] .- means[j] end end if offset k -= 1 glrm.X[end,:] = 1 glrm.Y[end,:] = means if scale Astd = Astd ./ stds end if k <= 0 @warn("Using an offset on a rank 1 model fits *only* the offset. 
To fit an offset + 1 low rank component, use k=2.") return glrm end end # options for rescaling: # 1) scale Astd so its mean is the same as the mean of the observations Astd *= m*n/sum(map(length, glrm.observed_features)) # 2) scale columns inversely proportional to number of entries in them & so that column mean is same as mean of observations in it # intuition: noise in a dense column is low rank, so downweight dense columns # Astd *= diagm(m./map(length, glrm.observed_examples)) # 3) scale columns proportional to scale of regularizer & so that column mean is same as mean of observations in it # Astd *= diagm(m./map(scale, glrm.ry)) # ASVD = rsvd(Astd, k) - slower than built-in svds, and fails for sparse matrices ASVD = svds(Astd, nsv = k)[1] # initialize with the top k components of the SVD, # rescaling by the variances @assert(size(glrm.X, 1) >= k) @assert(size(glrm.X, 2) >= m) @assert(size(glrm.Y, 1) >= k) @assert(size(glrm.Y, 2) >= d) glrm.X[1:k,1:m] = Diagonal(sqrt.(ASVD.S))*ASVD.U' # recall X is transposed as per column major order. glrm.Y[1:k,1:d] = Diagonal(sqrt.(ASVD.S))*ASVD.Vt*Diagonal(stds) return glrm end include("initialize_nmf.jl")
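A hypothetical call to the initializers above, assuming `glrm` was constructed as in glrms.jl; both routines overwrite `glrm.X` and `glrm.Y` in place before fitting.

```julia
init_svd!(glrm)         # seed X, Y from a truncated SVD of the (standardized) observed data
# init_kmeanspp!(glrm)  # alternative: kmeans++-style seeding of Y from observed rows
```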
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1395
import NMF.nndsvd function init_nndsvd!(glrm::GLRM; scale::Bool=true, zeroh::Bool=false, variant::Symbol=:std, max_iters::Int=0) # NNDSVD initialization: # Boutsidis C, Gallopoulos E (2007). SVD based initialization: A head # start for nonnegative matrix factorization. Pattern Recognition m,n = size(glrm.A) # only initialize based on observed entries A_init = zeros(m,n) for i = 1:n A_init[glrm.observed_examples[i],i] = glrm.A[glrm.observed_examples[i],i] end # scale all columns by the Loss.scale parameter if scale for i = 1:n A_init[:,i] .*= glrm.losses[i].scale end end # run the first nndsvd initialization W,H = nndsvd(A_init, glrm.k, zeroh=zeroh, variant=variant) glrm.X = W' glrm.Y = H # If max_iters>0 do a soft impute for the missing entries of A. # Iterate: Estimate missing entries of A with W*H # Update (W,H) nndsvd estimate based on new A for iter = 1:max_iters # Update missing entries of A_init for j = 1:n for i = setdiff(1:m,glrm.observed_examples[j]) A_init[i,j] = dot(glrm.X[:,i],glrm.Y[:,j]) end end # Re-estimate W and H W,H = nndsvd(A_init, glrm.k, zeroh=zeroh, variant=variant) glrm.X = W' glrm.Y = H end end
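A hypothetical call to the NNDSVD initializer above, assuming a nonnegative model `glrm`; the keywords simply restate the defaults in the signature, plus a few rounds of soft imputation of the unobserved entries.

```julia
init_nndsvd!(glrm; scale=true, zeroh=false, variant=:std, max_iters=5)
```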
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
24825
# Predefined loss functions # You may also implement your own loss by subtyping the abstract type Loss. # # Losses must have the following: # Fields: # `scale::Float64` # This field represents a scalar weight assigned to the loss function: w*l(u,a) # `domain::natural_Domain` # The "natural" domain that the loss function was meant to handle. E.g. BoolDomain for LogisticLoss, # RealDomain for QuadLoss, etc. # Other fields may be also be included to encode parameters of the loss function, encode the range or # set of possible values of the data, etc. # # Methods: # `my_loss_type(args..., scale=1.0::Float64; # domain=natural_Domain(args[range]...), kwargs...) ::my_loss_type` # Constructor for the loss type. The first few arguments are parameters for # which there isn't a rational default (a loss may not need any of these). # The last positional argument should be the scale, which should default to 1. # There must be a default domain which is a Domain, which may take arguments from # the list of positional arguments. Parameters besides the scale for which there are # reasonable defaults should be included as keyword arguments (there may be none). # `evaluate(l::my_loss_type, u::Float64, a::Number) ::Float64` # Evaluates the function l(u,a) where u is the approximation of a # `grad(l::my_loss_type, u::Float64, a::Number) ::Float64` # Evaluates the gradient of the loss at the given point (u,a) # In addition, loss functions should preferably implement methods: # `M_estimator(l::my_loss_type, a::AbstractArray) ::Float64` # Finds uₒ = argmin ∑l(u,aᵢ) which is the best single estimate of the array `a` # If `M_estimator` is not implemented, a live optimization procedure will be used when this function is # called in order to compute loss function scalings. The live optimization may be slow, so an analytic # implementation is preferable. # `impute(d::Domain, l::my_loss_type, u::Array{Float64})` (in impute_and_err.jl) # Finds a = argmin l(u,a), the most likely value for an observation given a parameter u import Base: *, convert import Optim: optimize, LBFGS export Loss, DiffLoss, ClassificationLoss, SingleDimLoss, # categories of Losses QuadLoss, L1Loss, HuberLoss, QuantileLoss, # losses for predicting reals PoissonLoss, # losses for predicting integers HingeLoss, WeightedHingeLoss, LogisticLoss, # losses for predicting booleans OrdinalHingeLoss, OrdisticLoss, MultinomialOrdinalLoss, BvSLoss, # losses for predicting ordinals MultinomialLoss, OvALoss, # losses for predicting nominals (categoricals) PeriodicLoss, # losses for predicting periodic variables evaluate, grad, M_estimator, # methods on losses avgerror, scale, mul!, *, embedding_dim, get_yidxs, datalevels, domain abstract type Loss end # a DiffLoss is one in which l(u,a) = f(u-a) AND argmin f(x) = 0 # for example, QuadLoss(u,a)=(u-a)² and we can write f(x)=x² and x=u-a abstract type DiffLoss<:Loss end # a ClassificationLoss is one in which observed values are true = 1 or false = 0 = -1 AND argmin_a L(u,a) = u>=0 ? 
true : false abstract type ClassificationLoss<:Loss end # Single Dimensional losses are DiffLosses or ClassificationLosses, which allow optimized evaluate and grad functions const SingleDimLoss = Union{DiffLoss, ClassificationLoss} mul!(l::Loss, newscale::Number) = (l.scale = newscale; l) scale(l::Loss) = l.scale *(newscale::Number, l::Loss) = (newl = copy(l); mul!(newl, newscale)) *(l::Loss, newscale::Number) = (newl = copy(l); mul!(newl, newscale)) domain(l::Loss) = l.domain ### embedding dimensions: mappings from losses/columns of A to columns of Y # default number of columns # number of columns is higher for multidimensional losses embedding_dim(l::Loss) = 1 embedding_dim(l::Array{LossSubtype,1}) where LossSubtype<:Loss = sum(map(embedding_dim, l)) # find spans of loss functions (for multidimensional losses) function get_yidxs(losses::Array{LossSubtype,1}) where LossSubtype<:Loss n = length(losses) ds = map(embedding_dim, losses) d = sum(ds) featurestartidxs = cumsum(append!([1], ds)) # find which columns of Y map to which columns of A (for multidimensional losses) U = Union{UnitRange{Int}, Int} yidxs = Array{U}(undef, n) for f = 1:n if ds[f] == 1 yidxs[f] = featurestartidxs[f] else yidxs[f] = featurestartidxs[f]:featurestartidxs[f]+ds[f]-1 end end return yidxs end ### promote integers to floats if given as the argument u ## causes ambiguity warnings # evaluate(l::Loss, u::Number, a) = evaluate(l,convert(Float64,u),a) # grad(l::Loss, u::Number, a) = grad(l,convert(Float64,u),a) # evaluate{T<:Number}(l::Loss, u::Array{T,1}, a) = evaluate(l,convert(Array{Float64,1},u),a) # grad{T<:Number}(l::Loss, u::Array{T,1}, a) = grad(l,convert(Array{Float64,1},u),a) ### -1,0,1::Int are translated to Booleans if loss is not defined on numbers # convert(::Type{Bool}, x::Int) = x==1 ? true : (x==-1 || x==0) ? false : throw(InexactError("Bool method successfully overloaded by LowRankModels")) myBool(x::Int) = x==1 ? true : (x==-1 || x==0) ? false : throw(InexactError()) evaluate(l::ClassificationLoss, u::Real, a::Int) = evaluate(l,u,myBool(a)) grad(l::ClassificationLoss, u::Real, a::Int) = grad(l,u,myBool(a)) M_estimator(l::ClassificationLoss, a::AbstractArray{Int,1}) = M_estimator(l,myBool(a)) ### M-estimators # The following is the M-estimator for loss functions that don't have one defined. It's also useful # for checking that the analytic M_estimators are correct. To make sure this method is called instead # of the loss-specific method (should only be done to test), simply pass the third paramter `test`. # e.g. M_estimator(l,a) will call the implementation for l, but M_estimator(l,a,"test") will call the # general-purpose optimizing M_estimator. 
function M_estimator(l::Loss, a::AbstractArray; test="test") # the function to optimize over f = (u -> sum(map(ai->evaluate(l,u[1],ai), a))) # u is indexed because `optim` assumes input is a vector # the gradient of that function function g!(storage::Vector, u::Vector) # this is the format `optim` expects storage[1] = sum(map(ai->grad(l,u[1],ai), a)) end m = optimize(f, g!, [median(a)], LBFGS()).minimum[1] end # Uses uₒ = argmin ∑l(u,aᵢ) to find (1/n)*∑l(uₒ,aᵢ) which is the # average error incurred by using the estimate uₒ for every aᵢ function avgerror(l::Loss, a::AbstractArray) b = collect(skipmissing(a)) m = M_estimator(l,b) sum(map(ai->evaluate(l,m,ai),b))/length(b) end ## Losses: ########################################## QUADRATIC ########################################## # f: ℜxℜ -> ℜ mutable struct QuadLoss<:DiffLoss scale::Float64 domain::Domain end QuadLoss(scale=1.0::Float64; domain=RealDomain()) = QuadLoss(scale, domain) evaluate(l::QuadLoss, u::Real, a::Number) = l.scale*(u-a)^2 grad(l::QuadLoss, u::Real, a::Number) = 2*(u-a)*l.scale M_estimator(l::QuadLoss, a::AbstractArray) = mean(a) ########################################## L1 ########################################## # f: ℜxℜ -> ℜ mutable struct L1Loss<:DiffLoss scale::Float64 domain::Domain end L1Loss(scale=1.0::Float64; domain=RealDomain()) = L1Loss(scale, domain) evaluate(l::L1Loss, u::Real, a::Number) = l.scale*abs(u-a) grad(l::L1Loss, u::Real, a::Number) = sign(u-a)*l.scale M_estimator(l::L1Loss, a::AbstractArray) = median(a) ########################################## HUBER ########################################## # f: ℜxℜ -> ℜ mutable struct HuberLoss<:DiffLoss scale::Float64 domain::Domain crossover::Float64 # where QuadLoss loss ends and linear loss begins; =1 for standard HuberLoss end HuberLoss(scale=1.0::Float64; domain=RealDomain(), crossover=1.0::Float64) = HuberLoss(scale, domain, crossover) function evaluate(l::HuberLoss, u::Real, a::Number) abs(u-a) > l.crossover ? (abs(u-a) - l.crossover + l.crossover^2)*l.scale : (u-a)^2*l.scale end grad(l::HuberLoss,u::Real,a::Number) = abs(u-a)>l.crossover ? sign(u-a)*l.scale : (u-a)*l.scale M_estimator(l::HuberLoss, a::AbstractArray) = median(a) # a heuristic, not the true estimator ########################################## QUANTILE ########################################## # f: ℜxℜ -> ℜ # define (u)_+ = max(u,0), (u)_- = max(-u,0) so (u)_+ + (u)_- = |u| # f(u,a) = { quantile (a - u)_+ + (1-quantile) (a - u)_- # fits the `quantile`th quantile of the distribution mutable struct QuantileLoss<:DiffLoss scale::Float64 domain::Domain quantile::Float64 # fit the alphath quantile end QuantileLoss(scale=1.0::Float64; domain=RealDomain(), quantile=.5::Float64) = QuantileLoss(scale, domain, quantile) function evaluate(l::QuantileLoss, u::Real, a::Number) diff = a-u diff > 0 ? l.scale * l.quantile * diff : - l.scale * (1-l.quantile) * diff end function grad(l::QuantileLoss,u::Real,a::Number) diff = a-u diff > 0 ? -l.scale * l.quantile : l.scale * (1-l.quantile) end M_estimator(l::QuantileLoss, a::AbstractArray) = quantile(a, l.quantile) ########################################## PERIODIC ########################################## # f: ℜxℜ -> ℜ # f(u,a) = w * (1 - cos((a-u)*(2*pi)/T)) # this measures how far away u and a are on a circle of circumference T. 
mutable struct PeriodicLoss<:DiffLoss T::Float64 # the length of the period scale::Float64 domain::Domain end PeriodicLoss(T, scale=1.0::Float64; domain=PeriodicDomain(T)) = PeriodicLoss(T, scale, domain) evaluate(l::PeriodicLoss, u::Real, a::Number) = l.scale*(1-cos((a-u)*(2*pi)/l.T)) grad(l::PeriodicLoss, u::Real, a::Number) = -l.scale*((2*pi)/l.T)*sin((a-u)*(2*pi)/l.T) function M_estimator(l::PeriodicLoss, a::AbstractArray{<:Real}) (l.T/(2*pi))*atan( sum(sin(2*pi*a/l.T)) / sum(cos(2*pi*a/l.T)) ) + l.T/2 # not kidding. # this is the estimator, and there is a form that works with weighted measurements (aka a prior on a) # see: http://www.tandfonline.com/doi/pdf/10.1080/17442507308833101 eq. 5.2 end ########################################## POISSON ########################################## # f: ℜxℕ -> ℜ # BEWARE: # 1) this is a reparametrized poisson: we parametrize the mean as exp(u) so that u can take any real value and still produce a positive mean # 2) THIS LOSS MAY CAUSE MODEL INSTABLITY AND DIFFICULTY FITTING. mutable struct PoissonLoss<:Loss scale::Float64 domain::Domain end PoissonLoss(max_count=2^31::Int; domain=CountDomain(max_count)::Domain) = PoissonLoss(1.0, domain) function evaluate(l::PoissonLoss, u::Real, a::Number) l.scale*(exp(u) - a*u + (a==0 ? 0 : a*(log(a)-1))) # log(a!) ~ a==0 ? 0 : a*(log(a)-1) end grad(l::PoissonLoss, u::Real, a::Number) = l.scale*(exp(u) - a) M_estimator(l::PoissonLoss, a::AbstractArray) = log(mean(a)) ########################################## ORDINAL HINGE ########################################## # f: ℜx{min, min+1... max-1, max} -> ℜ mutable struct OrdinalHingeLoss<:Loss min::Integer max::Integer scale::Float64 domain::Domain end OrdinalHingeLoss(m1, m2, scale=1.0::Float64; domain=OrdinalDomain(m1,m2)) = OrdinalHingeLoss(m1,m2,scale,domain) # this method should never be called directly but is needed to support copying OrdinalHingeLoss() = OrdinalHingeLoss(1, 10, 1.0, OrdinalDomain(1,10)) OrdinalHingeLoss(m2) = OrdinalHingeLoss(1, m2, 1.0, OrdinalDomain(1, m2)) function evaluate(l::OrdinalHingeLoss, u::Real, a::Number) #a = round(a) if u > l.max-1 # number of levels higher than true level n = min(floor(u), l.max-1) - a loss = n*(n+1)/2 + (n+1)*(u-l.max+1) elseif u > a # number of levels higher than true level n = min(floor(u), l.max) - a loss = n*(n+1)/2 + (n+1)*(u-floor(u)) elseif u > l.min+1 # number of levels lower than true level n = a - max(ceil(u), l.min+1) loss = n*(n+1)/2 + (n+1)*(ceil(u)-u) else # number of levels higher than true level n = a - max(ceil(u), l.min+1) loss = n*(n+1)/2 + (n+1)*(l.min+1-u) end return l.scale*loss end function grad(l::OrdinalHingeLoss, u::Real, a::Number) #a = round(a) if u > a # number of levels higher than true level n = min(ceil(u), l.max) - a g = n else # number of levels lower than true level n = a - max(floor(u), l.min) g = -n end return l.scale*g end M_estimator(l::OrdinalHingeLoss, a::AbstractArray) = median(a) ########################################## LOGISTIC ########################################## # f: ℜx{-1,1}-> ℜ mutable struct LogisticLoss<:ClassificationLoss scale::Float64 domain::Domain end LogisticLoss(scale=1.0::Float64; domain=BoolDomain()) = LogisticLoss(scale, domain) evaluate(l::LogisticLoss, u::Real, a::Bool) = l.scale*log(1+exp(-(2a-1)*u)) grad(l::LogisticLoss, u::Real, a::Bool) = (aa = 2a-1; -aa*l.scale/(1+exp(aa*u))) function M_estimator(l::LogisticLoss, a::AbstractArray{Bool,1}) d, N = sum(a), length(a) log(N + d) - log(N - d) # very satisfying end 
########################################## WEIGHTED HINGE ########################################## # f: ℜx{-1,1} -> ℜ # f(u,a) = { w * max(1-a*u, 0) for a = -1 # = { c * w * max(1-a*u, 0) for a = 1 mutable struct WeightedHingeLoss<:ClassificationLoss scale::Float64 domain::Domain case_weight_ratio::Float64 # >1 for trues to have more confidence than falses, <1 for opposite end WeightedHingeLoss(scale=1.0; domain=BoolDomain(), case_weight_ratio=1.0) = WeightedHingeLoss(scale, domain, case_weight_ratio) HingeLoss(scale=1.0::Float64; kwargs...) = WeightedHingeLoss(scale; kwargs...) # the standard HingeLoss is a special case of WeightedHingeLoss function evaluate(l::WeightedHingeLoss, u::Real, a::Bool) loss = l.scale*max(1-(2*a-1)*u, 0) if l.case_weight_ratio !==1. && a loss *= l.case_weight_ratio end return loss end function grad(l::WeightedHingeLoss, u::Real, a::Bool) an = (2*a-1) # change to {-1,1} g = (an*u>=1 ? 0 : -an*l.scale) if l.case_weight_ratio !==1. && a g *= l.case_weight_ratio end return g end function M_estimator(l::WeightedHingeLoss, a::AbstractArray{Bool,1}) r = length(a)/length(filter(x->x>0, a)) - 1 if l.case_weight_ratio > r m = 1.0 elseif l.case_weight_ratio == r m = 0.0 else m = -1.0 end end ########################################## MULTINOMIAL ########################################## # f: ℜx{1, 2, ..., max-1, max} -> ℜ # f computes the (negative log likelihood of the) multinomial logit, # often known as the softmax function # f(u, a) = exp(u[a]) / (sum_{a'} exp(u[a'])) # = 1 / (sum_{a'} exp(u[a'] - u[a])) mutable struct MultinomialLoss<:Loss max::Integer scale::Float64 domain::Domain end MultinomialLoss(m, scale=1.0::Float64; domain=CategoricalDomain(m)) = MultinomialLoss(m,scale,domain) embedding_dim(l::MultinomialLoss) = l.max datalevels(l::MultinomialLoss) = 1:l.max # levels are encoded as the numbers 1:l.max function evaluate(l::MultinomialLoss, u::Array{<:Real,1}, a::Integer) sumexp = 0 # inverse likelihood of observation # computing soft max directly is numerically unstable # instead note logsumexp(a_j) = logsumexp(a_j - M) + M # and we'll pick a good big (but not too big) M M = maximum(u) - u[a] # prevents overflow for j in 1:length(u) sumexp += exp(u[j] - u[a] - M) end loss = log(sumexp) + M return l.scale*loss end function grad(l::MultinomialLoss, u::Array{<:Real,1}, a::Integer) g = zeros(size(u)) # Using some nice algebra, you can show g[a] = -1 # and g[b] = -1/sum_{a' \in S} exp(u[b] - u[a']) # the contribution of one observation to one entry of the gradient # is always between -1 and 0 for j in 1:length(u) M = maximum(u) - u[j] # prevents overflow sumexp = 0 for jp in 1:length(u) sumexp += exp(u[jp] - u[j] - M) end g[j] += exp(-M)/sumexp end return l.scale*g end ## we'll compute it via a stochastic gradient method ## with fixed step size function M_estimator(l::MultinomialLoss, a::AbstractArray) u = zeros(l.max)' for i = 1:length(a) ai = a[i] u -= .1*grad(l, u, ai) end return u end ########################################## One vs All loss ########################################## # f: ℜx{1, 2, ..., max-1, max} -> ℜ mutable struct OvALoss<:Loss max::Integer bin_loss::Loss scale::Float64 domain::Domain end OvALoss(m::Integer, scale::Float64=1.0; domain=CategoricalDomain(m), bin_loss::Loss=LogisticLoss(scale)) = OvALoss(m,bin_loss,scale,domain) OvALoss() = OvALoss(1) # for copying correctly embedding_dim(l::OvALoss) = l.max datalevels(l::OvALoss) = 1:l.max # levels are encoded as the numbers 1:l.max function evaluate(l::OvALoss, u::Array{<:Real,1}, 
a::Integer) loss = 0 for j in 1:length(u) loss += evaluate(l.bin_loss, u[j], a==j) end return l.scale*loss end function grad(l::OvALoss, u::Array{<:Real,1}, a::Integer) g = zeros(length(u)) for j in 1:length(u) g[j] = grad(l.bin_loss, u[j], a==j) end return l.scale*g end function M_estimator(l::OvALoss, a::AbstractArray) u = zeros(l.max) for j = 1:l.max u[j] = M_estimator(l.bin_loss, a==j) end return u end ########################################## Bigger vs Smaller loss ########################################## # f: ℜx{1, 2, ..., max-1} -> ℜ mutable struct BvSLoss<:Loss max::Integer bin_loss::Loss scale::Float64 domain::Domain end BvSLoss(m::Integer, scale::Float64=1.0; domain=OrdinalDomain(1,m), bin_loss::Loss=LogisticLoss(scale)) = BvSLoss(m,bin_loss,scale,domain) BvSLoss() = BvSLoss(10) # for copying correctly embedding_dim(l::BvSLoss) = l.max-1 datalevels(l::BvSLoss) = 1:l.max # levels are encoded as the numbers 1:l.max function evaluate(l::BvSLoss, u::Array{<:Real,1}, a::Integer) loss = 0 for j in 1:length(u) loss += evaluate(l.bin_loss, u[j], a>j) end return l.scale*loss end function grad(l::BvSLoss, u::Array{<:Real,1}, a::Integer) g = zeros(length(u)) for j in 1:length(u) g[j] = grad(l.bin_loss, u[j], a>j) end return l.scale*g end function M_estimator(l::BvSLoss, a::AbstractArray) u = zeros(l.max) for j = 1:l.max-1 u[j] = M_estimator(l.bin_loss, a.>j) end return u end ########################################## ORDERED LOGISTIC ########################################## # f: ℜx{1, 2, ..., max-1, max} -> ℜ # f computes the (negative log likelihood of the) multinomial logit, # often known as the softmax function # f(u, a) = exp(u[a]) / (sum_{a'} exp(u[a'])) mutable struct OrdisticLoss<:Loss max::Integer scale::Float64 domain::Domain end OrdisticLoss(m::Int, scale=1.0::Float64; domain=OrdinalDomain(1,m)) = OrdisticLoss(m,scale,domain) embedding_dim(l::OrdisticLoss) = l.max datalevels(l::OrdisticLoss) = 1:l.max # levels are encoded as the numbers 1:l.max function evaluate(l::OrdisticLoss, u::Array{<:Real,1}, a::Integer) diffusquared = u[a]^2 .- u.^2 M = maximum(diffusquared) invlik = sum(exp, (diffusquared .- M)) loss = M + log(invlik) return l.scale*loss end function grad(l::OrdisticLoss, u::Array{<:Real,1}, a::Integer) g = zeros(size(u)) # Using some nice algebra, you can show g[a] = 2*u[a] sumexp = sum(map(j->exp(- u[j]^2), 1:length(u))) for j in 1:length(u) diffusquared = u[j]^2 .- u.^2 M = maximum(diffusquared) invlik = sum(exp,(diffusquared .- M)) g[j] -= 2 * u[j] * exp(- M) / invlik end return l.scale*g end ## we'll compute it via a stochastic gradient method ## with fixed step size function M_estimator(l::OrdisticLoss, a::AbstractArray) u = zeros(l.max)' for i = 1:length(a) ai = a[i] u -= .1*grad(l, u, ai) end return u end #################### Multinomial Ordinal Logit ##################### # l: ℜ^{max-1} x {1, 2, ..., max-1, max} -> ℜ # l computes the (negative log likelihood of the) multinomial ordinal logit. # # the length of the first argument u is one less than # the number of levels of the second argument a, # since the entries of u correspond to the division between each level # and the one above it. # # XXX warning XXX # the documentation in the comment below this point is defunct # # To yield a sensible pdf, the entries of u should be increasing # (b/c they're basically the -log of the cdf at the boundary between each level) # # The multinomial ordinal logit corresponds to a likelihood p with # p(u, a > i) ~ exp(-u[i]), so # p(u, a) ~ exp(-u[1]) * ... 
* exp(-u[a-1]) * exp(u[a]) * ... * exp(u[end]) # = exp(- u[1] - ... - u[a-1] + u[a] + ... + u[end]) # and normalizing, # p(u, a) = p(u, a) / sum_{a'} p(u, a') # # So l(u, a) = -log(p(u, a)) # = u[1] + ... + u[a-1] - u[a] - ... - u[end] + # log(sum_{a'}(exp(u[1] + ... + u[a'-1] - u[a'] - ... - u[end]))) # # Inspection of this loss function confirms that given u, # the most probable value a is the index of the first # positive entry of u mutable struct MultinomialOrdinalLoss<:Loss max::Integer scale::Float64 domain::Domain end MultinomialOrdinalLoss(m::Int, scale=1.0::Float64; domain=OrdinalDomain(1,m)) = MultinomialOrdinalLoss(m,scale,domain) MultinomialOrdinalLoss() = MultinomialOrdinalLoss(10) # for copying embedding_dim(l::MultinomialOrdinalLoss) = l.max - 1 datalevels(l::MultinomialOrdinalLoss) = 1:l.max # levels are encoded as the numbers 1:l.max function enforce_MNLOrdRules!(u; TOL=1e-3) u[1] = min(-TOL, u[1]) for j=2:length(u) u[j] = min(u[j], u[j-1]-TOL) end u end # argument u is a row vector (row slice of a matrix), which in julia is 2d # todo: increase numerical stability function evaluate(l::MultinomialOrdinalLoss, u::Array{<:Real,1}, a::Integer) enforce_MNLOrdRules!(u) if a == 1 return -l.scale*log(exp(0) - exp(u[1])) # (log(1 - exp(u[a] - 1))) elseif a == l.max return -l.scale*u[a-1] else return -l.scale*log(exp(u[a-1]) - exp(u[a])) # (u[a-1] + log(1 - exp(u[a] - u[a-1]))) end end function grad(l::MultinomialOrdinalLoss, u::Array{<:Real,1}, a::Integer) enforce_MNLOrdRules!(u) g = zeros(size(u)) if a == 1 g[1] = -exp(u[1])/(exp(0) - exp(u[1])) # g[1] = 1/(1 - exp(-u[1])) elseif a == l.max g[a-1] = 1 else # d = exp(u[a] - u[a-1]) # g[a] = d/(1-d) # g[a-1] = - g[a] - 1 g[a] = -exp(u[a])/(exp(u[a-1]) - exp(u[a])) g[a-1] = exp(u[a-1])/(exp(u[a-1]) - exp(u[a])) end return -l.scale*g end ## we'll compute it via a stochastic gradient method ## with fixed step size ## (we don't need a hyper accurate estimate for this) function M_estimator(l::MultinomialOrdinalLoss, a::AbstractVector) u = zeros(l.max-1)' for i = 1:length(a) ai = a[i] u -= .1*grad(l, u, ai) end return u end ### convenience methods for evaluating and computing gradients on vectorized arguments function evaluate(l::Loss, u::Array{<:Real,1}, a::AbstractVector) @assert size(u) == size(a) out = 0 for i=1:length(a) out += evaluate(l, u[i], a[i]) end return out end #Optimized vector evaluate on single-dimensional losses function evaluate(l::SingleDimLoss, u::Vector{<:Real}, a::AbstractVector) losseval = (x::Real, y::Number) -> evaluate(l, x, y) mapped = fill!(similar(u),0.) map!(losseval, mapped, u, a) reduce(+, mapped) end # now for multidimensional losses function evaluate(l::Loss, u::Array{<:Real,2}, a::AbstractVector) # @show size(u,1) # @show size(a) @assert size(u,1) == length(a) out = 0 for i=1:length(a) out += evaluate(l, u[i,:], a[i]) end return out end function grad(l::Loss, u::Array{<:Real,1}, a::AbstractVector) @assert size(u) == size(a) mygrad = zeros(size(u)) for i=1:length(a) mygrad[i] = grad(l, u[i], a[i]) end return mygrad end # Optimized vector grad on single-dimensional losses function grad(l::SingleDimLoss, u::Vector{<:Real}, a::AbstractVector) lossgrad = (x::Real,y::Number) -> grad(l, x, y) mapped = fill!(similar(u),0.) map!(lossgrad, mapped, u, a) end # now for multidimensional losses function grad(l::Loss, u::Array{<:Real,2}, a::AbstractVector) @assert size(u,1) == length(a) mygrad = zeros(size(u)) for i=1:length(a) mygrad[i,:] = grad(l, u[i,:], a[i]) end return mygrad end
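A sketch of a user-defined loss following the contract spelled out in the header comment of this file; the absolute-value form is illustrative only (it mirrors L1Loss), and the name MyAbsLoss is hypothetical.

```julia
using Statistics: median

# A user-defined loss carries `scale` and `domain` fields and implements
# evaluate and grad; an analytic M_estimator is optional but avoids the
# Optim-based fallback when loss scalings are computed.
mutable struct MyAbsLoss <: Loss
    scale::Float64
    domain::Domain
end
MyAbsLoss(scale=1.0; domain=RealDomain()) = MyAbsLoss(scale, domain)

evaluate(l::MyAbsLoss, u::Real, a::Number) = l.scale * abs(u - a)
grad(l::MyAbsLoss, u::Real, a::Number)     = l.scale * sign(u - a)
M_estimator(l::MyAbsLoss, a::AbstractArray) = median(a)
```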
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
3669
export sort_observations, add_offset!, fix_latent_features!, equilibrate_variance!, prob_scale! ### OBSERVATION TUPLES TO ARRAYS function sort_observations(obs::Union{Array{CartesianIndex{2},1},Array{Tuple{Int,Int},1}}, m::Int, n::Int; check_empty=false) observed_features = Array{Int,1}[Int[] for i=1:m] observed_examples = Array{Int,1}[Int[] for j=1:n] for obsij in obs i,j = obsij[1], obsij[2] push!(observed_features[i],j) push!(observed_examples[j],i) end if check_empty && (any(map(x->length(x)==0,observed_examples)) || any(map(x->length(x)==0,observed_features))) error("Every row and column must contain at least one observation") end return observed_features, observed_examples end ### SCALINGS AND OFFSETS ON GLRM function add_offset!(glrm::AbstractGLRM) glrm.rx, glrm.ry = map(lastentry1, glrm.rx), map(lastentry_unpenalized, glrm.ry) return glrm end function fix_latent_features!(glrm::AbstractGLRM, n) glrm.ry = Regularizer[fixed_latent_features(glrm.ry[i], glrm.Y[1:n,i]) for i in 1:length(glrm.ry)] return glrm end ## equilibrate variance # scale all columns inversely proportional to mean value of loss function # makes sense when all loss functions used are nonnegative function equilibrate_variance!(glrm::AbstractGLRM, columns_to_scale = 1:size(glrm.A,2)) for i in columns_to_scale nomissing = glrm.A[glrm.observed_examples[i],i] if length(nomissing)>0 varlossi = avgerror(glrm.losses[i], nomissing) varregi = var(nomissing) # TODO make this depend on the kind of regularization; this assumes QuadLoss else varlossi = 1 varregi = 1 end if varlossi > 0 # rescale the losses and regularizers for each column by the inverse of the empirical variance mul!(glrm.losses[i], scale(glrm.losses[i])/varlossi) end if varregi > 0 mul!(glrm.ry[i], scale(glrm.ry[i])/varregi) end end return glrm end ## probabilistic scaling # scale loss function to fit -loglik of joint distribution # makes sense when all functions used are -logliks of sensible distributions # todo: option to scale to account for nonuniform sampling in rows or columns or both # skipmissing(Array with missing) gives an iterator. function prob_scale!(glrm, columns_to_scale = 1:size(glrm.A,2)) for i in columns_to_scale nomissing = glrm.A[glrm.observed_examples[i],i] if typeof(glrm.losses[i]) == QuadLoss && length(nomissing) > 0 varlossi = var(skipmissing(glrm.A[:,i])) # estimate the variance if varlossi > TOL mul!(glrm.losses[i], 1/(2*varlossi)) # this is the correct -loglik of gaussian with variance fixed at estimate else @warn("column $i has a variance of $varlossi; not scaling it to avoid dividing by zero.") end elseif typeof(glrm.losses[i]) == HuberLoss && length(nomissing) > 0 varlossi = avgerror(glrm.losses[i], glrm.A[:,i]) # estimate the width of the distribution if varlossi > TOL mul!(glrm.losses[i], 1/(2*varlossi)) # this is not the correct -loglik of huber with estimates for variance and mean of poisson, but that's probably ok else @warn("column $i has a variance of $varlossi; not scaling it to avoid dividing by zero.") end else # none of the other distributions have any free parameters to estimate, so this is the correct -loglik mul!(glrm.losses[i], 1) end end return glrm end
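A hypothetical use of `sort_observations` above: convert a list of observed (i, j) pairs into the per-row and per-column index arrays the GLRM constructor expects; the observation list is made up for illustration.

```julia
obs = [(1, 1), (1, 3), (2, 2), (3, 1)]
observed_features, observed_examples = sort_observations(obs, 3, 3)
# observed_features[1] == [1, 3]   (features observed in example 1)
# observed_examples[1] == [1, 3]   (examples in which feature 1 is observed)
```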
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
615
#module Plot

import Gadfly
import DataFrames: DataFrame, stack

export plot

# Plot the columns `ys` of `df` against column `xs`, optionally on a log y-axis,
# and optionally save the figure as a PDF.
function plot(df::DataFrame, xs::Symbol, ys::Array{Symbol,1};
              scale=:linear, filename=nothing, height=3, width=6)
    dflong = vcat(map(l -> stack(df, l, xs), ys)...)
    if scale == :log
        p = Gadfly.plot(dflong, x=xs, y=:value, color=:variable, Gadfly.Scale.y_log10)
    else
        p = Gadfly.plot(dflong, x=xs, y=:value, color=:variable)
    end
    if !(filename === nothing)
        println("saving figure in $filename")
        Gadfly.draw(Gadfly.PDF(filename, width*Gadfly.inch, height*Gadfly.inch), p)
    end
    return p
end
#end # module
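A hypothetical call to the plotting helper above, assuming `ch` is a DataFrame of fitting history with columns :iteration and :objective.

```julia
# plot(ch, :iteration, [:objective]; scale=:log, filename="convergence.pdf")
```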
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
15810
# Predefined regularizers # You may also implement your own regularizer by subtyping # the abstract type Regularizer. # Regularizers should implement `evaluate` and `prox`. import Base: * export Regularizer, ProductRegularizer, # abstract types # concrete regularizers QuadReg, QuadConstraint, OneReg, ZeroReg, NonNegConstraint, NonNegOneReg, NonNegQuadReg, OneSparseConstraint, UnitOneSparseConstraint, SimplexConstraint, KSparseConstraint, lastentry1, lastentry_unpenalized, fixed_latent_features, FixedLatentFeaturesConstraint, fixed_last_latent_features, FixedLastLatentFeaturesConstraint, OrdinalReg, MNLOrdinalReg, RemQuadReg, # methods on regularizers prox!, prox, # utilities scale, mul!, * # numerical tolerance TOL = 1e-12 # regularizers # regularizers r should have the method `prox` defined such that # prox(r)(u,alpha) = argmin_x( alpha r(x) + 1/2 \|x - u\|_2^2) abstract type Regularizer end abstract type MatrixRegularizer <: LowRankModels.Regularizer end # default inplace prox operator (slower than if inplace prox is implemented) prox!(r::Regularizer,u::AbstractArray,alpha::Number) = (v = prox(r,u,alpha); @simd for i=1:length(u) @inbounds u[i]=v[i] end; u) # default scaling scale(r::Regularizer) = r.scale mul!(r::Regularizer, newscale::Number) = (r.scale = newscale; r) mul!(rs::Array{Regularizer}, newscale::Number) = (for r in rs mul!(r, newscale) end; rs) *(newscale::Number, r::Regularizer) = (newr = typeof(r)(); mul!(newr, scale(r)*newscale); newr) ## utilities function allnonneg(a::AbstractArray) for ai in a ai < 0 && return false end return true end ## Quadratic regularization mutable struct QuadReg<:Regularizer scale::Float64 end QuadReg() = QuadReg(1) prox(r::QuadReg,u::AbstractArray,alpha::Number) = 1/(1+2*alpha*r.scale)*u prox!(r::QuadReg,u::Array{Float64},alpha::Number) = rmul!(u, 1/(1+2*alpha*r.scale)) evaluate(r::QuadReg,a::AbstractArray) = r.scale*sum(abs2, a) ## constrained quadratic regularization ## the function r such that ## r(x) = inf if norm(x) > max_2norm ## 0 otherwise ## can be used to implement maxnorm regularization: ## constraining the maxnorm of XY to be <= mu is achieved ## by setting glrm.rx = QuadConstraint(sqrt(mu)) ## and the same for every element of glrm.ry mutable struct QuadConstraint<:Regularizer max_2norm::Float64 end QuadConstraint() = QuadConstraint(1) prox(r::QuadConstraint,u::AbstractArray,alpha::Number) = (r.max_2norm)/norm(u)*u prox!(r::QuadConstraint,u::Array{Float64},alpha::Number) = mul!(u, (r.max_2norm)/norm(u)) evaluate(r::QuadConstraint,u::AbstractArray) = norm(u) > r.max_2norm + TOL ? 
Inf : 0 scale(r::QuadConstraint) = 1 mul!(r::QuadConstraint, newscale::Number) = 1 ## one norm regularization mutable struct OneReg<:Regularizer scale::Float64 end OneReg() = OneReg(1) function softthreshold(x::Number; alpha=1) return max(x-alpha,0) + min(x+alpha,0) end prox(r::OneReg,u::AbstractArray,alpha::Number) = (st(x) = softthreshold(x; alpha=r.scale*alpha); st.(u)) prox!(r::OneReg,u::AbstractArray,alpha::Number) = (st(x) = softthreshold(x; alpha=r.scale*alpha); map!(st, u, u)) evaluate(r::OneReg,a::AbstractArray) = r.scale*sum(abs,a) ## no regularization mutable struct ZeroReg<:Regularizer end prox(r::ZeroReg,u::AbstractArray,alpha::Number) = u prox!(r::ZeroReg,u::Array{Float64},alpha::Number) = u evaluate(r::ZeroReg,a::AbstractArray) = 0 scale(r::ZeroReg) = 0 mul!(r::ZeroReg, newscale::Number) = 0 ## indicator of the nonnegative orthant ## (enforces nonnegativity, eg for nonnegative matrix factorization) mutable struct NonNegConstraint<:Regularizer end prox(r::NonNegConstraint,u::AbstractArray,alpha::Number=1) = broadcast(max,u,0) prox!(r::NonNegConstraint,u::Array{Float64},alpha::Number=1) = (@simd for i=1:length(u) @inbounds u[i] = max(u[i], 0) end; u) function evaluate(r::NonNegConstraint,a::AbstractArray) for ai in a if ai<0 return Inf end end return 0 end scale(r::NonNegConstraint) = 1 mul!(r::NonNegConstraint, newscale::Number) = 1 ## one norm regularization restricted to nonnegative orthant ## (enforces nonnegativity, in addition to one norm regularization) mutable struct NonNegOneReg<:Regularizer scale::Float64 end NonNegOneReg() = NonNegOneReg(1) prox(r::NonNegOneReg,u::AbstractArray,alpha::Number) = max.(u.-alpha,0) function prox!(r::NonNegOneReg,u::AbstractArray,alpha::Number) nonnegsoftthreshold = (x::Number -> max(x-alpha,0)) map!(nonnegsoftthreshold, u, u) end function evaluate(r::NonNegOneReg,a::AbstractArray) for ai in a if ai<0 return Inf end end return r.scale*sum(a) end scale(r::NonNegOneReg) = 1 mul!(r::NonNegOneReg, newscale::Number) = 1 ## Quadratic regularization restricted to nonnegative domain ## (Enforces nonnegativity alongside quadratic regularization) mutable struct NonNegQuadReg scale::Float64 end NonNegQuadReg() = NonNegQuadReg(1) prox(r::NonNegQuadReg,u::AbstractArray,alpha::Number) = max.(1/(1+2*alpha*r.scale)*u, 0) prox!(r::NonNegQuadReg,u::AbstractArray,alpha::Number) = begin mul!(u, 1/(1+2*alpha*r.scale)) maxval = maximum(u) clamp!(u, 0, maxval) end function evaluate(r::NonNegQuadReg,a::AbstractArray) for ai in a if ai<0 return Inf end end return r.scale*sumabs2(a) end ## indicator of the last entry being equal to 1 ## (allows an unpenalized offset term into the glrm when used in conjunction with lastentry_unpenalized) mutable struct lastentry1<:Regularizer r::Regularizer end lastentry1() = lastentry1(ZeroReg()) prox(r::lastentry1,u::AbstractArray{Float64,1},alpha::Number=1) = [prox(r.r,view(u,1:length(u)-1),alpha); 1] prox!(r::lastentry1,u::AbstractArray{Float64,1},alpha::Number=1) = (prox!(r.r,view(u,1:length(u)-1),alpha); u[end]=1; u) prox(r::lastentry1,u::AbstractArray{Float64,2},alpha::Number=1) = [prox(r.r,view(u,1:size(u,1)-1,:),alpha); ones(1, size(u,2))] prox!(r::lastentry1,u::AbstractArray{Float64,2},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1,:),alpha); u[end,:]=1; u) evaluate(r::lastentry1,a::AbstractArray{Float64,1}) = (a[end]==1 ? evaluate(r.r,a[1:end-1]) : Inf) evaluate(r::lastentry1,a::AbstractArray{Float64,2}) = (all(a[end,:].==1) ? 
evaluate(r.r,a[1:end-1,:]) : Inf) scale(r::lastentry1) = scale(r.r) mul!(r::lastentry1, newscale::Number) = mul!(r.r, newscale) ## makes the last entry unpenalized ## (allows an unpenalized offset term into the glrm when used in conjunction with lastentry1) mutable struct lastentry_unpenalized<:Regularizer r::Regularizer end lastentry_unpenalized() = lastentry_unpenalized(ZeroReg()) prox(r::lastentry_unpenalized,u::AbstractArray{Float64,1},alpha::Number=1) = [prox(r.r,u[1:end-1],alpha); u[end]] prox!(r::lastentry_unpenalized,u::AbstractArray{Float64,1},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1),alpha); u) evaluate(r::lastentry_unpenalized,a::AbstractArray{Float64,1}) = evaluate(r.r,a[1:end-1]) prox(r::lastentry_unpenalized,u::AbstractArray{Float64,2},alpha::Number=1) = [prox(r.r,u[1:end-1,:],alpha); u[end,:]] prox!(r::lastentry_unpenalized,u::AbstractArray{Float64,2},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1,:),alpha); u) evaluate(r::lastentry_unpenalized,a::AbstractArray{Float64,2}) = evaluate(r.r,a[1:end-1,:]) scale(r::lastentry_unpenalized) = scale(r.r) mul!(r::lastentry_unpenalized, newscale::Number) = mul!(r.r, newscale) ## fixes the values of the first n elements of the column to be y ## optionally regularizes the last k-n elements with regularizer r mutable struct fixed_latent_features<:Regularizer r::Regularizer y::Array{Float64,1} # the values of the fixed latent features n::Int # length of y end fixed_latent_features(r::Regularizer, y::Array{Float64,1}) = fixed_latent_features(r,y,length(y)) # standalone use without another regularizer FixedLatentFeaturesConstraint(y::Array{Float64, 1}) = fixed_latent_features(ZeroReg(),y,length(y)) prox(r::fixed_latent_features,u::AbstractArray,alpha::Number) = [r.y; prox(r.r,u[(r.n+1):end],alpha)] function prox!(r::fixed_latent_features,u::Array{Float64},alpha::Number) prox!(r.r,u[(r.n+1):end],alpha) u[1:r.n]=y u end evaluate(r::fixed_latent_features, a::AbstractArray) = a[1:r.n]==r.y ? evaluate(r.r, a[(r.n+1):end]) : Inf scale(r::fixed_latent_features) = scale(r.r) mul!(r::fixed_latent_features, newscale::Number) = mul!(r.r, newscale) ## fixes the values of the last n elements of the column to be y ## optionally regularizes the first k-n elements with regularizer r mutable struct fixed_last_latent_features<:Regularizer r::Regularizer y::Array{Float64,1} # the values of the fixed latent features n::Int # length of y end fixed_last_latent_features(r::Regularizer, y::Array{Float64,1}) = fixed_last_latent_features(r,y,length(y)) # standalone use without another regularizer FixedLastLatentFeaturesConstraint(y::Array{Float64, 1}) = fixed_last_latent_features(ZeroReg(),y,length(y)) prox(r::fixed_last_latent_features,u::AbstractArray,alpha::Number) = [prox(r.r,u[(r.n+1):end],alpha); r.y] function prox!(r::fixed_last_latent_features,u::Array{Float64},alpha::Number) u[length(u)-r.n+1:end]=y prox!(r.r,u[1:length(a)-r.n],alpha) u end evaluate(r::fixed_last_latent_features, a::AbstractArray) = a[length(a)-r.n+1:end]==r.y ? 
evaluate(r.r, a[1:length(a)-r.n]) : Inf scale(r::fixed_last_latent_features) = scale(r.r) mul!(r::fixed_last_latent_features, newscale::Number) = mul!(r.r, newscale) ## indicator of 1-sparse vectors ## (enforces that exact 1 entry is nonzero, eg for orthogonal NNMF) mutable struct OneSparseConstraint<:Regularizer end prox(r::OneSparseConstraint, u::AbstractArray, alpha::Number=0) = (idx = argmax(u); v=zeros(size(u)); v[idx]=u[idx]; v) prox!(r::OneSparseConstraint, u::Array, alpha::Number=0) = (idx = argmax(u); ui = u[idx]; mul!(u,0); u[idx]=ui; u) function evaluate(r::OneSparseConstraint, a::AbstractArray) oneflag = false for ai in a if oneflag if ai!=0 return Inf end else if ai!=0 oneflag=true end end end return 0 end scale(r::OneSparseConstraint) = 1 mul!(r::OneSparseConstraint, newscale::Number) = 1 ## Indicator of k-sparse vectors mutable struct KSparseConstraint<:Regularizer k::Int end function evaluate(r::KSparseConstraint, a::AbstractArray) k = r.k nonzcount = 0 for ai in a if nonzcount == k if ai != 0 return Inf end else if ai != 0 nonzcount += 1 end end end return 0 end function prox(r::KSparseConstraint, u::AbstractArray, alpha::Number) k = r.k ids = partialsortperm(u, 1:k, by=abs, rev=true) uk = zero(u) uk[ids] = u[ids] uk end function prox!(r::KSparseConstraint, u::Array, alpha::Number) k = r.k ids = partialsortperm(u, 1:k, by=abs, rev=true) vals = u[ids] mul!(u,0) u[ids] = vals u end ## indicator of 1-sparse unit vectors ## (enforces that exact 1 entry is 1 and all others are zero, eg for kmeans) mutable struct UnitOneSparseConstraint<:Regularizer end prox(r::UnitOneSparseConstraint, u::AbstractArray, alpha::Number=0) = (idx = argmax(u); v=zeros(size(u)); v[idx]=1; v) prox!(r::UnitOneSparseConstraint, u::Array, alpha::Number=0) = (idx = argmax(u); mul!(u,0); u[idx]=1; u) function evaluate(r::UnitOneSparseConstraint, a::AbstractArray) oneflag = false for ai in a if ai==0 continue elseif ai==1 if oneflag return Inf else oneflag=true end else return Inf end end return 0 end scale(r::UnitOneSparseConstraint) = 1 mul!(r::UnitOneSparseConstraint, newscale::Number) = 1 ## indicator of vectors in the simplex: nonnegative vectors with unit l1 norm ## (eg for QuadLoss mixtures, ie soft kmeans) ## prox for the simplex is derived by Chen and Ye in [this paper](http://arxiv.org/pdf/1101.6081v2.pdf) mutable struct SimplexConstraint<:Regularizer end function prox(r::SimplexConstraint, u::AbstractArray, alpha::Number=0) n = length(u) y = sort(u, rev=true) ysum = cumsum(y) t = (ysum[end]-1)/n for i=1:(n-1) if (ysum[i]-1)/i >= y[i+1] t = (ysum[i]-1)/i break end end max.(u .- t, 0) end function evaluate(r::SimplexConstraint,a::AbstractArray) # check it's a unit vector abs(sum(a)-1)>TOL && return Inf # check every entry is nonnegative for i=1:length(a) a[i] < 0 && return Inf end return 0 end scale(r::SimplexConstraint) = 1 mul!(r::SimplexConstraint, newscale::Number) = 1 ## ordinal regularizer ## a block regularizer which # 1) forces the first k-1 entries of each column to be the same # 2) forces the last entry of each column to be increasing # 3) applies an internal regularizer to the first k-1 entries of each column ## should always be used in conjunction with lastentry1 regularization on x mutable struct OrdinalReg<:Regularizer r::Regularizer end OrdinalReg() = OrdinalReg(ZeroReg()) prox(r::OrdinalReg,u::AbstractArray,alpha::Number) = (uc = copy(u); prox!(r,uc,alpha)) function prox!(r::OrdinalReg,u::AbstractArray,alpha::Number) um = mean(u[1:end-1, :], dims=2) prox!(r.r,um,alpha) for 
i=1:size(u,1)-1 for j=1:size(u,2) u[i,j] = um[i] end end # this enforces rule 2) (increasing last row of u), but isn't exactly the prox function # for j=2:size(u,2) # if u[end,j-1] > u[end,j] # m = (u[end,j-1] + u[end,j])/2 # u[end,j-1:j] = m # end # end u end evaluate(r::OrdinalReg,a::AbstractArray) = evaluate(r.r,a[1:end-1,1]) scale(r::OrdinalReg) = scale(r.r) mul!(r::OrdinalReg, newscale::Number) = mul!(r.r, newscale) # make sure we don't add two offsets cuz that's weird lastentry_unpenalized(r::OrdinalReg) = r mutable struct MNLOrdinalReg<:Regularizer r::Regularizer end MNLOrdinalReg() = MNLOrdinalReg(ZeroReg()) prox(r::MNLOrdinalReg,u::AbstractArray,alpha::Number) = (uc = copy(u); prox!(r,uc,alpha)) function prox!(r::MNLOrdinalReg,u::AbstractArray,alpha::Number; TOL=1e-3) um = mean(u[1:end-1, :], dims=2) prox!(r.r,um,alpha) for i=1:size(u,1)-1 for j=1:size(u,2) u[i,j] = um[i] end end # this enforces rule 2) (decreasing last row of u, all less than 0), but isn't exactly the prox function u[end,1] = min(-TOL, u[end,1]) for j=2:size(u,2) u[end,j] = min(u[end,j], u[end,j-1]-TOL) end u end evaluate(r::MNLOrdinalReg,a::AbstractArray) = evaluate(r.r,a[1:end-1,1]) scale(r::MNLOrdinalReg) = scale(r.r) mul!(r::MNLOrdinalReg, newscale::Number) = mul!(r.r, newscale) # make sure we don't add two offsets cuz that's weird lastentry_unpenalized(r::MNLOrdinalReg) = r ## Quadratic regularization with non-zero mean mutable struct RemQuadReg<:Regularizer scale::Float64 m::Array{Float64, 1} end RemQuadReg(m::Array{Float64, 1}) = RemQuadReg(1, m) prox(r::RemQuadReg, u::AbstractArray, alpha::Number) = (u + 2 * alpha * r.scale * r.m) / (1 + 2 * alpha * r.scale) prox!(r::RemQuadReg, u::Array{Float64}, alpha::Number) = begin broadcast!(.+, u, u, 2 * alpha * r.scale * r.m) mul!(u, 1 / (1 + 2 * alpha * r.scale)) end evaluate(r::RemQuadReg, a::AbstractArray) = r.scale * sum(abs2, a - r.m) ## simpler method for numbers, not arrays evaluate(r::Regularizer, u::Number) = evaluate(r, [u]) prox(r::Regularizer, u::Number, alpha::Number) = prox(r, [u], alpha)[1] # if step size not specified, step size = 1 prox(r::Regularizer, u) = prox(r, u, 1)
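A sketch of a user-defined regularizer following the contract in the header comment of this file (implement `evaluate` and `prox`); the ridge form below mirrors QuadReg and the name MyRidgeReg is hypothetical.

```julia
# prox must satisfy prox(r, u, alpha) = argmin_x alpha*r(x) + 1/2*||x - u||^2.
mutable struct MyRidgeReg <: Regularizer
    scale::Float64
end
MyRidgeReg() = MyRidgeReg(1.0)

evaluate(r::MyRidgeReg, a::AbstractArray) = r.scale * sum(abs2, a)
prox(r::MyRidgeReg, u::AbstractArray, alpha::Number) = u ./ (1 + 2 * alpha * r.scale)
```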
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1752
#### randomized SVD (from Jiahao Chen, based on http://arxiv.org/pdf/0909.4061.pdf)

import LinearAlgebra: SVD, qr, qr!, svd!

#The simplest possible randomized svd
#Inputs
#   A: input matrix
#   n: Number of singular value/vector pairs to find
#   p: Number of extra vectors to include in computation
function rsvd(A, n, p=0)
    Q = rrange(A, n, p=p)
    rsvd_direct(A, Q)
end

#Algorithm 4.4: randomized subspace iteration
#A must support size(A), multiply and transpose multiply
#p is the oversampling parameter
#q controls the accuracy of the subspace found; it is the "number of power iterations"
#A good heuristic is that when the original scheme produces a basis whose
#approximation error is within a factor C of the optimum, the power scheme produces
#an approximation error within C^(1/(2q+1)) of the optimum.
function rrange(A, l::Integer; p::Integer=5, q::Integer=3)
    p ≥ 0 || error("The oversampling parameter p must be nonnegative")
    m, n = size(A)
    l <= m || error("Cannot find $l linearly independent vectors of $m x $n matrix")
    Ω = randn(n, l+p)
    Q = q_from_qr(A*Ω)
    for t=1:q
        Q = q_from_qr(A'*Q)
        Q = q_from_qr(A*Q)
    end
    Q = p==0 ? Q : Q[:,1:l]
end

# orthonormal basis for the range of Y, via the thin Q factor of its QR factorization
function q_from_qr(Y, l::Integer=-1)
    Q = Matrix(qr!(Y).Q)
    Q = l<0 ? Q : Q[:,1:l]
end

#Algorithm 5.1: direct SVD
#More accurate
function rsvd_direct(A, Q)
    B = Q'A
    S = svd!(B)
    SVD(Q*S.U, S.S, S.Vt)
end

function onepass_svd(A::AbstractArray, r::Int)
    m, n = size(A)
    k = 2r + 1
    l = 4r + 3
    Omega = randn(n,k)
    Psi = randn(m,l)
    Y = A*Omega
    W = A'*Psi
    F = qr(view(Y,:,1:k))
    Q = Matrix(F.Q)                           # thin m x k orthonormal basis for range(Y)
    B = view(W,:,1:l) / (Q'*view(Psi,:,1:l))  # Q'*Psi is k x l, its pinv is l x k, so B is n x k
    mysvd,_ = svds(B, nsv=r)                  # mysvd.U is n x r, mysvd.V is k x r
    return SVD(Q*mysvd.V, mysvd.S, Matrix(mysvd.U'))  # A ≈ (Q*V) * S * U'
end
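A hypothetical call to `rsvd` above; the matrix, rank, and oversampling parameter are arbitrary.

```julia
M = randn(1000, 200)
F = rsvd(M, 5, 10)   # 5 singular triplets, 10 extra sampling vectors; F isa LinearAlgebra.SVD
# F.U * Diagonal(F.S) * F.Vt approximates the best rank-5 approximation of M
```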
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
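A hedged usage sketch for the randomized SVD helper above; rsvd is not exported, so it is called through the module, and the test matrix and ranks are illustrative.

using LinearAlgebra, Random
using LowRankModels

Random.seed!(0)
A = randn(200, 10) * randn(10, 50)          # a matrix of rank (at most) 10
F = LowRankModels.rsvd(A, 10, 5)            # 10 singular triplets, 5 oversampling vectors
norm(A - F.U * Diagonal(F.S) * F.Vt)        # should be tiny relative to norm(A)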
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
6950
# Supported domains: Real, Boolean, Ordinal, Periodic, Count # The purpose of domains is to be able to sample over different possible values of `a` regardless of # the loss that was used in the GLRM. The reason for doing this is to evaluate the performance of GLRMS. # For instance, let's say we use PCA (QuadLoss losses) to model a binary data frame (not the best idea). # In order to override the standard imputation with `sample(QuadLoss(), u)`, which assumes imputation over the reals, # we can use `sample(BoolDomain(), QuadLoss(), u)` and see which of {-1,1} is best. The reason we want to be able to # do this is to compare a baseline model (e.g. PCA) with a more logical model using heterogenous losses, # yet still give each model the same amount of information regarding how imputation should be done. # The domains themselves are defined in domains.jl # In order to accomplish this we define a series of domains that describe how imputation should be performed over # them. Each combination of domain and loss must have the following: # Methods: # `sample(D::my_Domain, l::my_loss_type, u::Float64) ::Float64` # Samples aᵤ from among the range of possible values of a. The range of # possible values of a should be implicitly or explicitly provided by `D`. # There should be an sample method for every combination of datatype and loss. # DataTypes are assigned to each column of the data and are not part of the low-rank model itself, they just serve # as a way to evaluate the performance of the low-rank model. import StatsBase: sample, Weights export sample, sample_missing ########################################## REALS ########################################## # Real data can take values from ℜ # l.scale should be 1/var sample(D::RealDomain, l::QuadLoss, u::Float64; noisevar=l.scale) = u + randn()/sqrt(noisevar) ########################################## BOOLS ########################################## # Boolean data should take values from {true, false} function sample(D::BoolDomain, l::LogisticLoss, u::Float64) rand()<=(1/(1+exp(-u))) ? true : false end # generic method # Evaluate w/ a=-1 and a=1 and see which is better according to that loss. # This is fast and works for any loss. function sample(D::BoolDomain, l::Loss, u::AbstractArray) prob = exp.(-[evaluate(l, u, i) for i in (true, false)]) return sample(Weights(prob)) end ########################################## ORDINALS ########################################## # Ordinal data should take integer values ranging from `min` to `max` # a DiffLoss is one in which l(u,a) = f(u-a) AND argmin f(x) = 0 # for example, QuadLoss(u,a)=(u-a)² and we can write f(x)=x² and x=u-a function sample(D::OrdinalDomain, l::DiffLoss, u::Float64) uint = round(Int, u) uclip = max(D.min, min(D.max, uint)) return uclip end # generic method function sample(D::OrdinalDomain, l::Loss, u::AbstractArray) prob = exp.(-[evaluate(l, u, i) for i in D.min:D.max]) return sample(Weights(prob)) end ########################################## CATEGORICALS ########################################## # Categorical data should take integer values ranging from 1 to `max` function sample(D::CategoricalDomain, l::MultinomialLoss, u::Array{Float64}) return sample(Weights(exp.(u))) end # sample(D::CategoricalDomain, l::OvALoss, u::Array{Float64}) = ?? 
# generic method function sample(D::CategoricalDomain, l::Loss, u::AbstractArray) prob = exp.(-[evaluate(l, u, i) for i in D.min:D.max]) return sample(Weights(prob)) end ########################################## PERIODIC ########################################## # Periodic data can take values from ℜ, but given a period T, we should have error_metric(a,a+T) = 0 # Since periodic data can take any real value, we can use the real-valued imputation methods # sample(D::PeriodicDomain, l::Loss, u::Float64) = ?? ########################################## COUNTS ########################################## # Count data can take values over ℕ, which we approximate as {0, 1, 2 ... `max_count`} # Our approximation of ℕ is really an ordinal sample(D::CountDomain, l::Loss, u::Float64) = sample(OrdinalDomain(0,D.max_count), l, u) #################################################################################### # Use impute and error_metric over arrays function sample( domains::Array{DomainSubtype,1}, losses::Array{LossSubtype,1}, U::Array{Float64,2}) where {DomainSubtype<:Domain,LossSubtype<:Loss} m, d = size(U) n = length(losses) yidxs = get_yidxs(losses) A_sampled = Array(Number, (m, n)); for f in 1:n for i in 1:m if length(yidxs[f]) > 1 A_sampled[i,f] = sample(domains[f], losses[f], vec(U[i,yidxs[f]])) else A_sampled[i,f] = sample(domains[f], losses[f], U[i,yidxs[f]]) end end end return A_sampled end # sample missing entries in A according to the fit model (X,Y) function sample_missing(glrm::GLRM) do_sample(e::Int, f::Int) = !(e in glrm.observed_examples[f]) return sample(glrm, do_sample) end all_entries(e::Int,f::Int) = true # sample all entries in A according to the fit model (X,Y) # do_sample is a function that takes an example-feature pair (e,f) # and returns true if that entry should be replaced by a sample from the model # is_dense controls whether the output should be a dense matrix # it's true by default because we sample all entries by default function sample(glrm::GLRM, do_sample::Function=all_entries, is_dense::Bool=true) U = glrm.X'*glrm.Y m, d = size(U) n = length(glrm.losses) yidxs = get_yidxs(glrm.losses) domains = Domain[domain(l) for l in glrm.losses] # make sure we don't mutate the type of the array A # even if all data for some real loss take integer values for j=1:n if isa(domains[j], RealDomain) && isa(glrm.A[:,j], Array{Union{Missing, Int},1}) domains[j] = OrdinalDomain(minimum(dropmissing(glrm.A[j])), maximum(dropmissing(glrm.A[j]))) end end # compute the correct variance for real valued losses original_scales = [l.scale for l in glrm.losses] for j=1:n if isa(domains[j], RealDomain) println("old scale:", glrm.losses[j].scale) glrm.losses[j].scale = mean((U[glrm.observed_examples[j],j] - glrm.A[glrm.observed_examples[j],j]).^2) println("new scale:", glrm.losses[j].scale) end end A_sampled = copy(glrm.A); if is_dense && isa(A_sampled, SparseMatrixCSC) A_sampled = Matrix(A_sampled) end for f in 1:n for e in 1:m if do_sample(e,f) A_sampled[e,f] = sample(domains[f], glrm.losses[f], U[e,yidxs[f]]) end end end # revert scales to previously defined values for j=1:n glrm.losses[j].scale = original_scales[j] end return A_sampled end function sample(losses::Array{LossSubtype,1}, U::Array{Float64,2}) where LossSubtype<:Loss domains = Domain[domain(l) for l in losses] sample(domains, losses, U) end ### Hack to sample from non-probabilistic losses sample(D::Domain, l::Loss, u) = impute(D, l, u)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
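A short sketch of the sampling interface above on a fitted model, assuming a plain quadratic GLRM; sample redraws every entry from the fitted model, while sample_missing only fills in the unobserved entries.

using LowRankModels, Random

Random.seed!(1)
A = randn(50, 10)
obs = [(i, j) for i in 1:50 for j in 1:10 if rand() < 0.8]   # observe ~80% of entries
glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 3, obs=obs)
fit!(glrm)

A_resampled = sample(glrm)          # redraw every entry from the fitted model
A_completed = sample_missing(glrm)  # only the unobserved entries are replaced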
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
9359
import ScikitLearnBase using ScikitLearnBase: @declare_hyperparameters export SkGLRM, PCA, QPCA, NNMF, KMeans, RPCA ################################################################################ # Shared definitions # Note: there is redundancy in the hyperparameters. This is # necessary if we want to offer a simple interface in PCA(), and a full # interface in SkGLRM(). PCA(abs_tol=0.1, max_iter=200) cannot create # `ProxGradParams(abs_tol, max_iter)` right away, because abs_tol and # max_iter are hyperparameters and need to be visible/changeable by # set_params for grid-search. # There are other ways of setting it up, but this seems like the simplest. mutable struct SkGLRM <: ScikitLearnBase.BaseEstimator # Hyperparameters: those will be passed to GLRM, so it doesn't matter if # they're not typed. fit_params # if fit_params != nothing, it has priority over abs_tol, etc. loss rx ry # rx/ry_scale can be nothing, in which case they're ignored. This allows # ry to be a vector rx_scale ry_scale abs_tol::Float64 rel_tol::Float64 max_iter::Int inner_iter::Int k::Int init::Function # initialization function verbose::Bool glrm::GLRM # left undefined by the constructor end # This defines `clone`, `get_params` and `set_params!` @declare_hyperparameters(SkGLRM, [:fit_params, :init, :rx, :ry, :rx_scale, :ry_scale, :loss, :abs_tol, :rel_tol, :max_iter, :inner_iter, :k, :verbose]) function do_fit!(skglrm::SkGLRM, glrm::GLRM) fit_params = (skglrm.fit_params === nothing ? ProxGradParams(abs_tol=skglrm.abs_tol, rel_tol=skglrm.rel_tol, max_iter=skglrm.max_iter) : skglrm.fit_params) fit!(glrm, fit_params; verbose=skglrm.verbose) end function ind2sub(a, i) i2s[i] end function build_glrm(skglrm::SkGLRM, X, missing_values) k = skglrm.k == -1 ? size(X, 2) : skglrm.k i2s = CartesianIndices(missing_values) obs = [i2s[x] for x in (LinearIndices(.!missing_values))[findall(.!missing_values)] ] rx, ry = skglrm.rx, skglrm.ry if skglrm.rx_scale !== nothing rx = copy(rx) mul!(rx, skglrm.rx_scale) end if skglrm.ry_scale !== nothing ry = copy(ry) mul!(ry, skglrm.ry_scale) end GLRM(X, skglrm.loss, rx, ry, k; obs=obs) end # The input matrix is called X (instead of A) following ScikitLearn's convention function ScikitLearnBase.fit_transform!(skglrm::SkGLRM, X, y=nothing; missing_values=isnan.(X)) @assert size(X)==size(missing_values) # Reuse the standard GLRM constructor and fitting machinery skglrm.glrm = build_glrm(skglrm, X, missing_values) skglrm.init(skglrm.glrm) X, _, _ = do_fit!(skglrm, skglrm.glrm) return X' end function ScikitLearnBase.fit!(skglrm::SkGLRM, X, y=nothing; kwargs...) ScikitLearnBase.fit_transform!(skglrm, X; kwargs...) 
skglrm end """ `transform(skglrm::SkGLRM, X)` brings X to low-rank-space """ function ScikitLearnBase.transform(skglrm::SkGLRM, X; missing_values=isnan.(X)) glrm = skglrm.glrm ry_fixed = [FixedLatentFeaturesConstraint(glrm.Y[:, i]) for i=1:size(glrm.Y, 2)] glrm_fixed = build_glrm(skglrm, X, missing_values) X2, _, ch = do_fit!(skglrm, glrm_fixed) return X2' end """ `transform(skglrm::SkGLRM, X)` brings X from low-rank-space back to the original input-space """ ScikitLearnBase.inverse_transform(skglrm::SkGLRM, X) = X * skglrm.glrm.Y # Only makes sense for KMeans function ScikitLearnBase.predict(km::SkGLRM, X) X2 = ScikitLearnBase.transform(km, X) # This performs the "argmax" over the columns to get the cluster # return mapslices(argmax, X2, 2)[:] end ################################################################################ # Public constructors """ SkGLRM(; fit_params=nothing, init=glrm->nothing, k::Int=-1, loss=QuadLoss(), rx::Regularizer=ZeroReg(), ry=ZeroReg(), rx_scale=nothing, ry_scale=nothing, # defaults taken from proxgrad.jl abs_tol=0.00001, rel_tol=0.0001, max_iter=100, inner_iter=1, verbose=false) Generalized low rank model (GLRM). GLRMs model a data array by a low rank matrix. GLRM makes it easy to mix and match loss functions and regularizers to construct a model suitable for a particular data set. Hyperparameters: - `fit_params`: algorithm to use in fitting the GLRM. Defaults to `ProxGradParams(abs_tol, rel_tol, skglrm.max_iter)` - `init`: function to initialize the low-rank matrices, before the main gradient descent loop. - `k`: number of components (rank of the latent representation). By default, use k=nfeatures (full rank) - `loss`: loss function. Can be either a single `::Loss` object, or a vector of `nfeature` loss objects, allowing for mixed inputs (eg. binary and continuous data) - `rx`: regularization over the hidden coefficient matrix - `ry`: regularization over the latent features matrix. Can be either a single regularizer, or a vector of regularizers of length nfeatures, allowing for mixed inputs - `rx_scale`, `ry_scale`: strength of the regularization (higher is stronger). By default, `scale=1`. Cannot be used if `rx/ry` are vectors. - `abs_tol, rel_tol`: tolerance criteria to stop the gradient descent iteration - `max_iter, inner_iter`: number of iterations in the gradient descent loops - `verbose`: print convergence information All parameters (in particular, `rx/ry_scale`) can be tuned with `ScikitLearn.GridSearch.GridSearchCV` For more information on the parameters see [LowRankModels](https://github.com/madeleineudell/LowRankModels.jl) """ function SkGLRM(; fit_params=nothing, init=glrm->nothing, k=-1, loss=QuadLoss(), rx=ZeroReg(), ry=ZeroReg(), rx_scale=nothing, ry_scale=nothing, # defaults taken from proxgrad.jl abs_tol=0.00001, rel_tol=0.0001, max_iter=100, inner_iter=1, verbose=false) dummy = pca(zeros(1,1), 1) # it needs an initial value - will be overwritten return SkGLRM(fit_params, loss, rx, ry, rx_scale, ry_scale, abs_tol, rel_tol, max_iter, inner_iter, k, init, verbose, dummy) end """ PCA(; k=-1, ...) Principal Component Analysis with `k` components (defaults to using `nfeatures`). Equivalent to SkGLRM(loss=QuadLoss(), rx=ZeroReg(), ry=ZeroReg(), init=init_svd!) See ?SkGLRM for more hyperparameters. In particular, increasing `max_iter` (default 100) may improve convergence. """ function PCA(; kwargs...) 
# principal components analysis # minimize ||A - XY||^2 loss = QuadLoss() r = ZeroReg() return SkGLRM(; loss=loss, rx=r, ry=r, init=init_svd!, kwargs...) end """ QPCA(k=-1, rx_scale=1, ry_scale=1; ...) Quadratically Regularized PCA with `k` components (default: `k = nfeatures`). Equivalent to SkGLRM(loss=QuadLoss(), rx=QuadReg(1.0), ry=QuadReg(1.0), init=init_svd!) Regularization strength is set by `rx_scale` and `ry_scale`. See ?SkGLRM for more hyperparameters. """ function QPCA(; kwargs...) # quadratically regularized principal components analysis # minimize ||A - XY||^2 + rx_scale*||X||^2 + ry_scale*||Y||^2 loss = QuadLoss() r = QuadReg(1.0) # scale is set in build_glrm return SkGLRM(; loss=loss, rx=r, ry=r, init=init_svd!, kwargs...) end """ NNMF(; k=-1, ...) Non-negative matrix factorization with `k` components (default: `k=nfeatures`). Equivalent to SkGLRM(loss=QuadLoss(), rx=NonNegConstraint(), ry=NonNegConstraint(), init=init_svd!) See ?SkGLRM for more hyperparameters """ function NNMF(; kwargs...) # nonnegative matrix factorization # minimize_{X>=0, Y>=0} ||A - XY||^2 loss = QuadLoss() r = NonNegConstraint() return SkGLRM(; loss=loss,rx=r,ry=r, init=init_svd!, kwargs...) end """ KMeans(; k=2, inner_iter=10, max_iter=100, ...) K-Means algorithm. Separates the data into `k` clusters. See ?SkGLRM for more hyperparameters. In particular, increasing `inner_iter` and `max_iter` may improve convergence. **IMPORTANT**: This is not the most efficient way of performing K-Means, and the iteration may not reach convergence. """ function KMeans(; k=2, inner_iter=10, kwargs...) # minimize_{columns of X are unit vectors} ||A - XY||^2 loss = QuadLoss() rx = UnitOneSparseConstraint() ry = ZeroReg() return SkGLRM(k=k, loss=loss,rx=rx,ry=ry, inner_iter=inner_iter, init=init_kmeanspp!; kwargs...) end """ RPCA(; k=-1, ...) Robust PCA with `k` components (default: `k = nfeatures`). Equivalent to SkGLRM(loss=HuberLoss(), rx=QuadReg(1.0), ry=QuadReg(1.0), init=init_svd!) Regularization strength is set by `rx_scale` and `ry_scale`. See ?SkGLRM for more hyperparameters. In particular, increasing `max_iter` (default 100) may improve convergence. """ function RPCA(; kwargs...) # robust PCA # minimize HuberLoss(A - XY) + scale*||X||^2 + scale*||Y||^2 loss = HuberLoss() r = QuadReg(1.0) return SkGLRM(; loss=loss,rx=r,ry=r, init=init_svd!, kwargs...) end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
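A hedged sketch of the ScikitLearn-style wrappers above; the caller loads ScikitLearnBase itself, and the data and hyperparameter values are illustrative.

using LowRankModels, Random
import ScikitLearnBase

Random.seed!(0)
X = randn(100, 4) * randn(4, 20)             # 100 samples, 20 features, true rank 4

pca = PCA(k=4, max_iter=200)
Z = ScikitLearnBase.fit_transform!(pca, X)   # 100 x 4 low-rank representation
Xhat = ScikitLearnBase.inverse_transform(pca, Z)

km = KMeans(k=3)
ScikitLearnBase.fit!(km, X)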
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1485
import LinearAlgebra: size, axpy! import LinearAlgebra.BLAS: gemm! #import Base: shmem_rand, shmem_randn export ShareGLRM, share ### GLRM TYPE mutable struct ShareGLRM{L<:Loss, R<:Regularizer}<:AbstractGLRM A::SharedArray # The data table transformed into a coded array losses::Array{L,1} # array of loss functions rx::Regularizer # The regularization to be applied to each row of Xᵀ (column of X) ry::Array{R,1} # Array of regularizers to be applied to each column of Y k::Int # Desired rank observed_features::ObsArray # for each example, an array telling which features were observed observed_examples::ObsArray # for each feature, an array telling in which examples the feature was observed X::SharedArray{Float64,2} # Representation of data in low-rank space. A ≈ X'Y Y::SharedArray{Float64,2} # Representation of features in low-rank space. A ≈ X'Y end function share(glrm::GLRM) isa(glrm.A, SharedArray) ? A = glrm.A : A = convert(SharedArray,glrm.A) isa(glrm.X, SharedArray) ? X = glrm.X : X = convert(SharedArray, glrm.X) isa(glrm.Y, SharedArray) ? Y = glrm.Y : Y = convert(SharedArray, glrm.Y) return ShareGLRM(A, glrm.losses, glrm.rx, glrm.ry, glrm.k, glrm.observed_features, glrm.observed_examples, X, Y) end ### todo: define objective for shared arrays so it's evaluated (safely) in parallel
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1148
export pca, qpca, nnmf, rpca, kmeans # principal components analysis # minimize ||A - XY||^2 function pca(A::AbstractArray, k::Int; kwargs...) loss = QuadLoss() r = ZeroReg() return GLRM(A,loss,r,r,k; kwargs...) end # quadratically regularized principal components analysis # minimize ||A - XY||^2 + scale*||X||^2 + scale*||Y||^2 function qpca(A::AbstractArray, k::Int; scale=1.0::Float64, kwargs...) loss = QuadLoss() r = QuadReg(scale) return GLRM(A,loss,r,r,k; kwargs...) end # nonnegative matrix factorization # minimize_{X>=0, Y>=0} ||A - XY||^2 function nnmf(A::AbstractArray, k::Int; kwargs...) loss = QuadLoss() r = NonNegConstraint() GLRM(A,loss,r,r,k; kwargs...) end # k-means # minimize_{columns of X are unit vectors} ||A - XY||^2 function kmeans(A::AbstractArray, k::Int; kwargs...) loss = QuadLoss() ry = ZeroReg() rx = UnitOneSparseConstraint() return GLRM(A,loss,rx,ry,k; kwargs...) end # robust PCA # minimize HuberLoss(A - XY) + scale*||X||^2 + scale*||Y||^2 function rpca(A::AbstractArray, k::Int; scale=1.0::Float64, kwargs...) loss = HuberLoss() r = QuadReg(scale) return GLRM(A,loss,r,r,k; kwargs...) end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
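A minimal sketch of the convenience constructors above; each returns an unfitted GLRM that is then passed to fit!. The data and ranks are illustrative.

using LowRankModels, Random

Random.seed!(0)
A = abs.(randn(80, 30))         # nonnegative data, so nnmf is sensible

g1 = qpca(A, 5, scale=0.1)      # quadratically regularized PCA
X1, Y1, ch1 = fit!(g1)

g2 = nnmf(A, 5)                 # nonnegative matrix factorization
X2, Y2, ch2 = fit!(g2)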
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
9842
### Proximal gradient method export ProxGradParams, fit! mutable struct ProxGradParams<:AbstractParams stepsize::Float64 # initial stepsize max_iter::Int # maximum number of outer iterations inner_iter_X::Int # how many prox grad steps to take on X before moving on to Y (and vice versa) inner_iter_Y::Int # how many prox grad steps to take on Y before moving on to X (and vice versa) abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * number of observations rel_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * objective value min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize end function ProxGradParams(stepsize::Number=1.0; # initial stepsize max_iter::Int=100, # maximum number of outer iterations inner_iter_X::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa) inner_iter_Y::Int=1, # how many prox grad steps to take on Y before moving on to X (and vice versa) inner_iter::Int=1, abs_tol::Number=0.00001, # stop if objective decrease upon one outer iteration is less than this * number of observations rel_tol::Number=0.0001, # stop if objective decrease upon one outer iteration is less than this * objective value min_stepsize::Number=0.01*stepsize) # stop if stepsize gets this small stepsize = convert(Float64, stepsize) inner_iter_X = max(inner_iter_X, inner_iter) inner_iter_Y = max(inner_iter_Y, inner_iter) return ProxGradParams(convert(Float64, stepsize), max_iter, inner_iter_X, inner_iter_Y, convert(Float64, abs_tol), convert(Float64, rel_tol), convert(Float64, min_stepsize)) end ### FITTING function fit!(glrm::GLRM, params::ProxGradParams; ch::ConvergenceHistory=ConvergenceHistory("ProxGradGLRM"), verbose=true, kwargs...) ### initialization A = glrm.A # rename these for easier local access losses = glrm.losses rx = glrm.rx ry = glrm.ry X = glrm.X; Y = glrm.Y # check that we didn't initialize to zero (otherwise we will never move) if norm(Y) == 0 Y = .1*randn(k,d) end k = glrm.k m,n = size(A) # find spans of loss functions (for multidimensional losses) yidxs = get_yidxs(losses) d = maximum(yidxs[end]) # check Y is the right size if d != size(Y,2) @warn("The width of Y should match the embedding dimension of the losses. Instead, embedding_dim(glrm.losses) = $(embedding_dim(glrm.losses)) and size(glrm.Y, 2) = $(size(glrm.Y, 2)). 
Reinitializing Y as randn(glrm.k, embedding_dim(glrm.losses).") # Please modify Y or the embedding dimension of the losses to match, # eg, by setting `glrm.Y = randn(glrm.k, embedding_dim(glrm.losses))`") glrm.Y = randn(glrm.k, d) end XY = Array{Float64}(undef, (m, d)) gemm!('T','N',1.0,X,Y,0.0,XY) # XY = X' * Y initial calculation # step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem) alpharow = params.stepsize*ones(m) alphacol = params.stepsize*ones(n) # stopping criterion: stop when decrease in objective < tol, scaled by the number of observations scaled_abs_tol = params.abs_tol * mapreduce(length,+,glrm.observed_features) # alternating updates of X and Y if verbose println("Fitting GLRM") end update_ch!(ch, 0, objective(glrm, X, Y, XY, yidxs=yidxs)) t = time() steps_in_a_row = 0 # gradient wrt columns of X g = zeros(k) # gradient wrt column-chunks of Y G = zeros(k, d) # rowwise objective value obj_by_row = zeros(m) # columnwise objective value obj_by_col = zeros(n) # cache views for better memory management # make sure we don't try to access memory not allocated to us @assert(size(Y) == (k,d)) @assert(size(X) == (k,m)) # views of the columns of X corresponding to each example ve = [view(X,:,e) for e=1:m] # views of the column-chunks of Y corresponding to each feature y_j # vf[f] == Y[:,f] vf = [view(Y,:,yidxs[f]) for f=1:n] # views of the column-chunks of G corresponding to the gradient wrt each feature y_j # these have the same shape as y_j gf = [view(G,:,yidxs[f]) for f=1:n] # working variables newX = copy(X) newY = copy(Y) newve = [view(newX,:,e) for e=1:m] newvf = [view(newY,:,yidxs[f]) for f=1:n] for i=1:params.max_iter # STEP 1: X update # XY = X' * Y was computed above # reset step size if we're doing something more like alternating minimization if params.inner_iter_X > 1 || params.inner_iter_Y > 1 for ii=1:m alpharow[ii] = params.stepsize end for jj=1:n alphacol[jj] = params.stepsize end end for inneri=1:params.inner_iter_X for e=1:m # for every example x_e == ve[e] fill!(g, 0.) 
# reset gradient to 0 # compute gradient of L with respect to Xᵢ as follows: # ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ for f in glrm.observed_features[e] # but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f]) if isa(curgrad, Number) axpy!(curgrad, vf[f], g) else # on v0.4: gemm!('N', 'T', 1.0, vf[f], curgrad, 1.0, g) gemm!('N', 'N', 1.0, vf[f], curgrad, 1.0, g) end end # take a proximal gradient step to update ve[e] l = length(glrm.observed_features[e]) + 1 # if each loss function has lipshitz constant 1 this bounds the lipshitz constant of this example's objective obj_by_row[e] = row_objective(glrm, e, ve[e]) # previous row objective value while alpharow[e] > params.min_stepsize stepsize = alpharow[e]/l # newx = prox(rx[e], ve[e] - stepsize*g, stepsize) # this will use much more memory than the inplace version with linesearch below ## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L axpy!(-stepsize,g,newve[e]) ## prox step: Xᵢ = prox_rx(Xᵢ, α/l) prox!(rx[e],newve[e],stepsize) if row_objective(glrm, e, newve[e]) < obj_by_row[e] copyto!(ve[e], newve[e]) alpharow[e] *= 1.05 break else # the stepsize was too big; undo and try again only smaller copyto!(newve[e], ve[e]) alpharow[e] *= .7 if alpharow[e] < params.min_stepsize alpharow[e] = params.min_stepsize * 1.1 break end end end end # for e=1:m gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new X end # inner iteration # STEP 2: Y update for inneri=1:params.inner_iter_Y fill!(G, 0.) for f=1:n # compute gradient of L with respect to Yⱼ as follows: # ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ for e in glrm.observed_examples[f] # but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f]) if isa(curgrad, Number) axpy!(curgrad, ve[e], gf[f]) else # on v0.4: gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f]) gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f]) end end # take a proximal gradient step l = length(glrm.observed_examples[f]) + 1 obj_by_col[f] = col_objective(glrm, f, vf[f]) while alphacol[f] > params.min_stepsize stepsize = alphacol[f]/l # newy = prox(ry[f], vf[f] - stepsize*gf[f], stepsize) ## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L axpy!(-stepsize,gf[f],newvf[f]) ## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l) prox!(ry[f],newvf[f],stepsize) new_obj_by_col = col_objective(glrm, f, newvf[f]) if new_obj_by_col < obj_by_col[f] copyto!(vf[f], newvf[f]) alphacol[f] *= 1.05 obj_by_col[f] = new_obj_by_col break else copyto!(newvf[f], vf[f]) alphacol[f] *= .7 if alphacol[f] < params.min_stepsize alphacol[f] = params.min_stepsize * 1.1 break end end end end # for f=1:n gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new Y end # inner iteration # STEP 3: Record objective obj = sum(obj_by_col) t = time() - t update_ch!(ch, t, obj) t = time() # STEP 4: Check stopping criterion obj_decrease = ch.objective[end-1] - obj if i>10 && (obj_decrease < scaled_abs_tol || obj_decrease/obj < params.rel_tol) break end if verbose && i%10==0 println("Iteration $i: objective value = $(ch.objective[end])") end end return glrm.X, glrm.Y, ch end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
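A sketch of driving the proximal-gradient routine above with explicit ProxGradParams; all tolerance and iteration values are illustrative.

using LowRankModels, Random

Random.seed!(0)
A = randn(60, 8) * randn(8, 40)
glrm = GLRM(A, QuadLoss(), QuadReg(0.01), QuadReg(0.01), 8)

params = ProxGradParams(1.0, max_iter=300, inner_iter_X=2, inner_iter_Y=2,
                        abs_tol=1e-8, rel_tol=1e-6)
X, Y, ch = fit!(glrm, params)
ch.objective[end]   # final objective recorded in the convergence history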
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
10099
### Proximal gradient method export ProxGradParams, fit! mutable struct ProxGradParams<:AbstractParams stepsize::Float64 # initial stepsize max_iter::Int # maximum number of outer iterations inner_iter_X::Int # how many prox grad steps to take on X before moving on to Y (and vice versa) inner_iter_Y::Int # how many prox grad steps to take on Y before moving on to X (and vice versa) abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * number of observations rel_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * objective value min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize end function ProxGradParams(stepsize::Number=1.0; # initial stepsize max_iter::Int=100, # maximum number of outer iterations inner_iter_X::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa) inner_iter_Y::Int=1, # how many prox grad steps to take on Y before moving on to X (and vice versa) inner_iter::Int=1, abs_tol::Number=0.00001, # stop if objective decrease upon one outer iteration is less than this * number of observations rel_tol::Number=0.0001, # stop if objective decrease upon one outer iteration is less than this * objective value min_stepsize::Number=0.01*stepsize) # stop if stepsize gets this small stepsize = convert(Float64, stepsize) inner_iter_X = max(inner_iter_X, inner_iter) inner_iter_Y = max(inner_iter_Y, inner_iter) return ProxGradParams(convert(Float64, stepsize), max_iter, inner_iter_X, inner_iter_Y, convert(Float64, abs_tol), convert(Float64, rel_tol), convert(Float64, min_stepsize)) end ### FITTING function fit!(glrm::GLRM, params::ProxGradParams; ch::ConvergenceHistory=ConvergenceHistory("ProxGradGLRM"), verbose=true, kwargs...) ### initialization A = glrm.A # rename these for easier local access losses = glrm.losses rx = glrm.rx ry = glrm.ry X = glrm.X; Y = glrm.Y # check that we didn't initialize to zero (otherwise we will never move) if norm(Y) == 0 Y = .1*randn(k,d) end k = glrm.k m,n = size(A) # find spans of loss functions (for multidimensional losses) yidxs = get_yidxs(losses) d = maximum(yidxs[end]) # check Y is the right size if d != size(Y,2) @warn("The width of Y should match the embedding dimension of the losses. Instead, embedding_dim(glrm.losses) = $(embedding_dim(glrm.losses)) and size(glrm.Y, 2) = $(size(glrm.Y, 2)). 
Reinitializing Y as randn(glrm.k, embedding_dim(glrm.losses).") # Please modify Y or the embedding dimension of the losses to match, # eg, by setting `glrm.Y = randn(glrm.k, embedding_dim(glrm.losses))`") glrm.Y = randn(glrm.k, d) end XY = Array{Float64}(undef, (m, d)) gemm!('T','N',1.0,X,Y,0.0,XY) # XY = X' * Y initial calculation # step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem) alpharow = params.stepsize*ones(m) alphacol = params.stepsize*ones(n) # stopping criterion: stop when decrease in objective < tol, scaled by the number of observations scaled_abs_tol = params.abs_tol * mapreduce(length,+,glrm.observed_features) # alternating updates of X and Y if verbose println("Fitting GLRM") end update_ch!(ch, 0, objective(glrm, X, Y, XY, yidxs=yidxs)) t = time() steps_in_a_row = 0 # gradient wrt columns of X g = [zeros(k) for t in 1:Threads.nthreads()] # gradient wrt column-chunks of Y G = zeros(k, d) # rowwise objective value obj_by_row = zeros(m) # columnwise objective value obj_by_col = zeros(n) # cache views for better memory management # make sure we don't try to access memory not allocated to us @assert(size(Y) == (k,d)) @assert(size(X) == (k,m)) # views of the columns of X corresponding to each example ve = [view(X,:,e) for e=1:m] # views of the column-chunks of Y corresponding to each feature y_j # vf[f] == Y[:,f] vf = [view(Y,:,yidxs[f]) for f=1:n] # views of the column-chunks of G corresponding to the gradient wrt each feature y_j # these have the same shape as y_j gf = [view(G,:,yidxs[f]) for f=1:n] # working variables newX = copy(X) newY = copy(Y) newve = [view(newX,:,e) for e=1:m] newvf = [view(newY,:,yidxs[f]) for f=1:n] for i=1:params.max_iter # STEP 1: X update # XY = X' * Y was computed above # reset step size if we're doing something more like alternating minimization if params.inner_iter_X > 1 || params.inner_iter_Y > 1 for ii=1:m alpharow[ii] = params.stepsize end for jj=1:n alphacol[jj] = params.stepsize end end for inneri=1:params.inner_iter_X Threads.@threads for e=1:m # for every example x_e == ve[e] # for e=1:m # for every example x_e == ve[e] g[Threads.threadid()] .= 0 # reset gradient to 0 # compute gradient of L with respect to Xᵢ as follows: # ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ for f in glrm.observed_features[e] # but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f]) if isa(curgrad, Number) axpy!(curgrad, vf[f], g[Threads.threadid()]) else # on v0.4: gemm!('N', 'T', 1.0, vf[f], curgrad, 1.0, g) gemm!('N', 'N', 1.0, vf[f], curgrad, 1.0, g[Threads.threadid()]) end end # take a proximal gradient step to update ve[e] l = length(glrm.observed_features[e]) + 1 # if each loss function has lipshitz constant 1 this bounds the lipshitz constant of this example's objective obj_by_row[e] = row_objective(glrm, e, ve[e]) # previous row objective value while alpharow[e] > params.min_stepsize stepsize = alpharow[e]/l # newx = prox(rx[e], ve[e] - stepsize*g, stepsize) # this will use much more memory than the inplace version with linesearch below ## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L axpy!(-stepsize,g[Threads.threadid()],newve[e]) ## prox step: Xᵢ = prox_rx(Xᵢ, α/l) prox!(rx[e],newve[e],stepsize) if row_objective(glrm, e, newve[e]) < obj_by_row[e] copyto!(ve[e], newve[e]) alpharow[e] *= 1.05 # choose a more aggressive stepsize break else # the stepsize was too big; undo and try again only smaller 
copyto!(newve[e], ve[e]) alpharow[e] *= .7 # choose a less aggressive stepsize if alpharow[e] < params.min_stepsize alpharow[e] = params.min_stepsize * 1.1 break end end end end # for e=1:m gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new X end # inner iteration # STEP 2: Y update for inneri=1:params.inner_iter_Y G .= 0 Threads.@threads for f=1:n # for f=1:n # compute gradient of L with respect to Yⱼ as follows: # ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ for e in glrm.observed_examples[f] # but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f]) if isa(curgrad, Number) axpy!(curgrad, ve[e], gf[f]) else # on v0.4: gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f]) gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f]) end end # take a proximal gradient step l = length(glrm.observed_examples[f]) + 1 obj_by_col[f] = col_objective(glrm, f, vf[f]) while alphacol[f] > params.min_stepsize stepsize = alphacol[f]/l # newy = prox(ry[f], vf[f] - stepsize*gf[f], stepsize) ## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L axpy!(-stepsize,gf[f],newvf[f]) ## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l) prox!(ry[f],newvf[f],stepsize) new_obj_by_col = col_objective(glrm, f, newvf[f]) if new_obj_by_col < obj_by_col[f] copyto!(vf[f], newvf[f]) alphacol[f] *= 1.05 obj_by_col[f] = new_obj_by_col break else copyto!(newvf[f], vf[f]) alphacol[f] *= .7 if alphacol[f] < params.min_stepsize alphacol[f] = params.min_stepsize * 1.1 break end end end end # for f=1:n gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new Y end # inner iteration # STEP 3: Record objective obj = sum(obj_by_col) t = time() - t update_ch!(ch, t, obj) t = time() # STEP 4: Check stopping criterion obj_decrease = ch.objective[end-1] - obj if i>10 && (obj_decrease < scaled_abs_tol || obj_decrease/obj < params.rel_tol) break end if verbose && i%10==0 println("Iteration $i: objective value = $(ch.objective[end])") end end return glrm.X, glrm.Y, ch end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
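The variant above parallelizes the row and column updates with Threads.@threads, so it only pays off when Julia is started with several threads (e.g. JULIA_NUM_THREADS=4). A sketch, assuming this is the fit! method the package loads:

using LowRankModels, Random

Threads.nthreads()   # number of threads available to the @threads loops above

Random.seed!(0)
A = randn(500, 20) * randn(20, 200)
glrm = GLRM(A, QuadLoss(), QuadReg(0.01), QuadReg(0.01), 20)
X, Y, ch = fit!(glrm, ProxGradParams(1.0, max_iter=50))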
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
4779
### Streaming method # only implemented for quadratic objectives # TODO: add quadratic regularization export StreamingParams, streaming_fit!, streaming_impute! mutable struct StreamingParams<:AbstractParams T0::Int # number of rows to use to initialize Y before streaming begins stepsize::Float64 # stepsize (inverse of memory) Y_update_interval::Int # how often to prox Y end function StreamingParams( T0::Int=1000; # number of rows to use to initialize Y before streaming begins stepsize::Number=1/T0, # (inverse of memory) Y_update_interval::Int=10 # how often to prox Y ) return StreamingParams(T0, convert(Float64, stepsize), Y_update_interval) end ### FITTING function streaming_fit!(glrm::GLRM, params::StreamingParams=StreamingParams(); ch::ConvergenceHistory=ConvergenceHistory("StreamingGLRM"), verbose=true) # make sure everything is quadratic @assert all(map(l->isa(l, QuadLoss), glrm.losses)) @assert all(map(l->isa(l, QuadReg), glrm.rx)) @assert all(map(l->isa(l, QuadReg), glrm.ry)) # initialize Y and first T0 rows of X init_glrm = keep_rows(glrm, params.T0) init_svd!(init_glrm) copy!(glrm.Y, init_glrm.Y) glrm.X[:, 1:params.T0] = init_glrm.X ### initialization A = glrm.A # rename these for easier local access rx = glrm.rx ry = glrm.ry X = glrm.X; Y = glrm.Y k = glrm.k m,n = size(A) # yscales = map(r->r.scale, ry) for i=params.T0+1:m # update x_i obs = glrm.observed_features[i] Yobs = Y[:, obs] Aobs = A[i, obs] X[:, i] = (Yobs * Yobs' + 2 * rx[i].scale * I) \ (Yobs * Aobs) xi = view(X, :, i) # update objective r = Yobs'*xi - Aobs push!(ch.objective, norm(r) ^ 2) # # update Y # TODO verify this is stochastic proximal gradient (with constant stepsize) for the problem # TODO don't prox Y at every iteration # TODO don't assume scales on all the rys are equal # gY[:, jj] = xi * r' == r[jj] * xi # gradient of ith row objective wrt Y for jj in 1:length(obs) Y[:,obs[jj]] -= params.stepsize * r[jj] * xi end if i%params.Y_update_interval == 0 # prox!(ry, Y, params.stepsize * params.Y_update_interval) Y ./= (1 + 2 * params.stepsize * params.Y_update_interval * ry[1].scale) end end return X, Y, ch end ### FITTING function streaming_impute!(glrm::GLRM, params::StreamingParams=StreamingParams(); ch::ConvergenceHistory=ConvergenceHistory("StreamingGLRM"), verbose=true) # make sure everything is quadratic @assert all(map(l->isa(l, QuadLoss), glrm.losses)) @assert all(map(l->isa(l, QuadReg), glrm.rx)) @assert all(map(l->isa(l, QuadReg), glrm.ry)) # initialize Y and first T0 rows of X init_glrm = keep_rows(glrm, params.T0) init_svd!(init_glrm) copy!(glrm.Y, init_glrm.Y) copy!(view(glrm.X, :, 1:params.T0), init_glrm.X) ### initialization A = glrm.A # rename these for easier local access Ahat = copy(glrm.A) rx = glrm.rx ry = glrm.ry X = glrm.X; Y = glrm.Y k = glrm.k m,n = size(A) # yscales = map(r->r.scale, ry) for i=params.T0+1:m # update x_i obs = glrm.observed_features[i] Yobs = Y[:, obs] Aobs = A[i, obs] X[:, i] = (Yobs * Yobs' + 2 * rx[i].scale * I) \ (Yobs * Aobs) xi = view(X, :, i) # impute not_obs = setdiff(Set(1:n), Set(obs)) if length(not_obs)>0 ahat = xi'*Y Ahat[i, not_obs] = ahat[not_obs] end # update objective r = Yobs'*xi - Aobs push!(ch.objective, norm(r) ^ 2) # # update Y # TODO verify this is stochastic proximal gradient (with constant stepsize) for the problem # TODO don't prox Y at every iteration # TODO don't assume scales on all the rys are equal # gY[:, jj] = xi * r' == r[jj] * xi # gradient of ith row objective wrt Y for jj in 1:length(obs) Y[:,obs[jj]] -= params.stepsize * r[jj] * xi 
end if i%params.Y_update_interval == 0 # prox!(ry, Y, params.stepsize * params.Y_update_interval) Y ./= (1 + 2 * params.stepsize * params.Y_update_interval * ry[1].scale) end end return Ahat end """ Constructs new GLRM on subset of rows of the data from input glrm """ function keep_rows(glrm, r::UnitRange{Int}) @assert maximum(r) <= size(glrm.A, 1) obs = flatten_observations(glrm.observed_features) first_row = minimum(r) if first_row > 1 new_obs = map( t -> (t[1]-first_row+1, t[2]), filter( t -> (t[1] in r), obs)) else new_obs = filter( t -> (t[1] in r), obs) end of, oe = sort_observations(new_obs, length(r), size(glrm.A, 2)) new_glrm = GLRM(glrm.A[r,:], glrm.losses, glrm.rx[r], glrm.ry, glrm.k, observed_features = of, observed_examples = oe) return new_glrm end keep_rows(glrm, T::Int) = keep_rows(glrm, 1:T)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
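A hedged sketch of the streaming fit above; per the assertions in the file it requires quadratic losses and regularizers, and the sizes and parameters below are illustrative.

using LowRankModels, Random

Random.seed!(0)
A = randn(5000, 20) * randn(20, 50) .+ 0.01 .* randn(5000, 50)
glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 20)

sp = StreamingParams(500, stepsize=1/500, Y_update_interval=10)
X, Y, ch = streaming_fit!(glrm, sp)   # initializes on the first 500 rows, then streams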
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
5359
### Proximal gradient method export SparseProxGradParams, fit! mutable struct SparseProxGradParams<:AbstractParams stepsize::Float64 # initial stepsize max_iter::Int # maximum number of outer iterations inner_iter::Int # how many prox grad steps to take on X before moving on to Y (and vice versa) abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize end function SparseProxGradParams(stepsize::Number=1.0; # initial stepsize max_iter::Int=100, # maximum number of outer iterations inner_iter::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa) abs_tol::Float64=0.00001, # stop if objective decrease upon one outer iteration is less than this min_stepsize::Float64=0.01*stepsize) # stop if stepsize gets this small stepsize = convert(Float64, stepsize) return SparseProxGradParams(stepsize, max_iter, inner_iter, abs_tol, min_stepsize) end ### FITTING function fit!(glrm::GLRM, params::SparseProxGradParams; ch::ConvergenceHistory=ConvergenceHistory("SparseProxGradGLRM"), verbose=true, kwargs...) println(params) ### initialization A = glrm.A # rename these for easier local access losses = glrm.losses rx = glrm.rx ry = glrm.ry # at any time, glrm.X and glrm.Y will be the best model yet found, while # X and Y will be the working variables X = copy(glrm.X); Y = copy(glrm.Y) k = glrm.k m,n = size(A) # check that we didn't initialize to zero (otherwise we will never move) if norm(Y) == 0 Y = .1*randn(k,n) end # step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem) alpha = params.stepsize # stopping criterion: stop when decrease in objective < tol tol = params.abs_tol * mapreduce(length,+,glrm.observed_features) # alternating updates of X and Y if verbose println("Fitting GLRM") end update_ch!(ch, 0, objective(glrm; sparse=true)) t = time() steps_in_a_row = 0 g = zeros(k) # cache views ve = [view(X,:,e) for e=1:m] vf = [view(Y,:,f) for f=1:n] for i=1:params.max_iter # STEP 1: X update for inneri=1:params.inner_iter for e=1:m # doing this means looping over XY in row-major order, but otherwise we couldn't parallelize over Xᵢs rmul!(g, 0)# reset gradient to 0 # compute gradient of L with respect to Xᵢ as follows: # ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ for f in glrm.observed_features[e] # but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function # our estimate for A[e,f] is given by dot(ve[e],vf[f]) axpy!(grad(losses[f],dot(ve[e],vf[f]),A[e,f]), vf[f], g) end # take a proximal gradient step l = length(glrm.observed_features[e]) + 1 rmul!(g, -alpha/l) ## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L axpy!(1,g,ve[e]) ## prox step: Xᵢ = prox_rx(Xᵢ, α/l) prox!(rx[e],ve[e],alpha/l) end end # STEP 2: Y update for inneri=1:params.inner_iter for f=1:n rmul!(g, 0) # reset gradient to 0 # compute gradient of L with respect to Yⱼ as follows: # ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ for e in glrm.observed_examples[f] # but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du # by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function axpy!(grad(losses[f],dot(ve[e],vf[f]),A[e,f]), ve[e], g) end # take a proximal gradient step l = length(glrm.observed_examples[f]) + 1 rmul!(g, -alpha/l) ## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L axpy!(1,g,vf[f]) ## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l) prox!(ry[f],vf[f],alpha/l) end end # STEP 3: Check objective 
obj = objective(glrm, X, Y; sparse=true) # record the best X and Y yet found if obj < ch.objective[end] t = time() - t update_ch!(ch, t, obj) copy!(glrm.X, X); copy!(glrm.Y, Y) # save new best X and Y alpha = alpha * 1.05 steps_in_a_row = max(1, steps_in_a_row+1) t = time() else # if the objective went up, reduce the step size, and undo the step alpha = alpha / max(1.5, -steps_in_a_row) if verbose println("obj went up to $obj; reducing step size to $alpha") end copy!(X, glrm.X); copy!(Y, glrm.Y) # revert back to last X and Y steps_in_a_row = min(0, steps_in_a_row-1) end # STEP 4: Check stopping criterion if i>10 && (steps_in_a_row > 3 && ch.objective[end-1] - obj < tol) || alpha <= params.min_stepsize break end if verbose && i%10==0 println("Iteration $i: objective value = $(ch.objective[end])") end end t = time() - t update_ch!(ch, t, ch.objective[end]) return glrm.X, glrm.Y, ch end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
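A sketch of the sparse proximal-gradient variant above on a sparse matrix, treating the stored entries as the observations; sizes and parameters are illustrative.

using LowRankModels, SparseArrays, Random

Random.seed!(0)
S = sprandn(200, 100, 0.05)        # ~5% of entries stored
is, js, _ = findnz(S)
obs = collect(zip(is, js))         # stored entries are the observed ones
glrm = GLRM(S, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 5, obs=obs)

params = SparseProxGradParams(1.0, max_iter=100, inner_iter=2)
X, Y, ch = fit!(glrm, params)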
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2469
############################################################## ### copying ############################################################## import Base.copy export copy, copy_estimate, GLRM for T in :[Loss, Regularizer, AbstractGLRM].args @eval function copy(r::$T) fieldvals = [getfield(r, f) for f in fieldnames(typeof(r))] return typeof(r)(fieldvals...) end end # points to all the same problem data as the original input GLRM, # but copies the estimate of the model parameters function copy_estimate(g::GLRM) return GLRM(g.A,g.losses,g.rx,g.ry,g.k, g.observed_features,g.observed_examples, copy(g.X),copy(g.Y)) end # domains are struct, so this is ok copy(d::Domain) = d ############################################################## ### fill singleton losses and regularizers to the right shapes ############################################################## # fill an array of length n with copies of the object foo fillcopies(foo, n::Int; arraytype=typeof(foo)) = arraytype[copy(foo) for i=1:n] # singleton loss: GLRM(A, loss::Loss, rx::Array, ry::Regularizer, k::Int; kwargs...) = GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), rx, fillcopies(ry, size(A, 2), arraytype=Regularizer), k; kwargs...) GLRM(A, loss::Loss, rx::Regularizer, ry::Array, k::Int; kwargs...) = GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), fillcopies(rx, size(A, 1), arraytype=Regularizer), ry, k; kwargs...) GLRM(A, loss::Loss, rx::Array, ry::Array, k::Int; kwargs...) = GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), rx, ry, k; kwargs...) # singleton regularizer on x and/or y: GLRM(A, losses::Array, rx::Regularizer, ry::Array, k::Int; kwargs...) = GLRM(A, losses, fillcopies(rx, size(A, 1), arraytype=Regularizer), ry, k::Int; kwargs...) GLRM(A, losses::Array, rx::Array, ry::Regularizer, k::Int; kwargs...) = GLRM(A, losses, rx, fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...) GLRM(A, losses::Array, rx::Regularizer, ry::Regularizer, k::Int; kwargs...) = GLRM(A, losses, fillcopies(rx, size(A, 1), arraytype=Regularizer), fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...) # singleton everything GLRM(A, loss::Loss, rx::Regularizer, ry::Regularizer, k::Int; kwargs...) = GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), fillcopies(rx, size(A, 1), arraytype=Regularizer), fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
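A brief sketch of the singleton-to-array promotion above: passing one loss and one regularizer to GLRM produces arrays of copies of the right lengths.

using LowRankModels

A = randn(30, 12)
g = GLRM(A, HuberLoss(), QuadReg(0.5), QuadReg(0.5), 3)

@assert length(g.losses) == size(A, 2)   # one (copied) loss per column
@assert length(g.rx) == size(A, 1)       # one (copied) regularizer per row of X'
@assert length(g.ry) == size(A, 2)       # one (copied) regularizer per column of Y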
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1166
using Base: depwarn Base.@deprecate GLRM(A::AbstractArray, obs::Array{Tuple{Int, Int}, 1}, args...; kwargs...) GLRM(A, args...; obs = obs, kwargs...) Base.@deprecate ProxGradParams(s::Number,m::Int,c::Float64,ms::Float64) ProxGradParams(s, max_iter=m, abs_tol=c, min_stepsize=ms) Base.@deprecate expand_categoricals expand_categoricals! Base.@deprecate errors(g::GLRM) error_metric(g) Base.@deprecate quadratic QuadLoss Base.@deprecate logistic LogisticLoss Base.@deprecate huber HuberLoss Base.@deprecate LogLoss LogisticLoss Base.@deprecate l1 L1Loss Base.@deprecate poisson PoissonLoss Base.@deprecate ordinal_hinge OrdinalHingeLoss Base.@deprecate OrdinalHinge OrdinalHingeLoss Base.@deprecate WeightedHinge WeightedHingeLoss Base.@deprecate periodic PeriodicLoss Base.@deprecate quadreg QuadReg Base.@deprecate constrained_quadreg QuadConstraint Base.@deprecate onereg OneReg Base.@deprecate zeroreg ZeroReg Base.@deprecate nonnegative NonNegConstraint Base.@deprecate onesparse OneSparseConstraint Base.@deprecate unitonesparse UnitOneSparseConstraint Base.@deprecate simplex SimplexConstraint Base.@deprecate nonneg_onereg NonNegOneReg
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1065
using LowRankModels, Random
using LinearAlgebra: norm

# tests basic functionality of glrm.jl

Random.seed!(1);
m,n,k,s = 100,100,5,100*100;

# matrix to encode
X_real, Y_real = randn(m,k), randn(k,n);
A = X_real*Y_real;

losses = fill(QuadLoss(),n)
rx, ry = ZeroReg(), ZeroReg();
glrm = GLRM(A,losses,rx,ry,5, scale=false, offset=false, X=randn(k,m), Y=randn(k,n));

p = Params(1, max_iter=200, abs_tol=0.0000001, min_stepsize=0.001)
@time X,Y,ch = fit!(glrm, params=p, verbose=false);
Ah = X'*Y;
# the recorded final objective should match ||A - X'Y||^2 up to the solver tolerance
p.abs_tol > abs(norm(A-Ah)^2 - ch.objective[end])

function validate_folds(trf,tre,tsf,tse)
    for i=1:length(trf)
        if length(intersect(Set(trf[i]), Set(tsf[i]))) > 0
            println("Error on example $i: train and test sets overlap")
        end
    end
    for i=1:length(tre)
        if length(intersect(Set(tre[i]), Set(tse[i]))) > 0
            println("Error on feature $i: train and test sets overlap")
        end
    end
    true
end

obs = LowRankModels.flatten_observations(glrm.observed_features)
folds = LowRankModels.getfolds(obs, 5, size(glrm.A)..., do_check = false)
for i in 1:length(folds)
    @assert validate_folds(folds[i]...)
end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2757
using LowRankModels Random.seed!(1); test_losses = Loss[ QuadLoss(), L1Loss(), HuberLoss(), PeriodicLoss(1), OrdinalHingeLoss(1,10), LogisticLoss(), WeightedHingeLoss() ] for test_iteration = 1:500 # Create the configuration for the model (random losses) config = round.(Int, abs(round(4*rand(length(test_losses))))); #config = [0 0 1 0 10 0 100] losses, doms = Array(Loss,1), Array(Domain,1); for (n,l) in zip(config, test_losses) for i=1:n push!(losses, l); push!(doms, l.domain); end end losses, doms = losses[2:end], doms[2:end]; # this is because the initialization leaves us with an #undef my_error_metric(glrm::GLRM, X::Array{Float64,2}, Y::Array{Float64,2}) = error_metric(glrm, X, Y, doms, standardize=true) # embed the domains into the error function. # Make a low rank matrix as our data precursor m, n, true_k = 100, length(doms), round.(Int, round(length(losses)/2))+1; X_real, Y_real = randn(true_k,m), randn(true_k,n); A_real = X_real'*Y_real; # Impute over the low rank-precursor to make our heterogenous dataset A = impute(doms, losses, A_real); # our data with noise # Create a glrm using these losses and data p = Params(1e-2, max_iter=1000, abs_tol=0.00000001, min_stepsize=1e-15) rx, ry = ZeroReg(), ZeroReg(); k_range = [int(round(true_k/2)), true_k] train_err_at_k = Dict() println("\n########################################################") println("Model:\nlosses = $(losses)\nrx,ry = $(rx), $(ry)") for k in k_range model = GLRM(A, losses, rx,ry,k, scale=false, offset=false); println("\nk = $(k)") # Test that our imputation is consistent if my_error_metric(model, X_real, Y_real) != 0 error("Imputation failed.") end real_obj = objective(model, X_real, Y_real, include_regularization=false); X_fit,Y_fit,ch = fit!(model, params=p, verbose=false); println("Starting objective: $(ch.objective[1])\t Ending objective: $(ch.objective[end])") train_err, test_err, trainers, testers = cross_validate(model, nfolds=3, error_fn=my_error_metric, verbose=false); train_err_at_k[k] = my_error_metric(model, model.X, model.Y) println("train err: $train_err") println("Test err: $test_err") end train_err_at_k = [train_err_at_k[k] for k in k_range] if !all([de<0 for de in diff(train_err_at_k)]) #check that errors monotonically decrease as k increases @warn("==================================================================================") @warn("ERRORS WENT UP FOR THIS CONFIGURATION") @warn("Model:\nlosses = $(losses)\nrx,ry = $(rx), $(ry)") @warn("Ranks: $(string(k_range))") @warn("Training errors: $(string(train_err_at_k))") @warn("==================================================================================") end end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
452
using LowRankModels

m,n,k = 10,20,3
Y = rand(3,20)
A = rand(10,20)
ry = Regularizer[fixed_latent_features(Y[:,i]) for i=1:n]
glrm = GLRM(A, QuadLoss(), SimplexConstraint(), ry, k+1)
X, Yp, ch = fit!(glrm)
# the first k rows of the fitted Y must equal the fixed latent features
@assert(Yp[1:k,:] == Y)

m,n,k = 10,20,3
Y = rand(3,20)
A = rand(10,20)
ry = Regularizer[fixed_last_latent_features(Y[:,i]) for i=1:n]
glrm = GLRM(A, QuadLoss(), SimplexConstraint(), ry, k+1)
X, Yp, ch = fit!(glrm)
# the last k rows of the fitted Y must equal the fixed latent features
@assert(Yp[2:end,:] == Y)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2823
using LowRankModels, DataFrames, Random, SparseArrays Random.seed!(0) # loss types to test real_loss_types = [QuadLoss, HuberLoss] bool_loss_types = [HingeLoss] ordinal_loss_types = [OrdinalHingeLoss, BvSLoss] categorical_loss_types = [MultinomialLoss, OvALoss] #instantiate losses ncat = 4 # maximum categorical levels nord = 5 # maximum ordinal levels real_losses = [l() for l in real_loss_types] bool_losses = [l() for l in bool_loss_types] ordinal_losses = [l(rand(3:nord)) for l in ordinal_loss_types] categorical_losses = [l(rand(3:ncat)) for l in categorical_loss_types] losses = [real_losses..., bool_losses..., ordinal_losses..., categorical_losses...] data_types = cat([:real for l in real_losses], [:bool for l in bool_losses], [:ord for l in ordinal_losses], [:cat for l in categorical_losses], dims=1) # scale losses for different columns for loss in losses mul!(loss, rand()) end # regularizers to test regularizers = [QuadReg(), OneReg(5), NonNegConstraint(), KSparseConstraint(2)] # add more regularizers = more rows so the data isn't degenerate regularizers = cat(regularizers, fill(QuadReg(), 10), dims=1) m,n = length(regularizers), length(losses) A_real = rand(m, length(real_losses)) A_bool = rand(Bool, m, length(bool_losses)) A_ord = rand(1:5, m, length(ordinal_losses)) A_cat = rand(1:3, m, length(categorical_losses)) # without saying "Any", upconverts to array of Floats A = Any[A_real A_bool A_ord A_cat] glrm = GLRM(A, losses, regularizers, QuadReg(), 2) fit!(glrm, verbose=false) println("successfully fit matrix") Ω = [(rand(1:m), rand(1:n)) for iobs in 1:(5*max(m,n))] # observe some random entries, with replacement glrm = GLRM(A, losses, regularizers, QuadReg(), 2, obs=Ω); fit!(glrm, verbose=false) println("successfully fit matrix with some entries unobserved") ### now fit data frame A_sparse = sprandn(10, 10, .5) df = NaNs_to_Missing!(DataFrame(Array(0 ./ A_sparse + A_sparse))) # explicitly encoding missing obs = observations(df) glrm = GLRM(df, QuadLoss(), QuadReg(), QuadReg(), 2, obs=obs) fit!(glrm, verbose=false) # implicitly encoding missings from dataframe - this functionality has not been implemented for dataframes # glrm = GLRM(df, QuadLoss(), QuadReg(), QuadReg(), 2) # fit!(glrm, verbose=false) # without specifying losses directly glrm = GLRM(DataFrame(A), 3, data_types) fit!(glrm, verbose=false) println("successfully fit dataframe") ### imputation and sampling impute(glrm) println("successfully imputed entries") sample(glrm) sample_missing(glrm) println("successfully sampled from model") ### now fit sparse matrix m, n = 10, 10 sparseA = sprandn(m, n, .5) glrm = GLRM(A, QuadLoss(), QuadReg(), QuadReg(), 5) fit!(glrm, verbose=false) println("successfully fit sparse GLRM")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1937
using Test using LowRankModels ## Tests for NNDSVD initialization # apply init_nnmf! to random dataset m,n,k = 10,5,3 A = rand(m,k)*rand(k,n) losses = fill(QuadLoss(),n) r = NonNegConstraint() glrm = GLRM(A,losses,r,r,k) init_nndsvd!(glrm) # Test dims and nonnegativity of X,Y @test(all(glrm.X .>= 0.0)) @test(all(glrm.Y .>= 0.0)) @test(size(glrm.X,1) == k) @test(size(glrm.X,2) == m) @test(size(glrm.Y,1) == k) @test(size(glrm.Y,2) == n) # validate against NMF.jl A = [ 0.245774 0.481246 0.293614 0.528272 0.608255 0.906146 0.0847498 0.963121 0.113728 0.552843 0.269553 0.830981 0.067466 0.854045 0.170701 0.781552 0.0302068 0.709045 0.578069 0.542792 0.824889 0.538719 0.881363 0.229199 0.356273 0.0530284 0.618962 0.96237 0.0877032 0.921746 0.295122 0.626784 0.348475 0.299937 0.35043 0.0499904 0.728344 0.573141 0.850758 0.425369 0.872088 0.322181 0.903238 0.695946 0.706841 0.542786 0.581426 0.0477561 0.601374 0.176598 ] # Wt and H produced by NMF.jl Wt = [0.25814 0.34329 0.24898 0.33489 0.35953 0.33798 0.23164 0.31705 0.43949 0.22484 0.24305 0.0 0.71244 0.0 0.0 0.0 0.19987 0.49127 0.0 0.38994 0.13531 0.0 0.0 0.0 0.0 0.94666 0.13278 0.26056 0.0 0.0] H = [1.60738 1.42165 1.97295 1.47396 1.57709 0.0 0.67822 0.0 0.6408 0.0 0.0 0.23296 0.27065 0.0 0.35102] # initialize glrm and check output glrm.A = A init_nndsvd!(glrm; scale=false) @test(all(round(glrm.X,5) .== Wt)) @test(all(round(glrm.Y,5) .== H)) # Test with missing entries for A obs = [(1,1),(1,3),(1,5), (2,1),(2,2),(2,4), (3,2),(3,3),(3,5), (4,1),(4,2),(4,5), (5,2),(5,3),(5,5), (6,1),(6,2),(6,5), (7,1),(7,3),(7,4), (8,2),(8,3),(8,4), (9,1),(9,3),(9,5), (10,3),(10,4),(10,5)] glrm = GLRM(A,losses,r,r,k,obs=obs) init_nndsvd!(glrm; max_iters=5)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2207
using LowRankModels # test losses in losses.jl Random.seed!(1); losses = [ QuadLoss(), QuadLoss(10), L1Loss(), L1Loss(5.2), HuberLoss(), HuberLoss(4), HuberLoss(3.1, crossover=3.2), PeriodicLoss(2*pi), PeriodicLoss(2*pi, 4), PoissonLoss(20), PoissonLoss(22,4.1), OrdinalHingeLoss(1,10), OrdinalHingeLoss(2,7,5), LogisticLoss(), LogisticLoss(0.2), WeightedHingeLoss(), WeightedHingeLoss(11), WeightedHingeLoss(1.5, case_weight_ratio=4.3), MultinomialLoss(4), MultinomialLoss(6, .5), # OrdisticLoss(5), MultinomialOrdinalLoss(3) ] #tests what should be successful constructions # TODO: do some bad constructions and test that they fail with catches bad_losses = [ :(QuadLoss(10,RealDomain)), :(HuberLoss(3.1, 3.2)), :(PeriodicLoss(scale=2*pi)), :(PeriodicLoss(2*pi, scale=4)), :(PeriodicLoss()) ] for expression in bad_losses try eval(expression); println("test FAILED for $expression") catch println("test PASSED for $expression (failed to construct)") end end m,n,k = 1000, length(losses), 5; d = embedding_dim(losses) X_real, Y_real = 2*randn(m,k), 2*randn(k,d); XY_real = X_real*Y_real; # tests default imputations and implicit domains # we can visually inspect the differences between A and A_real to make sure imputation is right A = impute(losses, XY_real) regscale = 1 yregs = Array(Regularizer, length(losses)) for i=1:length(losses) if typeof(losses[i]) == MultinomialOrdinalLoss || typeof(losses[i]) == OrdisticLoss yregs[i] = OrdinalReg(QuadReg(regscale)) else yregs[i] = QuadReg(regscale) end end # tests all the M-estimators with scale=false, offset=false glrm = GLRM(A, losses, QuadReg(regscale), yregs, 5, scale=false, offset=false); # interestingly adding an offset to a model with multidimensional ordinal data causes a segfault # but let's test the offset for everything but ordinals # oops we still get a segfault... # tamecols = [typeof(losses[i]) !== MultinomialOrdinalLoss && # typeof(losses[i]) !== OrdisticLoss # for i=1:length(losses)] # glrm = GLRM(A[:, tamecols], # losses[tamecols], # QuadReg(regscale), # yregs[tamecols], # 5, scale=false, offset=true) # tests eval and grad @time X,Y,ch = fit!(glrm); # tests initialization init_svd!(glrm)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
646
using LowRankModels
using Test
using Random, Statistics, Printf  # needed for seed!, mean, and @printf

# Parameters
n = 200
m = 200
r = 5
eta = 0.01
delta = 1e-3

# Generate problem
Random.seed!(1)
Um = randn(r, n)
Vm = randn(r, m)
U = Um .+ sqrt(eta) * randn(r, n)
V = Vm .+ sqrt(eta) * randn(r, m)
Y = U' * V + sqrt(delta) * randn(n, m)

# Run algorithm
glrm = GLRM(Y, QuadLoss(),
            [RemQuadReg(50, Um[:, i]) for i = 1:n],
            [RemQuadReg(50, Vm[:, j]) for j = 1:m], r)
Uh, Vh, iter_info = fit!(glrm)
mseU, mseV, mseY = mean((U - Uh).^2), mean((V - Vh).^2), mean((Y - Uh' * Vh).^2)
@printf("MSE(U) = %.4g, MSE(V) = %.4g, MSE(Y) = %.4g\n", mseU, mseV, mseY)

# Perform some tests
@test mseU < 1e-3
@test mseV < 1e-3
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1306
using LowRankModels
using Plotly

test_losses = Loss[
    QuadLoss(),
    L1Loss(),
    HuberLoss(),
    PeriodicLoss(1),
    OrdinalHingeLoss(1,10),
    WeightedHingeLoss(),
    LogisticLoss(),
]

#for test_iteration = 1:5
# Create the configuration for the model (random losses)
config = round.(Int, abs.(round.(5*rand(length(test_losses)))));
# config = [1,1,1,1,1,1,10]
losses, doms = Array{Loss}(undef, 1), Array{Domain}(undef, 1);
for (n,l) in zip(config, test_losses)
    for i=1:n
        push!(losses, l);
        push!(doms, l.domain);
    end
end
losses, doms = losses[2:end], doms[2:end]; # this is because the initialization leaves us with an #undef
# losses = Array{Loss}(undef, 20)
# fill!(losses, QuadLoss())
# doms = Domain[l.domain for l in losses]

# Make a low rank matrix as our data precursor
m, n, true_k = 1000, length(doms), round(Int, length(losses)/2);
X_real, Y_real = 2*randn(m,true_k), 2*randn(true_k,n);
A_real = X_real*Y_real;

# Impute over the low-rank precursor to make our heterogeneous dataset
A = impute(doms, losses, A_real); # our imputed data

p = Params(1, max_iter=1000, abs_tol=0.000001, min_stepsize=0.001);
rx, ry = ZeroReg(), ZeroReg();
skip = 5
k0 = skip
model = GLRM(A, losses, rx, ry, k0, scale=false, offset=false);
X_fit, Y_fit, ch = fit!(model, params=p, verbose=false);
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
938
using LowRankModels using Distributions p = Params(0.00001,min_stepsize=0.00000000001,max_iter=5000) # This is just to create a low-rank representation of some count data, I don't care how accurate it is. m,n = 100, 50; k = 2; A = rand(Poisson(2), m, n); # Data is Poisson with mean 2 losses = convert(Array{Loss,1}, fill(PoissonLoss(10),n)); rx, ry = QuadReg(), QuadReg(); g_pre = GLRM(A, losses, rx, ry, k, scale=false, offset=false); # let's check a different syntax works, too g_pre = GLRM(A, PoissonLoss(), rx, ry, k, scale=false, offset=false); X_real, Y_real, ch = fit!(g_pre, params=p); # Now we do the actual model using the perfect data and try to recapture it. A_real = impute(losses, X_real'*Y_real); g = GLRM(A_real, losses, rx, ry, k, scale=true, offset=true); X, Y, ch = fit!(g, params=p); U = X'*Y; A_imputed = impute(losses, X'*Y); @show error_metric(g) errors(Domain[l.domain for l in losses], losses, U, A_real);
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
325
using LowRankModels
using Test

TOL = 1e-5

# QuadConstraint
r = QuadConstraint(7)
@test evaluate(r, ones(7)) == 0
@test evaluate(r, ones(100)) == Inf
@test prox(r, ones(100), 1) ≈ ones(100)/sqrt(100)*7

r = KSparseConstraint(3)
u = [-1,2,-3,4,-5.]
@test evaluate(r, u) == Inf
@test prox!(r, u, 1) == [0,0.,-3,4,-5.]
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1260
using LowRankModels
using Test, Random, SparseArrays
using LinearAlgebra: norm

###############################################################
# verify basic functionality works
include("basic_functionality.jl")

###############################################################
# verify most losses, regularizers, and ways of calling glrm work
include("hello_world.jl")

################################################################################
# ScikitLearnBase test
import ScikitLearnBase

# Check that KMeans can correctly separate two non-overlapping Gaussians
Random.seed!(21)
gaussian1 = randn(100, 2) .+ 5.
gaussian2 = randn(50, 2) .- 10.
A = vcat(gaussian1, gaussian2)
model = ScikitLearnBase.fit!(LowRankModels.KMeans(), A)
@test Set(sum(ScikitLearnBase.transform(model, A), dims=1)) == Set([100, 50])

###############################################################
# test sparse and streaming functionality
include("sparse_test.jl")
include("streaming_test.jl")

###############################################################
# verify all examples run
include("../examples/runexamples.jl")

###############################################################
# verify all tests of specific loss functions run
include("prob_tests/runtests.jl")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
467
using Distributed
@everywhere using LowRankModels
@everywhere using Random
using Test

function fit_pca(m,n,k)
    # matrix to encode
    Random.seed!(1)
    # generate a matrix with rank k
    A = randn(m,k)*randn(k,n)
    # fit a PCA model with rank k
    glrm = pca(A, k)
    glrm = share(glrm)
    p = Params()
    # just do 10 iterations
    p.max_iter = 10
    X,Y,ch = fit!(glrm)
    return A,X,Y,ch
end

@everywhere Random.seed!(1)
A,X,Y,ch = fit_pca(100,100,50)

# make sure objective went down
@test ch.objective[end] < ch.objective[1]
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
993
using Test using LowRankModels, Random, SparseArrays # Check that sparse algorithm converges to the same solution # from the same initial conditions for simple pca. m,n,k = 100,100,3 A = randn(m,k)*randn(k,n) loss = QuadLoss() r = ZeroReg() glrm_1 = GLRM(A,loss,r,r,k) # solve with prox algorithm glrm_2 = deepcopy(glrm_1) # solve with sparse prox algorithm X1,Y1,ch1 = fit!(glrm_1,ProxGradParams()) X2,Y2,ch2 = fit!(glrm_2,SparseProxGradParams()) @test_broken X1 ≈ X2 @test_broken Y1 ≈ Y2 # Check that the sparsity pattern in the data is correctly identified. A = sprand(m,n,0.5) glrm = GLRM(A,loss,r,r,k) # create glrm from sparse matrix @test length(glrm.observed_features) == m @test length(glrm.observed_examples) == n for i = 1:m for j = 1:n if j in glrm.observed_features[i] @test A[i,j] != 0.0 else @test A[i,j] == 0.0 end end end for j = 1:n for i = 1:m if i in glrm.observed_examples[j] @test A[i,j] != 0.0 else @test A[i,j] == 0.0 end end end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1446
using LowRankModels import LowRankModels: keep_rows import StatsBase: sample, Weights ## generate data Random.seed!(1); m,n,k = 2000,30,3; p = .05 # probability of observing any given matrix entry kfit = k+1 # variance of measurement sigmasq = .1 # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) XY = X_real*Y_real; A = XY + sqrt(sigmasq)*randn(m,n) # missing values M = sprand(m,n,p) obs = findall(!iszero, M) # observed indices (CartesianIndices) # and the model losses = QuadLoss() rx, ry = QuadReg(1), QuadReg(1); glrm = GLRM(A,losses,rx,ry,kfit); T = 1000 println("SVD initialization") @time init_svd!(glrm) X0, Y0 = copy(glrm.X), copy(glrm.Y) svd_obj = objective(keep_rows(glrm, (T+1):m), include_regularization=false) println("Streaming fit") @time streaming_fit!(glrm, StreamingParams(T, Y_update_interval=100)) streaming_obj = objective(keep_rows(glrm, (T+1):m), include_regularization=false) println("Standard fit") # glrm.X, glrm.Y = X0, Y0 @time fit!(glrm) standard_obj = objective(keep_rows(glrm, (T+1):m), include_regularization=false) println("Streaming GLRM performs ", round(streaming_obj / svd_obj; digits=2), " times worse than SVD initialization") println("Streaming GLRM performs ", round(streaming_obj / standard_obj; digits=2), " times worse than standard GLRM") println("Streaming impute") @time streaming_impute!(glrm, StreamingParams(T, Y_update_interval=100))
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
204
using LowRankModels
using SparseArrays  # for sprandn

A = sprandn(100,100,.5)
glrm = qpca(A, 3)
fit!(glrm)
A_sampled = sample(glrm)
A_sample_missing = sample_missing(glrm)

# tests
obs = .!(A .== 0)   # observed (nonzero) entries
@assert (A[obs] == A_sample_missing[obs])
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
3020
using LowRankModels, Random import StatsBase: sample, Weights import LinearAlgebra: norm # test ordistic loss ## generate data Random.seed!(1); m,n,k = 100,100,3; kfit = k+1 nlevels = 5; # number of levels d = nlevels-1 # embedding dimension D = n*d; # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) # measurement thresholds T_real = k*randn(d,n) # notice x^T y has variance k; so making this bigger makes the problem easier for j=1:n # this scheme doesn't work to ensure uniform sampling T_real[:,j] = sort(T_real[:,j]) end signedsums = Array{Float64}(undef, d, nlevels) for i=1:d for j=1:nlevels signedsums[i,j] = i<j ? 1 : -1 end end XY = X_real*Y_real; XYplusT = zeros(Float64, (m,D)) A = zeros(Int, (m, n)) for i=1:m for j=1:n u = XY[i,j] .+ T_real[:,j] XYplusT[i,(j-1)*d .+ (1:d)] = u diffs = u'*signedsums wv = Weights(Float64[exp(-diffs[l]) for l in 1:nlevels]) l = sample(wv) A[i,j] = l end end # loss is insensitive to shifts; regularizer should pick this shift XYplusT = XYplusT .- mean(XYplusT, dims=2) # and the model losses = BvSLoss(nlevels) rx, ry = lastentry1(QuadReg(.01)), OrdinalReg(QuadReg(.01)) #lastentry_unpenalized(QuadReg(10)); glrm = GLRM(A,losses,rx,ry,kfit, scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,D)); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y#[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After initialization with the svd, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After initialization with the svd, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y#[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs.(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # test scaling mul!(glrm.ry, 3)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2101
using LowRankModels, Random import StatsBase: sample, Weights import LinearAlgebra: norm # test quadratic loss ## generate data Random.seed!(1); m,n,k = 1000,1000,3; kfit = k+1 # variance of measurement sigmasq = .1 # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) XY = X_real*Y_real; A = zeros(Int, (m, n)) logistic(x) = 1/(1+exp(-x)) for i=1:m for j=1:n A[i,j] = (logistic(XY[i,j]) >= rand()) ? true : false end end # and the model losses = LogisticLoss() rx, ry = QuadReg(.1), QuadReg(.1); glrm = GLRM(A,losses,rx,ry,kfit) #scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,n)); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs.(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, 50% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs.(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, 50% of entries would be wrong.)\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs.(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, 50% of entries would be wrong.)\n")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2085
using LowRankModels, Random import StatsBase: sample, Weights, mean import LinearAlgebra: norm # tests MNL loss Random.seed!(1); m,n,k = 200,50,2; kfit = k+1 K = 4; # number of categories d = n*K; # matrix to encode X_real, Y_real = randn(m,k), randn(k,d); XY = X_real*Y_real; # subtract the mean so we can compare the truth with the fit; # the loss function is invariant under shifts losses = fill(MultinomialLoss(K),n) yidxs = get_yidxs(losses) for i=1:m for j=1:n mef = mean(XY[i,yidxs[j]]) XY[i,yidxs[j]] = XY[i,yidxs[j]] .- mef end end A = zeros(Int, (m, n)) for i=1:m for j=1:n wv = Weights(Float64[exp(-XY[i, K*(j-1) + l]) for l in 1:K]) l = sample(wv) A[i,j] = l end end # and the model losses = fill(MultinomialLoss(K),n) rx, ry = QuadReg(), QuadReg(); glrm = GLRM(A,losses,rx,ry,kfit, scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,d)); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After initialization with the svd, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
3143
using LowRankModels, Random import StatsBase: sample, Weights, mean import LinearAlgebra: norm # test MNL Ordinal loss ## generate data Random.seed!(1); m,n,k = 100,100,2; kfit = k+1 nlevels = 5; # number of levels d = nlevels-1 # embedding dimension D = n*d; # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) # measurement thresholds T_real = k*randn(d,n) # notice x^T y has variance k; so making this bigger makes the problem easier for j=1:n # this scheme doesn't work to ensure uniform sampling T_real[:,j] = sort(T_real[:,j]) end signedsums = Array{Float64}(undef, d, nlevels) for i=1:d for j=1:nlevels signedsums[i,j] = i<j ? 1 : -1 end end XY = X_real*Y_real; XYplusT = zeros(Float64, (m,D)) A = zeros(Int, (m, n)) for i=1:m for j=1:n u = XY[i,j] .+ T_real[:,j] XYplusT[i,(j-1)*d .+ (1:d)] = u diffs = u'*signedsums wv = Weights(Float64[exp(-diffs[l]) for l in 1:nlevels]) l = sample(wv) A[i,j] = l end end # loss is insensitive to shifts; regularizer should pick this shift XYplusT = XYplusT .- mean(XYplusT, dims=2) # and the model losses = fill(MultinomialOrdinalLoss(nlevels),n) rx, ry = lastentry1(QuadReg(.01)), MNLOrdinalReg(QuadReg(.01)) #lastentry_unpenalized(QuadReg(10)); Yord = randn(kfit,D) yidxs = get_yidxs(losses) for j=1:n prox!(ry, view(Yord,:,yidxs[j]), 1) end glrm = GLRM(A,losses,rx,ry,kfit, scale=false, offset=false, X=randn(kfit,m), Y=Yord); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y#[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs.(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After initialization with the svd, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After initialization with the svd, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y#[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XYplusT - XYh)/sqrt(prod(size(XYplusT)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((nlevels-1)/nlevels*100)% of entries would be wrong.)\n") # test scaling mul!(glrm.ry, 3)
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2655
using LowRankModels, Random import StatsBase: sample, Weights import LinearAlgebra: norm # test ordistic loss ## generate data Random.seed!(1); m,n,k = 200,50,3; kfit = k+1 d = 7; # number of levels D = n*d; # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) # centers of measurement T_real = sqrt(k)*randn(d,n) # notice x^T y has variance k, so this scales the thresholds in the same way for j=1:n T_real[:,j] = sort(T_real[:,j]) end # variance of measurement sigmasq = 1 XY = X_real*Y_real; A = zeros(Int, (m, n)) for i=1:m for j=1:n wv = Weights(Float64[exp(-(XY[i,j] - T_real[l,j])^2/sigmasq) for l in 1:d]) l = sample(wv) A[i,j] = l end end # and the model losses = fill(OrdisticLoss(d),n) rx, ry = lastentry1(QuadReg(.01)), lastentry_unpenalized(QuadReg(.01)); glrm = GLRM(A,losses,rx,ry,kfit, scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,D)); # fit w/o initialization p = Params(1, max_iter=10, abs_tol=0.0000001, min_stepsize=0.000001) @time X,Y,ch = fit!(glrm, params=p); XYh = X'*Y[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((d-1)/d*100)% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y[:,1:d:D] println("After initialization with the svd, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After initialization with the svd, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((d-1)/d*100)% of entries would be wrong.)\n") # fit w/ initialization p = Params(1, max_iter=10, abs_tol=0.0000001, min_stepsize=0.000001) @time X,Y,ch = fit!(glrm, params=p); XYh = X'*Y[:,1:d:D]; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("After fitting, imputed entries are off by $(sum(abs,(A_imputed - A)) / prod(size(A))*100)% on average") println("(Picking randomly, $((d-1)/d*100)% of entries would be wrong.)\n")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
2066
using LowRankModels, Random import StatsBase: sample, Weights, mean import LinearAlgebra: norm # tests OvALoss Random.seed!(1); m,n,k = 200,50,2; kfit = k+1 K = 4; # number of categories d = n*K; # matrix to encode X_real, Y_real = randn(m,k), randn(k,d); XY = X_real*Y_real; # subtract the mean so we can compare the truth with the fit; # the loss function is invariant under shifts losses = fill(OvALoss(K),n) yidxs = get_yidxs(losses) for i=1:m for j=1:n mef = mean(XY[i,yidxs[j]]) XY[i,yidxs[j]] = XY[i,yidxs[j]] .- mef end end A = zeros(Int, (m, n)) for i=1:m for j=1:n wv = Weights(Float64[exp(-XY[i, K*(j-1) + l]) for l in 1:K]) l = sample(wv) A[i,j] = l end end # and the model losses = fill(OvALoss(K),n) rx, ry = QuadReg(), QuadReg(); glrm = GLRM(A,losses,rx,ry,kfit, scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,d)); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After initialization with the svd, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; @show ch.objective println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE") A_imputed = impute(glrm) println("After fitting, $(sum(A_imputed .!= A) / prod(size(A))*100)% of imputed entries are wrong") println("(Picking randomly, $((K-1)/K*100)% of entries would be wrong.)\n")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1095
using LowRankModels, Random import StatsBase: sample, Weights, std import LinearAlgebra: norm # test quadratic loss ## generate data Random.seed!(1); m,n,k = 300,300,3; kfit = k+1 # variance of measurement sigmasq = .1 # coordinates of covariates X_real = randn(m,k) # directions of observations Y_real = randn(k,n) XY = X_real*Y_real; A = XY + sqrt(sigmasq)*randn(m,n) # and the model losses = QuadLoss() rx, ry = QuadReg(.1), QuadReg(.1); glrm = GLRM(A,losses,rx,ry,kfit) #scale=false, offset=false, X=randn(kfit,m), Y=randn(kfit,n)); # fit w/o initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE\n") # initialize init_svd!(glrm) XYh = glrm.X' * glrm.Y println("After initialization with the svd, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE\n") # fit w/ initialization @time X,Y,ch = fit!(glrm); XYh = X'*Y; println("After fitting, parameters differ from true parameters by $(norm(XY - XYh)/sqrt(prod(size(XY)))) in RMSE")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
188
include("QuadLoss.jl") include("LogisticLoss.jl") include("MultinomialLoss.jl") include("OvALoss.jl") include("MultinomialOrdinalLoss.jl") include("OrdisticLoss.jl") include("BvSLoss.jl")
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
code
1721
using LowRankModels using FactCheck TOL = 1e-3 facts("Classification Losses") do context("logistic loss") do l = LogisticLoss() l1 = 1.31326168 l0 = 0.3132616875 # 1 is true # -1 and 0 are both false # anything else is an error @fact evaluate(l, 1, true) --> roughly(l0, TOL) @fact evaluate(l, 1, false) --> roughly(l1, TOL) @fact evaluate(l, -1, true) --> roughly(l1, TOL) @fact evaluate(l, -1, false) --> roughly(l0, TOL) @fact evaluate(l, 1, 1) --> roughly(l0, TOL) @fact evaluate(l, 1, -1) --> roughly(l1, TOL) @fact evaluate(l, 1, 0) --> roughly(l1, TOL) @fact evaluate(l, -1, 1) --> roughly(l1, TOL) @fact evaluate(l, -1, -1) --> roughly(l0, TOL) @fact evaluate(l, -1, 0) --> roughly(l0, TOL) @fact evaluate(3*l, 1, false) --> roughly(3*l1, TOL) end context("hinge loss") do l = HingeLoss() # 1 is true # -1 and 0 are both false # anything else is an error @fact evaluate(l, 1, true) --> roughly(0, TOL) @fact evaluate(l, 1, false) --> roughly(2, TOL) @fact evaluate(l, -1, true) --> roughly(2, TOL) @fact evaluate(l, -1, false) --> roughly(0, TOL) @fact evaluate(l, 1, 1) --> roughly(0, TOL) @fact evaluate(l, 1, -1) --> roughly(2, TOL) @fact evaluate(l, 1, 0) --> roughly(2, TOL) @fact evaluate(l, -1, 1) --> roughly(2, TOL) @fact evaluate(l, -1, -1) --> roughly(0, TOL) @fact evaluate(l, -1, 0) --> roughly(0, TOL) @fact evaluate(3*l, 1, false) --> roughly(3*2, TOL) @fact grad(l, -1, true) --> roughly(-1, TOL) @fact grad(l, 2, true) --> roughly(0, TOL) @fact grad(l, -2, false) --> roughly(0, TOL) @fact grad(l, 2, false) --> roughly(1, TOL) end end
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
docs
23323
# LowRankModels.jl [![Build Status](https://travis-ci.com/madeleineudell/LowRankModels.jl.svg?branch=master)](https://travis-ci.com/madeleineudell/LowRankModels.jl) [![Build status](https://ci.appveyor.com/api/projects/status/jjk1poiwtnflc61m?svg=true)](https://ci.appveyor.com/project/jiahao/lowrankmodels-jl) [![codecov](https://codecov.io/gh/jiahao/LowRankModels.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/jiahao/LowRankModels.jl) `LowRankModels.jl` is a Julia package for modeling and fitting generalized low rank models (GLRMs). GLRMs model a data array by a low rank matrix, and include many well known models in data analysis, such as principal components analysis (PCA), matrix completion, robust PCA, nonnegative matrix factorization, k-means, and many more. For more information on GLRMs, see [our paper][glrmpaper]. There is a [python interface](https://github.com/udellgroup/pyglrm) to this package, and a GLRM implementation in [the H2O machine learning platform](http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/glrm.html) with interfaces in a variety of languages. `LowRankModels.jl` makes it easy to mix and match loss functions and regularizers to construct a model suitable for a particular data set. In particular, it supports * using different loss functions for different columns of the data array, which is useful when data types are heterogeneous (e.g., real, boolean, and ordinal columns); * fitting the model to only *some* of the entries in the table, which is useful for data tables with many missing (unobserved) entries; and * adding offsets and scalings to the model without destroying sparsity, which is useful when the data is poorly scaled. ## Installation To install, just call ```julia Pkg.add("LowRankModels") ``` at the Julia prompt. # Generalized Low Rank Models GLRMs form a low rank model for tabular data `A` with `m` rows and `n` columns, which can be input as an array or any array-like object (for example, a data frame). It is fine if only some of the entries have been observed (i.e., the others are `missing`); the GLRM will only be fit on the `!ismissing` entries. The desired model is specified by choosing a rank `k` for the model, an array of loss functions `losses`, and two regularizers, `rx` and `ry`. The data is modeled as `X'*Y`, where `X` is a `k`x`m` matrix and `Y` is a `k`x`n` matrix. `X` and `Y` are found by solving the optimization problem <!--``\mbox{minimize} \quad \sum_{(i,j) \in \Omega} L_{ij}(x_i y_j, A_{ij}) + \sum_{i=1}^m r_i(x_i) + \sum_{j=1}^n \tilde r_j(y_j)``--> minimize sum_{(i,j) in obs} losses[j]((X'*Y)[i,j], A[i,j]) + sum_i rx(X[:,i]) + sum_j ry(Y[:,j]) The basic type used by LowRankModels.jl is the GLRM. To form a GLRM, the user specifies * the data `A` (any `AbstractArray`, such as an array, a sparse matrix, or a data frame) * the array of loss functions `losses` * the regularizers `rx` and `ry` * the rank `k` The user may also specify * the observed entries `obs` * starting matrices X₀ and Y₀ `obs` is a list of tuples of the indices of the observed entries in the matrix, and may be omitted if all the entries in the matrix have been observed. If `A` is a sparse matrix, implicit zeros are interpreted as missing entries by default; see the discussion of [sparse matrices](#fitting-sparse-matrices) below for more details. `X₀` and `Y₀` are initialization matrices that represent a starting guess for the optimization. 
Losses and regularizers must be of type `Loss` and `Regularizer`, respectively, and may be chosen from a list of supported losses and regularizers, which include

Losses:

* quadratic loss `QuadLoss`
* hinge loss `HingeLoss`
* logistic loss `LogisticLoss`
* Poisson loss `PoissonLoss`
* weighted hinge loss `WeightedHingeLoss`
* l1 loss `L1Loss`
* ordinal hinge loss `OrdinalHingeLoss`
* periodic loss `PeriodicLoss`
* multinomial categorical loss `MultinomialLoss`
* multinomial ordinal (aka ordered logit) loss `OrderedMultinomialLoss`
* bigger-vs-smaller loss `BvSLoss` (for ordinal data)
* one-vs-all loss `OvALoss` (for categorical data)

The constructors for all the ordinal and categorical losses take as an argument the maximum (or both minimum and maximum) value the variable may take. Using the one-vs-all loss is equivalent to transforming a categorical value to a one-hot vector and using a binary loss on each entry in that vector. Using the bigger-vs-smaller loss is equivalent to transforming the ordinal value to a Boolean vector and using a binary loss on each entry in that vector. By default, the binary loss used is the logistic loss.

Regularizers:

* quadratic regularization `QuadReg`
* constrained squared euclidean norm `QuadConstraint`
* l1 regularization `OneReg`
* no regularization `ZeroReg`
* nonnegative constraint `NonNegConstraint` (e.g., for nonnegative matrix factorization)
* 1-sparse constraint `OneSparseConstraint` (e.g., for orthogonal NNMF)
* unit 1-sparse constraint `UnitOneSparseConstraint` (e.g., for k-means)
* simplex constraint `SimplexConstraint`
* l1 regularization, combined with nonnegative constraint `NonNegOneReg`
* fix features at values `y0` `FixedLatentFeaturesConstraint(y0)`

Each of these losses and regularizers can be scaled (for example, to increase the importance of the loss relative to the regularizer) by calling `mul!(loss, newscale)`. Users may also implement their own losses and regularizers, or adjust internal parameters of the losses and regularizers; see [losses.jl](src/losses.jl) and [regularizers.jl](src/regularizers.jl) for more details.

## Example

For example, the following code forms a k-means model with `k=5` on the `100`x`100` matrix `A`:

```julia
using LowRankModels
m, n, k = 100, 100, 5
losses = QuadLoss() # minimize squared distance to cluster centroids
rx = UnitOneSparseConstraint() # each row is assigned to exactly one cluster
ry = ZeroReg() # no regularization on the cluster centroids
glrm = GLRM(A, losses, rx, ry, k)
```

To fit the model, call

```julia
X, Y, ch = fit!(glrm)
```

which runs an alternating directions proximal gradient method on `glrm` to find the `X` and `Y` minimizing the objective function. (`ch` gives the convergence history; see [Technical details](#technical-details) below for more information.)

The `losses` argument can also be an array of loss functions, with one for each column (in order). For example, for a data set with 3 columns, you could use

```julia
losses = Loss[QuadLoss(), LogisticLoss(), HingeLoss()]
```

Similarly, the `ry` argument can be an array of regularizers, with one for each column (in order). For example, for a data set with 3 columns, you could use

```julia
ry = Regularizer[QuadReg(1), QuadReg(10), FixedLatentFeaturesConstraint([1.,2.,3.])]
```

This regularizes the first two columns of `Y` with `||Y[:,1]||^2 + 10||Y[:,2]||^2` and constrains the third (and last) column of `Y` to be equal to `[1,2,3]`.
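As a concrete sketch of the scaling described above (the factor 10 and the surrounding model are chosen only for illustration), you can rescale one column's loss in place before building the model:

```julia
losses = Loss[QuadLoss(), LogisticLoss(), HingeLoss()]
mul!(losses[2], 10)    # up-weight the second column's loss by a factor of 10
glrm = GLRM(A, losses, ZeroReg(), QuadReg(1), 2)   # assumes A has 3 columns
```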
[More examples here.](examples/simple_glrms.jl) # Missing data If not all entries are present in your data table, just tell the GLRM which observations to fit the model to by listing tuples of their indices in `obs`, e.g., if `obs=[(1,2), (5,3)]`, exactly two entries have been observed. Then initialize the model using ```julia GLRM(A, losses, rx, ry, k, obs=obs) ``` If `A` is a DataFrame and you just want the model to ignore any entry that is `missing`, you can use ```julia obs = observations(A) ``` # Standard low rank models Low rank models can easily be used to fit standard models such as PCA, k-means, and nonnegative matrix factorization. The following functions are available: * `pca`: principal components analysis * `qpca`: quadratically regularized principal components analysis * `rpca`: robust principal components analysis * `nnmf`: nonnegative matrix factorization * `k-means`: k-means See [the code](src/simple_glrms.jl) for usage. Any keyword argument valid for a `GLRM` object, such as an initial value for `X` or `Y` or a list of observations, can also be used with these standard low rank models. # Scaling and offsets <a name="scaling"></a> If you choose, LowRankModels.jl can add an offset to your model and scale the loss functions and regularizers so all columns have the same pull in the model. Simply call ```julia glrm = GLRM(A, losses, rx, ry, k, offset=true, scale=true) ``` This transformation generalizes standardization, a common preprocessing technique applied before PCA. (For more about offsets and scaling, see the code or the paper.) You can also add offsets and scalings to previously unscaled models: * Add an offset to the model (by applying no regularization to the last row of the matrix `Y`, and enforcing that the last column of `X` be all 1s) using ```julia add_offset!(glrm) ``` * Scale the loss functions and regularizers by calling ```julia equilibrate_variance!(glrm) ``` * Scale only the columns associated to `QuadLoss` or `HuberLoss` loss functions. ```julia prob_scale!(glrm) ``` # Fitting DataFrames Perhaps all this sounds like too much work. Perhaps you happen to have a [DataFrame](https://github.com/JuliaStats/DataFrames.jl) `df` lying around that you'd like a low rank (e.g., `k=2`) model for. For example, ```julia import RDatasets df = RDatasets.dataset("psych", "msq") ``` Never fear! Just call ```julia glrm, labels = GLRM(df, k) X, Y, ch = fit!(glrm) ``` This will fit a GLRM with rank `k` to your data, using a QuadLoss loss for real valued columns, HingeLoss loss for boolean columns, and ordinal HingeLoss loss for integer columns, a small amount of QuadLoss regularization, and scaling and adding an offset to the model as described [here](#scaling). It returns the column labels for the columns it fit, along with the model. Right now, all other data types are ignored. `NaN` values are treated as missing values (`missing`s) and ignored in the fit. The full call signature is ```julia function GLRM(df::DataFrame, k::Int; losses = Loss[], rx = QuadReg(.01), ry = QuadReg(.01), offset = true, scale = false, prob_scale = true, NaNs_to_NAs = true) ``` You can modify the losses or regularizers, or turn off offsets or scaling, using these keyword arguments. 
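To make the missing-data workflow above concrete, here is a minimal sketch (the matrix and the observed indices are made up) that fits a rank-2 model to a partially observed matrix and then imputes every entry:

```julia
using LowRankModels

A = randn(5, 3)                                      # toy data
obs = [(1,1), (1,2), (2,3), (3,1), (4,2), (5,3)]     # only these entries are observed
glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 2, obs=obs)
X, Y, ch = fit!(glrm)
Ahat = impute(glrm)    # maximum likelihood estimate for every entry, observed or not
```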
Or to specify a map from data types to losses, define a new `loss_map` from datatypes to losses (like probabilistic_losses, below): ```julia probabilistic_losses = Dict{Symbol, Any}( :real => QuadLoss, :bool => LogisticLoss, :ord => MultinomialOrdinalLoss, :cat => MultinomialLoss ) ``` and input an array of datatypes (one for each column of your data frame: `GLRM(A, k, datatypes; loss_map = loss_map)`. The full call signature is ```julia function GLRM(df::DataFrame, k::Int, datatypes::Array{Symbol,1}; loss_map = probabilistic_losses, rx = QuadReg(.01), ry = QuadReg(.01), offset = true, scale = false, prob_scale = true, transform_data_to_numbers = true, NaNs_to_NAs = true) ``` You can modify the losses or regularizers, or turn off offsets or scaling, using these keyword arguments. To fit a data frame with categorical values, you can use the function `expand_categoricals!` to turn categorical columns into a Boolean column for each level of the categorical variable. For example, `expand_categoricals!(df, [:gender])` will replace the gender column with a column corresponding to `gender=male`, a column corresponding to `gender=female`, and other columns corresponding to labels outside the gender binary, if they appear in the data set. You can use the model to get some intuition for the data set. For example, try plotting the columns of `Y` with the labels; you might see that similar features are close to each other! # Fitting Sparse Matrices If you have a very large, sparsely observed dataset, then you may want to encode your data as a [sparse matrix](http://julia-demo.readthedocs.org/en/latest/stdlib/sparse.html). By default, `LowRankModels` interprets the sparse entries of a sparse matrix as missing entries (i.e. `NA` values). There is no need to pass the indices of observed entries (`obs`) -- this is done automatically when `GLRM(A::SparseMatrixCSC,...)` is called. In addition, calling `fit!(glrm)` when `glrm.A` is a sparse matrix will use the sparse variant of the proximal gradient descent algorithm, `fit!(glrm, SparseProxGradParams(); kwargs...)`. If, instead, you'd like to interpret the sparse entries as zeros, rather than missing or `NA` entries, use: ```julia glrm = GLRM(...; sparse_na=false) ``` In this case, the dataset is dense in terms of observations, but sparse in terms of nonzero values. Thus, it may make more sense to fit the model with the vanilla proximal gradient descent algorithm, `fit!(glrm, ProxGradParams(); kwargs...)`. # Parallel fitting (experimental) LowRankModels makes use of Julia v0.5's new multithreading functionality to fit models in parallel. To fit a LowRankModel in parallel using multithreading, simply set the number of threads from the command line before starting Julia: e.g., ```sh export JULIA_NUM_THREADS=4 ``` # Technical details ## Optimization The function `fit!` uses an alternating directions proximal gradient method to minimize the objective. This method is *not* guaranteed to converge to the optimum, or even to a local minimum. If your code is not converging or is converging to a model you dislike, there are a number of parameters you can tweak. ### Warm start The algorithm starts with `glrm.X` and `glrm.Y` as the initial estimates for `X` and `Y`. If these are not given explicitly, they will be initialized randomly. If you have a good guess for a model, try setting them explicitly. 
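For instance, a minimal sketch of the two sparse-matrix conventions described above (the matrix itself is arbitrary):

```julia
using LowRankModels, SparseArrays

S = sprand(1000, 100, 0.05)   # ~5% of entries are nonzero

# default: structural zeros are treated as missing; obs is filled in automatically
glrm_missing = GLRM(S, QuadLoss(), QuadReg(1), QuadReg(1), 5)
fit!(glrm_missing, SparseProxGradParams())

# alternative: treat structural zeros as observed zeros
glrm_zeros = GLRM(S, QuadLoss(), QuadReg(1), QuadReg(1), 5, sparse_na=false)
fit!(glrm_zeros, ProxGradParams())
```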
If you think that you're getting stuck in a local minimum, try reinitializing your GLRM (so as to construct a new initial random point) and see if the model you obtain improves. The function `fit!` sets the fields `glrm.X` and `glrm.Y` after fitting the model. This is particularly useful if you want to use the model you generate as a warm start for further iterations. If you prefer to preserve the original `glrm.X` and `glrm.Y` (e.g., for cross validation), you should call the function `fit`, which does not mutate its arguments. You can even start with an easy-to-optimize loss function, run `fit!`, change the loss function (`glrm.losses = newlosses`), and keep going from your warm start by calling `fit!` again to fit the new loss functions. ### Initialization If you don't have a good guess at a warm start for your model, you might try one of the initializations provided in `LowRankModels`. * `init_svd!` initializes the model as the truncated SVD of the matrix of observed entries, with unobserved entries filled in with zeros. This initialization is known to result in provably good solutions for a number of "PCA-like" problems. See [our paper][glrmpaper] for details. * `init_kmeanspp!` initializes the model using a modification of the [kmeans++](https://en.wikipedia.org/wiki/K-means_clustering) algorithm for data sets with missing entries; see [our paper][glrmpaper] for details. This works well for fitting clustering models, and may help in achieving better fits for nonnegative matrix factorization problems as well. * `init_nndsvd!` initializes the model using a modification of the [NNDSVD](https://github.com/JuliaStats/NMF.jl/blob/master/src/initialization.jl#L18) algorithm as implemented by the [NMF](https://github.com/JuliaStats/NMF.jl) package. This modification handles data sets with missing entries by replacing missing entries with zeros. Optionally, by setting the argument `max_iters=n` with `n>0`, it will iteratively replace missing entries by their values as imputed by the NNDSVD, and call NNDSVD again on the new matrix. (This procedure is similar to the [soft impute](http://dl.acm.org/citation.cfm?id=1859931) method of Mazumder, Hastie and Tibshirani for matrix completion.) ### Parameters As mentioned earlier, `LowRankModels` uses alternating proximal gradient descent to derive estimates of `X` and `Y`. This can be done by two slightly different procedures: (A) compute the full reconstruction, `X' * Y`, to compute the gradient and objective function; (B) only compute the model estimate for entries of `A` that are observed. The first method is likely preferred when there are few missing entries for `A` because of hardware level optimizations (e.g. chunking the operations so they just fit in various caches). The second method is likely preferred when there are many missing entries of `A`. To fit with the first (dense) method: ```julia fit!(glrm, ProxGradParams(); kwargs...) ``` To fit with the second (sparse) method: ```julia fit!(glrm, SparseProxGradParams(); kwargs...) ``` The first method is used by default if `glrm.A` is a standard matrix/array. The second method is used by default if `glrm.A` is a `SparseMatrixCSC`. `ProxGradParams()` and `SparseProxGradParams()` run these respective methods with the default parameters: * `stepsize`: The step size controls the speed of convergence. Small step sizes will slow convergence, while large ones will cause divergence. `stepsize` should be of order 1. 
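A short sketch of the initialization-plus-warm-start workflow described above (the rank, regularization, and replacement losses are arbitrary):

```julia
glrm = GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 5)

init_svd!(glrm)          # initialize glrm.X and glrm.Y from a truncated SVD
X, Y, ch = fit!(glrm)    # refine from that starting point

# change the losses and keep fitting from the warm start stored in glrm.X, glrm.Y
glrm.losses = fill(HuberLoss(), size(A, 2))
X, Y, ch = fit!(glrm)
```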
* `abs_tol`: The algorithm stops when the decrease in the objective per iteration is less than `abs_tol*length(obs)`. * `rel_tol`: The algorithm stops when the decrease in the objective per iteration is less than `rel_tol`. * `max_iter`: The algorithm also stops if maximum number of rounds `max_iter` has been reached. * `min_stepsize`: The algorithm also stops if `stepsize` decreases below this limit. * `inner_iter`: specifies how many proximal gradient steps to take on `X` before moving on to `Y` (and vice versa). The default parameters are: `ProxGradParams(stepsize=1.0;max_iter=100,inner_iter=1,abs_tol=0.00001,rel_tol=0.0001,min_stepsize=0.01*stepsize)` ### Convergence `ch` gives the convergence history so that the success of the optimization can be monitored; `ch.objective` stores the objective values, and `ch.times` captures the times these objective values were achieved. Try plotting this to see if you just need to increase `max_iter` to converge to a better model. # Imputation After fitting a GLRM, you can use it to impute values of `A` in four different ways: * `impute(glrm)` gives the maximum likelihood estimates for each entry * `impute_missing(glrm)` imputes missing entries and leaves observed entries unchanged * `sample(glrm)` gives a draw from the posterior distribution, conditioned on the fit values of `X` and `Y`, for each entry * `sample_missing(glrm)` samples missing entries and leaves observed entries unchanged # Cross validation A number of useful functions are available to help you check whether a given low rank model overfits to the test data set. These functions should help you choose adequate regularization for your model. ## Cross validation * `cross_validate(glrm::GLRM, nfolds=5, params=Params(); verbose=false, use_folds=None, error_fn=objective, init=None)`: performs n-fold cross validation and returns average loss among all folds. More specifically, splits observations in `glrm` into `nfolds` groups, and builds new GLRMs, each with one group of observations left out. Fits each GLRM to the training set (the observations revealed to each GLRM) and returns the average loss on the test sets (the observations left out of each GLRM). **Optional arguments:** * `use_folds`: build `use_folds` new GLRMs instead of `n_folds` new GLRMs, each with `1/nfolds` of the entries left out. (`use_folds` defaults to `nfolds`.) * `error_fn`: use a custom error function to evaluate the fit, rather than the objective. For example, one might use the imputation error by setting `error_fn = error_metric`. * `init`: initialize the fit using a particular procedure. For example, consider `init=init_svd!`. See [Initialization](#initialization) for more options. * `cv_by_iter(glrm::GLRM, holdout_proportion=.1, params=Params(1,1,.01,.01), niters=30; verbose=true)`: computes the test error and train error of the GLRM as it is trained. Splits the observations into a training set (`1-holdout_proportion` of the original observations) and a test set (`holdout_proportion` of the original observations). Performs `params.maxiter` iterations of the fitting algorithm on the training set `niters` times, and returns the test and train error as a function of iteration. 
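For example, a sketch of tweaking the fitting parameters and then estimating generalization error with cross validation (the particular values are illustrative, not recommendations):

```julia
# smaller step size and more iterations than the defaults
params = ProxGradParams(0.5, max_iter=500, abs_tol=1e-6)
X, Y, ch = fit!(glrm, params)

# 5-fold cross validation, scoring by imputation error and initializing each fold with the SVD
avg_test_error = cross_validate(glrm, 5, params, error_fn=error_metric, init=init_svd!)
```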
## Regularization paths * `regularization_path(glrm::GLRM; params=Params(), reg_params=exp10.(range(2,stop=-2,length=5)), holdout_proportion=.1, verbose=true, ch::ConvergenceHistory=ConvergenceHistory("reg_path"))`: computes the train and test error for GLRMs varying the scaling of the regularization through any scaling factor in the array `reg_params`. ## Utilities * `get_train_and_test(obs, m, n, holdout_proportion=.1)`: splits observations `obs` into a train and test set. `m` and `n` must be at least as large as the maximal value of the first or second elements of the tuples in `observations`, respectively. Returns `observed_features` and `observed_examples` for both train and test sets. ## ScikitLearn This library implements the [ScikitLearn.jl](https://github.com/cstjean/ScikitLearn.jl) interface. These models are available: `SkGLRM, PCA, QPCA, NNMF, KMeans, RPCA`. See their docstrings for more information (e.g. `?QPCA`). All models support the `ScikitLearnBase.fit!` and `ScikitLearnBase.transform` interface. Examples: ```julia ## Apply PCA to the iris dataset using LowRankModels import ScikitLearnBase using RDatasets # may require Pkg.add("RDatasets") A = convert(Matrix, dataset("datasets", "iris")[[:SepalLength, :SepalWidth, :PetalLength, :PetalWidth]]) ScikitLearnBase.fit_transform!(PCA(k=3, max_iter=500), A) ``` ```julia ## Fit K-Means to a fake dataset of two Gaussians using LowRankModels import ScikitLearnBase # Generate two disjoint Gaussians with 100 and 50 points gaussian1 = randn(100, 2) + 5 gaussian2 = randn(50, 2) - 10 # Merge them into a single dataset A = vcat(gaussian1, gaussian2) model = ScikitLearnBase.fit!(LowRankModels.KMeans(), A) # Count how many points are assigned to each Gaussians (should be 100 and 50) Set(sum(ScikitLearnBase.transform(model, A), 1)) ``` See also [this notebook demonstrating K-Means](https://github.com/cstjean/ScikitLearn.jl/blob/master/examples/Plot_Kmeans_Digits_Julia.ipynb). These models can be used inside a [ScikitLearn pipeline](http://scikitlearnjl.readthedocs.io/en/latest/pipelines/), and every hyperparameter can be [tuned with GridSearchCV](http://scikitlearnjl.readthedocs.io/en/latest/model_selection/). # Citing this package If you use LowRankModels for published work, we encourage you to cite the software. Use the following BibTeX citation: ```bibtex @article{glrm, title = {Generalized Low Rank Models}, author ={Madeleine Udell and Horn, Corinne and Zadeh, Reza and Boyd, Stephen}, doi = {10.1561/2200000055}, year = {2016}, archivePrefix = "arXiv", eprint = {1410.0342}, primaryClass = "stat-ml", journal = {Foundations and Trends in Machine Learning}, number = {1}, volume = {9}, issn = {1935-8237}, url = {http://dx.doi.org/10.1561/2200000055}, } ``` [glrmpaper]: https://people.orie.cornell.edu/mru8/doc/udell16_glrm.pdf
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
1.1.1
cc10bb134a2eb9e6f22d10fa1bba2b3a97c2b152
docs
1182
# To Do * Set up a single command for multiple imputation: a) pick m random subsets of data, b) choose model of rank k, regularization constant α for each subset, c) impute missing data from each of the m selected models. * (nandana will do this) * Documentation! * how to think about mpca * imputation * error metrics for cross validation * new syntax for fitting data frame * how to specify loss function(s) * parallel fitting * full rank model / prisma * Poisson loss * scaling? * to log or not to log? that is the interpretative issue # Bugs * init_nndsvd! doesn't work (probably an upgrade-to-1.0 bug) * M_estimator doesn't work (losses.jl); bug in Optim? * sample doesn't work * lots of bugs in fit_dataframe_w_type_imputation; deprecated for now. (also it's an odd thing to do.) * imputation doesn't return correct type (for dataframes) # How to register/publish a new version of the package 1. update version number in Project.toml 2. navigate to commit that you want tagged on github 3. comment @Registrator register 4. monitor resulting PR on the general registry to see if any bugs are found 5. when PR is accepted, use Tagger to make github release
LowRankModels
https://github.com/madeleineudell/LowRankModels.jl.git
[ "MIT" ]
0.13.1
97f24d428f00f0e8c662d2aa52a389f3fcc08897
code
518
using Documenter, JuliaDB, IndexedTables @info "makedocs" makedocs( clean = true, debug = true, format = Documenter.HTML(), sitename = "JuliaDB.jl", pages = [ "index.md", "basics.md", "operations.md", "joins.md", "onlinestats.md", "plotting.md", "missing_values.md", "out_of_core.md", "ml.md", "tutorial.md", "api.md", ] ) @info "deploydocs" deploydocs( repo = "github.com/JuliaComputing/JuliaDB.jl.git" )
JuliaDB
https://github.com/JuliaData/JuliaDB.jl.git
[ "MIT" ]
0.13.1
97f24d428f00f0e8c662d2aa52a389f3fcc08897
code
3356
module JuliaDB import Base: collect, join, keys, values, iterate, broadcast, merge, reduce, mapslices, == import Base.Broadcast: broadcasted import Base.Iterators: PartitionIterator import IndexedTables: IndexedTable, table, NDSparse, ndsparse, Tup, groupjoin, DimName, Columns, column, columns, rows, pkeys, pairs, Tup, namedtuple, flatten, naturaljoin, leftjoin, asofjoin, eltypes, astuple, colnames, pkeynames, valuenames, showtable, reducedim_vec, _convert, groupreduce, groupby, ApplyColwise, stack, unstack, selectkeys, selectvalues, select, lowerselection, convertdim, excludecols, reindex, ColDict, AbstractIndexedTable, Dataset, promoted_similar, dropmissing, convertmissing, transform import TextParse: csvread import Dagger: compute, distribute, load, save, DomainBlocks, ArrayDomain, DArray, ArrayOp, domainchunks, chunks, Distribute, debug_compute, get_logs!, LocalEventLog, chunktype, tochunk, distribute, Context, treereduce, dsort_chunks import Serialization: serialize, deserialize import MemPool: mmwrite, mmread, MMSer, approx_size using IndexedTables, Dagger, OnlineStats, Distributed, Serialization, Nullables, Printf, Statistics, PooledArrays, WeakRefStrings, MemPool, StatsBase, OnlineStatsBase, DataValues, RecipesBase, TextParse, Glob #-----------------------------------------------------------------------# exports export @cols, @dateformat_str, AbstractNDSparse, All, Between, ColDict, Columns, DColumns, IndexedTable, JuliaDB, Keys, ML, NA, NDSparse, Not, aggregate_stats, asofjoin, chunks, colnames, column, columns, compute, convertdim, csvread, distribute, dropmissing, fetch_timings!, flatten, glob, groupby, groupjoin, groupreduce, ingest, ingest!, innerjoin, insert_row!, insertafter!, insertbefore!, insertcols, insertcolsafter, insertcolsbefore, leftjoin, load, load_table, loadfiles, loadndsparse, loadtable, merge, naturaljoin, ndsparse, pairs, partitionplot, partitionplot!, rechunk, rechunk_together, reducedim_vec, reindex, rename, rows, save, select, selectkeys, selectvalues, stack, start_tracking_time, stop_tracking_time, summarize, table, tracktime, transform, unstack, convertmissing include("util.jl") include("serialize.jl") include("interval.jl") include("table.jl") include("ndsparse.jl") include("reshape.jl") # equality function (==)(x::DDataset, y::Union{Dataset, DDataset}) y1 = distribute(y, length.(domainchunks(rows(x)))) res = delayed(==, get_result=true).(x.chunks, y1.chunks) all(collect(delayed((xs...) -> [xs...])(res...))) end function (==)(x::DDataset, y::Dataset) collect(x) == y end (==)(x::Dataset, y::DDataset) = y == x function Base.isequal(x::DDataset, y::Union{Dataset, DDataset}) y1 = distribute(y, length.(domainchunks(rows(x)))) res = delayed(isequal, get_result=true).(x.chunks, y1.chunks) all(collect(delayed((xs...) -> [xs...])(res...))) end Base.isequal(x::DDataset, y::Dataset) = isequal(collect(x), y) Base.isequal(x::Dataset, y::DDataset) = isequal(x, collect(y)) include("iteration.jl") include("sort.jl") include("io.jl") include("printing.jl") include("indexing.jl") include("selection.jl") include("reduce.jl") include("flatten.jl") include("join.jl") include("diagnostics.jl") include("recipes.jl") include("ml.jl") end # module
JuliaDB
https://github.com/JuliaData/JuliaDB.jl.git
[ "MIT" ]
0.13.1
97f24d428f00f0e8c662d2aa52a389f3fcc08897
code
4252
# Extract a column as a Dagger array const DColumns = DArray{<:Tup} function DColumns(arrays::Tup) if length(arrays) == 0 error("""DColumns must be constructed with at least one column.""") end i = findfirst(x->isa(x, ArrayOp), arrays) wrap = isa(arrays, Tuple) ? tuple : namedtuple(keys(arrays)...)∘tuple if i == 0 error("""At least 1 array passed to DColumns must be a DArray""") end darrays = asyncmap(arrays) do x isa(x, ArrayOp) ? compute(get_context(), x) : x end dist = domainchunks(darrays[i]) darrays = map(darrays) do x if isa(x, DArray) domainchunks(x) == dist ? x : error("Distribution incompatible") else Distribute(dist, x) end end darrays = asyncmap(darrays) do x compute(get_context(), x) end if length(darrays) == 1 cs = chunks(darrays[1]) chunkmatrix = reshape(cs, length(cs), 1) else chunkmatrix = reduce(hcat, map(chunks, darrays)) end cs = mapslices(x -> delayed((c...) -> Columns(wrap(c...)))(x...), chunkmatrix, 2)[:] T = isa(arrays, Tuple) ? Tuple{map(eltype, arrays)...} : wrap{map(eltype, arrays)...} DArray(T, domain(darrays[1]), domainchunks(darrays[1]), cs, dvcat) end function itable(keycols::DColumns, valuecols::DColumns) cs = map(delayed(itable), chunks(keycols), chunks(valuecols)) cs1 = compute(get_context(), delayed((xs...) -> [xs...]; meta=true)(cs...)) fromchunks(cs1) end function extractarray(t::Union{DNDSparse,DColumns}, accessor) arraymaker = function (cs_tup...) cs = [cs_tup...] lengths = length.(domain.(cs)) dmnchunks = DomainBlocks((1,), (cumsum(lengths),)) T = promote_eltype_chunktypes(cs) DArray(T, ArrayDomain(1:sum(lengths)), dmnchunks, [cs...], dvcat) end cs = map(delayed(accessor), t.chunks) compute(delayed(arraymaker; meta=true)(cs...)) end isas(d) = isa(d, As) && d.f !== identity function columns(t::Union{DNDSparse, DColumns}, which::Tuple...) if !isempty(which) && any(isas, which[1]) return _columns_as(t, which...) end cs = map(delayed(x->columns(x, which...)), t.chunks) f = delayed() do c map(tochunk, c) end tuples = collect(get_context(), treereduce(delayed(vcat), map(f, cs))) if isa(tuples, Tuple) tuples = [tuples] end # tuples is a vector of tuples map(tuples...) do cstup... cs = [cstup...] T = chunktype(cs[1]) ls = length.(domain.(cs)) d = ArrayDomain((1:sum(ls),)) dchunks = DomainBlocks((1,), (cumsum(ls),)) DArray(eltype(T), d, dchunks, cs, dvcat) end end function _columns_as(t, which) stripas(w) = isa(w, As) ? w.src : w which_ = ntuple(i->as(stripas(which[i]), i), length(which)) cs = columns(t, which_) asvecs = findall(isas, which) outvecs = Any[cs...] outvecs[asvecs] = map((w,x) -> w.f(x), [which[asvecs]...], outvecs[asvecs]) tup = IndexedTables._output_tuple(which) tup(outvecs...) 
end

for f in [:rows, :keys, :values]
    @eval function $f(t::Union{DNDSparse, ArrayOp}, which::Tuple)
        if !any(isas, which)
            # easy
            extractarray(t, x -> $f(x, which))
        else
            DColumns(columns($f(t), which))
        end
    end
end

for f in [:rows, :keys, :values]
    @eval function $f(t::DNDSparse)
        extractarray(t, x -> $f(x))
    end
    @eval function $f(t::DNDSparse, which::Union{Int, Symbol})
        extractarray(t, x -> $f(x, which))
    end
    @eval function $f(t::DNDSparse, which::As)
        which.f($f(t, which.src))
    end
end

function column(t::DNDSparse, name)
    extractarray(t, x -> column(x, name))
end

columns(t::DNDSparse, which::Union{Int,Symbol,As}) = column(t, which)

function pairs(t::DNDSparse)
    extractarray(t, x -> map(Pair, x.index, x.data))
end

Base.@deprecate getindexcol(t::DNDSparse, dim) keys(t, dim)
Base.@deprecate getdatacol(t::DNDSparse, dim) values(t, dim)
Base.@deprecate dindex(t::DNDSparse) keys(t)
Base.@deprecate ddata(t::DNDSparse) values(t)
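A brief sketch of the distributed column accessors defined above, assuming only the public `ndsparse`/`distribute`/`collect` API; the field names and chunk count are arbitrary.

# Illustrative sketch (editor addition, not part of the original source file).
nd  = ndsparse((id = [1, 2, 3, 4],), (val = [0.1, 0.2, 0.3, 0.4],))
dnd = distribute(nd, 2)

k = keys(dnd)                  # distributed index columns
v = values(dnd, :val)          # one value column as a distributed array
collect(column(dnd, :val))     # materialize a single column locally
collect(pairs(dnd))            # index => value pairs, per the method above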
JuliaDB
https://github.com/JuliaData/JuliaDB.jl.git
[ "MIT" ]
0.13.1
97f24d428f00f0e8c662d2aa52a389f3fcc08897
code
2900
function time_table(log; profile=false)
    idx = Columns(proc=map(x->x.pid, getfields(log, :timeline)),
                  event_type=getfields(log, :category),
                  event_id=getfields(log, :id))

    if profile
        data = Columns(start=getfields(log, :start),
                       finish=getfields(log, :finish),
                       gc_diff=getfields(log, :gc_diff),
                       profile=getfields(log, :profiler_samples))
    else
        data = Columns(start=getfields(log, :start),
                       finish=getfields(log, :finish),
                       gc_diff=getfields(log, :gc_diff))
    end

    NDSparse(idx, data)
end

function add_gc_diff(x,y)
    Base.GC_Diff(
        x.allocd + y.allocd,
        x.malloc + y.malloc,
        x.realloc + y.realloc,
        x.poolalloc + y.poolalloc,
        x.bigalloc + y.bigalloc,
        x.freecall + y.freecall,
        x.total_time + y.total_time,
        x.pause + y.pause,
        x.full_sweep + y.full_sweep
    )
end

function aggregate_profile(xs)
    treereduce(Dagger.mix_samples, xs)
end

function aggregate_events(xs)
    sort!(xs, by=x->x.start)

    gc_diff = reduce(add_gc_diff, map(x -> x.gc_diff, xs))
    time_spent = sum(map(x -> x.finish - x.start, xs))
    if isdefined(xs[1], :profile)
        # include the merged profile samples when they were collected
        return time_spent, gc_diff, aggregate_profile(map(x->x.profile, xs))
    end
    time_spent, gc_diff
end

function show_timings(t; maxdepth=5)
    # first aggregate by type of event
    t1 = reducedim_vec(aggregate_events, t, [:proc, :event_id])
    foreach(t1.index, t1.data) do i, x
        time_spent, gc_diff = x
        print(string(i[1]), ": ")
        Base.time_print(time_spent, gc_diff.allocd, gc_diff.total_time,
                        Base.gc_alloc_count(gc_diff))
    end
    t2 = reducedim_vec(aggregate_events, t, :event_id)
    println("Breakdown:")
    println(map(x->first(x)/1e9, t2))
    if isdefined(columns(t.data), :profile)
        p = aggregate_profile(columns(t.data).profile)
        if !isempty(p.samples)
            println("\nProfile output:")
            Profile.print(p.samples, p.lineinfo, maxdepth=maxdepth)
        end
    end
end

function getfields(log, fieldname)
    map(x->getfield(x, fieldname), log)
end

function start_tracking_time(;profile=false)
    ctx = get_context()
    dbgctx = Context(procs(ctx), LocalEventLog(), profile)
    compute_context[] = dbgctx
end

function stop_tracking_time()
    compute_context[] = nothing
end

"""
`tracktime(f)`

Track the time spent on different processes in different categories in
running `f`.
"""
function tracktime(f; profile=false, maxdepth=5)
    start_tracking_time(profile=profile)
    res = f()
    ctx = compute_context[]
    stop_tracking_time()
    t = fetch_timings!(ctx, profile=profile)
    show_timings(t, maxdepth=maxdepth)
    t, res
end

function fetch_timings!(ctx=get_context(); profile=true)
    time_table(Dagger.get_logs!(ctx.log_sink), profile=profile)
end
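A short sketch of how the timing helpers above fit together, assuming the exported `tracktime` and `fetch_timings!` entry points and an arbitrary small distributed workload.

# Illustrative sketch (editor addition, not part of the original source file).
timings, result = tracktime() do
    collect(distribute(table(rand(10_000), names = [:x]), 4))
end
# `timings` is the NDSparse produced by time_table; `result` is the block's value.
# The same data can also be gathered manually:
#   start_tracking_time()
#   ...                       # run distributed work
#   t = fetch_timings!(compute_context[], profile = false)
#   stop_tracking_time()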
JuliaDB
https://github.com/JuliaData/JuliaDB.jl.git