<SYSTEM_TASK:> Method digest is redefined to return keyed MAC value instead of <END_TASK> <USER_TASK:> Description: def digest(self,data=None): """ Method digest is redefined to return keyed MAC value instead of just digest. """
    if data is not None:
        self.update(data)
    b = create_string_buffer(256)
    size = c_size_t(256)
    if libcrypto.EVP_DigestSignFinal(self.ctx, b, pointer(size)) <= 0:
        raise DigestError('SignFinal')
    self.digest_finalized = True
    return b.raw[:size.value]
<SYSTEM_TASK:> Returns num bytes of cryptographically strong pseudo-random <END_TASK> <USER_TASK:> Description: def bytes(num, check_result=False): """ Returns num bytes of cryptographically strong pseudo-random bytes. If check_result is True, raises error if PRNG is not seeded enough """
    if num <= 0:
        raise ValueError("'num' should be > 0")
    buf = create_string_buffer(num)
    result = libcrypto.RAND_bytes(buf, num)
    if check_result and result == 0:
        raise RandError("Random Number Generator not seeded sufficiently")
    return buf.raw[:num]
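A usage sketch (hedged: the ctypescrypto.rand import path is an assumption, not shown in the source):

    from ctypescrypto import rand   # assumed module location
    key = rand.bytes(32)                      # e.g. an AES-256 key
    iv = rand.bytes(16, check_result=True)    # raises RandError if the PRNG is unseeded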
<SYSTEM_TASK:> Creates new OID in the database <END_TASK> <USER_TASK:> Description: def create(dotted, shortname, longname): """ Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with extreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file. Results of calling this function twice for the same OID, or for an Oid already in the database, are undefined """
    if pyver > 2:
        dotted = dotted.encode('ascii')
        shortname = shortname.encode('utf-8')
        longname = longname.encode('utf-8')
    nid = libcrypto.OBJ_create(dotted, shortname, longname)
    if nid == 0:
        raise LibCryptoError("Problem adding new OID to the database")
    return Oid(nid)
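A hedged example; the OID arc and names below are hypothetical placeholders, not values from the source:

    oid = create('1.3.6.1.4.1.99999.1.1', 'myAlg', 'My Example Algorithm')  # hypothetical arc
    oid.shortname()   # -> 'myAlg'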
<SYSTEM_TASK:> Creates an OID object from the pointer to ASN1_OBJECT c structure. <END_TASK> <USER_TASK:> Description: def fromobj(obj): """ Creates an OID object from the pointer to ASN1_OBJECT c structure. This method is intended for internal use by submodules which deal with libcrypto ASN1 parsing functions, such as x509 or CMS """
    nid = libcrypto.OBJ_obj2nid(obj)
    if nid == 0:
        buf = create_string_buffer(80)
        dotted_len = libcrypto.OBJ_obj2txt(buf, 80, obj, 1)
        dotted = buf[:dotted_len]
        oid = create(dotted, dotted, dotted)
    else:
        oid = Oid(nid)
    return oid
<SYSTEM_TASK:> Converts given user function or string to C password callback <END_TASK> <USER_TASK:> Description: def _password_callback(c): """ Converts given user function or string to C password callback function, passable to openssl. If a function is passed, it would be called upon reading or writing a PEM format private key with one argument, which is True if we are writing the key (and should verify the passphrase) and False if we are reading it """
    if c is None:
        return PW_CALLBACK_FUNC(0)

    if callable(c):
        if pyver == 2:
            def __cb(buf, length, rwflag, userdata):
                pwd = c(rwflag)
                cnt = min(len(pwd), length)
                memmove(buf, pwd, cnt)
                return cnt
        else:
            def __cb(buf, length, rwflag, userdata):
                pwd = c(rwflag).encode("utf-8")
                cnt = min(len(pwd), length)
                memmove(buf, pwd, cnt)
                return cnt
    else:
        if pyver > 2:
            c = c.encode("utf-8")

        def __cb(buf, length, rwflag, userdata):
            cnt = min(len(c), length)
            memmove(buf, c, cnt)
            return cnt
    return PW_CALLBACK_FUNC(__cb)
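A minimal sketch of a caller-supplied callback, following the rwflag contract described in the docstring above:

    def ask_password(rwflag):
        # rwflag is True when writing a key (passphrase should be verified)
        return "correct horse"   # illustration only; prompt the user in real code

    cb = _password_callback(ask_password)   # or _password_callback("static passphrase")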
<SYSTEM_TASK:> Verifies given signature on given digest <END_TASK> <USER_TASK:> Description: def verify(self, digest, signature, **kwargs): """ Verifies given signature on given digest Returns True if Ok, False if it doesn't match Keyword arguments allow setting algorithm-specific parameters """
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initializing verify context")
    if libcrypto.EVP_PKEY_verify_init(ctx) < 1:
        raise PKeyError("verify_init")
    self._configure_context(ctx, kwargs)
    ret = libcrypto.EVP_PKEY_verify(ctx, signature, len(signature),
                                    digest, len(digest))
    if ret < 0:
        raise PKeyError("Signature verification")
    libcrypto.EVP_PKEY_CTX_free(ctx)
    return ret > 0
<SYSTEM_TASK:> Returns private key as PEM or DER Structure. <END_TASK> <USER_TASK:> Description: def exportpriv(self, format="PEM", password=None, cipher=None): """ Returns private key as PEM or DER structure. If password and cipher are specified, encrypts key with given password, using given algorithm. Cipher must be a ctypescrypto.cipher.CipherType object. Password can be either a string or a function with one argument, which returns the password. It is called with argument True, which means that we are encrypting the key, and the password should be verified (requested twice from user, for example). """
    bio = Membio()
    if cipher is None:
        evp_cipher = None
    else:
        evp_cipher = cipher.cipher
    if format == "PEM":
        ret = libcrypto.PEM_write_bio_PrivateKey(bio.bio, self.key,
                                                 evp_cipher, None, 0,
                                                 _password_callback(password),
                                                 None)
        if ret == 0:
            raise PKeyError("error serializing private key")
        return str(bio)
    else:
        ret = libcrypto.i2d_PKCS8PrivateKey_bio(bio.bio, self.key,
                                                evp_cipher, None, 0,
                                                _password_callback(password),
                                                None)
        if ret == 0:
            raise PKeyError("error serializing private key")
        return bintype(bio)
<SYSTEM_TASK:> Configures context of public key operations <END_TASK> <USER_TASK:> Description: def _configure_context(ctx, opts, skip=()): """ Configures context of public key operations @param ctx - context to configure @param opts - dictionary of options (from kwargs of calling function) @param skip - list of options which shouldn't be passed to context """
    for oper in opts:
        if oper in skip:
            continue
        if isinstance(oper, chartype):
            op = oper.encode("ascii")
        else:
            op = oper
        if isinstance(opts[oper], chartype):
            value = opts[oper].encode("ascii")
        elif isinstance(opts[oper], bintype):
            value = opts[oper]
        else:
            if pyver == 2:
                value = str(opts[oper])
            else:
                value = str(opts[oper]).encode('ascii')
        ret = libcrypto.EVP_PKEY_CTX_ctrl_str(ctx, op, value)
        if ret == -2:
            raise PKeyError("Parameter %s is not supported by key" % oper)
        if ret < 1:
            raise PKeyError("Error setting parameter %s" % oper)
<SYSTEM_TASK:> Reads data from readable BIO. For test purposes. <END_TASK> <USER_TASK:> Description: def read(self, length=None): """ Reads data from readable BIO. For test purposes. @param length - if specified, limits amount of data read. If not, BIO is read until end of buffer """
    if length is not None:
        if not isinstance(length, inttype):
            raise TypeError("length to read should be number")
        buf = create_string_buffer(length)
        readbytes = libcrypto.BIO_read(self.bio, buf, length)
        if readbytes == -2:
            raise NotImplementedError("Function is not supported by "
                                      "this BIO")
        if readbytes == -1:
            raise IOError
        if readbytes == 0:
            return b""
        return buf.raw[:readbytes]
    else:
        buf = create_string_buffer(1024)
        out = b""
        readbytes = 1
        while readbytes > 0:
            readbytes = libcrypto.BIO_read(self.bio, buf, 1024)
            if readbytes == -2:
                raise NotImplementedError("Function is not supported by "
                                          "this BIO")
            if readbytes == -1:
                raise IOError
            if readbytes > 0:
                out += buf.raw[:readbytes]
        return out
<SYSTEM_TASK:> Writes data to writable bio. For test purposes <END_TASK> <USER_TASK:> Description: def write(self, data): """ Writes data to writable bio. For test purposes """
    if pyver == 2:
        if isinstance(data, unicode):
            data = data.encode("utf-8")
        else:
            data = str(data)
    else:
        if not isinstance(data, bytes):
            data = str(data).encode("utf-8")
    written = libcrypto.BIO_write(self.bio, data, len(data))
    if written == -2:
        raise NotImplementedError("Function not supported by this BIO")
    if written < len(data):
        raise IOError("Not all data were successfully written")
<SYSTEM_TASK:> Factory function to create CMS objects from received messages. <END_TASK> <USER_TASK:> Description: def CMS(data, format="PEM"): """ Factory function to create CMS objects from received messages. Parses CMS data and returns either SignedData or EnvelopedData object. format argument can be either "PEM" or "DER". It determines object type from the contents of received CMS structure. """
    bio = Membio(data)
    if format == "PEM":
        ptr = libcrypto.PEM_read_bio_CMS(bio.bio, None, None, None)
    else:
        ptr = libcrypto.d2i_CMS_bio(bio.bio, None)
    if ptr is None:
        raise CMSError("Error parsing CMS data")
    typeoid = Oid(libcrypto.OBJ_obj2nid(libcrypto.CMS_get0_type(ptr)))
    if typeoid.shortname() == "pkcs7-signedData":
        return SignedData(ptr)
    elif typeoid.shortname() == "pkcs7-envelopedData":
        return EnvelopedData(ptr)
    elif typeoid.shortname() == "pkcs7-encryptedData":
        return EncryptedData(ptr)
    else:
        raise NotImplementedError("cannot handle " + typeoid.shortname())
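A usage sketch (the file name is hypothetical):

    with open('signed_message.pem', 'rb') as fobj:   # hypothetical file
        msg = CMS(fobj.read(), format="PEM")
    # msg is a SignedData, EnvelopedData or EncryptedData instance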
<SYSTEM_TASK:> Creates SignedData message by signing data with pkey and <END_TASK> <USER_TASK:> Description: def create(data, cert, pkey, flags=Flags.BINARY, certs=None): """ Creates SignedData message by signing data with pkey and certificate. @param data - data to sign @param cert - signer's certificate @param pkey - pkey object with private key to sign @param flags - ORed combination of Flags constants @param certs - list of X509 objects to include into CMS """
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if cert.pubkey != pkey:
        raise ValueError("Certificate doesn't match public key")
    bio = Membio(data)
    if certs is not None and len(certs) > 0:
        # keep reference to prevent immediate __del__ call
        certstack_obj = StackOfX509(certs)
        certstack = certstack_obj.ptr
    else:
        certstack = None
    ptr = libcrypto.CMS_sign(cert.cert, pkey.key, certstack, bio.bio, flags)
    if ptr is None:
        raise CMSError("signing message")
    return SignedData(ptr)
<SYSTEM_TASK:> Adds another signer to already signed message <END_TASK> <USER_TASK:> Description: def sign(self, cert, pkey, digest_type=None, data=None, flags=Flags.BINARY): """ Adds another signer to already signed message @param cert - signer's certificate @param pkey - signer's private key @param digest_type - message digest to use as DigestType object (if None - default for key would be used) @param data - data to sign (if detached and Flags.REUSE_DIGEST is not specified) @param flags - ORed combination of Flags constants """
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if cert.pubkey != pkey:
        raise ValueError("Certificate doesn't match public key")
    if libcrypto.CMS_add1_signer(self.ptr, cert.cert, pkey.key,
                                 digest_type.digest, flags) is None:
        raise CMSError("adding signer")
    if flags & Flags.REUSE_DIGEST == 0:
        if data is not None:
            bio = Membio(data)
            biodata = bio.bio
        else:
            biodata = None
        res = libcrypto.CMS_final(self.ptr, biodata, None, flags)
        if res <= 0:
            raise CMSError("Cannot finalize CMS")
<SYSTEM_TASK:> Verifies signature under CMS message using trusted cert store <END_TASK> <USER_TASK:> Description: def verify(self, store, flags, data=None, certs=None): """ Verifies signature under CMS message using trusted cert store @param store - X509Store object with trusted certs @param flags - OR-ed combination of flag constants @param data - message data, if message has detached signature @param certs - list of certificates to use during verification. If Flags.NOINTERN is specified, these are the only certificates to search for signing certificates @returns True if signature valid, False otherwise """
    bio = None
    if data is not None:
        bio_obj = Membio(data)
        bio = bio_obj.bio
    if certs is not None and len(certs) > 0:
        # keep reference to prevent immediate __del__ call
        certstack_obj = StackOfX509(certs)
        certstack = certstack_obj.ptr
    else:
        certstack = None
    res = libcrypto.CMS_verify(self.ptr, certstack, store.store, bio,
                               None, flags)
    return res > 0
<SYSTEM_TASK:> Returns signed data if present in the message <END_TASK> <USER_TASK:> Description: def data(self): """ Returns signed data if present in the message """
    # Check if signature is detached
    if self.detached:
        return None
    bio = Membio()
    if not libcrypto.CMS_verify(self.ptr, None, None, None, bio.bio,
                                Flags.NO_VERIFY):
        raise CMSError("extract data")
    return str(bio)
<SYSTEM_TASK:> List of the certificates contained in the structure <END_TASK> <USER_TASK:> Description: def certs(self): """ List of the certificates contained in the structure """
    certstack = libcrypto.CMS_get1_certs(self.ptr)
    if certstack is None:
        raise CMSError("getting certs")
    return StackOfX509(ptr=certstack, disposable=True)
<SYSTEM_TASK:> Creates and encrypts message <END_TASK> <USER_TASK:> Description: def create(recipients, data, cipher, flags=0): """ Creates and encrypts message @param recipients - list of X509 objects @param data - contents of the message @param cipher - CipherType object @param flags - ORed combination of Flags constants """
    recp = StackOfX509(recipients)
    bio = Membio(data)
    cms_ptr = libcrypto.CMS_encrypt(recp.ptr, bio.bio, cipher.cipher,
                                    flags)
    if cms_ptr is None:
        raise CMSError("encrypt EnvelopedData")
    return EnvelopedData(cms_ptr)
<SYSTEM_TASK:> Creates an EncryptedData message. <END_TASK> <USER_TASK:> Description: def create(data, cipher, key, flags=0): """ Creates an EncryptedData message. @param data data to encrypt @param cipher cipher.CipherType object representing required cipher type @param key - byte array used as symmetric key @param flags - OR-ed combination of Flags constants """
    bio = Membio(data)
    ptr = libcrypto.CMS_EncryptedData_encrypt(bio.bio, cipher.cipher,
                                              key, len(key), flags)
    if ptr is None:
        raise CMSError("encrypt data")
    return EncryptedData(ptr)
<SYSTEM_TASK:> Decrypts encrypted data message <END_TASK> <USER_TASK:> Description: def decrypt(self, key, flags=0): """ Decrypts encrypted data message @param key - symmetric key to decrypt @param flags - OR-ed combination of Flags constants """
    bio = Membio()
    if libcrypto.CMS_EncryptedData_decrypt(self.ptr, key, len(key), None,
                                           bio.bio, flags) <= 0:
        raise CMSError("decrypt data")
    return str(bio)
<SYSTEM_TASK:> Returns name of the digest <END_TASK> <USER_TASK:> Description: def name(self): """ Returns name of the digest """
    if not hasattr(self, 'digest_name'):
        self.digest_name = Oid(libcrypto.EVP_MD_type(self.digest)).longname()
    return self.digest_name
<SYSTEM_TASK:> Hashes given byte string <END_TASK> <USER_TASK:> Description: def update(self, data, length=None): """ Hashes given byte string @param data - string to hash @param length - if not specified, entire string is hashed, otherwise only first length bytes """
    if self.digest_finalized:
        raise DigestError("No updates allowed")
    if not isinstance(data, bintype):
        raise TypeError("A byte string is expected")
    if length is None:
        length = len(data)
    elif length > len(data):
        raise ValueError("Specified length is greater than length of data")
    result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
    if result != 1:
        raise DigestError("Unable to update digest")
<SYSTEM_TASK:> Finalizes digest operation and return digest value <END_TASK> <USER_TASK:> Description: def digest(self, data=None): """ Finalizes digest operation and return digest value Optionally hashes more data before finalizing """
    if self.digest_finalized:
        return self.digest_out.raw[:self.digest_size]
    if data is not None:
        self.update(data)
    self.digest_out = create_string_buffer(256)
    length = c_long(0)
    result = libcrypto.EVP_DigestFinal_ex(self.ctx, self.digest_out,
                                          byref(length))
    if result != 1:
        raise DigestError("Unable to finalize digest")
    self.digest_finalized = True
    return self.digest_out.raw[:self.digest_size]
<SYSTEM_TASK:> Creates copy of the digest CTX to allow to compute digest <END_TASK> <USER_TASK:> Description: def copy(self): """ Creates copy of the digest CTX to allow computing the digest while still being able to hash more data """
    new_digest = Digest(self.digest_type)
    libcrypto.EVP_MD_CTX_copy(new_digest.ctx, self.ctx)
    return new_digest
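A sketch of the prefix-hashing pattern copy() enables, assuming a hashlib-style new() factory in this module (the factory name is an assumption):

    d = new('sha256')                # assumed factory, as in hashlib
    d.update(b'common prefix')
    d2 = d.copy()                    # fork the hashing state
    d.update(b'variant A')
    d2.update(b'variant B')
    a, b = d.hexdigest(), d2.hexdigest()   # two digests, prefix hashed once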
<SYSTEM_TASK:> Clears and deallocates context <END_TASK> <USER_TASK:> Description: def _clean_ctx(self): """ Clears and deallocates context """
    try:
        if self.ctx is not None:
            libcrypto.EVP_MD_CTX_free(self.ctx)
            del self.ctx
    except AttributeError:
        pass
    self.digest_out = None
    self.digest_finalized = False
<SYSTEM_TASK:> Returns digest in the hexadecimal form. For compatibility <END_TASK> <USER_TASK:> Description: def hexdigest(self, data=None): """ Returns digest in the hexadecimal form. For compatibility with hashlib """
    from base64 import b16encode
    if pyver == 2:
        return b16encode(self.digest(data))
    else:
        return b16encode(self.digest(data)).decode('us-ascii')
<SYSTEM_TASK:> Return list of extensions with given Oid <END_TASK> <USER_TASK:> Description: def find(self, oid): """ Return list of extensions with given Oid """
    if not isinstance(oid, Oid):
        raise TypeError("Need ctypescrypto.oid.Oid as argument")
    found = []
    index = -1
    end = len(self)
    while True:
        index = libcrypto.X509_get_ext_by_NID(self.cert.cert, oid.nid,
                                              index)
        if index >= end or index < 0:
            break
        found.append(self[index])
    return found
<SYSTEM_TASK:> Return list of critical extensions (or list of non-critical, if <END_TASK> <USER_TASK:> Description: def find_critical(self, crit=True): """ Return list of critical extensions (or list of non-critical ones, if optional second argument is False) """
    if crit:
        flag = 1
    else:
        flag = 0
    found = []
    end = len(self)
    index = -1
    while True:
        index = libcrypto.X509_get_ext_by_critical(self.cert.cert, flag,
                                                   index)
        if index >= end or index < 0:
            break
        found.append(self[index])
    return found
<SYSTEM_TASK:> Returns PEM representation of the certificate <END_TASK> <USER_TASK:> Description: def pem(self): """ Returns PEM representation of the certificate """
    bio = Membio()
    if libcrypto.PEM_write_bio_X509(bio.bio, self.cert) == 0:
        raise X509Error("error serializing certificate")
    return str(bio)
<SYSTEM_TASK:> Serial number of certificate as integer <END_TASK> <USER_TASK:> Description: def serial(self): """ Serial number of certificate as integer """
    asnint = libcrypto.X509_get_serialNumber(self.cert)
    bio = Membio()
    libcrypto.i2a_ASN1_INTEGER(bio.bio, asnint)
    return int(str(bio), 16)
<SYSTEM_TASK:> Explicitly adds certificate to set of trusted in the store <END_TASK> <USER_TASK:> Description: def add_cert(self, cert): """ Explicitly adds certificate to set of trusted in the store @param cert - X509 object to add """
    if not isinstance(cert, X509):
        raise TypeError("cert should be X509")
    libcrypto.X509_STORE_add_cert(self.store, cert.cert)
<SYSTEM_TASK:> Sets certificate purpose which verified certificate should match <END_TASK> <USER_TASK:> Description: def setpurpose(self, purpose): """ Sets certificate purpose which verified certificate should match @param purpose - number from 1 to 9 or standard string defined in OpenSSL. Possible strings - sslclient, sslserver, nssslserver, smimesign, smimeencrypt, crlsign, any, ocsphelper """
    if isinstance(purpose, str):
        purp_no = libcrypto.X509_PURPOSE_get_by_sname(purpose)
        if purp_no <= 0:
            raise X509Error("Invalid certificate purpose '%s'" % purpose)
    elif isinstance(purpose, int):
        purp_no = purpose
    else:
        raise TypeError("purpose must be int or str")
    if libcrypto.X509_STORE_set_purpose(self.store, purp_no) <= 0:
        raise X509Error("cannot set purpose")
<SYSTEM_TASK:> Set point in time used to check validity of certificates <END_TASK> <USER_TASK:> Description: def settime(self, time): """ Set point in time used to check validity of certificates. Time can be either a python datetime object or a number of seconds since epoch """
    if isinstance(time, (datetime.datetime, datetime.date)):
        seconds = int(time.strftime("%s"))
    elif isinstance(time, int):
        seconds = time
    else:
        raise TypeError("datetime.date, datetime.datetime or integer "
                        "is required as time argument")
    raise NotImplementedError
<SYSTEM_TASK:> Creates EC keypair from just the secret key and curve name <END_TASK> <USER_TASK:> Description: def create(curve, data): """ Creates EC keypair from just the secret key and curve name @param curve - name of elliptic curve @param data - byte array or long number representing key """
    ec_key = libcrypto.EC_KEY_new_by_curve_name(curve.nid)
    if ec_key is None:
        raise PKeyError("EC_KEY_new_by_curvename")
    group = libcrypto.EC_KEY_get0_group(ec_key)
    if group is None:
        raise PKeyError("EC_KEY_get0_group")
    libcrypto.EC_GROUP_set_asn1_flag(group, 1)
    raw_key = libcrypto.BN_new()
    if raw_key is None:
        raise PKeyError("BN_new")
    if isinstance(data, int):
        libcrypto.BN_hex2bn(byref(raw_key), hex(data))
    else:
        if libcrypto.BN_bin2bn(data, len(data), raw_key) is None:
            raise PKeyError("BN_bin2bn")
    ctx = libcrypto.BN_CTX_new()
    if ctx is None:
        raise PKeyError("BN_CTX_new")
    order = libcrypto.BN_new()
    if order is None:
        raise PKeyError("BN_new")
    priv_key = libcrypto.BN_new()
    if priv_key is None:
        raise PKeyError("BN_new")
    if libcrypto.EC_GROUP_get_order(group, order, ctx) <= 0:
        raise PKeyError("EC_GROUP_get_order")
    if libcrypto.BN_nnmod(priv_key, raw_key, order, ctx) <= 0:
        raise PKeyError("BN_nnmod")
    if libcrypto.EC_KEY_set_private_key(ec_key, priv_key) <= 0:
        raise PKeyError("EC_KEY_set_private_key")
    pub_key = libcrypto.EC_POINT_new(group)
    if pub_key is None:
        raise PKeyError("EC_POINT_new")
    if libcrypto.EC_POINT_mul(group, pub_key, priv_key, None, None,
                              ctx) <= 0:
        raise PKeyError("EC_POINT_mul")
    if libcrypto.EC_KEY_set_public_key(ec_key, pub_key) <= 0:
        raise PKeyError("EC_KEY_set_public_key")
    libcrypto.BN_free(raw_key)
    libcrypto.BN_free(order)
    libcrypto.BN_free(priv_key)
    libcrypto.BN_CTX_free(ctx)
    pkey = libcrypto.EVP_PKEY_new()
    if pkey is None:
        raise PKeyError("EVP_PKEY_new")
    if libcrypto.EVP_PKEY_set1_EC_KEY(pkey, ec_key) <= 0:
        raise PKeyError("EVP_PKEY_set1_EC_KEY")
    libcrypto.EC_KEY_free(ec_key)
    return PKey(ptr=pkey, cansign=True)
<SYSTEM_TASK:> Returns new cipher object ready to encrypt-decrypt data <END_TASK> <USER_TASK:> Description: def new(algname, key, encrypt=True, iv=None): """ Returns new cipher object ready to encrypt-decrypt data @param algname - string algorithm name like in openssl command line @param key - binary string representing cipher key @param encrypt - if True (default) cipher would be initialized for encryption, otherwise - for decryption @param iv - initialization vector """
    ciph_type = CipherType(algname)
    return Cipher(ciph_type, key, iv, encrypt)
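A round-trip sketch, assuming the Cipher object exposes a hashlib-style update() alongside the finish() shown further below (update() is an assumption here):

    key = b'k' * 32
    iv = b'i' * 16
    enc = new('aes-256-cbc', key, encrypt=True, iv=iv)
    ct = enc.update(b'attack at dawn') + enc.finish()
    dec = new('aes-256-cbc', key, encrypt=False, iv=iv)
    pt = dec.update(ct) + dec.finish()   # == b'attack at dawn'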
<SYSTEM_TASK:> Sets padding mode of the cipher <END_TASK> <USER_TASK:> Description: def padding(self, padding=True): """ Sets padding mode of the cipher """
    padding_flag = 1 if padding else 0
    libcrypto.EVP_CIPHER_CTX_set_padding(self.ctx, padding_flag)
<SYSTEM_TASK:> Finalizes processing. If some data are kept in the internal <END_TASK> <USER_TASK:> Description: def finish(self): """ Finalizes processing. If some data are kept in the internal state, they would be processed and returned. """
    if self.cipher_finalized:
        raise CipherError("Cipher operation is already completed")
    outbuf = create_string_buffer(self.block_size)
    self.cipher_finalized = True
    outlen = c_int(0)
    result = libcrypto.EVP_CipherFinal_ex(self.ctx, outbuf, byref(outlen))
    if result == 0:
        self._clean_ctx()
        raise CipherError("Unable to finalize cipher")
    if outlen.value > 0:
        return outbuf.raw[:int(outlen.value)]
    else:
        return b""
<SYSTEM_TASK:> Cleans up cipher ctx and deallocates it <END_TASK> <USER_TASK:> Description: def _clean_ctx(self): """ Cleans up cipher ctx and deallocates it """
    try:
        if self.ctx is not None:
            self.__ctxcleanup(self.ctx)
            libcrypto.EVP_CIPHER_CTX_free(self.ctx)
            del self.ctx
    except AttributeError:
        pass
    self.cipher_finalized = True
<SYSTEM_TASK:> Sets specified engine as default for all <END_TASK> <USER_TASK:> Description: def set_default(eng, algorithms=0xFFFF): """ Sets specified engine as default for all algorithms supported by it. For compatibility with 0.2.x, if a string is passed instead of an engine, attempts to load the engine with this id """
    global default
    if not isinstance(eng, Engine):
        eng = Engine(eng)
    libcrypto.ENGINE_set_default(eng.ptr, c_int(algorithms))
    default = eng
<SYSTEM_TASK:> Construct a dictionary out of an iterable, using an attribute name as <END_TASK> <USER_TASK:> Description: def from_keyed_iterable(iterable, key, filter_func=None): """Construct a dictionary out of an iterable, using an attribute name as the key. Optionally provide a filter function, to determine what should be kept in the dictionary."""
    generated = {}
    for element in iterable:
        try:
            k = getattr(element, key)
        except AttributeError:
            raise RuntimeError(
                "{} does not have the keyed attribute: {}".format(
                    element, key))
        if filter_func is None or filter_func(element):
            if k in generated:
                generated[k] += [element]
            else:
                generated[k] = [element]
    return generated
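A quick worked example of the grouping behavior:

    from collections import namedtuple
    User = namedtuple('User', 'name team')
    users = [User('ann', 'red'), User('bob', 'red'), User('cid', 'blue')]
    from_keyed_iterable(users, 'team')
    # -> {'red': [User(name='ann', team='red'), User(name='bob', team='red')],
    #     'blue': [User(name='cid', team='blue')]}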
<SYSTEM_TASK:> given two dicts, a and b, this function returns c = a - b, where <END_TASK> <USER_TASK:> Description: def subtract_by_key(dict_a, dict_b): """given two dicts, a and b, this function returns c = a - b, where a - b is defined as the key difference between a and b. e.g., {1:None, 2:3, 3:"yellow", 4:True} - {2:4, 1:"green"} = {3:"yellow", 4:True} """
    difference_dict = {}
    for key in dict_a:
        if key not in dict_b:
            difference_dict[key] = dict_a[key]
    return difference_dict
<SYSTEM_TASK:> separates a dict into has-keys and not-has-keys pairs, using either <END_TASK> <USER_TASK:> Description: def winnow_by_keys(dct, keys=None, filter_func=None): """separates a dict into has-keys and not-has-keys pairs, using either a list of keys or a filtering function."""
    has = {}
    has_not = {}
    for key in dct:
        key_passes_check = False
        if keys is not None:
            key_passes_check = key in keys
        elif filter_func is not None:
            key_passes_check = filter_func(key)
        if key_passes_check:
            has[key] = dct[key]
        else:
            has_not[key] = dct[key]
    return WinnowedResult(has, has_not)
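A worked example (assuming WinnowedResult exposes the two dicts as .has and .has_not, as the names above suggest):

    result = winnow_by_keys({'a': 1, 'b': 2, 'c': 3}, keys=('a', 'c'))
    result.has       # -> {'a': 1, 'c': 3}
    result.has_not   # -> {'b': 2}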
<SYSTEM_TASK:> func must take an item and return an iterable that contains that <END_TASK> <USER_TASK:> Description: def flat_map(iterable, func): """func must take an item and return an iterable that contains that item. This is flatmap in the classic mode"""
    results = []
    for element in iterable:
        result = func(element)
        if len(result) > 0:
            results.extend(result)
    return results
<SYSTEM_TASK:> like the built-in sum, but for multiplication. <END_TASK> <USER_TASK:> Description: def product(sequence, initial=1): """like the built-in sum, but for multiplication."""
    if not isinstance(sequence, collections.Iterable):
        raise TypeError("'{}' object is not iterable".format(
            type(sequence).__name__))
    return reduce(operator.mul, sequence, initial)
<SYSTEM_TASK:> Runs through a few common string formats for datetimes, <END_TASK> <USER_TASK:> Description: def date_from_string(string, format_string=None): """Runs through a few common string formats for datetimes, and attempts to coerce them into a datetime. Alternatively, format_string can provide either a single string to attempt or an iterable of strings to attempt."""
    if isinstance(format_string, str):
        return datetime.datetime.strptime(string, format_string).date()
    elif format_string is None:
        format_string = [
            "%Y-%m-%d",
            "%m-%d-%Y",
            "%m/%d/%Y",
            "%d/%m/%Y",
        ]
    for format in format_string:
        try:
            return datetime.datetime.strptime(string, format).date()
        except ValueError:
            continue
    raise ValueError("Could not produce date from string: {}".format(
        string))
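Worked examples using the formats listed above:

    date_from_string("2014-07-04")                              # ISO format, tried first
    date_from_string("04/07/2014", "%d/%m/%Y")                  # explicit format string
    date_from_string("Jul 4 2014", ["%b %d %Y", "%Y-%m-%d"])    # iterable of candidates
    # all three return datetime.date(2014, 7, 4)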
<SYSTEM_TASK:> given a datetime.date, gives back a datetime.datetime <END_TASK> <USER_TASK:> Description: def to_datetime(plain_date, hours=0, minutes=0, seconds=0, ms=0): """given a datetime.date, gives back a datetime.datetime"""
    # don't mess with datetimes
    if isinstance(plain_date, datetime.datetime):
        return plain_date
    return datetime.datetime(
        plain_date.year,
        plain_date.month,
        plain_date.day,
        hours,
        minutes,
        seconds,
        ms,
    )
<SYSTEM_TASK:> Given a bunch of TimePeriods, return a TimePeriod that most closely <END_TASK> <USER_TASK:> Description: def get_containing_period(cls, *periods): """Given a bunch of TimePeriods, return a TimePeriod that most closely contains them."""
    if any(not isinstance(period, TimePeriod) for period in periods):
        raise TypeError("periods must all be TimePeriods: {}".format(
            periods))
    latest = datetime.datetime.min
    earliest = datetime.datetime.max
    for period in periods:
        # the best we can do to contain None is None!
        if period._latest is None:
            latest = None
        elif latest is not None and period._latest > latest:
            latest = period._latest
        if period._earliest is None:
            earliest = None
        elif earliest is not None and period._earliest < earliest:
            earliest = period._earliest
    return TimePeriod(earliest, latest)
<SYSTEM_TASK:> Allows the user to print the credential for a particular keyring entry <END_TASK> <USER_TASK:> Description: def get_user_password(env, param, force=False): """ Allows the user to print the credential for a particular keyring entry to the screen """
    username = utils.assemble_username(env, param)
    if not utils.confirm_credential_display(force):
        return
    # Retrieve the credential from the keychain
    password = password_get(username)
    if password:
        return (username, password)
    else:
        return False
<SYSTEM_TASK:> Retrieves a password from the keychain based on the environment and <END_TASK> <USER_TASK:> Description: def password_get(username=None): """ Retrieves a password from the keychain based on the environment and configuration parameter pair. If this fails, None is returned. """
    password = keyring.get_password('supernova', username)
    if password is None:
        split_username = tuple(username.split(':'))
        msg = ("Couldn't find a credential for {0}:{1}. You need to set "
               "one with: supernova-keyring -s {0} {1}").format(
                   *split_username)
        raise LookupError(msg)
    else:
        return password.encode('ascii')
<SYSTEM_TASK:> Sets a user's password in the keyring storage <END_TASK> <USER_TASK:> Description: def set_user_password(environment, parameter, password): """ Sets a user's password in the keyring storage """
    username = '%s:%s' % (environment, parameter)
    return password_set(username, password)
<SYSTEM_TASK:> Stores a password in a keychain for a particular environment and <END_TASK> <USER_TASK:> Description: def password_set(username=None, password=None): """ Stores a password in a keychain for a particular environment and configuration parameter pair. """
    result = keyring.set_password('supernova', username, password)
    # NOTE: keyring returns None when the storage is successful. That's
    # weird.
    if result is None:
        return True
    else:
        return False
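A round trip with the password_get function above (the environment:parameter pair is hypothetical):

    if password_set('prod:OS_PASSWORD', 's3cret'):   # hypothetical pair
        print(password_get('prod:OS_PASSWORD'))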
<SYSTEM_TASK:> Appends new variables to the current shell environment temporarily. <END_TASK> <USER_TASK:> Description: def prep_shell_environment(nova_env, nova_creds): """ Appends new variables to the current shell environment temporarily. """
    new_env = {}
    for key, value in prep_nova_creds(nova_env, nova_creds):
        if type(value) == six.binary_type:
            value = value.decode()
        new_env[key] = value
    return new_env
<SYSTEM_TASK:> Finds relevant config options in the supernova config and cleans them <END_TASK> <USER_TASK:> Description: def prep_nova_creds(nova_env, nova_creds): """ Finds relevant config options in the supernova config and cleans them up for novaclient. """
    try:
        raw_creds = dict(nova_creds.get('DEFAULT', {}),
                         **nova_creds[nova_env])
    except KeyError:
        msg = "{0} was not found in your supernova configuration " \
              "file".format(nova_env)
        raise KeyError(msg)

    proxy_re = re.compile(r"(^http_proxy|^https_proxy)")

    creds = []
    for param, value in raw_creds.items():
        if not proxy_re.match(param):
            param = param.upper()
        if not hasattr(value, 'startswith'):
            continue
        # Get values from the keyring if we find a USE_KEYRING constant
        if value.startswith("USE_KEYRING"):
            username, credential = pull_env_credential(nova_env, param,
                                                       value)
        else:
            credential = value.strip("\"'")
        # Make sure we got something valid from the configuration file
        # or the keyring
        if not credential:
            raise LookupError("No matching credentials found in keyring")
        creds.append((param, credential))
    return creds
<SYSTEM_TASK:> Pulls the supernova configuration file and reads it <END_TASK> <USER_TASK:> Description: def load_config(config_file_override=False): """ Pulls the supernova configuration file and reads it """
    supernova_config = get_config_file(config_file_override)
    supernova_config_dir = get_config_directory(config_file_override)

    if not supernova_config and not supernova_config_dir:
        raise Exception("Couldn't find a valid configuration file to "
                        "parse")

    nova_creds = ConfigObj()

    # Can we successfully read the configuration file?
    if supernova_config:
        try:
            nova_creds.merge(ConfigObj(supernova_config))
        except:
            raise Exception("There's an error in your configuration "
                            "file")

    if supernova_config_dir:
        for dir_file in os.listdir(supernova_config_dir):
            full_path = ''.join((supernova_config_dir, dir_file))
            try:
                nova_creds.merge(ConfigObj(full_path))
            except:
                print("Skipping '{0}', Parsing Error.".format(full_path))

    create_dynamic_configs(nova_creds)
    return nova_creds
<SYSTEM_TASK:> Looks for the most specific configuration file available. An override <END_TASK> <USER_TASK:> Description: def get_config_file(override_files=False): """ Looks for the most specific configuration file available. An override can be provided as a string if needed. """
    if override_files:
        if isinstance(override_files, six.string_types):
            possible_configs = [override_files]
        else:
            raise Exception("Config file override must be a string")
    else:
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
            os.path.expanduser('~/.config')
        possible_configs = [os.path.join(xdg_config_home, "supernova"),
                            os.path.expanduser("~/.supernova"),
                            ".supernova"]
    for config_file in reversed(possible_configs):
        if os.path.isfile(config_file):
            return config_file
    return False
<SYSTEM_TASK:> Looks for the most specific configuration directory possible, in order to <END_TASK> <USER_TASK:> Description: def get_config_directory(override_files=False): """ Looks for the most specific configuration directory possible, in order to load individual configuration files. """
    if override_files:
        possible_dirs = [override_files]
    else:
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
            os.path.expanduser('~/.config')
        possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
                         os.path.expanduser("~/.supernova.d/"),
                         ".supernova.d/"]
    for config_dir in reversed(possible_dirs):
        if os.path.isdir(config_dir):
            return config_dir
    return False
<SYSTEM_TASK:> Executes the executable given by the user. <END_TASK> <USER_TASK:> Description: def execute_executable(nova_args, env_vars): """ Executes the executable given by the user. Hey, I know this method has a silly name, but I write the code here and I'm silly. """
    process = subprocess.Popen(nova_args,
                               stdout=sys.stdout,
                               stderr=subprocess.PIPE,
                               env=env_vars)
    process.wait()
    return process
<SYSTEM_TASK:> If the user wanted to run the executable with debugging enabled, we need <END_TASK> <USER_TASK:> Description: def check_for_debug(supernova_args, nova_args): """ If the user wanted to run the executable with debugging enabled, we need to apply the correct arguments to the executable. Heat is a corner case since it uses -d instead of --debug. """
    # Heat requires special handling for debug arguments
    if supernova_args['debug'] and supernova_args['executable'] == 'heat':
        nova_args.insert(0, '-d ')
    elif supernova_args['debug']:
        nova_args.insert(0, '--debug ')
    return nova_args
<SYSTEM_TASK:> It's possible that a user might set their custom executable via an <END_TASK> <USER_TASK:> Description: def check_for_executable(supernova_args, env_vars): """ It's possible that a user might set their custom executable via an environment variable. If we detect one, we should add it to supernova's arguments ONLY IF an executable wasn't set on the command line. The command line executable must take priority. """
    exe = supernova_args.get('executable', 'default')
    if exe != 'default':
        return supernova_args
    if 'OS_EXECUTABLE' in env_vars.keys():
        supernova_args['executable'] = env_vars['OS_EXECUTABLE']
        return supernova_args
    supernova_args['executable'] = 'nova'
    return supernova_args
<SYSTEM_TASK:> Return a list of extra args that need to be passed on cmdline to nova. <END_TASK> <USER_TASK:> Description: def check_for_bypass_url(raw_creds, nova_args): """ Return a list of extra args that need to be passed on cmdline to nova. """
    if 'BYPASS_URL' in raw_creds.keys():
        bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
        nova_args = bypass_args + nova_args
    return nova_args
<SYSTEM_TASK:> Sets the environment variables for the executable, runs the executable, <END_TASK> <USER_TASK:> Description: def run_command(nova_creds, nova_args, supernova_args): """ Sets the environment variables for the executable, runs the executable, and handles the output. """
    nova_env = supernova_args['nova_env']

    # (gtmanfred) make a copy of this object. If we don't copy it, the
    # insert to 0 happens multiple times because it is the same object
    # in memory.
    nova_args = copy.copy(nova_args)

    # Get the environment variables ready
    env_vars = os.environ.copy()
    env_vars.update(credentials.prep_shell_environment(nova_env,
                                                       nova_creds))

    # BYPASS_URL is a weird one, so we need to send it as an argument,
    # not an environment variable.
    nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)

    # Check for OS_EXECUTABLE
    supernova_args = check_for_executable(supernova_args, env_vars)

    # Check for a debug override
    nova_args = check_for_debug(supernova_args, nova_args)

    # Print a small message for the user (very helpful for groups)
    msg = "Running %s against %s..." % (supernova_args.get('executable'),
                                        nova_env)
    if not supernova_args.get('quiet'):
        click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'),
                                 msg))

    # Call executable and connect stdout to the current terminal
    # so that any unicode characters from the executable's list will be
    # displayed appropriately.
    #
    # In other news, I hate how python 2.6 does unicode.
    nova_args.insert(0, supernova_args['executable'])
    nova_args = [nova_arg.strip() for nova_arg in nova_args]
    process = execute_executable(nova_args, env_vars)

    # If the user asked us to be quiet, then let's not print stderr
    if not supernova_args.get('quiet'):
        handle_stderr(process.stderr)

    return process.returncode
<SYSTEM_TASK:> Checks for environment variables that can cause problems with supernova <END_TASK> <USER_TASK:> Description: def check_environment_presets(): """ Checks for environment variables that can cause problems with supernova """
    presets = [x for x in os.environ.copy().keys()
               if x.startswith('NOVA_') or x.startswith('OS_')]
    if len(presets) < 1:
        return True
    else:
        click.echo("_" * 80)
        click.echo("*WARNING* Found existing environment variables that "
                   "may cause conflicts:")
        for preset in presets:
            click.echo(" - %s" % preset)
        click.echo("_" * 80)
        return False
<SYSTEM_TASK:> Takes a group_name and finds any environments that have a SUPERNOVA_GROUP <END_TASK> <USER_TASK:> Description: def get_envs_in_group(group_name, nova_creds): """ Takes a group_name and finds any environments that have a SUPERNOVA_GROUP configuration line that matches the group_name. """
    envs = []
    for key, value in nova_creds.items():
        supernova_groups = value.get('SUPERNOVA_GROUP', [])
        if hasattr(supernova_groups, 'startswith'):
            supernova_groups = [supernova_groups]
        if group_name in supernova_groups:
            envs.append(key)
        elif group_name == 'all':
            envs.append(key)
    return envs
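A sketch of the matching config and call (the environment names are hypothetical):

    # supernova config (ConfigObj/INI style):
    #   [prod-east]
    #   SUPERNOVA_GROUP = prod
    #   [prod-west]
    #   SUPERNOVA_GROUP = prod
    get_envs_in_group('prod', nova_creds)   # -> ['prod-east', 'prod-west']
    get_envs_in_group('all', nova_creds)    # -> every configured environment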
<SYSTEM_TASK:> Checks to see if the configuration file contains a SUPERNOVA_GROUP <END_TASK> <USER_TASK:> Description: def is_valid_group(group_name, nova_creds): """ Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option. """
    valid_groups = []
    for key, value in nova_creds.items():
        supernova_groups = value.get('SUPERNOVA_GROUP', [])
        if hasattr(supernova_groups, 'startswith'):
            supernova_groups = [supernova_groups]
        valid_groups.extend(supernova_groups)
    valid_groups.append('all')
    if group_name in valid_groups:
        return True
    else:
        return False
<SYSTEM_TASK:> Removes nova_ os_ novaclient_ prefix from string. <END_TASK> <USER_TASK:> Description: def rm_prefix(name): """ Removes nova_ os_ novaclient_ prefix from string. """
    if name.startswith('nova_'):
        return name[5:]
    elif name.startswith('novaclient_'):
        return name[11:]
    elif name.startswith('os_'):
        return name[3:]
    else:
        return name
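Worked examples:

    rm_prefix('nova_username')        # -> 'username'
    rm_prefix('novaclient_insecure')  # -> 'insecure'
    rm_prefix('os_auth_url')          # -> 'auth_url'
    rm_prefix('password')             # -> 'password' (unchanged)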
<SYSTEM_TASK:> Pads `strdata` with a Request's callback argument, if specified, or does <END_TASK> <USER_TASK:> Description: def __pad(strdata): """ Pads `strdata` with a Request's callback argument, if specified, or does nothing. """
    if request.args.get('callback'):
        return "%s(%s);" % (request.args.get('callback'), strdata)
    else:
        return strdata
<SYSTEM_TASK:> Serializes `args` and `kwargs` as JSON. Supports serializing an array <END_TASK> <USER_TASK:> Description: def __dumps(*args, **kwargs): """ Serializes `args` and `kwargs` as JSON. Supports serializing an array as the top-level object, if it is the only argument. """
    indent = None
    if (current_app.config.get('JSONIFY_PRETTYPRINT_REGULAR', False)
            and not request.is_xhr):
        indent = 2
    return json.dumps(args[0] if len(args) == 1 else dict(*args, **kwargs),
                      indent=indent)
<SYSTEM_TASK:> Update type and typestring lookup dicts. <END_TASK> <USER_TASK:> Description: def update_type_lookups(self): """ Update type and typestring lookup dicts. Must be called once the ``types`` and ``python_type_strings`` attributes are set so that ``type_to_typestring`` and ``typestring_to_type`` are constructed. .. versionadded:: 0.2 Notes ----- Subclasses need to call this function explicitly. """
    self.type_to_typestring = dict(zip(self.types,
                                       self.python_type_strings))
    self.typestring_to_type = dict(zip(self.python_type_strings,
                                       self.types))
<SYSTEM_TASK:> Gets type string. <END_TASK> <USER_TASK:> Description: def get_type_string(self, data, type_string): """ Gets type string. Finds the type string for 'data' contained in ``python_type_strings`` using its ``type``. Non-``None`` 'type_string' overrides whatever type string is looked up. The override makes it easier for subclasses to convert something that the parent marshaller can write to disk but still put the right type string in place. Parameters ---------- data : type to be marshalled The Python object that is being written to disk. type_string : str or None If it is a ``str``, it overrides any looked up type string. ``None`` means don't override. Returns ------- str The type string associated with 'data'. Will be 'type_string' if it is not ``None``. Notes ----- Subclasses probably do not need to override this method. """
    if type_string is not None:
        return type_string
    else:
        tp = type(data)
        try:
            return self.type_to_typestring[tp]
        except KeyError:
            return self.type_to_typestring[tp.__module__ + '.'
                                           + tp.__name__]
<SYSTEM_TASK:> Writes an object to file. <END_TASK> <USER_TASK:> Description: def write(self, f, grp, name, data, type_string, options): """ Writes an object to file. Writes the Python object 'data' to 'name' in h5py.Group 'grp'. .. versionchanged:: 0.2 Arguments changed. Parameters ---------- f : h5py.File The HDF5 file handle that is open. grp : h5py.Group or h5py.File The parent HDF5 Group (or File if at '/') that contains the object with the specified name. name : str Name of the object. data The object to write to file. type_string : str or None The type string for `data`. If it is ``None``, one will have to be gotten by ``get_type_string``. options : hdf5storage.core.Options hdf5storage options object. Raises ------ NotImplementedError If writing 'data' to file is currently not supported. hdf5storage.exceptions.TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``. Notes ----- Must be overridden in a subclass because a ``NotImplementedError`` is thrown immediately. See Also -------- hdf5storage.utilities.write_data """
    raise NotImplementedError("Can't write data type: "
                              + str(type(data)))
<SYSTEM_TASK:> Writes an object's metadata to file. <END_TASK> <USER_TASK:> Description: def write_metadata(self, f, dsetgrp, data, type_string, options, attributes=None): """ Writes an object's metadata to file. Writes the metadata for a Python object `data` to the HDF5 Dataset or Group `dsetgrp`. Metadata is written to HDF5 Attributes. Existing Attributes that are not being used are deleted. .. versionchanged:: 0.2 Arguments changed. Parameters ---------- f : h5py.File The HDF5 file handle that is open. dsetgrp : h5py.Dataset or h5py.Group The Dataset or Group object to add metadata to. data The object to write to file. type_string : str or None The type string for `data`. If it is ``None``, one will have to be gotten by ``get_type_string``. options : hdf5storage.core.Options hdf5storage options object. attributes : dict or None, optional The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. Default is no Attributes to set (``None``). Notes ----- The attribute 'Python.Type' is set to the type string. All H5PY Attributes not in ``python_attributes`` and/or ``matlab_attributes`` (depending on the attributes of 'options') are deleted. These are needed functions for writing essentially any Python object, so subclasses should probably call the baseclass's version of this function if they override it and just provide the additional functionality needed. This requires that the names of any additional HDF5 Attributes are put in the appropriate set. See Also -------- utilities.set_attributes_all """
    if attributes is None:
        attributes = dict()
    # Make sure we have a complete type_string.
    if options.store_python_metadata \
            and 'Python.Type' not in attributes:
        attributes['Python.Type'] = \
            ('string', self.get_type_string(data, type_string))
    set_attributes_all(dsetgrp, attributes, discard_others=True)
<SYSTEM_TASK:> Processes paths. <END_TASK> <USER_TASK:> Description: def process_path(pth): """ Processes paths. Processes the provided path and breaks it into its Group part (`groupname`) and target part (`targetname`). ``bytes`` paths are converted to ``str``. Separated paths are given as an iterable of ``str`` and ``bytes``. Each part of a separated path is escaped using ``escape_path``. Otherwise, the path is assumed to be already escaped. Escaping is done so that targets with a part that starts with one or more periods, contain slashes, and/or contain nulls can be used without causing the wrong Group to be looked in or the wrong target to be looked at. It essentially allows one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving around in the Dataset hierarchy. All paths are POSIX style. .. versionadded:: 0.2 Parameters ---------- pth : str or bytes or iterable of str or bytes The POSIX style path as a ``str`` or ``bytes`` or the separated path in an iterable with the elements being ``str`` and ``bytes``. For separated paths, escaping will be done on each part. Returns ------- groupname : str The path to the Group containing the target `pth` was pointing to. targetname : str The name of the target pointed to by `pth` in the Group `groupname`. Raises ------ TypeError If `pth` is not of the right type. See Also -------- escape_path """
    # Do conversions and possibly escapes.
    if isinstance(pth, bytes):
        p = pth.decode('utf-8')
    elif (sys.hexversion >= 0x03000000 and isinstance(pth, str)) \
            or (sys.hexversion < 0x03000000
                and isinstance(pth, unicode)):
        p = pth
    elif not isinstance(pth, collections.Iterable):
        raise TypeError('p must be str, bytes, or an iterable '
                        'solely of one of those two.')
    else:
        # Check that all elements are unicode or bytes.
        if sys.hexversion >= 0x03000000:
            if not all([isinstance(s, (bytes, str)) for s in pth]):
                raise TypeError('Elements of p must be str or bytes.')
        else:
            if not all([isinstance(s, (str, unicode)) for s in pth]):
                raise TypeError('Elements of p must be str or '
                                'unicode.')
        # Escape (and possibly convert to unicode) each element and
        # then join them all together.
        parts = [None] * len(pth)
        for i, s in enumerate(pth):
            if isinstance(s, bytes):
                s = s.decode('utf-8')
            parts[i] = escape_path(s)
        parts = tuple(parts)
        p = posixpath.join(*parts)

    # Remove double slashes and a non-root trailing slash.
    path = posixpath.normpath(p)

    # Extract the group name and the target name (will be a dataset if
    # data can be mapped to it, but will end up being made into a group
    # otherwise). As HDF5 files use posix path conventions, posixpath
    # will do everything.
    groupname = posixpath.dirname(path)
    targetname = posixpath.basename(path)

    # If groupname got turned into blank, then it is just root.
    if len(groupname) == 0:
        groupname = b'/'.decode('ascii')
    # If targetname got turned blank, then it is the current directory.
    if len(targetname) == 0:
        targetname = b'.'.decode('ascii')
    return groupname, targetname
<SYSTEM_TASK:> Writes an array of objects recursively. <END_TASK> <USER_TASK:> Description: def write_object_array(f, data, options): """ Writes an array of objects recursively. Writes the elements of the given object array recursively in the HDF5 Group ``options.group_for_references`` and returns an ``h5py.Reference`` array to all the elements. Parameters ---------- f : h5py.File The HDF5 file handle that is open. data : numpy.ndarray of objects Numpy object array to write the elements of. options : hdf5storage.core.Options hdf5storage options object. Returns ------- obj_array : numpy.ndarray of h5py.Reference A reference array pointing to all the elements written to the HDF5 file. For those that couldn't be written, the respective element points to the canonical empty. Raises ------ TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``. See Also -------- read_object_array hdf5storage.Options.group_for_references h5py.Reference """
    # We need to grab the special reference dtype and make an empty
    # array to store all the references in.
    ref_dtype = h5py.special_dtype(ref=h5py.Reference)
    data_refs = np.zeros(shape=data.shape, dtype='object')

    # We need to make sure that the group to hold references is
    # present, and create it if it isn't.
    if options.group_for_references not in f:
        f.create_group(options.group_for_references)

    grp2 = f[options.group_for_references]

    if not isinstance(grp2, h5py.Group):
        del f[options.group_for_references]
        f.create_group(options.group_for_references)
        grp2 = f[options.group_for_references]

    # The Dataset 'a' needs to be present as the canonical empty. It is
    # just an np.uint32/64([0, 0]) with its MATLAB_class set to
    # 'canonical empty' and the 'MATLAB_empty' attribute set. If it
    # isn't present or is incorrectly formatted, it is created,
    # truncating anything previously there.
    try:
        dset_a = grp2['a']
        if dset_a.shape != (2,) \
                or not dset_a.dtype.name.startswith('uint') \
                or np.any(dset_a[...] != np.uint64([0, 0])) \
                or get_attribute_string(dset_a, 'MATLAB_class') != \
                'canonical empty' \
                or get_attribute(dset_a, 'MATLAB_empty') != 1:
            del grp2['a']
            dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
            set_attribute_string(dset_a, 'MATLAB_class',
                                 'canonical empty')
            set_attribute(dset_a, 'MATLAB_empty', np.uint8(1))
    except:
        dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
        set_attribute_string(dset_a, 'MATLAB_class', 'canonical empty')
        set_attribute(dset_a, 'MATLAB_empty', np.uint8(1))

    # Go through all the elements of data and write them, grabbing
    # their references and putting them in data_refs. They will be put
    # in group_for_references, which is also what the H5PATH needs to
    # be set to if we are doing MATLAB compatibility (otherwise, the
    # attribute needs to be deleted). If an element can't be written
    # (doing matlab compatibility, but it isn't compatible with matlab
    # and action_for_matlab_incompatible option is True), the reference
    # to the canonical empty will be used for the reference array to
    # point to.
    grp2name = grp2.name
    for index, x in np.ndenumerate(data):
        name_for_ref = next_unused_name_in_group(grp2, 16)
        write_data(f, grp2, name_for_ref, x, None, options)
        try:
            dset = grp2[name_for_ref]
            data_refs[index] = dset.ref
            if options.matlab_compatible:
                set_attribute_string(dset, 'H5PATH', grp2name)
            else:
                del_attribute(dset, 'H5PATH')
        except:
            data_refs[index] = dset_a.ref

    # Now, the dtype needs to be changed to the reference type and the
    # whole thing copied over to data_to_store.
    return data_refs.astype(ref_dtype).copy()
<SYSTEM_TASK:> Reads an array of objects recursively. <END_TASK> <USER_TASK:> Description: def read_object_array(f, data, options): """ Reads an array of objects recursively. Read the elements of the given HDF5 Reference array recursively and constructs a ``numpy.object_`` array from its elements, which is returned. Parameters ---------- f : h5py.File The HDF5 file handle that is open. data : numpy.ndarray of h5py.Reference The array of HDF5 References to read and make an object array from. options : hdf5storage.core.Options hdf5storage options object. Raises ------ NotImplementedError If reading the object from file is currently not supported. Returns ------- obj_array : numpy.ndarray of numpy.object\_ The Python object array containing the items pointed to by `data`. See Also -------- write_object_array hdf5storage.Options.group_for_references h5py.Reference """
    # Go through all the elements of data and read them using their
    # references, putting the output in a new object array.
    data_derefed = np.zeros(shape=data.shape, dtype='object')
    for index, x in np.ndenumerate(data):
        data_derefed[index] = read_data(f, None, None, options,
                                        dsetgrp=f[x])
    return data_derefed
<SYSTEM_TASK:> Gives a name that isn't used in a Group. <END_TASK> <USER_TASK:> Description: def next_unused_name_in_group(grp, length): """ Gives a name that isn't used in a Group. Generates a name of the desired length that is not a Dataset or Group in the given group. Note, if length is not large enough and `grp` is full enough, there may be no available names meaning that this function will hang. Parameters ---------- grp : h5py.Group or h5py.File The HDF5 Group (or File if at '/') to generate an unused name in. length : int Number of characters the name should be. Returns ------- name : str A name that isn't already an existing Dataset or Group in `grp`. """
    # While
    #
    #  ltrs = string.ascii_letters + string.digits
    #  name = ''.join([random.choice(ltrs) for i in range(length)])
    #
    # seems intuitive, its performance is abysmal compared to
    #
    #  '%0{0}x'.format(length) % random.getrandbits(length * 4)
    #
    # The difference is a factor of 20. Idea from
    #
    # https://stackoverflow.com/questions/2782229/most-lightweight-way-
    # to-create-a-random-string-and-a-random-hexadecimal-number/
    # 35161595#35161595
    fmt = '%0{0}x'.format(length)
    name = fmt % random.getrandbits(length * 4)
    while name in grp:
        name = fmt % random.getrandbits(length * 4)
    return name
<SYSTEM_TASK:> Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form. <END_TASK> <USER_TASK:> Description: def convert_numpy_str_to_uint16(data): """ Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form. Convert a ``numpy.unicode_`` or an array of them (they are UTF-32 strings) to UTF-16 in the equivalent array of ``numpy.uint16``. The conversion will throw an exception if any characters cannot be converted to UTF-16. Strings are expanded along rows (across columns) so a 2x3x4 array of 10 element strings will get turned into a 2x30x4 array of uint16's if every UTF-32 character converts easily to a UTF-16 singlet, as opposed to a UTF-16 doublet. Parameters ---------- data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_ The string or array of them to convert. Returns ------- array : numpy.ndarray of numpy.uint16 The result of the conversion. Raises ------ UnicodeEncodeError If a UTF-32 character has no UTF-16 representation. See Also -------- convert_numpy_str_to_uint32 convert_to_numpy_str """
    # An empty string should be an empty uint16
    if data.nbytes == 0:
        return np.uint16([])

    # We need to use the UTF-16 codec for our endianness. Using the
    # right one means we don't have to worry about removing the BOM.
    if sys.byteorder == 'little':
        codec = 'UTF-16LE'
    else:
        codec = 'UTF-16BE'

    # numpy.char.encode can do the conversion element wise. Then, we
    # just have to convert to uint16 with the appropriate dimensions.
    # The dimensions are gotten from the shape of the converted data
    # with the number of columns increased by the number of words
    # (pairs of bytes) in the strings.
    cdata = np.char.encode(np.atleast_1d(data), codec)
    shape = list(cdata.shape)
    shape[-1] *= (cdata.dtype.itemsize // 2)
    return np.ndarray(shape=shape, dtype='uint16',
                      buffer=cdata.tostring())
<SYSTEM_TASK:> Converts a numpy.unicode\_ to its numpy.uint32 representation. <END_TASK> <USER_TASK:> Description: def convert_numpy_str_to_uint32(data): """ Converts a numpy.unicode\_ to its numpy.uint32 representation. Convert a ``numpy.unicode_`` or an array of them (they are UTF-32 strings) into the equivalent array of ``numpy.uint32`` that is byte for byte identical. Strings are expanded along rows (across columns) so a 2x3x4 array of 10 element strings will get turned into a 2x30x4 array of uint32's. Parameters ---------- data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_ The string or array of them to convert. Returns ------- array : numpy.ndarray of numpy.uint32 The result of the conversion. See Also -------- convert_numpy_str_to_uint16 convert_to_numpy_str """
if data.nbytes == 0:
    # An empty string should be an empty uint32.
    return np.uint32([])
else:
    # We need to calculate the new shape from the current shape,
    # which will have to be expanded along the rows to fit all the
    # characters (the dtype.itemsize gets the number of bytes in
    # each string, which is just 4 times the number of
    # characters). Then it is a matter of getting a view of the
    # string (in flattened form so that it is contiguous) as uint32
    # and then reshaping it.
    shape = list(np.atleast_1d(data).shape)
    shape[-1] *= data.dtype.itemsize//4
    return data.flatten().view(np.uint32).reshape(tuple(shape))
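Since UTF-32 code units are simply Unicode code points, the uint32 form is byte-for-byte identical (same assumptions as the sketch above):

import numpy as np

s = np.unicode_('ab')
u32 = convert_numpy_str_to_uint32(s)
assert u32.tolist() == [ord('a'), ord('b')]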
<SYSTEM_TASK:> Decodes possibly complex data read from an HDF5 file. <END_TASK> <USER_TASK:> Description: def decode_complex(data, complex_names=(None, None)): """ Decodes possibly complex data read from an HDF5 file. Decodes possibly complex datasets read from an HDF5 file. HDF5 doesn't have a native complex type, so they are stored as H5T_COMPOUND types with fields such as 'r' and 'i' for the real and imaginary parts. As there is no standardization for field names, the field names have to be given explicitly, or the fieldnames in `data` analyzed for proper decoding to figure out the names. A variety of reasonably expected combinations of field names are checked and used if available to decode. If decoding is not possible, it is returned as is. Parameters ---------- data : arraylike The data read from an HDF5 file, that might be complex, to decode into the proper Numpy complex type. complex_names : tuple of 2 str and/or Nones, optional ``tuple`` of the names to use (in order) for the real and imaginary fields. A ``None`` indicates that various common field names should be tried. Returns ------- c : decoded data or data If `data` can be decoded into a complex type, the decoded complex version is returned. Otherwise, `data` is returned unchanged. See Also -------- encode_complex Notes ----- Currently looks for real field names of ``('r', 're', 'real')`` and imaginary field names of ``('i', 'im', 'imag', 'imaginary')`` ignoring case. """
# Now, complex types are stored in HDF5 files as an H5T_COMPOUND type
# with fields along the lines of ('r', 're', 'real') and ('i', 'im',
# 'imag', 'imaginary') for the real and imaginary parts, which most
# likely won't be properly extracted back into a Python complex type
# unless the proper h5py configuration is set. Since we can't depend
# on it being set and adjusting it is hazardous (the setting is
# global), it is best to just decode it manually. These fields are
# obtained from the fields of its dtype. Obviously, if there are no
# fields, then there is nothing to do.
if data.dtype.fields is None:
    return data
fields = list(data.dtype.fields)
# If there aren't exactly two fields, then it can't be complex.
if len(fields) != 2:
    return data
# We need to grab the field names for the real and imaginary
# parts. This will be done by checking which list, if any, each
# field belongs to and setting the corresponding name (the names
# start as the given complex_names, typically None, so we know if
# one isn't found).
real_fields = ['r', 're', 'real']
imag_fields = ['i', 'im', 'imag', 'imaginary']
cnames = list(complex_names)
for s in fields:
    if s.lower() in real_fields:
        cnames[0] = s
    elif s.lower() in imag_fields:
        cnames[1] = s
# If the real and imaginary fields were found, construct the complex
# form from the fields. This is done by finding the complex type
# that they cast to, making an array, and then setting the
# parts. Otherwise, return what we were given because it isn't in
# the right form.
if cnames[0] is not None and cnames[1] is not None:
    cdata = np.result_type(data[cnames[0]].dtype, \
        data[cnames[1]].dtype, 'complex64').type(data[cnames[0]])
    cdata.imag = data[cnames[1]]
    return cdata
else:
    return data
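For illustration (a sketch, not taken from the package's tests), a compound array with 'r'/'i' fields, as h5py would return it, decodes to complex128:

import numpy as np

raw = np.zeros(2, dtype=[('r', 'f8'), ('i', 'f8')])
raw['r'] = [1.0, 2.0]
raw['i'] = [3.0, 4.0]
c = decode_complex(raw)
assert c.dtype == np.complex128
assert c[1] == 2.0 + 4.0j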
<SYSTEM_TASK:> Encodes complex data to have arbitrary complex field names. <END_TASK> <USER_TASK:> Description: def encode_complex(data, complex_names): """ Encodes complex data to have arbitrary complex field names. Encodes complex `data` to have the real and imaginary field names given in `complex_names`. This is needed because the field names have to be set so that it can be written to an HDF5 file with the right field names (HDF5 doesn't have a native complex type, so H5T_COMPOUND types have to be used). Parameters ---------- data : arraylike The data to encode as a complex type with the desired real and imaginary part field names. complex_names : tuple of 2 str ``tuple`` of the names to use (in order) for the real and imaginary fields. Returns ------- d : encoded data `data` encoded into having the specified field names for the real and imaginary parts. See Also -------- decode_complex """
# Grab the dtype name, and convert it to the right non-complex type
# if it isn't already one.
dtype_name = data.dtype.name
if dtype_name[0:7] == 'complex':
    dtype_name = 'float' + str(int(float(dtype_name[7:])/2))
# Create the new version of the data with the right field names for
# the real and imaginary parts. This is easy to do by putting the
# right dtype in the view function.
dt = np.dtype([(complex_names[0], dtype_name),
               (complex_names[1], dtype_name)])
return data.view(dt).copy()
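The inverse direction (sketch under the same assumptions), encoding a complex128 array with 'real'/'imag' as the field names:

import numpy as np

z = np.array([1 + 2j, 3 + 4j])                 # complex128
enc = encode_complex(z, ('real', 'imag'))
assert enc.dtype.names == ('real', 'imag')
assert enc['real'][1] == 3.0 and enc['imag'][0] == 2.0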
<SYSTEM_TASK:> Convert an attribute value to a string. <END_TASK> <USER_TASK:> Description: def convert_attribute_to_string(value): """ Convert an attribute value to a string. Converts the attribute value to a string if possible (``None`` is returned if it isn't a string type). .. versionadded:: 0.2 Parameters ---------- value : The Attribute value. Returns ------- s : str or None The ``str`` value of the attribute if the conversion is possible, or ``None`` if not. """
if value is None: return value elif (sys.hexversion >= 0x03000000 and isinstance(value, str)) \ or (sys.hexversion < 0x03000000 \ and isinstance(value, unicode)): return value elif isinstance(value, bytes): return value.decode() elif isinstance(value, np.unicode_): return str(value) elif isinstance(value, np.bytes_): return value.decode() else: return None
<SYSTEM_TASK:> Sets an attribute on a Dataset or Group. <END_TASK> <USER_TASK:> Description: def set_attribute(target, name, value): """ Sets an attribute on a Dataset or Group. If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten if it differs from `value`. Notes ----- ``set_attributes_all`` is the fastest way to set and delete Attributes in bulk. Parameters ---------- target : Dataset or Group Dataset or Group to set the attribute of. name : str Name of the attribute to set. value : numpy type other than numpy.unicode\_ Value to set the attribute to. See Also -------- set_attributes_all """
try: target.attrs.modify(name, value) except: target.attrs.create(name, value)
<SYSTEM_TASK:> Sets an attribute to a string on a Dataset or Group. <END_TASK> <USER_TASK:> Description: def set_attribute_string(target, name, value): """ Sets an attribute to a string on a Dataset or Group. If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten if it differs from `value`. Notes ----- ``set_attributes_all`` is the fastest way to set and delete Attributes in bulk. Parameters ---------- target : Dataset or Group Dataset or Group to set the string attribute of. name : str Name of the attribute to set. value : string Value to set the attribute to. Can be any sort of string type that will convert to a ``numpy.bytes_`` See Also -------- set_attributes_all """
set_attribute(target, name, np.bytes_(value))
<SYSTEM_TASK:> Sets an attribute to an array of strings on a Dataset or Group. <END_TASK> <USER_TASK:> Description: def set_attribute_string_array(target, name, string_list): """ Sets an attribute to an array of strings on a Dataset or Group. If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten with the list of strings `string_list` (they will be vlen strings). Notes ----- ``set_attributes_all`` is the fastest way to set and delete Attributes in bulk. Parameters ---------- target : Dataset or Group Dataset or Group to set the string array attribute of. name : str Name of the attribute to set. string_list : list of str List of strings to set the attribute to. Strings must be ``str``. See Also -------- set_attributes_all """
s_list = [convert_to_str(s) for s in string_list] if sys.hexversion >= 0x03000000: target.attrs.create(name, s_list, dtype=h5py.special_dtype(vlen=str)) else: target.attrs.create(name, s_list, dtype=h5py.special_dtype(vlen=unicode))
<SYSTEM_TASK:> Set Attributes in bulk and optionally discard others. <END_TASK> <USER_TASK:> Description: def set_attributes_all(target, attributes, discard_others=True): """ Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array """
attrs = target.attrs
existing = dict(attrs.items())
# Generate special dtype for string arrays.
if sys.hexversion >= 0x03000000:
    str_arr_dtype = h5py.special_dtype(vlen=str)
else:
    str_arr_dtype = h5py.special_dtype(vlen=unicode)
# Go through each attribute. If it is already present, modify it if
# possible and create it otherwise (deleting the old value).
for k, (kind, value) in attributes.items():
    if kind == 'string_array':
        attrs.create(k, [convert_to_str(s) for s in value],
                     dtype=str_arr_dtype)
    else:
        if kind == 'string':
            value = np.bytes_(value)
        if k not in existing:
            attrs.create(k, value)
        else:
            try:
                if value.dtype == existing[k].dtype \
                        and value.shape == existing[k].shape:
                    attrs.modify(k, value)
                else:
                    attrs.create(k, value)
            except:
                attrs.create(k, value)
# Discard all other attributes.
if discard_others:
    for k in set(existing) - set(attributes):
        del attrs[k]
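A usage sketch (the attribute names are illustrative, and the module's `convert_to_str` helper is assumed to be in scope):

import h5py
import numpy as np

with h5py.File('attrs.h5', 'w') as f:          # hypothetical file
    dset = f.create_dataset('x', data=np.arange(3))
    set_attributes_all(dset,
                       {'units': ('string', 'meters'),
                        'labels': ('string_array', ['a', 'b']),
                        'count': ('value', np.uint64(3))},
                       discard_others=True)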
<SYSTEM_TASK:> Find, but don't load, all third party marshaller plugins. <END_TASK> <USER_TASK:> Description: def find_thirdparty_marshaller_plugins(): """ Find, but don't load, all third party marshaller plugins. Third party marshaller plugins declare the entry point ``'hdf5storage.marshallers.plugins'`` with the name being the Marshaller API version and the target being a function that returns a ``tuple`` or ``list`` of all the marshallers provided by that plugin when given the hdf5storage version (``str``) as its only argument. .. versionadded:: 0.2 Returns ------- plugins : dict The marshaller entry points obtained from third party plugins. The keys are the Marshaller API versions (``str``) and the values are ``dict`` of the entry points, with the module names as the keys (``str``) and the values being the entry points (``pkg_resources.EntryPoint``). See Also -------- supported_marshaller_api_versions """
all_plugins = tuple(pkg_resources.iter_entry_points( 'hdf5storage.marshallers.plugins')) return {ver: {p.module_name: p for p in all_plugins if p.name == ver} for ver in supported_marshaller_api_versions()}
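A third-party package could advertise its marshallers through this entry point with a setup.py along the following lines (the package name, module path, and API version string are all hypothetical):

from setuptools import setup

setup(
    name='hdf5storage-myplugin',
    version='0.1',
    packages=['myplugin'],
    entry_points={
        'hdf5storage.marshallers.plugins': [
            # entry point name = Marshaller API version; the target is a
            # function taking the hdf5storage version and returning the
            # plugin's marshallers.
            '1.0 = myplugin:get_marshallers',
        ],
    },
)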
<SYSTEM_TASK:> Save a dictionary of python types to a MATLAB MAT file. <END_TASK> <USER_TASK:> Description: def savemat(file_name, mdict, appendmat=True, format='7.3', oned_as='row', store_python_metadata=True, action_for_matlab_incompatible='error', marshaller_collection=None, truncate_existing=False, truncate_invalid_matlab=False, **keywords): """ Save a dictionary of python types to a MATLAB MAT file. Saves the data provided in the dictionary `mdict` to a MATLAB MAT file. `format` determines which kind/version of file to use. The '7.3' version, which is HDF5 based, is handled by this package and all types that this package can write are supported. Versions 4 and 5 are not HDF5 based, so everything is dispatched to the SciPy package's ``scipy.io.savemat`` function, which this function is modelled after (arguments not specific to this package have the same names, etc.). Parameters ---------- file_name : str or file-like object Name of the MAT file to store in. The '.mat' extension is added on automatically if not present if `appendmat` is set to ``True``. An open file-like object can be passed if the writing is being dispatched to SciPy (`format` < 7.3). mdict : dict The dictionary of variables and their contents to store in the file. appendmat : bool, optional Whether to append the '.mat' extension to `file_name` if it doesn't already end in it or not. format : {'4', '5', '7.3'}, optional The MATLAB mat file format to use. The '7.3' format is handled by this package while the '4' and '5' formats are dispatched to SciPy. oned_as : {'row', 'column'}, optional Whether 1D arrays should be turned into row or column vectors. store_python_metadata : bool, optional Whether or not to store Python type information. Doing so allows most types to be read back perfectly. Only applicable if not dispatching to SciPy (`format` >= 7.3). action_for_matlab_incompatible : str, optional The action to perform writing data that is not MATLAB compatible. The actions are to write the data anyway ('ignore'), don't write the incompatible data ('discard'), or throw a ``TypeNotMatlabCompatibleError`` exception. marshaller_collection : MarshallerCollection, optional Collection of marshallers to disk to use. Only applicable if not dispatching to SciPy (`format` >= 7.3). truncate_existing : bool, optional Whether to truncate the file if it already exists before writing to it. truncate_invalid_matlab : bool, optional Whether to truncate a file if the file doesn't have the proper header (userblock in HDF5 terms) setup for MATLAB metadata to be placed. **keywords : Additional keywords arguments to be passed onto ``scipy.io.savemat`` if dispatching to SciPy (`format` < 7.3). Raises ------ ImportError If `format` < 7.3 and the ``scipy`` module can't be found. NotImplementedError If writing a variable in `mdict` is not supported. exceptions.TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `action_for_matlab_incompatible` is set to ``'error'``. Notes ----- Writing the same data and then reading it back from disk using the HDF5 based version 7.3 format (the functions in this package) or the older format (SciPy functions) can lead to very different results. Each package supports a different set of data types and converts them to and from the same MATLAB types differently. See Also -------- loadmat : Equivalent function to do reading. scipy.io.savemat : SciPy function this one models after and dispatches to. Options writes : Function used to do the actual writing. """
# If format is a number less than 7.3, the call needs to be # dispatched to the scipy version, if it is available, with all the # relevant and extra keywords options provided. if float(format) < 7.3: import scipy.io scipy.io.savemat(file_name, mdict, appendmat=appendmat, format=format, oned_as=oned_as, **keywords) return # Append .mat if it isn't on the end of the file name and we are # supposed to. if appendmat and not file_name.endswith('.mat'): file_name = file_name + '.mat' # Make the options with matlab compatibility forced. options = Options(store_python_metadata=store_python_metadata, \ matlab_compatible=True, oned_as=oned_as, \ action_for_matlab_incompatible=action_for_matlab_incompatible, \ marshaller_collection=marshaller_collection) # Write the variables in the dictionary to file. writes(mdict=mdict, filename=file_name, truncate_existing=truncate_existing, truncate_invalid_matlab=truncate_invalid_matlab, options=options)
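Typical calls (a sketch; writing with format '5' additionally requires SciPy to be installed, and the file names are illustrative):

import numpy as np

data = {'a': np.arange(10.0), 'label': 'trial 1'}
savemat('results.mat', data)                  # HDF5-based v7.3 file
savemat('results_v5.mat', data, format='5')   # dispatched to scipy.io.savemat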
<SYSTEM_TASK:> Loads data from a MATLAB MAT file. <END_TASK> <USER_TASK:> Description: def loadmat(file_name, mdict=None, appendmat=True, variable_names=None, marshaller_collection=None, **keywords): """ Loads data from a MATLAB MAT file. Reads data from the specified variables (or all) in a MATLAB MAT file. There are many different formats of MAT files. This package can only handle the HDF5 based ones (the version 7.3 and later). As SciPy's ``scipy.io.loadmat`` function can handle the earlier formats, if this function cannot read the file, it will dispatch it onto the scipy function with all the calling arguments it uses passed on. This function is modelled after the SciPy one (arguments not specific to this package have the same names, etc.). Parameters ---------- file_name : str Name of the MAT file to read from. The '.mat' extension is added on automatically if not present if `appendmat` is set to ``True``. mdict : dict, optional The dictionary to insert read variables into appendmat : bool, optional Whether to append the '.mat' extension to `file_name` if it doesn't already end in it or not. variable_names : None or sequence, optional The variable names to read from the file. ``None`` selects all. marshaller_collection : MarshallerCollection, optional Collection of marshallers from disk to use. Only applicable if not dispatching to SciPy (version 7.3 and newer files). **keywords : Additional keywords arguments to be passed onto ``scipy.io.loadmat`` if dispatching to SciPy if the file is not a version 7.3 or later format. Returns ------- dict Dictionary of all the variables read from the MAT file (name as the key, and content as the value). Raises ------ ImportError If it is not a version 7.3 .mat file and the ``scipy`` module can't be found when dispatching to SciPy. exceptions.CantReadError If reading the data can't be done. Notes ----- Writing the same data and then reading it back from disk using the HDF5 based version 7.3 format (the functions in this package) or the older format (SciPy functions) can lead to very different results. Each package supports a different set of data types and converts them to and from the same MATLAB types differently. See Also -------- savemat : Equivalent function to do writing. scipy.io.loadmat : SciPy function this one models after and dispatches to. Options reads : Function used to do the actual reading. """
# Will first assume that it is the HDF5 based 7.3 format. If an
# OSError occurs, then it wasn't an HDF5 file and the scipy function
# can be tried instead.
try:
    # Make the options with the given marshallers.
    options = Options(marshaller_collection=marshaller_collection)
    # Append .mat if it isn't on the end of the file name and we are
    # supposed to.
    if appendmat and not file_name.endswith('.mat'):
        filename = file_name + '.mat'
    else:
        filename = file_name
    # Read everything if we were instructed.
    if variable_names is None:
        data = dict()
        with h5py.File(filename, mode='r') as f:
            for k in f:
                # Read if not group_for_references. Data that
                # produces errors when read is discarded (the OSError
                # that would happen if this is not an HDF5 file
                # would already have happened when opening the
                # file).
                if f[k].name != options.group_for_references:
                    try:
                        data[utilities.unescape_path(k)] = \
                            utilities.read_data(f, f, k, options)
                    except:
                        pass
    else:
        # Extract the desired fields all together and then pack them
        # into a dictionary one by one.
        values = reads(paths=variable_names, filename=filename,
                       options=options)
        data = dict()
        for i, name in enumerate(variable_names):
            data[name] = values[i]
    # Read all the variables, stuff them into mdict, and return it.
    if mdict is None:
        mdict = dict()
    for k, v in data.items():
        mdict[k] = v
    return mdict
except OSError:
    import scipy.io
    return scipy.io.loadmat(file_name, mdict, appendmat=appendmat,
                            variable_names=variable_names,
                            **keywords)
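And reading back (sketch, continuing the hypothetical file from the savemat example):

everything = loadmat('results.mat')                     # all variables
subset = loadmat('results.mat', variable_names=['a'])   # just 'a'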
<SYSTEM_TASK:> Update the full marshaller list and other data structures. <END_TASK> <USER_TASK:> Description: def _update_marshallers(self): """ Update the full marshaller list and other data structures. Makes a full list of both builtin and user marshallers and rebuilds internal data structures used for looking up which marshaller to use for reading/writing Python objects to/from file. Also checks for whether the required modules are present or not, loading the required modules (if not doing lazy loading), and whether the required modules are imported already or not. """
# Combine all sets of marshallers.
self._marshallers = []
for v in self._priority:
    if v == 'builtin':
        self._marshallers.extend(self._builtin_marshallers)
    elif v == 'plugin':
        self._marshallers.extend(self._plugin_marshallers)
    elif v == 'user':
        self._marshallers.extend(self._user_marshallers)
    else:
        raise ValueError('priority attribute has an illegal '
                         'element value.')
# Determine whether the required modules are present, do module
# loading, and determine whether the required modules are
# imported.
self._has_required_modules = len(self._marshallers) * [False]
self._imported_required_modules = \
    len(self._marshallers) * [False]
for i, m in enumerate(self._marshallers):
    # Check if the required modules are here.
    try:
        for name in m.required_parent_modules:
            if name not in sys.modules \
                    and pkgutil.find_loader(name) is None:
                raise ImportError('module not present')
    except ImportError:
        self._has_required_modules[i] = False
    except:
        raise
    else:
        self._has_required_modules[i] = True
    # Modules obviously can't be fully loaded if not all are
    # present.
    if not self._has_required_modules[i]:
        self._imported_required_modules[i] = False
        continue
    # Check if all modules are loaded or not, and load them if
    # doing lazy loading.
    try:
        for name in m.required_modules:
            if name not in sys.modules:
                raise ImportError('module not loaded yet.')
    except ImportError:
        if self._lazy_loading:
            self._imported_required_modules[i] = False
        else:
            success = self._import_marshaller_modules(m)
            self._has_required_modules[i] = success
            self._imported_required_modules[i] = success
    except:
        raise
    else:
        self._imported_required_modules[i] = True
# Construct the dictionary to look up the appropriate marshaller
# by type, the equivalent one to read data types given type
# strings needs to be created from it (basically, we have to
# make the key be the python_type_string from it), and the
# equivalent one to read data types given MATLAB class strings
# needs to be created from it (basically, we have to make the
# key be the matlab_class from it).
#
# Marshallers earlier in the list have priority (meaning that the
# builtins have the highest). Since the types can be specified
# as strings as well, duplicates will be checked for by converting
# each type to its fully qualified name string if it isn't
# already a string.
types_as_str = set()
self._types = dict()
self._type_strings = dict()
self._matlab_classes = dict()
for i, m in enumerate(self._marshallers):
    # types.
    for tp in m.types:
        if isinstance(tp, str):
            tp_as_str = tp
        else:
            tp_as_str = tp.__module__ + '.' + tp.__name__
        if tp_as_str not in types_as_str:
            self._types[tp_as_str] = i
            types_as_str.add(tp_as_str)
    # type strings
    for type_string in m.python_type_strings:
        if type_string not in self._type_strings:
            self._type_strings[type_string] = i
    # matlab classes.
    for matlab_class in m.matlab_classes:
        if matlab_class not in self._matlab_classes:
            self._matlab_classes[matlab_class] = i
<SYSTEM_TASK:> Imports the modules required by the marshaller. <END_TASK> <USER_TASK:> Description: def _import_marshaller_modules(self, m): """ Imports the modules required by the marshaller. Parameters ---------- m : marshaller The marshaller to load the modules for. Returns ------- success : bool Whether the modules `m` requires could be imported successfully or not. """
try: for name in m.required_modules: if name not in sys.modules: if _has_importlib: importlib.import_module(name) else: __import__(name) except ImportError: return False except: raise else: return True
<SYSTEM_TASK:> Gets the appropriate marshaller for a type. <END_TASK> <USER_TASK:> Description: def get_marshaller_for_type(self, tp): """ Gets the appropriate marshaller for a type. Retrieves the marshaller, if any, that can be used to read/write a Python object with type 'tp'. The modules it requires, if available, will be loaded. Parameters ---------- tp : type or str Python object ``type`` (which would be the class reference) or its string representation like ``'collections.deque'``. Returns ------- marshaller : marshaller or None The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found. has_required_modules : bool Whether the required modules for reading the type are present or not. See Also -------- hdf5storage.Marshallers.TypeMarshaller.types """
if not isinstance(tp, str): tp = tp.__module__ + '.' + tp.__name__ if tp in self._types: index = self._types[tp] else: return None, False m = self._marshallers[index] if self._imported_required_modules[index]: return m, True if not self._has_required_modules[index]: return m, False success = self._import_marshaller_modules(m) self._has_required_modules[index] = success self._imported_required_modules[index] = success return m, success
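A lookup sketch, assuming `mc` is an already-constructed `MarshallerCollection` whose builtin marshallers cover `collections.deque`:

import collections

m, have_modules = mc.get_marshaller_for_type(collections.deque)
if m is not None and have_modules:
    print('deque is handled by', type(m).__name__)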
<SYSTEM_TASK:> Gets the appropriate marshaller for a type string. <END_TASK> <USER_TASK:> Description: def get_marshaller_for_type_string(self, type_string): """ Gets the appropriate marshaller for a type string. Retrieves the marshaller, if any, that can be used to read/write a Python object with the given type string. The modules it requires, if available, will be loaded. Parameters ---------- type_string : str Type string for a Python object. Returns ------- marshaller : marshaller or None The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found. has_required_modules : bool Whether the required modules for reading the type are present or not. See Also -------- hdf5storage.Marshallers.TypeMarshaller.python_type_strings """
if type_string in self._type_strings: index = self._type_strings[type_string] m = self._marshallers[index] if self._imported_required_modules[index]: return m, True if not self._has_required_modules[index]: return m, False success = self._import_marshaller_modules(m) self._has_required_modules[index] = success self._imported_required_modules[index] = success return m, success else: return None, False
<SYSTEM_TASK:> Gets the appropriate marshaller for a MATLAB class string. <END_TASK> <USER_TASK:> Description: def get_marshaller_for_matlab_class(self, matlab_class): """ Gets the appropriate marshaller for a MATLAB class string. Retrieves the marshaller, if any, that can be used to read/write a Python object associated with the given MATLAB class string. The modules it requires, if available, will be loaded. Parameters ---------- matlab_class : str MATLAB class string for a Python object. Returns ------- marshaller : marshaller or None The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found. has_required_modules : bool Whether the required modules for reading the type are present or not. See Also -------- hdf5storage.Marshallers.TypeMarshaller.matlab_classes """
if matlab_class in self._matlab_classes: index = self._matlab_classes[matlab_class] m = self._marshallers[index] if self._imported_required_modules[index]: return m, True if not self._has_required_modules[index]: return m, False success = self._import_marshaller_modules(m) self._has_required_modules[index] = success self._imported_required_modules[index] = success return m, success else: return None, False
<SYSTEM_TASK:> Adds a new, blank node to the graph. <END_TASK> <USER_TASK:> Description: def new_node(self): """Adds a new, blank node to the graph. Returns the node id of the new node."""
node_id = self.generate_node_id() node = {'id': node_id, 'edges': [], 'data': {} } self.nodes[node_id] = node self._num_nodes += 1 return node_id
<SYSTEM_TASK:> Adds a new edge from node_a to node_b that has a cost. <END_TASK> <USER_TASK:> Description: def new_edge(self, node_a, node_b, cost=1): """Adds a new edge from node_a to node_b that has a cost. Returns the edge id of the new edge."""
# Verify that both nodes exist in the graph try: self.nodes[node_a] except KeyError: raise NonexistentNodeError(node_a) try: self.nodes[node_b] except KeyError: raise NonexistentNodeError(node_b) # Create the new edge edge_id = self.generate_edge_id() edge = {'id': edge_id, 'vertices': (node_a, node_b), 'cost': cost, 'data': {} } self.edges[edge_id] = edge self.nodes[node_a]['edges'].append(edge_id) self._num_edges += 1 return edge_id
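Putting the two constructors together (a sketch; the class name `Graph` and its id-generator helpers are assumptions, since only the methods are shown here):

g = Graph()
a = g.new_node()
b = g.new_node()
e = g.new_edge(a, b, cost=5)   # directed edge a -> b with cost 5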
<SYSTEM_TASK:> Determines whether there is an edge from node_a to node_b. <END_TASK> <USER_TASK:> Description: def adjacent(self, node_a, node_b): """Determines whether there is an edge from node_a to node_b. Returns True if such an edge exists, otherwise returns False."""
neighbors = self.neighbors(node_a) return node_b in neighbors
<SYSTEM_TASK:> Returns the cost of moving along the edge that connects node_a to node_b. <END_TASK> <USER_TASK:> Description: def edge_cost(self, node_a, node_b): """Returns the cost of moving along the edge that connects node_a to node_b. Returns +inf if no such edge exists."""
cost = float('inf') node_object_a = self.get_node(node_a) for edge_id in node_object_a['edges']: edge = self.get_edge(edge_id) tpl = (node_a, node_b) if edge['vertices'] == tpl: cost = edge['cost'] break return cost
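Note that edges are directed, so the reverse direction reports +inf (sketch, same assumed `Graph` class as above):

g = Graph()
a, b = g.new_node(), g.new_node()
g.new_edge(a, b, cost=5)
assert g.edge_cost(a, b) == 5
assert g.edge_cost(b, a) == float('inf')   # no edge b -> a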
<SYSTEM_TASK:> Returns the node object identified by "node_id". <END_TASK> <USER_TASK:> Description: def get_node(self, node_id): """Returns the node object identified by "node_id"."""
try: node_object = self.nodes[node_id] except KeyError: raise NonexistentNodeError(node_id) return node_object
<SYSTEM_TASK:> Removes all the edges from node_a to node_b from the graph. <END_TASK> <USER_TASK:> Description: def delete_edge_by_nodes(self, node_a, node_b): """Removes all the edges from node_a to node_b from the graph."""
node = self.get_node(node_a) # Determine the edge ids edge_ids = [] for e_id in node['edges']: edge = self.get_edge(e_id) if edge['vertices'][1] == node_b: edge_ids.append(e_id) # Delete the edges for e in edge_ids: self.delete_edge_by_id(e)
<SYSTEM_TASK:> Removes the node identified by node_id from the graph. <END_TASK> <USER_TASK:> Description: def delete_node(self, node_id): """Removes the node identified by node_id from the graph."""
node = self.get_node(node_id)

# Remove all edges from the node. Iterate over a copy, since
# deleting an edge mutates the node's edge list (as the separate
# collect-then-delete step in delete_edge_by_nodes suggests).
for e in list(node['edges']):
    self.delete_edge_by_id(e)

# Remove all edges to the node
edges = [edge_id for edge_id, edge in list(self.edges.items())
         if edge['vertices'][1] == node_id]
for e in edges:
    self.delete_edge_by_id(e)

# Remove the node from the node list
del self.nodes[node_id]
self._num_nodes -= 1
<SYSTEM_TASK:> Moves an edge originating from node_a so that it originates from node_b. <END_TASK> <USER_TASK:> Description: def move_edge_source(self, edge_id, node_a, node_b): """Moves an edge originating from node_a so that it originates from node_b."""
# Grab the edge edge = self.get_edge(edge_id) # Alter the vertices edge['vertices'] = (node_b, edge['vertices'][1]) # Remove the edge from node_a node = self.get_node(node_a) node['edges'].remove(edge_id) # Add the edge to node_b node = self.get_node(node_b) node['edges'].append(edge_id)
<SYSTEM_TASK:> Returns a list of edge ids connecting node_a to node_b. <END_TASK> <USER_TASK:> Description: def get_edge_ids_by_node_ids(self, node_a, node_b): """Returns a list of edge ids connecting node_a to node_b."""
# Check if the nodes are adjacent if not self.adjacent(node_a, node_b): return [] # They're adjacent, so pull the list of edges from node_a and determine which ones point to node_b node = self.get_node(node_a) return [edge_id for edge_id in node['edges'] if self.get_edge(edge_id)['vertices'][1] == node_b]
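A final end-to-end sketch tying the deletion and lookup methods together (same assumed `Graph` class; `delete_edge_by_id` and `neighbors` are referenced above but not shown, and are assumed to behave as described):

g = Graph()
a, b = g.new_node(), g.new_node()
e1 = g.new_edge(a, b)
e2 = g.new_edge(a, b, cost=2)
assert g.get_edge_ids_by_node_ids(a, b) == [e1, e2]
g.delete_node(b)    # also removes every edge into or out of b
assert g.get_edge_ids_by_node_ids(a, b) == []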