text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
<SYSTEM_TASK:> return sheet names of a book. <END_TASK> <USER_TASK:> Description: def sheetNames(book=None): """return sheet names of a book. Args: book (str, optional): If a book is given, pull names from that book. Otherwise, try the active one Returns: list of sheet names (typical case). None if book has no sheets. False if book doesn't exlist. """
if book: if not book.lower() in [x.lower() for x in bookNames()]: return False else: book=activeBook() if not book: return False poBook=PyOrigin.WorksheetPages(book) if not len(poBook): return None return [x.GetName() for x in poBook.Layers()]
<SYSTEM_TASK:> returns the pyorigin object for a sheet. <END_TASK> <USER_TASK:> Description: def getSheet(book=None,sheet=None): """returns the pyorigin object for a sheet."""
# figure out what book to use if book and not book.lower() in [x.lower() for x in bookNames()]: print("book %s doesn't exist"%book) return if book is None: book=activeBook().lower() if book is None: print("no book given or selected") return # figure out what sheet to use if sheet and not sheet.lower() in [x.lower() for x in sheetNames(book)]: print("sheet %s doesn't exist"%sheet) return if sheet is None: sheet=activeSheet().lower() if sheet is None: return("no sheet given or selected") print # by now, we know the book/sheet exists and can be found for poSheet in PyOrigin.WorksheetPages(book).Layers(): if poSheet.GetName().lower()==sheet.lower(): return poSheet return False
<SYSTEM_TASK:> Delete a sheet from a book. If either isn't given, use the active one. <END_TASK> <USER_TASK:> Description: def sheetDelete(book=None,sheet=None): """ Delete a sheet from a book. If either isn't given, use the active one. """
if book is None: book=activeBook() if sheet in sheetNames(): PyOrigin.WorksheetPages(book).Layers(sheetNames().index(sheet)).Destroy()
<SYSTEM_TASK:> Delete all sheets which contain no data <END_TASK> <USER_TASK:> Description: def sheetDeleteEmpty(bookName=None): """Delete all sheets which contain no data"""
if bookName is None: bookName = activeBook() if not bookName.lower() in [x.lower() for x in bookNames()]: print("can't clean up a book that doesn't exist:",bookName) return poBook=PyOrigin.WorksheetPages(bookName) namesToKill=[] for i,poSheet in enumerate([poSheet for poSheet in poBook.Layers()]): poFirstCol=poSheet.Columns(0) if poFirstCol.GetLongName()=="" and poFirstCol.GetData()==[]: namesToKill.append(poSheet.GetName()) for sheetName in namesToKill: print("deleting empty sheet",sheetName) sheetDelete(bookName,sheetName)
<SYSTEM_TASK:> return the contents of a pickle file <END_TASK> <USER_TASK:> Description: def pickle_load(fname): """return the contents of a pickle file"""
assert type(fname) is str and os.path.exists(fname) print("loaded",fname) return pickle.load(open(fname,"rb"))
<SYSTEM_TASK:> save something to a pickle file <END_TASK> <USER_TASK:> Description: def pickle_save(thing,fname=None): """save something to a pickle file"""
if fname is None: fname=os.path.expanduser("~")+"/%d.pkl"%time.time() assert type(fname) is str and os.path.isdir(os.path.dirname(fname)) pickle.dump(thing, open(fname,"wb"),pickle.HIGHEST_PROTOCOL) print("saved",fname)
<SYSTEM_TASK:> return a dict with the code for each function <END_TASK> <USER_TASK:> Description: def getCodeBlocks(): """return a dict with the code for each function"""
raw=open("examples.py").read() d={} for block in raw.split("if __name__")[0].split("\ndef "): title=block.split("\n")[0].split("(")[0] if not title.startswith("demo_"): continue code=[x[4:] for x in block.split("\n")[1:] if x.startswith(" ")] d[title]="\n".join(code).strip() return d
<SYSTEM_TASK:> return a dict with the output of each function <END_TASK> <USER_TASK:> Description: def getOutputBlocks(): """return a dict with the output of each function"""
raw=open("output.txt").read() d={} for block in raw.split("\n####### ")[1:]: title=block.split("\n")[0].split("(")[0] block=block.split("\n",1)[1].strip() d[title]=block.split("\nfinished in ")[0] return d
<SYSTEM_TASK:> Turn a byte string from the command line into a unicode string. <END_TASK> <USER_TASK:> Description: def decodeCommandLine(self, cmdline): """Turn a byte string from the command line into a unicode string. """
codec = getattr(sys.stdin, 'encoding', None) or sys.getdefaultencoding() return unicode(cmdline, codec)
<SYSTEM_TASK:> Sort an ordered dictionary by value, descending. <END_TASK> <USER_TASK:> Description: def sort_dict(d, desc=True): """ Sort an ordered dictionary by value, descending. Args: d (OrderedDict): An ordered dictionary. desc (bool): If true, sort desc. Returns: OrderedDict: The sorted dictionary. """
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc) return OrderedDict(sort)
<SYSTEM_TASK:> Yield a sliding window over an iterable. <END_TASK> <USER_TASK:> Description: def window(seq, n=2): """ Yield a sliding window over an iterable. Args: seq (iter): The sequence. n (int): The window width. Yields: tuple: The next window. """
it = iter(seq) result = tuple(islice(it, n)) if len(result) == n: yield result for token in it: result = result[1:] + (token,) yield result
<SYSTEM_TASK:> Move the SubStore at the indicated location into the given site store's <END_TASK> <USER_TASK:> Description: def insertUserStore(siteStore, userStorePath): """ Move the SubStore at the indicated location into the given site store's directory and then hook it up to the site store's authentication database. @type siteStore: C{Store} @type userStorePath: C{FilePath} """
# The following may, but does not need to be in a transaction, because it # is merely an attempt to guess a reasonable filesystem name to use for # this avatar. The user store being operated on is expected to be used # exclusively by this process. ls = siteStore.findUnique(LoginSystem) unattachedSubStore = Store(userStorePath) for lm in unattachedSubStore.query(LoginMethod, LoginMethod.account == unattachedSubStore.findUnique(LoginAccount), sort=LoginMethod.internal.descending): if ls.accountByAddress(lm.localpart, lm.domain) is None: localpart, domain = lm.localpart, lm.domain break else: raise AllNamesConflict() unattachedSubStore.close() insertLocation = siteStore.newFilePath('account', domain, localpart + '.axiom') insertParentLoc = insertLocation.parent() if not insertParentLoc.exists(): insertParentLoc.makedirs() if insertLocation.exists(): raise DatabaseDirectoryConflict() userStorePath.moveTo(insertLocation) ss = SubStore(store=siteStore, storepath=insertLocation) attachedStore = ss.open() # migrateUp() manages its own transactions because it interacts with two # different stores. attachedStore.findUnique(LoginAccount).migrateUp()
<SYSTEM_TASK:> Move the SubStore for the given user account out of the given site store <END_TASK> <USER_TASK:> Description: def extractUserStore(userAccount, extractionDestination, legacySiteAuthoritative=True): """ Move the SubStore for the given user account out of the given site store completely. Place the user store's database directory into the given destination directory. @type userAccount: C{LoginAccount} @type extractionDestination: C{FilePath} @type legacySiteAuthoritative: C{bool} @param legacySiteAuthoritative: before moving the user store, clear its authentication information, copy that which is associated with it in the site store rather than trusting its own. Currently this flag is necessary (and defaults to true) because things like the ClickChronicle password-changer gizmo still operate on the site store. """
if legacySiteAuthoritative: # migrateDown() manages its own transactions, since it is copying items # between two different stores. userAccount.migrateDown() av = userAccount.avatars av.open().close() def _(): # We're separately deleting several Items from the site store, then # we're moving some files. If we cannot move the files, we don't want # to delete the items. # There is one unaccounted failure mode here: if the destination of the # move is on a different mount point, the moveTo operation will fall # back to a non-atomic copy; if all of the copying succeeds, but then # part of the deletion of the source files fails, we will be left # without a complete store in this site store's files directory, but # the account Items will remain. This will cause odd errors on login # and at other unpredictable times. The database is only one file, so # we will either remove it all or none of it. Resolving this requires # manual intervention currently: delete the substore's database # directory and the account items (LoginAccount and LoginMethods) # manually. # However, this failure is extremely unlikely, as it would almost # certainly indicate a misconfiguration of the permissions on the site # store's files area. As described above, a failure of the call to # os.rename(), if the platform's rename is atomic (which it generally # is assumed to be) will not move any files and will cause a revert of # the transaction which would have deleted the accompanying items. av.deleteFromStore() userAccount.deleteLoginMethods() userAccount.deleteFromStore() av.storepath.moveTo(extractionDestination) userAccount.store.transact(_)
<SYSTEM_TASK:> Retrieve account name information about the given database. <END_TASK> <USER_TASK:> Description: def getAccountNames(store, protocol=None): """ Retrieve account name information about the given database. @param store: An Axiom Store representing a user account. It must have been opened through the store which contains its account information. @return: A generator of two-tuples of (username, domain) which refer to the given store. """
return ((meth.localpart, meth.domain) for meth in getLoginMethods(store, protocol))
<SYSTEM_TASK:> Retrieve a list of all local domain names represented in the given store. <END_TASK> <USER_TASK:> Description: def getDomainNames(store): """ Retrieve a list of all local domain names represented in the given store. """
domains = set() domains.update(store.query( LoginMethod, AND(LoginMethod.internal == True, LoginMethod.domain != None)).getColumn("domain").distinct()) return sorted(domains)
<SYSTEM_TASK:> Create a copy of this LoginAccount and all associated LoginMethods in a different Store. <END_TASK> <USER_TASK:> Description: def cloneInto(self, newStore, avatars): """ Create a copy of this LoginAccount and all associated LoginMethods in a different Store. Return the copied LoginAccount. """
la = LoginAccount(store=newStore, password=self.password, avatars=avatars, disabled=self.disabled) for siteMethod in self.store.query(LoginMethod, LoginMethod.account == self): LoginMethod(store=newStore, localpart=siteMethod.localpart, domain=siteMethod.domain, internal=siteMethod.internal, protocol=siteMethod.protocol, verified=siteMethod.verified, account=la) return la
<SYSTEM_TASK:> Add a login method to this account, propogating up or down as necessary <END_TASK> <USER_TASK:> Description: def addLoginMethod(self, localpart, domain, protocol=ANY_PROTOCOL, verified=False, internal=False): """ Add a login method to this account, propogating up or down as necessary to site store or user store to maintain consistency. """
# Out takes you west or something if self.store.parent is None: # West takes you in otherStore = self.avatars.open() peer = otherStore.findUnique(LoginAccount) else: # In takes you east otherStore = self.store.parent subStoreItem = self.store.parent.getItemByID(self.store.idInParent) peer = otherStore.findUnique(LoginAccount, LoginAccount.avatars == subStoreItem) # Up and down take you home for store, account in [(otherStore, peer), (self.store, self)]: store.findOrCreate(LoginMethod, account=account, localpart=localpart, domain=domain, protocol=protocol, verified=verified, internal=internal)
<SYSTEM_TASK:> Set this account's password if the current password matches. <END_TASK> <USER_TASK:> Description: def replacePassword(self, currentPassword, newPassword): """ Set this account's password if the current password matches. @param currentPassword: The password to match against the current one. @param newPassword: The new password. @return: A deferred firing when the password has been changed. @raise BadCredentials: If the current password did not match. """
if unicode(currentPassword) != self.password: return fail(BadCredentials()) return self.setPassword(newPassword)
<SYSTEM_TASK:> Create a user account, add it to this LoginBase, and return it. <END_TASK> <USER_TASK:> Description: def addAccount(self, username, domain, password, avatars=None, protocol=u'email', disabled=0, internal=False, verified=True): """ Create a user account, add it to this LoginBase, and return it. This method must be called within a transaction in my store. @param username: the user's name. @param domain: the domain part of the user's name [XXX TODO: this really ought to say something about whether it's a Q2Q domain, a SIP domain, an HTTP realm, or an email address domain - right now the assumption is generally that it's an email address domain, but not always] @param password: A shared secret. @param avatars: (Optional). A SubStore which, if passed, will be used by cred as the target of all adaptations for this user. By default, I will create a SubStore, and plugins can be installed on that substore using the powerUp method to provide implementations of cred client interfaces. @raise DuplicateUniqueItem: if the 'avatars' argument already contains a LoginAccount. @return: an instance of a LoginAccount, with all attributes filled out as they are passed in, stored in my store. """
# unicode(None) == u'None', kids. if username is not None: username = unicode(username) if domain is not None: domain = unicode(domain) if password is not None: password = unicode(password) if self.accountByAddress(username, domain) is not None: raise DuplicateUser(username, domain) if avatars is None: avatars = self.makeAvatars(domain, username) subStore = avatars.open() # create this unconditionally; as the docstring says, we must be run # within a transaction, so if something goes wrong in the substore # transaction this item's creation will be reverted... la = LoginAccount(store=self.store, password=password, avatars=avatars, disabled=disabled) def createSubStoreAccountObjects(): LoginAccount(store=subStore, password=password, disabled=disabled, avatars=subStore) la.addLoginMethod(localpart=username, domain=domain, protocol=protocol, internal=internal, verified=verified) subStore.transact(createSubStoreAccountObjects) return la
<SYSTEM_TASK:> Identify an appropriate SQL error object for the given message for the <END_TASK> <USER_TASK:> Description: def identifySQLError(self, sql, args, e): """ Identify an appropriate SQL error object for the given message for the supported versions of sqlite. @return: an SQLError """
message = e.args[0] if message.startswith("table") and message.endswith("already exists"): return errors.TableAlreadyExists(sql, args, e) return errors.SQLError(sql, args, e)
<SYSTEM_TASK:> Construct a callable to be used as a weakref callback for cache entries. <END_TASK> <USER_TASK:> Description: def createCacheRemoveCallback(cacheRef, key, finalizer): """ Construct a callable to be used as a weakref callback for cache entries. The callable will invoke the provided finalizer, as well as removing the cache entry if the cache still exists and contains an entry for the given key. @type cacheRef: L{weakref.ref} to L{FinalizingCache} @param cacheRef: A weakref to the cache in which the corresponding cache item was stored. @param key: The key for which this value is cached. @type finalizer: callable taking 0 arguments @param finalizer: A user-provided callable that will be called when the weakref callback runs. """
def remove(reference): # Weakref callbacks cannot raise exceptions or DOOM ensues try: finalizer() except: logErrorNoMatterWhat() try: cache = cacheRef() if cache is not None: if key in cache.data: if cache.data[key] is reference: del cache.data[key] except: logErrorNoMatterWhat() return remove
<SYSTEM_TASK:> Add an entry to the cache. <END_TASK> <USER_TASK:> Description: def cache(self, key, value): """ Add an entry to the cache. A weakref to the value is stored, rather than a direct reference. The value must have a C{__finalizer__} method that returns a callable which will be invoked when the weakref is broken. @param key: The key identifying the cache entry. @param value: The value for the cache entry. """
fin = value.__finalizer__() try: # It's okay if there's already a cache entry for this key as long # as the weakref has already been broken. See the comment in # get() for an explanation of why this might happen. if self.data[key]() is not None: raise CacheInconsistency( "Duplicate cache key: %r %r %r" % ( key, value, self.data[key])) except KeyError: pass callback = createCacheRemoveCallback(self._ref(self), key, fin) self.data[key] = self._ref(value, callback) return value
<SYSTEM_TASK:> Get an entry from the cache by key. <END_TASK> <USER_TASK:> Description: def get(self, key): """ Get an entry from the cache by key. @raise KeyError: if the given key is not present in the cache. @raise CacheFault: (a L{KeyError} subclass) if the given key is present in the cache, but the value it points to is gone. """
o = self.data[key]() if o is None: # On CPython, the weakref callback will always(?) run before any # other code has a chance to observe that the weakref is broken; # and since the callback removes the item from the dict, this # branch of code should never run. However, on PyPy (and possibly # other Python implementations), the weakref callback does not run # immediately, thus we may be able to observe this intermediate # state. Should this occur, we remove the dict item ourselves, # and raise CacheFault (which is a KeyError subclass). del self.data[key] raise CacheFault( "FinalizingCache has %r but its value is no more." % (key,)) log.msg(interface=iaxiom.IStatEvent, stat_cache_hits=1, key=key) return o
<SYSTEM_TASK:> Update the UBCPI XBlock's content from an XML definition. <END_TASK> <USER_TASK:> Description: def parse_from_xml(root): """ Update the UBCPI XBlock's content from an XML definition. We need to be strict about the XML we accept, to avoid setting the XBlock to an invalid state (which will then be persisted). Args: root (lxml.etree.Element): The XML definition of the XBlock's content. Returns: A dictionary of all of the XBlock's content. Raises: UpdateFromXmlError: The XML definition is invalid """
# Check that the root has the correct tag if root.tag != 'ubcpi': raise UpdateFromXmlError(_('Every peer instruction tool must contain an "ubcpi" element.')) display_name_el = root.find('display_name') if display_name_el is None: raise UpdateFromXmlError(_('Every peer instruction tool must contain a "display_name" element.')) else: display_name = _safe_get_text(display_name_el) rationale_size_min = int(root.attrib['rationale_size_min']) if 'rationale_size_min' in root.attrib else None rationale_size_max = int(root.attrib['rationale_size_max']) if 'rationale_size_max' in root.attrib else None question_el = root.find('question') if question_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "question" element.')) else: question = parse_question_xml(question_el) options_el = root.find('options') if options_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "options" element.')) else: options, correct_answer, correct_rationale = parse_options_xml(options_el) seeds_el = root.find('seeds') if seeds_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "seeds" element.')) else: seeds = parse_seeds_xml(seeds_el) algo = unicode(root.attrib['algorithm']) if 'algorithm' in root.attrib else None num_responses = unicode(root.attrib['num_responses']) if 'num_responses' in root.attrib else None return { 'display_name': display_name, 'question_text': question, 'options': options, 'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max}, 'correct_answer': correct_answer, 'correct_rationale': correct_rationale, 'seeds': seeds, 'algo': {"name": algo, 'num_responses': num_responses} }
<SYSTEM_TASK:> Serialize the options in peer instruction XBlock to xml <END_TASK> <USER_TASK:> Description: def serialize_options(options, block): """ Serialize the options in peer instruction XBlock to xml Args: options (lxml.etree.Element): The <options> XML element. block (PeerInstructionXBlock): The XBlock with configuration to serialize. Returns: None """
for index, option_dict in enumerate(block.options): option = etree.SubElement(options, 'option') # set correct option and rationale if index == block.correct_answer: option.set('correct', u'True') if hasattr(block, 'correct_rationale'): rationale = etree.SubElement(option, 'rationale') rationale.text = block.correct_rationale['text'] text = etree.SubElement(option, 'text') text.text = option_dict.get('text', '') serialize_image(option_dict, option)
<SYSTEM_TASK:> Serialize the seeds in peer instruction XBlock to xml <END_TASK> <USER_TASK:> Description: def serialize_seeds(seeds, block): """ Serialize the seeds in peer instruction XBlock to xml Args: seeds (lxml.etree.Element): The <seeds> XML element. block (PeerInstructionXBlock): The XBlock with configuration to serialize. Returns: None """
for seed_dict in block.seeds: seed = etree.SubElement(seeds, 'seed') # options in xml starts with 1 seed.set('option', unicode(seed_dict.get('answer', 0) + 1)) seed.text = seed_dict.get('rationale', '')
<SYSTEM_TASK:> Serialize the Peer Instruction XBlock's content to XML. <END_TASK> <USER_TASK:> Description: def serialize_to_xml(root, block): """ Serialize the Peer Instruction XBlock's content to XML. Args: block (PeerInstructionXBlock): The peer instruction block to serialize. root (etree.Element): The XML root node to update. Returns: etree.Element """
root.tag = 'ubcpi' if block.rationale_size is not None: if block.rationale_size.get('min'): root.set('rationale_size_min', unicode(block.rationale_size.get('min'))) if block.rationale_size.get('max'): root.set('rationale_size_max', unicode(block.rationale_size['max'])) if block.algo: if block.algo.get('name'): root.set('algorithm', block.algo.get('name')) if block.algo.get('num_responses'): root.set('num_responses', unicode(block.algo.get('num_responses'))) display_name = etree.SubElement(root, 'display_name') display_name.text = block.display_name question = etree.SubElement(root, 'question') question_text = etree.SubElement(question, 'text') question_text.text = block.question_text['text'] serialize_image(block.question_text, question) options = etree.SubElement(root, 'options') serialize_options(options, block) seeds = etree.SubElement(root, 'seeds') serialize_seeds(seeds, block)
<SYSTEM_TASK:> Obtains the lvm and vg_t handle. Usually you would never need to use this method <END_TASK> <USER_TASK:> Description: def open(self): """ Obtains the lvm and vg_t handle. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError """
if not self.handle: self.lvm.open() self.__vgh = lvm_vg_open(self.lvm.handle, self.name, self.mode) if not bool(self.__vgh): raise HandleError("Failed to initialize VG Handle.")
<SYSTEM_TASK:> Closes the lvm and vg_t handle. Usually you would never need to use this method <END_TASK> <USER_TASK:> Description: def close(self): """ Closes the lvm and vg_t handle. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError """
if self.handle: cl = lvm_vg_close(self.handle) if cl != 0: raise HandleError("Failed to close VG handle after init check.") self.__vgh = None self.lvm.close()
<SYSTEM_TASK:> Returns the volume group extent count. <END_TASK> <USER_TASK:> Description: def extent_count(self): """ Returns the volume group extent count. """
self.open() count = lvm_vg_get_extent_count(self.handle) self.close() return count
<SYSTEM_TASK:> Returns the volume group free extent count. <END_TASK> <USER_TASK:> Description: def free_extent_count(self): """ Returns the volume group free extent count. """
self.open() count = lvm_vg_get_free_extent_count(self.handle) self.close() return count
<SYSTEM_TASK:> Returns the physical volume count. <END_TASK> <USER_TASK:> Description: def pv_count(self): """ Returns the physical volume count. """
self.open() count = lvm_vg_get_pv_count(self.handle) self.close() return count
<SYSTEM_TASK:> Returns the maximum allowed physical volume count. <END_TASK> <USER_TASK:> Description: def max_pv_count(self): """ Returns the maximum allowed physical volume count. """
self.open() count = lvm_vg_get_max_pv(self.handle) self.close() return count
<SYSTEM_TASK:> Returns the maximum allowed logical volume count. <END_TASK> <USER_TASK:> Description: def max_lv_count(self): """ Returns the maximum allowed logical volume count. """
self.open() count = lvm_vg_get_max_lv(self.handle) self.close() return count
<SYSTEM_TASK:> Returns True if the VG is clustered, False otherwise. <END_TASK> <USER_TASK:> Description: def is_clustered(self): """ Returns True if the VG is clustered, False otherwise. """
self.open() clust = lvm_vg_is_clustered(self.handle) self.close() return bool(clust)
<SYSTEM_TASK:> Returns True if the VG is exported, False otherwise. <END_TASK> <USER_TASK:> Description: def is_exported(self): """ Returns True if the VG is exported, False otherwise. """
self.open() exp = lvm_vg_is_exported(self.handle) self.close() return bool(exp)
<SYSTEM_TASK:> Returns True if the VG is partial, False otherwise. <END_TASK> <USER_TASK:> Description: def is_partial(self): """ Returns True if the VG is partial, False otherwise. """
self.open() part = lvm_vg_is_partial(self.handle) self.close() return bool(part)
<SYSTEM_TASK:> Returns the volume group sequence number. This number increases <END_TASK> <USER_TASK:> Description: def sequence(self): """ Returns the volume group sequence number. This number increases everytime the volume group is modified. """
self.open() seq = lvm_vg_get_seqno(self.handle) self.close() return seq
<SYSTEM_TASK:> Returns the volume group size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def size(self, units="MiB"): """ Returns the volume group size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open() size = lvm_vg_get_size(self.handle) self.close() return size_convert(size, units)
<SYSTEM_TASK:> Returns the volume group free size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def free_size(self, units="MiB"): """ Returns the volume group free size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open() size = lvm_vg_get_free_size(self.handle) self.close() return size_convert(size, units)
<SYSTEM_TASK:> Returns the volume group extent size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def extent_size(self, units="MiB"): """ Returns the volume group extent size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open() size = lvm_vg_get_extent_size(self.handle) self.close() return size_convert(size, units)
<SYSTEM_TASK:> Removes all logical volumes from the volume group. <END_TASK> <USER_TASK:> Description: def remove_all_lvs(self): """ Removes all logical volumes from the volume group. *Raises:* * HandleError, CommitError """
lvs = self.lvscan() for lv in lvs: self.remove_lv(lv)
<SYSTEM_TASK:> Set the value for a pair of terms. <END_TASK> <USER_TASK:> Description: def set_pair(self, term1, term2, value, **kwargs): """ Set the value for a pair of terms. Args: term1 (str) term2 (str) value (mixed) """
key = self.key(term1, term2) self.keys.update([term1, term2]) self.pairs[key] = value
<SYSTEM_TASK:> Get the value for a pair of terms. <END_TASK> <USER_TASK:> Description: def get_pair(self, term1, term2): """ Get the value for a pair of terms. Args: term1 (str) term2 (str) Returns: The stored value. """
key = self.key(term1, term2) return self.pairs.get(key, None)
<SYSTEM_TASK:> Index all term pair distances. <END_TASK> <USER_TASK:> Description: def index(self, text, terms=None, **kwargs): """ Index all term pair distances. Args: text (Text): The source text. terms (list): Terms to index. """
self.clear() # By default, use all terms. terms = terms or text.terms.keys() pairs = combinations(terms, 2) count = comb(len(terms), 2) for t1, t2 in bar(pairs, expected_size=count, every=1000): # Set the Bray-Curtis distance. score = text.score_braycurtis(t1, t2, **kwargs) self.set_pair(t1, t2, score)
<SYSTEM_TASK:> Get distances between an anchor term and all other terms. <END_TASK> <USER_TASK:> Description: def anchored_pairs(self, anchor): """ Get distances between an anchor term and all other terms. Args: anchor (str): The anchor term. Returns: OrderedDict: The distances, in descending order. """
pairs = OrderedDict() for term in self.keys: score = self.get_pair(anchor, term) if score: pairs[term] = score return utils.sort_dict(pairs)
<SYSTEM_TASK:> Return the nearest xterm 256 color code from rgb input. <END_TASK> <USER_TASK:> Description: def from_rgb(r, g=None, b=None): """ Return the nearest xterm 256 color code from rgb input. """
c = r if isinstance(r, list) else [r, g, b] best = {} for index, item in enumerate(colors): d = __distance(item, c) if(not best or d <= best['distance']): best = {'distance': d, 'index': index} if 'index' in best: return best['index'] else: return 1
<SYSTEM_TASK:> Parse command line arguments and run utilities. <END_TASK> <USER_TASK:> Description: def entry(): """Parse command line arguments and run utilities."""
parser = argparse.ArgumentParser() parser.add_argument( 'action', help='Action to take', choices=['from_hex', 'to_rgb', 'to_hex'], ) parser.add_argument( 'value', help='Value for the action', ) parsed = parser.parse_args() if parsed.action != "from_hex": try: parsed.value = int(parsed.value) except ValueError: raise argparse.ArgumentError( "Value for this action should be an integer", ) print(globals()[parsed.action](parsed.value))
<SYSTEM_TASK:> Return the SoftwareVersion object from store corresponding to the <END_TASK> <USER_TASK:> Description: def makeSoftwareVersion(store, version, systemVersion): """ Return the SoftwareVersion object from store corresponding to the version object, creating it if it doesn't already exist. """
return store.findOrCreate(SoftwareVersion, systemVersion=systemVersion, package=unicode(version.package), version=unicode(version.short()), major=version.major, minor=version.minor, micro=version.micro)
<SYSTEM_TASK:> List the software package version history of store. <END_TASK> <USER_TASK:> Description: def listVersionHistory(store): """ List the software package version history of store. """
q = store.query(SystemVersion, sort=SystemVersion.creation.descending) return [sv.longWindedRepr() for sv in q]
<SYSTEM_TASK:> Check if the current version is different from the previously recorded <END_TASK> <USER_TASK:> Description: def checkSystemVersion(s, versions=None): """ Check if the current version is different from the previously recorded version. If it is, or if there is no previously recorded version, create a version matching the current config. """
if versions is None: versions = getSystemVersions() currentVersionMap = dict([(v.package, v) for v in versions]) mostRecentSystemVersion = s.findFirst(SystemVersion, sort=SystemVersion.creation.descending) mostRecentVersionMap = dict([(v.package, v.asVersion()) for v in s.query(SoftwareVersion, (SoftwareVersion.systemVersion == mostRecentSystemVersion))]) if mostRecentVersionMap != currentVersionMap: currentSystemVersion = SystemVersion(store=s, creation=Time()) for v in currentVersionMap.itervalues(): makeSoftwareVersion(s, v, currentSystemVersion)
def reset(self):
    """Clear every column: names, descriptions, units, comments, types, data."""
    self.colNames = []
    self.colDesc = []
    self.colUnits = []
    self.colComments = []
    self.colTypes = []
    self.colData = []
def colDelete(self, colI=-1):
    """Delete the column at index ``colI`` (negative indices count from the end)."""
    # Every per-column list must stay in lockstep, so pop the same
    # index from each of them.
    for columnList in (self.colNames, self.colDesc, self.colUnits,
                       self.colComments, self.colTypes, self.colData):
        columnList.pop(colI)
    return
def onex(self):
    """
    Keep only the first X column: delete every other column whose type is 3 (X).
    """
    xColumns = [i for i in range(self.nCols) if self.colTypes[i] == 3]
    if len(xColumns) > 1:
        # Delete from the end so earlier indices stay valid.
        for i in xColumns[:0:-1]:
            self.colDelete(i)
def wiggle(self, noiseLevel=.1):
    """Slightly perturb every cell with uniform noise centered on zero. Used for testing."""
    offsets = np.random.rand(*self.data.shape) - .5
    self.data = self.data + offsets * noiseLevel
def nRows(self):
    """Return the row count: the length of the longest column (0 if no columns)."""
    if not self.nCols:
        return 0
    return max(len(column) for column in self.colData)
def data(self):
    """
    Return all of colData as a 2D (nRows x nCols) numpy float array.

    Cells with no data (short columns, or non-numeric values) are NaN.
    """
    # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``float`` is the documented replacement and is
    # equivalent (both mean float64 here).
    data = np.empty((self.nRows, self.nCols), dtype=float)
    data[:] = np.nan  # make everything nan by default
    for colNum, colData in enumerate(self.colData):
        # Keep only the numeric entries; everything else stays NaN.
        validIs = np.where([np.isreal(v) for v in colData])[0]
        validData = np.ones(len(colData)) * np.nan
        validData[validIs] = np.array(colData)[validIs]
        data[:len(colData), colNum] = validData  # only fill cells that have data
    return data
def open(self):
    """
    Obtain the lvm handle.  Usually only needed when doing operations
    through the ctypes function wrappers in conversion.py directly.

    *Raises:*

    * HandleError
    """
    if self.handle:
        return
    try:
        search_path = self.system_dir
    except AttributeError:
        search_path = ''
    self.__handle = lvm_init(search_path)
    if not bool(self.__handle):
        raise HandleError("Failed to initialize LVM handle.")
def close(self):
    """
    Close the lvm handle.  Usually only needed when doing operations
    through the ctypes function wrappers in conversion.py directly.

    *Raises:*

    * HandleError
    """
    if not self.handle:
        return
    if lvm_quit(self.handle) != 0:
        raise HandleError("Failed to close LVM handle.")
    self.__handle = None
def load_stopwords(self, path):
    """
    Load a set of stopwords.

    Args:
        path (str): The stopwords file path; when falsy, fall back to the
            stopwords file bundled with the textplot package.
    """
    if not path:
        bundled = pkgutil.get_data('textplot', 'data/stopwords.txt')
        self.stopwords = set(bundled.decode('utf8').splitlines())
        return
    with open(path) as handle:
        self.stopwords = set(handle.read().splitlines())
def most_frequent_terms(self, depth):
    """
    Get the ``depth`` most frequent terms in the text, then widen the cut
    to include every other term tied with the least frequent of those.

    Args:
        depth (int): The number of terms.

    Returns:
        set: The set of frequent terms.
    """
    counts = self.term_counts()

    # Top ``depth`` terms, and the instance count of the last one in.
    ranked = list(counts.keys())[:depth]
    cutoff = list(counts.values())[:depth][-1]

    # Merge in every other term with that same count, so the final
    # frequency bucket is not truncated arbitrarily.
    tied = self.term_count_buckets()[cutoff]
    return set(ranked) | set(tied)
def unstem(self, term):
    """
    Given a stemmed term, return the most common unstemmed variant.

    Args:
        term (str): A stemmed term.

    Returns:
        str: The unstemmed token.
    """
    variants = [self.tokens[i]['unstemmed'] for i in self.terms[term]]
    (winner, _count), = Counter(variants).most_common(1)
    return winner
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
    """
    Estimate the kernel density of the instances of ``term`` in the text.

    Args:
        term (str): A stemmed term.
        bandwidth (int): The kernel bandwidth.
        samples (int): The number of evenly-spaced sample points.
        kernel (str): The kernel function.

    Returns:
        np.array: The density estimate, scaled to integrate to 1.
    """
    # Offsets of the term instances, shaped (n, 1) for sklearn.
    offsets = np.array(self.terms[term])[:, np.newaxis]

    estimator = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(offsets)

    # Score an evenly-spaced array of sample points across the text.
    sample_points = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
    log_density = estimator.score_samples(sample_points)

    # Scale the scores so the curve integrates to 1.
    return np.exp(log_density) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
    """
    Compute the geometric area of the overlap between the kernel density
    estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns:
        float
    """
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)

    # Integrate the overlap.
    overlap = np.minimum(t1_kde, t2_kde)

    # BUG FIX: ``np.trapz`` was removed in NumPy 2.0 in favor of
    # ``np.trapezoid``; prefer the new name and fall back for older NumPy.
    # (``or`` short-circuits, so np.trapz is never touched on NumPy 2.)
    integrate = getattr(np, 'trapezoid', None) or np.trapz
    return integrate(overlap)
def score_cosine(self, term1, term2, **kwargs):
    """
    Compute a weighting score: 1 minus the cosine distance between the
    kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns:
        float
    """
    kde1 = self.kde(term1, **kwargs)
    kde2 = self.kde(term2, **kwargs)
    return 1 - distance.cosine(kde1, kde2)
def score_braycurtis(self, term1, term2, **kwargs):
    """
    Compute a weighting score: 1 minus the Bray-Curtis ("City Block"-style)
    distance between the kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns:
        float
    """
    kde1 = self.kde(term1, **kwargs)
    kde2 = self.kde(term2, **kwargs)
    return 1 - distance.braycurtis(kde1, kde2)
def plot_term_kdes(self, words, **kwargs):
    """
    Plot kernel density estimates for multiple words.

    Args:
        words (list): A list of unstemmed terms.
    """
    stemmer = PorterStemmer()
    for word in words:
        plt.plot(self.kde(stemmer.stem(word), **kwargs))
    plt.show()
def _tupleCompare(tuple1, ineq, tuple2,
                  eq=lambda a, b: (a == b),
                  ander=AND, orer=OR):
    """
    Compare two 'in-database tuples'.  Useful when sorting by a compound
    key and slicing into the middle of that query.

    Builds an OR over one clause per key position: equality on every
    earlier position ANDed with the inequality at the current position.
    """
    orholder = []
    for limit in range(len(tuple1)):
        # BUG FIX: on Python 3, zip() returns a non-subscriptable iterator,
        # so ``zip(...)[:limit]`` raised a TypeError; materialize the pairs
        # first (also valid on Python 2).
        pairs = list(zip(tuple1, tuple2))[:limit]
        eqconstraint = [eq(elem1, elem2) for elem1, elem2 in pairs]
        ineqconstraint = ineq(tuple1[limit], tuple2[limit])
        orholder.append(ander(*(eqconstraint + [ineqconstraint])))
    return orer(*orholder)
try:  # Python 2: basestring covers both str and unicode.
    _TEXT_TYPES = basestring  # noqa: F821
except NameError:  # Python 3: str is the only text type.
    _TEXT_TYPES = str


def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    """
    Truncates the rationale for analytics event emission if necessary

    Args:
        rationale (string): the string value of the rationale
        max_length (int): the max length for truncation

    Returns:
        truncated_value (string): the possibly truncated version of the rationale
        was_truncated (bool): returns true if the rationale is truncated
    """
    # BUG FIX: ``basestring`` does not exist on Python 3; the module-level
    # shim above keeps the text-type check working on both major versions.
    if isinstance(rationale, _TEXT_TYPES) and max_length is not None \
            and len(rationale) > max_length:
        return rationale[0:max_length], True
    return rationale, False
def validate_options(options):
    """
    Validate the options the course author configured; return a dict with
    an 'options_error' message when anything is invalid, otherwise None.
    """
    errors = []

    size = options['rationale_size']
    if int(size['min']) < 1:
        errors.append(_('Minimum Characters'))
    if int(size['max']) < 0 or int(size['max']) > MAX_RATIONALE_SIZE:
        errors.append(_('Maximum Characters'))

    # Only check min/max ordering when both bounds were individually valid.
    size_labels = [_('Minimum Characters'), _('Maximum Characters')]
    if not any(error in size_labels for error in errors) \
            and int(size['max']) <= int(size['min']):
        errors += size_labels

    try:
        num_responses = options['algo']['num_responses']
        if num_responses != '#' and int(num_responses) < 0:
            errors.append(_('Number of Responses'))
    except ValueError:
        errors.append(_('Not an Integer'))

    if errors:
        return {'options_error': _('Invalid Option(s): ') + ', '.join(errors)}
    return None
def get_student_item_dict(self, anonymous_user_id=None):
    """Create a student_item_dict from our surrounding context.

    See also: submissions.api for details.

    Args:
        anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
    Returns:
        (dict): The student item associated with this XBlock instance. This
            includes the student id, item id, and course id.
    """
    item_id = self._serialize_opaque_key(self.scope_ids.usage_id)

    # This is not the real way course_ids should work, but this is a
    # temporary expediency for LMS integration
    if hasattr(self, "xmodule_runtime"):
        course_id = self.get_course_id()  # pylint:disable=E1101
        if anonymous_user_id:
            student_id = anonymous_user_id
        else:
            student_id = self.xmodule_runtime.anonymous_student_id  # pylint:disable=E1101
    else:
        course_id = "edX/Enchantment_101/April_1"
        user_id = self.scope_ids.user_id
        student_id = '' if user_id is None else unicode(user_id)

    return {
        'student_id': student_id,
        'item_id': item_id,
        'course_id': course_id,
        'item_type': 'ubcpi',
    }
def get_answers_for_student(student_item):
    """
    Retrieve answers from backend for a student and question

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.

    Returns:
        Answers: answers for the student
    """
    submissions = sub_api.get_submissions(student_item)
    if not submissions:
        return Answers()

    newest = submissions[0].get('answer', {})
    return Answers(newest.get(ANSWER_LIST_KEY, []))
def add_answer_for_student(student_item, vote, rationale):
    """
    Add an answer for a student to the backend

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        vote (int): the option that student voted for
        rationale (str): the reason why the student vote for the option
    """
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)

    sub_api.create_submission(
        student_item,
        {ANSWER_LIST_KEY: answers.get_answers_as_list()})
def add_answer(self, vote, rationale):
    """
    Add an answer

    Args:
        vote (int): the option that student voted for
        rationale (str): the reason why the student vote for the option
    """
    entry = {VOTE_KEY: vote, RATIONALE_KEY: rationale}
    self.raw_answers.append(entry)
def createNew(cls, store, pathSegments):
    """
    Create a new SubStore, allocating a new file space for it.
    """
    if isinstance(pathSegments, basestring):
        raise ValueError(
            'Received %r instead of a sequence' % (pathSegments,))

    if store.dbdir is None:
        # Parent store is in-memory: no on-disk path for the substore.
        self = cls(store=store, storepath=None)
    else:
        self = cls(store=store,
                   storepath=store.newDirectory(*pathSegments))

    self.open()
    self.close()
    return self
def createStore(self, debug, journalMode=None):
    """
    Create the actual Store this Substore represents.
    """
    if self.storepath is not None:
        return Store(self.storepath.path,
                     parent=self.store,
                     idInParent=self.storeID,
                     debug=debug,
                     journalMode=journalMode)

    # In-memory substore: pin it on the parent so it doesn't fall out of cache.
    self.store._memorySubstores.append(self)
    if self.store.filesdir is None:
        filesdir = None
    else:
        filesdir = (self.store.filesdir.child("_substore_files")
                    .child(str(self.storeID))
                    .path)
    return Store(parent=self.store,
                 filesdir=filesdir,
                 idInParent=self.storeID,
                 debug=debug,
                 journalMode=journalMode)
def upgradeCatalog1to2(oldCatalog):
    """
    Create the _TagName instances that version 2 of Catalog automatically
    maintains (for the tagNames result) but version 1 never created.
    """
    newCatalog = oldCatalog.upgradeVersion('tag_catalog', 1, 2,
                                           tagCount=oldCatalog.tagCount)
    tags = newCatalog.store.query(Tag, Tag.catalog == newCatalog)
    for name in tags.getColumn("name").distinct():
        _TagName(store=newCatalog.store, catalog=newCatalog, name=name)
    return newCatalog
def tagNames(self):
    """
    Return an iterator of unicode strings - the unique tag names which have
    been applied to objects in this catalog.
    """
    query = self.store.query(_TagName, _TagName.catalog == self)
    return query.getColumn("name")
def tagsOf(self, obj):
    """
    Return an iterator of unicode strings - the tag names which apply to
    the given object.
    """
    query = self.store.query(
        Tag, AND(Tag.catalog == self, Tag.object == obj))
    return query.getColumn("name")
def loaded(self, oself, dbval):
    """
    Invoked when the item is loaded from the database, and when a reverted
    transaction restores this attribute's value.

    @param oself: an instance of an item which has this attribute.
    @param dbval: the underlying database value which was retrieved.
    """
    dbname, pyname = self.dbunderlying, self.underlying
    # Cache the raw database value and drop any stale Python-level value.
    setattr(oself, dbname, dbval)
    delattr(oself, pyname)
<SYSTEM_TASK:> Convert a Python value to a value suitable for inserting into the <END_TASK> <USER_TASK:> Description: def _convertPyval(self, oself, pyval): """ Convert a Python value to a value suitable for inserting into the database. @param oself: The object on which this descriptor is an attribute. @param pyval: The value to be converted. @return: A value legal for this column in the database. """
# convert to dbval later, I guess? if pyval is None and not self.allowNone: raise TypeError("attribute [%s.%s = %s()] must not be None" % ( self.classname, self.attrname, self.__class__.__name__)) return self.infilter(pyval, oself, oself.store)
<SYSTEM_TASK:> Generate and cache the subselect SQL and its arguments. Return the <END_TASK> <USER_TASK:> Description: def _queryContainer(self, store): """ Generate and cache the subselect SQL and its arguments. Return the subselect SQL. """
if self._subselectSQL is None: sql, args = self.container._sqlAndArgs('SELECT', self.container._queryTarget) self._subselectSQL, self._subselectArgs = sql, args return self._subselectSQL
<SYSTEM_TASK:> Smash whatever we got into a list and save the result in case we are <END_TASK> <USER_TASK:> Description: def _sequenceContainer(self, store): """ Smash whatever we got into a list and save the result in case we are executed multiple times. This keeps us from tripping up over generators and the like. """
if self._sequence is None: self._sequence = list(self.container) self._clause = ', '.join(['?'] * len(self._sequence)) return self._clause
<SYSTEM_TASK:> Filter each element of the data using the attribute type being <END_TASK> <USER_TASK:> Description: def _sequenceArgs(self, store): """ Filter each element of the data using the attribute type being tested for containment and hand back the resulting list. """
self._sequenceContainer(store) # Force _sequence to be valid return [self.attribute.infilter(pyval, None, store) for pyval in self._sequence]
def prepareInsert(self, oself, store):
    """
    Prepare for insertion into the database by rewriting the dbunderlying
    attribute of the item as a pathname relative to the store rather than
    an absolute pathname.
    """
    if not self.relative:
        return
    fspath = self.__get__(oself)
    oself.__dirty__[self.attrname] = (
        self, self.infilter(fspath, oself, store))
def restore(self, time=None):
    """
    Undelete the object.  Returns True if undeleted, False if it was
    already not deleted (or was deleted at a different time than ``time``).
    """
    if not self.deleted:
        return False
    when = time if time else self.deleted_at
    if when != self.deleted_at:
        return False
    self.deleted = False
    self.save()
    return True
def full_restore(self, using=None):
    """
    Restores itself, as well as objects that might have been deleted along
    with it if cascade is the deletion strategy.

    Returns a tuple ``(total_restored, {model_name: count})`` when there was
    something to restore; an empty Counter otherwise (preserved from the
    original -- NOTE(review): the two return shapes differ, confirm callers
    handle both).
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    restore_counter = Counter()

    if not self.deleted:
        # Nothing to restore.
        return restore_counter
    time = self.deleted_at

    self.collector = models.deletion.Collector(using=using)
    self.collector.collect([self])

    # BUG FIX: the original built an unused sorted(instances, ...) list and
    # called self.sort(), which is undefined on a model instance and would
    # raise AttributeError.  Sorting the collector orders the collected
    # objects by dependency -- presumably the original intent; confirm
    # against upstream.
    self.collector.sort()

    for qs in self.collector.fast_deletes:
        # TODO make sure the queryset delete has been made a soft delete
        for qs_instance in qs:
            restore_counter.update([qs_instance._meta.model_name])
            qs_instance.restore(time=time)

    for model, instances in self.collector.data.items():
        for instance in instances:
            restore_counter.update([instance._meta.model_name])
            instance.restore(time=time)

    return sum(restore_counter.values()), dict(restore_counter)
self.collector = models.deletion.Collector(using=using) self.collector.collect([self], keep_parents=keep_parents)
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
    """
    Connect to AWS ec2

    :type region: str
    :param region: AWS region to connect to
    :type access_key: str
    :param access_key: AWS access key id
    :type secret_key: str
    :param secret_key: AWS secret access key
    :returns: boto.ec2.connection.EC2Connection -- EC2 connection
    """
    if access_key:
        # Explicit credentials were supplied.
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        connection = ec2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key)
    else:
        # Try to derive the region from instance metadata, then fall back
        # to env vars / boto credential files.
        metadata = get_instance_metadata(timeout=1, num_retries=1)
        if metadata:
            try:
                region = metadata['placement']['availability-zone'][:-1]
            except KeyError:
                pass
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        connection = ec2.connect_to_region(region)

    if not connection:
        logger.error('An error occurred when connecting to EC2')
        sys.exit(1)
    return connection
def validlocations(configuration=None):
    # type: () -> List[Dict]
    """
    Read valid locations from HDX (cached on the class after the first call).

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        List[Dict]: A list of valid locations
    """
    if Locations._validlocations is None:
        if configuration is None:
            configuration = Configuration.read()
        Locations._validlocations = configuration.call_remoteckan(
            'group_list', {'all_fields': True})
    return Locations._validlocations
def get_location_from_HDX_code(code, locations=None, configuration=None):
    # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str]
    """Get location from HDX location code

    Args:
        code (str): code for which to get location name
        locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Optional[str]: location name
    """
    if locations is None:
        locations = Locations.validlocations(configuration)
    wanted = code.upper()
    for locdict in locations:
        if locdict['name'].upper() == wanted:
            return locdict['title']
    return None
def CheckDirectory(self, path, extension='yaml'):
    """Validates definition files in a directory.

    Args:
        path (str): path of the definition file.
        extension (Optional[str]): extension of the filenames to read.

    Returns:
        bool: True if the directory contains valid definitions.
    """
    if extension:
        glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
    else:
        glob_spec = os.path.join(path, '*')

    # Check every file (list comprehension, not a generator, so every file
    # is validated even after the first failure, matching the original).
    return all([self.CheckFile(definition_file)
                for definition_file in sorted(glob.glob(glob_spec))])
def CheckFile(self, path):
    """Validates the definition in a file.

    Args:
        path (str): path of the definition file.

    Returns:
        bool: True if the file contains valid definitions.
    """
    print('Checking: {0:s}'.format(path))

    definitions_registry = registry.DataTypeDefinitionsRegistry()
    definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()

    try:
        definitions_reader.ReadFile(definitions_registry, path)
        return True
    except KeyError as exception:
        logging.warning((
            'Unable to register data type definition in file: {0:s} with '
            'error: {1:s}').format(path, exception))
    except errors.FormatError as exception:
        logging.warning(
            'Unable to validate file: {0:s} with error: {1:s}'.format(
                path, exception))
    return False
def inline_css(html_message, encoding='unicode'):
    """
    Inlines all CSS in an HTML string

    Given an HTML document with CSS declared in the HEAD, inlines it into
    the applicable elements.  Used primarily in the preparation of styled
    emails.

    Arguments:
    html_message -- a string of HTML, including CSS
    """
    parsed = etree.HTML(html_message)
    conversion = Conversion()
    conversion.perform(parsed, html_message, '', encoding=encoding)
    return conversion.convertedHTML
<SYSTEM_TASK:> Checks if the byte stream is large enough for the data type. <END_TASK> <USER_TASK:> Description: def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size): """Checks if the byte stream is large enough for the data type. Args: byte_stream (bytes): byte stream. byte_offset (int): offset into the byte stream where to start. data_type_size (int): data type size. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the size of the byte stream cannot be determined. """
try: byte_stream_size = len(byte_stream) except Exception as exception: raise errors.MappingError(exception) if byte_stream_size - byte_offset < data_type_size: raise errors.ByteStreamTooSmallError( 'Byte stream too small requested: {0:d} available: {1:d}'.format( data_type_size, byte_stream_size))
def FoldValue(self, value):
    """Folds the data type into a value.

    Args:
        value (object): value.

    Returns:
        object: folded value.

    Raises:
        ValueError: if the data type definition cannot be folded into the value.
    """
    definition = self._data_type_definition
    if value is False and definition.false_value is not None:
        return definition.false_value
    if value is True and definition.true_value is not None:
        return definition.true_value
    raise ValueError('No matching True and False values')
def _CheckCompositeMap(self, data_type_definition):
    """Determines if the data type definition needs a composite map.

    Args:
        data_type_definition (DataTypeDefinition): structure data type
            definition.

    Returns:
        bool: True if a composite map is needed, False otherwise.

    Raises:
        FormatError: if a composite map is needed cannot be determined from
            the data type definition.
    """
    if not data_type_definition:
        raise errors.FormatError('Missing data type definition')

    members = getattr(data_type_definition, 'members', None)
    if not members:
        raise errors.FormatError('Invalid data type definition missing members')

    previous_byte_order = data_type_definition.byte_order
    for member_definition in members:
        # Any composite member forces a composite map.
        if member_definition.IsComposite():
            return True

        # TODO: check for padding type
        # TODO: determine if padding type can be defined as linear
        # A switch between two explicit (non-native) byte orders also
        # forces a composite map.
        if (previous_byte_order != definitions.BYTE_ORDER_NATIVE and
                member_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
                previous_byte_order != member_definition.byte_order):
            return True

        previous_byte_order = member_definition.byte_order

    return False
def GetName(self, number):
    """Retrieves the name of an enumeration value by number.

    Args:
        number (int): number.

    Returns:
        str: name of the enumeration value, or None if no corresponding
            enumeration value was found.
    """
    enum_value = self._data_type_definition.values_per_number.get(number, None)
    return enum_value.name if enum_value else None
def CreateDataTypeMap(self, definition_name):
    """Creates a specific data type map by name.

    Args:
        definition_name (str): name of the data type definition.

    Returns:
        DataTypeMap: data type map or None if the data type definition is
            not available.
    """
    definition = self._definitions_registry.GetDefinitionByName(
        definition_name)
    if not definition:
        return None
    return DataTypeMapFactory.CreateDataTypeMapByType(definition)
def CreateDataTypeMapByType(cls, data_type_definition):
    """Creates a specific data type map by type indicator.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Returns:
        DataTypeMap: data type map or None if the data type definition is
            not available.
    """
    map_class = cls._MAP_PER_DEFINITION.get(
        data_type_definition.TYPE_INDICATOR, None)
    return map_class(data_type_definition) if map_class else None
def IsComposite(self):
    """Determines if the data type is composite.

    A composite data type consists of other data types.

    Returns:
        bool: True if the data type is composite, False otherwise.
    """
    member = self.member_data_type_definition
    # Composite when conditional, or when the member type is itself composite.
    return bool(self.condition) or (member and member.IsComposite())